id | text | dataset_id
11279602 | <filename>100_days_challenge/days_1-5/day5_practice_adding_even.py
# Find the sum of the even numbers from 2 to 100
total = 0
for number in range(2, 101, 2):
total += number
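
# Equivalent one-liner using the built-in sum (a sketch):
#   total = sum(range(2, 101, 2))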
print(total) | StarcoderdataPython |
4879402 | from danlp.models import load_bert_base_model
import numpy as np
from data_handler.data_handler import DataHandler
from tqdm import tqdm
from torch import from_numpy
from sklearn.decomposition import PCA
#https://www.psycopg.org/docs/cursor.html#cursor-iterable
class AlgorithmHandler(DataHandler):
def embed_text(self):
model = load_bert_base_model()
def embed_sent(sent):
            # the DaNLP BERT base model can't handle more than X tokens
            # the following try/except treats the symptom but not the root cause.
            # OBS! when the root cause is solved, the following code may be used instead:
# embed_sent = lambda sent: model.embed_text(sent)
try:
return model.embed_text(sent)[1]
except Exception as E:
if "The size of tensor a" and "must match the size of tensor b" in str(E):
                    # (n is an arbitrary low number. OBS! this will still fail if each character becomes a token)
                    # TODO: this could partially be solved with a better word tokenizer
                    n = 512
                    # Here the long sentence is split and embedded, after which the average is returned
split_sent_on_idx = lambda i: model.embed_text(sent[i:i+n])[1].numpy()
split_points = range(0, len(sent), n)
split_sent_obj = map(split_sent_on_idx, split_points)
split_sent_np = np.array(list(split_sent_obj)).mean(axis=0)
                    # A PyTorch tensor is returned to match the type of the other sentence embeddings
return from_numpy(split_sent_np)
else:
raise E
        embed_full_txt = lambda full_txt: map(embed_sent, full_txt)
        self.embeded_sents_map = tqdm(map(embed_full_txt, self.cleaned_tokenized_map),
                                      "embedding all sents with BERT")
def merge_embsent_to_docsent(self):
#OBS! look into using reduce instead of np.mean
self.embeded_sents = []
def mean_of_emb_and_keep_sents(emb_sents):
emb_sents = list(emb_sents)
self.embeded_sents.append(emb_sents)
return np.array([t.numpy() for t in emb_sents]).mean(axis=0)
        self.embeded_doc_map = tqdm(map(mean_of_emb_and_keep_sents, self.embeded_sents_map),
                                    "reducing sentences to a single doc embedding")
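
    # Usage sketch (hypothetical; assumes the DataHandler base class provides
    # `cleaned_tokenized_map`, an iterable of tokenized documents):
    #   handler = AlgorithmHandler()
    #   handler.embed_text()
    #   handler.merge_embsent_to_docsent()
    #   doc_vectors = list(handler.embeded_doc_map)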
# def reduce_dim(self):
# pca = PCA(n_components=100, svd_solver="auto")
# to_100_dim = lambda emb_doc: pca.fit_transform(np.array(list(emb_doc)))
    #     self.embedded_reduced_doc = to_100_dim(self.embeded_doc) | StarcoderdataPython |
1999026 | # Simple vanity address generator in python
import argparse
import multiprocessing
import re
import time

import algosdk
from algosdk import account
def worker_address_search(i, regexes, q):
joined = "|".join(regexes)
print( f"worker {i} searching for {joined}" )
compiled = re.compile(joined,re.IGNORECASE)
i = 0
    while True:
vanity_private_key, vanity_address = account.generate_account()
i+=1
if compiled.match(vanity_address):
result = f"Address:{vanity_address}\n{algosdk.mnemonic.from_private_key(vanity_private_key)}"
q.put((True,result))
else:
            if i % 1237 == 0:
                q.put((False, 1237))  # report another batch of failed attempts
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='awesome algorand vanity address search')
    parser.add_argument('--processes', type=int, default=1, help='number of processes to use')
    parser.add_argument('--regex', type=str, required=True, help='regex pattern the address must match')
args=parser.parse_args()
print(f"using {args.processes} processes")
print("SECURITY WARNING - your mnemonic will appear below")
print("It is VERY important that you don't share your seed phrase with anyone, not even your mum!")
print("This controls access to all your funds in the wallet")
ps=[]
#regexes = \
#["[0-9]?N[O0][O0][B8]",
#"[0-9x]?NC[O0][I1]N"]
regexes=[args.regex]
with multiprocessing.Pool(processes=args.processes) as pool:
manager = multiprocessing.Manager()
q = manager.Queue()
try:
for i in range(args.processes):
workerbees = pool.apply_async( worker_address_search, (i, regexes, q,) )
start = time.time()
attempts = 0
while 1:
found, result = q.get()
if found:
print( f"{result}" )
else:
attempts += result
now = time.time()
if now-start > 10:
print(f"{attempts}..",end='',flush=True)
start = now
except KeyboardInterrupt:
pool.close()
pool.terminate()
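
# Example invocation (a sketch; the script name and pattern are hypothetical):
#   python vanity_search.py --processes 4 --regex "^COOL"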
| StarcoderdataPython |
6643902 | <filename>data_managers/data_manager_gatk_picard_index_builder/data_manager/data_manager_gatk_picard_index_builder.py
#!/usr/bin/env python
# <NAME>.
# Uses fasta sorting functions written by <NAME>.
import json
import optparse
import os
import shutil
import subprocess
import sys
import tempfile
CHUNK_SIZE = 2**20
DEFAULT_DATA_TABLE_NAME = "fasta_indexes"
def get_id_name( params, dbkey, fasta_description=None):
# TODO: ensure sequence_id is unique and does not already appear in location file
sequence_id = params['param_dict']['sequence_id']
if not sequence_id:
sequence_id = dbkey
sequence_name = params['param_dict']['sequence_name']
if not sequence_name:
sequence_name = fasta_description
if not sequence_name:
sequence_name = dbkey
return sequence_id, sequence_name
def build_picard_index( data_manager_dict, fasta_filename, target_directory, dbkey, sequence_id, sequence_name, jar, data_table_name=DEFAULT_DATA_TABLE_NAME ):
fasta_base_name = os.path.split( fasta_filename )[-1]
gatk_sorted_fasta_filename = os.path.join( target_directory, fasta_base_name )
shutil.copy( fasta_filename, gatk_sorted_fasta_filename )
_sort_fasta_gatk( gatk_sorted_fasta_filename )
sam_index_filename = '%s.fai' % gatk_sorted_fasta_filename
if not os.path.exists( sam_index_filename ):
sam_command = [ 'samtools', 'faidx', gatk_sorted_fasta_filename ]
_run_command( sam_command, target_directory )
args = [ 'java', '-jar', jar, 'R=%s' % gatk_sorted_fasta_filename, 'O=%s.dict' % sequence_id ]
_run_command( args, target_directory )
data_table_entry = dict( value=sequence_id, dbkey=dbkey, name=sequence_name, path=fasta_base_name )
_add_data_table_entry( data_manager_dict, data_table_name, data_table_entry )
def _run_command( command, target_directory ):
    # text mode so the chunks read below are str (matching sys.stderr.write on Python 3)
    tmp_stderr = tempfile.NamedTemporaryFile( prefix="tmp-data-manager-gatk_picard_index_builder-stderr", mode='w+' )
proc = subprocess.Popen( args=command, shell=False, cwd=target_directory, stderr=tmp_stderr.fileno() )
return_code = proc.wait()
if return_code:
tmp_stderr.flush()
tmp_stderr.seek( 0 )
sys.stderr.write( "Error building index:\n" )
while True:
chunk = tmp_stderr.read( CHUNK_SIZE )
if not chunk:
break
sys.stderr.write( chunk )
sys.exit( return_code )
tmp_stderr.close()
def _add_data_table_entry( data_manager_dict, data_table_name, data_table_entry ):
data_manager_dict['data_tables'] = data_manager_dict.get( 'data_tables', {} )
data_manager_dict['data_tables'][ data_table_name ] = data_manager_dict['data_tables'].get( data_table_name, [] )
data_manager_dict['data_tables'][ data_table_name ].append( data_table_entry )
return data_manager_dict
def _move_and_index_fasta_for_sorting( fasta_filename ):
unsorted_filename = tempfile.NamedTemporaryFile().name
shutil.move( fasta_filename, unsorted_filename )
fasta_offsets = {}
unsorted_fh = open( unsorted_filename )
while True:
offset = unsorted_fh.tell()
line = unsorted_fh.readline()
if not line:
break
if line.startswith( ">" ):
line = line.split( None, 1 )[0][1:]
fasta_offsets[ line ] = offset
unsorted_fh.close()
current_order = [x[1] for x in sorted( ( x[1], x[0] ) for x in fasta_offsets.items() )]
return ( unsorted_filename, fasta_offsets, current_order )
def _write_sorted_fasta( sorted_names, fasta_offsets, sorted_fasta_filename, unsorted_fasta_filename ):
unsorted_fh = open( unsorted_fasta_filename )
sorted_fh = open( sorted_fasta_filename, 'wb+' )
for name in sorted_names:
offset = fasta_offsets[ name ]
unsorted_fh.seek( offset )
sorted_fh.write( unsorted_fh.readline() )
while True:
line = unsorted_fh.readline()
if not line or line.startswith( ">" ):
break
sorted_fh.write( line )
unsorted_fh.close()
sorted_fh.close()
def _int_to_roman( integer ):
if not isinstance( integer, int ):
raise TypeError("expected integer, got %s" % type( integer ))
if not 0 < integer < 4000:
raise ValueError("Argument must be between 1 and 3999, got %s" % str( integer ))
ints = ( 1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1 )
nums = ( 'M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I' )
result = ""
for i in range( len( ints ) ):
count = int( integer / ints[ i ] )
result += nums[ i ] * count
integer -= ints[ i ] * count
return result
def _sort_fasta_gatk( fasta_filename ):
( unsorted_filename, fasta_offsets, current_order ) = _move_and_index_fasta_for_sorting( fasta_filename )
sorted_names = list(map( str, range( 1, 100 ) )) + list(map( _int_to_roman, range( 1, 100 ) )) + [ 'X', 'Y', 'M' ]
# detect if we have chrN, or just N
has_chr = False
for chrom in sorted_names:
if "chr%s" % chrom in current_order:
has_chr = True
break
if has_chr:
sorted_names = ["chr%s" % x for x in sorted_names]
else:
sorted_names.insert( 0, "MT" )
sorted_names.extend( [ "%s_random" % x for x in sorted_names ] )
existing_sorted_names = []
for name in sorted_names:
# Append each chromosome only once.
if name in current_order and name not in existing_sorted_names:
existing_sorted_names.append( name )
for name in current_order:
# TODO: confirm that non-canonical names do not need to be sorted specially
if name not in existing_sorted_names:
existing_sorted_names.append( name )
if existing_sorted_names == current_order:
shutil.move( unsorted_filename, fasta_filename )
else:
_write_sorted_fasta( existing_sorted_names, fasta_offsets, fasta_filename, unsorted_filename )
def main():
parser = optparse.OptionParser()
parser.add_option( '-f', '--fasta_filename', dest='fasta_filename', action='store', type="string", default=None, help='fasta_filename' )
parser.add_option( '-d', '--fasta_dbkey', dest='fasta_dbkey', action='store', type="string", default=None, help='fasta_dbkey' )
parser.add_option( '-t', '--fasta_description', dest='fasta_description', action='store', type="string", default=None, help='fasta_description' )
parser.add_option( '-n', '--data_table_name', dest='data_table_name', action='store', type="string", default=None, help='data_table_name' )
parser.add_option( '-j', '--jar', dest='jar', action='store', type="string", default=None, help='GATK .jar file' )
(options, args) = parser.parse_args()
filename = args[0]
params = json.loads( open( filename ).read() )
target_directory = params[ 'output_data' ][0]['extra_files_path']
os.mkdir( target_directory )
data_manager_dict = {}
if options.fasta_dbkey in [ None, '', '?' ]:
raise Exception( '"%s" is not a valid dbkey. You must specify a valid dbkey.' % ( options.fasta_dbkey ) )
sequence_id, sequence_name = get_id_name( params, dbkey=options.fasta_dbkey, fasta_description=options.fasta_description )
# build the index
build_picard_index( data_manager_dict,
options.fasta_filename,
target_directory,
options.fasta_dbkey,
sequence_id,
sequence_name,
options.jar,
data_table_name=options.data_table_name or DEFAULT_DATA_TABLE_NAME )
# save info to json file
    open( filename, 'w' ).write( json.dumps( data_manager_dict ) )
if __name__ == "__main__":
main()
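
# Example invocation (a sketch; every path and the dbkey are hypothetical):
#   python data_manager_gatk_picard_index_builder.py -f genome.fa -d hg19 -j picard.jar galaxy_params.json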
| StarcoderdataPython |
9618065 | <reponame>bobbybabra/codeGuild<gh_stars>0
# 1. The parameter weekday is True if it is a weekday, and
# the parameter vacation is True if we are on vacation.
# We sleep in if it is not a weekday or we're on vacation.
# Return True if we sleep in.
#
# This is the start of the function to get you started.
def sleep_in(weekday, vacation):
    # We sleep in if it is not a weekday or we are on vacation.
    return (not weekday) or vacation
print(sleep_in(False, False))  # Should return True
print(sleep_in(True, False))   # Should return False
print(sleep_in(False, True))   # Should return True
| StarcoderdataPython |
4976978 | """The main runtime of the MythX CLI."""
import logging
import sys
from pathlib import Path
import click
import yaml
from pythx import Client, MythXAPIError
from pythx.middleware.toolname import ClientToolNameMiddleware
from mythx_cli import __version__
from mythx_cli.analysis.list import analysis_list
from mythx_cli.analysis.report import analysis_report
from mythx_cli.analysis.status import analysis_status
from mythx_cli.analyze.command import analyze
from mythx_cli.formatter import FORMAT_RESOLVER
from mythx_cli.fuzz.arm import fuzz_arm
from mythx_cli.fuzz.disarm import fuzz_disarm
from mythx_cli.fuzz.run import fuzz_run
from mythx_cli.group.close import group_close
from mythx_cli.group.list import group_list
from mythx_cli.group.open import group_open
from mythx_cli.group.status import group_status
from mythx_cli.project import project_list
from mythx_cli.render.command import render
from mythx_cli.util import update_context
from mythx_cli.version.command import version
LOGGER = logging.getLogger("mythx-cli")
logging.basicConfig(level=logging.WARNING)
class APIErrorCatcherGroup(click.Group):
"""A custom click group to catch API-related errors.
This custom Group implementation catches :code:`MythXAPIError`
exceptions, which get raised when the API returns a non-200
status code. It is used to notify the user about the error that
happened instead of triggering an uncaught exception traceback.
It is given to the main CLI entrypoint and propagated to all
subcommands.
"""
def __call__(self, *args, **kwargs):
try:
return self.main(*args, **kwargs)
except MythXAPIError as exc:
LOGGER.debug("Caught API error")
click.echo("The API returned an error:\n{}".format(exc))
sys.exit(1)
# noinspection PyIncorrectDocstring
@click.group(cls=APIErrorCatcherGroup)
@click.option(
"--debug",
is_flag=True,
default=False,
envvar="MYTHX_DEBUG",
help="Provide additional debug output",
)
@click.option(
"--api-key", envvar="MYTHX_API_KEY", help="Your MythX API key from the dashboard"
)
@click.option(
"--username", envvar="MYTHX_USERNAME", help="Your MythX account's username"
)
@click.option(
"--password", envvar="MYTHX_PASSWORD", help="Your MythX account's password"
)
@click.option(
"--format",
"fmt",
default=None,
type=click.Choice(FORMAT_RESOLVER.keys()),
help="The format to display the results in",
)
@click.option(
"--ci",
is_flag=True,
default=None,
help="Return exit code 1 if high-severity issue is found",
)
@click.option(
"-y",
"--yes",
is_flag=True,
default=None,
help="Do not prompt for any confirmations",
)
@click.option(
"-o", "--output", default=None, help="Output file to write the results into"
)
@click.option(
"-c",
"--config",
type=click.Path(exists=True),
help="YAML config file for default parameters",
)
@click.option("--stdout", is_flag=True, default=False, help="Force printing to stdout")
@click.option(
"--table-sort-key",
type=click.Choice(["line", "title", "severity", "description"]),
default="line",
help="The column to sort the default table output by",
)
@click.pass_context
def cli(
ctx,
debug: bool,
api_key: str,
username: str,
password: str,
fmt: str,
ci: bool,
output: str,
yes: bool,
config: str,
stdout: bool,
table_sort_key: str,
) -> None:
"""Your CLI for interacting with https://mythx.io/
\f
:param ctx: Click context holding group-level parameters
:param debug: Boolean to enable the `logging` debug mode
:param api_key: User JWT api token from the MythX dashboard
:param username: The MythX account ETH address/username
:param password: The account password from the MythX dashboard
:param fmt: The formatter to use for the subcommand output
:param ci: Boolean to return exit code 1 on medium/high-sev issues
:param output: Output file to write the results into
:param config: YAML config file to read default parameters from
:param stdout: Force printing to stdout and ignore output files
:param table_sort_key: The column to sort the default table output by
"""
# set loggers to debug mode
if debug:
for name in logging.root.manager.loggerDict:
logging.getLogger(name).setLevel(logging.DEBUG)
ctx.obj = {
"debug": debug,
"api_key": api_key,
"username": username,
"password": password,
"fmt": fmt,
"ci": ci,
"output": output,
"yes": yes,
"config": config,
"table_sort_key": table_sort_key,
}
LOGGER.debug("Initializing configuration context")
config_file = config or ".mythx.yml"
if Path(config_file).is_file():
LOGGER.debug(f"Parsing config at {config_file}")
with open(config_file) as config_f:
parsed_config = yaml.safe_load(config_f.read())
else:
parsed_config = {"analyze": {}}
# The analyze/fuzz context is updated separately in the command
# implementation
ctx.obj["analyze"] = parsed_config.get("analyze", {})
ctx.obj["fuzz"] = parsed_config.get("fuzz", {})
# overwrite context with top-level YAML config keys if necessary
update_context(ctx.obj, "ci", parsed_config, "ci", False)
if stdout:
# if forced stdout, don't set output file
ctx.obj["output"] = None
else:
update_context(ctx.obj, "output", parsed_config, "output", None)
update_context(ctx.obj, "fmt", parsed_config, "format", "table")
update_context(ctx.obj, "yes", parsed_config, "confirm", False)
update_context(ctx.obj, "table_sort_key", parsed_config, "table-sort-key", "line")
# set return value - used for CI failures
ctx.obj["retval"] = 0
LOGGER.debug(f"Initializing tool name middleware with {__version__}")
toolname_mw = ClientToolNameMiddleware(name="mythx-cli-{}".format(__version__))
if api_key is not None:
LOGGER.debug("Initializing client with API key")
ctx.obj["client"] = Client(api_key=api_key, middlewares=[toolname_mw])
elif username and password:
LOGGER.debug("Initializing client with username and password")
ctx.obj["client"] = Client(
username=username, password=password, middlewares=[toolname_mw]
)
elif "fuzz" not in sys.argv:
# fuzz subcommand is exempt from API auth
raise click.UsageError(
(
"The trial user has been deprecated. You can still use the MythX CLI for free "
"by signing up for a free account at https://mythx.io/ and entering your access "
"credentials."
)
)
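
# Example .mythx.yml consumed above (a sketch; the keys mirror the
# update_context calls: ci, output, format, confirm, table-sort-key,
# plus the analyze/fuzz sub-sections):
#
#   ci: true
#   format: json
#   confirm: false
#   table-sort-key: severity
#   analyze: {}
#   fuzz: {}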
LOGGER.debug("Registering main commands")
cli.add_command(analyze)
cli.add_command(render)
cli.add_command(version)
@cli.group()
def project() -> None:
"""Create, modify, and view analysis projects.
\f
This subcommand holds all project-related actions, such as creating,
listing, and managing projects, as well as fetching the status of one
or more groups inside a project.
"""
pass
LOGGER.debug("Registering project commands")
project.add_command(project_list)
@cli.group()
def group() -> None:
"""Create, modify, and view analysis groups.
\f
This subcommand holds all group-related actions, such as creating,
listing, closing groups, as well as fetching the status of one
or more group IDs.
"""
pass
LOGGER.debug("Registering group commands")
group.add_command(group_list)
group.add_command(group_status)
group.add_command(group_open)
group.add_command(group_close)
@cli.group()
def analysis() -> None:
"""Get information on running and finished analyses.
\f
This subcommand holds all analysis-related actions, such as submitting new
analyses, listing existing ones, fetching their status, as well as fetching
the reports of one or more finished analysis jobs.
"""
pass
LOGGER.debug("Registering analysis commands")
analysis.add_command(analysis_status)
analysis.add_command(analysis_list)
analysis.add_command(analysis_report)
@cli.group()
def fuzz() -> None:
"""Interact with the MythX FaaS solution.
\f
This subcommand holds all fuzz-related actions, such as initializing
new fuzzing campaigns, preparing projects for FaaS submission, and
launching new campaigns.
"""
pass
LOGGER.debug("Registering fuzz commands")
fuzz.add_command(fuzz_run)
fuzz.add_command(fuzz_arm)
fuzz.add_command(fuzz_disarm)
if __name__ == "__main__":
sys.exit(cli()) # pragma: no cover
| StarcoderdataPython |
1603955 | class P2PConfiguration:
def __init__(
self,
        # TODO: is there a better way of specifying these defaults, so some defaults can use others in their calculation?
network: bytes = bytes.fromhex("feedbeef"), # Mainnet constant TODO: move to a proper constants file
node_id: int = 0,
node_name: str = "FNode0",
peer_request_interval: int = 60 * 3, # 3 minutes
peer_reseed_interval: int = 3600 * 4, # 4 hours
peer_ip_limit_incoming: int = 0,
peer_ip_limit_outgoing: int = 0,
special_peers: str = "",
manual_ban: int = 3600 * 24 * 7, # 1 week
auto_ban: int = 3600 * 24 * 7, # 1 week
persist_file: str = "",
persist_interval: int = 60 * 15, # 15 minutes
outgoing: int = 32,
incoming: int = 150,
fanout: int = 8,
seed_url: str = None,
peer_share_amount: int = 4 * 32, # legacy math (4 * outgoing)
minimum_quality_score: int = 20,
persist_level: int = 2,
persist_minimum: int = 60, # 1 minute
bind_ip: str = "", # bind to all
listen_port: str = "8108",
listen_limit: int = 1,
ping_interval: int = 15, # 15 seconds
persist_age_limit: int = 3600 * 48, # 2 days
redial_interval: int = 20, # 20 seconds
redial_reset: int = 3600 * 12, # 12 hours
redial_attempts: int = 5,
disconnect_lock: int = 20 * 5 + 80, # RedialInterval * RedialAttempts + 80 seconds
read_deadline: int = 60 * 5, # 5 minutes, high enough to accommodate large packets but fail eventually
write_deadline: int = 60 * 5, # 5 minutes, high enough to accommodate large packets but fail eventually
handshake_timeout: int = 10, # 10 seconds, can be quite low
dial_timeout: int = 10, # 10 seconds, can be quite low
duplicate_filter: int = 3600, # 1 hour
duplicate_filter_cleanup: int = 60, # 1 minute
protocol_version: int = 10,
protocol_version_minimum: int = 9,
channel_capacity: int = 5000,
):
"""
Defines the behavior of the gossip network protocol
        :param network: the NetworkID of the network to use, e.g. MainNet, TestNet, etc.
:param node_id:
:param node_name: the internal name of the node
:param peer_request_interval: how often neighbors should be asked for an updated peer list
:param peer_reseed_interval: how often the seed file should be accessed to check for changes
:param peer_ip_limit_incoming: the maximum amount of peers to accept from a single ip address (0 for unlimited)
:param peer_ip_limit_outgoing: the maximum amount of peers to accept from a single ip address (0 for unlimited)
:param special_peers: a list of special peers, comma separated. If no port specified, the entire ip is special
:param manual_ban: the duration to ban an address for when banned manually
:param auto_ban: the duration to ban an address for when their quality score drops too low
:param persist_file: the filepath to the file to save peers
:param persist_interval: how often to save peers
:param outgoing: the number of peers this node attempts to connect to
:param incoming: the number of incoming connections this node is willing to accept
:param fanout: controls how many random peers are selected for propagating messages
(higher values increase fault tolerance but also increase network congestion)
:param seed_url: the URL of the remote seed file
:param peer_share_amount: the number of peers we share (to count as being connected)
:param minimum_quality_score:
:param persist_level: 0 = persist all peers, 1 = persist peers we have had a connection with, or 2 persist only
peers we have been able to dial to
        :param persist_minimum: the minimum amount of time a connection has to last in order to be persisted
:param bind_ip: the ip address to bind to for listening and connecting (leave blank to bind to all)
:param listen_port: the port to listen to incoming tcp connections on
:param listen_limit: the lockout period of accepting connections from a single ip after having a successful
connection from that ip
:param ping_interval: the maximum amount of time a connection can be silent (no writes) before sending a Ping
:param persist_age_limit: how long a peer can be offline before being considered dead
:param redial_interval: how long to wait between connection attempts
:param redial_reset: after how long we should try to reconnect again
:param redial_attempts: the number of redial attempts to make before considering a connection unreachable
:param disconnect_lock: how long the peer manager should wait for an incoming peer to reconnect before
considering dialing to them
:param read_deadline: the maximum time to read a single parcel. if a connection takes longer, it's disconnected
:param write_deadline: the maximum time to send a single parcel. if a connection takes longer, it's disconnected
:param handshake_timeout: the maximum time for an incoming connection to send the first parcel after connecting
:param dial_timeout:
        :param duplicate_filter: how long message hashes are cached to filter out duplicates (0 to disable)
:param duplicate_filter_cleanup: how frequently the cleanup mechanism is run to trim duplicate filter memory
:param protocol_version: the earliest version this package supports
:param protocol_version_minimum:
:param channel_capacity:
"""
self.network = network
self.node_id = node_id
self.node_name = node_name
# === Peer Management Settings ===
self.peer_request_interval = peer_request_interval
self.peer_reseed_interval = peer_reseed_interval
self.peer_ip_limit_outgoing = peer_ip_limit_outgoing
self.peer_ip_limit_incoming = peer_ip_limit_incoming
self.special_peers = special_peers
self.persist_file = persist_file
self.persist_interval = persist_interval
self.persist_level = persist_level
self.persist_minimum = persist_minimum
self.persist_age_limit = persist_age_limit
self.peer_share_amount = peer_share_amount
self.minimum_quality_score = minimum_quality_score
# === Gossip Behavior ===
self.outgoing = outgoing
self.incoming = incoming
self.fanout = fanout
self.seed_url = seed_url
# === Connection Settings ===
self.bind_ip = bind_ip
self.listen_port = listen_port
self.listen_limit = listen_limit
self.ping_interval = ping_interval
self.redial_interval = redial_interval
self.redial_reset = redial_reset
self.redial_attempts = redial_attempts
self.disconnect_lock = disconnect_lock
self.manual_ban = manual_ban
self.auto_ban = auto_ban
self.handshake_timeout = handshake_timeout
self.dial_timeout = dial_timeout
self.read_deadline = read_deadline
self.write_deadline = write_deadline
self.duplicate_filter = duplicate_filter
self.duplicate_filter_cleanup = duplicate_filter_cleanup
self.protocol_version = protocol_version
self.protocol_version_minimum = protocol_version_minimum
self.channel_capacity = channel_capacity
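
# Usage sketch (hypothetical values; the defaults above cover everything else):
#   config = P2PConfiguration(node_name="FNode1", listen_port="8110", outgoing=16)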
| StarcoderdataPython |
8089738 | a = '/leave'
msg = '/coco'
if a not in msg:
    print("not present")
else:
    print("present") | StarcoderdataPython |
from sqlalchemy import create_engine, Table, Column, Integer, String, Boolean, MetaData, select
import os
import urlparse
DATABASE_NAME = 'data/crawler.sqlite'
HTML_DIR = 'data/html/'
class CrawlerDb:
def __init__(self):
self.connected = False
def connect(self):
self.engine = create_engine('sqlite:///' + DATABASE_NAME)
self.connection = self.engine.connect()
self.connected = True if self.connection else False
self.metadata = MetaData()
# Define the tables
self.website_table = Table('website', self.metadata,
Column('id', Integer, primary_key=True),
Column('url', String, nullable=False),
Column('has_crawled', Boolean, default=False),
Column('emails', String, nullable=True),
)
# Create the tables
self.metadata.create_all(self.engine)
def enqueue(self, url, emails = None):
if not self.connected:
return False
s = select([self.website_table]).where(self.website_table.c.url == url)
res = self.connection.execute(s)
result = res.fetchall()
res.close()
# If we get a result, then this url is not unique
if len(result) > 0:
# print 'Duplicated: %s' % url
return False
args = [{'url':unicode(url)}]
if (emails != None):
args = [{'url':unicode(url), 'has_crawled':True, 'emails':unicode(",".join(emails))}]
result = self.connection.execute(self.website_table.insert(), args)
if result:
return True
return False
def dequeue(self):
if not self.connected:
return False
# Get the first thing in the queue
s = select([self.website_table]).limit(1).where(self.website_table.c.has_crawled == False)
res = self.connection.execute(s)
result = res.fetchall()
res.close()
# If we get a result
if len(result) > 0:
# Remove from the queue ?
# delres = self.connection.execute(self.queue_table.delete().where(self.queue_table.c.id == result[0][0]))
# if not delres:
# return False
# Return the row
# print result[0].url
return result[0]
return False
def crawled(self, website, new_emails=None):
if not self.connected:
return False
stmt = self.website_table.update() \
.where(self.website_table.c.id==website.id) \
.values(has_crawled=True, emails=new_emails)
self.connection.execute(stmt)
def get_all_emails(self):
if not self.connected:
return None
s = select([self.website_table])
res = self.connection.execute(s)
results = res.fetchall()
res.close()
email_set = set()
for result in results:
if (result.emails == None):
continue
for email in result.emails.split(','):
email_set.add(email)
return email_set
def get_all_domains(self):
if not self.connected:
return None
s = select([self.website_table])
res = self.connection.execute(s)
results = res.fetchall()
res.close()
domain_set = set()
for result in results:
if (result.url == None):
continue
url = urlparse.urlparse(result.url)
hostname = url.hostname.split(".")
            # Simplistic assumption about domains: if the second-to-last label is
            # shorter than 4 characters, the domain has 3 parts, e.g. just2us.com.sg
hostname = ".".join(len(hostname[-2]) < 4 and hostname[-3:] or hostname[-2:])
domain_set.add(hostname)
return domain_set
def close(self):
self.connection.close()
    def save_html(self, filename, html):
filename = os.path.join(HTML_DIR, filename)
file = open(filename,"w+")
file.writelines(html)
file.close()
def test(self):
c = CrawlerDb()
c.connect()
# c.enqueue(['a12222', '11'])
# c.enqueue(['dddaaaaaa2', '22'])
c.enqueue('111')
c.enqueue('222')
website = c.dequeue()
c.crawled(website)
website = c.dequeue()
c.crawled(website, "a,b")
print '---'
c.dequeue()
# CrawlerDb().test()
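
# Usage sketch (Python 2, matching the module above):
#   db = CrawlerDb()
#   db.connect()
#   db.enqueue('http://example.com')
#   website = db.dequeue()
#   db.crawled(website, 'a@example.com,b@example.com')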
| StarcoderdataPython |
3314148 | <reponame>kagemeka/atcoder-submissions
import sys
s = sys.stdin.readline().rstrip()
def main():
    if s[0] == "A" and "C" in s[2:-1]:
        # exactly 2 uppercase letters means only the leading "A" and one "C"
        cnt = 0
        for letter in s:
            if 65 <= ord(letter) <= 90:
                cnt += 1
        if cnt == 2:
            return "AC"
    return "WA"
if __name__ == "__main__":
ans = main()
print(ans)
| StarcoderdataPython |
12808555 | <reponame>stowage/ctypesgen<gh_stars>0
# -*- coding: us-ascii -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
"""
Main loop for ctypesgen.
"""
import optparse, sys
from . import options as core_options
from . import parser as core_parser
from . import printer_python, printer_json, processor
from . import messages as msgs
def find_names_in_modules(modules):
    names = set()
    for module in modules:
        try:
            mod = __import__(module)
        except ImportError:
            pass
        else:
            # set.union() returns a new set, so update() the accumulator
            # with the names defined by the imported module itself
            names.update(dir(mod))
    return names
def option_callback_W(option, opt, value, parser):
# Options preceded by a "-Wl," are simply treated as though the "-Wl,"
# is not there? I don't understand the purpose of this code...
if len(value) < 4 or value[0:3] != 'l,-':
raise optparse.BadOptionError("not in '-Wl,<opt>' form: %s%s"
% (opt, value))
opt = value[2:]
if opt not in ['-L', '-R', '--rpath']:
raise optparse.BadOptionError("-Wl option must be -L, -R"
" or --rpath, not " + value[2:])
# Push the linker option onto the list for further parsing.
parser.rargs.insert(0, value)
def option_callback_libdir(option, opt, value, parser):
# There are two sets of linker search paths: those for use at compile time
# and those for use at runtime. Search paths specified with -L, -R, or
# --rpath are added to both sets.
parser.values.compile_libdirs.append(value)
parser.values.runtime_libdirs.append(value)
def main(givenargs = None):
usage = 'usage: %prog [options] /path/to/header.h ...'
op = optparse.OptionParser(usage=usage)
# Parameters
op.add_option('-o', '--output', dest='output', metavar='FILE',
help='write wrapper to FILE [default stdout]')
op.add_option('-l', '--library', dest='libraries', action='append',
default=[], metavar='LIBRARY', help='link to LIBRARY')
op.add_option('', '--include', dest='other_headers', action='append',
default=[], metavar='HEADER',
help='include system header HEADER (e.g. stdio.h or stdlib.h)')
op.add_option('-m', '--module', '--link-module', action='append',
dest='modules', metavar='MODULE', default=[],
help='use symbols from Python module MODULE')
op.add_option('-I', '--includedir', dest='include_search_paths',
action='append', default=[], metavar='INCLUDEDIR',
help='add INCLUDEDIR as a directory to search for headers')
op.add_option('-W', action="callback", callback=option_callback_W,
metavar="l,OPTION", type="str",
help="where OPTION is -L, -R, or --rpath")
op.add_option("-L", "-R", "--rpath", "--libdir", action="callback",
callback=option_callback_libdir, metavar="LIBDIR", type="str",
help="Add LIBDIR to the search path (both compile-time and run-time)")
op.add_option('', "--compile-libdir", action="append",
dest="compile_libdirs", metavar="LIBDIR", default=[],
help="Add LIBDIR to the compile-time library search path.")
op.add_option('', "--runtime-libdir", action="append",
dest="runtime_libdirs", metavar="LIBDIR", default=[],
help="Add LIBDIR to the run-time library search path.")
# Parser options
op.add_option('', '--cpp', dest='cpp', default='gcc -E',
help='The command to invoke the c preprocessor, including any ' \
'necessary options (default: gcc -E)')
op.add_option('', '--save-preprocessed-headers', metavar='FILENAME',
dest='save_preprocessed_headers', default=None,
help='Save the preprocessed headers to the specified FILENAME')
op.add_option('', '--optimize-lexer', dest='optimize_lexer',
action='store_true', default=False,
help='Run the lexer in optimized mode. This mode requires write '
'access to lextab.py file stored within the ctypesgen package.')
# Processor options
op.add_option('-a', '--all-headers', action='store_true',
dest='all_headers', default=False,
help='include symbols from all headers, including system headers')
op.add_option('', '--builtin-symbols', action='store_true',
dest='builtin_symbols', default=False,
help='include symbols automatically generated by the preprocessor')
op.add_option('', '--no-macros', action='store_false', dest='include_macros',
default=True, help="Don't output macros.")
op.add_option('-i', '--include-symbols', dest='include_symbols',
default=None, help='regular expression for symbols to always include')
op.add_option('-x', '--exclude-symbols', dest='exclude_symbols',
default=None, help='regular expression for symbols to exclude')
op.add_option('', '--no-stddef-types', action='store_true',
dest='no_stddef_types', default=False,
help='Do not support extra C types from stddef.h')
op.add_option('', '--no-gnu-types', action='store_true',
dest='no_gnu_types', default=False,
help='Do not support extra GNU C types')
op.add_option('', '--no-python-types', action='store_true',
dest='no_python_types', default=False,
help='Do not support extra C types built in to Python')
# Printer options
op.add_option('', '--header-template', dest='header_template', default=None,
metavar='TEMPLATE',
help='Use TEMPLATE as the header template in the output file.')
op.add_option('', '--strip-build-path', dest='strip_build_path',
default=None, metavar='BUILD_PATH',
help='Strip build path from header paths in the wrapper file.')
op.add_option('', '--insert-file', dest='inserted_files', default=[],
action='append', metavar='FILENAME',
help='Add the contents of FILENAME to the end of the wrapper file.')
op.add_option('', '--output-language', dest='output_language', metavar='LANGUAGE',
default='py',
choices=('py', 'py32', 'py27', 'py25', 'json'),
help="Choose output language (`py'[default], `py32', `py27', `py25', or "
"`json'). The implementation for py32 does appear to be "
"compatible down to at least Python2.7.15. py25 and py27 are in "
"any case _not_ compatible with >= Python3. The default choice "
"(py) attempts to select `py32', `py27', or `py25' based on the "
"version of Python that runs this script."
)
# Error options
op.add_option('', "--all-errors", action="store_true", default=False,
dest="show_all_errors", help="Display all warnings and errors even " \
"if they would not affect output.")
op.add_option('', "--show-long-errors", action="store_true", default=False,
dest="show_long_errors", help="Display long error messages " \
"instead of abbreviating error messages.")
op.add_option('', "--no-macro-warnings", action="store_false", default=True,
dest="show_macro_warnings", help="Do not print macro warnings.")
op.set_defaults(**core_options.default_values)
(options, args) = op.parse_args(givenargs)
options.headers = args
# Figure out what names will be defined by imported Python modules
options.other_known_names = find_names_in_modules(options.modules)
# Required parameters
if len(args) < 1:
msgs.error_message('No header files specified', cls='usage')
sys.exit(1)
if len(options.libraries) == 0:
msgs.warning_message('No libraries specified', cls='usage')
# Check output language
printer = None
if options.output_language.startswith('py'):
printer = printer_python.WrapperPrinter
elif options.output_language == "json":
printer = printer_json.WrapperPrinter
else:
msgs.error_message("No such output language `" + options.output_language + "'", cls='usage')
sys.exit(1)
    # Step 1: Parse
    descriptions = core_parser.parse(options.headers, options)
    # Step 2: Process
    processor.process(descriptions, options)
    # Step 3: Print
    printer(options.output, options, descriptions)
msgs.status_message("Wrapping complete.")
# Correct what may be a common mistake
if descriptions.all == []:
if not options.all_headers:
msgs.warning_message("There wasn't anything of use in the " \
"specified header file(s). Perhaps you meant to run with " \
"--all-headers to include objects from included sub-headers? ",
cls = 'usage')
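
# Example call (a sketch; the header and library names are hypothetical):
#   main(['-l', 'foo', '-o', 'foo_wrapper.py', '/usr/include/foo.h'])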
| StarcoderdataPython |
6486540 | # config.py
from authomatic.providers import oauth2, oauth1, openid
SECRET = "UUID_ALEATOIRE"
CONFIG = {
'tw': { # Your internal provider name
'short_name': 1,
'class_': oauth1.Twitter,
        'consumer_key': 'MY_TWITTER_KEY',
        'consumer_secret': 'MY_TWITTER_SECRET',
},
'fb': {
'class_': oauth2.Facebook,
'short_name': 2,
        'consumer_key': 'MY_FACEBOOK_KEY',
        'consumer_secret': 'MY_FACEBOOK_SECRET',
'scope': ['user_about_me', 'email', 'publish_stream'],
},
'google': {
'class_': oauth2.Google,
'short_name': 3,
        'consumer_key': 'MY_GOOGLE_KEY.apps.googleusercontent.com',
        'consumer_secret': 'MY_GOOGLE_SECRET',
'scope': ['profile', 'https://www.googleapis.com/auth/plus.login', 'email']
},
}
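
# Usage sketch (the authomatic package consumes this config; the instance is
# then called from a login request handler):
#   from authomatic import Authomatic
#   authomatic = Authomatic(config=CONFIG, secret=SECRET)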
| StarcoderdataPython |
8100509 | from flask import request
from app import app
import ddbb
import requests
import json
import time
import string
import random
def register_user(code):
    if code is None:
return False
r = requests.post("https://accounts.spotify.com/api/token", data={
"grant_type": "authorization_code",
"code": code,
"redirect_uri": ddbb.redirect_uri
}, headers={
"Authorization": "Basic " + ddbb.auth
})
if r.status_code != 200:
return False
obj = json.loads(r.text)
access = obj.get('access_token')
refresh = obj.get('refresh_token')
r = requests.get('https://api.spotify.com/v1/me', headers={
"Authorization": "Bearer " + access
})
if r.status_code != 200:
return False
obj = json.loads(r.text)
name = obj.get('display_name')
username = obj.get('id')
url = obj['external_urls'].get('spotify')
img = obj['images']
if len(img):
img = img[0].get('url')
else:
img = '/img/user.png'
q = ddbb.queryone(
"SELECT id, username, url, img, session FROM user WHERE username=?", username)
if q:
ddbb.query("UPDATE user SET username=?, name=?, url=?, img=?, access=?, refresh=?, valid=? WHERE id=?",
username, name, url, img, access, refresh, time.time(), q[0])
return q
else:
alnum = string.ascii_letters + string.digits
session = ''.join((random.choice(alnum) for i in range(64)))
id = ddbb.insert("INSERT INTO user (username, name, url, img, session, access, refresh, valid) VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
username, name, url, img, session, access, refresh, time.time())
return [id, username, url, img, session]
def get_friends(user):
q = ddbb.query(
"SELECT id, name, url, img, access, refresh, valid FROM user WHERE id IN (SELECT friend FROM friends WHERE user=?)", user)
if not len(q):
return []
friends = []
for friend in q:
token = friend[4]
        if (time.time() - friend[6]) > 3500 and friend[5] is not None:
url = "https://accounts.spotify.com/api/token"
r = requests.post(url, data={
"grant_type": "refresh_token",
"refresh_token": friend[5],
}, headers={
"Authorization": "Basic " + ddbb.auth
})
if r.status_code == 200:
token = json.loads(r.text).get('access_token')
ddbb.query(
"UPDATE user SET access=?, valid=? WHERE id=?", token, time.time(), friend[0])
else:
continue
friends.append({
'id': friend[0],
'name': friend[1],
'url': friend[2],
'img': friend[3],
'token': token
})
return friends
def get_friends_listening(user):
r = []
for friend in get_friends(user):
playing = requests.get("https://api.spotify.com/v1/me/player/currently-playing", headers={
'Authorization': "Bearer " + friend['token']
})
if playing.status_code == 204:
r.append({
'id': friend['id'],
"user": friend['name'],
"user_url": friend['url'],
"user_img": friend['img']
})
continue
if playing.status_code != 200:
continue
playing = json.loads(playing.text)
r.append({
'timestamp': playing.get("timestamp"),
'progress_ms': playing.get("progress_ms"),
'is_playing': playing.get("is_playing"),
'name': playing["item"].get("name"),
'artist': playing["item"]["artists"][0].get("name"),
'url': playing["item"]["external_urls"].get("spotify"),
'album': playing["item"]["album"].get("name"),
'album_img': playing["item"]["album"]["images"][2].get("url"),
'id': friend['id'],
"user": friend['name'],
"user_url": friend['url'],
"user_img": friend['img']
})
return r
def get_history(user):
q = ddbb.queryone(
"SELECT name, url, img, access, refresh, valid FROM user WHERE id=?", user)
    if not q:
        return None
    token = q[3]
    if (time.time() - q[5]) > 3500 and q[4] is not None:
r = requests.post("https://accounts.spotify.com/api/token", data={
"grant_type": "refresh_token",
"refresh_token": q[4],
}, headers={
"Authorization": "Basic " + ddbb.auth
})
if r.status_code == 200:
token = json.loads(r.text).get('access_token')
            ddbb.query(
                "UPDATE user SET access=?, valid=? WHERE id=?", token, time.time(), user)
else:
return None
history = requests.get("https://api.spotify.com/v1/me/player/recently-played?limit=5", headers={
'Authorization': "Bearer " + token
})
if history.status_code != 200:
return None
history = json.loads(history.text)
r = {
'name': q[0],
'url': q[1],
'img': q[2],
'history': []
}
for song in history.get('items'):
r['history'].append({
'name': song["track"].get("name"),
'artist': song["track"]["artists"][0].get("name"),
'url': song["track"]["external_urls"].get("spotify"),
'album': song["track"]["album"].get("name"),
'album_img': song["track"]["album"]["images"][2].get("url"),
})
return r
def get_songs(user):
q = ddbb.queryone(
"SELECT name, url, img, access, refresh, valid FROM user WHERE id=?", user)
    if not q:
        return None
    token = q[3]
    if (time.time() - q[5]) > 3500 and q[4] is not None:
r = requests.post("https://accounts.spotify.com/api/token", data={
"grant_type": "refresh_token",
"refresh_token": q[4],
}, headers={
"Authorization": "Basic " + ddbb.auth
})
if r.status_code == 200:
token = json.loads(r.text).get('access_token')
ddbb.query(
"UPDATE user SET access=?, valid=? WHERE id=?", token, time.time(), user)
else:
return None
history = requests.get("https://api.spotify.com/v1/me/tracks?limit=5", headers={
'Authorization': "Bearer " + token
})
if history.status_code != 200:
return None
history = json.loads(history.text)
r = {
'name': q[0],
'url': q[1],
'img': q[2],
'songs': []
}
for song in history.get('items'):
r['songs'].append({
'name': song["track"].get("name"),
'artist': song["track"]["artists"][0].get("name"),
'url': song["track"]["external_urls"].get("spotify"),
'album': song["track"]["album"].get("name"),
'album_img': song["track"]["album"]["images"][2].get("url"),
})
return r
def add_friend(user, username, id):
friends = ddbb.query("SELECT friend FROM friends WHERE user=?", user)
for friend in friends:
if friend[0] == id:
return True
if len(friends) > 4:
return False
q = ddbb.queryone(
"SELECT id FROM user WHERE username=? AND id=?", username, id)
    if q is None or q[0] != id:
return False
ddbb.insert("INSERT INTO friends (user, friend) VALUES (?, ?)", user, id)
return True
def check_user():
id = request.cookies.get('id')
username = request.cookies.get('username')
session = request.cookies.get('session')
if not isinstance(id, str) or not isinstance(username, str) or not isinstance(session, str):
return False
if len(id) > 10 or len(username) > 50 or len(session) > 150:
return False
q = ddbb.queryone(
"SELECT id FROM user WHERE id=? AND username=? AND session=?", id, username, session)
if q != None and str(q[0]) == id:
return True
return False
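
# Route sketch (hypothetical; shows how the helpers above compose):
#   @app.route('/friends')
#   def friends():
#       if not check_user():
#           return '', 401
#       return json.dumps(get_friends_listening(request.cookies.get('id')))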
| StarcoderdataPython |
1670726 | <filename>lsystem/KochCurve3D.py<gh_stars>1-10
from lsystem.LSystem import LSystem
import math
class KochCurve3D(LSystem):
"""Fractale courbe de koch en 3D"""
def defineParams(self):
self.LSName = "Koch curve 3D"
self.LSAngle = math.pi / 2
self.LSSegment = 0.01
self.LSSteps = 8
self.LSStartingString = "A"
def createVars(self):
self.LSVars = {
'F': self.turtle.forward,
'G': self.turtle.forward,
'H': self.turtle.forward,
'I': self.turtle.forward,
'+': self.turtle.rotZ,
'-': self.turtle.irotZ,
'^': self.turtle.rotY,
'&': self.turtle.irotY,
'<': self.turtle.rotX,
'>': self.turtle.irotX,
'|': self.turtle.rotX,
'[': self.turtle.push,
']': self.turtle.pop
}
self.LSParams = {
'F': self.LSSegment,
'G': self.LSSegment,
'H': self.LSSegment,
'I': self.LSSegment,
'+': self.LSAngle,
'-': self.LSAngle,
'&': self.LSAngle,
'^': self.LSAngle,
'<': self.LSAngle,
'>': self.LSAngle,
'|': self.LSAngle * 2,
'[': None,
']': None
}
def createRules(self):
self.LSRules = {
'A': "[[[[F+F-F-F+F]G<G>G>G<G]H-H+H+H-H]I>I<I<I>I]",
'F': "F+F-F-F+F",
'G': "G<G>G>G<G",
'H': "H-H+H+H-H",
'I': "I>I<I<I>I",
}
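
# Derivation sketch (independent of the LSystem base class): one rewriting step
# replaces every symbol that has a rule by its expansion, e.g. the hypothetical
#   step = lambda s, rules: "".join(rules.get(c, c) for c in s)
# applied to "A" with the rules above yields the bracketed F/G/H/I Koch arms.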
| StarcoderdataPython |
3488145 | import os
import json
import matplotlib.pyplot as plt
class Logger(object):
def __init__(self, log_dir):
self.log_dir = log_dir
if not os.path.exists(log_dir):
os.makedirs(log_dir)
self.data_dict = {}
def record(self, *args):
assert(len(args)%2 == 0)
for i in range(len(args)//2):
assert(type(args[i*2]) == str)
assert(type(args[i*2+1]) == float or type(args[i*2+1]) == int)
if args[i*2] in self.data_dict:
self.data_dict[args[i*2]].append(args[i*2+1])
else:
self.data_dict[args[i*2]] = [args[i*2+1]]
def save_fig(self, *keys, avg=1, together=False, set_name=None):
if together:
name = ''
for key in keys:
if not key in self.data_dict:
continue
n = len(self.data_dict[key]) // avg
data = []
for i in range(n):
data.append(sum(self.data_dict[key][i*avg : (i+1)*avg]) / avg)
plt.plot(range(n), data, label=key)
plt.legend()
if not together:
plt.savefig(self.log_dir+key+'.jpg')
plt.clf()
else:
name += ('+'+key)
if together:
if set_name is not None:
name = set_name
else:
name = name[1:] + '.jpg'
plt.savefig(self.log_dir+name)
plt.clf()
def save_json(self, *keys):
for key in keys:
if not key in self.data_dict:
continue
with open(self.log_dir+key+'.json', 'w') as f:
json.dump(self.data_dict[key], f)
def clear(self, *keys):
if len(keys) == 0:
self.data_dict = {}
else:
for key in keys:
if key in self.data_dict:
self.data_dict.pop(key)
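
# Usage sketch (log_dir is concatenated directly, so keep the trailing slash):
#   logger = Logger('logs/exp1/')
#   logger.record('loss', 0.5, 'acc', 80)
#   logger.save_fig('loss', 'acc', avg=10, together=True)
#   logger.save_json('loss', 'acc')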
| StarcoderdataPython |
6528242 | """Tests for query and export utility function."""
import sys
import logging
import unittest
from system_query.query import query_and_export
_LOG = logging.getLogger(__name__)
class Tests(unittest.TestCase):
def test_unsupported_scope(self):
with self.assertRaises(NotImplementedError):
query_and_export('host', 'raw', sys.stdout)
with self.assertRaises(NotImplementedError):
query_and_export('os', 'raw', sys.stdout)
with self.assertRaises(NotImplementedError):
query_and_export('swap', 'raw', sys.stdout)
def test_unsupported_format(self):
with self.assertRaises(NotImplementedError):
query_and_export('all', 'xml', sys.stdout)
with self.assertRaises(NotImplementedError):
query_and_export('all', 'yaml', sys.stdout)
def test_unsupported_target(self):
with self.assertRaises(NotImplementedError):
query_and_export('all', 'raw', 12345)
with self.assertRaises(NotImplementedError):
query_and_export('all', 'json', 12345)
| StarcoderdataPython |
11307738 | import cv2
import mediapipe as mp
import socket
import numpy as np
UDP_IP = "127.0.0.1"
UDP_PORT = 5065
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_pose = mp.solutions.pose
# hand_down_overlay = cv2.imread('hand_down.png', cv2.IMREAD_UNCHANGED)
# For webcam input:
cap = cv2.VideoCapture(0)
with mp_pose.Pose(
min_detection_confidence=0.5,
min_tracking_confidence=0.5) as pose:
while cap.isOpened():
success, image = cap.read()
if not success:
print("Ignoring empty camera frame.")
# If loading a video, use 'break' instead of 'continue'.
continue
# Flip the image horizontally for a later selfie-view display, and convert
# the BGR image to RGB.
image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
# To improve performance, optionally mark the image as not writeable to
# pass by reference.
image.flags.writeable = False
results = pose.process(image)
if results.pose_landmarks:
pose_landmarks = results.pose_landmarks.ListFields()[0][1]
left_hand = pose_landmarks[20]
right_hand = pose_landmarks[19]
            unity_data_string = f'left_hand_x {left_hand.x} left_hand_y {left_hand.y} right_hand_x {right_hand.x} right_hand_y {right_hand.y}'
# unity_json = str({
# 'left_hand_x': left_hand.x,
# 'left_hand_y': left_hand.y,
# 'right_hand_x': right_hand.x,
# 'right_hand_y': right_hand.y,
# })
sock.sendto(
(unity_data_string).encode(),
(UDP_IP, UDP_PORT)
)
print(unity_data_string)
# print("_" * 10, "Jump Action Triggered!", "_" * 10)
# Draw the pose annotation on the image.
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
mp_drawing.draw_landmarks(
image,
results.pose_landmarks,
mp_pose.POSE_CONNECTIONS,
landmark_drawing_spec=mp_drawing_styles.get_default_pose_landmarks_style())
# print(hand_down_overlay.shape)
# added_image = logo_overlay(image, hand_down_overlay, scale=0.5, y=-100, x=-100)
# added_image = image * (1 - hand_down_overlay[:, :, 3:]) + hand_down_overlay[:, :, 3:]
# added_image = cv2.addWeighted(image, 1., hand_down_overlay, 1., 0)
cv2.imshow('MediaPipe Pose', image)
if cv2.waitKey(5) & 0xFF == 27:
break
cap.release()
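
# Dependencies (a sketch): pip install mediapipe opencv-python
# A Unity-side listener bound to UDP 127.0.0.1:5065 consumes the packets sent above.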
| StarcoderdataPython |
11334288 | <gh_stars>0
# Portions copied over from BCBio.GFF.GFFParser
import re
import copy
import collections
from six.moves import urllib
from gffutils import constants
from gffutils.exceptions import AttributeStringError
import logging
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(formatter)
logger.addHandler(ch)
gff3_kw_pat = re.compile(r'\w+=')
# Encoding/decoding notes
# -----------------------
# From
# https://github.com/The-Sequence-Ontology/Specifications/blob/master/gff3.md#description-of-the-format:
#
# GFF3 files are nine-column, tab-delimited, plain text files.
# Literal use of tab, newline, carriage return, the percent (%) sign,
# and control characters must be encoded using RFC 3986
# Percent-Encoding; no other characters may be encoded. Backslash and
# other ad-hoc escaping conventions that have been added to the GFF
# format are not allowed. The file contents may include any character
# in the set supported by the operating environment, although for
# portability with other systems, use of Latin-1 or Unicode are
# recommended.
#
# tab (%09)
# newline (%0A)
# carriage return (%0D)
# % percent (%25)
# control characters (%00 through %1F, %7F)
#
# In addition, the following characters have reserved meanings in
# column 9 and must be escaped when used in other contexts:
#
# ; semicolon (%3B)
# = equals (%3D)
# & ampersand (%26)
# , comma (%2C)
#
#
# See also issue #98.
#
# Note that spaces are NOT encoded. Some GFF files have spaces encoded; in
# these cases round-trip invariance will not hold since the %20 will be decoded
# but not re-encoded.
_to_quote = '\n\t\r%;=&,'
_to_quote += ''.join([chr(i) for i in range(32)])
_to_quote += chr(127)
# Caching idea from urllib.parse.Quoter, which uses a defaultdict for
# efficiency. Here we're sort of doing the reverse of the "reserved" idea used
# there.
class Quoter(collections.defaultdict):
def __missing__(self, b):
if b in _to_quote:
res = '%{:02X}'.format(ord(b))
else:
res = b
self[b] = res
return res
quoter = Quoter()
def _reconstruct(keyvals, dialect, keep_order=False,
sort_attribute_values=False):
"""
Reconstructs the original attributes string according to the dialect.
Parameters
==========
keyvals : dict
Attributes from a GFF/GTF feature
dialect : dict
Dialect containing info on how to reconstruct a string version of the
attributes
keep_order : bool
If True, then perform sorting of attribute keys to ensure they are in
the same order as those provided in the original file. Default is
False, which saves time especially on large data sets.
sort_attribute_values : bool
If True, then sort values to ensure they will always be in the same
order. Mostly only useful for testing; default is False.
"""
if not dialect:
raise AttributeStringError()
if not keyvals:
return ""
parts = []
# Re-encode when reconstructing attributes
if constants.ignore_url_escape_characters or dialect['fmt'] != 'gff3':
attributes = keyvals
else:
attributes = {}
for k, v in keyvals.items():
attributes[k] = []
for i in v:
attributes[k].append(''.join([quoter[j] for j in i]))
# May need to split multiple values into multiple key/val pairs
if dialect['repeated keys']:
items = []
for key, val in attributes.items():
if len(val) > 1:
for v in val:
items.append((key, [v]))
else:
items.append((key, val))
else:
items = list(attributes.items())
def sort_key(x):
# sort keys by their order in the dialect; anything not in there will
# be in arbitrary order at the end.
try:
return dialect['order'].index(x[0])
except ValueError:
return 1e6
if keep_order:
items.sort(key=sort_key)
for key, val in items:
# Multival sep is usually a comma:
if val:
if sort_attribute_values:
val = sorted(val)
val_str = dialect['multival separator'].join(val)
if val_str:
# Surround with quotes if needed
if dialect['quoted GFF2 values']:
val_str = '"%s"' % val_str
# Typically "=" for GFF3 or " " otherwise
part = dialect['keyval separator'].join([key, val_str])
else:
part = key
else:
if dialect['fmt'] == 'gtf':
part = dialect['keyval separator'].join([key, '""'])
else:
part = key
parts.append(part)
# Typically ";" or "; "
parts_str = dialect['field separator'].join(parts)
# Sometimes need to add this
if dialect['trailing semicolon']:
parts_str += ';'
return parts_str
# TODO:
# Cythonize -- profiling shows that the bulk of the time is spent on this
# function...
def _split_keyvals(keyval_str, dialect=None):
"""
Given the string attributes field of a GFF-like line, split it into an
attributes dictionary and a "dialect" dictionary which contains information
needed to reconstruct the original string.
Lots of logic here to handle all the corner cases.
If `dialect` is None, then do all the logic to infer a dialect from this
attribute string.
Otherwise, use the provided dialect (and return it at the end).
"""
def _unquote_quals(quals, dialect):
"""
Handles the unquoting (decoding) of percent-encoded characters.
See notes on encoding/decoding above.
"""
if not constants.ignore_url_escape_characters and dialect['fmt'] == 'gff3':
for key, vals in quals.items():
unquoted = [urllib.parse.unquote(v) for v in vals]
quals[key] = unquoted
return quals
infer_dialect = False
if dialect is None:
# Make a copy of default dialect so it can be modified as needed
dialect = copy.copy(constants.dialect)
infer_dialect = True
from gffutils import feature
quals = feature.dict_class()
if not keyval_str:
return quals, dialect
# If a dialect was provided, then use that directly.
if not infer_dialect:
if dialect['trailing semicolon']:
keyval_str = keyval_str.rstrip(';')
parts = keyval_str.split(dialect['field separator'])
kvsep = dialect['keyval separator']
if dialect['leading semicolon']:
pieces = []
for p in parts:
if p and p[0] == ';':
p = p[1:]
pieces.append(p.strip().split(kvsep))
key_vals = [(p[0], " ".join(p[1:])) for p in pieces]
if dialect['fmt'] == 'gff3':
key_vals = [p.split(kvsep) for p in parts]
else:
leadingsemicolon = dialect['leading semicolon']
pieces = []
for i, p in enumerate(parts):
if i == 0 and leadingsemicolon:
p = p[1:]
pieces.append(p.strip().split(kvsep))
key_vals = [(p[0], " ".join(p[1:])) for p in pieces]
quoted = dialect['quoted GFF2 values']
for item in key_vals:
# Easy if it follows spec
if len(item) == 2:
key, val = item
# Only key provided?
elif len(item) == 1:
key = item[0]
val = ''
else:
key = item[0]
val = dialect['keyval separator'].join(item[1:])
try:
quals[key]
except KeyError:
quals[key] = []
if quoted:
if (len(val) > 0 and val[0] == '"' and val[-1] == '"'):
val = val[1:-1]
if val:
# TODO: if there are extra commas for a value, just use empty
# strings
# quals[key].extend([v for v in val.split(',') if v])
vals = val.split(',')
quals[key].extend(vals)
quals = _unquote_quals(quals, dialect)
return quals, dialect
# If we got here, then we need to infer the dialect....
#
# Reset the order to an empty list so that it will only be populated with
# keys that are found in the file.
dialect['order'] = []
# ensembl GTF has trailing semicolon
if keyval_str[-1] == ';':
keyval_str = keyval_str[:-1]
dialect['trailing semicolon'] = True
# GFF2/GTF has a semicolon with at least one space after it.
# Spaces can be on both sides (e.g. wormbase)
# GFF3 works with no spaces.
# So split on the first one we can recognize...
for sep in (' ; ', '; ', ';'):
parts = keyval_str.split(sep)
if len(parts) > 1:
dialect['field separator'] = sep
break
# Is it GFF3? They have key-vals separated by "="
if gff3_kw_pat.match(parts[0]):
key_vals = [p.split('=') for p in parts]
dialect['fmt'] = 'gff3'
dialect['keyval separator'] = '='
# Otherwise, key-vals separated by space. Key is first item.
else:
dialect['keyval separator'] = " "
pieces = []
for p in parts:
# Fix misplaced semicolons in keys in some GFF2 files
if p and p[0] == ';':
p = p[1:]
dialect['leading semicolon'] = True
pieces.append(p.strip().split(' '))
key_vals = [(p[0], " ".join(p[1:])) for p in pieces]
for item in key_vals:
# Easy if it follows spec
if len(item) == 2:
key, val = item
# Only key provided?
elif len(item) == 1:
key = item[0]
val = ''
# Pathological cases where values of a key have within them the key-val
# separator, e.g.,
# Alias=SGN-M1347;ID=T0028;Note=marker name(s): T0028 SGN-M1347 |identity=99.58|escore=2e-126
# ^ ^
else:
key = item[0]
val = dialect['keyval separator'].join(item[1:])
# Is the key already in there?
if key in quals:
dialect['repeated keys'] = True
else:
quals[key] = []
# Remove quotes in GFF2
if len(val) > 0 and val[0] == '"' and val[-1] == '"':
val = val[1:-1]
dialect['quoted GFF2 values'] = True
if val:
# TODO: if there are extra commas for a value, just use empty
# strings
# quals[key].extend([v for v in val.split(',') if v])
vals = val.split(',')
if (len(vals) > 1) and dialect['repeated keys']:
raise AttributeStringError(
"Internally inconsistent attributes formatting: "
"some have repeated keys, some do not.")
quals[key].extend(vals)
# keep track of the order of keys
dialect['order'].append(key)
if (
(dialect['keyval separator'] == ' ') and
(dialect['quoted GFF2 values'])
):
dialect['fmt'] = 'gtf'
quals = _unquote_quals(quals, dialect)
return quals, dialect
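# Illustrative round trip (a sketch; assumes a plain GFF3 attribute string and
# the dialect-inference path above):
#   quals, dialect = _split_keyvals('ID=gene00001;Name=EDEN')
#   # quals          -> {'ID': ['gene00001'], 'Name': ['EDEN']}
#   # dialect['fmt'] -> 'gff3'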
| StarcoderdataPython |
5194210 | from typing import List, Optional
import numpy as np
import tensorflow as tf
import xarray as xr
from bioimageio.spec import nodes
from bioimageio.spec.utils import get_nn_instance
from ._model_adapter import ModelAdapter
class TensorflowModelAdapter(ModelAdapter):
    def __init__(self, *, bioimageio_model: nodes.Model, devices: Optional[List[str]] = None):
spec = bioimageio_model
self.name = spec.name
        _input = spec.inputs[0]  # mirrors _output below; not used further here
_output = spec.outputs[0]
# FIXME: TF probably uses different axis names
self._internal_output_axes = _output.axes
self.model = get_nn_instance(bioimageio_model)
        self.devices = devices or []
tf_model = tf.keras.models.load_model(spec.weights["tensorflow_saved_model_bundle"].source)
self.model.set_model(tf_model)
def forward(self, input_tensor: xr.DataArray) -> xr.DataArray:
tf_tensor = tf.convert_to_tensor(input_tensor.data)
res = self.model.forward(tf_tensor)
if not isinstance(res, np.ndarray):
res = tf.make_ndarray(res)
return xr.DataArray(res, dims=tuple(self._internal_output_axes))
| StarcoderdataPython |
9671477 | <filename>QCGym/interpolators/__init__.py
from QCGym.interpolators.identity import IdentityInterpolator
| StarcoderdataPython |
11244896 | <gh_stars>10-100
"""module to test high-level extract function in hvc.extract"""
import os
from glob import glob
import joblib
import numpy as np
import hvc
from hvc.utils import annotation
from config import rewrite_config
class TestExtract:
def test_data_dirs_cbins(self, test_data_dir, tmp_output_dir):
# test that calling extract doesn't fail when we
# pass a data_dirs list that contain cbin audio files
data_dirs = ["cbins/gy6or6/032312", "cbins/gy6or6/032412"]
data_dirs = [
os.path.join(test_data_dir, os.path.normpath(data_dir))
for data_dir in data_dirs
]
file_format = "cbin"
labels_to_use = "iabcdefghjk"
feature_group = "knn"
return_features = True
ftrs = hvc.extract(
data_dirs=data_dirs,
file_format=file_format,
labels_to_use=labels_to_use,
feature_group=feature_group,
output_dir=str(tmp_output_dir),
return_features=return_features,
)
assert type(ftrs) == dict
assert sorted(ftrs.keys()) == ["features", "labels"]
def test_annotation_file_cbins(self, test_data_dir, tmp_output_dir):
# test that calling extract doesn't fail when we
# pass a data_dirs list that contain cbin audio files
cbin_dirs = ["cbins/gy6or6/032312", "cbins/gy6or6/032412"]
cbin_dirs = [
os.path.join(test_data_dir, os.path.normpath(cbin_dir))
for cbin_dir in cbin_dirs
]
notmat_list = []
for cbin_dir in cbin_dirs:
notmat_list.extend(glob(os.path.join(cbin_dir, "*.not.mat")))
# below, sorted() so it's the same order on different platforms
notmat_list = sorted(notmat_list)
csv_filename = os.path.join(str(tmp_output_dir), "test.csv")
annotation.notmat_list_to_csv(notmat_list, csv_filename)
file_format = "cbin"
labels_to_use = "iabcdefghjk"
feature_group = "knn"
return_features = True
ftrs = hvc.extract(
file_format=file_format,
annotation_file=csv_filename,
labels_to_use=labels_to_use,
feature_group=feature_group,
output_dir=str(tmp_output_dir),
return_features=return_features,
)
assert type(ftrs) == dict
assert sorted(ftrs.keys()) == ["features", "labels"]
def _yaml_config_asserts(self, extract_yaml_config_file, tmp_output_dir):
replace_dict = {
"output_dir": ("replace with tmp_output_dir", str(tmp_output_dir))
}
# have to put tmp_output_dir into yaml file
extract_config_rewritten = rewrite_config(
extract_yaml_config_file, tmp_output_dir, replace_dict
)
# helper function that is called by tests below
hvc.extract(extract_config_rewritten)
extract_config = hvc.parse_config(extract_config_rewritten, "extract")
for todo in extract_config["todo_list"]:
os.chdir(todo["output_dir"])
extract_outputs = list(filter(os.path.isdir, glob("*extract_output*")))
extract_outputs.sort(key=os.path.getmtime)
os.chdir(extract_outputs[-1]) # most recent
ftr_files = glob("features_from*")
ftr_dicts = []
for ftr_file in ftr_files:
ftr_dicts.append(joblib.load(ftr_file))
if any(["features" in ftr_dict for ftr_dict in ftr_dicts]):
assert all(["features" in ftr_dict for ftr_dict in ftr_dicts])
for ftr_dict in ftr_dicts:
labels = ftr_dict["labels"]
if "features" in ftr_dict:
features = ftr_dict["features"]
assert features.shape[0] == len(labels)
# make sure number of features i.e. columns is constant across feature matrices
ftr_cols = [ftr_dict["features"].shape[1] for ftr_dict in ftr_dicts]
assert np.unique(ftr_cols).shape[-1] == 1
if any(["neuralnets_input_dict" in ftr_dict for ftr_dict in ftr_dicts]):
assert all(
["neuralnets_input_dict" in ftr_dict for ftr_dict in ftr_dicts]
)
def test_extract_knn_yaml(self, tmp_output_dir, configs_path):
extract_yaml_config_file = os.path.join(
configs_path, "test_extract_knn.config.yml"
)
self._yaml_config_asserts(
extract_yaml_config_file=extract_yaml_config_file,
tmp_output_dir=tmp_output_dir,
)
def test_extract_svm_yaml(self, tmp_output_dir, configs_path):
extract_yaml_config_file = os.path.join(
configs_path, "test_extract_svm.config.yml"
)
self._yaml_config_asserts(
extract_yaml_config_file=extract_yaml_config_file,
tmp_output_dir=tmp_output_dir,
)
def test_extract_flatwindow_yaml(self, tmp_output_dir, configs_path):
extract_yaml_config_file = os.path.join(
configs_path, "test_extract_flatwindow.config.yml"
)
self._yaml_config_asserts(
extract_yaml_config_file=extract_yaml_config_file,
tmp_output_dir=tmp_output_dir,
)
def test_extract_multiple_feature_groups_yaml(self, tmp_output_dir, configs_path):
extract_yaml_config_file = os.path.join(
configs_path, "test_extract_multiple_feature_groups.config.yml"
)
self._yaml_config_asserts(
extract_yaml_config_file=extract_yaml_config_file,
tmp_output_dir=tmp_output_dir,
)
| StarcoderdataPython |
13451 | import os
os.system("pip install pytorch_transformers")
import nsml
print(nsml.DATASET_PATH)
os.system('python ./code/train.py --n-labeled 10 --data-path '+ nsml.DATASET_PATH + '/train/ --batch-size 4 --batch-size-u 8 --epochs 20 --val-iteration 1000 --lambda-u 1 --T 0.5 --alpha 16 --mix-layers-set 7 9 12 --lrmain 0.000005 --lrlast 0.00005'
)
| StarcoderdataPython |
3242698 | #!/usr/bin/env python3
try:
from cx_Freeze import setup,Executable
has_cx_freeze = True
except ImportError:
from distutils.core import setup
has_cx_freeze = False
print('Could not import cx_Freeze. Building executable not possible.')
import platform
from distutils.extension import Extension
from Cython.Build import cythonize
import numpy
arguments = dict(
name='J3D View',
version='0.5',
description='Nintendo GameCube/Wii BMD/BDL file viewer',
scripts = ['j3dview.py'],
py_modules=['gl','viewer_widget','explorer_widget','forms'],
packages=['btypes','gx','j3d'])
#arguments['ext_modules'] = cythonize(Extension(
# 'gx.texture',
# ['gx/texture.pyx'],
# include_dirs=[numpy.get_include()]))
if has_cx_freeze:
base = 'Win32GUI' if platform.system() == 'Windows' else None
build_exe = dict(
includes=['viewer_widget','explorer_widget','forms', 'numpy.core._methods', 'numpy.lib.format'],
packages=['OpenGL.platform','OpenGL.arrays'],
include_files=[('ui/Editor.ui','ui/Editor.ui'),('ui/ViewSettingsForm.ui','ui/ViewSettingsForm.ui'),
('ui/TextureForm.ui','ui/TextureForm.ui'),("ui/icon.ico", "ui/icon.ico")],
excludes=["numpy.multiarray"]
)
arguments['executables'] = [Executable('j3dview.py',base=base, icon="ui/icon.ico")]
arguments['options'] = dict(build_exe=build_exe)
setup(**arguments)
| StarcoderdataPython |
11262483 | <filename>src/train_v2.py
from argparse import ArgumentParser, Namespace
from engine.main_engine_v2 import MainEngineV2
import importlib
import torch
import ignite.distributed as idist
def run(local_rank, config):
pe = MainEngineV2(local_rank, config)
pe.train(config.run_params)
def main(hyperparams):
with idist.Parallel(**hyperparams.dist_params) as parallel:
parallel.run(run, hyperparams)
if __name__ == '__main__':
parser = ArgumentParser(parents=[])
parser.add_argument('--config', type=str)
params = parser.parse_args()
module = importlib.import_module(params.config, package=None)
hyperparams = module.Parameters()
main(hyperparams) | StarcoderdataPython |
364955 | <reponame>wunderio/WunderMachina
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: upcloud
short_description: Create/delete a server in UpCloud
description:
- Create/delete a server in UpCloud or ensure that an existing server is started
author: "<NAME> (@elnygren)"
options:
state:
description:
- Desired state of the target
default: present
choices: ['present', 'absent']
api_user:
description:
- UpCloud API username. Can be set as environment variable.
api_passwd:
description:
- UpCloud API password. Can be set as environment variable.
title:
description:
- String. Server's title in UpCloud. Optional if state is absent.
hostname:
description:
- String. Server's hostname in UpCloud. Hostname or UUID needed for targeting an existing server. Optional if state is absent.
zone:
description:
- String. Server's zone in UpCloud. Optional if state is absent.
storage_devices:
description:
- Array of storage dicts. Server's storages in UpCloud. Optional if state is absent.
uuid:
description:
- Optional string. Server's UUID. UUID or hostname needed for targeting an existing server.
plan:
description:
- Optional string. Server's plan if using UpCloud preconfigured instances.
core_number:
description:
- Optional integer. Server's CPU cores if using UpCloud freely scalable instances.
memory_amount:
description:
- Optional integer. Server's RAM if using UpCloud freely scalable instances.
ip_addresses:
description:
- Optional array of IP-address dicts. Server's IP-addresses in UpCloud. UpCloud assigns 1 public and 1 private IPv4 by default.
firewall:
description:
- Bool. Firewall on/off in UpCloud.
default: no
choices: [ "yes", "no" ]
vnc:
description:
- Bool. VNC on/off in UpCloud.
default: no
choices: [ "yes", "no" ]
vnc_password:
description:
- Optional string. VNC password in UpCloud.
video_model:
description:
- Optional string. Video adapter in UpCloud.
timezone:
description:
- Optional string. Timezone in UpCloud.
password_delivery:
description:
- Optional string. Password delivery method. UpCloud Ansible module grabs SSH credentials from API response.
- UpCloud's API client defaults to 'none' (as opposed to 'email' or 'sms')
nic_model:
description:
- Optional string. Network adapter in UpCloud.
boot_order:
description:
- Optional string. Boot order in UpCloud.
avoid_host:
description:
- Optional string or integer. Host ID in UpCloud.
user:
description:
- Optional string. Linux user that should be created with given ssh_keys.
- When user and ssh_keys are being used, no password is delivered in API response.
- UpCloud's API defaults to 'root' user
ssh_keys:
description:
- Optional list of strings. SSH keys that should be added to the given user.
- When user and ssh_keys are being used, no password is delivered in API response.
notes:
- UPCLOUD_API_USER and UPCLOUD_API_PASSWD environment variables may be used instead of api_user and api_passwd
- Better description of UpCloud's API available at U(www.upcloud.com/api/)
requirements:
- "python >= 2.6"
- "upcloud-api >= 0.3.4"
'''
EXAMPLES = '''
# Create and destroy a server.
# Step 1: If www1.example.com exists, ensure it is started. If it doesn't exist, create it.
# Step 2: Stop and destroy the server created in step 1.
- name: Create upcloud server
upcloud:
state: present
hostname: www1.example.com
title: www1.example.com
zone: uk-lon1
plan: 1xCPU-1GB
storage_devices:
- { size: 30, os: Ubuntu 14.04 }
- { size: 100 }
user: upclouduser
ssh_keys:
- ssh-rsa AAAAB3NzaC1yc2EAA[...]ptshi44x user@some.host
- ssh-dss AAAAB3NzaC1kc3MAA[...]VHRzAA== someuser@some.other.host
register: upcloud_server
- debug: msg="upcloud_server => {{ upcloud_server }}"
- name: Wait for SSH to come up
wait_for: host={{ upcloud_server.public_ip }} port=22 delay=5 timeout=320 state=started
# tip: hostname can also be used to destroy a server
- name: Destroy upcloud server
upcloud:
state: absent
uuid: "{{ upcloud_server.server.uuid }}"
'''
from distutils.version import LooseVersion
import os
# make sure that upcloud-api is installed
HAS_UPCLOUD = True
try:
import upcloud_api
from upcloud_api import CloudManager
if LooseVersion(upcloud_api.__version__) < LooseVersion('0.3.4'):
HAS_UPCLOUD = False
except ImportError:
HAS_UPCLOUD = False
class ServerManager():
"""Helpers for managing upcloud.Server instance"""
def __init__(self, api_user, api_passwd):
self.manager = CloudManager(api_user, api_passwd)
def find_server(self, uuid, hostname):
"""
Finds a server first by uuid (if given) and then by hostname.
Exits if the given hostname has duplicates as this could potentially
lead to destroying the wrong server.
"""
# try with uuid first, if given
if uuid:
try:
server = self.manager.get_server(uuid)
return server
            except Exception:
pass # no server found
# try with hostname, if given and nothing was found with uuid
if hostname:
servers = self.manager.get_servers()
found_servers = []
for server in servers:
if server.hostname == hostname:
found_servers.append(server)
            if len(found_servers) > 1:
                raise Exception('More than one server matched the given hostname. Please use unique hostnames.')
if len(found_servers) == 1:
return found_servers[0]
return None
def create_server(self, module):
"""Create a server from module.params. Filters out unwanted attributes."""
# filter out 'filter_keys' and those who equal None from items to get server's attributes for POST request
items = module.params.items()
filter_keys = set(['state', 'api_user', 'api_passwd', 'user', 'ssh_keys'])
server_dict = dict((key,value) for key, value in items if key not in filter_keys and value is not None)
if module.params.get('ssh_keys'):
login_user = upcloud_api.login_user_block(
username=module.params.get('user'),
ssh_keys=module.params['ssh_keys'],
create_password=False
)
server_dict['login_user'] = login_user
return self.manager.create_server(server_dict)
def run(module, server_manager):
"""create/destroy/start server based on its current state and desired state"""
state = module.params['state']
uuid = module.params.get('uuid')
hostname = module.params.get('hostname')
changed = True
if state == 'present':
server = server_manager.find_server(uuid, hostname)
if not server:
# create server, if one was not found
server = server_manager.create_server(module)
else:
if server.state=='started':
changed = False
server.ensure_started()
module.exit_json(changed=changed, server=server.to_dict(), public_ip=server.get_public_ip())
elif state == 'absent':
server = server_manager.find_server(uuid, hostname)
if server:
server.stop_and_destroy()
module.exit_json(changed=True, msg="destroyed" + server.hostname)
module.exit_json(changed=False, msg="server absent (didn't exist in the first place)")
def main():
"""main execution path"""
module = AnsibleModule(
argument_spec = dict(
state = dict(choices=['present', 'absent'], default='present'),
api_user = dict(aliases=['CLIENT_ID'], no_log=True),
api_passwd = dict(aliases=['API_KEY'], no_log=True),
# required for creation
title = dict(type='str'),
hostname = dict(type='str'),
zone = dict(type='str'),
storage_devices = dict(type='list'),
# required for destroying
uuid = dict(aliases=['id'], type='str', default=None),
# optional, but useful
plan = dict(type='str'),
core_number = dict(type='int'),
memory_amount = dict(type='int'),
ip_addresses = dict(type='list'),
firewall = dict(type='bool'),
ssh_keys = dict(type='list'),
user = dict(type='str'),
# optional, nice-to-have
vnc = dict(type='bool'),
vnc_password = dict(type='str'),
video_model = dict(type='str'),
timezone = dict(type='str'),
password_delivery = dict(type='str'),
nic_model = dict(type='str'),
boot_order = dict(type='str'),
avoid_host = dict(type='str')
),
required_together = (
['core_number', 'memory_amount'],
['api_user', 'api_passwd']
),
mutually_exclusive = (
['plan', 'core_number'],
['plan', 'memory_amount']
),
required_one_of = (
['uuid', 'hostname'],
),
)
# ensure dependencies and API credentials are in place
#
if not HAS_UPCLOUD:
module.fail_json(msg='upcloud-api required for this module (`pip install upcloud-api`)')
api_user = module.params.get('api_user') or os.getenv('UPCLOUD_API_USER')
api_passwd = module.params.get('api_passwd') or os.getenv('UPCLOUD_API_PASSWD')
if not api_user or not api_passwd:
module.fail_json(msg='''Please set UPCLOUD_API_USER and UPCLOUD_API_PASSWD environment variables or provide api_user and api_passwd arguments.''')
# begin execution. Catch all unhandled exceptions.
# Note: UpCloud's API has good error messages that the api client passes on.
#
server_manager = ServerManager(api_user, api_passwd)
try:
run(module, server_manager)
except Exception as e:
import traceback
module.fail_json(msg=str(e) + str(traceback.format_exc()))
# the required module boilerplate
#
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| StarcoderdataPython |
11247689 | <reponame>syqu22/django-react-blog
from django.test import TestCase
from posts.models import Post, Tag
from users.models import User
class TestModels(TestCase):
def setUp(self):
self.user = User.objects.create_user(
username='test', email='<EMAIL>', password='<PASSWORD>')
self.tag1 = Tag.objects.create(name='Tag 1')
self.tag2 = Tag.objects.create(name='Tag 2')
def test_tag_model(self):
"""
New Tag does not change instance and is saved to Database
"""
tag = Tag.objects.create(name='Test tag')
self.assertIsInstance(tag, Tag)
self.assertEqual(Tag.objects.get(name='Test tag'), tag)
def test_post_model(self):
"""
New Post does not change instance and is saved to Database
"""
post = Post.objects.create(title='Test Post', slug='test-post', thumbnail='https://www.test.example.com', author=self.user,
body='Test content of the post', read_time=5, is_public=True)
post.tags.add(self.tag1)
post.tags.add(self.tag2)
self.assertIsInstance(post, Post)
self.assertEqual(Post.objects.get(
slug='test-post').tags.get(name='Tag 1'), self.tag1)
self.assertEqual(Post.objects.get(slug='test-post'), post)
| StarcoderdataPython |
96264 | from __future__ import absolute_import
from textwrap import dedent
class UserError(Exception):
def __init__(self, msg):
self.msg = dedent(msg).strip()
def __unicode__(self):
return self.msg
| StarcoderdataPython |
286712 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
def weights_init(m):
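    """Kaiming-normal init for Conv2d/Linear weights; zero-init the bias when present."""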
if isinstance(m, (nn.Conv2d, nn.Linear)):
nn.init.kaiming_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
class Net(nn.Module):
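    """Fully-connected regressor for 784-d inputs: four Linear+BatchNorm1d+ReLU blocks and a single-unit linear head."""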
def __init__(self, seed, parameter_momentum):
super(Net, self).__init__()
L0 = 784
L1 = 300
L2 = 301
L3 = 302
L4 = 303
L5 = 304
L6 = 305
#torch.manual_seed(seed)
self.L1 = nn.Linear(L0, L1, bias=False)
torch.nn.init.xavier_uniform_(self.L1.weight)
self.bn1 = nn.BatchNorm1d(L1, momentum=parameter_momentum)
torch.nn.init.ones_(self.bn1.weight)
self.L2 = nn.Linear(L1, L2, bias=False)
torch.nn.init.xavier_uniform_(self.L2.weight)
self.bn2 = nn.BatchNorm1d(L2, momentum=parameter_momentum)
torch.nn.init.ones_(self.bn2.weight)
self.L3 = nn.Linear(L2, L3, bias=False)
torch.nn.init.xavier_uniform_(self.L3.weight)
self.bn3 = nn.BatchNorm1d(L3, momentum=parameter_momentum)
torch.nn.init.ones_(self.bn3.weight)
self.L4 = nn.Linear(L3, L4, bias=False)
torch.nn.init.xavier_uniform_(self.L4.weight)
self.bn4 = nn.BatchNorm1d(L4, momentum=parameter_momentum)
torch.nn.init.ones_(self.bn4.weight)
# self.L5 = nn.Linear(L4, L5, bias=False)
# torch.nn.init.xavier_uniform_(self.L4.weight)
# self.bn5 = nn.BatchNorm1d(L5, momentum=parameter_momentum)
# torch.nn.init.ones_(self.bn5.weight)
#
# self.L6 = nn.Linear(L5, L6, bias=False)
# torch.nn.init.xavier_uniform_(self.L4.weight)
# self.bn6 = nn.BatchNorm1d(L6, momentum=parameter_momentum)
# torch.nn.init.ones_(self.bn6.weight)
self.L5 = nn.Linear(L4, 1, bias=True)
torch.nn.init.xavier_uniform_(self.L5.weight)
torch.nn.init.zeros_(self.L5.bias)
def forward(self, x):
x = self.L1(x)
x = self.bn1(x)
x = F.relu(x)
x = self.L2(x)
x = self.bn2(x)
x = F.relu(x)
x = self.L3(x)
x = self.bn3(x)
x = F.relu(x)
x = self.L4(x)
x = self.bn4(x)
x = F.relu(x)
# x = self.L5(x)
# x = self.bn5(x)
# x = F.relu(x)
#
# x = self.L6(x)
# x = self.bn6(x)
# x = F.relu(x)
x = self.L5(x)
return x
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(in_planes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(ResNet, self).__init__()
self.in_planes = 64
self.conv1 = conv3x3(3,64)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear(512*block.expansion, 1)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def ResNet18(num_classes=100):
return ResNet(BasicBlock, [2,2,2,2], num_classes)
class CNN(nn.Module):
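    """All-convolutional trunk (nine conv+BN+ReLU stages) flattened into a three-layer MLP head with a single scalar output."""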
def __init__(self):
super(CNN, self).__init__()
self.conv1 = nn.Conv2d(3, 96, kernel_size=3, padding=1)
self.bn1 = nn.BatchNorm2d(96)
self.conv2 = nn.Conv2d(96, 96, kernel_size=3, padding=1)
self.bn2 = nn.BatchNorm2d(96)
self.conv3 = nn.Conv2d(96, 96, kernel_size=3, stride=2, padding=1)
self.bn3 = nn.BatchNorm2d(96)
self.conv4 = nn.Conv2d(96, 192, kernel_size=3, padding=1)
self.bn4 = nn.BatchNorm2d(192)
self.conv5 = nn.Conv2d(192, 192, kernel_size=3, padding=1)
self.bn5 = nn.BatchNorm2d(192)
self.conv6 = nn.Conv2d(192, 192, kernel_size=3, stride=2, padding=1)
self.bn6 = nn.BatchNorm2d(192)
self.conv7 = nn.Conv2d(192, 192, kernel_size=3, padding=1)
self.bn7 = nn.BatchNorm2d(192)
self.conv8 = nn.Conv2d(192, 192, kernel_size=1)
self.bn8 = nn.BatchNorm2d(192)
self.conv9 = nn.Conv2d(192, 10, kernel_size=1)
self.bn9 = nn.BatchNorm2d(10)
self.l1 = nn.Linear(640, 1000)
self.l2 = nn.Linear(1000, 1000)
self.l3 = nn.Linear(1000, 1)
self.apply(weights_init)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = F.relu(x)
x = self.conv3(x)
x = self.bn3(x)
x = F.relu(x)
x = self.conv4(x)
x = self.bn4(x)
x = F.relu(x)
x = self.conv5(x)
x = self.bn5(x)
x = F.relu(x)
x = self.conv6(x)
x = self.bn6(x)
x = F.relu(x)
x = self.conv7(x)
x = self.bn7(x)
x = F.relu(x)
x = self.conv8(x)
x = self.bn8(x)
x = F.relu(x)
x = self.conv9(x)
x = self.bn9(x)
x = F.relu(x)
x = x.view(-1, 640)
x = self.l1(x)
x = F.relu(x)
x = self.l2(x)
x = F.relu(x)
x = self.l3(x)
return x
if __name__ == '__main__':
x = torch.zeros((1, 3, 32, 32))
model = CNN()
print(model(x))
| StarcoderdataPython |
128259 | <gh_stars>0
from typing import List
class Solution:
    def canJump(self, nums: List[int]) -> bool:
n=len(nums)
vis=[0 for _ in range(n)]
for i in range(min(nums[0]+1,n)):
vis[i]=1
for i in range(1,n-1):
            if vis[i]:  # this position is reachable
if i+nums[i]>=n-1:
return True
for j in range(nums[i-1],nums[i]+1):
if vis[i+j]==0:
vis[i+j]=1
if vis[-1]==1:
return True
else:
return False
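if __name__ == "__main__":
    # Quick sanity check on the classic Jump Game examples (illustrative only;
    # `canJump` is a rename of the scraped `XXX` stub above):
    print(Solution().canJump([2, 3, 1, 1, 4]))  # expected: True
    print(Solution().canJump([3, 2, 1, 0, 4]))  # expected: False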
| StarcoderdataPython |
8193047 | <gh_stars>100-1000
from __future__ import absolute_import
from __future__ import print_function
import veriloggen
import tmpvar
expected_verilog = """
module blinkled #
(
parameter WIDTH = 8
)
(
input CLK,
input RST,
output reg [WIDTH-1:0] LED
);
reg [32-1:0] _tmp_0;
reg [32-1:0] _tmp_1;
wire [32-1:0] _tmp_2;
reg [32-1:0] _tmp_3;
wire [32-1:0] _tmp_4;
reg [32-1:0] _tmp_5;
wire [32-1:0] _tmp_6;
always @(posedge CLK) begin
if(RST) begin
_tmp_0 <= 0;
end else begin
if(_tmp_0 == 1023) begin
_tmp_0 <= 0;
end else begin
_tmp_0 <= _tmp_0 + 1;
end
end
end
always @(posedge CLK) begin
if(RST) begin
LED <= 0;
end else begin
if(_tmp_0 == 1023) begin
LED <= LED + 1;
end
end
end
endmodule
"""
def test():
veriloggen.reset()
test_module = tmpvar.mkLed()
code = test_module.to_verilog()
from pyverilog.vparser.parser import VerilogParser
from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
parser = VerilogParser()
expected_ast = parser.parse(expected_verilog)
codegen = ASTCodeGenerator()
expected_code = codegen.visit(expected_ast)
assert(expected_code == code)
| StarcoderdataPython |
9660018 | <gh_stars>1000+
from collections import deque
from difflib import get_close_matches
from itertools import chain
from warnings import warn
import theano
import numpy as np
from .. import utils
__all__ = [
"get_all_layers",
"get_output",
"get_output_shape",
"get_all_params",
"count_params",
"get_all_param_values",
"set_all_param_values",
]
def get_all_layers(layer, treat_as_input=None):
"""
This function gathers all layers below one or more given :class:`Layer`
instances, including the given layer(s). Its main use is to collect all
layers of a network just given the output layer(s). The layers are
guaranteed to be returned in a topological order: a layer in the result
list is always preceded by all layers its input depends on.
Parameters
----------
layer : Layer or list
the :class:`Layer` instance for which to gather all layers feeding
into it, or a list of :class:`Layer` instances.
treat_as_input : None or iterable
an iterable of :class:`Layer` instances to treat as input layers
with no layers feeding into them. They will show up in the result
list, but their incoming layers will not be collected (unless they
are required for other layers as well).
Returns
-------
list
a list of :class:`Layer` instances feeding into the given
instance(s) either directly or indirectly, and the given
instance(s) themselves, in topological order.
Examples
--------
>>> from lasagne.layers import InputLayer, DenseLayer
>>> l_in = InputLayer((100, 20))
>>> l1 = DenseLayer(l_in, num_units=50)
>>> get_all_layers(l1) == [l_in, l1]
True
>>> l2 = DenseLayer(l_in, num_units=10)
>>> get_all_layers([l2, l1]) == [l_in, l2, l1]
True
>>> get_all_layers([l1, l2]) == [l_in, l1, l2]
True
>>> l3 = DenseLayer(l2, num_units=20)
>>> get_all_layers(l3) == [l_in, l2, l3]
True
>>> get_all_layers(l3, treat_as_input=[l2]) == [l2, l3]
True
"""
# We perform a depth-first search. We add a layer to the result list only
# after adding all its incoming layers (if any) or when detecting a cycle.
# We use a LIFO stack to avoid ever running into recursion depth limits.
try:
queue = deque(layer)
except TypeError:
queue = deque([layer])
seen = set()
done = set()
result = []
# If treat_as_input is given, we pretend we've already collected all their
# incoming layers.
if treat_as_input is not None:
seen.update(treat_as_input)
while queue:
# Peek at the leftmost node in the queue.
layer = queue[0]
if layer is None:
# Some node had an input_layer set to `None`. Just ignore it.
queue.popleft()
elif layer not in seen:
# We haven't seen this node yet: Mark it and queue all incomings
# to be processed first. If there are no incomings, the node will
# be appended to the result list in the next iteration.
seen.add(layer)
if hasattr(layer, 'input_layers'):
queue.extendleft(reversed(layer.input_layers))
elif hasattr(layer, 'input_layer'):
queue.appendleft(layer.input_layer)
else:
# We've been here before: Either we've finished all its incomings,
# or we've detected a cycle. In both cases, we remove the layer
# from the queue and append it to the result list.
queue.popleft()
if layer not in done:
result.append(layer)
done.add(layer)
return result
def get_output(layer_or_layers, inputs=None, **kwargs):
"""
Computes the output of the network at one or more given layers.
Optionally, you can define the input(s) to propagate through the network
instead of using the input variable(s) associated with the network's
input layer(s).
Parameters
----------
layer_or_layers : Layer or list
the :class:`Layer` instance for which to compute the output
expressions, or a list of :class:`Layer` instances.
inputs : None, Theano expression, numpy array, or dict
If None, uses the input variables associated with the
:class:`InputLayer` instances.
If a Theano expression, this defines the input for a single
:class:`InputLayer` instance. Will throw a ValueError if there
are multiple :class:`InputLayer` instances.
If a numpy array, this will be wrapped as a Theano constant
and used just like a Theano expression.
If a dictionary, any :class:`Layer` instance (including the
input layers) can be mapped to a Theano expression or numpy
array to use instead of its regular output.
Returns
-------
output : Theano expression or list
the output of the given layer(s) for the given network input
Notes
-----
Depending on your network architecture, `get_output([l1, l2])` may
be crucially different from `[get_output(l1), get_output(l2)]`. Only
the former ensures that the output expressions depend on the same
intermediate expressions. For example, when `l1` and `l2` depend on
a common dropout layer, the former will use the same dropout mask for
both, while the latter will use two different dropout masks.
"""
from .input import InputLayer
from .base import MergeLayer, Layer
# check if the keys of the dictionary are valid
if isinstance(inputs, dict):
for input_key in inputs.keys():
if (input_key is not None) and (not isinstance(input_key, Layer)):
raise TypeError("The inputs dictionary keys must be"
" lasagne layers not %s." %
type(input_key))
# track accepted kwargs used by get_output_for
accepted_kwargs = {'deterministic'}
# obtain topological ordering of all layers the output layer(s) depend on
treat_as_input = inputs.keys() if isinstance(inputs, dict) else []
all_layers = get_all_layers(layer_or_layers, treat_as_input)
# initialize layer-to-expression mapping from all input layers
all_outputs = dict((layer, layer.input_var)
for layer in all_layers
if isinstance(layer, InputLayer) and
layer not in treat_as_input)
# update layer-to-expression mapping from given input(s), if any
if isinstance(inputs, dict):
all_outputs.update((layer, utils.as_theano_expression(expr))
for layer, expr in inputs.items())
elif inputs is not None:
if len(all_outputs) > 1:
raise ValueError("get_output() was called with a single input "
"expression on a network with multiple input "
"layers. Please call it with a dictionary of "
"input expressions instead.")
for input_layer in all_outputs:
all_outputs[input_layer] = utils.as_theano_expression(inputs)
# update layer-to-expression mapping by propagating the inputs
for layer in all_layers:
if layer not in all_outputs:
try:
if isinstance(layer, MergeLayer):
layer_inputs = [all_outputs[input_layer]
for input_layer in layer.input_layers]
else:
layer_inputs = all_outputs[layer.input_layer]
except KeyError:
# one of the input_layer attributes must have been `None`
raise ValueError("get_output() was called without giving an "
"input expression for the free-floating "
"layer %r. Please call it with a dictionary "
"mapping this layer to an input expression."
% layer)
all_outputs[layer] = layer.get_output_for(layer_inputs, **kwargs)
try:
accepted_kwargs |= set(utils.inspect_kwargs(
layer.get_output_for))
except TypeError:
# If introspection is not possible, skip it
pass
accepted_kwargs |= set(layer.get_output_kwargs)
unused_kwargs = set(kwargs.keys()) - accepted_kwargs
if unused_kwargs:
suggestions = []
for kwarg in unused_kwargs:
suggestion = get_close_matches(kwarg, accepted_kwargs)
if suggestion:
suggestions.append('%s (perhaps you meant %s)'
% (kwarg, suggestion[0]))
else:
suggestions.append(kwarg)
warn("get_output() was called with unused kwargs:\n\t%s"
% "\n\t".join(suggestions))
# return the output(s) of the requested layer(s) only
try:
return [all_outputs[layer] for layer in layer_or_layers]
except TypeError:
return all_outputs[layer_or_layers]
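# Usage sketch (illustrative, not part of the docstring's doctests): requesting
# two outputs together reuses a single dropout mask, per the Notes above.
#   from lasagne.layers import InputLayer, DropoutLayer, DenseLayer
#   l_in = InputLayer((None, 20))
#   l_drop = DropoutLayer(l_in, p=0.5)
#   l1 = DenseLayer(l_drop, num_units=30)
#   l2 = DenseLayer(l_drop, num_units=30)
#   out1, out2 = get_output([l1, l2])  # one shared dropout mask for both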
def get_output_shape(layer_or_layers, input_shapes=None):
"""
Computes the output shape of the network at one or more given layers.
Parameters
----------
layer_or_layers : Layer or list
the :class:`Layer` instance for which to compute the output
shapes, or a list of :class:`Layer` instances.
input_shapes : None, tuple, or dict
If None, uses the input shapes associated with the
:class:`InputLayer` instances.
If a tuple, this defines the input shape for a single
:class:`InputLayer` instance. Will throw a ValueError if there
are multiple :class:`InputLayer` instances.
If a dictionary, any :class:`Layer` instance (including the
input layers) can be mapped to a shape tuple to use instead of
its regular output shape.
Returns
-------
tuple or list
the output shape of the given layer(s) for the given network input
"""
# shortcut: return precomputed shapes if we do not need to propagate any
if input_shapes is None or input_shapes == {}:
try:
return [layer.output_shape for layer in layer_or_layers]
except TypeError:
return layer_or_layers.output_shape
from .input import InputLayer
from .base import MergeLayer
# obtain topological ordering of all layers the output layer(s) depend on
if isinstance(input_shapes, dict):
treat_as_input = input_shapes.keys()
else:
treat_as_input = []
all_layers = get_all_layers(layer_or_layers, treat_as_input)
# initialize layer-to-shape mapping from all input layers
all_shapes = dict((layer, layer.shape)
for layer in all_layers
if isinstance(layer, InputLayer) and
layer not in treat_as_input)
# update layer-to-shape mapping from given input(s), if any
if isinstance(input_shapes, dict):
all_shapes.update(input_shapes)
elif input_shapes is not None:
if len(all_shapes) > 1:
raise ValueError("get_output_shape() was called with a single "
"input shape on a network with multiple input "
"layers. Please call it with a dictionary of "
"input shapes instead.")
for input_layer in all_shapes:
all_shapes[input_layer] = input_shapes
# update layer-to-shape mapping by propagating the input shapes
for layer in all_layers:
if layer not in all_shapes:
if isinstance(layer, MergeLayer):
input_shapes = [all_shapes[input_layer]
for input_layer in layer.input_layers]
else:
input_shapes = all_shapes[layer.input_layer]
all_shapes[layer] = layer.get_output_shape_for(input_shapes)
# return the output shape(s) of the requested layer(s) only
try:
return [all_shapes[layer] for layer in layer_or_layers]
except TypeError:
return all_shapes[layer_or_layers]
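# Usage sketch (illustrative): with no `input_shapes` the precomputed shapes
# are returned directly; otherwise the given shapes are propagated through.
#   l_in = InputLayer((None, 20))
#   l1 = DenseLayer(l_in, num_units=50)
#   get_output_shape(l1)            # -> (None, 50)
#   get_output_shape(l1, (64, 20))  # -> (64, 50)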
def get_all_params(layer, unwrap_shared=True, **tags):
"""
Returns a list of Theano shared variables or expressions that
parameterize the layer.
This function gathers all parameters of all layers below one or more given
:class:`Layer` instances, including the layer(s) itself. Its main use is to
collect all parameters of a network just given the output layer(s).
By default, all shared variables that participate in the forward pass will
be returned. The list can optionally be filtered by specifying tags as
keyword arguments. For example, ``trainable=True`` will only return
trainable parameters, and ``regularizable=True`` will only return
parameters that can be regularized (e.g., by L2 decay).
Parameters
----------
layer : Layer or list
The :class:`Layer` instance for which to gather all parameters, or a
list of :class:`Layer` instances.
unwrap_shared : bool (default: True)
Affects only parameters that were set to a Theano expression. If
``True`` the function returns the shared variables contained in
the expression, otherwise the Theano expression itself.
**tags (optional)
tags can be specified to filter the list. Specifying ``tag1=True``
will limit the list to parameters that are tagged with ``tag1``.
Specifying ``tag1=False`` will limit the list to parameters that
are not tagged with ``tag1``. Commonly used tags are
``regularizable`` and ``trainable``.
Returns
-------
params : list
A list of Theano shared variables or expressions representing
the parameters.
Notes
-----
If any of the layers' parameters was set to a Theano expression instead
of a shared variable, `unwrap_shared` controls whether to return the
shared variables involved in that expression (``unwrap_shared=True``,
the default), or the expression itself (``unwrap_shared=False``). In
either case, tag filtering applies to the expressions, considering all
variables within an expression to be tagged the same.
Examples
--------
Collecting all parameters from a two-layer network:
>>> from lasagne.layers import InputLayer, DenseLayer
>>> l_in = InputLayer((100, 20))
>>> l1 = DenseLayer(l_in, num_units=50)
>>> l2 = DenseLayer(l1, num_units=30)
>>> all_params = get_all_params(l2)
>>> all_params == [l1.W, l1.b, l2.W, l2.b]
True
Parameters can be filtered by tags, and parameter expressions are
unwrapped to return involved shared variables by default:
>>> from lasagne.utils import floatX
>>> w1 = theano.shared(floatX(.01 * np.random.randn(50, 30)))
>>> w2 = theano.shared(floatX(1))
>>> l2 = DenseLayer(l1, num_units=30, W=theano.tensor.exp(w1) - w2, b=None)
>>> all_params = get_all_params(l2, regularizable=True)
>>> all_params == [l1.W, w1, w2]
True
When disabling unwrapping, the expression for ``l2.W`` is returned instead:
>>> all_params = get_all_params(l2, regularizable=True,
... unwrap_shared=False)
>>> all_params == [l1.W, l2.W]
True
"""
layers = get_all_layers(layer)
params = chain.from_iterable(l.get_params(
unwrap_shared=unwrap_shared, **tags) for l in layers)
return utils.unique(params)
def count_params(layer, **tags):
"""
This function counts all parameters (i.e., the number of scalar
values) of all layers below one or more given :class:`Layer` instances,
including the layer(s) itself.
This is useful to compare the capacity of various network architectures.
All parameters returned by the :class:`Layer`s' `get_params` methods are
counted.
Parameters
----------
layer : Layer or list
The :class:`Layer` instance for which to count the parameters, or a
list of :class:`Layer` instances.
**tags (optional)
tags can be specified to filter the list of parameter variables that
will be included in the count. Specifying ``tag1=True``
will limit the list to parameters that are tagged with ``tag1``.
Specifying ``tag1=False`` will limit the list to parameters that
are not tagged with ``tag1``. Commonly used tags are
``regularizable`` and ``trainable``.
Returns
-------
int
The total number of learnable parameters.
Examples
--------
>>> from lasagne.layers import InputLayer, DenseLayer
>>> l_in = InputLayer((100, 20))
>>> l1 = DenseLayer(l_in, num_units=50)
>>> param_count = count_params(l1)
>>> param_count
1050
>>> param_count == 20 * 50 + 50 # 20 input * 50 units + 50 biases
True
"""
params = get_all_params(layer, **tags)
shapes = [p.get_value().shape for p in params]
counts = [np.prod(shape) for shape in shapes]
return sum(counts)
def get_all_param_values(layer, **tags):
"""
This function returns the values of the parameters of all layers below one
or more given :class:`Layer` instances, including the layer(s) itself.
This function can be used in conjunction with set_all_param_values to save
and restore model parameters.
Parameters
----------
layer : Layer or list
The :class:`Layer` instance for which to gather all parameter values,
or a list of :class:`Layer` instances.
**tags (optional)
tags can be specified to filter the list. Specifying ``tag1=True``
will limit the list to parameters that are tagged with ``tag1``.
Specifying ``tag1=False`` will limit the list to parameters that
are not tagged with ``tag1``. Commonly used tags are
``regularizable`` and ``trainable``.
Returns
-------
list of numpy.array
A list of numpy arrays representing the parameter values.
Examples
--------
>>> from lasagne.layers import InputLayer, DenseLayer
>>> l_in = InputLayer((100, 20))
>>> l1 = DenseLayer(l_in, num_units=50)
>>> all_param_values = get_all_param_values(l1)
>>> (all_param_values[0] == l1.W.get_value()).all()
True
>>> (all_param_values[1] == l1.b.get_value()).all()
True
"""
params = get_all_params(layer, **tags)
return [p.get_value() for p in params]
def set_all_param_values(layer, values, **tags):
"""
Given a list of numpy arrays, this function sets the parameters of all
layers below one or more given :class:`Layer` instances (including the
layer(s) itself) to the given values.
This function can be used in conjunction with get_all_param_values to save
and restore model parameters.
Parameters
----------
layer : Layer or list
The :class:`Layer` instance for which to set all parameter values, or a
list of :class:`Layer` instances.
values : list of numpy.array
A list of numpy arrays representing the parameter values, must match
the number of parameters.
Every parameter's shape must match the shape of its new value.
**tags (optional)
tags can be specified to filter the list of parameters to be set.
Specifying ``tag1=True`` will limit the list to parameters that are
tagged with ``tag1``.
Specifying ``tag1=False`` will limit the list to parameters that
are not tagged with ``tag1``. Commonly used tags are
``regularizable`` and ``trainable``.
Raises
------
ValueError
If the number of values is not equal to the number of params, or
if a parameter's shape does not match the shape of its new value.
Examples
--------
>>> from lasagne.layers import InputLayer, DenseLayer
>>> l_in = InputLayer((100, 20))
>>> l1 = DenseLayer(l_in, num_units=50)
>>> all_param_values = get_all_param_values(l1)
>>> # all_param_values is now [l1.W.get_value(), l1.b.get_value()]
>>> # ...
>>> set_all_param_values(l1, all_param_values)
>>> # the parameter values are restored.
"""
params = get_all_params(layer, **tags)
if len(params) != len(values):
raise ValueError("mismatch: got %d values to set %d parameters" %
(len(values), len(params)))
for p, v in zip(params, values):
if p.get_value().shape != v.shape:
raise ValueError("mismatch: parameter has shape %r but value to "
"set has shape %r" %
(p.get_value().shape, v.shape))
else:
p.set_value(v)
| StarcoderdataPython |
266114 | import os
import time
import json
import logging
from six.moves import cPickle
import numpy as np
import matplotlib.pyplot as plt
import matplotlibex.mlplot as plx
import ml.gptheano.vecgpdm.enums as enm
from ml.gptheano.vecgpdm.equations import list_of_nones
from ml.gptheano.vecgpdm.equations import matrix_of_nones
import validation.common as vc
pl = logging.getLogger(__name__)
titlesize = 20
def insert_nans(x=None, y=None, indexes=None):
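    """Append a NaN row after each index group so that matplotlib renders the
    concatenated sequences as separate line segments instead of one joined line."""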
if x is None:
x = np.arange(y.shape[0])
xx = np.hstack([np.hstack([x[zi], [x[zi][-1]]]) for zi in indexes])
yy = np.vstack([np.vstack([y[zi], [np.nan * y[zi][-1]]]) for zi in indexes])
return xx, yy
def plot_latent_space(model):
pl.info("Plotting the latent space. Coupling matrix Alpha: ")
pl.info(model.ns.get_value(model.alpha))
W = model.param.nparts # number of parts
plt.figure()
for i in range(W):
for j in range(W):
plt.subplot(W, W, 1+i+j*W)
plt.title("Latent space " + str(i), fontsize=titlesize)
x_means = model.ns.get_value(model.x_means[j])
plt.plot(x_means[:, 0], x_means[:, 1], '-', alpha=0.2)
dyn_aug_z = model.ns.get_value(model.dyn_aug_z[j][i])
plt.plot(dyn_aug_z[:, 0], dyn_aug_z[:, 1], 'o', markersize=15, markeredgewidth=2, fillstyle="none")
lvm_aug_z = model.ns.get_value(model.lvm_aug_z[j])
plt.plot(lvm_aug_z[:, 0], lvm_aug_z[:, 1], '+', markersize=15, markeredgewidth=2, fillstyle="none")
plt.show()
def save_plot_latent_space(model, directory=None, prefix=None,
plot_inducing_outputs=True,
plot_sampled_trajectory=True):
if prefix is None:
prefix = "model"
pl.info("Saving the latent space plots.")
stats = {"alpha": model.ns.get_value(model.alpha).tolist()}
if model.param.estimation_mode == enm.EstimationMode.ELBO:
stats.update({"ELBO": model.get_elbo_value().tolist(),
"dyn_ELBO": model.ns.evaluate(model.dyn_elbo).tolist(),
"lvm_ELBO": model.ns.evaluate(model.lvm_elbo).tolist(),
})
else:
stats.update({"Loglikelihood": model.get_loglikelihood_value().tolist(),})
if directory is None:
pl.info(json.dumps(stats))
else:
with open("{}/{}-stats.txt".format(directory, prefix), "w") as outfile:
json.dump(stats, outfile)
W = model.param.nparts # number of parts
# Sample the vector field
xins = list_of_nones(W)
step = 5
for j in range(W):
dyn_xtminus_means = model.ns.evaluate(model.dyn_xtminus_means[j])
xins[j] = dyn_xtminus_means[0:len(dyn_xtminus_means):step]
xins[j] = np.reshape(xins[j], [xins[j].shape[0], 2, -1])
xouts = model.run_generative_dynamics(nsteps=2+1, startpoint=xins)
xinout = [xouts[j][:, :, 0:2] for j in range(W)]
# Run latent dynamics
if plot_sampled_trajectory:
N = model.param.data.N
x_path = model.run_generative_dynamics(N)
if model.param.estimation_mode == enm.EstimationMode.ELBO:
# Augmenting mapping
dyn_auginout = matrix_of_nones(W, W)
for i in range(W):
for j in range(W):
dyn_auginout[j][i] = np.hstack([
model.pp_dyn_aug_z[j][i][:, np.newaxis, 0:2],
model.pp_dyn_aug_z[j][i][:, np.newaxis, model.param.parts[j].Q:model.param.parts[j].Q+2],
model.pp_dyn_aug_u_means[j][i][:, np.newaxis, 0:2]])
for i in range(W):
for j in range(W):
fig = plt.figure(figsize=(5, 5))
x_means = model.ns.get_value(model.x_means[j])
xx, yy = insert_nans(y=x_means, indexes=model.param.data.sequences_indexes)
# Latent points
plt.plot(yy[:, 0], yy[:, 1], '--b', alpha=0.2)
# Vector field
plx.plot_2nd_order_mapping_2d(xinout[j], alpha=0.4)
# Sampled tralectory
if plot_sampled_trajectory and i == j:
plt.plot(x_path[j][:, 0], x_path[j][:, 1], "-k", alpha=1.0, linewidth=0.7)
if model.param.estimation_mode == enm.EstimationMode.ELBO:
# Dyn inducing points
dyn_aug_z = model.ns.get_value(model.dyn_aug_z[j][i])
plt.plot(dyn_aug_z[:, 0], dyn_aug_z[:, 1],
'ob', markersize=10, markeredgewidth=2, fillstyle="none")
plt.plot(dyn_aug_z[:, model.param.parts[j].Q], dyn_aug_z[:, model.param.parts[j].Q+1],
'ob', markersize=10, markeredgewidth=2, fillstyle="none")
if plot_inducing_outputs:
plx.plot_2nd_order_mapping_2d(dyn_auginout[j][i], alpha=1, width=0.01)
else:
plx.plot_arrows_2d(dyn_auginout[j][i][:, 0, :], dyn_auginout[j][i][:, 1, :], alpha=1, width=0.01)
# Lvm inducing points
lvm_aug_z = model.ns.get_value(model.lvm_aug_z[j])
plt.plot(lvm_aug_z[:, 0], lvm_aug_z[:, 1], '+g', markersize=10, markeredgewidth=2, fillstyle="none")
plt.title("Latent space. Part {}".format(j+1), fontsize=titlesize)
if directory is None:
plt.show()
else:
plt.savefig("{}/{}_latent_space_{}_to_{}_ip_{}.pdf".format(
directory, prefix, j, i, plot_inducing_outputs))
plt.close(fig)
def save_plot_latent_vs_generated(model, directory=None, prefix=None):
if prefix is None:
prefix = "model"
pl.info("Saving the latent space plots, latent vs generated.")
W = model.param.nparts # number of parts
N = model.param.data.N
x_path = model.run_generative_dynamics(N)
for i in range(W):
y_i = model.ns.get_value(model.x_means[i])
xx, yy = insert_nans(y=y_i, indexes=model.param.data.sequences_indexes)
fig = plt.figure(figsize=(5, 5))
plt.plot(xx, yy, '--', linewidth=0.7, alpha=0.6)
plt.gca().set_prop_cycle(None)
plt.plot(x_path[i])
plt.title("Latent trajectories. Part {}".format(i+1), fontsize=titlesize)
if directory is None:
plt.show()
else:
plt.savefig("{}/{}_latent_vs_generated_part_{}.pdf".format(
directory, prefix, i))
plt.close(fig)
def plot_latent_vs_generated(model):
    N = model.param.data.N
    W = model.param.nparts # number of parts
x_path = model.run_generative_dynamics(N)
plt.figure()
for i in range(W):
plt.subplot(W, 1, 1+i)
x_means_i = model.ns.get_value(model.x_means[i])
plt.plot(x_means_i, '--', linewidth=0.7, alpha=0.6)
plt.gca().set_prop_cycle(None)
plt.plot(x_path[i], linewidth=0.7)
plt.show()
def save_plot_training_vs_generated(model, directory=None, prefix=None):
if prefix is None:
prefix = "model"
pl.info("Saving the observed space plots, training vs generated.")
N = model.param.data.N
W = model.param.nparts # number of parts
x_path = model.run_generative_dynamics(N)
y_path = model.lvm_map_to_observed(x_path)
for i in range(W):
y_i = model.param.parts[i].data.Y_value
xx, yy = insert_nans(y=y_i, indexes=model.param.data.sequences_indexes)
fig = plt.figure(figsize=(5, 5))
plt.plot(xx, yy, '--', linewidth=0.5, alpha=0.6)
plt.gca().set_prop_cycle(None)
plt.plot(y_path[i], linewidth=0.7)
plt.title("Training vs. generated. Part {}".format(i+1), fontsize=titlesize)
if directory is None:
plt.show()
else:
plt.savefig("{}/{}_training_vs_generated_part_{}_full.pdf".format(
directory, prefix, i))
plt.close(fig)
fig = plt.figure(figsize=(5, 5))
plt.plot(xx, yy[:, :3], '--', linewidth=0.7, alpha=0.6)
plt.gca().set_prop_cycle(None)
plt.plot(y_path[i][:, :3], linewidth=0.7)
plt.title("Training vs. generated. Part {}".format(i+1), fontsize=titlesize)
if directory is None:
plt.show()
else:
plt.savefig("{}/{}_training_vs_generated_part_{}_selected.pdf".format(
directory, prefix, i))
plt.close(fig)
def plot_training_vs_generated(model):
N = model.param.data.N
W = model.param.nparts # number of parts
x_path = model.run_generative_dynamics(N)
y_path = model.lvm_map_to_observed(x_path)
plt.figure()
for i in range(W):
plt.subplot(W, 1, 1+i)
y_i = model.param.parts[i].data.Y_value
plt.plot(y_i, '-', alpha=0.2)
plt.plot(y_path[i])
plt.show()
def save_all_plots(model, directory, prefix):
if directory is None:
directory = "."
if prefix is None:
prefix = "model"
save_plot_latent_space(model, directory, prefix, plot_inducing_outputs=True)
save_plot_latent_space(model, directory, prefix, plot_inducing_outputs=False)
save_plot_latent_vs_generated(model, directory, prefix)
save_plot_training_vs_generated(model, directory, prefix)
class ModelPlotter(object):
def __init__(self, save_dir=None):
self.save_dir = save_dir
self.counter = 0
def __call__(self, model):
model.precalc_posterior_predictive()
self._on_call(model)
self.counter += 1
def _on_call(self, model):
prefix = "iter({})".format(self.counter)
save_plot_latent_space(model, self.save_dir, prefix, plot_inducing_outputs=True)
save_plot_latent_space(model, self.save_dir, prefix, plot_inducing_outputs=False)
save_plot_latent_vs_generated(model, self.save_dir, prefix)
save_plot_training_vs_generated(model, self.save_dir, prefix)
class MSEWriter(ModelPlotter):
def _on_call(self, model):
super(MSEWriter, self)._on_call(model)
validation = np.array(model.param.data.Y_sequences)[0]
print("Iteration counter: {}".format(self.counter))
T_validation = len(validation)
x_generated = model.run_generative_dynamics(T_validation)
y_generated = model.lvm_map_to_observed(x_generated)
predicted = np.hstack(y_generated)
errors = vc.compute_errors(observed=validation, predicted=predicted)
prefix = "iter({})".format(self.counter)
with open("{}/{}-errors.txt".format(self.save_dir, prefix), "w") as outfile:
json.dump(errors, outfile)
default_model_plotter = ModelPlotter()
| StarcoderdataPython |
3439301 | <reponame>bmacauley/auth0-aws-creds
class Auth0AWSCreds(object):
def __init__(self):
pass | StarcoderdataPython |
6422068 | import cv2,time,pandas
from datetime import datetime
first_frame=None
status_list=[None,None]
times=[]
df=pandas.DataFrame(columns=["Start","End"])
camera = cv2.VideoCapture(0)
while True:
check, frame = camera.read()
isMoving=0
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray=cv2.GaussianBlur(gray,(21,21),0)
if first_frame is None:
first_frame=gray
continue
delta_frame=cv2.absdiff(first_frame,gray)
thresh_frame=cv2.threshold(delta_frame,30,255,cv2.THRESH_BINARY)[1]
thresh_frame=cv2.dilate(thresh_frame,None,iterations=2)
(cnts,_)=cv2.findContours(thresh_frame.copy(),cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for contour in cnts:
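        # Contours smaller than 10000 px are treated as noise rather than motion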
if cv2.contourArea(contour) < 10000:
continue
isMoving=1
(x,y,w,h)=cv2.boundingRect(contour)
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),3)
status_list.append(isMoving)
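    # Record a timestamp at every motion transition: start (0 -> 1) and stop (1 -> 0)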
if status_list[-1]==1 and status_list[-2]==0:
times.append(datetime.now())
if status_list[-1] == 0 and status_list[-2] == 1:
times.append(datetime.now())
print(times)
cv2.imshow("Movement detection",frame)
key=cv2.waitKey(1)
print(isMoving)
if key==ord('q'):
if isMoving==1:
times.append(datetime.now())
break
for i in range(0,len(times),2):
df=df.append({"Start":times[i],"End":times[i+1]},ignore_index=True)
df.to_csv("TimesOfMovement.csv")
camera.release()
cv2.destroyAllWindows() | StarcoderdataPython |
5025033 | <reponame>jedhsu/kagi
"""
*Ka* |⠂|
Geometrically, middle-west one.
"""
from dataclasses import dataclass
from kagi._index import A
from ._k import K
__all__ = ["Ka"]
@dataclass
class Ka(
K,
A,
):
symbol = "\u2802"
| StarcoderdataPython |
1725283 | # coding=utf-8
import solution as f
"""Test calculate_dimensions for n cases:
1) the height and width of the current image is smaller than the desired
dimensions.
2) the height of the current image is smaller than the desired height but the
width larger.
3) The width of the current image is smaller than the desired width but the
height is larger.
4) The width and height of the current image is larger than the desired
dimensions. """
def test_calculate_dimensions_case_1():
current_size = (300, 480)
desired_size = (600, 800)
assert None is f.Image.calculate_dimensions(current_size, desired_size)
def test_calculate_dimensions_case_2():
current_size = (600, 480)
desired_size = (300, 800)
assert (150, 0, 300, 480) == f.Image.calculate_dimensions(current_size, desired_size)
def test_calculate_dimensions_case_3():
current_size = (300, 800)
desired_size = (600, 480)
assert (0, 160, 300, 480) == f.Image.calculate_dimensions(current_size, desired_size)
def test_calculate_dimensions_case_4():
current_size = (600, 800)
desired_size = (300, 480)
assert (150, 160, 300, 480) == f.Image.calculate_dimensions(current_size, desired_size)
| StarcoderdataPython |
6464107 | <filename>deep-rl/lib/python2.7/site-packages/OpenGL/GL/NV/texture_rectangle.py
'''OpenGL extension NV.texture_rectangle
This module customises the behaviour of the
OpenGL.raw.GL.NV.texture_rectangle to provide a more
Python-friendly API
Overview (from the spec)
OpenGL texturing is limited to images with power-of-two dimensions
and an optional 1-texel border. NV_texture_rectangle extension
adds a new texture target that supports 2D textures without requiring
power-of-two dimensions.
Non-power-of-two dimensioned textures are useful for storing
video images that do not have power-of-two dimensions. Re-sampling
artifacts are avoided and less texture memory may be required by using
non-power-of-two dimensioned textures. Non-power-of-two dimensioned
textures are also useful for shadow maps and window-space texturing.
However, non-power-of-two dimensioned (NPOTD) textures have
limitations that do not apply to power-of-two dimensioned (POT)
textures. NPOTD textures may not use mipmap filtering; POTD
textures support both mipmapped and non-mipmapped filtering.
NPOTD textures support only the GL_CLAMP, GL_CLAMP_TO_EDGE,
and GL_CLAMP_TO_BORDER_ARB wrap modes; POTD textures support
GL_CLAMP_TO_EDGE, GL_REPEAT, GL_CLAMP, GL_MIRRORED_REPEAT_IBM,
and GL_CLAMP_TO_BORDER. NPOTD textures do not support an optional
1-texel border; POTD textures do support an optional 1-texel border.
NPOTD textures are accessed by non-normalized texture coordinates.
So instead of thinking of the texture image lying in a [0..1]x[0..1]
range, the NPOTD texture image lies in a [0..w]x[0..h] range.
This extension adds a new texture target and related state (proxy,
binding, max texture size).
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/NV/texture_rectangle.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.NV.texture_rectangle import *
from OpenGL.raw.GL.NV.texture_rectangle import _EXTENSION_NAME
def glInitTextureRectangleNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
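# Usage sketch (assumes a current OpenGL context created elsewhere, e.g. via
# GLUT or pygame; the helper only reports whether the driver exposes the
# extension):
#   if glInitTextureRectangleNV():
#       ...  # bind GL_TEXTURE_RECTANGLE_NV textures and address them in
#            # non-normalized [0..w]x[0..h] coordinates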
### END AUTOGENERATED SECTION | StarcoderdataPython |
1697678 | <gh_stars>0
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.codegen.targets.java_antlr_library import JavaAntlrLibrary
from pants.backend.codegen.targets.java_protobuf_library import JavaProtobufLibrary
from pants.backend.codegen.targets.java_thrift_library import JavaThriftLibrary
from pants.backend.codegen.targets.python_antlr_library import PythonAntlrLibrary
from pants.backend.codegen.targets.python_thrift_library import PythonThriftLibrary
from pants.backend.core.targets.doc import Page
from pants.backend.core.targets.resources import Resources
from pants.backend.jvm.targets.annotation_processor import AnnotationProcessor
from pants.backend.jvm.targets.benchmark import Benchmark
from pants.backend.jvm.targets.java_agent import JavaAgent
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.targets.java_tests import JavaTests
from pants.backend.jvm.targets.jvm_binary import JvmBinary
from pants.backend.jvm.targets.scala_library import ScalaLibrary
from pants.backend.python.targets.python_binary import PythonBinary
from pants.backend.python.targets.python_library import PythonLibrary
from pants.backend.python.targets.python_tests import PythonTests
from pants.base.source_root import SourceRoot
def maven_layout(parse_context, basedir=''):
"""Sets up typical maven project source roots for all built-in pants target types.
Shortcut for ``source_root('src/main/java', *java targets*)``,
``source_root('src/main/python', *python targets*)``, ...
:param string basedir: Instead of using this BUILD file's directory as
the base of the source tree, use a subdirectory. E.g., instead of
expecting to find java files in ``src/main/java``, expect them in
``**basedir**/src/main/java``.
"""
def root(path, *types):
SourceRoot.register_mutable(os.path.join(parse_context.rel_path, basedir, path), *types)
root('src/main/antlr', JavaAntlrLibrary, Page, PythonAntlrLibrary)
root('src/main/java', AnnotationProcessor, JavaAgent, JavaLibrary, JvmBinary, Page, Benchmark)
root('src/main/protobuf', JavaProtobufLibrary, Page)
root('src/main/python', Page, PythonBinary, PythonLibrary)
root('src/main/resources', Page, Resources)
root('src/main/scala', JvmBinary, Page, ScalaLibrary, Benchmark)
root('src/main/thrift', JavaThriftLibrary, Page, PythonThriftLibrary)
root('src/test/java', JavaLibrary, JavaTests, Page, Benchmark)
root('src/test/python', Page, PythonLibrary, PythonTests)
root('src/test/resources', Page, Resources)
root('src/test/scala', JavaTests, Page, ScalaLibrary, Benchmark)
| StarcoderdataPython |
11207636 | from chaoslib.exceptions import InterruptExecution
from chaoslib.types import Activity, Run
def before_activity_control(context: Activity, **kwargs):
raise InterruptExecution("let's blow this up")
| StarcoderdataPython |
9662729 | __version__ = 1.0
__all__ = ["Mutator", "Enforcer"]
from Gatekeeper.mutator import Mutator
from Gatekeeper.enforcer import Enforcer
| StarcoderdataPython |
11324088 | #!/usr/bin/env python3
import os
import subprocess
import sys
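# Usage: compile.py <project-root> <input-file> <output-file>
# Wraps arisen-cpp to compile the input to an intermediate wasm object, then
# arisen-ld to link the final module.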
class CompileError(Exception):
pass
def main(root, in_file, out_file):
cwd = os.getcwd()
os.chdir(root)
intermediate_file = 'intermediate.wasm'
res = subprocess.run(
['arisen-cpp', '-O0', '-c', in_file, '-o', intermediate_file],
capture_output=True
)
if res.returncode > 0:
print(res.args)
raise CompileError(res.stderr)
res = subprocess.run(
['arisen-ld', intermediate_file, '-o', out_file],
capture_output=True
)
if res.returncode > 0:
print(res.args)
raise CompileError(res.stderr)
os.chdir(cwd)
if __name__ == "__main__":
root_path = sys.argv[1]
    in_file_path = sys.argv[2] if len(sys.argv) > 2 else ""
    out_file_path = sys.argv[3] if len(sys.argv) > 3 else ""
main(root_path, in_file_path, out_file_path)
| StarcoderdataPython |
12813457 | from direction import Direction
from door import Door
from maze import Maze
from room import Room, RoomWithABomb
from wall import Wall, BombedWall
class MazeGameFactory:
@classmethod
def make_maze(cls):
return Maze()
@classmethod
def make_room(cls, room_number):
return Room(room_number)
@classmethod
def make_wall(cls):
return Wall()
@classmethod
def make_door(cls, r1, r2):
return Door(r1, r2)
    @classmethod
    def create_maze(cls):
a_maze = cls.make_maze()
r1 = cls.make_room(1)
r2 = cls.make_room(2)
the_door = cls.make_door(r1, r2)
a_maze.add_room(r1)
a_maze.add_room(r2)
r1.set_side(Direction.NORTH, cls.make_wall())
r1.set_side(Direction.EAST, the_door)
r1.set_side(Direction.SOUTH, cls.make_wall())
r1.set_side(Direction.WEST, cls.make_wall())
r2.set_side(Direction.NORTH, cls.make_wall())
r2.set_side(Direction.EAST, cls.make_wall())
r2.set_side(Direction.SOUTH, cls.make_wall())
r2.set_side(Direction.WEST, the_door)
return a_maze
class BombedMazeGameFactory(MazeGameFactory):
@classmethod
def make_wall(cls):
return BombedWall()
@classmethod
def make_room(cls, room_number):
return RoomWithABomb(room_number)
if __name__ == "__main__":
game = MazeGameFactory()
created_maze = game.create_maze()
""" factory method๋ฅผ ์ฌ์ฉํ๋ฉด, ๋ค๋ฅธ create_maze ํจ์(client์์ ์ค์ ๋ก ํธ์ถํ๋
์ธํฐํ์ด์ค ์ญํ )์ ๋ณ๊ฒฝํ ํ์์์ด ๋ค์ํ MazeGame์ ์์ฑํ ์ ์๋ค. """
game_with_bombed_maze = BombedMazeGameFactory()
created_bombed_maze = game_with_bombed_maze.create_maze()
| StarcoderdataPython |
11288278 | <filename>papahana_flask_server_demo/papahana/util.py
import datetime
import six
import typing
import yaml
import pymongo
import urllib
from getpass import getpass
import os
from flask import current_app
def read_mode(config='./config.live.yaml'):
with open(config) as file:
mode_dict = yaml.load(file, Loader=yaml.FullLoader)['mode']
if 'config' in mode_dict:
return mode_dict['config']
else:
return 'production'
def read_config(mode, config='./config.live.yaml'):
with open(config) as file:
config = yaml.load(file, Loader=yaml.FullLoader)[mode]
return config
def read_urls(config='./config.live.yaml'):
with open(config) as file:
urls = yaml.load(file, Loader=yaml.FullLoader)['apis']
return urls
def config_collection(collection, conf=None):
if not conf:
with current_app.app_context():
conf = current_app.config_params
coll = create_collection(conf['dbName'], conf[collection],
port=conf['port'], ip=conf['ip'])
return coll
def create_collection(dbName, collName, port=27017, ip='127.0.0.1',
                      remote=False, username='papahanauser', password=None):
""" create_collection
Creates and returns a mongodb collection object
:param dbName: database name
:type dbName: str
:param collName: collection name
:type collName: str
:port: port name
:type port: int
:dbURL: url of database (use for databases)
:dbURL: str
:rtype: pymongo.collection.Collection
"""
if remote:
if not password:
password = getpass()
dbURL = f'mongodb+srv://{urllib.parse.quote(username)}:' \
f'{urllib.parse.quote(password)}@cluster0.gw51m.mongodb.net/' \
f'{dbName}'
elif os.environ.get('DOCKER_DATABASE_CONNECTION', False):
dbURL = f'mongodb://database:{port}'
else:
dbURL = f'mongodb://{ip}:{port}'
client = pymongo.MongoClient(dbURL)
db = client[dbName]
coll = db[collName]
return coll
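# Usage sketch (database/collection names are illustrative; requires a
# reachable mongod on localhost:27017):
#   coll = create_collection('papahana', 'observation_blocks')
#   coll.insert_one({'name': 'example_ob'})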
# swagger generated below here
def _deserialize(data, klass):
"""Deserializes dict, list, str into an object.
:param data: dict, list or str.
:param klass: class literal, or string of class name.
:return: object.
"""
if data is None:
return None
if klass in six.integer_types or klass in (float, str, bool):
return _deserialize_primitive(data, klass)
elif klass == object:
return _deserialize_object(data)
elif klass == datetime.date:
return deserialize_date(data)
elif klass == datetime.datetime:
return deserialize_datetime(data)
elif type(klass) == typing.GenericMeta:
if klass.__extra__ == list:
return _deserialize_list(data, klass.__args__[0])
if klass.__extra__ == dict:
return _deserialize_dict(data, klass.__args__[1])
else:
return deserialize_model(data, klass)
def _deserialize_primitive(data, klass):
"""Deserializes to primitive type.
:param data: data to deserialize.
:param klass: class literal.
:return: int, long, float, str, bool.
:rtype: int | long | float | str | bool
"""
try:
value = klass(data)
except UnicodeEncodeError:
value = six.u(data)
except TypeError:
value = data
return value
def _deserialize_object(value):
"""Return a original value.
:return: object.
"""
return value
def deserialize_date(string):
"""Deserializes string to date.
:param string: str.
:type string: str
:return: date.
:rtype: date
"""
try:
from dateutil.parser import parse
return parse(string).date()
except ImportError:
return string
def deserialize_datetime(string):
"""Deserializes string to datetime.
The string should be in iso8601 datetime format.
:param string: str.
:type string: str
:return: datetime.
:rtype: datetime
"""
try:
from dateutil.parser import parse
return parse(string)
except ImportError:
return string
def deserialize_model(data, klass):
"""Deserializes list or dict to model.
:param data: dict, list.
:type data: dict | list
:param klass: class literal.
:return: model object.
"""
instance = klass()
if not instance.swagger_types:
return data
for attr, attr_type in six.iteritems(instance.swagger_types):
if data is not None \
and instance.attribute_map[attr] in data \
and isinstance(data, (list, dict)):
value = data[instance.attribute_map[attr]]
setattr(instance, attr, _deserialize(value, attr_type))
return instance
def _deserialize_list(data, boxed_type):
"""Deserializes a list and its elements.
:param data: list to deserialize.
:type data: list
:param boxed_type: class literal.
:return: deserialized list.
:rtype: list
"""
return [_deserialize(sub_data, boxed_type)
for sub_data in data]
def _deserialize_dict(data, boxed_type):
"""Deserializes a dict and its elements.
:param data: dict to deserialize.
:type data: dict
:param boxed_type: class literal.
:return: deserialized dict.
:rtype: dict
"""
return {k: _deserialize(v, boxed_type)
for k, v in six.iteritems(data)}
| StarcoderdataPython |
5128845 | <reponame>bdh1011/wau
import os
from flask import Flask
from flask.ext.mail import Mail, Message
from flask.ext.sqlalchemy import SQLAlchemy
from celery import Celery
app = Flask(__name__)
app.config['SECRET_KEY'] = 'top-secret!'
# Flask-Mail configuration
app.config['MAIL_SERVER'] = 'smtp.googlemail.com'
app.config['MAIL_PORT'] = 587
app.config['MAIL_USE_TLS'] = True
app.config['MAIL_USERNAME'] = "<EMAIL>"
app.config['MAIL_PASSWORD'] = "<PASSWORD>"
app.config['MAIL_DEFAULT_SENDER'] = '<EMAIL>'
# Celery configuration
app.config['CELERY_BROKER_URL'] = 'redis://localhost:6379/0'
app.config['CELERY_RESULT_BACKEND'] = 'redis://localhost:6379/0'
app.config['CELERY_IMPORTS'] = ("app", )
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres@localhost/wau'
# app.config.from_object(os.environ['APP_SETTINGS'])
db = SQLAlchemy(app)
# from app.controllers import request_async_crawl
# Initialize extensions
mail = Mail(app)
celery = Celery(app.name, broker=app.config['CELERY_BROKER_URL'])
# Initialize Celery
celery.conf.update(app.config)
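# A worker for these tasks is started separately, e.g.:
#   celery -A app.celery worker --loglevel=info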
from app import controllers
db.create_all() | StarcoderdataPython |
6634998 | # Generated by Django 2.2.1 on 2019-06-17 01:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0009_subject_classcode'),
]
operations = [
migrations.AlterField(
model_name='subject',
name='classcode',
field=models.CharField(default=None, help_text='Class Code', max_length=5, null=True, verbose_name='Class Code'),
),
]
| StarcoderdataPython |
4878880 | <reponame>EichlerLab/chm1_scripts
#!/usr/bin/env python
import argparse
from Bio import SeqIO
from Bio import Seq
from Bio import SeqRecord
ap = argparse.ArgumentParser(description="Print sequences that are not well masked to an output file.")
ap.add_argument("bedin", help="Input gap-bed file.")
ap.add_argument("bedpass", help="Output gap-bed file, passing filter.")
ap.add_argument("bedfail", help="Output sequences with too low masked content to this file.")
ap.add_argument("--minMasked", help="Minimum masking content", type=float, default=0.80)
args = ap.parse_args()
bedIn = open(args.bedin)
bedPass = open(args.bedpass, 'w')
bedFail = open(args.bedfail, 'w')
for line in bedIn:
vals = line.split()
seq = vals[5]
# nUpper = seq.count("A") + seq.count("T") + seq.count("G") + seq.count("C")
nLower = seq.count("a") + seq.count("t") + seq.count("g") + seq.count("c")
ratio = (float(nLower) / len(seq))
if (ratio >= args.minMasked):
bedPass.write(line)
else:
bedFail.write(line)
| StarcoderdataPython |
48046 | <gh_stars>0
# The MIT License (MIT)
#
# Copyright (c) 2015 <NAME>, 2018 UMONS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import tensorflow as tf
import numpy as np
class BeamSearch:
def __init__(self, predict, initial_state, prime_labels):
"""Initializes the beam search.
Args:
predict:
A function that takes a `sample` and a `state`. It then performs
the computation on the last word in `sample`.
initial_state:
The initial state of the RNN.
prime_labels:
A list of labels corresponding to the priming text. This must
not be empty.
"""
if not prime_labels:
raise ValueError('prime_labels must be a non-empty list.')
self.predict = predict
self.initial_state = initial_state
self.prime_labels = prime_labels
def predict_samples(self, samples, states):
probs = []
next_states = []
for i in range(len(samples)):
prob, next_state = self.predict(samples[i], states[i])
probs.append(prob.squeeze())
next_states.append(next_state)
return np.array(probs), next_states
def search(self, oov, eos, k=1, maxsample=4000, use_unk=False):
"""Return k samples (beams) and their NLL scores.
Each sample is a sequence of labels, either ending with `eos` or
truncated to length of `maxsample`. `use_unk` allow usage of `oov`
(out-of-vocabulary) label in samples
"""
# A list of probabilities of our samples.
probs = []
prime_sample = []
prime_score = 0
prime_state = self.initial_state
# Initialize the live sample with the prime.
for i, label in enumerate(self.prime_labels):
prime_sample.append(label)
# The first word does not contribute to the score as the probs have
# not yet been determined.
if i > 0:
prime_score = prime_score - np.log(probs[0, label])
probs, prime_state = self.predict(prime_sample, prime_state)
dead_k = 0 # samples that reached eos
dead_samples = []
dead_scores = []
dead_states = []
live_k = 1 # samples that did not yet reached eos
live_samples = [prime_sample]
live_scores = [prime_score]
live_states = [prime_state]
while live_k and dead_k < k:
# total score for every sample is sum of -log of word prb
cand_scores = np.array(live_scores)[:, None] - np.log(probs)
if use_unk and oov is not None:
if isinstance(oov, list):
for word in oov:
cand_scores[:, word] = 1e20
else:
cand_scores[:, oov] = 1e20
cand_flat = cand_scores.flatten()
# find the best (lowest) scores we have from all possible samples and new words
ranks_flat = cand_flat.argsort()[:(k - dead_k)]
live_scores = cand_flat[ranks_flat]
# append the new words to their appropriate live sample
voc_size = probs.shape[1]
live_samples = [live_samples[r // voc_size] + [r % voc_size] for r in ranks_flat]
live_states = [live_states[r // voc_size] for r in ranks_flat]
# live samples that should be dead are...
zombie = [s[-1] == eos or len(s) >= maxsample for s in live_samples]
# add zombies to the dead
dead_samples += [s for s, z in zip(live_samples, zombie) if z] # remove first label == empty
dead_scores += [s for s, z in zip(live_scores, zombie) if z]
dead_states += [s for s, z in zip(live_states, zombie) if z]
dead_k = len(dead_samples)
# remove zombies from the living
live_samples = [s for s, z in zip(live_samples, zombie) if not z]
live_scores = [s for s, z in zip(live_scores, zombie) if not z]
live_states = [s for s, z in zip(live_states, zombie) if not z]
live_k = len(live_samples)
# Finally, compute the next-step probabilities and states.
probs, live_states = self.predict_samples(live_samples, live_states)
return dead_samples + live_samples, dead_scores + live_scores
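# Usage sketch with a toy uniform model (hypothetical; a real `predict` would
# run one RNN step and return next-label probabilities plus the new state):
if __name__ == "__main__":
    VOCAB = 4  # labels: 0 = prime token, 3 = end-of-sequence
    def toy_predict(sample, state):
        # Uniform distribution over the vocabulary; the state is unused.
        return np.full((1, VOCAB), 1.0 / VOCAB), state
    bs = BeamSearch(toy_predict, initial_state=None, prime_labels=[0])
    samples, scores = bs.search(oov=None, eos=3, k=2, maxsample=10)
    for sample, score in zip(samples, scores):
        print(sample, score)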
| StarcoderdataPython |
8063118 | import time
from tkinter import *
canvas = Tk()
canvas.title("Reconnext&Teleplan")
canvas.geometry("1366x768")
canvas.resizable(1,1)
bg = PhotoImage(file="images\mb.png")
canvas.attributes ('-transparentcolor','')
mycan = Canvas( width=1366, height=768)
label = Label(font=("Arial", 140, "bold"), fg="green")
label.pack(pady=20)
def digitalclock():
text_input = time.strftime("%H:%M:%S.%p",)
label.place( relx=0.5, rely=0.5, anchor=CENTER)
label.config( text=text_input)
label.after(1000, digitalclock)
digitalclock()
mycan.create_image(0,0, image=bg, anchor="nw")
mycan.pack(fill="both", expand=True)
text_lab = Label(mycan, text="Teleplan&Reconext", font=("Courier", 60, "bold"), fg="blue")
text_lab.pack(side=BOTTOM)
canvas.mainloop() | StarcoderdataPython |
4929666 | from peachpy import *
from peachpy.x86_64 import *
def fp16_alt_xmm_to_fp32_xmm(xmm_half):
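    # Conversion outline: the half is moved into the top of a 32-bit lane
    # (net << 16), the sign is stripped with a doubling add, and the
    # exponent/mantissa field is realigned with >> 4 (net << 13). Adding
    # 0x38000000 rebiases the exponent (127 - 15 = 112). Denormal inputs are
    # instead rebuilt with the 0.25 magic-bias trick and selected via a
    # compare mask before the sign bit is OR-ed back in.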
xmm_zero = XMMRegister()
VPXOR(xmm_zero, xmm_zero, xmm_zero)
xmm_word = XMMRegister()
VPUNPCKLWD(xmm_word, xmm_zero, xmm_half)
xmm_shl1_half = XMMRegister()
VPADDW(xmm_shl1_half, xmm_half, xmm_half)
xmm_shl1_nonsign = XMMRegister()
VPADDD(xmm_shl1_nonsign, xmm_word, xmm_word)
sign_mask = Constant.float32x4(-0.0)
xmm_sign = XMMRegister()
VANDPS(xmm_sign, xmm_word, sign_mask)
xmm_shr3_nonsign = XMMRegister()
VPSRLD(xmm_shr3_nonsign, xmm_shl1_nonsign, 4)
exp_offset = Constant.uint32x4(0x38000000)
xmm_norm_nonsign = XMMRegister()
VPADDD(xmm_norm_nonsign, xmm_shr3_nonsign, exp_offset)
magic_mask = Constant.uint16x8(0x3E80)
xmm_denorm_nonsign = XMMRegister()
VPUNPCKLWD(xmm_denorm_nonsign, xmm_shl1_half, magic_mask)
magic_bias = Constant.float32x4(0.25)
VSUBPS(xmm_denorm_nonsign, xmm_denorm_nonsign, magic_bias)
xmm_denorm_cutoff = XMMRegister()
VMOVDQA(xmm_denorm_cutoff, Constant.uint32x4(0x00800000))
xmm_denorm_mask = XMMRegister()
VPCMPGTD(xmm_denorm_mask, xmm_denorm_cutoff, xmm_shr3_nonsign)
xmm_nonsign = XMMRegister()
VBLENDVPS(xmm_nonsign, xmm_norm_nonsign, xmm_denorm_nonsign, xmm_denorm_mask)
xmm_float = XMMRegister()
VORPS(xmm_float, xmm_nonsign, xmm_sign)
return xmm_float
| StarcoderdataPython |
49244 | """Blueprints package."""
| StarcoderdataPython |
1661344 | <filename>src/devpy/temp.py<gh_stars>100-1000
import tempfile
from pathlib import Path
def temp_dir(name, root=None):
root = root or tempfile.gettempdir()
directory = Path(root) / name
directory.mkdir(exist_ok=True)
return directory
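# Example: temp_dir("myapp") returns a Path such as <system tmp>/myapp,
# creating the directory if it does not already exist.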
| StarcoderdataPython |
1673175 | from django.shortcuts import render, redirect
from app.forms import CarrosForm
from app.models import Carros
# Create your views here.
def home(request):
    data = {}
    data['db'] = Carros.objects.all()
    return render(request, 'index.html', data)
def form(request):
    data = {}
    data['form'] = CarrosForm()
    return render(request, 'form.html', data)
def create(request):
    form = CarrosForm(request.POST or None)
    if form.is_valid():
        form.save()
    return redirect('home')
def view(request, pk):
    data = {}
    data['db'] = Carros.objects.get(pk=pk)
    return render(request, 'view.html', data)
def edit(request, pk):
    data = {}
    data['db'] = Carros.objects.get(pk=pk)
    data['form'] = CarrosForm(instance=data['db'])
    return render(request, 'form.html', data)
def update(request, pk):
    data = {}
    data['db'] = Carros.objects.get(pk=pk)
    form = CarrosForm(request.POST or None, instance=data['db'])
    if form.is_valid():
        form.save()
    return redirect('home')
def delete(request, pk):
    db = Carros.objects.get(pk=pk)
    db.delete()
    return redirect('home')
| StarcoderdataPython |
4820467 | <reponame>eskay993/HomeAssistant-Kasa-HS100-Control
"""Module for smart plugs (HS100, HS110, ..)."""
import logging
from typing import Any, Dict
from .smartdevice import DeviceType, SmartDevice, requires_update
_LOGGER = logging.getLogger(__name__)
class SmartPlug(SmartDevice):
"""Representation of a TP-Link Smart Switch.
To initialize, you have to await :func:`update()` at least once.
This will allow accessing the properties using the exposed properties.
All changes to the device are done using awaitable methods,
which will not change the cached values, but you must await :func:`update()` separately.
Errors reported by the device are raised as :class:`SmartDeviceException`s,
and should be handled by the user of the library.
Examples:
>>> import asyncio
>>> plug = SmartPlug("127.0.0.1")
>>> asyncio.run(plug.update())
>>> plug.alias
Kitchen
Setting the LED state:
>>> asyncio.run(plug.set_led(True))
>>> asyncio.run(plug.update())
>>> plug.led
True
For more examples, see the :class:`SmartDevice` class.
"""
def __init__(self, host: str, authentication=None) -> None:
super().__init__(host, authentication)
self.emeter_type = "emeter"
self._device_type = DeviceType.Plug
@property # type: ignore
@requires_update
def is_on(self) -> bool:
"""Return whether device is on."""
sys_info = self.sys_info
return bool(sys_info["relay_state"])
async def turn_on(self, **kwargs):
"""Turn the switch on."""
return await self._query_helper("system", "set_relay_state", {"state": 1})
async def turn_off(self, **kwargs):
"""Turn the switch off."""
return await self._query_helper("system", "set_relay_state", {"state": 0})
@property # type: ignore
@requires_update
def led(self) -> bool:
"""Return the state of the led."""
sys_info = self.sys_info
return bool(1 - sys_info["led_off"])
async def set_led(self, state: bool):
"""Set the state of the led (night mode)."""
return await self._query_helper(
"system", "set_led_off", {"off": int(not state)}
)
@property # type: ignore
@requires_update
def state_information(self) -> Dict[str, Any]:
"""Return switch-specific state information."""
info = {"LED state": self.led, "On since": self.on_since}
return info
| StarcoderdataPython |
3350655 | <gh_stars>10-100
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class TransactionApiMixin(object):
def transaction_get(self, tran_uuid):
""" GET /transactions/{UUID}
Use the /transactions/{UUID} endpoint to retrieve an individual
transaction matching the UUID from the blockchain. The returned
transaction message is defined inside fabric.proto.
```golang
message Transaction {
enum Type {
UNDEFINED = 0;
CHAINCODE_DEPLOY = 1;
CHAINCODE_INVOKE = 2;
CHAINCODE_QUERY = 3;
CHAINCODE_TERMINATE = 4;
}
Type type = 1;
bytes chaincodeID = 2;
bytes payload = 3;
string uuid = 4;
google.protobuf.Timestamp timestamp = 5;
ConfidentialityLevel confidentialityLevel = 6;
bytes nonce = 7;
bytes cert = 8;
bytes signature = 9;
}
```
:param tran_uuid: The uuid of the transaction to retrieve
:return: json body of the transaction info
"""
res = self._get(self._url("/transactions/{0}", tran_uuid))
return self._result(res, json=True)
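    # Usage sketch (hypothetical client class that supplies the _get/_url/
    # _result helpers this mixin expects):
    #   client = FabricClient("http://localhost:7050")
    #   tx = client.transaction_get("some-transaction-uuid")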
| StarcoderdataPython |
53387 | <gh_stars>1-10
from contextlib import contextmanager
import time
from ecosante.extensions import cache
@contextmanager
def cache_lock(lock_id, oid):
LOCK_EXPIRE = 60 * 60
timeout_at = time.monotonic() + LOCK_EXPIRE - 180
status = cache.add(lock_id, oid, LOCK_EXPIRE)
try:
yield status
finally:
if time.monotonic() < timeout_at and status:
cache.delete(lock_id)
def cache_unlock(lock_id):
cache.delete(lock_id)
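# Usage sketch (lock id and owner id are illustrative; the lock is released
# on exit only if it was acquired and has not outlived its timeout):
#   with cache_lock('send-newsletter-lock', 'worker-1') as acquired:
#       if acquired:
#           ...  # do the work exactly once across concurrent workers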
| StarcoderdataPython |
11271908 | <reponame>firebird631/siis
# @date 2018-08-24
# @author <NAME>, All rights reserved without prejudices.
# @license Copyright (c) 2018 Dream Overflow
# Strategy display table formatter helpers for views or notifiers
from datetime import datetime
from terminal.terminal import Color
from terminal import charmap
from common.utils import UTC
from strategy.helpers.closedtradedataset import get_closed_trades
import logging
logger = logging.getLogger('siis.strategy')
error_logger = logging.getLogger('siis.error.strategy')
def closed_trades_stats_table(strategy, style='', offset=None, limit=None, col_ofs=None, quantities=False,
percents=False, group=None, ordering=None, datetime_format='%y-%m-%d %H:%M:%S'):
"""
Returns a table of any closed trades.
"""
columns = ['Symbol', '#', charmap.ARROWUPDN, 'P/L(%)', 'Fees(%)', 'OP', 'SL', 'TP', 'Best', 'Worst', 'TF',
'Signal date', 'Entry date', 'Avg EP', 'Exit date', 'Avg XP', 'Label', 'Status']
if quantities:
columns += ['RPNL', 'Qty', 'Entry Q', 'Exit Q']
columns = tuple(columns)
total_size = (len(columns), 0)
data = []
sub_totals = {}
def localize_datetime(dt):
return datetime.strptime(dt, '%Y-%m-%dT%H:%M:%S.%fZ').replace(tzinfo=UTC()).astimezone().strftime(
datetime_format) if dt else "-"
with strategy._mutex:
closed_trades = get_closed_trades(strategy)
total_size = (len(columns), len(closed_trades))
for t in closed_trades:
# sum of RPNL per quote/currency
if t['stats']['profit-loss-currency'] not in sub_totals:
sub_totals[t['stats']['profit-loss-currency']] = 0.0
sub_totals[t['stats']['profit-loss-currency']] += float(t['stats']['profit-loss'])
if offset is None:
offset = 0
if limit is None:
limit = len(closed_trades)
limit = offset + limit
if group:
# in alpha order + last realized exit datetime
closed_trades.sort(key=lambda x: x['symbol']+x['stats']['last-realized-exit-datetime'],
reverse=True if ordering else False)
else:
closed_trades.sort(key=lambda x: x['stats']['last-realized-exit-datetime'],
reverse=True if ordering else False)
closed_trades = closed_trades[offset:limit]
for t in closed_trades:
direction = Color.colorize_cond(charmap.ARROWUP if t['direction'] == "long" else charmap.ARROWDN,
t['direction'] == "long", style=style, true=Color.GREEN, false=Color.RED)
aep = float(t['avg-entry-price'])
axp = float(t['avg-exit-price'])
best = float(t['stats']['best-price'])
worst = float(t['stats']['worst-price'])
sl = float(t['stop-loss-price'])
tp = float(t['take-profit-price'])
# colorize profit or loss percent
if t['profit-loss-pct'] < 0: # loss
if (t['direction'] == 'long' and best > aep) or (t['direction'] == 'short' and best < aep):
# but was profitable during a time
cr = Color.colorize("%.2f" % t['profit-loss-pct'], Color.ORANGE, style=style)
exit_color = Color.ORANGE
else:
cr = Color.colorize("%.2f" % t['profit-loss-pct'], Color.RED, style=style)
exit_color = Color.RED
elif t['profit-loss-pct'] > 0: # profit
if (t['direction'] == 'long' and worst < aep) or (t['direction'] == 'short' and worst > aep):
# but was in lost during a time
cr = Color.colorize("%.2f" % t['profit-loss-pct'], Color.BLUE, style=style)
exit_color = Color.BLUE
else:
cr = Color.colorize("%.2f" % t['profit-loss-pct'], Color.GREEN, style=style)
exit_color = Color.GREEN
else: # equity
cr = "0.0" if aep else "-"
exit_color = None
# realized profit or loss
rpnl = "%g%s" % (t['stats']['profit-loss'], t['stats']['profit-loss-currency'])
if exit_color:
rpnl = Color.colorize(rpnl, exit_color, style=style)
# colorize TP if hit, similarly for SL, color depend if profit or loss, nothing if close at market
if t['stats']['exit-reason'] in ("stop-loss-market", "stop-loss-limit") and exit_color:
_tp = t['take-profit-price']
_sl = Color.colorize(t['stop-loss-price'], exit_color, style=style)
elif t['stats']['exit-reason'] in ("take-profit-limit", "take-profit-market") and exit_color:
_tp = Color.colorize(t['take-profit-price'], exit_color, style=style)
_sl = t['stop-loss-price']
else:
_tp = t['take-profit-price']
_sl = t['stop-loss-price']
# values in percent
if t['direction'] == "long" and aep:
slpct = (sl - aep) / aep
tppct = (tp - aep) / aep
bpct = (best - aep) / aep - (t['stats']['fees-pct'] * 0.01)
wpct = (worst - aep) / aep - (t['stats']['fees-pct'] * 0.01)
elif t['direction'] == "short" and aep:
slpct = (aep - sl) / aep
tppct = (aep - tp) / aep
bpct = (aep - best) / aep - (t['stats']['fees-pct'] * 0.01)
wpct = (aep - worst) / aep - (t['stats']['fees-pct'] * 0.01)
else:
slpct = 0
tppct = 0
bpct = 0
wpct = 0
def format_with_percent(formatted_value, condition, rate):
return (("%s (%.2f%%)" % (formatted_value,
rate * 100)) if percents else formatted_value) if condition else '-'
row = [
t['symbol'],
t['id'],
direction,
cr,
"%.2f%%" % t['stats']['fees-pct'], # total fees in percent
t['order-price'],
format_with_percent(_sl, sl, slpct),
format_with_percent(_tp, tp, tppct),
format_with_percent(t['stats']['best-price'], best, bpct),
format_with_percent(t['stats']['worst-price'], worst, wpct),
t['timeframe'],
localize_datetime(t['entry-open-time']),
localize_datetime(t['stats']['first-realized-entry-datetime']),
t['avg-entry-price'],
localize_datetime(t['stats']['last-realized-exit-datetime']),
t['avg-exit-price'],
t['label'],
t['state'].capitalize(),
]
if quantities:
row.append(rpnl)
row.append(t['order-qty'])
row.append(t['filled-entry-qty'])
row.append(t['filled-exit-qty'])
data.append(row[0:4] + row[4+col_ofs:])
if sub_totals:
row = [
"------",
'-',
'-',
'------',
'-------',
'--',
'--',
'--',
'----',
'-----',
'--',
'-----------',
'----------',
'------',
'---------',
'------',
'-----',
'------',
]
if quantities:
row.append('----')
row.append('---')
row.append('-------')
row.append('------')
data.append(row[0:4] + row[4+col_ofs:])
for currency, sub_total in sub_totals.items():
if sub_total > 0:
rpnl = Color.colorize("%g%s" % (sub_total, currency), Color.GREEN, style=style)
elif sub_total < 0:
rpnl = Color.colorize("%g%s" % (sub_total, currency), Color.RED, style=style)
else:
rpnl = "%g%s" % (sub_total, currency)
row = [
"SUB",
'-',
'-',
'-',
'-',
'-',
'-',
'-',
'-',
'-',
'-',
'-',
'-',
'-',
'-',
'-',
currency,
'-',
]
if quantities:
row.append(rpnl)
row.append('-')
row.append('-')
row.append('-')
data.append(row[0:4] + row[4+col_ofs:])
return columns[0:4] + columns[4+col_ofs:], data, total_size
| StarcoderdataPython |
3208726 | <gh_stars>0
"""binary_classification
*************************
Binary classification (ResNetv1-10 with CIFAR10)
Source code: `binary_classification.py <https://github.com/siliconlabs/mltk/blob/master/mltk/models/examples/binary_classification.py>`_
This demonstrates how to classify images into one of two classes:
- Cat
- Dog
Using the `CIFAR10 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ dataset.
The key points required for binary image classification are:
- ``binary_crossentropy`` loss function
- ``binary`` "class mode" for the data generator, this makes it so the generated "y" vector is a 1D vector
- One dense unit in the final layer to do the actual classification
- ``sigmoid`` activation in the last layer of the model so that the output of the model is between 0 and 1
Commands
--------------
.. code-block:: shell
# Dump some of samples generated by this model
# using a custom command defined at this bottom
# of this model specification file
mltk custom binary_classification datagen_dump
# Do a "dry run" test training of the model
mltk train binary_classification-test
# Train the model
mltk train binary_classification
# Evaluate the trained model .tflite model
mltk evaluate binary_classification --tflite
# Profile the model in the MVP hardware accelerator simulator
mltk profile binary_classification --accelerator MVP
# Profile the model on a physical development board
mltk profile binary_classification --accelerator MVP --device
Model Summary
--------------
.. code-block:: shell
mltk summarize binary_classification --tflite
+-------+-----------------+-----------------+-----------------+-----------------------------------------------------+
| Index | OpCode | Input(s) | Output(s) | Config |
+-------+-----------------+-----------------+-----------------+-----------------------------------------------------+
| 0 | conv_2d | 32x32x3 (int8) | 30x30x32 (int8) | Padding:valid stride:1x1 activation:relu |
| | | 3x3x3 (int8) | | |
| | | 32 (int32) | | |
| 1 | max_pool_2d | 30x30x32 (int8) | 15x15x32 (int8) | Padding:valid stride:2x2 filter:2x2 activation:none |
| 2 | conv_2d | 15x15x32 (int8) | 13x13x32 (int8) | Padding:valid stride:1x1 activation:relu |
| | | 3x3x32 (int8) | | |
| | | 32 (int32) | | |
| 3 | max_pool_2d | 13x13x32 (int8) | 6x6x32 (int8) | Padding:valid stride:2x2 filter:2x2 activation:none |
| 4 | conv_2d | 6x6x32 (int8) | 4x4x64 (int8) | Padding:valid stride:1x1 activation:relu |
| | | 3x3x32 (int8) | | |
| | | 64 (int32) | | |
| 5 | max_pool_2d | 4x4x64 (int8) | 2x2x64 (int8) | Padding:valid stride:2x2 filter:2x2 activation:none |
| 6 | reshape | 2x2x64 (int8) | 256 (int8) | BuiltinOptionsType=0 |
| | | 2 (int32) | | |
| 7 | fully_connected | 256 (int8) | 64 (int8) | Activation:relu |
| | | 256 (int8) | | |
| | | 64 (int32) | | |
| 8 | fully_connected | 64 (int8) | 1 (int8) | Activation:none |
| | | 64 (int8) | | |
| | | 1 (int32) | | |
| 9 | logistic | 1 (int8) | 1 (int8) | BuiltinOptionsType=0 |
+-------+-----------------+-----------------+-----------------+-----------------------------------------------------+
Total MACs: 2.646 M
Total OPs: 5.363 M
Name: binary_classification
Version: 1
Description: Example: Binary classification - ResNetv1-10 with CIFAR10
Classes: cat, dog
hash: de33dd53e0afb91a365fd2fff0e4c461
date: 2022-02-11T17:32:37.986Z
runtime_memory_size: 38740
samplewise_norm.rescale: 0.0
samplewise_norm.mean_and_std: False
.tflite file size: 53.8kB
Model Diagram
------------------
.. code-block:: shell
mltk view binary_classification --tflite
.. raw:: html
<div class="model-diagram">
<a href="../../../../_images/models/binary_classification.tflite.png" target="_blank">
<img src="../../../../_images/models/binary_classification.tflite.png" />
<p>Click to enlarge</p>
</a>
</div>
"""
import functools
import tensorflow as tf
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
from mltk.core.preprocess.image.parallel_generator import ParallelImageDataGenerator
from mltk.datasets.image import cifar10
from mltk.core.model import (
MltkModel,
TrainMixin,
ImageDatasetMixin,
EvaluateClassifierMixin
)
# Instantiate the MltkModel object with the following 'mixins':
# - TrainMixin - Provides classifier model training operations and settings
# - ImageDatasetMixin - Provides image data generation operations and settings
# - EvaluateClassifierMixin - Provides classifier evaluation operations and settings
# @mltk_model # NOTE: This tag is required for this model be discoverable
class MyModel(
MltkModel,
TrainMixin,
ImageDatasetMixin,
EvaluateClassifierMixin
):
pass
my_model = MyModel()
# General parameters
my_model.version = 1
my_model.description = 'Example: Binary classification - ResNetv1-10 with CIFAR10'
#################################################
# Training parameters
my_model.epochs = 200
my_model.batch_size = 40
my_model.optimizer = 'adam'
my_model.metrics = ['accuracy']
my_model.loss = 'binary_crossentropy'
#################################################
# TF-Lite converter settings
my_model.tflite_converter['optimizations'] = [tf.lite.Optimize.DEFAULT]
my_model.tflite_converter['supported_ops'] = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
my_model.tflite_converter['inference_input_type'] = tf.int8 # can also be tf.float32
my_model.tflite_converter['inference_output_type'] = tf.int8
# generate a representative dataset from the validation data
my_model.tflite_converter['representative_dataset'] = 'generate'
#################################################
# Image Dataset Settings
# Default size for CIFAR10 dataset
input_height = 32
input_width = 32
input_depth = 3
# The classification type
my_model.class_mode = 'binary'
# The class labels found in your training dataset directory
my_model.classes = ['cat', 'dog']
# The input shape to the model. The dataset samples will be resized if necessary
my_model.input_shape = [input_height, input_width, input_depth]
def my_dataset_loader(model:MyModel):
(cifar10_x_train, cifar10_y_train), (cifar10_x_test, cifar10_y_test) = cifar10.load_data()
# Extract just the cat and dog samples
cats_and_dogs_x_train = []
cats_and_dogs_y_train = []
cats_and_dogs_x_test = []
cats_and_dogs_y_test = []
n_cat = 0
n_dog = 0
for x, y in zip(cifar10_x_train, cifar10_y_train):
if y == 3: # cat label, see https://www.cs.toronto.edu/~kriz/cifar.html
if model.test_mode_enabled and n_cat > 100:
continue
cats_and_dogs_x_train.append(x)
cats_and_dogs_y_train.append(0) # cat maps to id 0, see the my_model.classes above
n_cat += 1
elif y == 5: # dog label
if model.test_mode_enabled and n_dog > 100:
continue
cats_and_dogs_x_train.append(x)
cats_and_dogs_y_train.append(1)
n_dog += 1
n_cat = 0
n_dog = 0
for x, y in zip(cifar10_x_test, cifar10_y_test):
if y == 3: # cat label, see https://www.cs.toronto.edu/~kriz/cifar.html
if model.test_mode_enabled and n_cat > 100:
continue
cats_and_dogs_x_test.append(x)
cats_and_dogs_y_test.append(0) # cat maps to id 0, see the my_model.classes above
n_cat += 1
elif y == 5: # dog label
if model.test_mode_enabled and n_dog > 100:
continue
cats_and_dogs_x_test.append(x)
cats_and_dogs_y_test.append(1)
n_dog += 1
x_train = np.asarray(cats_and_dogs_x_train)
y_train = np.asarray(cats_and_dogs_y_train)
x_test = np.asarray(cats_and_dogs_x_test)
y_test = np.asarray(cats_and_dogs_y_test)
# Convert for training
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# Scale to INT8 range (simple non-adaptive)
x_train = (x_train-128)/128
x_test = (x_test-128)/128
return x_train, y_train, x_test, y_test
my_model.dataset = functools.partial(my_dataset_loader, my_model)
##############################################################
# Training callbacks
#
my_model.datagen = ParallelImageDataGenerator(
cores=.35,
max_batches_pending=32,
rotation_range=15,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=True,
vertical_flip=True,
validation_augmentation_enabled=False
)
##############################################################
# Model Layout
def my_model_builder(model: MyModel):
keras_model = Sequential()
keras_model.add(layers.Conv2D(32, (3, 3), input_shape=model.input_shape))
keras_model.add(layers.Activation('relu'))
keras_model.add(layers.MaxPooling2D(pool_size=(2, 2)))
keras_model.add(layers.Conv2D(32, (3, 3)))
keras_model.add(layers.Activation('relu'))
keras_model.add(layers.MaxPooling2D(pool_size=(2, 2)))
keras_model.add(layers.Conv2D(64, (3, 3)))
keras_model.add(layers.Activation('relu'))
keras_model.add(layers.MaxPooling2D(pool_size=(2, 2)))
keras_model.add(layers.Flatten()) # this converts our 3D feature maps to 1D feature vectors
keras_model.add(layers.Dense(64))
keras_model.add(layers.Activation('relu'))
keras_model.add(layers.Dropout(0.5))
keras_model.add(layers.Dense(1)) # Binary so we only need 1 unit for this layer
keras_model.add(layers.Activation('sigmoid')) # Binary so we want the activation to be between 0 and 1 which is what the sigmoid function produces
keras_model.compile(
loss=model.loss,
optimizer=model.optimizer,
metrics=model.metrics
)
return keras_model
my_model.build_model_function = my_model_builder
# Register the "datagen_dump" custom command
import typer
@my_model.cli.command('datagen_dump')
def datagen_dump_custom_command(
count:int = typer.Option(100, '--count',
help='Number of samples to dump'
),
):
"""Custom command to dump the augmented samples
\b
Invoke this command with:
mltk custom binary_classification datagen_dump --count 20
"""
my_model.datagen.save_to_dir = my_model.create_log_dir('datagen_dump', delete_existing=True)
my_model.datagen.debug = True
my_model.datagen.cores = 1
my_model.datagen.max_batches_pending = 1
my_model.datagen.batch_size = 1
my_model.load_dataset(subset='training')
for i, _ in enumerate(my_model.x):
if i >= count:
break
my_model.unload_dataset()
print(f'Generated data dump to: {my_model.datagen.save_to_dir}') | StarcoderdataPython |
12865736 | import math
import string
from itertools import groupby
from operator import itemgetter
from nltk.corpus import stopwords
from nltk.tokenize import wordpunct_tokenize
N = 10788.0 # Number of documents, in float to make division work.
class TermMapper(object):
def __init__(self):
if 'stopwords' in self.params:
with open(self.params['stopwords'], 'r') as excludes:
self._stopwords = set(line.strip() for line in excludes)
else:
self._stopwords = None
self.curdoc = None
def __call__(self, key, value):
if value.startswith('='*34):
self.curdoc = int(value.strip("=").strip())
else:
for word in self.tokenize(value):
if not word in self.stopwords:
yield (word, self.curdoc), 1
def normalize(self, word):
word = word.lower()
if word not in string.punctuation:
return word
def tokenize(self, sentence):
for word in wordpunct_tokenize(sentence):
word = self.normalize(word)
if word: yield word
@property
def stopwords(self):
if not self._stopwords:
self._stopwords = stopwords.words('english')
return self._stopwords
class UnitMapper(object):
def __call__(self, key, value):
term, docid = key
yield term, (docid, value, 1)
class IDFMapper(object):
def __call__(self, key, value):
term, docid = key
tf, n = value
idf = math.log(N/n)
yield (term, docid), idf*tf
class SumReducer(object):
def __call__(self, key, values):
yield key, sum(values)
class BufferReducer(object):
def __call__(self, key, values):
term = key
values = list(values)
n = sum(g[2] for g in values)
for g in values:
yield (term, g[0]), (g[1], n)
class IdentityReducer(object):
def __call__(self, key, values):
for value in values:
yield key, value
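# Pipeline overview (three chained MapReduce passes):
#   1. TermMapper/SumReducer:     (term, doc) -> tf, the raw count per doc
#   2. UnitMapper/BufferReducer:  (term, doc) -> (tf, n), n = doc frequency
#   3. IDFMapper/IdentityReducer: (term, doc) -> tf * log(N / n)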
def runner(job):
job.additer(TermMapper, SumReducer, combiner=SumReducer)
job.additer(UnitMapper, BufferReducer)
job.additer(IDFMapper, IdentityReducer)
def starter(prog):
excludes = prog.delopt("stopwords")
if excludes: prog.addopt("param", "stopwords="+excludes)
if __name__ == "__main__":
import dumbo
dumbo.main(runner, starter)
| StarcoderdataPython |
384180 | <gh_stars>0
from multiprocessing import Process
def printFunction(country='Pakistan'):
print('The name of Country is : ', country)
if __name__ == "__main__":
names = ['Turkey', 'China', 'Iran']
procs = []
proc = Process(target=printFunction)
procs.append(proc)
proc.start()
for name in names:
proc = Process(target=printFunction, args=(name,))
procs.append(proc)
proc.start()
for proc in procs:
proc.join()
| StarcoderdataPython |
5023629 | <reponame>Q-Zheng/openmc
from collections import OrderedDict
import re
import os
from six import string_types
from xml.etree import ElementTree as ET
import openmc
import openmc.checkvalue as cv
from openmc.data import NATURAL_ABUNDANCE, atomic_mass
class Element(object):
"""A natural element that auto-expands to add the isotopes of an element to
a material in their natural abundance. Internally, the OpenMC Python API
expands the natural element into isotopes only when the materials.xml file
is created.
Parameters
----------
name : str
Chemical symbol of the element, e.g. Pu
Attributes
----------
name : str
Chemical symbol of the element, e.g. Pu
scattering : {'data', 'iso-in-lab', None}
The type of angular scattering distribution to use
"""
def __init__(self, name=''):
# Initialize class attributes
self._name = ''
self._scattering = None
# Set class attributes
self.name = name
def __eq__(self, other):
if isinstance(other, Element):
if self.name != other.name:
return False
else:
return True
elif isinstance(other, string_types) and other == self.name:
return True
else:
return False
def __ne__(self, other):
return not self == other
def __gt__(self, other):
return repr(self) > repr(other)
def __lt__(self, other):
return not self > other
def __hash__(self):
return hash(repr(self))
def __repr__(self):
string = 'Element - {0}\n'.format(self._name)
if self.scattering is not None:
string += '{0: <16}{1}{2}\n'.format('\tscattering', '=\t',
self.scattering)
return string
@property
def name(self):
return self._name
@property
def scattering(self):
return self._scattering
@name.setter
def name(self, name):
cv.check_type('element name', name, string_types)
cv.check_length('element name', name, 1, 2)
self._name = name
@scattering.setter
def scattering(self, scattering):
if not scattering in ['data', 'iso-in-lab', None]:
msg = 'Unable to set scattering for Element to {0} which ' \
'is not "data", "iso-in-lab", or None'.format(scattering)
raise ValueError(msg)
self._scattering = scattering
def expand(self, percent, percent_type, enrichment=None,
cross_sections=None):
"""Expand natural element into its naturally-occurring isotopes.
An optional cross_sections argument or the OPENMC_CROSS_SECTIONS
environment variable is used to specify a cross_sections.xml file.
If the cross_sections.xml file is found, the element is expanded only
into the isotopes/nuclides present in cross_sections.xml. If no
cross_sections.xml file is found, the element is expanded based on its
naturally occurring isotopes.
Parameters
----------
percent : float
Atom or weight percent
percent_type : {'ao', 'wo'}
'ao' for atom percent and 'wo' for weight percent
enrichment : float, optional
Enrichment for U235 in weight percent. For example, input 4.95 for
4.95 weight percent enriched U. Default is None
(natural composition).
cross_sections : str, optional
Location of cross_sections.xml file. Default is None.
Returns
-------
isotopes : list
Naturally-occurring isotopes of the element. Each item of the list
is a tuple consisting of an openmc.Nuclide instance and the natural
abundance of the isotope.
Notes
-----
When the `enrichment` argument is specified, a correlation from
`ORNL/CSD/TM-244 <https://doi.org/10.2172/5561567>`_ is used to
calculate the weight fractions of U234, U235, U236, and U238. Namely,
the weight fraction of U234 and U236 are taken to be 0.89% and 0.46%,
respectively, of the U235 weight fraction. The remainder of the isotopic
weight is assigned to U238.
"""
# Get the nuclides present in nature
natural_nuclides = set()
for nuclide in sorted(NATURAL_ABUNDANCE.keys()):
if re.match(r'{}\d+'.format(self.name), nuclide):
natural_nuclides.add(nuclide)
# Create dict to store the expanded nuclides and abundances
abundances = OrderedDict()
# If cross_sections is None, get the cross sections from the
# OPENMC_CROSS_SECTIONS environment variable
if cross_sections is None:
cross_sections = os.environ.get('OPENMC_CROSS_SECTIONS')
# If a cross_sections library is present, check natural nuclides
# against the nuclides in the library
if cross_sections is not None:
library_nuclides = set()
tree = ET.parse(cross_sections)
root = tree.getroot()
for child in root:
nuclide = child.attrib['materials']
if re.match(r'{}\d+'.format(self.name), nuclide) and \
'_m' not in nuclide:
library_nuclides.add(nuclide)
# Get a set of the mutual and absent nuclides. Convert to lists
# and sort to avoid different ordering between Python 2 and 3.
mutual_nuclides = natural_nuclides.intersection(library_nuclides)
absent_nuclides = natural_nuclides.difference(mutual_nuclides)
mutual_nuclides = sorted(list(mutual_nuclides))
absent_nuclides = sorted(list(absent_nuclides))
# If all natural nuclides are present in the library, expand element
# using all natural nuclides
if len(absent_nuclides) == 0:
for nuclide in mutual_nuclides:
abundances[nuclide] = NATURAL_ABUNDANCE[nuclide]
# If no natural elements are present in the library, check if the
# 0 nuclide is present. If so, set the abundance to 1 for this
# nuclide. Else, raise an error.
elif len(mutual_nuclides) == 0:
nuclide_0 = self.name + '0'
if nuclide_0 in library_nuclides:
abundances[nuclide_0] = 1.0
else:
msg = 'Unable to expand element {0} because the cross '\
'section library provided does not contain any of '\
'the natural isotopes for that element.'\
.format(self.name)
raise ValueError(msg)
# If some, but not all, natural nuclides are in the library, add
# the mutual nuclides. For the absent nuclides, add them based on
# our knowledge of the common cross section libraries
# (ENDF, JEFF, and JENDL)
else:
# Add the mutual isotopes
for nuclide in mutual_nuclides:
abundances[nuclide] = NATURAL_ABUNDANCE[nuclide]
# Adjust the abundances for the absent nuclides
for nuclide in absent_nuclides:
if nuclide in ['O17', 'O18'] and 'O16' in mutual_nuclides:
abundances['O16'] += NATURAL_ABUNDANCE[nuclide]
elif nuclide == 'Ta180' and 'Ta181' in mutual_nuclides:
abundances['Ta181'] += NATURAL_ABUNDANCE[nuclide]
elif nuclide == 'W180' and 'W182' in mutual_nuclides:
abundances['W182'] += NATURAL_ABUNDANCE[nuclide]
else:
msg = 'Unsure how to partition natural abundance of ' \
'isotope {0} into other natural isotopes of ' \
'this element that are present in the cross ' \
'section library provided. Consider adding ' \
'the isotopes of this element individually.'
raise ValueError(msg)
# If a cross_section library is not present, expand the element into
# its natural nuclides
else:
for nuclide in natural_nuclides:
abundances[nuclide] = NATURAL_ABUNDANCE[nuclide]
# Modify mole fractions if enrichment provided
if enrichment is not None:
# Calculate the mass fractions of isotopes
abundances['U234'] = 0.0089 * enrichment
abundances['U235'] = enrichment
abundances['U236'] = 0.0046 * enrichment
abundances['U238'] = 100.0 - 1.0135 * enrichment
# Convert the mass fractions to mole fractions
for nuclide in abundances.keys():
abundances[nuclide] /= atomic_mass(nuclide)
# Normalize the mole fractions to one
sum_abundances = sum(abundances.values())
for nuclide in abundances.keys():
abundances[nuclide] /= sum_abundances
# Compute the ratio of the nuclide atomic masses to the element
# atomic mass
if percent_type == 'wo':
# Compute the element atomic mass
element_am = 0.
for nuclide in abundances.keys():
element_am += atomic_mass(nuclide) * abundances[nuclide]
# Convert the molar fractions to mass fractions
for nuclide in abundances.keys():
abundances[nuclide] *= atomic_mass(nuclide) / element_am
# Normalize the mass fractions to one
sum_abundances = sum(abundances.values())
for nuclide in abundances.keys():
abundances[nuclide] /= sum_abundances
# Create a list of the isotopes in this element
isotopes = []
for nuclide, abundance in abundances.items():
nuc = openmc.Nuclide(nuclide)
nuc.scattering = self.scattering
isotopes.append((nuc, percent * abundance, percent_type))
return isotopes
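# Usage sketch (natural-abundance expansion; returned tuples are
# (Nuclide, fraction, percent_type) and the fractions are computed, not shown):
#   u = Element('U')
#   u.expand(1.0, 'ao', enrichment=4.95)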
| StarcoderdataPython |
6664834 | # -*- coding: utf-8 -
#
# This file is part of pistil released under the MIT license.
# See the NOTICE for more information.
import errno
import logging
import os
import select
import socket
from pistil import util
from pistil.worker import Worker
log = logging.getLogger(__name__)
class TcpSyncWorker(Worker):
def on_init_process(self):
self.socket = self.conf.get('sock')
self.address = self.socket.getsockname()
util.close_on_exec(self.socket)
def run(self):
self.socket.setblocking(0)
while self.alive:
self.notify()
# Accept a connection. If we get an error telling us
# that no connection is waiting we fall down to the
# select which is where we'll wait for a bit for new
# workers to come give us some love.
try:
client, addr = self.socket.accept()
client.setblocking(1)
util.close_on_exec(client)
self.handle(client, addr)
# Keep processing clients until no one is waiting. This
# prevents the need to select() for every client that we
# process.
continue
except socket.error, e:
if e[0] not in (errno.EAGAIN, errno.ECONNABORTED):
raise
# If our parent changed then we shut down.
if self.ppid != os.getppid():
log.info("Parent changed, shutting down: %s", self)
return
try:
self.notify()
ret = select.select([self.socket], [], self._PIPE,
self.timeout / 2.0)
if ret[0]:
continue
except select.error, e:
if e[0] == errno.EINTR:
continue
if e[0] == errno.EBADF:
if self.nr < 0:
continue
else:
return
raise
def handle(self, client, addr):
raise NotImplementedError
| StarcoderdataPython |
3367474 | from sharpy.utils.sharpydir import SharpyDir
import sharpy.utils.ctypes_utils as ct_utils
import ctypes as ct
import numpy as np
import platform
import os
from sharpy.utils.constants import NDIM, vortex_radius_def
UvlmLib = ct_utils.import_ctypes_lib(SharpyDir + '/lib/UVLM/lib/', 'libuvlm')
class VMopts(ct.Structure):
"""ctypes definition for VMopts class
struct VMopts {
bool ImageMethod;
unsigned int Mstar;
bool Steady;
bool horseshoe;
bool KJMeth;
bool NewAIC;
double DelTime;
bool Rollup;
unsigned int NumCores;
unsigned int NumSurfaces;
bool cfl1;
double vortex_radius;
double vortex_radius_wake_ind;
};
"""
_fields_ = [("ImageMethod", ct.c_bool),
("Steady", ct.c_bool),
("horseshoe", ct.c_bool),
("KJMeth", ct.c_bool),
("NewAIC", ct.c_bool),
("DelTime", ct.c_double),
("Rollup", ct.c_bool),
("NumCores", ct.c_uint),
("NumSurfaces", ct.c_uint),
("dt", ct.c_double),
("n_rollup", ct.c_uint),
("rollup_tolerance", ct.c_double),
("rollup_aic_refresh", ct.c_uint),
("iterative_solver", ct.c_bool),
("iterative_tol", ct.c_double),
("iterative_precond", ct.c_bool),
("cfl1", ct.c_bool),
("vortex_radius", ct.c_double),
("vortex_radius_wake_ind", ct.c_double)]
def __init__(self):
ct.Structure.__init__(self)
self.ImageMethod = ct.c_bool(False)
self.Steady = ct.c_bool(True)
self.horseshoe = ct.c_bool(True)
self.KJMeth = ct.c_bool(False) # legacy var
self.NewAIC = ct.c_bool(False) # legacy var
self.DelTime = ct.c_double(1.0)
self.Rollup = ct.c_bool(False)
self.NumCores = ct.c_uint(4)
self.NumSurfaces = ct.c_uint(1)
self.dt = ct.c_double(0.01)
self.n_rollup = ct.c_uint(0)
self.rollup_tolerance = ct.c_double(1e-5)
self.rollup_aic_refresh = ct.c_uint(1)
self.iterative_solver = ct.c_bool(False)
self.iterative_tol = ct.c_double(0)
self.iterative_precond = ct.c_bool(False)
self.cfl1 = ct.c_bool(True)
self.vortex_radius = ct.c_double(vortex_radius_def)
self.vortex_radius_wake_ind = ct.c_double(vortex_radius_def)
self.rbm_vel_g = np.ctypeslib.as_ctypes(np.zeros((6)))
class UVMopts(ct.Structure):
_fields_ = [("dt", ct.c_double),
("NumCores", ct.c_uint),
("NumSurfaces", ct.c_uint),
# ("steady_n_rollup", ct.c_uint),
# ("steady_rollup_tolerance", ct.c_double),
# ("steady_rollup_aic_refresh", ct.c_uint),
("convection_scheme", ct.c_uint),
# ("Mstar", ct.c_uint),
("ImageMethod", ct.c_bool),
("iterative_solver", ct.c_bool),
("iterative_tol", ct.c_double),
("iterative_precond", ct.c_bool),
("convect_wake", ct.c_bool),
("cfl1", ct.c_bool),
("vortex_radius", ct.c_double),
("vortex_radius_wake_ind", ct.c_double),
("interp_coords", ct.c_uint),
("filter_method", ct.c_uint),
("interp_method", ct.c_uint),
("yaw_slerp", ct.c_double),
("quasi_steady", ct.c_bool),]
def __init__(self):
ct.Structure.__init__(self)
self.dt = ct.c_double(0.01)
self.NumCores = ct.c_uint(4)
self.NumSurfaces = ct.c_uint(1)
self.convection_scheme = ct.c_uint(2)
# self.Mstar = ct.c_uint(10)
self.ImageMethod = ct.c_bool(False)
self.iterative_solver = ct.c_bool(False)
self.iterative_tol = ct.c_double(0)
self.iterative_precond = ct.c_bool(False)
self.convect_wake = ct.c_bool(True)
self.cfl1 = ct.c_bool(True)
self.vortex_radius = ct.c_double(vortex_radius_def)
self.vortex_radius_wake_ind = ct.c_double(vortex_radius_def)
self.yaw_slerp = ct.c_double(0.)
self.quasi_steady = ct.c_bool(False)
class FlightConditions(ct.Structure):
_fields_ = [("uinf", ct.c_double),
("uinf_direction", ct.c_double*3),
("rho", ct.c_double),
("c_ref", ct.c_double)]
def __init__(self):
ct.Structure.__init__(self)
# def __init__(self, fc_dict):
# ct.Structure.__init__(self)
# self.uinf = fc_dict['FlightCon']['u_inf']
# alpha = fc_dict['FlightCon']['alpha']
# beta = fc_dict['FlightCon']['beta']
# uinf_direction_temp = np.array([1, 0, 0], dtype=ct.c_double)
# self.uinf_direction = np.ctypeslib.as_ctypes(uinf_direction_temp)
# self.rho = fc_dict['FlightCon']['rho_inf']
# self.c_ref = fc_dict['FlightCon']['c_ref']
# type for 2d integer matrix
t_2int = ct.POINTER(ct.c_int)*2
def vlm_solver(ts_info, options):
run_VLM = UvlmLib.run_VLM
run_VLM.restype = None
vmopts = VMopts()
vmopts.Steady = ct.c_bool(True)
vmopts.NumSurfaces = ct.c_uint(ts_info.n_surf)
vmopts.horseshoe = ct.c_bool(options['horseshoe'])
vmopts.dt = ct.c_double(options["rollup_dt"])
vmopts.n_rollup = ct.c_uint(options["n_rollup"])
vmopts.rollup_tolerance = ct.c_double(options["rollup_tolerance"])
vmopts.rollup_aic_refresh = ct.c_uint(options['rollup_aic_refresh'])
vmopts.NumCores = ct.c_uint(options['num_cores'])
vmopts.iterative_solver = ct.c_bool(options['iterative_solver'])
vmopts.iterative_tol = ct.c_double(options['iterative_tol'])
vmopts.iterative_precond = ct.c_bool(options['iterative_precond'])
vmopts.cfl1 = ct.c_bool(options['cfl1'])
vmopts.vortex_radius = ct.c_double(options['vortex_radius'])
vmopts.vortex_radius_wake_ind = ct.c_double(options['vortex_radius_wake_ind'])
flightconditions = FlightConditions()
flightconditions.rho = options['rho']
flightconditions.uinf = np.ctypeslib.as_ctypes(np.linalg.norm(ts_info.u_ext[0][:, 0, 0]))
flightconditions.uinf_direction = np.ctypeslib.as_ctypes(ts_info.u_ext[0][:, 0, 0]/flightconditions.uinf)
p_rbm_vel_g = options['rbm_vel_g'].ctypes.data_as(ct.POINTER(ct.c_double))
p_centre_rot_g = options['centre_rot_g'].ctypes.data_as(ct.POINTER(ct.c_double))
ts_info.generate_ctypes_pointers()
run_VLM(ct.byref(vmopts),
ct.byref(flightconditions),
ts_info.ct_p_dimensions,
ts_info.ct_p_dimensions_star,
ts_info.ct_p_zeta,
ts_info.ct_p_zeta_star,
ts_info.ct_p_zeta_dot,
ts_info.ct_p_u_ext,
ts_info.ct_p_gamma,
ts_info.ct_p_gamma_star,
ts_info.ct_p_forces,
p_rbm_vel_g,
p_centre_rot_g)
ts_info.remove_ctypes_pointers()
def uvlm_init(ts_info, options):
init_UVLM = UvlmLib.init_UVLM
init_UVLM.restype = None
vmopts = VMopts()
vmopts.Steady = ct.c_bool(True)
# vmopts.Mstar = ct.c_uint(options['mstar'])
vmopts.NumSurfaces = ct.c_uint(ts_info.n_surf)
vmopts.horseshoe = ct.c_bool(False)
vmopts.dt = options["dt"]
try:
vmopts.n_rollup = ct.c_uint(options["steady_n_rollup"])
vmopts.rollup_tolerance = ct.c_double(options["steady_rollup_tolerance"])
vmopts.rollup_aic_refresh = ct.c_uint(options['steady_rollup_aic_refresh'])
except KeyError:
pass
vmopts.NumCores = ct.c_uint(options['num_cores'])
vmopts.vortex_radius = ct.c_double(options['vortex_radius'])
vmopts.vortex_radius_wake_ind = ct.c_double(options['vortex_radius_wake_ind'])
vmopts.quasi_steady = ct.c_bool(options['quasi_steady'])
flightconditions = FlightConditions()
flightconditions.rho = options['rho']
flightconditions.uinf = np.ctypeslib.as_ctypes(np.linalg.norm(ts_info.u_ext[0][:, 0, 0]))
flightconditions.uinf_direction = np.ctypeslib.as_ctypes(ts_info.u_ext[0][:, 0, 0]/flightconditions.uinf)
# rbm_vel[0:3] = np.dot(inertial2aero.transpose(), rbm_vel[0:3])
# rbm_vel[3:6] = np.dot(inertial2aero.transpose(), rbm_vel[3:6])
p_rbm_vel = np.zeros((6,)).ctypes.data_as(ct.POINTER(ct.c_double))
ts_info.generate_ctypes_pointers()
init_UVLM(ct.byref(vmopts),
ct.byref(flightconditions),
ts_info.ct_p_dimensions,
ts_info.ct_p_dimensions_star,
ts_info.ct_p_u_ext,
ts_info.ct_p_zeta,
ts_info.ct_p_zeta_star,
ts_info.ct_p_zeta_dot,
ts_info.ct_p_zeta_star_dot,
p_rbm_vel,
ts_info.ct_p_gamma,
ts_info.ct_p_gamma_star,
ts_info.ct_p_normals,
ts_info.ct_p_forces)
ts_info.remove_ctypes_pointers()
def uvlm_solver(i_iter, ts_info, struct_ts_info, options, convect_wake=True, dt=None):
run_UVLM = UvlmLib.run_UVLM
run_UVLM.restype = None
uvmopts = UVMopts()
if dt is None:
uvmopts.dt = ct.c_double(options["dt"])
else:
uvmopts.dt = ct.c_double(dt)
uvmopts.NumCores = ct.c_uint(options["num_cores"])
uvmopts.NumSurfaces = ct.c_uint(ts_info.n_surf)
uvmopts.ImageMethod = ct.c_bool(False)
uvmopts.convection_scheme = ct.c_uint(options["convection_scheme"])
uvmopts.iterative_solver = ct.c_bool(options['iterative_solver'])
uvmopts.iterative_tol = ct.c_double(options['iterative_tol'])
uvmopts.iterative_precond = ct.c_bool(options['iterative_precond'])
uvmopts.convect_wake = ct.c_bool(convect_wake)
uvmopts.cfl1 = ct.c_bool(options['cfl1'])
uvmopts.vortex_radius = ct.c_double(options['vortex_radius'])
uvmopts.vortex_radius_wake_ind = ct.c_double(options['vortex_radius_wake_ind'])
uvmopts.interp_coords = ct.c_uint(options["interp_coords"])
uvmopts.filter_method = ct.c_uint(options["filter_method"])
uvmopts.interp_method = ct.c_uint(options["interp_method"])
uvmopts.yaw_slerp = ct.c_double(options["yaw_slerp"])
uvmopts.quasi_steady = ct.c_bool(options['quasi_steady'])
flightconditions = FlightConditions()
flightconditions.rho = options['rho']
flightconditions.uinf = np.ctypeslib.as_ctypes(np.linalg.norm(ts_info.u_ext[0][:, 0, 0]))
# direction = np.array([1.0, 0, 0])
flightconditions.uinf_direction = np.ctypeslib.as_ctypes(ts_info.u_ext[0][:, 0, 0]/flightconditions.uinf)
# flightconditions.uinf_direction = np.ctypeslib.as_ctypes(direction)
rbm_vel = struct_ts_info.for_vel.copy()
rbm_vel[0:3] = np.dot(struct_ts_info.cga(), rbm_vel[0:3])
rbm_vel[3:6] = np.dot(struct_ts_info.cga(), rbm_vel[3:6])
p_rbm_vel = rbm_vel.ctypes.data_as(ct.POINTER(ct.c_double))
p_centre_rot = options['centre_rot'].ctypes.data_as(ct.POINTER(ct.c_double))
i = ct.c_uint(i_iter)
ts_info.generate_ctypes_pointers()
# previous_ts_info.generate_ctypes_pointers()
run_UVLM(ct.byref(uvmopts),
ct.byref(flightconditions),
ts_info.ct_p_dimensions,
ts_info.ct_p_dimensions_star,
ct.byref(i),
ts_info.ct_p_u_ext,
ts_info.ct_p_u_ext_star,
ts_info.ct_p_zeta,
ts_info.ct_p_zeta_star,
ts_info.ct_p_zeta_dot,
p_rbm_vel,
p_centre_rot,
ts_info.ct_p_gamma,
ts_info.ct_p_gamma_star,
ts_info.ct_p_dist_to_orig,
# previous_ts_info.ct_p_gamma,
ts_info.ct_p_normals,
ts_info.ct_p_forces,
ts_info.ct_p_dynamic_forces)
ts_info.remove_ctypes_pointers()
# previous_ts_info.remove_ctypes_pointers()
def uvlm_calculate_unsteady_forces(ts_info,
struct_ts_info,
options,
convect_wake=True,
dt=None):
calculate_unsteady_forces = UvlmLib.calculate_unsteady_forces
calculate_unsteady_forces.restype = None
uvmopts = UVMopts()
if dt is None:
uvmopts.dt = ct.c_double(options["dt"])
else:
uvmopts.dt = ct.c_double(dt)
uvmopts.NumCores = ct.c_uint(options["num_cores"])
uvmopts.NumSurfaces = ct.c_uint(ts_info.n_surf)
uvmopts.ImageMethod = ct.c_bool(False)
uvmopts.convection_scheme = ct.c_uint(options["convection_scheme"])
uvmopts.iterative_solver = ct.c_bool(options['iterative_solver'])
uvmopts.iterative_tol = ct.c_double(options['iterative_tol'])
uvmopts.iterative_precond = ct.c_bool(options['iterative_precond'])
uvmopts.convect_wake = ct.c_bool(convect_wake)
uvmopts.vortex_radius = ct.c_double(options['vortex_radius'])
flightconditions = FlightConditions()
flightconditions.rho = options['rho']
flightconditions.uinf = np.ctypeslib.as_ctypes(np.linalg.norm(ts_info.u_ext[0][:, 0, 0]))
flightconditions.uinf_direction = np.ctypeslib.as_ctypes(ts_info.u_ext[0][:, 0, 0]/flightconditions.uinf)
rbm_vel = struct_ts_info.for_vel.copy()
rbm_vel[0:3] = np.dot(struct_ts_info.cga(), rbm_vel[0:3])
rbm_vel[3:6] = np.dot(struct_ts_info.cga(), rbm_vel[3:6])
p_rbm_vel = rbm_vel.ctypes.data_as(ct.POINTER(ct.c_double))
for i_surf in range(ts_info.n_surf):
ts_info.dynamic_forces[i_surf].fill(0.0)
ts_info.generate_ctypes_pointers()
calculate_unsteady_forces(ct.byref(uvmopts),
ct.byref(flightconditions),
ts_info.ct_p_dimensions,
ts_info.ct_p_dimensions_star,
ts_info.ct_p_zeta,
ts_info.ct_p_zeta_star,
p_rbm_vel,
ts_info.ct_p_gamma,
ts_info.ct_p_gamma_star,
ts_info.ct_p_gamma_dot,
ts_info.ct_p_normals,
ts_info.ct_p_dynamic_forces)
ts_info.remove_ctypes_pointers()
def uvlm_calculate_incidence_angle(ts_info,
struct_ts_info):
calculate_incidence_angle = UvlmLib.UVLM_check_incidence_angle
calculate_incidence_angle.restype = None
rbm_vel = struct_ts_info.for_vel.copy()
rbm_vel[0:3] = np.dot(struct_ts_info.cga(), rbm_vel[0:3])
rbm_vel[3:6] = np.dot(struct_ts_info.cga(), rbm_vel[3:6])
p_rbm_vel = rbm_vel.ctypes.data_as(ct.POINTER(ct.c_double))
n_surf = ct.c_uint(ts_info.n_surf)
ts_info.generate_ctypes_pointers()
calculate_incidence_angle(ct.byref(n_surf),
ts_info.ct_p_dimensions,
ts_info.ct_p_u_ext,
ts_info.ct_p_zeta,
ts_info.ct_p_zeta_dot,
ts_info.ct_p_normals,
p_rbm_vel,
ts_info.postproc_cell['incidence_angle_ct_pointer'])
ts_info.remove_ctypes_pointers()
def uvlm_calculate_total_induced_velocity_at_points(ts_info,
target_triads,
vortex_radius,
for_pos=np.zeros((6)),
ncores=ct.c_uint(1)):
"""
uvlm_calculate_total_induced_velocity_at_points
Caller to the UVLM library to compute the induced velocity of all the
surfaces and wakes at a list of points
Args:
ts_info (AeroTimeStepInfo): Time step information
target_triads (np.array): Point coordinates, size=(npoints, 3)
        vortex_radius (float): Vortex radius threshold below which the
            induced velocity is not computed
        for_pos (np.array): Offset added to the surface coordinates before
            the computation; only its first three components are used
        ncores (ct.c_uint): Number of cores used in the computation
    Returns:
        uind (np.array): Induced velocity at the points, size=(npoints, 3)
"""
calculate_uind_at_points = UvlmLib.total_induced_velocity_at_points
calculate_uind_at_points.restype = None
uvmopts = UVMopts()
uvmopts.NumSurfaces = ct.c_uint(ts_info.n_surf)
uvmopts.ImageMethod = ct.c_bool(False)
uvmopts.NumCores = ct.c_uint(ncores)
uvmopts.vortex_radius = ct.c_double(vortex_radius)
npoints = target_triads.shape[0]
uind = np.zeros((npoints, 3), dtype=ct.c_double)
if type(target_triads[0,0]) == ct.c_double:
aux_target_triads = target_triads
else:
aux_target_triads = target_triads.astype(dtype=ct.c_double)
p_target_triads = ((ct.POINTER(ct.c_double))(* [np.ctypeslib.as_ctypes(aux_target_triads.reshape(-1))]))
p_uind = ((ct.POINTER(ct.c_double))(* [np.ctypeslib.as_ctypes(uind.reshape(-1))]))
# make a copy of ts info and add for_pos to zeta and zeta_star
ts_info_copy = ts_info.copy()
for i_surf in range(ts_info_copy.n_surf):
# zeta
for iM in range(ts_info_copy.zeta[i_surf].shape[1]):
for iN in range(ts_info_copy.zeta[i_surf].shape[2]):
ts_info_copy.zeta[i_surf][:, iM, iN] += for_pos[0:3]
# zeta_star
for iM in range(ts_info_copy.zeta_star[i_surf].shape[1]):
for iN in range(ts_info_copy.zeta_star[i_surf].shape[2]):
ts_info_copy.zeta_star[i_surf][:, iM, iN] += for_pos[0:3]
ts_info_copy.generate_ctypes_pointers()
calculate_uind_at_points(ct.byref(uvmopts),
ts_info_copy.ct_p_dimensions,
ts_info_copy.ct_p_dimensions_star,
ts_info_copy.ct_p_zeta,
ts_info_copy.ct_p_zeta_star,
ts_info_copy.ct_p_gamma,
ts_info_copy.ct_p_gamma_star,
p_target_triads,
p_uind,
ct.c_uint(npoints))
ts_info_copy.remove_ctypes_pointers()
del p_uind
del p_target_triads
return uind
def biot_panel_cpp(zeta_point, zeta_panel, vortex_radius, gamma=1.0):
"""
Linear UVLM function
Returns the induced velocity at a point ``zeta_point`` due to a panel located at ``zeta_panel`` with circulation
``gamma``.
Args:
zeta_point (np.ndarray): Coordinates of the point with size ``(3,)``.
zeta_panel (np.ndarray): Panel coordinates with size ``(4, 3)``.
gamma (float): Panel circulation.
Returns:
np.ndarray: Induced velocity at point
"""
assert zeta_point.flags['C_CONTIGUOUS'] and zeta_panel.flags['C_CONTIGUOUS'], \
'Input not C contiguous'
if type(vortex_radius) is ct.c_double:
vortex_radius_float = vortex_radius.value
else:
vortex_radius_float = vortex_radius
velP = np.zeros((3,), order='C')
UvlmLib.call_biot_panel(
velP.ctypes.data_as(ct.POINTER(ct.c_double)),
zeta_point.ctypes.data_as(ct.POINTER(ct.c_double)),
zeta_panel.ctypes.data_as(ct.POINTER(ct.c_double)),
ct.byref(ct.c_double(gamma)),
ct.byref(ct.c_double(vortex_radius_float)))
return velP
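# Minimal usage sketch for biot_panel_cpp (illustrative only: the corner and
# point coordinates are made up, and running it requires the compiled UVLM
# shared library loaded at the top of this module).
def _biot_panel_example():
    zeta_panel = np.array([[0., 0., 0.],
                           [1., 0., 0.],
                           [1., 1., 0.],
                           [0., 1., 0.]], order='C')   # panel corners, (4, 3)
    zeta_point = np.array([0.5, 0.5, 1.0], order='C')  # evaluation point, (3,)
    return biot_panel_cpp(zeta_point, zeta_panel, vortex_radius_def, gamma=1.0)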
def eval_panel_cpp(zeta_point, zeta_panel,
vortex_radius, gamma_pan=1.0):
"""
Linear UVLM function
Returns
tuple: The derivative of the induced velocity with respect to point ``P`` and panel vertices ``ZetaP``.
Warnings:
Function may fail if zeta_point is not stored contiguously.
Eg:
The following will fail
zeta_point=Mat[:,2,5]
eval_panel_cpp(zeta_point,zeta_panel, vortex_radius, gamma_pan=1.0)
but
zeta_point=Mat[:,2,5].copy()
eval_panel_cpp(zeta_point,zeta_panel, vortex_radius, gamma_pan=1.0)
will not.
"""
assert zeta_point.flags['C_CONTIGUOUS'] and zeta_panel.flags['C_CONTIGUOUS'], \
'Input not C contiguous'
der_point = np.zeros((3, 3), order='C')
der_vertices = np.zeros((4, 3, 3), order='C')
if type(vortex_radius) is ct.c_double:
vortex_radius_float = vortex_radius.value
else:
vortex_radius_float = vortex_radius
UvlmLib.call_der_biot_panel(
der_point.ctypes.data_as(ct.POINTER(ct.c_double)),
der_vertices.ctypes.data_as(ct.POINTER(ct.c_double)),
zeta_point.ctypes.data_as(ct.POINTER(ct.c_double)),
zeta_panel.ctypes.data_as(ct.POINTER(ct.c_double)),
ct.byref(ct.c_double(gamma_pan)),
ct.byref(ct.c_double(vortex_radius_float)))
return der_point, der_vertices
def get_induced_velocity_cpp(maps, zeta, gamma, zeta_target,
vortex_radius):
"""
Linear UVLM function used in bound surfaces
Computes induced velocity at a point zeta_target.
Args:
maps (sharpy.linear.src.surface.AeroGridSurface): instance of bound surface
zeta (np.ndarray): Coordinates of panel
gamma (float): Panel circulation strength
zeta_target (np.ndarray): Coordinates of target point
Returns:
np.ndarray: Induced velocity by panel at target point
"""
call_ind_vel = UvlmLib.call_ind_vel
call_ind_vel.restype = None
assert zeta_target.flags['C_CONTIGUOUS'], "Input not C contiguous"
M, N = maps.M, maps.N
uind_target = np.zeros((3,), order='C')
if type(vortex_radius) is ct.c_double:
vortex_radius_float = vortex_radius.value
else:
vortex_radius_float = vortex_radius
call_ind_vel(
uind_target.ctypes.data_as(ct.POINTER(ct.c_double)),
zeta_target.ctypes.data_as(ct.POINTER(ct.c_double)),
zeta.ctypes.data_as(ct.POINTER(ct.c_double)),
gamma.ctypes.data_as(ct.POINTER(ct.c_double)),
ct.byref(ct.c_int(M)),
ct.byref(ct.c_int(N)),
ct.byref(ct.c_double(vortex_radius_float)))
return uind_target
def get_aic3_cpp(maps, zeta, zeta_target, vortex_radius):
"""
Linear UVLM function used in bound surfaces
Produces influence coefficient matrix to calculate the induced velocity
at a target point. The aic3 matrix has shape (3,K)
Args:
maps (sharpy.linear.src.surface.AeroGridSurface): instance of linear bound surface
zeta (np.ndarray): Coordinates of panel
zeta_target (np.ndarray): Coordinates of target point
Returns:
np.ndarray: Aerodynamic influence coefficient
"""
assert zeta_target.flags['C_CONTIGUOUS'], "Input not C contiguous"
K = maps.K
aic3 = np.zeros((3, K), order='C')
if type(vortex_radius) is ct.c_double:
vortex_radius_float = vortex_radius.value
else:
vortex_radius_float = vortex_radius
UvlmLib.call_aic3(
aic3.ctypes.data_as(ct.POINTER(ct.c_double)),
zeta_target.ctypes.data_as(ct.POINTER(ct.c_double)),
zeta.ctypes.data_as(ct.POINTER(ct.c_double)),
ct.byref(ct.c_int(maps.M)),
ct.byref(ct.c_int(maps.N)),
ct.byref(ct.c_double(vortex_radius_float)))
return aic3
def dvinddzeta_cpp(zetac, surf_in, is_bound,
vortex_radius, M_in_bound=None):
"""
Linear UVLM function used in the assembly of the linear system
Produces derivatives of induced velocity by surf_in w.r.t. the zetac point.
Derivatives are divided into those associated to the movement of zetac, and
to the movement of the surf_in vertices (DerVert).
If surf_in is bound (is_bound==True), the circulation over the TE due to the
wake is not included in the input.
If surf_in is a wake (is_bound==False), derivatives w.r.t. collocation
    points are computed and the TE contribution on ``der_vert``. In this case, the
    chordwise paneling ``M_in_bound`` of the associated bound surface is required so as to
calculate Kzeta and correctly allocate the derivative matrix.
Returns:
tuple: output derivatives are:
- der_coll: 3 x 3 matrix
- der_vert: 3 x 3*Kzeta (if surf_in is a wake, Kzeta is that of the bound)
Warning:
zetac must be contiguously stored!
"""
M_in, N_in = surf_in.maps.M, surf_in.maps.N
Kzeta_in = surf_in.maps.Kzeta
shape_zeta_in = (3, M_in + 1, N_in + 1)
# allocate matrices
der_coll = np.zeros((3, 3), order='C')
if is_bound:
M_in_bound = M_in
Kzeta_in_bound = (M_in_bound + 1) * (N_in + 1)
der_vert = np.zeros((3, 3 * Kzeta_in_bound))
if type(vortex_radius) is ct.c_double:
vortex_radius_float = vortex_radius.value
else:
vortex_radius_float = vortex_radius
UvlmLib.call_dvinddzeta(
der_coll.ctypes.data_as(ct.POINTER(ct.c_double)),
der_vert.ctypes.data_as(ct.POINTER(ct.c_double)),
zetac.ctypes.data_as(ct.POINTER(ct.c_double)),
surf_in.zeta.ctypes.data_as(ct.POINTER(ct.c_double)),
surf_in.gamma.ctypes.data_as(ct.POINTER(ct.c_double)),
ct.byref(ct.c_int(M_in)),
ct.byref(ct.c_int(N_in)),
ct.byref(ct.c_bool(is_bound)),
ct.byref(ct.c_int(M_in_bound)),
ct.byref(ct.c_double(vortex_radius_float))
)
return der_coll, der_vert
| StarcoderdataPython |
3374164 | <filename>26.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: 26
Description : ^_^ !!!
Author : anglemiku
   Email : <EMAIL>
date: 2019/9/19
-------------------------------------------------
Change Activity: 2019/9/19:
-------------------------------------------------
"""
# 1-3+5-7+...-(num-2) + num
def one_match(num):
sub = False
result = 0
start = 1
while start <= num:
if sub:
result -= start
else:
result += start
start += 2
        sub = not sub
return result
# 1000-2000
def two_match(first, last):
row = 0
result = ''
while first <= last:
        if first % 400 == 0 or (first % 4 == 0 and first % 100 != 0):  # leap years
if row % 3 == 0:
row = 0
result += '\n'
row += 1
result = result + ' ' + str(first)
first += 1
return result
#
def mult_num(num):
one = 1
result = 1
while one <= num:
result *= one
one += 1
return result
def three_match(num):
one = 1
result = 0
while one <= num:
result = result + mult_num(one)
one += 2
return result
def four_match(first, last):
result = []
for i in range(first, last):
j = 2
for j in range(2, i):
if i % j == 0:
break
else:
result.append(i)
return result
import time
import dateutil
import datetime
if __name__ == '__main__':
# print one_match(101)
# print two_match(1000, 2000)
# print three_match(9)
# print four_match(200, 300)
pass
| StarcoderdataPython |
236194 | class Solution:
def sumNumbers(self, root ):
self.result = 0
self.preOrder(root,0)
return self.result
def preOrder(self,root,tmp):
if not root:
return
if not root.left and not root.right:
# sum here
self.result += tmp*10 + root.val
# print(self.result)
return
self.preOrder(root.left,tmp*10+root.val)
self.preOrder(root.right,tmp*10+root.val)
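# Usage sketch (assumption: the usual LeetCode-style binary-tree node with
# .val/.left/.right attributes; the minimal class below is illustrative only).
class _TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


if __name__ == '__main__':
    #     1
    #    / \
    #   2   3    -> root-to-leaf numbers 12 and 13, so the sum is 25
    root = _TreeNode(1, _TreeNode(2), _TreeNode(3))
    print(Solution().sumNumbers(root))  # expected: 25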
| StarcoderdataPython |
88900 | #!/usr/bin/python3
# -*- encoding: utf-8 -*-
"""
Copyright 2015 Nb<<EMAIL>>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
The parser.
"""
__author__ = 'Kevin'
import unittest
from la_json import parse, serialise
class JSONUnitTest(unittest.TestCase):
def test_parse_json(self):
self.maxDiff = None
with open('test_1.json') as json_1:
test_1_json = parse(json_1.read())
self.assertEqual(test_1_json, {
"Float": 1.5,
"NFloat": -1.55,
"Int": 16,
"NInt": -16,
"SaveLocation": "C:/Users/Kevin/Desktop/R",
"Threads": 16,
"EnterURL": [],
"URL": [],
"Py": {
"Pypy": 3.5,
"CPython": [
2.7,
3.4,
{
"PYTHON": "I LOVE IT",
"true": True,
"false": False,
"null": None,
"set": [
True,
False,
None
]
}
]
},
"URL2": [
"http://bvb-fanabteilung.de/fotos/saison-20142015/33-spieltag-vfl-wolfsburg-borussia-dortmund/",
"http://bvb-fanabteilung.de/fotos/saison-20142015/34-spieltag-borussia-dortmund-sv-werder-bremen/"
]
})
def test_serialise_json(self):
with open('test_2.json') as f:
test_2_json = parse(f.read())
self.assertEqual(test_2_json, {
"AAA": "SS",
"P\"P": [12, 26, 78, {
"BB": "CC",
"DD": -12.5
}, "X\"D"],
"ESCAPE": "\""
})
self.assertEqual(test_2_json, parse(serialise(test_2_json)))
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
6567041 | <reponame>martinbaste/filterno-server
from flask import Flask, render_template, request
from flask_cors import CORS
from utils.query_GDELT import submit_query, get_key_words
import nltk
app = Flask(__name__)
CORS(app)
nltk.data.path.append('./nltk_dependencies')
@app.route('/')
def hello_world():
return render_template('index.html')
@app.route('/analyze')
def analyze():
url = request.args.get('url')
keywords = get_key_words(url)
    if keywords is None:
return 'Cannot recognize article'
res = submit_query(keywords[0], 'tonechart')
return res
@app.route('/analyzekw')
def analyzekw():
keywords = request.args.get('kw')
res = submit_query(keywords.split(' '), 'tonechart')
return res | StarcoderdataPython |
309759 | class AsyncSerialPy3Mixin:
async def read_exactly(self, n):
data = bytearray()
while len(data) < n:
remaining = n - len(data)
data += await self.read(remaining)
return data
async def write_exactly(self, data):
while data:
res = await self.write(data)
data = data[res:]
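# Usage sketch: the mixin only assumes awaitable read(n)/write(data) methods
# on the class it is mixed into. The in-memory fake below is illustrative.
import asyncio


class _FakeSerial(AsyncSerialPy3Mixin):
    def __init__(self, data=b''):
        self._buf = bytearray(data)

    async def read(self, n):
        chunk, self._buf = self._buf[:n], self._buf[n:]
        return bytes(chunk)

    async def write(self, data):
        return len(data)  # pretend every byte was written


if __name__ == '__main__':
    port = _FakeSerial(b'hello world')
    print(asyncio.run(port.read_exactly(5)))  # bytearray(b'hello')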
| StarcoderdataPython |
8018683 | <gh_stars>1-10
import numpy as np
from nsga2.examples.deep_learning.genotype import NeuralNetwork
from nsga2.nsga2.initializer import Initializer
class NNInitializer(Initializer):
def __init__(self, n_individuals,
data, target, is_classification, solver="lbfgs",
n_layer_range=(1, 10), n_nodes_range=(2, 100), activations=None):
super().__init__(n_individuals)
self.data = data
self.target = target
self.is_classification = is_classification
self.solver = solver
self.n_layer_range = n_layer_range
self.n_nodes_range = n_nodes_range
if activations is None:
activations = ["identity", "logistic", "tanh", "relu"]
self.activations = activations
def _initialize_genotype(self):
n_layers = np.random.randint(*self.n_layer_range)
n_nodes = np.random.randint(*self.n_nodes_range)
activation = np.random.choice(self.activations)
return NeuralNetwork(n_layers=n_layers, n_nodes=n_nodes,
activation=activation, data=self.data,
target=self.target,
is_classification=self.is_classification,
solver=self.solver)
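# Usage sketch (illustrative: assumes scikit-learn is installed and that the
# surrounding NSGA-II driver consumes Initializer instances as configured
# above; the dataset choice is arbitrary).
if __name__ == "__main__":
    from sklearn.datasets import load_iris

    iris = load_iris()
    initializer = NNInitializer(n_individuals=20, data=iris.data,
                                target=iris.target, is_classification=True)
    genotype = initializer._initialize_genotype()  # one random NeuralNetwork
    print(genotype)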
| StarcoderdataPython |
3437809 | <reponame>lpatmo/actionify_the_news<gh_stars>10-100
"""Tests for mailer app"""
| StarcoderdataPython |
5081736 | #
# Lockstep Software Development Kit for Python
#
# (c) 2021-2022 Lockstep, Inc.
#
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.
#
# @author <NAME> <<EMAIL>>
# @copyright 2021-2022 Lockstep, Inc.
# @version 2022.2
# @link https://github.com/Lockstep-Network/lockstep-sdk-python
#
from dataclasses import dataclass
"""
Information about the migration action for a particular group
"""
@dataclass
class MigrationResultModel:
messages: list[str] = None
groupKey: str = None
invoiceCount: int = None
addressCount: int = None
invoiceFieldCount: int = None
lineCount: int = None
contactCount: int = None
companyCount: int = None
paymentCount: int = None
paymentFieldCount: int = None
paymentAppliedCount: int = None
| StarcoderdataPython |
322083 | """A bulk structure of amorphous silica."""
import numpy as np
import mbuild as mb
class AmorphousSilicaBulk(mb.Compound):
"""An amorphous silica box.
density 2.2g/cm^3
"""
def __init__(self):
super(AmorphousSilicaBulk, self).__init__()
mb.load(
"amorphous_silica_bulk.pdb",
compound=self,
relative_to_module=self.__module__,
)
self.periodicity = np.array([5, 5, 5])
if __name__ == "__main__":
bulk = AmorphousSilicaBulk()
bulk.save("bulk.mol2")
| StarcoderdataPython |
1733739 | <gh_stars>1-10
'''This is a reproduction of the IRNN experiment
with pixel-by-pixel sequential MNIST in
"A Simple Way to Initialize Recurrent Networks of Rectified Linear Units"
by <NAME>, <NAME>, <NAME>
arXiv:1504.00941v2 [cs.NE] 7 Apr 2015
http://arxiv.org/pdf/1504.00941v2.pdf
Optimizer is replaced with RMSprop which yields more stable and steady
improvement.
Reaches 0.93 train/test accuracy after 900 epochs
(which roughly corresponds to 1687500 steps in the original paper.)
'''
from __future__ import print_function
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import SimpleRNN
from keras.initializations import normal, identity
from keras.optimizers import RMSprop
from keras.utils import np_utils
batch_size = 32
nb_classes = 10
nb_epochs = 200
hidden_units = 100
learning_rate = 1e-6
clip_norm = 1.0
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(X_train.shape[0], -1, 1)
X_test = X_test.reshape(X_test.shape[0], -1, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
print('Evaluate IRNN...')
model = Sequential()
model.add(SimpleRNN(output_dim=hidden_units,
init=lambda shape, name: normal(shape, scale=0.001, name=name),
inner_init=lambda shape, name: identity(shape, scale=1.0, name=name),
activation='relu',
input_shape=X_train.shape[1:]))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
rmsprop = RMSprop(lr=learning_rate)
model.compile(loss='categorical_crossentropy',
optimizer=rmsprop,
metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epochs,
verbose=1, validation_data=(X_test, Y_test))
scores = model.evaluate(X_test, Y_test, verbose=0)
print('IRNN test score:', scores[0])
print('IRNN test accuracy:', scores[1])
| StarcoderdataPython |
48222 | import pandas as pd
import nltk
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import os
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('wordnet_ic')
nltk.download('genesis')
nltk.download('averaged_perceptron_tagger')
nltk.download('wordnet')
from src.Preprocess import Utils
from src.Preprocess import Lexical_Features
from src.Preprocess import WordNet_Features
from src.Normalization import Normalization
# Set seed for all libraries
np.random.seed(123)
# To print the whole df
pd.options.display.width = None
pd.options.display.max_columns = None
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 100)
# Load the datasets
names = [
'PhrasIS_test_h_n',
'PhrasIS_test_h_p',
'PhrasIS_test_i_n',
'PhrasIS_test_i_p',
'PhrasIS_train_h_n',
'PhrasIS_train_h_p',
'PhrasIS_train_i_n',
'PhrasIS_train_i_p'
]
paths = [
'dataset/PhrasIS.test.headlines.negatives.txt',
'dataset/PhrasIS.test.headlines.positives.txt',
'dataset/PhrasIS.test.images.negatives.txt',
'dataset/PhrasIS.test.images.positives.txt',
'dataset/PhrasIS.train.headlines.negatives.txt',
'dataset/PhrasIS.train.headlines.positives.txt',
'dataset/PhrasIS.train.images.negatives.txt',
'dataset/PhrasIS.train.images.positives.txt',
]
# For development only
nrows=30
datasets = dict( {name : Utils.readDataset(path, nrows=nrows) for (name,path) in zip(names,paths)})
# Preprocess dataset
preprocess_pipeline = [
Utils.addColumnsLower,
Utils.addColumnsStrip,
Utils.addColumnsTokenized,
Utils.addColumnsNoPunctuations,
Utils.addColumnsPOStags,
Utils.addColumnsLemmatized,
Utils.addColumnsContentWords,
Utils.addColumnsStopWords
]
step=1
for name,dataset in datasets.items():
for func in preprocess_pipeline:
func(dataset)
print("Processing dataset {}/{}".format(step, len(datasets.keys())))
step+=1
# Compute lexical features
lexical_pipeline = [
Lexical_Features.addColumnsJaccardStripTokenized,
Lexical_Features.addColumnsJaccardContentWords,
Lexical_Features.addColumnsJaccardStopwords,
Lexical_Features.addColumnsLength,
Lexical_Features.addColumnsLeftRight,
Lexical_Features.addColumnsRightLeft
]
step=1
for name,dataset in datasets.items():
for func in lexical_pipeline:
func(dataset)
print("Processing lexical features {}/{}".format(step, len(datasets.keys())))
step+=1
# Compute wordnet features
wordnet_pipeline = [
WordNet_Features.addColumnsPathSimilarity,
WordNet_Features.addColumnsLchSimilarityNouns,
WordNet_Features.addColumnsLchSimilarityVerbs,
WordNet_Features.addColumnsJcnSimilarityBrownNouns,
WordNet_Features.addColumnsJcnSimilarityBrownVerbs,
WordNet_Features.addColumnsJcnSimilarityGenesisNouns,
WordNet_Features.addColumnsJcnSimilarityGenesisVerbs,
WordNet_Features.addColumnsWupSimilarity,
WordNet_Features.addColumnsPathSimilarityRoot,
WordNet_Features.addColumnsLchSimilarityNounsRoot,
WordNet_Features.addColumnsLchSimilarityVerbsRoot,
WordNet_Features.addColumnsWupSimilarityRoot,
WordNet_Features.addColumnsChunkMaximum,
WordNet_Features.addColumnsChunk1Specific,
WordNet_Features.addColumnsChunk2Specific,
WordNet_Features.addColumnsDifference,
WordNet_Features.addColumnsMinimumDifference,
WordNet_Features.addColumnsMaximumDifference
]
step=1
for name,dataset in datasets.items():
for func in wordnet_pipeline:
func(dataset)
print("Processing wordnet features {}/{}".format(step, len(datasets.keys())))
step+=1
# Normalization
normalization_pipeline= [
Normalization.miniMaxNormalization
#Normalization.standardNormalization
]
step=1
for name,dataset in datasets.items():
for func in normalization_pipeline:
func(dataset)
print("Normalizing {}/{}".format(step, len(datasets.keys())))
step += 1
# Save files
saveFolder ="dirty"
if not os.path.exists(saveFolder):
os.makedirs(saveFolder+"/bin")
os.makedirs(saveFolder+ "/csv")
for name, df in datasets.items():
Utils.saveDatasetCSV(df, os.path.join("dirty/csv", name + ".csv"))
Utils.saveDatasetPickle(df, os.path.join("dirty/bin" , name + ".pickle"))
| StarcoderdataPython |
5129016 | import difflib
import os
import unittest
# Test suite for script horn-sat-solver.py
class HornSATSolverTest(unittest.TestCase):
def setUp(self):
pass
def launchTestWithRessource(self, inputFile):
tmpFile = "tmp.file"
os.system(f"cat {inputFile} | python3 src/horn-sat-solver.py > {tmpFile}")
outputFile = inputFile[:-2] + "out"
with open(outputFile) as f1:
f1_content = f1.readlines()
with open(tmpFile) as f2:
f2_content = f2.readlines()
diff = difflib.unified_diff(f1_content, f2_content)
self.assertEqual("".join(diff), "")
os.system(f"rm -f {tmpFile}")
def test_EmptyClause1(self):
self.launchTestWithRessource("test/ressources/empty_clause_1.in")
def test_EmptyClause2(self):
self.launchTestWithRessource("test/ressources/empty_clause_2.in")
def test_NegativeLiteralsOnly1(self):
self.launchTestWithRessource("test/ressources/negative_literals_only_1.in")
def test_NegativeLiteralsOnly2(self):
self.launchTestWithRessource("test/ressources/negative_literals_only_2.in")
def test_OnlyOneNonUnitClause1(self):
self.launchTestWithRessource("test/ressources/only_one_non_unit_clause_1.in")
def test_OnlyOneNonUnitClause2(self):
self.launchTestWithRessource("test/ressources/only_one_non_unit_clause_2.in")
def test_ShortSAT1(self):
self.launchTestWithRessource("test/ressources/short_sat_1.in")
def test_ShortSAT2(self):
self.launchTestWithRessource("test/ressources/short_sat_2.in")
def test_ShortUNSAT1(self):
self.launchTestWithRessource("test/ressources/short_unsat_1.in")
def test_ShortUNSAT2(self):
self.launchTestWithRessource("test/ressources/short_unsat_2.in")
def test_MediumSAT1(self):
self.launchTestWithRessource("test/ressources/medium_sat_1.in")
def test_MediumSAT2(self):
self.launchTestWithRessource("test/ressources/medium_sat_2.in")
def test_MediumUNSAT1(self):
self.launchTestWithRessource("test/ressources/medium_unsat_1.in")
def test_MediumUNSAT2(self):
self.launchTestWithRessource("test/ressources/medium_unsat_2.in")
def test_HugeSAT1(self):
self.launchTestWithRessource("test/ressources/huge_sat_1.in")
def test_HugeSAT2(self):
self.launchTestWithRessource("test/ressources/huge_sat_2.in")
def test_HugeUNSAT1(self):
self.launchTestWithRessource("test/ressources/huge_unsat_1.in")
def test_HugeUNSAT2(self):
self.launchTestWithRessource("test/ressources/huge_unsat_2.in")
#### MAIN ####
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
9798551 | <filename>pyramid/config/factories.py
from zope.deprecation import deprecate
from zope.interface import implementer
from pyramid.interfaces import (
IDefaultRootFactory,
IRequestFactory,
IRequestExtensions,
IRootFactory,
ISessionFactory,
)
from pyramid.traversal import DefaultRootFactory
from pyramid.util import (
action_method,
InstancePropertyMixin,
)
class FactoriesConfiguratorMixin(object):
@action_method
def set_root_factory(self, factory):
""" Add a :term:`root factory` to the current configuration
state. If the ``factory`` argument is ``None`` a default root
factory will be registered.
.. note::
Using the ``root_factory`` argument to the
:class:`pyramid.config.Configurator` constructor can be used to
achieve the same purpose.
"""
factory = self.maybe_dotted(factory)
if factory is None:
factory = DefaultRootFactory
def register():
self.registry.registerUtility(factory, IRootFactory)
self.registry.registerUtility(factory, IDefaultRootFactory) # b/c
intr = self.introspectable('root factories',
None,
self.object_description(factory),
'root factory')
intr['factory'] = factory
self.action(IRootFactory, register, introspectables=(intr,))
_set_root_factory = set_root_factory # bw compat
@action_method
def set_session_factory(self, factory):
"""
Configure the application with a :term:`session factory`. If this
method is called, the ``factory`` argument must be a session
factory callable or a :term:`dotted Python name` to that factory.
.. note::
Using the ``session_factory`` argument to the
:class:`pyramid.config.Configurator` constructor can be used to
achieve the same purpose.
"""
factory = self.maybe_dotted(factory)
def register():
self.registry.registerUtility(factory, ISessionFactory)
intr = self.introspectable('session factory', None,
self.object_description(factory),
'session factory')
intr['factory'] = factory
self.action(ISessionFactory, register, introspectables=(intr,))
@action_method
def set_request_factory(self, factory):
""" The object passed as ``factory`` should be an object (or a
:term:`dotted Python name` which refers to an object) which
will be used by the :app:`Pyramid` router to create all
request objects. This factory object must have the same
methods and attributes as the
:class:`pyramid.request.Request` class (particularly
``__call__``, and ``blank``).
See :meth:`pyramid.config.Configurator.add_request_method`
for a less intrusive way to extend the request objects with
custom methods and properties.
.. note::
Using the ``request_factory`` argument to the
:class:`pyramid.config.Configurator` constructor
can be used to achieve the same purpose.
"""
factory = self.maybe_dotted(factory)
def register():
self.registry.registerUtility(factory, IRequestFactory)
intr = self.introspectable('request factory', None,
self.object_description(factory),
'request factory')
intr['factory'] = factory
self.action(IRequestFactory, register, introspectables=(intr,))
@action_method
def add_request_method(self,
callable=None,
name=None,
property=False,
reify=False):
""" Add a property or method to the request object.
When adding a method to the request, ``callable`` may be any
function that receives the request object as the first
parameter. If ``name`` is ``None`` then it will be computed
from the name of the ``callable``.
When adding a property to the request, ``callable`` can either
be a callable that accepts the request as its single positional
parameter, or it can be a property descriptor. If ``name`` is
``None``, the name of the property will be computed from the
name of the ``callable``.
If the ``callable`` is a property descriptor a ``ValueError``
will be raised if ``name`` is ``None`` or ``reify`` is ``True``.
See :meth:`pyramid.request.Request.set_property` for more
details on ``property`` vs ``reify``. When ``reify`` is
``True``, the value of ``property`` is assumed to also be
``True``.
In all cases, ``callable`` may also be a
:term:`dotted Python name` which refers to either a callable or
a property descriptor.
If ``callable`` is ``None`` then the method is only used to
assist in conflict detection between different addons requesting
the same attribute on the request object.
This is the recommended method for extending the request object
and should be used in favor of providing a custom request
factory via
:meth:`pyramid.config.Configurator.set_request_factory`.
.. versionadded:: 1.4
"""
if callable is not None:
callable = self.maybe_dotted(callable)
property = property or reify
if property:
name, callable = InstancePropertyMixin._make_property(
callable, name=name, reify=reify)
elif name is None:
name = callable.__name__
def register():
exts = self.registry.queryUtility(IRequestExtensions)
if exts is None:
exts = _RequestExtensions()
self.registry.registerUtility(exts, IRequestExtensions)
plist = exts.descriptors if property else exts.methods
plist[name] = callable
if callable is None:
self.action(('request extensions', name), None)
elif property:
intr = self.introspectable('request extensions', name,
self.object_description(callable),
'request property')
intr['callable'] = callable
intr['property'] = True
intr['reify'] = reify
self.action(('request extensions', name), register,
introspectables=(intr,))
else:
intr = self.introspectable('request extensions', name,
self.object_description(callable),
'request method')
intr['callable'] = callable
intr['property'] = False
intr['reify'] = False
self.action(('request extensions', name), register,
introspectables=(intr,))
@action_method
    @deprecate('set_request_property() is deprecated as of Pyramid 1.5; use '
'add_request_method() with the property=True argument instead')
def set_request_property(self, callable, name=None, reify=False):
""" Add a property to the request object.
.. deprecated:: 1.5
:meth:`pyramid.config.Configurator.add_request_method` should be
used instead. (This method was docs-deprecated in 1.4 and
issues a real deprecation warning in 1.5).
.. versionadded:: 1.3
"""
self.add_request_method(
callable, name=name, property=not reify, reify=reify)
@implementer(IRequestExtensions)
class _RequestExtensions(object):
def __init__(self):
self.descriptors = {}
self.methods = {}
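# Usage sketch for add_request_method (illustrative: `user_from_request` and
# the settings key are made-up names, not part of Pyramid):
#
#   def user_from_request(request):
#       return request.registry.settings.get('auth.default_user')
#
#   config.add_request_method(user_from_request, 'user', reify=True)
#
# After configuration commits, `request.user` is computed once per request
# and cached, following the reify semantics described above.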
| StarcoderdataPython |
12846869 | #!/usr/bin/env python
"""
<Program Name>
runtests.py
<Author>
<NAME> <<EMAIL>>
<NAME> <<EMAIL>>
<Started>
May 23, 2016
<Copyright>
See LICENSE for licensing information.
<Purpose>
Script to search, load and run in-toto tests using the Python `unittest`
framework.
"""
from unittest import defaultTestLoader, TextTestRunner
import sys
import os
import subprocess
def check_usable_gpg():
"""Set `TEST_SKIP_GPG` environment variable if neither gpg2 nor gpg is
available.
"""
os.environ["TEST_SKIP_GPG"] = "1"
for gpg in ["gpg2", "gpg"]:
try:
subprocess.check_call([gpg, "--version"])
except OSError:
pass
else:
# If one of the two exists, we can unset the skip envvar and ...
os.environ.pop("TEST_SKIP_GPG", None)
# ... abort the availability check.:
break
check_usable_gpg()
suite = defaultTestLoader.discover(start_dir=".")
result = TextTestRunner(verbosity=2, buffer=True).run(suite)
sys.exit(0 if result.wasSuccessful() else 1)
| StarcoderdataPython |
11383099 | <filename>authcard/__init__.py
name = "authcard" | StarcoderdataPython |
11370384 | <gh_stars>1-10
# Generated by Django 2.0.6 on 2018-07-18 16:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('registration', '0021_images_datetime'),
]
operations = [
migrations.AlterField(
model_name='userextend',
name='gender',
field=models.CharField(choices=[('m', 'Male'), ('f', 'Female'), ('o', 'Others')], max_length=1),
),
]
| StarcoderdataPython |
9714259 | from molsysmt.lib import box as libbox
import numpy as np
from molsysmt import puw
def box_lengths_from_box_vectors(box):
unit = puw.get_unit(box)
n_frames = box.shape[0]
tmp_box = np.asfortranarray(puw.get_value(box), dtype='float64')
lengths = libbox.length_edges_box(tmp_box, n_frames)
lengths = np.ascontiguousarray(lengths, dtype='float64')
del(tmp_box)
return lengths.round(6)*unit
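# Usage sketch (illustrative: assumes a pyunitwizard-style ``puw.quantity``
# constructor and a box-vectors array of shape (n_frames, 3, 3)):
#
#   import numpy as np
#   box = puw.quantity(np.eye(3).reshape(1, 3, 3), 'nm')
#   lengths = box_lengths_from_box_vectors(box)  # 1.0 nm edge lengths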
| StarcoderdataPython |
6549459 | <reponame>IldarRyabkov/backendschool2021
from flask import request, jsonify
from sqlalchemy import exc
from .base import BaseView
from manager.db.schema import db, Courier, Region, WorkingHours
from manager.api.schema import (couriers_response_schema,
validate_request, CouriersSchema)
class Couriers(BaseView):
URL_PATH = "/couriers"
endpoint = "post_couriers"
methods = ['POST']
@validate_request(CouriersSchema)
def post(self):
# ะะพะฑะฐะฒะปะตะฝะธะต ะบัััะตัะพะฒ ะฒ ะฑะฐะทั ะดะฐะฝะฝัั
for data in request.json["data"]:
courier = Courier(data["courier_id"], data["courier_type"])
db.session.add(courier)
for r in data["regions"]:
region = Region(data["courier_id"], r)
db.session.add(region)
for interval in data["working_hours"]:
start, end = interval.split('-')
working_hours = WorkingHours(data["courier_id"], start, end)
db.session.add(working_hours)
        # Commit the transaction
try:
#courier = Courier(id=1, type="foot")
#db.session.add(courier)
db.session.commit()
except exc.IntegrityError:
msg = "Something went wrong..."
return msg, 400
        # Successful response
result = couriers_response_schema(request.json["data"])
return jsonify(result), 201
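# Example request body accepted by this handler (illustrative values; the
# field names mirror what the loop above reads from request.json):
#
#   POST /couriers
#   {
#       "data": [
#           {
#               "courier_id": 1,
#               "courier_type": "foot",
#               "regions": [1, 12, 22],
#               "working_hours": ["09:00-11:00", "11:35-14:05"]
#           }
#       ]
#   }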
| StarcoderdataPython |
11374947 | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 8 15:48:28 2019
@author: yanglinsen
"""
from Bio import SeqIO
from os import walk, listdir, path
import re
filelist = listdir("lbCpf1/")
records = list(SeqIO.parse("../lbCpf1/6His-MBP-TEV-huLbCpf1.gb", "genbank"))
first_record = records[0]
for feature in first_record.features:
start = feature.location.start.position
end= feature.location.end.position
if(end - start > 3600):
lbCpf1 = feature.extract(first_record)
        lbCpf1.name = re.split(r'\.', filelist[0])[0]  # filename without its extension
| StarcoderdataPython |
1937799 | import os, sys
os.chdir("G:\\My Drive\\Academic\\Research\\Neural Heap")
from neural_heap.dataset.io_synthesis import DATASET_RANGE
HEAP_SIZE = DATASET_RANGE
OP_SIZE = 3
HEAP_NAME = "tf_min_heap_pq" | StarcoderdataPython |
5022909 | from __future__ import absolute_import, division, print_function
from h2o.frame import H2OFrame
import pandas as pd
from .base import check_frame
from ..utils import flatten_all
__all__ = [
'_check_is_1d_frame',
'as_series',
'is_numeric',
'is_integer',
'is_float',
'value_counts'
]
def _check_is_1d_frame(X):
"""Check whether X is an H2OFrame
and that it's a 1d column. If not, will
raise an AssertionError
Parameters
----------
X : H2OFrame, shape=(n_samples, 1)
The H2OFrame to check
Raises
------
AssertionError if the ``X`` variable
is not a 1-dimensional H2OFrame.
Returns
-------
X : H2OFrame, shape=(n_samples, 1)
The frame if is 1d
"""
X = check_frame(X, copy=False)
assert X.shape[1] == 1, 'expected 1d H2OFrame'
return X
def as_series(x):
"""Make a 1d H2OFrame into a pd.Series.
Parameters
----------
x : ``H2OFrame``, shape=(n_samples, 1)
The H2OFrame
Returns
-------
x : Pandas ``Series``, shape=(n_samples,)
The pandas series
"""
x = _check_is_1d_frame(x)
x = x.as_data_frame(use_pandas=True)[x.columns[0]]
return x
def is_numeric(x):
"""Determine whether a 1d H2OFrame is numeric.
Parameters
----------
x : H2OFrame, shape=(n_samples, 1)
The H2OFrame
Returns
-------
bool : True if numeric, else False
"""
_check_is_1d_frame(x)
return flatten_all(x.isnumeric())[0]
def is_integer(x):
"""Determine whether a 1d H2OFrame is
made up of integers.
Parameters
----------
x : H2OFrame, shape=(n_samples, 1)
The H2OFrame
Returns
-------
bool : True if integers, else False
"""
_check_is_1d_frame(x)
if not is_numeric(x):
return False
return (x.round(digits=0) - x).sum() == 0
def is_float(x):
"""Determine whether a 1d H2OFrame is
made up of floats.
Parameters
----------
x : H2OFrame, shape=(n_samples, 1)
The H2OFrame
Returns
-------
bool : True if float, else False
"""
_check_is_1d_frame(x)
return is_numeric(x) and not is_integer(x)
def value_counts(x):
"""Compute a Pandas-esque ``value_counts``
on a 1d H2OFrame.
Parameters
----------
x : H2OFrame, shape=(n_samples, 1)
The H2OFrame
Returns
-------
cts : pd.Series, shape=(n_samples,)
The pandas series
"""
x = _check_is_1d_frame(x)
cts = as_series(x).value_counts()
return cts
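# Usage sketch (illustrative: requires a reachable H2O cluster, and the toy
# column values are made up).
if __name__ == '__main__':
    import h2o

    h2o.init()
    frame = H2OFrame({'letter': ['a', 'b', 'a', 'a']})
    print(value_counts(frame['letter']))  # pandas Series: a -> 3, b -> 1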
| StarcoderdataPython |
1682473 | #!/usr/bin/env python
import numpy as np
import argparse
import json
import threading
import roslib; roslib.load_manifest('flyvr')
roslib.load_manifest('visualization_msgs')
import rospy
import tf.transformations
from sensor_msgs.msg import CameraInfo
import sensor_msgs.msg
import std_msgs.msg
from geometry_msgs.msg import Point, Pose, Transform
import geometry_msgs.msg
import tf.broadcaster
import tf.msg
from visualization_msgs.msg import Marker, MarkerArray
import flyvr.simple_geom as simple_geom
import flyvr.display_client as display_client
from pymvg.camera_model import CameraModel
import pymvg.extern.ros.rviz_utils as rviz_utils
class MyApp:
def __init__(self,name,scale=1.0):
self.name = name
self.scale = scale
self.intrinsics = None
self.translation = None
self.rotation = None
self._lock = threading.Lock()
self.tl = tf.TransformListener()
self.cam = None
ci_name = self.get_frame_id()+'/camera_info'
rospy.loginfo('now listening for CameraInfo message on topic %r'%ci_name)
rospy.Subscriber(ci_name,
CameraInfo, self.on_camera_info)
self.topic_name = self.get_frame_id()+'/frustum'
rospy.loginfo('publishing frustum (scale %s) at %r'%(self.scale,
self.topic_name))
self.publisher = rospy.Publisher(self.topic_name, MarkerArray)
rospy.loginfo('now listening for transform at %r'%self.get_frame_id())
rospy.Timer(rospy.Duration(1.0/20.0), self.on_timer) # 20 fps
def get_frame_id(self):
return '/'+self.name
def on_camera_info(self, msg):
with self._lock:
self.intrinsics = msg
def on_timer(self, _):
now = rospy.Time.now()
try:
translation,rotation = self.tl.lookupTransform('/map',
self.get_frame_id(),
now)
except (tf.LookupException, tf.ExtrapolationException) as err:
return
with self._lock:
self.translation = translation
self.rotation = rotation
self.new_data()
def new_data(self):
with self._lock:
if (self.translation is None or
self.rotation is None or
self.intrinsics is None):
return
newcam = CameraModel.load_camera_from_ROS_tf( translation=self.translation,
rotation=self.rotation,
intrinsics=self.intrinsics,
name=self.get_frame_id(),
)
self.cam = newcam
self.draw()
def draw(self):
r = rviz_utils.get_frustum_markers( self.cam, scale=self.scale )
self.publisher.publish(r['markers'])
def run(self):
rospy.spin()
if __name__ == '__main__':
rospy.init_node('draw_rviz_frustum',anonymous=True)
parser = argparse.ArgumentParser()
parser.add_argument('--name', type=str, default='cam_0')
parser.add_argument('--scale', type=float, default=1.0)
argv = rospy.myargv()
args = parser.parse_args(argv[1:])
app = MyApp(args.name,scale=args.scale)
app.run()
| StarcoderdataPython |
12827072 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import str
from builtins import open
from future import standard_library
standard_library.install_aliases()
import os
import uuid
import tarfile
def get_image_full_name(repository, name, tag):
return "{base}:{tag}".format(
base=get_image(repository, name),
tag=tag
)
def get_image(repository, name):
return "{repo}/{name}".format(
repo=repository,
name=name
)
def is_runtime_phase():
""" Returns wether the code is currently in the runtime or building phase"""
return os.getenv('FAIRING_RUNTIME', None) != None
def is_running_in_k8s():
return os.path.isdir('/var/run/secrets/kubernetes.io/')
def get_current_k8s_namespace():
with open('/var/run/secrets/kubernetes.io/serviceaccount/namespace', 'r') as f:
return f.readline()
def get_unique_tag():
id = uuid.uuid4()
return str(id).split('-')[0]
def get_default_target_namespace():
if not is_running_in_k8s():
return 'default'
return get_current_k8s_namespace()
def generate_context_tarball(src_filename, output_tar_filename):
with tarfile.open(output_tar_filename, "w:gz") as tar:
tar.add(src_filename, filter=reset_tar_mtime)
# Reset the mtime on the the tarball for reproducibility
def reset_tar_mtime(tarinfo):
tarinfo.mtime = 0
tarinfo.name = os.path.join("/app", tarinfo.name)
return tarinfo
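# Usage sketch (paths are illustrative): package a source tree as a build
# context; entries are placed under /app with zeroed mtimes so the archive
# is reproducible.
#
#   generate_context_tarball('my_project/', '/tmp/context.tar.gz')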
| StarcoderdataPython |
9637149 | <reponame>kurianbenoy/Tree-Classifier<gh_stars>0
import os
from flask import Flask, request, flash, redirect, render_template
from fastai.vision.all import *
from fastai.vision.core import PILImage
from fastai.vision.all import load_learner
from werkzeug.utils import secure_filename
from api.utils.constants import UPLOAD_FOLDER, ALLOWED_EXTENSIONS
from api.utils.cors import crossdomain
app = Flask(__name__)
app.config["UPLOAD_FOLDER"] = UPLOAD_FOLDER
app.config["MAX_CONTENT_LENGTH"] = 16 * 1000 * 1000 # specify upload file limit
app.config["SECRET_KEY"] = "secret!" # for flash messages
def allowed_files(filename):
"""Reurn list of allowed urls"""
return "." in filename and filename.rsplit(".", 1)[1].lower() in ALLOWED_EXTENSIONS
def ml_inference(filename):
learn_inf = load_learner("../export.pkl")
img = PILImage.create(os.path.join(app.config["UPLOAD_FOLDER"], filename))
pred_class, pred_idx, outputs = learn_inf.predict(img)
return pred_class
@app.route("/predict/tree", methods=["POST", "GET"])
@crossdomain(origin="*")
def predict_tree():
"""Predict the tree using Flask APIs"""
if request.method == "POST":
# check if the post request has the file part
if "file" not in request.files:
flash("No file part")
return redirect(request.url)
file = request.files["file"]
print(file)
        # if the user does not select a file, the browser also
        # submits an empty part without a filename
if file.filename == "":
flash("No selected file")
return redirect(request.url)
if file and allowed_files(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config["UPLOAD_FOLDER"], filename))
tree_class = ml_inference(filename)
return f"Predicted tree is: {tree_class}"
return "Not supported"
@app.route("/test")
def index():
return render_template("testindex.html", **{"greeting": "Hello from Flask!"})
@app.route("/")
def frontendVue():
return render_template("index.html")
| StarcoderdataPython |
3280570 | <reponame>haizaar/crossbar-examples<filename>demos/votes/kivy/votes.py
###############################################################################
##
## Copyright (C) 2014, Tavendo GmbH and/or collaborators. All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## 1. Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
##
## 2. Redistributions in binary form must reproduce the above copyright notice,
## this list of conditions and the following disclaimer in the documentation
## and/or other materials provided with the distribution.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
## ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
## LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
## CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
## SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
## INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
## CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
## ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
## POSSIBILITY OF SUCH DAMAGE.
##
###############################################################################
from twisted.internet.defer import inlineCallbacks
from autobahn import wamp
from autobahn.twisted.wamp import ApplicationSession
class VotesBackend(ApplicationSession):
def __init__(self, config):
ApplicationSession.__init__(self, config)
self.init()
def init(self):
self._votes = {
'Banana': 0,
'Chocolate': 0,
'Lemon': 0
}
@wamp.register(u'io.crossbar.demo.vote.get')
def getVotes(self):
return [{'subject': key, 'votes': value} for key, value in self._votes.items()]
@wamp.register(u'io.crossbar.demo.vote.vote')
def submitVote(self, subject):
self._votes[subject] += 1
result = {'subject': subject, 'votes': self._votes[subject]}
self.publish('io.crossbar.demo.vote.onvote', result)
return result
@wamp.register(u'io.crossbar.demo.vote.reset')
def resetVotes(self):
self.init()
self.publish('io.crossbar.demo.vote.onreset')
@inlineCallbacks
def onJoin(self, details):
res = yield self.register(self)
print("VotesBackend: {} procedures registered!".format(len(res)))
| StarcoderdataPython |
9655096 | from typing import List
from ems.datasets.location.location_set import LocationSet
from ems.utils import parse_headered_csv
class DemandSet(LocationSet):
def __init__(self,
filename: str = None,
latitudes: List[float] = None,
longitudes: List[float] = None):
if filename is not None:
latitudes, longitudes = self.read_demands(filename)
super().__init__(latitudes, longitudes)
def read_demands(self, filename):
# Read demands from a headered CSV into a pandas dataframe
demand_headers = ["latitude", "longitude"]
demands_df = parse_headered_csv(filename, demand_headers)
# Generate list of models from dataframe
latitudes = []
longitudes = []
for index, row in demands_df.iterrows():
latitudes.append(row["latitude"])
longitudes.append(row["longitude"])
return latitudes, longitudes
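# Minimal usage sketch; the CSV path below is a placeholder assumption and the
# file must contain "latitude" and "longitude" columns:
#
# demands = DemandSet(filename="demands.csv")
# demands = DemandSet(latitudes=[41.88, 41.90], longitudes=[-87.63, -87.62])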
| StarcoderdataPython |
4838654 | from pint import UnitRegistry
ureg = UnitRegistry() | StarcoderdataPython |
1954737 | from pydantic_fhir import r4
def test_enums_in_resource():
"""Test schema on a real example with an enum field."""
status_enum = r4.StructureMap.schema()["properties"]["status"]["enum"]
assert sorted(status_enum, key=lambda x: x["value"]) == sorted(
[
{"value": item.value, "description": item.__doc__}
for item in r4.PublicationStatus
],
key=lambda x: x["value"],
)
| StarcoderdataPython |
3293703 | <reponame>NodePing/python3-nodeping-api<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Get, create, update, and delete schedules for notifications.
"""
from . import _query_nodeping_api, _utils, config
API_URL = "{0}schedules".format(config.API_URL)
def get_schedule(token, schedule=None, customerid=None):
""" Get existing schedules in NodePing account
Returns all the data in a dictionary format from the
original JSON that is gathered from NodePing about
the account's notification schedules.
:param token: The NodePing API token for the account
:type token: str
:param schedule: The name of the notification schedule
:type schedule: str
:param customerid: (optional) ID for subaccount
:type customerid: str
:return: Response from NodePing
:rtype: dict
"""
if schedule:
url = "{0}/{1}".format(API_URL, schedule)
url = _utils.create_url(token, url, customerid)
else:
url = _utils.create_url(token, API_URL, customerid)
return _query_nodeping_api.get(url)
def create_schedule(token, data, schedule_name, customerid=None):
""" Create a new notification schedule for the specified NodePing account
Sends data of a custom alert schedule to NodePing to be created
for the specified user account. Returns the results from NodePing
in a dictionary format.
    :param token: The NodePing API token for the account
    :type token: str
    :param data: The schedules for each day to receive notifications
    :type data: dict
:param customerid: (optional) ID for subaccount
:type customerid: str
:return: Schedule ID and if the operation was completed or not
:rtype: dict
Example::
{'data': {'friday': {'disabled': True},
'monday': {'allday': True},
'saturday': {'exclude': False, 'time1': '6:00', 'time2': '18:00'},
'sunday': {'exclude': False, 'time1': '6:00', 'time2': '18:00'},
'thursday': {'exclude': False, 'time1': '6:00', 'time2': '18:00'},
'tuesday': {'exclude': False, 'time1': '6:00', 'time2': '18:00'},
'wednesday': {'exclude': False, 'time1': '6:00', 'time2': '18:00'}}}
    Days accept certain key/value pairs such as:
time1: str - start of timespan (24-hour time)
time2: str - end of timespan (24-hour time)
exclude: True/False - inverts the time span so it is all day
except for the time between time1 and time2
disabled: True/False - disables notifications for this day.
allday: True/False - enables notifications for the entire day.
"""
url = "{0}/{1}".format(API_URL, schedule_name)
url = _utils.create_url(token, url, customerid)
return _query_nodeping_api.post(url, data)
def update_schedule(token, data, schedule_name, customerid=None):
""" Update a notification schedule for the specified NodePing account
Sends data of a custom alert schedule to NodePing to modify a schedule
for the specified user account. Returns the results from NodePing
in a dictionary format.
    :param token: The NodePing API token for the account
    :type token: str
    :param data: The schedules for each day to receive notifications
    :type data: dict
:param customerid: (optional) ID for subaccount
:type customerid: str
:return: Schedule ID and if the operation was completed or not
:rtype: dict
Example::
{'data': {'friday': {'disabled': True},
'monday': {'allday': True},
'saturday': {'exclude': False, 'time1': '6:00', 'time2': '18:00'},
'sunday': {'exclude': False, 'time1': '6:00', 'time2': '18:00'},
'thursday': {'exclude': False, 'time1': '6:00', 'time2': '18:00'},
'tuesday': {'exclude': False, 'time1': '6:00', 'time2': '18:00'},
'wednesday': {'exclude': False, 'time1': '6:00', 'time2': '18:00'}}}
    Days accept certain key/value pairs such as:
time1: str - start of timespan (24-hour time)
time2: str - end of timespan (24-hour time)
exclude: True/False - inverts the time span so it is all day
except for the time between time1 and time2
disabled: True/False - disables notifications for this day.
allday: True/False - enables notifications for the entire day.
"""
url = "{0}/{1}".format(API_URL, schedule_name)
url = _utils.create_url(token, url, customerid)
return _query_nodeping_api.put(url, data)
def delete_schedule(token, schedule, customerid=None):
""" Get existing schedules in NodePing account
Returns all the data in a dictionary format from the
original JSON that is gathered from NodePing about
the account's notification schedules.
:param token: The NodePing API token for the account
:type token: str
:param schedule: The name of the notification schedule
:type schedule: str
:return: Response from NodePing
:rtype: dict
"""
url = "{0}/{1}".format(API_URL, schedule)
url = _utils.create_url(token, url, customerid)
return _query_nodeping_api.delete(url)
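# Hypothetical end-to-end sketch; the token and schedule name are placeholders
# and the payload shape mirrors the Example in the docstrings above:
#
# token = "your-api-token"
# data = {"data": {"monday": {"allday": True}, "friday": {"disabled": True}}}
# create_schedule(token, data, "workweek")
# print(get_schedule(token, "workweek"))
# delete_schedule(token, "workweek")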
| StarcoderdataPython |
8116065 | <filename>EV3(15) Final Version/subscriber15.py
#!/usr/bin/env python3
import paho.mqtt.client as mqtt
import helpers as hp
import wheels
from threading import Thread
from ev3dev.auto import *
#This is the Subscriber
def representsInt(s):
try:
int(s)
return True
except ValueError:
return False
def on_connect(client, userdata, flags, rc):
print("Connected with result code "+str(rc))
client.subscribe("topic/motor-A/dt")
try:
t = Thread(target = wheels.lookForEdge)
t.start()
except:
print("Unable to start thread")
def on_message(client, userdata, msg):
if (msg.payload.isalpha()):
client.disconnect()
elif (int(msg.payload) == 0):
print ("foward")
wheels.goForwards()
elif (int(msg.payload) == 1):
print ("backword")
wheels.goBackwards()
elif (int(msg.payload) == 2):
print("clockwise")
wheels.rotateClockwise()
elif (int(msg.payload) == 3):
print("anticlockwise")
wheels.rotateAntiClockwise()
elif (int(msg.payload) == 4):
wheels.stop()
elif (int(msg.payload) == 1514):
print("1514 Forward")
wheels.goForwardsForTime(mTime=3.5)
elif (int(msg.payload) == 1515):
print("1515 Backward")
wheels.goBackwardsForTime(mTime=3.5)
elif (int(msg.payload) == 1516):
print("1516 Turn around")
wheels.rotateClockwiseAtAngle(angle = 90)
elif (int(msg.payload) == 1517):
print("1517 Spin")
wheels.rotateClockwiseAtAngle(angle = 360)
def subscriber():
client = mqtt.Client()
client.connect("10.42.0.180",1883,60)
client.on_connect = on_connect
client.on_message = on_message
#m.run_direct()
#m.duty_cycle_sp=0
client.loop_forever()
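# A matching publisher might look like this; the broker IP mirrors the one
# above, and this sketch is an assumption rather than part of the robot code:
#
# pub = mqtt.Client()
# pub.connect("10.42.0.180", 1883, 60)
# pub.publish("topic/motor-A/dt", "0")  # "0" = forward, see on_message above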
while True:
try:
subscriber()
except:
pass
| StarcoderdataPython |
3588565 | <gh_stars>0
import pygame
import game_utilities as util
from view.gui_variables import GUIVariables
from view.sprite_grid import SpriteGrid
class Ice(pygame.sprite.Sprite):
"""
Class for the ice sprites
"""
def __init__(self, size: int, layer: int, ice_group):
"""
Constructor for the class.
:param size: Size of the ice sprite
:param layer: Layers of ice, 0 is thinnest ice
:param ice_group: Group to put this sprite in
"""
# Call to super constructor
pygame.sprite.Sprite.__init__(self, ice_group)
# Set field variables
self.layer = layer
self.size = size
self.ice_layer = "ice/ice_layer_{}.png".format(self.layer)
self.image, self.rect = util.load_image(self.ice_layer, size)
def update_image(self, layer: int):
"""
Method to update this sprites image
:param layer: New value for how many layers of ice, 0 is thinnest ice
:return: None
"""
self.layer = layer
self.ice_layer = "ice/ice_layer_{}.png".format(self.layer)
self.image, _ = util.load_image(self.ice_layer, self.size)
class IceGrid(SpriteGrid):
"""
Class to hold a grid of ice sprites.
"""
def __init__(self, gui_vars: GUIVariables, group, ice: list):
"""
Constructor for IceGrid class.
:param group: Group that ice sprites should go in
:param ice: Information about ice sprites to be held in grid
"""
# Call to super constructor
super().__init__(gui_vars, group, ice)
def add(self, y_coord: int, x_coord: int, layer: int):
"""
Method to add a ice sprite to the grid.
Implements method required by the superclass.
:param y_coord: Y coordinate to add ice sprite at
:param x_coord: X coordinate to add ice sprite at
:param layer: Layers of ice, 0 is thinnest
:return: None
"""
# Check there is meant to be ice at this location
        if layer != -1:
# Create Ice sprite
ice = Ice(self.gui_vars.cell_size, layer, self.group)
# Calculate pixel coordinates it should go at
y, x = self.grid_to_pixel(y_coord, x_coord)
# Set correct coordinates
ice.rect.left = x
ice.rect.top = y
# Add to ice grid
self.grid[y_coord][x_coord] = ice
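# Minimal usage sketch; gui_vars stands in for a GUIVariables instance and the
# grid values below are illustrative assumptions (-1 marks cells without ice,
# as handled in add() above):
#
# ice_group = pygame.sprite.Group()
# grid = IceGrid(gui_vars, ice_group, ice=[[0, -1], [2, 1]])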
| StarcoderdataPython |
4825151 | <filename>ObjectWrapper/GlyphsApp/plugins.py
# encoding: utf-8
from __future__ import print_function
import objc
from Foundation import NSBundle, NSLog, NSObject, NSClassFromString, NSMutableArray, NSMutableOrderedSet, \
NSString, NSAttributedString, NSNumber, NSUserDefaults, NSUserNotification, NSUserNotificationCenter, \
NSNotificationCenter, NSError, NSLocalizedDescriptionKey, NSLocalizedRecoverySuggestionErrorKey, \
NSLocalizedString, NSNotFound, NSPoint, NSMakePoint, NSZeroPoint, NSMakeRect, NSMakeSize, NSMinX, \
NSMinY, NSMaxX, NSMaxY, NSRect, NSSize, NSUnarchiver
from AppKit import NSApplication, NSColor, NSNib, NSMenu, NSMenuItem, NSView, NSImage, NSDocumentController, \
NSBezierPath, NSFont, NSFontAttributeName, NSForegroundColorAttributeName, NSControlKeyMask, \
NSCommandKeyMask, NSShiftKeyMask, NSAlternateKeyMask, NSEvent, NSAffineTransform
import sys
import traceback
from GlyphsApp import Glyphs, LogToConsole, LogError, \
ONSTATE, OFFSTATE, MIXEDSTATE, Message, distance, GSPath
MainBundle = NSBundle.mainBundle()
path = MainBundle.bundlePath() + "/Contents/Scripts"
if path not in sys.path:
sys.path.append(path)
GSFilterPlugin = objc.lookUpClass("GSFilterPlugin")
GSToolSelect = objc.lookUpClass("GSToolSelect")
__all__ = ["Glyphs", "FileFormatPlugin", "FilterWithDialog", "FilterWithoutDialog", "GeneralPlugin", "PalettePlugin", "ReporterPlugin", "SelectTool",
"NSBundle", "NSLog", "NSObject", "NSClassFromString", "NSMutableArray", "NSMutableOrderedSet", "NSString", "NSAttributedString", "NSNumber", "NSUserDefaults",
"NSUserNotification", "NSUserNotificationCenter", "NSNotificationCenter", "NSError", "NSLocalizedDescriptionKey", "NSLocalizedRecoverySuggestionErrorKey",
"NSLocalizedString", "NSNotFound", "NSPoint", "NSMakePoint", "NSZeroPoint", "NSMakeRect", "NSMakeSize", "NSMinX", "NSMinY", "NSMaxX", "NSMaxY", "NSRect",
"NSSize", "NSUnarchiver", "NSApplication", "NSColor", "NSNib", "NSMenu", "NSMenuItem", "NSView", "NSImage", "NSDocumentController", "NSBezierPath",
"NSFont", "NSFontAttributeName", "NSForegroundColorAttributeName", "NSControlKeyMask", "NSCommandKeyMask", "NSShiftKeyMask", "NSAlternateKeyMask", "NSEvent",
"NSAffineTransform", "GSFilterPlugin", "setUpMenuHelper",
"objc"]
############################################################################################
# Helper methods
def LogToConsole_AsClassExtension(self, message):
LogToConsole(message, self.title()) # from GlyhsApp.py
def LogError_AsClassExtension(self, message):
LogError(message) # from GlyhsApp.py
def LoadNib(self, nibname, path=None):
if path and len(path) > 10:
try:
bundlePath = path[:path.find("/Contents/Resources/")]
bundle = NSBundle.bundleWithPath_(bundlePath)
nib = NSNib.alloc().initWithNibNamed_bundle_(nibname, bundle)
if not nib:
LogError("Error loading nib for Class: %s" % self.__class__.__name__)
result = nib.instantiateWithOwner_topLevelObjects_(self, None)
            if not result[0]:
LogError("Error instantiating nib for Class: %s" % self.__class__.__name__)
else:
self.topLevelObjects = result[1]
except:
LogError(traceback.format_exc())
else:
if not NSBundle.loadNibNamed_owner_(nibname, self):
LogError("Error loading %s.nib." % nibname)
def pathForResource(resourceName, extension, path=None):
if path and len(path) > 10:
bundlePath = path[:path.find("/Contents/Resources/")]
bundle = NSBundle.bundleWithPath_(bundlePath)
return bundle.pathForResource_ofType_(resourceName, extension)
else:
raise("Please supply path")
def setUpMenuHelper(Menu, Items, defaultTarget):
if type(Items) == list:
for entry in Items:
if "index" in entry:
index = int(entry["index"])
else:
index = -1
# Use supplied NSMenuItem
if "menu" in entry:
newMenuItem = entry["menu"]
# Create menu item
else:
if "view" in entry and "name" not in entry:
entry["name"] = ""
if "view" in entry and "action" not in entry:
entry["action"] = None
newMenuItem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(entry["name"], entry["action"], "")
if "view" in entry:
try:
view = entry["view"]
if isinstance(view, NSView):
newMenuItem.setView_(view)
except:
LogToConsole(traceback.format_exc(), "setUpMenuHelper") # from GlyhsApp.py
if "state" in entry:
state = entry["state"]
if state == ONSTATE or state == OFFSTATE or state == MIXEDSTATE:
newMenuItem.setState_(entry["state"])
else:
LogToConsole("illegal state for menu item '%s'" % entry["name"], "setUpMenuHelper")
if "target" in entry:
newMenuItem.setTarget_(entry["target"])
else:
newMenuItem.setTarget_(defaultTarget)
if index >= 0:
Menu.insertItem_atIndex_(newMenuItem, index)
else:
Menu.addItem_(newMenuItem)
############################################################################################
# Plug-in wrapper
BaseFileFormatPlugin = objc.lookUpClass("BaseFileFormatPlugin")
class FileFormatPlugin (BaseFileFormatPlugin):
def init(self):
"""
Do all initializing here.
"""
self = objc.super(FileFormatPlugin, self).init()
# Settings, default values
self.name = 'My File Format'
self.icon = 'ExportIcon'
self.toolbarPosition = 100
if hasattr(self, 'settings'):
self.settings()
# Dialog stuff
# Initiate empty self.dialog here in case of Vanilla dialog,
        # where .dialog is not defined at the class's root.
if not hasattr(self, 'dialog'):
self.dialog = None
if hasattr(self, "__file__"):
path = self.__file__()
thisBundle = NSBundle.bundleWithPath_(path[:path.rfind("Contents/Resources/")])
else:
thisBundle = NSBundle.bundleForClass_(NSClassFromString(self.className()))
self.toolbarIcon = NSImage.alloc().initWithContentsOfFile_(thisBundle.pathForImageResource_(self.icon))
# Using self.toolbarIconName() instead of self.icon to
# make sure registered NSImage name is unique
self.toolbarIcon.setName_(self.toolbarIconName())
if hasattr(self, 'start'):
self.start()
return self
def interfaceVersion(self):
"""
Distinguishes the API version the plugin was built for.
Return 1.
"""
return 1
def title(self):
"""
This is the name as it appears in the menu in combination with 'Show'.
E.g. 'return "Nodes"' will make the menu item read "Show Nodes".
"""
try:
return self.name or self.__class__.__name__ or 'New FileFormat Plugin'
except:
LogError(traceback.format_exc())
def toolbarTitle(self):
"""
Name below the icon in the Export dialog toolbar.
"""
try:
return self.name
except:
LogError(traceback.format_exc())
def toolbarIconName(self):
"""
Used for image and tab tags. Should be unique.
The className + the filename of the icon (without the suffix).
"""
try:
return "{}{}".format(self.className(), self.icon)
except:
LogError(traceback.format_exc())
def groupID(self):
"""
Determines the position in the Export dialog toolbar.
Lower values are further to the left.
"""
try:
return self.toolbarPosition or 100
except:
LogError(traceback.format_exc())
def exportSettingsView(self):
"""
Returns the view to be displayed in the export dialog.
Don't touch this.
"""
return self.dialog
def font(self):
return self._font
def setFont_(self, GSFontObj):
"""
The GSFont object is assigned to the plugin prior to the export.
This is used to publish the export dialog.
"""
try:
self._font = GSFontObj
except:
LogError(traceback.format_exc())
def exportFont_(self, font):
"""
EXPORT dialog
This method is called when the Next button is pressed in the Export dialog,
        and should ask the user where to store the font (though you could also choose to hard-wire the destination in the code).
Parameters:
- font: The font object to export
//- error: PyObjc-Requirement. It is required here in order to return the error object upon export failure. Ignore its existence here.
return (True, None) if the export was successful
return (False, NSError) if the export failed
"""
try:
returnStatus, returnMessage = [False, 'export() is not implemented in the plugin.']
if hasattr(self, 'export'):
returnStatus, returnMessage = self.export(font)
# Export successful
# Change the condition (True) to your own assessment on whether or not the export succeeded
if returnStatus:
# Use Mac Notification Center
notification = NSUserNotification.alloc().init()
notification.setTitle_(self.title())
notification.setInformativeText_(returnMessage)
NSUserNotificationCenter.defaultUserNotificationCenter().scheduleNotification_(notification)
return
# Export failed, give reason
else:
error = NSError.errorWithDomain_code_userInfo_(self.title(), -57, {
NSLocalizedDescriptionKey: NSLocalizedString('Export failed', None),
NSLocalizedRecoverySuggestionErrorKey: returnMessage
})
font.parent.presentError_(error)
# Python exception, return error message
except Exception as e:
LogError(traceback.format_exc())
error = NSError.errorWithDomain_code_userInfo_(self.title(), -57, {
NSLocalizedDescriptionKey: NSLocalizedString('Python exception', None),
NSLocalizedRecoverySuggestionErrorKey: str(e) + '\nCheck Macro window output.'
})
font.parent.presentError_(error)
def exportFont_toURL_error_(self, font, destinationURL, error):
"""
EXPORT dialog
This is called from "Export All font" or from other plugins
Parameters:
- font: The font object to export
- destinationURL: The URL where to write to. That can be the final file or a folder
- error: PyObjc-Requirement. It is required here in order to return the error object upon export failure. Ignore its existence here.
return True, None) if the export was successful
return (False, NSError) if the export failed
"""
try:
returnStatus, returnMessage = [False, 'export() is not implemented in the plugin.']
if hasattr(self, 'export'):
returnStatus, returnMessage = self.export(font, destinationURL.path())
# Export successful
# Change the condition (True) to your own assessment on whether or not the export succeeded
if returnStatus:
# Use Mac Notification Center
notification = NSUserNotification.alloc().init()
notification.setTitle_(self.title())
notification.setInformativeText_(returnMessage)
NSUserNotificationCenter.defaultUserNotificationCenter().scheduleNotification_(notification)
return (True, None)
# Export failed, give reason
else:
error = NSError.errorWithDomain_code_userInfo_(self.title(), -57, {
NSLocalizedDescriptionKey: NSLocalizedString('Export failed', None),
NSLocalizedRecoverySuggestionErrorKey: returnMessage
})
return (False, error)
# Python exception, return error message
except Exception as e:
LogError(traceback.format_exc())
error = NSError.errorWithDomain_code_userInfo_(self.title(), -57, {
NSLocalizedDescriptionKey: NSLocalizedString('Python exception', None),
NSLocalizedRecoverySuggestionErrorKey: str(e) + '\nCheck Macro window output.'
})
return (False, error)
return True
########################################################################
#
#
# def writeFont_toURL_error_()
# To be implemented in Glyphs in the future
# Don't delete, it needs to be present in the plugin already
#
#
########################################################################
def writeFont_toURL_error_(self, font, URL, error):
"""
SAVE FONT dialog
This method is called when the save dialog is invoked by the user.
You don't have to create a file dialog.
Parameters:
- font: the font object to save
- URL: the URL (file path) to save the font to
- error: on return, if the document contents could not be read, a pointer to an error object that encapsulates the reason they could not be read.
"""
try:
if hasattr(self, 'export'):
self.export(font, URL.path())
return (True, None)
else:
error = NSError.errorWithDomain_code_userInfo_(self.title(), -57, {NSLocalizedDescriptionKey: "The plugin does not support exporting the file"})
return None, error
except:
LogError(traceback.format_exc())
########################################################################
#
#
# def fontFromURL_ofType_error_()
# Read fonts from files: To be implemented in Glyphs in the future
# Don't delete, it needs to be present in the plugin already
#
#
########################################################################
def fontFromURL_ofType_error_(self, URL, fonttype, error):
"""
Reads a Font object from the specified URL.
URL: the URL to read the font from.
error: on return, if the document contents could not be read, a pointer to an error object that encapsulates the reason they could not be read.
Return the font object, or None if an error occurred.
"""
try:
# Create a new font object:
if hasattr(self, 'read'):
font = self.read(URL.path(), fonttype)
return font, None
else:
error = NSError.errorWithDomain_code_userInfo_(self.title(), -57, {NSLocalizedDescriptionKey: "The plugin does not support opening the file"})
return None, error
except:
print(traceback.format_exc())
return None, None
FileFormatPlugin.loadNib = LoadNib
class FilterWithDialog (GSFilterPlugin):
"""
All 'myValue' and 'myValueField' references are just an example.
They correspond to the 'My Value' field in the .xib file.
Replace and add your own class variables.
"""
def loadPlugin(self):
"""
Do all initializing here.
This is a good place to call random.seed() if you want to use randomization.
In that case, don't forget to import random at the top of this file.
"""
self.menuName = 'My Filter'
self.keyboardShortcut = None # With Cmd+Shift
self.actionButtonLabel = 'Apply'
if hasattr(self, 'settings'):
self.settings()
# Dialog stuff
        # Initiate empty self.dialog here in case of Vanilla dialog,
        # where .dialog is not defined at the class's root.
if not hasattr(self, 'dialog'):
self.dialog = None
def setup(self):
try:
objc.super(FilterWithDialog, self).setup()
if hasattr(self, 'start'):
self.start()
self.process_(None)
return None
except:
LogError(traceback.format_exc())
def interfaceVersion(self):
"""
Distinguishes the API version the plugin was built for.
Return 1.
"""
return 1
def title(self):
"""
This is the name as it appears in the menu
and in the title of the dialog window.
"""
try:
return self.menuName
except:
LogError(traceback.format_exc())
def actionName(self):
"""
This is the title of the button in the settings dialog.
Use something descriptive like 'Move', 'Rotate', or at least 'Apply'.
"""
try:
return self.actionButtonLabel
except:
LogError(traceback.format_exc())
def keyEquivalent(self):
"""
The key together with Cmd+Shift will be the shortcut for the filter.
Return None if you do not want to set a shortcut.
Users can set their own shortcuts in System Prefs.
"""
try:
return self.keyboardShortcut
except:
LogError(traceback.format_exc())
def processFont_withArguments_(self, Font, Arguments):
"""
Invoked when called as Custom Parameter in an instance at export.
The Arguments come from the custom parameter in the instance settings.
Item 0 in Arguments is the class-name. The consecutive items should be your filter options.
"""
try:
# set glyphList (list of glyphs to be processed) to all glyphs in the font
glyphList = Font.glyphs
# customParameters delivered to filter()
customParameters = {}
unnamedCustomParameterCount = 0
for i in range(1, len(Arguments)):
if 'include' not in Arguments[i] and 'exclude' not in Arguments[i]:
# if key:value pair
if ':' in Arguments[i]:
key, value = Arguments[i].split(':')
# only value given, no key. make key name
else:
key = unnamedCustomParameterCount
unnamedCustomParameterCount += 1
value = Arguments[i]
# attempt conversion to float value
try:
customParameters[key] = float(value)
except:
customParameters[key] = value
# change glyphList to include or exclude glyphs
if len(Arguments) > 1:
if "exclude:" in Arguments[-1]:
excludeList = [n.strip() for n in Arguments.pop(-1).replace("exclude:", "").strip().split(",")]
glyphList = [g for g in glyphList if g.name not in excludeList]
elif "include:" in Arguments[-1]:
includeList = [n.strip() for n in Arguments.pop(-1).replace("include:", "").strip().split(",")]
glyphList = [Font.glyphs[n] for n in includeList]
# With these values, call your code on every glyph:
FontMasterId = Font.fontMasterAtIndex_(0).id
for thisGlyph in glyphList:
Layer = thisGlyph.layerForId_(FontMasterId)
if hasattr(self, 'filter'):
self.filter(Layer, False, customParameters)
except:
# Custom Parameter
if len(Arguments) > 1:
Message(title='Error in %s' % self.menuName, message="There was an error in %s's filter() method when called through a Custom Parameter upon font export. Check your Macro window output." % self.menuName)
LogError(traceback.format_exc())
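    # Illustrative (hypothetical) custom parameter value for the parsing above:
    #   "MyFilter; shift:10; include: a, b"
    # arrives here as Arguments = ['MyFilter', 'shift:10', 'include: a, b'],
    # i.e. customParameters == {'shift': 10.0} applied only to glyphs a and b.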
def processLayer_withArguments_(self, Layer, Arguments):
"""
Invoked when called as Custom Parameter in an instance to generate the Preview.
The Arguments come from the custom parameter in the instance settings.
Item 0 in Arguments is the class-name. The consecutive items should be your filter options.
"""
try:
if not hasattr(self, 'filter'):
print("The filter: %s doesnโt fully support the plugin API. The method 'filter()' is missing" % self.menuName)
return
# customParameters delivered to filter()
customParameters = {}
unnamedCustomParameterCount = 0
for i in range(1, len(Arguments)):
# if key:value pair
if ':' in Arguments[i]:
key, value = Arguments[i].split(':')
# only value given, no key. make key name
else:
key = unnamedCustomParameterCount
unnamedCustomParameterCount += 1
value = Arguments[i]
# attempt conversion to float value
try:
customParameters[key] = float(value)
except:
customParameters[key] = value
self.filter(Layer, False, customParameters)
except:
# Custom Parameter
if len(Arguments) > 1:
Message(title='Error in %s' % self.menuName, message="There was an error in %s's filter() method when called through a Custom Parameter upon font export. Check your Macro window output." % self.menuName)
LogError(traceback.format_exc())
def process_(self, sender):
"""
This method gets called when the user invokes the Dialog.
"""
try:
# Create Preview in Edit View, and save & show original in ShadowLayers:
ShadowLayers = self.valueForKey_("shadowLayers")
Layers = self.valueForKey_("layers")
checkSelection = True
for k in range(len(ShadowLayers)):
ShadowLayer = ShadowLayers[k]
Layer = Layers[k]
Layer.setShapes_(NSMutableArray.alloc().initWithArray_copyItems_(ShadowLayer.pyobjc_instanceMethods.shapes(), True))
Layer.clearSelection()
if len(ShadowLayer.selection) > 0 and checkSelection:
for idx in range(len(ShadowLayer.shapes)):
                        currShadowPath = ShadowLayer.shapes[idx]
if isinstance(currShadowPath, GSPath):
currLayerPath = Layer.shapes[idx]
for j in range(len(currShadowPath.nodes)):
currShadowNode = currShadowPath.nodes[j]
if currShadowNode in ShadowLayer.selection:
Layer.addSelection_(currLayerPath.nodes[j])
self.filter(Layer, True, {}) # add your class variables here
Layer.clearSelection()
# Safe the values in the FontMaster. But could be saved in UserDefaults, too.
# FontMaster = self.valueForKey_("fontMaster")
# FontMaster.userData["____myValue____"] = NSNumber.numberWithInteger_(self.____myValue____)
# call the superclass to trigger the immediate redraw:
objc.super(FilterWithDialog, self).process_(sender)
except:
LogError(traceback.format_exc())
def view(self):
return self.dialog
def update(self):
self.process_(None)
Glyphs.redraw()
def customParameterString(self):
if hasattr(self, 'generateCustomParameter'):
return self.generateCustomParameter()
return objc.nil
FilterWithDialog.loadNib = LoadNib
BaseFilterWithoutDialog = objc.lookUpClass("BaseFilterWithoutDialog")
class FilterWithoutDialog (BaseFilterWithoutDialog):
def loadPlugin(self):
"""
Do all initializing here.
"""
self.menuName = 'My Filter'
self.keyboardShortcut = None
if hasattr(self, 'settings'):
self.settings()
if hasattr(self, 'start'):
self.start()
def interfaceVersion(self):
"""
Distinguishes the API version the plugin was built for.
Return 1.
"""
return 1
def title(self):
"""
This is the human-readable name as it appears in the Filter menu.
"""
return self.menuName
def setController_(self, Controller):
"""
Sets the controller, you can access it with controller().
Do not touch this.
"""
self._controller = Controller
def controller(self):
"""
Do not touch this.
"""
try:
return self._controller
except:
LogError(traceback.format_exc())
def setup(self):
"""
Do not touch this.
"""
return None
def keyEquivalent(self):
"""
The key together with Cmd+Shift will be the shortcut for the filter.
Return None if you do not want to set a shortcut.
Users can set their own shortcuts in System Prefs.
"""
try:
return self.keyboardShortcut
except:
LogError(traceback.format_exc())
@objc.signature(b'c32@0:8@16o^@24')
def runFilterWithLayers_error_(self, Layers, Error):
"""
Invoked when user triggers the filter through the Filter menu
and more than one layer is selected.
"""
try:
for Layer in Layers:
if hasattr(self, 'filter'):
self.filter(Layer, True, {})
return (True, None)
except:
LogError(traceback.format_exc())
@objc.signature(b'c40@0:8@16@24o^@32')
def runFilterWithLayer_options_error_(self, Layer, Options, Error):
"""
Required for compatibility with Glyphs version 702 or later.
Leave this as it is.
"""
try:
            return self.runFilterWithLayer_error_(Layer, Error)
except:
LogError(traceback.format_exc())
@objc.signature(b'c32@0:8@16o^@24')
def runFilterWithLayer_error_(self, Layer, Error):
"""
Invoked when user triggers the filter through the Filter menu
and only one layer is selected.
"""
try:
if hasattr(self, 'filter'):
self.filter(Layer, True, {})
return (True, None)
except:
LogError(traceback.format_exc())
return (False, None)
@objc.signature(b'@@:@@')
def processFont_withArguments_(self, Font, Arguments):
"""
Invoked when called as Custom Parameter in an instance at export.
The Arguments come from the custom parameter in the instance settings.
Item 0 in Arguments is the class-name. The consecutive items should be your filter options.
"""
try:
# set glyphList to all glyphs
glyphList = Font.glyphs
# customParameters delivered to filter()
customParameters = {}
unnamedCustomParameterCount = 0
for i in range(1, len(Arguments)):
if 'include' not in Arguments[i] and 'exclude' not in Arguments[i]:
# if key:value pair
if ':' in Arguments[i]:
key, value = Arguments[i].split(':')
# only value given, no key. make key name
else:
key = unnamedCustomParameterCount
unnamedCustomParameterCount += 1
value = Arguments[i]
# attempt conversion to float value
try:
customParameters[key] = float(value)
except:
customParameters[key] = value
# change glyphList to include or exclude glyphs
if len(Arguments) > 1:
if "exclude:" in Arguments[-1]:
excludeList = [n.strip() for n in Arguments.pop(-1).replace("exclude:", "").strip().split(",")]
glyphList = [g for g in glyphList if g.name not in excludeList]
elif "include:" in Arguments[-1]:
includeList = [n.strip() for n in Arguments.pop(-1).replace("include:", "").strip().split(",")]
glyphList = [Font.glyphs[n] for n in includeList]
FontMasterId = Font.fontMasterAtIndex_(0).id
for thisGlyph in glyphList:
Layer = thisGlyph.layerForId_(FontMasterId)
if hasattr(self, 'filter'):
self.filter(Layer, False, customParameters)
except:
# Custom Parameter
if len(Arguments) > 1:
Message(title='Error in %s' % self.menuName, message="There was an error in %s's filter() method when called through a Custom Parameter upon font export. Check your Macro window output." % self.menuName)
LogError(traceback.format_exc())
BaseGeneralPlugin = objc.lookUpClass("BaseGeneralPlugin")
class GeneralPlugin (BaseGeneralPlugin):
def interfaceVersion(self):
"""
Distinguishes the API version the plugin was built for.
Return 1.
"""
return 1
def loadPlugin(self):
self.name = 'My General Plugin'
if hasattr(self, 'settings'):
self.settings()
if hasattr(self, 'start'):
self.start()
def title(self):
"""
This is the name as it appears in the menu in combination with 'Show'.
E.g. 'return "Nodes"' will make the menu item read "Show Nodes".
"""
try:
return self.name
except:
LogError(traceback.format_exc())
GeneralPlugin.loadNib = LoadNib
BasePalettePlugin = objc.lookUpClass("BasePalettePlugin")
class PalettePlugin (BasePalettePlugin):
# Define all your IB outlets for your .xib after _theView:
_windowController = None
# _theView = objc.IBOutlet() # Palette view on which you can place UI elements.
def init(self):
"""
Do all initializing here, and customize the quadruple underscore items.
____CFBundleIdentifier____ should be the reverse domain name you specified in Info.plist.
"""
self = objc.super(PalettePlugin, self).init()
self.name = 'My Palette'
self.dialog = None # make sure we have that property
self.sortId = 0
# Call settings
if hasattr(self, 'settings'):
self.settings()
# Dialog stuff
if self.theView() is not None:
Frame = self.theView().frame()
# Set minimum and maximum height to height of Frame
if not hasattr(self, "min"):
self.min = Frame.size.height
if not hasattr(self, "max"):
self.max = Frame.size.height
if hasattr(self, 'start'):
self.start()
try:
self.theView().setController_(self)
except:
pass
return self
def loadPlugin(self):
pass
@objc.signature(b'L@:')
def interfaceVersion(self):
"""
Distinguishes the API version the plugin was built for.
Return 1.
"""
return 1
def title(self):
"""
This is the name as it appears in the Palette section header.
"""
try:
return self.name
except:
LogError(traceback.format_exc())
@objc.signature(b'L@:')
def sortID(self):
return self.sortId
def windowController(self):
try:
return self._windowController
except:
LogError(traceback.format_exc())
def setWindowController_(self, windowController):
try:
self._windowController = windowController
except:
LogError(traceback.format_exc())
def theView(self):
"""
Returns an NSView to be displayed in the palette.
This is the grey background in the palette, on which you can place UI items.
"""
try:
return self.dialog
except:
LogError(traceback.format_exc())
@objc.signature(b'l@:')
def minHeight(self):
"""
The minimum height of the view in pixels.
"""
try:
return self.min
except:
LogError(traceback.format_exc())
@objc.signature(b'l@:')
def maxHeight(self):
"""
The maximum height of the view in pixels.
Must be equal to or bigger than minHeight.
"""
try:
return self.max
except:
LogError(traceback.format_exc())
@objc.signature(b'L@:')
def currentHeight(self):
"""
The current height of the Palette section.
Used for storing the current resized state.
If you have a fixed height, you can also return the height in pixels
"""
try:
# return 150
return NSUserDefaults.standardUserDefaults().integerForKey_(self.name + ".ViewHeight")
except:
LogError(traceback.format_exc())
@objc.signature(b'@::L')
def setCurrentHeight_(self, newHeight):
"""
Sets a new height for the Palette section.
"""
try:
if newHeight >= self.minHeight() and newHeight <= self.maxHeight():
NSUserDefaults.standardUserDefaults().setInteger_forKey_(newHeight, self.name + ".ViewHeight")
except:
LogError(traceback.format_exc())
PalettePlugin.loadNib = LoadNib
BaseReporterPlugin = objc.lookUpClass("BaseReporterPlugin")
class ReporterPlugin (BaseReporterPlugin):
def init(self):
"""
Put any initializations you want to make here.
"""
self = objc.super(ReporterPlugin, self).init()
self.needsExtraMainOutlineDrawingForInactiveLayers = True
# Default values
self.menuName = 'New ReporterPlugin'
self.keyboardShortcut = None
self.keyboardShortcutModifier = 0 # Set any combination of NSShiftKeyMask | NSControlKeyMask | NSCommandKeyMask | NSAlternateKeyMask
self.drawDefaultInactiveLayers = True
self.generalContextMenus = []
if hasattr(self, 'settings'):
self.settings()
if hasattr(self, 'start'):
self.start()
self.hasWarned = False
return self
    @objc.signature(b'L@:')
def interfaceVersion(self):
"""
Distinguishes the API version the plugin was built for.
Return 1.
"""
return 1
def title(self):
"""
This is the name as it appears in the menu in combination with 'Show'.
E.g. 'return "Nodes"' will make the menu item read "Show Nodes".
"""
try:
return self.menuName or self.__class__.__name__ or 'New ReporterPlugin'
except:
LogError(traceback.format_exc())
def keyEquivalent(self):
"""
The key for the keyboard shortcut. Set modifier keys in modifierMask() further below.
Pretty tricky to find a shortcut that is not taken yet, so be careful.
If you are not sure, use 'return None'. Users can set their own shortcuts in System Prefs.
"""
try:
return self.keyboardShortcut or None
except:
LogError(traceback.format_exc())
@objc.signature(b'i@:')
def modifierMask(self):
"""
Use any combination of these to determine the modifier keys for your default shortcut:
return NSShiftKeyMask | NSControlKeyMask | NSCommandKeyMask | NSAlternateKeyMask
Or:
return 0
... if you do not want to set a shortcut.
"""
try:
return self.keyboardShortcutModifier or 0
except:
LogError(traceback.format_exc())
def drawForegroundForLayer_options_(self, Layer, options):
"""
Whatever you draw here will be displayed IN FRONT OF the paths.
Setting a color:
NSColor.colorWithCalibratedRed_green_blue_alpha_(1.0, 1.0, 1.0, 1.0).set() # sets RGBA values between 0.0 and 1.0
NSColor.redColor().set() # predefined colors: blackColor, blueColor, brownColor, clearColor, cyanColor, darkGrayColor, grayColor, greenColor, lightGrayColor, magentaColor, orangeColor, purpleColor, redColor, whiteColor, yellowColor
Drawing a path:
myPath = NSBezierPath.alloc().init() # initialize a path object myPath
myPath.appendBezierPath_(subpath) # add subpath to myPath
myPath.fill() # fill myPath with the current NSColor
myPath.stroke() # stroke myPath with the current NSColor
To get an NSBezierPath from a GSPath, use the bezierPath() method:
myPath.bezierPath().fill()
You can apply that to a full layer at once:
            if len(myLayer.paths) > 0:
myLayer.bezierPath() # all closed paths
myLayer.openBezierPath() # all open paths
See:
https://developer.apple.com/library/mac/documentation/Cocoa/Reference/ApplicationKit/Classes/NSBezierPath_Class/Reference/Reference.html
https://developer.apple.com/library/mac/documentation/cocoa/reference/applicationkit/classes/NSColor_Class/Reference/Reference.html
"""
try:
if hasattr(self, 'foreground'):
self._scale = options["Scale"]
self.black = options["Black"]
self.foreground(Layer)
except:
LogError(traceback.format_exc())
def drawForegroundWithOptions_(self, options):
"""
        Whatever you draw here will be displayed IN FRONT OF the paths. The difference from drawForegroundForLayer_options_() is that you need to deal with the scaling and current layer yourself.
examples::
layer = self.activeLayer()
layerPosition = self.activePosition()
scale = options["Scale"]
allLayers = self.controller.graphicView().layoutManager().cachedLayers()
indexOfActiveLayer = self.controller.graphicView().activeIndex()
selectionRange = self.controller.graphicView().selectedRange()
"""
try:
if hasattr(self, 'foregroundInViewCoords'):
self._scale = options["Scale"]
self.black = options["Black"]
self.foregroundInViewCoords()
except:
LogError(traceback.format_exc())
def drawBackgroundForLayer_options_(self, Layer, options):
"""
Whatever you draw here will be displayed BEHIND the paths.
"""
try:
if hasattr(self, 'background'):
self._scale = options["Scale"]
self.black = options["Black"]
self.background(Layer)
except:
LogError(traceback.format_exc())
def drawBackgroundWithOptions_(self, options):
"""
        Whatever you draw here will be displayed BEHIND the paths. The difference from drawBackgroundForLayer_options_() is that you need to deal with the scaling and current layer yourself.
"""
try:
if hasattr(self, 'backgroundInViewCoords'):
self._scale = options["Scale"]
self.black = options["Black"]
self.backgroundInViewCoords()
except:
LogError(traceback.format_exc())
def drawBackgroundForInactiveLayer_options_(self, Layer, options):
"""
Whatever you draw here will be displayed behind the paths, but
- for inactive glyphs in the EDIT VIEW
- and for glyphs in the PREVIEW
Please note: If you are using this method, you probably want
self.needsExtraMainOutlineDrawingForInactiveLayer_() to return False
because otherwise Glyphs will draw the main outline on top of it, and
potentially cover up your background drawing.
"""
try:
self._scale = options["Scale"]
self.black = options["Black"]
assert Glyphs
if self.controller:
if hasattr(self, 'inactiveLayerBackground'):
self.inactiveLayerBackground(Layer)
elif hasattr(self, 'inactiveLayers'):
if not self.hasWarned:
print("%s: the method 'inactiveLayers' has been deprecated. Please use 'inactiveLayerBackground'" % self.className())
self.hasWarned = True
self.inactiveLayers(Layer)
else:
if hasattr(self, 'preview'):
self.preview(Layer)
elif hasattr(self, 'inactiveLayers'):
self.inactiveLayers(Layer)
except:
LogError(traceback.format_exc())
def drawForegroundForInactiveLayer_options_(self, Layer, options):
"""
Whatever you draw here will be displayed behind the paths, but
- for inactive glyphs in the EDIT VIEW
- and for glyphs in the PREVIEW
Please note: If you are using this method, you probably want
self.needsExtraMainOutlineDrawingForInactiveLayer_() to return False
because otherwise Glyphs will draw the main outline on top of it, and
potentially cover up your background drawing.
"""
try:
if hasattr(self, 'inactiveLayerForeground') and self.controller:
self._scale = options["Scale"]
self.black = options["Black"]
self.inactiveLayerForeground(Layer)
except:
LogError(traceback.format_exc())
@objc.signature(b'Z@:@')
def needsExtraMainOutlineDrawingForInactiveLayer_(self, Layer):
"""
Decides whether inactive glyphs in Edit View and glyphs in Preview should be drawn
by Glyphs (โthe main outline drawingโ).
Return True to let Glyphs draw the main outline.
Return False to prevent Glyphs from drawing the glyph (the main outline
drawing), which is probably what you want if you are drawing the glyph
yourself in self.inactiveLayerForeground() (self.drawForegroundForInactiveLayer_options_()).
"""
try:
return self.needsExtraMainOutlineDrawingForInactiveLayers
except:
LogError(traceback.format_exc())
def addMenuItemsForEvent_toMenu_(self, event, contextMenu):
'''
The event can tell you where the user had clicked.
'''
try:
if self.generalContextMenus:
setUpMenuHelper(contextMenu, self.generalContextMenus, self)
if hasattr(self, 'conditionalContextMenus'):
contextMenus = self.conditionalContextMenus()
if contextMenus:
setUpMenuHelper(contextMenu, contextMenus, self)
except:
LogError(traceback.format_exc())
@objc.python_method
def drawTextAtPoint(self, text, textPosition, fontSize=10.0, fontColor=NSColor.textColor(), align='bottomleft'):
"""
Use self.drawTextAtPoint("blabla", myNSPoint) to display left-aligned text at myNSPoint.
"""
try:
alignment = {
'topleft': 6,
'topcenter': 7,
'topright': 8,
'left': 3,
'center': 4,
'right': 5,
'bottomleft': 0,
'bottomcenter': 1,
'bottomright': 2
}
currentZoom = self.getScale()
fontAttributes = {
NSFontAttributeName: NSFont.labelFontOfSize_(fontSize / currentZoom),
NSForegroundColorAttributeName: fontColor,
}
displayText = NSAttributedString.alloc().initWithString_attributes_(text, fontAttributes)
textAlignment = alignment[align] # top left: 6, top center: 7, top right: 8, center left: 3, center center: 4, center right: 5, bottom left: 0, bottom center: 1, bottom right: 2
displayText.drawAtPoint_alignment_(textPosition, textAlignment)
except:
LogError(traceback.format_exc())
@objc.python_method
def getHandleSize(self):
"""
Returns the current handle size as set in user preferences.
Use: self.getHandleSize() / self.getScale()
to determine the right size for drawing on the canvas.
"""
try:
Selected = NSUserDefaults.standardUserDefaults().integerForKey_("GSHandleSize")
if Selected == 0:
return 5.0
elif Selected == 2:
return 10.0
else:
return 7.0 # Regular
except:
LogError(traceback.format_exc())
return 7.0
@objc.python_method
def getScale(self):
"""
self.getScale() returns the current scale factor of the Edit View UI.
Divide any scalable size by this value in order to keep the same apparent pixel size.
"""
return self._scale
def activeLayer(self):
try:
return self.controller.graphicView().activeLayer()
except:
LogError(traceback.format_exc())
def activePosition(self):
try:
return self.controller.graphicView().activePosition()
except:
LogError(traceback.format_exc())
@objc.signature(b'v@:@')
def setController_(self, Controller):
"""
Use self.controller as object for the current view controller.
"""
try:
self.controller = Controller
except:
LogError(traceback.format_exc())
ReporterPlugin.loadNib = LoadNib
class SelectTool (GSToolSelect):
def init(self):
"""
By default, toolbar.pdf will be your tool icon.
Use this for any initializations you need.
"""
self = objc.super(SelectTool, self).init()
self.name = 'My Select Tool'
self.toolbarPosition = 100
self._icon = 'toolbar.pdf'
self.keyboardShortcut = None
self.generalContextMenus = ()
# Inspector dialog stuff
# Initiate self.inspectorDialogView here in case of Vanilla dialog,
        # where inspectorDialogView is not defined at the class's root.
if not hasattr(self, 'inspectorDialogView'):
self.inspectorDialogView = None
if hasattr(self, 'settings'):
self.settings()
try:
if hasattr(self, "__file__"):
path = self.__file__()
Bundle = NSBundle.bundleWithPath_(path[:path.rfind("Contents/Resources/")])
else:
Bundle = NSBundle.bundleForClass_(NSClassFromString(self.className()))
if self._icon is not None:
self.tool_bar_image = Bundle.imageForResource_(self._icon)
self.tool_bar_image.setTemplate_(True) # Makes the icon blend in with the toolbar.
except:
LogError(traceback.format_exc())
if hasattr(self, 'start'):
self.start()
return self
def view(self):
return self.inspectorDialogView
def inspectorViewControllers(self):
ViewControllers = objc.super(SelectTool, self).inspectorViewControllers()
if ViewControllers is None:
ViewControllers = []
try:
            # self.inspectorDialogView may also be defined without a .nib,
# so it could be a Vanilla dialog
if self.inspectorDialogView:
ViewControllers.append(self)
except:
LogError(traceback.format_exc())
return ViewControllers
def interfaceVersion(self):
"""
Distinguishes the API version the plugin was built for.
Return 1.
"""
return 1
def title(self):
"""
The name of the Tool as it appears in the tooltip.
"""
try:
return self.name
except:
LogError(traceback.format_exc())
def toolBarIcon(self):
"""
Return a instance of NSImage that represents the toolbar icon as established in init().
Unless you know what you are doing, leave this as it is.
"""
try:
return self.tool_bar_image
except:
LogError(traceback.format_exc())
return objc.nil
def groupID(self):
"""
Determines the position in the toolbar.
Higher values are further to the right.
"""
try:
return self.toolbarPosition
except:
LogError(traceback.format_exc())
@objc.python_method
def trigger(self):
"""
The key to select the tool with keyboard (like v for the select tool).
Either use trigger() or keyEquivalent(), not both. Remove the method(s) you do not use.
"""
try:
return self.keyboardShortcut
except:
LogError(traceback.format_exc())
def willSelectTempTool_(self, TempTool):
"""
Temporary Tool when user presses Cmd key.
Should always be GlyphsToolSelect unless you have a better idea.
"""
try:
return TempTool.__class__.__name__ != "GlyphsToolSelect"
except:
LogError(traceback.format_exc())
def willActivate(self):
"""
Do stuff when the tool is selected.
E.g. show a window, or set a cursor.
"""
try:
objc.super(SelectTool, self).willActivate()
if hasattr(self, 'activate'):
self.activate()
except:
LogError(traceback.format_exc())
def willDeactivate(self):
"""
Do stuff when the tool is deselected.
"""
try:
objc.super(SelectTool, self).willDeactivate()
if hasattr(self, 'deactivate'):
self.deactivate()
except:
LogError(traceback.format_exc())
def elementAtPoint_atLayer_(self, currentPoint, activeLayer):
"""
Return an element in the vicinity of currentPoint (NSPoint), and it will be captured by the tool.
Use Boolean ...
            distance(currentPoint, referencePoint) < clickTolerance / Scale
... for determining whether the NSPoint referencePoint is captured or not.
Use:
myPath.nearestPointOnPath_pathTime_(currentPoint, 0.0)
"""
return objc.super(SelectTool, self).elementAtPoint_atLayer_(currentPoint, activeLayer)
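        # NOTE: the early return above makes the node/anchor hit-test below
        # unreachable; it is kept only as sample code to adapt in a subclass.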
try:
Scale = self.editViewController().graphicView().scale()
clickTolerance = 4.0
for p in activeLayer.paths:
for n in p.nodes:
if distance(currentPoint, n.position) < clickTolerance / Scale:
return n
for a in activeLayer.anchors:
if distance(currentPoint, a.position) < clickTolerance / Scale:
return a
except:
LogError(traceback.format_exc())
# The following four methods are optional, and only necessary
# if you intend to extend the context menu with extra items.
# Remove them if you do not want to change the context menu:
def defaultContextMenu(self):
"""
Sets the default content of the context menu and returns the menu.
Add menu items that do not depend on the context,
e.g., actions that affect the whole layer, no matter what is selected.
Remove this method if you do not want any extra context menu items.
"""
try:
# Get the current default context menu:
theMenu = objc.super(SelectTool, self).defaultContextMenu()
# Add separator at the bottom:
newSeparator = NSMenuItem.separatorItem()
theMenu.addItem_(newSeparator)
# Add menu items at the bottom:
setUpMenuHelper(theMenu, self.generalContextMenus, self)
return theMenu
except:
LogError(traceback.format_exc())
def addMenuItemsForEvent_toMenu_(self, theEvent, theMenu):
"""
Adds menu items to default context menu.
Remove this method if you do not want any extra context menu items.
"""
try:
if hasattr(self, 'conditionalContextMenus'):
contextMenus = self.conditionalContextMenus()
if contextMenus:
# Todo: Make sure that the index is 0 for all items,
# i.e., add at top rather than at bottom of menu:
newSeparator = NSMenuItem.separatorItem()
theMenu.addItem_(newSeparator)
setUpMenuHelper(theMenu, contextMenus, self)
except:
LogError(traceback.format_exc())
def drawForegroundForLayer_(self, Layer):
"""
Whatever you draw here will be displayed IN FRONT OF the paths.
Setting a color:
NSColor.colorWithCalibratedRed_green_blue_alpha_(1.0, 1.0, 1.0, 1.0).set() # sets RGBA values between 0.0 and 1.0
NSColor.redColor().set() # predefined colors: blackColor, blueColor, brownColor, clearColor, cyanColor, darkGrayColor, grayColor, greenColor, lightGrayColor, magentaColor, orangeColor, purpleColor, redColor, whiteColor, yellowColor
Drawing a path:
myPath = NSBezierPath.alloc().init() # initialize a path object myPath
myPath.appendBezierPath_(subpath) # add subpath to myPath
myPath.fill() # fill myPath with the current NSColor
myPath.stroke() # stroke myPath with the current NSColor
To get an NSBezierPath from a GSPath, use the bezierPath() method:
myPath.bezierPath().fill()
You can apply that to a full layer at once:
            if len(myLayer.paths) > 0:
myLayer.bezierPath() # all closed paths
myLayer.openBezierPath() # all open paths
See:
https://developer.apple.com/library/mac/documentation/Cocoa/Reference/ApplicationKit/Classes/NSBezierPath_Class/Reference/Reference.html
https://developer.apple.com/library/mac/documentation/cocoa/reference/applicationkit/classes/NSColor_Class/Reference/Reference.html
"""
try:
if hasattr(self, 'foreground'):
self.foreground(Layer)
except:
LogError(traceback.format_exc())
def drawBackgroundForLayer_(self, Layer):
"""
Whatever you draw here will be displayed BEHIND the paths.
"""
try:
if hasattr(self, 'background'):
self.background(Layer)
except:
LogError(traceback.format_exc())
SelectTool.loadNib = LoadNib
| StarcoderdataPython |
9771362 | <gh_stars>0
from datetime import datetime, timedelta
from typing import Optional
from jose import jwt
from ..config import SECRET_KEY, ALGORITHM, ACCESS_TOKEN_EXPIRE_MINUTES
def encode_token(data: dict, expires_delta: Optional[timedelta] = None):
to_encode = data.copy()
if expires_delta:
expire = datetime.utcnow() + expires_delta
else:
expire = datetime.utcnow() + timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
to_encode.update({"exp": expire})
encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)
return encoded_jwt
def decode_token(token: str):
return jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
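# Minimal usage sketch; the claim and lifetime below are illustrative:
#
# token = encode_token({"sub": "alice"}, expires_delta=timedelta(minutes=15))
# claims = decode_token(token)  # raises jose.JWTError if invalid or expired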
| StarcoderdataPython |
1636144 | from tkinter import *
from tkinter.ttk import *
import string
import random
import webbrowser
root = Tk()
root.title("Strong password generator")
root.geometry("350x300+463+200")
lbl1 = Label(root,text="Strong password generator in python")
lbl1.place(x="12", y="12")
url = "https://www.github.com/devendrapoonia"
new = 1
def web():
webbrowser.open(url, new)
button1 = Button(root, text="Follow me on GitHub", command=web)
button1.place(x="220", y="12")
def generatepass():
s1 = string.ascii_lowercase
s2 = string.punctuation
s3 = string.ascii_uppercase
s4 = string.digits
s5 = string.punctuation
s = []
s.extend(list(s1))
s.extend(list(s2))
s.extend(list(s3))
s.extend(list(s4))
s.extend(list(s5))
random.shuffle(s)
plenint= int(plen.get())
result = "".join(s[0:plenint])
    if password.get() == '':
        password.insert(0, result)
        password.focus()
    else:
        password.delete(0, END)
        password.insert(0, result)
        password.focus()
length = Label(text="Enter the length of password:")
length.place(x="12", y="45")
plen = Entry(root)
plen.place(x="12", y="70", width=325)
generatebtn = Button(root, text="Generate password", command=generatepass)
generatebtn.place(x="110", y="110")
genlbl = Label(root, text="Generated password:")
genlbl.place(x="12", y="145")
password = Entry(root)
password.place(x="12", y="170", width=325)
def copypass():
password.clipboard_clear()
password.clipboard_append(password.get())
password.focus()
copybtn = Button(root, text="Copy", command=copypass)
copybtn.place(x="70", y="210")
exit = Button(root, text="Exit", command=root.destroy)
exit.place(x="170", y="210")
root.mainloop()
"""
More softwares on github
visit https://www.github.com/devendrapoonia
"""
| StarcoderdataPython |
6640466 | from random import randint
import numpy as np
import sys
np.set_printoptions(threshold=sys.maxsize)
img_size = 42
max_holes = 10
min_width = 6
EDGE = {0: 'down', 1: 'up', 2: 'right', 3: 'left'}
def gen_square():
# make the base square
sq = np.zeros([img_size, img_size], dtype=int)
x1 = randint(1, 3 * img_size // 4)
y1 = randint(1, 3 * img_size // 4)
x2 = randint(x1 + min_width, img_size - 2)
y2 = randint(y1 + min_width, img_size - 2)
for i in range(x1, x2 + 1, 1):
sq[i][y1] = sq[i][y2] = 1
for j in range(y1, y2 + 1, 1):
sq[x1][j] = sq[x2][j] = 1
    n_holes = randint(1, max_holes)  # randint is inclusive on both ends
holes = []
for h in range(n_holes):
e_h = EDGE[randint(0, 3)]
if e_h == 'down':
prev = [s for s in holes if 'down' in s]
if len(prev) == 2:
continue
if len(prev) == 1:
if prev[0][1] - x1 > 5:
w1 = randint(x1 + 2, prev[0][1] - 4)
w2 = randint(w1 + 2, prev[0][1] - 2)
elif x2 - prev[0][2] > 5:
w1 = randint(prev[0][2] + 2, x2 - 4)
w2 = randint(w1 + 2, x2 - 2)
else:
continue
else:
w1 = randint(x1 + 2, x2 - 4)
w2 = randint(w1 + 2, x2 - 2)
l = randint(1, y2 - y1 - 1)
stop = l + y1
for j in range(y1, y1 + l + 1):
if stop != l + y1:
break
sq[w1][j] = sq[w2][j] = 1
for i in range(w1, w2 + 1):
if sq[i][j + 2] == 1 or sq[w1 - 1][j + 1] == 1 or sq[w2 + 1][j + 1] == 1:
stop = j
for i in range(w1, w2 + 1):
sq[i][y1] = 0
sq[i][stop] = 1
sq[w1][y1] = sq[w2][y1] = 1
holes.append(['down', w1, w2, stop])
if e_h == 'up':
prev = [s for s in holes if 'up' in s]
if len(prev) == 2:
continue
if len(prev) == 1:
if prev[0][1] - x1 > 5:
w1 = randint(x1 + 2, prev[0][1] - 4)
w2 = randint(w1 + 2, prev[0][1] - 2)
elif x2 - prev[0][2] > 5:
w1 = randint(prev[0][2] + 2, x2 - 4)
w2 = randint(w1 + 2, x2 - 2)
else:
continue
else:
w1 = randint(x1 + 2, x2 - 4)
w2 = randint(w1 + 2, x2 - 2)
l = randint(1, y2 - y1 - 1)
stop = y2 - l
for j in range(y2, y2 - l - 1, -1):
if stop != y2 - l:
break
sq[w1][j] = sq[w2][j] = 1
for i in range(w1, w2 + 1):
if sq[i][j - 2] == 1 or sq[w1 - 1][j - 1] == 1 or sq[w2 + 1][j - 1] == 1:
stop = j
for i in range(w1, w2 + 1):
sq[i][y2] = 0
sq[i][stop] = 1
sq[w1][y2] = sq[w2][y2] = 1
holes.append(['up', w1, w2, stop])
if e_h == 'right':
prev = [s for s in holes if 'right' in s]
if len(prev) == 2:
continue
if len(prev) == 1:
if prev[0][1] - y1 > 5:
w1 = randint(y1 + 2, prev[0][1] - 4)
w2 = randint(w1 + 2, prev[0][1] - 2)
elif y2 - prev[0][2] > 5:
w1 = randint(prev[0][2] + 2, y2 - 4)
w2 = randint(w1 + 2, y2 - 2)
else:
continue
else:
w1 = randint(y1 + 2, y2 - 4)
w2 = randint(w1 + 2, y2 - 2)
l = randint(1, x2 - x1 - 1)
stop = x2 - l
for j in range(x2, x2 - l - 1, -1):
if stop != x2 - l:
break
sq[j][w1] = sq[j][w2] = 1
for i in range(w1, w2 + 1):
if sq[j - 2][i] == 1 or sq[j - 1][w1 - 1] == 1 or sq[j - 1][w2 + 1] == 1:
stop = j
for i in range(w1, w2 + 1):
sq[x2][i] = 0
sq[stop][i] = 1
sq[x2][w1] = sq[x2][w2] = 1
holes.append(['right', w1, w2, stop])
if e_h == 'left':
prev = [s for s in holes if 'left' in s]
if len(prev) == 2:
continue
if len(prev) == 1:
if prev[0][1] - y1 > 5:
w1 = randint(y1 + 2, prev[0][1] - 4)
w2 = randint(w1 + 2, prev[0][1] - 2)
elif y2 - prev[0][2] > 5:
w1 = randint(prev[0][2] + 2, y2 - 4)
w2 = randint(w1 + 2, y2 - 2)
else:
continue
else:
w1 = randint(y1 + 2, y2 - 4)
w2 = randint(w1 + 2, y2 - 2)
l = randint(1, x2 - x1 - 1)
stop = x1 + l
for j in range(x1, x1 + l + 1):
if stop != x1 + l:
break
sq[j][w1] = sq[j][w2] = 1
for i in range(w1, w2 + 1):
if sq[j + 2][i] == 1 or sq[j + 1][w1 - 1] == 1 or sq[j + 1][w2 + 1] == 1:
stop = j
for i in range(w1, w2 + 1):
sq[x1][i] = 0
sq[stop][i] = 1
sq[x1][w1] = sq[x1][w2] = 1
holes.append(['left', w1, w2, stop])
return sq
if __name__ == '__main__':
    print(gen_square())
| StarcoderdataPython |
28272 | <gh_stars>0
import discord
import random
def filtering(message):
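    # `message` is currently unused; the bot always replies with the same
    # fixed "please use kind words" warning.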
    manage = "Instead of swearing, please use 🐝✨kind words✨🐝🍯~!😅😅"
return manage
def command():
embed = discord.Embed(title=f"๋ช
๋ น์ด ๋ชจ์", description="๊ฟ๋ฒ๋ด์ ํ์ฌ ์๋ ๊ธฐ๋ฅ๋ค์ ์ง์ํ๊ณ ์์ต๋๋ค!", color=0xf3bb76)
embed.set_thumbnail(url="https://mblogthumb-phinf.pstatic.net/MjAxODA1MTdfMjEx/MDAxNTI2NTQ3NTYzMDI0.GGFyQth1IVreeUdrVmYVopJlv8ZX2EsTQGqQ3h6ktjEg.r6jltvwy2lBUvB_Wh4M9xvxw-gwV4RHUR1AXSF-nqpMg.PNG.heekyun93/4fb137544b692e53.png?type=w800")
embed.add_field(name=f"!์ ์ ", value="`!์ ์ ๋๋ค์ (ex. !์ ์ ๋นฝํ์ํ์ )`\nํด๋น ์ ์ ์ ๋ณด๋ฅผ ์นด๋ ํ์์ผ๋ก ๋ณผ ์ ์์ต๋๋ค", inline=False)
embed.add_field(name=f"!์ฑํผ์ธ", value="`!์ฑํผ์ธ ๋ผ์ธ ์ด๋ฆ (ex. !์ฑํผ์ธ ํ ๊ฐ๋ )\n์ฑํผ์ธ๋ช
์ ๋ฐ๋์ โป๊ณต๋ฐฑ์์ดโป ์
๋ ฅํด์ฃผ์ธ์!\n์ฑํผ์ธ๋ช
์ ์ค์ฌ์ ์
๋ ฅํด๋ ๊ฒ์ ๊ฐ๋ฅํฉ๋๋ค.\n[์์]\n์์ฐ๋ ๋ฆฌ์จ ์(x) ์์ฐ๋ ๋ฆฌ์จ์(o) ์์ฐ์(o)`\nํด๋น ๋ผ์ธ์์ ์ฑํผ์ธ์ ์น๋ฅ ํ๋ณธ,\nํ
ํธ๋ฆฌ ์ ๋ณด๋ฅผ ๊ฒ์ํฉ๋๋ค.",
inline=False)
embed.add_field(name="ใ
ค", value="ใ
ค", inline=True)
embed.add_field(name=f"[๊ทธ ์ธ ์ธ๋ชจ ์์ด ๋ณด์ด์ง๋ง ์์ํ ๊ธฐ๋ฅ๋ค]", value="`์จ๊ฒจ์ง ๋ช๊ฐ์ง ์ด์คํฐ์๊ทธ๋ ๋ค!\nโป์ฑํ
์ค ์ฌํ ์์ค์ ์ญ์ ๋ ์ ์์ผ๋\n์ฃผ์ํด ์ฃผ์ธ์โป`", inline=False)
embed.add_field(name=f"์ธ์ฌ", value="`!์๋
(ex. !์๋
, !์๋
ํ์ธ์)`\n๊ฟ๋ฒ๋ด์ด ์ธ์ฌ๋ฅผ ๋ฐ์์ค๋๋ค!", inline=False)
embed.add_field(name=f"์์ฌ ๋ฉ๋ด ์ถ์ฒ", value="`!๋ฐฅ or !๋ฉ๋ด (ex. !๋ฐฅ, !๋ฉ๋ด ์ถ์ฒ์ข)`\n๋ญ ๋จน์์ง ๊ณ ๋ฏผ๋์๋์?\n์์ฌ๋ ๊ผญ ์ฑ๊ฒจ๋์ธ์!", inline=False)
embed.add_field(name="ใ
ค", value="ใ
ค", inline=True)
embed.set_footer(text="๋ฒ๊ทธ ์ ๋ณด ๋ฐ ๋ฌธ์\nhttps://github.com/NyaNyak/2021-OSS",
icon_url="https://mblogthumb-phinf.pstatic.net/MjAxODA1MTdfMjg5/MDAxNTI2NTQ3NTYzMDIz.awWFb8WW9qSk85krQsWf7GXGOShPNS5ilZyVOFyrbIUg.07pMLGfgYvN_IQPPn9JLBRRvVE8yMY_xiN4LzuIfElEg.PNG.heekyun93/4c7a1d3932a211fa.png?type=w800")
return embed
def hello(message):
    sentence = ["Hello! ", "Have a good day today ", "A day full of energy! ", "Take care of your health ", "Have you eaten yet? "]
i = random.randint(0, len(sentence)-1)
return sentence[i]
def hey(message):
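    # Replies with an embed containing only the bot's logo image.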
embed = discord.Embed(title="")
embed.set_image(url="https://mblogthumb-phinf.pstatic.net/MjAxODA1MTdfMjg5/MDAxNTI2NTQ3NTYzMDIz.awWFb8WW9qSk85krQsWf7GXGOShPNS5ilZyVOFyrbIUg.07pMLGfgYvN_IQPPn9JLBRRvVE8yMY_xiN4LzuIfElEg.PNG.heekyun93/4c7a1d3932a211fa.png?type=w800")
return embed
def garen(message):
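    # Easter egg: replies with an image of Garen (League of Legends).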
embed = discord.Embed(title="")
embed.set_image(url="https://w.namu.la/s/39d986b83774de090109bcbd0ecfdb983cc21cb29fb02fbdafbc1f8170e59d7c2dd34e70c826538e6cdd9265a9c6bd5460a09495d9623fb866dc515be68abd002b697ccc9c7c5c75f927ccc791c87c8d3d25b791fbc721dce46ff6c83dafb137")
return embed
def meal(message):
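    # Pool of Korean dish names (e.g. 돈까스 = pork cutlet, 비빔밥 = bibimbap);
    # one is picked at random as the meal recommendation.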
food = ["๋๊น์ค", "๊น๋ฐฅ", "ํ๋ฒ๊ฑฐ", "๋ณด์", "์ปต๋ผ๋ฉด", "์ผ๊ฐ๊น๋ฐฅ", "๋ก๋ณถ์ด", "์๊ผฌ์น", "์ง์ฅ๋ฉด",
"ํ์คํ", "๋ฆฌ์กฐ๋", "์ผ๊ฒน์ด", "ํผ์", "์๋๊ตญ", "์นํจ", "์ด๋ฐฅ", "๋ผ๋ฉด", "๋ญ๊ฐ๋น", "์กฑ๋ฐ",
"๊ฐ์ํ", "ํด์ฅ๊ตญ", "์๋๋ถ์ฐ๊ฐ", "์น๋งฅ", "๊น์น์ฐ๊ฐ", "๋ถ๋์ฐ๊ฐ", "๋น๋น๋ฐฅ", "๋ถ๊ณ ๊ธฐ", "๊ณฑ์ฐฝ",
"์นผ๊ตญ์", "์ค๋ ํ", "๊ฐ๋น", "์ผ๊ณํ", "์๊ตฌ์ฐ", "๋๋ฉด", "์งฌ๋ฝ", "๊ฐ๋นํ", "์๋ฌ๋", "๋์๋ฝ",
"ํ", "์๊ตญ์", "๋ง๋ผํ", "๋ฉ๋ฐ์๋ฐ", "๋ผ๋ฉ", "๋ฎ๋ฐฅ", "์ฐ๋", "๊น์น๋ณถ์๋ฐฅ", "์ค๋ฏ๋ผ์ด์ค", "์นด๋ ",
"๋ง๋", "์๋์์น", "๋ญ๋ณถ์ํ", "์ ์ก๋ณถ์", "๋์ฅ์ฐ๊ฐ", "์ ์ง๊ตญ", "์ถ์ดํ", "์ก๊ฐ์ฅ", "์ค๋ธ์ค๋ธ",
"๋ญ๋ฐ", "์ฐ๋ญ", "ํ ์คํธ", "๋ผ์ง๊ตญ๋ฐฅ", "์๋จธ๋ฆฌ๊ตญ๋ฐฅ"]
i = random.randint(0, len(food)-1)
    return food[i]
| StarcoderdataPython |
1635505 | from typing import List, Optional
from .docutils import LATEX_TAG_BEGIN_FORMAT, LATEX_TAG_END_FORMAT
from .snippetExceptions import SnippetException
class Snippet:
BEGIN_TEMPLATE = LATEX_TAG_BEGIN_FORMAT
END_TEMPLATE = LATEX_TAG_END_FORMAT
INDENT = " "
    def __init__(self, name: str, tags: Optional[List[str]] = None):
        # Avoid a mutable default argument; fall back to an empty tag list.
        self.__name = name
        self.content = ""
        self._add_tags(self.BEGIN_TEMPLATE, tags or [])
        self.open = True
@property
def name(self) -> str:
"""
name of snippet
:return: str
"""
return self.__name
def add_content(self, content_to_add: str) -> None:
"""
        append content to the snippet
:param content_to_add: content added
:return: None
"""
if self.is_close():
raise SnippetException(f"Snippet {self.name} already closed.")
self.content += content_to_add
    def _add_tags(self, template: str, tags: List[str], reverse: bool = False):
        if not tags:
            return  # joining an empty sequence would still append a stray newline
        indent_func = lambda i: self.INDENT * i
        if reverse:
            # Closing tags are emitted innermost-first with mirrored
            # indentation so that begin/end nesting lines up.
            max_i = len(tags) - 1
            indent_func = lambda i: self.INDENT * (max_i - i)
            tags = reversed(tags)
        self.content += "\n".join(indent_func(i) + template.format(tag=tag) for i, tag in enumerate(tags)) + "\n"
def close(self, tags: List[str]) -> None:
"""
close snippet
"""
self._add_tags(self.END_TEMPLATE, tags, reverse=True)
self.open = False
def is_close(self) -> bool:
"""
True if Snippet is closed
:return: bool
"""
return not self.open
def __str__(self) -> str:
return self.content
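# A minimal usage sketch (assumes the docutils templates render one tag per
# line, e.g. "\begin{<tag>}" / "\end{<tag>}"; adjust to the real formats):
#
#     snip = Snippet("intro", tags=["document", "section"])
#     snip.add_content("Hello world\n")
#     snip.close(tags=["document", "section"])
#     print(snip)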
| StarcoderdataPython |
3240237 | <gh_stars>0
"""Handle the console interface for the ClassifierNetwork package.
This package, while intended to be used with the ModularMailer project, can
be installed and used as a standalone application, which this module handles
the interface for. The intent of this design is that this package can be
installed by itself, without the ModularMailer project as a dependency, on a
machine that is optimized for training the network (e.g. a Linux box with a GPU).
"""
import argparse
import sys
import classifiernetwork.defaults as defaults
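# The identifiers below mirror the loss and activation names that Keras
# accepts as string arguments.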
SUPPORTED_OBJECTIVES = (
'mean_squared_error',
'mse',
'mean_absolute_error',
'mae',
'mean_absolute_percentage_error',
'mape',
'mean_squared_logarithmic_error',
'msle',
'squared_hinge',
'hinge',
'binary_crossentropy',
'categorical_crossentropy',
'sparse_categorical_crossentropy',
'kullback_leibler_divergence',
'kld',
'poisson',
'cosine_proximity',
)
SUPPORTED_ACTIVATIONS = (
'softmax',
'softplus',
'softsign',
'relu',
'tanh',
'sigmoid',
'hard_sigmoid',
'linear',
)
def _build_training_subparser(train_parser):
"""Create the options for the 'train' subparser"""
train_parser.add_argument(
'input_vectors', type=str,
help='Path to the numpy array of input vectors (.npy file).'
)
train_parser.add_argument(
'output_vectors', type=str,
help='path to the numpy array of output vectors (.npy file)'
)
train_parser.add_argument(
'save_name', type=str, help='Save trained network file name.'
)
train_parser.add_argument(
'-o', '--output-directory', type=str,
help='Directory for output file. Defaults to input_vectors location.'
)
# Network compilation option
compile_group = train_parser.add_argument_group(
title='Compilation options',
description='Options for the structure of the network.'
)
compile_group.add_argument(
'-i', '--hidden-size', type=int,
help='Size of the hidden layer. Defaults to geometric_mean(in, out).'
)
compile_group.add_argument(
'-a', '--activation', type=str,
default=defaults.ACTIVATION, choices=SUPPORTED_ACTIVATIONS,
help='Activation function for the hidden layer (see Keras docs).'
)
compile_group.add_argument(
'-p', '--dropout', type=float, default=defaults.DROPOUT,
help='Fraction of the input units to drop.'
)
compile_group.add_argument(
'-l', '--loss', type=str,
default=defaults.LOSS, choices=SUPPORTED_OBJECTIVES,
help='The string identifier of an optimizer (see Keras docs).'
)
# Options for the stochastic gradient descent optimizer
sgd_group = train_parser.add_argument_group(
title='Stochastic Gradient Descent optimizer (SGD) options',
description='The network is trained using a SGD optimizer.'
)
sgd_group.add_argument(
'-r', '--learning-rate', type=float, default=defaults.LEARNING_RATE,
help='Learning rate.'
)
sgd_group.add_argument(
'-m', '--momentum', type=float, default=defaults.MOMENTUM,
        help='Momentum for the SGD optimizer.'
)
sgd_group.add_argument(
'-d', '--decay', type=float, default=defaults.DECAY,
help='Learning rate decay over each update.'
)
sgd_group.add_argument(
'-n', '--nesterov', action='store_true',
help='Apply Nesterov momentum to the SGD optimizer.'
)
# Options for training the model
train_group = train_parser.add_argument_group(
title='Training options',
description='Options for how the network is to be trained.'
)
train_group.add_argument(
'-e', '--epochs', type=int, default=defaults.EPOCH,
help='The number of epochs to train the model.'
)
train_group.add_argument(
'-s', '--validation-split', type=float,
help='Fraction of the data to use as held-out validation data.'
)
train_group.add_argument(
        '-v', '--verbose', type=int,
default=defaults.VERBOSE, choices=(0, 1, 2),
help='0 for no logging, 1 for progress bar, 2 for line per epoch.'
)
train_group.add_argument(
'-b', '--batch-size', type=int,
help='Number of samples per gradient update.'
)
def argument_parser(args):
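    """Parse command-line arguments and return the populated namespace."""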
parser = argparse.ArgumentParser(
description='Trains neural networks from labeled input data.'
)
# Create subparser
subparsers = parser.add_subparsers(dest='command')
subparsers.required = True
# Parse 'train' command
train_parser = subparsers.add_parser(
'train', help='Train a neural network from the given input.'
)
_build_training_subparser(train_parser)
# Return parsed arguments
return parser.parse_args(args)
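# Example invocation of the resulting CLI (the `classifiernetwork` entry-point
# name and the file names are hypothetical):
#
#   classifiernetwork train inputs.npy outputs.npy trained_model \
#       --epochs 50 --learning-rate 0.01 --nesterov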
def main():
"""Entry point for the console script usage of this package.
Returns:
int: Error return code.
"""
args = argument_parser(sys.argv[1:])
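    # NOTE: dispatch on args.command (e.g. running training) is not implemented
    # in this module yet; parsing succeeds and the program exits cleanly.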
return 0
| StarcoderdataPython |