max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
tests/completers/test_xompletions.py | cryzed/xonsh | 0 | 6616251 | <reponame>cryzed/xonsh
from xonsh.parsers.completion_context import CommandArg, CommandContext, CompletionContext
from xonsh.completers.xompletions import complete_xonfig, complete_xontrib
def test_xonfig():
assert complete_xonfig(CompletionContext(CommandContext(
args=(CommandArg("xonfig"),), arg_index=1, prefix="-"
))) == {"-h"}
def test_xonfig_colors(monkeypatch):
monkeypatch.setattr("xonsh.tools.color_style_names", lambda: ["blue", "brown", "other"])
assert complete_xonfig(CompletionContext(CommandContext(
args=(CommandArg("xonfig"), CommandArg("colors")), arg_index=2, prefix="b"
))) == {"blue", "brown"}
def test_xontrib():
assert complete_xontrib(CompletionContext(CommandContext(
args=(CommandArg("xontrib"),), arg_index=1, prefix="l"
))) == {"list", "load"}
| from xonsh.parsers.completion_context import CommandArg, CommandContext, CompletionContext
from xonsh.completers.xompletions import complete_xonfig, complete_xontrib
def test_xonfig():
assert complete_xonfig(CompletionContext(CommandContext(
args=(CommandArg("xonfig"),), arg_index=1, prefix="-"
))) == {"-h"}
def test_xonfig_colors(monkeypatch):
monkeypatch.setattr("xonsh.tools.color_style_names", lambda: ["blue", "brown", "other"])
assert complete_xonfig(CompletionContext(CommandContext(
args=(CommandArg("xonfig"), CommandArg("colors")), arg_index=2, prefix="b"
))) == {"blue", "brown"}
def test_xontrib():
assert complete_xontrib(CompletionContext(CommandContext(
args=(CommandArg("xontrib"),), arg_index=1, prefix="l"
))) == {"list", "load"} | none | 1 | 2.317708 | 2 | |
test/TestFunctional.py | B1T0/Wochenende | 0 | 6616252 | <filename>test/TestFunctional.py
import pytest
import subprocess
import os
# import re
from itertools import combinations, product, chain
class TestFunctional:
__test__ = False
"""Tests for the overall functionality of the Wochenende pipeline.
Attributes
----------
_p_fixed_arguments : dict [str, str]
Fixed arguments that need to be set like this for testing. Value is empty if the
argument just needs to exist
_p_options : dict [str, list of str]
Optional arguments that take an input. Each list gives the inputs for testing.
_p_flags : list of str
Optional arguments that do not require an additional input.
Notes
-----
To run only a few combinations, just comment the irrelevant ones.
This test class relies on an existing slurm installation. It might be too much to run
this due to combinatorial explosion. For a simple test of installation, one can rely
on `test_installation.py`.
"""
# TODO: PE support
# TODO: Test partially successful runs, continuation and --force-restart
# TODO: Collect test run information
# TODO: Sum up test run information
_p_fixed_arguments = [
'--metagenome', 'test',
'--debug'
]
_p_options = {
'--aligner': ['bwamem', 'minimap2', 'ngmlr'],
'--readType': ['SE', 'PE'],
'--threads': ['8', '4', '8', '16', '32', '56', '80'],
'--remove_mismatching': ['1', '2', '3', '4', '5'],
}
_p_flags = [
'--fastp',
'--nextera',
'--trim_galore',
'--longread',
'--no_duplicate_removal',
'--no_prinseq',
'--no_fastqc',
'--no_abra',
'--mq30',
'--force_restart'
]
def _gen_fixed_arguments(self):
"""Returns the fixed arguments. Mainly for consistency."""
return self._p_fixed_arguments
def _gen_options(self):
"""Generates all combinations of p_options
Please don't ask how. It works. Trust me. Sorry.
"""
ds = list(chain(*[list(map(dict, combinations(self._p_options.items(), i)))
for i in range(len(self._p_options) + 1)]))
return chain(*[[list(chain(*x)) for x in product(*[[[k, v] for v in d[k]] for k in d.keys()])] for d in ds])
def _gen_flags(self):
"""Generates all combinations of p_flags"""
for length in range(len(self._p_flags) + 1):
for combination in combinations(self._p_flags, length):
yield list(combination)
def _gen_pipeline_arguments(self):
""" Generates pipeline argument strings
Yield
-----
Pipeline argument combinations for a full feature test
"""
print("\n# Arguments to test")
fixed = list(self._gen_fixed_arguments())
print(fixed)
print("# Options to test")
options = list(self._gen_options())
print(options)
print("# Flags to test")
flags = list(self._gen_flags())
# print(flags)
return [fixed + opts + flgs for opts, flgs in product(options, flags)]
def _get_refs_and_reads_paths(self):
"""Returns absolute paths of reference (.fa) and read (.fastq) test files.
Returns
-------
refs : list of os.path
absolute paths to reference file(s)
reads : list of os.path
absolute paths to read file(s)
Note
----
For testing, we provide explicit reference files. No reference from one of the
refseq_dict in the script's configuration section is used. All test files need to
be stored in /path/to/wochenende/test/data.
"""
cwd = os.getcwd()
assert cwd.endswith('test') or cwd.endswith('test/')
datadir = os.path.join(cwd, 'data')
datalist = os.listdir(datadir)
refs = [os.path.join(datadir, f) for f in datalist if f.endswith('.fa')]
reads = [os.path.join(datadir, f) for f in datalist if f.endswith('_R1.fastq')]
return refs, reads
def _get_fixed_files_paths(self):
"""Returns singleton list of list of absolute paths of necessary files."""
cwd = os.getcwd()
assert cwd.endswith('test') or cwd.endswith('test/')
main_we_dir = os.path.join(cwd, '..')
files = [os.path.join(main_we_dir, f) for f in os.listdir(main_we_dir)]
return [files]
@pytest.fixture()
def setup_tmpdir(self, files, read, reference, tmpdir):
"""Sets up the temporary working directory.
Creates the directory and symlinks the necessary files into it.
Parameters
----------
files : list of str
absolute paths to necessary files
read : str
absolute path to read file
reference : str
absolute path to reference file
tmpdir : py.path.local
fixture that creates a temporary directory
Returns
-------
tmpdir : py.path.local
path to the temporary directory
"""
os.symlink(reference, os.path.join(tmpdir, 'ref.fa'))
os.symlink(read, os.path.join(tmpdir, 'reads_R1.fastq'))
if os.path.exists(read.replace('_R1', '_R2')):
os.symlink(read.replace('_R1', '_R2'), os.path.join(tmpdir, 'reads_R2.fastq'))
for f in files:
# print(f.split('/'))
os.symlink(f, os.path.join(tmpdir, f.split('/')[-1]))
# main_we_path = os.path.join(os.getcwd(), "..", "..")
# print(f'##### {main_we_path} #####')
return tmpdir
def pytest_generate_tests(self, metafunc):
"""Main test generator"""
refs, reads = self._get_refs_and_reads_paths()
files = self._get_fixed_files_paths()
if 'files' in metafunc.fixturenames:
metafunc.parametrize('files', files)
if 'reference' in metafunc.fixturenames:
metafunc.parametrize('reference', refs)
if 'read' in metafunc.fixturenames:
metafunc.parametrize('read', reads)
if 'pipeline_arguments' in metafunc.fixturenames:
metafunc.parametrize(argnames='pipeline_arguments',
argvalues=self._gen_pipeline_arguments())
@pytest.mark.slow
def test_pipeline_call(self, setup_tmpdir, pipeline_arguments):
"""Main pipeline test using subprocesses"""
# change to temporary directory
os.chdir(setup_tmpdir)
print(f'\n# Running Tests in: {os.getcwd()}')
print(f'# Seeing files: {os.listdir(setup_tmpdir)}')
# Create slurm job script
slurm_template = os.path.join('test', 'slurm-template.sh')
with open(slurm_template, 'r') as f:
slurm_file = f.read()
slurm_cmd = ['python3', 'run_Wochenende.py'] + list(pipeline_arguments) + \
['$fastq']
slurm_file = slurm_file + ' '.join(slurm_cmd) + '\n wait'
sbatch_test_filename = 'run_Wochenende_SLURM_test.sh'
with open(sbatch_test_filename, 'w') as f:
f.write(slurm_file)
f.flush()
# print(slurm_file)
# Start slurm subprocess
cmd = ['srun', sbatch_test_filename, 'reads_R1.fastq']
# print(' '.join(cmd))
proc = subprocess.run(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
# shell=True # Seems to be necessary (?)
)
stdout = str(proc.stdout).replace("\\n", "\n\t")
stderr = str(proc.stderr).replace("\\n", "\n\t")
print(f'# Return Code: {str(proc.returncode)}')
print(f'# stdout:\n\t{stdout}')
print(f'# stderr:\n\t{stderr}')
assert proc.returncode == 0
| <filename>test/TestFunctional.py
import pytest
import subprocess
import os
# import re
from itertools import combinations, product, chain
class TestFunctional:
__test__ = False
"""Tests for the overall functionality of the Wochenende pipeline.
Attributes
----------
_p_fixed_arguments : dict [str, str]
Fixed arguments that need to be set like this for testing. Value is empty if the
argument just needs to exist
_p_options : dict [str, list of str]
Optional arguments that take an input. Each list gives the inputs for testing.
_p_flags : list of str
Optional arguments that do not require an additional input.
Notes
-----
To run only a few combinations, just comment the irrelevant ones.
This test class relies on an existing slurm installation. It might be too much to run
this due to combinatorial explosion. For a simple test of installation, one can rely
on `test_installation.py`.
"""
# TODO: PE support
# TODO: Test partially successful runs, continuation and --force-restart
# TODO: Collect test run information
# TODO: Sum up test run information
_p_fixed_arguments = [
'--metagenome', 'test',
'--debug'
]
_p_options = {
'--aligner': ['bwamem', 'minimap2', 'ngmlr'],
'--readType': ['SE', 'PE'],
'--threads': ['8', '4', '8', '16', '32', '56', '80'],
'--remove_mismatching': ['1', '2', '3', '4', '5'],
}
_p_flags = [
'--fastp',
'--nextera',
'--trim_galore',
'--longread',
'--no_duplicate_removal',
'--no_prinseq',
'--no_fastqc',
'--no_abra',
'--mq30',
'--force_restart'
]
def _gen_fixed_arguments(self):
"""Returns the fixed arguments. Mainly for consistency."""
return self._p_fixed_arguments
def _gen_options(self):
"""Generates all combinations of p_options
Please don't ask how. It works. Trust me. Sorry.
"""
ds = list(chain(*[list(map(dict, combinations(self._p_options.items(), i)))
for i in range(len(self._p_options) + 1)]))
return chain(*[[list(chain(*x)) for x in product(*[[[k, v] for v in d[k]] for k in d.keys()])] for d in ds])
def _gen_flags(self):
"""Generates all combinations of p_flags"""
for length in range(len(self._p_flags) + 1):
for combination in combinations(self._p_flags, length):
yield list(combination)
def _gen_pipeline_arguments(self):
""" Generates pipeline argument strings
Yield
-----
Pipeline argument combinations for a full feature test
"""
print("\n# Arguments to test")
fixed = list(self._gen_fixed_arguments())
print(fixed)
print("# Options to test")
options = list(self._gen_options())
print(options)
print("# Flags to test")
flags = list(self._gen_flags())
# print(flags)
return [fixed + opts + flgs for opts, flgs in product(options, flags)]
def _get_refs_and_reads_paths(self):
"""Returns absolute paths of reference (.fa) and read (.fastq) test files.
Returns
-------
refs : list of os.path
absolute paths to reference file(s)
reads : list of os.path
absolute paths to read file(s)
Note
----
For testing, we provide explicit reference files. No reference from one of the
refseq_dict in the script's configuration section is used. All test files need to
be stored in /path/to/wochenende/test/data.
"""
cwd = os.getcwd()
assert cwd.endswith('test') or cwd.endswith('test/')
datadir = os.path.join(cwd, 'data')
datalist = os.listdir(datadir)
refs = [os.path.join(datadir, f) for f in datalist if f.endswith('.fa')]
reads = [os.path.join(datadir, f) for f in datalist if f.endswith('_R1.fastq')]
return refs, reads
def _get_fixed_files_paths(self):
"""Returns singleton list of list of absolute paths of necessary files."""
cwd = os.getcwd()
assert cwd.endswith('test') or cwd.endswith('test/')
main_we_dir = os.path.join(cwd, '..')
files = [os.path.join(main_we_dir, f) for f in os.listdir(main_we_dir)]
return [files]
@pytest.fixture()
def setup_tmpdir(self, files, read, reference, tmpdir):
"""Sets up the temporary working directory.
Creates the directory and symlinks the necessary files into it.
Parameters
----------
files : list of str
absolute paths to necessary files
read : str
absolute path to read file
reference : str
absolute path to reference file
tmpdir : py.path.local
fixture that creates a temporary directory
Returns
-------
tmpdir : py.path.local
path to the temporary directory
"""
os.symlink(reference, os.path.join(tmpdir, 'ref.fa'))
os.symlink(read, os.path.join(tmpdir, 'reads_R1.fastq'))
if os.path.exists(read.replace('_R1', '_R2')):
os.symlink(read.replace('_R1', '_R2'), os.path.join(tmpdir, 'reads_R2.fastq'))
for f in files:
# print(f.split('/'))
os.symlink(f, os.path.join(tmpdir, f.split('/')[-1]))
# main_we_path = os.path.join(os.getcwd(), "..", "..")
# print(f'##### {main_we_path} #####')
return tmpdir
def pytest_generate_tests(self, metafunc):
"""Main test generator"""
refs, reads = self._get_refs_and_reads_paths()
files = self._get_fixed_files_paths()
if 'files' in metafunc.fixturenames:
metafunc.parametrize('files', files)
if 'reference' in metafunc.fixturenames:
metafunc.parametrize('reference', refs)
if 'read' in metafunc.fixturenames:
metafunc.parametrize('read', reads)
if 'pipeline_arguments' in metafunc.fixturenames:
metafunc.parametrize(argnames='pipeline_arguments',
argvalues=self._gen_pipeline_arguments())
@pytest.mark.slow
def test_pipeline_call(self, setup_tmpdir, pipeline_arguments):
"""Main pipeline test using subprocesses"""
# change to temporary directory
os.chdir(setup_tmpdir)
print(f'\n# Running Tests in: {os.getcwd()}')
print(f'# Seeing files: {os.listdir(setup_tmpdir)}')
# Create slurm job script
slurm_template = os.path.join('test', 'slurm-template.sh')
with open(slurm_template, 'r') as f:
slurm_file = f.read()
slurm_cmd = ['python3', 'run_Wochenende.py'] + list(pipeline_arguments) + \
['$fastq']
slurm_file = slurm_file + ' '.join(slurm_cmd) + '\n wait'
sbatch_test_filename = 'run_Wochenende_SLURM_test.sh'
with open(sbatch_test_filename, 'w') as f:
f.write(slurm_file)
f.flush()
# print(slurm_file)
# Start slurm subprocess
cmd = ['srun', sbatch_test_filename, 'reads_R1.fastq']
# print(' '.join(cmd))
proc = subprocess.run(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
# shell=True # Seems to be necessary (?)
)
stdout = str(proc.stdout).replace("\\n", "\n\t")
stderr = str(proc.stderr).replace("\\n", "\n\t")
print(f'# Return Code: {str(proc.returncode)}')
print(f'# stdout:\n\t{stdout}')
print(f'# stderr:\n\t{stderr}')
assert proc.returncode == 0
| en | 0.643305 | # import re Tests for the overall functionality of the Wochenende pipeline. Attributes ---------- _p_fixed_arguments : dict [str, str] Fixed arguments that need to be set like this for testing. Value is empty if the argument just needs to exist _p_options : dict [str, list of str] Optional arguments that take an input. Each list gives the inputs for testing. _p_flags : list of str Optional arguments that do not require an additional input. Notes ----- To run only a few combinations, just comment the irrelevant ones. This test class relies on an existing slurm installation. It might be too much to run this due to combinatorial explosion. For a simple test of installation, one can rely on `test_installation.py`. # TODO: PE support # TODO: Test partially successful runs, continuation and --force-restart # TODO: Collect test run information # TODO: Sum up test run information Returns the fixed arguments. Mainly for consistency. Generates all combinations of p_options Please don't ask how. It works. Trust me. Sorry. Generates all combinations of p_flags Generates pipeline argument strings Yield ----- Pipeline argument combinations for a full feature test # Arguments to test") # print(flags) Returns absolute paths of reference (.fa) and read (.fastq) test files. Returns ------- refs : list of os.path absolute paths to reference file(s) reads : list of os.path absolute paths to read file(s) Note ---- For testing, we provide explicit reference files. No reference from one of the refseq_dict in the script's configuration section is used. All test files need to be stored in /path/to/wochenende/test/data. Returns singleton list of list of absolute paths of necessary files. Sets up the temporary working directory. Creates the directory and symlinks the necessary files into it. 
Parameters ---------- files : list of str absolute paths to necessary files read : str absolute path to read file reference : str absolute path to reference file tmpdir : py.path.local fixture that creates a temporary directory Returns ------- tmpdir : py.path.local path to the temporary directory # print(f.split('/')) # main_we_path = os.path.join(os.getcwd(), "..", "..") # print(f'##### {main_we_path} #####') Main test generator Main pipeline test using subprocesses # change to temporary directory # Running Tests in: {os.getcwd()}') # Create slurm job script # print(slurm_file) # Start slurm subprocess # print(' '.join(cmd)) # shell=True # Seems to be necessary (?) | 2.508447 | 3 |
dmf-parser.py | simondotm/dmf_player | 3 | 6616253 | #!/usr/bin/env python
# python script to convert & process DMF files for SN76489 PSG
# by simondotm 2017
# Released under MIT license
# http://deflemask.com/DMF_SPECS.txt
import zlib
import struct
import sys
import binascii
import math
from os.path import basename
if (sys.version_info > (3, 0)):
from io import BytesIO as ByteBuffer
else:
from StringIO import StringIO as ByteBuffer
#-----------------------------------------------------------------------------
class FatalError(Exception):
pass
class DmfStream:
dmf_filename = ''
# constructor - pass in the filename of the DMF
def __init__(self, dmf_filename):
self.dmf_filename = dmf_filename
print " Loading DMF file : '" + dmf_filename + "'"
# open the dmf file and parse it
dmf_file = open(dmf_filename, 'rb')
dmf_data = dmf_file.read()
# Store the DMF data and validate it
self.dmf_data = ByteBuffer(dmf_data)
dmf_file.close()
self.dmf_data.seek(0)
unpacked = zlib.decompress(dmf_data)
self.dmf_data = ByteBuffer(unpacked)
self.dmf_data.seek(0, 2)
size = self.dmf_data.tell()
self.dmf_data.seek(0)
print " DMF file loaded : '" + dmf_filename + "' (" + str(size) + " bytes)"
bin_file = open("dmf.bin", 'wb')
bin_file.write(unpacked)
bin_file.close()
def parse(self):
# Save the current position of the VGM data
original_pos = self.dmf_data.tell()
# Seek to the start of the file
self.dmf_data.seek(0)
data = self.dmf_data.read(16)
header = data.decode("utf-8")
#header = struct.unpack( 's', data )
print header
# Perform basic validation on the given file by checking for the header
if header != ".DelekDefleMask.":
# Could not find the header string
print "ERROR: not a DMF file"
return
def getByte():
return struct.unpack('B', self.dmf_data.read(1) )[0]
def getShort():
return struct.unpack('H', self.dmf_data.read(2) )[0]
def getInt():
return struct.unpack('i', self.dmf_data.read(4) )[0]
def getString(size):
s = self.dmf_data.read(size)
return s.decode("utf-8")
def skipBytes(n):
self.dmf_data.read(n)
version = getByte()
print " DMF version - " + str(version)
if version != 24:
print "ERROR: can only parse DMF version 24 (Deflemask v12)"
return
system = getByte()
print " DMF system - " + str(system)
# must be 3 - this script only parses SMS tunes (SYSTEM_TOTAL_CHANNELS=4)
if system != 3:
print "ERROR: Not an SMS DMF track"
return
#//VISUAL INFORMATION
song_name = getString( getByte() )
song_author = getString( getByte() )
highlight_A = getByte()
highlight_B = getByte()
#//MODULE INFORMATION
time_base = getByte()
tick_time_1 = getByte()
tick_time_2 = getByte()
frames_mode = getByte() # (0 = PAL, 1 = NTSC)
custom_hz = getByte() # (If set to 1, NTSC or PAL is ignored)
custom_hz_1 = getByte()
custom_hz_2 = getByte()
custom_hz_3 = getByte()
total_rows_per_pattern = getInt()
total_rows_in_pattern_matrix = getByte()
print "Song name: " + song_name
print "Song author: " + song_author
print "Time base: " + str(time_base)
print "total_rows_per_pattern " + str(total_rows_per_pattern)
print "total_rows_in_pattern_matrix " + str(total_rows_in_pattern_matrix)
SYSTEM_TOTAL_CHANNELS = 4
pattern_size_0 = total_rows_per_pattern
pattern_size_n = total_rows_per_pattern*2+total_rows_per_pattern
pattern_size_t = pattern_size_0+pattern_size_n*3
print "size of VGM pattern " + str(pattern_size_t) + " bytes"
total_size_t = total_rows_in_pattern_matrix * pattern_size_t * SYSTEM_TOTAL_CHANNELS
print "size of VGM song " + str(total_size_t) + " bytes"
pattern_matrix_array = []
#//PATTERN MATRIX VALUES (A matrix of SYSTEM_TOTAL_CHANNELS x TOTAL_ROWS_IN_PATTERN_MATRIX)
for c in range(0, SYSTEM_TOTAL_CHANNELS):
print "Reading channel " + str(c) + " pattern matrix (" + str(total_rows_in_pattern_matrix) + " rows)"
pattern_matrix = bytearray()
o = ""
for r in range(0, total_rows_in_pattern_matrix):
pattern_id = getByte()
o += " " + str(pattern_id)
pattern_matrix.append( struct.pack('B', pattern_id) )
print o
pattern_matrix_array.append(pattern_matrix)
#//INSTRUMENTS DATA (.DMP format is similar to this part, but there are some discrepancies, please read DMP_Specs.txt for more details)
total_instruments = getByte()
print "total_instruments " + str(total_instruments)
for i in range(0, total_instruments):
print "Reading instrument " + str(i)
instrument_name = getString( getByte() )
print " instrument_name '" + instrument_name + "'"
instrument_mode = getByte() # (0 = STANDARD INS, 1 = FM INS)
if instrument_mode != 0:
print "ERROR: FM instruments not supported on SMS"
# todo, should skip remaining data
else:
#//VOLUME MACRO
envelope_size = getByte() # 1 Byte: ENVELOPE_SIZE (0 - 127)
print " volume envelope size " + str(envelope_size)
# Repeat this ENVELOPE_SIZE times
for e in range(0, envelope_size):
envelope_value = getInt() # 4 Bytes: ENVELOPE_VALUE
if envelope_size > 0:
loop_position = getByte() # 1 Byte: LOOP_POSITION (-1 = NO LOOP)
#//ARPEGGIO MACRO
envelope_size = getByte() # 1 Byte: ENVELOPE_SIZE (0 - 127)
print " arpeggio envelope size " + str(envelope_size)
# Repeat this ENVELOPE_SIZE times
for e in range(0, envelope_size):
envelope_value = getInt() # 4 Bytes: ENVELOPE_VALUE (signed int, offset=12)
if envelope_size > 0:
loop_position = getByte() # 1 Byte: LOOP_POSITION (-1 = NO LOOP)
macro_mode = getByte() # 1 Byte: ARPEGGIO MACRO MODE (0 = Normal, 1 = Fixed)
#//DUTY/NOISE MACRO
envelope_size = getByte() # 1 Byte: ENVELOPE_SIZE (0 - 127)
print " duty/noise envelope size " + str(envelope_size)
# Repeat this ENVELOPE_SIZE times
for e in range(0, envelope_size):
envelope_value = getInt() # 4 Bytes: ENVELOPE_VALUE
if envelope_size > 0:
loop_position = getByte() # 1 Byte: LOOP_POSITION (-1 = NO LOOP)
#//WAVETABLE MACRO
envelope_size = getByte() # 1 Byte: ENVELOPE_SIZE (0 - 127)
print " wavetable envelope size " + str(envelope_size)
# Repeat this ENVELOPE_SIZE times
for e in range(0, envelope_size):
envelope_value = getInt() # 4 Bytes: ENVELOPE_VALUE
if envelope_size > 0:
loop_position = getByte() # 1 Byte: LOOP_POSITION (-1 = NO LOOP)
# per system data, only present for C64
#//END OF INSTRUMENTS DATA
#//WAVETABLES DATA
total_wavetables = getByte()
if total_wavetables != 0:
print "ERROR: unexpected wavetables data"
return
header_data_size = self.dmf_data.tell()
print "size of header is " + str(header_data_size) + " bytes"
#//PATTERNS DATA
for c in range(0, SYSTEM_TOTAL_CHANNELS):
print "Reading patterns for channel " + str(c)
CHANNEL_EFFECTS_COLUMNS_COUNT = getByte()
print " CHANNEL_EFFECTS_COLUMNS_COUNT " + str(CHANNEL_EFFECTS_COLUMNS_COUNT)
note_table = [ "", "C#", "D-", "D#", "E-", "F-", "F#", "G-", "G#", "A-", "A#", "B-", "C-"]
for n in range(0, total_rows_in_pattern_matrix):
print " reading pattern matrix " + str(n)
for r in range(0, total_rows_per_pattern):
note = getShort()
octave = getShort()
#//Note values:
#//01 C#
#//02 D-
#//03 D#
#//04 E-
#//05 F-
#//06 F#
#//07 G-
#//08 G#
#//09 A-
#//10 A#
#//11 B-
#//12 C-
#//Special cases:
#//Note = 0 and octave = 0 means empty.
#//Note = 100 means NOTE OFF, no matter what is inside the octave value.
volume = getShort() #Volume for this index (-1 = Empty)
o = " pattern row " + str(r)
if note == 100:
o += ", note OFF"
else:
if note != 0 and octave != 0:
o += ", note " + note_table[note] + str(octave) #str(note)
#o += ", octave " + str(octave)
else:
o += ", note ---"
if volume < 65535:
o += ", volume " + str(volume)
# effects
# http://battleofthebits.org/lyceum/View/DefleMask+Tracker+Effects+Commands/#SEGA Master System (SN76489)
# These are the effects commands available for the 6 chips supported thus far by DefleMask Tracker.
# The ones that begin with a 1 are always system-specific (also 2 for the SN76489 and SEGA PCM) so make sure you do not mix them up after switching systems!
# 00xy - Arpeggio; fast note shifting in half steps.
# x = Number of half steps from root note for first shift
# y = Number of half steps from root note for second shift
#
# Ex: 037 = Minor chord. 047 = Major chord.
# View article on arps for more examples.
#
#
# 01xx - Portamento up; smooth pitch glide up.
# 02xx - Portamento down; smooth pitch glide down.
# If xx > 00: Speed
# If xx = 00: Off
#
#
# 03xx - Glissando; pitch glide to next note.
# If xx > 00: Speed
# If xx = 00: Off
#
#
# 04xy - Vibrato; pitch vibration.
# If x > 0: Speed
# If x = 0: Off
# y = Depth
#
# Overridden by YMU759; see below.
#
#
# 05xy - Glissando + Volume slide; see Axy below.
# Continues previous 03xx effect without modifying it.
#
#
# 06xy - Vibrato + Volume slide; see Axy below.
# Continued previous 04xy effect without modifying it.
#
#
# 07xy - Tremolo; volume tremor.
# If x > 0: Speed
# If x = 0: Off
# y = Depth
#
#
# 08xy - L/R output setting.
# If x = 0: Left channel output off
# If x = 1: Left channel output on
# If y = 0: Right channel output off
# If y = 1: Right channel output on
#
# Overridden by HuC6280; see below.
#
#
# 09xx - Speed 1 setting; see 0Fxx below.
# If xx = 01-20: Ticks per row for odd rows
#
#
# 0Axy - Volume slide.
# If x = 0 & y = 0: Halt slide
# If x = 0 & y > 0: Volume slide up x ticks depth
# If x > 0 & y = 0: Volume slide down y ticks depth
#
# Note: Same parameters for effects 05xy and 06xy above.
#
#
# 0Bxx - Jump to frame.
# xx = Destination frame number
#
#
# 0Cxx - Retrigger, works only for current row.
# xx = Rate in ticks
#
#
# 0Dxx - Skip to next frame at row xx.
# xx = Destination row number
#
#
# 0Fxx - Speed 2 setting; see 09xx above.
# If xx = 01-20: Ticks per row for even rows
#
#
# E1xy - Note slide up
# E2xy - Note slide down
# x = speed of slide
# y = semitones to slide
#
#
# E5xx - Channel fine pitch setting.
# If xx = 80: Default
# If xx > 80: Increase pitch
# If xx < 80: Decrease pitch
#
#
# EBxx - Set sample bank to xx.
# If xx = 00-0B: Sample bank 0 to 11 is used.
# If xx > 0B: nothin'
#
#
# ECxx - Delayed note cut.
# xx = number of ticks to delay
#
#
# EDxx - Note delay.
# xx = number of ticks to delay
#
#
# EFxx - Global fine pitch setting.
# If xx = 80: Default
# If xx > 80: Increase pitch
# If xx < 80: Decrease pitch
# 20xy - PSG noise channel setting.
# If x = 0: 3-pitch fixed noise
# If x > 0: Variable-pitch noise
# If y = 0: Periodic noise
# If y > 0: White noise
# This effect is also available when the current system is set to Genesis, via the PSG channels.
for fx in range(0, CHANNEL_EFFECTS_COLUMNS_COUNT ):
effect_code = getShort() # Effect Code for this index (-1 = Empty)
effect_value = getShort() # Effect Value for this index (-1 = Empty)
if effect_code < 65535:
o+= ", effect code " + str(effect_code)
if effect_value < 65535:
o+= ", effect val " + str(effect_value)
instrument = getShort() # Instrument for this index (-1 = Empty)
if instrument < 65535:
o += ", instrument " + str(instrument)
print o
#print " pattern row " + str(r) + ", instrument " + str(instrument) + ", note " + str(note) + ", octave " + str(octave) + ", volume " + str(volume)
pattern_data_size = self.dmf_data.tell() - header_data_size
print "size of pattern data is " + str(pattern_data_size) + " bytes"
# //PCM SAMPLES DATA
TOTAL_SAMPLES = getByte()
if TOTAL_SAMPLES != 0:
print "ERROR: Unexpected samples"
return
# //END OF DMF FORMAT
print "All parsed."
#------------------------------------------------------------------------------------------
# Main
#------------------------------------------------------------------------------------------
# for testing
my_command_line = None
if False:
# for testing...
my_command_line = 'vgmconverter "' + filename + '" -t bbc -q 50 -o "test.vgm"'
#------------------------------------------------------------------------------------------
if my_command_line != None:
argv = my_command_line.split()
else:
argv = sys.argv
argc = len(argv)
if argc < 2:
print "DMF Parser Utility for DMF files based on SMS TI SN76849 programmable sound chips"
print ""
print " Usage:"
print " dmf-parser <dmffile>"
print ""
print " where:"
print " <dmffile> is the source DMF file to be processed. Wildcards are not yet supported."
print ""
print " options:"
exit()
# pre-process argv to merge quoted arguments
argi = 0
inquotes = False
outargv = []
quotedarg = []
#print argv
for s in argv:
#print "s=" + s
#print "quotedarg=" + str(quotedarg)
if s.startswith('"') and s.endswith('"'):
outargv.append(s[1:-1])
continue
if not inquotes and s.startswith('"'):
inquotes = True
quotedarg.append(s[1:] + ' ')
continue
if inquotes and s.endswith('"'):
inquotes = False
quotedarg.append(s[:-1])
outargv.append("".join(quotedarg))
quotedarg = []
continue
if inquotes:
quotedarg.append(s + ' ')
continue
outargv.append(s)
if inquotes:
print "Error parsing command line " + str(" ".join(argv))
exit()
argv = outargv
# validate source file
source_filename = None
if argv[1][0] != '-':
source_filename = argv[1]
# load the DMF
if source_filename == None:
print "ERROR: No source <filename> provided."
exit()
dmf_stream = DmfStream(source_filename)
dmf_stream.parse()
# all done
print ""
print "Processing complete."
| #!/usr/bin/env python
# python script to convert & process DMF files for SN76489 PSG
# by simondotm 2017
# Released under MIT license
# http://deflemask.com/DMF_SPECS.txt
import zlib
import struct
import sys
import binascii
import math
from os.path import basename
if (sys.version_info > (3, 0)):
from io import BytesIO as ByteBuffer
else:
from StringIO import StringIO as ByteBuffer
#-----------------------------------------------------------------------------
class FatalError(Exception):
pass
class DmfStream:
    """Loader/parser for DefleMask .dmf module files.

    Only SMS (SN76489) modules saved in DMF format version 24 are
    supported.  The file on disk is a zlib stream; the constructor
    inflates it into an in-memory buffer and parse() walks that buffer
    strictly sequentially, printing a human-readable dump as it goes.
    Layout reference: http://deflemask.com/DMF_SPECS.txt
    NOTE(review): this class uses Python 2 syntax (print statements).
    """
    # Class-level default; shadowed by the instance attribute in __init__.
    dmf_filename = ''
    # constructor - pass in the filename of the DMF
    def __init__(self, dmf_filename):
        """Load *dmf_filename* and inflate it, ready for parse().

        Side effects: prints progress and writes the inflated stream to
        'dmf.bin' in the current directory (debugging aid).
        """
        self.dmf_filename = dmf_filename
        print " Loading DMF file : '" + dmf_filename + "'"
        # open the dmf file and parse it
        dmf_file = open(dmf_filename, 'rb')
        dmf_data = dmf_file.read()
        # Store the DMF data and validate it
        self.dmf_data = ByteBuffer(dmf_data)
        dmf_file.close()
        self.dmf_data.seek(0)
        # DMF files are zlib-compressed; inflate before parsing.
        unpacked = zlib.decompress(dmf_data)
        self.dmf_data = ByteBuffer(unpacked)
        # Seek to the end to measure the inflated size, then rewind.
        self.dmf_data.seek(0, 2)
        size = self.dmf_data.tell()
        self.dmf_data.seek(0)
        print " DMF file loaded : '" + dmf_filename + "' (" + str(size) + " bytes)"
        # Debug aid: dump the inflated stream next to the script.
        bin_file = open("dmf.bin", 'wb')
        bin_file.write(unpacked)
        bin_file.close()
    def parse(self):
        """Walk the inflated DMF stream and print a readable dump.

        Returns early (after printing an error) on a wrong magic string,
        unsupported version, non-SMS system, or unexpected wavetable /
        sample data.  All reads are sequential; nothing is returned.
        """
        # Save the current position of the VGM data
        original_pos = self.dmf_data.tell()
        # Seek to the start of the file
        self.dmf_data.seek(0)
        # 16-byte magic string identifies a DefleMask module.
        data = self.dmf_data.read(16)
        header = data.decode("utf-8")
        #header = struct.unpack( 's', data )
        print header
        # Perform basic validation on the given file by checking for the header
        if header != ".DelekDefleMask.":
            # Could not find the header string
            print "ERROR: not a DMF file"
            return
        # Little helper closures reading primitive values off the stream.
        def getByte():
            return struct.unpack('B', self.dmf_data.read(1) )[0]
        def getShort():
            return struct.unpack('H', self.dmf_data.read(2) )[0]
        def getInt():
            return struct.unpack('i', self.dmf_data.read(4) )[0]
        def getString(size):
            s = self.dmf_data.read(size)
            return s.decode("utf-8")
        def skipBytes(n):
            self.dmf_data.read(n)
        version = getByte()
        print " DMF version - " + str(version)
        if version != 24:
            print "ERROR: can only parse DMF version 24 (Deflemask v12)"
            return
        system = getByte()
        print " DMF system - " + str(system)
        # must be 3 - this script only parses SMS tunes (SYSTEM_TOTAL_CHANNELS=4)
        if system != 3:
            print "ERROR: Not an SMS DMF track"
            return
        #//VISUAL INFORMATION
        song_name = getString( getByte() )
        song_author = getString( getByte() )
        highlight_A = getByte()
        highlight_B = getByte()
        #//MODULE INFORMATION
        time_base = getByte()
        tick_time_1 = getByte()
        tick_time_2 = getByte()
        frames_mode = getByte() # (0 = PAL, 1 = NTSC)
        custom_hz = getByte() # (If set to 1, NTSC or PAL is ignored)
        custom_hz_1 = getByte()
        custom_hz_2 = getByte()
        custom_hz_3 = getByte()
        total_rows_per_pattern = getInt()
        total_rows_in_pattern_matrix = getByte()
        print "Song name: " + song_name
        print "Song author: " + song_author
        print "Time base: " + str(time_base)
        print "total_rows_per_pattern " + str(total_rows_per_pattern)
        print "total_rows_in_pattern_matrix " + str(total_rows_in_pattern_matrix)
        SYSTEM_TOTAL_CHANNELS = 4
        # Estimate of the eventual VGM data size for this module.
        pattern_size_0 = total_rows_per_pattern
        pattern_size_n = total_rows_per_pattern*2+total_rows_per_pattern
        pattern_size_t = pattern_size_0+pattern_size_n*3
        print "size of VGM pattern " + str(pattern_size_t) + " bytes"
        total_size_t = total_rows_in_pattern_matrix * pattern_size_t * SYSTEM_TOTAL_CHANNELS
        print "size of VGM song " + str(total_size_t) + " bytes"
        pattern_matrix_array = []
        #//PATTERN MATRIX VALUES (A matrix of SYSTEM_TOTAL_CHANNELS x TOTAL_ROWS_IN_PATTERN_MATRIX)
        for c in range(0, SYSTEM_TOTAL_CHANNELS):
            print "Reading channel " + str(c) + " pattern matrix (" + str(total_rows_in_pattern_matrix) + " rows)"
            pattern_matrix = bytearray()
            o = ""
            for r in range(0, total_rows_in_pattern_matrix):
                pattern_id = getByte()
                o += " " + str(pattern_id)
                # NOTE(review): bytearray.append expects an int but
                # struct.pack returns a 1-byte string - confirm this works
                # on the targeted Python 2 runtime.
                pattern_matrix.append( struct.pack('B', pattern_id) )
            print o
            pattern_matrix_array.append(pattern_matrix)
        #//INSTRUMENTS DATA (.DMP format is similar to this part, but there are some discrepancies, please read DMP_Specs.txt for more details)
        total_instruments = getByte()
        print "total_instruments " + str(total_instruments)
        for i in range(0, total_instruments):
            print "Reading instrument " + str(i)
            instrument_name = getString( getByte() )
            print " instrument_name '" + instrument_name + "'"
            instrument_mode = getByte() # (0 = STANDARD INS, 1 = FM INS)
            if instrument_mode != 0:
                print "ERROR: FM instruments not supported on SMS"
                # todo, should skip remaining data
            else:
                # Four macro tables follow; values are read and discarded.
                #//VOLUME MACRO
                envelope_size = getByte() # 1 Byte: ENVELOPE_SIZE (0 - 127)
                print " volume envelope size " + str(envelope_size)
                # Repeat this ENVELOPE_SIZE times
                for e in range(0, envelope_size):
                    envelope_value = getInt() # 4 Bytes: ENVELOPE_VALUE
                if envelope_size > 0:
                    loop_position = getByte() # 1 Byte: LOOP_POSITION (-1 = NO LOOP)
                #//ARPEGGIO MACRO
                envelope_size = getByte() # 1 Byte: ENVELOPE_SIZE (0 - 127)
                print " arpeggio envelope size " + str(envelope_size)
                # Repeat this ENVELOPE_SIZE times
                for e in range(0, envelope_size):
                    envelope_value = getInt() # 4 Bytes: ENVELOPE_VALUE (signed int, offset=12)
                if envelope_size > 0:
                    loop_position = getByte() # 1 Byte: LOOP_POSITION (-1 = NO LOOP)
                macro_mode = getByte() # 1 Byte: ARPEGGIO MACRO MODE (0 = Normal, 1 = Fixed)
                #//DUTY/NOISE MACRO
                envelope_size = getByte() # 1 Byte: ENVELOPE_SIZE (0 - 127)
                print " duty/noise envelope size " + str(envelope_size)
                # Repeat this ENVELOPE_SIZE times
                for e in range(0, envelope_size):
                    envelope_value = getInt() # 4 Bytes: ENVELOPE_VALUE
                if envelope_size > 0:
                    loop_position = getByte() # 1 Byte: LOOP_POSITION (-1 = NO LOOP)
                #//WAVETABLE MACRO
                envelope_size = getByte() # 1 Byte: ENVELOPE_SIZE (0 - 127)
                print " wavetable envelope size " + str(envelope_size)
                # Repeat this ENVELOPE_SIZE times
                for e in range(0, envelope_size):
                    envelope_value = getInt() # 4 Bytes: ENVELOPE_VALUE
                if envelope_size > 0:
                    loop_position = getByte() # 1 Byte: LOOP_POSITION (-1 = NO LOOP)
                # per system data, only present for C64
        #//END OF INSTRUMENTS DATA
        #//WAVETABLES DATA
        total_wavetables = getByte()
        if total_wavetables != 0:
            print "ERROR: unexpected wavetables data"
            return
        header_data_size = self.dmf_data.tell()
        print "size of header is " + str(header_data_size) + " bytes"
        #//PATTERNS DATA
        for c in range(0, SYSTEM_TOTAL_CHANNELS):
            print "Reading patterns for channel " + str(c)
            CHANNEL_EFFECTS_COLUMNS_COUNT = getByte()
            print " CHANNEL_EFFECTS_COLUMNS_COUNT " + str(CHANNEL_EFFECTS_COLUMNS_COUNT)
            # Index 1..12 maps the DMF note value to a display name.
            note_table = [ "", "C#", "D-", "D#", "E-", "F-", "F#", "G-", "G#", "A-", "A#", "B-", "C-"]
            for n in range(0, total_rows_in_pattern_matrix):
                print " reading pattern matrix " + str(n)
                for r in range(0, total_rows_per_pattern):
                    note = getShort()
                    octave = getShort()
                    # Note encoding: 1..12 = C#..C- (see note_table above).
                    # Special cases: note==0 and octave==0 means an empty
                    # cell; note==100 means NOTE OFF (octave ignored).
                    volume = getShort() #Volume for this index (-1 = Empty)
                    o = " pattern row " + str(r)
                    if note == 100:
                        o += ", note OFF"
                    else:
                        if note != 0 and octave != 0:
                            o += ", note " + note_table[note] + str(octave) #str(note)
                            #o += ", octave " + str(octave)
                        else:
                            o += ", note ---"
                    if volume < 65535:
                        o += ", volume " + str(volume)
                    # Effect columns follow.  Summary of DefleMask effect
                    # commands (full list at battleofthebits.org lyceum,
                    # "DefleMask Tracker Effects Commands"):
                    #   00xy arpeggio, 01xx/02xx portamento up/down,
                    #   03xx glissando, 04xy vibrato, 05xy/06xy +vol slide,
                    #   07xy tremolo, 08xy L/R output, 09xx/0Fxx speed 1/2,
                    #   0Axy volume slide, 0Bxx jump to frame,
                    #   0Cxx retrigger, 0Dxx skip to next frame,
                    #   E1xy/E2xy note slide, E5xx/EFxx fine pitch,
                    #   EBxx sample bank, ECxx delayed note cut,
                    #   EDxx note delay, 20xy PSG noise mode (SN76489).
                    for fx in range(0, CHANNEL_EFFECTS_COLUMNS_COUNT ):
                        effect_code = getShort() # Effect Code for this index (-1 = Empty)
                        effect_value = getShort() # Effect Value for this index (-1 = Empty)
                        if effect_code < 65535:
                            o+= ", effect code " + str(effect_code)
                        if effect_value < 65535:
                            o+= ", effect val " + str(effect_value)
                    instrument = getShort() # Instrument for this index (-1 = Empty)
                    if instrument < 65535:
                        o += ", instrument " + str(instrument)
                    print o
                    #print " pattern row " + str(r) + ", instrument " + str(instrument) + ", note " + str(note) + ", octave " + str(octave) + ", volume " + str(volume)
        pattern_data_size = self.dmf_data.tell() - header_data_size
        print "size of pattern data is " + str(pattern_data_size) + " bytes"
        # //PCM SAMPLES DATA
        TOTAL_SAMPLES = getByte()
        if TOTAL_SAMPLES != 0:
            print "ERROR: Unexpected samples"
            return
        # //END OF DMF FORMAT
        print "All parsed."
#------------------------------------------------------------------------------------------
# Main
#------------------------------------------------------------------------------------------
# for testing
my_command_line = None
if False:
    # for testing... (dead branch; NOTE(review): 'filename' is undefined
    # here, so enabling this branch as-is would raise a NameError)
    my_command_line = 'vgmconverter "' + filename + '" -t bbc -q 50 -o "test.vgm"'
#------------------------------------------------------------------------------------------
# Use the canned test command line if set, otherwise the real argv.
if my_command_line != None:
    argv = my_command_line.split()
else:
    argv = sys.argv
argc = len(argv)
if argc < 2:
    # No arguments: print usage and quit.
    print "DMF Parser Utility for DMF files based on SMS TI SN76849 programmable sound chips"
    print ""
    print " Usage:"
    print " dmf-parser <dmffile>"
    print ""
    print " where:"
    print " <dmffile> is the source DMF file to be processed. Wildcards are not yet supported."
    print ""
    print " options:"
    exit()
# pre-process argv to merge quoted arguments
argi = 0  # NOTE(review): never used
inquotes = False
outargv = []
quotedarg = []
#print argv
# Re-join argv tokens that were split inside double quotes so that a
# quoted path with spaces arrives as a single argument.
for s in argv:
    #print "s=" + s
    #print "quotedarg=" + str(quotedarg)
    if s.startswith('"') and s.endswith('"'):
        # Fully quoted single token: just strip the quotes.
        outargv.append(s[1:-1])
        continue
    if not inquotes and s.startswith('"'):
        # Opening quote: start accumulating tokens.
        inquotes = True
        quotedarg.append(s[1:] + ' ')
        continue
    if inquotes and s.endswith('"'):
        # Closing quote: flush the accumulated tokens as one argument.
        inquotes = False
        quotedarg.append(s[:-1])
        outargv.append("".join(quotedarg))
        quotedarg = []
        continue
    if inquotes:
        quotedarg.append(s + ' ')
        continue
    outargv.append(s)
if inquotes:
    # Unbalanced quotes - refuse to continue.
    print "Error parsing command line " + str(" ".join(argv))
    exit()
argv = outargv
# validate source file
source_filename = None
if argv[1][0] != '-':
    source_filename = argv[1]
# load the DMF
if source_filename == None:
    print "ERROR: No source <filename> provided."
    exit()
dmf_stream = DmfStream(source_filename)
dmf_stream.parse()
# all done
print ""
print "Processing complete."
| en | 0.597302 | #!/usr/bin/env python # python script to convert & process DMF files for SN76489 PSG # by simondotm 2017 # Released under MIT license # http://deflemask.com/DMF_SPECS.txt #----------------------------------------------------------------------------- # constructor - pass in the filename of the DMF # open the dmf file and parse it # Store the DMF data and validate it # Save the current position of the VGM data # Seek to the start of the file #header = struct.unpack( 's', data ) # Perform basic validation on the given file by checking for the header # Could not find the header string # must be 3 - this script only parses SMS tunes (SYSTEM_TOTAL_CHANNELS=4) #//VISUAL INFORMATION #//MODULE INFORMATION # (0 = PAL, 1 = NTSC) # (If set to 1, NTSC or PAL is ignored) #//PATTERN MATRIX VALUES (A matrix of SYSTEM_TOTAL_CHANNELS x TOTAL_ROWS_IN_PATTERN_MATRIX) #//INSTRUMENTS DATA (.DMP format is similar to this part, but there are some discrepancies, please read DMP_Specs.txt for more details) # (0 = STANDARD INS, 1 = FM INS) # todo, should skip remaining data #//VOLUME MACRO # 1 Byte: ENVELOPE_SIZE (0 - 127) # Repeat this ENVELOPE_SIZE times # 4 Bytes: ENVELOPE_VALUE # 1 Byte: LOOP_POSITION (-1 = NO LOOP) #//ARPEGGIO MACRO # 1 Byte: ENVELOPE_SIZE (0 - 127) # Repeat this ENVELOPE_SIZE times # 4 Bytes: ENVELOPE_VALUE (signed int, offset=12) # 1 Byte: LOOP_POSITION (-1 = NO LOOP) # 1 Byte: ARPEGGIO MACRO MODE (0 = Normal, 1 = Fixed) #//DUTY/NOISE MACRO # 1 Byte: ENVELOPE_SIZE (0 - 127) # Repeat this ENVELOPE_SIZE times # 4 Bytes: ENVELOPE_VALUE # 1 Byte: LOOP_POSITION (-1 = NO LOOP) #//WAVETABLE MACRO # 1 Byte: ENVELOPE_SIZE (0 - 127) # Repeat this ENVELOPE_SIZE times # 4 Bytes: ENVELOPE_VALUE # 1 Byte: LOOP_POSITION (-1 = NO LOOP) # per system data, only present for C64 #//END OF INSTRUMENTS DATA #//WAVETABLES DATA #//PATTERNS DATA #", "D-", "D#", "E-", "F-", "F#", "G-", "G#", "A-", "A#", "B-", "C-"] #//Note values: #//01 C# #//02 D- #//03 D# #//04 E- #//05 F- 
#//06 F# #//07 G- #//08 G# #//09 A- #//10 A# #//11 B- #//12 C- #//Special cases: #//Note = 0 and octave = 0 means empty. #//Note = 100 means NOTE OFF, no matter what is inside the octave value. #Volume for this index (-1 = Empty) #str(note) #o += ", octave " + str(octave) # effects # http://battleofthebits.org/lyceum/View/DefleMask+Tracker+Effects+Commands/#SEGA Master System (SN76489) # These are the effects commands available for the 6 chips supported thus far by DefleMask Tracker. # The ones that begin with a 1 are always system-specific (also 2 for the SN76489 and SEGA PCM) so make sure you do not mix them up after switching systems! # 00xy - Arpeggio; fast note shifting in half steps. # x = Number of half steps from root note for first shift # y = Number of half steps from root note for second shift # # Ex: 037 = Minor chord. 047 = Major chord. # View article on arps for more examples. # # # 01xx - Portamento up; smooth pitch glide up. # 02xx - Portamento down; smooth pitch glide down. # If xx > 00: Speed # If xx = 00: Off # # # 03xx - Glissando; pitch glide to next note. # If xx > 00: Speed # If xx = 00: Off # # # 04xy - Vibrato; pitch vibration. # If x > 0: Speed # If x = 0: Off # y = Depth # # Overridden by YMU759; see below. # # # 05xy - Glissando + Volume slide; see Axy below. # Continues previous 03xx effect without modifying it. # # # 06xy - Vibrato + Volume slide; see Axy below. # Continued previous 04xy effect without modifying it. # # # 07xy - Tremolo; volume tremor. # If x > 0: Speed # If x = 0: Off # y = Depth # # # 08xy - L/R output setting. # If x = 0: Left channel output off # If x = 1: Left channel output on # If y = 0: Right channel output off # If y = 1: Right channel output on # # Overridden by HuC6280; see below. # # # 09xx - Speed 1 setting; see 0Fxx below. # If xx = 01-20: Ticks per row for odd rows # # # 0Axy - Volume slide. 
# If x = 0 & y = 0: Halt slide # If x = 0 & y > 0: Volume slide up x ticks depth # If x > 0 & y = 0: Volume slide down y ticks depth # # Note: Same parameters for effects 05xy and 06xy above. # # # 0Bxx - Jump to frame. # xx = Destination frame number # # # 0Cxx - Retrigger, works only for current row. # xx = Rate in ticks # # # 0Dxx - Skip to next frame at row xx. # xx = Destination row number # # # 0Fxx - Speed 2 setting; see 09xx above. # If xx = 01-20: Ticks per row for even rows # # # E1xy - Note slide up # E2xy - Note slide down # x = speed of slide # y = semitones to slide # # # E5xx - Channel fine pitch setting. # If xx = 80: Default # If xx > 80: Increase pitch # If xx < 80: Decrease pitch # # # EBxx - Set sample bank to xx. # If xx = 00-0B: Sample bank 0 to 11 is used. # If xx > 0B: nothin' # # # ECxx - Delayed note cut. # xx = number of ticks to delay # # # EDxx - Note delay. # xx = number of ticks to delay # # # EFxx - Global fine pitch setting. # If xx = 80: Default # If xx > 80: Increase pitch # If xx < 80: Decrease pitch # 20xy - PSG noise channel setting. # If x = 0: 3-pitch fixed noise # If x > 0: Variable-pitch noise # If y = 0: Periodic noise # If y > 0: White noise # This effect is also available when the current system is set to Genesis, via the PSG channels. # Effect Code for this index (-1 = Empty) # Effect Value for this index (-1 = Empty) # Instrument for this index (-1 = Empty) #print " pattern row " + str(r) + ", instrument " + str(instrument) + ", note " + str(note) + ", octave " + str(octave) + ", volume " + str(volume) # //PCM SAMPLES DATA # //END OF DMF FORMAT #------------------------------------------------------------------------------------------ # Main #------------------------------------------------------------------------------------------ # for testing # for testing... 
#------------------------------------------------------------------------------------------ # pre-process argv to merge quoted arguments #print argv #print "s=" + s #print "quotedarg=" + str(quotedarg) # validate source file # load the DMF # all done | 2.692005 | 3 |
custom_components/wattio/binary_sensor.py | dmoranf/home-assistant-wattio | 5 | 6616254 | <filename>custom_components/wattio/binary_sensor.py
"""Platform for Wattio integration testing."""
import logging
try:
from homeassistant.components.binary_sensor import BinarySensorEntity
except ImportError:
from homeassistant.components.binary_sensor import BinarySensorDevice as BinarySensorEntity
from homeassistant.const import ATTR_BATTERY_LEVEL
from . import WattioDevice
from .const import BINARY_SENSORS, CONF_EXCLUSIONS, DOMAIN, ICON
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up Wattio binary sensors from the devices discovered by the hub.

    Reads ``hass.data[DOMAIN]["devices"]``, skips any device whose IEEE is
    in the configured exclusion list, and registers the rest as
    :class:`WattioBinarySensor` entities.
    """
    _LOGGER.debug("Wattio Binary Sensor component running ...")
    if discovery_info is None:
        _LOGGER.error("No Binary Sensor device(s) discovered")
        return
    devices = []
    for device in hass.data[DOMAIN]["devices"]:
        # Resolve an icon only for the known binary-sensor types.
        icon = None
        if device["type"] in BINARY_SENSORS:
            icon = ICON[device["type"]]
        if device["ieee"] in hass.data[DOMAIN][CONF_EXCLUSIONS]:
            # Exclusion is a normal, user-requested condition: log which
            # device was skipped at DEBUG level (the previous code logged
            # the whole exclusion list at ERROR level).
            _LOGGER.debug("Excluding device with IEEE %s", device["ieee"])
        else:
            devices.append(
                WattioBinarySensor(device["name"], device["type"], icon, device["ieee"])
            )
            _LOGGER.debug("Adding device: %s", device["name"])
    async_add_entities(devices)
class WattioBinarySensor(WattioDevice, BinarySensorEntity):
    """Representation of a Wattio binary sensor (motion, door or siren)."""

    # pylint: disable=too-many-instance-attributes
    def __init__(self, name, devtype, icon, ieee):
        """Initialize the sensor.

        :param name: friendly name reported by the Wattio API.
        :param devtype: device type string ("motion", "door" or "siren").
        :param icon: icon identifier resolved from ICON, or None.
        :param ieee: unique IEEE address of the physical device.
        """
        self._pre = "bs_"  # entity-id prefix used by the WattioDevice base
        self._name = name
        self._state = None
        self._icon = icon
        self._apidata = None
        self._ieee = ieee
        self._devtype = devtype
        self._battery = None
        self._data = None
        self._available = 0  # 1 while the device appears in the API payload

    @property
    def available(self):
        """Return True if the device appeared in the last API payload."""
        _LOGGER.debug("Device %s - availability: %s", self._name, self._available)
        return self._available == 1

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def icon(self):
        """Return the icon of the sensor."""
        return self._icon

    @property
    def is_on(self):
        """Return True when active (motion / door open / siren pre-alarm)."""
        return self._state

    @property
    def device_state_attributes(self):
        """Return the state attributes (battery level when known)."""
        attr = {}
        if self._battery is not None:
            attr[ATTR_BATTERY_LEVEL] = self.get_battery_level()
        return attr

    @property
    def device_class(self):
        """Return the Home Assistant device class for this sensor type."""
        if self._devtype == "motion":
            return "motion"
        if self._devtype == "door":
            return "door"
        # Sirens and unknown types have no device class.
        return None

    def get_battery_level(self):
        """Return battery charge as a percentage (the API reports 0-4)."""
        if self._battery is not None:
            return round((self._battery * 100) / 4)
        # Kept for backward compatibility: callers only invoke this when
        # a battery value is present, so False is effectively unreachable.
        return False

    async def async_update(self):
        """Refresh state from the shared API payload in hass.data."""
        # NOTE: the API reports plain values here rather than the
        # integration constants (original comment, translated from Spanish).
        self._data = self.hass.data[DOMAIN]["data"]
        _LOGGER.debug("ACTUALIZANDO SENSOR BINARIO %s - %s", self._name, self._ieee)
        if self._data is not None:
            # Assume unavailable until the device shows up in the payload.
            self._available = 0
            for device in self._data:
                if device["ieee"] == self._ieee:
                    self._available = 1
                    if device["type"] == "motion":
                        _LOGGER.debug(device["status"]["presence"])
                        self._battery = device["status"]["battery"]
                        self._state = device["status"]["presence"]
                    elif device["type"] == "door":
                        self._battery = device["status"]["battery"]
                        self._state = device["status"]["opened"]
                        _LOGGER.debug(device["status"]["opened"])
                    elif device["type"] == "siren":
                        self._state = device["status"]["preAlarm"]
                        _LOGGER.debug(device["status"]["preAlarm"])
                    break
            _LOGGER.debug(self._state)
            return self._state
        return False
| <filename>custom_components/wattio/binary_sensor.py
"""Platform for Wattio integration testing."""
import logging
try:
from homeassistant.components.binary_sensor import BinarySensorEntity
except ImportError:
from homeassistant.components.binary_sensor import BinarySensorDevice as BinarySensorEntity
from homeassistant.const import ATTR_BATTERY_LEVEL
from . import WattioDevice
from .const import BINARY_SENSORS, CONF_EXCLUSIONS, DOMAIN, ICON
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Configure Wattio Binary Sensor."""
_LOGGER.debug("Wattio Binary Sensor component running ...")
if discovery_info is None:
_LOGGER.error("No Binary Sensor device(s) discovered")
return
devices = []
for device in hass.data[DOMAIN]["devices"]:
icon = None
if device["type"] in BINARY_SENSORS:
icon = ICON[device["type"]]
if device["ieee"] in hass.data[DOMAIN][CONF_EXCLUSIONS]:
_LOGGER.error("Excluding device with IEEE %s", hass.data[DOMAIN][CONF_EXCLUSIONS])
else:
devices.append(
WattioBinarySensor(device["name"], device["type"], icon, device["ieee"])
)
_LOGGER.debug("Adding device: %s", device["name"])
async_add_entities(devices)
class WattioBinarySensor(WattioDevice, BinarySensorEntity):
"""Representation of Sensor."""
# pylint: disable=too-many-instance-attributes
def __init__(self, name, devtype, icon, ieee):
"""Initialize the sensor."""
self._pre = "bs_"
self._name = name
self._state = None
self._icon = icon
self._apidata = None
self._ieee = ieee
self._devtype = devtype
self._battery = None
self._data = None
self._available = 0
@property
def available(self):
"""Return availability."""
_LOGGER.debug("Device %s - availability: %s", self._name, self._available)
return True if self._available == 1 else False
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Return the image of the sensor."""
return self._icon
@property
def is_on(self):
"""Return state of the sensor."""
return self._state
@property
def device_state_attributes(self):
"""Return the state attributes of this device."""
attr = {}
if self._battery is not None:
attr[ATTR_BATTERY_LEVEL] = self.get_battery_level()
return attr
@property
def device_class(self):
"""Return device class."""
if self._devtype is not None:
if self._devtype == "motion":
return "motion"
if self._devtype == "door":
return "door"
return None
def get_battery_level(self):
"""Return device battery level."""
if self._battery is not None:
battery_level = round((self._battery * 100) / 4)
return battery_level
return False
async def async_update(self):
"""Update sensor data."""
# Parece que no tira con las CONST devolver 0 o 1
self._data = self.hass.data[DOMAIN]["data"]
_LOGGER.debug("ACTUALIZANDO SENSOR BINARIO %s - %s", self._name, self._ieee)
if self._data is not None:
self._available = 0
for device in self._data:
if device["ieee"] == self._ieee:
self._available = 1
if device["type"] == "motion":
_LOGGER.debug(device["status"]["presence"])
self._battery = device["status"]["battery"]
self._state = device["status"]["presence"]
elif device["type"] == "door":
self._battery = device["status"]["battery"]
self._state = device["status"]["opened"]
_LOGGER.debug(device["status"]["opened"])
elif device["type"] == "siren":
self._state = device["status"]["preAlarm"]
_LOGGER.debug(device["status"]["preAlarm"])
break
_LOGGER.debug(self._state)
return self._state
return False
| en | 0.609227 | Platform for Wattio integration testing. Configure Wattio Binary Sensor. Representation of Sensor. # pylint: disable=too-many-instance-attributes Initialize the sensor. Return availability. No polling needed. Return the name of the sensor. Return the image of the sensor. Return state of the sensor. Return the state attributes of this device. Return device class. Return device battery level. Update sensor data. # Parece que no tira con las CONST devolver 0 o 1 | 2.27878 | 2 |
src/plugins/cloud_s3/__init__.py | tulustul/MusicPlayer | 3 | 6616255 | from core.cloud import CloudProvider
from .s3 import S3Provider
# Module-import side effect: register the S3 backend in the CloudProvider
# registry under the key 'S3' so it can be selected by name.
CloudProvider.register_provider('S3', S3Provider)
| from core.cloud import CloudProvider
from .s3 import S3Provider
CloudProvider.register_provider('S3', S3Provider)
| none | 1 | 1.299678 | 1 | |
bomberman/game/board_elements/base_element.py | NaIwo/BomberManAI | 1 | 6616256 | from pygame.sprite import Sprite
from typing import Tuple, List, Optional
import pygame
import numpy as np
from bomberman.game.config import GameProperties, PlayerProperties, Screen, MOVE_TO_NUMBER, Move
from bomberman.game.utils import get_image
class BaseElement(Sprite):
    """Common base for board sprites (players, bombs, walls, ...).

    Handles rect/surface setup, optional image loading, clamping to the
    screen, one-hot encoding of the current move, and identity semantics
    based on the numeric suffix of the element name.
    """
    def __init__(self, coordinates_tuple: Tuple, name: str, shape_properties: List, color: Tuple, image_path: str):
        """Create the sprite.

        :param coordinates_tuple: args forwarded to pygame.Rect (x, y, w, h).
        :param name: element name ending in '_<idx>'; idx becomes the id.
        :param shape_properties: (width, height) of the placeholder Surface.
        :param color: fill colour used when image loading is disabled.
        :param image_path: sprite image, loaded only if LOAD_IMAGES is set.
        """
        super().__init__()
        self.current_move: Move = Move.NOT_MOVING
        self.name: str = name
        # Unique id parsed from the trailing '_<number>' of the name.
        self.idx: int = int(self.name[self.name.rfind('_') + 1:])
        self._update_properties(coordinates_tuple, shape_properties, color)
        if GameProperties.LOAD_IMAGES.value:
            self.image: Optional[pygame.Surface] = get_image(image_path, self.rect.width, self.rect.height)
        self.clamp_position()
    def _update_properties(self, coordinates_tuple: Tuple, shape_properties: List, color: Tuple) -> None:
        """Rebuild the rect and placeholder surface, then apply *color*."""
        self.rect = pygame.Rect(*coordinates_tuple)
        self.image = pygame.Surface(shape_properties)
        self._update_color(color)
    def _update_color(self, color: Tuple) -> None:
        """Fill the placeholder surface; no-op when real images are used."""
        if not GameProperties.LOAD_IMAGES.value:
            self.image.fill(color)
    def _update_image(self, image_path: str) -> None:
        """Replace the sprite image; no-op when image loading is disabled."""
        if GameProperties.LOAD_IMAGES.value:
            self.image: Optional[pygame.Surface] = get_image(image_path, self.rect.width, self.rect.height)
    def update(self) -> None:
        """Per-frame hook for pygame sprite groups; subclasses override."""
        pass
    def clamp_position(self) -> None:
        """Keep the rect within the screen bounds.

        NOTE(review): the clamp uses PlayerProperties dimensions for every
        element type - confirm that is intended for non-player sprites.
        """
        max_x: int = Screen.WIDTH.value - PlayerProperties.WIDTH.value
        max_y: int = Screen.HEIGHT.value - PlayerProperties.HEIGHT.value
        self.rect.x = max(min(max_x, self.rect.x), 0)
        self.rect.y = max(min(max_y, self.rect.y), 0)
    def get_current_move_as_one_hot(self) -> np.ndarray:
        """Current move in one-hot form.

        We add 1 because 'NUMBER_OF_MOVES' does not count the 'NOT_MOVING'
        state."""
        return np.eye(Move.NUMBER_OF_MOVES.value + 1)[MOVE_TO_NUMBER[self.current_move]]
    def __eq__(self, other_idx) -> bool:
        """
        Allows looking up an existing Sprite by its **UNIQUE** index instead
        of creating a new object and comparing properties, which improves
        performance; in graphic mode images are then only read when
        necessary.  Note the right-hand side is the raw index, not a sprite.
        """
        return self.idx == other_idx
    def __hash__(self):
        """Hash on the unique index, consistent with __eq__."""
        return self.idx
| from pygame.sprite import Sprite
from typing import Tuple, List, Optional
import pygame
import numpy as np
from bomberman.game.config import GameProperties, PlayerProperties, Screen, MOVE_TO_NUMBER, Move
from bomberman.game.utils import get_image
class BaseElement(Sprite):
def __init__(self, coordinates_tuple: Tuple, name: str, shape_properties: List, color: Tuple, image_path: str):
super().__init__()
self.current_move: Move = Move.NOT_MOVING
self.name: str = name
self.idx: int = int(self.name[self.name.rfind('_') + 1:])
self._update_properties(coordinates_tuple, shape_properties, color)
if GameProperties.LOAD_IMAGES.value:
self.image: Optional[pygame.Surface] = get_image(image_path, self.rect.width, self.rect.height)
self.clamp_position()
def _update_properties(self, coordinates_tuple, shape_properties: List, color: Tuple):
self.rect = pygame.Rect(*coordinates_tuple)
self.image = pygame.Surface(shape_properties)
self._update_color(color)
def _update_color(self, color: Tuple) -> None:
if not GameProperties.LOAD_IMAGES.value:
self.image.fill(color)
def _update_image(self, image_path: str) -> None:
if GameProperties.LOAD_IMAGES.value:
self.image: Optional[pygame.Surface] = get_image(image_path, self.rect.width, self.rect.height)
def update(self) -> None:
pass
def clamp_position(self) -> None:
max_x: int = Screen.WIDTH.value - PlayerProperties.WIDTH.value
max_y: int = Screen.HEIGHT.value - PlayerProperties.HEIGHT.value
self.rect.x = max(min(max_x, self.rect.x), 0)
self.rect.y = max(min(max_y, self.rect.y), 0)
def get_current_move_as_one_hot(self) -> np.ndarray:
"""Current move in one hot manner.
We add 1 because of that 'NUMBER_OF_MOVES' do not take 'NOT_MOVING' state into account'"""
return np.eye(Move.NUMBER_OF_MOVES.value + 1)[MOVE_TO_NUMBER[self.current_move]]
def __eq__(self, other_idx) -> bool:
"""
This help to search for existing Sprite by **UNIQUE** index instead of creating new object and compare
their properties. This can lead to increase performance. In graphic mode we only read images if it is necessary.
"""
return self.idx == other_idx
def __hash__(self):
return self.idx
| en | 0.901631 | Current move in one hot manner. We add 1 because of that 'NUMBER_OF_MOVES' do not take 'NOT_MOVING' state into account' This help to search for existing Sprite by **UNIQUE** index instead of creating new object and compare their properties. This can lead to increase performance. In graphic mode we only read images if it is necessary. | 2.685447 | 3 |
portfolio/settings.py | NikOneZ1/createfolio | 1 | 6616257 | <reponame>NikOneZ1/createfolio<filename>portfolio/settings.py
"""
Django settings for portfolio project.
Generated by 'django-admin startproject' using Django 3.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import dj_database_url
import os
from datetime import timedelta
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'django-insecure-$%&r5fteje_fcc7-f%=$n*7ke7o6rv)jku9e8!!qw=9*i6z-pl')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(os.environ.get('DJANGO_DEBUG', True))
ALLOWED_HOSTS = ['createfolio.herokuapp.com', '127.0.0.1', 'localhost']
# Switch to turn on/off React frontend
REACT_FRONTEND = bool(os.environ.get('REACT_FRONTEND', False))
# Application definition
INSTALLED_APPS = [
'main.apps.MainConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'cloudinary',
'cloudinary_storage',
'django_cleanup',
'crispy_forms',
'rest_framework',
'djoser',
'drf_yasg',
'rest_framework_simplejwt',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'portfolio.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
if REACT_FRONTEND:
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'portfolio_frontend')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'portfolio.wsgi.application'
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
'CONN_MAX_AGE': 60 * 10,
}
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_simplejwt.authentication.JWTAuthentication',
),
}
SIMPLE_JWT = {
'AUTH_HEADER_TYPES': ('JWT',),
'ACCESS_TOKEN_LIFETIME': timedelta(days=30),
'REFRESH_TOKEN_LIFETIME': timedelta(days=30),
}
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = '/static/'
if REACT_FRONTEND:
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'portfolio_frontend', "build", "static"), # update the STATICFILES_DIRS
)
else:
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
CLOUDINARY_STORAGE = {
'CLOUD_NAME': os.environ.get('CLOUD_NAME'),
'API_KEY': os.environ.get('API_KEY'),
'API_SECRET': os.environ.get('API_SECRET'),
}
DEFAULT_FILE_STORAGE = 'cloudinary_storage.storage.MediaCloudinaryStorage'
CRISPY_TEMPLATE_PACK = 'bootstrap4'
LOGIN_REDIRECT_URL = 'home'
LOGIN_URL = 'login'
# Heroku: Update database configuration from $DATABASE_URL.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
#SMTP Configuration
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD')
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django.db.backends': {
'level': 'DEBUG',
'handlers': ['console'],
}
},
}
| """
Django settings for portfolio project.
Generated by 'django-admin startproject' using Django 3.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import dj_database_url
import os
from datetime import timedelta
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'django-insecure-$%&r5fteje_fcc7-f%=$n*7ke7o6rv)jku9e8!!qw=9*i6z-pl')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(os.environ.get('DJANGO_DEBUG', True))
ALLOWED_HOSTS = ['createfolio.herokuapp.com', '127.0.0.1', 'localhost']
# Switch to turn on/off React frontend
REACT_FRONTEND = bool(os.environ.get('REACT_FRONTEND', False))
# Application definition
INSTALLED_APPS = [
'main.apps.MainConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'cloudinary',
'cloudinary_storage',
'django_cleanup',
'crispy_forms',
'rest_framework',
'djoser',
'drf_yasg',
'rest_framework_simplejwt',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'portfolio.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
if REACT_FRONTEND:
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'portfolio_frontend')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'portfolio.wsgi.application'
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
'CONN_MAX_AGE': 60 * 10,
}
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_simplejwt.authentication.JWTAuthentication',
),
}
SIMPLE_JWT = {
'AUTH_HEADER_TYPES': ('JWT',),
'ACCESS_TOKEN_LIFETIME': timedelta(days=30),
'REFRESH_TOKEN_LIFETIME': timedelta(days=30),
}
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = '/static/'
if REACT_FRONTEND:
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'portfolio_frontend', "build", "static"), # update the STATICFILES_DIRS
)
else:
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
CLOUDINARY_STORAGE = {
'CLOUD_NAME': os.environ.get('CLOUD_NAME'),
'API_KEY': os.environ.get('API_KEY'),
'API_SECRET': os.environ.get('API_SECRET'),
}
DEFAULT_FILE_STORAGE = 'cloudinary_storage.storage.MediaCloudinaryStorage'
CRISPY_TEMPLATE_PACK = 'bootstrap4'
LOGIN_REDIRECT_URL = 'home'
LOGIN_URL = 'login'
# Heroku: Update database configuration from $DATABASE_URL.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
#SMTP Configuration
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD')
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django.db.backends': {
'level': 'DEBUG',
'handlers': ['console'],
}
},
} | en | 0.632289 | Django settings for portfolio project. Generated by 'django-admin startproject' using Django 3.2.7. For more information on this file, see https://docs.djangoproject.com/en/3.2/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.2/ref/settings/ # Build paths inside the project like this: BASE_DIR / 'subdir'. # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! # SECURITY WARNING: don't run with debug turned on in production! # Switch to turn on/off React frontend # Application definition # Database # https://docs.djangoproject.com/en/4.0/ref/settings/#databases # Password validation # https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators # Internationalization # https://docs.djangoproject.com/en/4.0/topics/i18n/ # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/4.0/howto/static-files/ # update the STATICFILES_DIRS # Default primary key field type # https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field # Heroku: Update database configuration from $DATABASE_URL. #SMTP Configuration | 1.677719 | 2 |
WIZMSGHandler.py | renakim/WIZnet-S2E-Tool-GUI | 15 | 6616258 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import select
import codecs
from WIZ750CMDSET import WIZ750CMDSET
from PyQt5.QtCore import QThread, pyqtSignal
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
exitflag = 0
OP_SEARCHALL = 1
OP_GETCOMMAND = 2
OP_SETCOMMAND = 3
OP_SETFILE = 4
OP_GETFILE = 5
OP_FWUP = 6
# PACKET_SIZE = 1024
PACKET_SIZE = 2048
def timeout_func():
# print('timeout')
global exitflag
exitflag = 1
class WIZMSGHandler(QThread):
search_result = pyqtSignal(int)
set_result = pyqtSignal(int)
searched_data = pyqtSignal(bytes)
def __init__(self, udpsock, cmd_list, what_sock, op_code, timeout):
QThread.__init__(self)
self.sock = udpsock
self.msg = bytearray(PACKET_SIZE)
self.size = 0
try:
self.inputs = [self.sock.sock]
except Exception as e:
print('socket error:', e)
self.terminate()
self.outputs = []
self.errors = []
self.opcode = None
self.iter = 0
self.dest_mac = None
self.isvalid = False
# self.timer1 = None
self.istimeout = False
self.reply = ''
self.setting_pw_wrong = False
self.mac_list = []
self.mode_list = []
self.mn_list = []
self.vr_list = []
self.getreply = []
self.rcv_list = []
self.st_list = []
self.what_sock = what_sock
self.cmd_list = cmd_list
self.opcode = op_code
self.timeout = timeout
self.wiz750cmdObj = WIZ750CMDSET(1)
def timeout_func(self):
self.istimeout = True
def makecommands(self):
self.size = 0
try:
for cmd in self.cmd_list:
# print('cmd[0]: %s, cmd[1]: %s' % (cmd[0], cmd[1]))
try:
self.msg[self.size:] = str.encode(cmd[0])
except Exception as e:
print('[ERROR] makecommands() encode:', cmd[0], e)
self.size += len(cmd[0])
if cmd[0] == "MA":
# sys.stdout.write('cmd[1]: %r\r\n' % cmd[1])
cmd[1] = cmd[1].replace(":", "")
# print(cmd[1])
# hex_string = cmd[1].decode('hex')
try:
hex_string = codecs.decode(cmd[1], 'hex')
except Exception as e:
print('[ERROR] makecommands() decode:',
cmd[0], cmd[1], e)
self.msg[self.size:] = hex_string
self.dest_mac = hex_string
# self.dest_mac = (int(cmd[1], 16)).to_bytes(6, byteorder='big') # Hexadecimal string to hexadecimal binary
# self.msg[self.size:] = self.dest_mac
self.size += 6
else:
try:
self.msg[self.size:] = str.encode(cmd[1])
except Exception as e:
print('[ERROR] makecommands() encode param:',
cmd[0], cmd[1], e)
self.size += len(cmd[1])
if "\r\n" not in cmd[1]:
self.msg[self.size:] = str.encode("\r\n")
self.size += 2
# print(self.size, self.msg)
except Exception as e:
print('[ERROR] WIZMSGHandler makecommands(): %r' % e)
def sendcommands(self):
self.sock.sendto(self.msg)
def sendcommandsTCP(self):
self.sock.write(self.msg)
def check_parameter(self, cmdset):
# print('check_parameter()', cmdset, cmdset[:2], cmdset[2:])
try:
if b'MA' not in cmdset:
# print('check_parameter() OK', cmdset, cmdset[:2], cmdset[2:])
if self.wiz750cmdObj.isvalidparameter(cmdset[:2].decode(), cmdset[2:].decode()):
return True
else:
return False
else:
return False
except Exception as e:
print('[ERROR] WIZMSGHandler check_parameter(): %r' % e)
# def parseresponse(self):
def run(self):
try:
self.makecommands()
if self.what_sock == 'udp':
self.sendcommands()
elif self.what_sock == 'tcp':
self.sendcommandsTCP()
except Exception as e:
print('[ERROR] WIZMSGHandler thread: %r' % e)
readready, writeready, errorready = select.select(
self.inputs, self.outputs, self.errors, self.timeout)
replylists = None
self.getreply = []
self.mac_list = []
self.mn_list = []
self.vr_list = []
self.st_list = []
self.rcv_list = []
# print('readready value: ', len(readready), readready)
# Pre-search / Single search
if self.timeout < 2:
for sock in readready:
if sock == self.sock.sock:
data = self.sock.recvfrom()
self.searched_data.emit(data)
# replylists = data.splitlines()
replylists = data.split(b"\r\n")
# print('replylists', replylists)
self.getreply = replylists
else:
while True:
self.iter += 1
# sys.stdout.write("iter count: %r " % self.iter)
for sock in readready:
if sock == self.sock.sock:
data = self.sock.recvfrom()
# check if data reduplication
if data in self.rcv_list:
replylists = []
else:
self.rcv_list.append(data) # received data backup
# replylists = data.splitlines()
replylists = data.split(b"\r\n")
print('replylists', replylists)
self.getreply = replylists
if self.opcode == OP_SEARCHALL:
try:
for i in range(0, len(replylists)):
if b'MC' in replylists[i]:
if self.check_parameter(replylists[i]):
self.mac_list.append(replylists[i][2:])
if b'MN' in replylists[i]:
if self.check_parameter(replylists[i]):
self.mn_list.append(replylists[i][2:])
if b'VR' in replylists[i]:
if self.check_parameter(replylists[i]):
self.vr_list.append(replylists[i][2:])
if b'OP' in replylists[i]:
if self.check_parameter(replylists[i]):
self.mode_list.append(replylists[i][2:])
if b'ST' in replylists[i]:
if self.check_parameter(replylists[i]):
self.st_list.append(replylists[i][2:])
except Exception as e:
print('[ERROR] WIZMSGHandler makecommands(): %r' % e)
elif self.opcode == OP_FWUP:
for i in range(0, len(replylists)):
if b'MA' in replylists[i][:2]:
pass
# self.isvalid = True
else:
self.isvalid = False
# sys.stdout.write("%r\r\n" % replylists[i][:2])
if b'FW' in replylists[i][:2]:
# sys.stdout.write('self.isvalid == True\r\n')
# param = replylists[i][2:].split(b':')
self.reply = replylists[i][2:]
elif self.opcode == OP_SETCOMMAND:
for i in range(0, len(replylists)):
if b'AP' in replylists[i][:2]:
if replylists[i][2:] == b' ':
self.setting_pw_wrong = True
else:
self.setting_pw_wrong = False
readready, writeready, errorready = select.select(
self.inputs, self.outputs, self.errors, 1)
if not readready or not replylists:
break
if self.opcode == OP_SEARCHALL:
self.msleep(500)
# print('Search device:', self.mac_list)
self.search_result.emit(len(self.mac_list))
# return len(self.mac_list)
if self.opcode == OP_SETCOMMAND:
self.msleep(500)
# print(self.rcv_list)
if len(self.rcv_list) > 0:
# print('OP_SETCOMMAND: rcv_list:', len(self.rcv_list[0]), self.rcv_list[0])
if self.setting_pw_wrong:
self.set_result.emit(-3)
else:
self.set_result.emit(len(self.rcv_list[0]))
else:
self.set_result.emit(-1)
elif self.opcode == OP_FWUP:
return self.reply
# sys.stdout.write("%s\r\n" % self.mac_list)
class DataRefresh(QThread):
resp_check = pyqtSignal(int)
def __init__(self, sock, cmd_list, what_sock, interval):
QThread.__init__(self)
self.sock = sock
self.msg = bytearray(PACKET_SIZE)
self.size = 0
self.inputs = [self.sock.sock]
self.outputs = []
self.errors = []
self.iter = 0
self.dest_mac = None
self.reply = ''
self.mac_list = []
self.rcv_list = []
self.what_sock = what_sock
self.cmd_list = cmd_list
self.interval = interval * 1000
def makecommands(self):
self.size = 0
for cmd in self.cmd_list:
self.msg[self.size:] = str.encode(cmd[0])
self.size += len(cmd[0])
if cmd[0] == "MA":
cmd[1] = cmd[1].replace(":", "")
hex_string = codecs.decode(cmd[1], 'hex')
self.msg[self.size:] = hex_string
self.dest_mac = hex_string
self.size += 6
else:
self.msg[self.size:] = str.encode(cmd[1])
self.size += len(cmd[1])
if "\r\n" not in cmd[1]:
self.msg[self.size:] = str.encode("\r\n")
self.size += 2
def sendcommands(self):
self.sock.sendto(self.msg)
def sendcommandsTCP(self):
self.sock.write(self.msg)
def run(self):
try:
self.makecommands()
if self.what_sock == 'udp':
self.sendcommands()
elif self.what_sock == 'tcp':
self.sendcommandsTCP()
except Exception as e:
print(e)
# replylists = None
checknum = 0
while True:
print('Refresh', checknum)
self.rcv_list = []
readready, writeready, errorready = select.select(
self.inputs, self.outputs, self.errors, 2)
self.iter += 1
# sys.stdout.write("iter count: %r " % self.iter)
for sock in readready:
if sock == self.sock.sock:
data = self.sock.recvfrom()
self.rcv_list.append(data) # 수신 데이터 저장
# replylists = data.splitlines()
# replylists = data.split(b"\r\n")
# print('replylists', replylists)
checknum += 1
self.resp_check.emit(checknum)
if self.interval == 0:
break
else:
self.msleep(self.interval)
self.sendcommands()
| #!/usr/bin/python
# -*- coding: utf-8 -*-
import select
import codecs
from WIZ750CMDSET import WIZ750CMDSET
from PyQt5.QtCore import QThread, pyqtSignal
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
exitflag = 0
OP_SEARCHALL = 1
OP_GETCOMMAND = 2
OP_SETCOMMAND = 3
OP_SETFILE = 4
OP_GETFILE = 5
OP_FWUP = 6
# PACKET_SIZE = 1024
PACKET_SIZE = 2048
def timeout_func():
# print('timeout')
global exitflag
exitflag = 1
class WIZMSGHandler(QThread):
search_result = pyqtSignal(int)
set_result = pyqtSignal(int)
searched_data = pyqtSignal(bytes)
def __init__(self, udpsock, cmd_list, what_sock, op_code, timeout):
QThread.__init__(self)
self.sock = udpsock
self.msg = bytearray(PACKET_SIZE)
self.size = 0
try:
self.inputs = [self.sock.sock]
except Exception as e:
print('socket error:', e)
self.terminate()
self.outputs = []
self.errors = []
self.opcode = None
self.iter = 0
self.dest_mac = None
self.isvalid = False
# self.timer1 = None
self.istimeout = False
self.reply = ''
self.setting_pw_wrong = False
self.mac_list = []
self.mode_list = []
self.mn_list = []
self.vr_list = []
self.getreply = []
self.rcv_list = []
self.st_list = []
self.what_sock = what_sock
self.cmd_list = cmd_list
self.opcode = op_code
self.timeout = timeout
self.wiz750cmdObj = WIZ750CMDSET(1)
def timeout_func(self):
self.istimeout = True
def makecommands(self):
self.size = 0
try:
for cmd in self.cmd_list:
# print('cmd[0]: %s, cmd[1]: %s' % (cmd[0], cmd[1]))
try:
self.msg[self.size:] = str.encode(cmd[0])
except Exception as e:
print('[ERROR] makecommands() encode:', cmd[0], e)
self.size += len(cmd[0])
if cmd[0] == "MA":
# sys.stdout.write('cmd[1]: %r\r\n' % cmd[1])
cmd[1] = cmd[1].replace(":", "")
# print(cmd[1])
# hex_string = cmd[1].decode('hex')
try:
hex_string = codecs.decode(cmd[1], 'hex')
except Exception as e:
print('[ERROR] makecommands() decode:',
cmd[0], cmd[1], e)
self.msg[self.size:] = hex_string
self.dest_mac = hex_string
# self.dest_mac = (int(cmd[1], 16)).to_bytes(6, byteorder='big') # Hexadecimal string to hexadecimal binary
# self.msg[self.size:] = self.dest_mac
self.size += 6
else:
try:
self.msg[self.size:] = str.encode(cmd[1])
except Exception as e:
print('[ERROR] makecommands() encode param:',
cmd[0], cmd[1], e)
self.size += len(cmd[1])
if "\r\n" not in cmd[1]:
self.msg[self.size:] = str.encode("\r\n")
self.size += 2
# print(self.size, self.msg)
except Exception as e:
print('[ERROR] WIZMSGHandler makecommands(): %r' % e)
def sendcommands(self):
self.sock.sendto(self.msg)
def sendcommandsTCP(self):
self.sock.write(self.msg)
def check_parameter(self, cmdset):
# print('check_parameter()', cmdset, cmdset[:2], cmdset[2:])
try:
if b'MA' not in cmdset:
# print('check_parameter() OK', cmdset, cmdset[:2], cmdset[2:])
if self.wiz750cmdObj.isvalidparameter(cmdset[:2].decode(), cmdset[2:].decode()):
return True
else:
return False
else:
return False
except Exception as e:
print('[ERROR] WIZMSGHandler check_parameter(): %r' % e)
# def parseresponse(self):
def run(self):
try:
self.makecommands()
if self.what_sock == 'udp':
self.sendcommands()
elif self.what_sock == 'tcp':
self.sendcommandsTCP()
except Exception as e:
print('[ERROR] WIZMSGHandler thread: %r' % e)
readready, writeready, errorready = select.select(
self.inputs, self.outputs, self.errors, self.timeout)
replylists = None
self.getreply = []
self.mac_list = []
self.mn_list = []
self.vr_list = []
self.st_list = []
self.rcv_list = []
# print('readready value: ', len(readready), readready)
# Pre-search / Single search
if self.timeout < 2:
for sock in readready:
if sock == self.sock.sock:
data = self.sock.recvfrom()
self.searched_data.emit(data)
# replylists = data.splitlines()
replylists = data.split(b"\r\n")
# print('replylists', replylists)
self.getreply = replylists
else:
while True:
self.iter += 1
# sys.stdout.write("iter count: %r " % self.iter)
for sock in readready:
if sock == self.sock.sock:
data = self.sock.recvfrom()
# check if data reduplication
if data in self.rcv_list:
replylists = []
else:
self.rcv_list.append(data) # received data backup
# replylists = data.splitlines()
replylists = data.split(b"\r\n")
print('replylists', replylists)
self.getreply = replylists
if self.opcode == OP_SEARCHALL:
try:
for i in range(0, len(replylists)):
if b'MC' in replylists[i]:
if self.check_parameter(replylists[i]):
self.mac_list.append(replylists[i][2:])
if b'MN' in replylists[i]:
if self.check_parameter(replylists[i]):
self.mn_list.append(replylists[i][2:])
if b'VR' in replylists[i]:
if self.check_parameter(replylists[i]):
self.vr_list.append(replylists[i][2:])
if b'OP' in replylists[i]:
if self.check_parameter(replylists[i]):
self.mode_list.append(replylists[i][2:])
if b'ST' in replylists[i]:
if self.check_parameter(replylists[i]):
self.st_list.append(replylists[i][2:])
except Exception as e:
print('[ERROR] WIZMSGHandler makecommands(): %r' % e)
elif self.opcode == OP_FWUP:
for i in range(0, len(replylists)):
if b'MA' in replylists[i][:2]:
pass
# self.isvalid = True
else:
self.isvalid = False
# sys.stdout.write("%r\r\n" % replylists[i][:2])
if b'FW' in replylists[i][:2]:
# sys.stdout.write('self.isvalid == True\r\n')
# param = replylists[i][2:].split(b':')
self.reply = replylists[i][2:]
elif self.opcode == OP_SETCOMMAND:
for i in range(0, len(replylists)):
if b'AP' in replylists[i][:2]:
if replylists[i][2:] == b' ':
self.setting_pw_wrong = True
else:
self.setting_pw_wrong = False
readready, writeready, errorready = select.select(
self.inputs, self.outputs, self.errors, 1)
if not readready or not replylists:
break
if self.opcode == OP_SEARCHALL:
self.msleep(500)
# print('Search device:', self.mac_list)
self.search_result.emit(len(self.mac_list))
# return len(self.mac_list)
if self.opcode == OP_SETCOMMAND:
self.msleep(500)
# print(self.rcv_list)
if len(self.rcv_list) > 0:
# print('OP_SETCOMMAND: rcv_list:', len(self.rcv_list[0]), self.rcv_list[0])
if self.setting_pw_wrong:
self.set_result.emit(-3)
else:
self.set_result.emit(len(self.rcv_list[0]))
else:
self.set_result.emit(-1)
elif self.opcode == OP_FWUP:
return self.reply
# sys.stdout.write("%s\r\n" % self.mac_list)
class DataRefresh(QThread):
resp_check = pyqtSignal(int)
def __init__(self, sock, cmd_list, what_sock, interval):
QThread.__init__(self)
self.sock = sock
self.msg = bytearray(PACKET_SIZE)
self.size = 0
self.inputs = [self.sock.sock]
self.outputs = []
self.errors = []
self.iter = 0
self.dest_mac = None
self.reply = ''
self.mac_list = []
self.rcv_list = []
self.what_sock = what_sock
self.cmd_list = cmd_list
self.interval = interval * 1000
def makecommands(self):
self.size = 0
for cmd in self.cmd_list:
self.msg[self.size:] = str.encode(cmd[0])
self.size += len(cmd[0])
if cmd[0] == "MA":
cmd[1] = cmd[1].replace(":", "")
hex_string = codecs.decode(cmd[1], 'hex')
self.msg[self.size:] = hex_string
self.dest_mac = hex_string
self.size += 6
else:
self.msg[self.size:] = str.encode(cmd[1])
self.size += len(cmd[1])
if "\r\n" not in cmd[1]:
self.msg[self.size:] = str.encode("\r\n")
self.size += 2
def sendcommands(self):
self.sock.sendto(self.msg)
def sendcommandsTCP(self):
self.sock.write(self.msg)
def run(self):
try:
self.makecommands()
if self.what_sock == 'udp':
self.sendcommands()
elif self.what_sock == 'tcp':
self.sendcommandsTCP()
except Exception as e:
print(e)
# replylists = None
checknum = 0
while True:
print('Refresh', checknum)
self.rcv_list = []
readready, writeready, errorready = select.select(
self.inputs, self.outputs, self.errors, 2)
self.iter += 1
# sys.stdout.write("iter count: %r " % self.iter)
for sock in readready:
if sock == self.sock.sock:
data = self.sock.recvfrom()
self.rcv_list.append(data) # 수신 데이터 저장
# replylists = data.splitlines()
# replylists = data.split(b"\r\n")
# print('replylists', replylists)
checknum += 1
self.resp_check.emit(checknum)
if self.interval == 0:
break
else:
self.msleep(self.interval)
self.sendcommands()
| en | 0.323567 | #!/usr/bin/python # -*- coding: utf-8 -*- # PACKET_SIZE = 1024 # print('timeout') # self.timer1 = None # print('cmd[0]: %s, cmd[1]: %s' % (cmd[0], cmd[1])) # sys.stdout.write('cmd[1]: %r\r\n' % cmd[1]) # print(cmd[1]) # hex_string = cmd[1].decode('hex') # self.dest_mac = (int(cmd[1], 16)).to_bytes(6, byteorder='big') # Hexadecimal string to hexadecimal binary # self.msg[self.size:] = self.dest_mac # print(self.size, self.msg) # print('check_parameter()', cmdset, cmdset[:2], cmdset[2:]) # print('check_parameter() OK', cmdset, cmdset[:2], cmdset[2:]) # def parseresponse(self): # print('readready value: ', len(readready), readready) # Pre-search / Single search # replylists = data.splitlines() # print('replylists', replylists) # sys.stdout.write("iter count: %r " % self.iter) # check if data reduplication # received data backup # replylists = data.splitlines() # self.isvalid = True # sys.stdout.write("%r\r\n" % replylists[i][:2]) # sys.stdout.write('self.isvalid == True\r\n') # param = replylists[i][2:].split(b':') # print('Search device:', self.mac_list) # return len(self.mac_list) # print(self.rcv_list) # print('OP_SETCOMMAND: rcv_list:', len(self.rcv_list[0]), self.rcv_list[0]) # sys.stdout.write("%s\r\n" % self.mac_list) # replylists = None # sys.stdout.write("iter count: %r " % self.iter) # 수신 데이터 저장 # replylists = data.splitlines() # replylists = data.split(b"\r\n") # print('replylists', replylists) | 2.317646 | 2 |
intents/drain.py | ng-cdi/intent-deploy | 1 | 6616259 | <gh_stars>1-10
from __future__ import annotations
from typing import Any, cast
import networkx
from intent_deployer import config
from intent_deployer.compile import (
Context,
register_dialogflow_handler,
register_intent_executor,
register_nile_handler,
)
from intent_deployer.deploy import deploy_policy
from intent_deployer.hosts import Hosts
from intent_deployer.network_state import Intent, IntentPath, Topology
from intent_deployer.parser import NileIntent
from intent_deployer.switches import Switches
from intent_deployer.types import IntentDeployException
from pydantic.main import BaseModel
from intents.utils import PushIntentPolicy
class DrainIntent(BaseModel):
node: str
@register_nile_handler("drainIntent")
def handle_nile_intent(intent: NileIntent) -> DrainIntent:
if len(intent.targets) != 1:
raise IntentDeployException(
"Exactly one target should be specified in the drain intent"
)
node = intent.targets[0].name
return DrainIntent(node=node)
@register_dialogflow_handler("drain")
def handle_dialogflow_intent(params: dict[str, Any]) -> DrainIntent:
return DrainIntent(node=params["node"])
@register_intent_executor(DrainIntent, PushIntentPolicy)
async def handle_drain_intent(ctx: Context, intent: DrainIntent):
hosts = await Hosts.from_api()
switches = await Switches.from_api()
topology = await Topology.from_current_state()
drain_intent = drain_node(topology, intent.node)
await ctx.confirm_if_needed(
f"This will drain intents from {intent.node} by rerouting {len(drain_intent.paths)} flows",
lambda: deploy_policy(
config.ngcdi_url / "push_intent",
PushIntentPolicy(
api_key=config.api_key,
routes=drain_intent.as_onos_intent(hosts, switches),
),
),
)
def perform_reroute(
network: networkx.Graph, path: list[str], drain_node: str
) -> IntentPath:
"""Reroute a `path` to avoid a node.
This currently splits the path on the `drain_node` we want to drain from,
then finds the shortest path between the two sides avoiding `drain_node`
`network` should be the network *without* `drain_node`.
This function raises if it is not possible to construct an alternative
route.
"""
node_idx = path.index(drain_node)
lhs, rhs = path[:node_idx], path[node_idx + 1 :]
try:
reroute_path = networkx.shortest_path(network, lhs[-1], rhs[0])
except networkx.NetworkXNoPath:
raise IntentDeployException(
f"No path can be constructed between {lhs[-1]} and {rhs[0]} after the removal of {drain_node}"
)
reroute_path = cast(list[str], reroute_path)
new_path = lhs[:-1] + reroute_path + rhs[1:]
return IntentPath(src=lhs[0], dst=rhs[-1], switch_path=new_path[1:-1])
def drain_node(topology: Topology, node: str) -> Intent:
"""Construct an intent that drains traffic from a node."""
without_node: networkx.Graph = topology.network.copy()
without_node.remove_node(node)
new_intent_paths = [
perform_reroute(without_node, intent_path.path, node)
for intent in topology.intents
for intent_path in intent.paths
if node in intent_path.path
]
return Intent(paths=new_intent_paths)
| from __future__ import annotations
from typing import Any, cast
import networkx
from intent_deployer import config
from intent_deployer.compile import (
Context,
register_dialogflow_handler,
register_intent_executor,
register_nile_handler,
)
from intent_deployer.deploy import deploy_policy
from intent_deployer.hosts import Hosts
from intent_deployer.network_state import Intent, IntentPath, Topology
from intent_deployer.parser import NileIntent
from intent_deployer.switches import Switches
from intent_deployer.types import IntentDeployException
from pydantic.main import BaseModel
from intents.utils import PushIntentPolicy
class DrainIntent(BaseModel):
    """Request to drain all traffic away from a single network node."""

    # Name of the node whose flows should be rerouted elsewhere.
    node: str
@register_nile_handler("drainIntent")
def handle_nile_intent(intent: NileIntent) -> DrainIntent:
    """Translate a parsed Nile drain intent into a :class:`DrainIntent`.

    Raises:
        IntentDeployException: unless exactly one target is given.
    """
    targets = intent.targets
    if len(targets) == 1:
        return DrainIntent(node=targets[0].name)
    raise IntentDeployException(
        "Exactly one target should be specified in the drain intent"
    )
@register_dialogflow_handler("drain")
def handle_dialogflow_intent(params: dict[str, Any]) -> DrainIntent:
    """Build a DrainIntent from Dialogflow parameters.

    Expects the target node under the ``"node"`` key; a missing key
    surfaces as a KeyError.
    """
    return DrainIntent(node=params.__getitem__("node"))
@register_intent_executor(DrainIntent, PushIntentPolicy)
async def handle_drain_intent(ctx: Context, intent: DrainIntent):
    """Drain ``intent.node`` by rerouting its flows and pushing a policy.

    Gathers the current network view (hosts, switches, topology),
    computes replacement routes with :func:`drain_node`, then deploys
    the policy to the NGCDI endpoint once the user confirms.
    """
    all_hosts = await Hosts.from_api()
    all_switches = await Switches.from_api()
    live_topology = await Topology.from_current_state()
    plan = drain_node(live_topology, intent.node)

    def _push_policy():
        # Only invoked after confirmation, so nothing is deployed eagerly.
        return deploy_policy(
            config.ngcdi_url / "push_intent",
            PushIntentPolicy(
                api_key=config.api_key,
                routes=plan.as_onos_intent(all_hosts, all_switches),
            ),
        )

    await ctx.confirm_if_needed(
        f"This will drain intents from {intent.node} by rerouting {len(plan.paths)} flows",
        _push_policy,
    )
def perform_reroute(
    network: networkx.Graph, path: list[str], drain_node: str
) -> IntentPath:
    """Reroute a `path` to avoid a node.

    This currently splits the path on the `drain_node` we want to drain from,
    then finds the shortest path between the two sides avoiding `drain_node`.
    `network` should be the network *without* `drain_node`.

    Raises:
        IntentDeployException: if `drain_node` is an endpoint of the path
            (there is no neighbour on one side to splice around), or if no
            alternative route exists once the node is removed.
    """
    # NOTE: the parameter shadows the module-level drain_node() function;
    # kept as-is to preserve the external interface.
    node_idx = path.index(drain_node)
    lhs, rhs = path[:node_idx], path[node_idx + 1 :]
    # An endpoint cannot be bypassed: one side of the split is empty and
    # the original code fell through to a bare IndexError on lhs[-1]/rhs[0].
    if not lhs or not rhs:
        raise IntentDeployException(
            f"Cannot drain {drain_node}: it is an endpoint of the path {path}"
        )
    try:
        reroute_path = networkx.shortest_path(network, lhs[-1], rhs[0])
    except networkx.NetworkXNoPath as exc:
        # Chain the cause explicitly so the NetworkX error stays visible.
        raise IntentDeployException(
            f"No path can be constructed between {lhs[-1]} and {rhs[0]} after the removal of {drain_node}"
        ) from exc
    reroute_path = cast(list[str], reroute_path)
    # The reroute includes the splice points, so drop them from lhs/rhs.
    new_path = lhs[:-1] + reroute_path + rhs[1:]
    # src/dst are the path endpoints; switch_path excludes both hosts.
    return IntentPath(src=lhs[0], dst=rhs[-1], switch_path=new_path[1:-1])
def drain_node(topology: Topology, node: str) -> Intent:
    """Construct an intent that drains traffic from a node."""
    # Remove the node from a copy of the graph so reroutes cannot use it.
    graph_without_node: networkx.Graph = topology.network.copy()
    graph_without_node.remove_node(node)
    replacement_paths = [
        perform_reroute(graph_without_node, candidate.path, node)
        for current_intent in topology.intents
        for candidate in current_intent.paths
        if node in candidate.path
    ]
    return Intent(paths=replacement_paths)
src/thread_indexer/__init__.py | cardoso-neto/archive-chan | 1 | 6616260 | <reponame>cardoso-neto/archive-chan<filename>src/thread_indexer/__init__.py
from .json_index import main
| from .json_index import main | none | 1 | 1.004011 | 1 | |
experiments/make_templates.py | MirunaPislar/Do-LSTMs-learn-Syntax | 1 | 6616261 | <gh_stars>1-10
from templates import TenseAgreementTemplates, CoordinationTemplates
from terminals import TenseAgreementTerminals, CoordinationTerminals
import argparse
import os
import pandas as pd
import random
parser = argparse.ArgumentParser(
description="Create targeted syntactic templates.")
parser.add_argument("--filename", type=str, default="data/my_template.tab",
help="Path where to save the templates.")
parser.add_argument("--type", type=str, default="tense",
help="Type of template to construct. "
"Choose between coord or tense.")
args = parser.parse_args()
class MakeAgreementTemplate:
def __init__(self):
if args.type == "coord":
self.terminals = CoordinationTerminals().terminals
self.rules = CoordinationTemplates().rules
elif args.type == "tense":
self.terminals = TenseAgreementTerminals().terminals
self.rules = TenseAgreementTemplates().rules
else:
raise ValueError("Unknown template name %s. Please, choose"
" between coord or tense." % args.type)
@staticmethod
def switch_tense(words, preterms_idx):
valid_verb_switches = {
"presBe": {"sg": ["will", "did", "has"],
"pl": ["will", "did", "have"]},
"past": {"sg": ["is", "was"],
"pl": ["are", "were"]},
"future": {"sg": ["is"],
"pl": ["are"]}}
new_words = []
my_number = preterms_idx.split("_")[-1]
my_tense = preterms_idx.split("_")[-2]
valid_verbs = valid_verb_switches[my_tense][my_number]
for word in words:
splits = word.split()
sampled_verb = random.choice(valid_verbs)
if len(splits) > 1:
new_words.append(" ".join(sampled_verb + splits[1:]))
else:
new_words.append(sampled_verb)
return new_words
@staticmethod
def switch_number(words, preterms_idx):
new_words = []
is_verb = preterms_idx[-1] == "V"
is_mod = len(preterms_idx) > 2 and preterms_idx[-3:] == "MOD"
for word in words:
splits = word.split()
if splits[0] == "is":
new_words.append(" ".join(["are"] + splits[1:]))
elif splits[0] == "are":
new_words.append(" ".join(["is"] + splits[1:]))
elif splits[0] == "was":
new_words.append(" ".join(["were"] + splits[1:]))
elif splits[0] == "were":
new_words.append(" ".join(["was"] + splits[1:]))
elif splits[0] == "has":
new_words.append(" ".join(["have"] + splits[1:]))
elif splits[0] == "have":
new_words.append(" ".join(["has"] + splits[1:]))
elif is_mod:
if len(splits) > 1:
if splits[0][-2:] == "es":
new_words.append(" ".join([splits[0][:-2]] + splits[1:]))
else:
new_words.append(" ".join([splits[0] + "es"] + splits[1:]))
else:
if word[-2:] == "es":
new_words.append(word[:-2])
else:
new_words.append(word + "es")
elif is_verb:
if len(splits) > 1:
if splits[0][-1] == "s":
new_words.append(" ".join([splits[0][:-1]] + splits[1:]))
else:
new_words.append(" ".join([splits[0] + "s"] + splits[1:]))
else:
if word[-1] == "s":
new_words.append(word[:-1])
else:
new_words.append(word + "s")
elif word[-4:] == "self":
new_words.append("themselves")
else:
new_words.append(word + "s")
return new_words
def switch_numbers(self, base_sent, variables, preterms):
new_sent = base_sent[:]
for idx in variables:
if args.type == "coord":
new_sent[idx] = self.switch_number(new_sent[idx], preterms[idx])
elif args.type == "tense":
new_sent[idx] = self.switch_tense(new_sent[idx], preterms[idx])
else:
raise ValueError("Unknown value for template type: %s."
"Choose between tense and coord." % args.type)
return new_sent
def make_variable_sentences(self, preterms, match):
base_sent = [self.terminals[p] for p in preterms]
grammatical = base_sent[:]
ungrammatical = self.switch_numbers(grammatical, match[1], preterms)
return [grammatical, ungrammatical]
class MakeTestCase:
def __init__(self, template, test_case):
self.template = template
self.test_case = test_case
self.sent_templates = self.get_rules()
def get_rules(self):
sent_templates = {"pattern": [], "sent": [], "sent_alt": []}
preterminals, templates = self.template.rules[self.test_case]
if templates is not None:
sentences = self.template.make_variable_sentences(
preterminals, templates["match"])
grammatical = list(self.expand_sent(sentences[0]))
ungrammatical = list(self.expand_sent(sentences[1]))
for i in range(len(grammatical)):
sent_templates["pattern"].append(self.test_case)
sent_templates["sent"].append(grammatical[i])
sent_templates["sent_alt"].append(ungrammatical[i])
return sent_templates
def expand_sent(self, sent, partial="", switch_ds=False):
if len(sent) == 1:
for word in sent[0]:
if switch_ds:
splits = partial.split(" ")
no = splits[0]
the = splits[3]
splits = partial.split()
partial_one = " ".join([x for x in splits[1:3]])
partial_two = " ".join([x for x in splits[4:]])
yield " ".join([the, partial_one, no, partial_two, word])
elif word not in partial:
yield partial + word
else:
yield "None"
else:
for word in sent[0]:
for x in self.expand_sent(
sent=sent[1:],
partial=partial + word + " ",
switch_ds=switch_ds):
if x != "None":
yield x
def main():
if os.path.isfile(args.filename):
os.remove(args.filename)
test_cases = MakeAgreementTemplate()
for case in test_cases.rules.keys():
sentences = MakeTestCase(test_cases.rules, case)
df = pd.DataFrame.from_dict(sentences.sent_templates)
if os.path.isfile(args.filename):
df.to_csv(
args.filename, mode="a", header=False, index=False, sep="\t")
else:
df.to_csv(
args.filename, mode="a", header=True, index=False, sep="\t")
if __name__ == "__main__":
main()
| from templates import TenseAgreementTemplates, CoordinationTemplates
from terminals import TenseAgreementTerminals, CoordinationTerminals
import argparse
import os
import pandas as pd
import random
parser = argparse.ArgumentParser(
description="Create targeted syntactic templates.")
parser.add_argument("--filename", type=str, default="data/my_template.tab",
help="Path where to save the templates.")
parser.add_argument("--type", type=str, default="tense",
help="Type of template to construct. "
"Choose between coord or tense.")
args = parser.parse_args()
class MakeAgreementTemplate:
def __init__(self):
if args.type == "coord":
self.terminals = CoordinationTerminals().terminals
self.rules = CoordinationTemplates().rules
elif args.type == "tense":
self.terminals = TenseAgreementTerminals().terminals
self.rules = TenseAgreementTemplates().rules
else:
raise ValueError("Unknown template name %s. Please, choose"
" between coord or tense." % args.type)
@staticmethod
def switch_tense(words, preterms_idx):
valid_verb_switches = {
"presBe": {"sg": ["will", "did", "has"],
"pl": ["will", "did", "have"]},
"past": {"sg": ["is", "was"],
"pl": ["are", "were"]},
"future": {"sg": ["is"],
"pl": ["are"]}}
new_words = []
my_number = preterms_idx.split("_")[-1]
my_tense = preterms_idx.split("_")[-2]
valid_verbs = valid_verb_switches[my_tense][my_number]
for word in words:
splits = word.split()
sampled_verb = random.choice(valid_verbs)
if len(splits) > 1:
new_words.append(" ".join(sampled_verb + splits[1:]))
else:
new_words.append(sampled_verb)
return new_words
@staticmethod
def switch_number(words, preterms_idx):
new_words = []
is_verb = preterms_idx[-1] == "V"
is_mod = len(preterms_idx) > 2 and preterms_idx[-3:] == "MOD"
for word in words:
splits = word.split()
if splits[0] == "is":
new_words.append(" ".join(["are"] + splits[1:]))
elif splits[0] == "are":
new_words.append(" ".join(["is"] + splits[1:]))
elif splits[0] == "was":
new_words.append(" ".join(["were"] + splits[1:]))
elif splits[0] == "were":
new_words.append(" ".join(["was"] + splits[1:]))
elif splits[0] == "has":
new_words.append(" ".join(["have"] + splits[1:]))
elif splits[0] == "have":
new_words.append(" ".join(["has"] + splits[1:]))
elif is_mod:
if len(splits) > 1:
if splits[0][-2:] == "es":
new_words.append(" ".join([splits[0][:-2]] + splits[1:]))
else:
new_words.append(" ".join([splits[0] + "es"] + splits[1:]))
else:
if word[-2:] == "es":
new_words.append(word[:-2])
else:
new_words.append(word + "es")
elif is_verb:
if len(splits) > 1:
if splits[0][-1] == "s":
new_words.append(" ".join([splits[0][:-1]] + splits[1:]))
else:
new_words.append(" ".join([splits[0] + "s"] + splits[1:]))
else:
if word[-1] == "s":
new_words.append(word[:-1])
else:
new_words.append(word + "s")
elif word[-4:] == "self":
new_words.append("themselves")
else:
new_words.append(word + "s")
return new_words
def switch_numbers(self, base_sent, variables, preterms):
new_sent = base_sent[:]
for idx in variables:
if args.type == "coord":
new_sent[idx] = self.switch_number(new_sent[idx], preterms[idx])
elif args.type == "tense":
new_sent[idx] = self.switch_tense(new_sent[idx], preterms[idx])
else:
raise ValueError("Unknown value for template type: %s."
"Choose between tense and coord." % args.type)
return new_sent
def make_variable_sentences(self, preterms, match):
base_sent = [self.terminals[p] for p in preterms]
grammatical = base_sent[:]
ungrammatical = self.switch_numbers(grammatical, match[1], preterms)
return [grammatical, ungrammatical]
class MakeTestCase:
def __init__(self, template, test_case):
self.template = template
self.test_case = test_case
self.sent_templates = self.get_rules()
def get_rules(self):
sent_templates = {"pattern": [], "sent": [], "sent_alt": []}
preterminals, templates = self.template.rules[self.test_case]
if templates is not None:
sentences = self.template.make_variable_sentences(
preterminals, templates["match"])
grammatical = list(self.expand_sent(sentences[0]))
ungrammatical = list(self.expand_sent(sentences[1]))
for i in range(len(grammatical)):
sent_templates["pattern"].append(self.test_case)
sent_templates["sent"].append(grammatical[i])
sent_templates["sent_alt"].append(ungrammatical[i])
return sent_templates
def expand_sent(self, sent, partial="", switch_ds=False):
if len(sent) == 1:
for word in sent[0]:
if switch_ds:
splits = partial.split(" ")
no = splits[0]
the = splits[3]
splits = partial.split()
partial_one = " ".join([x for x in splits[1:3]])
partial_two = " ".join([x for x in splits[4:]])
yield " ".join([the, partial_one, no, partial_two, word])
elif word not in partial:
yield partial + word
else:
yield "None"
else:
for word in sent[0]:
for x in self.expand_sent(
sent=sent[1:],
partial=partial + word + " ",
switch_ds=switch_ds):
if x != "None":
yield x
def main():
if os.path.isfile(args.filename):
os.remove(args.filename)
test_cases = MakeAgreementTemplate()
for case in test_cases.rules.keys():
sentences = MakeTestCase(test_cases.rules, case)
df = pd.DataFrame.from_dict(sentences.sent_templates)
if os.path.isfile(args.filename):
df.to_csv(
args.filename, mode="a", header=False, index=False, sep="\t")
else:
df.to_csv(
args.filename, mode="a", header=True, index=False, sep="\t")
if __name__ == "__main__":
main() | none | 1 | 2.798775 | 3 | |
src/Account/forms.py | bolshuq/InventoryManagementSystem-API | 2 | 6616262 | <filename>src/Account/forms.py
from django.contrib.auth.forms import UserCreationForm
from .models import Users
from django.contrib.auth import forms
from django.forms import ModelForm
from .models import Users
class CustomUserCreationForm(UserCreationForm):
class Meta(UserCreationForm):
model = Users
fields = ['email','username','phone','<PASSWORD>','<PASSWORD>']
class AuthenticationForm(ModelForm): # Note: forms.Form NOT forms.ModelForm
class Meta:
model = Users
fields = ['email', 'password'] | <filename>src/Account/forms.py
from django.contrib.auth.forms import UserCreationForm
from .models import Users
from django.contrib.auth import forms
from django.forms import ModelForm
from .models import Users
class CustomUserCreationForm(UserCreationForm):
class Meta(UserCreationForm):
model = Users
fields = ['email','username','phone','<PASSWORD>','<PASSWORD>']
class AuthenticationForm(ModelForm): # Note: forms.Form NOT forms.ModelForm
class Meta:
model = Users
fields = ['email', 'password'] | en | 0.356452 | # Note: forms.Form NOT forms.ModelForm | 2.235816 | 2 |
tests.py | perrygeo/jenks | 99 | 6616263 | <reponame>perrygeo/jenks
import json
from jenks import jenks
def test_json():
data = json.load(open('test.json'))
breaks = jenks(data, 5)
assert [round(float(v), 5) for v in breaks] == \
[0.00281, 2.09355, 4.2055, 6.17815, 8.09176, 9.99798]
def test_short():
data = [1, 2, 3, 100]
breaks = jenks(data, 2)
assert [round(v, 5) for v in breaks] == [1.0, 3.0, 100.0]
| import json
from jenks import jenks
def test_json():
data = json.load(open('test.json'))
breaks = jenks(data, 5)
assert [round(float(v), 5) for v in breaks] == \
[0.00281, 2.09355, 4.2055, 6.17815, 8.09176, 9.99798]
def test_short():
data = [1, 2, 3, 100]
breaks = jenks(data, 2)
assert [round(v, 5) for v in breaks] == [1.0, 3.0, 100.0] | none | 1 | 2.743214 | 3 | |
FinanceAnalysisAlgoTrading/04-Visualization-Matplotlib-Pandas_01-Matplotlib_MatplotlibConceptsLecture.py | enriqueescobar-askida/Kinito.Finance | 2 | 6616264 | #!/usr/bin/env python
# coding: utf-8
# # Matplotlib Overview Lecture
# ## Introduction
# Matplotlib is the "grandfather" library of data visualization with Python. It was created by <NAME>. He created it to try to replicate MatLab's (another programming language) plotting capabilities in Python. So if you happen to be familiar with matlab, matplotlib will feel natural to you.
#
# It is an excellent 2D and 3D graphics library for generating scientific figures.
#
# Some of the major Pros of Matplotlib are:
#
# * Generally easy to get started for simple plots
# * Support for custom labels and texts
# * Great control of every element in a figure
# * High-quality output in many formats
# * Very customizable in general
#
# Matplotlib allows you to create reproducible figures programmatically. Let's learn how to use it! Before continuing this lecture, I encourage you just to explore the official Matplotlib web page: http://matplotlib.org/
# ## Installation
# You'll need to install matplotlib first with either:
# conda install matplotlib
# pip install matplotlib
# ## Importing
# Import the `matplotlib.pyplot` module under the name `plt` (the tidy way):
import matplotlib.pyplot as plt
# You'll also need to use this line to see plots in the notebook:
get_ipython().run_line_magic('matplotlib', 'inline')
# That line is only for jupyter notebooks, if you are using another editor, you'll use: **plt.show()** at the end of all your plotting commands to have the figure pop up in another window.
# # Basic Example
# Let's walk through a very simple example using two numpy arrays:
# ### Example
# Let's walk through a very simple example using two numpy arrays. You can also use lists, but most likely you'll be passing numpy arrays or pandas columns (which essentially also behave like arrays).
# ** The data we want to plot:**
import numpy as np
x = np.linspace(0, 5, 11)
y = x ** 2
# In[4]:
x
# In[5]:
y
# ## Basic Matplotlib Commands
#
# We can create a very simple line plot using the following ( I encourage you to pause and use Shift+Tab along the way to check out the document strings for the functions we are using).
# In[6]:
plt.plot(x, y, 'r') # 'r' is the color red
plt.xlabel('X Axis Title Here')
plt.ylabel('Y Axis Title Here')
plt.title('String Title Here')
plt.show()
# ## Creating Multiplots on Same Canvas
# In[7]:
# plt.subplot(nrows, ncols, plot_number)
plt.subplot(1, 2, 1)
plt.plot(x, y, 'r--') # More on color options later
plt.subplot(1, 2, 2)
plt.plot(y, x, 'g*-');
# ___
# # Matplotlib Object Oriented Method
# Now that we've seen the basics, let's break it all down with a more formal introduction of Matplotlib's Object Oriented API. This means we will instantiate figure objects and then call methods or attributes from that object.
# ## Introduction to the Object Oriented Method
# The main idea in using the more formal Object Oriented method is to create figure objects and then just call methods or attributes off of that object. This approach is nicer when dealing with a canvas that has multiple plots on it.
#
# To begin we create a figure instance. Then we can add axes to that figure:
# In[8]:
# Create Figure (empty canvas)
fig = plt.figure()
# Add set of axes to figure
axes = fig.add_axes([0.1, 0.1, 0.8, 0.8]) # left, bottom, width, height (range 0 to 1)
# Plot on that set of axes
axes.plot(x, y, 'b')
axes.set_xlabel('Set X Label') # Notice the use of set_ to begin methods
axes.set_ylabel('Set y Label')
axes.set_title('Set Title')
# Code is a little more complicated, but the advantage is that we now have full control of where the plot axes are placed, and we can easily add more than one axis to the figure:
# In[9]:
# Creates blank canvas
fig = plt.figure()
axes1 = fig.add_axes([0.1, 0.1, 0.8, 0.8]) # main axes
axes2 = fig.add_axes([0.2, 0.5, 0.4, 0.3]) # inset axes
# Larger Figure Axes 1
axes1.plot(x, y, 'b')
axes1.set_xlabel('X_label_axes2')
axes1.set_ylabel('Y_label_axes2')
axes1.set_title('Axes 2 Title')
# Insert Figure Axes 2
axes2.plot(y, x, 'r')
axes2.set_xlabel('X_label_axes2')
axes2.set_ylabel('Y_label_axes2')
axes2.set_title('Axes 2 Title');
# ## subplots()
#
# The plt.subplots() object will act as a more automatic axis manager.
#
# Basic use cases:
# In[10]:
# Use similar to plt.figure() except use tuple unpacking to grab fig and axes
fig, axes = plt.subplots()
# Now use the axes object to add stuff to plot
axes.plot(x, y, 'r')
axes.set_xlabel('x')
axes.set_ylabel('y')
axes.set_title('title');
# Then you can specify the number of rows and columns when creating the subplots() object:
# In[11]:
# Empty canvas of 1 by 2 subplots
fig, axes = plt.subplots(nrows=1, ncols=2)
# In[12]:
# Axes is an array of axes to plot on
axes
# We can iterate through this array:
# In[13]:
for ax in axes:
ax.plot(x, y, 'b')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_title('title')
# Display the figure object
fig
# A common issue with matplolib is overlapping subplots or figures. We ca use **fig.tight_layout()** or **plt.tight_layout()** method, which automatically adjusts the positions of the axes on the figure canvas so that there is no overlapping content:
# In[14]:
fig, axes = plt.subplots(nrows=1, ncols=2)
for ax in axes:
ax.plot(x, y, 'g')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_title('title')
fig
plt.tight_layout()
# ### Figure size, aspect ratio and DPI
# Matplotlib allows the aspect ratio, DPI and figure size to be specified when the Figure object is created. You can use the `figsize` and `dpi` keyword arguments.
# * `figsize` is a tuple of the width and height of the figure in inches
# * `dpi` is the dots-per-inch (pixel per inch).
#
# For example:
# In[15]:
fig = plt.figure(figsize=(8, 4), dpi=100)
# The same arguments can also be passed to layout managers, such as the `subplots` function:
# In[16]:
fig, axes = plt.subplots(figsize=(12, 3))
axes.plot(x, y, 'r')
axes.set_xlabel('x')
axes.set_ylabel('y')
axes.set_title('title');
# ## Saving figures
# Matplotlib can generate high-quality output in a number formats, including PNG, JPG, EPS, SVG, PGF and PDF.
# To save a figure to a file we can use the `savefig` method in the `Figure` class:
# In[17]:
fig.savefig("04-Visualization-Matplotlib-Pandas_01-Matplotlib_MatplotlibConceptsLecture_filename.png")
# Here we can also optionally specify the DPI and choose between different output formats:
# In[18]:
fig.savefig("04-Visualization-Matplotlib-Pandas_01-Matplotlib_MatplotlibConceptsLecture_filename.png", dpi=200)
# ____
# ## Legends, labels and titles
# Now that we have covered the basics of how to create a figure canvas and add axes instances to the canvas, let's look at how decorate a figure with titles, axis labels, and legends.
# **Figure titles**
#
# A title can be added to each axis instance in a figure. To set the title, use the `set_title` method in the axes instance:
# In[19]:
ax.set_title("title");
# **Axis labels**
#
# Similarly, with the methods `set_xlabel` and `set_ylabel`, we can set the labels of the X and Y axes:
# In[20]:
ax.set_xlabel("x")
ax.set_ylabel("y");
# ### Legends
# You can use the **label="label text"** keyword argument when plots or other objects are added to the figure, and then using the **legend** method without arguments to add the legend to the figure:
# In[21]:
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
ax.plot(x, x ** 2, label="x**2")
ax.plot(x, x ** 3, label="x**3")
ax.legend()
# Notice how are legend overlaps some of the actual plot!
#
# The **legend** function takes an optional keyword argument **loc** that can be used to specify where in the figure the legend is to be drawn. The allowed values of **loc** are numerical codes for the various places the legend can be drawn. See the [documentation page](http://matplotlib.org/users/legend_guide.html#legend-location) for details. Some of the most common **loc** values are:
# In[22]:
# Lots of options....
ax.legend(loc=1) # upper right corner
ax.legend(loc=2) # upper left corner
ax.legend(loc=3) # lower left corner
ax.legend(loc=4) # lower right corner
# .. many more options are available
# Most common to choose
ax.legend(loc=0) # let matplotlib decide the optimal location
fig
# ## Setting colors, linewidths, linetypes
#
# Matplotlib gives you *a lot* of options for customizing colors, linewidths, and linetypes.
#
# There is the basic MATLAB like syntax (which I would suggest you avoid using for more clairty sake:
# ### Colors with MatLab like syntax
# With matplotlib, we can define the colors of lines and other graphical elements in a number of ways. First of all, we can use the MATLAB-like syntax where `'b'` means blue, `'g'` means green, etc. The MATLAB API for selecting line styles are also supported: where, for example, 'b.-' means a blue line with dots:
# In[23]:
# MATLAB style line color and style
fig, ax = plt.subplots()
ax.plot(x, x ** 2, 'b.-') # blue line with dots
ax.plot(x, x ** 3, 'g--') # green dashed line
# ### Colors with the color= parameter
# We can also define colors by their names or RGB hex codes and optionally provide an alpha value using the `color` and `alpha` keyword arguments. Alpha indicates opacity.
# In[24]:
fig, ax = plt.subplots()
ax.plot(x, x + 1, color="blue", alpha=0.5) # half-transparant
ax.plot(x, x + 2, color="#8B008B") # RGB hex code
ax.plot(x, x + 3, color="#FF8C00") # RGB hex code
# ### Line and marker styles
# To change the line width, we can use the `linewidth` or `lw` keyword argument. The line style can be selected using the `linestyle` or `ls` keyword arguments:
# In[25]:
fig, ax = plt.subplots(figsize=(12, 6))
ax.plot(x, x + 1, color="red", linewidth=0.25)
ax.plot(x, x + 2, color="red", linewidth=0.50)
ax.plot(x, x + 3, color="red", linewidth=1.00)
ax.plot(x, x + 4, color="red", linewidth=2.00)
# possible linestype options ‘-‘, ‘–’, ‘-.’, ‘:’, ‘steps’
ax.plot(x, x + 5, color="green", lw=3, linestyle='-')
ax.plot(x, x + 6, color="green", lw=3, ls='-.')
ax.plot(x, x + 7, color="green", lw=3, ls=':')
# custom dash
line, = ax.plot(x, x + 8, color="black", lw=1.50)
line.set_dashes([5, 10, 15, 10]) # format: line length, space length, ...
# possible marker symbols: marker = '+', 'o', '*', 's', ',', '.', '1', '2', '3', '4', ...
ax.plot(x, x + 9, color="blue", lw=3, ls='-', marker='+')
ax.plot(x, x + 10, color="blue", lw=3, ls='--', marker='o')
ax.plot(x, x + 11, color="blue", lw=3, ls='-', marker='s')
ax.plot(x, x + 12, color="blue", lw=3, ls='--', marker='1')
# marker size and color
ax.plot(x, x + 13, color="purple", lw=1, ls='-', marker='o', markersize=2)
ax.plot(x, x + 14, color="purple", lw=1, ls='-', marker='o', markersize=4)
ax.plot(x, x + 15, color="purple", lw=1, ls='-', marker='o', markersize=8, markerfacecolor="red")
ax.plot(x, x + 16, color="purple", lw=1, ls='-', marker='s', markersize=8,
markerfacecolor="yellow", markeredgewidth=3, markeredgecolor="green");
# ### Control over axis appearance
# In this section we will look at controlling axis sizing properties in a matplotlib figure.
# ## Plot range
# We can configure the ranges of the axes using the `set_ylim` and `set_xlim` methods in the axis object, or `axis('tight')` for automatically getting "tightly fitted" axes ranges:
# In[26]:
fig, axes = plt.subplots(1, 3, figsize=(12, 4))
axes[0].plot(x, x ** 2, x, x ** 3)
axes[0].set_title("default axes ranges")
axes[1].plot(x, x ** 2, x, x ** 3)
axes[1].axis('tight')
axes[1].set_title("tight axes")
axes[2].plot(x, x ** 2, x, x ** 3)
axes[2].set_ylim([0, 60])
axes[2].set_xlim([2, 5])
axes[2].set_title("custom axes range");
# # Special Plot Types
#
# There are many specialized plots we can create, such as barplots, histograms, scatter plots, and much more. Most of these type of plots we will actually create using pandas. But here are a few examples of these type of plots:
# In[27]:
plt.scatter(x, y)
# In[28]:
from random import sample
data = sample(range(1, 1000), 100)
plt.hist(data)
# In[29]:
data = [np.random.normal(0, std, 100) for std in range(1, 4)]
# rectangular box plot
plt.boxplot(data, vert=True, patch_artist=True);
# ## Further reading
# * http://www.matplotlib.org - The project web page for matplotlib.
# * https://github.com/matplotlib/matplotlib - The source code for matplotlib.
# * http://matplotlib.org/gallery.html - A large gallery showcaseing various types of plots matplotlib can create. Highly recommended!
# * http://www.loria.fr/~rougier/teaching/matplotlib - A good matplotlib tutorial.
# * http://scipy-lectures.github.io/matplotlib/matplotlib.html - Another good matplotlib reference.
#
| #!/usr/bin/env python
# coding: utf-8
# # Matplotlib Overview Lecture
# ## Introduction
# Matplotlib is the "grandfather" library of data visualization with Python. It was created by <NAME>. He created it to try to replicate MatLab's (another programming language) plotting capabilities in Python. So if you happen to be familiar with matlab, matplotlib will feel natural to you.
#
# It is an excellent 2D and 3D graphics library for generating scientific figures.
#
# Some of the major Pros of Matplotlib are:
#
# * Generally easy to get started for simple plots
# * Support for custom labels and texts
# * Great control of every element in a figure
# * High-quality output in many formats
# * Very customizable in general
#
# Matplotlib allows you to create reproducible figures programmatically. Let's learn how to use it! Before continuing this lecture, I encourage you just to explore the official Matplotlib web page: http://matplotlib.org/
# ## Installation
# You'll need to install matplotlib first with either:
# conda install matplotlib
# pip install matplotlib
# ## Importing
# Import the `matplotlib.pyplot` module under the name `plt` (the tidy way):
import matplotlib.pyplot as plt
# You'll also need to use this line to see plots in the notebook:
get_ipython().run_line_magic('matplotlib', 'inline')
# That line is only for jupyter notebooks, if you are using another editor, you'll use: **plt.show()** at the end of all your plotting commands to have the figure pop up in another window.
# # Basic Example
# Let's walk through a very simple example using two numpy arrays:
# ### Example
# Let's walk through a very simple example using two numpy arrays. You can also use lists, but most likely you'll be passing numpy arrays or pandas columns (which essentially also behave like arrays).
# ** The data we want to plot:**
import numpy as np
x = np.linspace(0, 5, 11)
y = x ** 2
# In[4]:
x
# In[5]:
y
# ## Basic Matplotlib Commands
#
# We can create a very simple line plot using the following ( I encourage you to pause and use Shift+Tab along the way to check out the document strings for the functions we are using).
# In[6]:
plt.plot(x, y, 'r') # 'r' is the color red
plt.xlabel('X Axis Title Here')
plt.ylabel('Y Axis Title Here')
plt.title('String Title Here')
plt.show()
# ## Creating Multiplots on Same Canvas
# In[7]:
# plt.subplot(nrows, ncols, plot_number)
plt.subplot(1, 2, 1)
plt.plot(x, y, 'r--') # More on color options later
plt.subplot(1, 2, 2)
plt.plot(y, x, 'g*-');
# ___
# # Matplotlib Object Oriented Method
# Now that we've seen the basics, let's break it all down with a more formal introduction of Matplotlib's Object Oriented API. This means we will instantiate figure objects and then call methods or attributes from that object.
# ## Introduction to the Object Oriented Method
# The main idea in using the more formal Object Oriented method is to create figure objects and then just call methods or attributes off of that object. This approach is nicer when dealing with a canvas that has multiple plots on it.
#
# To begin we create a figure instance. Then we can add axes to that figure:
# In[8]:
# Create Figure (empty canvas)
fig = plt.figure()
# Add set of axes to figure
axes = fig.add_axes([0.1, 0.1, 0.8, 0.8]) # left, bottom, width, height (range 0 to 1)
# Plot on that set of axes
axes.plot(x, y, 'b')
axes.set_xlabel('Set X Label') # Notice the use of set_ to begin methods
axes.set_ylabel('Set y Label')
axes.set_title('Set Title')
# Code is a little more complicated, but the advantage is that we now have full control of where the plot axes are placed, and we can easily add more than one axis to the figure:
# In[9]:
# Creates blank canvas
fig = plt.figure()
axes1 = fig.add_axes([0.1, 0.1, 0.8, 0.8]) # main axes
axes2 = fig.add_axes([0.2, 0.5, 0.4, 0.3]) # inset axes
# Larger Figure Axes 1
axes1.plot(x, y, 'b')
axes1.set_xlabel('X_label_axes2')
axes1.set_ylabel('Y_label_axes2')
axes1.set_title('Axes 2 Title')
# Insert Figure Axes 2
axes2.plot(y, x, 'r')
axes2.set_xlabel('X_label_axes2')
axes2.set_ylabel('Y_label_axes2')
axes2.set_title('Axes 2 Title');
# ## subplots()
#
# The plt.subplots() object will act as a more automatic axis manager.
#
# Basic use cases:
# In[10]:
# Use similar to plt.figure() except use tuple unpacking to grab fig and axes
fig, axes = plt.subplots()
# Now use the axes object to add stuff to plot
axes.plot(x, y, 'r')
axes.set_xlabel('x')
axes.set_ylabel('y')
axes.set_title('title');
# Then you can specify the number of rows and columns when creating the subplots() object:
# In[11]:
# Empty canvas of 1 by 2 subplots
fig, axes = plt.subplots(nrows=1, ncols=2)
# In[12]:
# Axes is an array of axes to plot on
axes
# We can iterate through this array:
# In[13]:
for ax in axes:
ax.plot(x, y, 'b')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_title('title')
# Display the figure object
fig
# A common issue with matplotlib is overlapping subplots or figures. We can use the **fig.tight_layout()** or **plt.tight_layout()** method, which automatically adjusts the positions of the axes on the figure canvas so that there is no overlapping content:
# In[14]:
fig, axes = plt.subplots(nrows=1, ncols=2)
for ax in axes:
ax.plot(x, y, 'g')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_title('title')
fig
plt.tight_layout()
# ### Figure size, aspect ratio and DPI
# Matplotlib allows the aspect ratio, DPI and figure size to be specified when the Figure object is created. You can use the `figsize` and `dpi` keyword arguments.
# * `figsize` is a tuple of the width and height of the figure in inches
# * `dpi` is the dots-per-inch (pixel per inch).
#
# For example:
# In[15]:
fig = plt.figure(figsize=(8, 4), dpi=100)
# The same arguments can also be passed to layout managers, such as the `subplots` function:
# In[16]:
fig, axes = plt.subplots(figsize=(12, 3))
axes.plot(x, y, 'r')
axes.set_xlabel('x')
axes.set_ylabel('y')
axes.set_title('title');
# ## Saving figures
# Matplotlib can generate high-quality output in a number formats, including PNG, JPG, EPS, SVG, PGF and PDF.
# To save a figure to a file we can use the `savefig` method in the `Figure` class:
# In[17]:
fig.savefig("04-Visualization-Matplotlib-Pandas_01-Matplotlib_MatplotlibConceptsLecture_filename.png")
# Here we can also optionally specify the DPI and choose between different output formats:
# In[18]:
fig.savefig("04-Visualization-Matplotlib-Pandas_01-Matplotlib_MatplotlibConceptsLecture_filename.png", dpi=200)
# ____
# ## Legends, labels and titles
# Now that we have covered the basics of how to create a figure canvas and add axes instances to the canvas, let's look at how decorate a figure with titles, axis labels, and legends.
# **Figure titles**
#
# A title can be added to each axis instance in a figure. To set the title, use the `set_title` method in the axes instance:
# In[19]:
ax.set_title("title");
# **Axis labels**
#
# Similarly, with the methods `set_xlabel` and `set_ylabel`, we can set the labels of the X and Y axes:
# In[20]:
ax.set_xlabel("x")
ax.set_ylabel("y");
# ### Legends
# You can use the **label="label text"** keyword argument when plots or other objects are added to the figure, and then using the **legend** method without arguments to add the legend to the figure:
# In[21]:
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
ax.plot(x, x ** 2, label="x**2")
ax.plot(x, x ** 3, label="x**3")
ax.legend()
# Notice how our legend overlaps some of the actual plot!
#
# The **legend** function takes an optional keyword argument **loc** that can be used to specify where in the figure the legend is to be drawn. The allowed values of **loc** are numerical codes for the various places the legend can be drawn. See the [documentation page](http://matplotlib.org/users/legend_guide.html#legend-location) for details. Some of the most common **loc** values are:
# In[22]:
# Lots of options....
ax.legend(loc=1) # upper right corner
ax.legend(loc=2) # upper left corner
ax.legend(loc=3) # lower left corner
ax.legend(loc=4) # lower right corner
# .. many more options are available
# Most common to choose
ax.legend(loc=0) # let matplotlib decide the optimal location
fig
# ## Setting colors, linewidths, linetypes
#
# Matplotlib gives you *a lot* of options for customizing colors, linewidths, and linetypes.
#
# There is the basic MATLAB-like syntax (which I would suggest you avoid using, for clarity's sake):
# ### Colors with MatLab like syntax
# With matplotlib, we can define the colors of lines and other graphical elements in a number of ways. First of all, we can use the MATLAB-like syntax where `'b'` means blue, `'g'` means green, etc. The MATLAB API for selecting line styles are also supported: where, for example, 'b.-' means a blue line with dots:
# In[23]:
# MATLAB style line color and style
fig, ax = plt.subplots()
ax.plot(x, x ** 2, 'b.-') # blue line with dots
ax.plot(x, x ** 3, 'g--') # green dashed line
# ### Colors with the color= parameter
# We can also define colors by their names or RGB hex codes and optionally provide an alpha value using the `color` and `alpha` keyword arguments. Alpha indicates opacity.
# In[24]:
fig, ax = plt.subplots()
ax.plot(x, x + 1, color="blue", alpha=0.5) # half-transparant
ax.plot(x, x + 2, color="#8B008B") # RGB hex code
ax.plot(x, x + 3, color="#FF8C00") # RGB hex code
# ### Line and marker styles
# To change the line width, we can use the `linewidth` or `lw` keyword argument. The line style can be selected using the `linestyle` or `ls` keyword arguments:
# In[25]:
fig, ax = plt.subplots(figsize=(12, 6))
ax.plot(x, x + 1, color="red", linewidth=0.25)
ax.plot(x, x + 2, color="red", linewidth=0.50)
ax.plot(x, x + 3, color="red", linewidth=1.00)
ax.plot(x, x + 4, color="red", linewidth=2.00)
# possible linestype options ‘-‘, ‘–’, ‘-.’, ‘:’, ‘steps’
ax.plot(x, x + 5, color="green", lw=3, linestyle='-')
ax.plot(x, x + 6, color="green", lw=3, ls='-.')
ax.plot(x, x + 7, color="green", lw=3, ls=':')
# custom dash
line, = ax.plot(x, x + 8, color="black", lw=1.50)
line.set_dashes([5, 10, 15, 10]) # format: line length, space length, ...
# possible marker symbols: marker = '+', 'o', '*', 's', ',', '.', '1', '2', '3', '4', ...
ax.plot(x, x + 9, color="blue", lw=3, ls='-', marker='+')
ax.plot(x, x + 10, color="blue", lw=3, ls='--', marker='o')
ax.plot(x, x + 11, color="blue", lw=3, ls='-', marker='s')
ax.plot(x, x + 12, color="blue", lw=3, ls='--', marker='1')
# marker size and color
ax.plot(x, x + 13, color="purple", lw=1, ls='-', marker='o', markersize=2)
ax.plot(x, x + 14, color="purple", lw=1, ls='-', marker='o', markersize=4)
ax.plot(x, x + 15, color="purple", lw=1, ls='-', marker='o', markersize=8, markerfacecolor="red")
ax.plot(x, x + 16, color="purple", lw=1, ls='-', marker='s', markersize=8,
markerfacecolor="yellow", markeredgewidth=3, markeredgecolor="green");
# ### Control over axis appearance
# In this section we will look at controlling axis sizing properties in a matplotlib figure.
# ## Plot range
# We can configure the ranges of the axes using the `set_ylim` and `set_xlim` methods in the axis object, or `axis('tight')` for automatically getting "tightly fitted" axes ranges:
# In[26]:
fig, axes = plt.subplots(1, 3, figsize=(12, 4))
axes[0].plot(x, x ** 2, x, x ** 3)
axes[0].set_title("default axes ranges")
axes[1].plot(x, x ** 2, x, x ** 3)
axes[1].axis('tight')
axes[1].set_title("tight axes")
axes[2].plot(x, x ** 2, x, x ** 3)
axes[2].set_ylim([0, 60])
axes[2].set_xlim([2, 5])
axes[2].set_title("custom axes range");
# # Special Plot Types
#
# There are many specialized plots we can create, such as barplots, histograms, scatter plots, and much more. Most of these type of plots we will actually create using pandas. But here are a few examples of these type of plots:
# In[27]:
plt.scatter(x, y)
# In[28]:
from random import sample
data = sample(range(1, 1000), 100)
plt.hist(data)
# In[29]:
data = [np.random.normal(0, std, 100) for std in range(1, 4)]
# rectangular box plot
plt.boxplot(data, vert=True, patch_artist=True);
# ## Further reading
# * http://www.matplotlib.org - The project web page for matplotlib.
# * https://github.com/matplotlib/matplotlib - The source code for matplotlib.
# * http://matplotlib.org/gallery.html - A large gallery showcasing various types of plots matplotlib can create. Highly recommended!
# * http://www.loria.fr/~rougier/teaching/matplotlib - A good matplotlib tutorial.
# * http://scipy-lectures.github.io/matplotlib/matplotlib.html - Another good matplotlib reference.
#
| en | 0.763873 | #!/usr/bin/env python # coding: utf-8 # # Matplotlib Overview Lecture # ## Introduction # Matplotlib is the "grandfather" library of data visualization with Python. It was created by <NAME>. He created it to try to replicate MatLab's (another programming language) plotting capabilities in Python. So if you happen to be familiar with matlab, matplotlib will feel natural to you. # # It is an excellent 2D and 3D graphics library for generating scientific figures. # # Some of the major Pros of Matplotlib are: # # * Generally easy to get started for simple plots # * Support for custom labels and texts # * Great control of every element in a figure # * High-quality output in many formats # * Very customizable in general # # Matplotlib allows you to create reproducible figures programmatically. Let's learn how to use it! Before continuing this lecture, I encourage you just to explore the official Matplotlib web page: http://matplotlib.org/ # ## Installation # You'll need to install matplotlib first with either: # conda install matplotlib # pip install matplotlib # ## Importing # Import the `matplotlib.pyplot` module under the name `plt` (the tidy way): # You'll also need to use this line to see plots in the notebook: # That line is only for jupyter notebooks, if you are using another editor, you'll use: **plt.show()** at the end of all your plotting commands to have the figure pop up in another window. # # Basic Example # Let's walk through a very simple example using two numpy arrays: # ### Example # Let's walk through a very simple example using two numpy arrays. You can also use lists, but most likely you'll be passing numpy arrays or pandas columns (which essentially also behave like arrays). 
# ** The data we want to plot:** # In[4]: # In[5]: # ## Basic Matplotlib Commands # # We can create a very simple line plot using the following ( I encourage you to pause and use Shift+Tab along the way to check out the document strings for the functions we are using). # In[6]: # 'r' is the color red # ## Creating Multiplots on Same Canvas # In[7]: # plt.subplot(nrows, ncols, plot_number) # More on color options later # ___ # # Matplotlib Object Oriented Method # Now that we've seen the basics, let's break it all down with a more formal introduction of Matplotlib's Object Oriented API. This means we will instantiate figure objects and then call methods or attributes from that object. # ## Introduction to the Object Oriented Method # The main idea in using the more formal Object Oriented method is to create figure objects and then just call methods or attributes off of that object. This approach is nicer when dealing with a canvas that has multiple plots on it. # # To begin we create a figure instance. Then we can add axes to that figure: # In[8]: # Create Figure (empty canvas) # Add set of axes to figure # left, bottom, width, height (range 0 to 1) # Plot on that set of axes # Notice the use of set_ to begin methods # Code is a little more complicated, but the advantage is that we now have full control of where the plot axes are placed, and we can easily add more than one axis to the figure: # In[9]: # Creates blank canvas # main axes # inset axes # Larger Figure Axes 1 # Insert Figure Axes 2 # ## subplots() # # The plt.subplots() object will act as a more automatic axis manager. 
# # Basic use cases: # In[10]: # Use similar to plt.figure() except use tuple unpacking to grab fig and axes # Now use the axes object to add stuff to plot # Then you can specify the number of rows and columns when creating the subplots() object: # In[11]: # Empty canvas of 1 by 2 subplots # In[12]: # Axes is an array of axes to plot on # We can iterate through this array: # In[13]: # Display the figure object # A common issue with matplolib is overlapping subplots or figures. We ca use **fig.tight_layout()** or **plt.tight_layout()** method, which automatically adjusts the positions of the axes on the figure canvas so that there is no overlapping content: # In[14]: # ### Figure size, aspect ratio and DPI # Matplotlib allows the aspect ratio, DPI and figure size to be specified when the Figure object is created. You can use the `figsize` and `dpi` keyword arguments. # * `figsize` is a tuple of the width and height of the figure in inches # * `dpi` is the dots-per-inch (pixel per inch). # # For example: # In[15]: # The same arguments can also be passed to layout managers, such as the `subplots` function: # In[16]: # ## Saving figures # Matplotlib can generate high-quality output in a number formats, including PNG, JPG, EPS, SVG, PGF and PDF. # To save a figure to a file we can use the `savefig` method in the `Figure` class: # In[17]: # Here we can also optionally specify the DPI and choose between different output formats: # In[18]: # ____ # ## Legends, labels and titles # Now that we have covered the basics of how to create a figure canvas and add axes instances to the canvas, let's look at how decorate a figure with titles, axis labels, and legends. # **Figure titles** # # A title can be added to each axis instance in a figure. 
To set the title, use the `set_title` method in the axes instance: # In[19]: # **Axis labels** # # Similarly, with the methods `set_xlabel` and `set_ylabel`, we can set the labels of the X and Y axes: # In[20]: # ### Legends # You can use the **label="label text"** keyword argument when plots or other objects are added to the figure, and then using the **legend** method without arguments to add the legend to the figure: # In[21]: # Notice how are legend overlaps some of the actual plot! # # The **legend** function takes an optional keyword argument **loc** that can be used to specify where in the figure the legend is to be drawn. The allowed values of **loc** are numerical codes for the various places the legend can be drawn. See the [documentation page](http://matplotlib.org/users/legend_guide.html#legend-location) for details. Some of the most common **loc** values are: # In[22]: # Lots of options.... # upper right corner # upper left corner # lower left corner # lower right corner # .. many more options are available # Most common to choose # let matplotlib decide the optimal location # ## Setting colors, linewidths, linetypes # # Matplotlib gives you *a lot* of options for customizing colors, linewidths, and linetypes. # # There is the basic MATLAB like syntax (which I would suggest you avoid using for more clairty sake: # ### Colors with MatLab like syntax # With matplotlib, we can define the colors of lines and other graphical elements in a number of ways. First of all, we can use the MATLAB-like syntax where `'b'` means blue, `'g'` means green, etc. The MATLAB API for selecting line styles are also supported: where, for example, 'b.-' means a blue line with dots: # In[23]: # MATLAB style line color and style # blue line with dots # green dashed line # ### Colors with the color= parameter # We can also define colors by their names or RGB hex codes and optionally provide an alpha value using the `color` and `alpha` keyword arguments. Alpha indicates opacity. 
# In[24]: # half-transparant # RGB hex code # RGB hex code # ### Line and marker styles # To change the line width, we can use the `linewidth` or `lw` keyword argument. The line style can be selected using the `linestyle` or `ls` keyword arguments: # In[25]: # possible linestype options ‘-‘, ‘–’, ‘-.’, ‘:’, ‘steps’ # custom dash # format: line length, space length, ... # possible marker symbols: marker = '+', 'o', '*', 's', ',', '.', '1', '2', '3', '4', ... # marker size and color # ### Control over axis appearance # In this section we will look at controlling axis sizing properties in a matplotlib figure. # ## Plot range # We can configure the ranges of the axes using the `set_ylim` and `set_xlim` methods in the axis object, or `axis('tight')` for automatically getting "tightly fitted" axes ranges: # In[26]: # # Special Plot Types # # There are many specialized plots we can create, such as barplots, histograms, scatter plots, and much more. Most of these type of plots we will actually create using pandas. But here are a few examples of these type of plots: # In[27]: # In[28]: # In[29]: # rectangular box plot # ## Further reading # * http://www.matplotlib.org - The project web page for matplotlib. # * https://github.com/matplotlib/matplotlib - The source code for matplotlib. # * http://matplotlib.org/gallery.html - A large gallery showcaseing various types of plots matplotlib can create. Highly recommended! # * http://www.loria.fr/~rougier/teaching/matplotlib - A good matplotlib tutorial. # * http://scipy-lectures.github.io/matplotlib/matplotlib.html - Another good matplotlib reference. # | 4.410585 | 4 |
data_replication/__init__.py | icmanage/django-data-replication | 1 | 6616265 | <reponame>icmanage/django-data-replication
# -*- coding: utf-8 -*-
"""__init__.py: Django data_replication package container"""
from __future__ import unicode_literals
from __future__ import print_function
__name__ = "data_replication"
__author__ = '<NAME>'
__version_info__ = (1, 1, 5)
__version__ = '.'.join(map(str, __version_info__))
__date__ = '9/21/17 07:55'
__copyright__ = 'Copyright 2017 IC Manage. All rights reserved.'
__credits__ = ['<NAME>', ]
__license__ = 'See the file LICENSE.txt for licensing information.'
| # -*- coding: utf-8 -*-
"""__init__.py: Django data_replication package container"""
from __future__ import unicode_literals
from __future__ import print_function
__name__ = "data_replication"
__author__ = '<NAME>'
__version_info__ = (1, 1, 5)
__version__ = '.'.join(map(str, __version_info__))
__date__ = '9/21/17 07:55'
__copyright__ = 'Copyright 2017 IC Manage. All rights reserved.'
__credits__ = ['<NAME>', ]
__license__ = 'See the file LICENSE.txt for licensing information.' | en | 0.448533 | # -*- coding: utf-8 -*- __init__.py: Django data_replication package container | 1.350908 | 1 |
email_send.py | jun168/api_auto_test | 10 | 6616266 | <filename>email_send.py
#!/usr/bin/env python
# encoding: utf-8
"""
@project:artist_api_test
@author:cloudy
@site:
@file:email_send.py
@date:2018/1/22 17:24
@description:邮件发送
"""
import yaml
import smtplib
import os
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
class EmailReport(object):
    """
    Send the API-test HTML report by email over SMTP-SSL (Python 2 code).
    """
    def __init__(self, email_config):
        # SMTP connection settings pulled from the config mapping;
        # missing keys become None (receivers=None disables sending).
        self.email_host = email_config.get("host")
        self.email_user = email_config.get("user")
        self.email_passwd = email_config.get("passwd")
        self.email_receivers = email_config.get("receivers")
    def send_report(self, total_info, file_name):
        """
        Build and send the test-result email with the HTML report attached.
        :param total_info: dict with 'success_count', 'failure_count' and
            'skip_count' totals for the test run.
        :param file_name: report base name; '<file_name>.html' is read from
            the local 'local_report' directory and attached.
        :return: None
        """
        if not self.email_receivers:
            print "没有邮件接收列表,忽略邮件发送"
            return
        # Multipart container so the HTML body and attachment travel together.
        msg = MIMEMultipart("related")
        # HTML body summarising total / passed / failed / skipped case counts.
        msg_body = MIMEText("""
        <font color=red>Artist接口自动化测试结果({})</font>
        <table >
        <thead>
        <tr><td>用例总数</td><td>{}</td><tr>
        </thead>
        <tbody>
        <tr><td>成功用例</td><td>{}</td></tr>
        <tr><td>失败用例</td><td>{}</td></tr>
        <tr><td>跳过用例</td><td>{}</td></tr>
        </tbody>
        </table>
        """.format(file_name, sum([total_info['success_count'], total_info['failure_count'], total_info['skip_count']]),total_info['success_count'], total_info['failure_count'], total_info['skip_count']), "html", "utf-8")
        msg.attach(msg_body)
        # Attach the generated HTML report, base64-encoded.
        attach = MIMEText(open(os.path.join(os.getcwd(), "local_report", "{}.html".format(file_name)), 'rb').read(), 'base64', 'gb2312')
        attach["Content-Type"] = 'application/octet-stream'
        attach["Content-Disposition"] = 'attachment; filename="{}.html"'.format(file_name)
        msg.attach(attach)
        # Envelope headers: receivers, sender and subject line.
        msg['to'] = ";".join(self.email_receivers)
        msg['from'] = self.email_user
        msg['subject'] = u'Artist接口自动化测试报告({})'.format(file_name)
        # Send over SMTP-SSL (port 465); SMTP failures are printed, not raised.
        try:
            smtpObj = smtplib.SMTP_SSL(self.email_host, 465)
            smtpObj.login(self.email_user, self.email_passwd)
            msg_str = msg.as_string()
            smtpObj.sendmail(self.email_user, self.email_receivers, msg_str)
            smtpObj.quit()
            print u"邮件发送成功"
        except smtplib.SMTPException, e:
            print e
| <filename>email_send.py
#!/usr/bin/env python
# encoding: utf-8
"""
@project:artist_api_test
@author:cloudy
@site:
@file:email_send.py
@date:2018/1/22 17:24
@description:邮件发送
"""
import yaml
import smtplib
import os
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
class EmailReport(object):
"""
邮件发送
"""
def __init__(self, email_config):
self.email_host = email_config.get("host")
self.email_user = email_config.get("user")
self.email_passwd = email_config.get("passwd")
self.email_receivers = email_config.get("receivers")
def send_report(self, total_info, file_name):
"""
发送邮件
:param report_file:
:return:
"""
if not self.email_receivers:
print "没有邮件接收列表,忽略邮件发送"
return
# 创建一个带附件的实例
msg = MIMEMultipart("related")
# 添加邮件主体内容
msg_body = MIMEText("""
<font color=red>Artist接口自动化测试结果({})</font>
<table >
<thead>
<tr><td>用例总数</td><td>{}</td><tr>
</thead>
<tbody>
<tr><td>成功用例</td><td>{}</td></tr>
<tr><td>失败用例</td><td>{}</td></tr>
<tr><td>跳过用例</td><td>{}</td></tr>
</tbody>
</table>
""".format(file_name, sum([total_info['success_count'], total_info['failure_count'], total_info['skip_count']]),total_info['success_count'], total_info['failure_count'], total_info['skip_count']), "html", "utf-8")
msg.attach(msg_body)
# 构造附件
attach = MIMEText(open(os.path.join(os.getcwd(), "local_report", "{}.html".format(file_name)), 'rb').read(), 'base64', 'gb2312')
attach["Content-Type"] = 'application/octet-stream'
attach["Content-Disposition"] = 'attachment; filename="{}.html"'.format(file_name)
msg.attach(attach)
# 加邮件头
msg['to'] = ";".join(self.email_receivers)
msg['from'] = self.email_user
msg['subject'] = u'Artist接口自动化测试报告({})'.format(file_name)
# 发送邮件
try:
smtpObj = smtplib.SMTP_SSL(self.email_host, 465)
smtpObj.login(self.email_user, self.email_passwd)
msg_str = msg.as_string()
smtpObj.sendmail(self.email_user, self.email_receivers, msg_str)
smtpObj.quit()
print u"邮件发送成功"
except smtplib.SMTPException, e:
print e
| zh | 0.349359 | #!/usr/bin/env python # encoding: utf-8 @project:artist_api_test @author:cloudy @site: @file:email_send.py @date:2018/1/22 17:24 @description:邮件发送 邮件发送 发送邮件 :param report_file: :return: # 创建一个带附件的实例 # 添加邮件主体内容 <font color=red>Artist接口自动化测试结果({})</font> <table > <thead> <tr><td>用例总数</td><td>{}</td><tr> </thead> <tbody> <tr><td>成功用例</td><td>{}</td></tr> <tr><td>失败用例</td><td>{}</td></tr> <tr><td>跳过用例</td><td>{}</td></tr> </tbody> </table> # 构造附件 # 加邮件头 # 发送邮件 | 2.582815 | 3 |
gybe/kubernetes/io/k8s/apimachinery/pkg/__init__.py | peterth3/gybe | 2 | 6616267 | <reponame>peterth3/gybe<filename>gybe/kubernetes/io/k8s/apimachinery/pkg/__init__.py
# generated by datamodel-codegen:
# filename: _definitions.json
# timestamp: 2021-01-20T05:35:16+00:00
| # generated by datamodel-codegen:
# filename: _definitions.json
# timestamp: 2021-01-20T05:35:16+00:00 | en | 0.52111 | # generated by datamodel-codegen: # filename: _definitions.json # timestamp: 2021-01-20T05:35:16+00:00 | 1.149302 | 1 |
Beta_version/Inception/InceptionClassifier.py | rexwangcc/RecoverGAN | 2 | 6616268 | # Batchable Classifier based on Inception and Tensorflow Slim
# @<NAME> @Yt Su
from models import dataset_utils
from models import imagenet
from models import inception_preprocessing
from models import inception_v4 as inception4
from models import inception_v3 as inception3
from models import inception_v2 as inception2
import numpy as np
import os
import tensorflow as tf
from urllib.request import urlopen
import urllib
import matplotlib.pyplot as plt
import glob
class inceptionv4_classifier(object):
    """Batch image classifier built on a pretrained TF-Slim Inception-v4.

    Scans ``path_to_validate`` for images matching ``extension``, runs each
    one through Inception-v4 (downloading the checkpoint tarball on first
    use), and counts an image as correctly classified when ``keyword`` is a
    substring of any of its top-``top_k`` ImageNet label names.
    """

    def __init__(self, extension='.jpg', path_to_validate='to_validate/', checkpoints_dir='checkpoints/', keyword='cat', top_k=5, print_flag=False, save_result_to_file=True):
        """Collect the image list and make sure the checkpoint is cached.

        :param extension: image file extension to match (including the dot).
        :param path_to_validate: directory scanned for images to classify.
        :param checkpoints_dir: directory where the checkpoint is cached.
        :param keyword: substring searched for in predicted label names.
        :param top_k: number of top predictions inspected per image.
        :param print_flag: when True, print each top-k label/probability.
        :param save_result_to_file: when True, write the accuracy to a file.
        """
        print('***Running Classifier with [Inception-v4] core***')
        self.slim = tf.contrib.slim
        self.model_url = "http://download.tensorflow.org/models/inception_v4_2016_09_09.tar.gz"
        if not tf.gfile.Exists(checkpoints_dir):
            tf.gfile.MakeDirs(checkpoints_dir)
        self.checkpoints_dir = checkpoints_dir
        # Download and extract the pretrained weights only once.
        if not tf.gfile.Exists(checkpoints_dir + 'inception_v4_2016_09_09.tar.gz'):
            dataset_utils.download_and_uncompress_tarball(
                self.model_url, self.checkpoints_dir)
        self.image_size = inception4.inception_v4.default_image_size
        self.extension = extension
        self.path_to_validate = path_to_validate
        self.files = [filename for filename in glob.glob(
            self.path_to_validate + '*' + self.extension)]
        self.dim = len(self.files)
        print('Total files to perform validation: ' + str(self.dim))
        # Filled by main(): one (raw_image, softmax_tensor) tuple per file.
        self.image_and_probabilities = []
        self.keyword = keyword
        self.print_flag = print_flag
        self.top_k = top_k
        # Running count of images whose top-k labels contain the keyword.
        self.accuracy = 0
        self.save_result_to_file = save_result_to_file

    def image_preprocessor(self, img):
        """Decode ``img`` from disk and shape it for Inception inference.

        :param img: path to a JPEG image on the local filesystem.
        :return: tuple ``(decoded_image, batched_preprocessed_image)``.
        """
        img_str = urlopen('file:' + urllib.request.pathname2url(img)).read()
        image = tf.image.decode_jpeg(img_str, channels=3)
        processed_image = inception_preprocessing.preprocess_image(
            image, self.image_size, self.image_size, is_training=False)
        # Add a leading batch dimension: the network expects NHWC input.
        processed_images = tf.expand_dims(processed_image, 0)
        return image, processed_images

    def main(self):
        """Classify every image, then report and optionally save accuracy."""
        with tf.Graph().as_default():
            # Lazily map each file to a (raw_image, batched_tensor) tuple.
            self.processed_tensor_list = map(
                self.image_preprocessor, self.files)
            for tensor_tuple in self.processed_tensor_list:
                # NOTE(review): the network is re-instantiated for every
                # image; building it once with a reusable input would be
                # cheaper — confirm variable-scope reuse before restructuring.
                with self.slim.arg_scope(inception4.inception_v4_arg_scope()):
                    logits, _ = inception4.inception_v4(
                        tensor_tuple[1], num_classes=1001, is_training=False)
                self.image_and_probabilities.append(
                    (tensor_tuple[0], tf.nn.softmax(logits)))
            self.init_fn = self.slim.assign_from_checkpoint_fn(
                os.path.join(self.checkpoints_dir, 'inception_v4.ckpt'),
                self.slim.get_model_variables('InceptionV4'))
            with tf.Session() as sess:
                self.init_fn(sess)
                for idx in range(self.dim):
                    print('Classifying on image' + str(idx))
                    _, probabilities = sess.run([self.image_and_probabilities[idx][
                        0], self.image_and_probabilities[idx][1]])
                    probabilities = probabilities[0, 0:]
                    # Class indices sorted by descending probability.
                    sorted_inds = [i[0] for i in sorted(
                        enumerate(-probabilities), key=lambda x: x[1])]
                    names = imagenet.create_readable_names_for_imagenet_labels()
                    temp_array = []
                    for i in range(self.top_k):
                        index = sorted_inds[i]
                        temp_array.append(names[index])
                        if self.print_flag:
                            print('Probability %0.2f%% => [%s]' % (
                                probabilities[index], names[index]))
                    if any(self.keyword in s for s in temp_array):
                        self.accuracy += 1
            # BUG FIX: tf.divide returns a Tensor, so its repr (not the
            # value) was printed/saved; use plain Python division instead,
            # guarding against an empty validation folder.
            accuracy_ratio = self.accuracy / self.dim if self.dim else 0.0
            print('Classification Accuracy ====> ' + str(accuracy_ratio))
            if self.save_result_to_file:
                # BUG FIX: the file was opened in binary mode ('wb') while
                # writing str, which raises TypeError under Python 3.
                with open('Inception_v4_Results.txt', 'w') as f:
                    f.write('Classification Accuracy\n')
                    f.write(str(accuracy_ratio))
class inceptionv3_classifier(object):
    """Batch image classifier built on a pretrained TF-Slim Inception-v3.

    Scans ``path_to_validate`` for images matching ``extension``, runs each
    one through Inception-v3 (downloading the checkpoint tarball on first
    use), and counts an image as correctly classified when ``keyword`` is a
    substring of any of its top-``top_k`` ImageNet label names.
    """

    def __init__(self, extension='.jpg', path_to_validate='to_validate/', checkpoints_dir='checkpoints/', keyword='cat', top_k=5, print_flag=False, save_result_to_file=True):
        """Collect the image list and make sure the checkpoint is cached.

        :param extension: image file extension to match (including the dot).
        :param path_to_validate: directory scanned for images to classify.
        :param checkpoints_dir: directory where the checkpoint is cached.
        :param keyword: substring searched for in predicted label names.
        :param top_k: number of top predictions inspected per image.
        :param print_flag: when True, print each top-k label/probability.
        :param save_result_to_file: when True, write the accuracy to a file.
        """
        print('***Running Classifier with [Inception-v3] core***')
        self.slim = tf.contrib.slim
        self.model_url = "http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz"
        if not tf.gfile.Exists(checkpoints_dir):
            tf.gfile.MakeDirs(checkpoints_dir)
        self.checkpoints_dir = checkpoints_dir
        # Download and extract the pretrained weights only once.
        if not tf.gfile.Exists(checkpoints_dir + 'inception_v3_2016_08_28.tar.gz'):
            dataset_utils.download_and_uncompress_tarball(
                self.model_url, self.checkpoints_dir)
        self.image_size = inception3.inception_v3.default_image_size
        self.extension = extension
        self.path_to_validate = path_to_validate
        self.files = [filename for filename in glob.glob(
            self.path_to_validate + '*' + self.extension)]
        self.dim = len(self.files)
        print('Total files to perform validation: ' + str(self.dim))
        # Filled by main(): one (raw_image, softmax_tensor) tuple per file.
        self.image_and_probabilities = []
        self.keyword = keyword
        self.print_flag = print_flag
        self.top_k = top_k
        # Running count of images whose top-k labels contain the keyword.
        self.accuracy = 0
        self.save_result_to_file = save_result_to_file

    def image_preprocessor(self, img):
        """Decode ``img`` from disk and shape it for Inception inference.

        :param img: path to a JPEG image on the local filesystem.
        :return: tuple ``(decoded_image, batched_preprocessed_image)``.
        """
        img_str = urlopen('file:' + urllib.request.pathname2url(img)).read()
        image = tf.image.decode_jpeg(img_str, channels=3)
        processed_image = inception_preprocessing.preprocess_image(
            image, self.image_size, self.image_size, is_training=False)
        # Add a leading batch dimension: the network expects NHWC input.
        processed_images = tf.expand_dims(processed_image, 0)
        return image, processed_images

    def main(self):
        """Classify every image, then report and optionally save accuracy."""
        with tf.Graph().as_default():
            # Lazily map each file to a (raw_image, batched_tensor) tuple.
            self.processed_tensor_list = map(
                self.image_preprocessor, self.files)
            for tensor_tuple in self.processed_tensor_list:
                # NOTE(review): the network is re-instantiated for every
                # image; building it once with a reusable input would be
                # cheaper — confirm variable-scope reuse before restructuring.
                with self.slim.arg_scope(inception3.inception_v3_arg_scope()):
                    logits, _ = inception3.inception_v3(
                        tensor_tuple[1], num_classes=1001, is_training=False)
                self.image_and_probabilities.append(
                    (tensor_tuple[0], tf.nn.softmax(logits)))
            self.init_fn = self.slim.assign_from_checkpoint_fn(
                os.path.join(self.checkpoints_dir, 'inception_v3.ckpt'),
                self.slim.get_model_variables('InceptionV3'))
            with tf.Session() as sess:
                self.init_fn(sess)
                for idx in range(self.dim):
                    print('Classifying on image' + str(idx))
                    _, probabilities = sess.run([self.image_and_probabilities[idx][
                        0], self.image_and_probabilities[idx][1]])
                    probabilities = probabilities[0, 0:]
                    # Class indices sorted by descending probability.
                    sorted_inds = [i[0] for i in sorted(
                        enumerate(-probabilities), key=lambda x: x[1])]
                    names = imagenet.create_readable_names_for_imagenet_labels()
                    temp_array = []
                    for i in range(self.top_k):
                        index = sorted_inds[i]
                        temp_array.append(names[index])
                        if self.print_flag:
                            print('Probability %0.2f%% => [%s]' % (
                                probabilities[index], names[index]))
                    if any(self.keyword in s for s in temp_array):
                        self.accuracy += 1
            # BUG FIX: tf.divide returns a Tensor, so its repr (not the
            # value) was printed/saved; use plain Python division instead,
            # guarding against an empty validation folder.
            accuracy_ratio = self.accuracy / self.dim if self.dim else 0.0
            print('Classification Accuracy ====> ' + str(accuracy_ratio))
            if self.save_result_to_file:
                # BUG FIX: the file was opened in binary mode ('wb') while
                # writing str, which raises TypeError under Python 3.
                with open('Inception_v3_Results.txt', 'w') as f:
                    f.write('Classification Accuracy\n')
                    f.write(str(accuracy_ratio))
class inceptionv2_classifier(object):
    """Batch image classifier backed by the TF-Slim Inception-v2 model.

    Downloads the pretrained checkpoint on first use, runs every image found
    in ``path_to_validate`` through the network, and reports how often
    ``keyword`` appears among the top-k predicted class names.
    """

    def __init__(self, extension='.jpg', path_to_validate='to_validate/', checkpoints_dir='checkpoints/', keyword='cat', top_k=5, print_flag=False, save_result_to_file=True):
        print('***Running Classifier with [Inception-v2] core***')
        self.slim = tf.contrib.slim
        self.model_url = "http://download.tensorflow.org/models/inception_v2_2016_08_28.tar.gz"
        if not tf.gfile.Exists(checkpoints_dir):
            tf.gfile.MakeDirs(checkpoints_dir)
        self.checkpoints_dir = checkpoints_dir
        # Only download the pretrained weights once.
        if not tf.gfile.Exists(checkpoints_dir + 'inception_v2_2016_08_28.tar.gz'):
            dataset_utils.download_and_uncompress_tarball(
                self.model_url, self.checkpoints_dir)
        self.image_size = inception2.inception_v2.default_image_size
        self.extension = extension
        self.path_to_validate = path_to_validate
        self.files = [filename for filename in glob.glob(
            self.path_to_validate + '*' + self.extension)]
        self.dim = len(self.files)
        print('Total files to perform validation: ' + str(self.dim))
        self.image_and_probabilities = []
        self.keyword = keyword
        self.print_flag = print_flag
        self.top_k = top_k
        self.accuracy = 0
        self.save_result_to_file = save_result_to_file

    def image_preprocessor(self, img):
        """Decode one JPEG from disk; return (raw_image, batched_network_input)."""
        img_str = urlopen('file:' + urllib.request.pathname2url(img)).read()
        image = tf.image.decode_jpeg(img_str, channels=3)
        processed_image = inception_preprocessing.preprocess_image(
            image, self.image_size, self.image_size, is_training=False)
        # Prepend a batch dimension of size 1 for the network input.
        processed_images = tf.expand_dims(processed_image, 0)
        return image, processed_images

    def main(self):
        """Classify all images and report keyword accuracy."""
        with tf.Graph().as_default():
            self.processed_tensor_list = map(
                self.image_preprocessor, self.files)
            for tensor_tuple in self.processed_tensor_list:
                # Use the default arg scope to configure batch-norm parameters.
                with self.slim.arg_scope(inception2.inception_v2_arg_scope()):
                    # BUG FIX: this class previously called
                    # inception2.inception_v4, which does not exist in the
                    # inception_v2 module; use the v2 network.
                    logits, _ = inception2.inception_v2(
                        tensor_tuple[1], num_classes=1001, is_training=False)
                self.image_and_probabilities.append(
                    (tensor_tuple[0], tf.nn.softmax(logits)))
            self.init_fn = self.slim.assign_from_checkpoint_fn(
                os.path.join(self.checkpoints_dir, 'inception_v2.ckpt'),
                self.slim.get_model_variables('InceptionV2'))
            with tf.Session() as sess:
                self.init_fn(sess)
                # Hoisted out of the loop: the label table never changes.
                names = imagenet.create_readable_names_for_imagenet_labels()
                for idx in range(self.dim):
                    print('Classifying on image' + str(idx))
                    _, probabilities = sess.run(
                        [self.image_and_probabilities[idx][0],
                         self.image_and_probabilities[idx][1]])
                    probabilities = probabilities[0, 0:]
                    sorted_inds = [i[0] for i in sorted(
                        enumerate(-probabilities), key=lambda x: x[1])]
                    temp_array = []
                    for i in range(self.top_k):
                        index = sorted_inds[i]
                        temp_array.append(names[index])
                        if self.print_flag:
                            print('Probability %0.2f%% => [%s]' % (
                                probabilities[index], names[index]))
                    if any(self.keyword in s for s in temp_array):
                        self.accuracy += 1
                # BUG FIX: tf.divide() builds a graph Tensor, so the old code
                # printed/saved 'Tensor(...)' instead of the accuracy value.
                accuracy_ratio = self.accuracy / self.dim if self.dim else 0.0
                print('Classification Accuracy ====> ' + str(accuracy_ratio))
                if self.save_result_to_file:
                    # BUG FIX: 'wb' mode + str payload raises TypeError on
                    # Python 3; open in text mode.
                    with open('Inception_v2_Results.txt', 'w') as f:
                        f.write('Classification Accuracy\n')
                        f.write(str(accuracy_ratio))
| # Batchable Classifier based on Inception and Tensorflow Slim
# @<NAME> @Yt Su
from models import dataset_utils
from models import imagenet
from models import inception_preprocessing
from models import inception_v4 as inception4
from models import inception_v3 as inception3
from models import inception_v2 as inception2
import numpy as np
import os
import tensorflow as tf
from urllib.request import urlopen
import urllib
import matplotlib.pyplot as plt
import glob
class inceptionv4_classifier(object):
    """Batch image classifier backed by the TF-Slim Inception-v4 model.

    Downloads the pretrained checkpoint on first use, runs every image found
    in ``path_to_validate`` through the network, and reports how often
    ``keyword`` appears among the top-k predicted class names.
    """

    def __init__(self, extension='.jpg', path_to_validate='to_validate/', checkpoints_dir='checkpoints/', keyword='cat', top_k=5, print_flag=False, save_result_to_file=True):
        print('***Running Classifier with [Inception-v4] core***')
        self.slim = tf.contrib.slim
        self.model_url = "http://download.tensorflow.org/models/inception_v4_2016_09_09.tar.gz"
        if not tf.gfile.Exists(checkpoints_dir):
            tf.gfile.MakeDirs(checkpoints_dir)
        self.checkpoints_dir = checkpoints_dir
        # Only download the pretrained weights once.
        if not tf.gfile.Exists(checkpoints_dir + 'inception_v4_2016_09_09.tar.gz'):
            dataset_utils.download_and_uncompress_tarball(
                self.model_url, self.checkpoints_dir)
        self.image_size = inception4.inception_v4.default_image_size
        self.extension = extension
        self.path_to_validate = path_to_validate
        self.files = [filename for filename in glob.glob(
            self.path_to_validate + '*' + self.extension)]
        self.dim = len(self.files)
        print('Total files to perform validation: ' + str(self.dim))
        self.image_and_probabilities = []
        self.keyword = keyword
        self.print_flag = print_flag
        self.top_k = top_k
        self.accuracy = 0
        self.save_result_to_file = save_result_to_file

    def image_preprocessor(self, img):
        """Decode one JPEG from disk; return (raw_image, batched_network_input)."""
        img_str = urlopen('file:' + urllib.request.pathname2url(img)).read()
        image = tf.image.decode_jpeg(img_str, channels=3)
        processed_image = inception_preprocessing.preprocess_image(
            image, self.image_size, self.image_size, is_training=False)
        # Prepend a batch dimension of size 1 for the network input.
        processed_images = tf.expand_dims(processed_image, 0)
        return image, processed_images

    def main(self):
        """Classify all images and report keyword accuracy."""
        with tf.Graph().as_default():
            self.processed_tensor_list = map(
                self.image_preprocessor, self.files)
            for tensor_tuple in self.processed_tensor_list:
                # Use the default arg scope to configure batch-norm parameters.
                with self.slim.arg_scope(inception4.inception_v4_arg_scope()):
                    logits, _ = inception4.inception_v4(
                        tensor_tuple[1], num_classes=1001, is_training=False)
                self.image_and_probabilities.append(
                    (tensor_tuple[0], tf.nn.softmax(logits)))
            self.init_fn = self.slim.assign_from_checkpoint_fn(
                os.path.join(self.checkpoints_dir, 'inception_v4.ckpt'),
                self.slim.get_model_variables('InceptionV4'))
            with tf.Session() as sess:
                self.init_fn(sess)
                # Hoisted out of the loop: the label table never changes.
                names = imagenet.create_readable_names_for_imagenet_labels()
                for idx in range(self.dim):
                    print('Classifying on image' + str(idx))
                    _, probabilities = sess.run(
                        [self.image_and_probabilities[idx][0],
                         self.image_and_probabilities[idx][1]])
                    probabilities = probabilities[0, 0:]
                    sorted_inds = [i[0] for i in sorted(
                        enumerate(-probabilities), key=lambda x: x[1])]
                    temp_array = []
                    for i in range(self.top_k):
                        index = sorted_inds[i]
                        temp_array.append(names[index])
                        if self.print_flag:
                            print('Probability %0.2f%% => [%s]' % (
                                probabilities[index], names[index]))
                    if any(self.keyword in s for s in temp_array):
                        self.accuracy += 1
                # BUG FIX: tf.divide() builds a graph Tensor, so the old code
                # printed/saved 'Tensor(...)' instead of the accuracy value.
                accuracy_ratio = self.accuracy / self.dim if self.dim else 0.0
                print('Classification Accuracy ====> ' + str(accuracy_ratio))
                if self.save_result_to_file:
                    # BUG FIX: 'wb' mode + str payload raises TypeError on
                    # Python 3; open in text mode.
                    with open('Inception_v4_Results.txt', 'w') as f:
                        f.write('Classification Accuracy\n')
                        f.write(str(accuracy_ratio))
class inceptionv3_classifier(object):
    """Batch image classifier backed by the TF-Slim Inception-v3 model.

    Downloads the pretrained checkpoint on first use, runs every image found
    in ``path_to_validate`` through the network, and reports how often
    ``keyword`` appears among the top-k predicted class names.
    """

    def __init__(self, extension='.jpg', path_to_validate='to_validate/', checkpoints_dir='checkpoints/', keyword='cat', top_k=5, print_flag=False, save_result_to_file=True):
        print('***Running Classifier with [Inception-v3] core***')
        self.slim = tf.contrib.slim
        self.model_url = "http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz"
        if not tf.gfile.Exists(checkpoints_dir):
            tf.gfile.MakeDirs(checkpoints_dir)
        self.checkpoints_dir = checkpoints_dir
        # Only download the pretrained weights once.
        if not tf.gfile.Exists(checkpoints_dir + 'inception_v3_2016_08_28.tar.gz'):
            dataset_utils.download_and_uncompress_tarball(
                self.model_url, self.checkpoints_dir)
        self.image_size = inception3.inception_v3.default_image_size
        self.extension = extension
        self.path_to_validate = path_to_validate
        self.files = [filename for filename in glob.glob(
            self.path_to_validate + '*' + self.extension)]
        self.dim = len(self.files)
        print('Total files to perform validation: ' + str(self.dim))
        self.image_and_probabilities = []
        self.keyword = keyword
        self.print_flag = print_flag
        self.top_k = top_k
        self.accuracy = 0
        self.save_result_to_file = save_result_to_file

    def image_preprocessor(self, img):
        """Decode one JPEG from disk; return (raw_image, batched_network_input)."""
        img_str = urlopen('file:' + urllib.request.pathname2url(img)).read()
        image = tf.image.decode_jpeg(img_str, channels=3)
        processed_image = inception_preprocessing.preprocess_image(
            image, self.image_size, self.image_size, is_training=False)
        # Prepend a batch dimension of size 1 for the network input.
        processed_images = tf.expand_dims(processed_image, 0)
        return image, processed_images

    def main(self):
        """Classify all images and report keyword accuracy."""
        with tf.Graph().as_default():
            self.processed_tensor_list = map(
                self.image_preprocessor, self.files)
            for tensor_tuple in self.processed_tensor_list:
                # Use the default arg scope to configure batch-norm parameters.
                with self.slim.arg_scope(inception3.inception_v3_arg_scope()):
                    logits, _ = inception3.inception_v3(
                        tensor_tuple[1], num_classes=1001, is_training=False)
                self.image_and_probabilities.append(
                    (tensor_tuple[0], tf.nn.softmax(logits)))
            self.init_fn = self.slim.assign_from_checkpoint_fn(
                os.path.join(self.checkpoints_dir, 'inception_v3.ckpt'),
                self.slim.get_model_variables('InceptionV3'))
            with tf.Session() as sess:
                self.init_fn(sess)
                # Hoisted out of the loop: the label table never changes.
                names = imagenet.create_readable_names_for_imagenet_labels()
                for idx in range(self.dim):
                    print('Classifying on image' + str(idx))
                    _, probabilities = sess.run(
                        [self.image_and_probabilities[idx][0],
                         self.image_and_probabilities[idx][1]])
                    probabilities = probabilities[0, 0:]
                    sorted_inds = [i[0] for i in sorted(
                        enumerate(-probabilities), key=lambda x: x[1])]
                    temp_array = []
                    for i in range(self.top_k):
                        index = sorted_inds[i]
                        temp_array.append(names[index])
                        if self.print_flag:
                            print('Probability %0.2f%% => [%s]' % (
                                probabilities[index], names[index]))
                    if any(self.keyword in s for s in temp_array):
                        self.accuracy += 1
                # BUG FIX: tf.divide() builds a graph Tensor, so the old code
                # printed/saved 'Tensor(...)' instead of the accuracy value.
                accuracy_ratio = self.accuracy / self.dim if self.dim else 0.0
                print('Classification Accuracy ====> ' + str(accuracy_ratio))
                if self.save_result_to_file:
                    # BUG FIX: 'wb' mode + str payload raises TypeError on
                    # Python 3; open in text mode.
                    with open('Inception_v3_Results.txt', 'w') as f:
                        f.write('Classification Accuracy\n')
                        f.write(str(accuracy_ratio))
class inceptionv2_classifier(object):
    """Batch image classifier backed by the TF-Slim Inception-v2 model.

    Downloads the pretrained checkpoint on first use, runs every image found
    in ``path_to_validate`` through the network, and reports how often
    ``keyword`` appears among the top-k predicted class names.
    """

    def __init__(self, extension='.jpg', path_to_validate='to_validate/', checkpoints_dir='checkpoints/', keyword='cat', top_k=5, print_flag=False, save_result_to_file=True):
        print('***Running Classifier with [Inception-v2] core***')
        self.slim = tf.contrib.slim
        self.model_url = "http://download.tensorflow.org/models/inception_v2_2016_08_28.tar.gz"
        if not tf.gfile.Exists(checkpoints_dir):
            tf.gfile.MakeDirs(checkpoints_dir)
        self.checkpoints_dir = checkpoints_dir
        # Only download the pretrained weights once.
        if not tf.gfile.Exists(checkpoints_dir + 'inception_v2_2016_08_28.tar.gz'):
            dataset_utils.download_and_uncompress_tarball(
                self.model_url, self.checkpoints_dir)
        self.image_size = inception2.inception_v2.default_image_size
        self.extension = extension
        self.path_to_validate = path_to_validate
        self.files = [filename for filename in glob.glob(
            self.path_to_validate + '*' + self.extension)]
        self.dim = len(self.files)
        print('Total files to perform validation: ' + str(self.dim))
        self.image_and_probabilities = []
        self.keyword = keyword
        self.print_flag = print_flag
        self.top_k = top_k
        self.accuracy = 0
        self.save_result_to_file = save_result_to_file

    def image_preprocessor(self, img):
        """Decode one JPEG from disk; return (raw_image, batched_network_input)."""
        img_str = urlopen('file:' + urllib.request.pathname2url(img)).read()
        image = tf.image.decode_jpeg(img_str, channels=3)
        processed_image = inception_preprocessing.preprocess_image(
            image, self.image_size, self.image_size, is_training=False)
        # Prepend a batch dimension of size 1 for the network input.
        processed_images = tf.expand_dims(processed_image, 0)
        return image, processed_images

    def main(self):
        """Classify all images and report keyword accuracy."""
        with tf.Graph().as_default():
            self.processed_tensor_list = map(
                self.image_preprocessor, self.files)
            for tensor_tuple in self.processed_tensor_list:
                # Use the default arg scope to configure batch-norm parameters.
                with self.slim.arg_scope(inception2.inception_v2_arg_scope()):
                    # BUG FIX: this class previously called
                    # inception2.inception_v4, which does not exist in the
                    # inception_v2 module; use the v2 network.
                    logits, _ = inception2.inception_v2(
                        tensor_tuple[1], num_classes=1001, is_training=False)
                self.image_and_probabilities.append(
                    (tensor_tuple[0], tf.nn.softmax(logits)))
            self.init_fn = self.slim.assign_from_checkpoint_fn(
                os.path.join(self.checkpoints_dir, 'inception_v2.ckpt'),
                self.slim.get_model_variables('InceptionV2'))
            with tf.Session() as sess:
                self.init_fn(sess)
                # Hoisted out of the loop: the label table never changes.
                names = imagenet.create_readable_names_for_imagenet_labels()
                for idx in range(self.dim):
                    print('Classifying on image' + str(idx))
                    _, probabilities = sess.run(
                        [self.image_and_probabilities[idx][0],
                         self.image_and_probabilities[idx][1]])
                    probabilities = probabilities[0, 0:]
                    sorted_inds = [i[0] for i in sorted(
                        enumerate(-probabilities), key=lambda x: x[1])]
                    temp_array = []
                    for i in range(self.top_k):
                        index = sorted_inds[i]
                        temp_array.append(names[index])
                        if self.print_flag:
                            print('Probability %0.2f%% => [%s]' % (
                                probabilities[index], names[index]))
                    if any(self.keyword in s for s in temp_array):
                        self.accuracy += 1
                # BUG FIX: tf.divide() builds a graph Tensor, so the old code
                # printed/saved 'Tensor(...)' instead of the accuracy value.
                accuracy_ratio = self.accuracy / self.dim if self.dim else 0.0
                print('Classification Accuracy ====> ' + str(accuracy_ratio))
                if self.save_result_to_file:
                    # BUG FIX: 'wb' mode + str payload raises TypeError on
                    # Python 3; open in text mode.
                    with open('Inception_v2_Results.txt', 'w') as f:
                        f.write('Classification Accuracy\n')
                        f.write(str(accuracy_ratio))
| en | 0.587906 | # Batchable Classifier based on Inception and Tensorflow Slim # @<NAME> @Yt Su # return a tuple of (tensor, tensor) # Iterate over a map object # Create the model, use the default arg scope to configure the # batch norm parameters. # Append a tuple (image, probability) # return a tuple of (tensor, tensor) # Iterate over a map object # Create the model, use the default arg scope to configure the # batch norm parameters. # Append a tuple (image, probability) # return a tuple of (tensor, tensor) # Iterate over a map object # Create the model, use the default arg scope to configure the # batch norm parameters. # Append a tuple (image, probability) | 2.640784 | 3 |
trunk/src/lsc/banzaicat.py | drmegannewsome/lcogtsnpipe | 0 | 6616269 | import lsc
from astropy.io import fits
from scipy.stats import sigmaclip
from operator import itemgetter
def make_cat(filename,datamax=75000,b_sigma=3.0,b_crlim=3.0):
if datamax == None: datamax = 75000
hdul = fits.open(filename)
banzai_cat = hdul['CAT'].data
print "Total number of sources in BANZAI catalog: {0}".format(len(banzai_cat))
ellipticities = [x['ELLIPTICITY'] for x in banzai_cat]
backgrounds = [x['BACKGROUND'] for x in banzai_cat]
fwhms = [x['FWHM'] for x in banzai_cat]
filtered_el, lo, hi = sigmaclip(ellipticities, low=b_sigma, high=b_sigma)
filtered_bg, lo, hi = sigmaclip(backgrounds, low=b_sigma, high=b_sigma)
filtered_fwhm, lo, hi = sigmaclip(fwhms, low=b_sigma, high=b_sigma)
id_num = 0
sources = []
for source in banzai_cat:
if (source['FLAG'] == 0
and source['PEAK'] <= datamax
and source['ELLIPTICITY'] in filtered_el
and source['BACKGROUND'] in filtered_bg
and source['FWHM'] in filtered_fwhm
and source['FWHM'] > b_crlim):
id_num += 1
StN = source['PEAK']/source['BACKGROUND']
sources.append([source['RA'],source['DEC'],StN,id_num])
print ("Number of sources in BANZAI catalog after filtering: "
"{0}".format(len(sources)))
print ("({0}-sigma clipping on source ellipticity, "
"background level, and FWHM.)".format(b_sigma))
#Sort by S/N
sources = sorted(sources, key=itemgetter(2), reverse=True)
header = "# BEGIN CATALOG HEADER\n"
header += "# nfields 13\n"
header += "# ra 1 0 d degrees %10.5f\n"
header += "# dec 2 0 d degrees %10.5f\n"
header += "# id 3 0 c INDEF %15s\n"
header += "# END CATALOG HEADER\n"
header += "#\n"
with open('banzai.cat','w') as banzai_cat_file:
banzai_cat_file.write(header)
for source in sources:
line = "{0:10.5f}\t{1:10.5f}\t{2}\n".format(source[0],source[1],source[3])
banzai_cat_file.write(line)
print "Saving the {0} best sources to banzai.cat".format(len(sources))
hdul.close()
return 'banzai.cat'
| import lsc
from astropy.io import fits
from scipy.stats import sigmaclip
from operator import itemgetter
def make_cat(filename,datamax=75000,b_sigma=3.0,b_crlim=3.0):
if datamax == None: datamax = 75000
hdul = fits.open(filename)
banzai_cat = hdul['CAT'].data
print "Total number of sources in BANZAI catalog: {0}".format(len(banzai_cat))
ellipticities = [x['ELLIPTICITY'] for x in banzai_cat]
backgrounds = [x['BACKGROUND'] for x in banzai_cat]
fwhms = [x['FWHM'] for x in banzai_cat]
filtered_el, lo, hi = sigmaclip(ellipticities, low=b_sigma, high=b_sigma)
filtered_bg, lo, hi = sigmaclip(backgrounds, low=b_sigma, high=b_sigma)
filtered_fwhm, lo, hi = sigmaclip(fwhms, low=b_sigma, high=b_sigma)
id_num = 0
sources = []
for source in banzai_cat:
if (source['FLAG'] == 0
and source['PEAK'] <= datamax
and source['ELLIPTICITY'] in filtered_el
and source['BACKGROUND'] in filtered_bg
and source['FWHM'] in filtered_fwhm
and source['FWHM'] > b_crlim):
id_num += 1
StN = source['PEAK']/source['BACKGROUND']
sources.append([source['RA'],source['DEC'],StN,id_num])
print ("Number of sources in BANZAI catalog after filtering: "
"{0}".format(len(sources)))
print ("({0}-sigma clipping on source ellipticity, "
"background level, and FWHM.)".format(b_sigma))
#Sort by S/N
sources = sorted(sources, key=itemgetter(2), reverse=True)
header = "# BEGIN CATALOG HEADER\n"
header += "# nfields 13\n"
header += "# ra 1 0 d degrees %10.5f\n"
header += "# dec 2 0 d degrees %10.5f\n"
header += "# id 3 0 c INDEF %15s\n"
header += "# END CATALOG HEADER\n"
header += "#\n"
with open('banzai.cat','w') as banzai_cat_file:
banzai_cat_file.write(header)
for source in sources:
line = "{0:10.5f}\t{1:10.5f}\t{2}\n".format(source[0],source[1],source[3])
banzai_cat_file.write(line)
print "Saving the {0} best sources to banzai.cat".format(len(sources))
hdul.close()
return 'banzai.cat'
| en | 0.979285 | #Sort by S/N | 2.293832 | 2 |
utilities/file_utilities.py | drdavidrace/gdrive_connect | 0 | 6616270 | <gh_stars>0
'''
This contains a set of simple utilities that are used to keep the code
simple but also check the quality of the implementation as it proceeds
'''
import os
from os.path import normpath, realpath
from pathlib import Path
import errno
def clean_path(in_name=None):
    """Strip surrounding whitespace and return the canonical real path."""
    assert in_name is not None
    assert isinstance(in_name, str)
    tidy = in_name.strip()
    return realpath(normpath(tidy))
#
def create_file_name(dir_name=None, file_name=None, create_dir=False):
    '''
    Purpose:  Build a full, normalized file path from a directory name and a
      file name, optionally creating the directory tree.
      NOTE:  This only builds the file name (and optionally the directory);
      creation of the actual file is a different function.
    Inputs:
      dir_name - The directory part of the path.
      file_name - The base name of the file.
      create_dir - If True, create the directory tree when it does not exist.
    Outputs:
      The full, cleaned path name.
    Raises:
      OSError - if create_dir is True and the directory cannot be created.
    '''
    assert dir_name is not None
    assert file_name is not None
    assert isinstance(dir_name, str)
    assert isinstance(file_name, str)
    dir_name = clean_path(dir_name)
    full_path = clean_path(os.path.join(dir_name, file_name.strip()))
    if not create_dir:
        return full_path
    if not os.path.isdir(dir_name):
        try:
            os.makedirs(os.path.dirname(full_path), exist_ok=True)
        except Exception as e:
            # BUG FIX: the old code did ''.join([full_path, e]), which raises
            # TypeError (str + Exception) inside the handler and masks the
            # real error; format the message instead.
            raise OSError(errno.ENOENT, os.strerror(errno.ENOENT),
                          '{}: {}'.format(full_path, e))
    return full_path
#
def create_process_lockfile_name(dir_name=None, file_name=None, pid=None, create_dir=False):
    '''
    Purpose:  Build the full path of a per-process lock file by prefixing the
      file name with the process id ("<pid>-<file_name>"), optionally
      creating the directory tree.
      NOTE:  Only the name is built; the lock file itself is created elsewhere.
    Inputs:
      dir_name - The directory part of the path.
      file_name - The base name of the lock file.
      pid - Process id (int or str) used as the file-name prefix.
      create_dir - If True, create the directory tree when it does not exist.
    Outputs:
      The full, cleaned lock-file path.
    Raises:
      TypeError - if pid is neither int nor str.
      OSError - if create_dir is True and the directory cannot be created.
    '''
    assert dir_name is not None
    assert file_name is not None
    assert pid is not None
    assert isinstance(dir_name, str)
    assert isinstance(file_name, str)
    dir_name = clean_path(dir_name)
    file_name = file_name.strip()
    if isinstance(pid, int):
        prefix = "{:d}-".format(pid)
    elif isinstance(pid, str):
        prefix = "{:s}-".format(pid.strip())
    else:
        raise TypeError("Bad type: {}".format(pid))
    full_path = clean_path(os.path.join(dir_name, prefix + file_name))
    if not create_dir:
        return full_path
    if not os.path.isdir(dir_name):
        try:
            os.makedirs(os.path.dirname(full_path), exist_ok=True)
        except Exception as e:
            # BUG FIX: ''.join([full_path, e]) raised TypeError inside the
            # handler (str + Exception); format the message instead.
            raise OSError(errno.ENOENT, os.strerror(errno.ENOENT),
                          '{}: {}'.format(full_path, e))
    return full_path
#
def create_dir(dir_name=None, create_dir=True):
    '''
    Purpose:  Create a directory (and any missing parents) with the given
      name.  Relative names are resolved against the current working
      directory.
    Inputs:
      dir_name - The directory to create.
      create_dir - If False, do nothing and return None.
    Outputs:
      None if create_dir is False, otherwise the cleaned directory path.
    Raises:
      OSError - if the directory cannot be created.
    '''
    assert dir_name is not None
    assert isinstance(dir_name, str)
    if not os.path.isabs(dir_name):
        dir_name = os.path.join(os.getcwd(), dir_name)
    dir_name = clean_path(dir_name)
    if not create_dir:
        return None
    try:
        Path(dir_name).mkdir(parents=True, exist_ok=True)
    except Exception as e:
        # BUG FIX: ''.join([dir_name, e]) raised TypeError inside the
        # handler (str + Exception); format the message instead.
        raise OSError(errno.ENOENT, os.strerror(errno.ENOENT),
                      '{}: {}'.format(dir_name, e))
    return dir_name
#
def create_file(file_name=None, overwrite=False, create_dir=False):
    '''
    Purpose:  Create (or truncate) an empty file at the given path.  Relative
      paths are resolved against the current working directory.
    Inputs:
      file_name - Full or relative file name to create.
      overwrite - If True, truncate an existing file; if False, an existing
        file is left untouched.
      create_dir - If True, create missing parent directories.
    Outputs:
      None if the parent directory is missing and create_dir is False,
      otherwise the full path of the (now existing) file.
    Raises:
      OSError - if the directory or the file cannot be created.
    '''
    assert file_name is not None
    assert isinstance(file_name, str)
    work_file_name = clean_path(file_name)
    dir_name, f_name = os.path.split(work_file_name)
    if not os.path.isabs(dir_name):
        dir_name = os.path.join(os.getcwd(), dir_name)
    if not os.path.isdir(dir_name):
        if not create_dir:
            return None
        try:
            Path(dir_name).mkdir(parents=True, exist_ok=True)
        except Exception as e:
            raise OSError(errno.ENOENT, os.strerror(errno.ENOENT),
                          '{}: {}'.format(dir_name, e))
    real_file_name = os.path.join(dir_name, f_name)
    # An existing file is only touched when overwrite is requested.
    if os.path.isfile(real_file_name) and not overwrite:
        return real_file_name
    try:
        # 'w+' creates the file when missing and truncates it otherwise,
        # collapsing the four near-identical branches of the old code.
        with open(real_file_name, 'w+') as f:
            f.seek(0)
            f.truncate()
    except Exception as e:
        # BUG FIX: the original caught the exception but dropped it; chain
        # the underlying cause into the OSError message.
        raise OSError(errno.ENOENT, os.strerror(errno.ENOENT),
                      '{}: {}'.format(dir_name, e))
    return real_file_name
| '''
This contains a set of simple utilities that are used to keep the code
simple but also check the quality of the implementation as it proceeds
'''
import os
from os.path import normpath, realpath
from pathlib import Path
import errno
def clean_path(in_name=None):
    """Strip surrounding whitespace and return the canonical real path."""
    assert in_name is not None
    assert isinstance(in_name, str)
    tidy = in_name.strip()
    return realpath(normpath(tidy))
#
def create_file_name(dir_name=None, file_name=None, create_dir=False):
    '''
    Purpose:  Build a full, normalized file path from a directory name and a
      file name, optionally creating the directory tree.
      NOTE:  This only builds the file name (and optionally the directory);
      creation of the actual file is a different function.
    Inputs:
      dir_name - The directory part of the path.
      file_name - The base name of the file.
      create_dir - If True, create the directory tree when it does not exist.
    Outputs:
      The full, cleaned path name.
    Raises:
      OSError - if create_dir is True and the directory cannot be created.
    '''
    assert dir_name is not None
    assert file_name is not None
    assert isinstance(dir_name, str)
    assert isinstance(file_name, str)
    dir_name = clean_path(dir_name)
    full_path = clean_path(os.path.join(dir_name, file_name.strip()))
    if not create_dir:
        return full_path
    if not os.path.isdir(dir_name):
        try:
            os.makedirs(os.path.dirname(full_path), exist_ok=True)
        except Exception as e:
            # BUG FIX: the old code did ''.join([full_path, e]), which raises
            # TypeError (str + Exception) inside the handler and masks the
            # real error; format the message instead.
            raise OSError(errno.ENOENT, os.strerror(errno.ENOENT),
                          '{}: {}'.format(full_path, e))
    return full_path
#
def create_process_lockfile_name(dir_name=None, file_name=None, pid=None, create_dir=False):
    '''
    Purpose:  Build the full path of a per-process lock file by prefixing the
      file name with the process id ("<pid>-<file_name>"), optionally
      creating the directory tree.
      NOTE:  Only the name is built; the lock file itself is created elsewhere.
    Inputs:
      dir_name - The directory part of the path.
      file_name - The base name of the lock file.
      pid - Process id (int or str) used as the file-name prefix.
      create_dir - If True, create the directory tree when it does not exist.
    Outputs:
      The full, cleaned lock-file path.
    Raises:
      TypeError - if pid is neither int nor str.
      OSError - if create_dir is True and the directory cannot be created.
    '''
    assert dir_name is not None
    assert file_name is not None
    assert pid is not None
    assert isinstance(dir_name, str)
    assert isinstance(file_name, str)
    dir_name = clean_path(dir_name)
    file_name = file_name.strip()
    if isinstance(pid, int):
        prefix = "{:d}-".format(pid)
    elif isinstance(pid, str):
        prefix = "{:s}-".format(pid.strip())
    else:
        raise TypeError("Bad type: {}".format(pid))
    full_path = clean_path(os.path.join(dir_name, prefix + file_name))
    if not create_dir:
        return full_path
    if not os.path.isdir(dir_name):
        try:
            os.makedirs(os.path.dirname(full_path), exist_ok=True)
        except Exception as e:
            # BUG FIX: ''.join([full_path, e]) raised TypeError inside the
            # handler (str + Exception); format the message instead.
            raise OSError(errno.ENOENT, os.strerror(errno.ENOENT),
                          '{}: {}'.format(full_path, e))
    return full_path
#
def create_dir(dir_name=None, create_dir=True):
    '''
    Purpose:  Create a directory (and any missing parents) with the given
      name.  Relative names are resolved against the current working
      directory.
    Inputs:
      dir_name - The directory to create.
      create_dir - If False, do nothing and return None.
    Outputs:
      None if create_dir is False, otherwise the cleaned directory path.
    Raises:
      OSError - if the directory cannot be created.
    '''
    assert dir_name is not None
    assert isinstance(dir_name, str)
    if not os.path.isabs(dir_name):
        dir_name = os.path.join(os.getcwd(), dir_name)
    dir_name = clean_path(dir_name)
    if not create_dir:
        return None
    try:
        Path(dir_name).mkdir(parents=True, exist_ok=True)
    except Exception as e:
        # BUG FIX: ''.join([dir_name, e]) raised TypeError inside the
        # handler (str + Exception); format the message instead.
        raise OSError(errno.ENOENT, os.strerror(errno.ENOENT),
                      '{}: {}'.format(dir_name, e))
    return dir_name
#
def create_file(file_name=None, overwrite=False, create_dir=False):
    '''
    Purpose:  Create (or truncate) an empty file at the given path.  Relative
      paths are resolved against the current working directory.
    Inputs:
      file_name - Full or relative file name to create.
      overwrite - If True, truncate an existing file; if False, an existing
        file is left untouched.
      create_dir - If True, create missing parent directories.
    Outputs:
      None if the parent directory is missing and create_dir is False,
      otherwise the full path of the (now existing) file.
    Raises:
      OSError - if the directory or the file cannot be created.
    '''
    assert file_name is not None
    assert isinstance(file_name, str)
    work_file_name = clean_path(file_name)
    dir_name, f_name = os.path.split(work_file_name)
    if not os.path.isabs(dir_name):
        dir_name = os.path.join(os.getcwd(), dir_name)
    if not os.path.isdir(dir_name):
        if not create_dir:
            return None
        try:
            Path(dir_name).mkdir(parents=True, exist_ok=True)
        except Exception as e:
            raise OSError(errno.ENOENT, os.strerror(errno.ENOENT),
                          '{}: {}'.format(dir_name, e))
    real_file_name = os.path.join(dir_name, f_name)
    # An existing file is only touched when overwrite is requested.
    if os.path.isfile(real_file_name) and not overwrite:
        return real_file_name
    try:
        # 'w+' creates the file when missing and truncates it otherwise,
        # collapsing the four near-identical branches of the old code.
        with open(real_file_name, 'w+') as f:
            f.seek(0)
            f.truncate()
    except Exception as e:
        # BUG FIX: the original caught the exception but dropped it; chain
        # the underlying cause into the OSError message.
        raise OSError(errno.ENOENT, os.strerror(errno.ENOENT),
                      '{}: {}'.format(dir_name, e))
    return real_file_name
It checks to verify if the path is relative to the cwd or a full path Inputs: file_name: This is the file name to create, must be a full file name overwrite: If True overwrite an existing file if it exists; otherwise, do not overwrite create_dir: If True, create any directory leaves as necessary; otherwise do not create the directory tree Outputs: None if the file was not able to be created file_name if the file was created or overwritten Side Effects: A file is created with the given name if able | 3.599599 | 4 |
remove-duplicates-from-sorted-list-ii/remove-duplicates-from-sorted-list-ii.py | QQuinn03/LeetHub | 0 | 6616271 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def deleteDuplicates(self, head: ListNode) -> ListNode:
if not head:
return head
dummy = ListNode(0)
dummy.next = head
pre = dummy
cur = head
while cur and cur.next:
if cur.val ==cur.next.val:
while cur.next and cur.val ==cur.next.val:
cur = cur.next
pre.next = cur.next
cur = cur.next
else:
pre = pre.next
cur = cur.next
return dummy.next
| # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def deleteDuplicates(self, head: ListNode) -> ListNode:
if not head:
return head
dummy = ListNode(0)
dummy.next = head
pre = dummy
cur = head
while cur and cur.next:
if cur.val ==cur.next.val:
while cur.next and cur.val ==cur.next.val:
cur = cur.next
pre.next = cur.next
cur = cur.next
else:
pre = pre.next
cur = cur.next
return dummy.next
| en | 0.626952 | # Definition for singly-linked list. # class ListNode: # def __init__(self, val=0, next=None): # self.val = val # self.next = next | 3.753678 | 4 |
defaultdict_tutorial.py | NightmareQAQ/python-notes | 106 | 6616272 | <reponame>NightmareQAQ/python-notes<gh_stars>100-1000
# defaultdict means that if a key is not found in the dictionary,
# then instead of a KeyError being thrown, a new entry is created.
# The type of this new entry is given by the argument of defaultdict.
from collections import defaultdict
def ex1():
# For the first example, default items are created using int(), which will return the integer object 0.
int_dict = defaultdict(int)
print('int_dict[3]', int_dict[3]) # print int(), thus 0
# For the second example, default items are created using list(), which returns a new empty list object.
list_dict = defaultdict(list)
print('list_dict[test]', list_dict['ok']) # print list(), thus []
# default
dic_list = defaultdict(lambda: 'test')
dic_list['name'] = 'twtrubiks'
print('dic_list[name]', dic_list['name'])
print('dic_list[sex]', dic_list['sex'])
def ex2_letter_frequency(sentence):
frequencies = defaultdict(int)
for letter in sentence:
frequencies[letter] += 1
return frequencies
if __name__ == "__main__":
ex1()
print(ex2_letter_frequency('sentence'))
| # defaultdict means that if a key is not found in the dictionary,
# then instead of a KeyError being thrown, a new entry is created.
# The type of this new entry is given by the argument of defaultdict.
from collections import defaultdict
def ex1():
# For the first example, default items are created using int(), which will return the integer object 0.
int_dict = defaultdict(int)
print('int_dict[3]', int_dict[3]) # print int(), thus 0
# For the second example, default items are created using list(), which returns a new empty list object.
list_dict = defaultdict(list)
print('list_dict[test]', list_dict['ok']) # print list(), thus []
# default
dic_list = defaultdict(lambda: 'test')
dic_list['name'] = 'twtrubiks'
print('dic_list[name]', dic_list['name'])
print('dic_list[sex]', dic_list['sex'])
def ex2_letter_frequency(sentence):
frequencies = defaultdict(int)
for letter in sentence:
frequencies[letter] += 1
return frequencies
if __name__ == "__main__":
ex1()
print(ex2_letter_frequency('sentence')) | en | 0.785396 | # defaultdict means that if a key is not found in the dictionary, # then instead of a KeyError being thrown, a new entry is created. # The type of this new entry is given by the argument of defaultdict. # For the first example, default items are created using int(), which will return the integer object 0. # print int(), thus 0 # For the second example, default items are created using list(), which returns a new empty list object. # print list(), thus [] # default | 4.260402 | 4 |
py/40. Combination Sum II.py | longwangjhu/LeetCode | 3 | 6616273 | <reponame>longwangjhu/LeetCode<filename>py/40. Combination Sum II.py
# https://leetcode.com/problems/combination-sum-ii/
# Given a collection of candidate numbers (candidates) and a target number
# (target), find all unique combinations in candidates where the candidate numbers
# sum to target.
# Each number in candidates may only be used once in the combination.
# Note: The solution set must not contain duplicate combinations.
################################################################################
# all unique combos -> sort and dfs
# avoid duplicate: add child if first child or != prev child
class Solution:
def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:
candidates.sort()
ans = []
self.dfs(ans, [], 0, candidates, target)
return ans
def dfs(self, ans, holder, start_idx, candidates, remain):
if remain < 0:
return
if remain == 0:
ans.append(holder.copy())
return
for i in range(start_idx, len(candidates)):
if i == start_idx or candidates[i] != candidates[i-1]: # avoid duplicate
holder.append(candidates[i])
# use i + 1 since cannot reuse, set remain = remain - number
self.dfs(ans, holder, i + 1, candidates, remain - candidates[i])
holder.pop()
| Combination Sum II.py
# https://leetcode.com/problems/combination-sum-ii/
# Given a collection of candidate numbers (candidates) and a target number
# (target), find all unique combinations in candidates where the candidate numbers
# sum to target.
# Each number in candidates may only be used once in the combination.
# Note: The solution set must not contain duplicate combinations.
################################################################################
# all unique combos -> sort and dfs
# avoid duplicate: add child if first child or != prev child
class Solution:
def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:
candidates.sort()
ans = []
self.dfs(ans, [], 0, candidates, target)
return ans
def dfs(self, ans, holder, start_idx, candidates, remain):
if remain < 0:
return
if remain == 0:
ans.append(holder.copy())
return
for i in range(start_idx, len(candidates)):
if i == start_idx or candidates[i] != candidates[i-1]: # avoid duplicate
holder.append(candidates[i])
# use i + 1 since cannot reuse, set remain = remain - number
self.dfs(ans, holder, i + 1, candidates, remain - candidates[i])
holder.pop() | en | 0.618468 | # https://leetcode.com/problems/combination-sum-ii/ # Given a collection of candidate numbers (candidates) and a target number # (target), find all unique combinations in candidates where the candidate numbers # sum to target. # Each number in candidates may only be used once in the combination. # Note: The solution set must not contain duplicate combinations. ################################################################################ # all unique combos -> sort and dfs # avoid duplicate: add child if first child or != prev child # avoid duplicate # use i + 1 since cannot reuse, set remain = remain - number | 3.43258 | 3 |
iauctb_py/exceptions.py | sinabakh/iauctb-python | 3 | 6616274 | __author__ = '<NAME>'
class WrongCredentialsException(Exception):
pass
class WebsiteNotAvailableException(Exception):
pass
class UnknownException(Exception):
pass | __author__ = '<NAME>'
class WrongCredentialsException(Exception):
pass
class WebsiteNotAvailableException(Exception):
pass
class UnknownException(Exception):
pass | none | 1 | 1.877069 | 2 | |
fb-realtime-db-demo.py | lucasalbini/firebase-realtime-db-python | 13 | 6616275 | import firebase_admin
from firebase_admin import credentials
from firebase_admin import db
# Fetch the service account key JSON file contents
cred = credentials.Certificate('firebase-adminsdk.json')
# Initialize the app with a service account, granting admin privileges
firebase_admin.initialize_app(cred, {
'databaseURL': 'https://<your_app_name>.firebaseio.com/'
})
# Save data
ref = db.reference('/')
ref.set(
{
'boxes':
{
'box001': {
'color': 'red',
'width': 1,
'height': 3,
'length': 2
},
'box002': {
'color': 'green',
'width': 1,
'height': 2,
'length': 3
},
'box003': {
'color': 'yellow',
'width': 3,
'height': 2,
'length': 1
},
}
}
)
# Update data
ref = db.reference('boxes')
box_ref = ref.child('box001')
box_ref.update({
'color': 'blue'
})
# Multi-path update data
ref = db.reference('boxes')
ref.update({
'box001/color': 'red',
'box002/color': 'blue'
})
# Save lists of data
ref = db.reference('boxes')
ref.push({
'color': 'purple',
'width': 7,
'height': 8,
'length': 6
})
# Get the Unique Key Generated by push()
ref = db.reference('boxes')
new_box_ref = ref.push({
'color': 'purple',
'width': 7,
'height': 8,
'length': 6
})
box_id = new_box_ref.key
print(box_id)
# Retrieving data
ref = db.reference('boxes')
print(ref.get())
# Querying Data
# Ordering by a specified child key
ref = db.reference('boxes')
snapshot = ref.order_by_child('height').get()
for key, val in snapshot.items():
print('{0} => {1}'.format(key, val))
# Ordering by key
ref = db.reference('boxes')
snapshot = ref.order_by_key().get()
print(snapshot)
# Ordering by value
ref = db.reference('/')
ref.set(
{
"weights": {
"person001" : 60,
"person002" : 65,
"person003" : 80,
"person004" : 55,
"person005" : 72
}
}
)
ref = db.reference('weights')
snapshot = ref.order_by_value().get()
for key, val in snapshot.items():
print('{0} => {1}'.format(key, val))
# Limit Queries
ref = db.reference('boxes')
snapshot = ref.order_by_child('color').limit_to_last(2).get()
for key in snapshot:
print(key)
snapshot = ref.order_by_child('color').limit_to_last(3).get()
for key, val in snapshot.items():
print('{0} => {1}'.format(key, val))
weights_ref = db.reference('weights')
snapshot = weights_ref.order_by_value().limit_to_last(3).get()
for key, val in snapshot.items():
print('{0} => {1}'.format(key, val))
# Range Queries
snapshot = ref.order_by_child('color').start_at('r').get()
for key, val in snapshot.items():
print('{0} => {1}'.format(key, val))
ref = db.reference('boxes')
snapshot = ref.order_by_key().end_at('w').get()
for key in snapshot:
print(key)
ref = db.reference('boxes')
snapshot = ref.order_by_key().start_at('g').end_at(u'n\uf8ff').get()
for key in snapshot:
print(key)
ref = db.reference('boxes')
snapshot = ref.order_by_child('length').equal_to(3).get()
for key in snapshot:
print(key)
| import firebase_admin
from firebase_admin import credentials
from firebase_admin import db
# Fetch the service account key JSON file contents
cred = credentials.Certificate('firebase-adminsdk.json')
# Initialize the app with a service account, granting admin privileges
firebase_admin.initialize_app(cred, {
'databaseURL': 'https://<your_app_name>.firebaseio.com/'
})
# Save data
ref = db.reference('/')
ref.set(
{
'boxes':
{
'box001': {
'color': 'red',
'width': 1,
'height': 3,
'length': 2
},
'box002': {
'color': 'green',
'width': 1,
'height': 2,
'length': 3
},
'box003': {
'color': 'yellow',
'width': 3,
'height': 2,
'length': 1
},
}
}
)
# Update data
ref = db.reference('boxes')
box_ref = ref.child('box001')
box_ref.update({
'color': 'blue'
})
# Multi-path update data
ref = db.reference('boxes')
ref.update({
'box001/color': 'red',
'box002/color': 'blue'
})
# Save lists of data
ref = db.reference('boxes')
ref.push({
'color': 'purple',
'width': 7,
'height': 8,
'length': 6
})
# Get the Unique Key Generated by push()
ref = db.reference('boxes')
new_box_ref = ref.push({
'color': 'purple',
'width': 7,
'height': 8,
'length': 6
})
box_id = new_box_ref.key
print(box_id)
# Retrieving data
ref = db.reference('boxes')
print(ref.get())
# Querying Data
# Ordering by a specified child key
ref = db.reference('boxes')
snapshot = ref.order_by_child('height').get()
for key, val in snapshot.items():
print('{0} => {1}'.format(key, val))
# Ordering by key
ref = db.reference('boxes')
snapshot = ref.order_by_key().get()
print(snapshot)
# Ordering by value
ref = db.reference('/')
ref.set(
{
"weights": {
"person001" : 60,
"person002" : 65,
"person003" : 80,
"person004" : 55,
"person005" : 72
}
}
)
ref = db.reference('weights')
snapshot = ref.order_by_value().get()
for key, val in snapshot.items():
print('{0} => {1}'.format(key, val))
# Limit Queries
ref = db.reference('boxes')
snapshot = ref.order_by_child('color').limit_to_last(2).get()
for key in snapshot:
print(key)
snapshot = ref.order_by_child('color').limit_to_last(3).get()
for key, val in snapshot.items():
print('{0} => {1}'.format(key, val))
weights_ref = db.reference('weights')
snapshot = weights_ref.order_by_value().limit_to_last(3).get()
for key, val in snapshot.items():
print('{0} => {1}'.format(key, val))
# Range Queries
snapshot = ref.order_by_child('color').start_at('r').get()
for key, val in snapshot.items():
print('{0} => {1}'.format(key, val))
ref = db.reference('boxes')
snapshot = ref.order_by_key().end_at('w').get()
for key in snapshot:
print(key)
ref = db.reference('boxes')
snapshot = ref.order_by_key().start_at('g').end_at(u'n\uf8ff').get()
for key in snapshot:
print(key)
ref = db.reference('boxes')
snapshot = ref.order_by_child('length').equal_to(3).get()
for key in snapshot:
print(key)
| en | 0.742132 | # Fetch the service account key JSON file contents # Initialize the app with a service account, granting admin privileges # Save data # Update data # Multi-path update data # Save lists of data # Get the Unique Key Generated by push() # Retrieving data # Querying Data # Ordering by a specified child key # Ordering by key # Ordering by value # Limit Queries # Range Queries | 2.536744 | 3 |
django_admin_filter/apps.py | flebel/django-admin-filter | 8 | 6616276 | <reponame>flebel/django-admin-filter
from django.apps import AppConfig
class DjangoAdminFilterConfig(AppConfig):
name = 'django_admin_filter'
| from django.apps import AppConfig
class DjangoAdminFilterConfig(AppConfig):
name = 'django_admin_filter' | none | 1 | 1.104384 | 1 | |
test.py | AbinavRavi/outlier_robust_aggregators_fl | 2 | 6616277 | import torch
import loss
import utils
import dataloader
def test(model,device,dataloader): | import torch
import loss
import utils
import dataloader
def test(model,device,dataloader): | none | 1 | 1.277965 | 1 | |
scrape_songs/__version__.py | QualityHammer/Whats-on-this-Album | 0 | 6616278 | <reponame>QualityHammer/Whats-on-this-Album<filename>scrape_songs/__version__.py<gh_stars>0
MAJOR = 0
MINOR = 0
MICRO = 1
__version__ = f"{MAJOR}.{MINOR}.{MICRO}"
| MAJOR = 0
MINOR = 0
MICRO = 1
__version__ = f"{MAJOR}.{MINOR}.{MICRO}" | none | 1 | 1.241533 | 1 | |
strakk_generator.py | mariusmoe/strakk2 | 1 | 6616279 | # from urllib.request import urlopen
import requests
from bs4 import BeautifulSoup
import re
import csv
from time import sleep
from config import *
import os.path
# new line problem in db can be solved see: http://stackoverflow.com/questions/35999185/angular2-pipes-output-raw-html
# -- In config --
# url_part1 =
# url_part2 =
# url_img =
# -- end --
# =m&cid=1 # materialer
# =v&cid=1 # videoer
# how to scape a table http://codereview.stackexchange.com/questions/60769/scrape-an-html-table-with-python
myBigList = []
_strakkId = 7743
def html_scraping(strakkId):
"""
@param strakkId id of page to be scraped
will try to scrape the page and retrive images
"""
# html = urlopen( url_part1 + str(strakkId) + url_part2)
html = requests.get( url_part1 + str(strakkId) + url_part2)
data = html.text
# bsObj = BeautifulSoup(html)
bsObj = BeautifulSoup(data, 'html.parser')
# Grab data in lists
# title of strakk
titles = bsObj.findAll("h1", {"class":"pname"})
# intro to strakk
intros = bsObj.findAll("div", {"class":"pattern_intro"})
# price of strakk
prices = bsObj.findAll("p", {"class":"cost1"})
# descriptive text of strakk
txts = bsObj.findAll("div", {"class":"pattern_text"})
# table of strakk properties
table = bsObj.findAll(id="diag_symbols")
# secondary cover images
niceImg = bsObj.findAll(id="glasscase")
# Diagram images
diagramImg = bsObj.findAll("div", {"class":"col-md-pull-3"}) # not ready
def get_image(id, target, dir, type, optional_name=""):
"""
@param target target path for image retrival(need to bee combined with
baseURL)
@param dir directory to store image in
possible dirs at the moment [cover, symbols]
"""
# Decide upon a filename, needed so dups of symbols wont happen
part_of_path = ""
if optional_name != "":
part_of_path = str(optional_name)
else:
part_of_path = str(id)
# Check if photo exists if not SAVE it
if os.path.isfile( "img/" + str(dir) + "/c" + str(part_of_path) + "." + str(type) ):
print "Image exists - no-op"
else:
# write cover photo to file
resource = requests.get( url_img + target).raw
r = requests.get(url_img + target, stream=True)
if r.status_code == 200:
with open("img/" + str(dir) + "/c" + str(part_of_path) + "." + str(type),"wb") as f:
for chunk in r:
f.write(chunk)
# Symbols and symbolText extraction
symbols = []
symbolText = ""
print "(((((((((((((((((((((((())))))))))))))))))))))))"
print table
symbols_link = []
if len(table) != 0:
for tab in table:
symbols.append(tab.findAll("img", { "src":re.compile('/drops/symbols.*\.gif') }))
symbolText = tab.get_text().encode('utf-8').translate(None, '\t').translate(None, '\n').split("=")
symbolText = filter(None, symbolText) # fastest
# print symbols
# print symbolText
for symbol in symbols[0]:
_src = symbol["src"]
symbols_link.append(_src.encode('utf-8'))
get_image(strakkId, _src, "symbols", "gif", _src.strip("/drops/symbols/").strip(".gif"))
# all images
oteherImg = bsObj.findAll("img")
# need secondary images # /drops/mag/173/51/51b-2.jpg
# try to find cover photo - regex thet select all imglinks with '/drops/mag' and ends with .jpg
images = bsObj.findAll("img", {"src":re.compile('/drops/mag.*\.jpg')})
# print("-------------")
# print(images)
#the shortes url matching is always cover photo
shortestImgUrl = len(images[0]["src"])
target = images[0]["src"] # target is the cover photo
for image in images:
if len(image["src"]) < shortestImgUrl:
shortestImgUrl = len(image["src"])
target = image["src"]
print("###############")
print(target)
# extract secondary cover images
glassImg = [] # list with div with image
_glassImage = [] # list of links (pure)
# Find all img tags in css cclass glassImg
for div in niceImg:
glassImg.append(div.findAll("img", { "src":re.compile('/drops/mag.*\.jpg') }))
for immage in glassImg[0]:
_src = immage["src"] # pick the src atribute
_glassImage.append(_src.strip("/drops/mag/").strip(".jpg").replace("/", "-"))
if _src != target:
get_image(strakkId, _src, "cover", "jpg", _src.strip("/drops/mag/").strip(".jpg").replace("/", "-"))
print "----------- GLASS IMG ----------------"
print _glassImage
print "........... END GLASS IMG ............."
def filter_list(full_list, excludes):
s = set(excludes)
return (x for x in full_list if x not in s)
# Extract diagram images
diagramImgList = []
_diagramImg = []
print "////////////////////////////"
# print diagramImg
print "////////////////////////////"
for diagram in diagramImg:
diagramImgList.append(diagram.findAll("img", { "src":re.compile('/drops/mag.*\.jpg') }))
for immage in diagramImgList[0]:
_src = immage["src"]
if _src in symbols_link:
continue
_diagramImg.append(_src.strip("/drops/mag/").strip(".jpg").replace("/", "-").encode('utf-8'))
if _src != target:
get_image(strakkId, _src, "cover", "jpg", _src.strip("/drops/mag/").strip(".jpg").replace("/", "-"))
# enable to get photo, cover is the directory to put the image in
cover = "cover"
coverType = "jpg"
# get_image(strakkId, target, cover, coverType)
# print("^^^^^^^^^^^^")
thisDatapoint = []
thisDatapoint.append(strakkId)
# becouse title is in a list we have to unpack it to be able to use get_text()
for title in titles:
# print(title.get_text().strip()) # remove leading spaces, have been a problem
thisDatapoint.append(str(title.get_text().strip()).encode('utf-8'))
for intro in intros:
# print(intro.get_text())
thisDatapoint.append(str(intro.get_text().encode('utf-8')))
for txt in txts:
# print(txt.get_text())
thisDatapoint.append(txt.get_text().encode('utf-8'))
for price in prices:
# print(price.get_text())
thisDatapoint.append(price.get_text())
# test multivalue cell # thisDatapoint.append(["one", "two"]) - WORKS!
thisDatapoint.append("img/cover/c" + _glassImage[0] + ".jpg")
thisDatapoint.append(symbols_link) # symbolLink
thisDatapoint.append(symbolText) # symbolText
thisDatapoint.append(_glassImage) # oteherImg
thisDatapoint.append(_diagramImg) # diagramImg
# TODO: SOLVED - add table to csv file http://stackoverflow.com/questions/3853614/multiple-values-for-one-field-with-comma-separated-values-csv-format
# TODO: SOLVED - add aa in explanation
# TODO: SOLVED - add diagram images
# TODO: improve stability and error catches
# add all the data to a big list
myBigList.append(thisDatapoint)
# scrape multiple pages
# 7697,7720
# 4710
# 4710,4730 # good sample
for n in range(4710,4730):
try:
html_scraping(n)
except:
pass
sleep(2)
# html_scraping(4719)
# Create a csv file with path to images and the text
# with open("output.csv", "a", newline='', encoding='utf-8') as f:
with open("output.csv", "a") as f:
writer = csv.writer(f)
writer.writerow(['strakkId','title','intro','txt','price','coverimg','symbolLink','symbolText', 'otherImg', 'diagramImg'])
writer.writerows(myBigList)
| # from urllib.request import urlopen
import requests
from bs4 import BeautifulSoup
import re
import csv
from time import sleep
from config import *
import os.path
# new line problem in db can be solved see: http://stackoverflow.com/questions/35999185/angular2-pipes-output-raw-html
# -- In config --
# url_part1 =
# url_part2 =
# url_img =
# -- end --
# =m&cid=1 # materialer
# =v&cid=1 # videoer
# how to scape a table http://codereview.stackexchange.com/questions/60769/scrape-an-html-table-with-python
myBigList = []
_strakkId = 7743
def html_scraping(strakkId):
"""
@param strakkId id of page to be scraped
will try to scrape the page and retrive images
"""
# html = urlopen( url_part1 + str(strakkId) + url_part2)
html = requests.get( url_part1 + str(strakkId) + url_part2)
data = html.text
# bsObj = BeautifulSoup(html)
bsObj = BeautifulSoup(data, 'html.parser')
# Grab data in lists
# title of strakk
titles = bsObj.findAll("h1", {"class":"pname"})
# intro to strakk
intros = bsObj.findAll("div", {"class":"pattern_intro"})
# price of strakk
prices = bsObj.findAll("p", {"class":"cost1"})
# descriptive text of strakk
txts = bsObj.findAll("div", {"class":"pattern_text"})
# table of strakk properties
table = bsObj.findAll(id="diag_symbols")
# secondary cover images
niceImg = bsObj.findAll(id="glasscase")
# Diagram images
diagramImg = bsObj.findAll("div", {"class":"col-md-pull-3"}) # not ready
def get_image(id, target, dir, type, optional_name=""):
"""
@param target target path for image retrival(need to bee combined with
baseURL)
@param dir directory to store image in
possible dirs at the moment [cover, symbols]
"""
# Decide upon a filename, needed so dups of symbols wont happen
part_of_path = ""
if optional_name != "":
part_of_path = str(optional_name)
else:
part_of_path = str(id)
# Check if photo exists if not SAVE it
if os.path.isfile( "img/" + str(dir) + "/c" + str(part_of_path) + "." + str(type) ):
print "Image exists - no-op"
else:
# write cover photo to file
resource = requests.get( url_img + target).raw
r = requests.get(url_img + target, stream=True)
if r.status_code == 200:
with open("img/" + str(dir) + "/c" + str(part_of_path) + "." + str(type),"wb") as f:
for chunk in r:
f.write(chunk)
# Symbols and symbolText extraction
symbols = []
symbolText = ""
print "(((((((((((((((((((((((())))))))))))))))))))))))"
print table
symbols_link = []
if len(table) != 0:
for tab in table:
symbols.append(tab.findAll("img", { "src":re.compile('/drops/symbols.*\.gif') }))
symbolText = tab.get_text().encode('utf-8').translate(None, '\t').translate(None, '\n').split("=")
symbolText = filter(None, symbolText) # fastest
# print symbols
# print symbolText
for symbol in symbols[0]:
_src = symbol["src"]
symbols_link.append(_src.encode('utf-8'))
get_image(strakkId, _src, "symbols", "gif", _src.strip("/drops/symbols/").strip(".gif"))
# all images
oteherImg = bsObj.findAll("img")
# need secondary images # /drops/mag/173/51/51b-2.jpg
# try to find cover photo - regex thet select all imglinks with '/drops/mag' and ends with .jpg
images = bsObj.findAll("img", {"src":re.compile('/drops/mag.*\.jpg')})
# print("-------------")
# print(images)
#the shortes url matching is always cover photo
shortestImgUrl = len(images[0]["src"])
target = images[0]["src"] # target is the cover photo
for image in images:
if len(image["src"]) < shortestImgUrl:
shortestImgUrl = len(image["src"])
target = image["src"]
print("###############")
print(target)
# extract secondary cover images
glassImg = [] # list with div with image
_glassImage = [] # list of links (pure)
# Find all img tags in css cclass glassImg
for div in niceImg:
glassImg.append(div.findAll("img", { "src":re.compile('/drops/mag.*\.jpg') }))
for immage in glassImg[0]:
_src = immage["src"] # pick the src atribute
_glassImage.append(_src.strip("/drops/mag/").strip(".jpg").replace("/", "-"))
if _src != target:
get_image(strakkId, _src, "cover", "jpg", _src.strip("/drops/mag/").strip(".jpg").replace("/", "-"))
print "----------- GLASS IMG ----------------"
print _glassImage
print "........... END GLASS IMG ............."
def filter_list(full_list, excludes):
s = set(excludes)
return (x for x in full_list if x not in s)
# Extract diagram images
diagramImgList = []
_diagramImg = []
print "////////////////////////////"
# print diagramImg
print "////////////////////////////"
for diagram in diagramImg:
diagramImgList.append(diagram.findAll("img", { "src":re.compile('/drops/mag.*\.jpg') }))
for immage in diagramImgList[0]:
_src = immage["src"]
if _src in symbols_link:
continue
_diagramImg.append(_src.strip("/drops/mag/").strip(".jpg").replace("/", "-").encode('utf-8'))
if _src != target:
get_image(strakkId, _src, "cover", "jpg", _src.strip("/drops/mag/").strip(".jpg").replace("/", "-"))
# enable to get photo, cover is the directory to put the image in
cover = "cover"
coverType = "jpg"
# get_image(strakkId, target, cover, coverType)
# print("^^^^^^^^^^^^")
thisDatapoint = []
thisDatapoint.append(strakkId)
# becouse title is in a list we have to unpack it to be able to use get_text()
for title in titles:
# print(title.get_text().strip()) # remove leading spaces, have been a problem
thisDatapoint.append(str(title.get_text().strip()).encode('utf-8'))
for intro in intros:
# print(intro.get_text())
thisDatapoint.append(str(intro.get_text().encode('utf-8')))
for txt in txts:
# print(txt.get_text())
thisDatapoint.append(txt.get_text().encode('utf-8'))
for price in prices:
# print(price.get_text())
thisDatapoint.append(price.get_text())
# test multivalue cell # thisDatapoint.append(["one", "two"]) - WORKS!
thisDatapoint.append("img/cover/c" + _glassImage[0] + ".jpg")
thisDatapoint.append(symbols_link) # symbolLink
thisDatapoint.append(symbolText) # symbolText
thisDatapoint.append(_glassImage) # oteherImg
thisDatapoint.append(_diagramImg) # diagramImg
# TODO: SOLVED - add table to csv file http://stackoverflow.com/questions/3853614/multiple-values-for-one-field-with-comma-separated-values-csv-format
# TODO: SOLVED - add aa in explanation
# TODO: SOLVED - add diagram images
# TODO: improve stability and error catches
# add all the data to a big list
myBigList.append(thisDatapoint)
# scrape multiple pages
# 7697,7720
# 4710
# 4710,4730 # good sample
for n in range(4710,4730):
try:
html_scraping(n)
except:
pass
sleep(2)
# html_scraping(4719)
# Create a csv file with path to images and the text
# with open("output.csv", "a", newline='', encoding='utf-8') as f:
with open("output.csv", "a") as f:
writer = csv.writer(f)
writer.writerow(['strakkId','title','intro','txt','price','coverimg','symbolLink','symbolText', 'otherImg', 'diagramImg'])
writer.writerows(myBigList)
| en | 0.654078 | # from urllib.request import urlopen # new line problem in db can be solved see: http://stackoverflow.com/questions/35999185/angular2-pipes-output-raw-html # -- In config -- # url_part1 = # url_part2 = # url_img = # -- end -- # =m&cid=1 # materialer # =v&cid=1 # videoer # how to scape a table http://codereview.stackexchange.com/questions/60769/scrape-an-html-table-with-python @param strakkId id of page to be scraped will try to scrape the page and retrive images # html = urlopen( url_part1 + str(strakkId) + url_part2) # bsObj = BeautifulSoup(html) # Grab data in lists # title of strakk # intro to strakk # price of strakk # descriptive text of strakk # table of strakk properties # secondary cover images # Diagram images # not ready @param target target path for image retrival(need to bee combined with baseURL) @param dir directory to store image in possible dirs at the moment [cover, symbols] # Decide upon a filename, needed so dups of symbols wont happen # Check if photo exists if not SAVE it # write cover photo to file # Symbols and symbolText extraction # fastest # print symbols # print symbolText # all images # need secondary images # /drops/mag/173/51/51b-2.jpg # try to find cover photo - regex thet select all imglinks with '/drops/mag' and ends with .jpg # print("-------------") # print(images) #the shortes url matching is always cover photo # target is the cover photo ##############") # extract secondary cover images # list with div with image # list of links (pure) # Find all img tags in css cclass glassImg # pick the src atribute # Extract diagram images # print diagramImg # enable to get photo, cover is the directory to put the image in # get_image(strakkId, target, cover, coverType) # print("^^^^^^^^^^^^") # becouse title is in a list we have to unpack it to be able to use get_text() # print(title.get_text().strip()) # remove leading spaces, have been a problem # print(intro.get_text()) # print(txt.get_text()) # print(price.get_text()) # 
test multivalue cell # thisDatapoint.append(["one", "two"]) - WORKS! # symbolLink # symbolText # oteherImg # diagramImg # TODO: SOLVED - add table to csv file http://stackoverflow.com/questions/3853614/multiple-values-for-one-field-with-comma-separated-values-csv-format # TODO: SOLVED - add aa in explanation # TODO: SOLVED - add diagram images # TODO: improve stability and error catches # add all the data to a big list # scrape multiple pages # 7697,7720 # 4710 # 4710,4730 # good sample # html_scraping(4719) # Create a csv file with path to images and the text # with open("output.csv", "a", newline='', encoding='utf-8') as f: | 2.782491 | 3 |
aoc_2016/day05.py | geoffbeier/aoc_2021 | 0 | 6616280 | import itertools
import re
import sys
import operator
from collections import defaultdict, namedtuple
from dataclasses import dataclass
from itertools import product
from math import prod
from typing import List, Dict, Any, Tuple
import aocd
from . import aoc_year
from loguru import logger
from _md5 import md5
aoc_day = 5
try:
if __name__ != "__main__":
assert str(aoc_day) in __name__
except AssertionError:
logger.error(
f"aoc_day={aoc_day} but this module name is {__name__}. aocd.get_data() is not going to behave properly."
)
m = re.match(r".*.day(\d+?)$", __name__)
if m:
aoc_day = int(m.groups()[0])
logger.warning(
f"Attempting to self-correct based on {__name__}. Now aoc_day={aoc_day}"
)
else:
logger.error(f"Unable to guess a day from {__name__}. Exiting")
sys.exit(1)
@dataclass
class AOCContext:
raw: List[str]
door_id: str
def next_digit(door_id: str, next_index: int, prefix: str = "00000"):
found = False
i = next_index
while not found:
digest = md5(f"{door_id}{i}".encode()).hexdigest()
i += 1
if digest.startswith(prefix):
return digest, i
def preprocess():
raw = aocd.get_data(day=aoc_day, year=aoc_year).splitlines()
door_id = raw[0].strip()
context = AOCContext(raw, door_id)
return context
def part1(context: AOCContext):
password = ""
next_i = 0
for _ in range(8):
digest, next_i = next_digit(context.door_id, next_i)
c = digest[5]
password += c
logger.debug(f"password={password}")
return str(password)
def part2(context: AOCContext):
    """Build the positional password: for each qualifying hash, hex digit 6
    names a slot (only 0-7 are valid) and digit 7 is the character; only the
    first hit per slot counts.  Stops once all eight slots are filled."""
    slots = ["-"] * 8
    search_from = 0
    while "-" in slots:
        digest, search_from = next_digit(context.door_id, search_from)
        position = int(digest[5], 16)
        if position < len(slots) and slots[position] == "-":
            slots[position] = digest[6]
            assert len(slots) == 8
            logger.debug(f"password={''.join(slots)}")
    return "".join(slots)
# Self-test fixtures consumed by test() below: (raw_input, expected, solver).
# NOTE(review): this sample ("London to Dublin = 464"...) looks like AoC 2015
# Day 9 distance data, not a Day 5 door id; part1 performs an MD5 search over
# it, so the expected value 605 will not be produced -- confirm the fixture
# is intended for this module.
tests = [
    (
        """London to Dublin = 464
London to Belfast = 518
Dublin to Belfast = 141
""",
        605,
        part1,
    ),
]
def test(start: int = 0, finish: int = len(tests)):
    """Run the fixture cases in tests[start:finish], monkey-patching aocd so
    preprocess() reads the fixture text instead of hitting the network.
    Stops at the first failing case."""
    for offset, (raw, expected, solver) in enumerate(tests[start:finish]):
        # Bind the fixture text via a default argument so each lambda keeps
        # its own case's input.
        aocd.get_data = lambda *args, raw=raw, **kwargs: raw
        result = solver(preprocess())
        case_no = start + offset + 1
        if f"{result}" != f"{expected}":
            logger.error(f"Test {case_no} failed: got {result}, expected {expected}")
            break
        else:
            logger.success(f"Test {case_no}: {expected}")


if __name__ == "__main__":
    test()
| import itertools
import re
import sys
import operator
from collections import defaultdict, namedtuple
from dataclasses import dataclass
from itertools import product
from math import prod
from typing import List, Dict, Any, Tuple
import aocd
from . import aoc_year
from loguru import logger
from _md5 import md5
aoc_day = 5
try:
if __name__ != "__main__":
assert str(aoc_day) in __name__
except AssertionError:
logger.error(
f"aoc_day={aoc_day} but this module name is {__name__}. aocd.get_data() is not going to behave properly."
)
m = re.match(r".*.day(\d+?)$", __name__)
if m:
aoc_day = int(m.groups()[0])
logger.warning(
f"Attempting to self-correct based on {__name__}. Now aoc_day={aoc_day}"
)
else:
logger.error(f"Unable to guess a day from {__name__}. Exiting")
sys.exit(1)
@dataclass
class AOCContext:
raw: List[str]
door_id: str
def next_digit(door_id: str, next_index: int, prefix: str = "00000"):
found = False
i = next_index
while not found:
digest = md5(f"{door_id}{i}".encode()).hexdigest()
i += 1
if digest.startswith(prefix):
return digest, i
def preprocess():
raw = aocd.get_data(day=aoc_day, year=aoc_year).splitlines()
door_id = raw[0].strip()
context = AOCContext(raw, door_id)
return context
def part1(context: AOCContext):
password = ""
next_i = 0
for _ in range(8):
digest, next_i = next_digit(context.door_id, next_i)
c = digest[5]
password += c
logger.debug(f"password={password}")
return str(password)
def part2(context: AOCContext):
password = "--------"
next_i = 0
while "-" in password:
digest, next_i = next_digit(context.door_id, next_i)
pos = int(digest[5], 16)
c = digest[6]
if pos < len(password) and password[pos] == "-":
password = password[:pos] + c + password[pos + 1 :]
assert len(password) == 8
logger.debug(f"password={password}")
return str(password)
tests = [
(
"""London to Dublin = 464
London to Belfast = 518
Dublin to Belfast = 141
""",
605,
part1,
),
]
def test(start: int = 0, finish: int = len(tests)):
for i, t in enumerate(tests[start:finish]):
def gd(*args, **kwargs):
return t[0]
aocd.get_data = gd
result = t[2](preprocess())
if f"{result}" != f"{t[1]}":
logger.error(f"Test {start + i + 1} failed: got {result}, expected {t[1]}")
break
else:
logger.success(f"Test {start + i + 1}: {t[1]}")
if __name__ == "__main__":
test()
| en | 0.768413 | London to Dublin = 464 London to Belfast = 518 Dublin to Belfast = 141 | 2.633347 | 3 |
cv_workshops/13-section/1-clazz.py | afterloe/opencv-practice | 5 | 6616281 | <gh_stars>1-10
#!/usr/bin/env python3
# -*- coding=utf-8 -*-
import cv2 as cv
import numpy as np
"""
OpenCV DNN单张与多张图像的推断
OpenCV DNN中支持单张图像推断,同时还支持分批次方式的图像推断,对应的两个相关API分别为blobFromImage与blobFromImages,它们的返回对象都
是一个四维的Mat对象-按照顺序分别为NCHW 其组织方式如下:
N表示多张图像
C表示接受输入图像的通道数目
H表示接受输入图像的高度
W表示接受输入图像的宽度
"""
bin_model = "../../../raspberry-auto/models/googlenet/bvlc_googlenet.caffemodel"
config = "../../../raspberry-auto/models/googlenet/bvlc_googlenet.prototxt"
txt = "../../../raspberry-auto/models/googlenet/classification_classes_ILSVRC2012.txt"
def main():
    """Classify three sample images in a single batched forward pass through
    GoogLeNet and display each image with its top-1 label and confidence."""
    net = cv.dnn.readNetFromCaffe(config, bin_model)
    net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
    net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
    with open(txt, "r") as f:
        classes = f.read().rstrip("\n").split("\n")
    image_paths = [
        "../../../raspberry-auto/pic/70eb501cjw1dwp7pecgewj.jpg",
        "../../../raspberry-auto/pic/Meter_in_word.png",
        "../../../raspberry-auto/pic/hw_freebuds3_2.jpg",
    ]
    images = [cv.imread(path) for path in image_paths]
    # One NCHW blob holding the whole batch (resize to 224x224, mean-subtract).
    blob = cv.dnn.blobFromImages(images, 1.0, (224, 224), (104, 117, 123), False, crop=False)
    net.setInput(blob)
    outs = net.forward()
    t, _ = net.getPerfProfile()
    text = "Inference time: %.2f ms" % (t * 1000.0 / cv.getTickFrequency())
    print(text)
    for image, out in zip(images, outs):
        class_id = int(np.argmax(out))
        confidence = out[class_id]
        cv.putText(image, text, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0))
        label = "%s: %.4f" % (classes[class_id] if classes else "Class #%d" % class_id, confidence)
        cv.putText(image, label, (50, 50), cv.FONT_HERSHEY_SIMPLEX, 0.75, (255, 0, 0), 2)
        cv.imshow("googlenet demo", image)
        cv.waitKey(0)
if "__main__" == __name__:
main()
cv.destroyAllWindows()
| #!/usr/bin/env python3
# -*- coding=utf-8 -*-
import cv2 as cv
import numpy as np
"""
OpenCV DNN单张与多张图像的推断
OpenCV DNN中支持单张图像推断,同时还支持分批次方式的图像推断,对应的两个相关API分别为blobFromImage与blobFromImages,它们的返回对象都
是一个四维的Mat对象-按照顺序分别为NCHW 其组织方式如下:
N表示多张图像
C表示接受输入图像的通道数目
H表示接受输入图像的高度
W表示接受输入图像的宽度
"""
bin_model = "../../../raspberry-auto/models/googlenet/bvlc_googlenet.caffemodel"
config = "../../../raspberry-auto/models/googlenet/bvlc_googlenet.prototxt"
txt = "../../../raspberry-auto/models/googlenet/classification_classes_ILSVRC2012.txt"
def main():
net = cv.dnn.readNetFromCaffe(config, bin_model)
net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
classes = None
with open(txt, "r") as f:
classes = f.read().rstrip("\n").split("\n")
images = [cv.imread("../../../raspberry-auto/pic/70eb501cjw1dwp7pecgewj.jpg"),
cv.imread("../../../raspberry-auto/pic/Meter_in_word.png"),
cv.imread("../../../raspberry-auto/pic/hw_freebuds3_2.jpg")]
data = cv.dnn.blobFromImages(images, 1.0, (224, 224), (104, 117, 123), False, crop=False)
net.setInput(data)
outs = net.forward()
t, _ = net.getPerfProfile()
text = "Inference time: %.2f ms" % (t * 1000.0 / cv.getTickFrequency())
print(text)
for i in range(len(outs)):
out = outs[i]
class_id = int(np.argmax(out))
confidence = out[class_id]
cv.putText(images[i], text, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0))
label = "%s: %.4f" % (classes[class_id] if classes else "Class #%d" % class_id, confidence)
cv.putText(images[i], label, (50, 50), cv.FONT_HERSHEY_SIMPLEX, 0.75, (255, 0, 0), 2)
cv.imshow("googlenet demo", images[i])
cv.waitKey(0)
if "__main__" == __name__:
main()
cv.destroyAllWindows() | zh | 0.93353 | #!/usr/bin/env python3 # -*- coding=utf-8 -*- OpenCV DNN单张与多张图像的推断 OpenCV DNN中支持单张图像推断,同时还支持分批次方式的图像推断,对应的两个相关API分别为blobFromImage与blobFromImages,它们的返回对象都 是一个四维的Mat对象-按照顺序分别为NCHW 其组织方式如下: N表示多张图像 C表示接受输入图像的通道数目 H表示接受输入图像的高度 W表示接受输入图像的宽度 #%d" % class_id, confidence) | 2.636948 | 3 |
blossom/parse_intent.py | bbrzycki/evolution-project | 8 | 6616282 | import random
import population_funcs
def parse(intent_list, population_dict):
    """
    Resolve the proposed organism states in intent_list against the current
    population, keeping at most one accepted state per organism id.

    Parameters
    ----------
    intent_list : list of lists of Organisms
        Each inner list holds the proposed post-action organism states
        produced by one organism's act; its length equals the number of
        organisms in the current time step.
    population_dict : dict of Organisms
        Dict of current organisms, keyed by species.

    Returns
    -------
    updated_population_dict : dict of Organisms
        Updated population in which conflicts (the same organism id appearing
        in several proposals) are resolved: proposal sets are drawn in random
        order and a set is accepted only if none of its organism ids has been
        accepted yet, so each organism id appears at most once in the output.
        Living organisms whose proposals were all rejected are stepped
        forward without acting.
    """
    # TODO: Figure out exactly how this should be controlled -- on the scale of
    # the universe, the world, or the organisms itself
    current_by_id = {
        org.organism_id: org
        for species in population_dict
        for org in population_dict[species]['organisms']
    }
    accepted = []
    accepted_ids = set()
    # Draw proposal sets in random order; accept a whole set only when it
    # shares no organism id with anything accepted so far.
    for proposal in random.sample(intent_list, len(intent_list)):
        proposal_ids = {org.organism_id for org in proposal}
        if accepted_ids.isdisjoint(proposal_ids):
            accepted.extend(proposal)
            accepted_ids |= proposal_ids
    # Living organisms with no accepted proposal advance one step unchanged.
    for org_id, org in current_by_id.items():
        if org_id not in accepted_ids and org.alive:
            accepted.append(org.step_without_acting())
    return population_funcs.get_population_dict(accepted,
                                                population_dict.keys())
| import random
import population_funcs
def parse(intent_list, population_dict):
"""
Determine whether the intent list is valid and fix it in the event of
conflicts.
Parameters
----------
intent_list : list of lists of Organisms
List of lists of organisms with proposed organism states,
after each organism has 'acted'. Length equals number of organisms in
the current time step.
population_dict : dict of Organisms
Dict of current organisms
Returns
-------
updated_population_dict : dict of Organisms
Dict of updated organisms, where organism states that conflict between
intent_list and population_dict are resolved.
Conflicts may be cases in which an organism has different states in the
intent list, perhaps arrising from the actions of other organisms that
somehow effect its state. This method resolves those conflicts, so that
there is only one organism with a given organism id present in the final
output list at all times.
"""
# TODO: Figure out exactly how this should be controlled -- on the scale of
# the universe, the world, or the organisms itself
updated_list = []
id_org_dict = {}
for species in population_dict:
for org in population_dict[species]['organisms']:
id_org_dict[org.organism_id] = org
new_organism_ids = set()
# Randomly sample organism steps to select. Only use sets for conditionals,
# add to saved structures using lists and dicts (since key order is
# preserved)
for organism_set in random.sample(intent_list, len(intent_list)):
set_ids = set(org.organism_id for org in organism_set)
if len(new_organism_ids & set_ids) == 0:
updated_list.extend(organism_set)
new_organism_ids.update(set_ids)
# Add back organisms whose steps were not chosen (and increment status)
for id in id_org_dict.keys():
if id not in new_organism_ids:
org = id_org_dict[id]
if org.alive:
updated_list.append(org.step_without_acting())
return population_funcs.get_population_dict(updated_list,
population_dict.keys())
| en | 0.929477 | Determine whether the intent list is valid and fix it in the event of conflicts. Parameters ---------- intent_list : list of lists of Organisms List of lists of organisms with proposed organism states, after each organism has 'acted'. Length equals number of organisms in the current time step. population_dict : dict of Organisms Dict of current organisms Returns ------- updated_population_dict : dict of Organisms Dict of updated organisms, where organism states that conflict between intent_list and population_dict are resolved. Conflicts may be cases in which an organism has different states in the intent list, perhaps arrising from the actions of other organisms that somehow effect its state. This method resolves those conflicts, so that there is only one organism with a given organism id present in the final output list at all times. # TODO: Figure out exactly how this should be controlled -- on the scale of # the universe, the world, or the organisms itself # Randomly sample organism steps to select. Only use sets for conditionals, # add to saved structures using lists and dicts (since key order is # preserved) # Add back organisms whose steps were not chosen (and increment status) | 3.361908 | 3 |
tuesday_speech/speech_processing_basics.py | trungnt13/uef-summerschool2018 | 0 | 6616283 | "https://github.com/Jakobovski/free-spoken-digit-dataset"
import librosa.display
import numpy as np
import matplotlib.pyplot as plt
# Recording of a spoken digit from the free-spoken-digit-dataset (URL above).
audio_filename = r'C:\Users\vvestman\Desktop\free-spoken-digit-dataset-master\recordings\9_jackson_0.wav'
target_sr = 8000
signal, sr = librosa.load(audio_filename, target_sr)
# 20 ms analysis windows with a 5 ms hop (160 / 40 samples at the 8 kHz
# target rate).
win_length = int(0.02 * sr)
hop_length = int(0.005 * sr)
spectrogram = librosa.stft(signal, n_fft=512, win_length=win_length, hop_length=hop_length, window='hamming')
# EXERCISE: Write a code that creates a spectrogram without using librosa (framing --> windowing --> fft ...).
# The manual pipeline below deliberately recomputes (and overwrites) the
# librosa spectrogram: framing -> Hamming windowing -> FFT -> keep the
# non-negative frequency bins.
frame_endpoints = list(range(win_length-1, signal.size, hop_length))
frames = np.zeros(shape=(win_length, len(frame_endpoints)))
# Column i holds the win_length samples ending at frame_endpoints[i].
for i in range(len(frame_endpoints)):
    frames[:, i] = signal[frame_endpoints[i] - win_length+1 :
                          frame_endpoints[i] + 1]
hamming_window = np.hamming(win_length)
frames = frames * hamming_window[:, None]
# 512-point FFT per frame; keep bins 0..256 (the input frames are real, so
# the spectrum is conjugate-symmetric and the upper half is redundant).
spectrogram = np.fft.fft(frames, n=512, axis=0)
spectrogram = spectrogram[:257, :]
# Figure 1: the waveform and one example windowed frame (frame 40).
plt.figure()
plt.subplot(2,1,1)
plt.plot(signal)
plt.subplot(2,1,2)
plt.plot(frames[:, 40])
# Figure 2: the spectrogram in dB relative to its maximum.
plt.figure()
spectrogram = librosa.amplitude_to_db(spectrogram, ref=np.max)
librosa.display.specshow(spectrogram, y_axis='linear', x_axis='time', sr=sr, hop_length=hop_length)
plt.colorbar(format='%+2.0f dB')
plt.title('Linear-frequency power spectrogram')
plt.show()
# Figure 3: every 10th spectral slice, vertically offset so the slices stack.
sparse_spectrogram = spectrogram[:, ::10]
plt.plot(np.linspace(0, 4000, spectrogram.shape[0]), sparse_spectrogram + np.arange(sparse_spectrogram.shape[1]) * 30)
plt.show()
| "https://github.com/Jakobovski/free-spoken-digit-dataset"
import librosa.display
import numpy as np
import matplotlib.pyplot as plt
audio_filename = r'C:\Users\vvestman\Desktop\free-spoken-digit-dataset-master\recordings\9_jackson_0.wav'
target_sr = 8000
signal, sr = librosa.load(audio_filename, target_sr)
win_length = int(0.02 * sr)
hop_length = int(0.005 * sr)
spectrogram = librosa.stft(signal, n_fft=512, win_length=win_length, hop_length=hop_length, window='hamming')
# EXERCISE: Write a code that creates a spectrogram without using librosa (framing --> windowing --> fft ...).
frame_endpoints = list(range(win_length-1, signal.size, hop_length))
frames = np.zeros(shape=(win_length, len(frame_endpoints)))
for i in range(len(frame_endpoints)):
frames[:, i] = signal[frame_endpoints[i] - win_length+1 :
frame_endpoints[i] + 1]
hamming_window = np.hamming(win_length)
frames = frames * hamming_window[:, None]
spectrogram = np.fft.fft(frames, n=512, axis=0)
spectrogram = spectrogram[:257, :]
plt.figure()
plt.subplot(2,1,1)
plt.plot(signal)
plt.subplot(2,1,2)
plt.plot(frames[:, 40])
plt.figure()
spectrogram = librosa.amplitude_to_db(spectrogram, ref=np.max)
librosa.display.specshow(spectrogram, y_axis='linear', x_axis='time', sr=sr, hop_length=hop_length)
plt.colorbar(format='%+2.0f dB')
plt.title('Linear-frequency power spectrogram')
plt.show()
sparse_spectrogram = spectrogram[:, ::10]
plt.plot(np.linspace(0, 4000, spectrogram.shape[0]), sparse_spectrogram + np.arange(sparse_spectrogram.shape[1]) * 30)
plt.show()
| en | 0.518681 | # EXERCISE: Write a code that creates a spectrogram without using librosa (framing --> windowing --> fft ...). | 3.117242 | 3 |
cellmesh/test.py | shunfumao/cellmesh | 6 | 6616284 | import unittest
class TestDB(unittest.TestCase):
    """Smoke tests for the cellmesh.db accessor functions.

    Each test opens the bundled database at cellmesh/data/cellmesh.db and
    prints a small sample of the query result.  There are no assertions,
    so these only verify that the calls complete without raising.
    """
    def test_get_all_cell_id_names(self):
        """
        List (cell id, cell name) pairs, including cell components but
        excluding chromosomes and cell lines.

        usage: python -m unittest cellmesh.test.TestDB.test_get_all_cell_id_names
        """
        import os
        from cellmesh.db import get_all_cell_id_names
        PATH = os.path.dirname(os.path.abspath(__file__))
        DB_DIR = os.path.join(PATH, 'data', 'cellmesh.db')
        print('database:\n' + DB_DIR)
        results = get_all_cell_id_names(
            db_dir=DB_DIR,
            include_cell_components=True,
            include_chromosomes=False,
            include_cell_lines=False)
        print('first 5 (cell id, cell name):\n' + str(results[0:5]))
        return
    def test_get_all_genes(self):
        """
        List all human gene names (upper-cased) known to the database.

        usage: python -m unittest cellmesh.test.TestDB.test_get_all_genes
        """
        import os
        from cellmesh.db import get_all_genes
        PATH = os.path.dirname(os.path.abspath(__file__))
        DB_DIR = os.path.join(PATH, 'data', 'cellmesh.db')
        print('database:\n' + DB_DIR)
        results = get_all_genes(
            db_dir=DB_DIR,
            species='human',
            uppercase_names=True)
        print('first 5 genes:\n' + str(results[0:5]))
        return
    def test_get_cell_genes_pmids(self):
        """
        Fetch the human genes (with PubMed ids) associated with one cell
        type, filtered at a citation-count threshold of 3.

        usage: python -m unittest cellmesh.test.TestDB.test_get_cell_genes_pmids
        """
        import os
        from cellmesh.db import get_cell_genes_pmids
        PATH = os.path.dirname(os.path.abspath(__file__))
        DB_DIR = os.path.join(PATH, 'data', 'cellmesh.db')
        print('database:\n' + DB_DIR)
        cell_id = 'D000069261' # Podosomes
        results = get_cell_genes_pmids(
            cell=cell_id,
            db_dir=DB_DIR,
            species='human',
            threshold=3,
            uppercase_gene_names=True)
        print('cell \'Podosomes\' has \'human\'genes:\n' + str(results))
        return
    def test_get_metainfo(self):
        """
        Print the database's meta information.

        usage: python -m unittest cellmesh.test.TestDB.test_get_metainfo
        """
        import os
        from cellmesh.db import get_metainfo
        PATH = os.path.dirname(os.path.abspath(__file__))
        DB_DIR = os.path.join(PATH, 'data', 'cellmesh.db')
        print('database:\n' + DB_DIR)
        get_metainfo()
        return
    def test_write_csv(self):
        """
        Export the mouse portion of the database to CSV.

        usage: python -m unittest cellmesh.test.TestDB.test_write_csv
        """
        import os
        from cellmesh.db import write_csv
        PATH = os.path.dirname(os.path.abspath(__file__))
        DB_DIR = os.path.join(PATH, 'data', 'cellmesh.db')
        print('database:\n' + DB_DIR)
        write_csv(DB_DIR, 'mouse' )
        return
class TestQuery(unittest.TestCase):
    """Smoke tests for the cellmesh.query probability ranking functions.

    Like TestDB, these print their inputs/outputs rather than asserting,
    so they only verify that the query code runs end to end.
    """
    def test_calc_prob_one_query_one_cell(self):
        """
        Score a single (query genes, cell) pair with hand-built inputs.

        usage: python -m unittest cellmesh.test.TestQuery.test_calc_prob_one_query_one_cell
        """
        from cellmesh.query import calc_prob_one_query_one_cell
        print('----- Input -----')
        genes = ['CD79A', 'MS4A1', 'CD79B']
        print('genes:')
        print(genes)
        cell_id = 'D001402'
        print('cell_id:')
        print(cell_id)
        cell_gene_count = [('CD79A', 187), ('MS4A1', 12), ('POLM', 4), ('CCR2', 10)]
        print('cell_gene_count:')
        print(cell_gene_count)
        overlapping_genes = ['CD79A', 'MS4A1']
        print('overlapping_genes:')
        print(overlapping_genes)
        params = {'alpha': None}
        print('params:')
        print(params)
        N_all_genes = 27322
        print('N_all_genes:')
        print(N_all_genes)
        # The query function takes a single packed argument tuple.
        args = (genes, cell_id, cell_gene_count, overlapping_genes, params, N_all_genes)
        print('\n----- Run calc_prob_one_query_one_cell -----\n')
        result = calc_prob_one_query_one_cell(args)
        print('----- Output -----')
        print('(cell_id, prob_score):')
        print(result)
        return
    def test_prob_test(self):
        """
        Rank all cell types against a B-cell marker gene list and print the
        top 10 hits with their overlapping genes and PubMed ids.

        usage: python -m unittest cellmesh.test.TestQuery.test_prob_test
        """
        import os
        from cellmesh.query import prob_test
        PATH = os.path.dirname(os.path.abspath(__file__))
        DB_DIR = os.path.join(PATH, 'data', 'cellmesh.db')
        print('database:\n' + DB_DIR)
        # Top 15 genes for B cell in Tabula Muris Droplet dataset
        query_genes = ['CD79A','MS4A1','CD79B','TNFRSF13C','BANK1',
            'CR2','CD19','CD37','CD22','FCRL1',
            'FCRLA','CD74','LTB','BLK','POU2AF1']
        query_params = {
            'n_proc': 1, # num of proc >= 1
            'db_cnt_thre': 3,
            'alpha': None}
        cell_prob_vals = prob_test(
            genes=query_genes,
            params=query_params,
            db_dir=DB_DIR,
            species='mouse')
        for i in range(min(len(cell_prob_vals), 10)):
            t = cell_prob_vals[i]
            print("i=%d, id=%s, name=%s, prob=%f"%(i, t[0], t[1], t[2]))
            print("    overlapping genes:" + str(t[3]))
            print("    pmids wrt overlapping genes:" + str(t[4]))
            print('\n\n')
        return

if __name__ == '__main__':
    unittest.main()
| import unittest
class TestDB(unittest.TestCase):
def test_get_all_cell_id_names(self):
"""
usage: python -m unittest cellmesh.test.TestDB.test_get_all_cell_id_names
"""
import os
from cellmesh.db import get_all_cell_id_names
PATH = os.path.dirname(os.path.abspath(__file__))
DB_DIR = os.path.join(PATH, 'data', 'cellmesh.db')
print('database:\n' + DB_DIR)
results = get_all_cell_id_names(
db_dir=DB_DIR,
include_cell_components=True,
include_chromosomes=False,
include_cell_lines=False)
print('first 5 (cell id, cell name):\n' + str(results[0:5]))
return
def test_get_all_genes(self):
"""
usage: python -m unittest cellmesh.test.TestDB.test_get_all_genes
"""
import os
from cellmesh.db import get_all_genes
PATH = os.path.dirname(os.path.abspath(__file__))
DB_DIR = os.path.join(PATH, 'data', 'cellmesh.db')
print('database:\n' + DB_DIR)
results = get_all_genes(
db_dir=DB_DIR,
species='human',
uppercase_names=True)
print('first 5 genes:\n' + str(results[0:5]))
return
def test_get_cell_genes_pmids(self):
"""
usage: python -m unittest cellmesh.test.TestDB.test_get_cell_genes_pmids
"""
import os
from cellmesh.db import get_cell_genes_pmids
PATH = os.path.dirname(os.path.abspath(__file__))
DB_DIR = os.path.join(PATH, 'data', 'cellmesh.db')
print('database:\n' + DB_DIR)
cell_id = 'D000069261' # Podosomes
results = get_cell_genes_pmids(
cell=cell_id,
db_dir=DB_DIR,
species='human',
threshold=3,
uppercase_gene_names=True)
print('cell \'Podosomes\' has \'human\'genes:\n' + str(results))
return
def test_get_metainfo(self):
"""
usage: python -m unittest cellmesh.test.TestDB.test_get_metainfo
"""
import os
from cellmesh.db import get_metainfo
PATH = os.path.dirname(os.path.abspath(__file__))
DB_DIR = os.path.join(PATH, 'data', 'cellmesh.db')
print('database:\n' + DB_DIR)
get_metainfo()
return
def test_write_csv(self):
"""
usage: python -m unittest cellmesh.test.TestDB.test_write_csv
"""
import os
from cellmesh.db import write_csv
PATH = os.path.dirname(os.path.abspath(__file__))
DB_DIR = os.path.join(PATH, 'data', 'cellmesh.db')
print('database:\n' + DB_DIR)
write_csv(DB_DIR, 'mouse' )
return
class TestQuery(unittest.TestCase):
def test_calc_prob_one_query_one_cell(self):
"""
usage: python -m unittest cellmesh.test.TestQuery.test_calc_prob_one_query_one_cell
"""
from cellmesh.query import calc_prob_one_query_one_cell
print('----- Input -----')
genes = ['CD79A', 'MS4A1', 'CD79B']
print('genes:')
print(genes)
cell_id = 'D001402'
print('cell_id:')
print(cell_id)
cell_gene_count = [('CD79A', 187), ('MS4A1', 12), ('POLM', 4), ('CCR2', 10)]
print('cell_gene_count:')
print(cell_gene_count)
overlapping_genes = ['CD79A', 'MS4A1']
print('overlapping_genes:')
print(overlapping_genes)
params = {'alpha': None}
print('params:')
print(params)
N_all_genes = 27322
print('N_all_genes:')
print(N_all_genes)
args = (genes, cell_id, cell_gene_count, overlapping_genes, params, N_all_genes)
print('\n----- Run calc_prob_one_query_one_cell -----\n')
result = calc_prob_one_query_one_cell(args)
print('----- Output -----')
print('(cell_id, prob_score):')
print(result)
return
def test_prob_test(self):
"""
usage: python -m unittest cellmesh.test.TestQuery.test_prob_test
"""
import os
from cellmesh.query import prob_test
PATH = os.path.dirname(os.path.abspath(__file__))
DB_DIR = os.path.join(PATH, 'data', 'cellmesh.db')
print('database:\n' + DB_DIR)
# Top 15 genes for B cell in Tabula Muris Droplet dataset
query_genes = ['CD79A','MS4A1','CD79B','TNFRSF13C','BANK1',
'CR2','CD19','CD37','CD22','FCRL1',
'FCRLA','CD74','LTB','BLK','POU2AF1']
query_params = {
'n_proc': 1, # num of proc >= 1
'db_cnt_thre': 3,
'alpha': None}
cell_prob_vals = prob_test(
genes=query_genes,
params=query_params,
db_dir=DB_DIR,
species='mouse')
for i in range(min(len(cell_prob_vals), 10)):
t = cell_prob_vals[i]
print("i=%d, id=%s, name=%s, prob=%f"%(i, t[0], t[1], t[2]))
print(" overlapping genes:" + str(t[3]))
print(" pmids wrt overlapping genes:" + str(t[4]))
print('\n\n')
return
if __name__ == '__main__':
unittest.main()
| en | 0.399586 | usage: python -m unittest cellmesh.test.TestDB.test_get_all_cell_id_names usage: python -m unittest cellmesh.test.TestDB.test_get_all_genes usage: python -m unittest cellmesh.test.TestDB.test_get_cell_genes_pmids # Podosomes usage: python -m unittest cellmesh.test.TestDB.test_get_metainfo usage: python -m unittest cellmesh.test.TestDB.test_write_csv usage: python -m unittest cellmesh.test.TestQuery.test_calc_prob_one_query_one_cell usage: python -m unittest cellmesh.test.TestQuery.test_prob_test # Top 15 genes for B cell in Tabula Muris Droplet dataset # num of proc >= 1 | 2.665402 | 3 |
exercicios/ex106.py | DeyvisonR/curso_python | 0 | 6616285 | <reponame>DeyvisonR/curso_python
from time import sleep
# ANSI escape sequences keyed by Portuguese colour names: 'branco' is inverse
# video, the others are black text (30) on coloured backgrounds (41-47), and
# 'fechar' resets all attributes.
cores = {'branco': '\033[7;30m',
         'vermelho': '\033[0;30;41m',
         'verde': '\033[0;30;42m',
         'amarelo': '\033[0;30;43m',
         'azul': '\033[0;30;44m',
         'roxo': '\033[0;30;45m',
         'azulmarinho': '\033[0;30;46m',
         'cinza': '\033[0;30;47m',
         'fechar': '\033[m'}
def ajuda(comando):
    """Print the Python help page for the function/module named *comando*.

    *comando* is already a string, so it is passed straight to help();
    the previous ``help(f'{comando}')`` was a redundant identity conversion.
    """
    help(comando)
def titulo(txt, cor='fechar'):
    """Print *txt* centred inside a '-=' banner, coloured via the module-level
    ``cores`` ANSI table.

    Parameters:
        txt: banner text; the banner is twice the text's length wide.
        cor: key into ``cores`` ('verde', 'azul', ...); defaults to the
             reset code so the banner is printed uncoloured.
    """
    tam = len(txt)
    borda = '-=' * tam  # computed once; was built twice via a pointless f-string
    # cor is already the dict key; no f-string round-trip needed.
    print(cores[cor], end='')
    print(borda)
    print(f'{txt:^{tam * 2}}')
    print(borda)
    print(cores['fechar'], end='')
# Interactive loop: prompt for a function/module name and show its help page,
# until the user types FIM ("end").
while True:
    titulo('SISTEMA DE AJUDA PYHELP', 'verde')
    sleep(1)
    comando = str(input('Função ou Biblioteca: '))
    if comando.upper() == 'FIM':
        break
    else:
        titulo(f'ACESSANDO O MANUAL DO "{comando}" ', 'azul')
        sleep(1)
        # Render the help text in inverse video, then reset the colour.
        print(cores['branco'], end='')
        ajuda(comando)
        print(cores['fechar'], end='')
        sleep(1)
titulo('ATE LOGO', 'vermelho')
| from time import sleep
cores = {'branco': '\033[7;30m',
'vermelho': '\033[0;30;41m',
'verde': '\033[0;30;42m',
'amarelo': '\033[0;30;43m',
'azul': '\033[0;30;44m',
'roxo': '\033[0;30;45m',
'azulmarinho': '\033[0;30;46m',
'cinza': '\033[0;30;47m',
'fechar': '\033[m'}
def ajuda(comando):
help(f'{comando}')
def titulo(txt, cor='fechar'):
tam = len(txt)
print(cores[f'{cor}'], end='')
print(f'-=' * tam)
print(f'{txt:^{tam * 2}}')
print('-=' * tam)
print(cores['fechar'], end='')
while True:
titulo('SISTEMA DE AJUDA PYHELP', 'verde')
sleep(1)
comando = str(input('Função ou Biblioteca: '))
if comando.upper() == 'FIM':
break
else:
titulo(f'ACESSANDO O MANUAL DO "{comando}" ', 'azul')
sleep(1)
print(cores['branco'], end='')
ajuda(comando)
print(cores['fechar'], end='')
sleep(1)
titulo('ATE LOGO', 'vermelho') | none | 1 | 3.509148 | 4 | |
ml/2_kNN/to_tyoutyou.py | GinRyan/Python3Tutorial | 0 | 6616286 | <gh_stars>0
# Path to a handwritten-digits text file used in the kNN chapter.
file_path=r'D:\Tutorial4MachineLearning\i_digits.txt'
import os
# Resolve and show the absolute form of the path.
absfilePath = os.path.abspath(file_path)
print(absfilePath)
# Echo the file line by line; each line keeps its trailing newline, so
# print() adds a blank line between rows.
with open(file_path) as file_object2:
    for line in file_object2:
        print(line)
| file_path=r'D:\Tutorial4MachineLearning\i_digits.txt'
import os
absfilePath = os.path.abspath(file_path)
print(absfilePath)
with open(file_path) as file_object2:
for line in file_object2:
print(line) | none | 1 | 3.111199 | 3 | |
feature_selection.py | PhilLint/Expedia-Ranking-Competition | 0 | 6616287 | import numpy as np
import pandas as pd
from sklearn.feature_selection import RFE
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from model_test import impute_na, get_sample, split_train_test
from feature_engineering import extract_train_features
from scoring import score_prediction
def feature_selection(data, estimator, n_features=None):
    """Fit recursive feature elimination (RFE) on the full training frame
    and print the names of the surviving feature columns."""
    features = data.drop(columns=["target", "booking_bool", "click_bool", "position", "random_bool"])
    labels = data["target"]
    rfe = RFE(estimator=estimator, n_features_to_select=n_features)
    rfe.fit(features, labels)
    print(features.loc[:, rfe.support_].columns)
def decreasing_features_select(data, estimator, target):
    """Repeat RFE with a shrinking feature budget (all features down to just
    above 10, step -2); for each budget, refit the estimator on the reduced
    feature set and score its predictions on the held-out quarter."""
    train, test = split_train_test(data, split=4)
    drop_cols = ["target", "booking_bool", "click_bool", "position", "random_bool"]
    X_train = train.drop(columns=drop_cols)
    y_train = train["target"]
    X_test = test.drop(columns=drop_cols)
    y_test = test[["target", "srch_id", "prop_id", "booking_bool", "click_bool"]]
    for n in range(len(X_train.columns), 10, -2):
        print("##################################")
        print(f"Number of features used: {n}.")
        rfe = RFE(estimator=estimator, n_features_to_select=n)
        rfe.fit(X_train, y_train)
        kept = rfe.support_
        print("Features used: ")
        print(X_train.loc[:, kept].columns)
        estimator.fit(X_train.loc[:, kept], y_train)
        pred = clf_to_predict(estimator, X_test.loc[:, kept], target)
        score_prediction(pred, y_test, to_print=True)
def clf_to_predict(estimator, X_test, target, pred_weight=3):
    """Turn a fitted classifier's class probabilities into a ranking score.

    Parameters:
        estimator: fitted classifier exposing ``predict_proba``.
        X_test: feature frame/array to predict on.
        target: "book"  -> probability of the positive (booking) class;
                "score" -> P(click) + pred_weight * P(book), i.e. the booking
                           column is up-weighted before summing columns 1 and 2.
        pred_weight: weight applied to the booking-probability column for the
            "score" target; defaults to 3, the value the training script uses.
            (Previously this was read from a module-level global that is only
            bound inside the __main__ block, so importing and calling this
            function raised NameError.)

    Returns:
        1-D array of prediction scores, one per row of X_test.

    Raises:
        ValueError: for any other target (e.g. "score_rank"), which cannot be
            produced with a classifier.  (Previously this branch printed an
            error and silently returned None, crashing downstream scoring.)
    """
    if target == "book":
        return estimator.predict_proba(X_test)[:, 1]
    if target == "score":
        probs = estimator.predict_proba(X_test)
        probs[:, 2] = probs[:, 2] * pred_weight
        return probs[:, [1, 2]].sum(axis=1)
    raise ValueError(
        f"Unsupported target {target!r}: classification supports only 'book' and 'score'."
    )
if __name__ == "main":
pd.options.mode.chained_assignment = None
data = pd.read_csv("C:/Users/Frede/Dropbox/Master/DM/Assignments/2/DM2/final_training_fixed_data.csv")
impute_na(data)
sample = get_sample(data=data, size=0.1)
estimator = RandomForestClassifier(n_estimators=100, n_jobs=-1)
targets = ["score"]
max_rank = 10
pred_weight = 3
for target in targets:
print(f"\nCURRENT CONFIGURATION")
print("########################################################################")
print(f"Target = {target}")
print(f"Max_rank = {max_rank}")
print(f"Pred_weight = {pred_weight}")
print("########################################################################")
extract_train_features(data=sample, target=target, max_rank=max_rank)
#decreasing_features_select(data=sample, estimator=estimator, target=target)
feature_selection(data=sample, estimator=estimator, n_features=10)
| import numpy as np
import pandas as pd
from sklearn.feature_selection import RFE
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from model_test import impute_na, get_sample, split_train_test
from feature_engineering import extract_train_features
from scoring import score_prediction
def feature_selection(data, estimator, n_features=None):
X_train = data.drop(columns=["target", "booking_bool", "click_bool", "position", "random_bool"])
y_train = data["target"]
selector = RFE(estimator=estimator, n_features_to_select=n_features)
selector.fit(X_train, y_train)
cols = selector.support_
print(X_train.loc[:, cols].columns)
def decreasing_features_select(data, estimator, target):
train, test = split_train_test(data, split=4)
X_train = train.drop(columns=["target", "booking_bool", "click_bool", "position", "random_bool"])
y_train = train["target"]
X_test = test.drop(columns=["target", "booking_bool", "click_bool", "position", "random_bool"])
y_test = test[["target", "srch_id", "prop_id", "booking_bool", "click_bool"]]
n_features = len(X_train.columns)
for n in range(n_features,10, -2):
print("##################################")
print(f"Number of features used: {n}.")
selector = RFE(estimator=estimator, n_features_to_select=n)
selector.fit(X_train, y_train)
cols = selector.support_
print("Features used: ")
print(X_train.loc[:, cols].columns)
reduced_train = X_train.loc[:, cols]
estimator.fit(reduced_train, y_train)
reduced_test = X_test.loc[:, cols]
pred = clf_to_predict(estimator, reduced_test, target)
score_prediction(pred, y_test, to_print=True)
def clf_to_predict(estimator, X_test, target):
if target == "book":
prediction = estimator.predict_proba(X_test)[:, 1]
elif target == "score":
predict_array = estimator.predict_proba(X_test)
predict_array[:, 2] = predict_array[:, 2] * pred_weight
prediction = predict_array[:, [1, 2]].sum(axis=1)
else:
print("ERROR. no using classification with score_rank!")
return
return prediction
if __name__ == "main":
pd.options.mode.chained_assignment = None
data = pd.read_csv("C:/Users/Frede/Dropbox/Master/DM/Assignments/2/DM2/final_training_fixed_data.csv")
impute_na(data)
sample = get_sample(data=data, size=0.1)
estimator = RandomForestClassifier(n_estimators=100, n_jobs=-1)
targets = ["score"]
max_rank = 10
pred_weight = 3
for target in targets:
print(f"\nCURRENT CONFIGURATION")
print("########################################################################")
print(f"Target = {target}")
print(f"Max_rank = {max_rank}")
print(f"Pred_weight = {pred_weight}")
print("########################################################################")
extract_train_features(data=sample, target=target, max_rank=max_rank)
#decreasing_features_select(data=sample, estimator=estimator, target=target)
feature_selection(data=sample, estimator=estimator, n_features=10)
| de | 0.607632 | #################################") #######################################################################") #######################################################################") #decreasing_features_select(data=sample, estimator=estimator, target=target) | 3.051338 | 3 |
DCGAN.py | qianqianjun/DCGAN | 0 | 6616288 | """
write by qianqianjun
2019.12.20
DCGAN 网络架构实现
"""
from generator import Generator
from discriminater import Discriminator
import tensorflow as tf
class DCGAN(object):
def __init__(self,hps):
"""
建立一个DCGAN的网络架构
:param hps: 网络的所有超参数的集合
"""
g_channels=hps.g_channels
d_channels=hps.d_channels
self._batch_size=hps.batch_size
self._init_conv_size=hps.init_conv_size
self._z_dim=hps.z_dim
self._img_size=hps.img_size
self._generator=Generator(g_channels,self._init_conv_size)
self._discriminator=Discriminator(d_channels)
def build(self):
"""
构建整个计算图
:return:
"""
# 创建随机向量和图片的占位符
self._z_placeholder=tf.placeholder(tf.float32,
(self._batch_size,self._z_dim))
self._img_placeholder=tf.placeholder(tf.float32,
(self._batch_size,
self._img_size,
self._img_size,1))
# 将随机向量输入生成器生成图片
generated_imgs=self._generator(self._z_placeholder,training=True)
# 将来生成的图片经过判别器来得到 生成图像的logits
fake_img_logits=self._discriminator(
generated_imgs,training=True
)
# 将真实的图片经过判别器得到真实图像的 logits
real_img_logits=self._discriminator(
self._img_placeholder,training=True
)
"""
定义损失函数
包括生成器的损失函数和判别器的损失函数。
生成器的目的是使得生成图像经过判别器之后尽量被判断为真的
判别器的目的是使得生成器生成的图像被判断为假的,同时真实图像经过判别器要被判断为真的
"""
## 生层器的损失函数,只需要使得假的图片被判断为真即可
fake_is_real_loss=tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=tf.ones([self._batch_size],dtype=tf.int64),
logits=fake_img_logits
)
)
## 判别器的损失函数,只需要使得生成的图像被判断为假的,真实的图像被判断为真的即可
# 真的被判断为真的:
real_is_real_loss=tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=tf.ones([self._batch_size],dtype=tf.int64),
logits=real_img_logits
)
)
# 假的被判断为假的:
fake_is_fake_loss=tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=tf.zeros([self._batch_size],dtype=tf.int64),
logits=fake_img_logits
)
)
# 将损失函数集中管理:
tf.add_to_collection('g_losses',fake_is_real_loss)
tf.add_to_collection('d_losses',real_is_real_loss)
tf.add_to_collection('d_losses',fake_is_fake_loss)
loss={
'g':tf.add_n(tf.get_collection('g_losses'),name='total_g_loss'),
'd':tf.add_n(tf.get_collection('d_losses'),name='total_d_loss')
}
return (self._z_placeholder,self._img_placeholder,generated_imgs,loss)
def build_train_op(self,losses,learning_rate,beta1):
"""
定义训练过程
:param losses: 损失函数集合
:param learning_rate: 学习率
:param beta1: 指数衰减率估计
:return:
"""
g_opt=tf.train.AdamOptimizer(learning_rate=learning_rate,beta1=beta1)
d_opt=tf.train.AdamOptimizer(learning_rate=learning_rate,beta1=beta1)
g_opt_op=g_opt.minimize(
losses['g'],
var_list=self._generator.variables
)
d_opt_op=d_opt.minimize(
losses['d'],
var_list=self._discriminator.variables
)
with tf.control_dependencies([g_opt_op,d_opt_op]):
return tf.no_op(name='train')
| """
write by qianqianjun
2019.12.20
DCGAN 网络架构实现
"""
from generator import Generator
from discriminater import Discriminator
import tensorflow as tf
class DCGAN(object):
def __init__(self,hps):
"""
建立一个DCGAN的网络架构
:param hps: 网络的所有超参数的集合
"""
g_channels=hps.g_channels
d_channels=hps.d_channels
self._batch_size=hps.batch_size
self._init_conv_size=hps.init_conv_size
self._z_dim=hps.z_dim
self._img_size=hps.img_size
self._generator=Generator(g_channels,self._init_conv_size)
self._discriminator=Discriminator(d_channels)
def build(self):
"""
构建整个计算图
:return:
"""
# 创建随机向量和图片的占位符
self._z_placeholder=tf.placeholder(tf.float32,
(self._batch_size,self._z_dim))
self._img_placeholder=tf.placeholder(tf.float32,
(self._batch_size,
self._img_size,
self._img_size,1))
# 将随机向量输入生成器生成图片
generated_imgs=self._generator(self._z_placeholder,training=True)
# 将来生成的图片经过判别器来得到 生成图像的logits
fake_img_logits=self._discriminator(
generated_imgs,training=True
)
# 将真实的图片经过判别器得到真实图像的 logits
real_img_logits=self._discriminator(
self._img_placeholder,training=True
)
"""
定义损失函数
包括生成器的损失函数和判别器的损失函数。
生成器的目的是使得生成图像经过判别器之后尽量被判断为真的
判别器的目的是使得生成器生成的图像被判断为假的,同时真实图像经过判别器要被判断为真的
"""
## 生层器的损失函数,只需要使得假的图片被判断为真即可
fake_is_real_loss=tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=tf.ones([self._batch_size],dtype=tf.int64),
logits=fake_img_logits
)
)
## 判别器的损失函数,只需要使得生成的图像被判断为假的,真实的图像被判断为真的即可
# 真的被判断为真的:
real_is_real_loss=tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=tf.ones([self._batch_size],dtype=tf.int64),
logits=real_img_logits
)
)
# 假的被判断为假的:
fake_is_fake_loss=tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=tf.zeros([self._batch_size],dtype=tf.int64),
logits=fake_img_logits
)
)
# 将损失函数集中管理:
tf.add_to_collection('g_losses',fake_is_real_loss)
tf.add_to_collection('d_losses',real_is_real_loss)
tf.add_to_collection('d_losses',fake_is_fake_loss)
loss={
'g':tf.add_n(tf.get_collection('g_losses'),name='total_g_loss'),
'd':tf.add_n(tf.get_collection('d_losses'),name='total_d_loss')
}
return (self._z_placeholder,self._img_placeholder,generated_imgs,loss)
def build_train_op(self,losses,learning_rate,beta1):
"""
定义训练过程
:param losses: 损失函数集合
:param learning_rate: 学习率
:param beta1: 指数衰减率估计
:return:
"""
g_opt=tf.train.AdamOptimizer(learning_rate=learning_rate,beta1=beta1)
d_opt=tf.train.AdamOptimizer(learning_rate=learning_rate,beta1=beta1)
g_opt_op=g_opt.minimize(
losses['g'],
var_list=self._generator.variables
)
d_opt_op=d_opt.minimize(
losses['d'],
var_list=self._discriminator.variables
)
with tf.control_dependencies([g_opt_op,d_opt_op]):
return tf.no_op(name='train')
| zh | 0.969338 | write by qianqianjun 2019.12.20 DCGAN 网络架构实现 建立一个DCGAN的网络架构 :param hps: 网络的所有超参数的集合 构建整个计算图 :return: # 创建随机向量和图片的占位符 # 将随机向量输入生成器生成图片 # 将来生成的图片经过判别器来得到 生成图像的logits # 将真实的图片经过判别器得到真实图像的 logits 定义损失函数 包括生成器的损失函数和判别器的损失函数。 生成器的目的是使得生成图像经过判别器之后尽量被判断为真的 判别器的目的是使得生成器生成的图像被判断为假的,同时真实图像经过判别器要被判断为真的 ## 生层器的损失函数,只需要使得假的图片被判断为真即可 ## 判别器的损失函数,只需要使得生成的图像被判断为假的,真实的图像被判断为真的即可 # 真的被判断为真的: # 假的被判断为假的: # 将损失函数集中管理: 定义训练过程 :param losses: 损失函数集合 :param learning_rate: 学习率 :param beta1: 指数衰减率估计 :return: | 2.962665 | 3 |
public/baiduspider/models/jingyan.py | Limourli-liu/Aggregate-search | 5 | 6616289 | """经验搜索返回值模型模块
此文件定义的所有现有的经验搜索返回值模型并编写了自动构建函数。
"""
from typing import Union
from requests.api import get
from baiduspider.models import convert_time, get_attr
from baiduspider.models.typings.typings_jingyan import *
class JingyanPublisher(JingyanPublisher):
"""经验发布者模型
这是一个遵照BaiduSpider经验搜索经验发布者结果模型创建的返回模型类。
Attributes:
name (str): 经验上传者用户名
url (str): 经验上传者链接
plain (dict): 源搜索结果字典
"""
def __init__(self) -> None:
super().__init__()
self.name = ""
self.url = ""
self.plain = {}
@staticmethod
def _build_instance(plain: dict) -> JingyanPublisher:
__returns = JingyanPublisher()
__returns.plain = plain
__returns.name = get_attr(plain, "name")
__returns.url = get_attr(plain, "url")
return __returns
class JingyanNormal(JingyanNormal):
"""普通搜索结果模型
这是一个遵照BaiduSpider经验搜索基本搜索结果结果模型创建的返回模型类。
Attributes:
title (str): 经验标题
url (str): 经验链接
des (str): 经验简介
pub_date (datetime.datetime): 经验发布日期
category (List[str]): 经验分类
votes (int): 经验的支持票数
publisher (JingyanPublisher): 经验发布者信息
is_original (bool): 经验是否为原创
is_outstanding (bool): 经验是否为优秀经验
plain (dict): 源搜索结果字典
"""
def __init__(self) -> None:
super().__init__()
self.title = ""
self.url = ""
self.des = ""
self.pub_date = None
self.category = []
self.votes = 0
self.publisher = None
self.is_original = False
self.is_outstanding = False
self.plain = {}
@staticmethod
def _build_instance(plain: dict) -> JingyanNormal:
__returns = JingyanNormal()
__returns.plain = plain
__returns.title = get_attr(plain, "title")
__returns.url = get_attr(plain, "url")
__returns.des = get_attr(plain, "des")
__returns.pub_date = convert_time(get_attr(plain, "pub_date"))
__returns.category = get_attr(plain, "category")
__returns.votes = get_attr(plain, "votes")
__returns.publisher = JingyanPublisher._build_instance(
get_attr(plain, "publisher")
)
__returns.is_original = get_attr(plain, "is_original")
__returns.is_outstanding = get_attr(plain, "is_outstanding")
return __returns
class JingyanResult(JingyanResult):
"""经验搜索结果模型
这是一个遵照BaiduSpider经验搜索结果结果模型创建的返回模型类。
Attributes:
results (List[JingyanNormal]): 普通搜索结果列表
pages (int): 搜索结果页数
total (int): 搜索结果总数
plain (list): 源搜索结果列表
"""
def __init__(self) -> None:
super().__init__()
self.results = []
self.pages = 0
self.total = 0
self.plain = []
@staticmethod
def _build_instance(plain: list, pages: int, total: int) -> JingyanResult:
__returns = JingyanResult()
__returns.plain = plain
__returns.pages = pages
__returns.total = total
for p in plain:
__returns.results.append(JingyanNormal._build_instance(p))
return __returns
def __getitem__(self, key) -> Union[JingyanNormal, None]:
return self.results[key]
def __repr__(self) -> str:
return "<object JingyanResult>"
| """经验搜索返回值模型模块
此文件定义的所有现有的经验搜索返回值模型并编写了自动构建函数。
"""
from typing import Union
from requests.api import get
from baiduspider.models import convert_time, get_attr
from baiduspider.models.typings.typings_jingyan import *
class JingyanPublisher(JingyanPublisher):
"""经验发布者模型
这是一个遵照BaiduSpider经验搜索经验发布者结果模型创建的返回模型类。
Attributes:
name (str): 经验上传者用户名
url (str): 经验上传者链接
plain (dict): 源搜索结果字典
"""
def __init__(self) -> None:
super().__init__()
self.name = ""
self.url = ""
self.plain = {}
@staticmethod
def _build_instance(plain: dict) -> JingyanPublisher:
__returns = JingyanPublisher()
__returns.plain = plain
__returns.name = get_attr(plain, "name")
__returns.url = get_attr(plain, "url")
return __returns
class JingyanNormal(JingyanNormal):
"""普通搜索结果模型
这是一个遵照BaiduSpider经验搜索基本搜索结果结果模型创建的返回模型类。
Attributes:
title (str): 经验标题
url (str): 经验链接
des (str): 经验简介
pub_date (datetime.datetime): 经验发布日期
category (List[str]): 经验分类
votes (int): 经验的支持票数
publisher (JingyanPublisher): 经验发布者信息
is_original (bool): 经验是否为原创
is_outstanding (bool): 经验是否为优秀经验
plain (dict): 源搜索结果字典
"""
def __init__(self) -> None:
super().__init__()
self.title = ""
self.url = ""
self.des = ""
self.pub_date = None
self.category = []
self.votes = 0
self.publisher = None
self.is_original = False
self.is_outstanding = False
self.plain = {}
@staticmethod
def _build_instance(plain: dict) -> JingyanNormal:
__returns = JingyanNormal()
__returns.plain = plain
__returns.title = get_attr(plain, "title")
__returns.url = get_attr(plain, "url")
__returns.des = get_attr(plain, "des")
__returns.pub_date = convert_time(get_attr(plain, "pub_date"))
__returns.category = get_attr(plain, "category")
__returns.votes = get_attr(plain, "votes")
__returns.publisher = JingyanPublisher._build_instance(
get_attr(plain, "publisher")
)
__returns.is_original = get_attr(plain, "is_original")
__returns.is_outstanding = get_attr(plain, "is_outstanding")
return __returns
class JingyanResult(JingyanResult):
"""经验搜索结果模型
这是一个遵照BaiduSpider经验搜索结果结果模型创建的返回模型类。
Attributes:
results (List[JingyanNormal]): 普通搜索结果列表
pages (int): 搜索结果页数
total (int): 搜索结果总数
plain (list): 源搜索结果列表
"""
def __init__(self) -> None:
super().__init__()
self.results = []
self.pages = 0
self.total = 0
self.plain = []
@staticmethod
def _build_instance(plain: list, pages: int, total: int) -> JingyanResult:
__returns = JingyanResult()
__returns.plain = plain
__returns.pages = pages
__returns.total = total
for p in plain:
__returns.results.append(JingyanNormal._build_instance(p))
return __returns
def __getitem__(self, key) -> Union[JingyanNormal, None]:
return self.results[key]
def __repr__(self) -> str:
return "<object JingyanResult>"
| zh | 0.931794 | 经验搜索返回值模型模块 此文件定义的所有现有的经验搜索返回值模型并编写了自动构建函数。 经验发布者模型 这是一个遵照BaiduSpider经验搜索经验发布者结果模型创建的返回模型类。 Attributes: name (str): 经验上传者用户名 url (str): 经验上传者链接 plain (dict): 源搜索结果字典 普通搜索结果模型 这是一个遵照BaiduSpider经验搜索基本搜索结果结果模型创建的返回模型类。 Attributes: title (str): 经验标题 url (str): 经验链接 des (str): 经验简介 pub_date (datetime.datetime): 经验发布日期 category (List[str]): 经验分类 votes (int): 经验的支持票数 publisher (JingyanPublisher): 经验发布者信息 is_original (bool): 经验是否为原创 is_outstanding (bool): 经验是否为优秀经验 plain (dict): 源搜索结果字典 经验搜索结果模型 这是一个遵照BaiduSpider经验搜索结果结果模型创建的返回模型类。 Attributes: results (List[JingyanNormal]): 普通搜索结果列表 pages (int): 搜索结果页数 total (int): 搜索结果总数 plain (list): 源搜索结果列表 | 2.569108 | 3 |
mayan/apps/appearance/migrations/0005_auto_20220304_1253.py | onprawee/Mayan-EDMS | 0 | 6616290 | # Generated by Django 2.2.24 on 2022-03-04 12:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('appearance', '0004_theme_logo'),
]
operations = [
migrations.AlterField(
model_name='theme',
name='logo',
field=models.TextField(blank=True, help_text='Upload Link your Logo', verbose_name='Logo'),
),
]
| # Generated by Django 2.2.24 on 2022-03-04 12:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('appearance', '0004_theme_logo'),
]
operations = [
migrations.AlterField(
model_name='theme',
name='logo',
field=models.TextField(blank=True, help_text='Upload Link your Logo', verbose_name='Logo'),
),
]
| en | 0.78035 | # Generated by Django 2.2.24 on 2022-03-04 12:53 | 1.457042 | 1 |
lib/osv/impact.py | 6un9-h0-Dan/osv | 1 | 6616291 | <reponame>6un9-h0-Dan/osv
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Impact analysis."""
import collections
import datetime
import logging
import tempfile
import time
import pygit2
CLONE_TRIES = 3
COMMIT_RANGE_LIMIT = 4
CONFIDENCE_FULL = 100
# Flat reduction in confidence for any range.
CONFIDENCE_RANGE_REDUCTION = 20
# Reduction in confidence per commit in a range.
CONFIDENCE_RANGE_REDUCTION_STEP = 10
RETRY_SLEEP_SECONDS = 5
TAG_PREFIX = 'refs/tags/'
# Used in cases where an earlier commit in a regression range cannot be
# determined.
UNKNOWN_COMMIT = 'unknown'
AffectedResult = collections.namedtuple(
'AffectedResult',
'tags commits affected_ranges regress_commits fix_commits confidence')
TagsInfo = collections.namedtuple('TagsInfo', 'tags latest_tag')
class ImpactError(Exception):
"""Impact error."""
def clone_with_retries(git_url, checkout_dir, callbacks=None):
"""Clone with retries."""
logging.info('Cloning %s to %s', git_url, checkout_dir)
for _ in range(CLONE_TRIES):
try:
repo = pygit2.clone_repository(git_url, checkout_dir, callbacks=callbacks)
repo.cache = {}
return repo
except pygit2.GitError as e:
logging.error('Clone failed: %s', str(e))
time.sleep(RETRY_SLEEP_SECONDS)
continue
class RangeCollector:
"""Affected range collector."""
def __init__(self):
self.grouped_ranges = {}
def add(self, introduced_in, fixed_in):
"""Add a new commit range."""
if introduced_in in self.grouped_ranges:
if fixed_in is None:
# New range doesn't add anything new.
return
self.grouped_ranges[introduced_in].append((introduced_in, fixed_in))
# Remove any existing ranges with the same introduced in commit but with a
# None fixed commit.
self.grouped_ranges[introduced_in] = [
commit_range for commit_range in self.grouped_ranges[introduced_in]
if commit_range[1] is not None
]
else:
self.grouped_ranges[introduced_in] = [(introduced_in, fixed_in)]
def ranges(self):
"""Return a set representing the collected commit ranges."""
ranges = set()
for value in self.grouped_ranges.values():
ranges.update(value)
return ranges
def get_affected(repo, regress_commit_or_range, fix_commit_or_range):
""""Get list of affected tags and commits for a bug given regressed and fixed
commits."""
confidence = CONFIDENCE_FULL
# If multiple, assume any commit in the regression range cause the
# regression.
regress_commits = get_commit_range(repo, regress_commit_or_range)
if len(regress_commits) > COMMIT_RANGE_LIMIT:
raise ImpactError('Too many commits in regression range.')
# If multiple, assume all commits are necessary for fixing the regression.
fix_commits = get_commit_range(repo, fix_commit_or_range)
if len(fix_commits) > COMMIT_RANGE_LIMIT:
logging.warning('Too many commits in fix range.')
# Rather than bail out here and potentially leaving a Bug as "unfixed"
# indefinitely, we do the best we can here, by assuming the last
# COMMIT_RANGE_LIMIT commits fix the bug.
fix_commits = fix_commits[-COMMIT_RANGE_LIMIT:]
confidence -= CONFIDENCE_RANGE_REDUCTION
# For every extra commit in the range, reduce the confidence.
if len(regress_commits) > 1:
confidence -= CONFIDENCE_RANGE_REDUCTION
confidence -= (len(regress_commits) - 1) * CONFIDENCE_RANGE_REDUCTION_STEP
# Special case: unknown status for earlier revisions.
unknown_earlier_revisions = UNKNOWN_COMMIT in regress_commit_or_range
if unknown_earlier_revisions:
confidence -= CONFIDENCE_RANGE_REDUCTION
if len(fix_commits) > 1:
confidence -= CONFIDENCE_RANGE_REDUCTION
confidence -= (len(fix_commits) - 1) * CONFIDENCE_RANGE_REDUCTION_STEP
if confidence < 0:
confidence = 0
tags_with_bug = set()
for commit in regress_commits:
tags_with_bug.update(get_tags_with_commits(repo, [commit]))
tags_with_fix = get_tags_with_commits(repo, fix_commits)
affected_tags = list(tags_with_bug - tags_with_fix)
affected_tags.sort()
affected_commits, affected_ranges = get_affected_range(
repo, regress_commits, fix_commits)
if unknown_earlier_revisions:
# Include the unknown marker in resulting entities.
regress_commits.insert(0, UNKNOWN_COMMIT)
return AffectedResult(affected_tags, affected_commits, affected_ranges,
regress_commits, fix_commits, confidence)
def get_affected_range(repo, regress_commits, fix_commits):
"""Get affected range."""
range_collector = RangeCollector()
commits = set()
seen_commits = set()
# Check all branches for cherry picked regress/fix commits.
for branch in repo.branches.remote:
ref = 'refs/remotes/' + branch
# Get the earliest equivalent commit in the regression range.
equivalent_regress_commit = None
for regress_commit in regress_commits:
logging.info('Finding equivalent regress commit to %s in %s',
regress_commit, ref)
equivalent_regress_commit = get_equivalent_commit(repo, ref,
regress_commit)
if equivalent_regress_commit:
break
if not equivalent_regress_commit:
continue
# Get the latest equivalent commit in the fix range.
equivalent_fix_commit = None
for fix_commit in fix_commits:
logging.info('Finding equivalent fix commit to %s in %s', fix_commit, ref)
equivalent_commit = get_equivalent_commit(repo, ref, fix_commit)
if equivalent_commit:
equivalent_fix_commit = equivalent_commit
range_collector.add(equivalent_regress_commit, equivalent_fix_commit)
last_affected_commits = []
if equivalent_fix_commit:
# Last affected commit is the one before the fix.
last_affected_commits.extend(
parent.id
for parent in repo.revparse_single(equivalent_fix_commit).parents)
else:
# Not fixed in this branch. Everything is still vulnerabile.
last_affected_commits.append(repo.revparse_single(ref).id)
commits.add(equivalent_regress_commit)
for last_affected_commit in last_affected_commits:
if (equivalent_regress_commit, last_affected_commit) in seen_commits:
continue
seen_commits.add((equivalent_regress_commit, last_affected_commit))
commits.update(
get_commit_list(repo, equivalent_regress_commit,
last_affected_commit))
return commits, range_collector.ranges()
def get_commit_range(repo, commit_or_range):
"""Get a commit range."""
if not commit_or_range:
return []
if ':' not in commit_or_range:
return [commit_or_range]
start_commit, end_commit = commit_or_range.split(':')
if start_commit == UNKNOWN_COMMIT:
# Special case: No information about earlier builds. Assume the end_commit
# is the regressing commit as that's the best we can do.
return [end_commit]
return get_commit_list(repo, start_commit, end_commit)
def get_tags_with_commits(repo, commits):
"""Get tags with a given commit."""
if not commits:
return set()
affected = set()
logging.info('Getting tags which contain %s', ','.join(commits))
tags = [
ref for ref in repo.listall_references() if ref.startswith(TAG_PREFIX)
]
for tag in tags:
if all(get_equivalent_commit(repo, tag, commit) for commit in commits):
affected.add(tag[len(TAG_PREFIX):])
return affected
def get_commit_list(repo, start_commit, end_commit):
"""Get commit list."""
logging.info('Getting commits %s..%s', start_commit, end_commit)
try:
walker = repo.walk(end_commit,
pygit2.GIT_SORT_TOPOLOGICAL | pygit2.GIT_SORT_REVERSE)
except KeyError as e:
raise ImpactError('Invalid commit.') from e
walker.hide(start_commit)
return [str(commit.id) for commit in walker]
def find_latest_tag(repo, tags):
"""Find the latest tag (by commit time)."""
latest_commit_time = None
latest_tag = None
for tag in tags:
commit = repo.lookup_reference(tag).peel()
commit_time = (
datetime.datetime.fromtimestamp(commit.commit_time) -
datetime.timedelta(minutes=commit.commit_time_offset))
if not latest_commit_time or commit_time > latest_commit_time:
latest_commit_time = commit_time
latest_tag = tag[len(TAG_PREFIX):]
return latest_tag
def get_equivalent_commit(repo, to_search, target_commit):
"""Find an equivalent commit at to_search, or None. The equivalent commit can
be equal to target_commit."""
if not target_commit:
return None
target = repo.revparse_single(target_commit)
target_patch_id = repo.diff(target.parents[0], target).patchid
search = repo.revparse_single(to_search)
try:
commits = repo.walk(search.id)
except ValueError:
# Invalid commit
return None
for commit in commits:
# Ignore commits without parents and merge commits with multiple parents.
if not commit.parents or len(commit.parents) > 1:
continue
patch_id = repo.cache.get(commit.id)
if not patch_id:
diff = repo.diff(commit.parents[0], commit)
patch_id = diff.patchid
repo.cache[commit.id] = patch_id
if patch_id == target_patch_id:
return str(commit.id)
# TODO(ochang): Possibly look at commit message, author etc.
return None
def get_tags(repo_url):
"""Get tags information."""
with tempfile.TemporaryDirectory() as tmp_dir:
repo = clone_with_retries(repo_url, tmp_dir)
tags = [
ref for ref in repo.listall_references() if ref.startswith(TAG_PREFIX)
]
latest_tag = find_latest_tag(repo, tags)
return TagsInfo([tag[len(TAG_PREFIX):] for tag in tags], latest_tag)
| # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Impact analysis."""
import collections
import datetime
import logging
import tempfile
import time
import pygit2
CLONE_TRIES = 3
COMMIT_RANGE_LIMIT = 4
CONFIDENCE_FULL = 100
# Flat reduction in confidence for any range.
CONFIDENCE_RANGE_REDUCTION = 20
# Reduction in confidence per commit in a range.
CONFIDENCE_RANGE_REDUCTION_STEP = 10
RETRY_SLEEP_SECONDS = 5
TAG_PREFIX = 'refs/tags/'
# Used in cases where an earlier commit in a regression range cannot be
# determined.
UNKNOWN_COMMIT = 'unknown'
AffectedResult = collections.namedtuple(
'AffectedResult',
'tags commits affected_ranges regress_commits fix_commits confidence')
TagsInfo = collections.namedtuple('TagsInfo', 'tags latest_tag')
class ImpactError(Exception):
"""Impact error."""
def clone_with_retries(git_url, checkout_dir, callbacks=None):
"""Clone with retries."""
logging.info('Cloning %s to %s', git_url, checkout_dir)
for _ in range(CLONE_TRIES):
try:
repo = pygit2.clone_repository(git_url, checkout_dir, callbacks=callbacks)
repo.cache = {}
return repo
except pygit2.GitError as e:
logging.error('Clone failed: %s', str(e))
time.sleep(RETRY_SLEEP_SECONDS)
continue
class RangeCollector:
"""Affected range collector."""
def __init__(self):
self.grouped_ranges = {}
def add(self, introduced_in, fixed_in):
"""Add a new commit range."""
if introduced_in in self.grouped_ranges:
if fixed_in is None:
# New range doesn't add anything new.
return
self.grouped_ranges[introduced_in].append((introduced_in, fixed_in))
# Remove any existing ranges with the same introduced in commit but with a
# None fixed commit.
self.grouped_ranges[introduced_in] = [
commit_range for commit_range in self.grouped_ranges[introduced_in]
if commit_range[1] is not None
]
else:
self.grouped_ranges[introduced_in] = [(introduced_in, fixed_in)]
def ranges(self):
"""Return a set representing the collected commit ranges."""
ranges = set()
for value in self.grouped_ranges.values():
ranges.update(value)
return ranges
def get_affected(repo, regress_commit_or_range, fix_commit_or_range):
""""Get list of affected tags and commits for a bug given regressed and fixed
commits."""
confidence = CONFIDENCE_FULL
# If multiple, assume any commit in the regression range cause the
# regression.
regress_commits = get_commit_range(repo, regress_commit_or_range)
if len(regress_commits) > COMMIT_RANGE_LIMIT:
raise ImpactError('Too many commits in regression range.')
# If multiple, assume all commits are necessary for fixing the regression.
fix_commits = get_commit_range(repo, fix_commit_or_range)
if len(fix_commits) > COMMIT_RANGE_LIMIT:
logging.warning('Too many commits in fix range.')
# Rather than bail out here and potentially leaving a Bug as "unfixed"
# indefinitely, we do the best we can here, by assuming the last
# COMMIT_RANGE_LIMIT commits fix the bug.
fix_commits = fix_commits[-COMMIT_RANGE_LIMIT:]
confidence -= CONFIDENCE_RANGE_REDUCTION
# For every extra commit in the range, reduce the confidence.
if len(regress_commits) > 1:
confidence -= CONFIDENCE_RANGE_REDUCTION
confidence -= (len(regress_commits) - 1) * CONFIDENCE_RANGE_REDUCTION_STEP
# Special case: unknown status for earlier revisions.
unknown_earlier_revisions = UNKNOWN_COMMIT in regress_commit_or_range
if unknown_earlier_revisions:
confidence -= CONFIDENCE_RANGE_REDUCTION
if len(fix_commits) > 1:
confidence -= CONFIDENCE_RANGE_REDUCTION
confidence -= (len(fix_commits) - 1) * CONFIDENCE_RANGE_REDUCTION_STEP
if confidence < 0:
confidence = 0
tags_with_bug = set()
for commit in regress_commits:
tags_with_bug.update(get_tags_with_commits(repo, [commit]))
tags_with_fix = get_tags_with_commits(repo, fix_commits)
affected_tags = list(tags_with_bug - tags_with_fix)
affected_tags.sort()
affected_commits, affected_ranges = get_affected_range(
repo, regress_commits, fix_commits)
if unknown_earlier_revisions:
# Include the unknown marker in resulting entities.
regress_commits.insert(0, UNKNOWN_COMMIT)
return AffectedResult(affected_tags, affected_commits, affected_ranges,
regress_commits, fix_commits, confidence)
def get_affected_range(repo, regress_commits, fix_commits):
"""Get affected range."""
range_collector = RangeCollector()
commits = set()
seen_commits = set()
# Check all branches for cherry picked regress/fix commits.
for branch in repo.branches.remote:
ref = 'refs/remotes/' + branch
# Get the earliest equivalent commit in the regression range.
equivalent_regress_commit = None
for regress_commit in regress_commits:
logging.info('Finding equivalent regress commit to %s in %s',
regress_commit, ref)
equivalent_regress_commit = get_equivalent_commit(repo, ref,
regress_commit)
if equivalent_regress_commit:
break
if not equivalent_regress_commit:
continue
# Get the latest equivalent commit in the fix range.
equivalent_fix_commit = None
for fix_commit in fix_commits:
logging.info('Finding equivalent fix commit to %s in %s', fix_commit, ref)
equivalent_commit = get_equivalent_commit(repo, ref, fix_commit)
if equivalent_commit:
equivalent_fix_commit = equivalent_commit
range_collector.add(equivalent_regress_commit, equivalent_fix_commit)
last_affected_commits = []
if equivalent_fix_commit:
# Last affected commit is the one before the fix.
last_affected_commits.extend(
parent.id
for parent in repo.revparse_single(equivalent_fix_commit).parents)
else:
# Not fixed in this branch. Everything is still vulnerabile.
last_affected_commits.append(repo.revparse_single(ref).id)
commits.add(equivalent_regress_commit)
for last_affected_commit in last_affected_commits:
if (equivalent_regress_commit, last_affected_commit) in seen_commits:
continue
seen_commits.add((equivalent_regress_commit, last_affected_commit))
commits.update(
get_commit_list(repo, equivalent_regress_commit,
last_affected_commit))
return commits, range_collector.ranges()
def get_commit_range(repo, commit_or_range):
"""Get a commit range."""
if not commit_or_range:
return []
if ':' not in commit_or_range:
return [commit_or_range]
start_commit, end_commit = commit_or_range.split(':')
if start_commit == UNKNOWN_COMMIT:
# Special case: No information about earlier builds. Assume the end_commit
# is the regressing commit as that's the best we can do.
return [end_commit]
return get_commit_list(repo, start_commit, end_commit)
def get_tags_with_commits(repo, commits):
"""Get tags with a given commit."""
if not commits:
return set()
affected = set()
logging.info('Getting tags which contain %s', ','.join(commits))
tags = [
ref for ref in repo.listall_references() if ref.startswith(TAG_PREFIX)
]
for tag in tags:
if all(get_equivalent_commit(repo, tag, commit) for commit in commits):
affected.add(tag[len(TAG_PREFIX):])
return affected
def get_commit_list(repo, start_commit, end_commit):
"""Get commit list."""
logging.info('Getting commits %s..%s', start_commit, end_commit)
try:
walker = repo.walk(end_commit,
pygit2.GIT_SORT_TOPOLOGICAL | pygit2.GIT_SORT_REVERSE)
except KeyError as e:
raise ImpactError('Invalid commit.') from e
walker.hide(start_commit)
return [str(commit.id) for commit in walker]
def find_latest_tag(repo, tags):
"""Find the latest tag (by commit time)."""
latest_commit_time = None
latest_tag = None
for tag in tags:
commit = repo.lookup_reference(tag).peel()
commit_time = (
datetime.datetime.fromtimestamp(commit.commit_time) -
datetime.timedelta(minutes=commit.commit_time_offset))
if not latest_commit_time or commit_time > latest_commit_time:
latest_commit_time = commit_time
latest_tag = tag[len(TAG_PREFIX):]
return latest_tag
def get_equivalent_commit(repo, to_search, target_commit):
"""Find an equivalent commit at to_search, or None. The equivalent commit can
be equal to target_commit."""
if not target_commit:
return None
target = repo.revparse_single(target_commit)
target_patch_id = repo.diff(target.parents[0], target).patchid
search = repo.revparse_single(to_search)
try:
commits = repo.walk(search.id)
except ValueError:
# Invalid commit
return None
for commit in commits:
# Ignore commits without parents and merge commits with multiple parents.
if not commit.parents or len(commit.parents) > 1:
continue
patch_id = repo.cache.get(commit.id)
if not patch_id:
diff = repo.diff(commit.parents[0], commit)
patch_id = diff.patchid
repo.cache[commit.id] = patch_id
if patch_id == target_patch_id:
return str(commit.id)
# TODO(ochang): Possibly look at commit message, author etc.
return None
def get_tags(repo_url):
"""Get tags information."""
with tempfile.TemporaryDirectory() as tmp_dir:
repo = clone_with_retries(repo_url, tmp_dir)
tags = [
ref for ref in repo.listall_references() if ref.startswith(TAG_PREFIX)
]
latest_tag = find_latest_tag(repo, tags)
return TagsInfo([tag[len(TAG_PREFIX):] for tag in tags], latest_tag) | en | 0.857274 | # Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Impact analysis. # Flat reduction in confidence for any range. # Reduction in confidence per commit in a range. # Used in cases where an earlier commit in a regression range cannot be # determined. Impact error. Clone with retries. Affected range collector. Add a new commit range. # New range doesn't add anything new. # Remove any existing ranges with the same introduced in commit but with a # None fixed commit. Return a set representing the collected commit ranges. "Get list of affected tags and commits for a bug given regressed and fixed commits. # If multiple, assume any commit in the regression range cause the # regression. # If multiple, assume all commits are necessary for fixing the regression. # Rather than bail out here and potentially leaving a Bug as "unfixed" # indefinitely, we do the best we can here, by assuming the last # COMMIT_RANGE_LIMIT commits fix the bug. # For every extra commit in the range, reduce the confidence. # Special case: unknown status for earlier revisions. # Include the unknown marker in resulting entities. Get affected range. # Check all branches for cherry picked regress/fix commits. # Get the earliest equivalent commit in the regression range. # Get the latest equivalent commit in the fix range. # Last affected commit is the one before the fix. # Not fixed in this branch. 
Everything is still vulnerabile. Get a commit range. # Special case: No information about earlier builds. Assume the end_commit # is the regressing commit as that's the best we can do. Get tags with a given commit. Get commit list. Find the latest tag (by commit time). Find an equivalent commit at to_search, or None. The equivalent commit can be equal to target_commit. # Invalid commit # Ignore commits without parents and merge commits with multiple parents. # TODO(ochang): Possibly look at commit message, author etc. Get tags information. | 2.185339 | 2 |
generator/table_parser.py | Kushagra-0801/Language | 1 | 6616292 | import csv
from itertools import product
from typing import DefaultDict
EPS = 'ε'
table = []
with open('lltable.txt', newline='') as f:
reader = csv.reader(f, delimiter='`')
table = [*reader]
top_level = [i[0] for i in table[1:]]
print(top_level)
next_level = table[0][1:]
print(next_level)
def parse_transformation(rule, eps='ε'):
    """Extract the right-hand-side symbols of a grammar production.

    ``rule`` looks like ``"A ::= B C"``; everything after the first ``"::= "``
    is split on whitespace.  The epsilon symbol (``eps``, U+03B5 by default —
    previously a hard-coded module constant, now a parameter) is mapped to the
    literal token ``"EPS"`` expected by the generated C++ parse table.

    Returns a list of symbol strings.
    """
    rhs = rule[rule.index('::= ') + 4:]
    return [sym if sym != eps else 'EPS' for sym in rhs.split()]
table_map = DefaultDict(dict)
for (nonterminal,
terminal), transformation in zip(product(top_level, next_level),
(i for row in table[1:]
for i in row[1:])):
if transformation:
table_map[nonterminal][terminal] = parse_transformation(transformation)
table_map = {k: v for k, v in table_map.items()}
follows_dict = {}
follows_file = open("follows.txt", "r")
for line in follows_file:
arr = line.split("`")
if (arr[0] == 'Nonterminal'):
continue
nt = arr[0]
arr.pop(0)
arr.pop(-1)
follows_dict[nt] = arr
# print('printing dict')
# for li in follows_dict.values():
# print(li)
for nt in follows_dict:
for t in follows_dict[nt]:
try:
table_map[nt][t]
except KeyError:
table_map[nt][t] = ["SYNCH"]
# from pprint import pformat
# s = pformat(table_map, sort_dicts=False, width=150)
# s = s.replace("'", '"')
# import re
# s = re.sub(r'([^"])\["', r'\1{"', s)
# s = re.sub(r'"\]([^"])', r'"}\1', s)
def vec(v):
    """Render the symbols in *v* as a comma-separated list of quoted strings.

    Each element becomes ``"sym", `` — note the trailing separator, which the
    generated C++ brace-initialisers tolerate.
    """
    return "".join('"%s", ' % item for item in v)
def mid_lvl(v):
    """Render one nonterminal's {terminal: production} map as C++ initialiser rows.

    Each entry becomes a line of the form ``\\t\\t{"terminal", { "sym", ... }},``.
    """
    rows = []
    for terminal, production in v.items():
        # Inline of the sibling vec() helper: quoted symbols with trailing ", ".
        quoted = "".join('"%s", ' % sym for sym in production)
        rows.append('\t\t{{"{}", {{ {} }}}},\n'.format(terminal, quoted))
    return "".join(rows)
s = "{\n"
for k, v in table_map.items():
s += f'\t{{"{k}", {{{mid_lvl(v)}}} }},\n'
s += "\n}"
with open('../src/c_table.hpp', 'w+') as f:
f.write(f"""#include <string>
#include <unordered_map>
#include <vector>
using namespace std;
const unordered_map<string, unordered_map<string, vector<string>>>table_map{s};
""")
| import csv
from itertools import product
from typing import DefaultDict
EPS = 'ε'
table = []
with open('lltable.txt', newline='') as f:
reader = csv.reader(f, delimiter='`')
table = [*reader]
top_level = [i[0] for i in table[1:]]
print(top_level)
next_level = table[0][1:]
print(next_level)
def parse_transformation(rule):
rule = rule[rule.index('::= ') + 4:]
return [i if i != EPS else 'EPS' for i in rule.split()]
table_map = DefaultDict(dict)
for (nonterminal,
terminal), transformation in zip(product(top_level, next_level),
(i for row in table[1:]
for i in row[1:])):
if transformation:
table_map[nonterminal][terminal] = parse_transformation(transformation)
table_map = {k: v for k, v in table_map.items()}
follows_dict = {}
follows_file = open("follows.txt", "r")
for line in follows_file:
arr = line.split("`")
if (arr[0] == 'Nonterminal'):
continue
nt = arr[0]
arr.pop(0)
arr.pop(-1)
follows_dict[nt] = arr
# print('printing dict')
# for li in follows_dict.values():
# print(li)
for nt in follows_dict:
for t in follows_dict[nt]:
try:
table_map[nt][t]
except KeyError:
table_map[nt][t] = ["SYNCH"]
# from pprint import pformat
# s = pformat(table_map, sort_dicts=False, width=150)
# s = s.replace("'", '"')
# import re
# s = re.sub(r'([^"])\["', r'\1{"', s)
# s = re.sub(r'"\]([^"])', r'"}\1', s)
def vec(v):
s = ""
for i in v:
s += f'"{i}", '
return s
def mid_lvl(v):
s = ""
for k, v in v.items():
s += f'\t\t{{"{k}", {{ {vec(v)} }}}},\n'
return s
s = "{\n"
for k, v in table_map.items():
s += f'\t{{"{k}", {{{mid_lvl(v)}}} }},\n'
s += "\n}"
with open('../src/c_table.hpp', 'w+') as f:
f.write(f"""#include <string>
#include <unordered_map>
#include <vector>
using namespace std;
const unordered_map<string, unordered_map<string, vector<string>>>table_map{s};
""")
| en | 0.166876 | # print('printing dict') # for li in follows_dict.values(): # print(li) # from pprint import pformat # s = pformat(table_map, sort_dicts=False, width=150) # s = s.replace("'", '"') # import re # s = re.sub(r'([^"])\["', r'\1{"', s) # s = re.sub(r'"\]([^"])', r'"}\1', s) #include <string> #include <unordered_map> #include <vector> using namespace std; const unordered_map<string, unordered_map<string, vector<string>>>table_map{s}; | 2.83139 | 3 |
lib/roi_data_layer/minibatch.py | zhangyiwen5512/pytorch-faster-rcnn | 2 | 6616293 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME> and <NAME>
# --------------------------------------------------------
"""Compute minibatch blobs for training a Fast R-CNN network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import numpy.random as npr
import cv2
from model.config import cfg
from utils.blob import prep_im_for_blob, im_list_to_blob
def get_minibatch(roidb, num_classes):
  """Given a roidb, construct a minibatch sampled from it.

  Depending on cfg.TRAIN.IMS_PER_BATCH, either a single image is used
  (IMS_PER_BATCH == 1) or two images are blended into one input blob via
  mixup augmentation (IMS_PER_BATCH == 2).

  Returns a ``blobs`` dict with keys ``data`` (image blob), ``gt_boxes``
  (N x 5 array of x1, y1, x2, y2, cls; plus ``gt_boxes2`` in the mixup case)
  and ``im_info`` = [height, width, scale].  ``num_classes`` is unused here
  but kept for API compatibility with callers.
  """
  num_images = len(roidb)
  # Sample random scales to use for each image in this batch
  # Randomly pick a scale index in [0, len(cfg.TRAIN.SCALES)) per image.
  random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
                  size=num_images)
  assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
    'num_images ({}) must divide BATCH_SIZE ({})'. \
    format(num_images, cfg.TRAIN.BATCH_SIZE)
  ### modified
  #######################################################################################################################
  if cfg.TRAIN.IMS_PER_BATCH == 1 :
    # Single-image batch (no mixup).
    """
    一次处理一张图片
    """
    # Get the input image blob, formatted for caffe
    # Build the resized, mean-subtracted image blob.
    im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)
    blobs = {'data': im_blob}
    assert len(im_scales) == 1, "Single batch only"
    assert len(roidb) == 1, "Single batch only"
    # gt boxes: (x1, y1, x2, y2, cls)
    if cfg.TRAIN.USE_ALL_GT:
      # Include all ground truth boxes
      # Indices of foreground (non-background) ground-truth boxes.
      gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
    else:
      # For the COCO ground truth boxes, exclude the ones that are ''iscrowd''
      gt_inds = np.where(roidb[0]['gt_classes'] != 0 & np.all(roidb[0]['gt_overlaps'].toarray() > -1.0, axis=1))[0]
    # Allocate the gt_boxes array and copy in the boxes (scaled to blob
    # coordinates) and their class labels.
    gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
    gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :] * im_scales[0]
    gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
    blobs['gt_boxes'] = gt_boxes
    blobs['im_info'] = np.array(
      [im_blob.shape[1], im_blob.shape[2], im_scales[0]],
      dtype=np.float32)
    return blobs
  elif cfg.TRAIN.IMS_PER_BATCH == 2 :
    # Two-image batch: the images are blended into one blob (mixup).
    """
    一次处理两张图片,做mixup
    """
    #################################################################################################################
    im_blob, im_scales, mix_scale= _get_2_image_blob(roidb, random_scale_inds)
    ##################################################################################################################
    blobs = {'data': im_blob}
    assert len(im_scales) == 2, "Single batch only"
    assert len(roidb) == 2, "Single batch only"
    # gt boxes: (x1, y1, x2, y2, cls)
    if cfg.TRAIN.USE_ALL_GT:
      # Include all ground truth boxes
      gt_inds1 = np.where(roidb[0]['gt_classes'] != 0)[0]
      gt_inds2 = np.where(roidb[1]['gt_classes'] != 0)[0]
    else:
      # For the COCO ground truth boxes, exclude the ones that are ''iscrowd''
      gt_inds1 = np.where(roidb[0]['gt_classes'] != 0 & np.all(roidb[0]['gt_overlaps'].toarray() > -1.0, axis=1))[0]
      gt_inds2 = np.where(roidb[1]['gt_classes'] != 0 & np.all(roidb[1]['gt_overlaps'].toarray() > -1.0, axis=1))[0]
    gt_boxes1 = np.empty((len(gt_inds1), 5), dtype=np.float32)
    gt_boxes1[:, 0:4] = roidb[0]['boxes'][gt_inds1, :] * im_scales[0]
    gt_boxes1[:, 4] = roidb[0]['gt_classes'][gt_inds1]
    gt_boxes2 = np.empty((len(gt_inds2), 5), dtype=np.float32)
    gt_boxes2[:, 0:4] = roidb[1]['boxes'][gt_inds2, :] * im_scales[1]
    # The second image was resized to the first image's shape inside
    # _get_2_image_blob, so its boxes are rescaled by mix_scale = shape1/shape2
    # (index 0 = height/y factor, index 1 = width/x factor).
    gt_boxes2[:, 0] *= mix_scale[1]#x1
    gt_boxes2[:, 1] *= mix_scale[0]#y1
    gt_boxes2[:, 2] *= mix_scale[1]#x2
    gt_boxes2[:, 3] *= mix_scale[0]#y2
    gt_boxes2[:, 4] = roidb[1]['gt_classes'][gt_inds2]#cls
    blobs['gt_boxes'] = gt_boxes1
    blobs['gt_boxes2'] = gt_boxes2
    blobs['im_info'] = np.array(
      [im_blob.shape[1], im_blob.shape[2], im_scales[0]],
      dtype=np.float32)
    return blobs
  else:
    # NOTE(review): "IMS_PER_BACTH" in this message is a typo for IMS_PER_BATCH.
    raise Exception("check cfg.TRAIN.IMS_PER_BACTH in /lib/model/config.py")
###########################################################################################################################
def _get_image_blob(roidb, scale_inds):
  """
  Builds an input blob from the images in the roidb at the specified
  scales.

  For each roidb entry: read the image, optionally mirror it horizontally,
  then mean-subtract and resize via prep_im_for_blob.

  Returns (blob, im_scales) where im_scales[i] is the resize factor that was
  applied to image i.
  """
  num_images = len(roidb)
  processed_ims = []
  im_scales = []
  for i in range(num_images):
    im = cv2.imread(roidb[i]['image'])# read the image from disk
    if roidb[i]['flipped']:# entry marked flipped -> mirror horizontally
      im = im[:, ::-1, :]
    target_size = cfg.TRAIN.SCALES[scale_inds[i]]# chosen target scale
    # prep_im_for_blob (utils/blob.py): subtracts the configured pixel means,
    # then scales by target_size / min(height, width), capped by MAX_SIZE;
    # the aspect ratio is preserved.  Returns (image, scale factor).
    """
    #在blob。py中,
    图像-设定好的均值
    im_scale = float(选取的尺度) / float(实际的长宽的小者)
    返回调整好的图片和缩放比,此时宽高比与原图一致
    """
    im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
                    cfg.TRAIN.MAX_SIZE)
    im_scales.append(im_scale)
    processed_ims.append(im)
  # Create a blob to hold the input images,
  blob = im_list_to_blob(processed_ims)
  return blob, im_scales
def _get_2_image_blob(roidb, scale_inds):
  """Builds an input blob from the images in the roidb at the specified
  scales.

  Mixup variant: both images are prepared as in _get_image_blob, the second
  is resized to the first image's shape, and the two are blended with weight
  cfg.lamda: im = lam * im1 + (1 - lam) * im2.

  Returns (blob, im_scales, scale) where ``scale`` = shape1 / shape2 is the
  per-axis factor used to resize image 2 — the caller needs it to rescale
  image 2's ground-truth boxes.
  """
  num_images = len(roidb)
  processed_ims = []
  im_scales = []
  for i in range(num_images):
    im = cv2.imread(roidb[i]['image'])
    if roidb[i]['flipped']:
      im = im[:, ::-1, :]
    target_size = cfg.TRAIN.SCALES[scale_inds[i]]
    im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
                    cfg.TRAIN.MAX_SIZE)
    im_scales.append(im_scale)
    processed_ims.append(im)
  lam = cfg.lamda
  im1 = processed_ims[0]
  im2 = processed_ims[1]
  # Per-axis (height, width, channels) ratio between the two image shapes.
  shape1,shape2 = np.array(im1.shape, dtype=np.float32), np.array(im2.shape, dtype=np.float32)
  scale = shape1 / shape2
  # Resize im2 to exactly im1's size (fx is the width/x factor, fy height/y).
  im2 = cv2.resize(im2, None, None, fx=scale[1], fy=scale[0],
           interpolation=cv2.INTER_LINEAR)
  assert im1.shape == im2.shape,"im1.shape:{} im2.shape:{} scale:{}".format(im1.shape, im2.shape, scale)
  # Mixup: convex combination of the two images.
  im = lam * im1 + (1 - lam) * im2
  processed_ims = [im]
  # Create a blob to hold the input images
  blob = im_list_to_blob(processed_ims)
  return blob, im_scales, scale
| # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME> and <NAME>
# --------------------------------------------------------
"""Compute minibatch blobs for training a Fast R-CNN network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import numpy.random as npr
import cv2
from model.config import cfg
from utils.blob import prep_im_for_blob, im_list_to_blob
def get_minibatch(roidb, num_classes):
"""Given a roidb, construct a minibatch sampled from it."""
num_images = len(roidb)
# Sample random scales to use for each image in this batch
# 随机挑选一个尺度,作为这个batch的roi尺度,[0,n(尺度的数量)],选取size个
random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
size=num_images)
assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
'num_images ({}) must divide BATCH_SIZE ({})'. \
format(num_images, cfg.TRAIN.BATCH_SIZE)
###改
#######################################################################################################################
if cfg.TRAIN.IMS_PER_BATCH == 1 :
"""
一次处理一张图片
"""
# Get the input image blob, formatted for caffe
# 获取blob并调整格式
im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)
blobs = {'data': im_blob}
assert len(im_scales) == 1, "Single batch only"
assert len(roidb) == 1, "Single batch only"
# gt boxes: (x1, y1, x2, y2, cls)
if cfg.TRAIN.USE_ALL_GT:
# Include all ground truth boxes
# 属于前景的图片
gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
else:
# For the COCO ground truth boxes, exclude the ones that are ''iscrowd''
gt_inds = np.where(roidb[0]['gt_classes'] != 0 & np.all(roidb[0]['gt_overlaps'].toarray() > -1.0, axis=1))[0]
#创建一个空boxes对象,将roidb的相应对象给他
gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :] * im_scales[0]
gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
blobs['gt_boxes'] = gt_boxes
blobs['im_info'] = np.array(
[im_blob.shape[1], im_blob.shape[2], im_scales[0]],
dtype=np.float32)
return blobs
elif cfg.TRAIN.IMS_PER_BATCH == 2 :
"""
一次处理两张图片,做mixup
"""
#################################################################################################################
im_blob, im_scales, mix_scale= _get_2_image_blob(roidb, random_scale_inds)
##################################################################################################################
blobs = {'data': im_blob}
assert len(im_scales) == 2, "Single batch only"
assert len(roidb) == 2, "Single batch only"
# gt boxes: (x1, y1, x2, y2, cls)
if cfg.TRAIN.USE_ALL_GT:
# Include all ground truth boxes
gt_inds1 = np.where(roidb[0]['gt_classes'] != 0)[0]
gt_inds2 = np.where(roidb[1]['gt_classes'] != 0)[0]
else:
# For the COCO ground truth boxes, exclude the ones that are ''iscrowd''
gt_inds1 = np.where(roidb[0]['gt_classes'] != 0 & np.all(roidb[0]['gt_overlaps'].toarray() > -1.0, axis=1))[0]
gt_inds2 = np.where(roidb[1]['gt_classes'] != 0 & np.all(roidb[1]['gt_overlaps'].toarray() > -1.0, axis=1))[0]
gt_boxes1 = np.empty((len(gt_inds1), 5), dtype=np.float32)
gt_boxes1[:, 0:4] = roidb[0]['boxes'][gt_inds1, :] * im_scales[0]
gt_boxes1[:, 4] = roidb[0]['gt_classes'][gt_inds1]
gt_boxes2 = np.empty((len(gt_inds2), 5), dtype=np.float32)
gt_boxes2[:, 0:4] = roidb[1]['boxes'][gt_inds2, :] * im_scales[1]
gt_boxes2[:, 0] *= mix_scale[1]#x1
gt_boxes2[:, 1] *= mix_scale[0]#y1
gt_boxes2[:, 2] *= mix_scale[1]#x2
gt_boxes2[:, 3] *= mix_scale[0]#y2
gt_boxes2[:, 4] = roidb[1]['gt_classes'][gt_inds2]#cls
blobs['gt_boxes'] = gt_boxes1
blobs['gt_boxes2'] = gt_boxes2
blobs['im_info'] = np.array(
[im_blob.shape[1], im_blob.shape[2], im_scales[0]],
dtype=np.float32)
return blobs
else:
raise Exception("check cfg.TRAIN.IMS_PER_BACTH in /lib/model/config.py")
###########################################################################################################################
def _get_image_blob(roidb, scale_inds):
"""
Builds an input blob from the images in the roidb at the specified
scales.
构建blob从roidb中,以scale_inds的size
"""
num_images = len(roidb)
processed_ims = []
im_scales = []
for i in range(num_images):
im = cv2.imread(roidb[i]['image'])#读取图片
if roidb[i]['flipped']:#水平翻转
im = im[:, ::-1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]#读取尺度
"""
#在blob。py中,
图像-设定好的均值
im_scale = float(选取的尺度) / float(实际的长宽的小者)
返回调整好的图片和缩放比,此时宽高比与原图一致
"""
im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images,
blob = im_list_to_blob(processed_ims)
return blob, im_scales
def _get_2_image_blob(roidb, scale_inds):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
processed_ims = []
im_scales = []
for i in range(num_images):
im = cv2.imread(roidb[i]['image'])
if roidb[i]['flipped']:
im = im[:, ::-1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale)
processed_ims.append(im)
lam = cfg.lamda
im1 = processed_ims[0]
im2 = processed_ims[1]
#取得两者的shape
shape1,shape2 = np.array(im1.shape, dtype=np.float32), np.array(im2.shape, dtype=np.float32)
scale = shape1 / shape2
#将im2调整至im1的大小
im2 = cv2.resize(im2, None, None, fx=scale[1], fy=scale[0],
interpolation=cv2.INTER_LINEAR)
assert im1.shape == im2.shape,"im1.shape:{} im2.shape:{} scale:{}".format(im1.shape, im2.shape, scale)
#mixup
im = lam * im1 + (1 - lam) * im2
processed_ims = [im]
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, im_scales, scale
| de | 0.203414 | # -------------------------------------------------------- # Fast R-CNN # Copyright (c) 2015 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by <NAME> and <NAME> # -------------------------------------------------------- Compute minibatch blobs for training a Fast R-CNN network. Given a roidb, construct a minibatch sampled from it. # Sample random scales to use for each image in this batch # 随机挑选一个尺度,作为这个batch的roi尺度,[0,n(尺度的数量)],选取size个 ###改 ####################################################################################################################### 一次处理一张图片 # Get the input image blob, formatted for caffe # 获取blob并调整格式 # gt boxes: (x1, y1, x2, y2, cls) # Include all ground truth boxes # 属于前景的图片 # For the COCO ground truth boxes, exclude the ones that are ''iscrowd'' #创建一个空boxes对象,将roidb的相应对象给他 一次处理两张图片,做mixup ################################################################################################################# ################################################################################################################## # gt boxes: (x1, y1, x2, y2, cls) # Include all ground truth boxes # For the COCO ground truth boxes, exclude the ones that are ''iscrowd'' #x1 #y1 #x2 #y2 #cls ########################################################################################################################### Builds an input blob from the images in the roidb at the specified scales. 构建blob从roidb中,以scale_inds的size #读取图片 #水平翻转 #读取尺度 #在blob。py中, 图像-设定好的均值 im_scale = float(选取的尺度) / float(实际的长宽的小者) 返回调整好的图片和缩放比,此时宽高比与原图一致 # Create a blob to hold the input images, Builds an input blob from the images in the roidb at the specified scales. #取得两者的shape #将im2调整至im1的大小 #mixup # Create a blob to hold the input images | 2.37363 | 2 |
pxml/gcXmlParser.py | zhujunyong/python3-utils | 0 | 6616294 | #!/usr/bin/env python3
# -*- coding: utf8 -*-
'''
Created on 2017.08.25
@author: zhujunyong
'''
import os
import sys
import time
import xml.etree.ElementTree as ET
import datetime
def parseXml(path):
    """Parse an IBM J9 verbose-GC XML log and return CSV-formatted memory stats.

    Every ``<mem>`` element under each ``<gc-start>``/``<mem-info>`` becomes one
    CSV line: ``timestamp,type,free,total,percent\\n``.  The ISO timestamp
    (with fractional seconds) is normalised to ``YYYY-mm-dd HH:MM:SS``.

    Returns the concatenated CSV text ('' when the log has no gc-start records).
    """
    ns = {'ns': 'http://www.ibm.com/j9/verbosegc'}
    root = ET.parse(path).getroot()
    # Collect lines and join once instead of quadratic string concatenation.
    lines = []
    for gcstart in root.findall('ns:gc-start', ns):
        timestamp = gcstart.attrib.get('timestamp')
        timestamp = datetime.datetime.strptime(
            timestamp, '%Y-%m-%dT%H:%M:%S.%f').strftime('%Y-%m-%d %H:%M:%S')
        for meminfo in gcstart.findall('ns:mem-info', ns):
            for mem in meminfo.findall('ns:mem', ns):
                lines.append("%s,%s,%s,%s,%s\n" % (timestamp,
                                                   mem.attrib.get('type'),
                                                   mem.attrib.get('free'),
                                                   mem.attrib.get('total'),
                                                   mem.attrib.get('percent')))
    return "".join(lines)
if __name__ == '__main__':
    # Validate the command-line arguments.
    if len(sys.argv) < 2:
        print("usage: python3 gcXmlParser.py <xml file path>")
        print("for example : python3 nmonprocessor.py /Users/zhujunyong/Documents/git/python3-utils/pxml/gc.xml")
        sys.exit(0)
    # Path of the verbose-GC XML log to parse.
    path = sys.argv[1]
    print("start parse %s..." % path)
    content = parseXml(path)
    # Nothing to write when the log contained no <gc-start> records.
    if len(content) == 0:
        print('no data')
        sys.exit(0)  # was exit(0): site-dependent builtin, sys.exit is reliable
    fmtdFileName = path + '.formatted'
    # Create or overwrite the .formatted file with the parsed CSV content;
    # the context manager guarantees the handle is closed on every path.
    try:
        with open(fmtdFileName, 'w') as fmtdFile:
            fmtdFile.write(content)
    except Exception as err:
        print(err)
    print('write fmtd file:%s' % fmtdFileName)
| #!/usr/bin/env python3
# -*- coding: utf8 -*-
'''
Created on 2017.08.25
@author: zhujunyong
'''
import os
import sys
import time
import xml.etree.ElementTree as ET
import datetime
def parseXml(path):
ns = {'ns':'http://www.ibm.com/j9/verbosegc'}
tree = ET.parse(path)
root = tree.getroot()
content = ''
for gcstart in root.findall('ns:gc-start',ns):
timestamp = gcstart.attrib.get('timestamp')
timestamp = datetime.datetime.strptime(timestamp,'%Y-%m-%dT%H:%M:%S.%f').strftime('%Y-%m-%d %H:%M:%S')
for meminfo in gcstart.findall('ns:mem-info',ns):
for mem in meminfo.findall('ns:mem',ns):
memType = mem.attrib.get('type')
memFree = mem.attrib.get('free')
memTotal = mem.attrib.get('total')
memPercent = mem.attrib.get('percent')
line = "%s,%s,%s,%s,%s\n" % (timestamp, memType, memFree, memTotal, memPercent)
content += line
return content
if __name__ == '__main__':
#检查参数
if len(sys.argv) < 2:
print("usage: python3 gcXmlParser.py <xml file path>")
print("for example : python3 nmonprocessor.py /Users/zhujunyong/Documents/git/python3-utils/pxml/gc.xml")
sys.exit(0)
#给路径赋值
path = sys.argv[1]
print("start parse %s..." % path)
content = parseXml(path)
# if content == '', ignore it
if len(content) == 0:
print('no data')
exit(0)
fmtdFileName = path+'.formatted'
# create or update .formatted file and append parsed content to it
fmtdFile = open(fmtdFileName, 'w')
try :
fmtdFile.write(content)
except Exception as err:
print(err)
finally:
fmtdFile.close()
print('write fmtd file:%s' % fmtdFileName)
| en | 0.236859 | #!/usr/bin/env python3 # -*- coding: utf8 -*- Created on 2017.08.25 @author: zhujunyong #检查参数 #给路径赋值 # if content == '', ignore it # create or update .formatted file and append parsed content to it | 2.357492 | 2 |
validation_tests/reports/validations_produce_results.py | samcom12/anuga_core | 136 | 6616295 | <filename>validation_tests/reports/validations_produce_results.py
"""
Script to run all the produce_results scripts in the
validation_tests/xxx/xxx/ directories
"""
import os
import time
import anuga
from anuga import indent
#from anuga.validation_utilities.parameters import alg
#from anuga.validation_utilities.parameters import cfl
args = anuga.get_args()
alg = args.alg
np = args.np
verbose = args.verbose
#---------------------------------
# Get the current svn revision
#---------------------------------
timestamp = time.asctime()
major_revision = anuga.get_version()
try:
# This fails if using git for version control
minor_revision = anuga.get_revision_number()
except:
try:
# This works when using git on unix
minor_revision = os.popen("git show-ref --head -s | head -n1").read().strip()
except:
# This is a fallback position
minor_revision = 'unknown'
#----------------------------------
# Now it is ok to create the latex
# macro file with run parameters
#
# FIXME: THis is a little dangerous as
# this is changed before all the tests
# are run.
#----------------------------------
f = open('saved_parameters.tex', 'w')
#f.write('\\newcommand{\\cfl}{\\UScore{%s}}\n' % str(cfl))
f.write('\\newcommand{\\alg}{\\UScore{%s}}\n' % str(alg))
f.write('\\newcommand{\\majorR}{\\UScore{%s}}\n' % str(major_revision))
f.write('\\newcommand{\\minorR}{\\UScore{%s}}\n' % str(minor_revision))
f.write('\\newcommand{\\timeR}{{%s}}\n' % str(timestamp))
f.close()
#---------------------------------
# Run the tests
#---------------------------------
os.chdir('..')
buildroot = os.getcwd()
Upper_dirs = os.listdir('.')
dir = '.'
Upper_dirs = [name for name in os.listdir(dir) if os.path.isdir(os.path.join(dir, name))]
try:
Upper_dirs.remove('.svn')
except ValueError:
pass
try:
Upper_dirs.remove('reports')
except ValueError:
pass
try:
Upper_dirs.remove('case_studies')
except ValueError:
pass
#print Upper_dirs
#os.chdir('./Tests')
#print 'Tests'
print(Upper_dirs)
time_total = 0.0
test_number = 1
for dir in Upper_dirs:
os.chdir(dir)
print(72 * '=')
print('Directory: ' + dir)
print(72 * '=')
#print 'Changing to', os.getcwd()
dir = '.'
Lower_dirs = [name for name in os.listdir(dir) if os.path.isdir(os.path.join(dir, name))]
try:
Lower_dirs.remove('.svn')
except ValueError:
pass
#print Lower_dirs
for l_dir in Lower_dirs:
os.chdir(l_dir)
#print os.getcwd()
print(60 * '=')
print('Subdirectory %g: '% (test_number) + l_dir)
test_number += 1
print(60 * '=')
try:
t0 = time.time()
if verbose:
cmd = 'python produce_results.py -alg %s -np %s -v '% (str(alg),str(np))
else:
cmd = 'python produce_results.py -alg %s -np %s '% (str(alg),str(np))
print(2 * indent + 'Running: ' + cmd)
os.system(cmd)
t1 = time.time() - t0
time_total += t1
print(2 * indent + 'That took ' + str(t1) + ' secs')
except:
print(2 * indent + 'Failed running produce_results in ' + os.getcwd())
pass
os.chdir('..')
#print 'Changing to', os.getcwd()
os.chdir('..')
#print 'Changing to', os.getcwd()
os.chdir(buildroot)
print(72 * '=')
print('That took ' + str(time_total) + ' secs')
print(72 * '=')
# go back to reports directory to typeset report
os.chdir('reports')
os.system('python validations_typeset_report.py')
import subprocess
cmd = 'mv validations_report.pdf validations_report_alg_%s.pdf' % (str(alg))
print(cmd)
subprocess.call([cmd], shell=True)
| <filename>validation_tests/reports/validations_produce_results.py
"""
Script to run all the produce_results scripts in the
validation_tests/xxx/xxx/ directories
"""
import os
import time
import anuga
from anuga import indent
#from anuga.validation_utilities.parameters import alg
#from anuga.validation_utilities.parameters import cfl
args = anuga.get_args()
alg = args.alg
np = args.np
verbose = args.verbose
#---------------------------------
# Get the current svn revision
#---------------------------------
timestamp = time.asctime()
major_revision = anuga.get_version()
try:
# This fails if using git for version control
minor_revision = anuga.get_revision_number()
except:
try:
# This works when using git on unix
minor_revision = os.popen("git show-ref --head -s | head -n1").read().strip()
except:
# This is a fallback position
minor_revision = 'unknown'
#----------------------------------
# Now it is ok to create the latex
# macro file with run parameters
#
# FIXME: THis is a little dangerous as
# this is changed before all the tests
# are run.
#----------------------------------
f = open('saved_parameters.tex', 'w')
#f.write('\\newcommand{\\cfl}{\\UScore{%s}}\n' % str(cfl))
f.write('\\newcommand{\\alg}{\\UScore{%s}}\n' % str(alg))
f.write('\\newcommand{\\majorR}{\\UScore{%s}}\n' % str(major_revision))
f.write('\\newcommand{\\minorR}{\\UScore{%s}}\n' % str(minor_revision))
f.write('\\newcommand{\\timeR}{{%s}}\n' % str(timestamp))
f.close()
#---------------------------------
# Run the tests
#---------------------------------
os.chdir('..')
buildroot = os.getcwd()
Upper_dirs = os.listdir('.')
dir = '.'
Upper_dirs = [name for name in os.listdir(dir) if os.path.isdir(os.path.join(dir, name))]
try:
Upper_dirs.remove('.svn')
except ValueError:
pass
try:
Upper_dirs.remove('reports')
except ValueError:
pass
try:
Upper_dirs.remove('case_studies')
except ValueError:
pass
#print Upper_dirs
#os.chdir('./Tests')
#print 'Tests'
print(Upper_dirs)
time_total = 0.0
test_number = 1
for dir in Upper_dirs:
os.chdir(dir)
print(72 * '=')
print('Directory: ' + dir)
print(72 * '=')
#print 'Changing to', os.getcwd()
dir = '.'
Lower_dirs = [name for name in os.listdir(dir) if os.path.isdir(os.path.join(dir, name))]
try:
Lower_dirs.remove('.svn')
except ValueError:
pass
#print Lower_dirs
for l_dir in Lower_dirs:
os.chdir(l_dir)
#print os.getcwd()
print(60 * '=')
print('Subdirectory %g: '% (test_number) + l_dir)
test_number += 1
print(60 * '=')
try:
t0 = time.time()
if verbose:
cmd = 'python produce_results.py -alg %s -np %s -v '% (str(alg),str(np))
else:
cmd = 'python produce_results.py -alg %s -np %s '% (str(alg),str(np))
print(2 * indent + 'Running: ' + cmd)
os.system(cmd)
t1 = time.time() - t0
time_total += t1
print(2 * indent + 'That took ' + str(t1) + ' secs')
except:
print(2 * indent + 'Failed running produce_results in ' + os.getcwd())
pass
os.chdir('..')
#print 'Changing to', os.getcwd()
os.chdir('..')
#print 'Changing to', os.getcwd()
os.chdir(buildroot)
print(72 * '=')
print('That took ' + str(time_total) + ' secs')
print(72 * '=')
# go back to reports directory to typeset report
os.chdir('reports')
os.system('python validations_typeset_report.py')
import subprocess
cmd = 'mv validations_report.pdf validations_report_alg_%s.pdf' % (str(alg))
print(cmd)
subprocess.call([cmd], shell=True)
| en | 0.366562 | Script to run all the produce_results scripts in the validation_tests/xxx/xxx/ directories #from anuga.validation_utilities.parameters import alg #from anuga.validation_utilities.parameters import cfl #--------------------------------- # Get the current svn revision #--------------------------------- # This fails if using git for version control # This works when using git on unix # This is a fallback position #---------------------------------- # Now it is ok to create the latex # macro file with run parameters # # FIXME: THis is a little dangerous as # this is changed before all the tests # are run. #---------------------------------- #f.write('\\newcommand{\\cfl}{\\UScore{%s}}\n' % str(cfl)) #--------------------------------- # Run the tests #--------------------------------- #print Upper_dirs #os.chdir('./Tests') #print 'Tests' #print 'Changing to', os.getcwd() #print Lower_dirs #print os.getcwd() #print 'Changing to', os.getcwd() #print 'Changing to', os.getcwd() # go back to reports directory to typeset report | 2.391089 | 2 |
Apps/JoinUsuarios/apps.py | Jorge-DevOps/API_HealthTech | 0 | 6616296 | <gh_stars>0
from django.apps import AppConfig
class JointablesConfig(AppConfig):
    """Django AppConfig for the ``Apps.JoinUsuarios`` application."""
    # Default primary-key field type for models that do not declare one.
    default_auto_field = 'django.db.models.BigAutoField'
    # Full dotted path of the application this configuration belongs to.
    name = 'Apps.JoinUsuarios'
| from django.apps import AppConfig
class JointablesConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'Apps.JoinUsuarios' | none | 1 | 1.33286 | 1 | |
tests/datasets/dataset-unit-test.py | IISH/dpi | 0 | 6616297 | #!/usr/bin/python
from __future__ import absolute_import
import os
import sys
import unittest
import json
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname("__file__"), '../../')))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname("__file__"), '../../modules')))
from cliocore.configutils import Configuration
from cliocore.datasets import Dataset
class ConfigTestClass(unittest.TestCase):
    """Smoke tests for the cliocore ``Dataset`` configuration and index access."""

    def test_settings(self):
        """Dataset must expose a renderable index, resolve handles and carry
        the mandatory configuration keys."""
        self.clioinfra = Dataset()
        clioindex = self.clioinfra.clioindex()
        # The index should render to HTML (truthy string expected).
        self.assertTrue(clioindex.to_html())
        # Two known dataset UIDs should resolve to handles.
        uids = '11007,11002'
        self.assertTrue(self.clioinfra.findhandles(uids))
        # Mandatory configuration entries for the Dataverse API.
        self.assertTrue(self.clioinfra.config['dataverseroot'])
        self.assertTrue(self.clioinfra.config['apiroot'])
        self.assertTrue(self.clioinfra.config['key'])
if __name__ == '__main__':
unittest.main()
| #!/usr/bin/python
from __future__ import absolute_import
import os
import sys
import unittest
import json
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname("__file__"), '../../')))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname("__file__"), '../../modules')))
from cliocore.configutils import Configuration
from cliocore.datasets import Dataset
class ConfigTestClass(unittest.TestCase):
def test_settings(self):
self.clioinfra = Dataset()
clioindex = self.clioinfra.clioindex()
self.assertTrue(clioindex.to_html())
uids = '11007,11002'
self.assertTrue(self.clioinfra.findhandles(uids))
self.assertTrue(self.clioinfra.config['dataverseroot'])
self.assertTrue(self.clioinfra.config['apiroot'])
self.assertTrue(self.clioinfra.config['key'])
if __name__ == '__main__':
unittest.main()
| ru | 0.258958 | #!/usr/bin/python | 2.196615 | 2 |
miscutils/classes/__init__.py | matthewgdv/miscutils | 0 | 6616298 | __all__ = [
"Base64", "Counter", "Gender", "NullContext", "OneOrMany", "Profiler", "Timer", "Version"
]
from .base64 import Base64
from .counter import Counter
from .gender import Gender
from .null_context import NullContext
from .one_or_many import OneOrMany
from .profiler import Profiler
from .timer import Timer
from .version import Version
| __all__ = [
"Base64", "Counter", "Gender", "NullContext", "OneOrMany", "Profiler", "Timer", "Version"
]
from .base64 import Base64
from .counter import Counter
from .gender import Gender
from .null_context import NullContext
from .one_or_many import OneOrMany
from .profiler import Profiler
from .timer import Timer
from .version import Version
| none | 1 | 1.373071 | 1 | |
pydenji/test/dummymodule.py | alanfranz/pydenji | 0 | 6616299 | <filename>pydenji/test/dummymodule.py
sbirulabba = 0
| <filename>pydenji/test/dummymodule.py
sbirulabba = 0
| none | 1 | 1.003227 | 1 | |
journalsmanager/sheetmanager.py | seasidesparrow/ADSJournalsDB | 0 | 6616300 | import gspread
from datetime import datetime
from journalsmanager.exceptions import *
PROTECTED_COLS = 'A1:B99999'
HILIGHT_PROTECTED = {'textFormat': {'bold': True},
'backgroundColor': {'red': 1.0,
'green': 0.30,
'blue': 0.30}}
SENSITIVE_COLS = 'C1:C99999'
HILIGHT_SENSITIVE = {'textFormat': {'bold': True},
'backgroundColor': {'red': 1.0,
'green': 0.80,
'blue': 0.20}}
def xform_google(indict):
outdict = {}
for k, v in indict.items():
if v == '':
v = None
if type(v) == str:
if v.lower() == 't' or v.lower() == 'true' :
v = True
elif v.lower() == 'f' or v.lower() == 'false':
v = False
outdict[k] = v
return outdict
class SpreadsheetManager(object):
def __init__(self, creds=None, token=None, sheetid=None, folderid=None, editors=[]):
try:
self.editors = editors
self.folderid = folderid
self.sheetid = sheetid
self.service = gspread.oauth(credentials_filename=creds,
authorized_user_filename=token)
if self.sheetid:
self.open_sheet(sheetid=self.sheetid)
else:
self.sheet = None
except Exception as err:
raise InitSheetManagerException(err)
def open_sheet(self, sheetid=None):
try:
self.sheet = self.service.open_by_key(sheetid)
except Exception as err:
raise OpenSheetException(err)
def create_sheet(self, title=None, folderid=None):
try:
timestamp = '_' + str(datetime.now()).replace(' ','_')
title = title + timestamp
self.sheet = self.service.create(title, folder_id=folderid)
self.sheetid = self.sheet.id
except Exception as err:
raise CreateSheetException(err)
def write_table(self, sheetid=None, data=None, tablename=None, encoding='utf-8'):
try:
self.service.import_csv(sheetid, data=data.encode(encoding))
if self.sheet:
self._protect_rows(tablename)
self.sheet.sheet1.freeze(rows=1)
except Exception as err:
raise WriteTableException(err)
def _protect_rows(self, tablename=None):
try:
if tablename == 'master' or tablename == 'publisher':
self.sheet.sheet1.add_protected_range('A1:A99999', self.editors)
self.sheet.sheet1.format('A1:A99999', HILIGHT_PROTECTED)
self.sheet.sheet1.format('B1:B99999', HILIGHT_SENSITIVE)
else:
self.sheet.sheet1.add_protected_range(PROTECTED_COLS, self.editors)
self.sheet.sheet1.format(PROTECTED_COLS, HILIGHT_PROTECTED)
self.sheet.sheet1.format(SENSITIVE_COLS, HILIGHT_SENSITIVE)
except Exception as err:
raise ProtectColumnsException(err)
def fetch_table(self):
try:
raw_from_google = self.sheet.sheet1.get_all_records(value_render_option='UNFORMATTED_VALUE', numericise_ignore=['all'])
checkin_data = [xform_google(r) for r in raw_from_google]
return checkin_data
except Exception as err:
raise FetchTableException(err)
| import gspread
from datetime import datetime
from journalsmanager.exceptions import *
PROTECTED_COLS = 'A1:B99999'
HILIGHT_PROTECTED = {'textFormat': {'bold': True},
'backgroundColor': {'red': 1.0,
'green': 0.30,
'blue': 0.30}}
SENSITIVE_COLS = 'C1:C99999'
HILIGHT_SENSITIVE = {'textFormat': {'bold': True},
'backgroundColor': {'red': 1.0,
'green': 0.80,
'blue': 0.20}}
def xform_google(indict):
outdict = {}
for k, v in indict.items():
if v == '':
v = None
if type(v) == str:
if v.lower() == 't' or v.lower() == 'true' :
v = True
elif v.lower() == 'f' or v.lower() == 'false':
v = False
outdict[k] = v
return outdict
class SpreadsheetManager(object):
def __init__(self, creds=None, token=None, sheetid=None, folderid=None, editors=[]):
try:
self.editors = editors
self.folderid = folderid
self.sheetid = sheetid
self.service = gspread.oauth(credentials_filename=creds,
authorized_user_filename=token)
if self.sheetid:
self.open_sheet(sheetid=self.sheetid)
else:
self.sheet = None
except Exception as err:
raise InitSheetManagerException(err)
def open_sheet(self, sheetid=None):
try:
self.sheet = self.service.open_by_key(sheetid)
except Exception as err:
raise OpenSheetException(err)
def create_sheet(self, title=None, folderid=None):
try:
timestamp = '_' + str(datetime.now()).replace(' ','_')
title = title + timestamp
self.sheet = self.service.create(title, folder_id=folderid)
self.sheetid = self.sheet.id
except Exception as err:
raise CreateSheetException(err)
def write_table(self, sheetid=None, data=None, tablename=None, encoding='utf-8'):
try:
self.service.import_csv(sheetid, data=data.encode(encoding))
if self.sheet:
self._protect_rows(tablename)
self.sheet.sheet1.freeze(rows=1)
except Exception as err:
raise WriteTableException(err)
def _protect_rows(self, tablename=None):
try:
if tablename == 'master' or tablename == 'publisher':
self.sheet.sheet1.add_protected_range('A1:A99999', self.editors)
self.sheet.sheet1.format('A1:A99999', HILIGHT_PROTECTED)
self.sheet.sheet1.format('B1:B99999', HILIGHT_SENSITIVE)
else:
self.sheet.sheet1.add_protected_range(PROTECTED_COLS, self.editors)
self.sheet.sheet1.format(PROTECTED_COLS, HILIGHT_PROTECTED)
self.sheet.sheet1.format(SENSITIVE_COLS, HILIGHT_SENSITIVE)
except Exception as err:
raise ProtectColumnsException(err)
def fetch_table(self):
try:
raw_from_google = self.sheet.sheet1.get_all_records(value_render_option='UNFORMATTED_VALUE', numericise_ignore=['all'])
checkin_data = [xform_google(r) for r in raw_from_google]
return checkin_data
except Exception as err:
raise FetchTableException(err)
| none | 1 | 2.381409 | 2 | |
migrations/versions/47bcfd24acd2_remoturns_the_dpassword_hash.py | Tyra-hans/1minpitch | 0 | 6616301 | <reponame>Tyra-hans/1minpitch
"""Remoturns the dpassword hash
Revision ID: <KEY>
Revises: <PASSWORD>
Create Date: 2019-10-22 09:12:41.950200
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('password_hash', sa.String(length=255), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'password_hash')
# ### end Alembic commands ###
| """Remoturns the dpassword hash
Revision ID: <KEY>
Revises: <PASSWORD>
Create Date: 2019-10-22 09:12:41.950200
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('password_hash', sa.String(length=255), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'password_hash')
# ### end Alembic commands ### | en | 0.53007 | Remoturns the dpassword hash Revision ID: <KEY> Revises: <PASSWORD> Create Date: 2019-10-22 09:12:41.950200 # revision identifiers, used by Alembic. # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### | 1.60474 | 2 |
PyFlow/Packages/PyFlowBase/UI/UIMakeDictNode.py | luzpaz/PyFlow | 1,463 | 6616302 | ## Copyright 2015-2019 <NAME>, <NAME>
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
## http://www.apache.org/licenses/LICENSE-2.0
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
from Qt.QtWidgets import QComboBox
from PyFlow.UI.Canvas.UINodeBase import UINodeBase
class UIMakeDictNode(UINodeBase):
def __init__(self, raw_node):
super(UIMakeDictNode, self).__init__(raw_node)
self.prevDataType = "AnyPin"
def postCreate(self, jsonTemplate=None):
super(UIMakeDictNode, self).postCreate(jsonTemplate)
self.input = self.getPinSG("KeyType")
def changeType(self, dataType):
self.input._rawPin.initType(
self.input._rawPin._defaultSupportedDataTypes[dataType], True)
def selectStructure(self, name):
self.canvasRef().tryFillPropertiesView(self)
def createInputWidgets(self, inputsCategory, inGroup=None, pins=True):
if pins:
super(UIMakeDictNode, self).createInputWidgets(inputsCategory, inGroup)
selector = QComboBox()
for i in self.input._rawPin._defaultSupportedDataTypes:
selector.addItem(i)
selector.setCurrentIndex(self.input._rawPin._defaultSupportedDataTypes.index(
self.input._rawPin.dataType))
selector.activated.connect(self.changeType)
inputsCategory.insertWidget(0, "DataType", selector, group=inGroup)
| ## Copyright 2015-2019 <NAME>, <NAME>
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
## http://www.apache.org/licenses/LICENSE-2.0
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
from Qt.QtWidgets import QComboBox
from PyFlow.UI.Canvas.UINodeBase import UINodeBase
class UIMakeDictNode(UINodeBase):
def __init__(self, raw_node):
super(UIMakeDictNode, self).__init__(raw_node)
self.prevDataType = "AnyPin"
def postCreate(self, jsonTemplate=None):
super(UIMakeDictNode, self).postCreate(jsonTemplate)
self.input = self.getPinSG("KeyType")
def changeType(self, dataType):
self.input._rawPin.initType(
self.input._rawPin._defaultSupportedDataTypes[dataType], True)
def selectStructure(self, name):
self.canvasRef().tryFillPropertiesView(self)
def createInputWidgets(self, inputsCategory, inGroup=None, pins=True):
if pins:
super(UIMakeDictNode, self).createInputWidgets(inputsCategory, inGroup)
selector = QComboBox()
for i in self.input._rawPin._defaultSupportedDataTypes:
selector.addItem(i)
selector.setCurrentIndex(self.input._rawPin._defaultSupportedDataTypes.index(
self.input._rawPin.dataType))
selector.activated.connect(self.changeType)
inputsCategory.insertWidget(0, "DataType", selector, group=inGroup)
| en | 0.804266 | ## Copyright 2015-2019 <NAME>, <NAME> ## Licensed under the Apache License, Version 2.0 (the "License"); ## you may not use this file except in compliance with the License. ## You may obtain a copy of the License at ## http://www.apache.org/licenses/LICENSE-2.0 ## Unless required by applicable law or agreed to in writing, software ## distributed under the License is distributed on an "AS IS" BASIS, ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ## See the License for the specific language governing permissions and ## limitations under the License. | 1.886965 | 2 |
tests/test_openapi.py | jasonwalsh/aptos | 16 | 6616303 | <gh_stars>10-100
import json
import os
import unittest
from aptos.models import OpenAPI, TypeVisitor
from aptos.visitors import RecordVisitor
class OpenAPITestCase(unittest.TestCase):
def runTest(self):
with open(os.path.join(os.path.dirname(__file__), 'schemas', 'petstore')) as fp: # noqa: E501
instance = json.load(fp)
specification = OpenAPI.fromJson(instance)
specification.accept(TypeVisitor(instance))
record = specification.components['schemas']['Pet']
schema = record.accept(RecordVisitor())
print(json.dumps(schema, indent=2))
| import json
import os
import unittest
from aptos.models import OpenAPI, TypeVisitor
from aptos.visitors import RecordVisitor
class OpenAPITestCase(unittest.TestCase):
def runTest(self):
with open(os.path.join(os.path.dirname(__file__), 'schemas', 'petstore')) as fp: # noqa: E501
instance = json.load(fp)
specification = OpenAPI.fromJson(instance)
specification.accept(TypeVisitor(instance))
record = specification.components['schemas']['Pet']
schema = record.accept(RecordVisitor())
print(json.dumps(schema, indent=2)) | it | 0.356793 | # noqa: E501 | 2.73093 | 3 |
network/NW3/server.py | hbyyy/TIL | 0 | 6616304 | import socket
server_socket = socket.socket()
server_socket.bind(('127.0.0.1', 9999))
server_socket.listen()
client_socket, addr = server_socket.accept()
print('connected by ', addr)
while True:
data = client_socket.recv(4096)
if not data:
break;
print(data.decode())
client_socket.close()
server_socket.close()
| import socket
server_socket = socket.socket()
server_socket.bind(('127.0.0.1', 9999))
server_socket.listen()
client_socket, addr = server_socket.accept()
print('connected by ', addr)
while True:
data = client_socket.recv(4096)
if not data:
break;
print(data.decode())
client_socket.close()
server_socket.close()
| none | 1 | 3.113785 | 3 | |
script/site.py | djangli/enviot | 2 | 6616305 | """
Setup 20 monitoring site around Fort Collins area.
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
proj_path = '/home/ubuntu/enviot/'#os.path.dirname(os.path.dirname(__file__))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "enviot.settings")
sys.path.append(proj_path)
os.chdir(proj_path)
application = get_wsgi_application()
from random import uniform
from monitor.models import Site
def setup_sites():
"""[summary]
"""
number = 20
if Site.objects.count() == number:
return
# Area around Fort Collins
minx, miny, maxx, maxy = (-106.084419, 39.585258, -104.084419, 41.585258)
# Generate 20 sites
for i in range(number):
lat = uniform(miny, maxy)
lon = uniform(minx, maxx)
name = "Site_{}".format(i+1)
site = Site(name=name, lat=lat, lon=lon)
site.save()
if __name__ == "__main__":
setup_sites()
| """
Setup 20 monitoring site around Fort Collins area.
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
proj_path = '/home/ubuntu/enviot/'#os.path.dirname(os.path.dirname(__file__))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "enviot.settings")
sys.path.append(proj_path)
os.chdir(proj_path)
application = get_wsgi_application()
from random import uniform
from monitor.models import Site
def setup_sites():
"""[summary]
"""
number = 20
if Site.objects.count() == number:
return
# Area around Fort Collins
minx, miny, maxx, maxy = (-106.084419, 39.585258, -104.084419, 41.585258)
# Generate 20 sites
for i in range(number):
lat = uniform(miny, maxy)
lon = uniform(minx, maxx)
name = "Site_{}".format(i+1)
site = Site(name=name, lat=lat, lon=lon)
site.save()
if __name__ == "__main__":
setup_sites()
| en | 0.822719 | Setup 20 monitoring site around Fort Collins area. [summary] # Area around Fort Collins # Generate 20 sites | 2.435289 | 2 |
tracklib/__init__.py | SGrosse-Holz/tracklib | 1 | 6616306 | from .trajectory import Trajectory
from .taggedset import TaggedSet
from . import util
from . import io
from . import clean
from . import models
from . import analysis
| from .trajectory import Trajectory
from .taggedset import TaggedSet
from . import util
from . import io
from . import clean
from . import models
from . import analysis
| none | 1 | 1.065151 | 1 | |
src/stactools/noaa_c_cap/dataset.py | stactools-packages/noaa-c-cap | 0 | 6616307 | import logging
import os.path
from typing import List, Optional
logger = logging.getLogger(__name__)
class Dataset:
"""A NOAA C-CAP dataset.
Could be CONUS, could be somewhere else. Always has a tiff file, can have XML metadata.
"""
tiff_href: str
year: str
location: str
xml_file_name: Optional[str]
xml_href: Optional[str]
classes: List[str]
@classmethod
def from_hrefs(cls, hrefs: List[str]) -> List['Dataset']:
"""Creates one or more datasets from a list of hrefs.
Does the work of looking through the HREFs to associate tifs with xml metadata.
"""
datasets = []
for tif_href in (href for href in hrefs
if os.path.splitext(href)[1] == '.tif'):
dataset = Dataset(tif_href)
if dataset.xml_file_name:
xml_href = next(
(href for href in hrefs
if os.path.basename(href) == dataset.xml_file_name), None)
if xml_href:
dataset.xml_href = xml_href
else:
logger.warn(
f"Could not find XML metadata file for {tif_href}")
datasets.append(dataset)
return datasets
def __init__(self, tiff_href: str, xml_href: Optional[str] = None):
"""Creates a new dataset for a GeoTIFF."""
self.tiff_href = tiff_href
self.xml_href = xml_href
file_name = os.path.basename(tiff_href)
parts = file_name.split('_')
if len(parts) < 2:
raise ValueError(f"Invalid NOAA C-CAP file name: {file_name}")
self.location = parts[0]
self.year = parts[1]
if self.location == 'conus':
self.xml_file_name: Optional[str] = f"CCAP_Parent_{self.year}.xml"
elif self.location == 'hi':
self.xml_file_name = None
elif self.location == 'pr':
self.xml_file_name = f"{self.year}_puerto_rico_ccap.xml"
| import logging
import os.path
from typing import List, Optional
logger = logging.getLogger(__name__)
class Dataset:
"""A NOAA C-CAP dataset.
Could be CONUS, could be somewhere else. Always has a tiff file, can have XML metadata.
"""
tiff_href: str
year: str
location: str
xml_file_name: Optional[str]
xml_href: Optional[str]
classes: List[str]
@classmethod
def from_hrefs(cls, hrefs: List[str]) -> List['Dataset']:
"""Creates one or more datasets from a list of hrefs.
Does the work of looking through the HREFs to associate tifs with xml metadata.
"""
datasets = []
for tif_href in (href for href in hrefs
if os.path.splitext(href)[1] == '.tif'):
dataset = Dataset(tif_href)
if dataset.xml_file_name:
xml_href = next(
(href for href in hrefs
if os.path.basename(href) == dataset.xml_file_name), None)
if xml_href:
dataset.xml_href = xml_href
else:
logger.warn(
f"Could not find XML metadata file for {tif_href}")
datasets.append(dataset)
return datasets
def __init__(self, tiff_href: str, xml_href: Optional[str] = None):
"""Creates a new dataset for a GeoTIFF."""
self.tiff_href = tiff_href
self.xml_href = xml_href
file_name = os.path.basename(tiff_href)
parts = file_name.split('_')
if len(parts) < 2:
raise ValueError(f"Invalid NOAA C-CAP file name: {file_name}")
self.location = parts[0]
self.year = parts[1]
if self.location == 'conus':
self.xml_file_name: Optional[str] = f"CCAP_Parent_{self.year}.xml"
elif self.location == 'hi':
self.xml_file_name = None
elif self.location == 'pr':
self.xml_file_name = f"{self.year}_puerto_rico_ccap.xml"
| en | 0.848112 | A NOAA C-CAP dataset. Could be CONUS, could be somewhere else. Always has a tiff file, can have XML metadata. Creates one or more datasets from a list of hrefs. Does the work of looking through the HREFs to associate tifs with xml metadata. Creates a new dataset for a GeoTIFF. | 2.954609 | 3 |
gfootball/examples/models.py | zixianma/football | 0 | 6616308 | <reponame>zixianma/football
# coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Additional models, not available in OpenAI baselines.
gfootball_impala_cnn is architecture used in the paper
(https://arxiv.org/pdf/1907.11180.pdf).
It is illustrated in the appendix. It is similar to Large architecture
from IMPALA paper; we use 4 big blocks instead of 3 though.
"""
# from baselines.common.models import register
# import sonnet as snt
# import tensorflow.compat.v1 as tf
import logging
from typing import Optional
import numpy as np
import gym
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.models.torch.misc import SlimFC, AppendBiasLayer, \
normc_initializer
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.typing import Dict, TensorType, List, ModelConfigDict
torch, nn = try_import_torch()
logger = logging.getLogger(__name__)
# @register('gfootball_impala_cnn')
# def gfootball_impala_cnn():
# def network_fn(frame):
# # Convert to floats.
# frame = tf.to_float(frame)
# frame /= 255
# with tf.variable_scope('convnet'):
# conv_out = frame
# conv_layers = [(16, 2), (32, 2), (32, 2), (32, 2)]
# for i, (num_ch, num_blocks) in enumerate(conv_layers):
# # Downscale.
# conv_out = snt.Conv2D(num_ch, 3, stride=1, padding='SAME')(conv_out)
# conv_out = tf.nn.pool(
# conv_out,
# window_shape=[3, 3],
# pooling_type='MAX',
# padding='SAME',
# strides=[2, 2])
# # Residual block(s).
# for j in range(num_blocks):
# with tf.variable_scope('residual_%d_%d' % (i, j)):
# block_input = conv_out
# conv_out = tf.nn.relu(conv_out)
# conv_out = snt.Conv2D(num_ch, 3, stride=1, padding='SAME')(conv_out)
# conv_out = tf.nn.relu(conv_out)
# conv_out = snt.Conv2D(num_ch, 3, stride=1, padding='SAME')(conv_out)
# conv_out += block_input
# conv_out = tf.nn.relu(conv_out)
# conv_out = snt.BatchFlatten()(conv_out)
# conv_out = snt.Linear(256)(conv_out)
# conv_out = tf.nn.relu(conv_out)
# return conv_out
# return network_fn
class FullyConnectedNetwork(TorchModelV2, nn.Module):
"""Generic fully connected network."""
def __init__(self, obs_space: gym.spaces.Space,
action_space: gym.spaces.Space, num_outputs: int,
model_config: ModelConfigDict, name: str,
input_dim: Optional[int] = None):
TorchModelV2.__init__(self, obs_space, action_space, num_outputs,
model_config, name)
nn.Module.__init__(self)
hiddens = list(model_config.get("fcnet_hiddens", [])) + \
list(model_config.get("post_fcnet_hiddens", []))
activation = model_config.get("fcnet_activation")
if not model_config.get("fcnet_hiddens", []):
activation = model_config.get("post_fcnet_activation")
no_final_linear = model_config.get("no_final_linear")
self.vf_share_layers = model_config.get("vf_share_layers")
self.free_log_std = model_config.get("free_log_std")
# Generate free-floating bias variables for the second half of
# the outputs.
if self.free_log_std:
assert num_outputs % 2 == 0, (
"num_outputs must be divisible by two", num_outputs)
num_outputs = num_outputs // 2
layers = []
prev_layer_size = input_dim if input_dim else int(np.product(obs_space.shape))
self._logits = None
# Create layers 0 to second-last.
for size in hiddens[:-1]:
layers.append(
SlimFC(
in_size=prev_layer_size,
out_size=size,
initializer=normc_initializer(1.0),
activation_fn=activation))
prev_layer_size = size
# The last layer is adjusted to be of size num_outputs, but it's a
# layer with activation.
if no_final_linear and num_outputs:
layers.append(
SlimFC(
in_size=prev_layer_size,
out_size=num_outputs,
initializer=normc_initializer(1.0),
activation_fn=activation))
prev_layer_size = num_outputs
# Finish the layers with the provided sizes (`hiddens`), plus -
# iff num_outputs > 0 - a last linear layer of size num_outputs.
else:
if len(hiddens) > 0:
layers.append(
SlimFC(
in_size=prev_layer_size,
out_size=hiddens[-1],
initializer=normc_initializer(1.0),
activation_fn=activation))
prev_layer_size = hiddens[-1]
if num_outputs:
self._logits = SlimFC(
in_size=prev_layer_size,
out_size=num_outputs,
initializer=normc_initializer(0.01),
activation_fn=None)
else:
self.num_outputs = (
[int(np.product(obs_space.shape))] + hiddens[-1:])[-1]
# Layer to add the log std vars to the state-dependent means.
if self.free_log_std and self._logits:
self._append_free_log_std = AppendBiasLayer(num_outputs)
self._hidden_layers = nn.Sequential(*layers)
self._value_branch_separate = None
if not self.vf_share_layers:
# Build a parallel set of hidden layers for the value net.
prev_vf_layer_size = int(np.product(obs_space.shape))
vf_layers = []
for size in hiddens:
vf_layers.append(
SlimFC(
in_size=prev_vf_layer_size,
out_size=size,
activation_fn=activation,
initializer=normc_initializer(1.0)))
prev_vf_layer_size = size
self._value_branch_separate = nn.Sequential(*vf_layers)
self._value_branch = SlimFC(
in_size=prev_layer_size,
out_size=1,
initializer=normc_initializer(0.01),
activation_fn=None)
# Holds the current "base" output (before logits layer).
self._features = None
# Holds the last input, in case value branch is separate.
self._last_flat_in = None
@override(TorchModelV2)
def forward(self, input_dict: Dict[str, TensorType],
state: List[TensorType],
seq_lens: TensorType) -> (TensorType, List[TensorType]):
obs = input_dict["obs_flat"].float()
self._last_flat_in = obs.reshape(obs.shape[0], -1)
self._features = self._hidden_layers(self._last_flat_in)
logits = self._logits(self._features) if self._logits else \
self._features
if self.free_log_std:
logits = self._append_free_log_std(logits)
return logits, state
@override(TorchModelV2)
def value_function(self) -> TensorType:
assert self._features is not None, "must call forward() first"
if self._value_branch_separate:
return self._value_branch(
self._value_branch_separate(self._last_flat_in)).squeeze(1)
else:
return self._value_branch(self._features).squeeze(1) | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Additional models, not available in OpenAI baselines.
gfootball_impala_cnn is architecture used in the paper
(https://arxiv.org/pdf/1907.11180.pdf).
It is illustrated in the appendix. It is similar to Large architecture
from IMPALA paper; we use 4 big blocks instead of 3 though.
"""
# from baselines.common.models import register
# import sonnet as snt
# import tensorflow.compat.v1 as tf
import logging
from typing import Optional
import numpy as np
import gym
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.models.torch.misc import SlimFC, AppendBiasLayer, \
normc_initializer
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.typing import Dict, TensorType, List, ModelConfigDict
torch, nn = try_import_torch()
logger = logging.getLogger(__name__)
# @register('gfootball_impala_cnn')
# def gfootball_impala_cnn():
# def network_fn(frame):
# # Convert to floats.
# frame = tf.to_float(frame)
# frame /= 255
# with tf.variable_scope('convnet'):
# conv_out = frame
# conv_layers = [(16, 2), (32, 2), (32, 2), (32, 2)]
# for i, (num_ch, num_blocks) in enumerate(conv_layers):
# # Downscale.
# conv_out = snt.Conv2D(num_ch, 3, stride=1, padding='SAME')(conv_out)
# conv_out = tf.nn.pool(
# conv_out,
# window_shape=[3, 3],
# pooling_type='MAX',
# padding='SAME',
# strides=[2, 2])
# # Residual block(s).
# for j in range(num_blocks):
# with tf.variable_scope('residual_%d_%d' % (i, j)):
# block_input = conv_out
# conv_out = tf.nn.relu(conv_out)
# conv_out = snt.Conv2D(num_ch, 3, stride=1, padding='SAME')(conv_out)
# conv_out = tf.nn.relu(conv_out)
# conv_out = snt.Conv2D(num_ch, 3, stride=1, padding='SAME')(conv_out)
# conv_out += block_input
# conv_out = tf.nn.relu(conv_out)
# conv_out = snt.BatchFlatten()(conv_out)
# conv_out = snt.Linear(256)(conv_out)
# conv_out = tf.nn.relu(conv_out)
# return conv_out
# return network_fn
class FullyConnectedNetwork(TorchModelV2, nn.Module):
"""Generic fully connected network."""
def __init__(self, obs_space: gym.spaces.Space,
action_space: gym.spaces.Space, num_outputs: int,
model_config: ModelConfigDict, name: str,
input_dim: Optional[int] = None):
TorchModelV2.__init__(self, obs_space, action_space, num_outputs,
model_config, name)
nn.Module.__init__(self)
hiddens = list(model_config.get("fcnet_hiddens", [])) + \
list(model_config.get("post_fcnet_hiddens", []))
activation = model_config.get("fcnet_activation")
if not model_config.get("fcnet_hiddens", []):
activation = model_config.get("post_fcnet_activation")
no_final_linear = model_config.get("no_final_linear")
self.vf_share_layers = model_config.get("vf_share_layers")
self.free_log_std = model_config.get("free_log_std")
# Generate free-floating bias variables for the second half of
# the outputs.
if self.free_log_std:
assert num_outputs % 2 == 0, (
"num_outputs must be divisible by two", num_outputs)
num_outputs = num_outputs // 2
layers = []
prev_layer_size = input_dim if input_dim else int(np.product(obs_space.shape))
self._logits = None
# Create layers 0 to second-last.
for size in hiddens[:-1]:
layers.append(
SlimFC(
in_size=prev_layer_size,
out_size=size,
initializer=normc_initializer(1.0),
activation_fn=activation))
prev_layer_size = size
# The last layer is adjusted to be of size num_outputs, but it's a
# layer with activation.
if no_final_linear and num_outputs:
layers.append(
SlimFC(
in_size=prev_layer_size,
out_size=num_outputs,
initializer=normc_initializer(1.0),
activation_fn=activation))
prev_layer_size = num_outputs
# Finish the layers with the provided sizes (`hiddens`), plus -
# iff num_outputs > 0 - a last linear layer of size num_outputs.
else:
if len(hiddens) > 0:
layers.append(
SlimFC(
in_size=prev_layer_size,
out_size=hiddens[-1],
initializer=normc_initializer(1.0),
activation_fn=activation))
prev_layer_size = hiddens[-1]
if num_outputs:
self._logits = SlimFC(
in_size=prev_layer_size,
out_size=num_outputs,
initializer=normc_initializer(0.01),
activation_fn=None)
else:
self.num_outputs = (
[int(np.product(obs_space.shape))] + hiddens[-1:])[-1]
# Layer to add the log std vars to the state-dependent means.
if self.free_log_std and self._logits:
self._append_free_log_std = AppendBiasLayer(num_outputs)
self._hidden_layers = nn.Sequential(*layers)
self._value_branch_separate = None
if not self.vf_share_layers:
# Build a parallel set of hidden layers for the value net.
prev_vf_layer_size = int(np.product(obs_space.shape))
vf_layers = []
for size in hiddens:
vf_layers.append(
SlimFC(
in_size=prev_vf_layer_size,
out_size=size,
activation_fn=activation,
initializer=normc_initializer(1.0)))
prev_vf_layer_size = size
self._value_branch_separate = nn.Sequential(*vf_layers)
self._value_branch = SlimFC(
in_size=prev_layer_size,
out_size=1,
initializer=normc_initializer(0.01),
activation_fn=None)
# Holds the current "base" output (before logits layer).
self._features = None
# Holds the last input, in case value branch is separate.
self._last_flat_in = None
@override(TorchModelV2)
def forward(self, input_dict: Dict[str, TensorType],
state: List[TensorType],
seq_lens: TensorType) -> (TensorType, List[TensorType]):
obs = input_dict["obs_flat"].float()
self._last_flat_in = obs.reshape(obs.shape[0], -1)
self._features = self._hidden_layers(self._last_flat_in)
logits = self._logits(self._features) if self._logits else \
self._features
if self.free_log_std:
logits = self._append_free_log_std(logits)
return logits, state
@override(TorchModelV2)
def value_function(self) -> TensorType:
assert self._features is not None, "must call forward() first"
if self._value_branch_separate:
return self._value_branch(
self._value_branch_separate(self._last_flat_in)).squeeze(1)
else:
return self._value_branch(self._features).squeeze(1) | en | 0.690877 | # coding=utf-8 # Copyright 2019 Google LLC # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Additional models, not available in OpenAI baselines. gfootball_impala_cnn is architecture used in the paper (https://arxiv.org/pdf/1907.11180.pdf). It is illustrated in the appendix. It is similar to Large architecture from IMPALA paper; we use 4 big blocks instead of 3 though. # from baselines.common.models import register # import sonnet as snt # import tensorflow.compat.v1 as tf # @register('gfootball_impala_cnn') # def gfootball_impala_cnn(): # def network_fn(frame): # # Convert to floats. # frame = tf.to_float(frame) # frame /= 255 # with tf.variable_scope('convnet'): # conv_out = frame # conv_layers = [(16, 2), (32, 2), (32, 2), (32, 2)] # for i, (num_ch, num_blocks) in enumerate(conv_layers): # # Downscale. # conv_out = snt.Conv2D(num_ch, 3, stride=1, padding='SAME')(conv_out) # conv_out = tf.nn.pool( # conv_out, # window_shape=[3, 3], # pooling_type='MAX', # padding='SAME', # strides=[2, 2]) # # Residual block(s). 
# for j in range(num_blocks): # with tf.variable_scope('residual_%d_%d' % (i, j)): # block_input = conv_out # conv_out = tf.nn.relu(conv_out) # conv_out = snt.Conv2D(num_ch, 3, stride=1, padding='SAME')(conv_out) # conv_out = tf.nn.relu(conv_out) # conv_out = snt.Conv2D(num_ch, 3, stride=1, padding='SAME')(conv_out) # conv_out += block_input # conv_out = tf.nn.relu(conv_out) # conv_out = snt.BatchFlatten()(conv_out) # conv_out = snt.Linear(256)(conv_out) # conv_out = tf.nn.relu(conv_out) # return conv_out # return network_fn Generic fully connected network. # Generate free-floating bias variables for the second half of # the outputs. # Create layers 0 to second-last. # The last layer is adjusted to be of size num_outputs, but it's a # layer with activation. # Finish the layers with the provided sizes (`hiddens`), plus - # iff num_outputs > 0 - a last linear layer of size num_outputs. # Layer to add the log std vars to the state-dependent means. # Build a parallel set of hidden layers for the value net. # Holds the current "base" output (before logits layer). # Holds the last input, in case value branch is separate. | 1.8672 | 2 |
discord/ext/lazy_slash/from_slash.py | GnomedDev/discord-ext-lazy_slash | 2 | 6616309 | from __future__ import annotations
import functools
import inspect
from typing import TYPE_CHECKING, Any, Dict, List, Tuple, Union, cast
import discord
from discord.ext import commands
from discord.ext.commands.view import _quotes as supported_quotes
if TYPE_CHECKING:
from discord.types.interactions import ApplicationCommandInteractionData, ApplicationCommandInteractionDataOption
class _FakeSlashMessage(discord.PartialMessage):
    """A minimal ``discord.Message`` stand-in synthesised from a slash-command
    interaction, so the interaction can be pushed through the regular
    ext.commands parsing pipeline.

    Attributes that a slash interaction can never carry are stubbed with
    inert values; the mention/content helpers are borrowed straight from the
    real ``discord.Message`` class.
    """
    # Fields that cannot come from a slash interaction -- stubbed to None.
    activity = application = edited_at = reference = webhook_id = None
    # NOTE(review): these four names all share one class-level list object;
    # safe only as long as nothing ever mutates them in place -- confirm.
    attachments = components = reactions = stickers = []
    tts = False
    # Borrow the real Message implementations; they only read content/guild/
    # _state, all of which this fake provides.
    raw_mentions = discord.Message.raw_mentions
    clean_content = discord.Message.clean_content
    channel_mentions = discord.Message.channel_mentions
    raw_role_mentions = discord.Message.raw_role_mentions
    raw_channel_mentions = discord.Message.raw_channel_mentions
    # Set by from_interaction(); the interaction invoker.
    author: Union[discord.User, discord.Member]
    @classmethod
    def from_interaction(
        cls, interaction: discord.Interaction, channel: Union[discord.TextChannel, discord.DMChannel, discord.Thread]
    ):
        """Build a fake message in *channel* whose id and author mirror *interaction*."""
        self = cls(channel=channel, id=interaction.id)
        assert interaction.user is not None
        self.author = interaction.user
        return self
    @functools.cached_property
    def mentions(self) -> List[Union[discord.Member, discord.User]]:
        """Resolve raw user-mention ids to Member/User objects (computed once)."""
        client = self._state._get_client()
        if self.guild:
            # Prefer guild members, fall back to the globally cached user.
            ensure_user = lambda id: self.guild.get_member(id) or client.get_user(id) # type: ignore
        else:
            ensure_user = client.get_user
        # Drop ids that resolved to nothing and de-duplicate, preserving order.
        return discord.utils._unique(filter(None, map(ensure_user, self.raw_mentions))) # type: ignore
    @functools.cached_property
    def role_mentions(self) -> List[discord.Role]:
        """Resolve raw role-mention ids to Role objects (computed once); empty in DMs."""
        if self.guild is None:
            return []
        return discord.utils._unique(filter(None, map(self.guild.get_role, self.raw_role_mentions))) # type: ignore
def _quote_string_safe(string: str) -> str:
    """Wrap *string* in the first supported quote pair that does not already
    occur inside it.

    Raises:
        commands.UnexpectedQuoteError: if every known quote pair collides
            with a character already present in *string*.
    """
    for opener, closer in supported_quotes.items():
        collides = opener in string or closer in string
        if not collides:
            return opener + string + closer
    raise commands.UnexpectedQuoteError(string)
def _unwrap_slash_groups(
    data: ApplicationCommandInteractionData,
) -> Tuple[str, Dict[str, ApplicationCommandInteractionDataOption]]:
    """Descend through nested sub-command/group options (types 1 and 2) to the
    leaf command.

    Returns the full space-joined command name (e.g. ``"tag group leaf"``)
    and the leaf command's options keyed by option name.
    """
    full_name = data["name"]
    options: Any = data.get("options") or []
    while True:
        # Find the first sub-command (1) or sub-command-group (2), if any.
        subcommand = None
        for candidate in options:
            if candidate["type"] in (1, 2):
                subcommand = candidate
                break
        if subcommand is None:
            # Leaf reached: the remaining options are the real arguments.
            return full_name, {opt["name"]: opt for opt in options}
        full_name = f"{full_name} {subcommand['name']}"
        options = subcommand.get("options") or []
async def process_slash_commands(bot: commands.Bot, interaction: discord.Interaction):
    """Translate a slash-command interaction into a fake prefixed message and
    dispatch it through the normal ext.commands pipeline.

    Raises CommandNotFound / MissingRequiredFlag / MissingRequiredArgument
    exactly like text-command parsing would.
    """
    if interaction.type != discord.InteractionType.application_command:
        return
    if TYPE_CHECKING:
        interaction.data = cast(ApplicationCommandInteractionData, interaction.data)
    # Flatten sub-command groups into "group sub leaf" plus the leaf options.
    command_name, command_options = _unwrap_slash_groups(interaction.data)
    command = bot.get_command(command_name)
    if command is None:
        raise commands.CommandNotFound(f'Command "{command_name}" is not found')
    # Ensure the interaction channel is usable
    channel = interaction.channel
    if channel is None or isinstance(channel, discord.PartialMessageable):
        if interaction.guild is None:
            assert interaction.user is not None
            channel = await interaction.user.create_dm()
        elif interaction.channel_id is not None:
            channel = await interaction.guild.fetch_channel(interaction.channel_id)
        else:
            return  # cannot do anything without stable channel
    # Make our fake message so we can pass it to ext.commands
    message: discord.Message = _FakeSlashMessage.from_interaction(interaction, channel)  # type: ignore
    message.content = command_name
    # Add arguments to fake message content, in the right order
    ignored_params = []
    for name, param in command.clean_params.items():
        if inspect.isclass(param.annotation) and issubclass(param.annotation, commands.FlagConverter):
            # FlagConverter parameters expand into "<prefix>name<delim>value"
            # pairs appended to the fake message content.
            for name, flag in param.annotation.get_flags().items():
                option = command_options.get(name)
                if option is None:
                    if flag.required:
                        raise commands.MissingRequiredFlag(flag)
                else:
                    prefix = param.annotation.__commands_flag_prefix__
                    delimiter = param.annotation.__commands_flag_delimiter__
                    message.content += f" {prefix}{name}{delimiter}{option['value']}"  # type: ignore
            continue
        option = command_options.get(name)
        if option is None:
            # Option omitted by the user: enforce required params, keep an
            # empty quoted string for plain-str params so positions line up,
            # otherwise remember the param so the converter can be skipped.
            if param.default is param.empty and not command._is_typing_optional(param.annotation):
                raise commands.MissingRequiredArgument(param)
            elif param.annotation is None or param.annotation == str:
                message.content += f" { _quote_string_safe('')}"
            else:
                ignored_params.append(param)  # type: ignore
        elif (
            option["type"] == 3
            and not isinstance(param.annotation, commands.Greedy)
            and param.kind in {param.POSITIONAL_OR_KEYWORD, param.POSITIONAL_ONLY}
        ):
            # String with space in without "consume rest"
            message.content += f" {_quote_string_safe(option['value'])}"
        else:
            message.content += f' {option.get("value", "")}'
    # Prepend one of the bot's real prefixes so get_context parses normally.
    prefix = await bot.get_prefix(message)
    if isinstance(prefix, list):
        prefix = prefix[0]
    message.content = f"{prefix}{message.content}"
    ctx = await bot.get_context(message)
    # Stash slash-specific state for downstream converters / responders.
    ctx._ignored_params = ignored_params  # type: ignore
    ctx.interaction = interaction  # type: ignore
    await bot.invoke(ctx)
| from __future__ import annotations
import functools
import inspect
from typing import TYPE_CHECKING, Any, Dict, List, Tuple, Union, cast
import discord
from discord.ext import commands
from discord.ext.commands.view import _quotes as supported_quotes
if TYPE_CHECKING:
from discord.types.interactions import ApplicationCommandInteractionData, ApplicationCommandInteractionDataOption
class _FakeSlashMessage(discord.PartialMessage):
    """A minimal ``discord.Message`` stand-in synthesised from a slash-command
    interaction, so the interaction can be pushed through the regular
    ext.commands parsing pipeline.

    Attributes that a slash interaction can never carry are stubbed with
    inert values; the mention/content helpers are borrowed straight from the
    real ``discord.Message`` class.
    """
    # Fields that cannot come from a slash interaction -- stubbed to None.
    activity = application = edited_at = reference = webhook_id = None
    # NOTE(review): these four names all share one class-level list object;
    # safe only as long as nothing ever mutates them in place -- confirm.
    attachments = components = reactions = stickers = []
    tts = False
    # Borrow the real Message implementations; they only read content/guild/
    # _state, all of which this fake provides.
    raw_mentions = discord.Message.raw_mentions
    clean_content = discord.Message.clean_content
    channel_mentions = discord.Message.channel_mentions
    raw_role_mentions = discord.Message.raw_role_mentions
    raw_channel_mentions = discord.Message.raw_channel_mentions
    # Set by from_interaction(); the interaction invoker.
    author: Union[discord.User, discord.Member]
    @classmethod
    def from_interaction(
        cls, interaction: discord.Interaction, channel: Union[discord.TextChannel, discord.DMChannel, discord.Thread]
    ):
        """Build a fake message in *channel* whose id and author mirror *interaction*."""
        self = cls(channel=channel, id=interaction.id)
        assert interaction.user is not None
        self.author = interaction.user
        return self
    @functools.cached_property
    def mentions(self) -> List[Union[discord.Member, discord.User]]:
        """Resolve raw user-mention ids to Member/User objects (computed once)."""
        client = self._state._get_client()
        if self.guild:
            # Prefer guild members, fall back to the globally cached user.
            ensure_user = lambda id: self.guild.get_member(id) or client.get_user(id) # type: ignore
        else:
            ensure_user = client.get_user
        # Drop ids that resolved to nothing and de-duplicate, preserving order.
        return discord.utils._unique(filter(None, map(ensure_user, self.raw_mentions))) # type: ignore
    @functools.cached_property
    def role_mentions(self) -> List[discord.Role]:
        """Resolve raw role-mention ids to Role objects (computed once); empty in DMs."""
        if self.guild is None:
            return []
        return discord.utils._unique(filter(None, map(self.guild.get_role, self.raw_role_mentions))) # type: ignore
def _quote_string_safe(string: str) -> str:
    """Wrap *string* in the first supported quote pair that does not already
    occur inside it.

    Raises:
        commands.UnexpectedQuoteError: if every known quote pair collides
            with a character already present in *string*.
    """
    for opener, closer in supported_quotes.items():
        collides = opener in string or closer in string
        if not collides:
            return opener + string + closer
    raise commands.UnexpectedQuoteError(string)
def _unwrap_slash_groups(
    data: ApplicationCommandInteractionData,
) -> Tuple[str, Dict[str, ApplicationCommandInteractionDataOption]]:
    """Descend through nested sub-command/group options (types 1 and 2) to the
    leaf command.

    Returns the full space-joined command name (e.g. ``"tag group leaf"``)
    and the leaf command's options keyed by option name.
    """
    full_name = data["name"]
    options: Any = data.get("options") or []
    while True:
        # Find the first sub-command (1) or sub-command-group (2), if any.
        subcommand = None
        for candidate in options:
            if candidate["type"] in (1, 2):
                subcommand = candidate
                break
        if subcommand is None:
            # Leaf reached: the remaining options are the real arguments.
            return full_name, {opt["name"]: opt for opt in options}
        full_name = f"{full_name} {subcommand['name']}"
        options = subcommand.get("options") or []
async def process_slash_commands(bot: commands.Bot, interaction: discord.Interaction):
    """Translate a slash-command interaction into a fake prefixed message and
    dispatch it through the normal ext.commands pipeline.

    Raises CommandNotFound / MissingRequiredFlag / MissingRequiredArgument
    exactly like text-command parsing would.
    """
    if interaction.type != discord.InteractionType.application_command:
        return
    if TYPE_CHECKING:
        interaction.data = cast(ApplicationCommandInteractionData, interaction.data)
    # Flatten sub-command groups into "group sub leaf" plus the leaf options.
    command_name, command_options = _unwrap_slash_groups(interaction.data)
    command = bot.get_command(command_name)
    if command is None:
        raise commands.CommandNotFound(f'Command "{command_name}" is not found')
    # Ensure the interaction channel is usable
    channel = interaction.channel
    if channel is None or isinstance(channel, discord.PartialMessageable):
        if interaction.guild is None:
            assert interaction.user is not None
            channel = await interaction.user.create_dm()
        elif interaction.channel_id is not None:
            channel = await interaction.guild.fetch_channel(interaction.channel_id)
        else:
            return  # cannot do anything without stable channel
    # Make our fake message so we can pass it to ext.commands
    message: discord.Message = _FakeSlashMessage.from_interaction(interaction, channel)  # type: ignore
    message.content = command_name
    # Add arguments to fake message content, in the right order
    ignored_params = []
    for name, param in command.clean_params.items():
        if inspect.isclass(param.annotation) and issubclass(param.annotation, commands.FlagConverter):
            # FlagConverter parameters expand into "<prefix>name<delim>value"
            # pairs appended to the fake message content.
            for name, flag in param.annotation.get_flags().items():
                option = command_options.get(name)
                if option is None:
                    if flag.required:
                        raise commands.MissingRequiredFlag(flag)
                else:
                    prefix = param.annotation.__commands_flag_prefix__
                    delimiter = param.annotation.__commands_flag_delimiter__
                    message.content += f" {prefix}{name}{delimiter}{option['value']}"  # type: ignore
            continue
        option = command_options.get(name)
        if option is None:
            # Option omitted by the user: enforce required params, keep an
            # empty quoted string for plain-str params so positions line up,
            # otherwise remember the param so the converter can be skipped.
            if param.default is param.empty and not command._is_typing_optional(param.annotation):
                raise commands.MissingRequiredArgument(param)
            elif param.annotation is None or param.annotation == str:
                message.content += f" { _quote_string_safe('')}"
            else:
                ignored_params.append(param)  # type: ignore
        elif (
            option["type"] == 3
            and not isinstance(param.annotation, commands.Greedy)
            and param.kind in {param.POSITIONAL_OR_KEYWORD, param.POSITIONAL_ONLY}
        ):
            # String with space in without "consume rest"
            message.content += f" {_quote_string_safe(option['value'])}"
        else:
            message.content += f' {option.get("value", "")}'
    # Prepend one of the bot's real prefixes so get_context parses normally.
    prefix = await bot.get_prefix(message)
    if isinstance(prefix, list):
        prefix = prefix[0]
    message.content = f"{prefix}{message.content}"
    ctx = await bot.get_context(message)
    # Stash slash-specific state for downstream converters / responders.
    ctx._ignored_params = ignored_params  # type: ignore
    ctx.interaction = interaction  # type: ignore
    await bot.invoke(ctx)
| en | 0.639739 | # type: ignore # type: ignore # type: ignore # Ensure the interaction channel is usable # cannot do anything without stable channel # Make our fake message so we can pass it to ext.commands # type: ignore # Add arguments to fake message content, in the right order # type: ignore # type: ignore # String with space in without "consume rest" # type: ignore # type: ignore | 2.27809 | 2 |
plankton_core/dataimports_parsed_format.py | planktontoolbox/plankton-toolbox | 5 | 6616310 | <filename>plankton_core/dataimports_parsed_format.py<gh_stars>1-10
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# Project: http://plankton-toolbox.org
# Copyright (c) 2010-2018 SMHI, Swedish Meteorological and Hydrological Institute
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
import dateutil.parser
import toolbox_utils
import plankton_core
class ParsedFormat(plankton_core.FormatBase):
    """Abstract base class for Excel-driven ("parsed") dataset import formats.

    Parser commands written in the Excel format sheets are translated to
    Python source by replace_method_keywords() and compiled/stored by
    append_parser_command().  The generated code calls back into the
    self._as_*() / self._get_*() helpers at import time; those helpers read
    the current data row (self._row) through the current header
    (self._header), both maintained by FormatBase.
    """
    def __init__(self):
        """ Abstract class for parsed import formats. """
        super(ParsedFormat, self).__init__()
        # Each entry is {'command_string': <source text>, 'command': <code object>}.
        self._parsercommands = []

    def replace_method_keywords(self, parse_command, node_level = None, view_format = None):
        """ Mapping between Excel parser code and python code.

        parse_command: raw command text from the format sheet.
        node_level: 'function_sample' or 'function_variable'; selects the
            node variable targeted by $CreateVariable/$CopyVariable.
        view_format: optional value type for the short 'Column:' notation
            ('text', 'integer', 'float' or 'sample_date').
        Returns the translated Python source as a string.
        """
        command = str(parse_command.strip())
        #
        if 'Column:' in command:
            # An easier notation for "$Text('Example column')": "Column:Example column".
            # For simple column name mapping based on the format column.
            command = str(command.replace('Column:', '').strip())
            if view_format is None:
                command = 'self._as_text("' + command + '")'
            elif view_format == 'text':
                command = 'self._as_text("' + command + '")'
            elif view_format == 'integer':
                command = 'self._as_integer("' + command + '")'
            elif view_format == 'float':
                command = 'self._as_float("' + command + '")'
            elif view_format == 'sample_date':
                command = 'self._as_date("' + command + '")'
            else:
                # Unknown view formats fall back to plain text.
                command = 'self._as_text("' + command + '")'
        #
        elif '$' in command:
            # Mapping for more advanced alternatives.
            command = command.replace('$Text(', 'self._as_text(')
            command = command.replace('$Integer(', 'self._as_integer(')
            command = command.replace('$Float(', 'self._as_float(')
            command = command.replace('$Date(', 'self._as_date(')
            # Bug fix: previously mapped to 'self._taxon_info_by_key(', a
            # method that does not exist; the helper below is named
            # _get_taxon_info_by_key().
            command = command.replace('$GetTaxonInfo(', 'self._get_taxon_info_by_key(')
            command = command.replace('$GetSizeClassInfo(', 'self._get_sizeclass_info_by_key(')
            command = command.replace('$GetSizeclassInfo(', 'self._get_sizeclass_info_by_key(') # Alternative spelling.
            command = command.replace('$GetTrophicType(', 'self._get_trophic_type(')
            command = command.replace('$GetPlanktonGroup(', 'self._get_plankton_group(')
        #
        if node_level == 'function_sample':
            command = command.replace('$CreateVariable(', 'self._create_variable(currentsample, ')
        if node_level == 'function_variable':
            command = command.replace('$CopyVariable(', 'self._copy_variable(currentvariable, ')
        ### TODO: Also replace:
        # $Year( --> self._asYear(
        # $Datetime( --> self._asDatetime(
        # $Time( --> self._asTime(
        # $Int( --> self._asInt(
        # $Position( --> self._asPosition(
        # $Station( --> self._asStation(
        # $Param( --> self._asParam(
        #
        return command

    def append_parser_command(self, command_string):
        """Compile *command_string* and store it for later execution."""
        commanddict = {}
        commanddict['command_string'] = command_string
        commanddict['command'] = compile(command_string, '', 'exec')
        # For development:
        print('Parser command: ' + command_string)
        self._parsercommands.append(commanddict)

    def _as_text(self, column_name):
        """Return the current row's value in *column_name* as text, or ''.
        To be called from Excel-based parser. """
        column_name = str(column_name)
        if column_name in self._header:
            index = self._header.index(column_name)
            return self._row[index] if len(self._row) > index else ''
        else:
            return ''

    def _as_integer(self, column_name):
        """Return the current row's value in *column_name* as an int.
        On conversion failure the raw value is returned and a warning logged.
        To be called from Excel-based parser. """
        column_name = str(column_name)
        if column_name in self._header:
            index = self._header.index(column_name)
            if len(self._row) > index:
                try:
                    value = self._row[index]
                    if value:
                        # Drop thousands separators; accept decimal comma.
                        value = value.replace(' ', '').replace(',', '.')
                        return int(round(float(value)))
                except Exception:
                    toolbox_utils.Logging().warning('Parser: Failed to convert to integer: ' + self._row[index])
                    return self._row[index]
        return ''

    def _as_float(self, column_name):
        """Return the current row's value in *column_name* as a float.
        On conversion failure the raw value is returned and a warning logged.
        To be called from Excel-based parser. """
        column_name = str(column_name)
        if column_name in self._header:
            index = self._header.index(column_name)
            if len(self._row) > index:
                try:
                    value = self._row[index]
                    if value:
                        # Drop thousands separators; accept decimal comma.
                        value = value.replace(' ', '').replace(',', '.')
                        return float(value)
                except Exception:
                    toolbox_utils.Logging().warning('Parser: Failed to convert to float: ' + self._row[index])
                    return self._row[index]
        return ''

    def _as_date(self, column_name):
        """ Reformat to match the ISO format. (2000-01-01)
        On parse failure the raw value is returned and a warning logged.
        To be called from Excel-based parser. """
        column_name = str(column_name)
        if column_name in self._header:
            index = self._header.index(column_name)
            if len(self._row) > index:
                try:
                    value = dateutil.parser.parse(self._row[index])
                    if value:
                        return value.strftime('%Y-%m-%d')
                except Exception:
                    toolbox_utils.Logging().warning('Parser: Failed to convert to date: ' + self._row[index])
                    return self._row[index]
        return ''

    def _get_taxon_info_by_key(self, scientific_name, key):
        """Look up *key* for the taxon *scientific_name* in the species lists.
        To be called from Excel-based parser. """
        scientific_name = str(scientific_name)
        key = str(key)
        return plankton_core.Species().get_taxon_value(scientific_name, key)

    def _get_sizeclass_info_by_key(self, scientific_name, size_class, key):
        """Look up *key* for the given taxon/size-class pair, or ''.
        To be called from Excel-based parser. """
        scientific_name = str(scientific_name)
        key = str(key)
        size_class = str(size_class)
        value = plankton_core.Species().get_bvol_value(scientific_name, size_class, key)
        if value:
            return value
        return ''

    def _get_trophic_type(self, scientific_name, size_class, reported_trophic_type = ''):
        """Resolve trophic type: size-class value first, then taxon value,
        finally the reported value as a fallback.
        To be called from Excel-based parser. """
        scientific_name = str(scientific_name)
        size_class = str(size_class)
        reported_trophic_type = str(reported_trophic_type)
        value = plankton_core.Species().get_bvol_value(scientific_name, size_class, 'trophic_type')
        if not value:
            value = plankton_core.Species().get_taxon_value(scientific_name, 'trophic_type')
        if not value:
            value = reported_trophic_type
        #
        return value

    def _get_plankton_group(self, scientific_name):
        """Return the plankton group for *scientific_name*.
        To be called from Excel-based parser. """
        scientific_name = str(scientific_name)
        return plankton_core.Species().get_plankton_group_from_taxon_name(scientific_name)

    def _to_station(self, current_node, station_name, **kwargs):
        """Store *station_name* on *current_node*.
        To be called from Excel-based parser. """
        # TODO: For test:
        station_name = str(station_name)
        current_node.add_data('station_name', station_name)

    def _to_position(self, current_node, latitude, longitude, **kwargs):
        """ To be called from Excel-based parser.
        NOTE(review): currently a stub -- the position is converted but never
        stored on the node; confirm intended behaviour. """
        latitude = str(latitude)
        longitude = str(longitude)
        # print('DEBUG: _to_position: ' + latitude + ' ' + longitude)

    def _create_variable(self, current_node, **kwargs):
        """Create a variable node (kwargs p/v/u = parameter/value/unit) under
        a sample node; a visit node first gets a new intermediate sample.
        To be called from Excel-based parser. """
        if isinstance(current_node, plankton_core.VisitNode):
            newsample = plankton_core.SampleNode()
            current_node.add_child(newsample)
            variable = plankton_core.VariableNode()
            newsample.add_child(variable)
            variable.add_data('parameter', kwargs['p'])
            variable.add_data('value', kwargs['v'])
            #variable.add_data('value_float', kwargs['v'])
            variable.add_data('unit', kwargs['u'])
        if isinstance(current_node, plankton_core.SampleNode):
            variable = plankton_core.VariableNode()
            current_node.add_child(variable)
            variable.add_data('parameter', kwargs['p'])
            variable.add_data('value', kwargs['v'])
            #variable.add_data('value_float', kwargs['v'])
            variable.add_data('unit', kwargs['u'])

    def _copy_variable(self, current_node, **kwargs):
        """Clone *current_node* and overwrite parameter/value/unit (p/v/u).
        To be called from Excel-based parser. """
        if isinstance(current_node, plankton_core.VariableNode):
            variable = current_node.clone()
            variable.add_data('parameter', kwargs['p'])
            variable.add_data('value', kwargs['v'])
            #variable.add_data('value_float', kwargs['v'])
            variable.add_data('unit', kwargs['u'])

    def _modify_variable(self, current_node, **kwargs):
        """Overwrite parameter/value/unit (p/v/u) on *current_node* in place.
        To be called from Excel-based parser. """
        if isinstance(current_node, plankton_core.VariableNode):
            current_node.add_data('parameter', kwargs['p'])
            current_node.add_data('value', kwargs['v'])
            #current_node.add_data('value_float', kwargs['v'])
            current_node.add_data('unit', kwargs['u'])
| <filename>plankton_core/dataimports_parsed_format.py<gh_stars>1-10
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# Project: http://plankton-toolbox.org
# Copyright (c) 2010-2018 SMHI, Swedish Meteorological and Hydrological Institute
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
import dateutil.parser
import toolbox_utils
import plankton_core
class ParsedFormat(plankton_core.FormatBase):
    """Abstract base class for Excel-driven ("parsed") dataset import formats.

    Parser commands written in the Excel format sheets are translated to
    Python source by replace_method_keywords() and compiled/stored by
    append_parser_command().  The generated code calls back into the
    self._as_*() / self._get_*() helpers at import time; those helpers read
    the current data row (self._row) through the current header
    (self._header), both maintained by FormatBase.
    """
    def __init__(self):
        """ Abstract class for parsed import formats. """
        super(ParsedFormat, self).__init__()
        # Each entry is {'command_string': <source text>, 'command': <code object>}.
        self._parsercommands = []

    def replace_method_keywords(self, parse_command, node_level = None, view_format = None):
        """ Mapping between Excel parser code and python code.

        parse_command: raw command text from the format sheet.
        node_level: 'function_sample' or 'function_variable'; selects the
            node variable targeted by $CreateVariable/$CopyVariable.
        view_format: optional value type for the short 'Column:' notation
            ('text', 'integer', 'float' or 'sample_date').
        Returns the translated Python source as a string.
        """
        command = str(parse_command.strip())
        #
        if 'Column:' in command:
            # An easier notation for "$Text('Example column')": "Column:Example column".
            # For simple column name mapping based on the format column.
            command = str(command.replace('Column:', '').strip())
            if view_format is None:
                command = 'self._as_text("' + command + '")'
            elif view_format == 'text':
                command = 'self._as_text("' + command + '")'
            elif view_format == 'integer':
                command = 'self._as_integer("' + command + '")'
            elif view_format == 'float':
                command = 'self._as_float("' + command + '")'
            elif view_format == 'sample_date':
                command = 'self._as_date("' + command + '")'
            else:
                # Unknown view formats fall back to plain text.
                command = 'self._as_text("' + command + '")'
        #
        elif '$' in command:
            # Mapping for more advanced alternatives.
            command = command.replace('$Text(', 'self._as_text(')
            command = command.replace('$Integer(', 'self._as_integer(')
            command = command.replace('$Float(', 'self._as_float(')
            command = command.replace('$Date(', 'self._as_date(')
            # Bug fix: previously mapped to 'self._taxon_info_by_key(', a
            # method that does not exist; the helper below is named
            # _get_taxon_info_by_key().
            command = command.replace('$GetTaxonInfo(', 'self._get_taxon_info_by_key(')
            command = command.replace('$GetSizeClassInfo(', 'self._get_sizeclass_info_by_key(')
            command = command.replace('$GetSizeclassInfo(', 'self._get_sizeclass_info_by_key(') # Alternative spelling.
            command = command.replace('$GetTrophicType(', 'self._get_trophic_type(')
            command = command.replace('$GetPlanktonGroup(', 'self._get_plankton_group(')
        #
        if node_level == 'function_sample':
            command = command.replace('$CreateVariable(', 'self._create_variable(currentsample, ')
        if node_level == 'function_variable':
            command = command.replace('$CopyVariable(', 'self._copy_variable(currentvariable, ')
        ### TODO: Also replace:
        # $Year( --> self._asYear(
        # $Datetime( --> self._asDatetime(
        # $Time( --> self._asTime(
        # $Int( --> self._asInt(
        # $Position( --> self._asPosition(
        # $Station( --> self._asStation(
        # $Param( --> self._asParam(
        #
        return command

    def append_parser_command(self, command_string):
        """Compile *command_string* and store it for later execution."""
        commanddict = {}
        commanddict['command_string'] = command_string
        commanddict['command'] = compile(command_string, '', 'exec')
        # For development:
        print('Parser command: ' + command_string)
        self._parsercommands.append(commanddict)

    def _as_text(self, column_name):
        """Return the current row's value in *column_name* as text, or ''.
        To be called from Excel-based parser. """
        column_name = str(column_name)
        if column_name in self._header:
            index = self._header.index(column_name)
            return self._row[index] if len(self._row) > index else ''
        else:
            return ''

    def _as_integer(self, column_name):
        """Return the current row's value in *column_name* as an int.
        On conversion failure the raw value is returned and a warning logged.
        To be called from Excel-based parser. """
        column_name = str(column_name)
        if column_name in self._header:
            index = self._header.index(column_name)
            if len(self._row) > index:
                try:
                    value = self._row[index]
                    if value:
                        # Drop thousands separators; accept decimal comma.
                        value = value.replace(' ', '').replace(',', '.')
                        return int(round(float(value)))
                except Exception:
                    toolbox_utils.Logging().warning('Parser: Failed to convert to integer: ' + self._row[index])
                    return self._row[index]
        return ''

    def _as_float(self, column_name):
        """Return the current row's value in *column_name* as a float.
        On conversion failure the raw value is returned and a warning logged.
        To be called from Excel-based parser. """
        column_name = str(column_name)
        if column_name in self._header:
            index = self._header.index(column_name)
            if len(self._row) > index:
                try:
                    value = self._row[index]
                    if value:
                        # Drop thousands separators; accept decimal comma.
                        value = value.replace(' ', '').replace(',', '.')
                        return float(value)
                except Exception:
                    toolbox_utils.Logging().warning('Parser: Failed to convert to float: ' + self._row[index])
                    return self._row[index]
        return ''

    def _as_date(self, column_name):
        """ Reformat to match the ISO format. (2000-01-01)
        On parse failure the raw value is returned and a warning logged.
        To be called from Excel-based parser. """
        column_name = str(column_name)
        if column_name in self._header:
            index = self._header.index(column_name)
            if len(self._row) > index:
                try:
                    value = dateutil.parser.parse(self._row[index])
                    if value:
                        return value.strftime('%Y-%m-%d')
                except Exception:
                    toolbox_utils.Logging().warning('Parser: Failed to convert to date: ' + self._row[index])
                    return self._row[index]
        return ''

    def _get_taxon_info_by_key(self, scientific_name, key):
        """Look up *key* for the taxon *scientific_name* in the species lists.
        To be called from Excel-based parser. """
        scientific_name = str(scientific_name)
        key = str(key)
        return plankton_core.Species().get_taxon_value(scientific_name, key)

    def _get_sizeclass_info_by_key(self, scientific_name, size_class, key):
        """Look up *key* for the given taxon/size-class pair, or ''.
        To be called from Excel-based parser. """
        scientific_name = str(scientific_name)
        key = str(key)
        size_class = str(size_class)
        value = plankton_core.Species().get_bvol_value(scientific_name, size_class, key)
        if value:
            return value
        return ''

    def _get_trophic_type(self, scientific_name, size_class, reported_trophic_type = ''):
        """Resolve trophic type: size-class value first, then taxon value,
        finally the reported value as a fallback.
        To be called from Excel-based parser. """
        scientific_name = str(scientific_name)
        size_class = str(size_class)
        reported_trophic_type = str(reported_trophic_type)
        value = plankton_core.Species().get_bvol_value(scientific_name, size_class, 'trophic_type')
        if not value:
            value = plankton_core.Species().get_taxon_value(scientific_name, 'trophic_type')
        if not value:
            value = reported_trophic_type
        #
        return value

    def _get_plankton_group(self, scientific_name):
        """Return the plankton group for *scientific_name*.
        To be called from Excel-based parser. """
        scientific_name = str(scientific_name)
        return plankton_core.Species().get_plankton_group_from_taxon_name(scientific_name)

    def _to_station(self, current_node, station_name, **kwargs):
        """Store *station_name* on *current_node*.
        To be called from Excel-based parser. """
        # TODO: For test:
        station_name = str(station_name)
        current_node.add_data('station_name', station_name)

    def _to_position(self, current_node, latitude, longitude, **kwargs):
        """ To be called from Excel-based parser.
        NOTE(review): currently a stub -- the position is converted but never
        stored on the node; confirm intended behaviour. """
        latitude = str(latitude)
        longitude = str(longitude)
        # print('DEBUG: _to_position: ' + latitude + ' ' + longitude)

    def _create_variable(self, current_node, **kwargs):
        """Create a variable node (kwargs p/v/u = parameter/value/unit) under
        a sample node; a visit node first gets a new intermediate sample.
        To be called from Excel-based parser. """
        if isinstance(current_node, plankton_core.VisitNode):
            newsample = plankton_core.SampleNode()
            current_node.add_child(newsample)
            variable = plankton_core.VariableNode()
            newsample.add_child(variable)
            variable.add_data('parameter', kwargs['p'])
            variable.add_data('value', kwargs['v'])
            #variable.add_data('value_float', kwargs['v'])
            variable.add_data('unit', kwargs['u'])
        if isinstance(current_node, plankton_core.SampleNode):
            variable = plankton_core.VariableNode()
            current_node.add_child(variable)
            variable.add_data('parameter', kwargs['p'])
            variable.add_data('value', kwargs['v'])
            #variable.add_data('value_float', kwargs['v'])
            variable.add_data('unit', kwargs['u'])

    def _copy_variable(self, current_node, **kwargs):
        """Clone *current_node* and overwrite parameter/value/unit (p/v/u).
        To be called from Excel-based parser. """
        if isinstance(current_node, plankton_core.VariableNode):
            variable = current_node.clone()
            variable.add_data('parameter', kwargs['p'])
            variable.add_data('value', kwargs['v'])
            #variable.add_data('value_float', kwargs['v'])
            variable.add_data('unit', kwargs['u'])

    def _modify_variable(self, current_node, **kwargs):
        """Overwrite parameter/value/unit (p/v/u) on *current_node* in place.
        To be called from Excel-based parser. """
        if isinstance(current_node, plankton_core.VariableNode):
            current_node.add_data('parameter', kwargs['p'])
            current_node.add_data('value', kwargs['v'])
            #current_node.add_data('value_float', kwargs['v'])
            current_node.add_data('unit', kwargs['u'])
| en | 0.590869 | #!/usr/bin/python3 # -*- coding:utf-8 -*- # Project: http://plankton-toolbox.org # Copyright (c) 2010-2018 SMHI, Swedish Meteorological and Hydrological Institute # License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit). Abstract class for parsed import formats. # Mapping between Excel parser code and python code. # # An easier notation for "$Text('Example column')": "Column:Example column". # For simple column name mapping based on the format column. # # Mapping for more advanced alternatives. # Alternative spelling. # else: # # For hard-coded values. # command = ''' + str(command.strip()) + ''" # ### TODO: Also replace: # $Text( --> self._as_text( # $Year( --> self._asYear( # $Datetime( --> self._asDatetime( # $Date( --> self._as_date( # $Time( --> self._asTime( # $Int( --> self._asInt( # $Float( --> self._as_float( # $Position( --> self._asPosition( # $Station( --> self._asStation( # $Param( --> self._asParam( # # For development: To be called from Excel-based parser. To be called from Excel-based parser. To be called from Excel-based parser. Reformat to match the ISO format. (2000-01-01)
To be called from Excel-based parser. To be called from Excel-based parser. To be called from Excel-based parser. To be called from Excel-based parser. # To be called from Excel-based parser. To be called from Excel-based parser. # TODO: For test: To be called from Excel-based parser. # print('DEBUG: _to_position: ' + latitude + ' ' + longitude) To be called from Excel-based parser. #variable.add_data('value_float', kwargs['v']) #variable.add_data('value_float', kwargs['v']) To be called from Excel-based parser. #variable.add_data('value_float', kwargs['v']) To be called from Excel-based parser. #current_node.add_data('value_float', kwargs['v']) | 2.488338 | 2 |
saunter/ConfigWrapper.py | timgates42/py.saunter | 50 | 6616311 | <reponame>timgates42/py.saunter
# Copyright 2011 Element 34
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
=============
ConfigWrapper
=============
"""
import os
import os.path
import sys
import yaml
class ConfigWrapper(object):
    """
    Singleton reference to the config information.

    Every ConfigWrapper() call returns the same instance, which holds the
    merged contents of all ``*.yaml`` files found under the project's
    ``conf/`` directory.  Mapping-style access (``cfg["key"]``,
    ``"key" in cfg``) reads and writes the shared data dictionary.
    """
    # singleton instance shared by every ConfigWrapper() call
    _instance = None

    def __new__(cls, *args, **kwargs):
        if not cls._instance:
            # object.__new__ takes no extra arguments on Python 3, so only
            # the class is forwarded (previously *args/**kwargs were passed,
            # which raises TypeError when arguments are supplied).
            cls._instance = super(ConfigWrapper, cls).__new__(cls)
            cls._instance._data = {}
        return cls._instance

    def __str__(self):
        return yaml.dump(self._data, default_flow_style=False)

    def __getitem__(self, key):
        return self._data[key]

    def __setitem__(self, key, value):
        self._data[key] = value

    def __contains__(self, item):
        return item in self._data

    def configure(self, config="saunter.yaml"):
        """Load every .yaml file below <saunter.base>/conf into self._data.

        Files in the conf/ root become top-level sections named after the
        file (minus the .yaml suffix); files in sub-directories are nested
        under the sub-directory name.  Existing sections are merged, with
        freshly loaded values winning.  Exits the process when
        conf/<config> does not exist.
        """
        if not os.path.exists(os.path.join("conf", config)):
            print("Could not find %s; are you sure you remembered to create one?" % os.path.join("conf", config))
            sys.exit(1)

        # this should exist since configure() is only called in main.py
        config_dir = os.path.join(self._data["saunter"]["base"], "conf")
        for root, dirs, files in os.walk(config_dir):
            for f in files:
                if not f.endswith(".yaml"):
                    continue
                file_path = os.path.join(root, f)
                relative_path = file_path[len(config_dir) + 1:]
                head, tail = os.path.split(relative_path)
                section_name = f[:-5]  # strip the ".yaml" suffix
                # safe_load: config data needs no arbitrary-object
                # construction, and yaml.load without a Loader argument is an
                # error in PyYAML >= 6.  "with" guarantees the handle closes
                # even when parsing fails.
                with open(file_path, "r") as handle:
                    loaded = yaml.safe_load(handle)
                if head:
                    section = self._data.setdefault(head, {})
                else:
                    section = self._data
                if section_name in section:
                    # dict_items cannot be concatenated with "+" on Python 3;
                    # merge via update(), with new values overriding old.
                    merged = dict(section[section_name])
                    merged.update(loaded)
                    section[section_name] = merged
                else:
                    section[section_name] = loaded
| # Copyright 2011 Element 34
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
=============
ConfigWrapper
=============
"""
import os
import os.path
import sys
import yaml
class ConfigWrapper(object):
    """
    Singleton reference to the config information.

    Every ConfigWrapper() call returns the same instance, which holds the
    merged contents of all ``*.yaml`` files found under the project's
    ``conf/`` directory.  Mapping-style access (``cfg["key"]``,
    ``"key" in cfg``) reads and writes the shared data dictionary.
    """
    # singleton instance shared by every ConfigWrapper() call
    _instance = None

    def __new__(cls, *args, **kwargs):
        if not cls._instance:
            # object.__new__ takes no extra arguments on Python 3, so only
            # the class is forwarded (previously *args/**kwargs were passed,
            # which raises TypeError when arguments are supplied).
            cls._instance = super(ConfigWrapper, cls).__new__(cls)
            cls._instance._data = {}
        return cls._instance

    def __str__(self):
        return yaml.dump(self._data, default_flow_style=False)

    def __getitem__(self, key):
        return self._data[key]

    def __setitem__(self, key, value):
        self._data[key] = value

    def __contains__(self, item):
        return item in self._data

    def configure(self, config="saunter.yaml"):
        """Load every .yaml file below <saunter.base>/conf into self._data.

        Files in the conf/ root become top-level sections named after the
        file (minus the .yaml suffix); files in sub-directories are nested
        under the sub-directory name.  Existing sections are merged, with
        freshly loaded values winning.  Exits the process when
        conf/<config> does not exist.
        """
        if not os.path.exists(os.path.join("conf", config)):
            print("Could not find %s; are you sure you remembered to create one?" % os.path.join("conf", config))
            sys.exit(1)

        # this should exist since configure() is only called in main.py
        config_dir = os.path.join(self._data["saunter"]["base"], "conf")
        for root, dirs, files in os.walk(config_dir):
            for f in files:
                if not f.endswith(".yaml"):
                    continue
                file_path = os.path.join(root, f)
                relative_path = file_path[len(config_dir) + 1:]
                head, tail = os.path.split(relative_path)
                section_name = f[:-5]  # strip the ".yaml" suffix
                # safe_load: config data needs no arbitrary-object
                # construction, and yaml.load without a Loader argument is an
                # error in PyYAML >= 6.  "with" guarantees the handle closes
                # even when parsing fails.
                with open(file_path, "r") as handle:
                    loaded = yaml.safe_load(handle)
                if head:
                    section = self._data.setdefault(head, {})
                else:
                    section = self._data
                if section_name in section:
                    # dict_items cannot be concatenated with "+" on Python 3;
                    # merge via update(), with new values overriding old.
                    merged = dict(section[section_name])
                    merged.update(loaded)
                    section[section_name] = merged
                else:
                    section[section_name] = loaded
aws-cfn-control/awscfnctl/getec2keys.py | veloduff/aws-cfn-control | 39 | 6616312 | <reponame>veloduff/aws-cfn-control
#!/usr/bin/env python
#
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under the License.
#
import boto3
# Print the name of every EC2 key pair visible to the caller's account.
ec2_client = boto3.client('ec2')
key_pairs = ec2_client.describe_key_pairs()['KeyPairs']
for key_pair in key_pairs:
    print(key_pair['KeyName'])
| #!/usr/bin/env python
#
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under the License.
#
import boto3
ec2 = boto3.client('ec2')
response = ec2.describe_key_pairs()
for pair in (response['KeyPairs']):
print(pair['KeyName']) | en | 0.875607 | #!/usr/bin/env python # # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file # except in compliance with the License. A copy of the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is distributed on an "AS IS" # BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under the License. # | 2.88507 | 3 |
webapp/game.py | JoshuaGud777/AppleQuest | 0 | 6616313 | '''game.py for applequest.fallenofftheedge.com'''
if __name__ == '__main__':
import library as lib
else:
import webapp.library as lib
import cgitb
cgitb.enable()
def html_print(value='', print_data='', newcookie=''):
    '''Prints the HTML page to the client, substituting the template
    variables:
    $$ printdata $$
    $$ command $$
    $$ newcookie $$
    $$ oldcookie $$'''
    # The original version wrapped these substitutions in always-true
    # conditions (`if True:` / `if True is not False:`) with unreachable
    # else-branches; that dead scaffolding has been removed.
    html = lib.get_html(lib.HTML_DIR + 'game.html')
    # cookie_read() always returns a printable string (a placeholder when
    # the client sent no cookies), so the old-cookie slot is filled directly.
    html = html.replace('$$oldcookie$$$', cookie_read())
    # str('') == '', so an unset new cookie simply blanks its slot.
    html = html.replace('$$newcookie$$$', str(newcookie))
    # Empty strings blank their slots, matching the original behaviour.
    html = html.replace('$$printdata$$', print_data)
    html = html.replace('$$command$$', value)
    html = html.replace('$$printcommand$$', '')
    print(html)
def cookie_read():
    '''Return the cookies sent in the request headers as a printable
    string, or a placeholder message when the client sent none.'''
    cookies = lib.get_cookies()
    if cookies is None:
        return 'No saved Cookies'
    return str(cookies)
def main():
    '''CGI entry point: reads the command/newid form fields, optionally
    renews the session cookie, and renders the game page.'''
    lib.open_conn(lib.DB_DIR + 'AppleQuest.db')
    print_data = ''
    newcookie = ''
    cookie_read()
    form = lib.get_cgi_data()
    command = form.getfirst("command")
    renew = form.getfirst("newid")
    # Normalise the command to a string so the comparisons below are safe.
    if command is None:
        command = ''
    elif type(command) != str:
        command = str(command)
    # Client asked for a fresh session id: attempt the renewal and, on
    # success, prepare a replacement cookie for the response headers.
    if renew == 'true':
        cookies = lib.get_cookies()
        sessioninfo = lib.renew_session_id(cookies['id'].value,
                                           cookies['username'].value)
        # A string or False result from renew_session_id is treated as failure.
        if type(sessioninfo) == str or sessioninfo is False:
            print_data += 'Could not renew\n'
        else:
            newcookie = lib.cookie_wright(sessioninfo[0], sessioninfo[1],
                                          sessioninfo[2],)
    # Map client command codes to the response codes shown on the page.
    if command == '301':
        print_data += '103\n'
    elif command == '302':
        print_data += '203\n'
    elif command == '303':
        print_data += '303\n'
    else:
        print_data += '003\n'
    lib.print_header(newcookie)
    html_print(command, print_data, newcookie)
    lib.save_close_conn()
if __name__ == '__main__':
main()
| '''game.py for applequest.fallenofftheedge.com'''
if __name__ == '__main__':
import library as lib
else:
import webapp.library as lib
import cgitb
cgitb.enable()
def html_print(value='', print_data='', newcookie=''):
'''Prints the HTML to the client with varibals
$$ printdata $$
$$ command $$
$$ newcookie $$
$$ oldcookie $$'''
html = lib.get_html(lib.HTML_DIR + 'game.html')
if True:
html = html.replace('$$oldcookie$$$', cookie_read())
else:
html = html.replace('$$oldcookie$$$', '')
if newcookie != '':
html = html.replace('$$newcookie$$$', str(newcookie))
else:
html = html.replace('$$newcookie$$$', '')
if print_data != '':
html = html.replace('$$printdata$$', print_data)
else:
html = html.replace('$$printdata$$', '')
if value != '':
html = html.replace('$$command$$', value)
else:
html = html.replace('$$command$$', '')
if True is not False:
html = html.replace('$$printcommand$$', '')
else:
print('This is not the case you are looking for!')
print('P.S. The world is about to end!!!')
print(html)
def cookie_read():
'''Reads the cookies sent in the request headers and prints the back
to the client'''
cookie = lib.get_cookies()
if cookie is None:
return 'No saved Cookies'
else:
return str(cookie)
def main():
'''main'''
lib.open_conn(lib.DB_DIR + 'AppleQuest.db')
print_data = ''
newcookie = ''
cookie_read()
form = lib.get_cgi_data()
command = form.getfirst("command")
renew = form.getfirst("newid")
if command is None:
command = ''
elif type(command) != str:
command = str(command)
if renew == 'true':
cookies = lib.get_cookies()
sessioninfo = lib.renew_session_id(cookies['id'].value,
cookies['username'].value)
if type(sessioninfo) == str or sessioninfo is False:
print_data += 'Could not renew\n'
else:
newcookie = lib.cookie_wright(sessioninfo[0], sessioninfo[1],
sessioninfo[2],)
if command == '301':
print_data += '103\n'
elif command == '302':
print_data += '203\n'
elif command == '303':
print_data += '303\n'
else:
print_data += '003\n'
lib.print_header(newcookie)
html_print(command, print_data, newcookie)
lib.save_close_conn()
if __name__ == '__main__':
main()
| en | 0.727011 | game.py for applequest.fallenofftheedge.com Prints the HTML to the client with varibals $$ printdata $$ $$ command $$ $$ newcookie $$ $$ oldcookie $$ Reads the cookies sent in the request headers and prints the back to the client main | 2.833426 | 3 |
devices/migrations/0021_city.py | ticotheps/laker-tech-crm | 0 | 6616314 | <filename>devices/migrations/0021_city.py
# Generated by Django 3.2.5 on 2021-07-22 13:36
from django.db import migrations, models
class Migration(migrations.Migration):
    # Applies on top of the previous devices migration.
    dependencies = [
        ('devices', '0020_auto_20210721_1007'),
    ]
    # Creates the City model: an auto big-int PK plus a unique,
    # optional (blank-allowed) name of up to 50 characters.
    operations = [
        migrations.CreateModel(
            name='City',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=50, unique=True)),
            ],
            options={
                'verbose_name': 'City',
                'verbose_name_plural': 'Cities',
            },
        ),
    ]
| <filename>devices/migrations/0021_city.py
# Generated by Django 3.2.5 on 2021-07-22 13:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('devices', '0020_auto_20210721_1007'),
]
operations = [
migrations.CreateModel(
name='City',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=50, unique=True)),
],
options={
'verbose_name': 'City',
'verbose_name_plural': 'Cities',
},
),
]
| en | 0.813275 | # Generated by Django 3.2.5 on 2021-07-22 13:36 | 1.714876 | 2 |
python-para-zumbis/Lista 2/exercicio02_josebarbosa_01.py | kidchenko/playground | 4 | 6616315 | a = int(input('Digite o primeiro lado do triangulo: '))
b = int(input('Digite o segundo lado do triangulo: '))
c = int(input('Digite o terceiro lado do triangulo: '))
# Triangle inequality: no side may exceed the sum of the other two.
# Fixes original typo in the middle test: "b > b + c" -> "b > a + c"
# (the old form was always false, so invalid triangles with a long
# side b were misclassified as triangles).
if a > b + c or b > a + c or c > a + b:
    print('Nao e um triangulo')
else:
    if a == b == c:
        print('Equilatero')
    elif a == b or b == c or a == c:
        print('Isosceles')
    else:
        print('Escaleno')
| a = int(input('Digite o primeiro lado do triangulo: '))
b = int(input('Digite o segundo lado do triangulo: '))
c = int(input('Digite o terceiro lado do triangulo: '))
# Triangle inequality: no side may exceed the sum of the other two.
# Fixes original typo in the middle test: "b > b + c" -> "b > a + c".
if a > b + c or b > a + c or c > a + b:
    print('Nao e um triangulo')
else:
    if a == b == c:
        print('Equilatero')
    elif a == b or b == c or a == c:
        print('Isosceles')
    else:
        print('Escaleno')
| none | 1 | 4.009719 | 4 | |
2017/day19/day19.py | mdelmage/advent2019 | 2 | 6616316 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Dictionaries that know how to change velocity left or right
left_turn = { (1, 0) : (0, -1),
(0, 1) : (1, 0),
(-1, 0): (0, 1),
(0, -1): (-1, 0)}
right_turn = { (1, 0) : (0, 1),
(0, 1) : (-1, 0),
(-1, 0): (0, -1),
(0, -1): (1, 0)}
# Parse the map file
with open('day19_input.txt') as f:
diagram = [line.rstrip('\n')for line in f]
pos = None
vel = (0, 1)
route_map = {}
route_string = ''
route_len = 1
# Extract any non-space characters into our route map
y = 0
for line in diagram:
for x in range(len(line)):
if line[x] != ' ':
route_map[(x, y)] = line[x]
if not pos: pos = (x, y)
y += 1
# Traverse the route until we can't go left, forward or right
while pos is not None:
# We found a route letter; save it
if route_map[pos].isalpha(): route_string += route_map[pos]
# Pre-calculate what left/straight/right would look like
vel_left = left_turn[vel]
vel_right = right_turn[vel]
pos_ahead = (pos[0] + vel[0], pos[1] + vel[1])
pos_left = (pos[0] + vel_left[0], pos[1] + vel_left[1])
pos_right = (pos[0] + vel_right[0], pos[1] + vel_right[1])
# Prefer straight ahead, but fall back to left/right turns
if pos_ahead in route_map:
pos = pos_ahead
elif pos_left in route_map:
pos = pos_left
vel = vel_left
elif pos_right in route_map:
pos = pos_right
vel = vel_right
else:
pos = None
route_len -= 1
route_len += 1
print 'Part One: Letters seen were {0}.'.format(route_string)
print 'Part Two: Scan length was {0}.'.format(route_len) | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Dictionaries that know how to change velocity left or right
left_turn = { (1, 0) : (0, -1),
(0, 1) : (1, 0),
(-1, 0): (0, 1),
(0, -1): (-1, 0)}
right_turn = { (1, 0) : (0, 1),
(0, 1) : (-1, 0),
(-1, 0): (0, -1),
(0, -1): (1, 0)}
# Parse the map file
with open('day19_input.txt') as f:
diagram = [line.rstrip('\n')for line in f]
pos = None
vel = (0, 1)
route_map = {}
route_string = ''
route_len = 1
# Extract any non-space characters into our route map
y = 0
for line in diagram:
for x in range(len(line)):
if line[x] != ' ':
route_map[(x, y)] = line[x]
if not pos: pos = (x, y)
y += 1
# Traverse the route until we can't go left, forward or right
while pos is not None:
# We found a route letter; save it
if route_map[pos].isalpha(): route_string += route_map[pos]
# Pre-calculate what left/straight/right would look like
vel_left = left_turn[vel]
vel_right = right_turn[vel]
pos_ahead = (pos[0] + vel[0], pos[1] + vel[1])
pos_left = (pos[0] + vel_left[0], pos[1] + vel_left[1])
pos_right = (pos[0] + vel_right[0], pos[1] + vel_right[1])
# Prefer straight ahead, but fall back to left/right turns
if pos_ahead in route_map:
pos = pos_ahead
elif pos_left in route_map:
pos = pos_left
vel = vel_left
elif pos_right in route_map:
pos = pos_right
vel = vel_right
else:
pos = None
route_len -= 1
route_len += 1
print 'Part One: Letters seen were {0}.'.format(route_string)
print 'Part Two: Scan length was {0}.'.format(route_len) | en | 0.847901 | #!/usr/bin/env python # -*- coding: utf-8 -*- # Dictionaries that know how to change velocity left or right # Parse the map file # Extract any non-space characters into our route map # Traverse the route until we can't go left, forward or right # We found a route letter; save it # Pre-calculate what left/straight/right would look like # Prefer straight ahead, but fall back to left/right turns | 3.768399 | 4 |
flake8_fastapi/visitors/cors_middleware.py | Bakhtiyar-Garashov/flake8-fastapi | 20 | 6616317 | import ast
from typing import List, Optional
from flake8_plugin_utils import Visitor
from flake8_plugin_utils.plugin import TConfig
from flake8_fastapi.errors import CORSMiddlewareOrderError
class CORSMiddlewareOrder(Visitor):
    """AST visitor that reports an error when ``CORSMiddleware`` has been
    registered on a FastAPI app but is not the most recently added
    middleware."""
    def __init__(self, config: Optional[TConfig] = None) -> None:
        super().__init__(config=config)
        # Name the FastAPI() instance is bound to (e.g. "app");
        # filled in by visit_Assign once that assignment is seen.
        self._application_name: Optional[str] = None
        # Middleware class names in registration order.
        self._middlewares: List[str] = []
    def generic_visit(self, node: ast.AST):
        # Walk the entire subtree ourselves so assignments and calls are
        # found at any nesting depth, not only as direct children.
        for stmt in ast.walk(node):
            if isinstance(stmt, ast.Assign):
                self.visit_Assign(stmt)
            if isinstance(stmt, ast.Call):
                self.visit_Call(stmt)
    def visit_Call(self, node: ast.Call) -> None:
        """Record ``<app>.add_middleware(...)`` calls and emit the error
        once CORSMiddleware is present but no longer the last entry."""
        if (
            isinstance(node.func, ast.Attribute)
            and isinstance(node.func.value, ast.Name)
            and node.func.value.id == self._application_name
            and node.func.attr == "add_middleware"
        ):
            # Middleware class passed positionally: app.add_middleware(X, ...)
            for arg in node.args:
                if isinstance(arg, ast.Name):
                    self._middlewares.append(arg.id)
            # ...or by keyword: app.add_middleware(middleware_class=X)
            for keyword in node.keywords:
                if keyword.arg == "middleware_class" and isinstance(
                    keyword.value, ast.Name
                ):
                    self._middlewares.append(keyword.value.id)
            if (
                "CORSMiddleware" in self._middlewares
                and self._middlewares[-1] != "CORSMiddleware"
            ):
                self.error_from_node(CORSMiddlewareOrderError, node)
    def visit_Assign(self, node: ast.Assign):
        """Remember the variable name that ``FastAPI()`` is assigned to."""
        if (
            isinstance(node.value, ast.Call)
            and isinstance(node.value.func, ast.Name)
            and node.value.func.id == "FastAPI"
        ):
            for target in node.targets:
                if isinstance(target, ast.Name):
                    self._application_name = target.id
| import ast
from typing import List, Optional
from flake8_plugin_utils import Visitor
from flake8_plugin_utils.plugin import TConfig
from flake8_fastapi.errors import CORSMiddlewareOrderError
class CORSMiddlewareOrder(Visitor):
def __init__(self, config: Optional[TConfig] = None) -> None:
super().__init__(config=config)
self._application_name: Optional[str] = None
self._middlewares: List[str] = []
def generic_visit(self, node: ast.AST):
for stmt in ast.walk(node):
if isinstance(stmt, ast.Assign):
self.visit_Assign(stmt)
if isinstance(stmt, ast.Call):
self.visit_Call(stmt)
def visit_Call(self, node: ast.Call) -> None:
if (
isinstance(node.func, ast.Attribute)
and isinstance(node.func.value, ast.Name)
and node.func.value.id == self._application_name
and node.func.attr == "add_middleware"
):
for arg in node.args:
if isinstance(arg, ast.Name):
self._middlewares.append(arg.id)
for keyword in node.keywords:
if keyword.arg == "middleware_class" and isinstance(
keyword.value, ast.Name
):
self._middlewares.append(keyword.value.id)
if (
"CORSMiddleware" in self._middlewares
and self._middlewares[-1] != "CORSMiddleware"
):
self.error_from_node(CORSMiddlewareOrderError, node)
def visit_Assign(self, node: ast.Assign):
if (
isinstance(node.value, ast.Call)
and isinstance(node.value.func, ast.Name)
and node.value.func.id == "FastAPI"
):
for target in node.targets:
if isinstance(target, ast.Name):
self._application_name = target.id
| none | 1 | 2.111959 | 2 | |
python/v3_3/find_and_download_file.py | Engage-in-Health/googleads-dfa-reporting-samples | 103 | 6616318 | #!/usr/bin/python
#
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An end-to-end example of how to find and download a report file."""
import argparse
import io
import os
import sys
import dfareporting_utils
from googleapiclient import http
from oauth2client import client
# Declare command-line flags.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
'profile_id', type=int, help='The ID of the profile to use')
argparser.add_argument(
'report_id', type=int, help='The ID of the report to get a file for')
# Chunk size to use when downloading report files. Defaults to 32MB.
CHUNK_SIZE = 32 * 1024 * 1024
def main(argv):
# Retrieve command line arguments.
flags = dfareporting_utils.get_arguments(argv, __doc__, parents=[argparser])
# Authenticate and construct service.
service = dfareporting_utils.setup(flags)
profile_id = flags.profile_id
report_id = flags.report_id
try:
# 1. Find a file to download.
report_file = find_file(service, profile_id, report_id)
if report_file:
# 2. (optional) Generate browser URL.
generate_browser_url(service, report_id, report_file['id'])
# 3. Directly download the file.
direct_download_file(service, report_id, report_file['id'])
else:
print 'No file found for profile ID %d and report ID %d.' % (profile_id,
report_id)
except client.AccessTokenRefreshError:
print('The credentials have been revoked or expired, please re-run the '
'application to re-authorize')
def find_file(service, profile_id, report_id):
  """Finds a report file to download.

  Pages through the report's file list and returns the first file that
  passes is_target_file(), or None when no page contains a match.
  """
  target = None
  request = service.reports().files().list(
      profileId=profile_id, reportId=report_id)
  while True:
    response = request.execute()
    for report_file in response['items']:
      if is_target_file(report_file):
        target = report_file
        break
    # Keep paging only while nothing matched and another page exists.
    if not target and response['items'] and response['nextPageToken']:
      request = service.reports().files().list_next(request, response)
    else:
      break
  if target:
    print 'Found file %s with filename "%s".' % (target['id'],
                                                 target['fileName'])
    return target
  print 'Unable to find file for profile ID %d and report ID %d.' % (profile_id,
                                                                     report_id)
  return None
def is_target_file(report_file):
  """Decides whether a report file is acceptable for download.

  For example purposes, any file whose report has finished generating is
  considered valid; customize this check as needed.
  """
  return 'REPORT_AVAILABLE' == report_file['status']
def generate_browser_url(service, report_id, file_id):
"""Prints the browser download URL for the file."""
report_file = service.files().get(
reportId=report_id, fileId=file_id).execute()
browser_url = report_file['urls']['browserUrl']
print 'File %s has browser URL: %s.' % (report_file['id'], browser_url)
def direct_download_file(service, report_id, file_id):
"""Downloads a report file to disk."""
# Retrieve the file metadata.
report_file = service.files().get(
reportId=report_id, fileId=file_id).execute()
if report_file['status'] == 'REPORT_AVAILABLE':
# Prepare a local file to download the report contents to.
out_file = io.FileIO(generate_file_name(report_file), mode='wb')
# Create a get request.
request = service.files().get_media(reportId=report_id, fileId=file_id)
# Create a media downloader instance.
# Optional: adjust the chunk size used when downloading the file.
downloader = http.MediaIoBaseDownload(
out_file, request, chunksize=CHUNK_SIZE)
# Execute the get request and download the file.
download_finished = False
while download_finished is False:
_, download_finished = downloader.next_chunk()
print('File %s downloaded to %s' % (report_file['id'],
os.path.realpath(out_file.name)))
def generate_file_name(report_file):
  """Builds a local file name from the report file's metadata."""
  # Fall back to the file ID when no explicit filename was set.
  base = report_file['fileName']
  if not base:
    base = report_file['id']
  if report_file['format'] == 'CSV':
    suffix = '.csv'
  else:
    suffix = '.xml'
  return base + suffix
if __name__ == '__main__':
main(sys.argv)
| #!/usr/bin/python
#
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An end-to-end example of how to find and download a report file."""
import argparse
import io
import os
import sys
import dfareporting_utils
from googleapiclient import http
from oauth2client import client
# Declare command-line flags.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
'profile_id', type=int, help='The ID of the profile to use')
argparser.add_argument(
'report_id', type=int, help='The ID of the report to get a file for')
# Chunk size to use when downloading report files. Defaults to 32MB.
CHUNK_SIZE = 32 * 1024 * 1024
def main(argv):
# Retrieve command line arguments.
flags = dfareporting_utils.get_arguments(argv, __doc__, parents=[argparser])
# Authenticate and construct service.
service = dfareporting_utils.setup(flags)
profile_id = flags.profile_id
report_id = flags.report_id
try:
# 1. Find a file to download.
report_file = find_file(service, profile_id, report_id)
if report_file:
# 2. (optional) Generate browser URL.
generate_browser_url(service, report_id, report_file['id'])
# 3. Directly download the file.
direct_download_file(service, report_id, report_file['id'])
else:
print 'No file found for profile ID %d and report ID %d.' % (profile_id,
report_id)
except client.AccessTokenRefreshError:
print('The credentials have been revoked or expired, please re-run the '
'application to re-authorize')
def find_file(service, profile_id, report_id):
"""Finds a report file to download."""
target = None
request = service.reports().files().list(
profileId=profile_id, reportId=report_id)
while True:
response = request.execute()
for report_file in response['items']:
if is_target_file(report_file):
target = report_file
break
if not target and response['items'] and response['nextPageToken']:
request = service.reports().files().list_next(request, response)
else:
break
if target:
print 'Found file %s with filename "%s".' % (target['id'],
target['fileName'])
return target
print 'Unable to find file for profile ID %d and report ID %d.' % (profile_id,
report_id)
return None
def is_target_file(report_file):
# Provide custom validation logic here.
# For example purposes, any available file is considered valid.
return report_file['status'] == 'REPORT_AVAILABLE'
def generate_browser_url(service, report_id, file_id):
"""Prints the browser download URL for the file."""
report_file = service.files().get(
reportId=report_id, fileId=file_id).execute()
browser_url = report_file['urls']['browserUrl']
print 'File %s has browser URL: %s.' % (report_file['id'], browser_url)
def direct_download_file(service, report_id, file_id):
"""Downloads a report file to disk."""
# Retrieve the file metadata.
report_file = service.files().get(
reportId=report_id, fileId=file_id).execute()
if report_file['status'] == 'REPORT_AVAILABLE':
# Prepare a local file to download the report contents to.
out_file = io.FileIO(generate_file_name(report_file), mode='wb')
# Create a get request.
request = service.files().get_media(reportId=report_id, fileId=file_id)
# Create a media downloader instance.
# Optional: adjust the chunk size used when downloading the file.
downloader = http.MediaIoBaseDownload(
out_file, request, chunksize=CHUNK_SIZE)
# Execute the get request and download the file.
download_finished = False
while download_finished is False:
_, download_finished = downloader.next_chunk()
print('File %s downloaded to %s' % (report_file['id'],
os.path.realpath(out_file.name)))
def generate_file_name(report_file):
"""Generates a report file name based on the file metadata."""
# If no filename is specified, use the file ID instead.
file_name = report_file['fileName'] or report_file['id']
extension = '.csv' if report_file['format'] == 'CSV' else '.xml'
return file_name + extension
if __name__ == '__main__':
main(sys.argv)
| en | 0.793106 | #!/usr/bin/python # # Copyright 2018 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. An end-to-end example of how to find and download a report file. # Declare command-line flags. # Chunk size to use when downloading report files. Defaults to 32MB. # Retrieve command line arguments. # Authenticate and construct service. # 1. Find a file to download. # 2. (optional) Generate browser URL. # 3. Directly download the file. Finds a report file to download. # Provide custom validation logic here. # For example purposes, any available file is considered valid. Prints the browser download URL for the file. Downloads a report file to disk. # Retrieve the file metadata. # Prepare a local file to download the report contents to. # Create a get request. # Create a media downloader instance. # Optional: adjust the chunk size used when downloading the file. # Execute the get request and download the file. Generates a report file name based on the file metadata. # If no filename is specified, use the file ID instead. | 2.934549 | 3 |
examples/environmentmodules_moduleloadtest.py | lmodule/lmodule | 3 | 6616319 | <gh_stars>1-10
import os
import re
import subprocess
import sys
from lmod.module import Module
modules = subprocess.getoutput("module av -t")
modules = modules.split()
pass_counter = 0
fail_counter = 0
total = 0
for module in modules:
# output of module tree is as follows '/path/to/tree:' so we remove trailing colon
tree = module[:-1]
# skip entry when it's module tree
if os.path.exists(tree):
print(f"Skipping tree: {tree}")
continue
if re.search("(\(default\))$", module):
module = module.replace("(default)", "")
cmd = Module(module, debug=True)
ret = cmd.test_modules(login=True)
total += 1
# if returncode is 0 mark as PASS
if ret == 0:
pass_counter += 1
else:
fail_counter += 1
# Print the pass/fail summary for the module-load test run.
# Guard against ZeroDivisionError when no modules were tested at all
# (e.g. every entry was a tree path that got skipped).
pass_rate = pass_counter * 100 / total if total else 0.0
fail_rate = fail_counter * 100 / total if total else 0.0
print("-------- SUMMARY ---------")
print(f"Total Pass: {pass_counter}/{total}")
print(f"Total Failure: {fail_counter}/{total}")
print(f"PASS RATE: {pass_rate:.3f}")
print(f"FAIL RATE: {fail_rate:.3f}")
| import os
import re
import subprocess
import sys
from lmod.module import Module
modules = subprocess.getoutput("module av -t")
modules = modules.split()
pass_counter = 0
fail_counter = 0
total = 0
for module in modules:
# output of module tree is as follows '/path/to/tree:' so we remove trailing colon
tree = module[:-1]
# skip entry when it's module tree
if os.path.exists(tree):
print(f"Skipping tree: {tree}")
continue
if re.search("(\(default\))$", module):
module = module.replace("(default)", "")
cmd = Module(module, debug=True)
ret = cmd.test_modules(login=True)
total += 1
# if returncode is 0 mark as PASS
if ret == 0:
pass_counter += 1
else:
fail_counter += 1
pass_rate = pass_counter * 100 / total
fail_rate = fail_counter * 100 / total
print("-------- SUMMARY ---------")
print(f"Total Pass: {pass_counter}/{total}")
print(f"Total Failure: {fail_counter}/{total}")
print(f"PASS RATE: {pass_rate:.3f}")
print(f"FAIL RATE: {fail_rate:.3f}") | en | 0.879955 | # output of module tree is as follows '/path/to/tree:' so we remove trailing colon # skip entry when it's module tree # if returncode is 0 mark as PASS | 2.481915 | 2 |
sponge-rpi-grovepi/examples/rpi-grovepi/grovepi_led_blink.py | mnpas/sponge | 9 | 6616320 | """
Sponge Knowledge Base
Blinking LED
GrovePi board: Connect LED to D4
"""
state = False
led = None
class LedBlink(Trigger):
    # Trigger that fires on every "blink" event and toggles the LED.
    def onConfigure(self):
        self.withEvent("blink")
    def onRun(self, event):
        global led, state
        # Flip the remembered on/off state and push it to the LED output.
        state = not state
        led.set(state)
def onStartup():
global led
led = grovepi.device.getDigitalOut(4)
sponge.event("blink").sendAfter(0, 1000)
def onShutdown():
global led
if led is not None:
led.set(False)
| """
Sponge Knowledge Base
Blinking LED
GrovePi board: Connect LED to D4
"""
state = False
led = None
class LedBlink(Trigger):
def onConfigure(self):
self.withEvent("blink")
def onRun(self, event):
global led, state
state = not state
led.set(state)
def onStartup():
global led
led = grovepi.device.getDigitalOut(4)
sponge.event("blink").sendAfter(0, 1000)
def onShutdown():
global led
if led is not None:
led.set(False)
| en | 0.712397 | Sponge Knowledge Base Blinking LED GrovePi board: Connect LED to D4 | 3.133599 | 3 |
solutions/longest-substring-without-repeating-characters.py | oopsno/leetcode.py | 1 | 6616321 | # encoding: UTF-8
from leetcode import *
from typing import Generator, Tuple
@Problem(3, 'Longest Substring Without Repeating Characters', Difficulty.Medium, Tags.HashTable, Tags.String, Tags.TwoPointers)
class Solution:
    @staticmethod
    def iterate(s: str) -> Generator[Tuple[int, int], None, None]:
        """
        Yield every maximal substring [begin, end) of s that contains no
        repeated character (sliding-window scan).
        """
        # `sub` maps each character to the index of its last occurrence.
        begin, sub = 0, {}
        for end, char in enumerate(s):
            if begin <= sub.get(char, -1):
                # `char` repeats inside the current window: emit the window
                # and restart it just past the previous occurrence.
                yield begin, end
                begin = sub[char] + 1
            sub[char] = end
        yield begin, len(s)
    def lengthOfLongestSubstring(self, s: str) -> int:
        """
        Return the length of the longest substring of s that contains no
        repeating characters.
        """
        return max(r - l for l, r in self.iterate(s))
@Solution.test.lengthOfLongestSubstring
def example(fn):
require(fn('abcabcbb') == len('abc'))
require(fn('bbbbb') == len('b'))
require(fn('pwwkew') == len('wke'))
@Solution.test.lengthOfLongestSubstring
def coverage(fn):
require(fn('') == 0)
require(fn('a') == 1)
require(fn('aa') == 1)
require(fn('ab') == 2)
require(fn('abba') == len('ab'))
@Solution.test.lengthOfLongestSubstring
def profile(fn):
require(fn('abc' * 30000) == len('abc'))
| # encoding: UTF-8
from leetcode import *
from typing import Generator, Tuple
@Problem(3, 'Longest Substring Without Repeating Characters', Difficulty.Medium, Tags.HashTable, Tags.String, Tags.TwoPointers)
class Solution:
@staticmethod
def iterate(s: str) -> Generator[Tuple[int, int], None, None]:
"""
搜索所有不包含重复字符的子串 [begin, end)
"""
begin, sub = 0, {}
for end, char in enumerate(s):
if begin <= sub.get(char, -1):
yield begin, end
begin = sub[char] + 1
sub[char] = end
yield begin, len(s)
def lengthOfLongestSubstring(self, s: str) -> int:
"""
检查并返回 s 中不包含重复字符的最长子串的长度
"""
return max(r - l for l, r in self.iterate(s))
@Solution.test.lengthOfLongestSubstring
def example(fn):
require(fn('abcabcbb') == len('abc'))
require(fn('bbbbb') == len('b'))
require(fn('pwwkew') == len('wke'))
@Solution.test.lengthOfLongestSubstring
def coverage(fn):
require(fn('') == 0)
require(fn('a') == 1)
require(fn('aa') == 1)
require(fn('ab') == 2)
require(fn('abba') == len('ab'))
@Solution.test.lengthOfLongestSubstring
def profile(fn):
require(fn('abc' * 30000) == len('abc'))
| zh | 0.907269 | # encoding: UTF-8 搜索所有不包含重复字符的子串 [begin, end) 检查并返回 s 中不包含重复字符的最长子串的长度 | 3.226749 | 3 |
statistics.py | ZetDude/subreddit-analyser | 1 | 6616322 | import time
import praw
import datetime
import operator
import obot
def median(lst):
quotient, remainder = divmod(len(lst), 2)
if remainder:
return sorted(lst)[quotient]
return sum(sorted(lst)[quotient - 1:quotient + 1]) / 2.
subreddit = input("What subreddit to run for? >>> ")
days = int(input("How many days to check for? >>> "))
print("Starting process, hang on tight")
bot = praw.Reddit(user_agent='/r/{} post analysis'.format(subreddit),
client_id=obot.clientId,
client_secret=obot.clientSecret,
username=obot.name,
password=<PASSWORD>)
print("Connection made")
statV = {}
statA = {}
stat = {}
statM = {}
statS = {}
links = []
skip = 0
last = 0
limit = int(input("Limit >>> "))
print("Fetching posts. This may take a while")
posts = list(bot.subreddit(subreddit).new(limit=limit))
postamount = len(posts)
print("Posts fetched! {} got".format(postamount))
amount = 0
for y, i in enumerate(posts):
if i.is_self:
self = "SELFPOST"
else:
if "i.redd.it" in i.url:
self = "IMAGE"
elif "imgur.com" in i.url:
self = "IMAGE"
else:
self = "LINK"
links.append(i.url)
createdfrm = datetime.datetime.utcfromtimestamp(int(i.created_utc))
nowtime = time.time()
nowfrm = datetime.datetime.utcfromtimestamp(int(nowtime))
diff = nowfrm - createdfrm
if diff.total_seconds() < 24*3600:
print("Post is not older than 24 hours, skipping")
skip += 1
continue
if diff.total_seconds() > 86400 * days:
print("Post is older than {} days, ending task".format(days))
break
amount += 1
#flair = i.link_flair_css_class
flair = i.link_flair_text
upvotes = i.score
statV[flair] = statV.get(flair, 0) + upvotes
statA[flair] = statA.get(flair, 0) + 1
statV[self] = statV.get(self, 0) + upvotes
statA[self] = statA.get(self, 0) + 1
statM[flair] = statM.get(flair, []) + [upvotes]
statM[self] = statM.get(self, []) + [upvotes]
print("{}/{}: {} - {} ({}) - {} upvotes".format(y, postamount, i.title, flair, self, upvotes))
last = diff.total_seconds() / 86400
for key, value in statV.items():
stat[key] = round(statV[key] / statA[key])
for key, value in statM.items():
statS[key] = round(median(value))
print(links)
print("\n+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=\n")
print("Ran through {} posts of {}, ({} being the goal) with the last one being {} days old".format(amount, subreddit, len(posts), last))
print("(skipped {} posts)".format(skip))
print("Average upvotes of the past {} days, categorized by flair type:".format(days))
longest = 0
for i in stat:
if len(str(i)) > longest:
longest = len(i)
longest += 1
for key, value in sorted(stat.items(), key=operator.itemgetter(1)):
print("{}{}({}){} : {} average upvotes".format(key, " " * (longest - len(str(key))), statA[key], " " * (3 - len(str(statA[key]))), value))
print("")
print("Median upvotes of the past {} days, categorized by flair type:".format(days))
for key, value in sorted(statS.items(), key=operator.itemgetter(1)):
print("{}{}({}){} : {} upvotes".format(key, " " * (longest - len(str(key))), statA[key], " " * (3- len(str(statA[key]))), value))
| import time
import praw
import datetime
import operator
import obot
def median(lst):
quotient, remainder = divmod(len(lst), 2)
if remainder:
return sorted(lst)[quotient]
return sum(sorted(lst)[quotient - 1:quotient + 1]) / 2.
subreddit = input("What subreddit to run for? >>> ")
days = int(input("How many days to check for? >>> "))
print("Starting process, hang on tight")
bot = praw.Reddit(user_agent='/r/{} post analysis'.format(subreddit),
client_id=obot.clientId,
client_secret=obot.clientSecret,
username=obot.name,
password=<PASSWORD>)
print("Connection made")
statV = {}
statA = {}
stat = {}
statM = {}
statS = {}
links = []
skip = 0
last = 0
limit = int(input("Limit >>> "))
print("Fetching posts. This may take a while")
posts = list(bot.subreddit(subreddit).new(limit=limit))
postamount = len(posts)
print("Posts fetched! {} got".format(postamount))
amount = 0
for y, i in enumerate(posts):
if i.is_self:
self = "SELFPOST"
else:
if "i.redd.it" in i.url:
self = "IMAGE"
elif "imgur.com" in i.url:
self = "IMAGE"
else:
self = "LINK"
links.append(i.url)
createdfrm = datetime.datetime.utcfromtimestamp(int(i.created_utc))
nowtime = time.time()
nowfrm = datetime.datetime.utcfromtimestamp(int(nowtime))
diff = nowfrm - createdfrm
if diff.total_seconds() < 24*3600:
print("Post is not older than 24 hours, skipping")
skip += 1
continue
if diff.total_seconds() > 86400 * days:
print("Post is older than {} days, ending task".format(days))
break
amount += 1
#flair = i.link_flair_css_class
flair = i.link_flair_text
upvotes = i.score
statV[flair] = statV.get(flair, 0) + upvotes
statA[flair] = statA.get(flair, 0) + 1
statV[self] = statV.get(self, 0) + upvotes
statA[self] = statA.get(self, 0) + 1
statM[flair] = statM.get(flair, []) + [upvotes]
statM[self] = statM.get(self, []) + [upvotes]
print("{}/{}: {} - {} ({}) - {} upvotes".format(y, postamount, i.title, flair, self, upvotes))
last = diff.total_seconds() / 86400
for key, value in statV.items():
stat[key] = round(statV[key] / statA[key])
for key, value in statM.items():
statS[key] = round(median(value))
print(links)
print("\n+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=\n")
print("Ran through {} posts of {}, ({} being the goal) with the last one being {} days old".format(amount, subreddit, len(posts), last))
print("(skipped {} posts)".format(skip))
print("Average upvotes of the past {} days, categorized by flair type:".format(days))
longest = 0
for i in stat:
if len(str(i)) > longest:
longest = len(i)
longest += 1
for key, value in sorted(stat.items(), key=operator.itemgetter(1)):
print("{}{}({}){} : {} average upvotes".format(key, " " * (longest - len(str(key))), statA[key], " " * (3 - len(str(statA[key]))), value))
print("")
print("Median upvotes of the past {} days, categorized by flair type:".format(days))
for key, value in sorted(statS.items(), key=operator.itemgetter(1)):
print("{}{}({}){} : {} upvotes".format(key, " " * (longest - len(str(key))), statA[key], " " * (3- len(str(statA[key]))), value))
| zh | 0.066019 | #flair = i.link_flair_css_class | 2.985064 | 3 |
wordembedings.py | hovjdev/CyprusVitalSigns | 0 | 6616323 | <gh_stars>0
#!/usr/bin/python -tt
import json
import logging
import nltk
import re
import cleantext
import random
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from gensim.models import fasttext
from gensim.models import KeyedVectors
from gensim.models import word2vec
from gensim.models import FastText
from gensim.utils import tokenize
from gensim.models import ldamodel
from gensim.models import Word2Vec
from gensim import corpora
from gensim.parsing.preprocessing import preprocess_string, strip_punctuation, strip_numeric
from nltk.corpus import wordnet as wn
from nltk.stem.wordnet import WordNetLemmatizer
import pyLDAvis
import pyLDAvis.gensim_models as gensimvis
from wordcloud import WordCloud
DATA_FILES = []
MODEL_FILE = "../models/cc.en.300.bin"
nltk.download('stopwords')
nltk.download('wordnet')
STOP_WORDS = nltk.corpus.stopwords.words()
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
def clean_sentence(val):
"remove chars that are not letters or numbers, downcase, then remove stop words"
val = cleantext.clean(val, extra_spaces=True, lowercase=True, numbers=True, punct=True)
regex = re.compile('([^\s\w]|_)+')
sentence = regex.sub('', val).lower()
sentence = sentence.split(" ")
for word in list(sentence):
if word in STOP_WORDS:
sentence.remove(word)
sentence = " ".join(sentence)
return sentence
corpus = []
wholetext = ''
# Opening JSON file
for datafile in DATA_FILES:
f = open(datafile)
data = json.load(f)
f.close()
for i in data:
s=i['content']
s=clean_sentence(s)
word_list = s.split(" ")
corpus.append(word_list)
wholetext += " " + " ".join(word_list)
print(f'len(corpus)={len(corpus)}')
if False:
import yake
kw_extractor = yake.KeywordExtractor()
language = "en"
max_ngram_size = 1
deduplication_threshold = 0.9
numOfKeywords = 2000
custom_kw_extractor = yake.KeywordExtractor(lan=language, n=max_ngram_size, dedupLim=deduplication_threshold, top=numOfKeywords, features=None)
keywords = custom_kw_extractor.extract_keywords(wholetext)
keywords_list = []
for k in keywords:
keywords_list.append(k[0])
print(f"keywords_list: {keywords_list[:100]}")
print(corpus[:10])
print("Get topics")
def get_lemma(word):
lemma = wn.morphy(word)
if lemma is None:
return word
else:
return lemma
def get_lemma2(word):
return WordNetLemmatizer().lemmatize(word)
def prepare_text_for_lda(text):
tokens = tokenize(text)
tokens = [token for token in tokens if len(token) > 4]
tokens = [token for token in tokens if token not in STOP_WORDS]
tokens = [get_lemma(token) for token in tokens]
return tokens
text_data = []
for c in corpus:
line = " ".join(c)
tokens = prepare_text_for_lda(line)
text_data.append(tokens)
dictionary = corpora.Dictionary(text_data)
corpus2 = [dictionary.doc2bow(text) for text in text_data]
NUM_TOPICS = 30
ldamodel = ldamodel.LdaModel(corpus2, num_topics = NUM_TOPICS, id2word=dictionary, passes=20)
topics = ldamodel.print_topics(num_words=5)
for topic in topics:
print(topic)
lda_display = gensimvis.prepare(ldamodel, corpus2, dictionary, sort_topics=False)
pyLDAvis.save_html(lda_display, 'lda.html')
for t in range(ldamodel.num_topics):
plt.figure()
wc = WordCloud(width=1600, height=1600)
wc.font_path = "imput/fonts/bauhaus/BauhausRegular.ttf"
plt.imshow(wc.fit_words(dict(ldamodel.show_topic(t, 100))))
plt.axis("off")
plt.savefig(f'wordcloud_{t+1}.png', facecolor='k', bbox_inches='tight', dpi=600)
plt.close('all')
print("Build model")
model=None
if True:
#model = FastText(vector_size=50) #ok
model = Word2Vec(vector_size=10) # ok
model.build_vocab(corpus_iterable=corpus)
print(f"len(corpus): {len(corpus)}")
model.train(corpus_iterable=corpus, total_examples=len(corpus), epochs=100)
if False:
model = FastText.load_fasttext_format(MODEL_FILE)
model.build_vocab(corpus_iterable=corpus, update=True)
model.train(corpus_iterable=corpus, total_examples=len(corpus), epochs=100)
print("Plot model")
def tsne_plot(model, positive):
"Creates and TSNE model and plots it"
labels = []
tokens = []
words = model.wv.most_similar(positive=positive, topn=20)
for word in words:
tokens.append(model.wv.__getitem__(word[0]))
labels.append(word)
tsne_model = TSNE(perplexity=40, n_components=2, init='pca', n_iter=2500, random_state=23)
new_values = tsne_model.fit_transform(tokens)
x = []
y = []
for value in new_values:
x.append(value[0])
y.append(value[1])
plt.figure(figsize=(16, 16))
for i in range(len(x)):
plt.scatter(x[i],y[i])
plt.annotate(labels[i],
xy=(x[i], y[i]),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.title(" ".join(positive))
plt.show()
tsne_plot(model, positive=["cyprus"])
tsne_plot(model, positive=["aphrodite"])
tsne_plot(model, positive=["food"])
tsne_plot(model, positive=['tourism'])
| #!/usr/bin/python -tt
import json
import logging
import nltk
import re
import cleantext
import random
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from gensim.models import fasttext
from gensim.models import KeyedVectors
from gensim.models import word2vec
from gensim.models import FastText
from gensim.utils import tokenize
from gensim.models import ldamodel
from gensim.models import Word2Vec
from gensim import corpora
from gensim.parsing.preprocessing import preprocess_string, strip_punctuation, strip_numeric
from nltk.corpus import wordnet as wn
from nltk.stem.wordnet import WordNetLemmatizer
import pyLDAvis
import pyLDAvis.gensim_models as gensimvis
from wordcloud import WordCloud
DATA_FILES = []
MODEL_FILE = "../models/cc.en.300.bin"
nltk.download('stopwords')
nltk.download('wordnet')
STOP_WORDS = nltk.corpus.stopwords.words()
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
def clean_sentence(val):
"remove chars that are not letters or numbers, downcase, then remove stop words"
val = cleantext.clean(val, extra_spaces=True, lowercase=True, numbers=True, punct=True)
regex = re.compile('([^\s\w]|_)+')
sentence = regex.sub('', val).lower()
sentence = sentence.split(" ")
for word in list(sentence):
if word in STOP_WORDS:
sentence.remove(word)
sentence = " ".join(sentence)
return sentence
corpus = []
wholetext = ''
# Opening JSON file
for datafile in DATA_FILES:
f = open(datafile)
data = json.load(f)
f.close()
for i in data:
s=i['content']
s=clean_sentence(s)
word_list = s.split(" ")
corpus.append(word_list)
wholetext += " " + " ".join(word_list)
print(f'len(corpus)={len(corpus)}')
if False:
import yake
kw_extractor = yake.KeywordExtractor()
language = "en"
max_ngram_size = 1
deduplication_threshold = 0.9
numOfKeywords = 2000
custom_kw_extractor = yake.KeywordExtractor(lan=language, n=max_ngram_size, dedupLim=deduplication_threshold, top=numOfKeywords, features=None)
keywords = custom_kw_extractor.extract_keywords(wholetext)
keywords_list = []
for k in keywords:
keywords_list.append(k[0])
print(f"keywords_list: {keywords_list[:100]}")
print(corpus[:10])
print("Get topics")
def get_lemma(word):
lemma = wn.morphy(word)
if lemma is None:
return word
else:
return lemma
def get_lemma2(word):
return WordNetLemmatizer().lemmatize(word)
def prepare_text_for_lda(text):
tokens = tokenize(text)
tokens = [token for token in tokens if len(token) > 4]
tokens = [token for token in tokens if token not in STOP_WORDS]
tokens = [get_lemma(token) for token in tokens]
return tokens
text_data = []
for c in corpus:
line = " ".join(c)
tokens = prepare_text_for_lda(line)
text_data.append(tokens)
dictionary = corpora.Dictionary(text_data)
corpus2 = [dictionary.doc2bow(text) for text in text_data]
NUM_TOPICS = 30
ldamodel = ldamodel.LdaModel(corpus2, num_topics = NUM_TOPICS, id2word=dictionary, passes=20)
topics = ldamodel.print_topics(num_words=5)
for topic in topics:
print(topic)
lda_display = gensimvis.prepare(ldamodel, corpus2, dictionary, sort_topics=False)
pyLDAvis.save_html(lda_display, 'lda.html')
for t in range(ldamodel.num_topics):
plt.figure()
wc = WordCloud(width=1600, height=1600)
wc.font_path = "imput/fonts/bauhaus/BauhausRegular.ttf"
plt.imshow(wc.fit_words(dict(ldamodel.show_topic(t, 100))))
plt.axis("off")
plt.savefig(f'wordcloud_{t+1}.png', facecolor='k', bbox_inches='tight', dpi=600)
plt.close('all')
print("Build model")
model=None
if True:
#model = FastText(vector_size=50) #ok
model = Word2Vec(vector_size=10) # ok
model.build_vocab(corpus_iterable=corpus)
print(f"len(corpus): {len(corpus)}")
model.train(corpus_iterable=corpus, total_examples=len(corpus), epochs=100)
if False:
model = FastText.load_fasttext_format(MODEL_FILE)
model.build_vocab(corpus_iterable=corpus, update=True)
model.train(corpus_iterable=corpus, total_examples=len(corpus), epochs=100)
print("Plot model")
def tsne_plot(model, positive):
"Creates and TSNE model and plots it"
labels = []
tokens = []
words = model.wv.most_similar(positive=positive, topn=20)
for word in words:
tokens.append(model.wv.__getitem__(word[0]))
labels.append(word)
tsne_model = TSNE(perplexity=40, n_components=2, init='pca', n_iter=2500, random_state=23)
new_values = tsne_model.fit_transform(tokens)
x = []
y = []
for value in new_values:
x.append(value[0])
y.append(value[1])
plt.figure(figsize=(16, 16))
for i in range(len(x)):
plt.scatter(x[i],y[i])
plt.annotate(labels[i],
xy=(x[i], y[i]),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.title(" ".join(positive))
plt.show()
tsne_plot(model, positive=["cyprus"])
tsne_plot(model, positive=["aphrodite"])
tsne_plot(model, positive=["food"])
tsne_plot(model, positive=['tourism']) | en | 0.325589 | #!/usr/bin/python -tt # Opening JSON file #model = FastText(vector_size=50) #ok # ok | 2.755266 | 3 |
prototype/classes/serializers.py | AnufriyevT/Prototype | 0 | 6616324 | from drf_writable_nested import WritableNestedModelSerializer
from rest_framework import serializers
from .models import Project, Domain, Vocabulary, Product, Producer, Value, Standard, DataFormat, Pricing, Retail, \
Academic, StandardPrice, VolumePrice, Input, Output
class DomainSerializer(serializers.ModelSerializer):
class Meta:
model = Domain
fields = (
'__all__'
)
class VocabularySerializer(serializers.ModelSerializer):
class Meta:
model = Vocabulary
fields = (
'__all__'
)
class ProducerSerializer(serializers.ModelSerializer):
class Meta:
model = Producer
fields = (
'__all__'
)
class ValueSerializer(serializers.ModelSerializer):
class Meta:
model = Value
fields = (
'__all__'
)
class StandardsSerializer(serializers.ModelSerializer):
class Meta:
model = Standard
fields = (
'__all__'
)
class InputSerializer(serializers.ModelSerializer):
class Meta:
model = Input
fields = (
'__all__'
)
class OutputSerializer(serializers.ModelSerializer):
class Meta:
model = Output
fields = (
'__all__'
)
class DataFormatSerializer(WritableNestedModelSerializer):
input = InputSerializer(many=True)
output = OutputSerializer(many=True)
class Meta:
model = DataFormat
fields = (
'__all__'
)
class StandardPriceSerializer(serializers.ModelSerializer):
class Meta:
model = StandardPrice
fields = (
'__all__'
)
class VolumePriceSerializer(serializers.ModelSerializer):
class Meta:
model = VolumePrice
fields = (
'__all__'
)
class RetailSerializer(WritableNestedModelSerializer):
standard = StandardPriceSerializer()
volume = VolumePriceSerializer()
class Meta:
model = Retail
fields = (
'__all__'
)
class AcademicSerializer(WritableNestedModelSerializer):
standard = StandardPriceSerializer()
volume = VolumePriceSerializer()
class Meta:
model = Academic
fields = (
'__all__'
)
class PricingSerializer(WritableNestedModelSerializer):
retail = RetailSerializer()
academic = AcademicSerializer()
class Meta:
model = Pricing
fields = (
'__all__'
)
class ProductSerializer(WritableNestedModelSerializer):
producer = ProducerSerializer()
value = ValueSerializer()
standards = StandardsSerializer(many=True)
data_format = DataFormatSerializer()
pricing = PricingSerializer()
class Meta:
model = Product
fields = (
'id',
'name',
'description',
'producer',
'leader',
'value',
'standards',
'pricing',
'data_format',
'complementary_products',
)
class ProjectSerializer(WritableNestedModelSerializer):
domain = DomainSerializer()
vocabulary = VocabularySerializer(many=True)
product = ProductSerializer(many=True)
class Meta:
model = Project
fields = (
'id',
'name',
'domain',
'vocabulary',
'product',
'author',
)
extra_kwargs = {
'author': {'read_only': True},
}
| from drf_writable_nested import WritableNestedModelSerializer
from rest_framework import serializers
from .models import Project, Domain, Vocabulary, Product, Producer, Value, Standard, DataFormat, Pricing, Retail, \
Academic, StandardPrice, VolumePrice, Input, Output
class DomainSerializer(serializers.ModelSerializer):
class Meta:
model = Domain
fields = (
'__all__'
)
class VocabularySerializer(serializers.ModelSerializer):
class Meta:
model = Vocabulary
fields = (
'__all__'
)
class ProducerSerializer(serializers.ModelSerializer):
class Meta:
model = Producer
fields = (
'__all__'
)
class ValueSerializer(serializers.ModelSerializer):
class Meta:
model = Value
fields = (
'__all__'
)
class StandardsSerializer(serializers.ModelSerializer):
class Meta:
model = Standard
fields = (
'__all__'
)
class InputSerializer(serializers.ModelSerializer):
class Meta:
model = Input
fields = (
'__all__'
)
class OutputSerializer(serializers.ModelSerializer):
class Meta:
model = Output
fields = (
'__all__'
)
class DataFormatSerializer(WritableNestedModelSerializer):
input = InputSerializer(many=True)
output = OutputSerializer(many=True)
class Meta:
model = DataFormat
fields = (
'__all__'
)
class StandardPriceSerializer(serializers.ModelSerializer):
class Meta:
model = StandardPrice
fields = (
'__all__'
)
class VolumePriceSerializer(serializers.ModelSerializer):
class Meta:
model = VolumePrice
fields = (
'__all__'
)
class RetailSerializer(WritableNestedModelSerializer):
standard = StandardPriceSerializer()
volume = VolumePriceSerializer()
class Meta:
model = Retail
fields = (
'__all__'
)
class AcademicSerializer(WritableNestedModelSerializer):
standard = StandardPriceSerializer()
volume = VolumePriceSerializer()
class Meta:
model = Academic
fields = (
'__all__'
)
class PricingSerializer(WritableNestedModelSerializer):
retail = RetailSerializer()
academic = AcademicSerializer()
class Meta:
model = Pricing
fields = (
'__all__'
)
class ProductSerializer(WritableNestedModelSerializer):
producer = ProducerSerializer()
value = ValueSerializer()
standards = StandardsSerializer(many=True)
data_format = DataFormatSerializer()
pricing = PricingSerializer()
class Meta:
model = Product
fields = (
'id',
'name',
'description',
'producer',
'leader',
'value',
'standards',
'pricing',
'data_format',
'complementary_products',
)
class ProjectSerializer(WritableNestedModelSerializer):
domain = DomainSerializer()
vocabulary = VocabularySerializer(many=True)
product = ProductSerializer(many=True)
class Meta:
model = Project
fields = (
'id',
'name',
'domain',
'vocabulary',
'product',
'author',
)
extra_kwargs = {
'author': {'read_only': True},
}
| none | 1 | 2.222869 | 2 | |
tests/examples/test_echo.py | fopina/tgbotplug | 1 | 6616325 | from tgbot import plugintest
from plugin_examples.echo import EchoPlugin
class EchoPluginTest(plugintest.PluginTestCase):
def setUp(self):
self.bot = self.fake_bot('', plugins=[EchoPlugin()])
def test_reply(self):
self.receive_message('/echo test')
self.assertReplied('test')
def test_need_reply(self):
self.receive_message('/echo')
self.assertReplied('echo what?')
self.receive_message('test')
self.assertReplied('test')
| from tgbot import plugintest
from plugin_examples.echo import EchoPlugin
class EchoPluginTest(plugintest.PluginTestCase):
def setUp(self):
self.bot = self.fake_bot('', plugins=[EchoPlugin()])
def test_reply(self):
self.receive_message('/echo test')
self.assertReplied('test')
def test_need_reply(self):
self.receive_message('/echo')
self.assertReplied('echo what?')
self.receive_message('test')
self.assertReplied('test')
| none | 1 | 2.400036 | 2 | |
my_work.py | mgokcay/CarND-Advanced-Lane-Lines | 0 | 6616326 | import numpy as np
import cv2
import glob
import os
import matplotlib.pyplot as plt
from camera_calib import calibrateCamera
from threshold import threshold
from warp import warp
from lane_detection import find_lane_polynomials
from lane_detection import measure_curvature, measure_position
from moviepy.editor import VideoFileClip
#%matplotlib qt
def process_image(img, cam_mtx, dist_coeff, write_outputs=False, output_file_name=''):
img_undistorted = cv2.undistort(img, cam_mtx, dist_coeff, None, cam_mtx)
if write_outputs:
cv2.imwrite('./output_images/undistorted/' + output_file_name, cv2.cvtColor(img_undistorted, cv2.COLOR_RGB2BGR))
img_threshold, img_threshold_colored = threshold(img_undistorted)
if write_outputs:
cv2.imwrite('./output_images/threshold/' + output_file_name, cv2.cvtColor(img_threshold * 255, cv2.COLOR_RGB2BGR))
M_inv, img_threshold_warped = warp(img_threshold)
if write_outputs:
cv2.imwrite('./output_images/warped/' + output_file_name, cv2.cvtColor(img_threshold_warped * 255, cv2.COLOR_RGB2BGR))
left_fit, right_fit, ploty, out_img, img_poly = find_lane_polynomials(img_threshold_warped)
if write_outputs:
cv2.imwrite('./output_images/lane_lines/' + output_file_name, cv2.cvtColor(out_img, cv2.COLOR_RGB2BGR))
left_curverad = measure_curvature(np.max(ploty), left_fit)
right_curverad = measure_curvature(np.max(ploty), right_fit)
vehicle_position = measure_position(np.max(ploty), img.shape[1], left_fit, right_fit)
img_size = (out_img.shape[1], out_img.shape[0])
img_lane = cv2.warpPerspective(out_img, M_inv, img_size)
img_final = cv2.addWeighted(img_undistorted, 1.0, img_lane, 0.4, 0)
cv2.putText(img_final,
"Radious of curvature: " + str(int((left_curverad+right_curverad)/2)) + " m",
(50, 50), cv2.FONT_HERSHEY_SIMPLEX,
1, (255, 255, 255), 2, cv2.LINE_AA)
cv2.putText(img_final,
"Vehicle position: " + "{:.2f}".format(vehicle_position) + " m",
(50, 100), cv2.FONT_HERSHEY_SIMPLEX,
1, (255, 255, 255), 2, cv2.LINE_AA)
if write_outputs:
cv2.imwrite('./output_images/final/' + output_file_name, cv2.cvtColor(img_final, cv2.COLOR_RGB2BGR))
# # Plot the result
# f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(24, 9))
# f.tight_layout()
#
# ax1.imshow(img_orig)
# ax1.set_title('Original Image', fontsize=40)
#
# ax2.imshow(img_threshold)
# ax2.set_title('Threshold', fontsize=40)
#
# ax3.imshow(img_threshold_warped)
# ax3.set_title('Warped', fontsize=40)
#
# plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
# plt.show()
vis1 = np.concatenate((img_final, img_threshold_colored), axis=1)
warped_color = np.dstack((img_threshold_warped, img_threshold_warped, img_threshold_warped)) * 255
vis2 = np.concatenate((img_poly, out_img), axis=1)
vis = np.concatenate((vis1, vis2), axis=0)
if write_outputs:
cv2.imwrite('./output_images/debug/' + output_file_name, cv2.cvtColor(vis, cv2.COLOR_RGB2BGR))
return vis
# Make a list of calibration images
calib_images = glob.glob('./camera_cal/calibration*.jpg')
# calibrate camera
cam_mtx, dist_coeff = calibrateCamera(calib_images)
test_images = glob.glob('./test_images/test1.jpg')
# for image in test_images:
#
# img_orig = cv2.imread(image)
# img_rgb = cv2.cvtColor(img_orig, cv2.COLOR_BGR2RGB)
# process_image(img_rgb, cam_mtx, dist_coeff, True, os.path.basename(image))
# clip1 = VideoFileClip("project_video.mp4").subclip(0,5)
clip1 = VideoFileClip("project_video.mp4")
result_clip = clip1.fl_image(lambda image: process_image(image, cam_mtx, dist_coeff))
result_clip.write_videofile("project_video_output.mp4", audio=False)
print("ok")
| import numpy as np
import cv2
import glob
import os
import matplotlib.pyplot as plt
from camera_calib import calibrateCamera
from threshold import threshold
from warp import warp
from lane_detection import find_lane_polynomials
from lane_detection import measure_curvature, measure_position
from moviepy.editor import VideoFileClip
#%matplotlib qt
def process_image(img, cam_mtx, dist_coeff, write_outputs=False, output_file_name=''):
img_undistorted = cv2.undistort(img, cam_mtx, dist_coeff, None, cam_mtx)
if write_outputs:
cv2.imwrite('./output_images/undistorted/' + output_file_name, cv2.cvtColor(img_undistorted, cv2.COLOR_RGB2BGR))
img_threshold, img_threshold_colored = threshold(img_undistorted)
if write_outputs:
cv2.imwrite('./output_images/threshold/' + output_file_name, cv2.cvtColor(img_threshold * 255, cv2.COLOR_RGB2BGR))
M_inv, img_threshold_warped = warp(img_threshold)
if write_outputs:
cv2.imwrite('./output_images/warped/' + output_file_name, cv2.cvtColor(img_threshold_warped * 255, cv2.COLOR_RGB2BGR))
left_fit, right_fit, ploty, out_img, img_poly = find_lane_polynomials(img_threshold_warped)
if write_outputs:
cv2.imwrite('./output_images/lane_lines/' + output_file_name, cv2.cvtColor(out_img, cv2.COLOR_RGB2BGR))
left_curverad = measure_curvature(np.max(ploty), left_fit)
right_curverad = measure_curvature(np.max(ploty), right_fit)
vehicle_position = measure_position(np.max(ploty), img.shape[1], left_fit, right_fit)
img_size = (out_img.shape[1], out_img.shape[0])
img_lane = cv2.warpPerspective(out_img, M_inv, img_size)
img_final = cv2.addWeighted(img_undistorted, 1.0, img_lane, 0.4, 0)
cv2.putText(img_final,
"Radious of curvature: " + str(int((left_curverad+right_curverad)/2)) + " m",
(50, 50), cv2.FONT_HERSHEY_SIMPLEX,
1, (255, 255, 255), 2, cv2.LINE_AA)
cv2.putText(img_final,
"Vehicle position: " + "{:.2f}".format(vehicle_position) + " m",
(50, 100), cv2.FONT_HERSHEY_SIMPLEX,
1, (255, 255, 255), 2, cv2.LINE_AA)
if write_outputs:
cv2.imwrite('./output_images/final/' + output_file_name, cv2.cvtColor(img_final, cv2.COLOR_RGB2BGR))
# # Plot the result
# f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(24, 9))
# f.tight_layout()
#
# ax1.imshow(img_orig)
# ax1.set_title('Original Image', fontsize=40)
#
# ax2.imshow(img_threshold)
# ax2.set_title('Threshold', fontsize=40)
#
# ax3.imshow(img_threshold_warped)
# ax3.set_title('Warped', fontsize=40)
#
# plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
# plt.show()
vis1 = np.concatenate((img_final, img_threshold_colored), axis=1)
warped_color = np.dstack((img_threshold_warped, img_threshold_warped, img_threshold_warped)) * 255
vis2 = np.concatenate((img_poly, out_img), axis=1)
vis = np.concatenate((vis1, vis2), axis=0)
if write_outputs:
cv2.imwrite('./output_images/debug/' + output_file_name, cv2.cvtColor(vis, cv2.COLOR_RGB2BGR))
return vis
# Make a list of calibration images
calib_images = glob.glob('./camera_cal/calibration*.jpg')
# calibrate camera
cam_mtx, dist_coeff = calibrateCamera(calib_images)
test_images = glob.glob('./test_images/test1.jpg')
# for image in test_images:
#
# img_orig = cv2.imread(image)
# img_rgb = cv2.cvtColor(img_orig, cv2.COLOR_BGR2RGB)
# process_image(img_rgb, cam_mtx, dist_coeff, True, os.path.basename(image))
# clip1 = VideoFileClip("project_video.mp4").subclip(0,5)
clip1 = VideoFileClip("project_video.mp4")
result_clip = clip1.fl_image(lambda image: process_image(image, cam_mtx, dist_coeff))
result_clip.write_videofile("project_video_output.mp4", audio=False)
print("ok")
| en | 0.201068 | #%matplotlib qt # # Plot the result # f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(24, 9)) # f.tight_layout() # # ax1.imshow(img_orig) # ax1.set_title('Original Image', fontsize=40) # # ax2.imshow(img_threshold) # ax2.set_title('Threshold', fontsize=40) # # ax3.imshow(img_threshold_warped) # ax3.set_title('Warped', fontsize=40) # # plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.) # plt.show() # Make a list of calibration images # calibrate camera # for image in test_images: # # img_orig = cv2.imread(image) # img_rgb = cv2.cvtColor(img_orig, cv2.COLOR_BGR2RGB) # process_image(img_rgb, cam_mtx, dist_coeff, True, os.path.basename(image)) # clip1 = VideoFileClip("project_video.mp4").subclip(0,5) | 2.397504 | 2 |
qtstyles/demo.py | simongarisch/qstyles | 10 | 6616327 | '''
Includes a run_demo() function that acts as a basic demo for our
StylePicker and StylePickerWidget classes.
'''
import random
from qtpy import QtWidgets, QtCore
from qtstyles.picker import StylePicker
from qtstyles.widget import StylePickerWidget
def run_demo(close_after=None, auto_test=False):
'''
Args:
close_after: either None or integer. Number of seconds
after which the demo will close.
auto_test: boolean. If true then randomly select a style at intervals.
This function provides a demonstration in changing the application
style sheet.
1) The first option is to set the style sheet once with
app.setStyleSheet(StylePicker("default").get_sheet())
2) The second option is to include a style picker widget (QComboBox)
that'll change the application style sheet when a new style is selected
grid.addWidget(StylePickerWidget(), 2, 0)
'''
app = QtWidgets.QApplication.instance()
if not app:
app = QtWidgets.QApplication([])
win = QtWidgets.QMainWindow()
win.setWindowTitle("Style Sheets")
frame = QtWidgets.QFrame()
win.setCentralWidget(frame)
grid = QtWidgets.QGridLayout(frame)
grid.setHorizontalSpacing(5)
grid.setVerticalSpacing(5)
grid.addWidget(QtWidgets.QLabel("Username"), 0, 0)
grid.addWidget(QtWidgets.QLabel("Password"), 1, 0)
user_input = QtWidgets.QLineEdit()
pass_input = QtWidgets.QLineEdit()
grid.addWidget(user_input, 0, 1)
grid.addWidget(pass_input, 1, 1)
picker_widget = StylePickerWidget()
grid.addWidget(picker_widget, 2, 0)
grid.addWidget(QtWidgets.QPushButton("Submit"), 2, 1)
win.show()
def choose_random_style():
''' Select a random style from the picker_widget. '''
style_list = StylePicker().available_styles
chosen_style = random.choice(style_list)
picker_widget.setCurrentIndex(picker_widget.findText(chosen_style))
def close_demo():
''' Close the demo once 'close_after' seconds have elapsed. '''
win.close()
if auto_test:
timer = QtCore.QTimer()
timer.timeout.connect(choose_random_style)
timer.start(1000)
if isinstance(close_after, int):
close_timer = QtCore.QTimer()
close_timer.singleShot(close_after * 1000, close_demo)
app.setStyleSheet(StylePicker("default").get_sheet())
app.exec_()
"""
def main():
''' testing our run_demo function '''
run_demo(close_after=10, auto_test=True)
if __name__ == "__main__":
main()
"""
| '''
Includes a run_demo() function that acts as a basic demo for our
StylePicker and StylePickerWidget classes.
'''
import random
from qtpy import QtWidgets, QtCore
from qtstyles.picker import StylePicker
from qtstyles.widget import StylePickerWidget
def run_demo(close_after=None, auto_test=False):
'''
Args:
close_after: either None or integer. Number of seconds
after which the demo will close.
auto_test: boolean. If true then randomly select a style at intervals.
This function provides a demonstration in changing the application
style sheet.
1) The first option is to set the style sheet once with
app.setStyleSheet(StylePicker("default").get_sheet())
2) The second option is to include a style picker widget (QComboBox)
that'll change the application style sheet when a new style is selected
grid.addWidget(StylePickerWidget(), 2, 0)
'''
app = QtWidgets.QApplication.instance()
if not app:
app = QtWidgets.QApplication([])
win = QtWidgets.QMainWindow()
win.setWindowTitle("Style Sheets")
frame = QtWidgets.QFrame()
win.setCentralWidget(frame)
grid = QtWidgets.QGridLayout(frame)
grid.setHorizontalSpacing(5)
grid.setVerticalSpacing(5)
grid.addWidget(QtWidgets.QLabel("Username"), 0, 0)
grid.addWidget(QtWidgets.QLabel("Password"), 1, 0)
user_input = QtWidgets.QLineEdit()
pass_input = QtWidgets.QLineEdit()
grid.addWidget(user_input, 0, 1)
grid.addWidget(pass_input, 1, 1)
picker_widget = StylePickerWidget()
grid.addWidget(picker_widget, 2, 0)
grid.addWidget(QtWidgets.QPushButton("Submit"), 2, 1)
win.show()
def choose_random_style():
''' Select a random style from the picker_widget. '''
style_list = StylePicker().available_styles
chosen_style = random.choice(style_list)
picker_widget.setCurrentIndex(picker_widget.findText(chosen_style))
def close_demo():
''' Close the demo once 'close_after' seconds have elapsed. '''
win.close()
if auto_test:
timer = QtCore.QTimer()
timer.timeout.connect(choose_random_style)
timer.start(1000)
if isinstance(close_after, int):
close_timer = QtCore.QTimer()
close_timer.singleShot(close_after * 1000, close_demo)
app.setStyleSheet(StylePicker("default").get_sheet())
app.exec_()
"""
def main():
''' testing our run_demo function '''
run_demo(close_after=10, auto_test=True)
if __name__ == "__main__":
main()
"""
| en | 0.734235 | Includes a run_demo() function that acts as a basic demo for our
StylePicker and StylePickerWidget classes. Args:
close_after: either None or integer. Number of seconds
after which the demo will close.
auto_test: boolean. If true then randomly select a style at intervals.
This function provides a demonstration in changing the application
style sheet.
1) The first option is to set the style sheet once with
app.setStyleSheet(StylePicker("default").get_sheet())
2) The second option is to include a style picker widget (QComboBox)
that'll change the application style sheet when a new style is selected
grid.addWidget(StylePickerWidget(), 2, 0) Select a random style from the picker_widget. Close the demo once 'close_after' seconds have elapsed. def main():
''' testing our run_demo function '''
run_demo(close_after=10, auto_test=True)
if __name__ == "__main__":
main() | 3.166369 | 3 |
osuprocessor.py | jaflo/misc | 0 | 6616328 | <gh_stars>0
"""
Processes your osu! song library:
* Retrieves all osu! songs and copies them into a new folder
* Adds ID3-tags to the mp3s with the data provided by the beatmap
Run next to an Songs folder in your osu! folder
Also make a folder named "out" for the numbered files (00000.mp3 - 99999.mp3)
Requires eyed3: http://eyed3.nicfit.net/
Licensed under the MIT license
"""
import shutil
import eyed3 # tested on 0.7.5
import os
import re
from eyed3.id3 import Tag
path = "Songs/"
outpath = "out/"
counter = 0
for dirpath, dirs, files in os.walk(path):
for name in files:
if name.endswith((".mp3")):
if len(name)>4:
newname = outpath+("%05d"%counter)+".mp3"
shutil.copyfile(os.path.join(dirpath, name), newname)
counter+=1
audiofile = eyed3.load(newname)
if audiofile.tag is None:
audiofile.initTag()
if audiofile.info.time_secs > 10:
for file in os.listdir(dirpath):
if file.endswith(".osu"):
with open(os.path.join(dirpath, file), "r") as osumeta:
content = osumeta.read()
title = unicode(re.search("Title:(.*)\n", content).group(1), "UTF-8")
title = re.sub("\ \(tv\ size\)", "", title, flags=re.IGNORECASE)
audiofile.tag.title = title
artist = unicode(re.search("Artist:(.*)\n", content).group(1), "UTF-8")
if (len(artist)>0):
audiofile.tag.artist = artist
audiofile.tag.save(version=eyed3.id3.ID3_V2_3)
print audiofile.tag.title
else:
os.remove(newname)
counter-=1
| """
Processes your osu! song library:
* Retrieves all osu! songs and copies them into a new folder
* Adds ID3-tags to the mp3s with the data provided by the beatmap
Run next to an Songs folder in your osu! folder
Also make a folder named "out" for the numbered files (00000.mp3 - 99999.mp3)
Requires eyed3: http://eyed3.nicfit.net/
Licensed under the MIT license
"""
import shutil
import eyed3 # tested on 0.7.5
import os
import re
from eyed3.id3 import Tag
path = "Songs/"
outpath = "out/"
counter = 0
for dirpath, dirs, files in os.walk(path):
for name in files:
if name.endswith((".mp3")):
if len(name)>4:
newname = outpath+("%05d"%counter)+".mp3"
shutil.copyfile(os.path.join(dirpath, name), newname)
counter+=1
audiofile = eyed3.load(newname)
if audiofile.tag is None:
audiofile.initTag()
if audiofile.info.time_secs > 10:
for file in os.listdir(dirpath):
if file.endswith(".osu"):
with open(os.path.join(dirpath, file), "r") as osumeta:
content = osumeta.read()
title = unicode(re.search("Title:(.*)\n", content).group(1), "UTF-8")
title = re.sub("\ \(tv\ size\)", "", title, flags=re.IGNORECASE)
audiofile.tag.title = title
artist = unicode(re.search("Artist:(.*)\n", content).group(1), "UTF-8")
if (len(artist)>0):
audiofile.tag.artist = artist
audiofile.tag.save(version=eyed3.id3.ID3_V2_3)
print audiofile.tag.title
else:
os.remove(newname)
counter-=1 | en | 0.811177 | Processes your osu! song library: * Retrieves all osu! songs and copies them into a new folder * Adds ID3-tags to the mp3s with the data provided by the beatmap Run next to an Songs folder in your osu! folder Also make a folder named "out" for the numbered files (00000.mp3 - 99999.mp3) Requires eyed3: http://eyed3.nicfit.net/ Licensed under the MIT license # tested on 0.7.5 | 3.150233 | 3 |
python.graficos..py | luizgui05/repositorio-aula | 1 | 6616329 | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 30 07:51:59 2019
python ama o usuário
@author: 42895538859
"""
import numpy as np
import math
import matplotlib.pyplot as plt
x = np.linspace(0,23,20000)
y = np.exp(x*np.log(np.cos(math.factorial(14)*x)))
#soma = 0
#somam = 0
#for k in range(200):
# soma = np.cos(k*(x+11)) + np.sin(k*(x+11))
# somam = soma + somam
#y = somam*(1/200)
#
#plt.xlabel('hora do dia')
#plt.ylabel('fome')
#plt.title('fome ao decorrer do dia $x^2+\cos{x}$')
#
#
print(plt.plot(x,y))
#fig, axes = plt.subplots()
#pontos = [1,3,2,7,5,9,4]
#axes.plot(pontos, 'b',linestyle='dotted')
#axes.plot(pontos, 'y*', markersize=20)
#axes.axis([-1,8,0,10])
#plt.show()
| # -*- coding: utf-8 -*-
"""
Created on Tue Apr 30 07:51:59 2019
python ama o usuário
@author: 42895538859
"""
import numpy as np
import math
import matplotlib.pyplot as plt
x = np.linspace(0,23,20000)
y = np.exp(x*np.log(np.cos(math.factorial(14)*x)))
#soma = 0
#somam = 0
#for k in range(200):
# soma = np.cos(k*(x+11)) + np.sin(k*(x+11))
# somam = soma + somam
#y = somam*(1/200)
#
#plt.xlabel('hora do dia')
#plt.ylabel('fome')
#plt.title('fome ao decorrer do dia $x^2+\cos{x}$')
#
#
print(plt.plot(x,y))
#fig, axes = plt.subplots()
#pontos = [1,3,2,7,5,9,4]
#axes.plot(pontos, 'b',linestyle='dotted')
#axes.plot(pontos, 'y*', markersize=20)
#axes.axis([-1,8,0,10])
#plt.show()
| en | 0.182671 | # -*- coding: utf-8 -*- Created on Tue Apr 30 07:51:59 2019 python ama o usuário @author: 42895538859 #soma = 0 #somam = 0 #for k in range(200): # soma = np.cos(k*(x+11)) + np.sin(k*(x+11)) # somam = soma + somam #y = somam*(1/200) # #plt.xlabel('hora do dia') #plt.ylabel('fome') #plt.title('fome ao decorrer do dia $x^2+\cos{x}$') # # #fig, axes = plt.subplots() #pontos = [1,3,2,7,5,9,4] #axes.plot(pontos, 'b',linestyle='dotted') #axes.plot(pontos, 'y*', markersize=20) #axes.axis([-1,8,0,10]) #plt.show() | 3.322448 | 3 |
solutions/day1/p1/main.py | tosmun/AdventOfCode | 1 | 6616330 | <filename>solutions/day1/p1/main.py
floor = 0
with open('../input.txt', 'r') as fp:
while True:
buffer = fp.read(1024)
if buffer is None or len(buffer) <= 0:
break
for c in buffer:
if c == '(':
floor += 1
elif c == ')':
floor -= 1
print floor
| <filename>solutions/day1/p1/main.py
floor = 0
with open('../input.txt', 'r') as fp:
while True:
buffer = fp.read(1024)
if buffer is None or len(buffer) <= 0:
break
for c in buffer:
if c == '(':
floor += 1
elif c == ')':
floor -= 1
print floor
| none | 1 | 3.128719 | 3 | |
apns_worker/tests/test_feedback.py | bbits/apns-worker | 7 | 6616331 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from binascii import unhexlify
from datetime import datetime, timedelta
from struct import pack
import unittest
from apns_worker.apns import Feedback
_token1 = '1ba97ad1311307c189696e2369c89fa83d652611a6e3c7370881289e45668fd3'
_token2 = '<KEY>'
class FeedbackTestCase(unittest.TestCase):
_when = datetime(2015, 9, 1)
_timestamp = int((_when - datetime(1970, 1, 1)).total_seconds())
def test_parse_empty(self):
buf = b''
feedback, remainder = Feedback.parse(buf)
self.assertEqual(feedback, None)
self.assertEqual(remainder, buf)
def test_parse_partial_1(self):
buf = pack('!I', self._timestamp)
feedback, remainder = Feedback.parse(buf)
self.assertEqual(feedback, None)
self.assertEqual(remainder, buf)
def test_parse_partial_2(self):
buf = pack('!IH', self._timestamp, 32)
feedback, remainder = Feedback.parse(buf)
self.assertEqual(feedback, None)
self.assertEqual(remainder, buf)
def test_parse_partial_3(self):
buf = pack('!IH30s', self._timestamp, 32, unhexlify(_token1))
feedback, remainder = Feedback.parse(buf)
self.assertEqual(feedback, None)
self.assertEqual(remainder, buf)
def test_parse_one(self):
buf = pack('!IH32s', self._timestamp, 32, unhexlify(_token1))
feedback, remainder = Feedback.parse(buf)
self.assertEqual(feedback.token, _token1)
self.assertEqual(feedback.when, self._when)
self.assertEqual(remainder, b'')
def test_parse_leftovers(self):
buf = pack('!IH32sI', self._timestamp, 32, unhexlify(_token1), 0x01020304)
feedback, remainder = Feedback.parse(buf)
self.assertEqual(feedback.token, _token1)
self.assertEqual(feedback.when, self._when)
self.assertEqual(remainder, b'\x01\x02\x03\x04')
def test_parse_two(self):
buf = pack(
'!IH32sIH32s',
self._timestamp, 32, unhexlify(_token1),
self._timestamp + 1, 32, unhexlify(_token2)
)
f1, remainder = Feedback.parse(buf)
f2, remainder = Feedback.parse(remainder)
self.assertEqual(f1.token, _token1)
self.assertEqual(f1.when, self._when)
self.assertEqual(f2.token, _token2)
self.assertEqual(f2.when, self._when + timedelta(seconds=1))
self.assertEqual(remainder, b'')
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from binascii import unhexlify
from datetime import datetime, timedelta
from struct import pack
import unittest
from apns_worker.apns import Feedback
_token1 = '1ba97ad1311307c189696e2369c89fa83d652611a6e3c7370881289e45668fd3'
_token2 = '<KEY>'
class FeedbackTestCase(unittest.TestCase):
_when = datetime(2015, 9, 1)
_timestamp = int((_when - datetime(1970, 1, 1)).total_seconds())
def test_parse_empty(self):
buf = b''
feedback, remainder = Feedback.parse(buf)
self.assertEqual(feedback, None)
self.assertEqual(remainder, buf)
def test_parse_partial_1(self):
buf = pack('!I', self._timestamp)
feedback, remainder = Feedback.parse(buf)
self.assertEqual(feedback, None)
self.assertEqual(remainder, buf)
def test_parse_partial_2(self):
buf = pack('!IH', self._timestamp, 32)
feedback, remainder = Feedback.parse(buf)
self.assertEqual(feedback, None)
self.assertEqual(remainder, buf)
def test_parse_partial_3(self):
buf = pack('!IH30s', self._timestamp, 32, unhexlify(_token1))
feedback, remainder = Feedback.parse(buf)
self.assertEqual(feedback, None)
self.assertEqual(remainder, buf)
def test_parse_one(self):
buf = pack('!IH32s', self._timestamp, 32, unhexlify(_token1))
feedback, remainder = Feedback.parse(buf)
self.assertEqual(feedback.token, _token1)
self.assertEqual(feedback.when, self._when)
self.assertEqual(remainder, b'')
def test_parse_leftovers(self):
buf = pack('!IH32sI', self._timestamp, 32, unhexlify(_token1), 0x01020304)
feedback, remainder = Feedback.parse(buf)
self.assertEqual(feedback.token, _token1)
self.assertEqual(feedback.when, self._when)
self.assertEqual(remainder, b'\x01\x02\x03\x04')
def test_parse_two(self):
buf = pack(
'!IH32sIH32s',
self._timestamp, 32, unhexlify(_token1),
self._timestamp + 1, 32, unhexlify(_token2)
)
f1, remainder = Feedback.parse(buf)
f2, remainder = Feedback.parse(remainder)
self.assertEqual(f1.token, _token1)
self.assertEqual(f1.when, self._when)
self.assertEqual(f2.token, _token2)
self.assertEqual(f2.when, self._when + timedelta(seconds=1))
self.assertEqual(remainder, b'') | en | 0.769321 | # -*- coding: utf-8 -*- | 2.54747 | 3 |
sfdatacompare/settings.py | benedwards44/sfdatacompare | 15 | 6616332 | """
Django settings for sfdatacompare project.
Generated by 'django-admin startproject' using Django 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import urlparse
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = <KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ADMINS = (
('<NAME>', '<EMAIL>'),
)
ALLOWED_HOSTS = ['*']
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'comparedata',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'sfdatacompare.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(os.path.dirname(__file__), "templates"),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'debug': DEBUG,
},
},
]
WSGI_APPLICATION = 'sfdatacompare.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
import dj_database_url
DATABASES = {
'default': dj_database_url.config()
}
# Celery settings
BROKER_POOL_LIMIT = 1
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-gb'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
POSTMARK_API_KEY = os.environ.get('POSTMARK_API_KEY')
POSTMARK_SENDER = '<EMAIL>'
POSTMARK_TEST_MODE = False
POSTMARK_TRACK_OPENS = False
# SALESFORCE KEYS
SALESFORCE_CONSUMER_KEY = os.environ['SALESFORCE_CONSUMER_KEY']
SALESFORCE_CONSUMER_SECRET = os.environ['SALESFORCE_CONSUMER_SECRET']
SALESFORCE_REDIRECT_URI = 'https://sfdatacompare.herokuapp.com/oauth-response'
SALESFORCE_API_VERSION = int(os.environ['SALESFORCE_API_VERSION'])
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
ADMIN_MEDIA_PREFIX = '/static/admin/'
STATICFILES_DIRS = (
os.path.join(PROJECT_PATH, 'static'),
)
# Redis settings
redis_url = urlparse.urlparse(os.environ.get('REDIS_URL'))
CACHES = {
"default": {
"BACKEND": "redis_cache.RedisCache",
"LOCATION": "{0}:{1}".format(redis_url.hostname, redis_url.port),
"OPTIONS": {
"PASSWORD": <PASSWORD>,
"DB": 0,
}
}
}
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': BASE_DIR + '/debug.log',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins', 'file'],
'level': 'ERROR',
'propagate': True,
},
}
}
| """
Django settings for sfdatacompare project.
Generated by 'django-admin startproject' using Django 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import urlparse
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = <KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ADMINS = (
('<NAME>', '<EMAIL>'),
)
ALLOWED_HOSTS = ['*']
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'comparedata',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'sfdatacompare.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(os.path.dirname(__file__), "templates"),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'debug': DEBUG,
},
},
]
WSGI_APPLICATION = 'sfdatacompare.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
import dj_database_url
DATABASES = {
'default': dj_database_url.config()
}
# Celery settings
BROKER_POOL_LIMIT = 1
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-gb'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
POSTMARK_API_KEY = os.environ.get('POSTMARK_API_KEY')
POSTMARK_SENDER = '<EMAIL>'
POSTMARK_TEST_MODE = False
POSTMARK_TRACK_OPENS = False
# SALESFORCE KEYS
SALESFORCE_CONSUMER_KEY = os.environ['SALESFORCE_CONSUMER_KEY']
SALESFORCE_CONSUMER_SECRET = os.environ['SALESFORCE_CONSUMER_SECRET']
SALESFORCE_REDIRECT_URI = 'https://sfdatacompare.herokuapp.com/oauth-response'
SALESFORCE_API_VERSION = int(os.environ['SALESFORCE_API_VERSION'])
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
ADMIN_MEDIA_PREFIX = '/static/admin/'
STATICFILES_DIRS = (
os.path.join(PROJECT_PATH, 'static'),
)
# Redis settings
redis_url = urlparse.urlparse(os.environ.get('REDIS_URL'))
CACHES = {
"default": {
"BACKEND": "redis_cache.RedisCache",
"LOCATION": "{0}:{1}".format(redis_url.hostname, redis_url.port),
"OPTIONS": {
"PASSWORD": <PASSWORD>,
"DB": 0,
}
}
}
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': BASE_DIR + '/debug.log',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins', 'file'],
'level': 'ERROR',
'propagate': True,
},
}
}
| en | 0.684691 | Django settings for sfdatacompare project. Generated by 'django-admin startproject' using Django 1.8.4. For more information on this file, see https://docs.djangoproject.com/en/1.8/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.8/ref/settings/ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! # SECURITY WARNING: don't run with debug turned on in production! # Application definition # Database # https://docs.djangoproject.com/en/1.6/ref/settings/#databases # Celery settings # Internationalization # https://docs.djangoproject.com/en/1.8/topics/i18n/ # SALESFORCE KEYS # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.8/howto/static-files/ # Redis settings # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. | 1.950555 | 2 |
app/models.py | LauretteMongina/Tribal_Gallery | 0 | 6616333 | <reponame>LauretteMongina/Tribal_Gallery
from django.db import models
import datetime as dt
from cloudinary.models import CloudinaryField
# Create your models here.
class Category(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
def save_category(self):
self.save()
def delete_category(self):
self.delete()
def update_category(self, update):
self.name = update
self.save()
class Location(models.Model):
name = models.CharField(max_length=50, unique=True)
def save_location(self):
self.save()
def update_location(self, name):
self.name = name
self.save()
def delete_location(self):
self.delete()
def __str__(self):
return self.name
class Image(models.Model):
name = models.CharField(max_length=50)
description = models.TextField()
category = models.ForeignKey(Category, on_delete=models.CASCADE)
location = models.ForeignKey(Location, on_delete=models.CASCADE)
image = CloudinaryField('image')
created_at = models.DateTimeField(auto_now_add=True)
def save_image(self):
self.save()
def delete_image(self):
self.delete()
def update_image(self,name, description , location, category):
self.name = name
self.description = description
self.location = location
self.category = category
self.save()
@classmethod
def get_all_images(cls):
images = cls.objects.all()
return images
@classmethod
def get_image_by_id(cls, id):
image = Image.objects.filter(id=id).all()
return image
@classmethod
def filter_by_category(cls,category):
images = Image.objects.filter(category__name=category)
return images
@classmethod
def filter_by_location(cls,location):
images = Image.objects.filter(location__name=location)
return images
@classmethod
def search_image(cls, search_term):
images = cls.objects.filter(name__icontains=search_term)
return images
def __str__(self):
return self.name
| from django.db import models
import datetime as dt
from cloudinary.models import CloudinaryField
# Create your models here.
class Category(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
def save_category(self):
self.save()
def delete_category(self):
self.delete()
def update_category(self, update):
self.name = update
self.save()
class Location(models.Model):
name = models.CharField(max_length=50, unique=True)
def save_location(self):
self.save()
def update_location(self, name):
self.name = name
self.save()
def delete_location(self):
self.delete()
def __str__(self):
return self.name
class Image(models.Model):
name = models.CharField(max_length=50)
description = models.TextField()
category = models.ForeignKey(Category, on_delete=models.CASCADE)
location = models.ForeignKey(Location, on_delete=models.CASCADE)
image = CloudinaryField('image')
created_at = models.DateTimeField(auto_now_add=True)
def save_image(self):
self.save()
def delete_image(self):
self.delete()
def update_image(self,name, description , location, category):
self.name = name
self.description = description
self.location = location
self.category = category
self.save()
@classmethod
def get_all_images(cls):
images = cls.objects.all()
return images
@classmethod
def get_image_by_id(cls, id):
image = Image.objects.filter(id=id).all()
return image
@classmethod
def filter_by_category(cls,category):
images = Image.objects.filter(category__name=category)
return images
@classmethod
def filter_by_location(cls,location):
images = Image.objects.filter(location__name=location)
return images
@classmethod
def search_image(cls, search_term):
images = cls.objects.filter(name__icontains=search_term)
return images
def __str__(self):
return self.name | en | 0.963489 | # Create your models here. | 2.366422 | 2 |
natrixclient/command/performance/webdriver/browser.py | creditease-natrix/natrixclient | 9 | 6616334 | <reponame>creditease-natrix/natrixclient
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
class Browser(object):
def __init__(self, headless=True):
self.browser = None
self.headless = headless
"""
parames format
{
"implicitly_wait": 3,
"page_load_timeout": 4,
"script_timeout": 5,
"use_cookies": False,
"proxy": {
"type": "static/pac/auto"
"proxy": {
# see proxy below
}
}
}
.implicitly_wait(2)
time unit is second
Sets a sticky timeout to implicitly wait for an element to be found, or a command to complete.
This method only needs to be called one time per session.
To set the timeout for calls to execute_async_script, see set_script_timeout.
.set_script_timeout(3)
Set the amount of time that the script should wait during an execute_async_script call before throwing an error.
.set_page_load_timeout(300)
Set the amount of time to wait for a page load to complete before throwing an error.
.add_cookie(cookie)
add cookie, ignore
.delete_all_cookies()
delete cookie
proxy use
https://github.com/SeleniumHQ/selenium/blob/master/py/test/selenium/webdriver/common/proxy_tests.py
MANUAL_PROXY = {
'httpProxy': 'some.url:1234',
'ftpProxy': 'ftp.proxy',
'noProxy': 'localhost, foo.localhost',
'sslProxy': 'ssl.proxy:1234',
'socksProxy': 'socks.proxy:65555',
'socksUsername': 'test',
'socksPassword': '<PASSWORD>',
}
PAC_PROXY = {
'proxyAutoconfigUrl': 'http://pac.url:1234',
}
AUTODETECT_PROXY = {
'autodetect': True,
}
screenshot
.get_screenshot_as_file(self, filename)
.save_screenshot(self, filename)
.get_screenshot_as_png(self)
.get_screenshot_as_base64(self)
desired_capabilities
{
u 'takesScreenshot': True,
u 'acceptSslCerts': True,
u 'networkConnectionEnabled': False,
u 'mobileEmulationEnabled': False,
u 'unexpectedAlertBehaviour': u '',
u 'applicationCacheEnabled': False,
u 'locationContextEnabled': True,
u 'rotatable': False,
u 'chrome': {
u 'chromedriverVersion': u '2.33.506092 (733a02544d189eeb751fe0d7ddca79a0ee28cce4)',
u 'userDataDir': u '/tmp/.org.chromium.Chromium.3movVU'
},
u 'hasTouchScreen': False,
u 'platform': u 'Linux',
u 'version': u '69.0.3497.100',
u 'nativeEvents': True,
u 'handlesAlerts': True,
u 'takesHeapSnapshot': True,
u 'javascriptEnabled': True,
u 'databaseEnabled': False,
u 'browserName': u 'chrome',
u 'webStorageEnabled': True,
u 'browserConnectionEnabled': False,
u 'cssSelectorsEnabled': True,
u 'setWindowRect': True,
u 'pageLoadStrategy': u 'normal'
}
https://www.w3.org/TR/navigation-timing/
https://www.w3.org/TR/navigation-timing-2/
https://developer.mozilla.org/en-US/docs/Web/API/Performance
https://www.ibm.com/developerworks/cn/data/library/bd-r-javascript-w3c/
Performance Timing Events flow
navigationStart -> redirectStart -> redirectEnd -> fetchStart -> domainLookupStart -> domainLookupEnd
-> connectStart -> connectEnd -> requestStart -> responseStart -> responseEnd
-> domLoading -> domInteractive -> domContentLoaded -> domComplete -> loadEventStart -> loadEventEnd
window.performance.timing返回值
navigationStart 准备加载新页面的起始时间
redirectStart 如果发生了HTTP重定向,并且从导航开始,中间的每次重定向,都和当前文档同域的话,就返回开始重定向的timing.fetchStart的值。其他情况,则返回0
redirectEnd 如果发生了HTTP重定向,并且从导航开始,中间的每次重定向,都和当前文档同域的话,就返回最后一次重定向,接收到最后一个字节数据后的那个时间.其他情况则返回0
fetchStart 如果一个新的资源获取被发起,则 fetchStart必须返回用户代理开始检查其相关缓存的那个时间,其他情况则返回开始获取该资源的时间
domainLookupStart 返回用户代理对当前文档所属域进行DNS查询开始的时间。如果此请求没有DNS查询过程,如长连接,资源cache,甚至是本地资源等。 那么就返回 fetchStart的值
domainLookupEnd 返回用户代理对结束对当前文档所属域进行DNS查询的时间。如果此请求没有DNS查询过程,如长连接,资源cache,甚至是本地资源等。那么就返回 fetchStart的值
connectStart 返回用户代理向服务器服务器请求文档,开始建立连接的那个时间,如果此连接是一个长连接,又或者直接从缓存中获取资源(即没有与服务器建立连接)。则返回domainLookupEnd的值
(secureConnectionStart) 可选特性。用户代理如果没有对应的东东,就要把这个设置为undefined。如果有这个东东,并且是HTTPS协议,那么就要返回开始SSL握手的那个时间。 如果不是HTTPS, 那么就返回0
connectEnd 返回用户代理向服务器服务器请求文档,建立连接成功后的那个时间,如果此连接是一个长连接,又或者直接从缓存中获取资源(即没有与服务器建立连接)。则返回domainLookupEnd的值
requestStart 返回从服务器、缓存、本地资源等,开始请求文档的时间
responseStart 返回用户代理从服务器、缓存、本地资源中,接收到第一个字节数据的时间
responseEnd 返回用户代理接收到最后一个字符的时间,和当前连接被关闭的时间中,更早的那个。同样,文档可能来自服务器、缓存、或本地资源
domLoading
开始渲染dom的时间
返回用户代理把其文档的 "current document readiness" 设置为 "loading"的时候
domInteractive
返回用户代理把其文档的 "current document readiness" 设置为 "interactive"的时候.
domContentLoadedEventStart
开始触发DomContentLoadedEvent事件的时间
返回文档发生 DOMContentLoaded 事件的时间
domContentLoadedEventEnd
DomContentLoadedEvent事件结束的时间
返回文档 DOMContentLoaded 事件的结束时间
domComplete
返回用户代理把其文档的 "current document readiness" 设置为 "complete"的时候
loadEventStart
文档触发load事件的时间。如果load事件没有触发,那么该接口就返回0
loadEventEnd
文档触发load事件结束后的时间。如果load事件没有触发,那么该接口就返回0
navigation返回值计算
总时间 loadEventEnd - navigationStart
重定向耗时 redirectEnd - redirectStart
DNS缓存时间 domainLookupStart - fetchStart;
DNS查询耗时 domainLookupEnd - domainLookupStart
TCP连接耗时 connectEnd - connectStart
SSL连接耗时 connectEnd - secureConnectionStart
Request请求耗时 responseStart - requestStart
Response返回耗时 responseEnd - responseStart
解析DOM耗时 domContentLoadedEventEnd - domLoading
加载Load事件耗时 loadEventEnd - loadEventStart
TTFB
读取页面第一个字节的时间
TTFB 即 Time To First Byte 的意思
https://en.wikipedia.org/wiki/Time_To_First_Byte
responseStart - navigationStart;
白屏时间
responseStart - navigationStart
domready时间
domContentLoadedEventEnd - navigationStart
onload时间
执行 onload 回调函数的时间
loadEventEnd - navigationStart
https://www.w3.org/TR/resource-timing-1/
https://www.w3.org/TR/resource-timing-2/
https://developer.mozilla.org/en-US/docs/Web/API/PerformanceResourceTiming
所有网络请求都被视为资源。通过网络对它们进行检索时,资源具有不同生命周期
Resource Timing API 为网络事件(如重定向的开始和结束事件, DNS查找的开始和结束事件, 请求开始, 响应开始和结束时间等)生成有高分辨率时间戳( high-resolution timestamps )的资源加载时间线, 并提供了资源大小和资源类型
通过Resource Timing API可以获取和分析应用资源加载的详细网络计时数据, 应用程序可以使用时间度量标准来确定加载特定资源所需要的时间,比如 XMLHttpRequest、<SVG>、图片、或者脚本
resource timing 返回值
name
Returns the resources URL
资源URL
请求资源的绝对地址, 即便请求重定向到一个新的地址此属性也不会改变
entryType
Returns "resource"
统一返回resource
PerformanceResourceTiming 对象的 entryType 属性永远返回字符串 "resource"
initiatorType
代表了资源类型
简单来说 initiatorType 属性返回的内容代表资源是从哪里发生的请求行为.
initiatorType 属性会返回下面列表中列出的字符串中的其中一个:
css 如果请求是从 CSS 中的 url() 指令发出的
xmlhttprequest 通过 XMLHttpRequest 对象发出的请求
fetch 通过 Fetch 方法发出的请求
beacon 通过 beacon 方法发出的请求
link 通过 link 标签发出的请求
script 通过 script 标签发出的请求
iframe 通过 iframe 标签发出的请求
other 没有匹配上面条件的请求
startTime
Returns the timestamp for the time a resource fetch started. This value is equivalent to PerformanceEntry.fetchStart
获取资源的开始时间
用户代理开始排队获取资源的时间. 如果 HTTP 重定则该属性与 redirectStart 属性相同, 其他情况该属性将与 fetchStart 相同
fetchStart
A DOMHighResTimeStamp immediately before the browser starts to fetch the resource.
与startTime相同
redirectStart
A DOMHighResTimeStamp that represents the start time of the fetch which initiates the redirect.
重定向开始时间
redirectEnd
A DOMHighResTimeStamp immediately after receiving the last byte of the response of the last redirect.
重定向结束时间
duration
Returns a timestamp that is the difference between the responseEnd and the startTime properties.
startTime与responseEnd的差值
domainLookupStart
A DOMHighResTimeStamp immediately before the browser starts the domain name lookup for the resource.
域名解析开始时间
domainLookupEnd
A DOMHighResTimeStamp representing the time immediately after the browser finishes the domain name lookup for the resource
域名解析结束时间
connectStart
浏览器开始和服务器建立连接的时间
secureConnectionStart
浏览器在当前连接下,开始与服务器建立安全握手的时间
connectEnd
浏览器与服务器建立连接结束时间
requestStart
A DOMHighResTimeStamp immediately before the browser starts requesting the resource from the server.
responseStart
A DOMHighResTimeStamp immediately after the browser receives the first byte of the response from the server.
responseEnd
A DOMHighResTimeStamp immediately after the browser receives the last byte of the resource or immediately before the transport connection is closed, whichever comes first.
transferSize
A number representing the size (in octets) of the fetched resource. The size includes the response header fields plus the response payload body
获取资源的大小(采用八进制, 请注意转换), 大小包含了response头部和实体
encodedBodySize
A number representing the size (in octets) received from the fetch (HTTP or cache), of the payload body, before removing any applied content-codings.
表示从 HTTP 网络或缓存中接收到的有效内容主体 (Payload Body) 的大小(在删除所有应用内容编码之前)
decodedBodySize
A number that is the size (in octets) received from the fetch (HTTP or cache) of the message body, after removing any applied content-codings.
表示从 HTTP 网络或缓存中接收到的消息主体 (Message Body) 的大小(在删除所有应用内容编码之后)
resourcce timing 计算公式
https://www.cnblogs.com/zhuyang/p/4789020.html
总时间
duration
loadEventEnd - startTime
重定向耗时 redirectEnd - redirectStart
DNS缓存时间 domainLookupStart - fetchStart;
DNS查询耗时 domainLookupEnd - domainLookupStart
TCP连接耗时 connectEnd - connectStart
SSL连接耗时 connectEnd - secureConnectionStart
Request请求耗时 responseStart - requestStart
Response返回耗时 responseEnd - responseStart
https://stackoverflow.com/questions/6509628/how-to-get-http-response-code-using-selenium-webdriver
https://github.com/wkeeling/selenium-wire
"""
def get_performance(self, url, default_timeout=60, page_timeout=60, script_timeout=60, delete_cookies=False):
self.browser.implicitly_wait(default_timeout)
self.browser.set_page_load_timeout(page_timeout)
self.browser.set_script_timeout(script_timeout)
if delete_cookies:
self.browser.delete_all_cookies()
performance = {}
self.browser.get(url)
timing = self.browser.execute_script("return window.performance.timing")
performance["timing"] = timing
# resources = self.browser.execute_script("window.performance.getEntriesByType(\"resource\")")
resources = self.browser.execute_script("return window.performance.getEntries()")
performance["resources"] = resources
# close, Closes the current window.
# quit, Quits the driver and closes every associated window.
# 所有检测使用同一个browser实例可能导致数据混乱,前期每个URL开一个browser
# 但这样会带来较大的性能问题
# TODO
# 后期需要做成进程池,加锁
self.browser.quit()
return performance
"""
navigation timing
https://www.w3.org/TR/navigation-timing/
"""
def get_performance_timing(self, url):
self.browser.get(url)
# the result is dict
perf = self.browser.execute_script("return window.performance.timing")
self.browser.close()
return json.dumps(perf)
def get_performance_resource(self, url):
self.browser.get(url)
# the result is list
perf = self.browser.execute_script("return window.performance.getEntries()")
self.browser.close()
return json.dumps(perf)
if __name__ == '__main__':
from natrixclient.command.performance.webdriver import Firefox
browser = Firefox()
# print browser.get_performance("http://www.baidu.com")
# browser.get_performance_memory("http://www.baidu.com")
# print browser.get_performance_timing("http://www.baidu.com")
# print browser.get_performance_resource("http://www.baidu.com")
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
class Browser(object):
def __init__(self, headless=True):
self.browser = None
self.headless = headless
"""
parames format
{
"implicitly_wait": 3,
"page_load_timeout": 4,
"script_timeout": 5,
"use_cookies": False,
"proxy": {
"type": "static/pac/auto"
"proxy": {
# see proxy below
}
}
}
.implicitly_wait(2)
time unit is second
Sets a sticky timeout to implicitly wait for an element to be found, or a command to complete.
This method only needs to be called one time per session.
To set the timeout for calls to execute_async_script, see set_script_timeout.
.set_script_timeout(3)
Set the amount of time that the script should wait during an execute_async_script call before throwing an error.
.set_page_load_timeout(300)
Set the amount of time to wait for a page load to complete before throwing an error.
.add_cookie(cookie)
add cookie, ignore
.delete_all_cookies()
delete cookie
proxy use
https://github.com/SeleniumHQ/selenium/blob/master/py/test/selenium/webdriver/common/proxy_tests.py
MANUAL_PROXY = {
'httpProxy': 'some.url:1234',
'ftpProxy': 'ftp.proxy',
'noProxy': 'localhost, foo.localhost',
'sslProxy': 'ssl.proxy:1234',
'socksProxy': 'socks.proxy:65555',
'socksUsername': 'test',
'socksPassword': '<PASSWORD>',
}
PAC_PROXY = {
'proxyAutoconfigUrl': 'http://pac.url:1234',
}
AUTODETECT_PROXY = {
'autodetect': True,
}
screenshot
.get_screenshot_as_file(self, filename)
.save_screenshot(self, filename)
.get_screenshot_as_png(self)
.get_screenshot_as_base64(self)
desired_capabilities
{
u 'takesScreenshot': True,
u 'acceptSslCerts': True,
u 'networkConnectionEnabled': False,
u 'mobileEmulationEnabled': False,
u 'unexpectedAlertBehaviour': u '',
u 'applicationCacheEnabled': False,
u 'locationContextEnabled': True,
u 'rotatable': False,
u 'chrome': {
u 'chromedriverVersion': u '2.33.506092 (733a02544d189eeb751fe0d7ddca79a0ee28cce4)',
u 'userDataDir': u '/tmp/.org.chromium.Chromium.3movVU'
},
u 'hasTouchScreen': False,
u 'platform': u 'Linux',
u 'version': u '69.0.3497.100',
u 'nativeEvents': True,
u 'handlesAlerts': True,
u 'takesHeapSnapshot': True,
u 'javascriptEnabled': True,
u 'databaseEnabled': False,
u 'browserName': u 'chrome',
u 'webStorageEnabled': True,
u 'browserConnectionEnabled': False,
u 'cssSelectorsEnabled': True,
u 'setWindowRect': True,
u 'pageLoadStrategy': u 'normal'
}
https://www.w3.org/TR/navigation-timing/
https://www.w3.org/TR/navigation-timing-2/
https://developer.mozilla.org/en-US/docs/Web/API/Performance
https://www.ibm.com/developerworks/cn/data/library/bd-r-javascript-w3c/
Performance Timing Events flow
navigationStart -> redirectStart -> redirectEnd -> fetchStart -> domainLookupStart -> domainLookupEnd
-> connectStart -> connectEnd -> requestStart -> responseStart -> responseEnd
-> domLoading -> domInteractive -> domContentLoaded -> domComplete -> loadEventStart -> loadEventEnd
window.performance.timing返回值
navigationStart 准备加载新页面的起始时间
redirectStart 如果发生了HTTP重定向,并且从导航开始,中间的每次重定向,都和当前文档同域的话,就返回开始重定向的timing.fetchStart的值。其他情况,则返回0
redirectEnd 如果发生了HTTP重定向,并且从导航开始,中间的每次重定向,都和当前文档同域的话,就返回最后一次重定向,接收到最后一个字节数据后的那个时间.其他情况则返回0
fetchStart 如果一个新的资源获取被发起,则 fetchStart必须返回用户代理开始检查其相关缓存的那个时间,其他情况则返回开始获取该资源的时间
domainLookupStart 返回用户代理对当前文档所属域进行DNS查询开始的时间。如果此请求没有DNS查询过程,如长连接,资源cache,甚至是本地资源等。 那么就返回 fetchStart的值
domainLookupEnd 返回用户代理对结束对当前文档所属域进行DNS查询的时间。如果此请求没有DNS查询过程,如长连接,资源cache,甚至是本地资源等。那么就返回 fetchStart的值
connectStart 返回用户代理向服务器服务器请求文档,开始建立连接的那个时间,如果此连接是一个长连接,又或者直接从缓存中获取资源(即没有与服务器建立连接)。则返回domainLookupEnd的值
(secureConnectionStart) 可选特性。用户代理如果没有对应的东东,就要把这个设置为undefined。如果有这个东东,并且是HTTPS协议,那么就要返回开始SSL握手的那个时间。 如果不是HTTPS, 那么就返回0
connectEnd 返回用户代理向服务器服务器请求文档,建立连接成功后的那个时间,如果此连接是一个长连接,又或者直接从缓存中获取资源(即没有与服务器建立连接)。则返回domainLookupEnd的值
requestStart 返回从服务器、缓存、本地资源等,开始请求文档的时间
responseStart 返回用户代理从服务器、缓存、本地资源中,接收到第一个字节数据的时间
responseEnd 返回用户代理接收到最后一个字符的时间,和当前连接被关闭的时间中,更早的那个。同样,文档可能来自服务器、缓存、或本地资源
domLoading
开始渲染dom的时间
返回用户代理把其文档的 "current document readiness" 设置为 "loading"的时候
domInteractive
返回用户代理把其文档的 "current document readiness" 设置为 "interactive"的时候.
domContentLoadedEventStart
开始触发DomContentLoadedEvent事件的时间
返回文档发生 DOMContentLoaded 事件的时间
domContentLoadedEventEnd
DomContentLoadedEvent事件结束的时间
返回文档 DOMContentLoaded 事件的结束时间
domComplete
返回用户代理把其文档的 "current document readiness" 设置为 "complete"的时候
loadEventStart
文档触发load事件的时间。如果load事件没有触发,那么该接口就返回0
loadEventEnd
文档触发load事件结束后的时间。如果load事件没有触发,那么该接口就返回0
navigation返回值计算
总时间 loadEventEnd - navigationStart
重定向耗时 redirectEnd - redirectStart
DNS缓存时间 domainLookupStart - fetchStart;
DNS查询耗时 domainLookupEnd - domainLookupStart
TCP连接耗时 connectEnd - connectStart
SSL连接耗时 connectEnd - secureConnectionStart
Request请求耗时 responseStart - requestStart
Response返回耗时 responseEnd - responseStart
解析DOM耗时 domContentLoadedEventEnd - domLoading
加载Load事件耗时 loadEventEnd - loadEventStart
TTFB
读取页面第一个字节的时间
TTFB 即 Time To First Byte 的意思
https://en.wikipedia.org/wiki/Time_To_First_Byte
responseStart - navigationStart;
白屏时间
responseStart - navigationStart
domready时间
domContentLoadedEventEnd - navigationStart
onload时间
执行 onload 回调函数的时间
loadEventEnd - navigationStart
https://www.w3.org/TR/resource-timing-1/
https://www.w3.org/TR/resource-timing-2/
https://developer.mozilla.org/en-US/docs/Web/API/PerformanceResourceTiming
所有网络请求都被视为资源。通过网络对它们进行检索时,资源具有不同生命周期
Resource Timing API 为网络事件(如重定向的开始和结束事件, DNS查找的开始和结束事件, 请求开始, 响应开始和结束时间等)生成有高分辨率时间戳( high-resolution timestamps )的资源加载时间线, 并提供了资源大小和资源类型
通过Resource Timing API可以获取和分析应用资源加载的详细网络计时数据, 应用程序可以使用时间度量标准来确定加载特定资源所需要的时间,比如 XMLHttpRequest、<SVG>、图片、或者脚本
resource timing 返回值
name
Returns the resources URL
资源URL
请求资源的绝对地址, 即便请求重定向到一个新的地址此属性也不会改变
entryType
Returns "resource"
统一返回resource
PerformanceResourceTiming 对象的 entryType 属性永远返回字符串 "resource"
initiatorType
代表了资源类型
简单来说 initiatorType 属性返回的内容代表资源是从哪里发生的请求行为.
initiatorType 属性会返回下面列表中列出的字符串中的其中一个:
css 如果请求是从 CSS 中的 url() 指令发出的
xmlhttprequest 通过 XMLHttpRequest 对象发出的请求
fetch 通过 Fetch 方法发出的请求
beacon 通过 beacon 方法发出的请求
link 通过 link 标签发出的请求
script 通过 script 标签发出的请求
iframe 通过 iframe 标签发出的请求
other 没有匹配上面条件的请求
startTime
Returns the timestamp for the time a resource fetch started. This value is equivalent to PerformanceEntry.fetchStart
获取资源的开始时间
用户代理开始排队获取资源的时间. 如果 HTTP 重定则该属性与 redirectStart 属性相同, 其他情况该属性将与 fetchStart 相同
fetchStart
A DOMHighResTimeStamp immediately before the browser starts to fetch the resource.
与startTime相同
redirectStart
A DOMHighResTimeStamp that represents the start time of the fetch which initiates the redirect.
重定向开始时间
redirectEnd
A DOMHighResTimeStamp immediately after receiving the last byte of the response of the last redirect.
重定向结束时间
duration
Returns a timestamp that is the difference between the responseEnd and the startTime properties.
startTime与responseEnd的差值
domainLookupStart
A DOMHighResTimeStamp immediately before the browser starts the domain name lookup for the resource.
域名解析开始时间
domainLookupEnd
A DOMHighResTimeStamp representing the time immediately after the browser finishes the domain name lookup for the resource
域名解析结束时间
connectStart
浏览器开始和服务器建立连接的时间
secureConnectionStart
浏览器在当前连接下,开始与服务器建立安全握手的时间
connectEnd
浏览器与服务器建立连接结束时间
requestStart
A DOMHighResTimeStamp immediately before the browser starts requesting the resource from the server.
responseStart
A DOMHighResTimeStamp immediately after the browser receives the first byte of the response from the server.
responseEnd
A DOMHighResTimeStamp immediately after the browser receives the last byte of the resource or immediately before the transport connection is closed, whichever comes first.
transferSize
A number representing the size (in octets) of the fetched resource. The size includes the response header fields plus the response payload body
获取资源的大小(采用八进制, 请注意转换), 大小包含了response头部和实体
encodedBodySize
A number representing the size (in octets) received from the fetch (HTTP or cache), of the payload body, before removing any applied content-codings.
表示从 HTTP 网络或缓存中接收到的有效内容主体 (Payload Body) 的大小(在删除所有应用内容编码之前)
decodedBodySize
A number that is the size (in octets) received from the fetch (HTTP or cache) of the message body, after removing any applied content-codings.
表示从 HTTP 网络或缓存中接收到的消息主体 (Message Body) 的大小(在删除所有应用内容编码之后)
resourcce timing 计算公式
https://www.cnblogs.com/zhuyang/p/4789020.html
总时间
duration
loadEventEnd - startTime
重定向耗时 redirectEnd - redirectStart
DNS缓存时间 domainLookupStart - fetchStart;
DNS查询耗时 domainLookupEnd - domainLookupStart
TCP连接耗时 connectEnd - connectStart
SSL连接耗时 connectEnd - secureConnectionStart
Request请求耗时 responseStart - requestStart
Response返回耗时 responseEnd - responseStart
https://stackoverflow.com/questions/6509628/how-to-get-http-response-code-using-selenium-webdriver
https://github.com/wkeeling/selenium-wire
"""
def get_performance(self, url, default_timeout=60, page_timeout=60, script_timeout=60, delete_cookies=False):
self.browser.implicitly_wait(default_timeout)
self.browser.set_page_load_timeout(page_timeout)
self.browser.set_script_timeout(script_timeout)
if delete_cookies:
self.browser.delete_all_cookies()
performance = {}
self.browser.get(url)
timing = self.browser.execute_script("return window.performance.timing")
performance["timing"] = timing
# resources = self.browser.execute_script("window.performance.getEntriesByType(\"resource\")")
resources = self.browser.execute_script("return window.performance.getEntries()")
performance["resources"] = resources
# close, Closes the current window.
# quit, Quits the driver and closes every associated window.
# 所有检测使用同一个browser实例可能导致数据混乱,前期每个URL开一个browser
# 但这样会带来较大的性能问题
# TODO
# 后期需要做成进程池,加锁
self.browser.quit()
return performance
"""
navigation timing
https://www.w3.org/TR/navigation-timing/
"""
def get_performance_timing(self, url):
self.browser.get(url)
# the result is dict
perf = self.browser.execute_script("return window.performance.timing")
self.browser.close()
return json.dumps(perf)
def get_performance_resource(self, url):
self.browser.get(url)
# the result is list
perf = self.browser.execute_script("return window.performance.getEntries()")
self.browser.close()
return json.dumps(perf)
if __name__ == '__main__':
from natrixclient.command.performance.webdriver import Firefox
browser = Firefox()
# print browser.get_performance("http://www.baidu.com")
# browser.get_performance_memory("http://www.baidu.com")
# print browser.get_performance_timing("http://www.baidu.com")
# print browser.get_performance_resource("http://www.baidu.com") | zh | 0.397455 | #!/usr/bin/env python # -*- coding: utf-8 -*- parames format { "implicitly_wait": 3, "page_load_timeout": 4, "script_timeout": 5, "use_cookies": False, "proxy": { "type": "static/pac/auto" "proxy": { # see proxy below } } } .implicitly_wait(2) time unit is second Sets a sticky timeout to implicitly wait for an element to be found, or a command to complete. This method only needs to be called one time per session. To set the timeout for calls to execute_async_script, see set_script_timeout. .set_script_timeout(3) Set the amount of time that the script should wait during an execute_async_script call before throwing an error. .set_page_load_timeout(300) Set the amount of time to wait for a page load to complete before throwing an error. .add_cookie(cookie) add cookie, ignore .delete_all_cookies() delete cookie proxy use https://github.com/SeleniumHQ/selenium/blob/master/py/test/selenium/webdriver/common/proxy_tests.py MANUAL_PROXY = { 'httpProxy': 'some.url:1234', 'ftpProxy': 'ftp.proxy', 'noProxy': 'localhost, foo.localhost', 'sslProxy': 'ssl.proxy:1234', 'socksProxy': 'socks.proxy:65555', 'socksUsername': 'test', 'socksPassword': '<PASSWORD>', } PAC_PROXY = { 'proxyAutoconfigUrl': 'http://pac.url:1234', } AUTODETECT_PROXY = { 'autodetect': True, } screenshot .get_screenshot_as_file(self, filename) .save_screenshot(self, filename) .get_screenshot_as_png(self) .get_screenshot_as_base64(self) desired_capabilities { u 'takesScreenshot': True, u 'acceptSslCerts': True, u 'networkConnectionEnabled': False, u 'mobileEmulationEnabled': False, u 'unexpectedAlertBehaviour': u '', u 'applicationCacheEnabled': False, u 'locationContextEnabled': True, u 'rotatable': False, u 'chrome': { u 'chromedriverVersion': u '2.33.506092 (733a02544d189eeb751fe0d7ddca79a0ee28cce4)', u 'userDataDir': u '/tmp/.org.chromium.Chromium.3movVU' }, u 'hasTouchScreen': False, u 'platform': u 'Linux', u 'version': u 
'69.0.3497.100', u 'nativeEvents': True, u 'handlesAlerts': True, u 'takesHeapSnapshot': True, u 'javascriptEnabled': True, u 'databaseEnabled': False, u 'browserName': u 'chrome', u 'webStorageEnabled': True, u 'browserConnectionEnabled': False, u 'cssSelectorsEnabled': True, u 'setWindowRect': True, u 'pageLoadStrategy': u 'normal' } https://www.w3.org/TR/navigation-timing/ https://www.w3.org/TR/navigation-timing-2/ https://developer.mozilla.org/en-US/docs/Web/API/Performance https://www.ibm.com/developerworks/cn/data/library/bd-r-javascript-w3c/ Performance Timing Events flow navigationStart -> redirectStart -> redirectEnd -> fetchStart -> domainLookupStart -> domainLookupEnd -> connectStart -> connectEnd -> requestStart -> responseStart -> responseEnd -> domLoading -> domInteractive -> domContentLoaded -> domComplete -> loadEventStart -> loadEventEnd window.performance.timing返回值 navigationStart 准备加载新页面的起始时间 redirectStart 如果发生了HTTP重定向,并且从导航开始,中间的每次重定向,都和当前文档同域的话,就返回开始重定向的timing.fetchStart的值。其他情况,则返回0 redirectEnd 如果发生了HTTP重定向,并且从导航开始,中间的每次重定向,都和当前文档同域的话,就返回最后一次重定向,接收到最后一个字节数据后的那个时间.其他情况则返回0 fetchStart 如果一个新的资源获取被发起,则 fetchStart必须返回用户代理开始检查其相关缓存的那个时间,其他情况则返回开始获取该资源的时间 domainLookupStart 返回用户代理对当前文档所属域进行DNS查询开始的时间。如果此请求没有DNS查询过程,如长连接,资源cache,甚至是本地资源等。 那么就返回 fetchStart的值 domainLookupEnd 返回用户代理对结束对当前文档所属域进行DNS查询的时间。如果此请求没有DNS查询过程,如长连接,资源cache,甚至是本地资源等。那么就返回 fetchStart的值 connectStart 返回用户代理向服务器服务器请求文档,开始建立连接的那个时间,如果此连接是一个长连接,又或者直接从缓存中获取资源(即没有与服务器建立连接)。则返回domainLookupEnd的值 (secureConnectionStart) 可选特性。用户代理如果没有对应的东东,就要把这个设置为undefined。如果有这个东东,并且是HTTPS协议,那么就要返回开始SSL握手的那个时间。 如果不是HTTPS, 那么就返回0 connectEnd 返回用户代理向服务器服务器请求文档,建立连接成功后的那个时间,如果此连接是一个长连接,又或者直接从缓存中获取资源(即没有与服务器建立连接)。则返回domainLookupEnd的值 requestStart 返回从服务器、缓存、本地资源等,开始请求文档的时间 responseStart 返回用户代理从服务器、缓存、本地资源中,接收到第一个字节数据的时间 responseEnd 返回用户代理接收到最后一个字符的时间,和当前连接被关闭的时间中,更早的那个。同样,文档可能来自服务器、缓存、或本地资源 domLoading 开始渲染dom的时间 返回用户代理把其文档的 "current document readiness" 设置为 "loading"的时候 domInteractive 返回用户代理把其文档的 "current document 
readiness" 设置为 "interactive"的时候. domContentLoadedEventStart 开始触发DomContentLoadedEvent事件的时间 返回文档发生 DOMContentLoaded 事件的时间 domContentLoadedEventEnd DomContentLoadedEvent事件结束的时间 返回文档 DOMContentLoaded 事件的结束时间 domComplete 返回用户代理把其文档的 "current document readiness" 设置为 "complete"的时候 loadEventStart 文档触发load事件的时间。如果load事件没有触发,那么该接口就返回0 loadEventEnd 文档触发load事件结束后的时间。如果load事件没有触发,那么该接口就返回0 navigation返回值计算 总时间 loadEventEnd - navigationStart 重定向耗时 redirectEnd - redirectStart DNS缓存时间 domainLookupStart - fetchStart; DNS查询耗时 domainLookupEnd - domainLookupStart TCP连接耗时 connectEnd - connectStart SSL连接耗时 connectEnd - secureConnectionStart Request请求耗时 responseStart - requestStart Response返回耗时 responseEnd - responseStart 解析DOM耗时 domContentLoadedEventEnd - domLoading 加载Load事件耗时 loadEventEnd - loadEventStart TTFB 读取页面第一个字节的时间 TTFB 即 Time To First Byte 的意思 https://en.wikipedia.org/wiki/Time_To_First_Byte responseStart - navigationStart; 白屏时间 responseStart - navigationStart domready时间 domContentLoadedEventEnd - navigationStart onload时间 执行 onload 回调函数的时间 loadEventEnd - navigationStart https://www.w3.org/TR/resource-timing-1/ https://www.w3.org/TR/resource-timing-2/ https://developer.mozilla.org/en-US/docs/Web/API/PerformanceResourceTiming 所有网络请求都被视为资源。通过网络对它们进行检索时,资源具有不同生命周期 Resource Timing API 为网络事件(如重定向的开始和结束事件, DNS查找的开始和结束事件, 请求开始, 响应开始和结束时间等)生成有高分辨率时间戳( high-resolution timestamps )的资源加载时间线, 并提供了资源大小和资源类型 通过Resource Timing API可以获取和分析应用资源加载的详细网络计时数据, 应用程序可以使用时间度量标准来确定加载特定资源所需要的时间,比如 XMLHttpRequest、<SVG>、图片、或者脚本 resource timing 返回值 name Returns the resources URL 资源URL 请求资源的绝对地址, 即便请求重定向到一个新的地址此属性也不会改变 entryType Returns "resource" 统一返回resource PerformanceResourceTiming 对象的 entryType 属性永远返回字符串 "resource" initiatorType 代表了资源类型 简单来说 initiatorType 属性返回的内容代表资源是从哪里发生的请求行为. 
initiatorType 属性会返回下面列表中列出的字符串中的其中一个: css 如果请求是从 CSS 中的 url() 指令发出的 xmlhttprequest 通过 XMLHttpRequest 对象发出的请求 fetch 通过 Fetch 方法发出的请求 beacon 通过 beacon 方法发出的请求 link 通过 link 标签发出的请求 script 通过 script 标签发出的请求 iframe 通过 iframe 标签发出的请求 other 没有匹配上面条件的请求 startTime Returns the timestamp for the time a resource fetch started. This value is equivalent to PerformanceEntry.fetchStart 获取资源的开始时间 用户代理开始排队获取资源的时间. 如果 HTTP 重定则该属性与 redirectStart 属性相同, 其他情况该属性将与 fetchStart 相同 fetchStart A DOMHighResTimeStamp immediately before the browser starts to fetch the resource. 与startTime相同 redirectStart A DOMHighResTimeStamp that represents the start time of the fetch which initiates the redirect. 重定向开始时间 redirectEnd A DOMHighResTimeStamp immediately after receiving the last byte of the response of the last redirect. 重定向结束时间 duration Returns a timestamp that is the difference between the responseEnd and the startTime properties. startTime与responseEnd的差值 domainLookupStart A DOMHighResTimeStamp immediately before the browser starts the domain name lookup for the resource. 域名解析开始时间 domainLookupEnd A DOMHighResTimeStamp representing the time immediately after the browser finishes the domain name lookup for the resource 域名解析结束时间 connectStart 浏览器开始和服务器建立连接的时间 secureConnectionStart 浏览器在当前连接下,开始与服务器建立安全握手的时间 connectEnd 浏览器与服务器建立连接结束时间 requestStart A DOMHighResTimeStamp immediately before the browser starts requesting the resource from the server. responseStart A DOMHighResTimeStamp immediately after the browser receives the first byte of the response from the server. responseEnd A DOMHighResTimeStamp immediately after the browser receives the last byte of the resource or immediately before the transport connection is closed, whichever comes first. transferSize A number representing the size (in octets) of the fetched resource. 
The size includes the response header fields plus the response payload body 获取资源的大小(采用八进制, 请注意转换), 大小包含了response头部和实体 encodedBodySize A number representing the size (in octets) received from the fetch (HTTP or cache), of the payload body, before removing any applied content-codings. 表示从 HTTP 网络或缓存中接收到的有效内容主体 (Payload Body) 的大小(在删除所有应用内容编码之前) decodedBodySize A number that is the size (in octets) received from the fetch (HTTP or cache) of the message body, after removing any applied content-codings. 表示从 HTTP 网络或缓存中接收到的消息主体 (Message Body) 的大小(在删除所有应用内容编码之后) resourcce timing 计算公式 https://www.cnblogs.com/zhuyang/p/4789020.html 总时间 duration loadEventEnd - startTime 重定向耗时 redirectEnd - redirectStart DNS缓存时间 domainLookupStart - fetchStart; DNS查询耗时 domainLookupEnd - domainLookupStart TCP连接耗时 connectEnd - connectStart SSL连接耗时 connectEnd - secureConnectionStart Request请求耗时 responseStart - requestStart Response返回耗时 responseEnd - responseStart https://stackoverflow.com/questions/6509628/how-to-get-http-response-code-using-selenium-webdriver https://github.com/wkeeling/selenium-wire # resources = self.browser.execute_script("window.performance.getEntriesByType(\"resource\")") # close, Closes the current window. # quit, Quits the driver and closes every associated window. # 所有检测使用同一个browser实例可能导致数据混乱,前期每个URL开一个browser # 但这样会带来较大的性能问题 # TODO # 后期需要做成进程池,加锁 navigation timing https://www.w3.org/TR/navigation-timing/ # the result is dict # the result is list # print browser.get_performance("http://www.baidu.com") # browser.get_performance_memory("http://www.baidu.com") # print browser.get_performance_timing("http://www.baidu.com") # print browser.get_performance_resource("http://www.baidu.com") | 2.796015 | 3 |
tests/e2e/__init__.py | Onboard-Team/gltflib | 56 | 6616335 | <reponame>Onboard-Team/gltflib
from .test_roundtrip import TestRoundtrip
| from .test_roundtrip import TestRoundtrip | none | 1 | 1.035446 | 1 | |
platform/mcu/atbm6431/tools/pack_ota_bin.py | mu340881/alios_Integrate | 0 | 6616336 | #!/usr/bin/python
import string
import sys
import os
import re
import binascii
import struct
import zlib
chksum = 0
FILE_READ_SIZE=512
def write_file(file_name,data):
if file_name is None:
print 'file_name cannot be none\n'
sys.exit(0)
fp = open(file_name,'ab')
if fp:
fp.seek(0,os.SEEK_END)
fp.write(data)
fp.close()
else:
print '%s write fail\n'%(file_name)
def packotabin():
if len(sys.argv) != 5:
print 'Usage: pack_ota_bin.py fw1.bin fw2.bin -o http_ota_fw_v100.bin'
sys.exit(0)
fw_updata1=sys.argv[1]
fw_updata2=sys.argv[2]
destfile=sys.argv[4]
#print 'fw_updata1=%s \n'%(fw_updata1)
#print 'fw_updata2=%s \n'%(fw_updata2)
if os.path.exists(destfile):
cmd='rm ' + destfile
os.system(cmd)
pSrcFd1 = open(fw_updata1, 'rb')
if pSrcFd1 is None:
print '%s cannot be open\n' % fw_updata1
sys.exit(0)
pSrcFd1.seek(0,os.SEEK_END)
fw1_length = pSrcFd1.tell()
pSrcFd1.seek(0,os.SEEK_SET)
pSrcFd2 = open(fw_updata2, 'rb')
if pSrcFd2 is None:
print '%s cannot be open\n' % fw_updata2
sys.exit(0)
#print 'fw1_length=%d \n'%(fw1_length)
pSrcFd2.seek(0,os.SEEK_END)
fw2_length = pSrcFd2.tell()
pSrcFd2.seek(0,os.SEEK_SET)
#print 'fw2_length=%d \n'%(fw2_length)
#file_total_length = fw2_length + fw1_length
paddingfw1_length = 0
paddingfw2_length = 0
if fw1_length % FILE_READ_SIZE:
paddingfw1_length = FILE_READ_SIZE - (fw1_length % FILE_READ_SIZE)
if fw2_length % FILE_READ_SIZE:
paddingfw2_length = FILE_READ_SIZE - (fw2_length % FILE_READ_SIZE)
magic_data0 = 0x5a
magic_data1 = 0x47
version = 4338
fw1_length = fw1_length + paddingfw1_length
fw2_length = fw2_length
reserved=0x0
header = struct.pack('<BBHIIIIII', int(magic_data0), int(magic_data1),int(version), int(reserved), int(reserved), int(reserved), int(reserved), int(fw1_length),int(fw2_length))
write_file(destfile, header)
data_str = ['00']*(FILE_READ_SIZE - 28)
data_bin = binascii.a2b_hex(''.join(data_str))
write_file(destfile, data_bin)
data_bin= pSrcFd1.read(int(fw1_length))
write_file(destfile, data_bin)
if paddingfw1_length:
data_str = ['00']*(int(paddingfw1_length))
data_bin = binascii.a2b_hex(''.join(data_str))
write_file(destfile, data_bin)
data_bin= pSrcFd2.read(int(fw2_length))
write_file(destfile, data_bin)
pSrcFd1.close()
pSrcFd2.close()
if __name__=='__main__':
packotabin()
| #!/usr/bin/python
import string
import sys
import os
import re
import binascii
import struct
import zlib
chksum = 0
FILE_READ_SIZE=512
def write_file(file_name,data):
if file_name is None:
print 'file_name cannot be none\n'
sys.exit(0)
fp = open(file_name,'ab')
if fp:
fp.seek(0,os.SEEK_END)
fp.write(data)
fp.close()
else:
print '%s write fail\n'%(file_name)
def packotabin():
if len(sys.argv) != 5:
print 'Usage: pack_ota_bin.py fw1.bin fw2.bin -o http_ota_fw_v100.bin'
sys.exit(0)
fw_updata1=sys.argv[1]
fw_updata2=sys.argv[2]
destfile=sys.argv[4]
#print 'fw_updata1=%s \n'%(fw_updata1)
#print 'fw_updata2=%s \n'%(fw_updata2)
if os.path.exists(destfile):
cmd='rm ' + destfile
os.system(cmd)
pSrcFd1 = open(fw_updata1, 'rb')
if pSrcFd1 is None:
print '%s cannot be open\n' % fw_updata1
sys.exit(0)
pSrcFd1.seek(0,os.SEEK_END)
fw1_length = pSrcFd1.tell()
pSrcFd1.seek(0,os.SEEK_SET)
pSrcFd2 = open(fw_updata2, 'rb')
if pSrcFd2 is None:
print '%s cannot be open\n' % fw_updata2
sys.exit(0)
#print 'fw1_length=%d \n'%(fw1_length)
pSrcFd2.seek(0,os.SEEK_END)
fw2_length = pSrcFd2.tell()
pSrcFd2.seek(0,os.SEEK_SET)
#print 'fw2_length=%d \n'%(fw2_length)
#file_total_length = fw2_length + fw1_length
paddingfw1_length = 0
paddingfw2_length = 0
if fw1_length % FILE_READ_SIZE:
paddingfw1_length = FILE_READ_SIZE - (fw1_length % FILE_READ_SIZE)
if fw2_length % FILE_READ_SIZE:
paddingfw2_length = FILE_READ_SIZE - (fw2_length % FILE_READ_SIZE)
magic_data0 = 0x5a
magic_data1 = 0x47
version = 4338
fw1_length = fw1_length + paddingfw1_length
fw2_length = fw2_length
reserved=0x0
header = struct.pack('<BBHIIIIII', int(magic_data0), int(magic_data1),int(version), int(reserved), int(reserved), int(reserved), int(reserved), int(fw1_length),int(fw2_length))
write_file(destfile, header)
data_str = ['00']*(FILE_READ_SIZE - 28)
data_bin = binascii.a2b_hex(''.join(data_str))
write_file(destfile, data_bin)
data_bin= pSrcFd1.read(int(fw1_length))
write_file(destfile, data_bin)
if paddingfw1_length:
data_str = ['00']*(int(paddingfw1_length))
data_bin = binascii.a2b_hex(''.join(data_str))
write_file(destfile, data_bin)
data_bin= pSrcFd2.read(int(fw2_length))
write_file(destfile, data_bin)
pSrcFd1.close()
pSrcFd2.close()
if __name__=='__main__':
packotabin()
| ru | 0.164449 | #!/usr/bin/python #print 'fw_updata1=%s \n'%(fw_updata1) #print 'fw_updata2=%s \n'%(fw_updata2) #print 'fw1_length=%d \n'%(fw1_length) #print 'fw2_length=%d \n'%(fw2_length) #file_total_length = fw2_length + fw1_length | 2.456243 | 2 |
rater.py | intellectape/Yelp-Dataset-Challenge | 1 | 6616337 | #!/usr/bin/python
import pandas as pd
import jsonpickle
def load_wordlist(filename):
return list(set([line.replace("\n", "") for line in open(filename, "r")]))
class Food:
name = None
pos_reviews = None
neg_reviews = None
pos_count = 0
neg_count = 0
class Restaurant:
buisness_id = -1
food_items = None
def check_positivity(text):
pos_count = 0
neg_count = 0
for word in positives:
if word in text:
pos_count += 1
for word in negatives:
if word in text:
neg_count += 1
if pos_count>=neg_count:
return 1
else:
return -1
# ---- Read input data -------------------------------------------------
# Reviews joined with the food item each mentions plus the star rating.
dataset = pd.read_csv("./Data/cleanedSC.csv")
positives = load_wordlist("positive.txt")
negatives = load_wordlist("negative.txt")
restaurant_dict = dict()  # business_id -> Restaurant
total_items = dataset["business_id"].__len__()  # equivalent to len(dataset["business_id"])
# ---- Parse data and run sentiment analysis ---------------------------
for i in range(0, total_items):
    business_id = dataset["business_id"][i]
    food_name = dataset["field1"][i]
    text = dataset["text"][i]
    rating = dataset["stars"][i]
    # Fetch (or lazily create) the Restaurant and Food records for this row.
    if business_id in restaurant_dict:
        restaurant = restaurant_dict.get(business_id)
        if food_name in restaurant.food_items:
            food = restaurant.food_items.get(food_name)
        else:
            food = Food()
            food.name = food_name
            food.pos_reviews = []
            food.neg_reviews = []
            food.pos_count = 0
            food.neg_count = 0
    else:
        restaurant = Restaurant()
        restaurant.buisness_id = business_id
        restaurant.food_items = dict()
        food = Food()
        food.name = food_name
        food.pos_reviews = []
        food.neg_reviews = []
        food.pos_count = 0
        food.neg_count = 0
    # Extreme star ratings short-circuit the word-list classifier.
    if int(rating) == 5:
        result = 1
    elif int(rating) == 1:
        result = -1
    else:
        result = check_positivity(text)
    # Record the review under the appropriate sentiment bucket.
    if result > 0:
        food.pos_count += 1
        food.pos_reviews.append(text)
    elif result < 0:
        food.neg_count += 1
        food.neg_reviews.append(text)
    restaurant.food_items[food_name] = food
    restaurant_dict[business_id] = restaurant
None  # no-op; stray leftover statement, has no effect
# ---- Convert output to JSON ------------------------------------------
final_data = jsonpickle.encode(restaurant_dict, unpicklable=False)
final_file = open("final_data.txt", "w")
final_file.write(final_data)
final_file.close()
None  # no-op; stray leftover statement, has no effect
| #!/usr/bin/python
import pandas as pd
import jsonpickle
def load_wordlist(filename):
return list(set([line.replace("\n", "") for line in open(filename, "r")]))
class Food:
name = None
pos_reviews = None
neg_reviews = None
pos_count = 0
neg_count = 0
class Restaurant:
buisness_id = -1
food_items = None
def check_positivity(text):
pos_count = 0
neg_count = 0
for word in positives:
if word in text:
pos_count += 1
for word in negatives:
if word in text:
neg_count += 1
if pos_count>=neg_count:
return 1
else:
return -1
# Reading Data From Files
dataset = pd.read_csv("./Data/cleanedSC.csv")
positives = load_wordlist("positive.txt")
negatives = load_wordlist("negative.txt")
restaurant_dict = dict()
total_items = dataset["business_id"].__len__()
#Parsing Data and running Sentiment Analysis
for i in range(0, total_items):
business_id = dataset["business_id"][i]
food_name = dataset["field1"][i]
text = dataset["text"][i]
rating = dataset["stars"][i]
if business_id in restaurant_dict:
restaurant = restaurant_dict.get(business_id)
if food_name in restaurant.food_items:
food = restaurant.food_items.get(food_name)
else:
food = Food()
food.name = food_name
food.pos_reviews = []
food.neg_reviews = []
food.pos_count = 0
food.neg_count = 0
else:
restaurant = Restaurant()
restaurant.buisness_id = business_id
restaurant.food_items = dict()
food = Food()
food.name = food_name
food.pos_reviews = []
food.neg_reviews = []
food.pos_count = 0
food.neg_count = 0
if int(rating) == 5:
result = 1
elif int(rating) == 1:
result = -1
else:
result = check_positivity(text)
if result > 0:
food.pos_count += 1
food.pos_reviews.append(text)
elif result < 0:
food.neg_count += 1
food.neg_reviews.append(text)
restaurant.food_items[food_name] = food
restaurant_dict[business_id] = restaurant
None
#Converting Output to Json
final_data = jsonpickle.encode(restaurant_dict, unpicklable=False)
final_file = open("final_data.txt", "w")
final_file.write(final_data)
final_file.close()
None
| en | 0.643073 | #!/usr/bin/python # Reading Data From Files #Parsing Data and running Sentiment Analysis #Converting Output to Json | 3.274703 | 3 |
encryptPdf.py | Adarsh232001/Basic-python-scripts | 1 | 6616338 | <reponame>Adarsh232001/Basic-python-scripts
import PyPDF2

# BUG FIX: the original path was written as "Desktop\file.pdf", where
# "\f" is the form-feed escape, so the string never contained a real
# backslash-f and the intended file could not be opened. A raw string
# preserves the backslash literally.
pdfFile = open(r"Desktop\file.pdf", 'rb')  # input file name
pdfReader = PyPDF2.PdfFileReader(pdfFile)
pdfWriter = PyPDF2.PdfFileWriter()

# Copy every page of the source document into the writer.
for pageNum in range(pdfReader.numPages):
    pdfWriter.addPage(pdfReader.getPage(pageNum))

# Protect the output with a user password.
pdfWriter.encrypt('adarsh')  # password

resultPdf = open('encryptedfile.pdf', 'wb')  # output file name
pdfWriter.write(resultPdf)
pdfFile.close()  # release the input handle once all pages are written
resultPdf.close() | import PyPDF2
pdfFile = open("Desktop\file.pdf",'rb') #file name
pdfReader = PyPDF2.PdfFileReader(pdfFile)
pdfWriter = PyPDF2.PdfFileWriter()
for pageNum in range(pdfReader.numPages):
pdfWriter.addPage(pdfReader.getPage(pageNum))
pdfWriter.encrypt('adarsh') #password
resultPdf = open('encryptedfile.pdf','wb') #outputfile name
pdfWriter.write(resultPdf)
resultPdf.close() | fa | 0.177035 | #file name #password #outputfile name | 3.306979 | 3 |
main2.py | Timsel10/Group-D08-2021 | 1 | 6616339 | <filename>main2.py
# -*- coding: utf-8 -*-
import motionTools2
import numpy as np
import os

print("Loading Simulator Motion Files")
# Motion condition 1 is kept as commented-out reference code; only
# condition 2 is processed in this variant of the script.
# simMotion_1 = np.genfromtxt("data/MotionCondition_1.csv", delimiter = ",", skip_header = 1)
simMotion_2 = np.genfromtxt("data/MotionCondition_2.csv", delimiter = ",", skip_header = 1)
# simMotion_1[:,0] = (simMotion_1[:,0] - simMotion_1[0,0]) * 0.0001
# Re-base timestamps (column 0) to start at zero and scale by 1e-4
# (presumably ticks -> seconds; confirm against the recording format).
simMotion_2[:,0] = (simMotion_2[:,0] - simMotion_2[0,0]) * 0.0001

print("Loading Head Motion Files")
# headMotions_1 = []
headMotions_2 = []
for filename in os.listdir("filtered_data"):
    # if "MC1" in filename: # and "S03" in filename:
    #     headMotions_1.append(np.genfromtxt("data/" + filename, delimiter = ",", skip_header = 1))
    #     print(filename)
    if "MC2" in filename: # and "S03" in filename:
        headMotions_2.append(np.genfromtxt("data/" + filename, delimiter = ",", skip_header = 1))
        print(filename)
headMotionSystems = []
print("Initializing all experiments")
# for i, headMotion in enumerate(headMotions_1):
#     headMotion[:,0] = (headMotion[:,0] - headMotion[0,0]) * 0.0001
#     if (i >= 2 and i <= 9):
#         headMotion[:,0] += 0.02
#     headMotionSystems.append(motionTools2.headMotionSystem(simMotion_1, headMotion, (i + 1, 1)))
for i, headMotion in enumerate(headMotions_2):
    headMotion[:,0] = (headMotion[:,0] - headMotion[0,0]) * 0.0001
    # NOTE(review): selected recordings are shifted by 0.02 — presumably
    # compensating a capture offset for those participants; confirm.
    if ((i >= 1 and i <= 4) or i in [8, 10]):
        headMotion[:,0] += 0.02
    # Only the second recording is actually analysed here.
    if i + 1 == 2:
        headMotionSystems.append(motionTools2.headMotionSystem(simMotion_2, headMotion, (i + 1, 2)))
print("Solving all experiments")
for i, system in enumerate(headMotionSystems):
    print("solving:", i)
    system.solve()
# results = np.zeros((6, 24, 6))
# for i, system in enumerate(headMotionSystems):
#     for j, axis in enumerate(system.results):
#         results[j, i, :2] = [system.MC, system.Person]
#         results[j, i, 2:] = axis
# np.save("results", results)
# np.save("results", results) | <filename>main2.py
# -*- coding: utf-8 -*-
import motionTools2
import numpy as np
import os
print("Loading Simulator Motion Files")
# simMotion_1 = np.genfromtxt("data/MotionCondition_1.csv", delimiter = ",", skip_header = 1)
simMotion_2 = np.genfromtxt("data/MotionCondition_2.csv", delimiter = ",", skip_header = 1)
# simMotion_1[:,0] = (simMotion_1[:,0] - simMotion_1[0,0]) * 0.0001
simMotion_2[:,0] = (simMotion_2[:,0] - simMotion_2[0,0]) * 0.0001
print("Loading Head Motion Files")
# headMotions_1 = []
headMotions_2 = []
for filename in os.listdir("filtered_data"):
# if "MC1" in filename: # and "S03" in filename:
# headMotions_1.append(np.genfromtxt("data/" + filename, delimiter = ",", skip_header = 1))
# print(filename)
if "MC2" in filename: # and "S03" in filename:
headMotions_2.append(np.genfromtxt("data/" + filename, delimiter = ",", skip_header = 1))
print(filename)
headMotionSystems = []
print("Initializing all experiments")
# for i, headMotion in enumerate(headMotions_1):
# headMotion[:,0] = (headMotion[:,0] - headMotion[0,0]) * 0.0001
# if (i >= 2 and i <= 9):
# headMotion[:,0] += 0.02
# headMotionSystems.append(motionTools2.headMotionSystem(simMotion_1, headMotion, (i + 1, 1)))
for i, headMotion in enumerate(headMotions_2):
headMotion[:,0] = (headMotion[:,0] - headMotion[0,0]) * 0.0001
if ((i >= 1 and i <= 4) or i in [8, 10]):
headMotion[:,0] += 0.02
if i + 1 == 2:
headMotionSystems.append(motionTools2.headMotionSystem(simMotion_2, headMotion, (i + 1, 2)))
print("Solving all experiments")
for i, system in enumerate(headMotionSystems):
print("solving:", i)
system.solve()
# results = np.zeros((6, 24, 6))
# for i, system in enumerate(headMotionSystems):
# for j, axis in enumerate(system.results):
# results[j, i, :2] = [system.MC, system.Person]
# results[j, i, 2:] = axis
# np.save("results", results) | en | 0.539852 | # -*- coding: utf-8 -*- # simMotion_1 = np.genfromtxt("data/MotionCondition_1.csv", delimiter = ",", skip_header = 1) # simMotion_1[:,0] = (simMotion_1[:,0] - simMotion_1[0,0]) * 0.0001 # headMotions_1 = [] # if "MC1" in filename: # and "S03" in filename: # headMotions_1.append(np.genfromtxt("data/" + filename, delimiter = ",", skip_header = 1)) # print(filename) # and "S03" in filename: # for i, headMotion in enumerate(headMotions_1): # headMotion[:,0] = (headMotion[:,0] - headMotion[0,0]) * 0.0001 # if (i >= 2 and i <= 9): # headMotion[:,0] += 0.02 # headMotionSystems.append(motionTools2.headMotionSystem(simMotion_1, headMotion, (i + 1, 1))) # results = np.zeros((6, 24, 6)) # for i, system in enumerate(headMotionSystems): # for j, axis in enumerate(system.results): # results[j, i, :2] = [system.MC, system.Person] # results[j, i, 2:] = axis # np.save("results", results) | 2.697854 | 3 |
deploy/gunicorn.conf.py | GlobalFinPrint/global_finprint | 0 | 6616340 | <gh_stars>0
# Gunicorn deployment settings for the global_finprint application.
name = 'global_finprint'
user = 'www-data'   # run worker processes as the web-server user
group = 'www-data'  # ...and group
bind = 'unix:/tmp/gunicorn.sock'  # serve over a unix domain socket
| name = 'global_finprint'
user = 'www-data'
group = 'www-data'
bind = 'unix:/tmp/gunicorn.sock' | none | 1 | 1.031952 | 1 | |
modules/photons_messages/messages.py | Djelibeybi/photons | 51 | 6616341 | <gh_stars>10-100
from photons_messages import enums, fields
from photons_messages.frame import msg
from photons_protocol.messages import T, Messages, MultiOptions
from photons_protocol.types import Optional
from delfick_project.norms import sb
import math
def empty(pkt, attr):
    """Return True when *attr* carries no concrete value on *pkt*."""
    current = pkt.actual(attr)
    return current in (Optional, sb.NotSpecified)
def color_zones_response_count(req, res):
    """How many zone-state replies to expect for a color-zones request.

    The device answers in chunks of up to eight zones, so the reply count
    is the smaller of what the request spans and what the strip provides.
    """
    requested = max(1, (req.end_index - req.start_index) // 8 + 1)
    available = math.ceil(res.zones_count / 8)
    return min(requested, available)
# fmt: off
########################
### CORE
########################
class CoreMessages(Messages):
    """Messages every device supports: acks and the unhandled-type reply."""
    Acknowledgement = msg(45)

    StateUnhandled = msg(223
        , ("unhandled_type", T.Uint16)  # packet type the device did not recognise
        )
########################
### DISCOVERY
########################
class DiscoveryMessages(Messages):
    """Device discovery: broadcast GetService; devices answer StateService."""
    GetService = msg(2
        , multi = -1  # an unbounded number of devices may respond
        )

    StateService = msg(3
        , ("service", T.Uint8.enum(enums.Services))
        , ("port", T.Uint32)
        )
########################
### DEVICE
########################
class DeviceMessages(Messages):
    """Device-level queries and commands: firmware/wifi info, power, label,
    version, location, group and echo.  Each attribute is a packet type
    keyed by its wire message number; ``Reserved`` fields are padding in
    the wire layout.
    """
    GetHostFirmware = msg(14)

    StateHostFirmware = msg(15
        , ("build", T.Uint64)
        , ("reserved6", T.Reserved(64))
        , ("version_minor", T.Uint16)
        , ("version_major", T.Uint16)
        )

    GetWifiInfo = msg(16)

    StateWifiInfo = msg(17
        , ("signal", T.Float)
        , ("reserved6", T.Reserved(32))
        , ("reserved7", T.Reserved(32))
        , ("reserved8", T.Reserved(16))
        )

    GetWifiFirmware = msg(18)

    StateWifiFirmware = msg(19
        , ("build", T.Uint64)
        , ("reserved6", T.Reserved(64))
        , ("version_minor", T.Uint16)
        , ("version_major", T.Uint16)
        )

    GetPower = msg(20)

    SetPower = msg(21
        , ("level", T.Uint16)
        )

    StatePower = msg(22
        , ("level", T.Uint16)
        )

    GetLabel = msg(23)

    SetLabel = msg(24
        , ("label", T.String(32 * 8))  # 32-byte label, sized in bits
        )

    StateLabel = SetLabel.using(25)  # same payload as SetLabel, number 25

    GetVersion = msg(32)

    StateVersion = msg(33
        , ("vendor", T.Uint32)
        , ("product", T.Uint32)
        , ("reserved6", T.Reserved(32))
        )

    GetInfo = msg(34)

    StateInfo = msg(35
        , ("time", T.Uint64)
        , ("uptime", fields.nano_to_seconds)
        , ("downtime", fields.nano_to_seconds)
        )

    SetReboot = msg(38)

    GetLocation = msg(48)

    SetLocation = msg(49
        , ("location", T.Bytes(16 * 8))
        , ("label", T.String(32 * 8))
        , ("updated_at", T.Uint64)
        )

    StateLocation = SetLocation.using(50)

    GetGroup = msg(51)

    SetGroup = msg(52
        , ("group", T.Bytes(16 * 8))
        , ("label", T.String(32 * 8))
        , ("updated_at", T.Uint64)
        )

    StateGroup = SetGroup.using(53)

    EchoRequest = msg(58
        , ("echoing", T.Bytes(64 * 8))  # opaque payload echoed back by the device
        )

    EchoResponse = EchoRequest.using(59)
########################
### LIGHT
########################
class LightMessages(Messages):
    """Light-specific packets: color, waveforms, power, infrared and HEV
    (clean) cycles.  ``fields.hsbk`` expands to the hue/saturation/
    brightness/kelvin field group.
    """
    GetColor = msg(101)

    SetColor = msg(102
        , ("reserved6", T.Reserved(8))
        , *fields.hsbk
        , ("duration", fields.duration_type)
        )

    SetWaveform = msg(103
        , ("reserved6", T.Reserved(8))
        , ("transient", T.BoolInt.default(0))
        , *fields.hsbk
        , ("period", fields.waveform_period)
        , ("cycles", T.Float.default(1))
        , ("skew_ratio", fields.waveform_skew_ratio)
        , ("waveform", T.Uint8.enum(enums.Waveform).default(enums.Waveform.SAW))
        )

    LightState = msg(107
        , *fields.hsbk
        , ("reserved6", T.Reserved(16))
        , ("power", T.Uint16)
        , ("label", T.String(32 * 8))
        , ("reserved7", T.Reserved(64))
        )

    GetLightPower = msg(116)

    SetLightPower = msg(117
        , ("level", T.Uint16)
        , ("duration", fields.duration_type)
        )

    StateLightPower = msg(118
        , ("level", T.Uint16)
        )

    # Like SetWaveform, but each HSBK component may be left unset; the
    # set_* flags default to whether the matching component was provided.
    SetWaveformOptional = msg(119
        , ("reserved6", T.Reserved(8))
        , ("transient", T.BoolInt.default(0))
        , *fields.hsbk_with_optional
        , ("period", fields.waveform_period)
        , ("cycles", T.Float.default(1))
        , ("skew_ratio", fields.waveform_skew_ratio)
        , ("waveform", T.Uint8.enum(enums.Waveform).default(enums.Waveform.SAW))
        , ("set_hue", T.BoolInt.default(lambda pkt: 0 if empty(pkt, "hue") else 1))
        , ("set_saturation", T.BoolInt.default(lambda pkt: 0 if empty(pkt, "saturation") else 1))
        , ("set_brightness", T.BoolInt.default(lambda pkt: 0 if empty(pkt, "brightness") else 1))
        , ("set_kelvin", T.BoolInt.default(lambda pkt: 0 if empty(pkt, "kelvin") else 1))
        )

    GetInfrared = msg(120)

    StateInfrared = msg(121
        , ("brightness", T.Uint16)
        )

    SetInfrared = msg(122
        , ("brightness", T.Uint16)
        )

    GetHevCycle = msg(142)

    SetHevCycle = msg(143
        , ("enable", T.BoolInt)
        , ("duration_s", T.Uint32)
        )

    StateHevCycle = msg(144
        , ("duration_s", T.Uint32)
        , ("remaining_s", T.Uint32)
        , ("last_power", T.BoolInt)
        )

    GetHevCycleConfiguration = msg(145)

    SetHevCycleConfiguration = msg(146
        , ("indication", T.BoolInt)
        , ("duration_s", T.Uint32)
        )

    StateHevCycleConfiguration = msg(147
        , ("indication", T.BoolInt)
        , ("duration_s", T.Uint32)
        )

    GetLastHevCycleResult = msg(148)

    StateLastHevCycleResult = msg(149
        , ("result", T.Uint8.enum(enums.LightLastHevCycleResult))
        )
########################
### MULTI_ZONE
########################
class MultiZoneMessages(Messages):
    """Packets for multizone strips (e.g. Z/Beam): per-zone colors,
    extended color zones and firmware effects.  The ``multi`` options
    describe how many reply packets a single request produces.
    """
    SetColorZones = msg(501
        , ("start_index", T.Uint8)
        , ("end_index", T.Uint8)
        , *fields.hsbk
        , ("duration", fields.duration_type)
        , ("apply", T.Uint8.enum(enums.MultiZoneApplicationRequest).default(enums.MultiZoneApplicationRequest.APPLY))
        , multi = MultiOptions(
              lambda req: [MultiZoneMessages.StateZone, MultiZoneMessages.StateMultiZone]
            , lambda req, res: color_zones_response_count(req, res)
            )
        )

    GetColorZones = msg(502
        , ("start_index", T.Uint8)
        , ("end_index", T.Uint8)
        , multi = MultiOptions(
              lambda req: [MultiZoneMessages.StateZone, MultiZoneMessages.StateMultiZone]
            , lambda req, res: color_zones_response_count(req, res)
            )
        )

    StateZone = msg(503
        , ("zones_count", T.Uint8)
        , ("zone_index", T.Uint8)
        , *fields.hsbk
        )

    StateMultiZone = msg(506
        , ("zones_count", T.Uint8)
        , ("zone_index", T.Uint8)
        , ("colors", T.Bytes(64).multiple(8, kls=fields.Color))  # up to 8 zones per packet
        )

    GetMultiZoneEffect = msg(507)

    SetMultiZoneEffect = msg(508
        , *fields.multi_zone_effect_settings
        )

    StateMultiZoneEffect = SetMultiZoneEffect.using(509)

    SetExtendedColorZones = msg(510
        , ("duration", fields.duration_type)
        , ("apply", T.Uint8.enum(enums.MultiZoneExtendedApplicationRequest).default(enums.MultiZoneExtendedApplicationRequest.APPLY))
        , ("zone_index", T.Uint16)
        , ("colors_count", T.Uint8)
        , ("colors", T.Bytes(64).multiple(82, kls=fields.Color))  # up to 82 zones per packet
        )

    GetExtendedColorZones = msg(511)

    StateExtendedColorZones = msg(512
        , ("zones_count", T.Uint16)
        , ("zone_index", T.Uint16)
        , ("colors_count", T.Uint8)
        , ("colors", T.Bytes(64).multiple(82, kls=fields.Color))
        )
########################
### RELAY
########################
class RelayMessages(Messages):
    """Per-relay power control for switch devices (relay_index selects
    which relay the level applies to)."""
    GetRPower = msg(816
        , ("relay_index", T.Uint8)
        )

    SetRPower = msg(817
        , ("relay_index", T.Uint8)
        , ("level", T.Uint16)
        )

    StateRPower = msg(818
        , ("relay_index", T.Uint8)
        , ("level", T.Uint16)
        )
########################
### TILE
########################
class TileMessages(Messages):
    """Packets for tile/matrix devices: device chain layout, 64-pixel
    buffers (Get64/Set64/State64) and firmware tile effects."""
    GetDeviceChain = msg(701)

    StateDeviceChain = msg(702
        , ("start_index", T.Uint8)
        , ("tile_devices", T.Bytes(440).multiple(16, kls=fields.Tile))
        , ("tile_devices_count", T.Uint8)
        )

    SetUserPosition = msg(703
        , ("tile_index", T.Uint8)
        , ("reserved6", T.Reserved(16))
        , ("user_x", T.Float)
        , ("user_y", T.Float)
        )

    Get64 = msg(707
        , ("tile_index", T.Uint8)
        , ("length", T.Uint8)  # how many tiles to report, one State64 each
        , *fields.tile_buffer_rect
        , multi = MultiOptions(
              lambda req: TileMessages.State64
            , lambda req, res: MultiOptions.Max(req.length)
            )
        )

    State64 = msg(711
        , ("tile_index", T.Uint8)
        , *fields.tile_buffer_rect
        , ("colors", T.Bytes(64).multiple(64, kls=fields.Color))  # 64 pixels
        )

    Set64 = msg(715
        , ("tile_index", T.Uint8)
        , ("length", T.Uint8)
        , *fields.tile_buffer_rect
        , ("duration", fields.duration_type)
        , ("colors", T.Bytes(64).multiple(64, kls=fields.Color))
        )

    GetTileEffect = msg(718
        , ("reserved6", T.Reserved(8))
        , ("reserved7", T.Reserved(8))
        )

    SetTileEffect = msg(719
        , ("reserved8", T.Reserved(8))
        , ("reserved9", T.Reserved(8))
        , *fields.tile_effect_settings
        )

    StateTileEffect = msg(720
        , ("reserved8", T.Reserved(8))
        , *fields.tile_effect_settings
        )
# fmt: on
# Public API of this module for ``import *`` and documentation tools.
__all__ = [
    "CoreMessages",
    "DiscoveryMessages",
    "DeviceMessages",
    "LightMessages",
    "MultiZoneMessages",
    "RelayMessages",
    "TileMessages",
]
| from photons_messages import enums, fields
from photons_messages.frame import msg
from photons_protocol.messages import T, Messages, MultiOptions
from photons_protocol.types import Optional
from delfick_project.norms import sb
import math
def empty(pkt, attr):
return pkt.actual(attr) in (Optional, sb.NotSpecified)
def color_zones_response_count(req, res):
req_count = max([1, ((req.end_index - req.start_index) // 8) + 1])
res_count = math.ceil(res.zones_count / 8)
return min([req_count, res_count])
# fmt: off
########################
### CORE
########################
class CoreMessages(Messages):
Acknowledgement = msg(45)
StateUnhandled = msg(223
, ("unhandled_type", T.Uint16)
)
########################
### DISCOVERY
########################
class DiscoveryMessages(Messages):
GetService = msg(2
, multi = -1
)
StateService = msg(3
, ("service", T.Uint8.enum(enums.Services))
, ("port", T.Uint32)
)
########################
### DEVICE
########################
class DeviceMessages(Messages):
GetHostFirmware = msg(14)
StateHostFirmware = msg(15
, ("build", T.Uint64)
, ("reserved6", T.Reserved(64))
, ("version_minor", T.Uint16)
, ("version_major", T.Uint16)
)
GetWifiInfo = msg(16)
StateWifiInfo = msg(17
, ("signal", T.Float)
, ("reserved6", T.Reserved(32))
, ("reserved7", T.Reserved(32))
, ("reserved8", T.Reserved(16))
)
GetWifiFirmware = msg(18)
StateWifiFirmware = msg(19
, ("build", T.Uint64)
, ("reserved6", T.Reserved(64))
, ("version_minor", T.Uint16)
, ("version_major", T.Uint16)
)
GetPower = msg(20)
SetPower = msg(21
, ("level", T.Uint16)
)
StatePower = msg(22
, ("level", T.Uint16)
)
GetLabel = msg(23)
SetLabel = msg(24
, ("label", T.String(32 * 8))
)
StateLabel = SetLabel.using(25)
GetVersion = msg(32)
StateVersion = msg(33
, ("vendor", T.Uint32)
, ("product", T.Uint32)
, ("reserved6", T.Reserved(32))
)
GetInfo = msg(34)
StateInfo = msg(35
, ("time", T.Uint64)
, ("uptime", fields.nano_to_seconds)
, ("downtime", fields.nano_to_seconds)
)
SetReboot = msg(38)
GetLocation = msg(48)
SetLocation = msg(49
, ("location", T.Bytes(16 * 8))
, ("label", T.String(32 * 8))
, ("updated_at", T.Uint64)
)
StateLocation = SetLocation.using(50)
GetGroup = msg(51)
SetGroup = msg(52
, ("group", T.Bytes(16 * 8))
, ("label", T.String(32 * 8))
, ("updated_at", T.Uint64)
)
StateGroup = SetGroup.using(53)
EchoRequest = msg(58
, ("echoing", T.Bytes(64 * 8))
)
EchoResponse = EchoRequest.using(59)
########################
### LIGHT
########################
class LightMessages(Messages):
GetColor = msg(101)
SetColor = msg(102
, ("reserved6", T.Reserved(8))
, *fields.hsbk
, ("duration", fields.duration_type)
)
SetWaveform = msg(103
, ("reserved6", T.Reserved(8))
, ("transient", T.BoolInt.default(0))
, *fields.hsbk
, ("period", fields.waveform_period)
, ("cycles", T.Float.default(1))
, ("skew_ratio", fields.waveform_skew_ratio)
, ("waveform", T.Uint8.enum(enums.Waveform).default(enums.Waveform.SAW))
)
LightState = msg(107
, *fields.hsbk
, ("reserved6", T.Reserved(16))
, ("power", T.Uint16)
, ("label", T.String(32 * 8))
, ("reserved7", T.Reserved(64))
)
GetLightPower = msg(116)
SetLightPower = msg(117
, ("level", T.Uint16)
, ("duration", fields.duration_type)
)
StateLightPower = msg(118
, ("level", T.Uint16)
)
SetWaveformOptional = msg(119
, ("reserved6", T.Reserved(8))
, ("transient", T.BoolInt.default(0))
, *fields.hsbk_with_optional
, ("period", fields.waveform_period)
, ("cycles", T.Float.default(1))
, ("skew_ratio", fields.waveform_skew_ratio)
, ("waveform", T.Uint8.enum(enums.Waveform).default(enums.Waveform.SAW))
, ("set_hue", T.BoolInt.default(lambda pkt: 0 if empty(pkt, "hue") else 1))
, ("set_saturation", T.BoolInt.default(lambda pkt: 0 if empty(pkt, "saturation") else 1))
, ("set_brightness", T.BoolInt.default(lambda pkt: 0 if empty(pkt, "brightness") else 1))
, ("set_kelvin", T.BoolInt.default(lambda pkt: 0 if empty(pkt, "kelvin") else 1))
)
GetInfrared = msg(120)
StateInfrared = msg(121
, ("brightness", T.Uint16)
)
SetInfrared = msg(122
, ("brightness", T.Uint16)
)
GetHevCycle = msg(142)
SetHevCycle = msg(143
, ("enable", T.BoolInt)
, ("duration_s", T.Uint32)
)
StateHevCycle = msg(144
, ("duration_s", T.Uint32)
, ("remaining_s", T.Uint32)
, ("last_power", T.BoolInt)
)
GetHevCycleConfiguration = msg(145)
SetHevCycleConfiguration = msg(146
, ("indication", T.BoolInt)
, ("duration_s", T.Uint32)
)
StateHevCycleConfiguration = msg(147
, ("indication", T.BoolInt)
, ("duration_s", T.Uint32)
)
GetLastHevCycleResult = msg(148)
StateLastHevCycleResult = msg(149
, ("result", T.Uint8.enum(enums.LightLastHevCycleResult))
)
########################
### MULTI_ZONE
########################
class MultiZoneMessages(Messages):
SetColorZones = msg(501
, ("start_index", T.Uint8)
, ("end_index", T.Uint8)
, *fields.hsbk
, ("duration", fields.duration_type)
, ("apply", T.Uint8.enum(enums.MultiZoneApplicationRequest).default(enums.MultiZoneApplicationRequest.APPLY))
, multi = MultiOptions(
lambda req: [MultiZoneMessages.StateZone, MultiZoneMessages.StateMultiZone]
, lambda req, res: color_zones_response_count(req, res)
)
)
GetColorZones = msg(502
, ("start_index", T.Uint8)
, ("end_index", T.Uint8)
, multi = MultiOptions(
lambda req: [MultiZoneMessages.StateZone, MultiZoneMessages.StateMultiZone]
, lambda req, res: color_zones_response_count(req, res)
)
)
StateZone = msg(503
, ("zones_count", T.Uint8)
, ("zone_index", T.Uint8)
, *fields.hsbk
)
StateMultiZone = msg(506
, ("zones_count", T.Uint8)
, ("zone_index", T.Uint8)
, ("colors", T.Bytes(64).multiple(8, kls=fields.Color))
)
GetMultiZoneEffect = msg(507)
SetMultiZoneEffect = msg(508
, *fields.multi_zone_effect_settings
)
StateMultiZoneEffect = SetMultiZoneEffect.using(509)
SetExtendedColorZones = msg(510
, ("duration", fields.duration_type)
, ("apply", T.Uint8.enum(enums.MultiZoneExtendedApplicationRequest).default(enums.MultiZoneExtendedApplicationRequest.APPLY))
, ("zone_index", T.Uint16)
, ("colors_count", T.Uint8)
, ("colors", T.Bytes(64).multiple(82, kls=fields.Color))
)
GetExtendedColorZones = msg(511)
StateExtendedColorZones = msg(512
, ("zones_count", T.Uint16)
, ("zone_index", T.Uint16)
, ("colors_count", T.Uint8)
, ("colors", T.Bytes(64).multiple(82, kls=fields.Color))
)
########################
### RELAY
########################
class RelayMessages(Messages):
GetRPower = msg(816
, ("relay_index", T.Uint8)
)
SetRPower = msg(817
, ("relay_index", T.Uint8)
, ("level", T.Uint16)
)
StateRPower = msg(818
, ("relay_index", T.Uint8)
, ("level", T.Uint16)
)
########################
### TILE
########################
class TileMessages(Messages):
GetDeviceChain = msg(701)
StateDeviceChain = msg(702
, ("start_index", T.Uint8)
, ("tile_devices", T.Bytes(440).multiple(16, kls=fields.Tile))
, ("tile_devices_count", T.Uint8)
)
SetUserPosition = msg(703
, ("tile_index", T.Uint8)
, ("reserved6", T.Reserved(16))
, ("user_x", T.Float)
, ("user_y", T.Float)
)
Get64 = msg(707
, ("tile_index", T.Uint8)
, ("length", T.Uint8)
, *fields.tile_buffer_rect
, multi = MultiOptions(
lambda req: TileMessages.State64
, lambda req, res: MultiOptions.Max(req.length)
)
)
State64 = msg(711
, ("tile_index", T.Uint8)
, *fields.tile_buffer_rect
, ("colors", T.Bytes(64).multiple(64, kls=fields.Color))
)
Set64 = msg(715
, ("tile_index", T.Uint8)
, ("length", T.Uint8)
, *fields.tile_buffer_rect
, ("duration", fields.duration_type)
, ("colors", T.Bytes(64).multiple(64, kls=fields.Color))
)
GetTileEffect = msg(718
, ("reserved6", T.Reserved(8))
, ("reserved7", T.Reserved(8))
)
SetTileEffect = msg(719
, ("reserved8", T.Reserved(8))
, ("reserved9", T.Reserved(8))
, *fields.tile_effect_settings
)
StateTileEffect = msg(720
, ("reserved8", T.Reserved(8))
, *fields.tile_effect_settings
)
# fmt: on
__all__ = [
"CoreMessages",
"DiscoveryMessages",
"DeviceMessages",
"LightMessages",
"MultiZoneMessages",
"RelayMessages",
"TileMessages",
] | de | 0.821313 | # fmt: off ######################## ### CORE ######################## ######################## ### DISCOVERY ######################## ######################## ### DEVICE ######################## ######################## ### LIGHT ######################## ######################## ### MULTI_ZONE ######################## ######################## ### RELAY ######################## ######################## ### TILE ######################## # fmt: on | 1.898728 | 2 |
app/api/endpoints.py | PetrMixayloff/parser | 0 | 6616342 | from fastapi import APIRouter, Depends, HTTPException, Path
from app.worker.tasks import parse_task
from .session import get_db
from sqlalchemy.orm import Session
from app.crud import get_result_by_id
from fastapi.responses import HTMLResponse
api_router = APIRouter()


@api_router.get("/", response_class=HTMLResponse)
def get_ui():
    """Serve the bundled single-page UI.

    The file is now read with an explicit UTF-8 encoding: the original
    relied on the platform default, which breaks the non-ASCII (Russian)
    content on hosts with a non-UTF-8 locale.
    """
    with open('app/index.html', 'r', encoding='utf-8') as file:
        html = file.read()
    return html
@api_router.post("/api/parse/{url}",
                 responses={
                     200: {
                         "description": "Добавление нового URL в задания для парсинга",
                         "content": {
                             "application/json": {
                                 "example": "77d05ad4-81ae-4c17-afb8-e8607a6656a9"
                             }
                         }}
                 })
def receive_task(url: str = Path(..., example="example.com")) -> str:
    """Queue *url* for parsing and return the Celery task identifier."""
    task = parse_task.apply_async(args=[url])
    return task.id
@api_router.get("/api/results/{result_id}",
                responses={
                    200: {
                        "description": "Получение результата парсинга по идентификатору",
                        "content": {
                            "application/json": {
                                "example": '{"html": {"count":1, "nested":100}, '
                                           '"body":{"count":1, "nested":99}, "H1": {"count":2,"nested":0}'
                            }
                        }},
                    404: {
                        "description": "По заданному идентификатору ничего не найдено."
                    }
                })
def get_task_by_id(result_id: str = Path(..., example="77d05ad4-81ae-4c17-afb8-e8607a6656a9"),
                   db: Session = Depends(get_db)) -> str:
    """Return the stored parse result for *result_id*, or raise 404."""
    record = get_result_by_id(db=db, result_id=result_id)
    if record is None:
        # Guard clause instead of the original if/fall-through shape.
        raise HTTPException(
            status_code=404,
            detail="По заданному идентификатору ничего не найдено.",
        )
    return record.result
| from fastapi import APIRouter, Depends, HTTPException, Path
from app.worker.tasks import parse_task
from .session import get_db
from sqlalchemy.orm import Session
from app.crud import get_result_by_id
from fastapi.responses import HTMLResponse
api_router = APIRouter()
@api_router.get("/", response_class=HTMLResponse)
def get_ui():
with open('app/index.html', 'r') as file:
html = file.read()
return html
@api_router.post("/api/parse/{url}",
responses={
200: {
"description": "Добавление нового URL в задания для парсинга",
"content": {
"application/json": {
"example": "77d05ad4-81ae-4c17-afb8-e8607a6656a9"
}
}}
})
def receive_task(url: str = Path(..., example="example.com")) -> str:
req = parse_task.apply_async(args=[url])
return req.id
@api_router.get("/api/results/{result_id}",
responses={
200: {
"description": "Получение результата парсинга по идентификатору",
"content": {
"application/json": {
"example": '{"html": {"count":1, "nested":100}, '
'"body":{"count":1, "nested":99}, "H1": {"count":2,"nested":0}'
}
}},
404: {
"description": "По заданному идентификатору ничего не найдено."
}
})
def get_task_by_id(result_id: str = Path(..., example="77d05ad4-81ae-4c17-afb8-e8607a6656a9"),
db: Session = Depends(get_db)) -> str:
res = get_result_by_id(db=db, result_id=result_id)
if res is not None:
return res.result
raise HTTPException(
status_code=404,
detail="По заданному идентификатору ничего не найдено.",
)
| none | 1 | 2.525142 | 3 | |
aml_robot_cutting/process_data.py | McGill-AML/aml_robot_cutting_dataset | 2 | 6616343 | <filename>aml_robot_cutting/process_data.py
#! /usr/bin/env python2
"""
This script loads the robot data from the bagfiles and reorganizes them in CSV files that can be later used for a
data loader.
"""
import argparse
from robot_data_helper import RobotDataHelper
# Command-line interface: --save_plots toggles saving joint-state figures.
parser = argparse.ArgumentParser(description='Processes robot data')
parser.add_argument('--save_plots', default=False, action='store_true', help='Saves the plots.')

if __name__ == '__main__':
    """
    Material labels for the cuts:
    0: LVL, 1: Maple, 2: Oak, 3: Birch, 4: Hardwood
    Thickness labels for the cuts:
    0: 3/16, 1: 1/4, 2: 5/16, 3: 3/8, 4: 7/16
    """
    # Index-aligned per-cut labels: MATERIAL_LABELS[i] / THICKNESS_LABELS[i]
    # describe cut number i + 1 (see the legend above).
    MATERIAL_LABELS = [0, 0, 0, 0, 0, 1, 2, 3, 1, 2, 3, 4]
    THICKNESS_LABELS = [0, 1, 2, 3, 4, 1, 1, 1, 3, 3, 3, 3]
    N_CUTS = 12      # number of distinct cuts
    N_EACH_CUT = 15  # recorded repetitions per cut

    args = parser.parse_args()
    # Process every bagfile: cut_<i>/cut_<i>_<j>.bag (both 1-based).
    for i in range(N_CUTS):
        for j in range(N_EACH_CUT):
            bag_name = 'cut_' + str(i + 1) + '/cut_' + str(i + 1) + '_' + str(j + 1) + '.bag'
            print('Processing bagfile ' + bag_name)
            dh = RobotDataHelper(bag_name,
                                 sampling_rate=10.,
                                 material_label=MATERIAL_LABELS[i],
                                 thickness_label=THICKNESS_LABELS[i])
            if args.save_plots:
                for data_type in ['position', 'velocity', 'effort']:
                    dh.plot_joint_state(data_type)
            dh.save_data()
| <filename>aml_robot_cutting/process_data.py
#! /usr/bin/env python2
"""
This script loads the robot data from the bagfiles and reorganizes them in CSV files that can be later used for a
data loader.
"""
import argparse
from robot_data_helper import RobotDataHelper
parser = argparse.ArgumentParser(description='Processes robot data')
parser.add_argument('--save_plots', default=False, action='store_true', help='Saves the plots.')
if __name__ == '__main__':
"""
Material labels for the cuts:
0: LVL, 1: Maple, 2: Oak, 3: Birch, 4: Hardwood
Thickness labels for the cuts:
0: 3/16, 1: 1/4, 2: 5/16, 3: 3/8, 4: 7/16
"""
MATERIAL_LABELS = [0, 0, 0, 0, 0, 1, 2, 3, 1, 2, 3, 4]
THICKNESS_LABELS = [0, 1, 2, 3, 4, 1, 1, 1, 3, 3, 3, 3]
N_CUTS = 12
N_EACH_CUT = 15
args = parser.parse_args()
for i in range(N_CUTS):
for j in range(N_EACH_CUT):
bag_name = 'cut_' + str(i + 1) + '/cut_' + str(i + 1) + '_' + str(j + 1) + '.bag'
print('Processing bagfile ' + bag_name)
dh = RobotDataHelper(bag_name,
sampling_rate=10.,
material_label=MATERIAL_LABELS[i],
thickness_label=THICKNESS_LABELS[i])
if args.save_plots:
for data_type in ['position', 'velocity', 'effort']:
dh.plot_joint_state(data_type)
dh.save_data()
| en | 0.794441 | #! /usr/bin/env python2 This script loads the robot data from the bagfiles and reorganizes them in CSV files that can be later used for a data loader. Material labels for the cuts: 0: LVL, 1: Maple, 2: Oak, 3: Birch, 4: Hardwood Thickness labels for the cuts: 0: 3/16, 1: 1/4, 2: 5/16, 3: 3/8, 4: 7/16 | 2.705757 | 3 |
core/models/stam_wrapper.py | CameronTaylorFL/stam | 3 | 6616344 | import pdb
import csv
import progressbar
import time
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.colors import LogNorm
from core.models.STAM_classRepo import *
from core.utils import *
from core.distance_metrics import *
from sklearn import metrics
from sklearn import mixture
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cluster import KMeans, SpectralClustering, AffinityPropagation, AgglomerativeClustering
from kmodes.kmodes import KModes
from sklearn.metrics import pairwise_distances, jaccard_score
class StamWrapper():
    def __init__(self, configs):
        """Build the STAM hierarchy from a configuration dictionary.

        Args:
            configs: dict of scenario/model/output settings. Keys read below;
                ``configs['layers']`` is a list of per-layer argument tuples
                splatted into each ``Layer`` constructor.
        """
        # declare properties
        self.name = 'STAM'
        # extract scenario configs
        self.num_classes = configs['num_classes']
        self.class_labels = configs['class_labels']
        self.vis_train = configs['visualize_train']
        self.vis_cluster = configs['visualize_cluster']
        self.im_size = configs['im_size']
        self.num_c = configs['channels']
        self.seed = configs['seed']
        self.im_scale = configs['im_scale']
        self.scale_flag = configs['scale_flag']
        self.num_samples = configs['num_samples']
        self.num_phases = configs['num_phases']
        # extract stam configs
        self.num_layers = len(configs['layers'])
        self.rho = configs['rho']
        self.nd_fixed = configs['nd_fixed']
        print(self.nd_fixed)
        # directory paths
        self.results_directory = configs['results_directory']
        self.plot_directory = configs['plot_directory']
        # initialize variables
        self.images_seen = 0
        self.ltm_cent_counts = np.zeros((self.num_phases,3))
        # Informative centroid info storage
        self.informative_indices = [[] for i in range(self.num_layers)]
        self.informative_indices_2 = [[] for i in range(self.num_layers)]
        self.informative_class = [[] for i in range(self.num_layers)]
        # same expected-feature count is repeated for every layer
        self.expected_features = [configs['expected_features'] for l in range(self.num_layers)]
        self.num_images_init = configs['num_images_init']
        print(self.expected_features)
        # build stam hierarchy
        self.layers = []
        self.layers.append(Layer(self.im_size, self.num_c, *configs['layers'][0],
                                 configs['WTA'], self.im_scale, self.scale_flag,
                                 self.seed, configs['kernel'], self.expected_features[0], self.nd_fixed, self.num_images_init, self.plot_directory, self.vis_train))
        for l in range(1,self.num_layers):
            self.layers.append(Layer(self.im_size, self.num_c, *configs['layers'][l],
                                     configs['WTA'], self.im_scale, self.scale_flag,
                                     self.seed, configs['kernel'], self.expected_features[l], self.nd_fixed, self.num_images_init, self.plot_directory, self.vis_train))
        # stam init
        self.init_layers()
        # classification parameters (one slot per evaluation sample set)
        self.Fz = [[] for i in range(self.num_samples)]
        self.D = [[] for i in range(self.num_samples)]
        self.D_sum = [[] for i in range(self.num_samples)]
        self.cent_g = [[] for i in range(self.num_samples)]
        self.Nl_seen = [0 for i in range(self.num_samples)]
        # visualize task boundaries
        self.ndy = []
# centroid init - note that current implementation does NOT use random
# init centroids but rather will change these centroids to sampled patch
# values in the learning alogrithm (see STAM_classRepo)
def init_layers(self):
# random seed
np.random.seed(self.seed)
# for all layers
for l in range(self.num_layers):
# number of centroids to initialize
n_l = self.layers[l].num_cents
# random init
self.layers[l].centroids = np.random.randn(n_l,
self.layers[l].recField_size \
* self.layers[l].recField_size \
* self.layers[l].ch) * 0.1
# normalize sum to 1
self.layers[l].centroids -= np.amin(self.layers[l].centroids, axis = 1)[:,None]
self.layers[l].centroids /= np.sum(self.layers[l].centroids, axis = 1)[:,None]
    def train(self, x, y, n, experiment_params, sort=False):
        """Run one unsupervised training phase over images ``x``.

        Args:
            x, y: images and labels (labels are only passed through to the
                layers; learning itself is unsupervised).
            n: 1-indexed phase number; ``self.phase`` becomes ``n - 1``.
            experiment_params: unused in this method.
            sort: when True, present data sorted by label.
        """
        self.phase = n - 1
        # reset d samples - for visualization
        for l in range(self.num_layers):
            self.layers[l].d_sample = 100
            self.layers[l].delete_n = []
        # record phase boundary (image index) for later plots
        self.ndy.append(self.images_seen+1)
        if sort:
            sortd = np.argsort(y)
            x = x[sortd]
            y = y[sortd]
        # make sure data is nonzero
        if len(x) > 0:
            # start progress bar
            bar = progressbar.ProgressBar(maxval=len(x), \
                widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
            bar.start()
            # for all data points
            for i in range(len(x)):
                # reset d samples again for the last 100 images
                if i == len(x) - 100:
                    for l in range(self.num_layers):
                        self.layers[l].d_sample = 100
                # show image to hierarchy
                self.images_seen = self.images_seen + 1
                self.train_update(x[i], y[i])
                # update progress bar
                bar.update(i+1)
            # finish progress bar
            bar.finish()
        # NOTE(review): 'if True' looks like a debug toggle left enabled — confirm
        if True:
            self.save_visualizations(smart_dir(self.plot_directory + '/phase_{}/'.format(n-1)), n-1)
# update step for stam model training
def train_update(self, x, label):
# for all layers
x_i = x
for l in range(self.num_layers):
x_i = self.layers[l].forward(x_i, label, update = True)
# initialize hierarchy classfiication parameters for each
# evaluation data sample
def setTask(self, num_samples, K):
# classification parameters
self.Fz = [[] for i in range(num_samples)]
self.D = [[] for i in range(num_samples)]
self.D_sum = [[] for i in range(num_samples)]
self.cent_g = [[] for i in range(num_samples)]
self.Nl_seen = [0 for i in range(num_samples)]
# set rho
self.rho_task = self.rho+(1/K)
# get percent class informative centroids
def get_ci(self, phase, index = 0, vis=False):
# hold results here
score = [0 for l in range(self.num_layers)]
score_pc = [np.zeros((self.num_classes,)) for l in range(self.num_layers)]
score_multi = [0 for l in range(self.num_layers)]
# for each layer
for l in range(self.num_layers):
# for each centroid
for j in range(len(self.cent_g[index][l])):
# increase score if ci
if max(self.cent_g[index][l][j]) > self.rho_task: #and np.sort(self.cent_g[index][l][j])[-2] <= 0.5 * max(self.cent_g[index][l][j]):
score[l] += 1
if len(np.where(self.cent_g[index][l][j, :] > self.rho_task)) > 1:
score_multi[l] += 1
for k in range(self.num_classes):
if self.cent_g[index][l][j,k] > self.rho_task:
score_pc[l][k] += 1
# calculate percent ci at layer
score[l] /= len(self.cent_g[index][l])
score_pc[l] /= len(self.cent_g[index][l])
score_multi[l] /= len(self.cent_g[index][l])
return np.asarray(score), np.asarray(score_pc), np.asarray(score_multi)
# given labeled data, associate class information with stam centroids
    def supervise(self, data, labels, phase, experiment_params, l_list = None, index = 0, image_ret=False, vis=True):
        """Associate class information with LTM centroids using labeled data.

        For every layer in ``l_list`` this accumulates, per centroid, an
        exponential similarity score per class (``self.Fz[index]``) and a
        class-normalized version (``self.cent_g[index]``) used later for
        classification. Also caches the current LTM (or STM fallback)
        centroids into ``self.cents_ltm`` / ``self.class_ltm``.

        Args:
            data, labels: labeled images and integer class labels.
            phase, experiment_params, image_ret, vis: unused in this method.
            l_list: layer indices to supervise.
            index: evaluation-sample slot to update.
        """
        # process inputs
        num_data = len(data)
        # get centroids for classification
        self.cents_ltm = []
        self.class_ltm = []
        for l in range(self.num_layers):
            if self.layers[l].num_ltm > 0:
                self.cents_ltm.append(self.layers[l].get_ltm_centroids())
                self.class_ltm.append(self.layers[l].get_ltm_classes())
            else:
                # fall back to STM centroids when no LTM centroids exist yet
                self.cents_ltm.append(self.layers[l].get_stm_centroids())
                self.class_ltm.append(self.layers[l].get_stm_classes())
        # this is repeat of self.setTask which is kept for scenario
        # where labeled data is NOT replayed
        if self.Nl_seen[index] == 0:
            self.D_sum[index] = [0 for l in range(len(l_list))]
            self.D[index] = [[] for l in range(len(l_list))]
            self.Fz[index] = [[] for l in range(len(l_list))]
            self.cent_g[index] = [[] for l in range(len(l_list))]
        self.Nl_seen[index] += num_data
        # supervision per layer
        for l_index in range(len(l_list)):
            # get layer index from list of classification layers
            l = l_list[l_index]
            # get layer centroids
            centroids = self.cents_ltm[l]
            num_centroids = int(len(centroids))
            # get value of D for task
            # we use D to normalize distances wrt average centroid-patch distance
            for i in range(num_data):
                # get input to layer l
                x_i = data[i]
                for l_ in range(l):
                    x_i = self.layers[l_].forward(x_i, None, update = False)
                # extract patches
                patches = self.layers[l].extract_patches(x_i)
                shape = patches.shape
                xp = patches.reshape(self.layers[l].num_RFs, -1)
                [xp, _, _] = self.layers[l].scale(xp)
                # calculate and save distance
                cp = centroids.reshape(num_centroids, -1)
                d = smart_dist(xp, cp)
                close_ind = np.argmin(d, axis = 1)
                self.D_sum[index][l_index] += np.sum(d[range(shape[0]),close_ind]) / shape[0]
            # final D calculation
            self.D[index][l_index] = self.D_sum[index][l_index] / self.Nl_seen[index]
            # this holds sum of exponential "score" for each centroid for each class
            sum_fz_pool = np.zeros((num_centroids, self.num_classes))
            # this code is relevant if we are not replaying labeled data
            ncents_past = len(self.Fz[index][l_index])
            if ncents_past > 0:
                sum_fz_pool[:ncents_past,:] = self.Fz[index][l_index]
            # for each image
            for i in range(num_data):
                # get input to layer l
                x_i = data[i]
                for l_ in range(l):
                    x_i = self.layers[l_].forward(x_i, None, update = False)
                # extract patches
                patches = self.layers[l].extract_patches(x_i)
                shape = patches.shape
                xp = patches.reshape(self.layers[l].num_RFs, -1)
                [xp, _, _] = self.layers[l].scale(xp)
                # calculate distance
                cp = centroids.reshape(num_centroids, -1)
                d = smart_dist(xp, cp)
                # get distance of *matched* centroid of each patch
                close_ind = np.argmin(d, axis = 1)
                dist = (d[range(shape[0]),close_ind])
                # get exponential distance and put into sparse array with same shape as
                # summed exponential scores if we have two centroid matches in same
                # image, only save best match
                td = np.zeros(d.shape)
                td[range(shape[0]),close_ind] = np.exp(-1*dist/self.D[index][l_index])
                fz = np.amax(td, axis = 0)
                # update sum of exponential "score" for each centroid for each class
                sum_fz_pool[:, int(labels[i])] += fz
            # save data scores and calculate g values as exponential "score" normalized
            # accross classes (i.e. score of each centroid sums to 1)
            self.Fz[index][l_index] = sum_fz_pool
            self.cent_g[index][l_index] = np.copy(sum_fz_pool)
            for j in range(num_centroids):
                self.cent_g[index][l_index][j,:] = self.cent_g[index][l_index][j,:] \
                                                 / (np.sum(self.cent_g[index][l_index][j,:]) + 1e-5)
# call classification function
def classify(self, data, phase, c_type, index, experiment_params):
if c_type == 'hierarchy-vote':
labels = self.topDownClassify(data, index, experiment_params, vis=True, phase=phase)
return labels
# stam primary classification function - hierarchical voting mechanism
    def topDownClassify(self, data, index, experiment_params, vis=False, phase=None):
        """Hierarchical voting classifier over LTM centroids.

        For each image: match every patch at every layer to its nearest
        cached centroid (``self.cents_ltm``), find the highest layer with at
        least one class-informative centroid, then sum normalized g-value
        votes from that layer down to layer 0.

        Returns:
            numpy array of predicted class indices, one per image.
        """
        # process inputs and init return labels
        num_data = len(data)
        labels = -1 * np.ones((num_data,))
        # for each data
        for i in range(num_data):
            # get NN centroid for each patch
            close_ind = []
            close_distances = []
            for l in range(self.num_layers):
                # get ltm centroids at layer
                centroids = self.cents_ltm[l]
                num_centroids = int(len(centroids))
                # get input to layer
                x_i = data[i]
                for l_ in range(l): x_i = self.layers[l_].forward(x_i, None, update = False)
                # extract patches
                patches = self.layers[l].extract_patches(x_i)
                xp = patches.reshape(self.layers[l].num_RFs, -1)
                [xp, shifts, scales] = self.layers[l].scale(xp)
                # calculate distance
                cp = centroids.reshape(num_centroids, -1)
                d = smart_dist(xp, cp)
                close_ind.append(np.argmin(d, axis = 1))
                close_distances.append(np.min(d, axis = 1))
            # get highest layer containing at least one CIN centroid
            l = self.num_layers-1
            found_cin = False
            while l > 0 and not found_cin:
                # is there at least one CIN centroid?
                if np.amax(self.cent_g[index][l][close_ind[l]]) >= self.rho_task:
                    found_cin = True
                else:
                    l -= 1
            l_cin = l
            # classification
            #
            # vote of each class for all layers (epsilon avoids all-zero argmax ties)
            wta_total = np.zeros((self.num_classes,)) + 1e-3
            # for all cin layers
            layer_range = range(l_cin+1)
            percent_inform = []
            for l in layer_range:
                # vote of each class in this layer
                wta = np.zeros((self.num_classes,))
                # get max g value for matched centroids
                votes_g = np.amax(self.cent_g[index][l][close_ind[l]], axis = 1)
                # nullify vote of non-cin centroids
                votes_g[votes_g < self.rho_task] = 0
                a = np.where(votes_g > self.rho_task)
                percent_inform.append(len(a[0])/ len(votes_g))
                # calculate per class vote at this layer
                votes = np.argmax(self.cent_g[index][l][close_ind[l]], axis = 1)
                for k in range(self.num_classes):
                    wta[k] = np.sum(votes_g[votes == k])
                # add to cumalitive total and normalize
                wta /= len(close_ind[l])
                wta_total += wta
            # final step
            labels[i] = np.argmax(wta_total)
        return labels
    def embed_centroid(self, X, layer, cents_ltm, experiment_params):
        """Embed each image as its vector of closest-patch distances to every
        centroid of ``layer`` (optionally exponentially normalized).

        Args:
            X: images to embed.
            layer: Layer whose patches/centroids define the embedding.
            cents_ltm: centroid array for that layer.
            experiment_params: truthy -> apply exp(-d / D_l) normalization.

        Returns:
            (len(X), num_centroids) embedding matrix.
        """
        normalization = experiment_params
        centroids = cents_ltm.reshape(len(cents_ltm), -1)
        X_ = np.zeros((X.shape[0], len(cents_ltm)))
        for i, x in enumerate(X):
            # ltm centroids were determined in 'supervise' function
            # get input to layer l
            x_layer = layer.forward(x, None, update=False)
            # extract patches
            patches = layer.extract_patches(x_layer)
            patches = patches.reshape(layer.num_RFs, -1)
            patches, _, _ = layer.scale(patches)
            # compute distance matrix
            d_mat = smart_dist(patches, centroids)
            # get indices of closest patch to each centroid and accumulate average
            # closest-patch distances
            close_cent_dists = np.min(d_mat, axis=0)
            # Calculate normalization constant D_l
            # NOTE(review): precedence makes this (sum / rows) * cols; if the
            # intent was the mean distance sum / (rows * cols), parenthesize —
            # confirm against the paper/experiments before changing.
            D_l = np.sum(d_mat) / d_mat.shape[0] * d_mat.shape[1]
            if normalization:
                X_[i,:] = np.exp((-1 * close_cent_dists) / D_l)
            else:
                X_[i,:] = close_cent_dists
        return X_
    def embed_patch(self, X, layer, cents_ltm, experiment_params, cnt=False):
        """Embed each image as the indices of its patches' nearest centroids
        (a 'bag of centroid ids' used for Jaccard similarity).

        Args:
            X: images to embed.
            layer: Layer providing patches.
            cents_ltm: flattened centroid matrix for that layer.
            experiment_params: unused (bound to a dead local).
            cnt: when True, emit multiset-style string tokens "idx.copy" so
                repeated centroid matches stay distinct.

        Returns:
            (len(X), num_patches) array of int indices (or str tokens).
        """
        method = experiment_params
        # patch size and num_features calculations
        p = layer.recField_size
        n_cols = len(layer.extract_patches(X[0]))
        if cnt:
            X_ = np.zeros((X.shape[0], n_cols), dtype=str)
        else:
            X_ = np.zeros((X.shape[0], n_cols), dtype=int)
        for i, x in enumerate(X):
            # get input to layer l
            x_layer = layer.forward(x, None, update=False)
            # extract patches
            patches = layer.extract_patches(x_layer)
            patches = patches.reshape(layer.num_RFs, -1)
            patches, _, _ = layer.scale(patches)
            d_mat = smart_dist(patches, cents_ltm)
            # get indices of closest patch to each centroid and accumulate average
            # closest-patch distances
            close_patch_inds = np.argmin(d_mat, axis=1)
            if cnt:
                # NOTE(review): minlength=max(...) looks off — bincount already
                # covers 0..max, and the number of centroids (or max+1) seems
                # intended; harmless here since zero-count ids are skipped.
                counts = np.bincount(close_patch_inds, minlength=max(close_patch_inds))
                temp_i = []
                for ind, count in enumerate(counts):
                    if count == 0:
                        continue
                    else:
                        for c in range(count):
                            temp_i.append("{}.{}".format(ind, c))
                X_[i] = np.array(temp_i)
            else:
                X_[i] = close_patch_inds
        print("Got Jaccard Embedding")
        return X_
def jaccard(self, x, y):
x = set(x)
y = set(y)
val = len(x.intersection(y)) / len(x.union(y))
if val == None:
return 0
else:
return val
# cluster
    def cluster(self, X, Y, phase_num, task_num, dataset, num_classes,
                experiment_params, cluster_method='kmeans',
                accuracy_method='purity', k_scale=2, eval_layers=[]):
        # returns total and per-class accuracy... (float, 1 by k numpy[float])
        # NOTE(review): eval_layers has a mutable default [] — it is never
        # mutated or even read here, but a None default would be safer.
        # embedding_mode 0 concatenates centroid embeddings from layers 0-2,
        # so it assumes at least 3 layers; mode 1 uses patch-id embeddings
        # with a Jaccard similarity matrix (for spectral clustering).
        print('Clustering Task Started...')
        embedding_mode, mode_name = experiment_params
        print("Embedding Mode: ", embedding_mode)
        print("Experiment Name: ", mode_name)
        print("Cluster Method: ", cluster_method)
        # placeholder; only replaced when embedding_mode == 1
        similarity_matrix = np.zeros(10)
        if embedding_mode == 0:
            X_1 = self.embed_centroid(X, self.layers[0], self.cents_ltm[0], True)
            X_2 = self.embed_centroid(X, self.layers[1], self.cents_ltm[1], True)
            X_3 = self.embed_centroid(X, self.layers[2], self.cents_ltm[2], True)
            embeddings = np.concatenate((X_1, np.concatenate((X_2, X_3), axis=1)), axis=1)
        elif embedding_mode == 1:
            X_3 = self.embed_patch(X, self.layers[2], self.cents_ltm[2], None, False)
            embeddings = X_3
            similarity_matrix = pairwise_distances(embeddings, embeddings, metric=self.jaccard)
        # over-cluster: k_scale clusters per true class
        k = np.unique(Y).shape[0] * k_scale
        accu_total = 0
        accu_perclass = np.zeros(num_classes, dtype=np.float64)
        # Clustering Predictions
        if cluster_method == 'kmeans':
            cluster_preds = KMeans(n_clusters=k, init='k-means++', n_init=10,
                                   max_iter=300, verbose=0).fit_predict(embeddings)
        elif cluster_method == 'spectral':
            try:
                cluster_preds = SpectralClustering(n_clusters=k, affinity='precomputed', n_init=10,
                                                   assign_labels='discretize').fit_predict(similarity_matrix)
            except Exception as e:
                # spectral clustering can fail on degenerate affinity matrices;
                # fall back to a single all-zero assignment
                cluster_preds = np.zeros(len(similarity_matrix))
        # Accuracy of Clustering
        if accuracy_method == 'purity':
            size = k
            print("Size ", size)
            cluster_counts = np.zeros((size, int(k/k_scale)))
            cluster_sizes = np.zeros(size)
            correct = np.zeros(size)
            total = np.zeros(size)
            cluster_indicies = [[] for i in range(num_classes)]
            for i in range(size):
                cluster_i = np.argwhere(cluster_preds == i).flatten() # indexes of cluster i
                cluster_sizes[i] = len(cluster_i)
                cluster_counts[i,:] = np.bincount(Y[cluster_i], minlength=int(k/k_scale))
                # compute accuracy: each cluster is assigned its majority class
                cluster_class = np.argmax(cluster_counts[i, :])
                correct[i] = cluster_counts[i, :].max()
                total[i] = cluster_counts[i, :].sum()
                cluster_indicies[cluster_class].append(i)
            for j in range(num_classes):
                if sum(total[cluster_indicies[j]]) > 0:
                    accu_perclass[j] = sum(correct[cluster_indicies[j]]) \
                                       / sum(total[cluster_indicies[j]]) * 100
                else:
                    accu_perclass[j] = 0
            accu_total = sum(correct) / sum(total) * 100
        return accu_total, accu_perclass
# save STAM visualizations
    def save_visualizations(self, save_dir, phase):
        """Write diagnostic plots/CSVs for the current training state:
        LTM centroid-count history, and (when novelty detection is adaptive)
        the matching-distance distribution and ND-threshold history.
        ``phase`` is currently unused."""
        # Cent count
        plt.figure(figsize=(6,3))
        for l in range(self.num_layers):
            y = np.asarray(self.layers[l].ltm_history)
            x = np.arange(len(y))
            plt.plot(x, y, label = 'layer ' + str(l+1))
        plt.ylabel('LTM Count', fontsize=12)
        plt.xlabel('Unlabeled Images Seen', fontsize=12)
        plt.title('LTM Centroid Count History', fontsize=14)
        plt.legend(loc='upper left', prop={'size': 8})
        plt.grid()
        plt.tight_layout()
        plt.savefig(smart_dir(save_dir+'cent_plots')+'ltm_count.png', format='png', dpi=200)
        plt.close()
        # raw per-layer LTM counts as CSV
        for l in range(self.num_layers):
            np.savetxt(smart_dir(save_dir+'ltm_csvs') + 'layer-' + str(l+1) + '_ci.csv',
                       self.layers[l].ltm_history, delimiter=',')
        if not self.nd_fixed:
            # confidence interval
            plt.figure(figsize=(6,3))
            p = np.asarray([0, 25, 50, 75, 90, 100])
            for l in range(self.num_layers):
                dd = np.asarray(self.layers[l].filo_d.getValues())
                y = np.percentile(dd, p)
                x = np.arange(len(dd)) / len(dd)
                plt.plot(x, dd, label = 'layer ' + str(l+1))
                plt.plot(p/100., y, 'ro')
                plt.axhline(y=self.layers[l].cut_d, color='r', linestyle='--')
            # NOTE(review): map(str, p) is a lazy iterator on Python 3 —
            # confirm matplotlib consumes it as intended (list(...) is safer).
            plt.xticks(p/100., map(str, p))
            plt.ylabel('Distance', fontsize=12)
            plt.xlabel('Percentile', fontsize=12)
            plt.title('Distribution of Closest Matching Distance', fontsize=14)
            plt.legend(loc='lower right', prop={'size': 8})
            plt.grid()
            plt.tight_layout()
            plt.savefig(smart_dir(save_dir+'cent_plots')+'d-thresh.png', format='png', dpi=200)
            plt.grid()
            plt.close()
            # D threshold
            plt.figure(figsize=(6,3))
            for l in range(self.num_layers):
                y = np.asarray(self.layers[l].dthresh_history)
                x = np.arange(len(y))
                plt.plot(x, y, label = 'layer ' + str(l+1))
            plt.ylabel('ND Distance', fontsize=12)
            plt.xlabel('Unlabeled Images Seen', fontsize=12)
            plt.gca().set_ylim(bottom=0)
            plt.title('Novelty Detection Threshold History', fontsize=14)
            plt.legend(loc='upper left', prop={'size': 8})
            plt.grid()
            plt.tight_layout()
            plt.savefig(smart_dir(save_dir+'cent_plots')+'d-thresh-history.png',
                        format='png', dpi=200)
            plt.close()
        #self.save_reconstructions(save_dir)
        #self.detailed_classification_plots(save_dir)
    def detailed_classification_plots(self):
        """Re-run the hierarchical-vote classification on the cached sample
        images and save, per image and per layer, a figure showing the
        matched patches/centroids, per-centroid g-value bars and the layer
        vote. Returns the predicted labels for the sample images.

        Requires ``pick_sample_images`` and ``supervise`` to have run first
        (uses ``self.sample_images``, ``self.cents_ltm``, ``self.cent_g``).
        """
        index = 0
        labels = -1 * np.ones((len(self.sample_images),))
        for i in range(len(self.sample_images)):
            # nearest-centroid index per patch, per layer
            close_ind = []
            for l in range(self.num_layers):
                # get ltm centroids at layer
                centroids = self.cents_ltm[l]
                num_centroids = int(len(centroids))
                # get input to layer
                x_i = self.sample_images[i]
                for l_ in range(l): x_i = self.layers[l_].forward(x_i, None, update = False)
                # extract patches
                patches = self.layers[l].extract_patches(x_i)
                xp = patches.reshape(self.layers[l].num_RFs, -1)
                [xp, shifts, scales] = self.layers[l].scale(xp)
                # calculate distance
                cp = centroids.reshape(num_centroids, -1)
                d = smart_dist(xp, cp)
                close_ind.append(np.argmin(d, axis = 1))
            # get highest layer containing at least one CIN centroid
            l = self.num_layers-1
            found_cin = False
            while l > 0 and not found_cin:
                # is there at least one CIN centroid?
                if np.amax(self.cent_g[index][l][close_ind[l]]) >= self.rho_task:
                    found_cin = True
                else:
                    l -= 1
            l_cin = l
            # classification
            #
            # vote of each class for all layers
            wta_total = np.zeros((self.num_classes,)) + 1e-3
            # for all cin layers
            layer_range = range(l_cin+1)
            percent_inform = []
            layer_wta = []
            for l in layer_range:
                # vote of each class in this layer
                wta = np.zeros((self.num_classes,))
                # get max g value for matched centroids
                votes_g = np.amax(self.cent_g[index][l][close_ind[l]], axis = 1)
                # nullify vote of non-cin centroids
                votes_g[votes_g < self.rho_task] = 0
                a = np.where(votes_g > self.rho_task)
                percent_inform.append(len(a[0])/ len(votes_g))
                # calculate per class vote at this layer
                votes = np.argmax(self.cent_g[index][l][close_ind[l]], axis = 1)
                for k in range(self.num_classes):
                    wta[k] = np.sum(votes_g[votes == k])
                # add to cumalitive total
                wta /= len(close_ind[l])
                layer_wta.append(wta)
                wta_total += wta
            # final step
            labels[i] = np.argmax(wta_total)
            # Visualizing Patches and Centroids
            for l in range(self.num_layers):
                nrows = ncols = int(np.sqrt(self.layers[l].num_RFs) / 2)
                rf_size = self.layers[l].recField_size
                plt.close()
                fig = plt.figure(figsize=(9,11))
                # First 3
                out_im, out_im_2 = self.layers[l].create_reconstruction(self.sample_images[i], self.sample_labels[i])
                ax1 = fig.add_axes([0.1, 0.75, 0.2, 0.2])
                ax2 = fig.add_axes([0.35, 0.75, 0.2, 0.2])
                ax3 = fig.add_axes([0.63, 0.83, 0.30, 0.12])
                ax1.imshow(out_im_2.squeeze())
                ax1.set_title('Patches')
                ax1.axis('off')
                ax2.imshow(out_im.squeeze())
                ax2.set_title('Matched Centroids')
                ax2.axis('off')
                ax3.bar(np.arange(self.num_classes), layer_wta[l])
                ax3.set_xticks(np.arange(self.num_classes))
                ax3.set_xticklabels(self.class_labels, rotation='vertical')
                ax2.tick_params(axis='y', which='major', labelsize=10)
                ax3.set_title('Layer {} Vote (1/K + Gamma): {}'.format(l, self.rho_task))
                ax3.axis('on')
                # NOTE(review): x_i here is left over from the LAST iteration of
                # the classification loop above, so these patches may not match
                # layer l's actual input — confirm before trusting these panels.
                patches = self.layers[l].extract_patches(x_i)
                xp_2 = patches.reshape(self.layers[l].num_RFs, -1)
                xp_2 = self.layers[l].scale(xp_2)[0].reshape(self.layers[l].num_RFs, -1)
                centroids = self.cents_ltm[l]
                num_centroids = int(len(centroids))
                cp = centroids.reshape(num_centroids, -1)
                # 4x5 grid of (patch, matched centroid, per-class g bars)
                for p in range(4):
                    for j in range(5):
                        ax1 = fig.add_axes([0.08 + .17*j, 0.57 - .15*p, 0.05, 0.05])
                        ax2 = fig.add_axes([0.16 + .17*j, 0.57 - .15*p, 0.05, 0.05])
                        ax3 = fig.add_axes([0.08 + .17*j, 0.65 - .15*p, .13, .05])
                        ax1.set_title('Patch')
                        ax1.imshow(xp_2[int((p*ncols*2) + (2*j))].reshape(rf_size, rf_size, self.num_c).squeeze())
                        ax1.axis('off')
                        if np.max(self.cent_g[index][l][close_ind[l][int((p*ncols*2) + (j*2))]]) > self.rho_task:
                            ax2.set_title('Centroid', color='g')
                        else:
                            ax2.set_title('Centroid', color='r')
                        ax2.imshow(cp[close_ind[l][int((p*ncols*2) + (j*2))]].reshape(rf_size, rf_size, self.num_c).squeeze())
                        ax2.axis('off')
                        vote = np.argmax(self.cent_g[index][l][close_ind[l][int((p*ncols*2) + (j*2))]])
                        ax3.set_title('Vote: {}'.format(self.class_labels[vote]))
                        ax3.bar(np.arange(self.num_classes), self.cent_g[index][l][close_ind[l][int((p*ncols*2) + (j*2))]])
                        ax3.axes.get_xaxis().set_ticks([])
                        ax3.tick_params(axis='y', which='major', labelsize=6)
                        #ax3.axis('off')
                fig.suptitle('True Class: {} Predicted Class: {} Layer{}'.format(self.class_labels[int(self.sample_labels[i])], self.class_labels[int(labels[i])], l))
                plt.savefig(smart_dir(self.plot_directory + '/phase_{}/ex_{}/'.format(self.phase, i)) + 'layer_{}_vote.png'.format(l))
                plt.close()
        return labels
def save_reconstructions(self, save_dir):
for i in range(len(self.sample_images)):
for layer in range(self.num_layers):
out_im, out_im_2 = self.layers[layer].create_reconstruction(self.sample_images[i], self.sample_labels[i])
plt.figure(figsize=(6,3))
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(out_im.squeeze())
ax2.imshow(out_im_2.squeeze())
plt.title('Class : {}'.format(self.sample_labels[i]))
plt.savefig(smart_dir(save_dir + '/reconstructions/layer_{}'.format(layer)) + 'image_{}'.format(i))
plt.close()
# scale image based on normalization
def scaleImage(self, im_show):
im_show = np.squeeze((im_show - self.im_scale[0]) / (self.im_scale[1] - self.im_scale[0]))
im_show[im_show>1] = 1
im_show[im_show<0] = 0
return im_show
def pick_sample_images(self, x_test, y_test, skip=20):
self.sample_images = []
self.sample_labels = []
self.skip = skip
k = np.unique(y_test)
for i, im in enumerate(x_test):
if i % self.skip == 0:
self.sample_images.append(x_test[i])
self.sample_labels.append(y_test[i])
plt.imshow(im.reshape(self.im_size, self.im_size, self.num_c).squeeze())
plt.title('Class: {}'.format(y_test[i]))
plt.savefig(smart_dir(self.plot_directory + '/sample_imgs/') + 'sample_image_{}'.format(int(i / self.skip)))
plt.close()
def stm_eviction_plot(self):
plt.close()
for l in range(self.num_layers):
plt.figure(figsize=(40,6))
data = self.layers[l].eviction_tracker
x = np.arange(len(data))
plt.boxplot(data, positions=x, showfliers=False)
plt.savefig(smart_dir(self.plot_directory) + '/eviction_layer_{}.png'.format(l))
plt.close()
| import pdb
import csv
import progressbar
import time
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.colors import LogNorm
from core.models.STAM_classRepo import *
from core.utils import *
from core.distance_metrics import *
from sklearn import metrics
from sklearn import mixture
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cluster import KMeans, SpectralClustering, AffinityPropagation, AgglomerativeClustering
from kmodes.kmodes import KModes
from sklearn.metrics import pairwise_distances, jaccard_score
class StamWrapper():
def __init__(self, configs):
# declare properties
self.name = 'STAM'
# extract scenario configs
self.num_classes = configs['num_classes']
self.class_labels = configs['class_labels']
self.vis_train = configs['visualize_train']
self.vis_cluster = configs['visualize_cluster']
self.im_size = configs['im_size']
self.num_c = configs['channels']
self.seed = configs['seed']
self.im_scale = configs['im_scale']
self.scale_flag = configs['scale_flag']
self.num_samples = configs['num_samples']
self.num_phases = configs['num_phases']
# extract stam configs
self.num_layers = len(configs['layers'])
self.rho = configs['rho']
self.nd_fixed = configs['nd_fixed']
print(self.nd_fixed)
# directory paths
self.results_directory = configs['results_directory']
self.plot_directory = configs['plot_directory']
# initialize variables
self.images_seen = 0
self.ltm_cent_counts = np.zeros((self.num_phases,3))
# Informative centroid info storage
self.informative_indices = [[] for i in range(self.num_layers)]
self.informative_indices_2 = [[] for i in range(self.num_layers)]
self.informative_class = [[] for i in range(self.num_layers)]
self.expected_features = [configs['expected_features'] for l in range(self.num_layers)]
self.num_images_init = configs['num_images_init']
print(self.expected_features)
# build stam hierarchy
self.layers = []
self.layers.append(Layer(self.im_size, self.num_c, *configs['layers'][0],
configs['WTA'], self.im_scale, self.scale_flag,
self.seed, configs['kernel'], self.expected_features[0], self.nd_fixed, self.num_images_init, self.plot_directory, self.vis_train))
for l in range(1,self.num_layers):
self.layers.append(Layer(self.im_size, self.num_c, *configs['layers'][l],
configs['WTA'], self.im_scale, self.scale_flag,
self.seed, configs['kernel'], self.expected_features[l], self.nd_fixed, self.num_images_init, self.plot_directory, self.vis_train))
# stam init
self.init_layers()
# classification parameters
self.Fz = [[] for i in range(self.num_samples)]
self.D = [[] for i in range(self.num_samples)]
self.D_sum = [[] for i in range(self.num_samples)]
self.cent_g = [[] for i in range(self.num_samples)]
self.Nl_seen = [0 for i in range(self.num_samples)]
# visualize task boundaries
self.ndy = []
# centroid init - note that current implementation does NOT use random
# init centroids but rather will change these centroids to sampled patch
# values in the learning alogrithm (see STAM_classRepo)
def init_layers(self):
# random seed
np.random.seed(self.seed)
# for all layers
for l in range(self.num_layers):
# number of centroids to initialize
n_l = self.layers[l].num_cents
# random init
self.layers[l].centroids = np.random.randn(n_l,
self.layers[l].recField_size \
* self.layers[l].recField_size \
* self.layers[l].ch) * 0.1
# normalize sum to 1
self.layers[l].centroids -= np.amin(self.layers[l].centroids, axis = 1)[:,None]
self.layers[l].centroids /= np.sum(self.layers[l].centroids, axis = 1)[:,None]
def train(self, x, y, n, experiment_params, sort=False):
self.phase = n - 1
# reset d samples - for visualization
for l in range(self.num_layers):
self.layers[l].d_sample = 100
self.layers[l].delete_n = []
self.ndy.append(self.images_seen+1)
if sort:
sortd = np.argsort(y)
x = x[sortd]
y = y[sortd]
# make sure data is nonzero
if len(x) > 0:
# start progress bar
bar = progressbar.ProgressBar(maxval=len(x), \
widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
bar.start()
# for all data points
for i in range(len(x)):
# reset d samples
if i == len(x) - 100:
for l in range(self.num_layers):
self.layers[l].d_sample = 100
# show image to hierarchy
self.images_seen = self.images_seen + 1
self.train_update(x[i], y[i])
# update progress bar
bar.update(i+1)
# finish progress bar
bar.finish()
if True:
self.save_visualizations(smart_dir(self.plot_directory + '/phase_{}/'.format(n-1)), n-1)
# update step for stam model training
def train_update(self, x, label):
# for all layers
x_i = x
for l in range(self.num_layers):
x_i = self.layers[l].forward(x_i, label, update = True)
# initialize hierarchy classfiication parameters for each
# evaluation data sample
def setTask(self, num_samples, K):
# classification parameters
self.Fz = [[] for i in range(num_samples)]
self.D = [[] for i in range(num_samples)]
self.D_sum = [[] for i in range(num_samples)]
self.cent_g = [[] for i in range(num_samples)]
self.Nl_seen = [0 for i in range(num_samples)]
# set rho
self.rho_task = self.rho+(1/K)
# get percent class informative centroids
def get_ci(self, phase, index = 0, vis=False):
# hold results here
score = [0 for l in range(self.num_layers)]
score_pc = [np.zeros((self.num_classes,)) for l in range(self.num_layers)]
score_multi = [0 for l in range(self.num_layers)]
# for each layer
for l in range(self.num_layers):
# for each centroid
for j in range(len(self.cent_g[index][l])):
# increase score if ci
if max(self.cent_g[index][l][j]) > self.rho_task: #and np.sort(self.cent_g[index][l][j])[-2] <= 0.5 * max(self.cent_g[index][l][j]):
score[l] += 1
if len(np.where(self.cent_g[index][l][j, :] > self.rho_task)) > 1:
score_multi[l] += 1
for k in range(self.num_classes):
if self.cent_g[index][l][j,k] > self.rho_task:
score_pc[l][k] += 1
# calculate percent ci at layer
score[l] /= len(self.cent_g[index][l])
score_pc[l] /= len(self.cent_g[index][l])
score_multi[l] /= len(self.cent_g[index][l])
return np.asarray(score), np.asarray(score_pc), np.asarray(score_multi)
# given labeled data, associate class information with stam centroids
def supervise(self, data, labels, phase, experiment_params, l_list = None, index = 0, image_ret=False, vis=True):
# process inputs
num_data = len(data)
# get centroids for classification
self.cents_ltm = []
self.class_ltm = []
for l in range(self.num_layers):
if self.layers[l].num_ltm > 0:
self.cents_ltm.append(self.layers[l].get_ltm_centroids())
self.class_ltm.append(self.layers[l].get_ltm_classes())
else:
self.cents_ltm.append(self.layers[l].get_stm_centroids())
self.class_ltm.append(self.layers[l].get_stm_classes())
# this is repeat of self.setTask which is kept for scenario
# where labeled data is NOT replayed
if self.Nl_seen[index] == 0:
self.D_sum[index] = [0 for l in range(len(l_list))]
self.D[index] = [[] for l in range(len(l_list))]
self.Fz[index] = [[] for l in range(len(l_list))]
self.cent_g[index] = [[] for l in range(len(l_list))]
self.Nl_seen[index] += num_data
# supervision per layer
for l_index in range(len(l_list)):
# get layer index from list of classification layers
l = l_list[l_index]
# get layer centroids
centroids = self.cents_ltm[l]
num_centroids = int(len(centroids))
# get value of D for task
# we use D to normalize distances wrt average centroid-patch distance
for i in range(num_data):
# get input to layer l
x_i = data[i]
for l_ in range(l):
x_i = self.layers[l_].forward(x_i, None, update = False)
# extract patches
patches = self.layers[l].extract_patches(x_i)
shape = patches.shape
xp = patches.reshape(self.layers[l].num_RFs, -1)
[xp, _, _] = self.layers[l].scale(xp)
# calculate and save distance
cp = centroids.reshape(num_centroids, -1)
d = smart_dist(xp, cp)
close_ind = np.argmin(d, axis = 1)
self.D_sum[index][l_index] += np.sum(d[range(shape[0]),close_ind]) / shape[0]
# final D calculation
self.D[index][l_index] = self.D_sum[index][l_index] / self.Nl_seen[index]
# this holds sum of exponential "score" for each centroid for each class
sum_fz_pool = np.zeros((num_centroids, self.num_classes))
# this code is relevant if we are not replaying labeled data
ncents_past = len(self.Fz[index][l_index])
if ncents_past > 0:
sum_fz_pool[:ncents_past,:] = self.Fz[index][l_index]
# for each image
for i in range(num_data):
# get input to layer l
x_i = data[i]
for l_ in range(l):
x_i = self.layers[l_].forward(x_i, None, update = False)
# extract patches
patches = self.layers[l].extract_patches(x_i)
shape = patches.shape
xp = patches.reshape(self.layers[l].num_RFs, -1)
[xp, _, _] = self.layers[l].scale(xp)
# calculate distance
cp = centroids.reshape(num_centroids, -1)
d = smart_dist(xp, cp)
# get distance of *matched* centroid of each patch
close_ind = np.argmin(d, axis = 1)
dist = (d[range(shape[0]),close_ind])
# get exponential distance and put into sparse array with same shape as
# summed exponential scores if we have two centroid matches in same
# image, only save best match
td = np.zeros(d.shape)
td[range(shape[0]),close_ind] = np.exp(-1*dist/self.D[index][l_index])
fz = np.amax(td, axis = 0)
# update sum of exponential "score" for each centroid for each class
sum_fz_pool[:, int(labels[i])] += fz
# save data scores and calculate g values as exponential "score" normalized
# accross classes (i.e. score of each centroid sums to 1)
self.Fz[index][l_index] = sum_fz_pool
self.cent_g[index][l_index] = np.copy(sum_fz_pool)
for j in range(num_centroids):
self.cent_g[index][l_index][j,:] = self.cent_g[index][l_index][j,:] \
/ (np.sum(self.cent_g[index][l_index][j,:]) + 1e-5)
# call classification function
def classify(self, data, phase, c_type, index, experiment_params):
if c_type == 'hierarchy-vote':
labels = self.topDownClassify(data, index, experiment_params, vis=True, phase=phase)
return labels
    # stam primary classification function - hierarchical voting mechanism
    def topDownClassify(self, data, index, experiment_params, vis=False, phase=None):
        """Classify each image in `data` by hierarchical centroid voting.

        For every image, each layer matches its patches to the nearest LTM
        centroid; the highest layer that contains at least one class-informative
        (CIN) centroid caps the set of voting layers, and the per-class g-value
        votes of all layers up to that cap are accumulated to pick the label.

        Note: `experiment_params`, `vis` and `phase` are accepted but not used
        in this body — presumably kept for interface parity with other
        classifiers (TODO confirm against callers).

        Returns a 1-D numpy array of predicted class indices (float dtype,
        initialized to -1).
        """
        # process inputs and init return labels
        num_data = len(data)
        labels = -1 * np.ones((num_data,))
        # for each data
        for i in range(num_data):
            # get NN centroid for each patch
            close_ind = []
            close_distances = []
            for l in range(self.num_layers):
                # get ltm centroids at layer
                centroids = self.cents_ltm[l]
                num_centroids = int(len(centroids))
                # get input to layer: run the image through all layers below l
                x_i = data[i]
                for l_ in range(l): x_i = self.layers[l_].forward(x_i, None, update = False)
                # extract patches
                patches = self.layers[l].extract_patches(x_i)
                xp = patches.reshape(self.layers[l].num_RFs, -1)
                [xp, shifts, scales] = self.layers[l].scale(xp)
                # calculate distance
                cp = centroids.reshape(num_centroids, -1)
                d = smart_dist(xp, cp)
                # per-patch index of (and distance to) the nearest centroid
                close_ind.append(np.argmin(d, axis = 1))
                close_distances.append(np.min(d, axis = 1))
            # get highest layer containing at least one CIN centroid
            l = self.num_layers-1
            found_cin = False
            while l > 0 and not found_cin:
                # is there at least one CIN centroid?
                if np.amax(self.cent_g[index][l][close_ind[l]]) >= self.rho_task:
                    found_cin = True
                else:
                    l -= 1
            l_cin = l
            # classification
            #
            # vote of each class for all layers (small epsilon breaks argmax ties)
            wta_total = np.zeros((self.num_classes,)) + 1e-3
            # for all cin layers
            layer_range = range(l_cin+1)
            percent_inform = []
            for l in layer_range:
                # vote of each class in this layer
                wta = np.zeros((self.num_classes,))
                # get max g value for matched centroids
                votes_g = np.amax(self.cent_g[index][l][close_ind[l]], axis = 1)
                # nullify vote of non-cin centroids
                votes_g[votes_g < self.rho_task] = 0
                # fraction of patches whose matched centroid is informative
                a = np.where(votes_g > self.rho_task)
                percent_inform.append(len(a[0])/ len(votes_g))
                # calculate per class vote at this layer
                votes = np.argmax(self.cent_g[index][l][close_ind[l]], axis = 1)
                for k in range(self.num_classes):
                    wta[k] = np.sum(votes_g[votes == k])
                # add to cumalitive total and normalize
                wta /= len(close_ind[l])
                wta_total += wta
            # final step
            labels[i] = np.argmax(wta_total)
        return labels
    def embed_centroid(self, X, layer, cents_ltm, experiment_params):
        """Embed images as distances from LTM centroids to their closest patch.

        For each image, every centroid gets one feature: the distance from that
        centroid to its closest patch of the image (optionally mapped through
        exp(-d / D_l) when `experiment_params` is truthy).

        Returns an (n_images, n_centroids) float array.
        """
        # experiment_params is a boolean flag here: apply exponential normalization
        normalization = experiment_params
        centroids = cents_ltm.reshape(len(cents_ltm), -1)
        X_ = np.zeros((X.shape[0], len(cents_ltm)))
        for i, x in enumerate(X):
            # ltm centroids were determined in 'supervise' function
            # get input to layer l
            x_layer = layer.forward(x, None, update=False)
            # extract patches
            patches = layer.extract_patches(x_layer)
            patches = patches.reshape(layer.num_RFs, -1)
            patches, _, _ = layer.scale(patches)
            # compute distance matrix
            d_mat = smart_dist(patches, centroids)
            # get indices of closest patch to each centroid and accumulate average
            # closest-patch distances
            close_cent_dists = np.min(d_mat, axis=0)
            # Calculate normalization constant D_l
            # NOTE(review): this evaluates as (sum / rows) * cols, not
            # sum / (rows * cols); if the intent is the mean pairwise
            # distance, this looks like an operator-precedence bug — confirm.
            D_l = np.sum(d_mat) / d_mat.shape[0] * d_mat.shape[1]
            if normalization:
                X_[i,:] = np.exp((-1 * close_cent_dists) / D_l)
            else:
                X_[i,:] = close_cent_dists
        return X_
    def embed_patch(self, X, layer, cents_ltm, experiment_params, cnt=False):
        """Embed each image as the sequence of its patches' nearest-centroid ids.

        With cnt=False (the only mode exercised by `cluster`), row i holds, for
        each patch of image i, the index of its closest LTM centroid. With
        cnt=True, rows hold "<centroid>.<occurrence>" strings instead.

        Returns an (n_images, n_patches) array of ints (or strings when cnt).
        """
        # `method` is currently unused — presumably kept for interface parity
        method = experiment_params
        # patch size and num_features calculations
        p = layer.recField_size
        n_cols = len(layer.extract_patches(X[0]))
        if cnt:
            # NOTE(review): dtype=str makes a '<U1' array, so multi-character
            # strings like "10.2" are truncated to one character — confirm
            # whether dtype=object (or a wider '<Uk') was intended.
            X_ = np.zeros((X.shape[0], n_cols), dtype=str)
        else:
            X_ = np.zeros((X.shape[0], n_cols), dtype=int)
        for i, x in enumerate(X):
            # get input to layer l
            x_layer = layer.forward(x, None, update=False)
            # extract patches
            patches = layer.extract_patches(x_layer)
            patches = patches.reshape(layer.num_RFs, -1)
            patches, _, _ = layer.scale(patches)
            d_mat = smart_dist(patches, cents_ltm)
            # get indices of closest patch to each centroid and accumulate average
            # closest-patch distances
            close_patch_inds = np.argmin(d_mat, axis=1)
            if cnt:
                # NOTE(review): minlength=max(close_patch_inds) looks off by
                # one (bincount already covers 0..max); len(cents_ltm) may be
                # the intended minimum length — confirm.
                counts = np.bincount(close_patch_inds, minlength=max(close_patch_inds))
                temp_i = []
                for ind, count in enumerate(counts):
                    if count == 0:
                        continue
                    else:
                        for c in range(count):
                            temp_i.append("{}.{}".format(ind, c))
                X_[i] = np.array(temp_i)
            else:
                X_[i] = close_patch_inds
        print("Got Jaccard Embedding")
        return X_
def jaccard(self, x, y):
x = set(x)
y = set(y)
val = len(x.intersection(y)) / len(x.union(y))
if val == None:
return 0
else:
return val
    # cluster
    def cluster(self, X, Y, phase_num, task_num, dataset, num_classes,
                experiment_params, cluster_method='kmeans',
                accuracy_method='purity', k_scale=2, eval_layers=[]):
        """Cluster embedded images and score the clustering by purity.

        experiment_params is (embedding_mode, mode_name): mode 0 concatenates
        centroid-distance embeddings of layers 0-2; mode 1 uses layer-2 patch
        embeddings plus a Jaccard similarity matrix (needed by 'spectral').

        NOTE(review): eval_layers uses a mutable default ([]) — harmless here
        since it is never used, but worth cleaning up. Also, with mode 0 and
        cluster_method='spectral' the placeholder np.zeros(10) similarity
        matrix is used; the resulting exception is swallowed below.

        Returns (total accuracy as float percent, per-class accuracy array).
        """
        # returns total and per-class accuracy... (float, 1 by k numpy[float])
        print('Clustering Task Started...')
        embedding_mode, mode_name = experiment_params
        print("Embedding Mode: ", embedding_mode)
        print("Experiment Name: ", mode_name)
        print("Cluster Method: ", cluster_method)
        # placeholder; only replaced in embedding_mode 1
        similarity_matrix = np.zeros(10)
        if embedding_mode == 0:
            X_1 = self.embed_centroid(X, self.layers[0], self.cents_ltm[0], True)
            X_2 = self.embed_centroid(X, self.layers[1], self.cents_ltm[1], True)
            X_3 = self.embed_centroid(X, self.layers[2], self.cents_ltm[2], True)
            embeddings = np.concatenate((X_1, np.concatenate((X_2, X_3), axis=1)), axis=1)
        elif embedding_mode == 1:
            X_3 = self.embed_patch(X, self.layers[2], self.cents_ltm[2], None, False)
            embeddings = X_3
            similarity_matrix = pairwise_distances(embeddings, embeddings, metric=self.jaccard)
        # over-cluster: k_scale clusters per true class
        k = np.unique(Y).shape[0] * k_scale
        accu_total = 0
        accu_perclass = np.zeros(num_classes, dtype=np.float64)
        # Clustering Predictions
        if cluster_method == 'kmeans':
            cluster_preds = KMeans(n_clusters=k, init='k-means++', n_init=10,
                                   max_iter=300, verbose=0).fit_predict(embeddings)
        elif cluster_method == 'spectral':
            try:
                cluster_preds = SpectralClustering(n_clusters=k, affinity='precomputed', n_init=10,
                                                   assign_labels='discretize').fit_predict(similarity_matrix)
            except Exception as e:
                # fall back to a degenerate single-cluster prediction on failure
                cluster_preds = np.zeros(len(similarity_matrix))
        # Accuracy of Clustering
        if accuracy_method == 'purity':
            size = k
            print("Size ", size)
            cluster_counts = np.zeros((size, int(k/k_scale)))
            cluster_sizes = np.zeros(size)
            correct = np.zeros(size)
            total = np.zeros(size)
            cluster_indicies = [[] for i in range(num_classes)]
            for i in range(size):
                cluster_i = np.argwhere(cluster_preds == i).flatten() # indexes of cluster i
                cluster_sizes[i] = len(cluster_i)
                cluster_counts[i,:] = np.bincount(Y[cluster_i], minlength=int(k/k_scale))
                # compute accuracy: a cluster is assigned its majority class
                cluster_class = np.argmax(cluster_counts[i, :])
                correct[i] = cluster_counts[i, :].max()
                total[i] = cluster_counts[i, :].sum()
                cluster_indicies[cluster_class].append(i)
            for j in range(num_classes):
                if sum(total[cluster_indicies[j]]) > 0:
                    accu_perclass[j] = sum(correct[cluster_indicies[j]]) \
                                       / sum(total[cluster_indicies[j]]) * 100
                else:
                    accu_perclass[j] = 0
            accu_total = sum(correct) / sum(total) * 100
        return accu_total, accu_perclass
    # save STAM visualizations
    def save_visualizations(self, save_dir, phase):
        """Save diagnostic plots: LTM centroid counts, matching-distance
        distribution, and novelty-detection threshold history.

        `phase` is accepted but unused in this body — presumably kept for
        interface parity with callers (TODO confirm).
        """
        # Cent count
        plt.figure(figsize=(6,3))
        for l in range(self.num_layers):
            y = np.asarray(self.layers[l].ltm_history)
            x = np.arange(len(y))
            plt.plot(x, y, label = 'layer ' + str(l+1))
        plt.ylabel('LTM Count', fontsize=12)
        plt.xlabel('Unlabeled Images Seen', fontsize=12)
        plt.title('LTM Centroid Count History', fontsize=14)
        plt.legend(loc='upper left', prop={'size': 8})
        plt.grid()
        plt.tight_layout()
        plt.savefig(smart_dir(save_dir+'cent_plots')+'ltm_count.png', format='png', dpi=200)
        plt.close()
        # also dump the raw per-layer LTM histories as CSV
        for l in range(self.num_layers):
            np.savetxt(smart_dir(save_dir+'ltm_csvs') + 'layer-' + str(l+1) + '_ci.csv',
                       self.layers[l].ltm_history, delimiter=',')
        # these plots only make sense when the ND threshold adapts over time
        if not self.nd_fixed:
            # confidence interval
            plt.figure(figsize=(6,3))
            p = np.asarray([0, 25, 50, 75, 90, 100])
            for l in range(self.num_layers):
                dd = np.asarray(self.layers[l].filo_d.getValues())
                y = np.percentile(dd, p)
                x = np.arange(len(dd)) / len(dd)
                plt.plot(x, dd, label = 'layer ' + str(l+1))
                plt.plot(p/100., y, 'ro')
                # dashed line: current novelty-detection cutoff for the layer
                plt.axhline(y=self.layers[l].cut_d, color='r', linestyle='--')
            plt.xticks(p/100., map(str, p))
            plt.ylabel('Distance', fontsize=12)
            plt.xlabel('Percentile', fontsize=12)
            plt.title('Distribution of Closest Matching Distance', fontsize=14)
            plt.legend(loc='lower right', prop={'size': 8})
            plt.grid()
            plt.tight_layout()
            plt.savefig(smart_dir(save_dir+'cent_plots')+'d-thresh.png', format='png', dpi=200)
            plt.grid()
            plt.close()
            # D threshold
            plt.figure(figsize=(6,3))
            for l in range(self.num_layers):
                y = np.asarray(self.layers[l].dthresh_history)
                x = np.arange(len(y))
                plt.plot(x, y, label = 'layer ' + str(l+1))
            plt.ylabel('ND Distance', fontsize=12)
            plt.xlabel('Unlabeled Images Seen', fontsize=12)
            plt.gca().set_ylim(bottom=0)
            plt.title('Novelty Detection Threshold History', fontsize=14)
            plt.legend(loc='upper left', prop={'size': 8})
            plt.grid()
            plt.tight_layout()
            plt.savefig(smart_dir(save_dir+'cent_plots')+'d-thresh-history.png',
                        format='png', dpi=200)
            plt.close()
        #self.save_reconstructions(save_dir)
        #self.detailed_classification_plots(save_dir)
    def detailed_classification_plots(self):
        """Re-run hierarchical voting on the stored sample images and save,
        per image and per layer, a figure showing patches, their matched
        centroids, and the per-class vote distributions.

        Mirrors the classification logic of `topDownClassify` (task index 0),
        additionally recording per-layer votes for plotting. Returns the
        predicted labels for the sample images.
        """
        index = 0
        labels = -1 * np.ones((len(self.sample_images),))
        for i in range(len(self.sample_images)):
            # nearest-centroid index of each patch, one array per layer
            close_ind = []
            for l in range(self.num_layers):
                # get ltm centroids at layer
                centroids = self.cents_ltm[l]
                num_centroids = int(len(centroids))
                # get input to layer
                x_i = self.sample_images[i]
                for l_ in range(l): x_i = self.layers[l_].forward(x_i, None, update = False)
                # extract patches
                patches = self.layers[l].extract_patches(x_i)
                xp = patches.reshape(self.layers[l].num_RFs, -1)
                [xp, shifts, scales] = self.layers[l].scale(xp)
                # calculate distance
                cp = centroids.reshape(num_centroids, -1)
                d = smart_dist(xp, cp)
                close_ind.append(np.argmin(d, axis = 1))
            # get highest layer containing at least one CIN centroid
            l = self.num_layers-1
            found_cin = False
            while l > 0 and not found_cin:
                # is there at least one CIN centroid?
                if np.amax(self.cent_g[index][l][close_ind[l]]) >= self.rho_task:
                    found_cin = True
                else:
                    l -= 1
            l_cin = l
            # classification
            #
            # vote of each class for all layers
            wta_total = np.zeros((self.num_classes,)) + 1e-3
            # for all cin layers
            layer_range = range(l_cin+1)
            percent_inform = []
            layer_wta = []
            for l in layer_range:
                # vote of each class in this layer
                wta = np.zeros((self.num_classes,))
                # get max g value for matched centroids
                votes_g = np.amax(self.cent_g[index][l][close_ind[l]], axis = 1)
                # nullify vote of non-cin centroids
                votes_g[votes_g < self.rho_task] = 0
                a = np.where(votes_g > self.rho_task)
                percent_inform.append(len(a[0])/ len(votes_g))
                # calculate per class vote at this layer
                votes = np.argmax(self.cent_g[index][l][close_ind[l]], axis = 1)
                for k in range(self.num_classes):
                    wta[k] = np.sum(votes_g[votes == k])
                # add to cumalitive total
                wta /= len(close_ind[l])
                layer_wta.append(wta)
                wta_total += wta
            # final step
            labels[i] = np.argmax(wta_total)
            # Visualizing Patches and Centroids
            for l in range(self.num_layers):
                nrows = ncols = int(np.sqrt(self.layers[l].num_RFs) / 2)
                rf_size = self.layers[l].recField_size
                plt.close()
                fig = plt.figure(figsize=(9,11))
                # First 3
                out_im, out_im_2 = self.layers[l].create_reconstruction(self.sample_images[i], self.sample_labels[i])
                ax1 = fig.add_axes([0.1, 0.75, 0.2, 0.2])
                ax2 = fig.add_axes([0.35, 0.75, 0.2, 0.2])
                ax3 = fig.add_axes([0.63, 0.83, 0.30, 0.12])
                ax1.imshow(out_im_2.squeeze())
                ax1.set_title('Patches')
                ax1.axis('off')
                ax2.imshow(out_im.squeeze())
                ax2.set_title('Matched Centroids')
                ax2.axis('off')
                ax3.bar(np.arange(self.num_classes), layer_wta[l])
                ax3.set_xticks(np.arange(self.num_classes))
                ax3.set_xticklabels(self.class_labels, rotation='vertical')
                ax2.tick_params(axis='y', which='major', labelsize=10)
                ax3.set_title('Layer {} Vote (1/K + Gamma): {}'.format(l, self.rho_task))
                ax3.axis('on')
                # NOTE(review): x_i here is left over from the matching loop
                # above, i.e. the image as transformed for the LAST layer, but
                # it is reused for every l of this plotting loop — confirm
                # whether the per-layer forward pass should be recomputed.
                patches = self.layers[l].extract_patches(x_i)
                xp_2 = patches.reshape(self.layers[l].num_RFs, -1)
                xp_2 = self.layers[l].scale(xp_2)[0].reshape(self.layers[l].num_RFs, -1)
                centroids = self.cents_ltm[l]
                num_centroids = int(len(centroids))
                cp = centroids.reshape(num_centroids, -1)
                # 4x5 grid of (patch, matched centroid, centroid g-values)
                for p in range(4):
                    for j in range(5):
                        ax1 = fig.add_axes([0.08 + .17*j, 0.57 - .15*p, 0.05, 0.05])
                        ax2 = fig.add_axes([0.16 + .17*j, 0.57 - .15*p, 0.05, 0.05])
                        ax3 = fig.add_axes([0.08 + .17*j, 0.65 - .15*p, .13, .05])
                        ax1.set_title('Patch')
                        ax1.imshow(xp_2[int((p*ncols*2) + (2*j))].reshape(rf_size, rf_size, self.num_c).squeeze())
                        ax1.axis('off')
                        # green title = class-informative centroid, red otherwise
                        if np.max(self.cent_g[index][l][close_ind[l][int((p*ncols*2) + (j*2))]]) > self.rho_task:
                            ax2.set_title('Centroid', color='g')
                        else:
                            ax2.set_title('Centroid', color='r')
                        ax2.imshow(cp[close_ind[l][int((p*ncols*2) + (j*2))]].reshape(rf_size, rf_size, self.num_c).squeeze())
                        ax2.axis('off')
                        vote = np.argmax(self.cent_g[index][l][close_ind[l][int((p*ncols*2) + (j*2))]])
                        ax3.set_title('Vote: {}'.format(self.class_labels[vote]))
                        ax3.bar(np.arange(self.num_classes), self.cent_g[index][l][close_ind[l][int((p*ncols*2) + (j*2))]])
                        ax3.axes.get_xaxis().set_ticks([])
                        ax3.tick_params(axis='y', which='major', labelsize=6)
                        #ax3.axis('off')
                fig.suptitle('True Class: {} Predicted Class: {} Layer{}'.format(self.class_labels[int(self.sample_labels[i])], self.class_labels[int(labels[i])], l))
                plt.savefig(smart_dir(self.plot_directory + '/phase_{}/ex_{}/'.format(self.phase, i)) + 'layer_{}_vote.png'.format(l))
                plt.close()
        return labels
def save_reconstructions(self, save_dir):
for i in range(len(self.sample_images)):
for layer in range(self.num_layers):
out_im, out_im_2 = self.layers[layer].create_reconstruction(self.sample_images[i], self.sample_labels[i])
plt.figure(figsize=(6,3))
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(out_im.squeeze())
ax2.imshow(out_im_2.squeeze())
plt.title('Class : {}'.format(self.sample_labels[i]))
plt.savefig(smart_dir(save_dir + '/reconstructions/layer_{}'.format(layer)) + 'image_{}'.format(i))
plt.close()
# scale image based on normalization
def scaleImage(self, im_show):
im_show = np.squeeze((im_show - self.im_scale[0]) / (self.im_scale[1] - self.im_scale[0]))
im_show[im_show>1] = 1
im_show[im_show<0] = 0
return im_show
def pick_sample_images(self, x_test, y_test, skip=20):
self.sample_images = []
self.sample_labels = []
self.skip = skip
k = np.unique(y_test)
for i, im in enumerate(x_test):
if i % self.skip == 0:
self.sample_images.append(x_test[i])
self.sample_labels.append(y_test[i])
plt.imshow(im.reshape(self.im_size, self.im_size, self.num_c).squeeze())
plt.title('Class: {}'.format(y_test[i]))
plt.savefig(smart_dir(self.plot_directory + '/sample_imgs/') + 'sample_image_{}'.format(int(i / self.skip)))
plt.close()
def stm_eviction_plot(self):
plt.close()
for l in range(self.num_layers):
plt.figure(figsize=(40,6))
data = self.layers[l].eviction_tracker
x = np.arange(len(data))
plt.boxplot(data, positions=x, showfliers=False)
plt.savefig(smart_dir(self.plot_directory) + '/eviction_layer_{}.png'.format(l))
plt.close()
| en | 0.728739 | # declare properties # extract scenario configs # extract stam configs # directory paths # initialize variables # Informative centroid info storage # build stam hierarchy # stam init # classification parameters # visualize task boundaries # centroid init - note that current implementation does NOT use random # init centroids but rather will change these centroids to sampled patch # values in the learning alogrithm (see STAM_classRepo) # random seed # for all layers # number of centroids to initialize # random init # normalize sum to 1 # reset d samples - for visualization # make sure data is nonzero # start progress bar # for all data points # reset d samples # show image to hierarchy # update progress bar # finish progress bar # update step for stam model training # for all layers # initialize hierarchy classfiication parameters for each # evaluation data sample # classification parameters # set rho # get percent class informative centroids # hold results here # for each layer # for each centroid # increase score if ci #and np.sort(self.cent_g[index][l][j])[-2] <= 0.5 * max(self.cent_g[index][l][j]): # calculate percent ci at layer # given labeled data, associate class information with stam centroids # process inputs # get centroids for classification # this is repeat of self.setTask which is kept for scenario # where labeled data is NOT replayed # supervision per layer # get layer index from list of classification layers # get layer centroids # get value of D for task # we use D to normalize distances wrt average centroid-patch distance # get input to layer l # extract patches # calculate and save distance # final D calculation # this holds sum of exponential "score" for each centroid for each class # this code is relevant if we are not replaying labeled data # for each image # get input to layer l # extract patches # calculate distance # get distance of *matched* centroid of each patch # get exponential distance and put into sparse array with 
same shape as # summed exponential scores if we have two centroid matches in same # image, only save best match # update sum of exponential "score" for each centroid for each class # save data scores and calculate g values as exponential "score" normalized # accross classes (i.e. score of each centroid sums to 1) # call classification function # stam primary classification function - hierarchical voting mechanism # process inputs and init return labels # for each data # get NN centroid for each patch # get ltm centroids at layer # get input to layer # extract patches # calculate distance # get highest layer containing at least one CIN centroid # is there at least one CIN centroid? # classification # # vote of each class for all layers # for all cin layers # vote of each class in this layer # get max g value for matched centroids # nullify vote of non-cin centroids # calculate per class vote at this layer # add to cumalitive total and normalize # final step # ltm centroids were determined in 'supervise' function # get input to layer l # extract patches # compute distance matrix # get indices of closest patch to each centroid and accumulate average # closest-patch distances # Calculate normalization constant D_l # patch size and num_features calculations # get input to layer l # extract patches # get indices of closest patch to each centroid and accumulate average # closest-patch distances # cluster # returns total and per-class accuracy... (float, 1 by k numpy[float]) # Clustering Predictions # Accuracy of Clustering # indexes of cluster i # compute accuracy # save STAM visualizations # Cent count # confidence interval # D threshold #self.save_reconstructions(save_dir) #self.detailed_classification_plots(save_dir) # get ltm centroids at layer # get input to layer # extract patches # calculate distance # get highest layer containing at least one CIN centroid # is there at least one CIN centroid? 
# classification # # vote of each class for all layers # for all cin layers # vote of each class in this layer # get max g value for matched centroids # nullify vote of non-cin centroids # calculate per class vote at this layer # add to cumalitive total # final step # Visualizing Patches and Centroids # First 3 #ax3.axis('off') # scale image based on normalization | 2.015359 | 2 |
object_detection_cam_opencv.py | andrinethomas/Tensorflow-vehicle-detection-using-camera-and-db-accessing-mysql | 1 | 6616345 |
import cv2
import numpy as np
import os
import tensorflow as tf
from utils import label_map_util
from utils import visualization_utils as vis_util
# --- model / label configuration --------------------------------------------
MODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17'
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
NUM_CLASSES = 90
# load the frozen TF1 detection graph from disk
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
# map numeric class ids -> human-readable labels
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# SECURITY NOTE: camera credentials are hard-coded in this URL; move them to
# configuration / environment variables before sharing this script.
cap = cv2.VideoCapture('rtsp://admin:kgisl@123@10.100.10.192/doc/page/preview.asp')
#cap = cv2.VideoCapture('http://192.168.43.83:8080/video')
#cap.set(cv2.CAP_PROP_FPS,300)
with detection_graph.as_default():
    with tf.Session(graph=detection_graph) as sess:
        # tensor handles are loop-invariant: look them up once, not per frame
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
        detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
        detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
        num_detections = detection_graph.get_tensor_by_name('num_detections:0')
        ret = True
        while (ret):
            ret, image_np = cap.read()
            if not ret:
                # stream ended or read failed; image_np would be None
                break
            # model expects a batch dimension: (1, H, W, 3)
            image_np_expanded = np.expand_dims(image_np, axis=0)
            (boxes, scores, classes, num) = sess.run(
                [detection_boxes, detection_scores, detection_classes, num_detections],
                feed_dict={image_tensor: image_np_expanded})
            # draw boxes/labels in place on the frame
            vis_util.visualize_boxes_and_labels_on_image_array(
                image_np,
                np.squeeze(boxes),
                np.squeeze(classes).astype(np.int32),
                np.squeeze(scores),
                category_index,
                use_normalized_coordinates=True,
                line_thickness=8)
            cv2.imshow('image',cv2.resize(image_np,(1280,960)))
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break
# BUG FIX: these were bare attribute references (missing parentheses), so the
# windows and the capture device were never actually released.
cv2.destroyAllWindows()
cap.release()
|
import cv2
import numpy as np
import os
import tensorflow as tf
from utils import label_map_util
from utils import visualization_utils as vis_util
# --- model / label configuration --------------------------------------------
MODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17'
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
NUM_CLASSES = 90
# load the frozen TF1 detection graph from disk
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
# map numeric class ids -> human-readable labels
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# SECURITY NOTE: camera credentials are hard-coded in this URL; move them to
# configuration / environment variables before sharing this script.
cap = cv2.VideoCapture('rtsp://admin:kgisl@123@10.100.10.192/doc/page/preview.asp')
#cap = cv2.VideoCapture('http://192.168.43.83:8080/video')
#cap.set(cv2.CAP_PROP_FPS,300)
with detection_graph.as_default():
    with tf.Session(graph=detection_graph) as sess:
        # tensor handles are loop-invariant: look them up once, not per frame
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
        detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
        detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
        num_detections = detection_graph.get_tensor_by_name('num_detections:0')
        ret = True
        while (ret):
            ret, image_np = cap.read()
            if not ret:
                # stream ended or read failed; image_np would be None
                break
            # model expects a batch dimension: (1, H, W, 3)
            image_np_expanded = np.expand_dims(image_np, axis=0)
            (boxes, scores, classes, num) = sess.run(
                [detection_boxes, detection_scores, detection_classes, num_detections],
                feed_dict={image_tensor: image_np_expanded})
            # draw boxes/labels in place on the frame
            vis_util.visualize_boxes_and_labels_on_image_array(
                image_np,
                np.squeeze(boxes),
                np.squeeze(classes).astype(np.int32),
                np.squeeze(scores),
                category_index,
                use_normalized_coordinates=True,
                line_thickness=8)
            cv2.imshow('image',cv2.resize(image_np,(1280,960)))
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break
# BUG FIX: these were bare attribute references (missing parentheses), so the
# windows and the capture device were never actually released.
cv2.destroyAllWindows()
cap.release()
| en | 0.257073 | #cap = cv2.VideoCapture('http://192.168.43.83:8080/video') #cap.set(cv2.CAP_PROP_FPS,300) | 2.330671 | 2 |
modeling.py | lyuchenyang/Document-level-Sentiment-Analysis-with-User-and-Product-Contex | 11 | 6616346 | import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss, MSELoss
import torch.nn.functional as F
from transformers.activations import gelu, gelu_new, swish
from transformers.configuration_bert import BertConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_callable
from transformers.modeling_utils import PreTrainedModel, prune_linear_layer
from transformers import BertPreTrainedModel, BertModel, InputExample, RobertaModel, BertForSequenceClassification, RobertaForSequenceClassification, RobertaConfig, ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
from transformers.modeling_roberta import RobertaClassificationHead
from transformers.modeling_bert import BertOnlyMLMHead
class SecondPretrainedBert(BertPreTrainedModel):
    """BERT sequence classifier that accumulates the CLS representation of each
    example into per-user / per-product embedding rows during pretraining."""

    def __init__(self, config, num_embeddings):
        super().__init__(config)
        self.config=config
        self.bert = BertForSequenceClassification(config)
        # one learned row per user/product id
        self.embedding_matrix = nn.Embedding(num_embeddings, config.hidden_size)

    def forward(self, inputs, user_product):
        """Run the classifier and fold each example's CLS vector into the
        embedding rows of its user and product ids; returns the loss."""
        outputs = self.bert(**inputs)
        loss, last_hidden_states = outputs[0], outputs[2][self.config.num_hidden_layers-1]
        # CLS token of the last hidden layer, duplicated for (user, product)
        all_cls = last_hidden_states[:, 0, :]
        _all_cls = torch.cat([all_cls.detach().clone(), all_cls.detach().clone()], dim=0)
        up_embeddings = self.embedding_matrix(user_product)
        # BUG FIX: the original called the out-of-place Tensor.index_copy and
        # discarded its result, so the embedding matrix was never updated.
        # Use the in-place variant; in-place ops on a leaf Parameter that
        # requires grad are only legal inside torch.no_grad().
        with torch.no_grad():
            self.embedding_matrix.weight.index_copy_(
                0, user_product.view(-1),
                up_embeddings.view(-1, self.config.hidden_size).detach() + _all_cls)
        return loss
class IncrementalContextBert(BertPreTrainedModel):
    """BERT classifier with incrementally-updated user/product context
    embeddings attended over the document's token representations."""

    def __init__(self, config, num_embeddings, up_vocab):
        super().__init__(config)
        self.bert = BertModel(config)
        # optionally store user/product embeddings at a smaller "inner" size
        # and project to/from hidden size on the fly
        if config.do_shrink:
            self.embedding = nn.Embedding(num_embeddings, config.inner_size)
            self.to_hidden_size = nn.Linear(config.inner_size, config.hidden_size)
            self.to_inner_size = nn.Linear(config.hidden_size, config.inner_size)
        else:
            self.embedding = nn.Embedding(num_embeddings, config.hidden_size)
        self.multi_head_attention = torch.nn.MultiheadAttention(config.hidden_size, config.attention_heads)
        # Linear layers used to transform cls token, user and product embeddings
        self.linear_t = nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size)
        self.linear_u = nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size)
        self.linear_p = nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size)
        # NOTE: linear_update / linear_f / linear_g are not used in forward();
        # kept to preserve the parameter set (checkpoint compatibility)
        self.linear_update = nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size)
        self.linear_f = nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size)
        self.linear_g = nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size)
        # Activation functions
        self.sigmoid = nn.Sigmoid()
        self.gelu = gelu
        self.relu = nn.ReLU()
        self.celu = nn.CELU()
        self.tanh = nn.Tanh()
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.softmax = nn.Softmax(dim=-1)
        # Classification layer
        self.classifier = nn.Linear(in_features=config.hidden_size, out_features=config.num_labels)
        # An empirical initializad number, still needed to be explored
        self.alpha = nn.Parameter(torch.tensor(-10, dtype=torch.float), requires_grad=True)
        self.up_vocab = up_vocab
        self.init_weights()

    def forward(self, inputs, user_product, up_indices=None, up_embeddings=None):
        """Classify a document given (user, product) ids; optionally first fold
        previously-computed gate vectors back into the embedding table."""
        if up_indices is not None and up_embeddings is not None:
            p_up_embeddings = self.embedding(up_indices)
            # blend old rows with the incoming update, gated by sigmoid(alpha)
            update_embeddings = p_up_embeddings + self.sigmoid(self.alpha)*up_embeddings
            with torch.no_grad():
                # BUG FIX: the original used out-of-place index_copy and
                # discarded the result, so the table was never updated.
                self.embedding.weight.index_copy_(0, up_indices, update_embeddings)
        outputs = self.bert(**inputs)
        # tokens as (seq, batch, hidden) for MultiheadAttention; pooled CLS
        last_hidden_states, cls_hidden_states = outputs[0].transpose(0, 1), outputs[1]
        up_embeddings = self.embedding(user_product)
        if self.config.do_shrink:
            up_embeddings = self.to_hidden_size(up_embeddings)
        # attend the (user, product) queries over the document tokens
        att_up = self.multi_head_attention(up_embeddings.transpose(0, 1), last_hidden_states, last_hidden_states)
        att_u, att_p = att_up[0][0, :, :], att_up[0][1, :, :]
        # gates combining the CLS vector with each attended context vector
        z_cls = self.sigmoid(self.linear_t(cls_hidden_states))
        z_att_u, z_att_p = self.sigmoid(self.linear_u(att_u)), self.sigmoid(self.linear_p(att_p))
        z_u = self.sigmoid(z_cls + z_att_u)
        z_p = self.sigmoid(z_cls + z_att_p)
        cls_input = cls_hidden_states + z_u * att_u + z_p * att_p
        logits = self.classifier(cls_input)
        # logits = self.softmax(logits)
        # gate vectors become the next incremental update for these ids
        new_up_embeddings = torch.cat([z_att_u, z_att_p], dim=0)
        if self.config.do_shrink:
            new_up_embeddings = self.to_inner_size(new_up_embeddings)
        return logits, user_product.view(-1).detach(), new_up_embeddings
class IncrementalContextRoberta(BertPreTrainedModel):
    """RoBERTa variant of IncrementalContextBert: user/product context
    embeddings attended over token representations, updated incrementally."""

    config_class = RobertaConfig
    pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
    base_model_prefix = "roberta"

    def __init__(self, config, num_embeddings, up_vocab):
        super().__init__(config)
        self.roberta = RobertaModel(config)
        # optionally store user/product embeddings at a smaller "inner" size
        if config.do_shrink:
            self.embedding = nn.Embedding(num_embeddings, config.inner_size)
            self.to_hidden_size = nn.Linear(config.inner_size, config.hidden_size)
            self.to_inner_size = nn.Linear(config.hidden_size, config.inner_size)
        else:
            self.embedding = nn.Embedding(num_embeddings, config.hidden_size)
        self.multi_head_attention = torch.nn.MultiheadAttention(config.hidden_size, config.attention_heads)
        # Linear layers used to transform cls token, user and product embeddings
        self.linear_t = nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size)
        self.linear_u = nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size)
        self.linear_p = nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size)
        # NOTE: linear_update / linear_f / linear_g are not used in forward();
        # kept to preserve the parameter set (checkpoint compatibility)
        self.linear_update = nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size)
        self.linear_f = nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size)
        self.linear_g = nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size)
        # Activation functions
        self.sigmoid = nn.Sigmoid()
        self.gelu = gelu
        self.relu = nn.ReLU()
        self.celu = nn.CELU()
        self.tanh = nn.Tanh()
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.softmax = nn.Softmax(dim=-1)
        # Classification layer
        self.classifier = nn.Linear(in_features=config.hidden_size, out_features=config.num_labels)
        # An empirical initializad number, still needed to be explored
        self.alpha = nn.Parameter(torch.tensor(-10, dtype=torch.float), requires_grad=True)
        self.up_vocab = up_vocab
        self.init_weights()

    def forward(self, inputs, user_product, up_indices=None, up_embeddings=None):
        """Classify a document given (user, product) ids; optionally first fold
        previously-computed gate vectors back into the embedding table."""
        if up_indices is not None and up_embeddings is not None:
            p_up_embeddings = self.embedding(up_indices)
            # blend old rows with the incoming update, gated by sigmoid(alpha)
            update_embeddings = p_up_embeddings + self.sigmoid(self.alpha) * up_embeddings
            with torch.no_grad():
                # BUG FIX: the original used out-of-place index_copy and
                # discarded the result, so the table was never updated.
                self.embedding.weight.index_copy_(0, up_indices, update_embeddings)
        outputs = self.roberta(**inputs)
        # tokens as (seq, batch, hidden) for MultiheadAttention; pooled CLS
        last_hidden_states, cls_hidden_states = outputs[0].transpose(0, 1), outputs[1]
        up_embeddings = self.embedding(user_product)
        if self.config.do_shrink:
            up_embeddings = self.to_hidden_size(up_embeddings)
        # attend the (user, product) queries over the document tokens
        att_up = self.multi_head_attention(up_embeddings.transpose(0, 1), last_hidden_states, last_hidden_states)
        att_u, att_p = att_up[0][0, :, :], att_up[0][1, :, :]
        # gates combining the CLS vector with each attended context vector
        z_cls = self.sigmoid(self.linear_t(cls_hidden_states))
        z_att_u, z_att_p = self.sigmoid(self.linear_u(att_u)), self.sigmoid(self.linear_p(att_p))
        z_u = self.sigmoid(z_cls + z_att_u)
        z_p = self.sigmoid(z_cls + z_att_p)
        cls_input = cls_hidden_states + z_u * att_u + z_p * att_p
        logits = self.classifier(cls_input)
        # logits = self.softmax(logits)
        # gate vectors become the next incremental update for these ids
        new_up_embeddings = torch.cat([z_att_u, z_att_p], dim=0)
        if self.config.do_shrink:
            new_up_embeddings = self.to_inner_size(new_up_embeddings)
        return logits, user_product.view(-1).detach(), new_up_embeddings
class FocalLoss(nn.Module):
    """Focal loss (Lin et al., 2017): -(1 - pt)^gamma * log(pt), with optional
    per-class alpha weighting.

    Args:
        gamma: focusing parameter; gamma=0 reduces to (alpha-weighted) NLL.
        alpha: optional per-class weights (list or tensor).
        size_average: mean-reduce when True, sum-reduce otherwise.
    """

    def __init__(self, gamma=0, alpha=None, size_average=True):
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha
        if isinstance(alpha, list):
            self.alpha = torch.tensor(alpha)
        self.size_average = size_average

    def forward(self, input, target):
        # log-probability of the target class for every sample
        logpt = F.log_softmax(input, dim=1)
        logpt = logpt.gather(1, target.view(-1, 1))
        logpt = logpt.view(-1)
        # BUG FIX: pt must be the probability exp(logpt); the original set
        # pt = logpt (a log-probability), which makes the (1 - pt)^gamma
        # modulating factor wrong for any gamma > 0.
        pt = logpt.exp()
        if self.alpha is not None:
            # match alpha's dtype/device to the input before gathering
            if self.alpha.type() != input.data.type():
                self.alpha = self.alpha.type_as(input.data)
            at = self.alpha.gather(0, target.data.view(-1))
            logpt = logpt * at
        loss = -1 * (1 - pt)**self.gamma * logpt
        if self.size_average:
            return loss.mean()
        else:
            return loss.sum()
| import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss, MSELoss
import torch.nn.functional as F
from transformers.activations import gelu, gelu_new, swish
from transformers.configuration_bert import BertConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_callable
from transformers.modeling_utils import PreTrainedModel, prune_linear_layer
from transformers import BertPreTrainedModel, BertModel, InputExample, RobertaModel, BertForSequenceClassification, RobertaForSequenceClassification, RobertaConfig, ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
from transformers.modeling_roberta import RobertaClassificationHead
from transformers.modeling_bert import BertOnlyMLMHead
class SecondPretrainedBert(BertPreTrainedModel):
def __init__(self, config, num_embeddings):
super().__init__(config)
self.config=config
self.bert = BertForSequenceClassification(config)
self.embedding_matrix = nn.Embedding(num_embeddings, config.hidden_size)
def forward(self, inputs, user_product):
outputs = self.bert(**inputs)
loss, last_hidden_states = outputs[0], outputs[2][self.config.num_hidden_layers-1]
all_cls = last_hidden_states[:, 0, :]
_all_cls = torch.cat([all_cls.detach().clone(), all_cls.detach().clone()], dim=0)
up_embeddings = self.embedding_matrix(user_product)
self.embedding_matrix.weight.index_copy(0, user_product.view(-1),
up_embeddings.view(-1, self.config.hidden_size).detach() + _all_cls)
return loss
class IncrementalContextBert(BertPreTrainedModel):
def __init__(self, config, num_embeddings, up_vocab):
super().__init__(config)
self.bert = BertModel(config)
if config.do_shrink:
self.embedding = nn.Embedding(num_embeddings, config.inner_size)
self.to_hidden_size = nn.Linear(config.inner_size, config.hidden_size)
self.to_inner_size = nn.Linear(config.hidden_size, config.inner_size)
else:
self.embedding = nn.Embedding(num_embeddings, config.hidden_size)
self.multi_head_attention = torch.nn.MultiheadAttention(config.hidden_size, config.attention_heads)
# Linear layers used to transform cls token, user and product embeddings
self.linear_t = nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size)
self.linear_u = nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size)
self.linear_p = nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size)
self.linear_update = nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size)
self.linear_f = nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size)
self.linear_g = nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size)
# Activation functions
self.sigmoid = nn.Sigmoid()
self.gelu = gelu
self.relu = nn.ReLU()
self.celu = nn.CELU()
self.tanh = nn.Tanh()
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.softmax = nn.Softmax(dim=-1)
# Classification layer
self.classifier = nn.Linear(in_features=config.hidden_size, out_features=config.num_labels)
# An empirical initializad number, still needed to be explored
self.alpha = nn.Parameter(torch.tensor(-10, dtype=torch.float), requires_grad=True)
self.up_vocab = up_vocab
self.init_weights()
def forward(self, inputs, user_product, up_indices=None, up_embeddings=None):
if up_indices is not None and up_embeddings is not None:
p_up_embeddings = self.embedding(up_indices)
update_embeddings = p_up_embeddings + self.sigmoid(self.alpha)*up_embeddings
with torch.no_grad():
self.embedding.weight.index_copy(0, up_indices, update_embeddings)
outputs = self.bert(**inputs)
last_hidden_states, cls_hidden_states = outputs[0].transpose(0, 1), outputs[1]
up_embeddings = self.embedding(user_product)
if self.config.do_shrink:
up_embeddings = self.to_hidden_size(up_embeddings)
att_up = self.multi_head_attention(up_embeddings.transpose(0, 1), last_hidden_states, last_hidden_states)
att_u, att_p = att_up[0][0, :, :], att_up[0][1, :, :]
z_cls = self.sigmoid(self.linear_t(cls_hidden_states))
z_att_u, z_att_p = self.sigmoid(self.linear_u(att_u)), self.sigmoid(self.linear_p(att_p))
z_u = self.sigmoid(z_cls + z_att_u)
z_p = self.sigmoid(z_cls + z_att_p)
cls_input = cls_hidden_states + z_u * att_u + z_p * att_p
logits = self.classifier(cls_input)
# logits = self.softmax(logits)
new_up_embeddings = torch.cat([z_att_u, z_att_p], dim=0)
if self.config.do_shrink:
new_up_embeddings = self.to_inner_size(new_up_embeddings)
return logits, user_product.view(-1).detach(), new_up_embeddings
class IncrementalContextRoberta(BertPreTrainedModel):
config_class = RobertaConfig
pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "roberta"
def __init__(self, config, num_embeddings, up_vocab):
super().__init__(config)
self.roberta = RobertaModel(config)
if config.do_shrink:
self.embedding = nn.Embedding(num_embeddings, config.inner_size)
self.to_hidden_size = nn.Linear(config.inner_size, config.hidden_size)
self.to_inner_size = nn.Linear(config.hidden_size, config.inner_size)
else:
self.embedding = nn.Embedding(num_embeddings, config.hidden_size)
self.multi_head_attention = torch.nn.MultiheadAttention(config.hidden_size, config.attention_heads)
# Linear layers used to transform cls token, user and product embeddings
self.linear_t = nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size)
self.linear_u = nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size)
self.linear_p = nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size)
self.linear_update = nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size)
self.linear_f = nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size)
self.linear_g = nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size)
# Activation functions
self.sigmoid = nn.Sigmoid()
self.gelu = gelu
self.relu = nn.ReLU()
self.celu = nn.CELU()
self.tanh = nn.Tanh()
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.softmax = nn.Softmax(dim=-1)
# Classification layer
self.classifier = nn.Linear(in_features=config.hidden_size, out_features=config.num_labels)
# An empirical initializad number, still needed to be explored
self.alpha = nn.Parameter(torch.tensor(-10, dtype=torch.float), requires_grad=True)
self.up_vocab = up_vocab
self.init_weights()
def forward(self, inputs, user_product, up_indices=None, up_embeddings=None):
if up_indices is not None and up_embeddings is not None:
p_up_embeddings = self.embedding(up_indices)
update_embeddings = p_up_embeddings + self.sigmoid(self.alpha) * up_embeddings
with torch.no_grad():
self.embedding.weight.index_copy(0, up_indices, update_embeddings)
outputs = self.roberta(**inputs)
last_hidden_states, cls_hidden_states = outputs[0].transpose(0, 1), outputs[1]
up_embeddings = self.embedding(user_product)
if self.config.do_shrink:
up_embeddings = self.to_hidden_size(up_embeddings)
att_up = self.multi_head_attention(up_embeddings.transpose(0, 1), last_hidden_states, last_hidden_states)
att_u, att_p = att_up[0][0, :, :], att_up[0][1, :, :]
z_cls = self.sigmoid(self.linear_t(cls_hidden_states))
z_att_u, z_att_p = self.sigmoid(self.linear_u(att_u)), self.sigmoid(self.linear_p(att_p))
z_u = self.sigmoid(z_cls + z_att_u)
z_p = self.sigmoid(z_cls + z_att_p)
cls_input = cls_hidden_states + z_u * att_u + z_p * att_p
logits = self.classifier(cls_input)
# logits = self.softmax(logits)
new_up_embeddings = torch.cat([z_att_u, z_att_p], dim=0)
if self.config.do_shrink:
new_up_embeddings = self.to_inner_size(new_up_embeddings)
return logits, user_product.view(-1).detach(), new_up_embeddings
class FocalLoss(nn.Module):
def __init__(self, gamma=0, alpha=None, size_average=True):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.alpha = alpha
if isinstance(alpha, list):
self.alpha = torch.tensor(alpha)
self.size_average = size_average
def forward(self, input, target):
logpt = F.log_softmax(input, dim=1)
logpt = logpt.gather(1, target.view(-1, 1))
logpt = logpt.view(-1)
pt = logpt
if self.alpha is not None:
if self.alpha.type() != input.data.type():
self.alpha = self.alpha.type_as(input.data)
at = self.alpha.gather(0, target.data.view(-1))
logpt = logpt * at
loss = -1 * (1 - pt)**self.gamma * logpt
if self.size_average:
return loss.mean()
else:
return loss.sum()
| en | 0.569251 | # Linear layers used to transform cls token, user and product embeddings # Activation functions # Classification layer # An empirical initializad number, still needed to be explored # logits = self.softmax(logits) # Linear layers used to transform cls token, user and product embeddings # Activation functions # Classification layer # An empirical initializad number, still needed to be explored # logits = self.softmax(logits) | 2.151242 | 2 |
func.py | deuteronomy-works/TimerStation_End | 0 | 6616347 | <reponame>deuteronomy-works/TimerStation_End<filename>func.py
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 22 10:10:11 2019
@author: Ampofo
"""
import os
import threading
from PyQt5.QtCore import QObject, pyqtSlot as Slot, pyqtSignal as Signal
class SwitchKeys(QObject):
def __init__(self):
QObject.__init__(self)
self.bin_folder = "bin"
self.status = ""
@Slot()
def turnOn(self):
on_thread = threading.Thread(target=self._turnOn)
on_thread.daemon = True
on_thread.start()
def _turnOn(self):
# turn on
self.status = "turning on"
print(self.status)
cmd = self.bin_folder + "\\WinKill.exe"
os.system(cmd)
self.status = "turned on"
@Slot()
def turnOff(self):
pass
def _turnOff(self):
pass
| # -*- coding: utf-8 -*-
"""
Created on Mon Jul 22 10:10:11 2019
@author: Ampofo
"""
import os
import threading
from PyQt5.QtCore import QObject, pyqtSlot as Slot, pyqtSignal as Signal
class SwitchKeys(QObject):
def __init__(self):
QObject.__init__(self)
self.bin_folder = "bin"
self.status = ""
@Slot()
def turnOn(self):
on_thread = threading.Thread(target=self._turnOn)
on_thread.daemon = True
on_thread.start()
def _turnOn(self):
# turn on
self.status = "turning on"
print(self.status)
cmd = self.bin_folder + "\\WinKill.exe"
os.system(cmd)
self.status = "turned on"
@Slot()
def turnOff(self):
pass
def _turnOff(self):
pass | en | 0.737167 | # -*- coding: utf-8 -*- Created on Mon Jul 22 10:10:11 2019 @author: Ampofo # turn on | 2.503521 | 3 |
bootcamp/accounts/views.py | elbakouchi/bootcamp | 0 | 6616348 | from django.http import JsonResponse
from allauth.account.views import SignupView, _ajax_response, LogoutView
from django.shortcuts import redirect
from bootcamp.articles.models import Article
from bootcamp.demand.models import Demand
from bootcamp.users.views import UserUpdateView, UserDetailView
from allauth.account.forms import UserForm, SignupForm
from django.forms.fields import BooleanField
from allauth.account.views import SignupView
class CustomSignupForm(SignupForm):
terms = BooleanField()
class CustomSignupView(SignupView):
form_class = CustomSignupForm
class ProfileView(UserDetailView):
def get_context_data(self, **kwargs):
context = super(ProfileView, self).get_context_data()
user_form = UserForm()
demands = Demand.objects.filter(user=self.object.pk).order_by('pk', 'timestamp')
revisions = Article.objects.filter(user=self.object.pk).order_by('pk', 'timestamp')
if revisions.count():
context["last_revision"] = revisions.last().content
context["demands"] = demands
context["revisions"] = revisions
context["user_form"] = user_form
return context
class AjaxLogoutView(LogoutView):
def get(self, *args, **kwargs):
url = self.get_redirect_url()
if self.request.user.is_authenticated:
self.logout()
return JsonResponse({'url': url})
| from django.http import JsonResponse
from allauth.account.views import SignupView, _ajax_response, LogoutView
from django.shortcuts import redirect
from bootcamp.articles.models import Article
from bootcamp.demand.models import Demand
from bootcamp.users.views import UserUpdateView, UserDetailView
from allauth.account.forms import UserForm, SignupForm
from django.forms.fields import BooleanField
from allauth.account.views import SignupView
class CustomSignupForm(SignupForm):
terms = BooleanField()
class CustomSignupView(SignupView):
form_class = CustomSignupForm
class ProfileView(UserDetailView):
def get_context_data(self, **kwargs):
context = super(ProfileView, self).get_context_data()
user_form = UserForm()
demands = Demand.objects.filter(user=self.object.pk).order_by('pk', 'timestamp')
revisions = Article.objects.filter(user=self.object.pk).order_by('pk', 'timestamp')
if revisions.count():
context["last_revision"] = revisions.last().content
context["demands"] = demands
context["revisions"] = revisions
context["user_form"] = user_form
return context
class AjaxLogoutView(LogoutView):
def get(self, *args, **kwargs):
url = self.get_redirect_url()
if self.request.user.is_authenticated:
self.logout()
return JsonResponse({'url': url})
| none | 1 | 1.911138 | 2 | |
oscarapi/views/admin/user.py | ski-family/django-oscar-api | 311 | 6616349 | <reponame>ski-family/django-oscar-api
from django.contrib.auth import get_user_model
from oscarapi.utils.loading import get_api_class
from rest_framework import generics
APIAdminPermission = get_api_class("permissions", "APIAdminPermission")
AdminUserSerializer = get_api_class("serializers.admin.user", "AdminUserSerializer")
User = get_user_model()
class UserAdminList(generics.ListCreateAPIView):
"""
List of all users, either frontend or admin users.
The fields shown in this view can be changed using the ``OSCARAPI_ADMIN_USER_FIELDS``
setting
"""
queryset = User.objects.all()
serializer_class = AdminUserSerializer
permission_classes = (APIAdminPermission,)
class UserAdminDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = User.objects.all()
serializer_class = AdminUserSerializer
permission_classes = (APIAdminPermission,)
| from django.contrib.auth import get_user_model
from oscarapi.utils.loading import get_api_class
from rest_framework import generics
APIAdminPermission = get_api_class("permissions", "APIAdminPermission")
AdminUserSerializer = get_api_class("serializers.admin.user", "AdminUserSerializer")
User = get_user_model()
class UserAdminList(generics.ListCreateAPIView):
"""
List of all users, either frontend or admin users.
The fields shown in this view can be changed using the ``OSCARAPI_ADMIN_USER_FIELDS``
setting
"""
queryset = User.objects.all()
serializer_class = AdminUserSerializer
permission_classes = (APIAdminPermission,)
class UserAdminDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = User.objects.all()
serializer_class = AdminUserSerializer
permission_classes = (APIAdminPermission,) | en | 0.682791 | List of all users, either frontend or admin users. The fields shown in this view can be changed using the ``OSCARAPI_ADMIN_USER_FIELDS`` setting | 2.160067 | 2 |
idunn/blocks/covid19.py | QwantResearch/idunn | 26 | 6616350 | import logging
from enum import Enum
from typing import Optional, Literal
from idunn import settings
from idunn.utils.covid19_dataset import get_poi_covid_status
from .base import BaseBlock
from .opening_hour import OpeningHourBlock
logger = logging.getLogger(__name__)
COVID19_BLOCK_COUNTRIES = settings["COVID19_BLOCK_COUNTRIES"].split(",")
class CovidOpeningStatus(str, Enum):
open_as_usual = "open_as_usual"
open = "open"
maybe_open = "maybe_open"
closed = "closed"
unknown = "unknown"
class Covid19Block(BaseBlock):
type: Literal["covid19"] = "covid19"
status: CovidOpeningStatus
opening_hours: Optional[OpeningHourBlock]
note: Optional[str]
contribute_url: Optional[str]
@classmethod
def get_ca_reste_ouvert_url(cls, place):
try:
_, osm_type, osm_id = place.get_id().split(":")
cro_id = f"{osm_type[0]}{osm_id}"
except (IndexError, ValueError):
logger.warning("Failed to build caresteouvert id for %s", place.get_id())
return None
lat = place.get_coord()["lat"]
lon = place.get_coord()["lon"]
return f"https://www.caresteouvert.fr/@{lat:.6f},{lon:.6f},17/place/{cro_id}"
@classmethod
def from_es(cls, place, lang):
if place.PLACE_TYPE != "poi":
return None
if settings["BLOCK_COVID_ENABLED"] is not True:
return None
properties = place.properties
if place.get_country_code() not in COVID19_BLOCK_COUNTRIES:
return None
opening_hours = None
note = place.properties.get("description:covid19")
status = CovidOpeningStatus.unknown
contribute_url = None
raw_opening_hours = properties.get("opening_hours:covid19")
covid_status_from_redis = None
if place.get_meta().source == "osm" and settings["COVID19_USE_REDIS_DATASET"]:
covid_status_from_redis = get_poi_covid_status(place.get_id())
if covid_status_from_redis is not None:
contribute_url = cls.get_ca_reste_ouvert_url(place)
note = covid_status_from_redis.infos or None
if covid_status_from_redis.opening_hours:
opening_hours = OpeningHourBlock.from_es_with_oh(
place, lang, covid_status_from_redis.opening_hours
)
if covid_status_from_redis.status == "ouvert":
status = CovidOpeningStatus.open_as_usual
elif covid_status_from_redis.status == "ouvert_adapté":
status = CovidOpeningStatus.open
elif covid_status_from_redis.status == "partiel":
status = CovidOpeningStatus.maybe_open
elif covid_status_from_redis.status == "fermé":
status = CovidOpeningStatus.closed
elif raw_opening_hours == "same":
opening_hours = OpeningHourBlock.from_es(place, lang)
status = CovidOpeningStatus.open_as_usual
elif raw_opening_hours == "open":
status = CovidOpeningStatus.open
elif raw_opening_hours == "restricted":
status = CovidOpeningStatus.maybe_open
elif raw_opening_hours == "off":
status = CovidOpeningStatus.closed
elif raw_opening_hours is not None:
opening_hours = OpeningHourBlock.from_es_with_oh(place, lang, raw_opening_hours)
if opening_hours is None:
status = CovidOpeningStatus.unknown
elif opening_hours.status in ["open", "closed"]:
if raw_opening_hours == properties.get("opening_hours"):
status = CovidOpeningStatus.open_as_usual
else:
status = CovidOpeningStatus.open
else:
status = CovidOpeningStatus.maybe_open
if (
status == CovidOpeningStatus.unknown
and not settings["COVID19_BLOCK_KEEP_STATUS_UNKNOWN"]
):
return None
return cls(
status=status, note=note, opening_hours=opening_hours, contribute_url=contribute_url
)
| import logging
from enum import Enum
from typing import Optional, Literal
from idunn import settings
from idunn.utils.covid19_dataset import get_poi_covid_status
from .base import BaseBlock
from .opening_hour import OpeningHourBlock
logger = logging.getLogger(__name__)
COVID19_BLOCK_COUNTRIES = settings["COVID19_BLOCK_COUNTRIES"].split(",")
class CovidOpeningStatus(str, Enum):
open_as_usual = "open_as_usual"
open = "open"
maybe_open = "maybe_open"
closed = "closed"
unknown = "unknown"
class Covid19Block(BaseBlock):
type: Literal["covid19"] = "covid19"
status: CovidOpeningStatus
opening_hours: Optional[OpeningHourBlock]
note: Optional[str]
contribute_url: Optional[str]
@classmethod
def get_ca_reste_ouvert_url(cls, place):
try:
_, osm_type, osm_id = place.get_id().split(":")
cro_id = f"{osm_type[0]}{osm_id}"
except (IndexError, ValueError):
logger.warning("Failed to build caresteouvert id for %s", place.get_id())
return None
lat = place.get_coord()["lat"]
lon = place.get_coord()["lon"]
return f"https://www.caresteouvert.fr/@{lat:.6f},{lon:.6f},17/place/{cro_id}"
@classmethod
def from_es(cls, place, lang):
if place.PLACE_TYPE != "poi":
return None
if settings["BLOCK_COVID_ENABLED"] is not True:
return None
properties = place.properties
if place.get_country_code() not in COVID19_BLOCK_COUNTRIES:
return None
opening_hours = None
note = place.properties.get("description:covid19")
status = CovidOpeningStatus.unknown
contribute_url = None
raw_opening_hours = properties.get("opening_hours:covid19")
covid_status_from_redis = None
if place.get_meta().source == "osm" and settings["COVID19_USE_REDIS_DATASET"]:
covid_status_from_redis = get_poi_covid_status(place.get_id())
if covid_status_from_redis is not None:
contribute_url = cls.get_ca_reste_ouvert_url(place)
note = covid_status_from_redis.infos or None
if covid_status_from_redis.opening_hours:
opening_hours = OpeningHourBlock.from_es_with_oh(
place, lang, covid_status_from_redis.opening_hours
)
if covid_status_from_redis.status == "ouvert":
status = CovidOpeningStatus.open_as_usual
elif covid_status_from_redis.status == "ouvert_adapté":
status = CovidOpeningStatus.open
elif covid_status_from_redis.status == "partiel":
status = CovidOpeningStatus.maybe_open
elif covid_status_from_redis.status == "fermé":
status = CovidOpeningStatus.closed
elif raw_opening_hours == "same":
opening_hours = OpeningHourBlock.from_es(place, lang)
status = CovidOpeningStatus.open_as_usual
elif raw_opening_hours == "open":
status = CovidOpeningStatus.open
elif raw_opening_hours == "restricted":
status = CovidOpeningStatus.maybe_open
elif raw_opening_hours == "off":
status = CovidOpeningStatus.closed
elif raw_opening_hours is not None:
opening_hours = OpeningHourBlock.from_es_with_oh(place, lang, raw_opening_hours)
if opening_hours is None:
status = CovidOpeningStatus.unknown
elif opening_hours.status in ["open", "closed"]:
if raw_opening_hours == properties.get("opening_hours"):
status = CovidOpeningStatus.open_as_usual
else:
status = CovidOpeningStatus.open
else:
status = CovidOpeningStatus.maybe_open
if (
status == CovidOpeningStatus.unknown
and not settings["COVID19_BLOCK_KEEP_STATUS_UNKNOWN"]
):
return None
return cls(
status=status, note=note, opening_hours=opening_hours, contribute_url=contribute_url
)
| none | 1 | 2.439121 | 2 |