hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acef48fe380671956911b36e8029a71621cf4992 | 1,152 | py | Python | rlbottraining/common_exercises/wall_play.py | aydensutt/RLBotTraining | e98d7f09971bfd02012bad98e54b882dc059ec8a | [
"MIT"
] | 9 | 2019-01-27T11:59:28.000Z | 2022-03-21T10:20:17.000Z | rlbottraining/common_exercises/wall_play.py | aydensutt/RLBotTraining | e98d7f09971bfd02012bad98e54b882dc059ec8a | [
"MIT"
] | 8 | 2019-01-10T17:42:54.000Z | 2020-02-25T02:19:58.000Z | rlbottraining/common_exercises/wall_play.py | aydensutt/RLBotTraining | e98d7f09971bfd02012bad98e54b882dc059ec8a | [
"MIT"
] | 7 | 2019-01-03T14:19:10.000Z | 2021-06-30T04:27:59.000Z | from dataclasses import dataclass
from rlbot.utils.game_state_util import GameState, BallState, CarState, Physics, Vector3, Rotator
from rlbottraining.common_exercises.common_base_exercises import StrikerExercise
from rlbottraining.rng import SeededRandomNumberGenerator
from rlbottraining.training_exercise import Playlist
@dataclass
class BallRollingTowardsWall(StrikerExercise):
    """Striker exercise: the ball rolls ahead of the car at nearly matched speed."""
    def make_game_state(self, rng: SeededRandomNumberGenerator) -> GameState:
        # Ball starts 500uu ahead of the car; both travel in +x, the car
        # trailing by 1 uu/s so the gap stays essentially constant.
        ball = BallState(Physics(location=Vector3(500, 0, 100),
                                 velocity=Vector3(1400, 0, 0)))
        car = CarState(boost_amount=100, jumped=True, double_jumped=True,
                       physics=Physics(location=Vector3(0, 250, 25),
                                       velocity=Vector3(1399, 0, 0),
                                       rotation=Rotator(0, 0, 0)))
        return GameState(ball=ball, cars={0: car})
def make_default_playlist() -> Playlist:
    """Return the default playlist of exercises defined in this module."""
    return [BallRollingTowardsWall('BallRollingTowardsWall')]
| 42.666667 | 97 | 0.710069 |
acef491fe23ebc9f07aa4322b73d64ea6f2b2eea | 4,370 | py | Python | ingestion_server/test/generate_integration_test_docker_compose.py | pavitra14/cccatalog-api | aa85014a41e5f1fdd96e50c739ecab999bb06fb0 | [
"MIT"
] | 122 | 2018-09-12T13:49:37.000Z | 2021-12-05T07:04:59.000Z | ingestion_server/test/generate_integration_test_docker_compose.py | senyor/cccatalog-api | a18f75fccdd7345beff820dff4ee69604cd53748 | [
"MIT"
] | 500 | 2018-04-30T15:26:43.000Z | 2021-06-07T16:28:44.000Z | ingestion_server/test/generate_integration_test_docker_compose.py | senyor/cccatalog-api | a18f75fccdd7345beff820dff4ee69604cd53748 | [
"MIT"
] | 144 | 2018-08-11T17:11:50.000Z | 2022-01-12T20:39:09.000Z | #!/usr/bin/env python3
import yaml
import datetime
import os
import sys
import traceback
import textwrap
"""
Parses docker-compose file and generates an integration-test-docker-compose.yml.
The generated file is written to the same directory this script resides in.
Q: Why didn't you just use multiple docker-compose files and inheritance?
A: If you are running the development docker-compose file already, launching
an inherited elasticsearch/postgres service will result in the containers
being destroyed and recreated. Using this approach ensures that:
1) Running tests doesn't interfere with your development environment.
2) The file stays up-to-date without manual copy-pasting.
3) We don't blow up running containers on Travis CI.
"""
# Resolve paths relative to this script so it works from any working directory.
this_dir = os.path.dirname(os.path.realpath(__file__))
# Output is written next to this script; the source compose file lives two
# directories up, at the repository root.
outname = this_dir + '/integration-test-docker-compose.yml'
parent_docker_compose = this_dir + '/../../docker-compose.yml'
with open(parent_docker_compose, 'r') as docker_compose_file:
    docker_compose = yaml.safe_load(docker_compose_file)
try:
    # Grab the four services the integration tests need.
    db = docker_compose['services']['db']
    es = docker_compose['services']['es']
    ingestion_server = docker_compose['services']['ingestion-server']
    upstream_db = docker_compose['services']['upstream_db']
    # Delete services we're not testing.
    desired_services = {'es', 'db', 'ingestion-server', 'upstream_db'}
    for service in dict(docker_compose['services']):
        if service not in desired_services:
            del docker_compose['services'][service]
    del docker_compose['services']['es']['healthcheck']
    # Expose alternate ports. Use the same internal port defined in the
    # original docker-compose file.
    upstream_db_port = upstream_db['ports'][0].split(':')[1]
    upstream_db['ports'][0] = '59999' + ':' + upstream_db_port
    db['ports'][0] = '60000' + ':' + db['ports'][0].split(':')[1]
    es['ports'][0] = '60001' + ':' + es['ports'][0].split(':')[1]
    ingestion_api_port = ingestion_server['ports'][0].split(':')[1]
    ingestion_server['ports'][0] = '60002' + ':' + ingestion_api_port
    # Configure ingestion server to point to integration containers.
    upstream_name = 'integration-upstream'
    ingestion_server['environment']['DATABASE_HOST'] = 'integration-db'
    ingestion_server['environment']['ELASTICSEARCH_URL'] = 'integration-es'
    ingestion_server['environment']['UPSTREAM_DB_HOST'] = upstream_name
    ingestion_server['depends_on'] = ['integration-es', 'integration-db']
    ingestion_server['build'] = '../'
    # Create a volume for the mock data
    db['volumes'] = ['./mock_data:/mock_data']
    upstream_db['volumes'] = ['./mock_data:/mock_data']
    # Rename the services and update ports.
    # First remove the original keys, then re-insert each service under its
    # 'integration-*' name so the test containers never collide with dev ones.
    for service in dict(docker_compose['services']):
        if service in desired_services:
            del docker_compose['services'][service]
    docker_compose['services']['integration-db'] = db
    docker_compose['services']['integration-es'] = es
    docker_compose['services']['integration-ingestion'] = ingestion_server
    docker_compose['services']['integration-upstream'] = upstream_db
    # Start the document with a warning message (wrapped to 79 cols and
    # prefixed with '# ' so it is a YAML comment block).
    warning_message = '\n'.join(textwrap.wrap(
        'This docker-compose file was generated from '
        + parent_docker_compose + '. Do not modify this file directly. '
        'Your changes will be overwritten. Last update: '
        + str(datetime.datetime.now()), width=79,
        initial_indent='# ', subsequent_indent='# ')) + '\n\n'
    with open(outname, 'w') as integration_docker_compose:
        integration_docker_compose.truncate()
        integration_docker_compose.write(warning_message)
        yaml.dump(docker_compose, integration_docker_compose,
                  default_flow_style=False)
except KeyError as e:
    # A missing service/port/environment key means the parent compose file
    # changed shape; abort without writing anything.
    print(traceback.format_exc())
    print('Failed to parse docker-compose.yml due to missing key. No file'
          ' was written to disk. Missing key: ' + str(e))
    sys.exit(1)
except Exception as e:
    print(traceback.format_exc())
    print('Failed to generate', outname, 'due to exception:', e)
| 46.489362 | 80 | 0.66087 |
acef4987f9700f3e63d90b4cca140a809d19bd6a | 15,155 | py | Python | salt/scripts.py | jubrad/salt | 7960334fb726cfde45e6409da79a65535c626685 | [
"Apache-2.0"
] | 1 | 2020-01-02T09:03:21.000Z | 2020-01-02T09:03:21.000Z | salt/scripts.py | jubrad/salt | 7960334fb726cfde45e6409da79a65535c626685 | [
"Apache-2.0"
] | null | null | null | salt/scripts.py | jubrad/salt | 7960334fb726cfde45e6409da79a65535c626685 | [
"Apache-2.0"
] | 1 | 2020-01-02T09:03:24.000Z | 2020-01-02T09:03:24.000Z | # -*- coding: utf-8 -*-
'''
This module contains the function calls to execute command line scripts
'''
# Import python libs
from __future__ import absolute_import, print_function
import os
import sys
import time
import signal
import logging
import functools
import threading
import traceback
import signal
import functools
from random import randint
# Import salt libs
from salt.exceptions import SaltSystemExit, SaltClientError, SaltReqTimeoutError
import salt.defaults.exitcodes # pylint: disable=unused-import
log = logging.getLogger(__name__)
def _handle_interrupt(exc, original_exc, hardfail=False, trace=u''):
'''
if hardfailing:
If we got the original stacktrace, log it
If all cases, raise the original exception
but this is logically part the initial
stack.
else just let salt exit gracefully
'''
if hardfail:
if trace:
log.error(trace)
raise original_exc
else:
raise exc
def _handle_signals(client, signum, sigframe):
    '''
    Shared SIGINT/SIGTERM handler for CLI clients: build a user-facing exit
    message (with a jid lookup hint when a job was published) and delegate
    to ``_handle_interrupt`` to exit gracefully or hard-crash.
    '''
    trace = traceback.format_exc()
    try:
        hardcrash = client.options.hard_crash
    except (AttributeError, KeyError):
        # Client has no parsed options (or no hard_crash flag): default off.
        hardcrash = False
    if signum == signal.SIGINT:
        exit_msg = u'\nExiting gracefully on Ctrl-c'
        try:
            # If a job was already published, tell the user how to fetch
            # its returns later.
            jid = client.local_client.pub_data[u'jid']
            exit_msg += (
                u'\n'
                u'This job\'s jid is: {0}\n'
                u'The minions may not have all finished running and any remaining '
                u'minions will return upon completion. To look up the return data '
                u'for this job later, run the following command:\n\n'
                u'salt-run jobs.lookup_jid {0}'.format(jid)
            )
        except (AttributeError, KeyError):
            # No local client / no published job: keep the short message.
            pass
    else:
        exit_msg = None
    _handle_interrupt(
        SystemExit(exit_msg),
        Exception(u'\nExiting with hard crash on Ctrl-c'),
        hardcrash, trace=trace)
def _install_signal_handlers(client):
    '''
    Install SIGINT and SIGTERM handlers that delegate to ``_handle_signals``
    for ``client`` — but only when the application has not already installed
    custom handlers (i.e. the handler is still the OS default).
    '''
    if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
        # No custom signal handling was added, install our own
        signal.signal(signal.SIGINT, functools.partial(_handle_signals, client))
    if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
        # No custom signal handling was added, install our own.
        # Bug fix: this branch previously re-registered SIGINT instead of
        # SIGTERM, leaving SIGTERM at the OS default.
        signal.signal(signal.SIGTERM, functools.partial(_handle_signals, client))
def salt_master():
    '''
    Entry point: start the salt master daemon.
    '''
    import salt.cli.daemons
    salt.cli.daemons.Master().start()
def minion_process():
    '''
    Run a single minion inside a child process, watching the parent and
    exiting with SALT_KEEPALIVE so the parent loop restarts us on failure.
    '''
    import salt.utils.platform
    import salt.cli.daemons
    # salt_minion spawns this function in a new process
    salt.utils.appendproctitle(u'KeepAlive')
    def handle_hup(manager, sig, frame):
        # SIGHUP triggers an in-place config reload of the running minion.
        manager.minion.reload()
    def suicide_when_without_parent(parent_pid):
        '''
        Have the minion suicide if the parent process is gone.
        NOTE: small race issue where the parent PID could be replaced
        with another process with the same PID!
        '''
        while True:
            time.sleep(5)
            try:
                # check pid alive (Unix only trick!)
                if os.getuid() == 0 and not salt.utils.platform.is_windows():
                    os.kill(parent_pid, 0)
            except OSError as exc:
                # forcibly exit, regular sys.exit raises an exception-- which
                # isn't sufficient in a thread
                log.error(u'Minion process encountered exception: %s', exc)
                os._exit(salt.defaults.exitcodes.EX_GENERIC)
    if not salt.utils.platform.is_windows():
        thread = threading.Thread(target=suicide_when_without_parent, args=(os.getppid(),))
        thread.start()
    minion = salt.cli.daemons.Minion()
    signal.signal(signal.SIGHUP,
                  functools.partial(handle_hup,
                                    minion))
    try:
        minion.start()
    except (SaltClientError, SaltReqTimeoutError, SaltSystemExit) as exc:
        log.warning(u'Fatal functionality error caught by minion handler:\n', exc_info=True)
        log.warning(u'** Restarting minion **')
        # Randomize the restart delay (up to random_reauth_delay) so a fleet
        # of minions does not stampede the master with re-auth attempts.
        delay = 60
        if minion is not None and hasattr(minion, u'config'):
            delay = minion.config.get(u'random_reauth_delay', 60)
        delay = randint(1, delay)
        log.info(u'waiting random_reauth_delay %ss', delay)
        time.sleep(delay)
    # Always signal the parent keep-alive loop that a restart is wanted.
    sys.exit(salt.defaults.exitcodes.SALT_KEEPALIVE)
def salt_minion():
    '''
    Start the salt minion in a subprocess.
    Auto restart minion on error.
    '''
    import signal
    import salt.utils.platform
    import salt.utils.process
    salt.utils.process.notify_systemd()
    import salt.cli.daemons
    import multiprocessing
    if u'' in sys.path:
        sys.path.remove(u'')
    # Windows and --disable-keepalive both run the minion directly, with no
    # supervising restart loop.
    if salt.utils.platform.is_windows():
        minion = salt.cli.daemons.Minion()
        minion.start()
        return
    if u'--disable-keepalive' in sys.argv:
        sys.argv.remove(u'--disable-keepalive')
        minion = salt.cli.daemons.Minion()
        minion.start()
        return
    def escalate_signal_to_process(pid, signum, sigframe):  # pylint: disable=unused-argument
        '''
        Escalate the signal received to the multiprocessing process that
        is actually running the minion
        '''
        # escalate signal
        os.kill(pid, signum)
    # keep one minion subprocess running
    prev_sigint_handler = signal.getsignal(signal.SIGINT)
    prev_sigterm_handler = signal.getsignal(signal.SIGTERM)
    while True:
        try:
            process = multiprocessing.Process(target=minion_process)
            process.start()
            # Forward INT/TERM/HUP to the child so it can shut down/reload.
            signal.signal(signal.SIGTERM,
                          functools.partial(escalate_signal_to_process,
                                            process.pid))
            signal.signal(signal.SIGINT,
                          functools.partial(escalate_signal_to_process,
                                            process.pid))
            signal.signal(signal.SIGHUP,
                          functools.partial(escalate_signal_to_process,
                                            process.pid))
        except Exception:  # pylint: disable=broad-except
            # if multiprocessing does not work, fall back to running the
            # minion in this process without a keep-alive loop
            minion = salt.cli.daemons.Minion()
            minion.start()
            break
        process.join()
        # Process exited or was terminated. Since we're going to try to restart
        # it, we MUST, reset signal handling to the previous handlers
        signal.signal(signal.SIGINT, prev_sigint_handler)
        signal.signal(signal.SIGTERM, prev_sigterm_handler)
        # Any exit code other than SALT_KEEPALIVE means "do not restart".
        if not process.exitcode == salt.defaults.exitcodes.SALT_KEEPALIVE:
            sys.exit(process.exitcode)
        # ontop of the random_reauth_delay already preformed
        # delay extra to reduce flooding and free resources
        # NOTE: values are static but should be fine.
        time.sleep(2 + randint(1, 10))
        # need to reset logging because new minion objects
        # cause extra log handlers to accumulate
        rlogger = logging.getLogger()
        for handler in rlogger.handlers:
            rlogger.removeHandler(handler)
        logging.basicConfig()
def proxy_minion_process(queue):
    '''
    Run a single proxy minion in a child process; report the restart delay
    (or 0 for "no restart") back to the parent through ``queue``.
    '''
    import salt.cli.daemons
    import salt.utils.platform
    # salt_minion spawns this function in a new process
    def suicide_when_without_parent(parent_pid):
        '''
        Have the minion suicide if the parent process is gone
        NOTE: there is a small race issue where the parent PID could be replace
        with another process with the same PID!
        '''
        while True:
            time.sleep(5)
            try:
                # check pid alive (Unix only trick!)
                os.kill(parent_pid, 0)
            except OSError:
                # forcibly exit, regular sys.exit raises an exception-- which
                # isn't sufficient in a thread
                os._exit(999)
    if not salt.utils.platform.is_windows():
        thread = threading.Thread(target=suicide_when_without_parent, args=(os.getppid(),))
        thread.start()
    restart = False
    proxyminion = None
    status = salt.defaults.exitcodes.EX_OK
    try:
        proxyminion = salt.cli.daemons.ProxyMinion()
        proxyminion.start()
    except (Exception, SaltClientError, SaltReqTimeoutError, SaltSystemExit) as exc:
        log.error(u'Proxy Minion failed to start: ', exc_info=True)
        restart = True
        # status is superfluous since the process will be restarted
        status = salt.defaults.exitcodes.SALT_KEEPALIVE
    except SystemExit as exc:
        # Deliberate exit (Ctrl-C, --version, ...): propagate the code as-is.
        restart = False
        status = exc.code
    if restart is True:
        log.warning(u'** Restarting proxy minion **')
        # Randomize the delay (up to random_reauth_delay) to avoid re-auth
        # stampedes against the master.
        delay = 60
        if proxyminion is not None:
            if hasattr(proxyminion, u'config'):
                delay = proxyminion.config.get(u'random_reauth_delay', 60)
        random_delay = randint(1, delay)
        log.info(u'Sleeping random_reauth_delay of %s seconds', random_delay)
        # preform delay after minion resources have been cleaned
        queue.put(random_delay)
    else:
        queue.put(0)
    sys.exit(status)
def salt_proxy():
    '''
    Start a proxy minion.
    '''
    import salt.cli.daemons
    import salt.utils.platform
    import multiprocessing
    if u'' in sys.path:
        sys.path.remove(u'')
    # Windows and --disable-keepalive both run the proxy minion directly,
    # without the supervising restart loop below.
    if salt.utils.platform.is_windows():
        proxyminion = salt.cli.daemons.ProxyMinion()
        proxyminion.start()
        return
    if u'--disable-keepalive' in sys.argv:
        sys.argv.remove(u'--disable-keepalive')
        proxyminion = salt.cli.daemons.ProxyMinion()
        proxyminion.start()
        return
    # keep one minion subprocess running
    while True:
        try:
            queue = multiprocessing.Queue()
        except Exception:
            # This breaks in containers
            proxyminion = salt.cli.daemons.ProxyMinion()
            proxyminion.start()
            return
        process = multiprocessing.Process(target=proxy_minion_process, args=(queue,))
        process.start()
        try:
            process.join()
            try:
                # Child reports its requested restart delay via the queue.
                restart_delay = queue.get(block=False)
            except Exception:
                if process.exitcode == 0:
                    # Minion process ended naturally, Ctrl+C or --version
                    break
                restart_delay = 60
            if restart_delay == 0:
                # Minion process ended naturally, Ctrl+C, --version, etc.
                sys.exit(process.exitcode)
            # delay restart to reduce flooding and allow network resources to close
            time.sleep(restart_delay)
        except KeyboardInterrupt:
            break
        # need to reset logging because new minion objects
        # cause extra log handlers to accumulate
        rlogger = logging.getLogger()
        for handler in rlogger.handlers:
            rlogger.removeHandler(handler)
        logging.basicConfig()
def salt_syndic():
    '''
    Start the salt syndic.
    '''
    import salt.utils.process
    salt.utils.process.notify_systemd()
    import salt.cli.daemons
    pid = os.getpid()
    try:
        syndic = salt.cli.daemons.Syndic()
        syndic.start()
    except KeyboardInterrupt:
        # On Ctrl-c send SIGTERM (15) to our own process to trigger shutdown.
        os.kill(pid, 15)
def salt_key():
    '''
    Entry point for salt-key: manage the master's authentication keys.
    Any failure is reported on stderr instead of crashing with a traceback.
    '''
    import salt.cli.key
    try:
        key_cli = salt.cli.key.SaltKey()
        _install_signal_handlers(key_cli)
        key_cli.run()
    except Exception as err:
        sys.stderr.write(u"Error: {0}\n".format(err))
def salt_cp():
    '''
    Entry point for salt-cp: publish file copies to minions from the
    command line on the master.
    '''
    import salt.cli.cp
    cp_cli = salt.cli.cp.SaltCPCli()
    _install_signal_handlers(cp_cli)
    cp_cli.run()
def salt_call():
    '''
    Entry point for salt-call: run an execution module function locally,
    without requiring a running salt minion.
    '''
    import salt.cli.call
    # Drop the implicit current-directory entry from the module search path.
    if u'' in sys.path:
        sys.path.remove(u'')
    caller = salt.cli.call.SaltCall()
    _install_signal_handlers(caller)
    caller.run()
def salt_run():
    '''
    Entry point for salt-run: execute a salt runner (convenience routine)
    on the master.
    '''
    import salt.cli.run
    # Drop the implicit current-directory entry from the module search path.
    if u'' in sys.path:
        sys.path.remove(u'')
    runner = salt.cli.run.SaltRun()
    _install_signal_handlers(runner)
    runner.run()
def salt_ssh():
    '''
    Execute the salt-ssh system.

    On SaltClientError the exception is routed through _handle_interrupt so
    the hard_crash option controls whether the full trace is surfaced.
    '''
    import salt.cli.ssh
    if u'' in sys.path:
        sys.path.remove(u'')
    # Robustness fix: pre-bind ``client`` so the except block can probe
    # client.options even when SaltSSH() itself raised (previously that
    # path hit a NameError, which the AttributeError/KeyError guard below
    # did not catch).
    client = None
    try:
        client = salt.cli.ssh.SaltSSH()
        _install_signal_handlers(client)
        client.run()
    except SaltClientError as err:
        trace = traceback.format_exc()
        try:
            hardcrash = client.options.hard_crash
        except (AttributeError, KeyError):
            # No client (constructor failed) or no hard_crash option parsed.
            hardcrash = False
        _handle_interrupt(
            SystemExit(err),
            err,
            hardcrash, trace=trace)
def salt_cloud():
    '''
    The main function for salt-cloud
    '''
    # Define 'salt' global so we may use it after ImportError. Otherwise,
    # UnboundLocalError will be raised.
    global salt  # pylint: disable=W0602
    try:
        # Late-imports for CLI performance
        import salt.cloud
        import salt.cloud.cli
    except ImportError as e:
        # No salt cloud on Windows
        log.error(u'Error importing salt cloud: %s', e)
        print(u'salt-cloud is not available in this system')
        sys.exit(salt.defaults.exitcodes.EX_UNAVAILABLE)
    if u'' in sys.path:
        # Drop the implicit current-directory entry from the search path.
        sys.path.remove(u'')
    client = salt.cloud.cli.SaltCloud()
    _install_signal_handlers(client)
    client.run()
def salt_api():
    '''
    Entry point for the salt-api daemon.
    '''
    import salt.utils.process
    salt.utils.process.notify_systemd()
    import salt.cli.api
    salt.cli.api.SaltAPI().start()  # pylint: disable=E1120
def salt_main():
    '''
    Entry point for the ``salt`` CLI used on the master to publish
    commands to the salt system.
    '''
    import salt.cli.salt
    # Drop the implicit current-directory entry from the module search path.
    if u'' in sys.path:
        sys.path.remove(u'')
    cmd = salt.cli.salt.SaltCMD()
    _install_signal_handlers(cmd)
    cmd.run()
def salt_spm():
    '''
    Entry point for spm, the Salt Package Manager.

    .. versionadded:: 2015.8.0
    '''
    import salt.cli.spm
    salt.cli.spm.SPM().run()  # pylint: disable=E1120
def salt_extend(extension, name, description, salt_dir, merge):
    '''
    Quickstart helper for developing on the saltstack installation.

    .. versionadded:: 2016.11.0
    '''
    import salt.utils.extend
    salt.utils.extend.run(
        extension=extension, name=name, description=description,
        salt_dir=salt_dir, merge=merge)
| 30.009901 | 93 | 0.615308 |
acef49b67590fa67f0e7314deaef55b6ba381cac | 2,545 | py | Python | game/components/equipment.py | HexDecimal/7drl-2022 | 755949875cc11e288908eccaee102c7ca0e43777 | [
"CC0-1.0"
] | null | null | null | game/components/equipment.py | HexDecimal/7drl-2022 | 755949875cc11e288908eccaee102c7ca0e43777 | [
"CC0-1.0"
] | null | null | null | game/components/equipment.py | HexDecimal/7drl-2022 | 755949875cc11e288908eccaee102c7ca0e43777 | [
"CC0-1.0"
] | null | null | null | from __future__ import annotations
from typing import Optional
import game.entity
from equipment_types import EquipmentType
from game.components.base_component import BaseComponent
class Equipment(BaseComponent):
    """Tracks an entity's equipped weapon and armor and the bonuses they grant."""

    def __init__(self, weapon: Optional[game.entity.Item] = None, armor: Optional[game.entity.Item] = None):
        super().__init__()
        self.weapon = weapon  # item occupying the "weapon" slot, or None
        self.armor = armor  # item occupying the "armor" slot, or None

    @property
    def defense_bonus(self) -> int:
        """Sum of the defense bonuses of all equipped items."""
        bonus = 0
        if self.weapon is not None and self.weapon.equippable is not None:
            bonus += self.weapon.equippable.defense_bonus
        if self.armor is not None and self.armor.equippable is not None:
            bonus += self.armor.equippable.defense_bonus
        return bonus

    @property
    def power_bonus(self) -> int:
        """Sum of the power bonuses of all equipped items."""
        bonus = 0
        if self.weapon is not None and self.weapon.equippable is not None:
            bonus += self.weapon.equippable.power_bonus
        if self.armor is not None and self.armor.equippable is not None:
            bonus += self.armor.equippable.power_bonus
        return bonus

    def item_is_equipped(self, item: game.entity.Item) -> bool:
        """Return True if *item* occupies either equipment slot."""
        return self.weapon == item or self.armor == item

    def unequip_message(self, item_name: str) -> None:
        """Log the removal of *item_name* to the engine's message log."""
        self.owner.gamemap.engine.message_log.add_message(f"You remove the {item_name}.")

    def equip_message(self, item_name: str) -> None:
        """Log the equipping of *item_name* to the engine's message log."""
        self.owner.gamemap.engine.message_log.add_message(f"You equip the {item_name}.")

    def equip_to_slot(self, slot: str, item: game.entity.Item, add_message: bool) -> None:
        """Place *item* in *slot* ("weapon" or "armor"), unequipping any occupant."""
        current_item = getattr(self, slot)
        if current_item is not None:
            self.unequip_from_slot(slot, add_message)
        setattr(self, slot, item)
        if add_message:
            self.equip_message(item.name)

    def unequip_from_slot(self, slot: str, add_message: bool) -> None:
        """Empty *slot*; a no-op when the slot is already empty.

        Robustness fix: guard against an empty slot so this no longer
        raises AttributeError on ``None.name`` when called directly.
        """
        current_item = getattr(self, slot)
        if current_item is None:
            return
        if add_message:
            self.unequip_message(current_item.name)
        setattr(self, slot, None)

    def toggle_equip(self, equippable_item: game.entity.Item, add_message: bool = True) -> None:
        """Equip *equippable_item* in its matching slot, or unequip it if worn."""
        if equippable_item.equippable and equippable_item.equippable.equipment_type == EquipmentType.WEAPON:
            slot = "weapon"
        else:
            slot = "armor"
        if getattr(self, slot) == equippable_item:
            self.unequip_from_slot(slot, add_message)
        else:
            self.equip_to_slot(slot, equippable_item, add_message)
| 32.628205 | 108 | 0.665226 |
acef4a250a0e8a56837816841adb293227d2a4a4 | 347 | py | Python | setup.py | dbchristenson/Mesindexer | 708bbb1b81f512bc4410f71a68d35942f175c944 | [
"MIT"
] | null | null | null | setup.py | dbchristenson/Mesindexer | 708bbb1b81f512bc4410f71a68d35942f175c944 | [
"MIT"
] | null | null | null | setup.py | dbchristenson/Mesindexer | 708bbb1b81f512bc4410f71a68d35942f175c944 | [
"MIT"
] | null | null | null | from gettext import install
from setuptools import find_packages, setup
# Package metadata for setuptools.
# NOTE(review): `find_packages(include=['requests'])` filters *local* package
# names, so this likely matches no packages -- verify the intended package
# name. `version` is a float where setuptools expects a string -- confirm.
setup(
    name='mesidexer',
    packages=find_packages(include=['requests']),
    version=0.1,
    description='A basic library that makes interactions with Algorand nodes easier.',
    author='DB Christenson',
    license='MIT License',
    install_requires=['requests']
) | 28.916667 | 86 | 0.729107 |
acef4a269b67e06fb18e47ba85c949f002690554 | 370 | py | Python | guet/commands/usercommands/start/hook_strategy.py | jonnynabors/guet | c705c11aa8955fa7d89ed3aea7db69bcb1293e46 | [
"Apache-2.0"
] | null | null | null | guet/commands/usercommands/start/hook_strategy.py | jonnynabors/guet | c705c11aa8955fa7d89ed3aea7db69bcb1293e46 | [
"Apache-2.0"
] | null | null | null | guet/commands/usercommands/start/hook_strategy.py | jonnynabors/guet | c705c11aa8955fa7d89ed3aea7db69bcb1293e46 | [
"Apache-2.0"
] | null | null | null | from guet.git.git import Git
from guet.commands.strategies.strategy import CommandStrategy
class HookStrategy(CommandStrategy):
    """Template-method strategy: subclasses supply ``_hook_apply`` and this
    base class reports success to the user after it runs."""

    def __init__(self, git: Git):
        self.git = git

    def _hook_apply(self) -> None:
        # Concrete hook installation is delegated entirely to subclasses.
        raise NotImplementedError

    def apply(self):
        self._hook_apply()
        print('guet successfully started in this repository.')
| 23.125 | 62 | 0.697297 |
acef4a829c5536ee677d413bb4bcd96cbd11b6b7 | 514 | py | Python | grade_to_gpa.py | jasonlmfong/UofT-Grade-Analytics | e19c5ca812793c676ed005299ac409fa1e44432e | [
"MIT"
] | null | null | null | grade_to_gpa.py | jasonlmfong/UofT-Grade-Analytics | e19c5ca812793c676ed005299ac409fa1e44432e | [
"MIT"
] | null | null | null | grade_to_gpa.py | jasonlmfong/UofT-Grade-Analytics | e19c5ca812793c676ed005299ac409fa1e44432e | [
"MIT"
] | null | null | null | def find_gpa(float):
"""gives the gpa according to uoft scale"""
if 85 <= float <= 100:
return 4
if 80 <= float <= 84:
return 3.7
if 77 <= float <= 79:
return 3.3
if 73 <= float <= 76:
return 3
if 70 <= float <= 72:
return 2.7
if 67 <= float <= 69:
return 2.3
if 63 <= float <= 66:
return 2
if 60 <= float <= 62:
return 1.7
if 57 <= float <= 59:
return 1.3
if 53 <= float <= 56:
return 1
if 50 <= float <= 52:
return 0.7
if 0 <= float <= 49:
return 0
| 19.037037 | 45 | 0.525292 |
acef4af77a72217389f34a06806936e91a555019 | 39 | py | Python | any_case/contrib/__init__.py | jayvdb/any_case | 43feaebd710cbe7ab431cd163123904fbf53bbf4 | [
"MIT"
] | 2 | 2019-04-29T08:42:44.000Z | 2020-04-05T09:13:54.000Z | any_case/contrib/__init__.py | jayvdb/any_case | 43feaebd710cbe7ab431cd163123904fbf53bbf4 | [
"MIT"
] | null | null | null | any_case/contrib/__init__.py | jayvdb/any_case | 43feaebd710cbe7ab431cd163123904fbf53bbf4 | [
"MIT"
] | 2 | 2020-11-15T15:40:30.000Z | 2021-05-10T07:25:37.000Z | __all__ = ['django', 'rest_framework']
| 19.5 | 38 | 0.692308 |
acef4b00435e377390ecd18204c67d91a4df97ea | 1,825 | py | Python | tests/test_default.py | mookrs/tortoise-orm | e1421efbe81880461d298c723b7a02d4a6dc8e09 | [
"Apache-2.0"
] | null | null | null | tests/test_default.py | mookrs/tortoise-orm | e1421efbe81880461d298c723b7a02d4a6dc8e09 | [
"Apache-2.0"
] | null | null | null | tests/test_default.py | mookrs/tortoise-orm | e1421efbe81880461d298c723b7a02d4a6dc8e09 | [
"Apache-2.0"
] | null | null | null | import datetime
from decimal import Decimal
from tests.testmodels import DefaultModel
from tortoise.backends.asyncpg import AsyncpgDBClient
from tortoise.backends.mysql import MySQLClient
from tortoise.backends.sqlite import SqliteClient
from tortoise.contrib import test
class TestDefault(test.TestCase):
    async def setUp(self) -> None:
        # Seed one row relying entirely on column DEFAULTs; the INSERT syntax
        # for that differs per backend, so branch on the connection type.
        connection = self.__db__
        if isinstance(connection, MySQLClient):
            await connection.execute_query(
                "insert into defaultmodel (`int_default`,`float_default`,`decimal_default`,`bool_default`,`char_default`,`date_default`,`datetime_default`) values (DEFAULT,DEFAULT,DEFAULT,DEFAULT,DEFAULT,DEFAULT,DEFAULT)",
            )
        elif isinstance(connection, SqliteClient):
            await connection.execute_query(
                "insert into defaultmodel default values",
            )
        elif isinstance(connection, AsyncpgDBClient):
            await connection.execute_query(
                'insert into defaultmodel ("int_default","float_default","decimal_default","bool_default","char_default","date_default","datetime_default") values (DEFAULT,DEFAULT,DEFAULT,DEFAULT,DEFAULT,DEFAULT,DEFAULT)',
            )
    async def test_default(self):
        # The row inserted in setUp must come back with the model's declared
        # default for every field type.
        default_model = await DefaultModel.first()
        self.assertEqual(default_model.int_default, 1)
        self.assertEqual(default_model.float_default, 1.5)
        self.assertEqual(default_model.decimal_default, Decimal(1))
        self.assertTrue(default_model.bool_default)
        self.assertEqual(default_model.char_default, "tortoise")
        self.assertEqual(default_model.date_default, datetime.date.fromisoformat("2020-05-20"))
        self.assertEqual(
            default_model.datetime_default, datetime.datetime.fromisoformat("2020-05-20 00:00:00")
        )
| 48.026316 | 222 | 0.717808 |
acef4ba29ce51c8173cd974f5bc42b126faf427b | 906 | py | Python | tests/test_gateworker.py | joancf/python-gatenlp | 21441d72ded19e9348052e99ac5bc1fc6af7ab6e | [
"Apache-2.0"
] | 30 | 2020-04-18T12:28:15.000Z | 2022-02-18T21:31:18.000Z | tests/test_gateworker.py | joancf/python-gatenlp | 21441d72ded19e9348052e99ac5bc1fc6af7ab6e | [
"Apache-2.0"
] | 133 | 2019-10-16T07:41:59.000Z | 2022-03-31T07:27:07.000Z | tests/test_gateworker.py | joancf/python-gatenlp | 21441d72ded19e9348052e99ac5bc1fc6af7ab6e | [
"Apache-2.0"
] | 4 | 2021-01-20T08:12:19.000Z | 2021-10-21T13:29:44.000Z | """
Module to test the GateWorker and GateWorkerAnnotator
"""
import os
from gatenlp import Document
from gatenlp.utils import init_logger
from gatenlp.gateworker import GateWorker
# Module-level logger shared by the tests below.
logger = init_logger("test_gateworker")
# GateWorker requires a local GATE installation (GATE_HOME); when it is not
# set the tests below silently return instead of running.
should_exit = not os.environ.get("GATE_HOME")
if should_exit:
    logger.warning("Environment variable GATE_HOME not set, skipping tests in TestGateWorker")
def make_doc1():
    """
    Build and return a small gatenlp Document used as test input.
    """
    return Document("This is just some test document. It mentions New York.")
class TestGateWorker:
    def test_gateworker01(self):
        """
        Round-trip check: create a GATE document from text and convert it
        back to a gatenlp Document, verifying the text survives unchanged.
        """
        if should_exit:
            # No GATE installation available: silently skip.
            return
        txt = "some text"
        with GateWorker() as gw1:
            gdoc1 = gw1.createDocument(txt)
            pdoc1 = gw1.gdoc2pdoc(gdoc1)
            assert pdoc1.text == txt
| 23.230769 | 94 | 0.663355 |
acef4db86c1d71846615feb7b84124cdf980d41e | 886 | py | Python | psltdsim/plot/sysH.py | thadhaines/PSLTDSim | 1bc598f3733c1369c164f54249e5f7757e6bf466 | [
"MIT"
] | null | null | null | psltdsim/plot/sysH.py | thadhaines/PSLTDSim | 1bc598f3733c1369c164f54249e5f7757e6bf466 | [
"MIT"
] | null | null | null | psltdsim/plot/sysH.py | thadhaines/PSLTDSim | 1bc598f3733c1369c164f54249e5f7757e6bf466 | [
"MIT"
] | null | null | null | def sysH(mirror, blkFlag=True, printFigs=False):
"""Plot Pe, Pm, and F of given mirror"""
import matplotlib.pyplot as plt
import numpy as np
mir = mirror
xend = max(mir.r_t)
mini = 1 # can be increased to scale width of plots
caseName = mir.simParams['fileName'][:-1]
mins = np.array(mir.r_t)/60.0;
minEnd = max(mins)
## Plot System Frequency
fig, ax = plt.subplots()
ax.plot(mins, mir.r_Hsys,
color='black',
linewidth=1)
ax.set_title('System Inertia\n Case: ' + caseName)
ax.set_ylabel('Inertia [MW s]')
ax.set_xlabel('Time [minutes]')
ax.set_xlim(0,minEnd)
#ax.legend()
ax.grid(True)
fig.set_dpi(150)
fig.set_size_inches(9/mini, 2.5)
fig.tight_layout()
if printFigs: plt.savefig(caseName+'sysH'+'.pdf', dpi=300)
plt.show(block = blkFlag)
plt.pause(0.00001) | 26.058824 | 62 | 0.616253 |
acef4ed4990d68d643f8a7fbe4922449107d5366 | 15,730 | py | Python | openpyxl/writer/tests/test_worksheet.py | sekcheong/openpyxl | e1ba037f171efa348f75431c35a50de5ca277b78 | [
"MIT"
] | null | null | null | openpyxl/writer/tests/test_worksheet.py | sekcheong/openpyxl | e1ba037f171efa348f75431c35a50de5ca277b78 | [
"MIT"
] | null | null | null | openpyxl/writer/tests/test_worksheet.py | sekcheong/openpyxl | e1ba037f171efa348f75431c35a50de5ca277b78 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
# Copyright (c) 2010-2017 openpyxl
import datetime
import decimal
from io import BytesIO
import pytest
from openpyxl.xml.functions import fromstring, tostring, xmlfile
from openpyxl.reader.excel import load_workbook
from openpyxl import Workbook
from .. worksheet import write_worksheet
from openpyxl.tests.helper import compare_xml
from openpyxl.worksheet.properties import PageSetupProperties
from openpyxl.worksheet.dimensions import DimensionHolder
from openpyxl.xml.constants import SHEET_MAIN_NS, REL_NS
from openpyxl import LXML
@pytest.fixture
def worksheet():
    """Provide the active sheet of a fresh in-memory workbook."""
    from openpyxl import Workbook
    return Workbook().active
@pytest.fixture
def DummyWorksheet():
    # Minimal worksheet stand-in: just enough state (styles, column
    # dimensions, parent workbook) for dimension-related tests.
    class DummyWorksheet:
        def __init__(self):
            self._styles = {}
            self.column_dimensions = DimensionHolder(self)
            self.parent = Workbook()
    return DummyWorksheet()
@pytest.fixture
def ColumnDimension():
    """Expose the ColumnDimension class under test."""
    from openpyxl.worksheet.dimensions import ColumnDimension as _ColumnDimension
    return _ColumnDimension
@pytest.fixture
def write_rows():
    """Expose the etree row-serialisation helper under test."""
    from ..etree_worksheet import write_rows as _write_rows
    return _write_rows
@pytest.fixture
def etree_write_cell():
    """Cell writer backed by the stdlib ElementTree implementation."""
    from ..etree_worksheet import etree_write_cell as writer
    return writer
@pytest.fixture
def lxml_write_cell():
    """Cell writer backed by the lxml implementation."""
    from ..etree_worksheet import lxml_write_cell as writer
    return writer
@pytest.fixture(params=['etree', 'lxml'])
def write_cell_implementation(request, etree_write_cell, lxml_write_cell):
    """Run dependent tests once per writer; fall back to etree when lxml is absent."""
    wants_lxml = request.param == "lxml" and LXML
    return lxml_write_cell if wants_lxml else etree_write_cell
@pytest.mark.parametrize("value, expected",
                         [
                             (9781231231230, """<c t="n" r="A1"><v>9781231231230</v></c>"""),
                             (decimal.Decimal('3.14'), """<c t="n" r="A1"><v>3.14</v></c>"""),
                             (1234567890, """<c t="n" r="A1"><v>1234567890</v></c>"""),
                             ("=sum(1+1)", """<c r="A1"><f>sum(1+1)</f><v></v></c>"""),
                             (True, """<c t="b" r="A1"><v>1</v></c>"""),
                             ("Hello", """<c t="s" r="A1"><v>0</v></c>"""),
                             ("", """<c r="A1" t="s"></c>"""),
                             (None, """<c r="A1" t="n"></c>"""),
                             (datetime.date(2011, 12, 25), """<c r="A1" t="n" s="1"><v>40902</v></c>"""),
                         ])
def test_write_cell(worksheet, write_cell_implementation, value, expected):
    """Each supported Python value must serialise to the expected <c> element.

    Fix: removed the unused function-local import ``from openpyxl.cell import Cell``.
    """
    write_cell = write_cell_implementation

    ws = worksheet
    cell = ws['A1']
    cell.value = value

    out = BytesIO()
    with xmlfile(out) as xf:
        # cell.has_style controls whether a style index (s="...") is emitted,
        # e.g. for the date case above.
        write_cell(xf, ws, cell, cell.has_style)

    xml = out.getvalue()
    diff = compare_xml(xml, expected)
    assert diff is None, diff
def test_write_comment(worksheet, write_cell_implementation):
    """Writing a commented cell must register exactly one comment on the sheet."""
    from openpyxl.comments import Comment

    serialise = write_cell_implementation
    sheet = worksheet
    commented = sheet['A1']
    commented.comment = Comment("test comment", "test author")

    buffer = BytesIO()
    with xmlfile(buffer) as xf:
        serialise(xf, sheet, commented, False)

    assert len(sheet._comments) == 1
def test_write_formula(worksheet, write_rows):
    """Serialise plain formulae plus an array formula (with its ref/t attributes)."""
    ws = worksheet

    ws['F1'] = 10
    ws['F2'] = 32
    ws['F3'] = '=F1+F2'
    ws['A4'] = '=A1+A2+A3'
    ws['B4'] = "=SUM(A10:A14*B10:B14)"
    # Array-formula metadata lives in a side table, keyed by coordinate.
    ws.formula_attributes['B4'] = {'t': 'array', 'ref': 'B4:B8'}

    out = BytesIO()
    with xmlfile(out) as xf:
        write_rows(xf, ws)
    xml = out.getvalue()
    expected = """
    <sheetData>
      <row r="1" spans="1:6">
        <c r="F1" t="n">
          <v>10</v>
        </c>
      </row>
      <row r="2" spans="1:6">
        <c r="F2" t="n">
          <v>32</v>
        </c>
      </row>
      <row r="3" spans="1:6">
        <c r="F3">
          <f>F1+F2</f>
          <v></v>
        </c>
      </row>
      <row r="4" spans="1:6">
        <c r="A4">
          <f>A1+A2+A3</f>
          <v></v>
        </c>
        <c r="B4">
          <f ref="B4:B8" t="array">SUM(A10:A14*B10:B14)</f>
          <v></v>
        </c>
      </row>
    </sheetData>
    """
    diff = compare_xml(xml, expected)
    assert diff is None, diff
def test_write_height(worksheet, write_rows):
    """Custom row heights must be emitted even for rows with no cells (row 2)."""
    from openpyxl.worksheet.dimensions import RowDimension
    ws = worksheet
    ws['F1'] = 10

    ws.row_dimensions[1] = RowDimension(ws, height=30)
    ws.row_dimensions[2] = RowDimension(ws, height=30)

    out = BytesIO()
    with xmlfile(out) as xf:
        write_rows(xf, ws)
    xml = out.getvalue()
    expected = """
    <sheetData>
      <row customHeight="1" ht="30" r="1" spans="1:6">
        <c r="F1" t="n">
          <v>10</v>
        </c>
      </row>
      <row customHeight="1" ht="30" r="2" spans="1:6"></row>
    </sheetData>
    """
    diff = compare_xml(xml, expected)
    assert diff is None, diff
def test_get_rows_to_write(worksheet):
    """Rows that only have a dimension entry appear with an empty cell list, in order."""
    from ..etree_worksheet import get_rows_to_write

    sheet = worksheet
    sheet['A10'] = "test"
    sheet.row_dimensions[10] = None
    sheet.row_dimensions[2] = None

    expected = [(2, []), (10, [(1, sheet['A10'])])]
    assert get_rows_to_write(sheet) == expected
def test_merge(worksheet):
    """A merged range serialises as a single <mergeCell> entry."""
    from ..worksheet import write_mergecells

    sheet = worksheet
    sheet.cell('A1').value = 'Cell A1'
    sheet.cell('B1').value = 'Cell B1'
    sheet.merge_cells('A1:B1')

    generated = tostring(write_mergecells(sheet))
    expected = """
    <mergeCells count="1">
      <mergeCell ref="A1:B1"/>
    </mergeCells>
    """
    delta = compare_xml(generated, expected)
    assert delta is None, delta
def test_no_merge(worksheet):
    """Sheets without merged ranges produce no <mergeCells> element at all."""
    from ..worksheet import write_mergecells
    assert write_mergecells(worksheet) is None
def test_external_hyperlink(worksheet):
    """An external hyperlink must create a relationship and reference it via r:id."""
    from .. worksheet import write_hyperlinks
    ws = worksheet
    cell = ws['A1']
    cell.value = "test"
    cell.hyperlink = "http://test.com"
    ws._hyperlinks.append(cell.hyperlink)

    hyper = write_hyperlinks(ws)
    # The target URL lives in the sheet's relationship table, not in the XML body.
    assert len(worksheet._rels) == 1
    assert worksheet._rels["rId1"].Target == "http://test.com"

    xml = tostring(hyper.to_tree())
    expected = """
    <hyperlinks xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
      <hyperlink r:id="rId1" ref="A1"/>
    </hyperlinks>
    """
    diff = compare_xml(xml, expected)
    assert diff is None, diff
def test_internal_hyperlink(worksheet):
    """A location-only hyperlink (same workbook) needs no relationship entry."""
    from .. worksheet import write_hyperlinks
    from openpyxl.worksheet.hyperlink import Hyperlink
    ws = worksheet
    cell = ws['A1']
    cell.hyperlink = Hyperlink(ref="", location="'STP nn000TL-10, PKG 2.52'!A1")
    ws._hyperlinks.append(cell.hyperlink)

    hyper = write_hyperlinks(ws)
    xml = tostring(hyper.to_tree())
    expected = """
    <hyperlinks>
      <hyperlink location="'STP nn000TL-10, PKG 2.52'!A1" ref="A1"/>
    </hyperlinks>
    """
    diff = compare_xml(xml, expected)
    assert diff is None, diff
@pytest.mark.xfail
@pytest.mark.pil_required
def test_write_hyperlink_image_rels(Workbook, Image, datadir):
    """Known-broken case: a hyperlink plus an image on one sheet yield duplicate
    relationship ids, producing an invalid file (hence the xfail marker)."""
    datadir.chdir()
    wb = Workbook()
    ws = wb.create_sheet()
    ws.cell('A1').value = "test"
    ws.cell('A1').hyperlink = "http://test.com/"
    i = Image("plain.png")
    ws.add_image(i)
    # Deliberate failure: keeps the xfail meaningful until the rel-id bug is fixed.
    raise ValueError("Resulting file is invalid")
    # TODO write integration test with duplicate relation ids then fix
@pytest.fixture
def worksheet_with_cf(worksheet):
    """Worksheet whose conditional-formatting collection has been freshly reset.

    Fix: the attribute was previously misspelled ``conditional_formating`` (one
    't'), which silently created a new, unused attribute instead of replacing the
    worksheet's real ``conditional_formatting`` collection that the tests use.
    """
    from openpyxl.formatting.formatting import ConditionalFormattingList
    worksheet.conditional_formatting = ConditionalFormattingList()
    return worksheet
@pytest.fixture
def write_conditional_formatting():
    """Expose the conditional-formatting serialiser to the tests."""
    from ..worksheet import write_conditional_formatting as serialiser
    return serialiser
def test_conditional_formatting_customRule(worksheet_with_cf, write_conditional_formatting):
    """An expression rule serialises its formula and the stopIfTrue flag."""
    ws = worksheet_with_cf
    from openpyxl.formatting.rule import Rule

    ws.conditional_formatting.add('C1:C10',
                                  Rule(type='expression',formula=['ISBLANK(C1)'], stopIfTrue='1')
                                  )
    cfs = write_conditional_formatting(ws)
    xml = b""
    for cf in cfs:
        xml += tostring(cf)

    diff = compare_xml(xml, """
    <conditionalFormatting sqref="C1:C10">
      <cfRule type="expression" stopIfTrue="1" priority="1">
        <formula>ISBLANK(C1)</formula>
      </cfRule>
    </conditionalFormatting>
    """)
    assert diff is None, diff
def test_conditional_font(worksheet_with_cf, write_conditional_formatting):
    """Test to verify font style written correctly.

    A CellIs rule carrying a font and fill should serialise with a dxfId
    pointing at the differential style created for it (index 0 here).
    """
    # Create cf rule
    from openpyxl.styles import PatternFill, Font, Color
    from openpyxl.formatting.rule import CellIsRule

    redFill = PatternFill(start_color=Color('FFEE1111'),
                          end_color=Color('FFEE1111'),
                          patternType='solid')
    whiteFont = Font(color=Color("FFFFFFFF"))

    ws = worksheet_with_cf
    ws.conditional_formatting.add('A1:A3',
                                  CellIsRule(operator='equal',
                                             formula=['"Fail"'],
                                             stopIfTrue=False,
                                             font=whiteFont,
                                             fill=redFill))

    cfs = write_conditional_formatting(ws)
    xml = b""
    for cf in cfs:
        xml += tostring(cf)

    diff = compare_xml(xml, """
    <conditionalFormatting sqref="A1:A3">
      <cfRule operator="equal" priority="1" type="cellIs" dxfId="0" stopIfTrue="0">
        <formula>"Fail"</formula>
      </cfRule>
    </conditionalFormatting>
    """)
    assert diff is None, diff
def test_formula_rule(worksheet_with_cf, write_conditional_formatting):
    """The FormulaRule convenience wrapper serialises as a plain expression rule."""
    from openpyxl.formatting.rule import FormulaRule

    ws = worksheet_with_cf
    ws.conditional_formatting.add('C1:C10',
                                  FormulaRule(
                                      formula=['ISBLANK(C1)'],
                                      stopIfTrue=True)
                                  )
    cfs = write_conditional_formatting(ws)
    xml = b""
    for cf in cfs:
        xml += tostring(cf)

    diff = compare_xml(xml, """
    <conditionalFormatting sqref="C1:C10">
      <cfRule type="expression" stopIfTrue="1" priority="1">
        <formula>ISBLANK(C1)</formula>
      </cfRule>
    </conditionalFormatting>
    """)
    assert diff is None, diff
@pytest.fixture
def write_worksheet():
    """Expose the top-level worksheet serialiser (shadows the module-level import)."""
    from ..worksheet import write_worksheet as serialiser
    return serialiser
def test_write_empty(worksheet, write_worksheet):
    """A pristine worksheet serialises to the minimal, fully-defaulted document."""
    ws = worksheet
    xml = write_worksheet(ws)
    expected = """
    <worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
      <sheetPr>
        <outlinePr summaryRight="1" summaryBelow="1"/>
        <pageSetUpPr/>
      </sheetPr>
      <dimension ref="A1:A1"/>
      <sheetViews>
        <sheetView workbookViewId="0">
          <selection sqref="A1" activeCell="A1"/>
        </sheetView>
      </sheetViews>
      <sheetFormatPr baseColWidth="8" defaultRowHeight="15"/>
      <sheetData/>
      <pageMargins left="0.75" right="0.75" top="1" bottom="1" header="0.5" footer="0.5"/>
    </worksheet>
    """
    diff = compare_xml(xml, expected)
    assert diff is None, diff
def test_vba(worksheet, write_worksheet):
    """VBA metadata adds a sheetPr codeName and a <legacyDrawing> reference."""
    ws = worksheet
    ws.vba_code = {"codeName":"Sheet1"}
    ws.legacy_drawing = "../drawings/vmlDrawing1.vml"
    xml = write_worksheet(ws)
    expected = """
    <worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main"
    xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
      <sheetPr codeName="Sheet1">
        <outlinePr summaryBelow="1" summaryRight="1"/>
        <pageSetUpPr/>
      </sheetPr>
      <dimension ref="A1:A1"/>
      <sheetViews>
        <sheetView workbookViewId="0">
          <selection activeCell="A1" sqref="A1"/>
        </sheetView>
      </sheetViews>
      <sheetFormatPr baseColWidth="8" defaultRowHeight="15"/>
      <sheetData/>
      <pageMargins bottom="1" footer="0.5" header="0.5" left="0.75" right="0.75" top="1"/>
      <legacyDrawing r:id="anysvml"/>
    </worksheet>
    """
    diff = compare_xml(xml, expected)
    assert diff is None, diff
def test_vba_comments(datadir, write_worksheet):
    """A workbook loaded with keep_vba keeps exactly one legacyDrawing element."""
    datadir.chdir()
    workbook = load_workbook('vba+comments.xlsm', keep_vba=True)
    sheet_xml = fromstring(write_worksheet(workbook['Form Controls']))

    drawings = sheet_xml.findall('{%s}legacyDrawing' % SHEET_MAIN_NS)
    assert len(drawings) == 1, "Wrong number of legacyDrawing elements %d" % len(drawings)
    assert drawings[0].get('{%s}id' % REL_NS) == 'anysvml'
def test_write_comments(worksheet, write_worksheet):
    """The presence of any comments triggers a <legacyDrawing> element."""
    ws = worksheet
    # Truthiness is all the serialiser checks; a real list is not required here.
    worksheet._comments = True
    xml = write_worksheet(ws)
    expected = """
    <worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main"
    xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
      <sheetPr>
        <outlinePr summaryBelow="1" summaryRight="1"/>
        <pageSetUpPr/>
      </sheetPr>
      <dimension ref="A1:A1"/>
      <sheetViews>
        <sheetView workbookViewId="0">
          <selection activeCell="A1" sqref="A1"/>
        </sheetView>
      </sheetViews>
      <sheetFormatPr baseColWidth="8" defaultRowHeight="15"/>
      <sheetData/>
      <pageMargins bottom="1" footer="0.5" header="0.5" left="0.75" right="0.75" top="1"/>
      <legacyDrawing r:id="anysvml"></legacyDrawing>
    </worksheet>
    """
    diff = compare_xml(xml, expected)
    assert diff is None, diff
def test_write_drawing(worksheet):
    """Any image on the sheet yields a <drawing> element referencing rId1."""
    from ..worksheet import write_drawing

    worksheet._images = [1]
    expected = """
    <drawing xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" r:id="rId1"/>
    """
    rendered = tostring(write_drawing(worksheet))
    delta = compare_xml(rendered, expected)
    assert delta is None, delta
def test_write_tables(worksheet, write_worksheet):
    """A table registers a relationship and emits a <tableParts> section."""
    from openpyxl.worksheet.table import Table

    worksheet.append(list("ABCDEF"))
    worksheet._tables = [Table(displayName="Table1", ref="A1:D6")]

    xml = write_worksheet(worksheet)
    # The table itself is written to a separate part; the sheet only keeps a rel.
    assert len(worksheet._rels) == 1
    expected = """
    <worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
      <sheetPr>
        <outlinePr summaryRight="1" summaryBelow="1"/>
        <pageSetUpPr/>
      </sheetPr>
      <dimension ref="A1:F1"/>
      <sheetViews>
        <sheetView workbookViewId="0">
          <selection sqref="A1" activeCell="A1"/>
        </sheetView>
      </sheetViews>
      <sheetFormatPr baseColWidth="8" defaultRowHeight="15"/>
      <sheetData>
        <row r="1" spans="1:6">
          <c r="A1" t="s">
            <v>0</v>
          </c>
          <c r="B1" t="s">
            <v>1</v>
          </c>
          <c r="C1" t="s">
            <v>2</v>
          </c>
          <c r="D1" t="s">
            <v>3</v>
          </c>
          <c r="E1" t="s">
            <v>4</v>
          </c>
          <c r="F1" t="s">
            <v>5</v>
          </c>
        </row>
      </sheetData>
      <pageMargins left="0.75" right="0.75" top="1" bottom="1" header="0.5" footer="0.5"/>
      <tableParts count="1">
        <tablePart r:id="rId1" />
      </tableParts>
    </worksheet>
    """
    diff = compare_xml(xml, expected)
    assert diff is None, diff
| 28.968692 | 159 | 0.599046 |
acef510d208ae523dc46f273a6fece89874d67bf | 4,324 | py | Python | edr/edsmserver.py | blacksurgeon/edr | 809b30a0247961f6b92a968696afa4383c867b5e | [
"Apache-2.0"
] | null | null | null | edr/edsmserver.py | blacksurgeon/edr | 809b30a0247961f6b92a968696afa4383c867b5e | [
"Apache-2.0"
] | null | null | null | edr/edsmserver.py | blacksurgeon/edr | 809b30a0247961f6b92a968696afa4383c867b5e | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
import json
from edrconfig import EDRConfig
from edrlog import EDRLog
import requests
EDRLOG = EDRLog()  # module-level logger shared by the EDSMServer client below
class EDSMServer(object):
    """Minimal client for the EDSM (Elite Dangerous Star Map) REST API.

    Fix: the seven lookup methods previously duplicated the same
    GET / status-check / ``json.loads`` boilerplate; it is now factored into
    ``_get_json``.  Public method signatures, return values and the original
    per-endpoint log messages are unchanged.
    """

    # One shared session gives connection pooling across all instances.
    SESSION = requests.Session()

    def __init__(self):
        config = EDRConfig()
        self.EDSM_API_KEY = config.edsm_api_key()
        self.EDSM_SERVER = config.edsm_server()

    def _get_json(self, api_path, params, failure_template, *failure_args):
        """GET ``<EDSM_SERVER><api_path>`` and return the decoded JSON payload.

        On a non-200 status, logs ``failure_template`` formatted with
        ``failure_args`` followed by the HTTP status code (matching the
        original per-endpoint messages) and returns None.
        """
        endpoint = "{}{}".format(self.EDSM_SERVER, api_path)
        resp = EDSMServer.SESSION.get(endpoint, params=params)
        if resp.status_code != requests.codes.ok:
            EDRLOG.log(failure_template.format(*failure_args, resp.status_code), "ERROR")
            return None
        return json.loads(resp.content)

    def system(self, system_name):
        """Return basic info (coords, id, state) for a system, or None on failure."""
        params = {"systemName": system_name, "showCoordinates": 1, "showInformation": 1, "showId": 1}
        return self._get_json("/api-v1/systems", params,
                              u"Failed to retrieve system {} from EDSM: {}.", system_name)

    def bodies(self, system_name):
        """Return the list of celestial bodies in a system, or None on failure."""
        params = {"systemName": system_name}
        system_and_bodies = self._get_json("/api-system-v1/bodies", params,
                                           u"Failed to retrieve bodies for {} from EDSM: {}.", system_name)
        if system_and_bodies is None:
            return None
        return system_and_bodies.get("bodies", None)

    def systems_within_radius(self, system_name, radius):
        """Return systems within `radius` of `system_name`, sorted by distance.

        Returns [] when the API reports no systems and None on HTTP failure.
        """
        params = {"systemName": system_name, "showCoordinates": 1, "radius": radius,
                  "showInformation": 1, "showId": 1, "showPermit": 1}
        results = self._get_json("/api-v1/sphere-systems", params,
                                 u"Failed to retrieve system {} from EDSM: {}.", system_name)
        if results is None:
            return None
        if not results:
            EDRLOG.log(u"Empty systems within radius.", "INFO")
            return []
        return sorted(results, key=lambda t: t["distance"])

    def stations_in_system(self, system_name):
        """Return the system's stations sorted by distance to arrival.

        Returns [] when there are no stations and None on HTTP failure.
        """
        params = {"systemName": system_name}
        # NOTE: the failure message intentionally mirrors the original wording
        # ("system" rather than "stations") to keep log output identical.
        results = self._get_json("/api-system-v1/stations", params,
                                 u"Failed to retrieve system {} from EDSM: {}.", system_name)
        if results is None:
            return None
        if not results.get('stations', None):
            EDRLOG.log(u"No stations in system {}.".format(system_name), "INFO")
            return []
        return sorted(results['stations'], key=lambda t: t["distanceToArrival"])

    def factions_in_system(self, system_name):
        """Return faction/state info for a system, or None on failure."""
        params = {"systemName": system_name}
        return self._get_json("/api-system-v1/factions", params,
                              u"Failed to retrieve state for system {} from EDSM: {}.", system_name)

    def deaths(self, system_name):
        """Return CMDR death statistics for a system, or None on failure."""
        params = {"systemName": system_name}
        return self._get_json("/api-system-v1/deaths", params,
                              u"Failed to retrieve deaths info for {} from EDSM: {}.", system_name)

    def traffic(self, system_name):
        """Return traffic statistics for a system, or None on failure."""
        params = {"systemName": system_name}
        return self._get_json("/api-system-v1/traffic", params,
                              u"Failed to retrieve traffic info for {} from EDSM: {}.", system_name)
acef51350657abff4b6e31b425d28645c54e7d00 | 8,805 | py | Python | venv/lib/python3.5/site-packages/engineio/asyncio_socket.py | LavanyaRamkumar/Networking-App_Dynamic-Quiz | 4de8329845712864d3cc8e8b81cfce5a1207224d | [
"MIT"
] | 1 | 2021-06-06T04:10:44.000Z | 2021-06-06T04:10:44.000Z | venv/lib/python3.5/site-packages/engineio/asyncio_socket.py | LavanyaRamkumar/Networking-App_Dynamic-Quiz | 4de8329845712864d3cc8e8b81cfce5a1207224d | [
"MIT"
] | 2 | 2021-02-08T20:23:00.000Z | 2021-04-30T20:40:25.000Z | backend/venv/lib/python3.5/site-packages/engineio/asyncio_socket.py | Siskat/Hira | cf0410b564d02c7647cbbb868102089fcd2884c3 | [
"MIT"
] | 1 | 2019-10-26T04:20:52.000Z | 2019-10-26T04:20:52.000Z | import asyncio
import six
import sys
import time
from . import exceptions
from . import packet
from . import payload
from . import socket
class AsyncSocket(socket.Socket):
    """Asyncio flavour of the Engine.IO server-side socket.

    Mirrors the threaded ``socket.Socket`` but uses an ``asyncio.Queue`` for
    outbound packets and coroutines for polling, sending and the websocket
    transport upgrade.
    """

    def create_queue(self):
        # Outbound packet queue; a None sentinel placed on it means "shut down".
        return asyncio.Queue()

    async def poll(self):
        """Wait for packets to send to the client.

        Blocks up to ``ping_timeout`` for one packet, then drains at most one
        extra packet opportunistically so they can be batched in one payload.
        Raises QueueEmpty on timeout/cancellation; returns [] when the None
        shutdown sentinel is received.
        """
        try:
            packets = [await asyncio.wait_for(self.queue.get(),
                                              self.server.ping_timeout)]
            self.queue.task_done()
        except (asyncio.TimeoutError, asyncio.CancelledError):
            raise exceptions.QueueEmpty()
        if packets == [None]:
            return []
        try:
            packets.append(self.queue.get_nowait())
            self.queue.task_done()
        except asyncio.QueueEmpty:
            pass
        return packets

    async def receive(self, pkt):
        """Receive packet from the client and dispatch on its packet type.

        PING -> record liveness and answer PONG; MESSAGE -> fire the server's
        'message' event; UPGRADE -> answer NOOP; CLOSE -> abort-close the
        socket; anything else raises UnknownPacketError.
        """
        self.server.logger.info('%s: Received packet %s data %s',
                                self.sid, packet.packet_names[pkt.packet_type],
                                pkt.data if not isinstance(pkt.data, bytes)
                                else '<binary>')
        if pkt.packet_type == packet.PING:
            self.last_ping = time.time()
            await self.send(packet.Packet(packet.PONG, pkt.data))
        elif pkt.packet_type == packet.MESSAGE:
            await self.server._trigger_event(
                'message', self.sid, pkt.data,
                run_async=self.server.async_handlers)
        elif pkt.packet_type == packet.UPGRADE:
            await self.send(packet.Packet(packet.NOOP))
        elif pkt.packet_type == packet.CLOSE:
            await self.close(wait=False, abort=True)
        else:
            raise exceptions.UnknownPacketError()

    async def send(self, pkt):
        """Send a packet to the client by queueing it for the transport.

        Raises SocketIsClosedError if already closed; if no PING has arrived
        within ping_timeout the client is presumed gone and the socket is
        abort-closed instead of queueing the packet.
        """
        if self.closed:
            raise exceptions.SocketIsClosedError()
        if time.time() - self.last_ping > self.server.ping_timeout:
            self.server.logger.info('%s: Client is gone, closing socket',
                                    self.sid)
            return await self.close(wait=False, abort=True)
        self.server.logger.info('%s: Sending packet %s data %s',
                                self.sid, packet.packet_names[pkt.packet_type],
                                pkt.data if not isinstance(pkt.data, bytes)
                                else '<binary>')
        await self.queue.put(pkt)

    async def handle_get_request(self, environ):
        """Handle a long-polling GET request from the client.

        If the request carries an Upgrade header for a supported transport,
        delegate to the matching ``_upgrade_<transport>`` coroutine; otherwise
        poll for outbound packets, closing the socket on queue timeout.
        """
        connections = [
            s.strip()
            for s in environ.get('HTTP_CONNECTION', '').lower().split(',')]
        transport = environ.get('HTTP_UPGRADE', '').lower()
        if 'upgrade' in connections and transport in self.upgrade_protocols:
            self.server.logger.info('%s: Received request to upgrade to %s',
                                    self.sid, transport)
            return await getattr(self, '_upgrade_' + transport)(environ)
        try:
            packets = await self.poll()
        except exceptions.QueueEmpty:
            # Poll timed out: close the socket, then re-raise with the
            # original traceback (six.reraise keeps py2/py3 compatibility).
            exc = sys.exc_info()
            await self.close(wait=False)
            six.reraise(*exc)
        return packets

    async def handle_post_request(self, environ):
        """Handle a long-polling POST request from the client.

        Rejects bodies larger than max_http_buffer_size, otherwise decodes the
        payload and feeds each packet through ``receive``.
        """
        length = int(environ.get('CONTENT_LENGTH', '0'))
        if length > self.server.max_http_buffer_size:
            raise exceptions.ContentTooLongError()
        else:
            body = await environ['wsgi.input'].read(length)
            p = payload.Payload(encoded_payload=body)
            for pkt in p.packets:
                await self.receive(pkt)

    async def close(self, wait=True, abort=False):
        """Close the socket connection.

        Fires the 'disconnect' event once; unless aborting, sends a CLOSE
        packet first.  With ``wait=True`` blocks until the outbound queue has
        been fully drained.
        """
        if not self.closed and not self.closing:
            self.closing = True
            await self.server._trigger_event('disconnect', self.sid)
            if not abort:
                await self.send(packet.Packet(packet.CLOSE))
            self.closed = True
            if wait:
                await self.queue.join()

    async def _upgrade_websocket(self, environ):
        """Upgrade the connection from polling to websocket.

        Builds the async-mode-specific websocket wrapper around
        ``_websocket_handler`` and hands it the WSGI/ASGI environ.
        """
        if self.upgraded:
            raise IOError('Socket has been upgraded already')
        if self.server._async['websocket'] is None or \
                self.server._async['websocket_class'] is None:
            # the selected async mode does not support websocket
            return self.server._bad_request()
        websocket_class = getattr(self.server._async['websocket'],
                                  self.server._async['websocket_class'])
        ws = websocket_class(self._websocket_handler)
        return await ws(environ)

    async def _websocket_handler(self, ws):
        """Engine.IO handler for websocket transport.

        For an upgrade from polling, first runs the PING(probe)/PONG(probe)/
        UPGRADE handshake.  Then a background writer task drains the outbound
        queue while this coroutine reads inbound frames until the connection
        ends, finally unblocking the writer and closing the socket.
        """
        if self.connected:
            # the socket was already connected, so this is an upgrade
            await self.queue.join()  # flush the queue first

            pkt = await ws.wait()
            if pkt != packet.Packet(packet.PING,
                                    data=six.text_type('probe')).encode(
                    always_bytes=False):
                self.server.logger.info(
                    '%s: Failed websocket upgrade, no PING packet', self.sid)
                return
            await ws.send(packet.Packet(
                packet.PONG,
                data=six.text_type('probe')).encode(always_bytes=False))
            await self.send(packet.Packet(packet.NOOP))

            pkt = await ws.wait()
            decoded_pkt = packet.Packet(encoded_packet=pkt)
            if decoded_pkt.packet_type != packet.UPGRADE:
                self.upgraded = False
                self.server.logger.info(
                    ('%s: Failed websocket upgrade, expected UPGRADE packet, '
                     'received %s instead.'),
                    self.sid, pkt)
                return
            self.upgraded = True
        else:
            self.connected = True
            self.upgraded = True

        # start separate writer thread
        async def writer():
            # Pump the outbound queue onto the websocket until the queue
            # reports shutdown or any send error occurs.
            while True:
                packets = None
                try:
                    packets = await self.poll()
                except exceptions.QueueEmpty:
                    break
                if not packets:
                    # empty packet list returned -> connection closed
                    break
                try:
                    for pkt in packets:
                        await ws.send(pkt.encode(always_bytes=False))
                except:
                    break
        writer_task = asyncio.ensure_future(writer())

        self.server.logger.info(
            '%s: Upgrade to websocket successful', self.sid)

        while True:
            p = None
            wait_task = asyncio.ensure_future(ws.wait())
            try:
                p = await asyncio.wait_for(wait_task, self.server.ping_timeout)
            except asyncio.CancelledError:  # pragma: no cover
                # there is a bug (https://bugs.python.org/issue30508) in
                # asyncio that causes a "Task exception never retrieved" error
                # to appear when wait_task raises an exception before it gets
                # cancelled. Calling wait_task.exception() prevents the error
                # from being issued in Python 3.6, but causes other errors in
                # other versions, so we run it with all errors suppressed and
                # hope for the best.
                try:
                    wait_task.exception()
                except:
                    pass
                break
            except:
                break
            if p is None:
                # connection closed by client
                break
            if isinstance(p, six.text_type):  # pragma: no cover
                p = p.encode('utf-8')
            pkt = packet.Packet(encoded_packet=p)
            try:
                await self.receive(pkt)
            except exceptions.UnknownPacketError:
                pass
            except exceptions.SocketIsClosedError:
                self.server.logger.info('Receive error -- socket is closed')
                break
            except:  # pragma: no cover
                # if we get an unexpected exception we log the error and exit
                # the connection properly
                self.server.logger.exception('Unknown receive error')

        await self.queue.put(None)  # unlock the writer task so it can exit
        await asyncio.wait_for(writer_task, timeout=None)

        await self.close(wait=True, abort=True)
| 41.14486 | 79 | 0.546508 |
acef51d46c343a90a43773e795692cd3670df967 | 978 | py | Python | isi_sdk_8_0_1/test/test_ads_provider_domains_domain.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_8_0_1/test/test_ads_provider_domains_domain.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_8_0_1/test/test_ads_provider_domains_domain.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 4
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_0_1
from isi_sdk_8_0_1.models.ads_provider_domains_domain import AdsProviderDomainsDomain # noqa: E501
from isi_sdk_8_0_1.rest import ApiException
class TestAdsProviderDomainsDomain(unittest.TestCase):
    """Unit test stubs for the AdsProviderDomainsDomain model."""

    def setUp(self):
        """No fixtures are required for these stubs."""

    def tearDown(self):
        """Nothing to clean up."""

    def testAdsProviderDomainsDomain(self):
        """Test AdsProviderDomainsDomain.

        FIXME: construct the object with its mandatory attributes set to
        example values, e.g.::

            model = isi_sdk_8_0_1.models.ads_provider_domains_domain.AdsProviderDomainsDomain()  # noqa: E501
        """
if __name__ == '__main__':
    # Allow the test module to be executed directly via the unittest CLI runner.
    unittest.main()
| 23.853659 | 107 | 0.728016 |
acef51d77fb73b8a086daa030c4d26dfdcc487bd | 42,019 | py | Python | cca4.py | CausalCog/Solution_Binding_Problem | 0b177f4ef90f92b07b8805b1c2c65d6cfb22d256 | [
"Apache-2.0"
] | null | null | null | cca4.py | CausalCog/Solution_Binding_Problem | 0b177f4ef90f92b07b8805b1c2c65d6cfb22d256 | [
"Apache-2.0"
] | null | null | null | cca4.py | CausalCog/Solution_Binding_Problem | 0b177f4ef90f92b07b8805b1c2c65d6cfb22d256 | [
"Apache-2.0"
] | 1 | 2021-11-11T16:09:09.000Z | 2021-11-11T16:09:09.000Z | #!/usr/bin/env python
# pylint: disable=line-too-long
'''in_use_do_not_archive
cca4.py
Causal Cognitive Architecture 4 (CCA4)
Sept 2021 rewrite for CSR Manuscript
-- Demonstrate architecture
-- Link to equations in the CSR Manuscript
-- Allow users to run on normal Win or Linux system without GPU
-- Purpose is to show reader what steps the Causal Cognitive Architecture
is taking, how it is accomplishing them, etc
-- Swap in full code later for CCA3 --> CCA4 transition
Notes:
-please see old code notes for the voluminous changes from mbca versions to cca version to version
-please see the following papers for the theory behind the cca3 -- it has been removed from the codebase here
so that actual code does not get overwhelmed with the documentation:
Schneider, H.: The Meaningful-Based Cognitive Architecture Model of Schizophrenia.
Cognitive Systems Research 59:73-90 (2020).
Schneider, H.: Causal cognitive architecture 1: Integration of connectionist elements into a navigation-based framework.
Cognitive Systems Research 66:67-81 (2021).
Schneider, H.: Causal Cognitive Architecture 2: A Solution to the Binding Problem, pending
Schneider, H.: Causal Cognitive Architecture 3: A Solution to the Binding Problem, pending
Notes:
-regarding even older deprecation transition notes:
"nano"/"micro"/"milli"/"full" MBCA coarse/fine grain simulations deprecated code left in some areas still
-November 2019 G12/H12 versions MBLS/MBCA being transitioned to Causal Cognitive Architecture 1
#
overview of cca4.py:
if __name__ == '__main__': main_eval():
-instantiations of data and method structures g, d, h
-loop:
choose species simulation (lamprey to augmented human)
choose first envrt which sets up instinctive primitives
main_mech.cycles()
print_event_log_memory()
clear memory -- re-instantiation of d, h (g persists between scenes)
if not run_again(): break loop and end
-->else loops again for new envrt --^
#
requirements.txt:
#environment:
python 3.9 including standard library
-at this time, not all dependencies will run in other versions, e.g., python 3.10
-please use or create venv with these exact versions
-tested in windows terminal but code should optionally bypass windows-specific os calls if run on other platforms
-please post platform issues as not tested yet on other platforms
#python original source code:
cca4.py #hyperparameters main_eval() for top level simulation runs
main_mech.py #cycles() is effective main() of evaluation cycle
ddata.py #class MapData --> 'd'
gdata.py #class MultipleSessionsData --> 'g'
hdata.py #class NavMod --> 'h'
constants.py #constants only
#pypi packages:
pypi.org: fuzzywuzzy #use for cca4.py to avoid need for gpu's, nn
pypi.org: numpy #ver 1.19.3 to ensure compatibility with python 3.9
pypi.org: colorama, pyfiglet, termcolor #for ascii art printing
pypi.org: "pip install types-termcolor" or "mypy --install-types" #install stub packages
#optional -- code will still run without these modules or libraries
optional: *.jpg #images to display; at present in working directory; to deprecate
optional: cca3_images folder #images to display, download from specified github
optional: pypi.org: icecream #for debugging convenience
optional: pypi.org: python-Levenshtein-01.12.2 #to speed up fuzzywuzzy
optional: visual c++ #required by python-Levenshtein-01.12.2
optional: pytorch 1.9
optional: cuda 11.4
'''
##START PRAGMAS
#
# pylint: disable=line-too-long
# prefer to take advantage of longer line length of modern monitors, even with multiple windows
# pylint: disable=invalid-name
# prefer not to use snake_case style for very frequent data structure or small temp variables
# pylint: disable=bare-except
# prefer in some code areas to catch any exception rising
# pylint: disable=too-many-branches
# pylint: disable=too-many-statements
# prefer to use comfortable number of branches and statements, especially in user menu communication
# pylint: disable=too-many-arguments
# prefer use pass full set of objects g, d, h, m, c, a to/from some methods
# other style notes:
# ------------------
# -before each method it is ok to have comments giving a roadmap of where this method is being called from;
# these were added to hdata methods after the methods were created to aid in the readability of the code
# in further development work, and found to be helpful as such (can consider putting within the docstring
# in the future, ie, __doc__ will include, but for now, seem to work well in reading the code)
#
##END PRAGMAS
## START IMPORTS START IMPORTS
#
##standard imports -- being used by this module
try:
import logging
import pdb
import sys
import platform
import os.path
import random
# import time
# import copy
# from PIL import Image # type: ignore
except ImportError:
print("\nprogram will end -- start module of causal cog arch unable to import standard lib module")
print("please ensure correct version of python can be accessed")
sys.exit()
##PyPI imports -- being used by this module
try:
#import numpy as np # type: ignore
# justification: AwesomePython 9.7, L1 code quality
from icecream import ic # type: ignore
ic('remember to disable icecream (here and other modules) for production code')
# justification: Awesome rating 7.9
# style note: for quick debugging, otherwise logging or create 'verbose' runtime
import colorama # type: ignore
# justification: AwesomePython 6.7
import pyfiglet # type: ignore
# justification: AwesomePython 4.4
import termcolor
from termcolor import colored
# justification: AwesomePython not rated; pypi stable status, 1.1K dependent packages
except ImportError:
print("\nprogram will end -- start module of the causal cog arch unable to import a PyPI module")
print("please check requirements.txt and install all required dependencies")
sys.exit()
##non-PyPI third-party imports -- being used by this module
try:
pass
# justification/ Awesome/LibHunt ratings for non-pypi imports: n/a
except ImportError:
print("program will end -- start module of the causal cog arch unable to import a third-party module")
print("please check requirements.txt and install all required dependencies")
sys.exit()
##CCA3 module imports -- being used by this module
try:
from constants import LIFESPAN, BINDING, SAVE_RECALL_TO_FROM_STORAGE
import gdata
import ddata
import hdata
import main_mech
# import eval_micro #June 2021 deprecated
# import eval_milli #June 2021 deprecated
# import palimpsest #nb without GPU will use excessive resources
except ImportError:
print("program will end -- start module unable to import a causal cognitive architecture module")
print("please check requirements.txt and install all required dependencies")
sys.exit()
#
#
##END IMPORTS END IMPORTS
##START METHODS START METHODS
#
def welcome(g) -> bool:
    '''in_use_do_not_archive
    CCA3 ver
    Print the welcome message and overview screens, then display the
    architecture diagrams.

    input parameters:
        g -- session-wide data/methods object (supplies fast_input,
             large_letts_display and show_architecture_related helpers)
    returns:
        False if g.fastrun is set (all messages are skipped);
        otherwise the return value of the last
        g.show_architecture_related() call
    '''
    # in fast-run (development) mode skip all of the interactive intro screens
    if g.fastrun:
        return False
    print('''
    CCA3 -- Causal Cognitive Architecture 3 -- Simulation
    CCA3 Demonstration Version with References to Equations of
    manuscript: 'A Solution to the Binding Problem: Causal Cognitive Architecture 3 (CCA3)'
    Pattern recognition via FuzzyWuzzy instead of ANN, thus no GPU required
    Schneider, H.: The Meaningful-Based Cognitive Architecture Model of Schizophrenia.
    Cognitive Systems Research 59:73-90 (2020)
    Schneider, H.: Causal Cognitive Architecture 1 (CCA1): Integration of Connectionist Elements into a
    Navigation-Based Framework. Cognitive Systems Research 66:67-81 (2021)
    Schneider, H.: Causal Cognitive Architecture 2 (CCA2): A Solution to the Binding Problem, BICA*AI 2021, in press
    Schneider, H.: A Solution to the Binding Problem: Causal Cognitive Architecture 3 (CCA3), Cognitive Systems Research, in press
    ''')
    g.fast_input("\nPress ENTER to continue...\n")
    g.large_letts_display("OVERVIEW")
    print('''
    OVERVIEW OF THIS SIMULATION PROGRAM
    -----------------------------------
    1. In this simulation first you will be asked to specify some of the hyperparameters in terms of loosely analogous
    animal equivalents. For example, you can specify a "reptile hippocampal/pallium analogue."
    [Note: Augmented human brain features may or may not be available (depending on version) but are simply for
    development purposes, with no claims of superintelligence or AGI being made.]
    2. The specified brain is then automatically embedded into a robot body. The robot + the CCA3 architecture are
    called "CCA3 robot" or just "CCA3" -- thus, when you see "robot" or "CCA3" think of a robot body being
    controlled by a CCA3 architecture.
    [CCA3 really refers to the architecture controlling the robot body, but for convenience
    we simply call the whole thing the "CCA3" or the "robot" or the "CCA3 robot."]
    [At this time, you do not have any options with regard to the virtual embodiment specifications. Assume a
    generic-like humanoid body with the ability for sensation, locomotion and ability to manipulate objects.]
    [A low-level pyboard version exists in the palimpsest code for interface to a real world embodiment, but
    currently the CCA3 code and libraries need mods for functional compatibility with MicroPython.]
    ''')
    g.fast_input("\nPress ENTER to continue...\n")
    g.large_letts_display("OVERVIEW 2")
    print('''
    3. Then you will be asked to specify the first scene (i.e., really the first environment)
    your newly manufactured robot sees and senses. (Note there can be many sensory scenes one after the other,
    taking place in an environment. For example, the 'PATIENT' environment starts off with a first scene in
    a hospital room with the robot seeing a patient with a walker. A number of sensory scenes occur after that
    first one as the patient asks the robot for a glass of water.)
    4. After a simulation in an environment is over, i.e., your robot succeeded or failed or time ran
    out, your CCA3 robot can move onto the next environment. Usually its brain and memory will remain intact with
    the previous memories. (Continual learning occurs in the core memory systems of the robot -- moving onto a new
    scene and learning new memories will not affect the old ones, as often occurs in traditional neural networks.)
    If for some reason the robot was physically damaged (e.g., simulation where robot was a search and rescue robot)
    it will automatically be repaired when moving onto the next scene.
    Although memory is usually kept intact, you do have the option of having the robot's brain erased of previous learning
    experiences (sometimes useful if you want to try out a scene again without any prior memories). As well, you also
    can choose a different simulation animal analogue (e.g., lamprey to human).
    5. Afer an environment, you can decide at this point if you want to move to another environment (i.e., another simulation
    in that environment), repeat the same environment, or end the program.
    ''')
    g.fast_input("\nPress ENTER to continue...\n")
    # show images related to architecture
    # temporary code and positioning for now; consider captions and driving
    # code from a store of images and text in a future version
    g.large_letts_display("DIAGRAMS")
    # ret_value is overwritten each time; only the last display's result is returned
    ret_value = g.show_architecture_related("cca3_architecture.jpg", "CCA3 architecture")
    ret_value = g.show_architecture_related("binding_spatial_features.jpg", "spatial binding in CCA3")
    ret_value = g.show_architecture_related("binding_temporal.jpg", "temporal binding in CCA3")
    return ret_value
def choose_simulation(g: gdata.MultipleSessionsData, h: hdata.NavMod, m: hdata.MapFeatures):
    '''in_use_do_not_archive
    CCA3 ver
    Before the evaluation cycles of a simulation run start, the user chooses
    which brain simulation to run. The hyperparameters are wrapped in loosely
    analogous biological equivalents, e.g., the features of a fish brain versus
    a human brain.
    ('Hyperparameters' in the sense that they cannot be inferred but specify an
    architecture we want to evaluate; from a Bayesian point of view, a given set
    of priors plus the range of algorithms manipulating those priors. Future
    models will consider automatic setting of the hyperparameters, but they
    should be considered static in the current simulation.)
    Note: Augmented human brain features may be available but are simply for
    development purposes, with no claims of superintelligence, AGI, etc.
    After a run ends the CCA3 robot's body is refurbished as a new robot; the
    user may keep its brain (memories) intact (choice 0) or refurbish the brain
    as well (any other choice re-instantiates h and m).

    input parameters:
        g -- session-wide data/methods (persists between runs)
        h -- navigation module data (may be re-instantiated here)
        m -- map features data (may be re-instantiated here)
    returns:
        h, m  (returned since both may be modified/re-instantiated here)
    '''
    # display introductory material only before the first run
    if g.mission_counter > 1:
        g.large_letts_display("start envr't\nrun # " + str(g.mission_counter), g.mission_counter)
        print(f"new environment {g.mission_counter} is now starting....\n")
    else:
        # print out computing environment and program title/image
        os.system("cls")
        g.large_letts_display("Computing Environment\n")
        computing_evnrt(h)
        input("Press ENTER to continue....")
        os.system("cls")
        try:
            # banner is purely cosmetic -- any display failure falls back to plain text
            color_letts = ["white", "red", "green", "cyan", "blue", "white", "white", "magenta"][random.randint(0, 7)]
            colorama.init(strip=not sys.stdout.isatty())  # do not use colors if stdout redirected
            termcolor.cprint(pyfiglet.figlet_format("\n   CCA3"), color_letts, attrs=["bold"])
        except Exception:
            print("CCA3")
            print("nb color image did not display\n")
        # print out welcome message (skipped internally when g.fastrun)
        welcome(g)
        runs_cycles_message(g)
        g.fast_input("\nPress ENTER to start the simulation....")
        g.large_letts_display("run # " + str(g.mission_counter), g.mission_counter)
    print(colored('Equations in the CCA3 Binding paper are for one "evaluation cycle"', 'cyan'))
    print(colored('i.e., processing cycle, or just "cycle"', 'cyan'))
    print(colored('"Runs" refer to a new environment of input sensory scene. Equations are the same regardless of scene.', 'cyan'))
    g.fast_input("\nPress ENTER to continue....\n")
    g.large_letts_display("enter hyper-\nparameters:")
    g.large_letts_display("brain type")
    print(colored('Equations in the CCA3 Binding paper assume "Human-like brain"', 'cyan'))
    # print out simulation (ie, hyperparameter) choices
    print('''
    CHOOSE BRAIN SPECIFICATIONS\n
    Please choose type of "hippocampus"/"brain" which, of course, only loosely
    approximates the biological equivalent (you are effectively setting hyperparameters here):
    0. SAME AS LAST ENVIRONMENT, DO NOT ERASE/REFURBISH THE MEMORY
    1. Lamprey-like brain analogue
    2. Fish-like brain
    3. Reptile-like brain
    4. Mammalian-like brain - note: meaningfulness, precausal
    5. Human-like brain - note: meaningfulness plus full causal features
    6. Augmented Human level 1 - simultaneous application of multiple primitives
    7. Augmented Human level 2 - enhanced generative abilities
    ''')
    if g.mission_counter > 1:
        print(f"Previous environment values: hippocampus was {h.current_hippocampus}, and meaningfulness was {h.meaningfulness}.")
    # input choice; fastrun silently keeps the previous (or default) brain
    if g.fastrun:
        b_b = 0
    else:
        try:
            b_b = int(input("Please make a selection:"))
        except (ValueError, EOFError):
            # narrow catch: bad/absent input falls back to 'same as last environment'
            print("\n**ENTER or nonstandard input**, therefore will default to the previous environment selection.")
            b_b = 0
    if b_b not in range(0, 8):
        print("Default causal human hippocampus selected.")
        b_b = 5
    if b_b == 0:
        # keep current brain unchanged; if this is the first environment there is
        # no previous brain, so default to 'HUMAN'
        if g.mission_counter <= 1:
            print("No previous scenes to retrieve robot from. (No copies kept in local or network storage.)")
            print("Thus, this is actually a brand new robot, rather than a refurbished robot.")
            print("\nWill default at this time to a brain with associative, precausal and some genuine")
            print("robust causal features. Given a mammalian brain, meaningfulness is present.\n")
            h.current_hippocampus = "HUMAN"
            h.meaningfulness = True
        else:
            print("**CCA3 robot body is refurbished but its brain including memory is left unchanged**")
            print("current_hippocampus remains as: ", h.current_hippocampus, " and meaningfulness remains as: ", h.meaningfulness)
        return h, m  # other portions of the code actually modify h so it is returned
    # for other choices, the CCA3 brain is refurbished, thus hdata is re-instantiated
    # ddata is re-instantiated within the main_eval loop, while gdata persists between scenes
    h = hdata.NavMod()
    m = hdata.MapFeatures()
    #c = hdata.CognitiveMapFeatures()
    #a = hdata.AugmentedMapFeatures()
    if b_b == 1:
        # 'LAMPREY' -- defaults to a quasi-skewed walk
        print("\nWill default at this time to a quasi-skewed walk.")
        print("Current status is clean functional simulation to allow future versions of the software")
        print("to have more authentic and sophisticated components.\n")
        h.current_hippocampus = "LAMPREY"
        h.meaningfulness = False
    elif b_b == 2:
        # 'FISH' --> reverts to 'LAMPREY'
        print("\nWill revert at this time to lamprey pallium analogue.")
        print("Future versions of the software will have more fish functional components.")
        print("Note that fish brain does not allow meaningfulness.\n")
        h.current_hippocampus = "LAMPREY"
        h.meaningfulness = False
    elif b_b == 3:
        # 'REPTILE'
        print("\nWill default at this time to simple pallium analogue with some precausal features")
        print("Note that reptilian brain does not allow meaningfulness.\n")
        h.current_hippocampus = "REPTILE"
        h.meaningfulness = False
    elif b_b == 4:
        # 'MAMMAL' --> reverts to 'REPTILE' (but with meaningfulness enabled)
        print("\nWill revert at this time to reptile pallium analogue. Important evolutionary and")
        print("conceptual advances in the mammalian brain to be put in coming versions of the software.")
        print("However, given mammalian brain, meaningfulness is present.\n")
        h.current_hippocampus = "REPTILE"
        h.meaningfulness = True
    elif b_b == 5:
        # 'HUMAN'
        print("\nWill default at this time to a brain with associative, precausal and some genuine")
        print("robust causal features. Given a mammalian brain, meaningfulness is present.\n")
        h.current_hippocampus = "HUMAN"
        h.meaningfulness = True
    elif b_b == 6:
        # 'SUPERINTELLIGENCE' --> reverts to 'HUMAN'
        print("\nWill default at this time to a simplified human brain with some associative,")
        print("precausal and some genuine causal features. However, enhanced pattern recognition")
        print("abilities as well as enhanced algorithms for logical operations on the navigation maps.")
        print("Of importance, there are multiple full navigation modules in this simulation communicating with")
        print("each other, and allowing simultaneous application of multiple primitives, i.e., not just recognition")
        print("and testing of inputs against multiple navigation maps, but full simultaneous processing of")
        print("effectively multiple hypotheses of processing an input. This is for development purposes, and")
        print("no claim of superintelligence is made. Given supra-mammalian brain, meaningfulness is present.")
        print("*Superintelligence features not implemented at present. Reverting to human hippocampus.*\n")
        h.current_hippocampus = "HUMAN"
        h.meaningfulness = True
    elif b_b == 7:
        # 'SUPERINTELLIGENCE2' --> reverts to 'HUMAN'
        print("\nContains the features of human augmented brain level 1. However, massively enhanced generative")
        print("abilities, i.e., statistically is closer to understanding the full joint probability distribution of for")
        print("example the classic p(x,y) and come up with the best solution to complex problems, rather than more")
        print("discriminative solutions. In the practical sense, this level of brain augmentation")
        print("can invent at machine speed, and find solutions that otherwise would not seem immediately obvious.")
        print("However, no claim of superintelligence is made. Given supra-mammalian brain, meaningfulness is present.")
        print("*Superintelligence features not implemented at present. Reverting to human hippocampus.*\n")
        h.current_hippocampus = "HUMAN"
        h.meaningfulness = True
    if BINDING:
        # binding-paper demo: only the human-like brain model has published equations,
        # so any other selection is overridden here
        print("In the related CCA3 binding article, no equations for other species' brain analogues.")
        print("Thus, currently the choice of other species brain unavailable -- human-like brain model will be used.")
        print("h.current_hippocampus = HUMAN, h.meaningfulness = True")
        h.current_hippocampus = "HUMAN"
        h.meaningfulness = True
    g.fast_input("\nPress ENTER to continue...\n")
    # returns h,m since h,m modified by this method
    return h, m
def runs_cycles_message(g):
    '''in_use_do_not_archive
    Print an explanation of what is meant by 'runs', 'cycles' and 'scenes'.

    input parameters:
        g -- session-wide data/methods (supplies large_letts_display)
    returns:
        True always (message-only method)
    '''
    g.large_letts_display("runs & cycles")
    print('\nBelow, each simulation run (whether in a PATIENT hospital room environment, in a')
    print('SUDOKU environment, and so on) is displayed as "run #1", "run #2", and so on.')
    print('\nWithin a simulation "run" there are "evaluation cycles" counted starting from cycle 0,')
    print('cycle 1, and so on. When a new simulation run starts again, the evaluation "cycles" (and the')
    print('input sensory "scenes") start counting from zero again, i.e., "cycle 0", "scene 0".')
    print('\nWithin a simulation "run" there are also "scenes" counted starting from scene 0, scene 1,')
    print('and so on. The scenes represent input data from the external world that the CCA3 is')
    print('sensing. They represent "sensory scenes" (i.e., visual, auditory, olfactory, radar, etc')
    print('sensory information) rather than just a visual scene. If the CCA3 is built and running a')
    print('real robot then these scenes are real hardware input signals. However, below in these simulations')
    print('the sensory scenes generally are simulated. Please note that the scene numbers do not have to')
    print('correspond with the evaluation cycle numbers, since several evaluation cycles may be used')
    print('to process a sensory scene.\n')
    print('For example:')
    print('RUN#1 eg, SUDOKU environment')
    print('     evaluation cycle or CYCLE#0 processing sensory scene SCENE #0 <--scene related to the SUDOKU environment')
    print('     CYCLE#1 processing SCENE#0 <--scene related to the SUDOKU environment')
    print('     CYCLE#2 processing SCENE#1 <--scene related to the SUDOKU environment')
    print('     ....')
    print('     ....')
    print('RUN#2 eg, HOSPITAL environment')
    print('     CYCLE#0 processing sensory SCENE #0 <--scene related to the HOSPITAL environment')
    print('     CYCLE#1 processing SCENE#0 <--scene related to the HOSPITAL environment')
    print('     ....')
    print('     ....\n\n')
    return True
def choose_starting_scene(d: ddata.MapData, g: gdata.MultipleSessionsData, h: hdata.NavMod) -> ddata.MapData:
    '''in_use_do_not_archive
    CCA3 ver
    The user specifies the first scene the newly manufactured/refurbished robot
    sees and senses. This first scene retrieves navigation maps and instinctive
    primitives related to the scene's environment; for the remainder of the run
    (i.e., until success or failure to reach the goal) the robot stays in the
    environment of this first scene.
    (Future versions will allow switching environments mid-run, as happens in
    the real world all the time; at present each set of scenes is in one
    environment.)

    input parameters:
        d -- map data for the run (modified: d.current_goal is set)
        g -- session-wide data/methods
        h -- navigation module data (modified: h.first_scene is set)
    returns:
        d  (returned since d is modified by this method)
    '''
    # print out the first scene choices
    g.large_letts_display("start scene")
    print(
        '''
    CHOOSE ENVIRONMENT FIRST SCENE IS TO START IN\n
    The first scene the newly manufactured/refurbished robot sees and senses will retrieve navigation
    maps and instinctive primitives related to the scene's environment.
    For the remainder of the environment (i.e., until success or fail to reach the goal) the causal cognitive
    embodiment, ie, the 'robot' will be in an environment where the scenes are in this environment.
    For example, in the PATIENT environment simulation, the first scene is the robot seeing a patient using
    a walker in a hospital room. The next scene might be the patient asking for a glass of water. However, all
    the scenes are in the hospital room with the patient. When the scenes related to this patient are complete,
    i.e., the simulation in the hospital room (environment PATIENT) is complete, then you are asked again to
    choose another first scene/environment to run the CCA3 robot in. Perhaps you choose an environment where the
    CCA3 plays a game of Sudoku, or perhaps you want to go back to the hospital room and try the previous simulation
    over again.
    In future versions of the simulation there will be, of course, the ability for the CCA3 to switch
    environments on its own, as happens in the real world all the time. However, at present,
    each set of scenes is in one environment.
    Please specify the first scene (environment) the newly manufactured/refurbished robot sees and senses:
    0. Default choice of patient on a walker (ENTER key will also choose)
    1. Looking at a Sudoku game sheet
    2. In the middle of an unknown city
    3. Looking at machine filled with gears
    4. Looking at trees in a forest
    5. Future use
    '''
    )
    print(colored('Equations assume various sensory stimuli being sensed by the CCA3', 'cyan'))
    print(colored('However, since there is not a robot sensing the real world, but', 'cyan'))
    print(colored('a simulation, we must also simulate the sensory stimuli. This is what is', 'cyan'))
    print(colored('being selected here, i.e., simulation of the external world', 'cyan'))
    # input choice selection
    if g.fastrun:
        b_b = 1  # if run with g.fastrun then this is the default first_scene
    else:
        try:
            b_b = int(input("Please make a selection:"))
        except (ValueError, EOFError):
            # narrow catch: bad/absent input falls back to the default choice
            print("\n**ENTER or nonstandard input**, therefore default choice selected.")
            b_b = 0
    if b_b not in range(0, 6):
        print("**Selection is a nonstandard choice. Thus default choice selected.")
        b_b = 0
    # input choice sets h.first_scene
    if b_b == 0:
        # default choice 'PATIENT'
        print("Default first_scene has been selected:")
        print("\nCCA3 recognizes a patient on a walker in front of itself.")
        print("This will trigger retrieval of the navigation maps associated with the patient,")
        print("as well as a goal setting to assist such a patient.")
        h.first_scene = "PATIENT"
    elif b_b == 1:
        # 'SUDOKU'
        print("\nCCA3 recognizes a Sudoku game sheet in front of it.")
        print("This will trigger retrieval of the navigation maps associated with sudoku,")
        print("as well as a goal setting to assist playing such a game.")
        h.first_scene = "SUDOKU"
    elif b_b == 2:
        # 'LOST' --> falls back to 'PATIENT'
        print("\nCCA3 cannot recognize the environment.")
        print("Not available in this version. Thus switch first scene to recognizing a patient on a walker in front of it.")
        h.first_scene = "PATIENT"
    elif b_b == 3:
        # 'GEARS' --> falls back to 'PATIENT'
        print("\nCCA3 recognizes the machine in front of it as a broken machine with gears.")
        print("Not available in this version. Thus switch first scene to recognizing a patient on a walker in front of it.")
        h.first_scene = "PATIENT"
    elif b_b == 4:
        # 'FOREST' --> falls back to 'PATIENT'
        print("\nCCA3 recognizes a forest in front of itself.")
        print("This will trigger retrieval of the navigation maps associated with the forest,")
        print("as well as a goal setting to rescue a lost hiker in the forest.")
        print("Not available currently -- to be implemented shortly.")
        print("Thus switch first scene to recognizing a patient on a walker in front of it.")
        h.first_scene = "PATIENT"
    elif b_b == 5:
        # 'NOT_SPECIFIED' --> falls back to 'PATIENT'
        print("\nNot specified. Future use..")
        print("Not available in this version. Thus switch first scene to recognizing a patient on a walker in front of it.")
        h.first_scene = "PATIENT"
    if BINDING and b_b != 0:
        # binding-paper demo is built around the patient-aide scenario only
        print("\nFor the moment, the CCA3 controlling a robot which acts as a patient-aide")
        print("is being developed. Thus, default first_scene has been selected:")
        print("\nCCA3 recognizes a patient on a walker in front of itself.")
        h.first_scene = "PATIENT"
    d.current_goal = g.goal_default
    g.fast_input("\nPress ENTER to continue...\n")
    # returns d since d is modified by this method
    return d
def print_event_log_memory(g: gdata.MultipleSessionsData) -> bool:
'''in_use_do_not_archive
CCA3 ver
print out raw event_log memory for now
add more functionality in future versions via
other methods inside the appropriate module
'''
if g.fastrun:
return True
if input("Print out raw event_log memory?") in ("Y", "y", "Yes", "yes"):
g.printout_event_log_memory()
return True
return False
def recall_from_storage(g, d, h, m, c, a):
    '''in_use_do_not_archive
    CCA4 ver
    Recall values of g, d, h, m, c, a from long-term storage media.
    Currently a stub: prints a notice and returns the arguments unchanged.
    '''
    notices = (
        "recalls values of g, d, h, m, c, a from long term storage media",
        "long-term storage media: ",
        "long term storage not available at present\n",
    )
    for notice in notices:
        print(notice)
    # no storage backend yet -- hand the data structures back untouched
    return g, d, h, m, c, a
def save_to_storage(g, d, h, m, c, a):
    '''in_use_do_not_archive
    CCA4 ver
    Save values of g, d, h, m, c, a to long-term storage media.
    Currently a stub: prints a notice and returns the arguments unchanged.
    '''
    notices = (
        "saves values of g, d, h, m, c, a to long term storage media",
        "long-term storage media: ",
        "long term storage not available at present\n",
    )
    for notice in notices:
        print(notice)
    # no storage backend yet -- hand the data structures back untouched
    return g, d, h, m, c, a
def run_again() -> bool:
    '''in_use_do_not_archive
    CCA3 ver
    Ask the user whether to run another environment at the end of a scene.

    returns:
        False if the reply is one of the recognized negative answers,
        True for any other reply (including just ENTER)
    '''
    # any reply not in this set (including empty input) means "run again"
    negative_replies = ("N", "n", "NO", "No", "nO", "N0", "no", "0", "stop", "break")
    reply = input("\nRun again?")
    return reply not in negative_replies
def start_run_messages(d, g, h):
    '''in_use_do_not_archive
    Print messages to the user (and perform any other preliminary operations)
    before a simulation run starts.

    returns:
        True always (message-only method)
    '''
    # development aid: show the key state values driving this run
    print("\n----------\nvalues for software development usage:\nh.meaningfulness, h.current_hippocampus, h.first_scene, d.current_goal: ")
    print(h.meaningfulness, h.current_hippocampus, h.first_scene, d.current_goal, "\n----------\n")
    # explain 'cycle' vs 'scene' terminology before the run begins
    explanation = (
        "\nSTART EVALUATION CYCLES",
        "(nb. Each 'evaluation cycle' is one loop through the CCA3 architecture.",
        "Sometimes a new scene will occur after an 'evaluation cycle', sometimes after a few cycles.",
        "Recall that the 'cycle' is a cycle of processing through the architecture of the sensory scene",
        "being presented to the CCA3 architecture. A number of processing cycles may occur for a",
        "particular sensory scene. 'cycle' is internal processing, 'scene' is the external sensory",
        "stimuli being presented (or simulated) to the CCA3.)\n",
    )
    for line in explanation:
        print(line)
    print(colored('The equations in the CCA3 Binding paper cover only one "cycle"', 'cyan'))
    print(colored('In the next "cycle" the equations largely repeat, although not re-initialized\n', 'cyan'))
    g.fast_input(f"Press ENTER to start the CCA3 evaluation cycles for this environment {h.first_scene} (simulation run # {g.mission_counter} since program started) ....")
    return True
def exit_program(g) -> None:
    '''in_use_do_not_archive
    CCA3 ver
    Orderly shutdown of the program. (The "nano" version has no intermediate
    PyTorch structures to save -- that save step is deprecated.)
    Does not return: terminates the interpreter via sys.exit().
    '''
    print("\nOrderly shutdown of program via exit_program()")
    # platform/pyboard detection code may emit messages while the interpreter unwinds
    print("Please ignore any messages now generated by main/pyboard/etc detection code....")
    g.large_letts_display("program exit")
    sys.exit()
def computing_evnrt(h) -> bool:
    '''in_use_do_not_archive
    CCA4 ver
    Display information about the computing environment (Python install,
    platform details, available navigation maps, GPU availability notes).

    input parameters:
        h -- navigation module data (h.total_maps is read)
    returns:
        True if the full environment information was displayed,
        False if any part of the information could not be obtained
    '''
    print(colored("** PLEASE MAKE SURE YOUR TERMINAL DISPLAY IS FULL SIZE WITH APPROPRIATE FONT, SIZE 20 **", 'red'))
    print("(Windows terminal - right click on the menu bar, left click on 'Properties', click 'Font', 'Size' == 20, 'Font' == Consolas)")
    print("(Consolas font is 9px wide, 20 px high; click 'Colors', 'Screen Text' == dark green, 'Screen Background' == black)")
    print("(Mac, Linux platforms - please similarly adjust your terminal properties, as needed)")
    print("\n\nInformation about computing environment:")
    print("CCA3 - CCA4 Transition Sept 2021 Version")
    print("(Note: Should bypass any Windows-dependent calls if run on another platform.)")
    try:
        print("CCA4 Project: Python installed: ", os.path.dirname(sys.executable))
        print("Platform Info (via StdLib): \n ", "Python version: ", sys.version, "\n  os.name:",
              os.name, platform.system(), platform.release(), "sys.platform:", sys.platform, "\n ",
              "(Windows note: sys.platform may give 'win32' result even if win64 for backwards compatibility reasons)\n",
              "  platform.processor:", platform.processor(), "\n ",
              "sys.maxsize (9223372036854775807 for 64 bit Python): ", sys.maxsize)
        print(" total navigation maps (i.e., cortical mini-column analogues) available via constants.py: ",
              h.total_maps)
        if BINDING:
            print('For this CCA3 demonstration version no GPUs or cloud software required. No GPU checking.\n\n')
        else:
            try:
                # GPU appropriate library required
                #print("GPU Pytorch CUDA availability: ", torch.cuda.is_available())
                print("Pytorch, CUDA, GPU checking not installed at present")
            except Exception:
                # narrow catch (was a bare except): GPU check is best-effort only
                print("Unable to check correctly if GPU_ENABLED")
            print("\n\n")
        return True
    except Exception:
        # narrow catch (was a bare except): report partial failure, do not crash startup
        print("Unable to obtain full computing envrt information\n")
        return False
def embedded_main_pyboard(g) -> None:
    '''in_use_do_not_archive
    CCA3 ver
    Deprecated stub -- see palimpsest for the original embedded_main_pyboard()
    code, intended to allow interface between the causal cognitive architecture
    and a robot embodiment.
    Does not return: prompts the user, then ends the program via exit_program().
    '''
    print("'embedded_main_pyboard()' is currently part of deprecated code")
    # wait for acknowledgement before terminating
    input("Program will now be ended.... click any key to continue....")
    exit_program(g)
#
##END METHODS END METHODS
##START INTRO-MAIN START INTRO-MAIN
#
def main_eval() -> None:
    '''in_use_do_not_archive
    Top-level driver of the CCA3 simulation.
    overview:
    if __name__ == '__main__': main_eval():
    -instantiations of data and method structures g, d, h, m, c, a
    - loop (one iteration per simulation run, up to LIFESPAN runs):
        choose species simulation (lamprey to augmented human)
        choose envr't which sets up instinctive primitives
        main_mech.cycles()
            -sensory scenes feeding into the cca3 architecture
            -evaluation cycles occur to process each sensory scene
            -when no more scenes to feed in or other end of simulation run,
             then exit from evaluation cycles
        print_event_log_memory()
        clear memory -- re-instantiation of d (g persists between runs;
            h, m optionally re-instantiated inside choose_simulation)
        if not run_again(): break loop and end
        -->else loops again for new scene envr't^
    '''
    # set up
    g = gdata.MultipleSessionsData() #persists between runs
    d = ddata.MapData() #re-initialized every run
    h = hdata.NavMod() #optional re-initialized each run if no choose '0 Same as Last Brain'
    m = hdata.MapFeatures() #optional re-initialized each run if no choose '0 Same as Last Brain'
    c = hdata.CognitiveMapFeatures() #optional re-initialized each run if no choose '0 Same as Last Brain'
    a = hdata.AugmentedMapFeatures() #optional re-initialized each run if no choose '0 Same as Last Brain'
    if SAVE_RECALL_TO_FROM_STORAGE:
        g, d, h, m, c, a = recall_from_storage(g, d, h, m, c, a)
    #input('\ndebug:view startup messages prior to cls... press ENTER to continue....')
    g.one_moment_please_display(1)
    g.choose_if_g_fastrun_on_off() #set verbosity for devp't
    # siml'n run for a given envr't, then repeat for a new envr't or exit
    # note: the loop variable deliberately IS g.mission_counter (run number)
    for g.mission_counter in range(1, LIFESPAN): #10,000
        # set up data and hyperparameters for the scene
        print(colored("\n\n\nCCA3 Binding paper software walk-through note:", 'blue'))
        print(colored("main_eval() loop:  obtain hyperparameters\n\n", 'blue'))
        g.fast_input("Press ENTER to continue...\n")
        h, m = choose_simulation(g, h, m)
        d = choose_starting_scene(d, g, h)
        start_run_messages(d, g, h)
        # start simulation run of evaluation cycles for the envr't
        print(colored("\n\n\nCCA3 Binding paper software walk-through note:", 'blue'))
        print(colored("main_eval() loop:  call main_mech.cycles()\n\n", 'blue'))
        g.fast_input("Press ENTER to continue...\n")
        d, g, h, m = main_mech.cycles(d, g, h, m)
        # return from a simulation run
        print(colored("\n\n\nCCA3 Binding paper software walk-through note:", 'blue'))
        print(colored("main_eval() loop:  returned from simulation run\n\n", 'blue'))
        g.fast_input("Press ENTER to continue...\n")
        print_event_log_memory(g)
        if not run_again():
            break
        d = ddata.MapData() # re-initialize for next simulation run
        # if not exited, then select new (or same) envr't and repeats now again ----^
    # end program
    if SAVE_RECALL_TO_FROM_STORAGE:
        g, d, h, m, c, a = save_to_storage(g, d, h, m, c, a)
    exit_program(g)
#
##END INTRO-MAIN END INTRO-MAIN
# entry point: run the interactive simulation when executed as a script;
# when imported as a module, fall through to the (deprecated) pyboard path
if __name__ == "__main__":
    main_eval()
else:
    print("\n\n\n\nModule ", __name__, " is not named as __main__, thus pyboard version of main being called\n")
    logging.warning('wrong main branch given unavailability of pyboard hardware')
    pyboard_instantiation_g = gdata.MultipleSessionsData()
    embedded_main_pyboard(pyboard_instantiation_g)
    # NOTE(review): embedded_main_pyboard() ends the process via
    # exit_program()/sys.exit(), so this breakpoint appears unreachable --
    # presumably a leftover debugging aid; confirm before removing
    pdb.set_trace()
#
##START PALIMPSEST START PALIMPSEST
# 3408 lines of deprecated code transferred to
# module palimpsest.py (old lines 2615 - 6023 ver 23)
# Feb 2021 -- should not need any of this code at this point
# Feb 2021 -- several thousand lines of other code also cleared out, see prev versions if needed
##END PALIMPSEST END PALIMPSEST
| 48.576879 | 172 | 0.670197 |
acef52f83f70a44902c4956ecc93e8867b2e8a23 | 7,827 | py | Python | watertap/examples/edb/simple_acid.py | srikanthallu/proteuslib | c0d62e6af61afc493bb81b9aab9bbefc3be0bcfd | [
"BSD-3-Clause-LBNL"
] | 3 | 2021-06-03T08:02:59.000Z | 2021-07-17T07:45:56.000Z | watertap/examples/edb/simple_acid.py | srikanthallu/proteuslib | c0d62e6af61afc493bb81b9aab9bbefc3be0bcfd | [
"BSD-3-Clause-LBNL"
] | 128 | 2021-05-19T22:29:59.000Z | 2021-10-04T20:44:58.000Z | watertap/examples/edb/simple_acid.py | srikanthallu/proteuslib | c0d62e6af61afc493bb81b9aab9bbefc3be0bcfd | [
"BSD-3-Clause-LBNL"
] | 13 | 2021-05-19T22:23:19.000Z | 2021-07-07T16:36:09.000Z | ###############################################################################
# WaterTAP Copyright (c) 2021, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory, Oak Ridge National
# Laboratory, National Renewable Energy Laboratory, and National Energy
# Technology Laboratory (subject to receipt of any required approvals from
# the U.S. Dept. of Energy). All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license
# information, respectively. These files are also available online at the URL
# "https://github.com/watertap-org/watertap/"
#
###############################################################################
"""
This file demonstrates how to use EDB to create a simple acid problem.
(1) Before we can start, you must install MongoDB (which is installed separately)
[See more information on the ReadTheDocs under 'Getting Started --> Installing WaterTAP']
(2) After installing MongoDB, you will need to 'load' the database using the
command line function 'edb load -b'. This will load the default database
that WaterTAP is bundled with.
[NOTE: If you need to 'reload' the database, simply use the command 'edb drop -d electrolytedb'
in the command line. The database on MongoDB is named "electrolytedb"]
[NOTE 2: You can invoke the command line utility with the "help" keyword to
get more information on functionality. Command: 'edb --help' or 'edb [arg] --help']
(3) To use EDB in python, start by importing the interface class object 'ElectrolyteDB'
(4) Invoke the 'ElectrolyteDB' object to connect to the database
(5) Grab a 'base' for a configuration dictionary, and place it into a class object
This time, we will grab a base that is for a Liq only problem using FpcTP
state variables.
(6) Get the chemical species/components for a simulation case. There are a number of ways
to do this. In this example, we will grab them by finding all components that contain
only specific elements. Then, we add those components and their associated parameters
to the configuration dictionary being built from the 'base'.
[NOTE: An alternative method is to provide a list of the names of components you want]
(7) Get the set of reactions you want in your system and put into a 'base' object.
In this case, we are getting all reactions associated with a system of water
and carbonic acid. We should get three reactions:
H2O <--> H_+ + OH_-
H2CO3 <--> H_+ + HCO3_-
HCO3_- <--> H_+ + CO3_2-
(8) When using a reactor object in IDAES, you must always provide a 'reaction_config'
to match with the 'thermo_config'. We can create a base 'reaction' config from
the database and add reactions to that config in the same way we do for the
'thermo_config' when adding reactions as inherent.
[NOTE: If a reaction is added to a 'thermo_config' as 'inherent', it should
NOT be added to a 'reaction_config' as 'equilibrium']
(9) [NEW Step] Build an equilibrium reactor from the 'thermo_config' and 'reaction_config'
that were generated from the EDB.
"""
# ========= These imports (below) are for testing the configs from EDB ===============
# Import specific pyomo objects
from pyomo.environ import (
ConcreteModel,
)
# Import the idaes objects for Generic Properties and Reactions
from idaes.generic_models.properties.core.generic.generic_property import (
GenericParameterBlock,
)
from idaes.generic_models.properties.core.generic.generic_reaction import (
GenericReactionParameterBlock,
)
# Import the idaes object for the EquilibriumReactor unit model
from idaes.generic_models.unit_models.equilibrium_reactor import EquilibriumReactor
# Import the core idaes objects for Flowsheets and types of balances
from idaes.core import FlowsheetBlock
# ========= These imports (above) are for testing the configs from EDB ===============
# ========================== (3 & 4) ================================
# Import ElectrolyteDB object
from watertap.edb import ElectrolyteDB
from watertap.examples.edb.the_basics import (
connect_to_edb,
is_thermo_config_valid,
grab_base_reaction_config,
is_thermo_reaction_pair_valid,
)
__author__ = "Austin Ladshaw"
# ========================== (5) ================================
# Grab a new base config for our thermo, but this time we will use
# one of the newer bases that will use the FpcTP state vars and
# a Liq only system.
def grab_thermo_Liq_FpcTP_base(db):
    """Fetch the 'thermo_Liq_FpcTP' base config object from the EDB.

    This base uses FpcTP state variables for a liquid-only system.
    """
    return db.get_base("thermo_Liq_FpcTP")
# ========================== (6) ================================
# Get chemical components/species for a simulation case
# NOTE: This function here also returns a 'list' of the
# components that it finds. This is not a built in
# feature of the EDB, but is very useful because
# getting reactions is dependent on the component list.
def get_components_and_add_to_idaes_config(db, base_obj, comp_list):
    """Look up each named component in the EDB and add it to base_obj.

    Prints one line per component added, then returns the same base_obj.
    """
    found_components = db.get_components(component_names=comp_list)
    for component in found_components:
        print("Adding " + str(component.name) + "" )
        base_obj.add(component)
    print()
    return base_obj
# ========================== (7) ================================
# Grab the reactions associated with the list of components and add
# them to a reaction base as equilibrium reactions
#
def add_equilibrium_reactions_to_react_base(db, react_base_obj, comp_list):
    """Query the EDB for reactions involving comp_list and add each to
    react_base_obj (as equilibrium reactions); returns react_base_obj."""
    for reaction in db.get_reactions(component_names=comp_list):
        print("Found reaction: " + str(reaction.name))
        react_base_obj.add(reaction)
    return react_base_obj
# ========================== (9) ================================
# Create the Pyomo model by using the thermo_config and reaction_config
# that were generated from the EDB.
#
def build_equilibrium_model(thermo_config, reaction_config):
    """Create a Pyomo/IDAES flowsheet holding an EquilibriumReactor built
    from EDB-generated thermo and reaction configuration dicts."""
    model = ConcreteModel()
    model.fs = FlowsheetBlock(default={"dynamic": False})
    model.fs.thermo_params = GenericParameterBlock(default=thermo_config)
    # The reaction package must reference the thermo (property) package.
    reaction_defaults = {
        "property_package": model.fs.thermo_params,
        **reaction_config,
    }
    model.fs.rxn_params = GenericReactionParameterBlock(
        default=reaction_defaults
    )
    # Equilibrium-only reactor: no rate reactions, heat, or pressure change.
    reactor_defaults = {
        "property_package": model.fs.thermo_params,
        "reaction_package": model.fs.rxn_params,
        "has_rate_reactions": False,
        "has_equilibrium_reactions": True,
        "has_heat_transfer": False,
        "has_heat_of_reaction": False,
        "has_pressure_change": False,
    }
    model.fs.unit = EquilibriumReactor(default=reactor_defaults)
    return model
# Run script for testing
def run_simple_acid_with_mockdb(db):
    """End-to-end demo: assemble an equilibrium model for the water /
    carbonic acid system using configs pulled entirely from the EDB."""
    thermo_base = grab_thermo_Liq_FpcTP_base(db)
    # Chemical species for the water + carbonic acid system
    comp_list = ["H2O", "H_+", "OH_-", "H2CO3", "HCO3_-", "CO3_2-"]
    thermo_base = get_components_and_add_to_idaes_config(
        db, thermo_base, comp_list)
    # Build the matching reaction config ('equilibrium' reactions only)
    react_base = grab_base_reaction_config(db)
    react_base = add_equilibrium_reactions_to_react_base(
        db, react_base, comp_list)
    # Extract the finished IDAES config dicts and assemble the model
    thermo_config = thermo_base.idaes_config
    reaction_config = react_base.idaes_config
    return build_equilibrium_model(thermo_config, reaction_config)
| 40.345361 | 103 | 0.669733 |
acef532f9d0dc454abfee49bc25b981f695b2af8 | 1,233 | py | Python | gerryfair/learner.py | algowatchPenn/GerryFair | e007abe5e9409b87de6189a92ce71d6b2fb21bb6 | [
"MIT"
] | 32 | 2019-01-03T18:54:39.000Z | 2022-02-24T03:48:36.000Z | gerryfair/learner.py | algowatchPenn/GerryFair | e007abe5e9409b87de6189a92ce71d6b2fb21bb6 | [
"MIT"
] | null | null | null | gerryfair/learner.py | algowatchPenn/GerryFair | e007abe5e9409b87de6189a92ce71d6b2fb21bb6 | [
"MIT"
] | 11 | 2018-12-06T17:31:02.000Z | 2022-03-13T21:19:18.000Z | import numpy as np
import copy
from sklearn import linear_model
from gerryfair.reg_oracle_class import RegOracle
class Learner:
    """Learner side of the fair-classification game.

    Fits cost-sensitive classifiers against cost vectors supplied by the
    auditor and tracks the running-average classifier's error.
    """

    def __init__(self, X, y, predictor):
        self.X = X
        self.y = y
        self.predictor = predictor

    def best_response(self, costs_0, costs_1):
        """Solve the CSC problem for the learner."""
        # Fit one fresh copy of the base predictor per label's cost vector,
        # then pair them up inside a RegOracle.
        fitted = []
        for costs in (costs_0, costs_1):
            regressor = copy.deepcopy(self.predictor)
            regressor.fit(self.X, costs)
            fitted.append(regressor)
        return RegOracle(fitted[0], fitted[1])

    # Inputs:
    # q: the most recent classifier found
    # A: the previous set of decisions (probabilities) up to time iter - 1
    # iteration: the number of iteration
    # Outputs:
    # error: the error of the average classifier found thus far (incorporating q)
    def generate_predictions(self, q, A, iteration):
        """Return the classifications of the average classifier at time iter."""
        # Running average: weight the new predictions by 1/iteration and
        # the accumulated decisions by (iteration - 1)/iteration.
        latest = np.multiply(1.0 / iteration, q.predict(self.X))
        averaged = np.add(np.multiply((iteration - 1.0) / iteration, A), latest)
        error = np.mean(
            [np.abs(averaged[k] - self.y[k]) for k in range(len(self.y))]
        )
        return (error, averaged)
acef539fb1fad087e564dacee49a87a760648fee | 314 | py | Python | maicroft/subreddits/subreddits.py | thundergolfer-old/mAIcroft | 2efbf853d345a7b6515e0727ac243cd58b8536bc | [
"MIT"
] | 2 | 2019-09-18T16:49:44.000Z | 2021-09-11T21:17:41.000Z | maicroft/subreddits/subreddits.py | thundergolfer-old/mAIcroft | 2efbf853d345a7b6515e0727ac243cd58b8536bc | [
"MIT"
] | null | null | null | maicroft/subreddits/subreddits.py | thundergolfer-old/mAIcroft | 2efbf853d345a7b6515e0727ac243cd58b8536bc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from maicroft.subreddits.sub_data import subreddits
# Index the subreddit metadata by name for O(1) lookup.
subreddits_dict = {
    subreddit['name']: subreddit for subreddit in subreddits
}
# Subreddits whose text content is flagged to be ignored during analysis.
ignore_text_subs = [s["name"] for s in subreddits if s["ignore_text"] == "Y"]
# Subreddits that every reddit account is subscribed to by default.
default_subs = [s["name"] for s in subreddits if s["default"] == "Y"]
acef55d117bbfc3bd8bd57f5554ca1ca404d32a5 | 1,000 | py | Python | backend/mis/urls.py | andrewwdao/MIS-CTU-management-system | 888fa8d14e0709fa9a03d567e2771b5999764637 | [
"MIT"
] | 3 | 2020-05-11T04:08:16.000Z | 2020-07-29T13:39:12.000Z | backend/mis/urls.py | minhan74/MIS-CTU-management-system | 888fa8d14e0709fa9a03d567e2771b5999764637 | [
"MIT"
] | 6 | 2020-08-16T06:31:54.000Z | 2021-09-22T18:40:44.000Z | backend/mis/urls.py | minhan74/MIS-CTU-management-system | 888fa8d14e0709fa9a03d567e2771b5999764637 | [
"MIT"
] | null | null | null | """mis URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
# Route table: Django admin plus the accounts/equipments app URLconfs,
# with uploaded media served from MEDIA_ROOT.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('accounts.urls')),
    path('', include('equipments.urls')),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
acef5636377f611c5e426d06457668e30791c9d0 | 148 | py | Python | G4 Localizer/a2_Camera/takePhotoSpecific.py | cbrahana/FRC-Localizer-Systems | 740c88ec6e0af490e703e8a5c544434c0f33ee0b | [
"MIT"
] | null | null | null | G4 Localizer/a2_Camera/takePhotoSpecific.py | cbrahana/FRC-Localizer-Systems | 740c88ec6e0af490e703e8a5c544434c0f33ee0b | [
"MIT"
] | null | null | null | G4 Localizer/a2_Camera/takePhotoSpecific.py | cbrahana/FRC-Localizer-Systems | 740c88ec6e0af490e703e8a5c544434c0f33ee0b | [
"MIT"
] | null | null | null | def takePhotoSpecific():
#Takes a photo with the specified camera at default settings in case of bad data, inputs to numpy array
return None | 49.333333 | 107 | 0.763514 |
acef563f301e427f2a632ff74c58e57f160b1538 | 12,640 | py | Python | doc/make.py | jess010/pandas | 9872d6757e5117dce070981141cee562f675694e | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 4 | 2016-10-05T17:38:58.000Z | 2020-08-24T16:26:37.000Z | doc/make.py | neurodebian/pandas | de3e1e6705b1c1b17f945079201c68a9e8d2ed14 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"BSD-3-Clause"
] | null | null | null | doc/make.py | neurodebian/pandas | de3e1e6705b1c1b17f945079201c68a9e8d2ed14 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"BSD-3-Clause"
] | 12 | 2017-05-23T06:01:12.000Z | 2021-08-16T05:09:46.000Z | #!/usr/bin/env python
"""
Python script for building documentation.
To build the docs you must have all optional dependencies for pandas
installed. See the installation instructions for a list of these.
<del>Note: currently latex builds do not work because of table formats that are not
supported in the latex generation.</del>
2014-01-30: Latex has some issues but 'latex_forced' works ok for 0.13.0-400 or so
Usage
-----
python make.py clean
python make.py html
"""
from __future__ import print_function
import io
import glob # noqa
import os
import shutil
import sys
from contextlib import contextmanager
import sphinx # noqa
import argparse
import jinja2 # noqa
os.environ['PYTHONPATH'] = '..'
SPHINX_BUILD = 'sphinxbuild'
def _process_user(user):
if user is None or user is False:
user = ''
else:
user = user + '@'
return user
def upload_dev(user=None):
    """Rsync the built HTML docs to the pydata dev directory."""
    prefix = _process_user(user)
    cmd = ('cd build/html; rsync -avz . {0}pandas.pydata.org'
           ':/usr/share/nginx/pandas/pandas-docs/dev/ -essh'.format(prefix))
    if os.system(cmd):
        raise SystemExit('Upload to Pydata Dev failed')
def upload_dev_pdf(user=None):
    """Scp the built PDF docs to the pydata dev directory."""
    prefix = _process_user(user)
    cmd = ('cd build/latex; scp pandas.pdf {0}pandas.pydata.org'
           ':/usr/share/nginx/pandas/pandas-docs/dev/'.format(prefix))
    if os.system(cmd):
        raise SystemExit('PDF upload to Pydata Dev failed')
def upload_stable(user=None):
    """Rsync the built HTML docs to the pydata stable directory."""
    prefix = _process_user(user)
    cmd = ('cd build/html; rsync -avz . {0}pandas.pydata.org'
           ':/usr/share/nginx/pandas/pandas-docs/stable/ -essh'.format(prefix))
    if os.system(cmd):
        raise SystemExit('Upload to stable failed')
def upload_stable_pdf(user=None):
    """Scp the built PDF docs to the pydata stable directory."""
    prefix = _process_user(user)
    cmd = ('cd build/latex; scp pandas.pdf {0}pandas.pydata.org'
           ':/usr/share/nginx/pandas/pandas-docs/stable/'.format(prefix))
    if os.system(cmd):
        raise SystemExit('PDF upload to stable failed')
def upload_prev(ver, doc_root='./', user=None):
    """Push an older release's HTML and PDF docs to its version directory."""
    user = _process_user(user)
    local_dir = doc_root + 'build/html'
    remote_dir = '/usr/share/nginx/pandas/pandas-docs/version/%s/' % ver
    cmd = 'cd %s; rsync -avz . %spandas.pydata.org:%s -essh'
    cmd = cmd % (local_dir, user, remote_dir)
    print(cmd)
    if os.system(cmd):
        raise SystemExit(
            'Upload to %s from %s failed' % (remote_dir, local_dir))
    # Also copy the PDF rendered into build/latex alongside the HTML.
    local_dir = doc_root + 'build/latex'
    pdf_cmd = 'cd %s; scp pandas.pdf %spandas.pydata.org:%s'
    pdf_cmd = pdf_cmd % (local_dir, user, remote_dir)
    if os.system(pdf_cmd):
        raise SystemExit('Upload PDF to %s from %s failed' % (ver, doc_root))
def build_pandas():
    """Rebuild pandas' C extensions in place.

    Assumes it is run from the doc/ directory; temporarily chdirs to the
    repo root and back.
    """
    os.chdir('..')
    os.system('python setup.py clean')
    os.system('python setup.py build_ext --inplace')
    os.chdir('doc')
def build_prev(ver):
    """Check out release tag v<ver>, rebuild extensions and docs, then
    return to master.
    """
    # NOTE(review): '!= 1' treats only exit status 1 as checkout failure;
    # other nonzero statuses still proceed -- confirm this is intended.
    if os.system('git checkout v%s' % ver) != 1:
        os.chdir('..')
        os.system('python setup.py clean')
        os.system('python setup.py build_ext --inplace')
        os.chdir('doc')
        os.system('python make.py clean')
        os.system('python make.py html')
        os.system('python make.py latex')
        os.system('git checkout master')
def clean():
    """Delete the sphinx build output and the generated autosummary stubs."""
    for stale_dir in ('build', 'source/generated'):
        if os.path.exists(stale_dir):
            shutil.rmtree(stale_dir)
@contextmanager
def maybe_exclude_notebooks():
    """
    Skip building the notebooks if pandoc is not installed.
    This assumes that nbsphinx is installed.

    If notebook conversion is impossible, the .ipynb sources are removed
    for the duration of the context (their contents saved in memory) and
    restored afterwards so sphinx never sees them.
    """
    base = os.path.dirname(__file__)
    notebooks = [os.path.join(base, 'source', nb)
                 for nb in ['style.ipynb']]
    # Maps notebook path -> original file contents, for restoration below.
    contents = {}
    def _remove_notebooks():
        # Stash each notebook's text, then delete the file on disk.
        for nb in notebooks:
            with open(nb, 'rt') as f:
                contents[nb] = f.read()
            os.remove(nb)
    # Skip notebook conversion if
    # 1. nbconvert isn't installed, or
    # 2. nbconvert is installed, but pandoc isn't
    try:
        import nbconvert
    except ImportError:
        print("Warning: nbconvert not installed. Skipping notebooks.")
        _remove_notebooks()
    else:
        try:
            nbconvert.utils.pandoc.get_pandoc_version()
        except nbconvert.utils.pandoc.PandocMissing:
            print("Warning: Pandoc is not installed. Skipping notebooks.")
            _remove_notebooks()
    yield
    # Put back any notebooks that were removed above.
    for nb, content in contents.items():
        with open(nb, 'wt') as f:
            f.write(content)
def html():
    """Build the HTML docs with sphinx, then drop any stale pandas.zip.

    Raises SystemExit if sphinx-build reports failure.
    """
    check_build()
    with maybe_exclude_notebooks():
        if os.system('sphinx-build -P -b html -d build/doctrees '
                     'source build/html'):
            raise SystemExit("Building HTML failed.")
        try:
            # remove stale file
            os.remove('build/html/pandas.zip')
        except OSError:
            # BUG FIX: was a bare 'except:' which also swallowed
            # KeyboardInterrupt/SystemExit; only a missing/unremovable
            # file should be ignored here.
            pass
def zip_html():
    """Zip the built HTML docs into build/html/pandas.zip (best effort)."""
    try:
        print("\nZipping up HTML docs...")
        # just in case the wonky build box doesn't have zip
        # don't fail this.
        os.system('cd build; rm -f html/pandas.zip; zip html/pandas.zip -r -q html/* ')
        print("\n")
    except Exception:
        # BUG FIX: was a bare 'except:' (also caught KeyboardInterrupt /
        # SystemExit); this step is deliberately best-effort, so ignore
        # ordinary failures only.
        pass
def latex():
    """Build the LaTeX docs and render the PDF via sphinx's Makefile.

    No-op (with a message) on Windows. Raises SystemExit on failure of
    either the sphinx build or the LaTeX rendering step.
    """
    check_build()
    if sys.platform != 'win32':
        # LaTeX format.
        if os.system('sphinx-build -j 2 -b latex -d build/doctrees '
                     'source build/latex'):
            raise SystemExit("Building LaTeX failed.")
        # Produce pdf.
        os.chdir('build/latex')
        # Call the makefile produced by sphinx...
        if os.system('make'):
            print("Rendering LaTeX failed.")
            print("You may still be able to get a usable PDF file by going into 'build/latex'")
            print("and executing 'pdflatex pandas.tex' for the requisite number of passes.")
            print("Or using the 'latex_forced' target")
            raise SystemExit
        os.chdir('../..')
    else:
        print('latex build has not been tested on windows')
def latex_forced():
    """Build the LaTeX docs and force-render the PDF with three pdflatex
    passes, ignoring LaTeX errors.

    Always exits via SystemExit asking the user to inspect the PDF
    (no-op with a message on Windows).
    """
    check_build()
    if sys.platform != 'win32':
        # LaTeX format.
        if os.system('sphinx-build -j 2 -b latex -d build/doctrees '
                     'source build/latex'):
            raise SystemExit("Building LaTeX failed.")
        # Produce pdf.
        os.chdir('build/latex')
        # Manually call pdflatex, 3 passes should ensure latex fixes up
        # all the required cross-references and such.
        os.system('pdflatex -interaction=nonstopmode pandas.tex')
        os.system('pdflatex -interaction=nonstopmode pandas.tex')
        os.system('pdflatex -interaction=nonstopmode pandas.tex')
        # BUG FIX: restore the working directory *before* exiting; the
        # original placed this chdir after the raise, making it unreachable.
        os.chdir('../..')
        raise SystemExit("You should check the file 'build/latex/pandas.pdf' for problems.")
    else:
        print('latex build has not been tested on windows')
def check_build():
    """Create every sphinx output directory, ignoring ones that exist."""
    for build_dir in (
            'build', 'build/doctrees', 'build/html',
            'build/latex', 'build/plots', 'build/_static',
            'build/_templates'):
        try:
            os.mkdir(build_dir)
        except OSError:
            pass
def all():
    """Default target: build the HTML docs (clean step currently disabled).

    NOTE: intentionally shadows the builtin all() -- the name is part of
    the CLI dispatch table (funcd), so it cannot be renamed.
    """
    # clean()
    html()
def auto_dev_build(debug=False):
    """Run the full daily docs pipeline: clean, html, upload, latex, pdf.

    E-mails a status after each upload step unless debug=True; on any
    failure, e-mails the failing step name with an '[ERROR]' message.
    """
    msg = ''
    try:
        # 'step' tracks the phase currently executing so failures can be
        # reported precisely in the except handler below.
        step = 'clean'
        clean()
        step = 'html'
        html()
        step = 'upload dev'
        upload_dev()
        if not debug:
            sendmail(step)
        step = 'latex'
        latex()
        step = 'upload pdf'
        upload_dev_pdf()
        if not debug:
            sendmail(step)
    except (Exception, SystemExit) as inst:
        msg = str(inst) + '\n'
        sendmail(step, '[ERROR] ' + msg)
def sendmail(step=None, err_msg=None):
    """E-mail a success/failure notification for a daily docs build step.

    A message containing '[ERROR]' is reported as a failure; anything else
    (including None) is reported as success for the given step. Sender,
    recipient and SMTP credentials are read from the user's config dir.
    """
    from_name, to_name = _get_config()
    if step is None:
        step = ''
    if err_msg is None or '[ERROR]' not in err_msg:
        msgstr = 'Daily docs %s completed successfully' % step
        subject = "DOC: %s successful" % step
    else:
        msgstr = err_msg
        subject = "DOC: %s failed" % step
    import smtplib
    # NOTE(review): 'email.MIMEText' is the Python 2 import path; on
    # Python 3 this would be 'email.mime.text' -- confirm target version.
    from email.MIMEText import MIMEText
    msg = MIMEText(msgstr)
    msg['Subject'] = subject
    msg['From'] = from_name
    msg['To'] = to_name
    server_str, port, login, pwd = _get_credentials()
    server = smtplib.SMTP(server_str, port)
    server.ehlo()
    # Upgrade the connection to TLS before authenticating.
    server.starttls()
    server.ehlo()
    server.login(login, pwd)
    try:
        server.sendmail(from_name, to_name, msg.as_string())
    finally:
        server.close()
def _get_dir(subdir=None):
import getpass
USERNAME = getpass.getuser()
if sys.platform == 'darwin':
HOME = '/Users/%s' % USERNAME
else:
HOME = '/home/%s' % USERNAME
if subdir is None:
subdir = '/code/scripts/config'
conf_dir = '%s/%s' % (HOME, subdir)
return conf_dir
def _get_credentials():
    """Load SMTP connection details from the user's config directory.

    Reads '<config>/credentials' as 'server,port,user,domain' and the
    base64-encoded password from '<config>/cron_email_pwd'; returns
    (server, port, login, pwd) where login is 'user@domain.com'.
    """
    tmp_dir = _get_dir()
    cred = '%s/credentials' % tmp_dir
    with open(cred, 'r') as fh:
        server, port, un, domain = fh.read().split(',')
    port = int(port)
    login = un + '@' + domain + '.com'
    import base64
    with open('%s/cron_email_pwd' % tmp_dir, 'r') as fh:
        # Password is stored base64-encoded (obfuscation, not encryption).
        pwd = base64.b64decode(fh.read())
    return server, port, login, pwd
def _get_config():
    """Read the (from, to) e-mail addresses from '<config>/addresses'."""
    conf_dir = _get_dir()
    with open('%s/addresses' % conf_dir, 'r') as fh:
        from_name, to_name = fh.read().split(',')
    return from_name, to_name
# Dispatch table mapping CLI target names to their builder functions.
funcd = {
    'html': html,
    'zip_html': zip_html,
    'upload_dev': upload_dev,
    'upload_stable': upload_stable,
    'upload_dev_pdf': upload_dev_pdf,
    'upload_stable_pdf': upload_stable_pdf,
    'latex': latex,
    'latex_forced': latex_forced,
    'clean': clean,
    'auto_dev': auto_dev_build,
    'auto_debug': lambda: auto_dev_build(True),
    'build_pandas': build_pandas,
    'all': all,
}
small_docs = False
# current_dir = os.getcwd()
# os.chdir(os.path.dirname(os.path.join(current_dir, __file__)))
import argparse
# NOTE(review): this parser is immediately shadowed by the second
# 'argparser = argparse.ArgumentParser(...)' definition further down;
# only that later one is actually used by main().
argparser = argparse.ArgumentParser(description="""
pandas documentation builder
""".strip())
# argparser.add_argument('-arg_name', '--arg_name',
#                        metavar='label for arg help',
#                        type=str|etc,
#                        nargs='N|*|?|+|argparse.REMAINDER',
#                        required=False,
#                        #choices='abc',
#                        help='help string',
#                        action='store|store_true')
# args = argparser.parse_args()
# print args.accumulate(args.integers)
def generate_index(api=True, single=False, **kwds):
    """Render source/index.rst from its jinja2 template.

    Parameters mirror the template variables: 'api' toggles the API /
    autosummary sections, 'single' names a lone section to build.
    """
    from jinja2 import Template
    with open("source/index.rst.template") as f:
        template = Template(f.read())
    rendered = template.render(api=api, single=single, **kwds)
    with open("source/index.rst", "w") as f:
        f.write(rendered)
import argparse
# The parser actually consumed by main(); build targets are passed through
# as unparsed positional arguments.
argparser = argparse.ArgumentParser(description="pandas documentation builder",
                                    epilog="Targets : %s" % funcd.keys())
# NOTE(review): 'Ommit' below is a typo in user-visible help text; left
# untouched here since changing it alters runtime output.
argparser.add_argument('--no-api',
                       default=False,
                       help='Ommit api and autosummary',
                       action='store_true')
argparser.add_argument('--single',
                       metavar='FILENAME',
                       type=str,
                       default=False,
                       help='filename of section to compile, e.g. "indexing"')
argparser.add_argument('--user',
                       type=str,
                       default=False,
                       help='Username to connect to the pydata server')
def main():
    """Entry point: parse CLI flags and dispatch the requested target(s).

    Positional (unparsed) args select the build target via funcd, or the
    two-argument 'build_previous'/'upload_previous' forms with a version.
    """
    args, unknown = argparser.parse_known_args()
    # Targets are consumed positionally by the legacy dispatch below, so
    # put the unparsed args back into sys.argv.
    sys.argv = [sys.argv[0]] + unknown
    if args.single:
        args.single = os.path.basename(args.single).split(".rst")[0]
    if 'clean' in unknown:
        args.single = False
    generate_index(api=not args.no_api and not args.single, single=args.single)
    if len(sys.argv) > 2:
        ftype = sys.argv[1]
        ver = sys.argv[2]
        if ftype == 'build_previous':
            # BUG FIX: build_prev() only takes the version; the original
            # passed user=args.user, which raised TypeError.
            build_prev(ver)
        if ftype == 'upload_previous':
            upload_prev(ver, user=args.user)
    elif len(sys.argv) == 2:
        for arg in sys.argv[1:]:
            func = funcd.get(arg)
            if func is None:
                raise SystemExit('Do not know how to handle %s; valid args are %s' % (
                    arg, list(funcd.keys())))
            if args.user:
                func(user=args.user)
            else:
                func()
    else:
        # No target given: build everything (unused local removed).
        all()
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status.
    import sys
    sys.exit(main())
| 28.792711 | 95 | 0.597073 |
acef565b15faf4da1dc702f8a810652d03b572da | 7,679 | py | Python | nbsafety/data_model/update_protocol.py | nbsafety-project/nbsafety | c79d24bad7eec99b1e9e3ca38d005a24c03b6eb4 | [
"BSD-3-Clause"
] | 96 | 2020-05-18T18:58:44.000Z | 2022-03-19T13:09:07.000Z | nbsafety/data_model/update_protocol.py | nbsafety-project/nbsafety | c79d24bad7eec99b1e9e3ca38d005a24c03b6eb4 | [
"BSD-3-Clause"
] | 56 | 2020-06-01T06:45:49.000Z | 2022-03-27T00:06:52.000Z | nbsafety/data_model/update_protocol.py | nbsafety-project/nbsafety | c79d24bad7eec99b1e9e3ca38d005a24c03b6eb4 | [
"BSD-3-Clause"
] | 4 | 2020-08-25T18:17:02.000Z | 2021-06-02T14:32:12.000Z | # -*- coding: future_annotations -*-
import logging
from typing import TYPE_CHECKING
from nbsafety.data_model.timestamp import Timestamp
from nbsafety.singletons import nbs, tracer
if TYPE_CHECKING:
from typing import Generator, Iterable, Set
# avoid circular imports
from nbsafety.data_model.data_symbol import DataSymbol
logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR)
class UpdateProtocol:
    """Propagates a symbol update through the dependency graph.

    Given the symbol that was just written or mutated, this walks the graph
    of dependent symbols (and their containing/contained namespaces),
    refreshing directly-updated symbols and marking downstream dependents
    as stale.
    """

    def __init__(self, updated_sym: DataSymbol) -> None:
        self.updated_sym = updated_sym
        # Symbols already visited during this propagation pass.
        self.seen: Set[DataSymbol] = set()

    def __call__(self, new_deps: Set[DataSymbol], mutated: bool, propagate_to_namespace_descendents: bool, refresh: bool) -> None:
        """Run the full update protocol for `self.updated_sym`.

        Args:
            new_deps: symbols on the RHS of the update; these are added to
                `seen` so staleness is not propagated to them.
            mutated: True for in-place mutation (the update applies to all
                aliases of the object), False for a rebind.
            propagate_to_namespace_descendents: whether members of the
                updated symbol's namespace should also be treated as updated.
            refresh: whether directly-updated symbols get refreshed.
        """
        # in most cases, mutated implies that we should propagate to namespace descendents, since we
        # do not know how the mutation affects the namespace members. The exception is for specific
        # known events such as 'list.append()' or 'list.extend()' since we know these do not update
        # the namespace members.
        logger.warning(
            "updated sym %s (containing scope %s) with children %s",
            self.updated_sym,
            self.updated_sym.containing_scope,
            self.updated_sym.children,
        )
        # A mutation affects every alias of the object; a rebind only this
        # symbol.
        directly_updated_symbols = nbs().aliases[self.updated_sym.obj_id] if mutated else {self.updated_sym}
        directly_updated_symbols |= self._maybe_get_adhoc_pandas_updated_syms()
        self._collect_updated_symbols_and_refresh_namespaces(
            directly_updated_symbols, propagate_to_namespace_descendents
        )
        logger.warning(
            'for symbol %s: mutated=%s; updated_symbols=%s', self.updated_sym, mutated, directly_updated_symbols
        )
        # Snapshot: everything collected so far (updated symbols plus their
        # namespace ancestors) becomes a propagation root below.
        updated_symbols_with_ancestors = set(self.seen)
        logger.warning('all updated symbols for symbol %s: %s', self.updated_sym, updated_symbols_with_ancestors)
        tracer().this_stmt_updated_symbols |= self.seen
        if refresh:
            for updated_sym in directly_updated_symbols:
                if not updated_sym.is_stale and updated_sym is not self.updated_sym:
                    updated_sym.refresh()
        self.seen |= new_deps  # don't propagate to stuff on RHS
        for dsym in updated_symbols_with_ancestors:
            self._propagate_staleness_to_deps(dsym, skip_seen_check=True)

    def _maybe_get_adhoc_pandas_updated_syms(self):
        """Special case for pandas: when a Series column of a DataFrame is
        updated, also return the matching subscript/attribute symbols in the
        DataFrame's namespace (empty set when pandas is absent or the shapes
        don't match)."""
        try:
            import pandas
        except ImportError:
            return set()
        if self.updated_sym.obj is None or not isinstance(self.updated_sym.obj, pandas.Series):
            return set()
        ns = self.updated_sym.containing_namespace
        if ns is None or ns.obj is None or not isinstance(ns.obj, pandas.DataFrame):
            return set()
        name = self.updated_sym.name
        # The same column may be tracked both as df['col'] (subscript) and
        # df.col (attribute); collect whichever of the two exist.
        return {
            ns.lookup_data_symbol_by_name_this_indentation(name, is_subscript=is_sub) for is_sub in [True, False]
        } - {None}

    def _collect_updated_symbols_and_refresh_namespaces(
        self, updated_symbols: Iterable[DataSymbol], refresh_descendent_namespaces: bool
    ) -> None:
        """Mark each symbol as updated now, and recursively do the same for
        aliases of its containing namespace (and, optionally, the members of
        its own namespace)."""
        logger.warning('collecting updated symbols and namespaces for %s', updated_symbols)
        for dsym in updated_symbols:
            if dsym.is_import or dsym in self.seen:
                continue
            dsym.updated_timestamps.add(Timestamp.current())
            self.seen.add(dsym)
            for cell in dsym.cells_where_deep_live:
                cell.add_used_cell_counter(dsym, nbs().cell_counter())
            containing_ns = dsym.containing_namespace
            if containing_ns is not None:
                logger.warning('containing scope for %s: %s; ids %s, %s', dsym, containing_ns, dsym.obj_id, containing_ns.obj_id)
                # The member is fresh again, and the namespace's "newest
                # descendent" timestamp advances to now.
                containing_ns.namespace_stale_symbols.discard(dsym)
                containing_ns.max_descendent_timestamp = Timestamp.current()
                self._collect_updated_symbols_and_refresh_namespaces(
                    nbs().aliases[containing_ns.obj_id], refresh_descendent_namespaces
                )
            if refresh_descendent_namespaces:
                dsym_ns = dsym.namespace
                if dsym_ns is not None:
                    self._collect_updated_symbols_and_refresh_namespaces(
                        dsym_ns.all_data_symbols_this_indentation(), refresh_descendent_namespaces
                    )

    def _propagate_staleness_to_namespace_parents(self, dsym: DataSymbol, skip_seen_check: bool = False) -> None:
        """Record `dsym` as a stale member of its containing namespace, and
        propagate staleness onward through each alias of that namespace."""
        if not skip_seen_check and dsym in self.seen:
            return
        self.seen.add(dsym)
        containing_ns = dsym.containing_namespace
        if containing_ns is None:
            return
        logger.warning("add %s to namespace stale symbols of %s", dsym, containing_ns)
        containing_ns.namespace_stale_symbols.add(dsym)
        for containing_alias in nbs().aliases[containing_ns.obj_id]:
            self._propagate_staleness_to_namespace_parents(containing_alias)
        for containing_alias in nbs().aliases[containing_ns.obj_id]:
            # do this in 2 separate loops to make sure all containing_alias are added to 'seen'
            # works around the issue when one alias depends on another
            for child in self._non_class_to_instance_children(containing_alias):
                logger.warning('propagate from namespace parent of %s to child %s', dsym, child)
                self._propagate_staleness_to_deps(child)

    def _non_class_to_instance_children(self, dsym: DataSymbol) -> Generator[DataSymbol, None, None]:
        """Yield dsym's children, filtering out class -> instance edges
        (except when `dsym` is the symbol that was actually updated)."""
        if self.updated_sym is dsym:
            yield from dsym.children
            return
        for child in dsym.children:
            # Next, complicated check to avoid propagating along a class -> instance edge.
            # The only time this is OK is when we changed the class, which will not be the case here.
            child_namespace = child.namespace
            if child_namespace is not None and child_namespace.cloned_from is not None:
                if child_namespace.cloned_from.obj_id == dsym.obj_id:
                    continue
            yield child

    def _propagate_staleness_to_namespace_children(self, dsym: DataSymbol, skip_seen_check: bool = False) -> None:
        """Propagate staleness into every (non-class) member of dsym's own
        namespace, if it has one."""
        if not skip_seen_check and dsym in self.seen:
            return
        self.seen.add(dsym)
        self_ns = nbs().namespaces.get(dsym.obj_id, None)
        if self_ns is None:
            return
        for ns_child in self_ns.all_data_symbols_this_indentation(exclude_class=True):
            logger.warning('propagate from %s to namespace child %s', dsym, ns_child)
            self._propagate_staleness_to_deps(ns_child)

    def _propagate_staleness_to_deps(self, dsym: DataSymbol, skip_seen_check: bool = False) -> None:
        """Mark `dsym` stale relative to the updated symbol (unless it was
        itself updated this statement) and recurse into its dependents,
        including namespace parents and children."""
        if not skip_seen_check and dsym in self.seen:
            return
        self.seen.add(dsym)
        if dsym not in nbs().updated_symbols and dsym not in tracer().this_stmt_updated_symbols:
            if dsym.should_mark_stale(self.updated_sym):
                # Record which fresher ancestor caused the staleness and when.
                dsym.fresher_ancestors.add(self.updated_sym)
                dsym.fresher_ancestor_timestamps.add(self.updated_sym.timestamp)
                dsym.required_timestamp = Timestamp.current()
                self._propagate_staleness_to_namespace_parents(dsym, skip_seen_check=True)
                self._propagate_staleness_to_namespace_children(dsym, skip_seen_check=True)
        for child in self._non_class_to_instance_children(dsym):
            logger.warning('propagate %s %s to %s', dsym, dsym.obj_id, child)
            self._propagate_staleness_to_deps(child)
acef59cdb05effc58090c36f2c4a44affedbc218 | 1,222 | py | Python | src/main.py | quizbooks/despise | f904fb026894749aefc303ba40fdd3b14d78d09b | [
"MIT"
] | null | null | null | src/main.py | quizbooks/despise | f904fb026894749aefc303ba40fdd3b14d78d09b | [
"MIT"
] | null | null | null | src/main.py | quizbooks/despise | f904fb026894749aefc303ba40fdd3b14d78d09b | [
"MIT"
] | null | null | null | import logging
from os import environ
import os.path
import sys
sys.dont_write_bytecode = True
import discord
from bot import MaliceBot
from dotenv import load_dotenv
# Configure the jishaku debug cog before the bot is constructed.
environ["JISHAKU_NO_UNDERSCORE"] = "True"
environ["JISHAKU_HIDE"] = "True"
# Load secrets (bot token etc.) from config/.env next to this file.
dotenv_path = os.path.join(os.path.dirname(__file__), "config/.env")
load_dotenv(dotenv_path)
# Enable every gateway intent the bot relies on.
intent_data = {
    x: True for x in (
        "guilds", "bans", "emojis", "integrations", "webhooks", "invites", "voice_states", "messages", "reactions",
        "typing", "members"
    )
}
intents = discord.Intents(**intent_data)
# Restrict pings: only direct user mentions are allowed.
mentions = discord.AllowedMentions(
    everyone=False, replied_user=False, roles=False, users=True
)
bot_data = {
    "max_messages": 750,
    "allowed_mentions": mentions,
    "case_insensitive": True,
    "token": os.environ.get("token"),
    "intents": intents
}
# owner_id=852933534704205864,
# strip_after_prefix=True,
malice = MaliceBot(**bot_data)
# Log everything (DEBUG and up) to config/logs/malice.log, truncated on start.
logging.basicConfig(
    filename=f"{malice.cwd}/config/logs/malice.log",
    filemode="w",
    format="%(asctime)s:%(levelname)s:%(name)s: %(message)s",
    datefmt="%d/%m/%y | %H:%M:%S",
    level=logging.DEBUG,
)
if __name__ == "__main__":
    malice.starter(os.environ.get("token"))
acef5a0bafbfdd0ca32c02ea0bffcb56aaadbce5 | 282,446 | py | Python | core/domain/exp_domain_test.py | jlau323/oppia | 37438a2c9bf7e66892fb9a6a93a1fe4ca7a82691 | [
"Apache-2.0"
] | null | null | null | core/domain/exp_domain_test.py | jlau323/oppia | 37438a2c9bf7e66892fb9a6a93a1fe4ca7a82691 | [
"Apache-2.0"
] | null | null | null | core/domain/exp_domain_test.py | jlau323/oppia | 37438a2c9bf7e66892fb9a6a93a1fe4ca7a82691 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for exploration domain objects and methods defined on them."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import copy
import os
import re
from constants import constants
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import html_validation_service
from core.domain import param_domain
from core.domain import state_domain
from core.platform import models
from core.tests import test_utils
import feconf
import python_utils
import utils
(exp_models,) = models.Registry.import_models([models.NAMES.exploration])
def mock_get_filename_with_dimensions(filename, unused_exp_id):
    """Test double: regenerate an image filename with fixed 490x120 dims,
    ignoring the exploration id."""
    return html_validation_service.regenerate_image_filename_using_dimensions(
        filename, 490, 120)
class ExplorationChangeTests(test_utils.GenericTestBase):
    """Tests for constructing and validating ExplorationChange objects."""
    def test_exp_change_object_with_missing_cmd(self):
        """A change dict without a 'cmd' key is rejected."""
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Missing cmd key in change dict'):
            exp_domain.ExplorationChange({'invalid': 'data'})
    def test_exp_change_object_with_invalid_cmd(self):
        """An unrecognized 'cmd' value is rejected."""
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Command invalid is not allowed'):
            exp_domain.ExplorationChange({'cmd': 'invalid'})
    def test_exp_change_object_with_missing_attribute_in_cmd(self):
        """A cmd missing one of its required attributes is rejected."""
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'The following required attributes are missing: '
                'new_value')):
            exp_domain.ExplorationChange({
                'cmd': 'edit_state_property',
                'property_name': 'content',
                'old_value': 'old_value'
            })
    def test_exp_change_object_with_extra_attribute_in_cmd(self):
        """A cmd carrying an unexpected extra attribute is rejected."""
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'The following extra attributes are present: invalid')):
            exp_domain.ExplorationChange({
                'cmd': 'rename_state',
                'old_state_name': 'old_state_name',
                'new_state_name': 'new_state_name',
                'invalid': 'invalid'
            })
    def test_exp_change_object_with_invalid_exploration_property(self):
        """edit_exploration_property only accepts allowed property names."""
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'Value for property_name in cmd edit_exploration_property: '
                'invalid is not allowed')):
            exp_domain.ExplorationChange({
                'cmd': 'edit_exploration_property',
                'property_name': 'invalid',
                'old_value': 'old_value',
                'new_value': 'new_value',
            })
    def test_exp_change_object_with_invalid_state_property(self):
        """edit_state_property only accepts allowed property names."""
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'Value for property_name in cmd edit_state_property: '
                'invalid is not allowed')):
            exp_domain.ExplorationChange({
                'cmd': 'edit_state_property',
                'state_name': 'state_name',
                'property_name': 'invalid',
                'old_value': 'old_value',
                'new_value': 'new_value',
            })
    def test_exp_change_object_with_create_new(self):
        """A valid create_new change exposes its attributes."""
        exp_change_object = exp_domain.ExplorationChange({
            'cmd': 'create_new',
            'category': 'category',
            'title': 'title'
        })
        self.assertEqual(exp_change_object.cmd, 'create_new')
        self.assertEqual(exp_change_object.category, 'category')
        self.assertEqual(exp_change_object.title, 'title')
    def test_exp_change_object_with_add_state(self):
        """A valid add_state change exposes its attributes."""
        exp_change_object = exp_domain.ExplorationChange({
            'cmd': 'add_state',
            'state_name': 'state_name',
        })
        self.assertEqual(exp_change_object.cmd, 'add_state')
        self.assertEqual(exp_change_object.state_name, 'state_name')
    def test_exp_change_object_with_rename_state(self):
        """A valid rename_state change exposes its attributes."""
        exp_change_object = exp_domain.ExplorationChange({
            'cmd': 'rename_state',
            'old_state_name': 'old_state_name',
            'new_state_name': 'new_state_name'
        })
        self.assertEqual(exp_change_object.cmd, 'rename_state')
        self.assertEqual(exp_change_object.old_state_name, 'old_state_name')
        self.assertEqual(exp_change_object.new_state_name, 'new_state_name')
    def test_exp_change_object_with_delete_state(self):
        """A valid delete_state change exposes its attributes."""
        exp_change_object = exp_domain.ExplorationChange({
            'cmd': 'delete_state',
            'state_name': 'state_name',
        })
        self.assertEqual(exp_change_object.cmd, 'delete_state')
        self.assertEqual(exp_change_object.state_name, 'state_name')
    def test_exp_change_object_with_edit_state_property(self):
        """A valid edit_state_property change exposes its attributes."""
        exp_change_object = exp_domain.ExplorationChange({
            'cmd': 'edit_state_property',
            'state_name': 'state_name',
            'property_name': 'content',
            'new_value': 'new_value',
            'old_value': 'old_value'
        })
        self.assertEqual(exp_change_object.cmd, 'edit_state_property')
        self.assertEqual(exp_change_object.state_name, 'state_name')
        self.assertEqual(exp_change_object.property_name, 'content')
        self.assertEqual(exp_change_object.new_value, 'new_value')
        self.assertEqual(exp_change_object.old_value, 'old_value')
    def test_exp_change_object_with_edit_exploration_property(self):
        """A valid edit_exploration_property change exposes its attributes."""
        exp_change_object = exp_domain.ExplorationChange({
            'cmd': 'edit_exploration_property',
            'property_name': 'title',
            'new_value': 'new_value',
            'old_value': 'old_value'
        })
        self.assertEqual(exp_change_object.cmd, 'edit_exploration_property')
        self.assertEqual(exp_change_object.property_name, 'title')
        self.assertEqual(exp_change_object.new_value, 'new_value')
        self.assertEqual(exp_change_object.old_value, 'old_value')
    def test_exp_change_object_with_migrate_states_schema_to_latest_version(
            self):
        """A valid schema-migration change exposes its attributes."""
        exp_change_object = exp_domain.ExplorationChange({
            'cmd': 'migrate_states_schema_to_latest_version',
            'from_version': 'from_version',
            'to_version': 'to_version',
        })
        self.assertEqual(
            exp_change_object.cmd, 'migrate_states_schema_to_latest_version')
        self.assertEqual(exp_change_object.from_version, 'from_version')
        self.assertEqual(exp_change_object.to_version, 'to_version')
    def test_exp_change_object_with_revert_commit(self):
        """A valid revert-commit change exposes its attributes."""
        exp_change_object = exp_domain.ExplorationChange({
            'cmd': exp_models.ExplorationModel.CMD_REVERT_COMMIT,
            'version_number': 'version_number'
        })
        self.assertEqual(
            exp_change_object.cmd,
            exp_models.ExplorationModel.CMD_REVERT_COMMIT)
        self.assertEqual(exp_change_object.version_number, 'version_number')
    def test_to_dict(self):
        """to_dict() round-trips the original change dict unchanged."""
        exp_change_dict = {
            'cmd': 'create_new',
            'title': 'title',
            'category': 'category'
        }
        exp_change_object = exp_domain.ExplorationChange(exp_change_dict)
        self.assertEqual(exp_change_object.to_dict(), exp_change_dict)
class ExplorationVersionsDiffDomainUnitTests(test_utils.GenericTestBase):
    """Test the exploration versions difference domain object."""
    def setUp(self):
        """Save a sample exploration from a YAML fixture and fetch it."""
        super(ExplorationVersionsDiffDomainUnitTests, self).setUp()
        self.exp_id = 'exp_id1'
        test_exp_filepath = os.path.join(
            feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml')
        yaml_content = utils.get_file_contents(test_exp_filepath)
        assets_list = []
        exp_services.save_new_exploration_from_yaml_and_assets(
            feconf.SYSTEM_COMMITTER_ID, yaml_content, self.exp_id,
            assets_list)
        self.exploration = exp_fetchers.get_exploration_by_id(self.exp_id)
    def test_correct_creation_of_version_diffs(self):
        """ExplorationVersionsDiff tracks added, deleted and renamed states
        across several change-list shapes, including chained renames and
        add-then-delete sequences that cancel out.
        """
        # Rename a state.
        self.exploration.rename_state('Home', 'Renamed state')
        change_list = [exp_domain.ExplorationChange({
            'cmd': 'rename_state',
            'old_state_name': 'Home',
            'new_state_name': 'Renamed state'
        })]
        exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
        self.assertEqual(exp_versions_diff.added_state_names, [])
        self.assertEqual(exp_versions_diff.deleted_state_names, [])
        self.assertEqual(
            exp_versions_diff.old_to_new_state_names, {
                'Home': 'Renamed state'
            })
        self.exploration.version += 1
        # Add a state.
        self.exploration.add_states(['New state'])
        self.exploration.states['New state'] = copy.deepcopy(
            self.exploration.states['Renamed state'])
        change_list = [exp_domain.ExplorationChange({
            'cmd': 'add_state',
            'state_name': 'New state',
        })]
        exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
        self.assertEqual(exp_versions_diff.added_state_names, ['New state'])
        self.assertEqual(exp_versions_diff.deleted_state_names, [])
        self.assertEqual(exp_versions_diff.old_to_new_state_names, {})
        self.exploration.version += 1
        # Delete state.
        self.exploration.delete_state('New state')
        change_list = [exp_domain.ExplorationChange({
            'cmd': 'delete_state',
            'state_name': 'New state'
        })]
        exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
        self.assertEqual(exp_versions_diff.added_state_names, [])
        self.assertEqual(exp_versions_diff.deleted_state_names, ['New state'])
        self.assertEqual(exp_versions_diff.old_to_new_state_names, {})
        self.exploration.version += 1
        # Test addition and multiple renames. The chained renames collapse:
        # only the final name counts as added.
        self.exploration.add_states(['New state'])
        self.exploration.states['New state'] = copy.deepcopy(
            self.exploration.states['Renamed state'])
        self.exploration.rename_state('New state', 'New state2')
        self.exploration.rename_state('New state2', 'New state3')
        change_list = [exp_domain.ExplorationChange({
            'cmd': 'add_state',
            'state_name': 'New state',
        }), exp_domain.ExplorationChange({
            'cmd': 'rename_state',
            'old_state_name': 'New state',
            'new_state_name': 'New state2'
        }), exp_domain.ExplorationChange({
            'cmd': 'rename_state',
            'old_state_name': 'New state2',
            'new_state_name': 'New state3'
        })]
        exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
        self.assertEqual(exp_versions_diff.added_state_names, ['New state3'])
        self.assertEqual(exp_versions_diff.deleted_state_names, [])
        self.assertEqual(exp_versions_diff.old_to_new_state_names, {})
        self.exploration.version += 1
        # Test addition, rename and deletion. The add/delete pair cancels,
        # so the diff reports no net change.
        self.exploration.add_states(['New state 2'])
        self.exploration.rename_state('New state 2', 'Renamed state 2')
        self.exploration.delete_state('Renamed state 2')
        change_list = [exp_domain.ExplorationChange({
            'cmd': 'add_state',
            'state_name': 'New state 2'
        }), exp_domain.ExplorationChange({
            'cmd': 'rename_state',
            'old_state_name': 'New state 2',
            'new_state_name': 'Renamed state 2'
        }), exp_domain.ExplorationChange({
            'cmd': 'delete_state',
            'state_name': 'Renamed state 2'
        })]
        exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
        self.assertEqual(exp_versions_diff.added_state_names, [])
        self.assertEqual(exp_versions_diff.deleted_state_names, [])
        self.assertEqual(exp_versions_diff.old_to_new_state_names, {})
        self.exploration.version += 1
        # Test multiple renames and deletion. The deletion is reported
        # against the state's original (pre-rename) name.
        self.exploration.rename_state('New state3', 'Renamed state 3')
        self.exploration.rename_state('Renamed state 3', 'Renamed state 4')
        self.exploration.delete_state('Renamed state 4')
        change_list = [exp_domain.ExplorationChange({
            'cmd': 'rename_state',
            'old_state_name': 'New state3',
            'new_state_name': 'Renamed state 3'
        }), exp_domain.ExplorationChange({
            'cmd': 'rename_state',
            'old_state_name': 'Renamed state 3',
            'new_state_name': 'Renamed state 4'
        }), exp_domain.ExplorationChange({
            'cmd': 'delete_state',
            'state_name': 'Renamed state 4'
        })]
        exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
        self.assertEqual(exp_versions_diff.added_state_names, [])
        self.assertEqual(
            exp_versions_diff.deleted_state_names, ['New state3'])
        self.assertEqual(exp_versions_diff.old_to_new_state_names, {})
        self.exploration.version += 1
    def test_cannot_create_exploration_change_with_invalid_change_dict(self):
        """A change dict without a 'cmd' key is rejected."""
        with self.assertRaisesRegexp(
            Exception, 'Missing cmd key in change dict'):
            exp_domain.ExplorationChange({
                'invalid_cmd': 'invalid'
            })
    def test_cannot_create_exploration_change_with_invalid_cmd(self):
        """An unrecognized 'cmd' value is rejected."""
        with self.assertRaisesRegexp(
            Exception, 'Command invalid_cmd is not allowed'):
            exp_domain.ExplorationChange({
                'cmd': 'invalid_cmd'
            })
    def test_cannot_create_exploration_change_with_invalid_state_property(self):
        """Valid state properties are accepted; invalid ones are rejected."""
        exp_change = exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'property_name': exp_domain.STATE_PROPERTY_INTERACTION_ID,
            'state_name': '',
            'new_value': ''
        })
        self.assertTrue(isinstance(exp_change, exp_domain.ExplorationChange))
        with self.assertRaisesRegexp(
            Exception,
            'Value for property_name in cmd edit_state_property: '
            'invalid_property is not allowed'):
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'property_name': 'invalid_property',
                'state_name': '',
                'new_value': ''
            })
    def test_cannot_create_exploration_change_with_invalid_exploration_property(
            self):
        """Valid exploration properties are accepted; invalid are rejected."""
        exp_change = exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
            'property_name': 'title',
            'new_value': ''
        })
        self.assertTrue(isinstance(exp_change, exp_domain.ExplorationChange))
        with self.assertRaisesRegexp(
            Exception,
            'Value for property_name in cmd edit_exploration_property: '
            'invalid_property is not allowed'):
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
                'property_name': 'invalid_property',
                'new_value': ''
            })
    def test_revert_exploration_commit(self):
        """A revert-commit change exposes the supplied version number."""
        exp_change = exp_domain.ExplorationChange({
            'cmd': exp_models.ExplorationModel.CMD_REVERT_COMMIT,
            'version_number': 1
        })
        self.assertEqual(exp_change.version_number, 1)
        exp_change = exp_domain.ExplorationChange({
            'cmd': exp_models.ExplorationModel.CMD_REVERT_COMMIT,
            'version_number': 2
        })
        self.assertEqual(exp_change.version_number, 2)
class ExpVersionReferenceTests(test_utils.GenericTestBase):
    """Tests for the ExpVersionReference domain object."""

    def test_create_exp_version_reference_object(self):
        """A valid reference serializes to the expected dict."""
        reference = exp_domain.ExpVersionReference('exp_id', 1)
        expected_dict = {
            'exp_id': 'exp_id',
            'version': 1
        }
        self.assertEqual(reference.to_dict(), expected_dict)

    def test_validate_exp_version(self):
        """A non-integer version is rejected on construction."""
        expected_error = (
            'Expected version to be an int, received invalid_version')
        with self.assertRaisesRegexp(Exception, expected_error):
            exp_domain.ExpVersionReference('exp_id', 'invalid_version')

    def test_validate_exp_id(self):
        """A non-string exploration id is rejected on construction."""
        expected_error = 'Expected exp_id to be a str, received 0'
        with self.assertRaisesRegexp(Exception, expected_error):
            exp_domain.ExpVersionReference(0, 1)
class ExplorationDomainUnitTests(test_utils.GenericTestBase):
"""Test the exploration domain object."""
# TODO(bhenning): The validation tests below should be split into separate
# unit tests. Also, all validation errors should be covered in the tests.
    def test_validation(self):
        """Test validation of explorations.

        Walks a single exploration through a long sequence of invalid
        configurations (state names, outcomes, rule specs, interactions,
        answer groups, language codes, param specs), asserting the expected
        validation error for each and restoring a valid configuration in
        between. The steps are order-dependent: each mutates the same
        exploration object.
        """
        exploration = exp_domain.Exploration.create_default_exploration('eid')
        exploration.init_state_name = ''
        exploration.states = {}
        exploration.title = 'Hello #'
        self._assert_validation_error(exploration, 'Invalid character #')
        exploration.title = 'Title'
        exploration.category = 'Category'
        # Note: If '/' ever becomes a valid state name, ensure that the rule
        # editor frontend template is fixed -- it currently uses '/' as a
        # sentinel for an invalid state name.
        bad_state = state_domain.State.create_default_state('/')
        exploration.states = {'/': bad_state}
        self._assert_validation_error(
            exploration, 'Invalid character / in a state name')
        new_state = state_domain.State.create_default_state('ABC')
        self.set_interaction_for_state(new_state, 'TextInput')
        # The 'states' property must be a non-empty dict of states.
        exploration.states = {}
        self._assert_validation_error(
            exploration, 'exploration has no states')
        exploration.states = {'A string #': new_state}
        self._assert_validation_error(
            exploration, 'Invalid character # in a state name')
        exploration.states = {'A string _': new_state}
        self._assert_validation_error(
            exploration, 'Invalid character _ in a state name')
        exploration.states = {'ABC': new_state}
        self._assert_validation_error(
            exploration, 'has no initial state name')
        exploration.init_state_name = 'initname'
        self._assert_validation_error(
            exploration,
            r'There is no state in \[u\'ABC\'\] corresponding to '
            'the exploration\'s initial state name initname.')
        # Test whether a default outcome to a non-existing state is invalid.
        exploration.states = {exploration.init_state_name: new_state}
        self._assert_validation_error(
            exploration, 'destination ABC is not a valid')
        # Restore a valid exploration.
        init_state = exploration.states[exploration.init_state_name]
        default_outcome = init_state.interaction.default_outcome
        default_outcome.dest = exploration.init_state_name
        init_state.update_interaction_default_outcome(default_outcome)
        exploration.validate()
        # Ensure an invalid destination can also be detected for answer groups.
        # Note: The state must keep its default_outcome, otherwise it will
        # trigger a validation error for non-terminal states needing to have a
        # default outcome. To validate the outcome of the answer group, this
        # default outcome must point to a valid state.
        init_state = exploration.states[exploration.init_state_name]
        default_outcome = init_state.interaction.default_outcome
        default_outcome.dest = exploration.init_state_name
        old_answer_groups = copy.deepcopy(init_state.interaction.answer_groups)
        old_answer_groups.append({
            'outcome': {
                'dest': exploration.init_state_name,
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': '<p>Feedback</p>'
                },
                'labelled_as_correct': False,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': ['Test']
                },
                'rule_type': 'Contains'
            }],
            'training_data': [],
            'tagged_skill_misconception_id': None
        })
        init_state.update_interaction_answer_groups(old_answer_groups)
        exploration.validate()
        interaction = init_state.interaction
        answer_groups = interaction.answer_groups
        answer_group = answer_groups[0]
        answer_group.outcome.dest = 'DEF'
        self._assert_validation_error(
            exploration, 'destination DEF is not a valid')
        # Restore a valid exploration.
        self.set_interaction_for_state(
            exploration.states[exploration.init_state_name], 'TextInput')
        answer_group.outcome.dest = exploration.init_state_name
        exploration.validate()
        # Validate RuleSpec.
        rule_spec = answer_group.rule_specs[0]
        rule_spec.inputs = {}
        self._assert_validation_error(
            exploration, 'RuleSpec \'Contains\' is missing inputs')
        rule_spec.inputs = 'Inputs string'
        self._assert_validation_error(
            exploration, 'Expected inputs to be a dict')
        rule_spec.inputs = {'x': 'Test'}
        rule_spec.rule_type = 'FakeRuleType'
        self._assert_validation_error(exploration, 'Unrecognized rule type')
        rule_spec.inputs = {'x': 15}
        rule_spec.rule_type = 'Contains'
        with self.assertRaisesRegexp(
            Exception, 'Expected list, received 15'
            ):
            exploration.validate()
        rule_spec.inputs = {'x': '{{ExampleParam}}'}
        self._assert_validation_error(
            exploration,
            'RuleSpec \'Contains\' has an input with name \'x\' which refers '
            'to an unknown parameter within the exploration: ExampleParam')
        # Restore a valid exploration.
        exploration.param_specs['ExampleParam'] = param_domain.ParamSpec(
            'UnicodeString')
        exploration.validate()
        # Validate Outcome.
        outcome = answer_group.outcome
        destination = exploration.init_state_name
        outcome.dest = None
        self._assert_validation_error(
            exploration, 'Every outcome should have a destination.')
        # Try setting the outcome destination to something other than a string.
        outcome.dest = 15
        self._assert_validation_error(
            exploration, 'Expected outcome dest to be a string')
        outcome.dest = destination
        outcome.feedback = state_domain.SubtitledHtml('feedback_1', '')
        exploration.validate()
        outcome.labelled_as_correct = 'hello'
        self._assert_validation_error(
            exploration, 'The "labelled_as_correct" field should be a boolean')
        # Test that labelled_as_correct must be False for self-loops, and that
        # this causes a strict validation failure but not a normal validation
        # failure.
        outcome.labelled_as_correct = True
        with self.assertRaisesRegexp(
            Exception, 'is labelled correct but is a self-loop.'
            ):
            exploration.validate(strict=True)
        exploration.validate()
        outcome.labelled_as_correct = False
        exploration.validate()
        outcome.param_changes = 'Changes'
        self._assert_validation_error(
            exploration, 'Expected outcome param_changes to be a list')
        outcome.param_changes = [param_domain.ParamChange(
            0, 'generator_id', {})]
        self._assert_validation_error(
            exploration,
            'Expected param_change name to be a string, received 0')
        outcome.param_changes = []
        exploration.validate()
        outcome.refresher_exploration_id = 12345
        self._assert_validation_error(
            exploration,
            'Expected outcome refresher_exploration_id to be a string')
        outcome.refresher_exploration_id = None
        exploration.validate()
        outcome.refresher_exploration_id = 'valid_string'
        exploration.validate()
        outcome.missing_prerequisite_skill_id = 12345
        self._assert_validation_error(
            exploration,
            'Expected outcome missing_prerequisite_skill_id to be a string')
        outcome.missing_prerequisite_skill_id = None
        exploration.validate()
        outcome.missing_prerequisite_skill_id = 'valid_string'
        exploration.validate()
        # Test that refresher_exploration_id must be None for non-self-loops.
        new_state_name = 'New state'
        exploration.add_states([new_state_name])
        outcome.dest = new_state_name
        outcome.refresher_exploration_id = 'another_string'
        self._assert_validation_error(
            exploration,
            'has a refresher exploration ID, but is not a self-loop')
        outcome.refresher_exploration_id = None
        exploration.validate()
        exploration.delete_state(new_state_name)
        # Validate InteractionInstance.
        interaction.id = 15
        self._assert_validation_error(
            exploration, 'Expected interaction id to be a string')
        interaction.id = 'SomeInteractionTypeThatDoesNotExist'
        self._assert_validation_error(exploration, 'Invalid interaction id')
        self.set_interaction_for_state(init_state, 'TextInput')
        valid_text_input_cust_args = init_state.interaction.customization_args
        exploration.validate()
        interaction.customization_args = []
        self._assert_validation_error(
            exploration, 'Expected customization args to be a dict')
        interaction.customization_args = {15: ''}
        self._assert_validation_error(
            exploration,
            (
                'Expected customization arg value to be a '
                'InteractionCustomizationArg'
            )
        )
        interaction.customization_args = {
            15: state_domain.InteractionCustomizationArg('', {
                'type': 'unicode'
            })
        }
        self._assert_validation_error(
            exploration, 'Invalid customization arg name')
        interaction.customization_args = valid_text_input_cust_args
        self.set_interaction_for_state(init_state, 'TextInput')
        exploration.validate()
        interaction.answer_groups = {}
        self._assert_validation_error(
            exploration, 'Expected answer groups to be a list')
        interaction.answer_groups = answer_groups
        self.set_interaction_for_state(init_state, 'EndExploration')
        self._assert_validation_error(
            exploration,
            'Terminal interactions must not have a default outcome.')
        self.set_interaction_for_state(init_state, 'TextInput')
        init_state.update_interaction_default_outcome(None)
        self._assert_validation_error(
            exploration,
            'Non-terminal interactions must have a default outcome.')
        self.set_interaction_for_state(init_state, 'EndExploration')
        self._assert_validation_error(
            exploration,
            'Terminal interactions must not have any answer groups.')
        # A terminal interaction without a default outcome or answer group is
        # valid. This resets the exploration back to a valid state.
        init_state.update_interaction_answer_groups([])
        exploration.validate()
        # Restore a valid exploration.
        self.set_interaction_for_state(init_state, 'TextInput')
        answer_groups_list = [
            answer_group.to_dict() for answer_group in answer_groups]
        init_state.update_interaction_answer_groups(answer_groups_list)
        init_state.update_interaction_default_outcome(default_outcome)
        exploration.validate()
        # A solution without any hints is invalid.
        solution_dict = {
            'answer_is_exclusive': True,
            'correct_answer': 'hello_world!',
            'explanation': {
                'content_id': 'solution',
                'html': 'hello_world is a string'
            }
        }
        solution = state_domain.Solution.from_dict(
            init_state.interaction.id, solution_dict)
        init_state.update_interaction_solution(solution)
        self._assert_validation_error(
            exploration,
            re.escape('Hint(s) must be specified if solution is specified'))
        init_state.update_interaction_solution(None)
        interaction.hints = {}
        self._assert_validation_error(
            exploration, 'Expected hints to be a list')
        interaction.hints = []
        # Validate AnswerGroup.
        answer_groups_dict = {
            'outcome': {
                'dest': exploration.init_state_name,
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': 'Feedback'
                },
                'labelled_as_correct': False,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': ['Test']
                },
                'rule_type': 'Contains'
            }],
            'training_data': [],
            'tagged_skill_misconception_id': 1
        }
        init_state.update_interaction_answer_groups([answer_groups_dict])
        self._assert_validation_error(
            exploration,
            'Expected tagged skill misconception id to be a str, received 1')
        answer_groups_dict = {
            'outcome': {
                'dest': exploration.init_state_name,
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': 'Feedback'
                },
                'labelled_as_correct': False,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': ['Test']
                },
                'rule_type': 'Contains'
            }],
            'training_data': [],
            'tagged_skill_misconception_id':
                'invalid_tagged_skill_misconception_id'
        }
        init_state.update_interaction_answer_groups([answer_groups_dict])
        self._assert_validation_error(
            exploration,
            'Expected the format of tagged skill misconception id '
            'to be <skill_id>-<misconception_id>, received '
            'invalid_tagged_skill_misconception_id')
        init_state.interaction.answer_groups[0].rule_specs = {}
        self._assert_validation_error(
            exploration, 'Expected answer group rules to be a list')
        first_answer_group = init_state.interaction.answer_groups[0]
        first_answer_group.tagged_skill_misconception_id = None
        first_answer_group.rule_specs = []
        self._assert_validation_error(
            exploration,
            'There must be at least one rule or training data for each'
            ' answer group.')
        exploration.states = {
            exploration.init_state_name: (
                state_domain.State.create_default_state(
                    exploration.init_state_name))
        }
        self.set_interaction_for_state(
            exploration.states[exploration.init_state_name], 'TextInput')
        exploration.validate()
        # Validate language_code.
        exploration.language_code = 'fake_code'
        self._assert_validation_error(exploration, 'Invalid language_code')
        exploration.language_code = 'English'
        self._assert_validation_error(exploration, 'Invalid language_code')
        exploration.language_code = 'en'
        exploration.validate()
        # Validate param_specs.
        exploration.param_specs = 'A string'
        self._assert_validation_error(exploration, 'param_specs to be a dict')
        exploration.param_specs = {
            '@': param_domain.ParamSpec.from_dict({
                'obj_type': 'UnicodeString'
            })
        }
        self._assert_validation_error(
            exploration, 'Only parameter names with characters')
        exploration.param_specs = {
            'notAParamSpec': param_domain.ParamSpec.from_dict(
                {'obj_type': 'UnicodeString'})
        }
        exploration.validate()
    def test_tag_validation(self):
        """Test validation of exploration tags.

        Exercises each tag constraint in turn: tags must be a list of
        non-empty lowercase strings without surrounding or adjacent
        whitespace, and without duplicates.
        """
        exploration = exp_domain.Exploration.create_default_exploration('eid')
        exploration.objective = 'Objective'
        init_state = exploration.states[exploration.init_state_name]
        self.set_interaction_for_state(init_state, 'EndExploration')
        init_state.update_interaction_default_outcome(None)
        exploration.validate()
        exploration.tags = 'this should be a list'
        self._assert_validation_error(
            exploration, 'Expected \'tags\' to be a list')
        exploration.tags = [123]
        self._assert_validation_error(exploration, 'to be a string')
        exploration.tags = ['abc', 123]
        self._assert_validation_error(exploration, 'to be a string')
        exploration.tags = ['']
        self._assert_validation_error(exploration, 'Tags should be non-empty')
        exploration.tags = ['123']
        self._assert_validation_error(
            exploration, 'should only contain lowercase letters and spaces')
        exploration.tags = ['ABC']
        self._assert_validation_error(
            exploration, 'should only contain lowercase letters and spaces')
        exploration.tags = [' a b']
        self._assert_validation_error(
            exploration, 'Tags should not start or end with whitespace')
        exploration.tags = ['a b ']
        self._assert_validation_error(
            exploration, 'Tags should not start or end with whitespace')
        exploration.tags = ['a b']
        self._assert_validation_error(
            exploration, 'Adjacent whitespace in tags should be collapsed')
        exploration.tags = ['abc', 'abc']
        self._assert_validation_error(
            exploration, 'Some tags duplicate each other')
        # A well-formed tag list passes validation.
        exploration.tags = ['computer science', 'analysis', 'a b c']
        exploration.validate()
def test_title_category_and_objective_validation(self):
"""Test that titles, categories and objectives are validated only in
'strict' mode.
"""
self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration = exp_fetchers.get_exploration_by_id('exp_id')
exploration.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'title must be specified'
):
exploration.validate(strict=True)
exploration.title = 'A title'
with self.assertRaisesRegexp(
utils.ValidationError, 'category must be specified'
):
exploration.validate(strict=True)
exploration.category = 'A category'
with self.assertRaisesRegexp(
utils.ValidationError, 'objective must be specified'
):
exploration.validate(strict=True)
exploration.objective = 'An objective'
exploration.validate(strict=True)
    def test_get_trainable_states_dict(self):
        """Test the get_trainable_states_dict() method.

        Checks that, relative to a snapshot of the old states and a version
        diff, states are partitioned into those with changed vs. unchanged
        answer groups across renames, answer-group edits, additions and
        deletions.
        """
        exp_id = 'exp_id1'
        test_exp_filepath = os.path.join(
            feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml')
        yaml_content = utils.get_file_contents(test_exp_filepath)
        assets_list = []
        exp_services.save_new_exploration_from_yaml_and_assets(
            feconf.SYSTEM_COMMITTER_ID, yaml_content, exp_id,
            assets_list)
        exploration_model = exp_models.ExplorationModel.get(
            exp_id, strict=False)
        old_states = exp_fetchers.get_exploration_from_model(
            exploration_model).states
        exploration = exp_fetchers.get_exploration_by_id(exp_id)
        # Rename a state to add it in unchanged answer group.
        exploration.rename_state('Home', 'Renamed state')
        change_list = [exp_domain.ExplorationChange({
            'cmd': 'rename_state',
            'old_state_name': 'Home',
            'new_state_name': 'Renamed state'
        })]
        expected_dict = {
            'state_names_with_changed_answer_groups': [],
            'state_names_with_unchanged_answer_groups': ['Renamed state']
        }
        exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
        actual_dict = exploration.get_trainable_states_dict(
            old_states, exp_versions_diff)
        self.assertEqual(actual_dict, expected_dict)
        # Modify answer groups to trigger change in answer groups.
        state = exploration.states['Renamed state']
        exploration.states['Renamed state'].interaction.answer_groups.insert(
            3, state.interaction.answer_groups[3])
        answer_groups = []
        for answer_group in state.interaction.answer_groups:
            answer_groups.append(answer_group.to_dict())
        change_list = [exp_domain.ExplorationChange({
            'cmd': 'edit_state_property',
            'state_name': 'Renamed state',
            'property_name': 'answer_groups',
            'new_value': answer_groups
        })]
        expected_dict = {
            'state_names_with_changed_answer_groups': ['Renamed state'],
            'state_names_with_unchanged_answer_groups': []
        }
        exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
        actual_dict = exploration.get_trainable_states_dict(
            old_states, exp_versions_diff)
        self.assertEqual(actual_dict, expected_dict)
        # Add new state to trigger change in answer groups.
        exploration.add_states(['New state'])
        exploration.states['New state'] = copy.deepcopy(
            exploration.states['Renamed state'])
        change_list = [exp_domain.ExplorationChange({
            'cmd': 'add_state',
            'state_name': 'New state',
        })]
        expected_dict = {
            'state_names_with_changed_answer_groups': [
                'New state', 'Renamed state'],
            'state_names_with_unchanged_answer_groups': []
        }
        exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
        actual_dict = exploration.get_trainable_states_dict(
            old_states, exp_versions_diff)
        self.assertEqual(actual_dict, expected_dict)
        # Delete state.
        exploration.delete_state('New state')
        change_list = [exp_domain.ExplorationChange({
            'cmd': 'delete_state',
            'state_name': 'New state'
        })]
        expected_dict = {
            'state_names_with_changed_answer_groups': ['Renamed state'],
            'state_names_with_unchanged_answer_groups': []
        }
        exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
        actual_dict = exploration.get_trainable_states_dict(
            old_states, exp_versions_diff)
        self.assertEqual(actual_dict, expected_dict)
        # Test addition and multiple renames.
        exploration.add_states(['New state'])
        exploration.states['New state'] = copy.deepcopy(
            exploration.states['Renamed state'])
        exploration.rename_state('New state', 'New state2')
        exploration.rename_state('New state2', 'New state3')
        change_list = [exp_domain.ExplorationChange({
            'cmd': 'add_state',
            'state_name': 'New state',
        }), exp_domain.ExplorationChange({
            'cmd': 'rename_state',
            'old_state_name': 'New state',
            'new_state_name': 'New state2'
        }), exp_domain.ExplorationChange({
            'cmd': 'rename_state',
            'old_state_name': 'New state2',
            'new_state_name': 'New state3'
        })]
        expected_dict = {
            'state_names_with_changed_answer_groups': [
                'Renamed state', 'New state3'],
            'state_names_with_unchanged_answer_groups': []
        }
        exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
        actual_dict = exploration.get_trainable_states_dict(
            old_states, exp_versions_diff)
        self.assertEqual(actual_dict, expected_dict)
def test_get_languages_with_complete_translation(self):
exploration = exp_domain.Exploration.create_default_exploration('0')
self.assertEqual(
exploration.get_languages_with_complete_translation(), [])
written_translations = state_domain.WrittenTranslations.from_dict({
'translations_mapping': {
'content_1': {
'hi': {
'data_format': 'html',
'translation': '<p>Translation in Hindi.</p>',
'needs_update': False
}
}
}
})
exploration.states[
feconf.DEFAULT_INIT_STATE_NAME].update_written_translations(
written_translations)
self.assertEqual(
exploration.get_languages_with_complete_translation(), ['hi'])
    def test_get_translation_counts_with_no_needs_update(self):
        """Translation counts aggregate per language across all states when
        no translation is flagged as needing an update.
        """
        exploration = exp_domain.Exploration.create_default_exploration('0')
        self.assertEqual(
            exploration.get_translation_counts(), {})
        # Two Hindi translations in the initial state.
        written_translations = state_domain.WrittenTranslations.from_dict({
            'translations_mapping': {
                'content_1': {
                    'hi': {
                        'data_format': 'html',
                        'translation': '<p>Translation in Hindi.</p>',
                        'needs_update': False
                    }
                },
                'default_outcome': {
                    'hi': {
                        'data_format': 'html',
                        'translation': '<p>Translation in Hindi.</p>',
                        'needs_update': False
                    }
                }
            }
        })
        exploration.states[
            feconf.DEFAULT_INIT_STATE_NAME].update_written_translations(
                written_translations)
        # Two more Hindi translations in a second state.
        exploration.add_states(['New state'])
        written_translations = state_domain.WrittenTranslations.from_dict({
            'translations_mapping': {
                'content_1': {
                    'hi': {
                        'data_format': 'html',
                        'translation': '<p>New state translation in Hindi.</p>',
                        'needs_update': False
                    }
                },
                'default_outcome': {
                    'hi': {
                        'data_format': 'html',
                        'translation': '<p>New State translation in Hindi.</p>',
                        'needs_update': False
                    }
                }
            }
        })
        exploration.states['New state'].update_written_translations(
            written_translations)
        self.assertEqual(
            exploration.get_translation_counts(), {'hi': 4})
def test_get_translation_counts_with_needs_update(self):
exploration = exp_domain.Exploration.create_default_exploration('0')
self.assertEqual(
exploration.get_translation_counts(), {})
written_translations = state_domain.WrittenTranslations.from_dict({
'translations_mapping': {
'content_1': {
'hi': {
'data_format': 'html',
'translation': '<p>Translation in Hindi.</p>',
'needs_update': True
}
},
'default_outcome': {
'hi': {
'data_format': 'html',
'translation': '<p>Translation in Hindi.</p>',
'needs_update': False
}
}
}
})
exploration.states[
feconf.DEFAULT_INIT_STATE_NAME].update_written_translations(
written_translations)
self.assertEqual(
exploration.get_translation_counts(), {'hi': 1})
    def test_get_translation_counts_with_translation_in_multiple_lang(self):
        """Checks that get_translation_counts() keeps a separate tally per
        language code when content is translated into several languages.
        """
        exploration = exp_domain.Exploration.create_default_exploration('0')
        self.assertEqual(
            exploration.get_translation_counts(), {})
        written_translations = state_domain.WrittenTranslations.from_dict({
            'translations_mapping': {
                'content_1': {
                    'hi-en': {
                        'data_format': 'html',
                        'translation': '<p>Translation in Hindi.</p>',
                        'needs_update': False
                    },
                    'hi': {
                        'data_format': 'html',
                        'translation': '<p>Translation in Hindi.</p>',
                        'needs_update': False
                    }
                },
                'default_outcome': {
                    'hi': {
                        'data_format': 'html',
                        'translation': '<p>Translation in Hindi.</p>',
                        'needs_update': False
                    }
                }
            }
        })
        exploration.states[
            feconf.DEFAULT_INIT_STATE_NAME].update_written_translations(
                written_translations)
        # 'hi' covers both content ids; 'hi-en' covers only content_1.
        self.assertEqual(
            exploration.get_translation_counts(), {
                'hi': 2,
                'hi-en': 1
            })
    def test_get_content_count(self):
        """Checks that get_content_count() tallies the content ids added via
        content/outcomes, customization args, answer-group feedback, hints
        and solutions. The inline comments track the expected running total.
        """
        # Adds 1 to content count to exploration (content, default_outcome).
        exploration = exp_domain.Exploration.create_default_exploration('0')
        self.assertEqual(exploration.get_content_count(), 1)
        # Adds 2 to content count to exploration (content default_outcome).
        exploration.add_states(['New state'])
        init_state = exploration.states[exploration.init_state_name]
        # Adds 1 to content count to exploration (ca_placeholder_0)
        self.set_interaction_for_state(init_state, 'TextInput')
        answer_group_dict = {
            'outcome': {
                'dest': exploration.init_state_name,
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': '<p>Feedback</p>'
                },
                'labelled_as_correct': False,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': ['Test']
                },
                'rule_type': 'Contains'
            }],
            'training_data': [],
            'tagged_skill_misconception_id': None
        }
        # Adds 1 to content count to exploration (feedback_1).
        init_state.update_interaction_answer_groups([answer_group_dict])
        hints_list = [
            state_domain.Hint(
                state_domain.SubtitledHtml('hint_1', '<p>hint one</p>')
            )
        ]
        # Adds 1 to content count to exploration (hint_1).
        init_state.update_interaction_hints(hints_list)
        solution_dict = {
            'answer_is_exclusive': False,
            'correct_answer': 'helloworld!',
            'explanation': {
                'content_id': 'solution',
                'html': '<p>hello_world is a string</p>'
            },
        }
        solution = state_domain.Solution.from_dict(
            init_state.interaction.id, solution_dict)
        # Adds 1 to content count to exploration (solution).
        init_state.update_interaction_solution(solution)
        self.assertEqual(exploration.get_content_count(), 5)
def test_get_content_with_correct_state_name_returns_html(self):
exploration = exp_domain.Exploration.create_default_exploration('0')
init_state = exploration.states[exploration.init_state_name]
self.set_interaction_for_state(init_state, 'TextInput')
hints_list = [
state_domain.Hint(
state_domain.SubtitledHtml('hint_1', '<p>hint one</p>')
)
]
init_state.update_interaction_hints(hints_list)
self.assertEqual(
exploration.get_content_html(exploration.init_state_name, 'hint_1'),
'<p>hint one</p>')
hints_list[0].hint_content.html = '<p>Changed hint one</p>'
init_state.update_interaction_hints(hints_list)
self.assertEqual(
exploration.get_content_html(exploration.init_state_name, 'hint_1'),
'<p>Changed hint one</p>')
    def test_get_content_with_incorrect_state_name_raise_error(self):
        """Checks that get_content_html() raises a ValueError for a state
        name that does not exist in the exploration.
        """
        exploration = exp_domain.Exploration.create_default_exploration('0')
        init_state = exploration.states[exploration.init_state_name]
        self.set_interaction_for_state(init_state, 'TextInput')
        hints_list = [
            state_domain.Hint(
                state_domain.SubtitledHtml('hint_1', '<p>hint one</p>')
            )
        ]
        init_state.update_interaction_hints(hints_list)
        # Sanity check: lookups with a valid state name still work.
        self.assertEqual(
            exploration.get_content_html(exploration.init_state_name, 'hint_1'),
            '<p>hint one</p>')
        with self.assertRaisesRegexp(
            ValueError, 'State Invalid state does not exist'):
            exploration.get_content_html('Invalid state', 'hint_1')
def test_is_demo_property(self):
"""Test the is_demo property."""
demo = exp_domain.Exploration.create_default_exploration('0')
self.assertEqual(demo.is_demo, True)
notdemo1 = exp_domain.Exploration.create_default_exploration('a')
self.assertEqual(notdemo1.is_demo, False)
notdemo2 = exp_domain.Exploration.create_default_exploration('abcd')
self.assertEqual(notdemo2.is_demo, False)
def test_has_state_name(self):
"""Test for has_state_name."""
demo = exp_domain.Exploration.create_default_exploration('0')
state_names = list(demo.states.keys())
self.assertEqual(state_names, ['Introduction'])
self.assertEqual(demo.has_state_name('Introduction'), True)
self.assertEqual(demo.has_state_name('Fake state name'), False)
    def test_get_interaction_id_by_state_name(self):
        """Test for get_interaction_id_by_state_name."""
        demo = exp_domain.Exploration.create_default_exploration('0')
        # The default state has no interaction yet, so the id is None.
        self.assertEqual(
            demo.get_interaction_id_by_state_name('Introduction'), None)
def test_exploration_export_import(self):
"""Test that to_dict and from_dict preserve all data within an
exploration.
"""
demo = exp_domain.Exploration.create_default_exploration('0')
demo_dict = demo.to_dict()
exp_from_dict = exp_domain.Exploration.from_dict(demo_dict)
self.assertEqual(exp_from_dict.to_dict(), demo_dict)
    def test_interaction_with_none_id_is_not_terminal(self):
        """Test that an interaction with an id of None leads to is_terminal
        being false.
        """
        # Default exploration has a default interaction with an ID of None.
        demo = exp_domain.Exploration.create_default_exploration('0')
        init_state = demo.states[feconf.DEFAULT_INIT_STATE_NAME]
        self.assertFalse(init_state.interaction.is_terminal)
    def test_cannot_create_demo_exp_with_invalid_param_changes(self):
        """from_dict() should reject a state param change that refers to a
        parameter not declared in the exploration's param_specs.
        """
        demo_exp = exp_domain.Exploration.create_default_exploration('0')
        demo_dict = demo_exp.to_dict()
        new_state = state_domain.State.create_default_state('new_state_name')
        new_state.param_changes = [param_domain.ParamChange.from_dict({
            'customization_args': {
                'list_of_values': ['1', '2'], 'parse_with_jinja': False
            },
            'name': 'myParam',
            'generator_id': 'RandomSelector'
        })]
        demo_dict['states']['new_state_name'] = new_state.to_dict()
        # Note: param_specs declares 'ParamSpec', not 'myParam'.
        demo_dict['param_specs'] = {
            'ParamSpec': {'obj_type': 'UnicodeString'}
        }
        with self.assertRaisesRegexp(
            Exception,
            'Parameter myParam was used in a state but not '
            'declared in the exploration param_specs.'):
            exp_domain.Exploration.from_dict(demo_dict)
    def test_validate_exploration_category(self):
        """validate() should reject a non-string category."""
        exploration = self.save_new_valid_exploration(
            'exp_id', 'user@example.com', title='', category='',
            objective='', end_state_name='End')
        exploration.validate()
        exploration.category = 1
        with self.assertRaisesRegexp(
            Exception, 'Expected category to be a string, received 1'):
            exploration.validate()
    def test_validate_exploration_objective(self):
        """validate() should reject a non-string objective."""
        exploration = self.save_new_valid_exploration(
            'exp_id', 'user@example.com', title='', category='',
            objective='', end_state_name='End')
        exploration.validate()
        exploration.objective = 1
        with self.assertRaisesRegexp(
            Exception, 'Expected objective to be a string, received 1'):
            exploration.validate()
    def test_validate_exploration_blurb(self):
        """validate() should reject a non-string blurb."""
        exploration = self.save_new_valid_exploration(
            'exp_id', 'user@example.com', title='', category='',
            objective='', end_state_name='End')
        exploration.validate()
        exploration.blurb = 1
        with self.assertRaisesRegexp(
            Exception, 'Expected blurb to be a string, received 1'):
            exploration.validate()
    def test_validate_exploration_language_code(self):
        """validate() should reject a non-string language_code."""
        exploration = self.save_new_valid_exploration(
            'exp_id', 'user@example.com', title='', category='',
            objective='', end_state_name='End')
        exploration.validate()
        exploration.language_code = 1
        with self.assertRaisesRegexp(
            Exception, 'Expected language_code to be a string, received 1'):
            exploration.validate()
    def test_validate_exploration_author_notes(self):
        """validate() should reject non-string author_notes."""
        exploration = self.save_new_valid_exploration(
            'exp_id', 'user@example.com', title='', category='',
            objective='', end_state_name='End')
        exploration.validate()
        exploration.author_notes = 1
        with self.assertRaisesRegexp(
            Exception, 'Expected author_notes to be a string, received 1'):
            exploration.validate()
    def test_validate_exploration_states(self):
        """validate() should reject a non-dict states attribute."""
        exploration = self.save_new_valid_exploration(
            'exp_id', 'user@example.com', title='', category='',
            objective='', end_state_name='End')
        exploration.validate()
        exploration.states = 1
        with self.assertRaisesRegexp(
            Exception, 'Expected states to be a dict, received 1'):
            exploration.validate()
    def test_validate_exploration_outcome_dest(self):
        """validate() should require every outcome to have a destination."""
        exploration = self.save_new_valid_exploration(
            'exp_id', 'user@example.com', title='', category='',
            objective='', end_state_name='End')
        exploration.validate()
        exploration.init_state.interaction.default_outcome.dest = None
        with self.assertRaisesRegexp(
            Exception, 'Every outcome should have a destination.'):
            exploration.validate()
    def test_validate_exploration_outcome_dest_type(self):
        """validate() should require an outcome destination to be a string."""
        exploration = self.save_new_valid_exploration(
            'exp_id', 'user@example.com', title='', category='',
            objective='', end_state_name='End')
        exploration.validate()
        exploration.init_state.interaction.default_outcome.dest = 1
        with self.assertRaisesRegexp(
            Exception, 'Expected outcome dest to be a string, received 1'):
            exploration.validate()
    def test_validate_exploration_states_schema_version(self):
        """validate() should require a states schema version to be set."""
        exploration = self.save_new_valid_exploration(
            'exp_id', 'user@example.com', title='', category='',
            objective='', end_state_name='End')
        exploration.validate()
        exploration.states_schema_version = None
        with self.assertRaisesRegexp(
            Exception, 'This exploration has no states schema version.'):
            exploration.validate()
    def test_validate_exploration_auto_tts_enabled(self):
        """validate() should reject a non-bool auto_tts_enabled flag."""
        exploration = self.save_new_valid_exploration(
            'exp_id', 'user@example.com', title='', category='',
            objective='', end_state_name='End')
        exploration.validate()
        exploration.auto_tts_enabled = 1
        with self.assertRaisesRegexp(
            Exception, 'Expected auto_tts_enabled to be a bool, received 1'):
            exploration.validate()
    def test_validate_exploration_correctness_feedback_enabled(self):
        """validate() should reject a non-bool correctness_feedback_enabled
        flag.
        """
        exploration = self.save_new_valid_exploration(
            'exp_id', 'user@example.com', title='', category='',
            objective='', end_state_name='End')
        exploration.validate()
        exploration.correctness_feedback_enabled = 1
        with self.assertRaisesRegexp(
            Exception,
            'Expected correctness_feedback_enabled to be a bool, received 1'):
            exploration.validate()
    def test_validate_exploration_param_specs(self):
        """validate() should require parameter names to be strings."""
        exploration = self.save_new_valid_exploration(
            'exp_id', 'user@example.com', title='', category='',
            objective='', end_state_name='End')
        exploration.validate()
        # The key of the param_specs dict (the parameter name) is an int.
        exploration.param_specs = {
            1: param_domain.ParamSpec.from_dict(
                {'obj_type': 'UnicodeString'})
        }
        with self.assertRaisesRegexp(
            Exception, 'Expected parameter name to be a string, received 1'):
            exploration.validate()
    def test_validate_exploration_param_changes_type(self):
        """validate() should reject a non-list param_changes attribute."""
        exploration = self.save_new_valid_exploration(
            'exp_id', 'user@example.com', title='', category='',
            objective='', end_state_name='End')
        exploration.validate()
        exploration.param_changes = 1
        with self.assertRaisesRegexp(
            Exception, 'Expected param_changes to be a list, received 1'):
            exploration.validate()
    def test_validate_exploration_param_name(self):
        """validate() should reject a param change whose name is not among
        the exploration's declared parameters.
        """
        exploration = self.save_new_valid_exploration(
            'exp_id', 'user@example.com', title='', category='',
            objective='', end_state_name='End')
        exploration.validate()
        exploration.param_changes = [param_domain.ParamChange.from_dict({
            'customization_args': {
                'list_of_values': ['1', '2'], 'parse_with_jinja': False
            },
            'name': 'invalid',
            'generator_id': 'RandomSelector'
        })]
        with self.assertRaisesRegexp(
            Exception,
            'No parameter named \'invalid\' exists in this '
            'exploration'):
            exploration.validate()
    def test_validate_exploration_reserved_param_name(self):
        """validate() should reject a param change that uses a reserved
        parameter name ('all').
        """
        exploration = self.save_new_valid_exploration(
            'exp_id', 'user@example.com', title='', category='',
            objective='', end_state_name='End')
        exploration.validate()
        exploration.param_changes = [param_domain.ParamChange.from_dict({
            'customization_args': {
                'list_of_values': ['1', '2'], 'parse_with_jinja': False
            },
            'name': 'all',
            'generator_id': 'RandomSelector'
        })]
        with self.assertRaisesRegexp(
            Exception,
            'The exploration-level parameter with name \'all\' is '
            'reserved. Please choose a different name.'):
            exploration.validate()
    def test_validate_exploration_is_non_self_loop(self):
        """validate() should reject a refresher exploration ID on a default
        outcome that is not a self-loop.
        """
        exploration = self.save_new_valid_exploration(
            'exp_id', 'user@example.com', title='', category='',
            objective='', end_state_name='End')
        exploration.validate()
        exploration.add_states(['DEF'])
        # The outcome points to 'DEF' (not back to the init state) while
        # carrying a refresher_exploration_id, which is invalid.
        default_outcome = state_domain.Outcome(
            'DEF', state_domain.SubtitledHtml(
                'default_outcome', '<p>Default outcome for state1</p>'),
            False, [], 'refresher_exploration_id', None,
        )
        exploration.init_state.update_interaction_default_outcome(
            default_outcome
        )
        with self.assertRaisesRegexp(
            Exception,
            'The default outcome for state Introduction has a refresher '
            'exploration ID, but is not a self-loop.'):
            exploration.validate()
    def test_validate_exploration_answer_group_parameter(self):
        """validate() should reject an answer-group param change whose
        parameter is not declared in the exploration.
        """
        exploration = self.save_new_valid_exploration(
            'exp_id', 'user@example.com', title='', category='',
            objective='', end_state_name='End')
        exploration.validate()
        param_changes = [{
            'customization_args': {
                'list_of_values': ['1', '2'], 'parse_with_jinja': False
            },
            'name': 'ParamChange',
            'generator_id': 'RandomSelector'
        }]
        answer_groups = [{
            'outcome': {
                'dest': exploration.init_state_name,
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': 'Feedback'
                },
                'labelled_as_correct': False,
                'param_changes': param_changes,
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': ['Test']
                },
                'rule_type': 'Contains'
            }],
            'training_data': [],
            'tagged_skill_misconception_id': None
        }]
        exploration.init_state.update_interaction_answer_groups(answer_groups)
        with self.assertRaisesRegexp(
            Exception,
            'The parameter ParamChange was used in an answer group, '
            'but it does not exist in this exploration'):
            exploration.validate()
    def test_verify_all_states_reachable(self):
        """Strict validation should report both unreachable states and
        states from which the exploration cannot be completed.
        """
        exploration = self.save_new_valid_exploration(
            'exp_id', 'owner_id')
        exploration.validate()
        # 'End' is added but nothing routes to it, so it is unreachable and
        # 'Introduction' can no longer reach a terminal state.
        exploration.add_states(['End'])
        end_state = exploration.states['End']
        self.set_interaction_for_state(end_state, 'EndExploration')
        end_state.update_interaction_default_outcome(None)
        with self.assertRaisesRegexp(
            Exception,
            'Please fix the following issues before saving this exploration: '
            '1. The following states are not reachable from the initial state: '
            'End 2. It is impossible to complete the exploration from the '
            'following states: Introduction'):
            exploration.validate(strict=True)
    def test_update_init_state_name_with_invalid_state(self):
        """update_init_state_name() should accept an existing state name and
        raise for a name that does not exist.
        """
        exploration = self.save_new_valid_exploration(
            'exp_id', 'user@example.com', title='title', category='category',
            objective='objective', end_state_name='End')
        exploration.update_init_state_name('End')
        self.assertEqual(exploration.init_state_name, 'End')
        with self.assertRaisesRegexp(
            Exception,
            'Invalid new initial state name: invalid_state;'):
            exploration.update_init_state_name('invalid_state')
    def test_rename_state_with_invalid_state(self):
        """rename_state() should move the state under the new name and raise
        when asked to rename a nonexistent state.
        """
        exploration = self.save_new_valid_exploration(
            'exp_id', 'user@example.com', title='title', category='category',
            objective='objective', end_state_name='End')
        self.assertTrue(exploration.states.get('End'))
        self.assertFalse(exploration.states.get('new state name'))
        exploration.rename_state('End', 'new state name')
        self.assertFalse(exploration.states.get('End'))
        self.assertTrue(exploration.states.get('new state name'))
        with self.assertRaisesRegexp(
            Exception, 'State invalid_state does not exist'):
            exploration.rename_state('invalid_state', 'new state name')
    def test_default_outcome_is_labelled_incorrect_for_self_loop(self):
        """Strict validation should reject a default outcome that is both
        labelled correct and a self-loop.
        """
        exploration = self.save_new_valid_exploration(
            'exp_id', 'user@example.com', title='title', category='category',
            objective='objective', end_state_name='End')
        exploration.validate(strict=True)
        (
            exploration.init_state.interaction.default_outcome
            .labelled_as_correct) = True
        (
            exploration.init_state.interaction.default_outcome
            .dest) = exploration.init_state_name
        with self.assertRaisesRegexp(
            Exception,
            'The default outcome for state Introduction is labelled '
            'correct but is a self-loop'):
            exploration.validate(strict=True)
    def test_serialize_and_deserialize_returns_unchanged_exploration(self):
        """Checks that serializing and then deserializing a default exploration
        works as intended by leaving the exploration unchanged.
        """
        exploration = exp_domain.Exploration.create_default_exploration('eid')
        # Round-trip through serialize()/deserialize() and compare dicts.
        self.assertEqual(
            exploration.to_dict(),
            exp_domain.Exploration.deserialize(
                exploration.serialize()).to_dict())
class ExplorationSummaryTests(test_utils.GenericTestBase):
    """Tests for ExplorationSummary validation and ownership helpers."""

    def setUp(self):
        """Creates a default exploration summary and assigns role id lists
        used by the ownership tests.
        """
        super(ExplorationSummaryTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        exploration = exp_domain.Exploration.create_default_exploration('eid')
        exp_services.save_new_exploration(self.owner_id, exploration)
        self.exp_summary = exp_fetchers.get_exploration_summary_by_id('eid')
        self.exp_summary.editor_ids = ['editor_id']
        self.exp_summary.voice_artist_ids = ['voice_artist_id']
        self.exp_summary.viewer_ids = ['viewer_id']
        self.exp_summary.contributor_ids = ['contributor_id']

    def test_validation_passes_with_valid_properties(self):
        self.exp_summary.validate()

    def test_validation_fails_with_invalid_title(self):
        self.exp_summary.title = 0
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected title to be a string, received 0'):
            self.exp_summary.validate()

    def test_validation_fails_with_invalid_category(self):
        self.exp_summary.category = 0
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected category to be a string, received 0'):
            self.exp_summary.validate()

    def test_validation_fails_with_invalid_objective(self):
        self.exp_summary.objective = 0
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected objective to be a string, received 0'):
            self.exp_summary.validate()

    def test_validation_fails_with_invalid_language_code(self):
        self.exp_summary.language_code = 0
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected language_code to be a string, received 0'):
            self.exp_summary.validate()

    def test_validation_fails_with_unallowed_language_code(self):
        self.exp_summary.language_code = 'invalid'
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Invalid language_code: invalid'):
            self.exp_summary.validate()

    def test_validation_fails_with_invalid_tags(self):
        self.exp_summary.tags = 'tags'
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected \'tags\' to be a list, received tags'):
            self.exp_summary.validate()

    def test_validation_fails_with_invalid_tag_in_tags(self):
        self.exp_summary.tags = ['tag', 2]
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected each tag in \'tags\' to be a string, received \'2\''):
            self.exp_summary.validate()

    def test_validation_fails_with_empty_tag_in_tags(self):
        self.exp_summary.tags = ['', 'abc']
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Tags should be non-empty'):
            self.exp_summary.validate()

    def test_validation_fails_with_unallowed_characters_in_tag(self):
        self.exp_summary.tags = ['123', 'abc']
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'Tags should only contain lowercase '
                'letters and spaces, received \'123\'')):
            self.exp_summary.validate()

    def test_validation_fails_with_whitespace_in_tag_start(self):
        self.exp_summary.tags = [' ab', 'abc']
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Tags should not start or end with whitespace, received \' ab\''):
            self.exp_summary.validate()

    def test_validation_fails_with_whitespace_in_tag_end(self):
        self.exp_summary.tags = ['ab ', 'abc']
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Tags should not start or end with whitespace, received \'ab \''):
            self.exp_summary.validate()

    def test_validation_fails_with_adjacent_whitespace_in_tag(self):
        self.exp_summary.tags = ['a b', 'abc']
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'Adjacent whitespace in tags should '
                'be collapsed, received \'a b\'')):
            self.exp_summary.validate()

    def test_validation_fails_with_duplicate_tags(self):
        self.exp_summary.tags = ['abc', 'abc', 'ab']
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Some tags duplicate each other'):
            self.exp_summary.validate()

    def test_validation_fails_with_invalid_rating_type(self):
        self.exp_summary.ratings = 0
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Expected ratings to be a dict, received 0'):
            self.exp_summary.validate()

    def test_validation_fails_with_invalid_rating_keys(self):
        # Valid ratings dicts have exactly the keys '1'..'5'.
        self.exp_summary.ratings = {'1': 0, '10': 1}
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected ratings to have keys: 1, 2, 3, 4, 5, received 1, 10'):
            self.exp_summary.validate()

    def test_validation_fails_with_invalid_value_type_for_ratings(self):
        self.exp_summary.ratings = {'1': 0, '2': 'one', '3': 0, '4': 0, '5': 0}
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Expected value to be int, received one'):
            self.exp_summary.validate()

    def test_validation_fails_with_invalid_value_for_ratings(self):
        self.exp_summary.ratings = {'1': 0, '2': -1, '3': 0, '4': 0, '5': 0}
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected value to be non-negative, received -1'):
            self.exp_summary.validate()

    def test_validation_fails_with_invalid_scaled_average_rating(self):
        self.exp_summary.scaled_average_rating = 'one'
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected scaled_average_rating to be float, received one'):
            self.exp_summary.validate()

    def test_validation_fails_with_invalid_status(self):
        self.exp_summary.status = 0
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Expected status to be string, received 0'):
            self.exp_summary.validate()

    def test_validation_fails_with_invalid_community_owned(self):
        self.exp_summary.community_owned = '1'
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected community_owned to be bool, received 1'):
            self.exp_summary.validate()

    def test_validation_fails_with_invalid_contributors_summary(self):
        self.exp_summary.contributors_summary = 0
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected contributors_summary to be dict, received 0'):
            self.exp_summary.validate()

    def test_validation_fails_with_invalid_owner_ids_type(self):
        self.exp_summary.owner_ids = 0
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Expected owner_ids to be list, received 0'):
            self.exp_summary.validate()

    def test_validation_fails_with_invalid_owner_id_in_owner_ids(self):
        self.exp_summary.owner_ids = ['1', 2, '3']
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected each id in owner_ids to be string, received 2'):
            self.exp_summary.validate()

    def test_validation_fails_with_invalid_editor_ids_type(self):
        self.exp_summary.editor_ids = 0
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected editor_ids to be list, received 0'):
            self.exp_summary.validate()

    def test_validation_fails_with_invalid_editor_id_in_editor_ids(self):
        self.exp_summary.editor_ids = ['1', 2, '3']
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected each id in editor_ids to be string, received 2'):
            self.exp_summary.validate()

    def test_validation_fails_with_invalid_voice_artist_ids_type(self):
        self.exp_summary.voice_artist_ids = 0
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected voice_artist_ids to be list, received 0'):
            self.exp_summary.validate()

    def test_validation_fails_with_invalid_voice_artist_id_in_voice_artists_ids(
            self):
        self.exp_summary.voice_artist_ids = ['1', 2, '3']
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected each id in voice_artist_ids to be string, received 2'):
            self.exp_summary.validate()

    def test_validation_fails_with_invalid_viewer_ids_type(self):
        self.exp_summary.viewer_ids = 0
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected viewer_ids to be list, received 0'):
            self.exp_summary.validate()

    def test_validation_fails_with_invalid_viewer_id_in_viewer_ids(self):
        self.exp_summary.viewer_ids = ['1', 2, '3']
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected each id in viewer_ids to be string, received 2'):
            self.exp_summary.validate()

    def test_validation_fails_with_invalid_contributor_ids_type(self):
        self.exp_summary.contributor_ids = 0
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected contributor_ids to be list, received 0'):
            self.exp_summary.validate()

    def test_validation_fails_with_invalid_contributor_id_in_contributor_ids(
            self):
        self.exp_summary.contributor_ids = ['1', 2, '3']
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected each id in contributor_ids to be string, received 2'):
            self.exp_summary.validate()

    def test_is_private(self):
        self.exp_summary.validate()
        self.assertTrue(self.exp_summary.is_private())
        self.exp_summary.status = constants.ACTIVITY_STATUS_PUBLIC
        self.assertFalse(self.exp_summary.is_private())

    def test_is_solely_owned_by_user_one_owner(self):
        self.assertTrue(self.exp_summary.is_solely_owned_by_user(self.owner_id))
        self.assertFalse(self.exp_summary.is_solely_owned_by_user('other_id'))
        self.exp_summary.owner_ids = ['other_id']
        self.assertFalse(
            self.exp_summary.is_solely_owned_by_user(self.owner_id))
        self.assertTrue(self.exp_summary.is_solely_owned_by_user('other_id'))

    def test_is_solely_owned_by_user_multiple_owners(self):
        # With two owners, neither one is the *sole* owner.
        self.assertTrue(self.exp_summary.is_solely_owned_by_user(self.owner_id))
        self.assertFalse(self.exp_summary.is_solely_owned_by_user('other_id'))
        self.exp_summary.owner_ids = [self.owner_id, 'other_id']
        self.assertFalse(
            self.exp_summary.is_solely_owned_by_user(self.owner_id))
        self.assertFalse(self.exp_summary.is_solely_owned_by_user('other_id'))

    def test_is_solely_owned_by_user_other_users(self):
        # Non-owner roles (editor, voice artist, viewer, contributor) never
        # count as sole owners.
        self.assertFalse(self.exp_summary.is_solely_owned_by_user('editor_id'))
        self.assertFalse(
            self.exp_summary.is_solely_owned_by_user('voice_artist_id'))
        self.assertFalse(self.exp_summary.is_solely_owned_by_user('viewer_id'))
        self.assertFalse(
            self.exp_summary.is_solely_owned_by_user('contributor_id'))
class YamlCreationUnitTests(test_utils.GenericTestBase):
    """Test creation of explorations from YAML files."""

    EXP_ID = 'An exploration_id'

    def test_yaml_import_and_export(self):
        """Test the from_yaml() and to_yaml() methods."""
        exploration = exp_domain.Exploration.create_default_exploration(
            self.EXP_ID, title='Title', category='Category')
        exploration.add_states(['New state'])
        self.assertEqual(len(exploration.states), 2)
        exploration.validate()
        yaml_content = exploration.to_yaml()
        self.assertEqual(yaml_content, self.SAMPLE_YAML_CONTENT)
        # Round-trip: importing the exported YAML must yield identical YAML.
        exploration2 = exp_domain.Exploration.from_yaml('exp2', yaml_content)
        self.assertEqual(len(exploration2.states), 2)
        yaml_content_2 = exploration2.to_yaml()
        self.assertEqual(yaml_content_2, yaml_content)
        # Verify SAMPLE_UNTITLED_YAML_CONTENT can be converted to an exploration
        # without error.
        exp_domain.Exploration.from_untitled_yaml(
            'exp4', 'Title', 'Category', self.SAMPLE_UNTITLED_YAML_CONTENT)
        # Malformed YAML inputs should raise user-friendly errors.
        with self.assertRaisesRegexp(
            Exception, 'Please ensure that you are uploading a YAML text file, '
            'not a zip file. The YAML parser returned the following error: '):
            exp_domain.Exploration.from_yaml('exp3', 'No_initial_state_name')
        with self.assertRaisesRegexp(
            Exception,
            'Please ensure that you are uploading a YAML text file, not a zip'
            ' file. The YAML parser returned the following error: mapping '
            'values are not allowed here'):
            exp_domain.Exploration.from_yaml(
                'exp4', 'Invalid\ninit_state_name:\nMore stuff')
        with self.assertRaisesRegexp(
            Exception,
            'Please ensure that you are uploading a YAML text file, not a zip'
            ' file. The YAML parser returned the following error: while '
            'scanning a simple key'):
            exp_domain.Exploration.from_yaml(
                'exp4', 'State1:\n(\nInvalid yaml')
        # from_yaml() only accepts titled (v10+) schemas, and
        # from_untitled_yaml() only accepts untitled (<= v9) schemas.
        with self.assertRaisesRegexp(
            Exception, 'Expected a YAML version >= 10, received: 9'
        ):
            exp_domain.Exploration.from_yaml(
                'exp4', self.SAMPLE_UNTITLED_YAML_CONTENT)
        with self.assertRaisesRegexp(
            Exception, 'Expected a YAML version <= 9'
        ):
            exp_domain.Exploration.from_untitled_yaml(
                'exp4', 'Title', 'Category', self.SAMPLE_YAML_CONTENT)
class SchemaMigrationMethodsUnitTests(test_utils.GenericTestBase):
    """Tests the presence of appropriate schema migration methods in the
    Exploration domain object class.
    """

    def test_correct_states_schema_conversion_methods_exist(self):
        """Test that the right states schema conversion methods exist."""
        current_states_schema_version = (
            feconf.CURRENT_STATE_SCHEMA_VERSION)
        # A v(N)->v(N+1) converter must exist for every version below the
        # current one...
        for version_num in python_utils.RANGE(current_states_schema_version):
            self.assertTrue(hasattr(
                exp_domain.Exploration,
                '_convert_states_v%s_dict_to_v%s_dict' % (
                    version_num, version_num + 1)))
        # ...and none beyond it.
        self.assertFalse(hasattr(
            exp_domain.Exploration,
            '_convert_states_v%s_dict_to_v%s_dict' % (
                current_states_schema_version,
                current_states_schema_version + 1)))

    def test_correct_exploration_schema_conversion_methods_exist(self):
        """Test that the right exploration schema conversion methods exist."""
        current_exp_schema_version = (
            exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION)
        # Exploration schema converters start at v1 (there is no v0).
        for version_num in python_utils.RANGE(1, current_exp_schema_version):
            self.assertTrue(hasattr(
                exp_domain.Exploration,
                '_convert_v%s_dict_to_v%s_dict' % (
                    version_num, version_num + 1)))
        self.assertFalse(hasattr(
            exp_domain.Exploration,
            '_convert_v%s_dict_to_v%s_dict' % (
                current_exp_schema_version, current_exp_schema_version + 1)))
class SchemaMigrationUnitTests(test_utils.GenericTestBase):
"""Test migration methods for yaml content."""
YAML_CONTENT_V1 = (
"""default_skin: conversation_v1
param_changes: []
param_specs: {}
schema_version: 1
states:
- content:
- type: text
value: ''
name: (untitled state)
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
- content:
- type: text
value: ''
name: New state
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
""")
YAML_CONTENT_V2 = (
"""default_skin: conversation_v1
init_state_name: (untitled state)
param_changes: []
param_specs: {}
schema_version: 2
states:
(untitled state):
content:
- type: text
value: ''
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
New state:
content:
- type: text
value: ''
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
""")
YAML_CONTENT_V3 = (
"""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 3
skill_tags: []
states:
(untitled state):
content:
- type: text
value: ''
param_changes: []
widget:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
New state:
content:
- type: text
value: ''
param_changes: []
widget:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
""")
YAML_CONTENT_V4 = (
"""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 4
skill_tags: []
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
param_changes: []
""")
YAML_CONTENT_V5 = (
"""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 5
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
tags: []
""")
YAML_CONTENT_V6 = (
"""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 6
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
states_schema_version: 3
tags: []
""")
YAML_CONTENT_V7 = (
"""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 7
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
id: EndExploration
triggers: []
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
states_schema_version: 4
tags: []
""")
YAML_CONTENT_V8 = (
"""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 8
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: 5
tags: []
""")
YAML_CONTENT_V9 = (
"""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 9
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
postCode:
value: ''
preCode:
value: ''
language:
value: ''
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: CodeRepl
param_changes: []
states_schema_version: 6
tags: []
""")
YAML_CONTENT_V10 = (
"""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 10
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: 7
tags: []
title: Title
""")
YAML_CONTENT_V11 = (
"""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 11
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
classifier_model_id: null
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: 8
tags: []
title: Title
""")
YAML_CONTENT_V12 = (
"""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 12
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
classifier_model_id: null
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks:
- outcome:
dest: END
feedback:
- Correct!
id: TextInput
param_changes: []
states_schema_version: 9
tags: []
title: Title
""")
YAML_CONTENT_V13 = (
"""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 13
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
hints: []
id: TextInput
solution: {}
param_changes: []
END:
classifier_model_id: null
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
hints: []
id: EndExploration
solution: {}
param_changes: []
New state:
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
hints: []
id: TextInput
solution: {}
param_changes: []
states_schema_version: 10
tags: []
title: Title
""")
YAML_CONTENT_V14 = (
"""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 14
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: []
html: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
hints: []
id: TextInput
solution: {}
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: []
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
hints: []
id: EndExploration
solution: {}
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: []
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
hints: []
id: TextInput
solution: {}
param_changes: []
states_schema_version: 11
tags: []
title: Title
""")
YAML_CONTENT_V15 = (
"""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 15
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
hints: []
id: TextInput
solution: {}
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
hints: []
id: EndExploration
solution: {}
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
hints: []
id: TextInput
solution: {}
param_changes: []
states_schema_version: 12
tags: []
title: Title
""")
YAML_CONTENT_V16 = (
"""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 16
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 13
tags: []
title: Title
""")
YAML_CONTENT_V17 = (
"""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 17
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 13
tags: []
title: Title
""")
YAML_CONTENT_V18 = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 18
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
hints:
- hint_text: ''
id: TextInput
solution:
explanation: ''
answer_is_exclusive: False
correct_answer: Answer
param_changes: []
states_schema_version: 13
tags: []
title: Title
""")
YAML_CONTENT_V19 = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 19
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
audio_translations: {}
html: Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
audio_translations: {}
html: ''
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
audio_translations: {}
html: ''
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 14
tags: []
title: Title
""")
YAML_CONTENT_V20 = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 20
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- labelled_as_correct: false
outcome:
dest: END
feedback:
audio_translations: {}
html: Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
audio_translations: {}
html: ''
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
audio_translations: {}
html: ''
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 15
tags: []
title: Title
""")
YAML_CONTENT_V21 = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 21
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- labelled_as_correct: false
outcome:
dest: END
feedback:
audio_translations: {}
html: Correct!
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
audio_translations: {}
html: ''
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
allowImproperFraction:
value: true
allowNonzeroIntegerPart:
value: true
customPlaceholder:
value: ''
requireSimplestForm:
value: false
default_outcome:
dest: END
feedback:
audio_translations: {}
html: ''
param_changes: []
refresher_exploration_id: null
hints: []
id: FractionInput
solution: null
param_changes: []
states_schema_version: 16
tags: []
title: Title
""")
YAML_CONTENT_V22 = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 22
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
audio_translations: {}
html: Correct!
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 17
tags: []
title: Title
""")
YAML_CONTENT_V23 = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 23
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
audio_translations: {}
html: Correct!
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 18
tags: []
title: Title
""")
YAML_CONTENT_V24 = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 24
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
audio_translations: {}
html: Correct!
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 19
tags: []
title: Title
""")
YAML_CONTENT_V25 = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 25
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
audio_translations: {}
html: Correct!
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 20
tags: []
title: Title
""")
YAML_CONTENT_V26 = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 26
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: Correct!
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
content_id: content
html: Congratulations, you have finished!
content_ids_to_audio_translations:
content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 21
tags: []
title: Title
""")
YAML_CONTENT_V27 = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 27
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
content_ids_to_audio_translations:
content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 22
tags: []
title: Title
""")
YAML_CONTENT_V28 = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 28
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
content_ids_to_audio_translations:
content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 23
tags: []
title: Title
""")
YAML_CONTENT_V29 = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 29
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
content_ids_to_audio_translations:
content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
highlightRegionsOnHover:
value: false
imageAndRegions:
value:
imagePath: s1ImagePath.png
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: ImageClickInput
solution: null
param_changes: []
states_schema_version: 24
tags: []
title: Title
""")
YAML_CONTENT_V30 = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 30
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
content_ids_to_audio_translations:
content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 25
tags: []
title: Title
""")
YAML_CONTENT_V31 = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 31
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
content_ids_to_audio_translations:
content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
new_content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 26
tags: []
title: Title
""")
YAML_CONTENT_V32 = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 32
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
content_ids_to_audio_translations:
content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 27
tags: []
title: Title
""")
YAML_CONTENT_V33 = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 33
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 28
tags: []
title: Title
""")
YAML_CONTENT_V34 = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 34
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 29
tags: []
title: Title
""")
YAML_CONTENT_V35 = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 35
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 30
tags: []
title: Title
""")
YAML_CONTENT_V36 = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 36
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 31
tags: []
title: Title
""")
YAML_CONTENT_V37 = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 37
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 32
tags: []
title: Title
""")
YAML_CONTENT_V38 = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 38
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 33
tags: []
title: Title
""")
YAML_CONTENT_V39 = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 39
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 34
tags: []
title: Title
""")
# Exploration fixture serialized at exploration schema v40 (states schema
# v35). The TextInput placeholder customization arg is still a plain
# string here (no SubtitledUnicode / next_content_id_index yet).
YAML_CONTENT_V40 = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 40
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 35
tags: []
title: Title
""")
# Exploration fixture serialized at exploration schema v41 (states schema
# v36). Compared with v40: the TextInput placeholder value becomes a
# SubtitledUnicode dict (content_id + unicode_str), interactions gain
# next_content_id_index, and the ca_placeholder_* ids appear in the
# voiceover/translation mappings.
YAML_CONTENT_V41 = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 41
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value:
content_id: ca_placeholder_2
unicode_str: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
next_content_id_index: 3
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
ca_placeholder_2: {}
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
ca_placeholder_2: {}
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
next_content_id_index: 0
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value:
content_id: ca_placeholder_0
unicode_str: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
next_content_id_index: 1
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
ca_placeholder_0: {}
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
ca_placeholder_0: {}
content: {}
default_outcome: {}
states_schema_version: 36
tags: []
title: Title
""")
# Exploration fixture serialized at exploration schema v42 (states schema
# v37). Structurally identical to the v41 fixture; only the schema
# version numbers differ.
YAML_CONTENT_V42 = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 42
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value:
content_id: ca_placeholder_2
unicode_str: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
next_content_id_index: 3
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
ca_placeholder_2: {}
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
ca_placeholder_2: {}
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
next_content_id_index: 0
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value:
content_id: ca_placeholder_0
unicode_str: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
next_content_id_index: 1
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
ca_placeholder_0: {}
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
ca_placeholder_0: {}
content: {}
default_outcome: {}
states_schema_version: 37
tags: []
title: Title
""")
# Exploration fixture serialized at exploration schema v43 (states schema
# v38). Structurally identical to the v42 fixture; only the schema
# version numbers differ.
YAML_CONTENT_V43 = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 43
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value:
content_id: ca_placeholder_2
unicode_str: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
next_content_id_index: 3
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
ca_placeholder_2: {}
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
ca_placeholder_2: {}
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
next_content_id_index: 0
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value:
content_id: ca_placeholder_0
unicode_str: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
next_content_id_index: 1
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
ca_placeholder_0: {}
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
ca_placeholder_0: {}
content: {}
default_outcome: {}
states_schema_version: 38
tags: []
title: Title
""")
# Exploration fixture serialized at exploration schema v44 (states schema
# v39). Structurally identical to the v43 fixture; only the schema
# version numbers differ.
YAML_CONTENT_V44 = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 44
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value:
content_id: ca_placeholder_2
unicode_str: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
next_content_id_index: 3
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
ca_placeholder_2: {}
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
ca_placeholder_2: {}
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
next_content_id_index: 0
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value:
content_id: ca_placeholder_0
unicode_str: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
next_content_id_index: 1
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
ca_placeholder_0: {}
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
ca_placeholder_0: {}
content: {}
default_outcome: {}
states_schema_version: 39
tags: []
title: Title
""")
# Exploration fixture serialized at exploration schema v45 (states schema
# v40). Compared with v44: the TextInput Equals rule input 'x' becomes a
# list of strings instead of a single string.
YAML_CONTENT_V45 = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 45
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x:
- InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value:
content_id: ca_placeholder_2
unicode_str: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
next_content_id_index: 3
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
ca_placeholder_2: {}
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
ca_placeholder_2: {}
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
next_content_id_index: 0
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value:
content_id: ca_placeholder_0
unicode_str: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
next_content_id_index: 1
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
ca_placeholder_0: {}
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
ca_placeholder_0: {}
content: {}
default_outcome: {}
states_schema_version: 40
tags: []
title: Title
""")
# The canonical latest-schema serialization (v45); most migration tests
# below assert that older fixtures convert to exactly this output.
_LATEST_YAML_CONTENT = YAML_CONTENT_V45
def test_load_from_v1(self):
    """A v1 YAML file should migrate cleanly to the latest schema."""
    converted = exp_domain.Exploration.from_untitled_yaml(
        'eid', 'Title', 'Category', self.YAML_CONTENT_V1)
    actual_yaml = converted.to_yaml()
    self.assertEqual(actual_yaml, self._LATEST_YAML_CONTENT)
def test_load_from_v2(self):
    """A v2 YAML file should migrate cleanly to the latest schema."""
    converted = exp_domain.Exploration.from_untitled_yaml(
        'eid', 'Title', 'Category', self.YAML_CONTENT_V2)
    actual_yaml = converted.to_yaml()
    self.assertEqual(actual_yaml, self._LATEST_YAML_CONTENT)
def test_load_from_v3(self):
    """A v3 YAML file should migrate cleanly to the latest schema."""
    converted = exp_domain.Exploration.from_untitled_yaml(
        'eid', 'Title', 'Category', self.YAML_CONTENT_V3)
    actual_yaml = converted.to_yaml()
    self.assertEqual(actual_yaml, self._LATEST_YAML_CONTENT)
def test_load_from_v4(self):
    """A v4 YAML file should migrate cleanly to the latest schema."""
    converted = exp_domain.Exploration.from_untitled_yaml(
        'eid', 'Title', 'Category', self.YAML_CONTENT_V4)
    actual_yaml = converted.to_yaml()
    self.assertEqual(actual_yaml, self._LATEST_YAML_CONTENT)
def test_load_from_v5(self):
    """A v5 YAML file should migrate cleanly to the latest schema."""
    converted = exp_domain.Exploration.from_untitled_yaml(
        'eid', 'Title', 'Category', self.YAML_CONTENT_V5)
    actual_yaml = converted.to_yaml()
    self.assertEqual(actual_yaml, self._LATEST_YAML_CONTENT)
def test_load_from_v6(self):
    """A v6 YAML file should migrate cleanly to the latest schema."""
    converted = exp_domain.Exploration.from_untitled_yaml(
        'eid', 'Title', 'Category', self.YAML_CONTENT_V6)
    actual_yaml = converted.to_yaml()
    self.assertEqual(actual_yaml, self._LATEST_YAML_CONTENT)
def test_cannot_load_from_v6_with_invalid_handler_name(self):
"""Migrating a v6 YAML file whose handler name is not 'submit'
raises during the states v3 -> v4 conversion.
"""
# The fixture below is a valid v6 exploration except that the
# 'New state' handler is named 'invalid_handler_name' rather than
# 'submit'.
invalid_yaml_content_v6 = (
"""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 6
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: invalid_handler_name
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
states_schema_version: 3
tags: []
""")
with self.assertRaisesRegexp(
Exception,
'Error: Can only convert rules with a name '
'\'submit\' in states v3 to v4 conversion process. '):
exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', invalid_yaml_content_v6)
def test_cannot_load_from_v6_with_invalid_rule(self):
"""Migrating a v6 YAML file containing a rule whose rule_type is
neither 'atomic' nor 'default' raises during the states v3 -> v4
conversion.
"""
# The fixture below is a valid v6 exploration except that the first
# rule of '(untitled state)' has rule_type 'invalid_rule'.
invalid_yaml_content_v6 = (
"""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 6
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: invalid_rule
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
states_schema_version: 3
tags: []
""")
with self.assertRaisesRegexp(
Exception,
'Error: Can only convert default and atomic '
'rules in states v3 to v4 conversion process.'):
exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', invalid_yaml_content_v6)
def test_cannot_load_from_v6_with_invalid_subject(self):
"""Migrating a v6 YAML file containing a rule whose subject is not
'answer' raises during the states v3 -> v4 conversion.
"""
# The fixture below is a valid v6 exploration except that the
# 'New state' default rule definition carries
# 'subject: invalid_subject'.
invalid_yaml_content_v6 = (
"""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 6
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
subject: invalid_subject
dest: END
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
states_schema_version: 3
tags: []
""")
with self.assertRaisesRegexp(
Exception,
'Error: Can only convert rules with an \'answer\' '
'subject in states v3 to v4 conversion process.'):
exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', invalid_yaml_content_v6)
def test_cannot_load_from_v6_with_invalid_interaction_id(self):
"""Migrating a v6 YAML file that references a non-existent
interaction ID raises during migration.
"""
# The fixture below is a valid v6 exploration except that the
# 'New state' interaction id is 'invalid_id' instead of a registered
# interaction such as TextInput.
invalid_yaml_content_v6 = (
"""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 6
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: invalid_id
triggers: []
param_changes: []
states_schema_version: 3
tags: []
""")
with self.assertRaisesRegexp(
Exception,
'Trying to migrate exploration containing non-existent '
'interaction ID'):
exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', invalid_yaml_content_v6)
def test_load_from_v7(self):
    """A v7 YAML file should migrate cleanly to the latest schema."""
    converted = exp_domain.Exploration.from_untitled_yaml(
        'eid', 'Title', 'Category', self.YAML_CONTENT_V7)
    actual_yaml = converted.to_yaml()
    self.assertEqual(actual_yaml, self._LATEST_YAML_CONTENT)
def test_load_from_v8(self):
    """A v8 YAML file should migrate cleanly to the latest schema."""
    converted = exp_domain.Exploration.from_untitled_yaml(
        'eid', 'Title', 'Category', self.YAML_CONTENT_V8)
    actual_yaml = converted.to_yaml()
    self.assertEqual(actual_yaml, self._LATEST_YAML_CONTENT)
def test_load_from_v9(self):
"""Test direct loading from a v9 yaml file."""
# A bespoke expected output is used here (rather than
# _LATEST_YAML_CONTENT) because this fixture's 'New state' uses a
# CodeRepl interaction instead of TextInput.
latest_yaml_content = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 45
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x:
- InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value:
content_id: ca_placeholder_2
unicode_str: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
next_content_id_index: 3
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
ca_placeholder_2: {}
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
ca_placeholder_2: {}
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
next_content_id_index: 0
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
language:
value: python
placeholder:
value: ''
postCode:
value: ''
preCode:
value: ''
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: CodeRepl
solution: null
next_content_id_index: 0
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 40
tags: []
title: Title
""")
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V9)
self.assertEqual(exploration.to_yaml(), latest_yaml_content)
def test_load_from_v10(self):
    """A v10 YAML file should migrate cleanly to the latest schema."""
    converted = exp_domain.Exploration.from_yaml(
        'eid', self.YAML_CONTENT_V10)
    actual_yaml = converted.to_yaml()
    self.assertEqual(actual_yaml, self._LATEST_YAML_CONTENT)
def test_load_from_v11(self):
    """A v11 YAML file should migrate cleanly to the latest schema."""
    converted = exp_domain.Exploration.from_yaml(
        'eid', self.YAML_CONTENT_V11)
    actual_yaml = converted.to_yaml()
    self.assertEqual(actual_yaml, self._LATEST_YAML_CONTENT)
def test_load_from_v12(self):
"""Test direct loading from a v12 yaml file."""
# A bespoke expected output is used here (rather than
# _LATEST_YAML_CONTENT) because this fixture's 'New state' carries a
# hint (hint_1), which the shared fixture does not.
latest_yaml_content = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 45
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x:
- InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value:
content_id: ca_placeholder_2
unicode_str: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
next_content_id_index: 3
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
ca_placeholder_2: {}
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
ca_placeholder_2: {}
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
next_content_id_index: 0
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value:
content_id: ca_placeholder_2
unicode_str: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints:
- hint_content:
content_id: hint_1
html: <p>Correct!</p>
id: TextInput
solution: null
next_content_id_index: 3
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
ca_placeholder_2: {}
content: {}
default_outcome: {}
hint_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
ca_placeholder_2: {}
content: {}
default_outcome: {}
hint_1: {}
states_schema_version: 40
tags: []
title: Title
""")
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V12)
self.assertEqual(exploration.to_yaml(), latest_yaml_content)
def test_load_from_v13(self):
    """A v13 YAML file should migrate cleanly to the latest schema."""
    converted = exp_domain.Exploration.from_yaml(
        'eid', self.YAML_CONTENT_V13)
    actual_yaml = converted.to_yaml()
    self.assertEqual(actual_yaml, self._LATEST_YAML_CONTENT)
def test_load_from_v14(self):
    """A v14 YAML file should migrate cleanly to the latest schema."""
    converted = exp_domain.Exploration.from_yaml(
        'eid', self.YAML_CONTENT_V14)
    actual_yaml = converted.to_yaml()
    self.assertEqual(actual_yaml, self._LATEST_YAML_CONTENT)
def test_load_from_v15(self):
    """A v15 YAML file should migrate cleanly to the latest schema."""
    converted = exp_domain.Exploration.from_yaml(
        'eid', self.YAML_CONTENT_V15)
    actual_yaml = converted.to_yaml()
    self.assertEqual(actual_yaml, self._LATEST_YAML_CONTENT)
def test_load_from_v16(self):
    """A v16 YAML file should migrate cleanly to the latest schema."""
    converted = exp_domain.Exploration.from_yaml(
        'eid', self.YAML_CONTENT_V16)
    actual_yaml = converted.to_yaml()
    self.assertEqual(actual_yaml, self._LATEST_YAML_CONTENT)
def test_load_from_v17(self):
    """A v17 YAML file should migrate cleanly to the latest schema."""
    converted = exp_domain.Exploration.from_yaml(
        'eid', self.YAML_CONTENT_V17)
    actual_yaml = converted.to_yaml()
    self.assertEqual(actual_yaml, self._LATEST_YAML_CONTENT)
    def test_load_from_v18(self):
        """Test direct loading from a v18 yaml file."""
        # This snapshot's 'New state' carries a hint and a solution, so the
        # migrated output differs from self._LATEST_YAML_CONTENT; compare
        # against a bespoke expected YAML string instead.
        latest_yaml_content = (
            """author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 45
states:
  (untitled state):
    classifier_model_id: null
    content:
      content_id: content
      html: ''
    interaction:
      answer_groups:
      - outcome:
          dest: END
          feedback:
            content_id: feedback_1
            html: <p>Correct!</p>
          labelled_as_correct: false
          missing_prerequisite_skill_id: null
          param_changes: []
          refresher_exploration_id: null
        rule_specs:
        - inputs:
            x:
            - InputString
          rule_type: Equals
        tagged_skill_misconception_id: null
        training_data: []
      confirmed_unclassified_answers: []
      customization_args:
        placeholder:
          value:
            content_id: ca_placeholder_2
            unicode_str: ''
        rows:
          value: 1
      default_outcome:
        dest: (untitled state)
        feedback:
          content_id: default_outcome
          html: ''
        labelled_as_correct: false
        missing_prerequisite_skill_id: null
        param_changes: []
        refresher_exploration_id: null
      hints: []
      id: TextInput
      solution: null
    next_content_id_index: 3
    param_changes: []
    recorded_voiceovers:
      voiceovers_mapping:
        ca_placeholder_2: {}
        content: {}
        default_outcome: {}
        feedback_1: {}
    solicit_answer_details: false
    written_translations:
      translations_mapping:
        ca_placeholder_2: {}
        content: {}
        default_outcome: {}
        feedback_1: {}
  END:
    classifier_model_id: null
    content:
      content_id: content
      html: <p>Congratulations, you have finished!</p>
    interaction:
      answer_groups: []
      confirmed_unclassified_answers: []
      customization_args:
        recommendedExplorationIds:
          value: []
      default_outcome: null
      hints: []
      id: EndExploration
      solution: null
    next_content_id_index: 0
    param_changes: []
    recorded_voiceovers:
      voiceovers_mapping:
        content: {}
    solicit_answer_details: false
    written_translations:
      translations_mapping:
        content: {}
  New state:
    classifier_model_id: null
    content:
      content_id: content
      html: ''
    interaction:
      answer_groups: []
      confirmed_unclassified_answers: []
      customization_args:
        placeholder:
          value:
            content_id: ca_placeholder_2
            unicode_str: ''
        rows:
          value: 1
      default_outcome:
        dest: END
        feedback:
          content_id: default_outcome
          html: ''
        labelled_as_correct: false
        missing_prerequisite_skill_id: null
        param_changes: []
        refresher_exploration_id: null
      hints:
      - hint_content:
          content_id: hint_1
          html: ''
      id: TextInput
      solution:
        answer_is_exclusive: false
        correct_answer: Answer
        explanation:
          content_id: solution
          html: ''
    next_content_id_index: 3
    param_changes: []
    recorded_voiceovers:
      voiceovers_mapping:
        ca_placeholder_2: {}
        content: {}
        default_outcome: {}
        hint_1: {}
        solution: {}
    solicit_answer_details: false
    written_translations:
      translations_mapping:
        ca_placeholder_2: {}
        content: {}
        default_outcome: {}
        hint_1: {}
        solution: {}
states_schema_version: 40
tags: []
title: Title
""")
        # Loading the v18 snapshot must produce exactly the expected
        # latest-schema serialization above.
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V18)
        self.assertEqual(exploration.to_yaml(), latest_yaml_content)
def test_load_from_v19(self):
"""Test direct loading from a v19 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V19)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v20(self):
"""Test direct loading from a v20 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V20)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
    def test_load_from_v21(self):
        """Test direct loading from a v21 yaml file."""
        # This snapshot's 'New state' uses a FractionInput interaction, so the
        # migrated output differs from self._LATEST_YAML_CONTENT; compare
        # against a bespoke expected YAML string instead.
        latest_yaml_content = (
            """author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 45
states:
  (untitled state):
    classifier_model_id: null
    content:
      content_id: content
      html: ''
    interaction:
      answer_groups:
      - outcome:
          dest: END
          feedback:
            content_id: feedback_1
            html: <p>Correct!</p>
          labelled_as_correct: false
          missing_prerequisite_skill_id: null
          param_changes: []
          refresher_exploration_id: null
        rule_specs:
        - inputs:
            x:
            - InputString
          rule_type: Equals
        tagged_skill_misconception_id: null
        training_data: []
      confirmed_unclassified_answers: []
      customization_args:
        placeholder:
          value:
            content_id: ca_placeholder_2
            unicode_str: ''
        rows:
          value: 1
      default_outcome:
        dest: (untitled state)
        feedback:
          content_id: default_outcome
          html: ''
        labelled_as_correct: false
        missing_prerequisite_skill_id: null
        param_changes: []
        refresher_exploration_id: null
      hints: []
      id: TextInput
      solution: null
    next_content_id_index: 3
    param_changes: []
    recorded_voiceovers:
      voiceovers_mapping:
        ca_placeholder_2: {}
        content: {}
        default_outcome: {}
        feedback_1: {}
    solicit_answer_details: false
    written_translations:
      translations_mapping:
        ca_placeholder_2: {}
        content: {}
        default_outcome: {}
        feedback_1: {}
  END:
    classifier_model_id: null
    content:
      content_id: content
      html: <p>Congratulations, you have finished!</p>
    interaction:
      answer_groups: []
      confirmed_unclassified_answers: []
      customization_args:
        recommendedExplorationIds:
          value: []
      default_outcome: null
      hints: []
      id: EndExploration
      solution: null
    next_content_id_index: 0
    param_changes: []
    recorded_voiceovers:
      voiceovers_mapping:
        content: {}
    solicit_answer_details: false
    written_translations:
      translations_mapping:
        content: {}
  New state:
    classifier_model_id: null
    content:
      content_id: content
      html: ''
    interaction:
      answer_groups: []
      confirmed_unclassified_answers: []
      customization_args:
        allowImproperFraction:
          value: true
        allowNonzeroIntegerPart:
          value: true
        customPlaceholder:
          value:
            content_id: ca_customPlaceholder_0
            unicode_str: ''
        requireSimplestForm:
          value: false
      default_outcome:
        dest: END
        feedback:
          content_id: default_outcome
          html: ''
        labelled_as_correct: false
        missing_prerequisite_skill_id: null
        param_changes: []
        refresher_exploration_id: null
      hints: []
      id: FractionInput
      solution: null
    next_content_id_index: 1
    param_changes: []
    recorded_voiceovers:
      voiceovers_mapping:
        ca_customPlaceholder_0: {}
        content: {}
        default_outcome: {}
    solicit_answer_details: false
    written_translations:
      translations_mapping:
        ca_customPlaceholder_0: {}
        content: {}
        default_outcome: {}
states_schema_version: 40
tags: []
title: Title
""")
        # Loading the v21 snapshot must produce exactly the expected
        # latest-schema serialization above.
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V21)
        self.assertEqual(exploration.to_yaml(), latest_yaml_content)
def test_load_from_v22(self):
"""Test direct loading from a v22 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V22)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v23(self):
"""Test direct loading from a v23 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V23)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v24(self):
"""Test direct loading from a v24 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V24)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v25(self):
"""Test direct loading from a v25 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V25)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v26(self):
"""Test direct loading from a v26 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V26)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v27(self):
"""Test direct loading from a v27 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V27)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v28(self):
"""Test direct loading from a v28 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V28)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
    def test_load_from_v29(self):
        """Test direct loading from a v29 yaml file."""
        # This snapshot's 'New state' uses an ImageClickInput interaction, so
        # the migrated output differs from self._LATEST_YAML_CONTENT; compare
        # against a bespoke expected YAML string instead.
        latest_yaml_content = (
            """author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 45
states:
  (untitled state):
    classifier_model_id: null
    content:
      content_id: content
      html: ''
    interaction:
      answer_groups:
      - outcome:
          dest: END
          feedback:
            content_id: feedback_1
            html: <p>Correct!</p>
          labelled_as_correct: false
          missing_prerequisite_skill_id: null
          param_changes: []
          refresher_exploration_id: null
        rule_specs:
        - inputs:
            x:
            - InputString
          rule_type: Equals
        tagged_skill_misconception_id: null
        training_data: []
      confirmed_unclassified_answers: []
      customization_args:
        placeholder:
          value:
            content_id: ca_placeholder_2
            unicode_str: ''
        rows:
          value: 1
      default_outcome:
        dest: (untitled state)
        feedback:
          content_id: default_outcome
          html: ''
        labelled_as_correct: false
        missing_prerequisite_skill_id: null
        param_changes: []
        refresher_exploration_id: null
      hints: []
      id: TextInput
      solution: null
    next_content_id_index: 3
    param_changes: []
    recorded_voiceovers:
      voiceovers_mapping:
        ca_placeholder_2: {}
        content: {}
        default_outcome: {}
        feedback_1: {}
    solicit_answer_details: false
    written_translations:
      translations_mapping:
        ca_placeholder_2: {}
        content: {}
        default_outcome: {}
        feedback_1: {}
  END:
    classifier_model_id: null
    content:
      content_id: content
      html: <p>Congratulations, you have finished!</p>
    interaction:
      answer_groups: []
      confirmed_unclassified_answers: []
      customization_args:
        recommendedExplorationIds:
          value: []
      default_outcome: null
      hints: []
      id: EndExploration
      solution: null
    next_content_id_index: 0
    param_changes: []
    recorded_voiceovers:
      voiceovers_mapping:
        content: {}
    solicit_answer_details: false
    written_translations:
      translations_mapping:
        content: {}
  New state:
    classifier_model_id: null
    content:
      content_id: content
      html: ''
    interaction:
      answer_groups: []
      confirmed_unclassified_answers: []
      customization_args:
        highlightRegionsOnHover:
          value: false
        imageAndRegions:
          value:
            imagePath: s1ImagePath_height_120_width_120.png
      default_outcome:
        dest: END
        feedback:
          content_id: default_outcome
          html: ''
        labelled_as_correct: false
        missing_prerequisite_skill_id: null
        param_changes: []
        refresher_exploration_id: null
      hints: []
      id: ImageClickInput
      solution: null
    next_content_id_index: 0
    param_changes: []
    recorded_voiceovers:
      voiceovers_mapping:
        content: {}
        default_outcome: {}
    solicit_answer_details: false
    written_translations:
      translations_mapping:
        content: {}
        default_outcome: {}
states_schema_version: 40
tags: []
title: Title
""")
        # Loading the v29 snapshot must produce exactly the expected
        # latest-schema serialization above.
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V29)
        self.assertEqual(exploration.to_yaml(), latest_yaml_content)
def test_load_from_v30(self):
"""Test direct loading from a v30 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V30)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v31(self):
"""Test direct loading from a v31 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V31)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v32(self):
"""Test direct loading from a v32 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V32)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v33(self):
"""Test direct loading from a v33 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V33)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
    def test_load_from_v40_special_cases(self):
        """Test to cover some special cases that occur in the migration from
        v40 to v41 exploration schema. This includes modifying existing written
        translations, converting html to SubtitledHtml, and filling in empty
        SubtitledHtml list customization arguments with a default value.
        """
        # v40 input: MultipleChoiceInput with empty customization_args and an
        # html-keyed written translation for 'content'.
        sample_yaml_content = (
            """author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 40
states:
  (untitled state):
    classifier_model_id: null
    content:
      content_id: content
      html: ''
    interaction:
      answer_groups:
      - outcome:
          dest: END
          feedback:
            content_id: feedback_1
            html: <p>Correct!</p>
          labelled_as_correct: false
          missing_prerequisite_skill_id: null
          param_changes: []
          refresher_exploration_id: null
        rule_specs:
        - inputs:
            x: InputString
          rule_type: Equals
        tagged_skill_misconception_id: null
        training_data: []
      confirmed_unclassified_answers: []
      customization_args: {}
      default_outcome:
        dest: (untitled state)
        feedback:
          content_id: default_outcome
          html: ''
        labelled_as_correct: false
        missing_prerequisite_skill_id: null
        param_changes: []
        refresher_exploration_id: null
      hints: []
      id: MultipleChoiceInput
      solution: null
    param_changes: []
    recorded_voiceovers:
      voiceovers_mapping:
        content: {}
        default_outcome: {}
        feedback_1: {}
    solicit_answer_details: false
    written_translations:
      translations_mapping:
        content:
          en:
            html: <p>Translation</p>
            needs_update: false
        default_outcome: {}
        feedback_1: {}
  END:
    classifier_model_id: null
    content:
      content_id: content
      html: <p>Congratulations, you have finished!</p>
    interaction:
      answer_groups: []
      confirmed_unclassified_answers: []
      customization_args:
        recommendedExplorationIds:
          value: []
      default_outcome: null
      hints: []
      id: EndExploration
      solution: null
    param_changes: []
    recorded_voiceovers:
      voiceovers_mapping:
        content: {}
    solicit_answer_details: false
    written_translations:
      translations_mapping:
        content: {}
  New state:
    classifier_model_id: null
    content:
      content_id: content
      html: ''
    interaction:
      answer_groups: []
      confirmed_unclassified_answers: []
      customization_args:
        placeholder:
          value: ''
        rows:
          value: 1
      default_outcome:
        dest: END
        feedback:
          content_id: default_outcome
          html: ''
        labelled_as_correct: false
        missing_prerequisite_skill_id: null
        param_changes: []
        refresher_exploration_id: null
      hints: []
      id: TextInput
      solution: null
    param_changes: []
    recorded_voiceovers:
      voiceovers_mapping:
        content: {}
        default_outcome: {}
    solicit_answer_details: false
    written_translations:
      translations_mapping:
        content: {}
        default_outcome: {}
states_schema_version: 35
tags: []
title: Title
""")
        # Expected output: choices filled with a default SubtitledHtml value,
        # the written translation converted to the data_format/translation
        # shape, and placeholder converted to SubtitledUnicode.
        latest_sample_yaml_content = (
            """author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 45
states:
  (untitled state):
    classifier_model_id: null
    content:
      content_id: content
      html: ''
    interaction:
      answer_groups:
      - outcome:
          dest: END
          feedback:
            content_id: feedback_1
            html: <p>Correct!</p>
          labelled_as_correct: false
          missing_prerequisite_skill_id: null
          param_changes: []
          refresher_exploration_id: null
        rule_specs:
        - inputs:
            x: InputString
          rule_type: Equals
        tagged_skill_misconception_id: null
        training_data: []
      confirmed_unclassified_answers: []
      customization_args:
        choices:
          value:
          - content_id: ca_choices_2
            html: ''
        showChoicesInShuffledOrder:
          value: true
      default_outcome:
        dest: (untitled state)
        feedback:
          content_id: default_outcome
          html: ''
        labelled_as_correct: false
        missing_prerequisite_skill_id: null
        param_changes: []
        refresher_exploration_id: null
      hints: []
      id: MultipleChoiceInput
      solution: null
    next_content_id_index: 3
    param_changes: []
    recorded_voiceovers:
      voiceovers_mapping:
        ca_choices_2: {}
        content: {}
        default_outcome: {}
        feedback_1: {}
    solicit_answer_details: false
    written_translations:
      translations_mapping:
        ca_choices_2: {}
        content:
          en:
            data_format: html
            needs_update: false
            translation: <p>Translation</p>
        default_outcome: {}
        feedback_1: {}
  END:
    classifier_model_id: null
    content:
      content_id: content
      html: <p>Congratulations, you have finished!</p>
    interaction:
      answer_groups: []
      confirmed_unclassified_answers: []
      customization_args:
        recommendedExplorationIds:
          value: []
      default_outcome: null
      hints: []
      id: EndExploration
      solution: null
    next_content_id_index: 0
    param_changes: []
    recorded_voiceovers:
      voiceovers_mapping:
        content: {}
    solicit_answer_details: false
    written_translations:
      translations_mapping:
        content: {}
  New state:
    classifier_model_id: null
    content:
      content_id: content
      html: ''
    interaction:
      answer_groups: []
      confirmed_unclassified_answers: []
      customization_args:
        placeholder:
          value:
            content_id: ca_placeholder_0
            unicode_str: ''
        rows:
          value: 1
      default_outcome:
        dest: END
        feedback:
          content_id: default_outcome
          html: ''
        labelled_as_correct: false
        missing_prerequisite_skill_id: null
        param_changes: []
        refresher_exploration_id: null
      hints: []
      id: TextInput
      solution: null
    next_content_id_index: 1
    param_changes: []
    recorded_voiceovers:
      voiceovers_mapping:
        ca_placeholder_0: {}
        content: {}
        default_outcome: {}
    solicit_answer_details: false
    written_translations:
      translations_mapping:
        ca_placeholder_0: {}
        content: {}
        default_outcome: {}
states_schema_version: 40
tags: []
title: Title
""")
        exploration = exp_domain.Exploration.from_yaml(
            'eid', sample_yaml_content)
        self.assertEqual(exploration.to_yaml(), latest_sample_yaml_content)
    def test_load_from_v41_with_text_inputs_case_sensitive_equals_rule(self):
        """Test to cover the case where a TextInput interaction contains
        an AnswerGroup that has a CaseSensitiveEquals rule.
        """
        # v40 input: TextInput with a CaseSensitiveEquals rule and a scalar
        # rule input.
        sample_yaml_content = (
            """author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 40
states:
  (untitled state):
    classifier_model_id: null
    content:
      content_id: content
      html: ''
    interaction:
      answer_groups:
      - outcome:
          dest: END
          feedback:
            content_id: feedback_1
            html: <p>Correct!</p>
          labelled_as_correct: false
          missing_prerequisite_skill_id: null
          param_changes: []
          refresher_exploration_id: null
        rule_specs:
        - inputs:
            x: InputString
          rule_type: CaseSensitiveEquals
        tagged_skill_misconception_id: null
        training_data: []
      confirmed_unclassified_answers: []
      customization_args: {}
      default_outcome:
        dest: (untitled state)
        feedback:
          content_id: default_outcome
          html: ''
        labelled_as_correct: false
        missing_prerequisite_skill_id: null
        param_changes: []
        refresher_exploration_id: null
      hints: []
      id: TextInput
      solution: null
    param_changes: []
    recorded_voiceovers:
      voiceovers_mapping:
        content: {}
        default_outcome: {}
        feedback_1: {}
    solicit_answer_details: false
    written_translations:
      translations_mapping:
        content:
          en:
            html: <p>Translation</p>
            needs_update: false
        default_outcome: {}
        feedback_1: {}
  END:
    classifier_model_id: null
    content:
      content_id: content
      html: <p>Congratulations, you have finished!</p>
    interaction:
      answer_groups: []
      confirmed_unclassified_answers: []
      customization_args:
        recommendedExplorationIds:
          value: []
      default_outcome: null
      hints: []
      id: EndExploration
      solution: null
    param_changes: []
    recorded_voiceovers:
      voiceovers_mapping:
        content: {}
    solicit_answer_details: false
    written_translations:
      translations_mapping:
        content: {}
  New state:
    classifier_model_id: null
    content:
      content_id: content
      html: ''
    interaction:
      answer_groups: []
      confirmed_unclassified_answers: []
      customization_args:
        placeholder:
          value: ''
        rows:
          value: 1
      default_outcome:
        dest: END
        feedback:
          content_id: default_outcome
          html: ''
        labelled_as_correct: false
        missing_prerequisite_skill_id: null
        param_changes: []
        refresher_exploration_id: null
      hints: []
      id: TextInput
      solution: null
    param_changes: []
    recorded_voiceovers:
      voiceovers_mapping:
        content: {}
        default_outcome: {}
    solicit_answer_details: false
    written_translations:
      translations_mapping:
        content: {}
        default_outcome: {}
states_schema_version: 35
tags: []
title: Title
""")
        # Expected output: the CaseSensitiveEquals rule becomes an Equals rule
        # whose input is a list of strings.
        latest_sample_yaml_content = (
            """author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 45
states:
  (untitled state):
    classifier_model_id: null
    content:
      content_id: content
      html: ''
    interaction:
      answer_groups:
      - outcome:
          dest: END
          feedback:
            content_id: feedback_1
            html: <p>Correct!</p>
          labelled_as_correct: false
          missing_prerequisite_skill_id: null
          param_changes: []
          refresher_exploration_id: null
        rule_specs:
        - inputs:
            x:
            - InputString
          rule_type: Equals
        tagged_skill_misconception_id: null
        training_data: []
      confirmed_unclassified_answers: []
      customization_args:
        placeholder:
          value:
            content_id: ca_placeholder_2
            unicode_str: ''
        rows:
          value: 1
      default_outcome:
        dest: (untitled state)
        feedback:
          content_id: default_outcome
          html: ''
        labelled_as_correct: false
        missing_prerequisite_skill_id: null
        param_changes: []
        refresher_exploration_id: null
      hints: []
      id: TextInput
      solution: null
    next_content_id_index: 3
    param_changes: []
    recorded_voiceovers:
      voiceovers_mapping:
        ca_placeholder_2: {}
        content: {}
        default_outcome: {}
        feedback_1: {}
    solicit_answer_details: false
    written_translations:
      translations_mapping:
        ca_placeholder_2: {}
        content:
          en:
            data_format: html
            needs_update: false
            translation: <p>Translation</p>
        default_outcome: {}
        feedback_1: {}
  END:
    classifier_model_id: null
    content:
      content_id: content
      html: <p>Congratulations, you have finished!</p>
    interaction:
      answer_groups: []
      confirmed_unclassified_answers: []
      customization_args:
        recommendedExplorationIds:
          value: []
      default_outcome: null
      hints: []
      id: EndExploration
      solution: null
    next_content_id_index: 0
    param_changes: []
    recorded_voiceovers:
      voiceovers_mapping:
        content: {}
    solicit_answer_details: false
    written_translations:
      translations_mapping:
        content: {}
  New state:
    classifier_model_id: null
    content:
      content_id: content
      html: ''
    interaction:
      answer_groups: []
      confirmed_unclassified_answers: []
      customization_args:
        placeholder:
          value:
            content_id: ca_placeholder_0
            unicode_str: ''
        rows:
          value: 1
      default_outcome:
        dest: END
        feedback:
          content_id: default_outcome
          html: ''
        labelled_as_correct: false
        missing_prerequisite_skill_id: null
        param_changes: []
        refresher_exploration_id: null
      hints: []
      id: TextInput
      solution: null
    next_content_id_index: 1
    param_changes: []
    recorded_voiceovers:
      voiceovers_mapping:
        ca_placeholder_0: {}
        content: {}
        default_outcome: {}
    solicit_answer_details: false
    written_translations:
      translations_mapping:
        ca_placeholder_0: {}
        content: {}
        default_outcome: {}
states_schema_version: 40
tags: []
title: Title
""")
        exploration = exp_domain.Exploration.from_yaml(
            'eid', sample_yaml_content)
        self.assertEqual(exploration.to_yaml(), latest_sample_yaml_content)
    def test_cannot_load_from_yaml_with_no_schema_version(self):
        """Loading a YAML snapshot that lacks a schema_version field should
        raise an error rather than attempting a migration.
        """
        # Legacy (pre-v1-style) YAML with the schema_version key deliberately
        # omitted.
        sample_yaml_content = (
            """author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
skin_customizations:
  panels_contents: {}
states:
  (untitled state):
    content:
    - type: text
      value: ''
    interaction:
      customization_args:
        placeholder:
          value: ''
        rows:
          value: 1
      handlers:
      - name: submit
        rule_specs:
        - definition:
            inputs:
              x: InputString
            name: Equals
            rule_type: atomic
          dest: END
          feedback:
          - Correct!
          param_changes: []
        - definition:
            rule_type: default
          dest: (untitled state)
          feedback: []
          param_changes: []
      id: TextInput
    param_changes: []
  New state:
    content:
    - type: text
      value: ''
    interaction:
      customization_args:
        placeholder:
          value: ''
        rows:
          value: 1
      handlers:
      - name: submit
        rule_specs:
        - definition:
            rule_type: default
          dest: END
          feedback: []
          param_changes: []
      id: TextInput
    param_changes: []
    widget:
      customization_args: {}
      handlers:
      - name: submit
        rule_specs:
        - definition:
            rule_type: default
          dest: END
          feedback: []
          param_changes: []
      sticky: false
      widget_id: TextInput
  END:
    content:
    - type: text
      value: Congratulations, you have finished!
    interaction:
      customization_args:
        recommendedExplorationIds:
          value: []
      handlers:
      - name: submit
        rule_specs:
        - definition:
            rule_type: default
          dest: END
          feedback: []
          param_changes: []
      id: EndExploration
      triggers: []
    param_changes: []
tags: []
""")
        # The loader must reject the content with a specific error message.
        with self.assertRaisesRegexp(
            Exception, 'Invalid YAML file: no schema version specified.'):
            exp_domain.Exploration.from_untitled_yaml(
                'eid', 'Title', 'Category', sample_yaml_content)
    def test_cannot_load_from_yaml_with_invalid_schema_version(self):
        """Loading a YAML snapshot with an out-of-range schema_version (0)
        should raise an error rather than attempting a migration.
        """
        # Same legacy YAML as the no-schema-version test, but with an invalid
        # schema_version of 0.
        sample_yaml_content = (
            """author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 0
skin_customizations:
  panels_contents: {}
states:
  (untitled state):
    content:
    - type: text
      value: ''
    interaction:
      customization_args:
        placeholder:
          value: ''
        rows:
          value: 1
      handlers:
      - name: submit
        rule_specs:
        - definition:
            inputs:
              x: InputString
            name: Equals
            rule_type: atomic
          dest: END
          feedback:
          - Correct!
          param_changes: []
        - definition:
            rule_type: default
          dest: (untitled state)
          feedback: []
          param_changes: []
      id: TextInput
    param_changes: []
  New state:
    content:
    - type: text
      value: ''
    interaction:
      customization_args:
        placeholder:
          value: ''
        rows:
          value: 1
      handlers:
      - name: submit
        rule_specs:
        - definition:
            rule_type: default
          dest: END
          feedback: []
          param_changes: []
      id: TextInput
    param_changes: []
    widget:
      customization_args: {}
      handlers:
      - name: submit
        rule_specs:
        - definition:
            rule_type: default
          dest: END
          feedback: []
          param_changes: []
      sticky: false
      widget_id: TextInput
  END:
    content:
    - type: text
      value: Congratulations, you have finished!
    interaction:
      customization_args:
        recommendedExplorationIds:
          value: []
      handlers:
      - name: submit
        rule_specs:
        - definition:
            rule_type: default
          dest: END
          feedback: []
          param_changes: []
      id: EndExploration
      triggers: []
    param_changes: []
tags: []
""")
        # The error message embeds the current maximum supported schema
        # version, so the regexp is built from CURRENT_EXP_SCHEMA_VERSION.
        with self.assertRaisesRegexp(
            Exception,
            'Sorry, we can only process v1 to v%s exploration YAML files '
            'at present.' % exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION):
            exp_domain.Exploration.from_untitled_yaml(
                'eid', 'Title', 'Category', sample_yaml_content)
class HTMLMigrationUnitTests(test_utils.GenericTestBase):
"""Test HTML migration."""
YAML_CONTENT_V26_TEXTANGULAR = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: category
correctness_feedback_enabled: false
init_state_name: Introduction
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 26
states:
Introduction:
classifier_model_id: null
content:
content_id: content
html: '<p>This is test </p><oppia-noninteractive-math
raw_latex-with-value="&quot;+,-,-,+&quot;">
</oppia-noninteractive-math>'
content_ids_to_audio_translations:
content: {}
default_outcome: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: Introduction
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: null
solution: null
param_changes: []
state1:
classifier_model_id: null
content:
content_id: content
html: <blockquote><p>Hello, this is state1</p></blockquote>
content_ids_to_audio_translations:
content: {}
default_outcome: {}
solution: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: state2
feedback:
content_id: default_outcome
html: Default <p>outcome</p> for state1
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution:
answer_is_exclusive: true
correct_answer: Answer1
explanation:
content_id: solution
html: This is <i>solution</i> for state1
param_changes: []
state2:
classifier_model_id: null
content:
content_id: content
html: <p>Hello, </p>this <i>is </i>state2
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
feedback_2: {}
hint_1: {}
hint_2: {}
interaction:
answer_groups:
- outcome:
dest: state1
feedback:
content_id: feedback_1
html: <div>Outcome1 for state2</div>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: 0
rule_type: Equals
- inputs:
x: 1
rule_type: Equals
tagged_misconception_id: null
training_data: []
- outcome:
dest: state3
feedback:
content_id: feedback_2
html: <pre>Outcome2 <br>for state2</pre>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: 0
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
choices:
value:
- <p>This is </p>value1 <br>for MultipleChoice
- This is value2<span> for <br>MultipleChoice</span>
default_outcome:
dest: state2
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints:
- hint_content:
content_id: hint_1
html: <p>Hello, this is<div> html1<b> for </b></div>state2</p>
- hint_content:
content_id: hint_2
html: Here is link 2 <oppia-noninteractive-link
text-with-value="&quot;discussion forum&quot;"
url-with-value="&quot;https://groups.google.com/
forum/?fromgroups#!forum/oppia&quot;">
</oppia-noninteractive-link>
id: MultipleChoiceInput
solution: null
param_changes: []
state3:
classifier_model_id: null
content:
content_id: content
html: <p>Hello, this is state3</p>
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: state1
feedback:
content_id: feedback_1
html: Here is the image1 <i><oppia-noninteractive-image
caption-with-value="&quot;&quot;"
filepath-with-value="&quot;startBlue.png&quot;"
alt-with-value="&quot;&quot;">
</oppia-noninteractive-image></i>Here is the image2
<div><oppia-noninteractive-image
caption-with-value="&quot;&quot;"
filepath-with-value="&quot;startBlue.png&quot;"
alt-with-value="&quot;&quot;">
</oppia-noninteractive-image></div>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x:
- This <span>is value1 for </span>ItemSelectionInput
rule_type: Equals
- inputs:
x:
- This is value3 for ItemSelectionInput
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
choices:
value:
- This <span>is value1 for </span>ItemSelection
- This <code>is value2</code> for ItemSelection
- This is value3 for ItemSelection
maxAllowableSelectionCount:
value: 1
minAllowableSelectionCount:
value: 1
default_outcome:
dest: state3
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: ItemSelectionInput
solution: null
param_changes: []
states_schema_version: 21
tags: []
title: title
""")
# pylint: disable=line-too-long, single-line-pragma
YAML_CONTENT_V45_IMAGE_DIMENSIONS = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: category
correctness_feedback_enabled: false
init_state_name: Introduction
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 45
states:
Introduction:
classifier_model_id: null
content:
content_id: content
html: '<p>This is test </p><p><oppia-noninteractive-math math_content-with-value="{&quot;raw_latex&quot;:
&quot;+,-,-,+&quot;, &quot;svg_filename&quot;: &quot;&quot;}">
</oppia-noninteractive-math></p>'
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: Introduction
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: null
solution: null
next_content_id_index: 0
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
state1:
classifier_model_id: null
content:
content_id: content
html: <blockquote><p>Hello, this is state1</p></blockquote>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value:
content_id: ca_placeholder_0
unicode_str: ''
rows:
value: 1
default_outcome:
dest: state2
feedback:
content_id: default_outcome
html: <p>Default </p><p>outcome</p><p> for state1</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution:
answer_is_exclusive: true
correct_answer: Answer1
explanation:
content_id: solution
html: <p>This is <em>solution</em> for state1</p>
next_content_id_index: 1
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
ca_placeholder_0: {}
content: {}
default_outcome: {}
solution: {}
solicit_answer_details: false
written_translations:
translations_mapping:
ca_placeholder_0: {}
content: {}
default_outcome: {}
solution: {}
state2:
classifier_model_id: null
content:
content_id: content
html: <p>Hello, </p><p>this <em>is </em>state2</p>
interaction:
answer_groups:
- outcome:
dest: state1
feedback:
content_id: feedback_1
html: <p>Outcome1 for state2</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: 0
rule_type: Equals
- inputs:
x: 1
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
- outcome:
dest: state3
feedback:
content_id: feedback_2
html: "<pre>Outcome2 \\nfor state2</pre>"
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: 0
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
choices:
value:
- content_id: ca_choices_3
html: <p>This is </p><p>value1 <br>for MultipleChoice</p>
- content_id: ca_choices_4
html: <p>This is value2 for <br>MultipleChoice</p>
showChoicesInShuffledOrder:
value: false
default_outcome:
dest: state2
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints:
- hint_content:
content_id: hint_1
html: <p>Hello, this is</p><p> html1<strong> for </strong></p><p>state2</p>
- hint_content:
content_id: hint_2
html: <p>Here is link 2 <oppia-noninteractive-link text-with-value="&quot;discussion
forum&quot;" url-with-value="&quot;https://groups.google.com/
forum/?fromgroups#!forum/oppia&quot;"> </oppia-noninteractive-link></p>
id: MultipleChoiceInput
solution: null
next_content_id_index: 5
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
ca_choices_3: {}
ca_choices_4: {}
content: {}
default_outcome: {}
feedback_1: {}
feedback_2: {}
hint_1: {}
hint_2: {}
solicit_answer_details: false
written_translations:
translations_mapping:
ca_choices_3: {}
ca_choices_4: {}
content: {}
default_outcome: {}
feedback_1: {}
feedback_2: {}
hint_1: {}
hint_2: {}
state3:
classifier_model_id: null
content:
content_id: content
html: <p>Hello, this is state3</p>
interaction:
answer_groups:
- outcome:
dest: state1
feedback:
content_id: feedback_1
html: <p>Here is the image1 </p><oppia-noninteractive-image alt-with-value="&quot;&quot;"
caption-with-value="&quot;&quot;" filepath-with-value="&quot;startBlue_height_490_width_120.png&quot;">
</oppia-noninteractive-image><p>Here is the image2 </p><oppia-noninteractive-image
alt-with-value="&quot;&quot;" caption-with-value="&quot;&quot;"
filepath-with-value="&quot;startBlue_height_490_width_120.png&quot;">
</oppia-noninteractive-image>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x:
- <p>This is value1 for ItemSelectionInput</p>
rule_type: Equals
- inputs:
x:
- <p>This is value3 for ItemSelectionInput</p>
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
choices:
value:
- content_id: ca_choices_2
html: <p>This is value1 for ItemSelection</p>
- content_id: ca_choices_3
html: <p>This is value2 for ItemSelection</p>
- content_id: ca_choices_4
html: <p>This is value3 for ItemSelection</p>
maxAllowableSelectionCount:
value: 1
minAllowableSelectionCount:
value: 1
default_outcome:
dest: state3
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: ItemSelectionInput
solution: null
next_content_id_index: 5
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
ca_choices_2: {}
ca_choices_3: {}
ca_choices_4: {}
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
ca_choices_2: {}
ca_choices_3: {}
ca_choices_4: {}
content: {}
default_outcome: {}
feedback_1: {}
states_schema_version: 40
tags: []
title: title
""")
YAML_CONTENT_V27_WITHOUT_IMAGE_CAPTION = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 27
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: <p><oppia-noninteractive-image filepath-with-value="&quot;random.png&quot;"></oppia-noninteractive-image>Hello this
is test case to check image tag inside p tag</p>
<oppia-noninteractive-math raw_latex-with-value="&quot;+,-,-,+&quot;">
</oppia-noninteractive-math>
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
content_ids_to_audio_translations:
content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 22
tags: []
title: Title
""")
YAML_CONTENT_V35_WITH_IMAGE_CAPTION = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 35
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: <oppia-noninteractive-image caption-with-value="&quot;&quot;"
filepath-with-value="&quot;random_height_490_width_120.png&quot;"></oppia-noninteractive-image><p>Hello
this is test case to check image tag inside p tag</p>
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 30
tags: []
title: Title
""")
YAML_CONTENT_V45_WITH_IMAGE_CAPTION = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 45
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: '<oppia-noninteractive-image caption-with-value="&quot;&quot;"
filepath-with-value="&quot;random_height_490_width_120.png&quot;"></oppia-noninteractive-image><p>Hello
this is test case to check image tag inside p tag</p><p> </p><oppia-noninteractive-math
math_content-with-value="{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;,
&quot;svg_filename&quot;: &quot;&quot;}"> </oppia-noninteractive-math>'
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x:
- InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value:
content_id: ca_placeholder_2
unicode_str: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
next_content_id_index: 3
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
ca_placeholder_2: {}
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
ca_placeholder_2: {}
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
next_content_id_index: 0
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value:
content_id: ca_placeholder_0
unicode_str: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
next_content_id_index: 1
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
ca_placeholder_0: {}
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
ca_placeholder_0: {}
content: {}
default_outcome: {}
states_schema_version: 40
tags: []
title: Title
""")
# pylint: enable=line-too-long, single-line-pragma
def test_load_from_v26_textangular(self):
"""Test direct loading from a v26 yaml file."""
mock_get_filename_with_dimensions_context = self.swap(
html_validation_service, 'get_filename_with_dimensions',
mock_get_filename_with_dimensions)
with mock_get_filename_with_dimensions_context:
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V26_TEXTANGULAR)
self.assertEqual(
exploration.to_yaml(), self.YAML_CONTENT_V45_IMAGE_DIMENSIONS)
def test_load_from_v27_without_image_caption(self):
"""Test direct loading from a v27 yaml file."""
mock_get_filename_with_dimensions_context = self.swap(
html_validation_service, 'get_filename_with_dimensions',
mock_get_filename_with_dimensions)
with mock_get_filename_with_dimensions_context:
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V27_WITHOUT_IMAGE_CAPTION)
self.assertEqual(
exploration.to_yaml(), self.YAML_CONTENT_V45_WITH_IMAGE_CAPTION)
class ConversionUnitTests(test_utils.GenericTestBase):
    """Test conversion methods."""
    def test_convert_exploration_to_player_dict(self):
        """to_player_dict() should expose exactly the fields the learner
        player needs, with one entry per state.
        """
        exp_title = 'Title'
        second_state_name = 'first state'
        exploration = exp_domain.Exploration.create_default_exploration(
            'eid', title=exp_title, category='Category')
        exploration.add_states([second_state_name])
        def _get_default_state_dict(content_str, dest_name):
            """Builds the expected player dict for a freshly created state.

            Args:
                content_str: str. The HTML of the state's content card.
                dest_name: str. The destination used by the default outcome
                    (a new state loops back to itself by default).

            Returns:
                dict. The expected player-facing state dict.
            """
            return {
                'next_content_id_index': 0,
                'classifier_model_id': None,
                'content': {
                    'content_id': 'content',
                    'html': content_str,
                },
                'recorded_voiceovers': {
                    'voiceovers_mapping': {
                        'content': {},
                        'default_outcome': {}
                    }
                },
                'solicit_answer_details': False,
                'written_translations': {
                    'translations_mapping': {
                        'content': {},
                        'default_outcome': {}
                    }
                },
                'interaction': {
                    'answer_groups': [],
                    'confirmed_unclassified_answers': [],
                    'customization_args': {},
                    'default_outcome': {
                        'dest': dest_name,
                        'feedback': {
                            'content_id': feconf.DEFAULT_OUTCOME_CONTENT_ID,
                            'html': ''
                        },
                        'labelled_as_correct': False,
                        'param_changes': [],
                        'refresher_exploration_id': None,
                        'missing_prerequisite_skill_id': None
                    },
                    'hints': [],
                    # A new state has no interaction selected yet.
                    'id': None,
                    'solution': None,
                },
                'param_changes': [],
            }
        # Both the default initial state and the newly added state should
        # serialise to the default player dict (differing only in content
        # and default-outcome destination).
        self.assertEqual(exploration.to_player_dict(), {
            'init_state_name': feconf.DEFAULT_INIT_STATE_NAME,
            'title': exp_title,
            'objective': feconf.DEFAULT_EXPLORATION_OBJECTIVE,
            'states': {
                feconf.DEFAULT_INIT_STATE_NAME: _get_default_state_dict(
                    feconf.DEFAULT_INIT_STATE_CONTENT_STR,
                    feconf.DEFAULT_INIT_STATE_NAME),
                second_state_name: _get_default_state_dict(
                    '', second_state_name),
            },
            'param_changes': [],
            'param_specs': {},
            'language_code': 'en',
            'correctness_feedback_enabled': False,
        })
class StateOperationsUnitTests(test_utils.GenericTestBase):
    """Tests for operations that add and remove exploration states."""

    def test_delete_state(self):
        """Deletion rules: the initial state is protected, existing states
        can be removed, and unknown names raise ValueError.
        """
        exp = exp_domain.Exploration.create_default_exploration('eid')
        exp.add_states(['first state'])

        # The initial state must never be deletable.
        with self.assertRaisesRegexp(
            ValueError, 'Cannot delete initial state'):
            exp.delete_state(exp.init_state_name)

        # A non-initial state can be deleted exactly once; deleting a name
        # that does not exist is an error.
        exp.add_states(['second state'])
        exp.delete_state('second state')
        with self.assertRaisesRegexp(ValueError, 'fake state does not exist'):
            exp.delete_state('fake state')
class HtmlCollectionTests(test_utils.GenericTestBase):
    """Test method to obtain all html strings."""
    def test_all_html_strings_are_collected(self):
        """get_all_html_content_strings() should return every HTML string
        stored anywhere in the exploration: content cards, customization
        args, outcomes, hints, solutions and rule inputs.
        """
        exploration = exp_domain.Exploration.create_default_exploration(
            'eid', title='title', category='category')
        exploration.add_states(['state1', 'state2', 'state3', 'state4'])
        state1 = exploration.states['state1']
        state2 = exploration.states['state2']
        state3 = exploration.states['state3']
        state4 = exploration.states['state4']
        # Give every state a distinctive content card so each one is
        # recognisable in the collected output.
        content1_dict = {
            'content_id': 'content',
            'html': '<blockquote>Hello, this is state1</blockquote>'
        }
        content2_dict = {
            'content_id': 'content',
            'html': '<pre>Hello, this is state2</pre>'
        }
        content3_dict = {
            'content_id': 'content',
            'html': '<p>Hello, this is state3</p>'
        }
        content4_dict = {
            'content_id': 'content',
            'html': '<p>Hello, this is state4</p>'
        }
        state1.update_content(
            state_domain.SubtitledHtml.from_dict(content1_dict))
        state2.update_content(
            state_domain.SubtitledHtml.from_dict(content2_dict))
        state3.update_content(
            state_domain.SubtitledHtml.from_dict(content3_dict))
        state4.update_content(
            state_domain.SubtitledHtml.from_dict(content4_dict))
        # One interaction type per state, covering the HTML-bearing ones.
        self.set_interaction_for_state(state1, 'TextInput')
        self.set_interaction_for_state(state2, 'MultipleChoiceInput')
        self.set_interaction_for_state(state3, 'ItemSelectionInput')
        self.set_interaction_for_state(state4, 'DragAndDropSortInput')
        customization_args_dict1 = {
            'placeholder': {
                'value': {
                    'content_id': 'ca_placeholder_0',
                    'unicode_str': 'Enter here.'
                }
            },
            'rows': {'value': 1}
        }
        customization_args_dict2 = {
            'choices': {'value': [
                {
                    'content_id': 'ca_choices_0',
                    'html': '<p>This is value1 for MultipleChoice</p>'
                },
                {
                    'content_id': 'ca_choices_1',
                    'html': '<p>This is value2 for MultipleChoice</p>'
                }
            ]},
            'showChoicesInShuffledOrder': {'value': True}
        }
        customization_args_dict3 = {
            'choices': {'value': [
                {
                    'content_id': 'ca_choices_0',
                    'html': '<p>This is value1 for ItemSelection</p>'
                },
                {
                    'content_id': 'ca_choices_1',
                    'html': '<p>This is value2 for ItemSelection</p>'
                },
                {
                    'content_id': 'ca_choices_2',
                    'html': '<p>This is value3 for ItemSelection</p>'
                }
            ]},
            'minAllowableSelectionCount': {'value': 1},
            'maxAllowableSelectionCount': {'value': 2}
        }
        customization_args_dict4 = {
            'choices': {'value': [
                {
                    'content_id': 'ca_choices_0',
                    'html': '<p>This is value1 for DragAndDropSortInput</p>'
                },
                {
                    'content_id': 'ca_choices_1',
                    'html': '<p>This is value2 for DragAndDropSortInput</p>'
                }
            ]},
            'allowMultipleItemsInSamePosition': {'value': True}
        }
        state1.update_interaction_customization_args(customization_args_dict1)
        state2.update_interaction_customization_args(customization_args_dict2)
        state3.update_interaction_customization_args(customization_args_dict3)
        state4.update_interaction_customization_args(customization_args_dict4)
        # HTML in a default outcome (state1 only).
        default_outcome = state_domain.Outcome(
            'state2', state_domain.SubtitledHtml(
                'default_outcome', '<p>Default outcome for state1</p>'),
            False, [], None, None
        )
        state1.update_interaction_default_outcome(default_outcome)
        # HTML in hints (state2 only).
        hint_list2 = [
            state_domain.Hint(
                state_domain.SubtitledHtml(
                    'hint_1', '<p>Hello, this is html1 for state2</p>'
                )
            ),
            state_domain.Hint(
                state_domain.SubtitledHtml(
                    'hint_2', '<p>Hello, this is html2 for state2</p>'
                )
            ),
        ]
        state2.update_interaction_hints(hint_list2)
        # HTML in a solution explanation (state1 only).
        solution_dict = {
            'interaction_id': '',
            'answer_is_exclusive': True,
            'correct_answer': 'Answer1',
            'explanation': {
                'content_id': 'solution',
                'html': '<p>This is solution for state1</p>'
            }
        }
        solution = state_domain.Solution.from_dict(
            state1.interaction.id, solution_dict)
        state1.update_interaction_solution(solution)
        # HTML in answer-group outcomes (state2) and, for ItemSelection,
        # in the rule inputs themselves (state3).
        answer_group_list2 = [{
            'rule_specs': [{
                'rule_type': 'Equals',
                'inputs': {'x': 0}
            }, {
                'rule_type': 'Equals',
                'inputs': {'x': 1}
            }],
            'outcome': {
                'dest': 'state1',
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': '<p>Outcome1 for state2</p>'
                },
                'param_changes': [],
                'labelled_as_correct': False,
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'training_data': [],
            'tagged_skill_misconception_id': None
        }, {
            'rule_specs': [{
                'rule_type': 'Equals',
                'inputs': {'x': 0}
            }],
            'outcome': {
                'dest': 'state3',
                'feedback': {
                    'content_id': 'feedback_2',
                    'html': '<p>Outcome2 for state2</p>'
                },
                'param_changes': [],
                'labelled_as_correct': False,
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'training_data': [],
            'tagged_skill_misconception_id': None
        }]
        answer_group_list3 = [{
            'rule_specs': [{
                'rule_type': 'Equals',
                'inputs': {'x': [
                    '<p>This is value1 for ItemSelectionInput</p>'
                ]}
            }, {
                'rule_type': 'Equals',
                'inputs': {'x': [
                    '<p>This is value3 for ItemSelectionInput</p>'
                ]}
            }],
            'outcome': {
                'dest': 'state1',
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': '<p>Outcome for state3</p>'
                },
                'param_changes': [],
                'labelled_as_correct': False,
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'training_data': [],
            'tagged_skill_misconception_id': None
        }]
        state2.update_interaction_answer_groups(answer_group_list2)
        state3.update_interaction_answer_groups(answer_group_list3)
        # Expected strings in collection order; the empty strings correspond
        # to content/outcome fields that were left blank above.
        expected_html_list = [
            '',
            '',
            '<pre>Hello, this is state2</pre>',
            '<p>Outcome1 for state2</p>',
            '<p>Outcome2 for state2</p>',
            '',
            '<p>Hello, this is html1 for state2</p>',
            '<p>Hello, this is html2 for state2</p>',
            '<p>This is value1 for MultipleChoice</p>',
            '<p>This is value2 for MultipleChoice</p>',
            '<blockquote>Hello, this is state1</blockquote>',
            '<p>Default outcome for state1</p>',
            '<p>This is solution for state1</p>',
            '<p>Hello, this is state3</p>',
            '<p>Outcome for state3</p>',
            '<p>This is value1 for ItemSelectionInput</p>',
            '<p>This is value3 for ItemSelectionInput</p>',
            '',
            '<p>This is value1 for ItemSelection</p>',
            '<p>This is value2 for ItemSelection</p>',
            '<p>This is value3 for ItemSelection</p>',
            '<p>Hello, this is state4</p>',
            '',
            '<p>This is value1 for DragAndDropSortInput</p>',
            '<p>This is value2 for DragAndDropSortInput</p>'
        ]
        actual_outcome_list = exploration.get_all_html_content_strings()
        self.assertEqual(actual_outcome_list, expected_html_list)
| 28.435115 | 135 | 0.601726 |
acef5a7f13af157099d457700f6c17d76f445603 | 2,329 | py | Python | pysts/notebook/convert.py | sdswart/pysts | f140072e064b59a7d8732e73d71fd812b6d292c5 | [
"MIT"
] | null | null | null | pysts/notebook/convert.py | sdswart/pysts | f140072e064b59a7d8732e73d71fd812b6d292c5 | [
"MIT"
] | null | null | null | pysts/notebook/convert.py | sdswart/pysts | f140072e064b59a7d8732e73d71fd812b6d292c5 | [
"MIT"
] | null | null | null | import os
from .utils import *
def libre_convert_to_pdf(paths, libre_office='soffice'):
    """Convert .docx files to PDF using the LibreOffice command line.

    Args:
        paths: iterable of file paths; entries not ending in '.docx' are
            skipped.
        libre_office: the LibreOffice executable to invoke. The original
            code hard-coded an empty string here, so every conversion
            failed; it now defaults to the conventional 'soffice' binary
            and can be overridden by callers.

    Returns:
        list: the expected .pdf paths for the converted documents.
    """
    # Local import: the module header only imports `os`, and `Popen` was
    # previously an unresolved name (possibly masked by `from .utils import *`).
    from subprocess import Popen
    res = []
    for path in paths:
        if path.endswith('docx'):
            pdf_path = path.replace('.docx', '.pdf')
            # LibreOffice's --outdir takes a *directory*; the original code
            # passed the target file path, so output never landed where
            # `pdf_path` pointed. Use the destination directory instead.
            out_dir = os.path.dirname(pdf_path) or '.'
            p = Popen([libre_office, '--headless', '--convert-to', 'pdf',
                       '--outdir', out_dir, path])
            p.communicate()  # wait for the converter to finish
            res.append(pdf_path)
    return res
def docx_to_pdf(paths):
    """Render each .docx path in *paths* to PDF via pypandoc (xelatex engine).

    Entries that do not end in '.docx' are ignored. Returns the list of
    generated .pdf paths.
    """
    pypandoc = get_package('pypandoc')
    converted = []
    for source in paths:
        if not source.endswith('docx'):
            continue
        target = source.replace('.docx', '.pdf')
        pypandoc.convert_file(
            source, 'pdf', outputfile=target,
            extra_args=['--pdf-engine=xelatex'])
        converted.append(target)
    return converted
def create_word_from_html(html,template_path,output_path=None):
    """Build a .docx report from an HTML string.

    The HTML is written to a temporary file, converted to docx via pandoc,
    and optionally merged into a template document so the template's styles
    and front matter are reused.

    Args:
        html: HTML markup for the report body.
        template_path: path to a .docx template. If it exists, the converted
            body (including its image parts and relationships) is appended
            to it; otherwise the converted document is used as-is.
        output_path: destination path; when None a timestamped
            'Report_....docx' is created in the current working directory.

    Returns:
        list: a single-element list containing the saved document's path.
    """
    pypandoc=get_package('pypandoc')
    docx=get_package('docx','python-docx')
    Document=docx.Document
    # Round-trip through temp files in the current working directory.
    with open("temp.html",'w') as f:
        f.write(html)
    pypandoc.convert_file('temp.html', 'docx', outputfile="temp.docx")
    temp_doc=Document("temp.docx")
    if template_path is not None and os.path.isfile(template_path):
        document = Document(template_path)
        #Add elements
        for elem in temp_doc.element.body:
            document.element.body.append(elem)
        #Add image parts
        for imagepart in temp_doc.part._package.image_parts:
            document.part._package.image_parts.append(imagepart)
        # Copy image relationships so embedded pictures keep resolving.
        # NOTE(review): this pokes python-docx internals
        # (_package, _target_parts_by_rId) and is fragile across versions.
        for rId,rel in temp_doc.part.rels.items():
            if rel.reltype.endswith('image'):
                document.part.rels[rId]=rel
                document.part.rels._target_parts_by_rId[rId] = rel._target
    else:
        document=temp_doc
    #change table styles
    # 'default' / 'table_normal' are presumably styles defined in the
    # template document -- TODO confirm.
    for table in document.tables:
        table.style = 'default'
        for row in table.rows:
            for cell in row.cells:
                for paragraph in cell.paragraphs:
                    paragraph.style='table_normal'
    if output_path is None:
        # NOTE(review): `datetime` is not imported in this module's visible
        # header; it presumably comes from `from .utils import *` -- confirm.
        report_name=f'Report_{datetime.now().strftime("%Y-%m-%d %H:%M:%S")}.docx'
        output_path=os.path.join(os.getcwd(),report_name)
    document.save(output_path)
    # Clean up the intermediate files (only on the success path).
    os.remove('temp.docx')
    os.remove('temp.html')
    return [output_path]
| 32.347222 | 103 | 0.617432 |
acef5ac108cce64f2d8b6eef0619802c1f5dbb37 | 401 | py | Python | ex073.py | pepev123/PythonEx | 8f39751bf87a9099d7b733aa829988595dab2344 | [
"MIT"
] | null | null | null | ex073.py | pepev123/PythonEx | 8f39751bf87a9099d7b733aa829988595dab2344 | [
"MIT"
] | null | null | null | ex073.py | pepev123/PythonEx | 8f39751bf87a9099d7b733aa829988595dab2344 | [
"MIT"
] | null | null | null | tabela = ('Flamengo', 'Cruzeiro', 'Figueirense', 'Chapecoense', 'Fluminense', 'Avai', 'Santos', 'Bragantino', 'Gremio')
print(f'A ordem na tabela é {tabela}.')
print(f'A tabela em ordem alfabética é {sorted(tabela)}.' )
print(f'Os primerios 5 times da tabela são {tabela[:5]}.')
print(f'Os ultimos 4 times da tabela são {tabela[5:]}.')
print(f'A posição da Chape é {tabela.index("Chapecoense") + 1}') | 66.833333 | 119 | 0.685786 |
acef5b3715f864992ce719013dccd764d2ec9c1f | 1,393 | py | Python | users/views.py | wanguinjoka/Tech-Olympia | 35b070c5011173f16bf4725e6200d988a27bc10f | [
"MIT"
] | null | null | null | users/views.py | wanguinjoka/Tech-Olympia | 35b070c5011173f16bf4725e6200d988a27bc10f | [
"MIT"
] | null | null | null | users/views.py | wanguinjoka/Tech-Olympia | 35b070c5011173f16bf4725e6200d988a27bc10f | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect
from .forms import UserRegisterForm, UserUpadateForm,ProfileUpdateForm
from django.contrib import messages
from django.contrib.auth.decorators import login_required
# Create your views here.
def register(request):
    """Sign-up view.

    GET renders an empty registration form; POST validates the submission,
    creates the account, flashes a success message, and redirects to login.
    An invalid POST re-renders the bound form so errors are shown.
    """
    if request.method != 'POST':
        return render(
            request, 'users/register.html', {'form': UserRegisterForm()})
    form = UserRegisterForm(request.POST)
    if form.is_valid():
        form.save()
        username = form.cleaned_data.get('username')
        messages.success(request, f'Account created for {username}! You are now able to login')
        return redirect('login')
    return render(request, 'users/register.html', {'form': form})
@login_required
def profile(request):
    """Let the signed-in user view and edit their account and profile.

    POST updates both the user form and the profile form (the latter may
    include an uploaded file); on success it flashes a message and
    redirects back here. GET (or an invalid POST) renders the forms.
    """
    if request.method == 'POST':
        u_form = UserUpadateForm(request.POST, instance=request.user)
        p_form = ProfileUpdateForm(
            request.POST, request.FILES, instance=request.user.profile)
        if u_form.is_valid() and p_form.is_valid():
            u_form.save()
            p_form.save()
            messages.success(request, f'Your account has been updated!')
            return redirect('profile')
    else:
        u_form = UserUpadateForm(instance=request.user)
        p_form = ProfileUpdateForm(instance=request.user.profile)
    return render(
        request, 'users/profile.html', {'u_form': u_form, 'p_form': p_form})
| 35.717949 | 99 | 0.677674 |
acef5b97469a3b68ce7c747b75ecc37aa8eb9390 | 3,520 | py | Python | ImageProcessing/Descriptors.py | YuKill/UEMImplementation | dfafe5768d9adcb3606b570497383e6010afb4ff | [
"Unlicense"
] | null | null | null | ImageProcessing/Descriptors.py | YuKill/UEMImplementation | dfafe5768d9adcb3606b570497383e6010afb4ff | [
"Unlicense"
] | 1 | 2021-03-10T02:58:40.000Z | 2021-03-10T02:58:40.000Z | ImageProcessing/Descriptors.py | YuKill/UEMImplementation | dfafe5768d9adcb3606b570497383e6010afb4ff | [
"Unlicense"
] | 1 | 2019-06-20T02:01:29.000Z | 2019-06-20T02:01:29.000Z | #! /usr/bin/python
import numpy as n
import cv2
import sys
from math import *
from queue import *
def fourier(Vec, Freq):
    """Return the DFT coefficient of Vec at frequency Freq, divided by len(Vec)."""
    N = len(Vec)
    W = -2 * pi * Freq / N
    Acc = 0j
    for K in range(N):
        Acc += Vec[K] * complex(cos(W * K), sin(W * K))
    return Acc / N
def invFourier(Vec, M, Top):
    """Evaluate sample M of the inverse DFT using only the first Top
    coefficients of Vec (no 1/N factor: fourier() already normalised).
    """
    W = 2 * pi * M / len(Vec)
    Total = 0j
    for K in range(Top):
        Total += Vec[K] * complex(cos(W * K), sin(W * K))
    return Total
def distance(P1, P2):
    """Manhattan (L1) distance between two 2-D points."""
    dx = abs(P1[0] - P2[0])
    dy = abs(P1[1] - P2[1])
    return dx + dy
def reorganize(Positions, Last):
    # Fix the in-row ordering of border pixels: for each run of columns
    # sharing the same row value, flip the run in place if its reversed
    # orientation starts closer (in L1 distance) to the previously visited
    # point. `Positions` is assumed to be a 2xN coordinate array as built
    # by clockWise() -- TODO confirm row/col axis convention.
    Proc = {}  # row values already processed, so each run is flipped at most once
    for I in range(len(Positions[0])):
        Cur = n.ravel(Positions[:, I])  # current (row, col) point as a flat pair
        if (not Cur[0] in Proc):
            # First time this row value is seen: extract the whole run of
            # columns with that row (boolean indexing yields a copy).
            Line = Positions[:, Positions[0] == Cur[0]]
            Dist = distance(Last, Cur)
            IDist = distance(Last, n.ravel(Line[:, ::-1][:, 0]))
            if (IDist < Dist):
                # Reversed run starts nearer to the last point: write the
                # flipped copy back into Positions in place.
                Positions[:, Positions[0] == Cur[0]] = Line[:, ::-1]
            Proc[Cur[0]] = True
        Last = Cur  # track the most recently visited point
    return Positions
def clockWise(Edges):
    # Order the edge pixels of a binary edge image (255 = edge) into a
    # single roughly clockwise contour: pixels right of the centroid are
    # walked top-down, pixels left of it bottom-up, then reorganize()
    # fixes the ordering within each row.
    Center = n.argwhere(Edges == 255)
    Center = Center.sum(0) / len(Center)  # centroid (row, col) of the edge pixels
    Border = n.array(n.where(Edges == 255))  # 2xN array: row 0 = rows, row 1 = cols
    print(Center)  # debug output
    # Left half (col <= centroid col), sorted bottom-to-top.
    Left = Border[:, Border[1] <= Center[1]]
    Left = Left[:, n.argsort(Left[0])[::-1]]
    # Right half (col > centroid col), sorted top-to-bottom.
    Right = Border[:, Border[1] > Center[1]]
    Right = Right[:, n.argsort(Right[0])]
    print(Left)  # debug output
    # Re-order each half starting from the end point of the other half.
    Left = reorganize(Left, n.ravel(Right[:, ::-1][:, 0]))
    print(Left)  # debug output
    Right = reorganize(Right, n.ravel(Left[:, ::-1][:, 0]))
    # Concatenate along axis 1: right half first, then left half.
    return n.concatenate((Right, Left), 1)
# ---- Command-line driver: smooth a shape's contour with Fourier descriptors.
# Usage: Descriptors.py <image> <num_descriptors>
ImgName = sys.argv[1]
DescNum = int(sys.argv[2])  # number of Fourier coefficients to keep
#Threshold = int(sys.argv[3])
Img = cv2.imread(ImgName, 0)  # load as single-channel grayscale
# Binarise, then invert: the value 50 is a temporary marker while swapping
# 0 <-> 255 so the foreground ends up black on white.
Img[Img > 127] = 255;
Img[Img <= 127] = 0;
Img[Img == 255] = 50;
Img[Img == 0] = 255;
Img[Img == 50] = 0;
print(Img.shape)
#Scale = int(Img.shape[0] / 500)
#Img = Img[::Scale, ::Scale]
# NOTE(review): numpy shape is (rows, cols), so these names are swapped
# relative to their meaning.
Width, Height = Img.shape
print(Img)
# NOTE(review): cv2.Canny's 2nd/3rd arguments are hysteresis thresholds,
# not image dimensions -- confirm this is intentional.
Edges = cv2.Canny(Img, Width, Height)
#Edges = n.where(Edges == 255)
IEdges = n.argwhere(Edges == 255)
Edges = clockWise(Edges)  # order the border pixels into a contour (2xN array)
#Edges = Edges[::-1]
'''
for I in IEdges:
    Found = Edges[:, Edges[:, Edges[0] == I[0]][1] == I[1]]
    if (Found.size == 0):
        print(Edges[:, Edges[0] == I[0]])
        print(Edges[:, Edges[:, Edges[0] == I[0]][1] == I[1]])
        print(I)
String = ""
for I in range(len(Edges[0])):
    String = String + str(Edges[:, I])
print(String)
print(Edges)
'''
# Encode the contour as complex samples: row + col*1j.
# NOTE(review): the first assignment is immediately overwritten, and
# n.complex was removed in NumPy >= 1.24.
Complex = n.zeros(Img.shape, n.complex)
Complex = [Edges[0][I] + Edges[1][I] * 1j for I in range(len(Edges[0]))]
print(len(Complex))
'''
FourierField = [fourier(Complex, I) for I in range(len(Complex))]
print(FourierField[Threshold::])
FourierField[Threshold:] = n.repeat(0 + 0j, len(FourierField[Threshold:]))
print(len(FourierField))
Edges = [invFourier(FourierField, I, DescNum) for I in range(len(FourierField))]
print(len(Edges))
'''
# Keep DescNum low-frequency coefficients: floor(DescNum/2) from the front
# of the spectrum and the rest from the back (negative frequencies), then
# zero the middle band [FromZeroT, ToZeroT).
FromZeroT = floor(DescNum / 2)
ToZeroT = len(Complex) - FromZeroT
if (DescNum % 2 == 1):
    ToZeroT = ToZeroT-1  # odd count: keep one extra trailing coefficient
Fourier = n.fft.fft(Complex)
Fourier[FromZeroT:ToZeroT] = 0
print(Fourier[FromZeroT:ToZeroT])
print(FromZeroT, ":", ToZeroT)
IFourier = n.fft.ifft(Fourier)
# Rasterise the smoothed contour back into an image, dropping points that
# fall outside the frame.
Contour = n.zeros(Img.shape)
for I in IFourier:
    X = int(I.real)
    Y = int(I.imag)
    if (Y < Height) and (X < Width) and (Y >= 0) and (X >= 0):
        Contour[X, Y] = 255
Contour = n.uint8(Contour)
# Show the original and the reconstructed contour until a key is pressed.
cv2.imshow(ImgName, Img)
cv2.imshow("Contour.png", Contour)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 25.882353 | 125 | 0.583523 |
acef5bbe3db2526b7ab314405b8d3f2a9f4356ea | 6,628 | py | Python | bindings/python/ensmallen_graph/datasets/string/thiothrixnivea.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/string/thiothrixnivea.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/string/thiothrixnivea.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | """
This file offers the methods to automatically retrieve the graph Thiothrix nivea.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 21:48:53.901964
The undirected graph Thiothrix nivea has 4293 nodes and 525467 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.05704 and has 22 connected components, where the component with most
nodes has 4238 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 213, the mean node degree is 244.80, and
the node degree mode is 2. The top 5 most central nodes are 870187.Thini_0124
(degree 1770), 870187.Thini_2445 (degree 1671), 870187.Thini_2684 (degree
1549), 870187.Thini_1534 (degree 1458) and 870187.Thini_0125 (degree 1395).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import ThiothrixNivea
# Then load the graph
graph = ThiothrixNivea()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Wether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def ThiothrixNivea(
    directed: bool = False,
    verbose: int = 2,
    cache_path: str = "graphs/string",
    **additional_graph_kwargs: Dict
) -> EnsmallenGraph:
    """Retrieve and return the Thiothrix nivea graph from the STRING repository.

    Parameters
    -------------------
    directed: bool = False,
        Whether to load the graph as directed or undirected.
        By default false.
    verbose: int = 2,
        Whether to show loading bars during the retrieval and building
        of the graph.
    cache_path: str = "graphs/string",
        Where to store the downloaded graph files.
    additional_graph_kwargs: Dict,
        Additional keyword arguments forwarded to the graph constructor.

    Returns
    -----------------------
    Instance of the Thiothrix nivea graph.

    References
    -----------------------
    Please cite Szklarczyk et al., "STRING v11: protein--protein association
    networks with increased coverage", Nucleic Acids Research, 2019, if you
    use this data.

    Usage example
    -----------------------
    .. code:: python

        from ensmallen_graph.datasets.string import ThiothrixNivea
        graph = ThiothrixNivea()
        print(graph)
    """
    # Build the lazy retriever for this STRING graph, then invoke it to
    # download (or load from cache) and construct the EnsmallenGraph.
    retriever = AutomaticallyRetrievedGraph(
        graph_name="ThiothrixNivea",
        dataset="string",
        directed=directed,
        verbose=verbose,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs
    )
    return retriever()
| 35.068783 | 223 | 0.701116 |
acef5bd475c484178f029a63944ff009182341e3 | 2,022 | py | Python | var/spack/repos/builtin/packages/rclone/package.py | tz-rrze/spack | f02dec2bbbda08d974fecf6c46657ced4e517692 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1 | 2021-11-05T21:58:37.000Z | 2021-11-05T21:58:37.000Z | var/spack/repos/builtin/packages/rclone/package.py | tz-rrze/spack | f02dec2bbbda08d974fecf6c46657ced4e517692 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22 | 2021-05-12T05:53:01.000Z | 2022-03-18T17:30:25.000Z | var/spack/repos/builtin/packages/rclone/package.py | samcmill/spack | 3945e2ad93327ec261ede6dcaf92d57312bf44e7 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-11-06T06:38:51.000Z | 2020-10-27T07:45:01.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Rclone(Package):
    """Rclone is a command line program to sync files and directories
    to and from various cloud storage providers"""
    # Upstream project page and the release tarball URL Spack derives
    # per-version download links from.
    homepage = "https://rclone.org"
    url = "https://github.com/ncw/rclone/releases/download/v1.56.2/rclone-v1.56.2.tar.gz"
    maintainers = ['alecbcs']
    # Known releases with their sha256 checksums (newest first).
    version('1.56.2', sha256='a8813d25c4640e52495fee83e525e76283c63f01d1cce8fbb58d8486b0c20c8a')
    version('1.56.1', sha256='090b4b082caa554812f341ae26ea6758b40338836122595d6283c60c39eb5a97')
    version('1.56.0', sha256='81d2eda23ebaad0a355aab6ff030712470a42505b94c01c9bb5a9ead9168cedb')
    version('1.55.1', sha256='25da7fc5c9269b3897f27b0d946919df595c6dda1b127085fda0fe32aa59d29d')
    version('1.55.0', sha256='75accdaedad3b82edc185dc8824a19a59c30dc6392de7074b6cd98d1dc2c9040')
    version('1.51.0', sha256='3eb5b7ffce17e56fadb29bf854666723a14c93fedc02046c7f34c792dbd227ee')
    version('1.50.2', sha256='6dd8998a72514d3820d241ae46dc609c0305b742aee3db6aaf6017b46c996091')
    version('1.50.1', sha256='48d6c80883427469682b4d97099d7631cf3b67aa85e652c254423bd1422ce216')
    version('1.50.0', sha256='f901fd1752aae6116d94fd08d010a70d94535257c2d23caa505e631cce1e802a')
    version('1.49.5', sha256='abd2c83d71c63a4b0a30b1980b942868e707d05e14ae76ad39abf5cc5a5fde63')
    version('1.49.4', sha256='070afc85e4e9921151d7cb67247db8f0ff2f06fcf2652c43a42fa6e1e35847af')
    version('1.43', sha256='d30527b00cecb4e5e7188dddb78e5cec62d67cf2422dab82190db58512b5a4e3')
    # rclone is written in Go; the toolchain is only needed at build time.
    depends_on("go", type='build')
    def setup_build_environment(self, env):
        # Point GOPATH at the top of the staging dir for the build step.
        env.prepend_path('GOPATH', self.stage.path)
    def install(self, spec, prefix):
        # Build the binary with the Go toolchain, then copy it into the
        # installation prefix's bin directory by hand.
        go('build')
        mkdirp(prefix.bin)
        install('rclone', prefix.bin)
| 49.317073 | 96 | 0.769041 |
acef5cae3ba6238861059a674142e40f7b03889b | 82 | py | Python | torch_tools/training/strategies/__init__.py | gregunz/TorchTools | 19a33f2e4cd38f86b74bd732949516df66f9e24f | [
"MIT"
] | null | null | null | torch_tools/training/strategies/__init__.py | gregunz/TorchTools | 19a33f2e4cd38f86b74bd732949516df66f9e24f | [
"MIT"
] | null | null | null | torch_tools/training/strategies/__init__.py | gregunz/TorchTools | 19a33f2e4cd38f86b74bd732949516df66f9e24f | [
"MIT"
] | null | null | null | from .gan_strategy import GANStrategy
from .simple_strategy import SimpleStrategy
| 27.333333 | 43 | 0.878049 |
acef5d02034b98f8c2acfec4ea79005dc45b9244 | 955 | py | Python | templates/Clas_Met.py | zara-ms/python_class-2 | edd5a4b7a3b3f2759f63208bbf42d5f9e7acb45b | [
"MIT"
] | null | null | null | templates/Clas_Met.py | zara-ms/python_class-2 | edd5a4b7a3b3f2759f63208bbf42d5f9e7acb45b | [
"MIT"
] | 1 | 2021-12-01T17:05:15.000Z | 2021-12-01T17:05:15.000Z | templates/Clas_Met.py | zara-ms/python_class-2 | edd5a4b7a3b3f2759f63208bbf42d5f9e7acb45b | [
"MIT"
] | 4 | 2021-04-09T19:06:40.000Z | 2021-11-29T01:17:50.000Z |
# Define the "piso" (floor) class with 4 class-level default attributes.
class piso():
    numero = 0
    # NOTE(review): the class default is named "escalera" but __init__ assigns
    # "escaleras", so reading obj.escalera always yields the class default ''.
    # Kept as-is to preserve the public attribute names; confirm intent.
    escalera = ''
    ventanas = 0
    cuartos = 0
    # Every floor gets a doorbell ("timbre") behaviour.
    def timbre(self):
        print("ding dong")
    # __init__ fills the 4 predeclared attributes on each instance.
    def __init__(self, numero, ventanas, escaleras, cuartos):
        self.numero = numero
        self.ventanas = ventanas
        self.escaleras = escaleras
        self.cuartos = cuartos
class planta_baja(piso):
    # Ground floor: the only one with a main door.
    puerta_principal = True
    # Override of the doorbell for the ground floor.
    def timbre(self):
        print("bzzzzzp")
class azotea(piso):
    # Rooftop: the only one with an antenna.
    antena = True
    # Override of the doorbell for the rooftop.
    def timbre(self):
        print("Fuera de servicio")
# Instantiate the floors. The signature is (numero, ventanas, escaleras, cuartos).
# Bug fix: primer_piso previously passed piso(2, "si", 4, 2), which stored "si"
# in ventanas and 4 in escaleras; the arguments are reordered to match the
# convention used by the other two calls below.
primer_piso = piso(2, 4, "si", 2)
cuarto_visitas = planta_baja(1, 4, "si", 1)
segundo_piso = azotea(3, 0, "no", 0)
cuarto_visitas.timbre() | 25.810811 | 68 | 0.619895 |
acef5e3dd323f7bcfddc96b0af514a92629fe277 | 1,026 | py | Python | blog/views.py | xcelize/personal-blog | 386122f6298485005eef0b4828aae72de6dba8e8 | [
"MIT"
] | 1 | 2019-10-14T17:09:27.000Z | 2019-10-14T17:09:27.000Z | blog/views.py | xcelize/personal-blog | 386122f6298485005eef0b4828aae72de6dba8e8 | [
"MIT"
] | null | null | null | blog/views.py | xcelize/personal-blog | 386122f6298485005eef0b4828aae72de6dba8e8 | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from .models import Article, Comment
from .forms import CommentForm
def index(request):
    """Render the blog home page with the five most recent articles.

    The template context key stays ``five_articles`` so the existing
    ``blog/index.html`` template keeps working.
    """
    # Bug fix: ``reverse()[5]`` selected the single article at index 5
    # (raising IndexError with fewer than 6 articles); ``[:5]`` takes the
    # first five of the reversed queryset instead. The old try/except is no
    # longer needed: a queryset slice simply yields fewer items when the
    # table is short.
    last_articles = Article.objects.all().reverse()[:5]
    return render(request, 'blog/index.html', {
        'five_articles': last_articles
    })
@login_required
def article(request, slug_article):
    """Display a single article looked up by its slug (login required)."""
    found_article = Article.objects.get(slug=slug_article)
    context = {'article': found_article}
    return render(request, 'blog/article.html', context)
@login_required
def post_comment(request):
    """Create a comment from POSTed form data, then redirect to the blog.

    Bug fixes:
    * The form is now bound to ``request.POST`` — a Django form expects the
      data mapping, not the request object itself.
    * Non-POST requests previously fell off the end of the function and
      returned ``None``; every code path now returns the redirect response.
    """
    if request.method == 'POST':
        form = CommentForm(request.POST)
        if form.is_valid():
            content = request.POST.get('comment', False)
            Comment.create(content=content, user=request.user.id)
    return redirect('/blog/')
| 28.5 | 66 | 0.6423 |
acef5e959925152ed3c1134f6fc6f363b0a53ede | 1,663 | py | Python | Lib/ModuleAPI/__init__.py | evi1hack/viperpython | 04bf8e31e21385edb58ea9d25296df062197df39 | [
"BSD-3-Clause"
] | null | null | null | Lib/ModuleAPI/__init__.py | evi1hack/viperpython | 04bf8e31e21385edb58ea9d25296df062197df39 | [
"BSD-3-Clause"
] | null | null | null | Lib/ModuleAPI/__init__.py | evi1hack/viperpython | 04bf8e31e21385edb58ea9d25296df062197df39 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# @File : __init__.py.py
# @Date : 2020/11/4
# @Desc :
from Lib.Module.configs import (
TAG2TYPE, UACLevel, RegType,
)
from Lib.Module.hostinfo import (
HostInfo
)
from Lib.Module.moduletemplate import (
PostMSFRawModule,
PostPythonModule,
PostMSFPowershellModule,
PostMSFCSharpModule,
PostMSFPythonModule,
PostMSFPythonWithParamsModule,
PostMSFPowershellFunctionModule,
PostMSFExecPEModule,
BotMSFModule,
)
from Lib.Module.msfmodule import (
MsfModule
)
from Lib.Module.option import (
register_options,
OptionStr,
OptionText,
OptionInt,
OptionBool,
OptionEnum,
OptionIPAddressRange,
OptionFileEnum,
OptionCredentialEnum,
OptionCacheHanderConfig,
OptionHander,
)
from Lib.file import File
from Lib.gcc import Gcc
from Lib.mingw import Mingw
from Lib.notice import Notice
from Lib.sessionlib import (
SessionLib as Session,
)
from Msgrpc.Handle.filemsf import FileMsf
# Public re-export list: the names importable via `from Lib.ModuleAPI import *`.
# Roughly grouped as: module template base classes, option declarations,
# runtime helpers, and shared constants/config maps.
__all__ = [
    "PostMSFRawModule",
    "PostPythonModule",
    "PostMSFPowershellModule",
    "PostMSFCSharpModule",
    "PostMSFPythonModule",
    "PostMSFPythonWithParamsModule",
    "PostMSFPowershellFunctionModule",
    "PostMSFExecPEModule",
    "BotMSFModule",
    "register_options",
    "OptionHander",
    "OptionIPAddressRange",
    "OptionStr",
    "OptionText",
    "OptionInt",
    "OptionBool",
    "OptionEnum",
    "OptionFileEnum",
    "OptionCredentialEnum",
    "OptionCacheHanderConfig",
    "Session",
    "Notice",
    "MsfModule",
    "Mingw",
    "Gcc",
    "File",
    "FileMsf",
    "TAG2TYPE",
    "UACLevel",
    "RegType",
    "HostInfo",
]
| 20.7875 | 41 | 0.684907 |
acef5ee109481e922d39b40da5bca17b4d5ffd74 | 3,334 | py | Python | seq2seq_model.py | v-swami/AIChatbot | e7c0a129d6e94af9c5392fe9b018a314fd471a83 | [
"MIT"
] | null | null | null | seq2seq_model.py | v-swami/AIChatbot | e7c0a129d6e94af9c5392fe9b018a314fd471a83 | [
"MIT"
] | null | null | null | seq2seq_model.py | v-swami/AIChatbot | e7c0a129d6e94af9c5392fe9b018a314fd471a83 | [
"MIT"
] | null | null | null | # Vellore Institute of Technology, Vellore
#TITLE: IMPLEMETING A CHATBOT USING RNN AND SEQ2SEQ MODELING
#MADE BY: SWAMI VENKAT (16BCE2270)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
#from tensorflow.models.rnn.translate import data_utils
#ModuleNotFoundError: No module named 'tensorflow.models'
import data_utils
class Seq2SeqModel(object):
  """Sequence-to-sequence model with attention, built per-bucket.

  NOTE(review): the stored source appears truncated — ``__init__`` ends right
  after creating the input placeholders, so any loss/update ops that would
  normally follow are not visible here.
  """

  def __init__(self, source_vocab_size, target_vocab_size, buckets, size,
               num_layers, max_gradient_norm, batch_size, learning_rate,
               learning_rate_decay_factor, use_lstm=False,
               num_samples=512, forward_only=False):
    """Builds the (visible part of the) model graph.

    Args:
      source_vocab_size: size of the encoder vocabulary.
      target_vocab_size: size of the decoder vocabulary.
      buckets: list of (encoder_length, decoder_length) pairs; the last
        bucket is the largest and sizes the placeholders.
      size: number of units per RNN layer (also used as embedding size).
      num_layers: number of stacked RNN layers.
      max_gradient_norm: unused in the visible portion — presumably for
        gradient clipping further down; TODO confirm against the full file.
      batch_size: stored for later use when batching.
      learning_rate: initial value of the non-trainable learning-rate var.
      learning_rate_decay_factor: factor applied by learning_rate_decay_op.
      use_lstm: use BasicLSTMCell instead of GRUCell when True.
      num_samples: sample count for sampled softmax; a value of 0 or one at
        least as large as target_vocab_size disables it.
      forward_only: unused in the visible portion — TODO confirm.
    """
    self.source_vocab_size = source_vocab_size
    self.target_vocab_size = target_vocab_size
    self.buckets = buckets
    self.batch_size = batch_size
    # The learning rate lives in the graph so it can be decayed via an op.
    self.learning_rate = tf.Variable(float(learning_rate), trainable=False)
    self.learning_rate_decay_op = self.learning_rate.assign(
        self.learning_rate * learning_rate_decay_factor)
    self.global_step = tf.Variable(0, trainable=False)
    # If we use sampled softmax, we need an output projection.
    output_projection = None
    softmax_loss_function = None
    # Sampled softmax only makes sense if we sample less than vocabulary size.
    if num_samples > 0 and num_samples < self.target_vocab_size:
      w = tf.get_variable("proj_w", [size, self.target_vocab_size])
      w_t = tf.transpose(w)
      b = tf.get_variable("proj_b", [self.target_vocab_size])
      output_projection = (w, b)

      def sampled_loss(inputs, labels):
        # sampled_softmax_loss expects labels shaped [batch, 1].
        labels = tf.reshape(labels, [-1, 1])
        return tf.nn.sampled_softmax_loss(w_t, b, inputs, labels, num_samples,
                                          self.target_vocab_size)
      softmax_loss_function = sampled_loss
    # Create the internal multi-layer cell for our RNN.
    single_cell = tf.nn.rnn_cell.GRUCell(size)
    if use_lstm:
      single_cell = tf.nn.rnn_cell.BasicLSTMCell(size)
    cell = single_cell
    if num_layers > 1:
      cell = tf.nn.rnn_cell.MultiRNNCell([single_cell] * num_layers)

    # The seq2seq function: we use embedding for the input and attention.
    def seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
      return tf.nn.seq2seq.embedding_attention_seq2seq(
          encoder_inputs, decoder_inputs, cell,
          num_encoder_symbols=source_vocab_size,
          num_decoder_symbols=target_vocab_size,
          embedding_size=size,
          output_projection=output_projection,
          feed_previous=do_decode)

    # Feeds for inputs.
    self.encoder_inputs = []
    self.decoder_inputs = []
    self.target_weights = []
    for i in xrange(buckets[-1][0]):  # Last bucket is the biggest one.
      self.encoder_inputs.append(tf.placeholder(tf.int32, shape=[None],
                                                name="encoder{0}".format(i)))
    for i in xrange(buckets[-1][1] + 1):
      self.decoder_inputs.append(tf.placeholder(tf.int32, shape=[None],
                                                name="decoder{0}".format(i)))
      self.target_weights.append(tf.placeholder(tf.float32, shape=[None],
                                                name="weight{0}".format(i)))
acef5f635740bc831be6edb305b075a4f03e90f3 | 1,136 | py | Python | scr_linux.py | manuel-fischer/ScrollRec | ec5662d3f61630f939613481290a166133d23a20 | [
"MIT"
] | null | null | null | scr_linux.py | manuel-fischer/ScrollRec | ec5662d3f61630f939613481290a166133d23a20 | [
"MIT"
] | null | null | null | scr_linux.py | manuel-fischer/ScrollRec | ec5662d3f61630f939613481290a166133d23a20 | [
"MIT"
] | null | null | null | from PIL import Image
from ffmpeg_util import popen_ffmpeg
import tempfile
import os
# Clipboard: possible better solution:
# https://stackoverflow.com/questions/3571855/pasting-image-to-clipboard-in-python-in-linux
def take_screenshot(x, y, w, h):
    """Grab a ``w`` x ``h`` screenshot of display :0.0 at offset (``x``, ``y``).

    Uses ffmpeg's x11grab input to capture a single raw RGBA frame on
    stdout and wraps it in a PIL image.

    Returns:
        A PIL ``Image`` in RGBA mode of size (w, h).
    """
    # Fix: removed the unused local ``fn = "output.png"`` left over from a
    # file-based implementation.
    args = [
        '-video_size', f'{w}x{h}',
        '-f', 'x11grab',
        # Do not draw the mouse cursor into the capture.
        '-draw_mouse', '0',
        # Capture rectangle: display :0.0 with top-left offset (x, y).
        '-i', f':0.0+{x},{y}',
        '-frames:v', '1',      # a single frame: a screenshot, not a video
        '-vf', 'format=rgba',
        '-f', 'rawvideo',      # raw pixels on stdout, no container
        '-',
    ]
    stdout, _ = popen_ffmpeg(args)
    return Image.frombytes('RGBA', (w, h), stdout, 'raw')
def grab_screenshot(rect_points):
    """Screenshot of the rectangle given as (left, top, right, bottom)."""
    left, top, right, bottom = rect_points
    width = right - left
    height = bottom - top
    return take_screenshot(left, top, width, height)
def clipboard_set_image(img):
    """Put a PIL image on the X clipboard as PNG via ``xclip``.

    The image is written to a temporary PNG file which ``xclip`` reads;
    the file is removed when the ``with`` block exits.
    """
    import shlex  # local import: only needed by this helper

    with tempfile.NamedTemporaryFile(suffix=".png") as tmpfile:
        img.save(tmpfile.name, "PNG")
        # Quote the path so the shell command stays valid even if the temp
        # directory contains spaces or shell metacharacters.
        cmd = f'xclip -i -selection clipboard -t image/png {shlex.quote(tmpfile.name)}'
        os.system(cmd)
# Manual smoke test: grab a small region of the screen and display it.
if __name__ == "__main__":
    img = take_screenshot(100, 100, 200, 300)
    img.show()
# | 22.72 | 91 | 0.59419 |
acef5f8ff26ed63467fd32cd3ff45ec56943a3a3 | 3,553 | py | Python | pyleecan/Functions/Load/load_hdf5.py | Eomys/Pyleecan | 4d7f0cbabf0311006963e7a2f435db2ecd901118 | [
"Apache-2.0"
] | 4 | 2017-11-27T10:14:34.000Z | 2018-09-20T11:30:32.000Z | pyleecan/Functions/Load/load_hdf5.py | Eomys/Pyleecan | 4d7f0cbabf0311006963e7a2f435db2ecd901118 | [
"Apache-2.0"
] | null | null | null | pyleecan/Functions/Load/load_hdf5.py | Eomys/Pyleecan | 4d7f0cbabf0311006963e7a2f435db2ecd901118 | [
"Apache-2.0"
] | null | null | null | from h5py import File, Group
from numpy import bool_, int32, int64, string_, array
from cloudpickle import loads
def load_hdf5(file_path):
    """Load a pyleecan object dictionary from an HDF5 file.

    Parameters
    ----------
    file_path: str
        path of the .h5 file to read

    Returns
    -------
    file_path: str
        the path that was read, returned unchanged
    obj_dict: dict
        dictionary usable to instantiate the pyleecan object
    """
    # The h5py File object itself behaves like the root group.
    with File(file_path, "r") as h5_root:
        loaded_dict = construct_dict_from_group(h5_root)
    return file_path, loaded_dict
def _convert_dataset_value(dataset):
    """Convert a raw h5py dataset value back to its Python equivalent.

    Shared helper extracted from the (previously duplicated) list and dict
    branches of construct_dict_from_group.
    """
    value = dataset[()]
    if "array_list" in dataset.attrs.keys():  # list saved as an array
        value = value.tolist()
    elif isinstance(value, bool_):  # bool
        value = bool(value)
    elif isinstance(value, int64):  # stored as int64 => float by convention
        value = float(value)
    elif isinstance(value, int32):  # int
        value = int(value)
    elif isinstance(value, (string_, bytes)):  # string
        value = value.decode("ISO-8859-2")
    # None is not available in H5 => encoded with a sentinel string
    if value == "NoneValue":
        value = None
    return value


def construct_dict_from_group(group):
    """
    construct_dict_from_group create a dictionary and extract datasets and groups from the group

    Parameters
    ----------
    group: h5py.Group
        group to browse

    Returns
    -------
    dict_ : dict or list
        dict (or list, for groups flagged with a "length_list" attribute)
        containing the group data
    """
    # A group carrying a "length_list" attribute encodes a Python list.
    if "length_list" in group.attrs.keys():
        list_ = []
        for i in range(group.attrs["length_list"]):
            item = group["list_" + str(i)]
            if hasattr(item, "items"):  # sub-group inside the list
                list_.append(construct_dict_from_group(item))
            else:  # dataset leaf
                list_.append(_convert_dataset_value(item))
        return list_
    # Otherwise the group encodes a plain dict.
    dict_ = {}
    for key, val in group.items():
        # Integer keys are stored as strings in HDF5; convert them back.
        if is_int(key):
            key = int(key)
        if isinstance(val, Group):  # nested dict: recurse
            dict_[key] = construct_dict_from_group(val)
        else:  # dataset leaf
            dict_[key] = _convert_dataset_value(val)
    return dict_
def is_int(inputString):
    """Return True when the given string represents an integer."""
    # No digit at all -> cannot possibly be an int, skip the conversion.
    if not any(char.isdigit() for char in inputString):
        return False
    try:
        int(inputString)
    except Exception:
        return False
    return True
| 31.723214 | 96 | 0.51365 |
acef60a7312fa97f4aa390508290f835a78841ab | 8,012 | py | Python | src/pipelines/weather/noaa_gsod.py | EXYNOS-999/data | 771e3ae31047b5e524de7443356472dfc7ab9edc | [
"Apache-2.0"
] | null | null | null | src/pipelines/weather/noaa_gsod.py | EXYNOS-999/data | 771e3ae31047b5e524de7443356472dfc7ab9edc | [
"Apache-2.0"
] | null | null | null | src/pipelines/weather/noaa_gsod.py | EXYNOS-999/data | 771e3ae31047b5e524de7443356472dfc7ab9edc | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import math
import tarfile
import datetime
from io import BytesIO
from random import shuffle
from pathlib import Path
from functools import partial
from typing import Any, Dict, List
import numpy
from pandas import DataFrame, Series, read_csv, concat
from lib.cast import safe_float_cast
from lib.concurrent import thread_map
from lib.data_source import DataSource
from lib.io import pbar
from lib.net import download, download_snapshot
from lib.utils import URL_OUTPUTS_PROD
# Mapping from NOAA GSOD CSV column names to this pipeline's column names.
_COLUMN_MAPPING = {
    "DATE": "date",
    "STATION": "noaa_station",
    "TEMP": "average_temperature",
    "MIN": "minimum_temperature",
    "MAX": "maximum_temperature",
    "PRCP": "rainfall",
    "SNDP": "snowfall",
}
# Columns (and their order) of the table produced per location.
_OUTPUT_COLUMNS = [
    "date",
    "key",
    "noaa_station",
    "noaa_distance",
    "average_temperature",
    "minimum_temperature",
    "maximum_temperature",
    "rainfall",
    "snowfall",
]
# Maximum station distance considered, in kilometers (haversine_distance
# returns kilometers).
_DISTANCE_THRESHOLD = 300
# NOAA integrated surface database station inventory (id, coordinates, dates).
_INVENTORY_URL = "https://www1.ncdc.noaa.gov/pub/data/noaa/isd-history.csv"
class NoaaGsodDataSource(DataSource):
    """Weather data source based on NOAA's Global Summary of the Day (GSOD).

    Downloads the 2020 GSOD archive, converts station records to metric
    units, and averages the records of the stations nearest each location.
    """

    # A bit of a circular dependency but we need the latitude and longitude to compute weather
    def fetch(
        self, output_folder: Path, cache: Dict[str, str], fetch_opts: List[Dict[str, Any]]
    ) -> Dict[str, str]:
        """Download a snapshot of the published geography table."""
        geo_url = f"{URL_OUTPUTS_PROD}/geography.csv"
        # Only the first fetch option's "opts" entry is honoured here.
        download_opts = (fetch_opts or [{}])[0].get("opts", {})
        return {0: download_snapshot(geo_url, output_folder, **download_opts)}

    @staticmethod
    def haversine_distance(
        stations: DataFrame, lat: float, lon: float, radius: float = 6373.0
    ) -> Series:
        """ Compute the distance between two <latitude, longitude> pairs in kilometers """
        # Inputs are expected in radians (parse_dataframes converts them).
        # Compute the pairwise deltas
        lat_diff = stations.lat - lat
        lon_diff = stations.lon - lon
        # Apply Haversine formula
        a = numpy.sin(lat_diff / 2) ** 2
        a += math.cos(lat) * numpy.cos(stations.lat) * numpy.sin(lon_diff / 2) ** 2
        c = numpy.arctan2(numpy.sqrt(a), numpy.sqrt(1 - a)) * 2
        return radius * c

    @staticmethod
    def noaa_number(value: int):
        """Parse a NOAA value, mapping the all-nines missing sentinel to None."""
        return None if re.match(r"999+", str(value).replace(".", "")) else safe_float_cast(value)

    @staticmethod
    def conv_temp(value: int):
        """Convert a NOAA temperature from Fahrenheit to Celsius (NaN if missing)."""
        value = NoaaGsodDataSource.noaa_number(value)
        return numpy.nan if value is None else (value - 32) * 5 / 9

    @staticmethod
    def conv_dist(value: int):
        """Convert a NOAA distance by a x25.4 factor (inches -> millimeters); NaN if missing."""
        value = NoaaGsodDataSource.noaa_number(value)
        return numpy.nan if value is None else value * 25.4

    @staticmethod
    def process_location(
        station_cache: Dict[str, DataFrame], stations: DataFrame, location: Series
    ):
        """Average the cached records of the stations nearest to one location."""
        nearest = stations.copy()
        nearest["key"] = location.key
        # Get the nearest stations from our list of stations given lat and lon
        nearest["distance"] = NoaaGsodDataSource.haversine_distance(
            nearest, location.lat, location.lon
        )
        # Filter out all but the 10 nearest stations
        nearest = nearest[nearest.distance < _DISTANCE_THRESHOLD].sort_values("distance").iloc[:10]
        # Early exit: no stations found within distance threshold
        if len(nearest) == 0 or all(
            station_id not in station_cache for station_id in nearest.id.values
        ):
            return DataFrame(columns=_OUTPUT_COLUMNS)
        # Get station records from the cache
        nearest = nearest.rename(columns={"id": "noaa_station", "distance": "noaa_distance"})
        data = [station_cache.get(station_id) for station_id in nearest.noaa_station.values]
        data = concat(
            [table.merge(nearest, on="noaa_station") for table in data if table is not None]
        )
        # Combine them by computing a simple average
        value_columns = [
            "average_temperature",
            "minimum_temperature",
            "maximum_temperature",
            "rainfall",
            "snowfall",
        ]
        agg_functions = {col: "mean" for col in value_columns}
        # Keep the closest station's id/distance (rows are distance-sorted).
        agg_functions["noaa_station"] = "first"
        agg_functions["noaa_distance"] = "first"
        data = data.groupby(["date", "key"]).agg(agg_functions).reset_index()
        # Return all the available data from the records
        return data[[col for col in _OUTPUT_COLUMNS if col in data.columns]]

    def parse_dataframes(
        self, dataframes: List[DataFrame], aux: Dict[str, DataFrame], **parse_opts
    ):
        """Build per-location weather tables from the GSOD 2020 archive."""
        # Get all the weather stations with data up until last month from inventory
        today = datetime.date.today()
        min_date = (today - datetime.timedelta(days=30)).strftime("%Y%m%d")
        stations = read_csv(_INVENTORY_URL).rename(
            columns={"LAT": "lat", "LON": "lon", "ELEV(M)": "elevation"}
        )
        stations = stations[stations.END > int(min_date)]
        # Station id = USAF code + zero-padded WBAN code.
        stations["id"] = stations["USAF"] + stations["WBAN"].apply(lambda x: f"{x:05d}")
        # Download all the station data as a compressed file
        buffer = BytesIO()
        records_url = "https://www.ncei.noaa.gov/data/global-summary-of-the-day/archive/2020.tar.gz"
        download(records_url, buffer, progress=True)
        buffer.seek(0)
        with tarfile.open(fileobj=buffer, mode="r:gz") as stations_tar:
            # Build the station cache by uncompressing all files in memory
            station_cache = {}
            for member in pbar(stations_tar.getmembers(), desc="Decompressing"):
                if not member.name.endswith(".csv"):
                    continue
                # Read the records from the provided station
                data = read_csv(stations_tar.extractfile(member)).rename(columns=_COLUMN_MAPPING)
                # Fix data types
                data.noaa_station = data.noaa_station.astype(str)
                data.rainfall = data.rainfall.apply(NoaaGsodDataSource.conv_dist)
                data.snowfall = data.snowfall.apply(NoaaGsodDataSource.conv_dist)
                for temp_type in ("average", "minimum", "maximum"):
                    col = f"{temp_type}_temperature"
                    data[col] = data[col].apply(NoaaGsodDataSource.conv_temp)
                station_cache[member.name.replace(".csv", "")] = data
        # Get all the POI from metadata and go through each key
        keep_columns = ["key", "latitude", "longitude"]
        metadata = dataframes[0][keep_columns].dropna()
        # Only use keys present in the metadata table
        metadata = metadata.merge(aux["metadata"])[keep_columns]
        # Convert all coordinates to radians
        stations["lat"] = stations.lat.apply(math.radians)
        stations["lon"] = stations.lon.apply(math.radians)
        metadata["lat"] = metadata.latitude.apply(math.radians)
        metadata["lon"] = metadata.longitude.apply(math.radians)
        # Make sure the stations and the cache are sent to each function call
        map_func = partial(NoaaGsodDataSource.process_location, station_cache, stations)
        # We don't care about the index while iterating over each metadata item
        map_iter = [record for _, record in metadata.iterrows()]
        # Shuffle the iterables to try to make better use of the caching
        shuffle(map_iter)
        # Bottleneck is network so we can use lots of threads in parallel
        records = thread_map(map_func, map_iter, total=len(metadata))
        return concat(records)
| 37.971564 | 100 | 0.654144 |
acef61c03402b09520fca44dd52ca65257c0e9b5 | 323 | py | Python | serdespy/__init__.py | liangkatherine/serdespy | 9aa0c20ce66dad60e6488d74364a130e6d71b6fb | [
"MIT"
] | null | null | null | serdespy/__init__.py | liangkatherine/serdespy | 9aa0c20ce66dad60e6488d74364a130e6d71b6fb | [
"MIT"
] | null | null | null | serdespy/__init__.py | liangkatherine/serdespy | 9aa0c20ce66dad60e6488d74364a130e6d71b6fb | [
"MIT"
] | null | null | null | from .prs import *
from .chmodel import *
from .four_port_to_diff import *
from .resample import *
from .eye_diagram import *
from .signal import *
from .transmitter import *
from .receiver import *
from .rs_code import *
#from .signal import nrz_a2d
#from .signal import pam4_a2d
#from .signal import channel_coefficients | 24.846154 | 41 | 0.77709 |
acef623abd8b04cf110fb4edf755cf7a24ca96ae | 543 | py | Python | dev-env/tests/manylinux1/demo/setup.py | ghuntley/daml | 2b3c4e76bb5662e5e139c625755a388c79455c49 | [
"Apache-2.0"
] | null | null | null | dev-env/tests/manylinux1/demo/setup.py | ghuntley/daml | 2b3c4e76bb5662e5e139c625755a388c79455c49 | [
"Apache-2.0"
] | null | null | null | dev-env/tests/manylinux1/demo/setup.py | ghuntley/daml | 2b3c4e76bb5662e5e139c625755a388c79455c49 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2019 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from distutils.core import setup, Extension
# C extension compiled against glib-2.0.
# NOTE(review): the include dirs are hard-coded for a specific Linux layout
# (manylinux1 test image, presumably) — confirm before reusing elsewhere.
extension_module = Extension(
    'demo.ext',
    sources=['demo/extension.c'],
    include_dirs=['/usr/include/glib-2.0', '/usr/lib64/glib-2.0/include'],
    libraries=['glib-2.0'],
)
# Package definition: ships the `demo` package plus the compiled extension.
# NOTE(review): distutils is deprecated (removed in Python 3.12); a migration
# to setuptools would be needed for newer interpreters.
setup(
    name='demo',
    version='0.1',
    description='This is a demo package with a compiled C extension.',
    ext_modules=[extension_module],
    packages=['demo'],
)
| 27.15 | 97 | 0.685083 |
acef6284d04b5dd7af8ad713ea5fc1b4ed7d4776 | 1,061 | py | Python | solution/graph_traversal/16954/main.py | jungyoonoh/baekjoon-1 | 2b4437a4b5e06244fa47fae6c7b7be0157d0f94f | [
"MIT"
] | 2,236 | 2019-08-05T00:36:59.000Z | 2022-03-31T16:03:53.000Z | solution/graph_traversal/16954/main.py | jungyoonoh/baekjoon-1 | 2b4437a4b5e06244fa47fae6c7b7be0157d0f94f | [
"MIT"
] | 225 | 2020-12-17T10:20:45.000Z | 2022-01-05T17:44:16.000Z | solution/graph_traversal/16954/main.py | jungyoonoh/baekjoon-1 | 2b4437a4b5e06244fa47fae6c7b7be0157d0f94f | [
"MIT"
] | 602 | 2019-08-05T00:46:25.000Z | 2022-03-31T13:38:23.000Z | # Authored by : yj2221
# Co-authored by : -
# Link : http://boj.kr/d903976eaa454c208a0a75092a20d1c6
from collections import deque
import sys
# Fast stdin reader: deliberately shadows the builtin input() for speed in
# this competitive-programming solution; strips the trailing newline.
def input():
    return sys.stdin.readline().rstrip()
# The 8x8 board, one list of characters per row ('#' cells block movement
# in bfs below; '.' cells are free).
board = [list(input()) for _ in range(8)]
def bfs(board):
    """Search over (row, col, time) states.

    Returns 1 if the piece starting at the bottom-left (7, 0) can reach the
    top-right (0, 7) while the '#' walls shift down one row per turn,
    otherwise 0.
    """
    # 9 possible moves: stay in place plus the 8 neighbouring cells.
    moves = [(0, 0), (0, -1), (0, 1), (-1, 0), (1, 0),
             (-1, -1), (1, 1), (-1, 1), (1, -1)]
    # After 8 turns every wall has left the board, so time is capped at 8.
    seen = [[[False] * 8 for _ in range(8)] for _ in range(9)]
    seen[0][7][0] = True
    queue = deque([(7, 0, 0)])
    while queue:
        row, col, t = queue.popleft()
        if (row, col) == (0, 7):
            return 1
        nt = min(t + 1, 8)
        for dr, dc in moves:
            nr, nc = row + dr, col + dc
            if not (0 <= nr < 8 and 0 <= nc < 8):
                continue
            # A wall occupies the target cell right now (walls have fallen
            # t rows since the start)...
            if nr - t >= 0 and board[nr - t][nc] == '#':
                continue
            # ...or will occupy it once the walls move this turn.
            if nr - nt >= 0 and board[nr - nt][nc] == '#':
                continue
            if seen[nt][nr][nc]:
                continue
            seen[nt][nr][nc] = True
            queue.append((nr, nc, nt))
    return 0
# Answer: 1 if the escape is possible, otherwise 0.
print(bfs(board))
| 27.921053 | 65 | 0.520264 |
acef629e1e2275457841edacca6190447bdd372e | 54,059 | py | Python | official/nlp/modeling/models/t5.py | esther011/models | 3ce2f49b909a8a00044a9d20dce7db414b23ea94 | [
"Apache-2.0"
] | 1 | 2022-03-13T07:44:17.000Z | 2022-03-13T07:44:17.000Z | official/nlp/modeling/models/t5.py | esther011/models | 3ce2f49b909a8a00044a9d20dce7db414b23ea94 | [
"Apache-2.0"
] | null | null | null | official/nlp/modeling/models/t5.py | esther011/models | 3ce2f49b909a8a00044a9d20dce7db414b23ea94 | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implement T5 Transformer model by TF official NLP library.
Model paper: https://arxiv.org/pdf/1910.10683.pdf
T5TransformerParams and T5Transformer are public interfaces.
Other modules are implementation details, so users should never build libraries
depending on them.
To use with Keras, users can wrap them within Keras customized layers.
"""
import dataclasses
import functools
import math
from typing import Callable, Dict, Optional, Sequence, Text, Union
import numpy as np
import tensorflow as tf
from official.modeling import tf_utils
ShapeLike = Union[int, Sequence[int], tf.TensorShape]
Initializer = Callable[..., tf.Tensor]
class Module(tf.Module):
  """Base nn module: a `tf.Module` that remembers a variable dtype."""

  def __init__(self, dtype: tf.DType = tf.float32, name: Optional[Text] = None):
    """Creates the module.

    Args:
      dtype: dtype used when allocating this module's variables.
      name: optional string name for the module.
    """
    super().__init__(name=name)
    self.dtype = dtype

  def create_variable(self,
                      name: Text,
                      shape: ShapeLike,
                      initializer: Initializer,
                      dtype: tf.DType = tf.float32,
                      **kwargs):
    """Allocates a `tf.Variable` initialized by `initializer`."""
    initial_value = initializer(shape, dtype=dtype, **kwargs)
    return tf.Variable(initial_value, name=name)

  def read_variable(self,
                    variable: tf.Variable,
                    as_dtype: Optional[tf.DType] = None):
    """Reads a variable, optionally casting it to `as_dtype`."""
    if as_dtype is None:
      return variable
    return tf.cast(variable, dtype=as_dtype)
@tf.custom_gradient
def dense_gradient(x: tf.Tensor):
  """Identity operation whose gradient is converted to a ``tf.Tensor``.

  Embedding lookups normally produce ``tf.IndexedSlices`` gradients; wrapping
  the table in this op densifies them.

  >>> embedding = tf.Variable(tf.random.normal([3, 3]))
  >>> with tf.GradientTape() as tape:
  ...   y = tf.nn.embedding_lookup(dense_gradient(embedding), [1])
  >>> tape.gradient(y, embedding).numpy()
  array([[ 0.,  0.,  0.],
         [ 1.,  1.,  1.],
         [ 0.,  0.,  0.]], dtype=float32)

  Args:
    x: A ``tf.Tensor``.

  Returns:
    The input ``tf.Tensor`` and a dense identity gradient function.
  """

  def grad(dy):
    # Densify sparse gradients; pass dense ones through untouched.
    return tf.convert_to_tensor(dy) if isinstance(dy, tf.IndexedSlices) else dy

  return x, grad
def make_attention_mask(query_input,
                        key_input,
                        pairwise_fn=tf.multiply,
                        dtype=tf.float32):
  """Mask-making helper for attention weights.

  For 1d inputs (`[batch..., len_q]`, `[batch..., len_kv]`), attention
  weights have shape `[batch..., heads, len_q, len_kv]`, and this function
  produces a broadcastable `[batch..., 1, len_q, len_kv]` mask.

  Args:
    query_input: a batched, flat input of query_length size.
    key_input: a batched, flat input of key_length size.
    pairwise_fn: broadcasting elementwise comparison function.
    dtype: mask return dtype.

  Returns:
    A `[batch..., 1, len_q, len_kv]` shaped mask for 1d attention.
  """
  q = tf.expand_dims(query_input, axis=-1)
  k = tf.expand_dims(key_input, axis=-2)
  # Insert the singleton "heads" dimension, then cast to the mask dtype.
  return tf.cast(tf.expand_dims(pairwise_fn(q, k), axis=-3), dtype=dtype)
def make_causal_mask(x, dtype=tf.float32):
  """Make a causal mask for self-attention.

  For 1d inputs (`[batch..., len]`), self-attention weights have shape
  `[batch..., heads, len, len]`; this produces a causal mask of shape
  `[batch..., 1, len, len]`.

  Args:
    x: input array of shape `[batch..., len]`.
    dtype: mask return dtype.

  Returns:
    A `[batch..., 1, len, len]` shaped causal mask for 1d attention.
  """
  shape = tf.shape(x)
  positions = tf.broadcast_to(tf.range(shape[-1], dtype=tf.int32), shape)
  # Position i may attend to position j iff i >= j.
  return make_attention_mask(positions, positions, tf.greater_equal, dtype=dtype)
class Embed(Module):
  """Embedding Module.

  A parameterized function from integers [0, vocab_size) to d-dimensional
  vectors.
  """

  def __init__(self,
               vocab_size: int,
               features: int,
               embeddings_initializer: Optional[Initializer] = None,
               compute_dtype: tf.DType = tf.float32,
               **kwargs):
    super().__init__(**kwargs)
    self.vocab_size = vocab_size
    self.features = features
    self.compute_dtype = compute_dtype
    # Default to truncated-normal initialization when none is supplied.
    if embeddings_initializer:
      self.embed_init = embeddings_initializer
    else:
      self.embed_init = tf.keras.initializers.TruncatedNormal(stddev=1.0)
    with self.name_scope:
      self.embeddings = self.create_variable(
          "embedding", [self.vocab_size, self.features],
          self.embed_init,
          dtype=self.dtype)

  @tf.Module.with_name_scope
  def __call__(self, inputs: tf.Tensor, one_hot: bool = True):
    """Embeds the inputs along the last dimension.

    Args:
      inputs: integer ids; the last dimension is to embed.
      one_hot: whether to gather embeddings via a one-hot matmul.

    Returns:
      The output shape follows the input, with an additional `features`
      dimension appended.
    """
    if not one_hot:
      table = self.read_variable(self.embeddings, as_dtype=self.compute_dtype)
      return tf.nn.embedding_lookup(dense_gradient(table), inputs)
    flat_ids = tf.reshape(inputs, [-1])
    one_hot_ids = tf.one_hot(
        flat_ids, depth=self.vocab_size, dtype=self.compute_dtype)
    flat_embeddings = tf.matmul(
        one_hot_ids,
        self.read_variable(self.embeddings, as_dtype=self.compute_dtype))
    output_shape = tf_utils.get_shape_list(inputs) + [self.features]
    return tf.reshape(flat_embeddings, output_shape)

  def attend(self, query):
    """Attends over the embedding using a query tensor.

    Args:
      query: array whose last dimension equals the feature depth `features`
        of the embedding.

    Returns:
      A tensor with final dim `num_embeddings`: the batched inner product of
      the query vectors against each embedding. Commonly used for weight
      sharing between embeddings and the logit transform in NLP models.
    """
    table = self.read_variable(self.embeddings, as_dtype=query.dtype)
    return tf.matmul(query, table, transpose_b=True)
class RMSNorm(Module):
  """A layernorm module in the T5 style.

  No bias and no subtraction of mean.
  """

  def __init__(self, hidden_size: int, epsilon: float = 1e-6, **kwargs):
    super().__init__(**kwargs)
    self.variance_epsilon = epsilon
    with self.name_scope:
      self.weight = self.create_variable(
          "scale", [hidden_size],
          dtype=self.dtype,
          initializer=tf.keras.initializers.Ones())

  @tf.Module.with_name_scope
  def __call__(self, x):
    # Normalize in float32 for numerical stability, then cast back to the
    # caller's dtype.
    input_dtype = x.dtype
    x32 = tf.cast(x, dtype=tf.float32)
    mean_square = tf.math.reduce_mean(
        tf.math.square(x32), axis=-1, keepdims=True)
    normed = x32 * tf.math.rsqrt(mean_square + self.variance_epsilon)
    normed = tf.cast(normed, dtype=input_dtype)
    return self.read_variable(self.weight, as_dtype=input_dtype) * normed
class Linear(Module):
  """Linear module, optionally including bias."""

  def __init__(self,
               in_features: int,
               out_features: int,
               use_bias: bool = True,
               w_init: Optional[Initializer] = None,
               b_init: Optional[Initializer] = None,
               **kwargs):
    """Constructs a `Linear` module.

    Args:
      in_features: size of the input's last dimension.
      out_features: size of the output's last dimension.
      use_bias: whether a bias term is added.
      w_init: kernel initializer; defaults to He-normal.
      b_init: bias initializer; must be None when `use_bias` is False.
      **kwargs: forwarded to `Module` (e.g. `dtype`, `name`).

    Raises:
      ValueError: if `b_init` is provided while `use_bias` is False.
    """
    super().__init__(**kwargs)
    self.in_features = in_features
    self.out_features = out_features
    self.use_bias = use_bias
    self.w_init = w_init
    if self.use_bias:
      self.b_init = b_init if b_init else tf.keras.initializers.Zeros()
    elif b_init is not None:
      raise ValueError("When not using a bias the b_init must be None.")
    with self.name_scope:
      if self.w_init is None:
        # Default kernel initializer. (A fan-in stddev used to be computed
        # here but was never used; that dead code has been removed.)
        self.w_init = tf.keras.initializers.HeNormal()
      self.w = self.create_variable(
          "kernel", [self.in_features, self.out_features],
          initializer=self.w_init,
          dtype=self.dtype)
      if self.use_bias:
        self.b = self.create_variable(
            "bias", [self.out_features],
            initializer=self.b_init,
            dtype=self.dtype)

  @tf.Module.with_name_scope
  def __call__(self, inputs: tf.Tensor) -> tf.Tensor:
    """Applies `inputs @ kernel (+ bias)`, casting weights to the input dtype."""
    outputs = tf.matmul(inputs,
                        self.read_variable(self.w, as_dtype=inputs.dtype))
    if self.use_bias:
      outputs = tf.add(outputs,
                       self.read_variable(self.b, as_dtype=inputs.dtype))
    return outputs
class Linear3D(Module):
  """Linear3D module, optionally including bias.

  Projects between a flat hidden representation and a per-head one:
  `to_3d=True` maps [B, S, D] -> [B, S, N, H]; `to_3d=False` is the inverse.
  Kernel stored as 2d parameter for compatibility with Adafactor optimizer.
  """
  def __init__(self,
               in_features: int,
               out_features: int,
               num_heads: int,
               use_bias: bool = True,
               to_3d: bool = True,
               w_init: Optional[Initializer] = None,
               b_init: Optional[Initializer] = None,
               **kwargs):
    """Constructs a `Linear3D` module.

    Args:
      in_features: per-input feature size (per head when `to_3d` is False).
      out_features: per-output feature size (per head when `to_3d` is True).
      num_heads: number of attention heads.
      use_bias: whether a bias term is added.
      to_3d: direction of the projection (see class docstring).
      w_init: kernel initializer; defaults to He-normal.
      b_init: bias initializer; must be None when `use_bias` is False.
      **kwargs: forwarded to `Module` (e.g. `dtype`, `name`).

    Raises:
      ValueError: if `b_init` is provided while `use_bias` is False.
    """
    super().__init__(**kwargs)
    self.in_features = in_features
    self.out_features = out_features
    self.num_heads = num_heads
    self.use_bias = use_bias
    self.to_3d = to_3d
    self.w_init = w_init
    if self.to_3d:
      # Kernel stored as (D, N*H), reshaped to (D, N, H) at call time.
      self.kernel_2d_shape = (self.in_features,
                              self.num_heads * self.out_features)
      self.kernel_3d_shape = (self.in_features, self.num_heads,
                              self.out_features)
      self.bias_shape = (self.num_heads, self.out_features)
      bias_rank = 2
    else:
      # Kernel stored as (N*H, D), reshaped to (N, H, D) at call time.
      self.kernel_2d_shape = (self.in_features * self.num_heads,
                              self.out_features)
      self.kernel_3d_shape = (self.num_heads, self.in_features,
                              self.out_features)
      self.bias_shape = (self.out_features,)
      bias_rank = 1
    if self.use_bias:
      self.b_init = b_init or tf.keras.initializers.Zeros()
    elif b_init is not None:
      raise ValueError("When not using a bias the b_init must be None.")
    with self.name_scope:
      if self.w_init is None:
        self.w_init = tf.keras.initializers.HeNormal()
      self.w = self.create_variable(
          "kernel",
          self.kernel_2d_shape,
          initializer=self.w_init,
          dtype=self.dtype)
      if self.use_bias:
        self.b = self.create_variable(
            "bias", self.bias_shape, initializer=self.b_init, dtype=self.dtype)
  @tf.Module.with_name_scope
  def __call__(self, inputs: tf.Tensor) -> tf.Tensor:
    """Applies the (un)projection; weights are cast to the input dtype."""
    # B: batch size
    # S: From Sequence length
    # D: dimension
    # N: Number of heads
    # H: head size
    compute_dtype = inputs.dtype
    w = self.read_variable(self.w, as_dtype=compute_dtype)
    w = tf.reshape(w, self.kernel_3d_shape)
    if self.to_3d:
      outputs = tf.einsum("BSD,DNH->BSNH", inputs, w)
    else:
      outputs = tf.einsum("BSNH,NHD->BSD", inputs, w)
    if self.use_bias:
      outputs = tf.add(outputs,
                       self.read_variable(self.b, as_dtype=compute_dtype))
    return outputs
class Dropout(Module):
  """Randomly drop units in the input at a given rate."""

  def __init__(self, rate: float, **kwargs):
    """Constructs a Dropout module.

    Args:
      rate: probability that each element of x is discarded; a scalar in the
        range `[0, 1)`.
      **kwargs: other keyword args.
    """
    super().__init__(**kwargs)
    self._rate = rate

  @tf.Module.with_name_scope
  def __call__(self,
               x: tf.Tensor,
               training: bool,
               noise_shape: Optional[ShapeLike] = None) -> tf.Tensor:
    """Applies dropout during training; identity otherwise.

    Args:
      x: the input tensor.
      training: whether this is a training pass.
      noise_shape: (Optional) shape of the random noise used to apply
        dropout; defaults to the input shape and must be broadcastable to it.

    Returns:
      A tensor after applying dropout.
    """
    if training:
      return tf.nn.dropout(x, rate=self._rate, noise_shape=noise_shape)
    return x
class FFN(Module):
  """Feed-forward Network. No layer norm, output dropout, or skip connection."""
  # Supported activation names. "gelu" uses the tanh approximation.
  activation_map = {
      "relu": tf.nn.relu,
      "gelu": functools.partial(tf.nn.gelu, approximate=True),
      "swish": tf.nn.silu,
      "silu": tf.nn.silu,
  }
  def __init__(self,
               d_model: int,
               d_ff: int,
               activations: Sequence[str],
               use_bias: bool = False,
               dropout_rate: Optional[float] = 0.0,
               layer_norm_epsilon: Optional[float] = 1e-6,
               weight_initializer: Optional[Initializer] = None,
               bias_initializer: Optional[Initializer] = None,
               **kwargs):
    """Creates the FFN.

    Args:
      d_model: model (input/output) dimension.
      d_ff: inner feed-forward dimension.
      activations: one activation name per input projection. With several
        entries the projections are multiplied elementwise (gated FFN,
        e.g. ("gelu", "linear")); None or "linear" means no activation.
      use_bias: whether the Linear layers include a bias.
      dropout_rate: dropout rate applied to the inner activation.
      layer_norm_epsilon: unused by this module (it applies no layer norm);
        presumably kept for signature parity with sibling blocks.
      weight_initializer: kernel initializer for the Linear layers.
      bias_initializer: bias initializer for the Linear layers.
      **kwargs: forwarded to `Module`.

    Raises:
      ValueError: if an activation name is not in `activation_map`.
    """
    super().__init__(**kwargs)
    self.use_bias = use_bias
    with self.name_scope:
      self.wi = []
      self.activations = activations
      for idx, act_fn in enumerate(activations):
        if (act_fn is not None and act_fn != "linear" and
            act_fn not in self.activation_map):
          raise ValueError("Invalid activation function string is passed: %s" %
                           act_fn)
        # Single projection keeps the historical name "wi" for checkpoints.
        dense_name = "wi" if len(activations) == 1 else f"wi_{idx}"
        self.wi.append(
            Linear(
                d_model,
                d_ff,
                use_bias=self.use_bias,
                w_init=weight_initializer,
                b_init=bias_initializer,
                dtype=self.dtype,
                name=dense_name))
      self.wo = Linear(
          d_ff,
          d_model,
          use_bias=self.use_bias,
          w_init=weight_initializer,
          b_init=bias_initializer,
          dtype=self.dtype,
          name="wo")
      self.dropout = Dropout(rate=dropout_rate)
  @tf.Module.with_name_scope
  def __call__(self,
               hidden_states: tf.Tensor,
               training: bool = False) -> tf.Tensor:
    """Applies the (optionally gated) feed-forward transform."""
    h = hidden_states
    factors = []
    for wi, act_fn in zip(self.wi, self.activations):
      if act_fn is None or act_fn == "linear":
        factors.append(wi(h))
      else:
        factors.append(self.activation_map[act_fn](wi(h)))
    # Elementwise product of all projections implements the gating.
    h = functools.reduce(tf.math.multiply, factors)
    h_shape = tf_utils.get_shape_list(h)
    # Share the dropout mask across the sequence dimension.
    h_shape[-2] = 1
    h = self.dropout(h, noise_shape=h_shape, training=training)
    h = self.wo(h)
    return h
class RelativePositionEmbedding(Module):
  """Relative position embeddings of T5 style."""
  def __init__(self,
               num_heads: int,
               relative_attention_num_buckets: int = 32,
               relative_attention_max_distance: int = 128,
               bidirectional: bool = True,
               embeddings_initializer: Optional[Initializer] = None,
               compute_dtype: tf.DType = tf.float32,
               **kwargs):
    """Creates the relative position bias table.

    Args:
      num_heads: number of attention heads (one bias value per head).
      relative_attention_num_buckets: number of distance buckets.
      relative_attention_max_distance: distances beyond this share one bucket.
      bidirectional: whether positive (future) offsets get their own buckets.
      embeddings_initializer: initializer for the bucket embedding table.
      compute_dtype: activation dtype for the embedding lookup.
      **kwargs: forwarded to `Module`.
    """
    super().__init__(**kwargs)
    self.num_heads = num_heads
    self.relative_attention_num_buckets = relative_attention_num_buckets
    self.bidirectional = bidirectional
    self.relative_attention_max_distance = relative_attention_max_distance
    with self.name_scope:
      # One learned bias per (bucket, head) pair.
      self.relative_attention_bias = Embed(
          vocab_size=self.relative_attention_num_buckets,
          features=self.num_heads,
          embeddings_initializer=embeddings_initializer,
          dtype=self.dtype,
          compute_dtype=compute_dtype,
          name="rel_embedding")
  @staticmethod
  def _relative_position_bucket(relative_position,
                                bidirectional=True,
                                num_buckets=32,
                                max_distance=128):
    """Translate relative position to a bucket number for relative attention.

    The relative position is defined as memory_position - query_position, i.e.
    the distance in tokens from the attending position to the attended-to
    position.
    If bidirectional=False, then positive relative positions are invalid.
    We use smaller buckets for small absolute relative_position and larger
    buckets for larger absolute relative_positions.
    All relative positions >=max_distance map to the same bucket.
    All relative positions <=-max_distance map to the same bucket.
    This should allow for more graceful generalization to longer sequences
    than the model has been trained on.

    Args:
      relative_position: an int32 Tensor
      bidirectional: a boolean - whether the attention is bidirectional
      num_buckets: an integer
      max_distance: an integer

    Returns:
      a Tensor with the same shape as relative_position, containing int32
      values in the range [0, num_buckets)
    """
    ret = 0
    n = -relative_position
    if bidirectional:
      # Half of the buckets are reserved for the sign (past vs. future).
      num_buckets //= 2
      ret += tf.cast(tf.math.less(n, 0), tf.int32) * num_buckets
      n = tf.math.abs(n)
    else:
      n = tf.math.maximum(n, 0)
    # now n is in the range [0, inf)
    # Half of the remaining buckets are exact; the rest grow logarithmically
    # up to max_distance.
    max_exact = num_buckets // 2
    is_small = tf.math.less(n, max_exact)
    val_if_large = max_exact + tf.dtypes.cast(
        tf.math.log(
            tf.cast(n, tf.float32) / max_exact + np.finfo(np.float32).eps) /
        math.log(max_distance / max_exact) * (num_buckets - max_exact),
        tf.int32,
    )
    val_if_large = tf.math.minimum(val_if_large, num_buckets - 1)
    ret += tf.where(is_small, n, val_if_large)
    return ret
  @tf.Module.with_name_scope
  def __call__(self, qlen, klen):
    """Returns position bias of shape (1, num_heads, qlen, klen)."""
    context_position = tf.range(qlen)[:, None]
    memory_position = tf.range(klen)[None, :]
    relative_position = memory_position - context_position  # shape (qlen, klen)
    rp_bucket = self._relative_position_bucket(
        relative_position,
        bidirectional=self.bidirectional,
        num_buckets=self.relative_attention_num_buckets,
        max_distance=self.relative_attention_max_distance)
    values = self.relative_attention_bias(rp_bucket)
    values = tf.expand_dims(
        tf.transpose(values, [2, 0, 1]),
        axis=0)  # shape (1, num_heads, qlen, klen)
    return values
class MultiHeadAttention(Module):
  """T5 Attention from Mesh TensorFlow."""
  def __init__(self,
               d_model: int,
               d_kv: int,
               num_heads: int,
               use_bias: bool = False,
               dropout_rate: Optional[float] = 0.0,
               rescale_query: bool = False,
               weight_initializer: Optional[Initializer] = None,
               bias_initializer: Optional[Initializer] = None,
               **kwargs):
    """Creates the attention projections.

    Args:
      d_model: model (input/output) dimension.
      d_kv: per-head key/value dimension.
      num_heads: number of attention heads.
      use_bias: whether the q/k/v/o projections include biases.
      dropout_rate: rate for attention-weight dropout.
      rescale_query: if True, queries are divided by sqrt(d_kv) at call
        time; otherwise the same factor is folded into the query kernel
        initializer (equivalent under Adafactor).
      weight_initializer: kernel initializer for the projections.
      bias_initializer: bias initializer for the projections.
      **kwargs: forwarded to `Module`.
    """
    super().__init__(**kwargs)
    with self.name_scope:
      self.d_model = d_model
      self.d_kv = d_kv
      self.num_heads = num_heads
      self.rescale_query = rescale_query
      self.use_bias = use_bias
      # Fold the 1/sqrt(d_kv) rescaling into the query initializer unless
      # runtime rescaling is requested (or no initializer was supplied).
      if rescale_query or weight_initializer is None:
        query_w_init = weight_initializer
      else:
        init_std_rescaling = tf.math.sqrt(tf.cast(self.d_kv, dtype=self.dtype))
        query_w_init = (
            lambda *args, **kwargs: (  # pylint: disable=g-long-lambda
                weight_initializer(*args, **kwargs) / init_std_rescaling))
      self.q = Linear3D(
          self.d_model,
          self.d_kv,
          num_heads=self.num_heads,
          use_bias=self.use_bias,
          w_init=query_w_init,
          b_init=bias_initializer,
          dtype=self.dtype,
          name="q")
      self.k = Linear3D(
          self.d_model,
          self.d_kv,
          num_heads=self.num_heads,
          use_bias=self.use_bias,
          w_init=weight_initializer,
          b_init=bias_initializer,
          dtype=self.dtype,
          name="k")
      self.v = Linear3D(
          self.d_model,
          self.d_kv,
          num_heads=self.num_heads,
          use_bias=self.use_bias,
          w_init=weight_initializer,
          b_init=bias_initializer,
          dtype=self.dtype,
          name="v")
      # Output projection maps [B, S, N, H] back to [B, S, D].
      self.o = Linear3D(
          self.d_kv,
          self.d_model,
          num_heads=self.num_heads,
          use_bias=self.use_bias,
          to_3d=False,
          w_init=weight_initializer,
          b_init=bias_initializer,
          dtype=self.dtype,
          name="o")
      self.dropout = Dropout(dropout_rate)
  def _update_cache(self, key, value, cache, decode_position):
    """Updates cache states and gets full-length key/value tensors."""
    # Combines cached keys and values with new keys and values.
    # TPU one-hot handling.
    # Scatter the single decoded step into the fixed-length cache via a
    # one-hot multiply (avoids dynamic-update ops on TPU).
    key_seq_dim = cache["key"].shape.as_list()[1]
    indices = tf.reshape(
        tf.one_hot(decode_position, key_seq_dim, dtype=key.dtype),
        [1, key_seq_dim, 1, 1])
    key = cache["key"] + key * indices
    value_seq_dim = cache["value"].shape.as_list()[1]
    indices = tf.reshape(
        tf.one_hot(decode_position, value_seq_dim, dtype=value.dtype),
        [1, value_seq_dim, 1, 1])
    value = cache["value"] + value * indices
    # Update cache
    cache["key"] = key
    cache["value"] = value
    return key, value
  @tf.Module.with_name_scope
  def __call__(self,
               query,
               mask=None,
               kv=None,
               position_bias=None,
               cache: Optional[Dict[str, tf.Tensor]] = None,
               decode_position=None,
               training=False):
    """MultiHeadAttention at work.

    Args:
      query: Tensor of shape (bs, qlen, d_model).
      mask: None or Tensor of shape (bs, n_heads, qlen, klen).
      kv: None or Tensor of shape (bs, klen, d_model).
      position_bias: None or Tensor of shape (bs, n_heads, qlen, klen).
      cache: If not None, cache["key"] and cache["value"] are Tensors of shape
        (bs, klen, n_heads, d_kv).
      decode_position: If not None, which position of the sequence we are
        decoding for. Ranges from 0 to klen - 1.
      training: Effects the behavior of dropout.

    Returns:
      A dictionary, output["context"] is the output after attention,
      output["cache"] contains updated cache for the next round of
      autoregressive decoding.
    """
    # Input is (bs, qlen, d_model)
    # NOTE(review): `use_cache` tests `is not None` but the return below
    # tests dict truthiness (`if cache:`); an empty dict would hit the cache
    # path yet omit "cache" from the outputs — confirm callers never pass {}.
    use_cache = cache is not None
    if kv is None:
      # Self-attention: keys/values come from the query input.
      kv = query
    q = self.q(query)
    if self.rescale_query:
      q /= tf.math.sqrt(tf.cast(self.d_kv, dtype=q.dtype))
    k = self.k(kv)
    v = self.v(kv)
    if use_cache:
      k, v = self._update_cache(k, v, cache, decode_position)
    # NOTE: T5 does not explicitly rescale the attention logits by
    # 1/sqrt(q_dim)! This is folded into the initializers of the
    # linear transformations, which is equivalent under Adafactor.
    scores = tf.einsum("bqnd,bknd->bnqk", q, k)  # (bs, n_heads, qlen, klen)
    if position_bias is not None:
      # If position_bias is None, the input embedings should already include
      # position embeddings.
      if use_cache:
        # During decoding only the row for the current position is needed.
        bias_shape = position_bias.shape.as_list()
        position_bias = tf.slice(
            position_bias, [0, 0, decode_position, 0],
            [bias_shape[0], bias_shape[1], 1, bias_shape[3]])
      scores += position_bias
    if mask is not None:
      scores += mask  # (bs, n_heads, qlen, klen)
    # Softmax in float32 for numerical stability.
    weights = tf.nn.softmax(tf.cast(scores, tf.float32), axis=-1)
    # weights shape = (bs, n_heads, qlen, klen)
    weights = tf.cast(weights, scores.dtype)
    weight_shape = tf_utils.get_shape_list(weights)
    # NOTE: T5 broadcasts along the "length" dim, but unclear which one that
    # corresponds to. We assume it is the query dimension.
    # (bs, n_heads, qlen, klen)
    weight_shape[-2] = 1
    weights = self.dropout(weights, training=training, noise_shape=weight_shape)
    c = tf.einsum("bnqk,bknd->bqnd", weights, v)
    c = self.o(c)
    outputs = dict(context=c)
    if cache:
      outputs["cache"] = cache
    return outputs
class SelfAttention(Module):
  """Self attention block including residual connection."""

  def __init__(self,
               d_model: int,
               d_kv: int,
               num_heads: int,
               dropout_rate: Optional[float] = 0.0,
               layer_norm_epsilon: Optional[float] = 1e-6,
               rescale_query: bool = False,
               weight_initializer: Optional[Initializer] = None,
               bias_initializer: Optional[Initializer] = None,
               **kwargs):
    """Builds pre-norm self attention with residual output dropout."""
    super().__init__(**kwargs)
    with self.name_scope:
      self.self_attention = MultiHeadAttention(
          d_model=d_model,
          d_kv=d_kv,
          num_heads=num_heads,
          dropout_rate=dropout_rate,
          rescale_query=rescale_query,
          weight_initializer=weight_initializer,
          bias_initializer=bias_initializer,
          dtype=self.dtype,
          name="attention")
      self.layer_norm = RMSNorm(
          hidden_size=d_model,
          epsilon=layer_norm_epsilon,
          dtype=self.dtype,
          name="layer_norm")
      self.dropout = Dropout(dropout_rate)

  @tf.Module.with_name_scope
  def __call__(self,
               hidden_states,
               attention_mask=None,
               position_bias=None,
               cache=None,
               decode_position=None,
               training=False):
    """Runs layer norm -> self attention -> dropout -> residual add."""
    normed = self.layer_norm(hidden_states)
    outputs = self.self_attention(
        query=normed,
        mask=attention_mask,
        position_bias=position_bias,
        cache=cache,
        decode_position=decode_position,
        training=training)
    context = outputs.pop("context")
    # The dropout mask is shared across the sequence dimension.
    noise_shape = tf_utils.get_shape_list(context)
    noise_shape[-2] = 1
    context = self.dropout(context, noise_shape=noise_shape, training=training)
    outputs["layer_output"] = hidden_states + context
    return outputs
class CrossAttention(Module):
  """Cross attention block including residual connection."""

  def __init__(self,
               d_model: int,
               d_kv: int,
               num_heads: int,
               dropout_rate: Optional[float] = 0.0,
               layer_norm_epsilon: Optional[float] = 1e-6,
               rescale_query: bool = False,
               weight_initializer: Optional[Initializer] = None,
               bias_initializer: Optional[Initializer] = None,
               **kwargs):
    """Builds pre-norm cross attention with residual output dropout."""
    super().__init__(**kwargs)
    with self.name_scope:
      self.cross_attention = MultiHeadAttention(
          d_model=d_model,
          d_kv=d_kv,
          num_heads=num_heads,
          dropout_rate=dropout_rate,
          rescale_query=rescale_query,
          weight_initializer=weight_initializer,
          bias_initializer=bias_initializer,
          dtype=self.dtype,
          name="attention")
      self.layer_norm = RMSNorm(
          hidden_size=d_model,
          epsilon=layer_norm_epsilon,
          dtype=self.dtype,
          name="layer_norm")
      self.dropout = Dropout(dropout_rate)

  @tf.Module.with_name_scope
  def __call__(self,
               hidden_states,
               kv,
               attention_mask=None,
               position_bias=None,
               cache=None,
               training=False):
    """Runs layer norm -> cross attention over `kv` -> dropout -> residual."""
    normed = self.layer_norm(hidden_states)
    outputs = self.cross_attention(
        query=normed,
        kv=kv,
        mask=attention_mask,
        position_bias=position_bias,
        cache=cache,
        training=training)
    context = outputs.pop("context")
    # The dropout mask is shared across the sequence dimension.
    noise_shape = tf_utils.get_shape_list(context)
    noise_shape[-2] = 1
    context = self.dropout(context, noise_shape=noise_shape, training=training)
    outputs["layer_output"] = hidden_states + context
    return outputs
class EncoderBlock(Module):
  """Transformer Encoder Block with only self attention."""

  def __init__(self,
               d_model: int,
               d_kv: int,
               num_heads: int,
               d_ff: int,
               ffn_activations: Sequence[str] = ("relu",),
               dropout_rate: Optional[float] = 0.0,
               layer_norm_epsilon: Optional[float] = 1e-6,
               rescale_query: bool = False,
               weight_initializer: Optional[Initializer] = None,
               bias_initializer: Optional[Initializer] = None,
               **kwargs):
    """Creates one encoder block: pre-norm self attention + pre-norm FFN.

    Args:
      d_model: model (input/output) dimension.
      d_kv: per-head key/value dimension.
      num_heads: number of attention heads.
      d_ff: inner feed-forward dimension.
      ffn_activations: activation names for the FFN projections.
      dropout_rate: dropout rate used throughout the block.
      layer_norm_epsilon: epsilon for both RMS layer norms.
      rescale_query: whether queries are rescaled at call time.
      weight_initializer: kernel initializer for all sublayers.
      bias_initializer: bias initializer for all sublayers.
      **kwargs: forwarded to `Module`.
    """
    super().__init__(**kwargs)
    with self.name_scope:
      self.self_attention = SelfAttention(
          d_model=d_model,
          d_kv=d_kv,
          num_heads=num_heads,
          dropout_rate=dropout_rate,
          # Bug fix: forward the configured epsilon; previously the attention
          # layer norm silently fell back to its default (1e-6).
          layer_norm_epsilon=layer_norm_epsilon,
          rescale_query=rescale_query,
          weight_initializer=weight_initializer,
          bias_initializer=bias_initializer,
          dtype=self.dtype,
          name="self_attention")
      self.ffn_layer_norm = RMSNorm(
          hidden_size=d_model,
          epsilon=layer_norm_epsilon,
          dtype=self.dtype,
          name="ffn_layer_norm")
      self.ffn = FFN(
          d_model=d_model,
          d_ff=d_ff,
          dropout_rate=dropout_rate,
          activations=ffn_activations,
          weight_initializer=weight_initializer,
          bias_initializer=bias_initializer,
          dtype=self.dtype,
          name="ffn")
      self.ffn_output_dropout = Dropout(dropout_rate)

  @tf.Module.with_name_scope
  def __call__(self,
               hidden_states,
               attention_mask=None,
               position_bias=None,
               training=False):
    """Applies self attention then the FFN, each with a residual connection."""
    attention_outputs = self.self_attention(
        hidden_states,
        attention_mask=attention_mask,
        position_bias=position_bias,
        training=training)
    attn_output = attention_outputs["layer_output"]
    ffn_output = self.ffn_layer_norm(attn_output)
    ffn_output = self.ffn(ffn_output, training=training)
    # The dropout mask is shared across the sequence dimension.
    tensor_shape = tf_utils.get_shape_list(ffn_output)
    tensor_shape[-2] = 1
    ffn_output = self.ffn_output_dropout(
        ffn_output, noise_shape=tensor_shape, training=training)
    ffn_output = attn_output + ffn_output
    return ffn_output
class EncDecoderBlock(Module):
  """Transformer Decoder Block with enc-decoder cross attention."""

  def __init__(self,
               d_model: int,
               d_kv: int,
               num_heads: int,
               d_ff: int,
               ffn_activations: Sequence[str] = ("relu",),
               dropout_rate: Optional[float] = 0.0,
               layer_norm_epsilon: Optional[float] = 1e-6,
               rescale_query: bool = False,
               weight_initializer: Optional[Initializer] = None,
               bias_initializer: Optional[Initializer] = None,
               **kwargs):
    """Creates one decoder block: self attention, cross attention, FFN.

    Args:
      d_model: model (input/output) dimension.
      d_kv: per-head key/value dimension.
      num_heads: number of attention heads.
      d_ff: inner feed-forward dimension.
      ffn_activations: activation names for the FFN projections.
      dropout_rate: dropout rate used throughout the block.
      layer_norm_epsilon: epsilon for all RMS layer norms in the block.
      rescale_query: whether queries are rescaled at call time.
      weight_initializer: kernel initializer for all sublayers.
      bias_initializer: bias initializer for all sublayers.
      **kwargs: forwarded to `Module`.
    """
    super().__init__(**kwargs)
    with self.name_scope:
      self.self_attention = SelfAttention(
          d_model=d_model,
          d_kv=d_kv,
          num_heads=num_heads,
          dropout_rate=dropout_rate,
          # Bug fix: forward the configured epsilon; previously both
          # attention layer norms silently used their default (1e-6).
          layer_norm_epsilon=layer_norm_epsilon,
          rescale_query=rescale_query,
          weight_initializer=weight_initializer,
          bias_initializer=bias_initializer,
          dtype=self.dtype,
          name="self_attention")
      self.cross_attention = CrossAttention(
          d_model=d_model,
          d_kv=d_kv,
          num_heads=num_heads,
          dropout_rate=dropout_rate,
          layer_norm_epsilon=layer_norm_epsilon,
          rescale_query=rescale_query,
          weight_initializer=weight_initializer,
          bias_initializer=bias_initializer,
          dtype=self.dtype,
          name="cross_attention")
      self.ffn_layer_norm = RMSNorm(
          hidden_size=d_model,
          epsilon=layer_norm_epsilon,
          dtype=self.dtype,
          name="ffn_layer_norm")
      self.ffn = FFN(
          d_model=d_model,
          d_ff=d_ff,
          dropout_rate=dropout_rate,
          activations=ffn_activations,
          weight_initializer=weight_initializer,
          bias_initializer=bias_initializer,
          dtype=self.dtype,
          name="ffn")
      self.ffn_output_dropout = Dropout(dropout_rate)

  @tf.Module.with_name_scope
  def __call__(self,
               hidden_states,
               encoder_hidden_states,
               attention_mask=None,
               encoder_decoder_mask=None,
               position_bias=None,
               cache=None,
               decode_position=None,
               training=False):
    """Runs the block; returns `(layer_output, cache)`."""
    self_attention_outputs = self.self_attention(
        hidden_states,
        attention_mask=attention_mask,
        decode_position=decode_position,
        position_bias=position_bias,
        cache=cache,
        training=training)
    if "cache" in self_attention_outputs:
      cache = self_attention_outputs["cache"]
    # No relative position bias is used for encoder-decoder cross attention.
    cross_attention_outputs = self.cross_attention(
        self_attention_outputs["layer_output"],
        kv=encoder_hidden_states,
        attention_mask=encoder_decoder_mask,
        training=training)
    attn_output = cross_attention_outputs["layer_output"]
    ffn_output = self.ffn_layer_norm(attn_output)
    ffn_output = self.ffn(ffn_output, training=training)
    # The dropout mask is shared across the sequence dimension.
    tensor_shape = tf_utils.get_shape_list(ffn_output)
    tensor_shape[-2] = 1
    ffn_output = self.ffn_output_dropout(
        ffn_output, noise_shape=tensor_shape, training=training)
    ffn_output = attn_output + ffn_output
    return ffn_output, cache
@dataclasses.dataclass
class T5TransformerParams:
  """Transformer parameters."""
  # Number of encoder layers (also used for the decoder unless
  # `num_decoder_layers` is set).
  num_layers: int
  # Model (embedding/hidden) dimension.
  d_model: int
  # Per-head key/value dimension.
  d_kv: int
  # Number of attention heads.
  num_heads: int
  # Inner feed-forward dimension.
  d_ff: int
  # Vocabulary size for the token embeddings.
  vocab_size: int
  dropout_rate: float = 0.0
  layer_norm_epsilon: float = 1e-6
  # Whether encoder and decoder share one token-embedding table.
  shared_embedding: bool = False
  vocab_embeddings_initializer: Optional[Initializer] = None
  relative_attention_num_buckets: int = 32
  relative_attention_max_distance: int = 128
  relative_embeddings_initializer: Optional[Initializer] = None
  # NOTE(review): this default is a single initializer instance shared by
  # every dataclass instance — confirm that is intended.
  weight_initializer: Optional[Initializer] = (tf.keras.initializers.HeNormal())
  bias_initializer: Optional[Initializer] = None
  # If True, queries are rescaled at call time instead of via initializers.
  rescale_query: bool = False
  bidirectional: bool = True
  ffn_activations: Sequence[str] = ("relu",)
  # If True, output logits reuse the (transposed) embedding table.
  logits_via_embedding: bool = True
  num_decoder_layers: Optional[int] = None
  # Whether embedding lookups use one-hot matmuls (TPU friendly).
  one_hot_embedding: bool = True
  # If True, all layers in a stack share the first layer's weights.
  layer_sharing: bool = False
class Encoder(Module):
  """Transformer Model Encoder for sequence to sequence."""

  def __init__(self,
               config: T5TransformerParams,
               shared_embedding: Optional[tf.Variable] = None,
               compute_dtype: tf.DType = tf.float32,
               **kwargs):
    """Creates the encoder stack.

    Args:
      config: hyperparameters for the stack.
      shared_embedding: optional embedding module shared with the decoder;
        when None a fresh input embedding is created.
      compute_dtype: activation dtype.
      **kwargs: forwarded to `Module`.
    """
    super().__init__(**kwargs)
    self.config = config
    self.compute_dtype = compute_dtype
    self.embed_dim = config.d_model
    with self.name_scope:
      # Input Embedding.
      if shared_embedding is None:
        self.input_embed = Embed(
            vocab_size=self.config.vocab_size,
            features=self.config.d_model,
            embeddings_initializer=self.config.vocab_embeddings_initializer,
            dtype=self.dtype,
            compute_dtype=self.compute_dtype,
            name="input_embedding")
      else:
        self.input_embed = shared_embedding
      # Creates an alias to the input embed for encoder-only models.
      self.word_embed = self.input_embed
      self.relative_embedding = RelativePositionEmbedding(
          num_heads=self.config.num_heads,
          relative_attention_num_buckets=self.config
          .relative_attention_num_buckets,
          relative_attention_max_distance=self.config
          .relative_attention_max_distance,
          bidirectional=self.config.bidirectional,
          embeddings_initializer=self.config.relative_embeddings_initializer,
          dtype=self.dtype,
          compute_dtype=self.compute_dtype,
          name="relative_posemb")
      self.input_dropout = Dropout(self.config.dropout_rate)
      self.encoder_layers = []
      for layer_idx in range(self.config.num_layers):
        if self.config.layer_sharing and layer_idx > 0:
          # Layer sharing reuses the first block for every layer.
          self.encoder_layers.append(self.encoder_layers[0])
        else:
          self.encoder_layers.append(
              EncoderBlock(
                  d_model=self.config.d_model,
                  d_kv=self.config.d_kv,
                  num_heads=self.config.num_heads,
                  d_ff=self.config.d_ff,
                  dropout_rate=self.config.dropout_rate,
                  # Bug fix: forward the configured epsilon; previously the
                  # blocks silently used their default (1e-6) regardless of
                  # `config.layer_norm_epsilon`.
                  layer_norm_epsilon=self.config.layer_norm_epsilon,
                  ffn_activations=self.config.ffn_activations,
                  rescale_query=self.config.rescale_query,
                  weight_initializer=self.config.weight_initializer,
                  bias_initializer=self.config.bias_initializer,
                  dtype=self.dtype,
                  name="encoder_block_%d" % layer_idx))
      self.output_norm = RMSNorm(
          hidden_size=self.config.d_model,
          epsilon=self.config.layer_norm_epsilon,
          dtype=self.dtype,
          name="final_layer_norm")
      self.output_dropout = Dropout(self.config.dropout_rate)

  @tf.Module.with_name_scope
  def __call__(self,
               inputs=None,
               encoder_mask=None,
               dense_inputs=None,
               training=False):
    """Applies Transformer model on the inputs.

    Args:
      inputs: input word ids. Optional if dense data are provided.
      encoder_mask: the encoder self-attention mask.
      dense_inputs: dense input data. Concat after the embedding if word ids
        are provided.
      training: whether it is training pass, affecting dropouts.

    Returns:
      output of a transformer encoder.

    Raises:
      ValueError: if both `inputs` and `dense_inputs` are None.
    """
    # Casts inputs to the dtype.
    if encoder_mask is not None:
      encoder_mask = tf.cast(encoder_mask, self.compute_dtype)
    cfg = self.config
    inputs_array = []
    if inputs is not None:
      inputs_array.append(
          self.input_embed(inputs, one_hot=cfg.one_hot_embedding))
    if dense_inputs is not None:
      inputs_array.append(dense_inputs)
    if not inputs_array:
      raise ValueError("At least one of inputs and dense_inputs must not be "
                       "None.")
    # Token embeddings and dense inputs are concatenated along the sequence.
    x = tf.concat(inputs_array, axis=1)
    # The dropout mask is shared across the sequence dimension.
    tensor_shape = tf_utils.get_shape_list(x)
    tensor_shape[-2] = 1
    x = self.input_dropout(x, noise_shape=tensor_shape, training=training)
    if inputs is not None:
      input_length = tf_utils.get_shape_list(inputs)[1]
    else:
      input_length = 0
    position_bias = self.relative_embedding(input_length, input_length)
    if dense_inputs is not None:
      # Here we ignore relative position bias for dense embeddings.
      # TODO(yejiayu): If we proceed to video use cases, rework this part.
      dense_input_length = tf_utils.get_shape_list(dense_inputs)[1]
      # Position bias shape: [batch, 1, len, len]
      paddings = tf.constant([[0, 0], [0, 0], [0, dense_input_length],
                              [0, dense_input_length]])
      position_bias = tf.pad(position_bias, paddings, "CONSTANT")
    for i in range(cfg.num_layers):
      x = self.encoder_layers[i](
          x,
          attention_mask=encoder_mask,
          position_bias=position_bias,
          training=training)
    encoded = self.output_norm(x)
    encoded = self.output_dropout(encoded, training=training)
    return encoded
class Decoder(Module):
  """Transformer Model Decoder for sequence to sequence."""
  def __init__(self,
               config: T5TransformerParams,
               shared_embedding: Optional[tf.Variable] = None,
               compute_dtype: tf.DType = tf.float32,
               **kwargs):
    """Builds the decoder stack.
    Args:
      config: hyperparameters for the decoder.
      shared_embedding: optional embedding shared with the encoder; when None a
        private target embedding is created.
      compute_dtype: dtype used for computation (e.g. for mixed precision).
      **kwargs: forwarded to `Module`.
    """
    super().__init__(**kwargs)
    self.config = config
    self.compute_dtype = compute_dtype
    # Default the decoder depth to the (encoder) layer count when unspecified.
    if self.config.num_decoder_layers is None:
      self.config.num_decoder_layers = self.config.num_layers
    with self.name_scope:
      # Target Embedding.
      if shared_embedding is None:
        self.target_embed = Embed(
            vocab_size=self.config.vocab_size,
            features=self.config.d_model,
            embeddings_initializer=self.config.vocab_embeddings_initializer,
            dtype=self.dtype,
            compute_dtype=self.compute_dtype,
            name="target_embedding")
      else:
        self.target_embed = shared_embedding
      self.target_dropout = Dropout(self.config.dropout_rate,)
      # Position bias for the target self attention.
      self.relative_embedding = RelativePositionEmbedding(
          num_heads=self.config.num_heads,
          relative_attention_num_buckets=self.config
          .relative_attention_num_buckets,
          relative_attention_max_distance=self.config
          .relative_attention_max_distance,
          bidirectional=self.config.bidirectional,
          embeddings_initializer=self.config.relative_embeddings_initializer,
          dtype=self.dtype,
          compute_dtype=self.compute_dtype,
          name="relative_posemb")
      self.decoder_layers = []
      for layer_idx in range(self.config.num_decoder_layers):
        # With layer_sharing, every layer after the first reuses layer 0's
        # weights (the same object is appended again).
        if self.config.layer_sharing and layer_idx > 0:
          self.decoder_layers.append(self.decoder_layers[0])
        else:
          self.decoder_layers.append(
              EncDecoderBlock(
                  d_model=self.config.d_model,
                  d_kv=self.config.d_kv,
                  num_heads=self.config.num_heads,
                  d_ff=self.config.d_ff,
                  dropout_rate=self.config.dropout_rate,
                  ffn_activations=self.config.ffn_activations,
                  rescale_query=self.config.rescale_query,
                  weight_initializer=self.config.weight_initializer,
                  bias_initializer=self.config.bias_initializer,
                  dtype=self.dtype,
                  name="decoder_block_%d" % layer_idx))
      self.output_norm = RMSNorm(
          hidden_size=self.config.d_model,
          epsilon=self.config.layer_norm_epsilon,
          dtype=self.dtype,
          name="final_layer_norm")
      self.output_dropout = Dropout(self.config.dropout_rate,)
      # When logits_via_embedding is set, the output projection reuses the
      # embedding matrix (see __call__); otherwise a dedicated dense layer maps
      # d_model -> vocab_size.
      if not self.config.logits_via_embedding:
        self.logits_dense = Linear(
            in_features=self.config.d_model,
            out_features=self.config.vocab_size,
            use_bias=False,
            dtype=self.dtype,
            name="logits")
  @tf.Module.with_name_scope
  def __call__(self,
               decoder_input_tokens,
               encoded,
               decoder_mask=None,
               encoder_decoder_mask=None,
               decode=False,
               decode_position=None,
               cache=None,
               max_decode_len=None,
               training=False):
    """Applies Transformer model on the inputs.
    Args:
      decoder_input_tokens: the decoder input tokens.
      encoded: the encoder outputs.
      decoder_mask: the decoder self-attention mask.
      encoder_decoder_mask: the cross-attention mask.
      decode: Whether to perform autoaggressive decoding.
      decode_position: integer, the position to decode.
      cache: The cache dictionary of key, value tensors.
      max_decode_len: An optional integer specifying the maximum decoding
        length. Note that this is only used for defining the relative position
        embedding parameters.
      training: Whether it is training pass, affecting dropouts.
    Returns:
      output of a transformer encoder.
    """
    cfg = self.config
    # Casts inputs to the dtype.
    encoded = tf.cast(encoded, self.compute_dtype)
    if decoder_mask is not None:
      decoder_mask = tf.cast(decoder_mask, self.compute_dtype)
    if encoder_decoder_mask is not None:
      encoder_decoder_mask = tf.cast(encoder_decoder_mask, self.compute_dtype)
    x = self.target_embed(decoder_input_tokens, one_hot=cfg.one_hot_embedding)
    tensor_shape = tf_utils.get_shape_list(x)
    # Share the dropout mask across the sequence dimension.
    tensor_shape[-2] = 1
    x = self.target_dropout(x, noise_shape=tensor_shape, training=training)
    if cache is not None:
      # Incremental decoding: size the relative-position table to the full
      # decode horizon rather than the (length-1) step input.
      position_bias = self.relative_embedding(max_decode_len, max_decode_len)
    else:
      input_length = tf_utils.get_shape_list(decoder_input_tokens)[1]
      position_bias = self.relative_embedding(input_length, input_length)
    for i in range(cfg.num_decoder_layers):
      if cache is None:
        x, _ = self.decoder_layers[i](
            x,
            encoder_hidden_states=encoded,
            attention_mask=decoder_mask,
            encoder_decoder_mask=encoder_decoder_mask,
            position_bias=position_bias,
            training=training)
      else:
        # Each layer updates its own key/value cache entry in place.
        x, cache[i] = self.decoder_layers[i](
            x,
            encoder_hidden_states=encoded,
            attention_mask=decoder_mask,
            encoder_decoder_mask=encoder_decoder_mask,
            position_bias=position_bias,
            decode_position=decode_position,
            cache=cache[i],
            training=training)
    output = self.output_norm(x)
    tensor_shape = tf_utils.get_shape_list(output)
    tensor_shape[-2] = 1
    # NOTE(review): this applies self.target_dropout to the final output even
    # though self.output_dropout is constructed in __init__ and never used.
    # The rates are identical, so behavior matches, but it looks like a typo —
    # confirm before changing.
    output = self.target_dropout(
        output, noise_shape=tensor_shape, training=training)
    if self.config.logits_via_embedding:
      # Tie output projection to the embedding matrix, scaled by 1/sqrt(d).
      logits = self.target_embed.attend(output)
      logits = logits / math.sqrt(cfg.d_model)
    else:
      logits = self.logits_dense(output)
    return logits, cache
class T5Transformer(Module):
  """Transformer Encoder+Decoder for sequence to sequence."""
  def __init__(self,
               config: T5TransformerParams,
               compute_dtype: tf.DType = tf.float32,
               **kwargs):
    """Builds the encoder/decoder pair from a single config.
    Args:
      config: shared hyperparameters; bidirectionality is overridden per side.
      compute_dtype: dtype used for computation (e.g. for mixed precision).
      **kwargs: forwarded to `Module`.
    """
    super().__init__(**kwargs)
    # Builds the model components.
    shared_embedding = config.shared_embedding
    self.compute_dtype = compute_dtype
    # The decoder attends causally, the encoder bidirectionally; everything
    # else in the config is shared between the two copies.
    self.decoder_cfg = dataclasses.replace(config, bidirectional=False)
    if self.decoder_cfg.num_decoder_layers is None:
      self.decoder_cfg.num_decoder_layers = self.decoder_cfg.num_layers
    self.encoder_cfg = dataclasses.replace(config, bidirectional=True)
    with self.name_scope:
      # Optionally share one embedding table between encoder and decoder.
      if shared_embedding:
        self.shared_embedding = Embed(
            vocab_size=config.vocab_size,
            features=config.d_model,
            embeddings_initializer=config.vocab_embeddings_initializer,
            dtype=self.dtype,
            compute_dtype=self.compute_dtype,
            name="shared")
      else:
        self.shared_embedding = None
      self.encoder = Encoder(
          self.encoder_cfg,
          self.shared_embedding,
          dtype=self.dtype,
          compute_dtype=self.compute_dtype)
      self.decoder = Decoder(
          self.decoder_cfg,
          self.shared_embedding,
          dtype=self.dtype,
          compute_dtype=self.compute_dtype)
  def encode(self,
             encoder_input_tokens=None,
             encoder_segment_ids=None,
             encoder_dense_inputs=None,
             encoder_dense_segment_ids=None,
             training=False):
    """Runs the encoder, building the self-attention mask from padding and
    (optionally) packing segment ids.
    Raises:
      ValueError: if neither token nor dense inputs are given.
    """
    # A position is "eligible" (attendable) when it is non-padding; for dense
    # inputs, when any feature in the vector is non-zero.
    eligible_position_array = []
    if encoder_input_tokens is not None:
      eligible_position_array.append(
          tf.cast(tf.not_equal(encoder_input_tokens, 0), self.compute_dtype))
    if encoder_dense_inputs is not None:
      eligible_dense_positions = tf.cast(
          tf.reduce_any(tf.not_equal(encoder_dense_inputs, 0), axis=-1),
          self.compute_dtype)
      eligible_position_array.append(eligible_dense_positions)
    if not eligible_position_array:
      raise ValueError("At least one of encoder_input_tokens and"
                       " encoder_dense_inputs must be provided.")
    eligible_positions = tf.concat(eligible_position_array, axis=1)
    encoder_mask = make_attention_mask(
        eligible_positions, eligible_positions, dtype=tf.bool)
    # For packed examples, additionally restrict attention to positions that
    # share the same segment id.
    encoder_segment_id_array = []
    if encoder_segment_ids is not None:
      encoder_segment_id_array.append(encoder_segment_ids)
    if encoder_dense_segment_ids is not None:
      encoder_segment_id_array.append(encoder_dense_segment_ids)
    if encoder_segment_id_array:
      encoder_segment_ids = tf.concat(encoder_segment_id_array, axis=1)
      segment_mask = make_attention_mask(
          encoder_segment_ids, encoder_segment_ids, tf.equal, dtype=tf.bool)
      encoder_mask = tf.math.logical_and(encoder_mask, segment_mask)
    # Convert the boolean mask to an additive bias: 0 where allowed, -1e9
    # where masked.
    encoder_mask = (1.0 - tf.cast(encoder_mask, self.compute_dtype)) * -1e9
    return self.encoder(
        encoder_input_tokens,
        encoder_mask,
        encoder_dense_inputs,
        training=training)
  def decode(
      self,
      encoded,
      decoder_target_tokens,
      encoder_input_tokens=None,  # only used for masks
      encoder_dense_inputs=None,
      decoder_input_tokens=None,
      encoder_segment_ids=None,
      encoder_dense_segment_ids=None,
      decoder_segment_ids=None,
      decode_position=None,
      cache=None,
      max_decode_len=None,
      decode=False,
      training=False):
    """Runs the decoder over encoder outputs, building decoder self-attention
    and cross-attention masks; returns dict(logits=..., encoded=..., cache=...).
    """
    # Eligible (non-padding) encoder positions, mirroring encode().
    eligible_inputs_array = []
    if encoder_input_tokens is not None:
      eligible_inputs = tf.cast(
          tf.not_equal(encoder_input_tokens, 0), self.compute_dtype)
      eligible_inputs_array.append(eligible_inputs)
    if encoder_dense_inputs is not None:
      eligible_dense_inputs = tf.cast(
          tf.reduce_any(tf.not_equal(encoder_dense_inputs, 0), axis=-1),
          self.compute_dtype)
      eligible_inputs_array.append(eligible_dense_inputs)
    eligible_inputs = tf.concat(eligible_inputs_array, axis=1)
    if decode:
      # For decoding, the decoder_input_tokens is the decoder_target_tokens.
      decoder_input_tokens = decoder_target_tokens
      # fast autoregressive decoding uses only a special encoder-decoder mask
      decoder_mask = None
      encoder_decoder_mask = make_attention_mask(
          tf.cast(
              tf.not_equal(tf.ones_like(decoder_target_tokens), 0),
              self.compute_dtype),
          eligible_inputs,
          dtype=tf.bool)
    else:
      # Note that, masks should be created using decoder_target_tokens.
      eligible_targets = tf.cast(
          tf.not_equal(decoder_target_tokens, 0), self.compute_dtype)
      # Teacher-forced training: padding mask AND causal mask.
      decoder_mask = tf.math.logical_and(
          make_attention_mask(
              eligible_targets, eligible_targets, dtype=tf.bool),
          make_causal_mask(decoder_target_tokens, dtype=tf.bool))
      encoder_decoder_mask = make_attention_mask(
          eligible_targets, eligible_inputs, dtype=tf.bool)
    if encoder_segment_ids is not None:
      # Packed examples: restrict both masks to matching segments.
      if decoder_mask is not None:
        decoder_mask = tf.math.logical_and(
            decoder_mask,
            make_attention_mask(
                decoder_segment_ids,
                decoder_segment_ids,
                tf.equal,
                dtype=tf.bool))
      if encoder_dense_segment_ids is not None:
        encoder_segment_ids = tf.concat(
            [encoder_segment_ids, encoder_dense_segment_ids], axis=1)
      encoder_decoder_mask = tf.math.logical_and(
          encoder_decoder_mask,
          make_attention_mask(
              decoder_segment_ids,
              encoder_segment_ids,
              tf.equal,
              dtype=tf.bool))
    # Convert boolean masks to additive biases (0 allowed / -1e9 masked).
    if decoder_mask is not None:
      decoder_mask = (1.0 - tf.cast(decoder_mask, self.compute_dtype)) * -1e9
    encoder_decoder_mask = (
        1.0 - tf.cast(encoder_decoder_mask, self.compute_dtype)) * -1e9
    logits, cache = self.decoder(
        decoder_input_tokens,
        encoded,
        decode_position=decode_position,
        decoder_mask=decoder_mask,
        encoder_decoder_mask=encoder_decoder_mask,
        cache=cache,
        max_decode_len=max_decode_len,
        decode=decode,
        training=training)
    return dict(logits=logits, encoded=encoded, cache=cache)
  @tf.Module.with_name_scope
  def __call__(self,
               encoder_input_tokens=None,
               decoder_target_tokens=None,
               encoder_dense_inputs=None,
               encoder_dense_segment_ids=None,
               decoder_input_tokens=None,
               encoder_segment_ids=None,
               decoder_segment_ids=None,
               training=False):
    """Applies Transformer model on the inputs.
    Args:
      encoder_input_tokens: input tokens to the encoder.
      decoder_target_tokens: target tokens to the decoder.
      encoder_dense_inputs: input dense vectors to the encoder.
      encoder_dense_segment_ids: dense input segmentation info for packed
        examples.
      decoder_input_tokens: input tokens to the decoder, only required for
        training.
      encoder_segment_ids: input segmentation info for packed examples.
      decoder_segment_ids: target segmentation info for packed examples.
      training: whether it is training pass, affecting dropouts.
    Returns:
      a dictionary of logits/cache.
    """
    encoded = self.encode(
        encoder_input_tokens=encoder_input_tokens,
        encoder_segment_ids=encoder_segment_ids,
        encoder_dense_inputs=encoder_dense_inputs,
        encoder_dense_segment_ids=encoder_dense_segment_ids,
        training=training)
    outputs = self.decode(
        encoded=encoded,
        decoder_target_tokens=decoder_target_tokens,
        encoder_input_tokens=encoder_input_tokens,  # only used for masks.
        encoder_dense_inputs=encoder_dense_inputs,  # only used for masks.
        decoder_input_tokens=decoder_input_tokens,
        encoder_segment_ids=encoder_segment_ids,
        encoder_dense_segment_ids=encoder_dense_segment_ids,
        decoder_segment_ids=decoder_segment_ids,
        training=training)
    outputs["encoded"] = encoded
    return outputs
  @property
  def checkpoint_items(self):
    # Sub-modules exposed for checkpointing by name.
    return dict(encoder=self.encoder, decoder=self.decoder)
| 35.89575 | 80 | 0.642946 |
acef6389a7c3b801be97a8a592b4f9d49106da2e | 3,188 | py | Python | package_control/commands/advanced_install_package_command.py | FichteForks/package_control | c9034102844456c9c69ef13ac159d59d0de29833 | [
"Unlicense",
"MIT"
] | null | null | null | package_control/commands/advanced_install_package_command.py | FichteForks/package_control | c9034102844456c9c69ef13ac159d59d0de29833 | [
"Unlicense",
"MIT"
] | null | null | null | package_control/commands/advanced_install_package_command.py | FichteForks/package_control | c9034102844456c9c69ef13ac159d59d0de29833 | [
"Unlicense",
"MIT"
] | null | null | null | import threading
import re
import time
import functools
import sublime
import sublime_plugin
from ..show_error import show_error
from ..package_manager import PackageManager
from ..package_disabler import PackageDisabler
from ..thread_progress import ThreadProgress
try:
str_cls = unicode
bytes_cls = str
except (NameError):
str_cls = str
bytes_cls = bytes
class AdvancedInstallPackageCommand(sublime_plugin.WindowCommand):
"""
A command that accepts a comma-separated list of packages to install, or
prompts the user to paste a comma-separated list
"""
def run(self, packages=None):
is_str = isinstance(packages, str_cls)
is_bytes = isinstance(packages, bytes_cls)
if packages and (is_str or is_bytes):
packages = self.split(packages)
if packages and isinstance(packages, list):
return self.start(packages)
self.window.show_input_panel('Packages to Install (Comma-separated)',
'', self.on_done, None, None)
def split(self, packages):
if isinstance(packages, bytes_cls):
packages = packages.decode('utf-8')
return re.split(u'\s*,\s*', packages)
def on_done(self, input):
"""
Input panel handler - adds the provided URL as a repository
:param input:
A string of the URL to the new repository
"""
input = input.strip()
if not input:
show_error(u"No package names were entered" % input)
return
self.start(self.split(input))
def start(self, packages):
thread = AdvancedInstallPackageThread(packages)
thread.start()
message = 'Installing package'
if len(packages) > 1:
message += 's'
ThreadProgress(thread, message, '')
class AdvancedInstallPackageThread(threading.Thread, PackageDisabler):
"""
A thread to run the installation of one or more packages in
"""
def __init__(self, packages):
"""
:param window:
An instance of :class:`sublime.Window` that represents the Sublime
Text window to show the available package list in.
"""
self.manager = PackageManager()
self.packages = packages
self.disabled = self.disable_packages(packages, 'install')
self.installed = self.manager.list_packages()
threading.Thread.__init__(self)
def run(self):
# Allow packages to properly disable
time.sleep(0.7)
def do_reenable_package(package_name):
type_ = 'install' if package_name not in self.installed else 'upgrade'
self.reenable_package(package_name, type_)
for package in self.packages:
result = self.manager.install_package(package)
# Do not reenable if installation deferred until next restart
if result is not None:
# We use a functools.partial to generate the on-complete callback in
# order to bind the current value of the parameters, unlike lambdas.
sublime.set_timeout(functools.partial(do_reenable_package, package), 700)
| 29.247706 | 89 | 0.646173 |
acef63ec6a36c330498f1598623fc99ca4d5cbc4 | 338 | py | Python | humandt/tests.py | justquick/django-human-datetime | 9dc79eeb9f66fb6c94d67598b34a8469ccab8839 | [
"Apache-2.0"
] | 2 | 2015-04-28T08:43:45.000Z | 2021-01-12T11:21:50.000Z | humandt/tests.py | justquick/django-human-datetime | 9dc79eeb9f66fb6c94d67598b34a8469ccab8839 | [
"Apache-2.0"
] | null | null | null | humandt/tests.py | justquick/django-human-datetime | 9dc79eeb9f66fb6c94d67598b34a8469ccab8839 | [
"Apache-2.0"
] | 2 | 2015-06-25T20:51:36.000Z | 2015-09-23T19:53:25.000Z | from django.test import TestCase
from parser import parse
from datetime import datetime
class HumanTests(TestCase):
    """Tests for the human-readable datetime parser."""

    def setUp(self):
        # Capture one reference time so every assertion compares against the
        # same "now".
        self.now = datetime.now()

    def test_tomorrow(self):
        """'tomorrow 4PM' parses to 16:00 on the following calendar day."""
        from datetime import timedelta
        t = parse('tomorrow 4PM')
        # Bug fix: ``self.now.day + 1`` is wrong on the last day of a month
        # (e.g. Jan 31 -> expects day 32); add a real timedelta instead.
        self.assertEqual(t.day, (self.now + timedelta(days=1)).day)
        self.assertEqual(t.hour, 16)
| 24.142857 | 47 | 0.642012 |
acef64409113895b9402bf19dc6211262a4e6177 | 163 | py | Python | app/__init__.py | totoro0104/fastapi-example | edb197fc0160a72c72f9bd071751fd3e4dae9193 | [
"Apache-2.0"
] | 2 | 2021-05-06T07:51:48.000Z | 2022-01-25T05:50:22.000Z | app/__init__.py | totoro0104/fastapi-example | edb197fc0160a72c72f9bd071751fd3e4dae9193 | [
"Apache-2.0"
] | null | null | null | app/__init__.py | totoro0104/fastapi-example | edb197fc0160a72c72f9bd071751fd3e4dae9193 | [
"Apache-2.0"
] | null | null | null | from fastapi import FastAPI
from config import settings
# Application instance, configured from the project settings: the title and
# the OpenAPI schema URL (mounted under the configured API prefix).
app = FastAPI(
    title=settings.PROJECT_NAME,
    openapi_url=f"{settings.API_PREFIX}/openapi.json"
)
| 16.3 | 53 | 0.760736 |
acef64a6cfbd7ba8383b541db315bff606e62865 | 10,981 | py | Python | tests/integration/goldens/logging/google/cloud/logging_v2/services/logging_service_v2/transports/base.py | znowdev/gapic-generator-python | 18ba7a0933461dfa3ecfccf48f2233d65824144a | [
"Apache-2.0"
] | null | null | null | tests/integration/goldens/logging/google/cloud/logging_v2/services/logging_service_v2/transports/base.py | znowdev/gapic-generator-python | 18ba7a0933461dfa3ecfccf48f2233d65824144a | [
"Apache-2.0"
] | null | null | null | tests/integration/goldens/logging/google/cloud/logging_v2/services/logging_service_v2/transports/base.py | znowdev/gapic-generator-python | 18ba7a0933461dfa3ecfccf48f2233d65824144a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.logging_v2.types import logging
from google.protobuf import empty_pb2 # type: ignore
# Report the installed google-cloud-logging version in the client info sent
# with API requests; fall back to an unversioned ClientInfo when the
# distribution metadata is unavailable (e.g. vendored or unpackaged installs).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-logging',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class LoggingServiceV2Transport(abc.ABC):
    """Abstract transport class for LoggingServiceV2."""

    # OAuth scopes requested by default for this service.
    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/cloud-platform',
        'https://www.googleapis.com/auth/cloud-platform.read-only',
        'https://www.googleapis.com/auth/logging.admin',
        'https://www.googleapis.com/auth/logging.read',
        'https://www.googleapis.com/auth/logging.write',
    )

    DEFAULT_HOST: str = 'logging.googleapis.com'
    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: ga_credentials.Credentials = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ':' not in host:
            host += ':443'
        self._host = host

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                                credentials_file,
                                **scopes_kwargs,
                                quota_project_id=quota_project_id
                            )

        elif credentials is None:
            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)

        # If the credentials are service account credentials, then always try to use self signed JWT.
        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods: each RPC gets retry-on-transient
        # (DeadlineExceeded / InternalServerError / ServiceUnavailable) with
        # exponential backoff, plus a default timeout.
        self._wrapped_methods = {
            self.delete_log: gapic_v1.method.wrap_method(
                self.delete_log,
                default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3,                predicate=retries.if_exception_type(
                    core_exceptions.DeadlineExceeded,
                    core_exceptions.InternalServerError,
                    core_exceptions.ServiceUnavailable,
                ),
                deadline=60.0,
            ),
                default_timeout=60.0,
                client_info=client_info,
            ),
            self.write_log_entries: gapic_v1.method.wrap_method(
                self.write_log_entries,
                default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3,                predicate=retries.if_exception_type(
                    core_exceptions.DeadlineExceeded,
                    core_exceptions.InternalServerError,
                    core_exceptions.ServiceUnavailable,
                ),
                deadline=60.0,
            ),
                default_timeout=60.0,
                client_info=client_info,
            ),
            self.list_log_entries: gapic_v1.method.wrap_method(
                self.list_log_entries,
                default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3,                predicate=retries.if_exception_type(
                    core_exceptions.DeadlineExceeded,
                    core_exceptions.InternalServerError,
                    core_exceptions.ServiceUnavailable,
                ),
                deadline=60.0,
            ),
                default_timeout=60.0,
                client_info=client_info,
            ),
            self.list_monitored_resource_descriptors: gapic_v1.method.wrap_method(
                self.list_monitored_resource_descriptors,
                default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3,                predicate=retries.if_exception_type(
                    core_exceptions.DeadlineExceeded,
                    core_exceptions.InternalServerError,
                    core_exceptions.ServiceUnavailable,
                ),
                deadline=60.0,
            ),
                default_timeout=60.0,
                client_info=client_info,
            ),
            self.list_logs: gapic_v1.method.wrap_method(
                self.list_logs,
                default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3,                predicate=retries.if_exception_type(
                    core_exceptions.DeadlineExceeded,
                    core_exceptions.InternalServerError,
                    core_exceptions.ServiceUnavailable,
                ),
                deadline=60.0,
            ),
                default_timeout=60.0,
                client_info=client_info,
            ),
            self.tail_log_entries: gapic_v1.method.wrap_method(
                self.tail_log_entries,
                default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3,                predicate=retries.if_exception_type(
                    core_exceptions.DeadlineExceeded,
                    core_exceptions.InternalServerError,
                    core_exceptions.ServiceUnavailable,
                ),
                deadline=3600.0,
            ),
                # Streaming tail RPC gets a much longer (1h) timeout.
                default_timeout=3600.0,
                client_info=client_info,
            ),
         }

    def close(self):
        """Closes resources associated with the transport.

       .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    # The properties below are abstract RPC hooks; concrete transports
    # (gRPC, gRPC-asyncio) return the bound callable for each method.
    @property
    def delete_log(self) -> Callable[
            [logging.DeleteLogRequest],
            Union[
                empty_pb2.Empty,
                Awaitable[empty_pb2.Empty]
            ]]:
        raise NotImplementedError()

    @property
    def write_log_entries(self) -> Callable[
            [logging.WriteLogEntriesRequest],
            Union[
                logging.WriteLogEntriesResponse,
                Awaitable[logging.WriteLogEntriesResponse]
            ]]:
        raise NotImplementedError()

    @property
    def list_log_entries(self) -> Callable[
            [logging.ListLogEntriesRequest],
            Union[
                logging.ListLogEntriesResponse,
                Awaitable[logging.ListLogEntriesResponse]
            ]]:
        raise NotImplementedError()

    @property
    def list_monitored_resource_descriptors(self) -> Callable[
            [logging.ListMonitoredResourceDescriptorsRequest],
            Union[
                logging.ListMonitoredResourceDescriptorsResponse,
                Awaitable[logging.ListMonitoredResourceDescriptorsResponse]
            ]]:
        raise NotImplementedError()

    @property
    def list_logs(self) -> Callable[
            [logging.ListLogsRequest],
            Union[
                logging.ListLogsResponse,
                Awaitable[logging.ListLogsResponse]
            ]]:
        raise NotImplementedError()

    @property
    def tail_log_entries(self) -> Callable[
            [logging.TailLogEntriesRequest],
            Union[
                logging.TailLogEntriesResponse,
                Awaitable[logging.TailLogEntriesResponse]
            ]]:
        raise NotImplementedError()
# Public API of this module.
__all__ = (
    'LoggingServiceV2Transport',
)
| 40.67037 | 161 | 0.599854 |
acef6548e252c6814d2baaef7f08a70aa5ea3b3f | 1,421 | py | Python | flight_simulator.py | BirdmanTeamShootingStars/FlightSimulator | 05e579b28f0c6ab56bb8e46e8f1deea01b9ec8b0 | [
"MIT"
] | null | null | null | flight_simulator.py | BirdmanTeamShootingStars/FlightSimulator | 05e579b28f0c6ab56bb8e46e8f1deea01b9ec8b0 | [
"MIT"
] | null | null | null | flight_simulator.py | BirdmanTeamShootingStars/FlightSimulator | 05e579b28f0c6ab56bb8e46e8f1deea01b9ec8b0 | [
"MIT"
] | null | null | null | from math import *
import numpy as np
import param
from State import *
import matplotlib.pyplot as plt
from file_func import *
# Ordinary differential equation: the state derivative for the integrator.
def func(state, alpha):
    # Delegates to the State object's dt() method; alpha is the control
    # input (angle of attack) for this step.
    return state.dt(alpha)
def runge_kutta(state0, dt, t_list, alpha_list):
    """Integrate the flight ODE with the classic 4th-order Runge-Kutta scheme.

    Starting from ``state0``, one RK4 step of size ``dt`` is taken per
    interval of ``t_list``, using the matching entry of ``alpha_list`` as the
    control input. Returns the list of states, one per time sample
    (``len(t_list)`` entries, including the initial state).
    """
    states = [state0]
    for step in range(len(t_list) - 1):
        current = states[step]
        alpha = alpha_list[step]
        # Four derivative evaluations of the RK4 scheme.
        k1 = func(current, alpha) * dt
        k2 = func(current + k1 * 0.5, alpha) * dt
        k3 = func(current + k2 * 0.5, alpha) * dt
        k4 = func(current + k3, alpha) * dt
        # Weighted average increment: (k1 + 2*k2 + 2*k3 + k4) / 6.
        increment = (k1 + k2 * 2 + k3 * 2 + k4) / 6
        states.append(current + increment)
    return states
# Show a graph of the simulated trajectory.
def plot_state_list(state_list):
    """Plot distance (state.x) against height (state.y) with equal axes."""
    x_values = np.array([state.x for state in state_list])
    y_values = np.array([state.y for state in state_list])
    fig, axs = plt.subplots()
    axs.plot(x_values, y_values)
    axs.set_title('trajectory')
    plt.xlabel('distance')
    plt.ylabel('height')
    # Equal scaling so the flight path is not distorted.
    axs.axis('equal')
    plt.show()
if __name__ == '__main__':
    # Initial state comes from the parameter module.
    state0 = param.STATE0
    # Integrate from t0 to end_t seconds with a 0.1 s step.
    dt = 0.1
    t0 = 0
    end_t = 15
    t_list = np.arange(t0,end_t+dt,dt)
    # Zero control input (angle of attack) for the whole flight.
    alpha_list = np.zeros(len(t_list))
    state_list = runge_kutta(state0,dt,t_list,alpha_list)
    #store_trajectory(t_list,state_list,alpha_list,'./data/let_it_be.csv')
    plot_state_list(state_list)
| 27.862745 | 74 | 0.651654 |
acef6684e9d5202c9bccb749a469effbe4cc0527 | 394 | py | Python | example/pyex.py | ayanc/ntviz | 8280ae6902cd26b75f9ef3003ae09d23e25378f5 | [
"MIT"
] | 2 | 2016-02-05T22:59:43.000Z | 2016-02-06T00:31:05.000Z | example/pyex.py | ayanc/ntviz | 8280ae6902cd26b75f9ef3003ae09d23e25378f5 | [
"MIT"
] | null | null | null | example/pyex.py | ayanc/ntviz | 8280ae6902cd26b75f9ef3003ae09d23e25378f5 | [
"MIT"
] | null | null | null | import numpy as np
import ntplot as ntp
# Demo script: build three sample curves and render them to an HTML plot.
x = np.asarray(range(100),dtype=np.float32)
# Two dense curves sharing the same envelope, offset by sin/cos ripples.
y1 = 10 - np.exp((-x/100)**2) + np.sin(x/10*np.pi)/16
y2 = 10 - np.exp((-x/100)**2) + np.cos(x/10*np.pi)/16
# A sparser, shifted sample set (every other point, offset by 10).
x2 = x[::2]+10
y3 = 10 - np.exp((-x2/100)**2/2)+ np.cos(x2/10*np.pi)/16
plt = ntp.figure()
# Series names use a "group.name" convention ('x.y1', 'x2.y3').
plt.plot(x*1000,y1,'x.y1')
plt.plot(x*1000,y2,'x.y2')
plt.plot(x2*1000,y3,'x2.y3')
# Write the interactive figure out as a standalone HTML file.
plt.save('pyex.html')
acef67a27a5b008d3ad6babec7452dde9bb65262 | 4,213 | py | Python | python_modules/dagster-graphql/dagster_graphql/implementation/execution/run_lifecycle.py | asamoal/dagster | 08fad28e4b608608ce090ce2e8a52c2cf9dd1b64 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster-graphql/dagster_graphql/implementation/execution/run_lifecycle.py | asamoal/dagster | 08fad28e4b608608ce090ce2e8a52c2cf9dd1b64 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster-graphql/dagster_graphql/implementation/execution/run_lifecycle.py | asamoal/dagster | 08fad28e4b608608ce090ce2e8a52c2cf9dd1b64 | [
"Apache-2.0"
] | null | null | null | from graphql.execution.base import ResolveInfo
import dagster._check as check
from dagster.core.execution.plan.resume_retry import get_retry_steps_from_parent_run
from dagster.core.execution.plan.state import KnownExecutionState
from dagster.core.storage.pipeline_run import PipelineRunStatus
from dagster.core.storage.tags import RESUME_RETRY_TAG
from dagster.core.utils import make_new_run_id
from dagster.utils import merge_dicts
from ...schema.errors import GrapheneNoModeProvidedError
from ..external import ensure_valid_config, get_external_execution_plan_or_raise
from ..utils import ExecutionParams, UserFacingGraphQLError
def compute_step_keys_to_execute(graphene_info, execution_params):
check.inst_param(graphene_info, "graphene_info", ResolveInfo)
check.inst_param(execution_params, "execution_params", ExecutionParams)
instance = graphene_info.context.instance
if not execution_params.step_keys and is_resume_retry(execution_params):
# Get step keys from parent_run_id if it's a resume/retry
return get_retry_steps_from_parent_run(
instance, execution_params.execution_metadata.parent_run_id
)
else:
known_state = None
if execution_params.execution_metadata.parent_run_id and execution_params.step_keys:
known_state = KnownExecutionState.for_reexecution(
instance.all_logs(execution_params.execution_metadata.parent_run_id),
execution_params.step_keys,
)
return execution_params.step_keys, known_state
def is_resume_retry(execution_params):
check.inst_param(execution_params, "execution_params", ExecutionParams)
return execution_params.execution_metadata.tags.get(RESUME_RETRY_TAG) == "true"
def create_valid_pipeline_run(graphene_info, external_pipeline, execution_params):
if execution_params.mode is None and len(external_pipeline.available_modes) > 1:
raise UserFacingGraphQLError(
GrapheneNoModeProvidedError(external_pipeline.name, external_pipeline.available_modes)
)
elif execution_params.mode is None and len(external_pipeline.available_modes) == 1:
mode = external_pipeline.available_modes[0]
else:
mode = execution_params.mode
ensure_valid_config(external_pipeline, mode, execution_params.run_config)
step_keys_to_execute, known_state = compute_step_keys_to_execute(
graphene_info, execution_params
)
external_execution_plan = get_external_execution_plan_or_raise(
graphene_info=graphene_info,
external_pipeline=external_pipeline,
mode=mode,
run_config=execution_params.run_config,
step_keys_to_execute=step_keys_to_execute,
known_state=known_state,
)
tags = merge_dicts(external_pipeline.tags, execution_params.execution_metadata.tags)
pipeline_run = graphene_info.context.instance.create_run(
pipeline_snapshot=external_pipeline.pipeline_snapshot,
execution_plan_snapshot=external_execution_plan.execution_plan_snapshot,
parent_pipeline_snapshot=external_pipeline.parent_pipeline_snapshot,
pipeline_name=execution_params.selector.pipeline_name,
run_id=execution_params.execution_metadata.run_id
if execution_params.execution_metadata.run_id
else make_new_run_id(),
asset_selection=frozenset(execution_params.selector.asset_selection)
if execution_params.selector.asset_selection
else None,
solid_selection=execution_params.selector.solid_selection,
solids_to_execute=frozenset(execution_params.selector.solid_selection)
if execution_params.selector.solid_selection
else None,
run_config=execution_params.run_config,
mode=mode,
step_keys_to_execute=step_keys_to_execute,
tags=tags,
root_run_id=execution_params.execution_metadata.root_run_id,
parent_run_id=execution_params.execution_metadata.parent_run_id,
status=PipelineRunStatus.NOT_STARTED,
external_pipeline_origin=external_pipeline.get_external_origin(),
pipeline_code_origin=external_pipeline.get_python_origin(),
)
return pipeline_run
| 43.43299 | 98 | 0.77878 |
acef67e81829951cc60d8d93628fc4b6fdf0ff49 | 6,671 | py | Python | tensorpack/dataflow/imgaug/misc.py | yunhuiguo/tensorpack | 91ce2260e5dc41b802b1a39b8b65ae6bee7ac719 | [
"Apache-2.0"
] | 4 | 2018-12-12T02:42:34.000Z | 2019-08-27T17:12:53.000Z | tensorpack/dataflow/imgaug/misc.py | yunhuiguo/tensorpack | 91ce2260e5dc41b802b1a39b8b65ae6bee7ac719 | [
"Apache-2.0"
] | null | null | null | tensorpack/dataflow/imgaug/misc.py | yunhuiguo/tensorpack | 91ce2260e5dc41b802b1a39b8b65ae6bee7ac719 | [
"Apache-2.0"
] | null | null | null | # -*- coding: UTF-8 -*-
# File: misc.py
import numpy as np
import cv2
from .base import ImageAugmentor
from ...utils import logger
from ...utils.argtools import shape2d
from .transform import ResizeTransform, TransformAugmentorBase
__all__ = ['Flip', 'Resize', 'RandomResize', 'ResizeShortestEdge', 'Transpose']
class Flip(ImageAugmentor):
"""
Random flip the image either horizontally or vertically.
"""
def __init__(self, horiz=False, vert=False, prob=0.5):
"""
Args:
horiz (bool): use horizontal flip.
vert (bool): use vertical flip.
prob (float): probability of flip.
"""
super(Flip, self).__init__()
if horiz and vert:
raise ValueError("Cannot do both horiz and vert. Please use two Flip instead.")
elif horiz:
self.code = 1
elif vert:
self.code = 0
else:
raise ValueError("At least one of horiz or vert has to be True!")
self._init(locals())
def _get_augment_params(self, img):
h, w = img.shape[:2]
do = self._rand_range() < self.prob
return (do, h, w)
def _augment(self, img, param):
do, _, _ = param
if do:
ret = cv2.flip(img, self.code)
if img.ndim == 3 and ret.ndim == 2:
ret = ret[:, :, np.newaxis]
else:
ret = img
return ret
def _augment_coords(self, coords, param):
do, h, w = param
if do:
if self.code == 0:
coords[:, 1] = h - coords[:, 1]
elif self.code == 1:
coords[:, 0] = w - coords[:, 0]
return coords
class Resize(TransformAugmentorBase):
    """ Resize image to a target size"""

    def __init__(self, shape, interp=cv2.INTER_LINEAR):
        """
        Args:
            shape: (h, w) tuple or a int
            interp: cv2 interpolation method
        """
        shape = tuple(shape2d(shape))
        self._init(locals())

    def _get_augment_params(self, img):
        src_h, src_w = img.shape[:2]
        dst_h, dst_w = self.shape
        return ResizeTransform(src_h, src_w, dst_h, dst_w, self.interp)
class ResizeShortestEdge(TransformAugmentorBase):
    """
    Rescale the image so that its shortest edge has a given length,
    keeping the aspect ratio unchanged.
    """

    def __init__(self, size, interp=cv2.INTER_LINEAR):
        """
        Args:
            size (int): the size to resize the shortest edge to.
        """
        size = int(size)
        self._init(locals())

    def _get_augment_params(self, img):
        h, w = img.shape[:2]
        # Scale factor that maps the shorter edge exactly to self.size.
        scale = self.size * 1.0 / min(h, w)
        # The longer edge is scaled by the same factor and rounded to nearest.
        newh = self.size if h < w else int(scale * h + 0.5)
        neww = int(scale * w + 0.5) if h < w else self.size
        return ResizeTransform(h, w, newh, neww, self.interp)
class RandomResize(TransformAugmentorBase):
    """ Randomly rescale width and height of the image."""

    def __init__(self, xrange, yrange=None, minimum=(0, 0), aspect_ratio_thres=0.15,
                 interp=cv2.INTER_LINEAR):
        """
        Args:
            xrange (tuple): a (min, max) tuple. If is floating point, the
                tuple defines the range of scaling ratio of new width, e.g. (0.9, 1.2).
                If is integer, the tuple defines the range of new width in pixels, e.g. (200, 350).
            yrange (tuple): similar to xrange, but for height. Should be None when aspect_ratio_thres==0.
            minimum (tuple): (xmin, ymin) in pixels. To avoid scaling down too much.
            aspect_ratio_thres (float): discard samples which change aspect ratio
                larger than this threshold. Set to 0 to keep aspect ratio.
            interp: cv2 interpolation method
        """
        super(RandomResize, self).__init__()
        assert aspect_ratio_thres >= 0
        self._init(locals())

        def is_float(tp):
            # A range is treated as "scale mode" if either endpoint is a float.
            return isinstance(tp[0], float) or isinstance(tp[1], float)
        if yrange is not None:
            assert is_float(xrange) == is_float(yrange), "xrange and yrange has different type!"
        self._is_scale = is_float(xrange)
        if aspect_ratio_thres == 0:
            # Aspect ratio must be preserved exactly; height is derived from
            # width, so an independent yrange is either redundant or ignored.
            if self._is_scale:
                assert xrange == yrange or yrange is None
            else:
                if yrange is not None:
                    logger.warn("aspect_ratio_thres==0, yrange is not used!")

    def _get_augment_params(self, img):
        cnt = 0
        h, w = img.shape[:2]

        def get_dest_size():
            # Sample a candidate target (width, height) in pixels, according
            # to the configured mode (scale ratios vs. absolute pixel ranges),
            # clamped below by self.minimum.
            if self._is_scale:
                sx = self._rand_range(*self.xrange)
                if self.aspect_ratio_thres == 0:
                    sy = sx
                else:
                    sy = self._rand_range(*self.yrange)
                destX = max(sx * w, self.minimum[0])
                destY = max(sy * h, self.minimum[1])
            else:
                sx = self._rand_range(*self.xrange)
                if self.aspect_ratio_thres == 0:
                    # Derive the height from the width to keep the aspect ratio.
                    sy = sx * 1.0 / w * h
                else:
                    sy = self._rand_range(*self.yrange)
                destX = max(sx, self.minimum[0])
                destY = max(sy, self.minimum[1])
            return (int(destX + 0.5), int(destY + 0.5))

        while True:
            destX, destY = get_dest_size()
            if self.aspect_ratio_thres > 0:  # don't check when thres == 0
                oldr = w * 1.0 / h
                newr = destX * 1.0 / destY
                diff = abs(newr - oldr) / oldr
                if diff >= self.aspect_ratio_thres + 1e-5:
                    # Rejection sampling: retry up to 50 times, then give up
                    # and fall back to an identity resize.
                    cnt += 1
                    if cnt > 50:
                        logger.warn("RandomResize failed to augment an image")
                        return ResizeTransform(h, w, h, w, self.interp)
                    continue
            return ResizeTransform(h, w, destY, destX, self.interp)
class Transpose(ImageAugmentor):
    """
    Random transpose the image
    """

    def __init__(self, prob=0.5):
        """
        Args:
            prob (float): probability of transpose.
        """
        super(Transpose, self).__init__()
        self.prob = prob
        self._init()

    def _get_augment_params(self, img):
        return self._rand_range() < self.prob

    def _augment(self, img, do):
        if not do:
            return img
        out = cv2.transpose(img)
        if img.ndim == 3 and out.ndim == 2:
            # cv2 drops the channel axis for single-channel input; restore it.
            out = out[:, :, np.newaxis]
        return out

    def _augment_coords(self, coords, do):
        # Transposing the image swaps the x and y coordinates.
        return coords[:, ::-1] if do else coords
| 32.383495 | 105 | 0.533503 |
acef6a5e5848b90191e6594d1fed25724ab22b24 | 5,359 | py | Python | forge/blade/core/terrain.py | alexandonian/neural-mmo | a4879c3399971ede81b64f507ee81706ba0d3366 | [
"MIT"
] | 4 | 2020-11-08T22:33:15.000Z | 2020-11-21T15:45:43.000Z | forge/blade/core/terrain.py | ThomasCloarec/neural-mmo | 094744f49ad2cff179ec21e27285258903b70098 | [
"MIT"
] | null | null | null | forge/blade/core/terrain.py | ThomasCloarec/neural-mmo | 094744f49ad2cff179ec21e27285258903b70098 | [
"MIT"
] | null | null | null | from pdb import set_trace as T
import numpy as np
import os
import vec_noise
from imageio import imread, imsave
from tqdm import tqdm
from forge.blade.lib import material
def mkdir(path):
try:
os.mkdir(path)
except:
pass
def sharp(self, noise):
    """Fold a noise value in [0, 1] into a tent shape peaking at 0.5.

    Returns 1.0 at noise == 0.5 and falls off linearly to 0.0 at both 0 and 1.
    (Kept with the original ``self`` parameter to preserve the call signature.)
    """
    return 2.0 * (0.5 - abs(noise - 0.5))
class Save:
    """Static helpers that serialize a generated map to disk."""

    @staticmethod
    def render(mats, lookup, path):
        """Render the material grid as an image by tiling textures.

        Args:
            mats: 2D grid (rows of material indices).
            lookup: dict mapping material index -> texture array.
            path: output image file path.
        """
        images = [[lookup[e] for e in l] for l in mats]
        image = np.vstack([np.hstack(e) for e in images])
        imsave(path, image)

    @staticmethod
    def fractal(terrain, path):
        """Save the fractal heightmap (values in [0, 1]) as an 8-bit image."""
        frac = (256*terrain).astype(np.uint8)
        imsave(path, frac)

    @staticmethod
    def np(mats, path):
        """Save the material grid as ``map.npy`` inside directory *path*.

        Args:
            mats: 2D numpy array of material enum indices.
            path: output directory (created if missing).
        """
        mkdir(path)
        path = os.path.join(path, 'map.npy')
        # np.int was removed in NumPy 1.24; the builtin int is the
        # equivalent (platform default integer) dtype specifier.
        np.save(path, mats.astype(int))
class Terrain:
    """Empty namespace holding tile material indices.

    Attributes such as ``Terrain.GRASS`` or ``Terrain.LAVA`` are injected
    at runtime by ``MapGenerator.loadTextures`` via ``setattr``, one per
    entry in ``material.All``.
    """
    pass
class MapGenerator:
    """Procedural terrain map generator.

    Builds fractal-noise heightmaps, thresholds them into tile materials,
    and writes each map (``map.npy`` plus optional rendered previews) to disk.
    """

    def __init__(self, config):
        """
        Args:
            config: experiment config object providing TERRAIN_* settings,
                file paths, and map counts.
        """
        self.config = config
        self.loadTextures()

    def loadTextures(self):
        """Load tile textures and register material indices on ``Terrain``.

        For every material in ``material.All``: reads its texture image,
        keeps the first three channels downsampled 4x in both spatial
        dimensions, records it in ``self.textures`` keyed by material
        index, and exposes the index as an attribute on the ``Terrain``
        class (e.g. ``Terrain.GRASS``).
        """
        lookup = {}
        path = self.config.PATH_TILE
        for mat in material.All:
            key = mat.tex
            tex = imread(path.format(key))
            # Keep the first 3 channels (drops alpha if present) and
            # downsample the texture by 4x.
            mat.tex = tex[:, :, :3][::4, ::4]
            lookup[mat.index] = mat.tex
            setattr(Terrain, key.upper(), mat.index)
        self.textures = lookup

    def material(self, config, val, gamma=0):
        """Map a noise value to a tile material index.

        Args:
            config: config with TERRAIN_* threshold constants.
            val (float): fractal noise value for this tile.
            gamma (float): blend factor in [0, 1] that shifts the
                forest/grass thresholds (derived from distance to center).

        Returns:
            One of the ``Terrain`` material indices.
        """
        assert 0 <= gamma <= 1
        # gamma interpolates the threshold offsets: alpha widens the low
        # forest band, beta widens the grass band.
        alpha = (1 - gamma) * config.TERRAIN_ALPHA
        beta = config.TERRAIN_BETA * gamma
        # Exact sentinel match for lava (not a range check).
        if val == config.TERRAIN_LAVA:
            return Terrain.LAVA
        if val <= config.TERRAIN_WATER:
            return Terrain.WATER
        if val <= config.TERRAIN_FOREST_LOW - alpha:
            return Terrain.FOREST
        if val <= config.TERRAIN_GRASS + beta:
            return Terrain.GRASS
        if val <= config.TERRAIN_FOREST_HIGH:
            return Terrain.FOREST
        return Terrain.STONE

    def generate(self):
        """Generate and save all training and evaluation maps.

        Evaluation maps use seeds ``-N_EVAL_MAPS..-1``; training maps use
        seeds ``1..N_TRAIN_MAPS``. Each map is written as ``map.npy``,
        optionally with ``fractal.png``/``map.png`` previews when
        ``TERRAIN_RENDER`` is set.
        """
        config = self.config
        # Output directory depends on which config class is in use.
        if config.__class__.__name__ == 'SmallMaps':
            prefix = config.PATH_MAPS_SMALL
        elif config.__class__.__name__ == 'LargeMaps':
            prefix = config.PATH_MAPS_LARGE
        else:
            prefix = config.PATH_MAPS

        #Train and eval map indices
        msg = 'Generating {} training and {} evaluation maps:'
        evalMaps = range(-config.N_EVAL_MAPS, 0)
        trainMaps = range(1, config.N_TRAIN_MAPS+1)

        print(msg.format(config.N_TRAIN_MAPS, config.N_EVAL_MAPS))
        for seed in tqdm([*evalMaps, *trainMaps]):
            path = prefix + '/map' + str(seed)
            mkdir(prefix)
            mkdir(path)

            terrain, tiles = self.grid(config, seed)
            Save.np(tiles, path)

            if config.TERRAIN_RENDER:
                Save.fractal(terrain, path+'/fractal.png')
                Save.render(tiles, self.textures, path+'/map.png')

    def grid(self, config, seed):
        """Create one map: a fractal heightmap and its material grid.

        Args:
            config: config with terrain generation parameters.
            seed (int): map seed; offsets the noise sampling coordinates so
                each map is distinct.

        Returns:
            (val, matl): the blended noise array (TERRAIN_SIZE x TERRAIN_SIZE
            floats in [0, 1]) and the material index grid (dtype=object).
        """
        sz = config.TERRAIN_SIZE
        frequency = config.TERRAIN_FREQUENCY
        octaves = config.TERRAIN_OCTAVES
        mode = config.TERRAIN_MODE
        lerp = config.TERRAIN_LERP
        border = config.TERRAIN_BORDER
        waterRadius = config.TERRAIN_WATER_RADIUS
        spawnRegion = config.TERRAIN_CENTER_REGION
        spawnWidth = config.TERRAIN_CENTER_WIDTH

        assert mode in {'expand', 'contract', 'flat'}

        val = np.zeros((sz, sz, octaves))
        s = np.arange(sz)
        X, Y = np.meshgrid(s, s)

        #Compute noise over logscaled octaves
        start, end = frequency
        for idx, freq in enumerate(np.logspace(start, end, octaves, base=2)):
            # 2D simplex noise in [-1, 1], rescaled to [0, 1]. The seed
            # shifts the x sampling coordinates; the octave index shifts y.
            val[:, :, idx] = 0.5 + 0.5*vec_noise.snoise2(seed*sz + freq*X, idx*sz + freq*Y)

        #Compute L1 and L2 distances
        # Distances are measured from the map center (symmetric ramp).
        x = np.concatenate([np.arange(sz//2, 0, -1), np.arange(1, sz//2+1)])
        X, Y = np.meshgrid(x, x)
        data = np.stack((X, Y), -1)
        l1 = np.max(abs(data), -1)
        l2 = np.sqrt(np.sum(data**2, -1))
        thresh = l1

        #Linear octave blend mask
        # NOTE(review): when mode == 'flat' and octaves > 1, ``v`` is never
        # assigned below and this branch would raise NameError — confirm
        # 'flat' is only used with a single octave.
        if octaves > 1:
            dist = np.linspace(0.5/octaves, 1-0.5/octaves, octaves)[None, None, :]
            norm = 2 * l1[:, :, None] / sz
            if mode == 'contract':
                v = 1 - abs(1 - norm - dist)
            elif mode == 'expand':
                v = 1 - abs(norm - dist)
            v = (2*octaves-1) * (v - 1) + 1
            v = np.clip(v, 0, 1)
            # Normalize per-tile octave weights, then blend octaves.
            v /= np.sum(v, -1)[:, :, None]
            val = np.sum(v*val, -1)

        l1 = 1 - 2*l1/sz
        #Compute distance from the edges inward
        if mode == 'contract':
            l1 = 1 - l1
        if not lerp:
            # Disable the distance-based threshold interpolation: use a
            # constant gamma of 0.5 everywhere.
            l1 = 0.5 + 0*l1

        #Threshold to materials
        matl = np.zeros((sz, sz), dtype=object)
        for y in range(sz):
            for x in range(sz):
                matl[y, x] = self.material(config, val[y, x], l1[y, x])

        #Lava border and center crop
        matl[thresh > sz//2 - border] = Terrain.LAVA

        #Grass border or center spawn region
        if mode == 'expand':
            # Concentric rings around the center: grass, stone, water core.
            matl[thresh <= spawnRegion] = Terrain.GRASS
            matl[thresh <= spawnRegion-spawnWidth] = Terrain.STONE
            matl[thresh <= spawnRegion-spawnWidth-1] = Terrain.WATER
        elif mode == 'contract':
            # Grass ring just inside the lava border; water pool at center.
            matl[thresh == sz//2 - border] = Terrain.GRASS
            matl[l2 < waterRadius + 1] = Terrain.GRASS
            matl[l2 < waterRadius] = Terrain.WATER

        return val, matl
| 30.798851 | 88 | 0.577347 |
acef6a7735068d541c07e363770335ac767aede5 | 27,145 | py | Python | constants.py | masato1230/FreeCashFlowExample | abc43d245c2115c2cdae445503ac96001acfa602 | [
"MIT"
] | null | null | null | constants.py | masato1230/FreeCashFlowExample | abc43d245c2115c2cdae445503ac96001acfa602 | [
"MIT"
] | null | null | null | constants.py | masato1230/FreeCashFlowExample | abc43d245c2115c2cdae445503ac96001acfa602 | [
"MIT"
] | null | null | null | # テスト用のリスト
securities_code_list = [
7180, 7181, 7182, 7183, 7184, 7185, 7186, 7187, 7189, 7190, 7191, 7192,
7196, 7198, 7199, 7201, 7202, 7203, 7205, 7208, 7211, 7212, 7213, 7214,
7215, 7217, 7218, 7219, 7220, 7222, 7224, 7226, 7228, 7229, 7231, 7235,
7236, 7238, 7239, 7240, 7241, 7242, 7244, 7245, 7246, 7247, 7250, 7254,
7255, 7256, 7259, 7261, 7264, 7265, 7266, 7267, 7268, 7269, 7270, 7271,
7272, 7273, 7276, 7277, 7278, 7279, 7280, 7282, 7283, 7284, 7287, 7291
]
# 下のリストが本番用
# securities_code_list = [
# 1301, 1305, 1306, 1308, 1309, 1311, 1312, 1313, 1319, 1320, 1321, 1322,
# 1323, 1324, 1325, 1326, 1327, 1328, 1329, 1330, 1332, 1333, 1343, 1344,
# 1345, 1346, 1348, 1349, 1352, 1356, 1357, 1358, 1360, 1364, 1365, 1366,
# 1367, 1368, 1369, 1375, 1376, 1377, 1379, 1380, 1381, 1382, 1383, 1384,
# 1385, 1386, 1387, 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1397, 1398,
# 1399, 1400, 1401, 1407, 1413, 1414, 1417, 1418, 1419, 1420, 1429, 1430,
# 1431, 1432, 1433, 1434, 1435, 1436, 1439, 1440, 1443, 1445, 1446, 1447,
# 1448, 1450, 1451, 1452, 1456, 1457, 1458, 1459, 1460, 1464, 1465, 1466,
# 1467, 1468, 1469, 1470, 1471, 1472, 1473, 1474, 1475, 1476, 1477, 1478,
# 1479, 1480, 1481, 1482, 1483, 1484, 1485, 1486, 1487, 1488, 1489, 1490,
# 1491, 1492, 1493, 1494, 1495, 1496, 1497, 1498, 1499, 1514, 1515, 1518,
# 1540, 1541, 1542, 1543, 1545, 1546, 1547, 1550, 1551, 1552, 1554, 1555,
# 1557, 1559, 1560, 1563, 1566, 1567, 1568, 1569, 1570, 1571, 1572, 1573,
# 1574, 1575, 1576, 1577, 1578, 1579, 1580, 1584, 1585, 1586, 1591, 1592,
# 1593, 1595, 1596, 1597, 1598, 1599, 1605, 1615, 1617, 1618, 1619, 1620,
# 1621, 1622, 1623, 1624, 1625, 1626, 1627, 1628, 1629, 1630, 1631, 1632,
# 1633, 1651, 1652, 1653, 1654, 1655, 1656, 1657, 1658, 1659, 1660, 1662,
# 1663, 1670, 1671, 1672, 1673, 1674, 1675, 1676, 1677, 1678, 1679, 1680,
# 1681, 1682, 1684, 1685, 1686, 1687, 1688, 1689, 1690, 1691, 1692, 1693,
# 1694, 1695, 1696, 1697, 1698, 1699, 1711, 1712, 1716, 1717, 1718, 1719,
# 1720, 1721, 1723, 1724, 1726, 1728, 1730, 1736, 1737, 1739, 1743, 1757,
# 1758, 1762, 1764, 1766, 1768, 1770, 1773, 1775, 1776, 1780, 1782, 1783,
# 1787, 1788, 1789, 1793, 1795, 1798, 1799, 1801, 1802, 1803, 1805, 1807,
# 1808, 1810, 1811, 1812, 1813, 1814, 1815, 1820, 1821, 1822, 1824, 1826,
# 1827, 1828, 1833, 1835, 1840, 1841, 1844, 1847, 1848, 1850, 1852, 1853,
# 1860, 1861, 1866, 1867, 1870, 1871, 1873, 1878, 1879, 1881, 1882, 1883,
# 1884, 1885, 1887, 1888, 1890, 1893, 1897, 1898, 1899, 1904, 1905, 1909,
# 1911, 1914, 1921, 1925, 1926, 1928, 1929, 1930, 1934, 1938, 1939, 1941,
# 1942, 1944, 1945, 1946, 1948, 1949, 1950, 1951, 1952, 1954, 1959, 1960,
# 1961, 1963, 1964, 1965, 1966, 1967, 1968, 1969, 1971, 1972, 1973, 1975,
# 1976, 1979, 1980, 1981, 1982, 1992, 1994, 1997, 2001, 2002, 2003, 2004,
# 2009, 2031, 2032, 2033, 2034, 2035, 2036, 2037, 2038, 2039, 2040, 2041,
# 2042, 2043, 2044, 2045, 2046, 2047, 2048, 2050, 2053, 2055, 2060, 2065,
# 2066, 2067, 2068, 2069, 2070, 2071, 2072, 2107, 2108, 2109, 2112, 2114,
# 2117, 2120, 2121, 2122, 2124, 2127, 2130, 2134, 2136, 2138, 2139, 2146,
# 2148, 2150, 2151, 2152, 2153, 2154, 2156, 2157, 2158, 2159, 2160, 2162,
# 2163, 2164, 2168, 2169, 2170, 2173, 2174, 2175, 2176, 2178, 2179, 2180,
# 2181, 2183, 2185, 2186, 2191, 2193, 2195, 2196, 2198, 2201, 2204, 2206,
# 2207, 2208, 2209, 2211, 2212, 2215, 2216, 2217, 2220, 2221, 2222, 2224,
# 2226, 2229, 2230, 2264, 2266, 2267, 2268, 2269, 2270, 2281, 2282, 2286,
# 2288, 2291, 2292, 2293, 2294, 2296, 2300, 2301, 2303, 2304, 2305, 2307,
# 2309, 2311, 2315, 2317, 2321, 2323, 2325, 2326, 2327, 2329, 2330, 2331,
# 2332, 2334, 2335, 2336, 2337, 2338, 2340, 2341, 2342, 2344, 2345, 2349,
# 2351, 2352, 2353, 2354, 2359, 2362, 2370, 2371, 2372, 2373, 2374, 2375,
# 2376, 2378, 2379, 2384, 2385, 2388, 2389, 2391, 2393, 2395, 2397, 2398,
# 2402, 2404, 2406, 2408, 2410, 2411, 2412, 2413, 2415, 2418, 2424, 2425,
# 2427, 2428, 2429, 2432, 2433, 2435, 2436, 2437, 2438, 2440, 2445, 2449,
# 2452, 2453, 2454, 2459, 2461, 2462, 2464, 2468, 2469, 2471, 2475, 2477,
# 2479, 2480, 2481, 2483, 2484, 2485, 2487, 2488, 2489, 2491, 2492, 2493,
# 2497, 2498, 2499, 2501, 2502, 2503, 2510, 2511, 2512, 2513, 2514, 2515,
# 2516, 2517, 2518, 2519, 2520, 2521, 2522, 2523, 2524, 2525, 2526, 2527,
# 2528, 2529, 2530, 2531, 2533, 2540, 2552, 2553, 2554, 2555, 2556, 2557,
# 2558, 2559, 2560, 2561, 2562, 2563, 2564, 2565, 2566, 2567, 2568, 2569,
# 2573, 2579, 2586, 2587, 2588, 2590, 2593, 2594, 2597, 2599, 2602, 2607,
# 2612, 2613, 2620, 2621, 2622, 2623, 2624, 2625, 2626, 2627, 2630, 2631,
# 2632, 2651, 2652, 2653, 2654, 2656, 2659, 2664, 2666, 2667, 2668, 2669,
# 2670, 2673, 2674, 2675, 2676, 2678, 2681, 2683, 2685, 2686, 2687, 2689,
# 2692, 2693, 2694, 2695, 2698, 2700, 2702, 2705, 2706, 2708, 2715, 2721,
# 2722, 2726, 2729, 2730, 2733, 2734, 2735, 2736, 2737, 2742, 2743, 2747,
# 2749, 2750, 2751, 2752, 2753, 2754, 2760, 2761, 2762, 2763, 2764, 2767,
# 2768, 2769, 2773, 2776, 2777, 2778, 2780, 2782, 2784, 2788, 2789, 2790,
# 2791, 2792, 2795, 2796, 2798, 2801, 2802, 2804, 2805, 2806, 2809, 2810,
# 2811, 2812, 2813, 2814, 2815, 2816, 2818, 2819, 2820, 2830, 2831, 2871,
# 2872, 2874, 2875, 2876, 2877, 2882, 2883, 2884, 2892, 2894, 2897, 2899,
# 2901, 2903, 2904, 2905, 2907, 2908, 2910, 2911, 2914, 2915, 2916, 2917,
# 2918, 2922, 2923, 2924, 2925, 2926, 2927, 2929, 2930, 2931, 2932, 2970,
# 2971, 2972, 2975, 2977, 2978, 2979, 2980, 2981, 2982, 2983, 2985, 2986,
# 2987, 3001, 3002, 3003, 3004, 3010, 3011, 3020, 3021, 3023, 3024, 3028,
# 3030, 3031, 3034, 3035, 3036, 3038, 3039, 3040, 3041, 3042, 3045, 3046,
# 3048, 3050, 3053, 3054, 3058, 3059, 3063, 3064, 3065, 3067, 3068, 3069,
# 3070, 3071, 3073, 3075, 3076, 3077, 3079, 3080, 3082, 3083, 3085, 3086,
# 3087, 3088, 3089, 3091, 3092, 3093, 3094, 3096, 3097, 3098, 3099, 3101,
# 3103, 3104, 3105, 3106, 3107, 3109, 3110, 3111, 3113, 3116, 3121, 3123,
# 3125, 3131, 3132, 3133, 3134, 3135, 3137, 3138, 3139, 3140, 3141, 3143,
# 3148, 3150, 3151, 3153, 3154, 3156, 3157, 3159, 3160, 3161, 3166, 3167,
# 3168, 3169, 3172, 3173, 3174, 3175, 3176, 3177, 3178, 3179, 3180, 3181,
# 3182, 3183, 3184, 3185, 3186, 3187, 3189, 3190, 3191, 3192, 3193, 3195,
# 3196, 3197, 3198, 3199, 3201, 3202, 3204, 3205, 3221, 3222, 3223, 3224,
# 3226, 3228, 3231, 3232, 3234, 3236, 3237, 3238, 3241, 3242, 3244, 3245,
# 3246, 3248, 3249, 3252, 3254, 3261, 3264, 3266, 3267, 3269, 3271, 3275,
# 3276, 3277, 3278, 3279, 3280, 3281, 3282, 3283, 3284, 3286, 3287, 3288,
# 3289, 3290, 3291, 3292, 3293, 3294, 3295, 3296, 3297, 3298, 3299, 3300,
# 3302, 3306, 3309, 3315, 3316, 3317, 3319, 3320, 3321, 3322, 3323, 3326,
# 3328, 3329, 3333, 3341, 3344, 3347, 3349, 3350, 3352, 3353, 3355, 3356,
# 3358, 3359, 3360, 3361, 3370, 3371, 3372, 3374, 3375, 3376, 3377, 3382,
# 3386, 3387, 3388, 3390, 3391, 3392, 3393, 3395, 3396, 3397, 3399, 3401,
# 3402, 3405, 3407, 3408, 3409, 3415, 3416, 3417, 3418, 3420, 3421, 3423,
# 3426, 3431, 3433, 3434, 3435, 3436, 3437, 3439, 3440, 3441, 3443, 3444,
# 3445, 3446, 3447, 3448, 3449, 3451, 3452, 3453, 3454, 3455, 3456, 3457,
# 3458, 3459, 3461, 3462, 3463, 3464, 3465, 3466, 3467, 3468, 3469, 3470,
# 3471, 3472, 3474, 3475, 3476, 3477, 3478, 3479, 3480, 3481, 3482, 3483,
# 3484, 3486, 3487, 3488, 3489, 3490, 3491, 3492, 3493, 3494, 3495, 3496,
# 3497, 3498, 3501, 3512, 3513, 3515, 3521, 3524, 3526, 3528, 3529, 3536,
# 3537, 3538, 3539, 3540, 3541, 3542, 3543, 3544, 3546, 3547, 3548, 3549,
# 3550, 3551, 3553, 3556, 3557, 3558, 3559, 3560, 3561, 3562, 3563, 3565,
# 3566, 3569, 3571, 3577, 3578, 3580, 3583, 3591, 3593, 3597, 3598, 3600,
# 3604, 3607, 3608, 3611, 3612, 3622, 3623, 3624, 3625, 3626, 3627, 3628,
# 3630, 3632, 3633, 3634, 3635, 3636, 3639, 3640, 3641, 3645, 3646, 3647,
# 3648, 3649, 3652, 3653, 3655, 3656, 3657, 3658, 3659, 3660, 3661, 3662,
# 3663, 3664, 3665, 3666, 3667, 3668, 3670, 3671, 3672, 3673, 3674, 3675,
# 3676, 3677, 3678, 3679, 3680, 3681, 3682, 3683, 3686, 3687, 3688, 3689,
# 3690, 3691, 3692, 3693, 3694, 3695, 3696, 3697, 3698, 3708, 3710, 3712,
# 3719, 3723, 3726, 3727, 3733, 3738, 3741, 3744, 3747, 3750, 3751, 3753,
# 3758, 3760, 3762, 3763, 3765, 3766, 3768, 3769, 3770, 3771, 3772, 3773,
# 3774, 3776, 3777, 3778, 3779, 3782, 3784, 3787, 3788, 3791, 3793, 3796,
# 3798, 3799, 3800, 3802, 3803, 3804, 3807, 3810, 3814, 3815, 3816, 3817,
# 3823, 3825, 3826, 3834, 3835, 3836, 3837, 3839, 3840, 3841, 3842, 3843,
# 3844, 3845, 3847, 3848, 3850, 3851, 3852, 3853, 3854, 3856, 3857, 3858,
# 3861, 3863, 3864, 3865, 3877, 3878, 3880, 3891, 3892, 3895, 3896, 3900,
# 3901, 3902, 3903, 3904, 3905, 3906, 3907, 3908, 3909, 3910, 3911, 3912,
# 3913, 3914, 3915, 3916, 3917, 3918, 3919, 3920, 3921, 3922, 3923, 3924,
# 3925, 3926, 3927, 3928, 3929, 3930, 3931, 3932, 3933, 3934, 3935, 3936,
# 3937, 3939, 3940, 3941, 3944, 3945, 3946, 3947, 3948, 3950, 3951, 3953,
# 3954, 3955, 3956, 3960, 3961, 3962, 3963, 3964, 3965, 3966, 3967, 3968,
# 3969, 3970, 3974, 3975, 3976, 3978, 3979, 3981, 3983, 3984, 3985, 3986,
# 3987, 3988, 3989, 3990, 3991, 3992, 3993, 3994, 3995, 3996, 3997, 3998,
# 3999, 4004, 4005, 4008, 4011, 4012, 4013, 4014, 4015, 4016, 4017, 4018,
# 4019, 4020, 4021, 4022, 4023, 4025, 4026, 4027, 4028, 4031, 4041, 4042,
# 4043, 4044, 4045, 4046, 4047, 4051, 4052, 4053, 4054, 4055, 4056, 4057,
# 4058, 4059, 4060, 4061, 4062, 4063, 4064, 4078, 4080, 4082, 4088, 4091,
# 4092, 4093, 4094, 4095, 4097, 4098, 4099, 4100, 4102, 4107, 4109, 4112,
# 4113, 4114, 4115, 4116, 4118, 4119, 4120, 4124, 4151, 4165, 4166, 4167,
# 4168, 4169, 4170, 4171, 4172, 4173, 4174, 4175, 4182, 4183, 4185, 4186,
# 4187, 4188, 4189, 4202, 4203, 4204, 4205, 4206, 4208, 4212, 4215, 4216,
# 4218, 4220, 4221, 4222, 4224, 4228, 4229, 4231, 4234, 4235, 4237, 4238,
# 4240, 4241, 4242, 4243, 4245, 4246, 4248, 4249, 4250, 4251, 4272, 4274,
# 4275, 4282, 4284, 4286, 4287, 4288, 4290, 4293, 4295, 4298, 4299, 4301,
# 4304, 4307, 4308, 4310, 4312, 4316, 4317, 4318, 4319, 4320, 4321, 4323,
# 4324, 4326, 4327, 4331, 4333, 4334, 4335, 4336, 4337, 4341, 4342, 4343,
# 4344, 4345, 4346, 4347, 4348, 4350, 4351, 4355, 4356, 4361, 4362, 4364,
# 4365, 4366, 4367, 4368, 4369, 4380, 4381, 4382, 4383, 4384, 4385, 4386,
# 4387, 4388, 4389, 4390, 4391, 4392, 4393, 4394, 4395, 4396, 4397, 4398,
# 4399, 4401, 4403, 4404, 4406, 4409, 4410, 4420, 4421, 4422, 4423, 4424,
# 4425, 4426, 4427, 4428, 4429, 4430, 4431, 4433, 4434, 4435, 4436, 4437,
# 4438, 4439, 4440, 4441, 4442, 4443, 4444, 4445, 4446, 4448, 4449, 4450,
# 4452, 4461, 4462, 4463, 4464, 4465, 4471, 4475, 4476, 4477, 4478, 4479,
# 4480, 4481, 4482, 4483, 4484, 4485, 4486, 4487, 4488, 4490, 4491, 4492,
# 4493, 4494, 4495, 4496, 4497, 4499, 4502, 4503, 4506, 4507, 4512, 4514,
# 4516, 4517, 4519, 4521, 4523, 4524, 4526, 4527, 4528, 4530, 4531, 4534,
# 4536, 4538, 4539, 4540, 4541, 4543, 4544, 4547, 4548, 4549, 4550, 4551,
# 4552, 4553, 4554, 4555, 4556, 4558, 4559, 4563, 4564, 4565, 4568, 4569,
# 4570, 4571, 4572, 4574, 4575, 4576, 4577, 4578, 4579, 4581, 4582, 4583,
# 4584, 4586, 4587, 4588, 4591, 4592, 4593, 4594, 4595, 4596, 4597, 4598,
# 4599, 4611, 4612, 4613, 4615, 4616, 4617, 4619, 4620, 4621, 4623, 4624,
# 4625, 4626, 4627, 4628, 4629, 4631, 4633, 4634, 4635, 4636, 4640, 4641,
# 4642, 4644, 4645, 4650, 4651, 4653, 4657, 4658, 4659, 4661, 4662, 4664,
# 4665, 4666, 4667, 4668, 4669, 4671, 4673, 4674, 4676, 4678, 4679, 4680,
# 4681, 4684, 4685, 4686, 4687, 4689, 4690, 4691, 4694, 4696, 4699, 4704,
# 4705, 4707, 4708, 4709, 4712, 4714, 4716, 4718, 4719, 4720, 4722, 4725,
# 4726, 4728, 4732, 4733, 4734, 4735, 4736, 4739, 4743, 4745, 4746, 4748,
# 4750, 4751, 4752, 4754, 4755, 4760, 4761, 4762, 4763, 4764, 4765, 4766,
# 4767, 4768, 4769, 4770, 4771, 4772, 4776, 4777, 4781, 4783, 4784, 4792,
# 4800, 4801, 4809, 4812, 4813, 4814, 4816, 4819, 4820, 4824, 4825, 4826,
# 4828, 4829, 4832, 4833, 4837, 4838, 4839, 4840, 4845, 4847, 4848, 4849,
# 4875, 4880, 4881, 4883, 4884, 4885, 4901, 4902, 4911, 4912, 4914, 4917,
# 4918, 4919, 4920, 4921, 4922, 4923, 4925, 4926, 4927, 4928, 4929, 4930,
# 4931, 4933, 4934, 4935, 4936, 4951, 4952, 4955, 4956, 4957, 4958, 4960,
# 4962, 4963, 4966, 4967, 4968, 4970, 4971, 4972, 4973, 4974, 4975, 4976,
# 4977, 4978, 4979, 4980, 4985, 4987, 4990, 4992, 4994, 4996, 4997, 4998,
# 4999, 5008, 5009, 5010, 5011, 5013, 5015, 5017, 5018, 5019, 5020, 5021,
# 5070, 5071, 5072, 5073, 5101, 5103, 5104, 5105, 5108, 5110, 5121, 5122,
# 5142, 5161, 5162, 5184, 5185, 5186, 5187, 5189, 5191, 5192, 5194, 5195,
# 5199, 5201, 5202, 5204, 5208, 5210, 5212, 5214, 5216, 5217, 5218, 5232,
# 5233, 5237, 5261, 5262, 5268, 5269, 5271, 5273, 5277, 5279, 5280, 5282,
# 5283, 5284, 5285, 5287, 5288, 5290, 5301, 5302, 5304, 5310, 5331, 5332,
# 5333, 5334, 5337, 5341, 5344, 5351, 5352, 5355, 5357, 5358, 5363, 5367,
# 5368, 5380, 5381, 5384, 5386, 5387, 5388, 5391, 5393, 5395, 5401, 5406,
# 5408, 5410, 5411, 5423, 5440, 5444, 5445, 5446, 5449, 5451, 5458, 5463,
# 5464, 5471, 5476, 5480, 5481, 5482, 5484, 5486, 5491, 5541, 5542, 5563,
# 5602, 5603, 5609, 5610, 5612, 5631, 5632, 5644, 5658, 5659, 5660, 5690,
# 5695, 5697, 5698, 5699, 5702, 5703, 5704, 5706, 5707, 5711, 5713, 5714,
# 5715, 5721, 5724, 5726, 5727, 5729, 5741, 5742, 5753, 5757, 5781, 5801,
# 5802, 5803, 5805, 5807, 5809, 5816, 5817, 5819, 5820, 5821, 5851, 5852,
# 5856, 5857, 5858, 5900, 5901, 5902, 5903, 5905, 5906, 5907, 5909, 5911,
# 5912, 5915, 5918, 5921, 5922, 5923, 5928, 5929, 5930, 5932, 5933, 5935,
# 5936, 5938, 5939, 5940, 5941, 5942, 5943, 5945, 5946, 5947, 5949, 5950,
# 5951, 5952, 5955, 5956, 5957, 5958, 5959, 5962, 5964, 5965, 5966, 5967,
# 5969, 5970, 5971, 5973, 5974, 5975, 5976, 5981, 5982, 5983, 5984, 5985,
# 5986, 5987, 5988, 5989, 5990, 5991, 5992, 5994, 5997, 5998, 5999, 6005,
# 6013, 6016, 6018, 6022, 6023, 6026, 6027, 6028, 6029, 6030, 6031, 6032,
# 6033, 6034, 6035, 6036, 6037, 6038, 6039, 6040, 6042, 6044, 6045, 6046,
# 6047, 6048, 6049, 6050, 6054, 6055, 6058, 6059, 6060, 6061, 6062, 6063,
# 6064, 6066, 6067, 6069, 6070, 6071, 6072, 6073, 6074, 6077, 6078, 6080,
# 6081, 6082, 6083, 6085, 6086, 6087, 6088, 6089, 6090, 6091, 6092, 6093,
# 6094, 6095, 6096, 6098, 6099, 6101, 6103, 6104, 6113, 6118, 6121, 6125,
# 6131, 6134, 6135, 6136, 6137, 6138, 6140, 6141, 6143, 6144, 6145, 6146,
# 6147, 6149, 6150, 6151, 6155, 6156, 6157, 6158, 6159, 6161, 6164, 6165,
# 6166, 6167, 6171, 6172, 6173, 6174, 6175, 6176, 6177, 6178, 6180, 6181,
# 6182, 6183, 6184, 6185, 6186, 6187, 6188, 6189, 6190, 6191, 6192, 6193,
# 6194, 6195, 6196, 6197, 6198, 6199, 6200, 6201, 6203, 6205, 6208, 6210,
# 6217, 6218, 6222, 6229, 6230, 6231, 6232, 6233, 6235, 6236, 6237, 6238,
# 6239, 6240, 6245, 6246, 6247, 6248, 6249, 6250, 6254, 6255, 6257, 6258,
# 6262, 6264, 6265, 6266, 6267, 6268, 6269, 6271, 6272, 6273, 6276, 6277,
# 6278, 6279, 6281, 6282, 6284, 6286, 6287, 6289, 6291, 6292, 6293, 6294,
# 6297, 6298, 6299, 6301, 6302, 6303, 6305, 6306, 6307, 6309, 6310, 6312,
# 6315, 6316, 6317, 6319, 6322, 6323, 6324, 6325, 6326, 6327, 6328, 6330,
# 6331, 6332, 6333, 6334, 6335, 6336, 6337, 6338, 6339, 6340, 6342, 6343,
# 6345, 6346, 6347, 6349, 6351, 6355, 6356, 6357, 6358, 6360, 6361, 6362,
# 6363, 6364, 6365, 6366, 6367, 6368, 6369, 6370, 6371, 6373, 6376, 6378,
# 6379, 6380, 6381, 6382, 6383, 6384, 6387, 6390, 6391, 6392, 6393, 6395,
# 6396, 6400, 6402, 6403, 6405, 6406, 6407, 6408, 6409, 6411, 6412, 6413,
# 6414, 6416, 6417, 6418, 6419, 6420, 6424, 6425, 6428, 6430, 6432, 6433,
# 6436, 6440, 6444, 6445, 6448, 6454, 6455, 6457, 6458, 6459, 6460, 6461,
# 6462, 6463, 6464, 6465, 6466, 6467, 6469, 6470, 6471, 6472, 6473, 6474,
# 6479, 6480, 6481, 6482, 6484, 6485, 6486, 6488, 6489, 6490, 6492, 6493,
# 6494, 6495, 6496, 6497, 6498, 6501, 6502, 6503, 6504, 6505, 6506, 6507,
# 6508, 6513, 6516, 6517, 6518, 6531, 6532, 6533, 6535, 6537, 6538, 6539,
# 6540, 6541, 6542, 6543, 6544, 6545, 6546, 6547, 6548, 6549, 6550, 6551,
# 6552, 6553, 6554, 6555, 6556, 6557, 6558, 6560, 6561, 6562, 6563, 6564,
# 6565, 6566, 6567, 6568, 6569, 6570, 6571, 6572, 6573, 6574, 6575, 6576,
# 6577, 6578, 6579, 6580, 6584, 6586, 6588, 6590, 6592, 6594, 6596, 6597,
# 6599, 6612, 6613, 6615, 6616, 6617, 6618, 6619, 6620, 6622, 6625, 6626,
# 6627, 6628, 6629, 6630, 6632, 6633, 6634, 6635, 6637, 6638, 6639, 6640,
# 6641, 6643, 6644, 6645, 6647, 6648, 6651, 6652, 6653, 6654, 6656, 6658,
# 6659, 6662, 6663, 6664, 6666, 6668, 6670, 6674, 6675, 6676, 6677, 6678,
# 6694, 6695, 6696, 6697, 6698, 6699, 6701, 6702, 6703, 6704, 6706, 6707,
# 6709, 6715, 6718, 6721, 6723, 6724, 6727, 6728, 6730, 6731, 6734, 6736,
# 6737, 6740, 6741, 6742, 6743, 6744, 6745, 6748, 6750, 6752, 6753, 6754,
# 6755, 6757, 6758, 6762, 6763, 6768, 6769, 6770, 6771, 6772, 6775, 6776,
# 6777, 6778, 6779, 6785, 6786, 6787, 6788, 6789, 6794, 6798, 6800, 6803,
# 6804, 6806, 6807, 6809, 6810, 6814, 6815, 6817, 6819, 6820, 6822, 6823,
# 6824, 6826, 6832, 6834, 6835, 6836, 6837, 6838, 6839, 6840, 6841, 6844,
# 6845, 6848, 6849, 6850, 6852, 6853, 6855, 6856, 6857, 6858, 6859, 6861,
# 6862, 6863, 6864, 6866, 6867, 6869, 6870, 6871, 6874, 6875, 6877, 6879,
# 6881, 6882, 6888, 6890, 6894, 6897, 6898, 6899, 6901, 6902, 6904, 6905,
# 6907, 6908, 6912, 6914, 6915, 6916, 6918, 6919, 6920, 6923, 6924, 6925,
# 6926, 6927, 6928, 6929, 6930, 6932, 6937, 6938, 6941, 6942, 6943, 6944,
# 6946, 6947, 6951, 6952, 6954, 6955, 6957, 6958, 6960, 6961, 6962, 6963,
# 6964, 6965, 6966, 6967, 6969, 6971, 6973, 6976, 6977, 6981, 6982, 6986,
# 6988, 6989, 6993, 6994, 6995, 6996, 6997, 6998, 6999, 7003, 7004, 7011,
# 7012, 7013, 7014, 7018, 7021, 7022, 7030, 7031, 7033, 7034, 7035, 7036,
# 7037, 7038, 7039, 7040, 7041, 7042, 7043, 7044, 7045, 7046, 7047, 7048,
# 7049, 7050, 7056, 7057, 7058, 7059, 7060, 7061, 7062, 7063, 7064, 7065,
# 7066, 7067, 7068, 7069, 7070, 7071, 7072, 7073, 7074, 7075, 7077, 7078,
# 7079, 7080, 7081, 7082, 7083, 7084, 7085, 7086, 7087, 7088, 7089, 7090,
# 7091, 7092, 7093, 7094, 7095, 7097, 7098, 7102, 7105, 7122, 7148, 7150,
# 7157, 7161, 7162, 7164, 7167, 7169, 7170, 7172, 7173, 7175, 7176, 7177,
# 7180, 7181, 7182, 7183, 7184, 7185, 7186, 7187, 7189, 7190, 7191, 7192,
# 7196, 7198, 7199, 7201, 7202, 7203, 7205, 7208, 7211, 7212, 7213, 7214,
# 7215, 7217, 7218, 7219, 7220, 7222, 7224, 7226, 7228, 7229, 7231, 7235,
# 7236, 7238, 7239, 7240, 7241, 7242, 7244, 7245, 7246, 7247, 7250, 7254,
# 7255, 7256, 7259, 7261, 7264, 7265, 7266, 7267, 7268, 7269, 7270, 7271,
# 7272, 7273, 7276, 7277, 7278, 7279, 7280, 7282, 7283, 7284, 7287, 7291,
# 7292, 7294, 7296, 7297, 7298, 7299, 7305, 7309, 7313, 7314, 7315, 7317,
# 7320, 7321, 7322, 7325, 7326, 7327, 7337, 7338, 7339, 7342, 7351, 7352,
# 7353, 7354, 7355, 7356, 7357, 7358, 7359, 7360, 7399, 7408, 7412, 7413,
# 7414, 7416, 7417, 7419, 7420, 7421, 7422, 7425, 7426, 7427, 7433, 7434,
# 7435, 7438, 7442, 7443, 7444, 7445, 7446, 7447, 7448, 7450, 7451, 7453,
# 7455, 7456, 7458, 7459, 7460, 7461, 7462, 7463, 7464, 7466, 7467, 7472,
# 7475, 7476, 7477, 7480, 7481, 7482, 7483, 7486, 7487, 7490, 7494, 7500,
# 7501, 7502, 7504, 7505, 7506, 7508, 7509, 7510, 7512, 7513, 7514, 7515,
# 7516, 7518, 7519, 7520, 7521, 7522, 7523, 7524, 7525, 7527, 7531, 7532,
# 7537, 7538, 7539, 7544, 7545, 7550, 7551, 7552, 7554, 7555, 7559, 7561,
# 7562, 7564, 7565, 7567, 7570, 7571, 7575, 7577, 7578, 7581, 7585, 7587,
# 7590, 7593, 7594, 7595, 7596, 7597, 7599, 7600, 7601, 7602, 7603, 7604,
# 7605, 7606, 7607, 7608, 7609, 7610, 7611, 7613, 7614, 7615, 7616, 7618,
# 7619, 7621, 7623, 7624, 7625, 7628, 7630, 7634, 7635, 7636, 7637, 7638,
# 7640, 7643, 7646, 7647, 7649, 7670, 7671, 7672, 7673, 7674, 7676, 7677,
# 7678, 7679, 7680, 7681, 7682, 7683, 7685, 7686, 7687, 7688, 7689, 7690,
# 7691, 7692, 7693, 7694, 7695, 7701, 7702, 7705, 7707, 7709, 7711, 7713,
# 7715, 7716, 7717, 7718, 7719, 7721, 7722, 7723, 7725, 7726, 7727, 7729,
# 7730, 7731, 7732, 7733, 7734, 7735, 7739, 7740, 7741, 7743, 7744, 7745,
# 7746, 7747, 7748, 7749, 7751, 7752, 7758, 7760, 7762, 7768, 7769, 7771,
# 7774, 7775, 7776, 7777, 7779, 7780, 7781, 7782, 7790, 7800, 7803, 7804,
# 7805, 7806, 7807, 7808, 7809, 7810, 7811, 7812, 7813, 7814, 7815, 7816,
# 7817, 7818, 7819, 7820, 7821, 7822, 7823, 7826, 7827, 7829, 7831, 7832,
# 7833, 7836, 7837, 7838, 7839, 7840, 7841, 7844, 7846, 7847, 7849, 7850,
# 7851, 7855, 7856, 7857, 7859, 7860, 7862, 7863, 7864, 7865, 7867, 7868,
# 7869, 7871, 7872, 7874, 7875, 7877, 7878, 7879, 7883, 7885, 7886, 7887,
# 7888, 7893, 7895, 7896, 7897, 7898, 7899, 7901, 7902, 7905, 7906, 7908,
# 7911, 7912, 7914, 7915, 7916, 7917, 7918, 7919, 7921, 7922, 7923, 7925,
# 7927, 7928, 7931, 7932, 7936, 7937, 7938, 7939, 7940, 7942, 7943, 7944,
# 7945, 7946, 7947, 7949, 7951, 7952, 7953, 7955, 7956, 7957, 7958, 7959,
# 7961, 7962, 7963, 7965, 7966, 7970, 7971, 7972, 7974, 7975, 7976, 7979,
# 7980, 7981, 7983, 7984, 7985, 7986, 7987, 7988, 7989, 7990, 7991, 7992,
# 7994, 7995, 7997, 7999, 8001, 8002, 8005, 8006, 8007, 8008, 8011, 8012,
# 8013, 8014, 8015, 8016, 8018, 8020, 8022, 8023, 8025, 8029, 8030, 8031,
# 8032, 8035, 8037, 8038, 8039, 8040, 8041, 8043, 8045, 8046, 8050, 8051,
# 8052, 8053, 8056, 8057, 8058, 8059, 8060, 8061, 8065, 8066, 8068, 8070,
# 8072, 8074, 8075, 8077, 8078, 8079, 8081, 8084, 8085, 8086, 8087, 8088,
# 8089, 8090, 8091, 8093, 8095, 8096, 8097, 8098, 8101, 8103, 8104, 8105,
# 8107, 8108, 8111, 8113, 8114, 8115, 8117, 8118, 8119, 8123, 8125, 8127,
# 8129, 8130, 8131, 8132, 8133, 8135, 8136, 8137, 8138, 8139, 8140, 8141,
# 8142, 8143, 8144, 8147, 8150, 8151, 8152, 8153, 8154, 8155, 8157, 8158,
# 8159, 8160, 8163, 8165, 8166, 8167, 8168, 8173, 8174, 8179, 8181, 8182,
# 8184, 8185, 8186, 8194, 8198, 8200, 8202, 8203, 8207, 8208, 8209, 8214,
# 8215, 8217, 8218, 8219, 8225, 8226, 8227, 8230, 8233, 8237, 8242, 8244,
# 8247, 8249, 8252, 8253, 8254, 8255, 8256, 8257, 8260, 8267, 8273, 8275,
# 8276, 8278, 8279, 8281, 8282, 8283, 8285, 8287, 8289, 8291, 8298, 8301,
# 8303, 8304, 8306, 8308, 8309, 8316, 8331, 8334, 8336, 8337, 8338, 8341,
# 8342, 8343, 8344, 8345, 8346, 8349, 8350, 8354, 8355, 8356, 8358, 8359,
# 8360, 8361, 8362, 8363, 8364, 8365, 8366, 8367, 8368, 8369, 8370, 8377,
# 8381, 8382, 8383, 8385, 8386, 8387, 8388, 8392, 8393, 8395, 8397, 8399,
# 8410, 8411, 8416, 8418, 8421, 8424, 8425, 8439, 8462, 8473, 8508, 8511,
# 8515, 8518, 8521, 8522, 8524, 8527, 8530, 8537, 8541, 8542, 8544, 8550,
# 8551, 8558, 8562, 8563, 8566, 8570, 8572, 8584, 8585, 8586, 8591, 8593,
# 8595, 8596, 8600, 8601, 8604, 8609, 8613, 8614, 8616, 8617, 8622, 8624,
# 8628, 8630, 8697, 8698, 8699, 8700, 8704, 8705, 8706, 8707, 8708, 8713,
# 8714, 8715, 8725, 8732, 8737, 8739, 8740, 8742, 8746, 8747, 8750, 8766,
# 8769, 8771, 8772, 8783, 8789, 8793, 8795, 8798, 8801, 8802, 8803, 8804,
# 8806, 8818, 8830, 8835, 8836, 8841, 8842, 8844, 8848, 8850, 8854, 8860,
# 8864, 8869, 8871, 8876, 8877, 8881, 8886, 8887, 8889, 8890, 8891, 8892,
# 8893, 8894, 8897, 8898, 8903, 8904, 8905, 8908, 8909, 8912, 8914, 8917,
# 8918, 8919, 8920, 8921, 8922, 8923, 8925, 8927, 8928, 8929, 8931, 8934,
# 8935, 8938, 8940, 8944, 8945, 8946, 8951, 8952, 8953, 8954, 8955, 8956,
# 8957, 8958, 8960, 8961, 8963, 8964, 8966, 8967, 8968, 8972, 8975, 8976,
# 8977, 8979, 8984, 8985, 8986, 8987, 8995, 8999, 9001, 9003, 9005, 9006,
# 9007, 9008, 9009, 9010, 9012, 9014, 9017, 9020, 9021, 9022, 9024, 9025,
# 9028, 9029, 9031, 9033, 9034, 9036, 9037, 9039, 9041, 9042, 9044, 9045,
# 9046, 9048, 9049, 9051, 9052, 9055, 9057, 9058, 9059, 9060, 9062, 9063,
# 9064, 9065, 9066, 9067, 9068, 9069, 9070, 9072, 9073, 9074, 9075, 9076,
# 9078, 9081, 9082, 9083, 9086, 9087, 9090, 9099, 9101, 9104, 9107, 9110,
# 9115, 9119, 9127, 9130, 9142, 9143, 9145, 9171, 9173, 9176, 9179, 9193,
# 9201, 9202, 9206, 9232, 9233, 9260, 9261, 9262, 9263, 9264, 9265, 9266,
# 9267, 9268, 9270, 9271, 9272, 9273, 9274, 9275, 9276, 9278, 9279, 9281,
# 9282, 9283, 9284, 9285, 9286, 9287, 9301, 9302, 9303, 9304, 9305, 9306,
# 9307, 9308, 9310, 9311, 9312, 9313, 9318, 9319, 9322, 9324, 9325, 9326,
# 9351, 9353, 9355, 9358, 9360, 9361, 9362, 9363, 9364, 9365, 9366, 9367,
# 9368, 9369, 9375, 9376, 9377, 9380, 9381, 9384, 9385, 9386, 9388, 9399,
# 9401, 9404, 9405, 9408, 9409, 9412, 9413, 9414, 9416, 9417, 9418, 9419,
# 9421, 9422, 9423, 9424, 9425, 9428, 9432, 9433, 9434, 9435, 9436, 9438,
# 9439, 9441, 9444, 9445, 9446, 9449, 9450, 9465, 9466, 9467, 9468, 9470,
# 9474, 9475, 9476, 9478, 9479, 9501, 9502, 9503, 9504, 9505, 9506, 9507,
# 9508, 9509, 9511, 9513, 9514, 9517, 9519, 9531, 9532, 9533, 9534, 9535,
# 9536, 9537, 9539, 9543, 9551, 9600, 9601, 9602, 9603, 9605, 9610, 9612,
# 9613, 9616, 9619, 9621, 9622, 9624, 9625, 9627, 9628, 9629, 9631, 9632,
# 9633, 9635, 9636, 9637, 9639, 9640, 9641, 9644, 9647, 9651, 9656, 9658,
# 9661, 9663, 9671, 9672, 9675, 9678, 9679, 9681, 9682, 9684, 9685, 9686,
# 9687, 9691, 9692, 9695, 9696, 9697, 9698, 9699, 9701, 9702, 9704, 9706,
# 9707, 9708, 9709, 9713, 9715, 9716, 9717, 9719, 9720, 9722, 9723, 9726,
# 9728, 9729, 9731, 9733, 9734, 9735, 9739, 9740, 9742, 9743, 9744, 9746,
# 9749, 9753, 9755, 9757, 9758, 9759, 9760, 9761, 9763, 9765, 9766, 9767,
# 9768, 9769, 9776, 9778, 9780, 9782, 9783, 9787, 9788, 9790, 9791, 9793,
# 9795, 9797, 9799, 9810, 9812, 9816, 9818, 9820, 9823, 9824, 9827, 9828,
# 9829, 9830, 9831, 9832, 9835, 9837, 9842, 9843, 9845, 9846, 9849, 9850,
# 9852, 9853, 9854, 9856, 9857, 9861, 9867, 9869, 9872, 9873, 9876, 9878,
# 9880, 9882, 9885, 9887, 9888, 9889, 9890, 9895, 9896, 9900, 9902, 9903,
# 9904, 9906, 9908, 9909, 9913, 9914, 9919, 9927, 9928, 9929, 9930, 9932,
# 9934, 9936, 9941, 9945, 9946, 9948, 9950, 9955, 9956, 9959, 9960, 9962,
# 9964, 9966, 9967, 9969, 9972, 9973, 9974, 9976, 9977, 9978, 9979, 9980,
# 9982, 9983, 9984, 9986, 9987, 9989, 9990, 9991, 9993, 9994, 9995, 9996,
# 9997]
| 76.25 | 78 | 0.615841 |
acef6c2dc04f5b85702c8d34b63096221c4d3305 | 10,484 | py | Python | uuv_control/uuv_auv_control_allocator/src/uuv_auv_actuator_interface/actuator_manager.py | MoMagDii/VAUV-simulator | 56f55f9349e38e0a327a40feb5a437fcad511b00 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | uuv_control/uuv_auv_control_allocator/src/uuv_auv_actuator_interface/actuator_manager.py | MoMagDii/VAUV-simulator | 56f55f9349e38e0a327a40feb5a437fcad511b00 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | uuv_control/uuv_auv_control_allocator/src/uuv_auv_actuator_interface/actuator_manager.py | MoMagDii/VAUV-simulator | 56f55f9349e38e0a327a40feb5a437fcad511b00 | [
"Apache-2.0",
"BSD-3-Clause"
] | 2 | 2021-04-10T18:17:43.000Z | 2021-04-10T21:07:56.000Z | # Copyright (c) 2020 The Plankton Authors.
# All rights reserved.
#
# This source code is derived from UUV Simulator
# (https://github.com/uuvsimulator/uuv_simulator)
# Copyright (c) 2016-2019 The UUV Simulator Authors
# licensed under the Apache license, Version 2.0
# cf. 3rd-party-licenses.txt file in the root directory of this source tree.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import os
import yaml
from geometry_msgs.msg import Wrench, WrenchStamped
import rclpy
from rclpy.node import Node
import tf2_py as tf2
import tf2_ros
#from tf2_py import LookupException
from tf_quaternion.transformations import quaternion_matrix
from uuv_thrusters.models import Thruster
from uuv_auv_control_allocator.msg import AUVCommand
from uuv_gazebo_ros_plugins_msgs.msg import FloatStamped
from .fin_model import FinModel
from plankton_utils.params_helper import parse_nested_params_to_dict
#TODO Refactor
class ActuatorManager(Node):
MAX_FINS = 4
def __init__(self, node_name, **kwargs):
super().__init__(node_name,
allow_undeclared_parameters=True,
automatically_declare_parameters_from_overrides=True,
**kwargs)
# Acquiring the namespace of the vehicle
self.namespace = self.get_namespace().replace('/', '')
self.get_logger().info('Initialize control allocator for vehicle <%s>' % self.namespace)
self.tf_buffer = tf2_ros.Buffer()
self.listener = tf2_ros.TransformListener(self.tf_buffer, self)
tf_trans_ned_to_enu = None
try:
if self.namespace != '':
target = '{}/base_link'.format(self.namespace)
source = '{}/base_link_ned'.format(self.namespace)
else:
target = 'base_link'
source = 'base_link_ned'
self.get_logger().info('Lookup transfrom from %s to %s' % (source, target))
tf_trans_ned_to_enu = self.tf_buffer.lookup_transform().lookup_transform(
target, source, rclpy.time.Time(), rclpy.time.Duration(seconds=1))
except Exception as e:
self.get_logger().warning('No transform found between base_link and base_link_ned'
' for vehicle {}, message={}'.format(self.namespace, e))
self.base_link_ned_to_enu = None
if tf_trans_ned_to_enu is not None:
self.base_link_ned_to_enu = quaternion_matrix(
(tf_trans_ned_to_enu.transform.rotation.x,
tf_trans_ned_to_enu.transform.rotation.y,
tf_trans_ned_to_enu.transform.rotation.z,
tf_trans_ned_to_enu.transform.rotation.w))[0:3, 0:3]
self.get_logger().warning('base_link transform NED to ENU=\n{}'.format(
self.base_link_ned_to_enu))
self.base_link = self.get_parameter('base_link', 'base_link').get_parameter_value().string_value
# Retrieve the thruster configuration parameters if available
thruster_config = self.get_parameters_by_prefix('thruster_config')
if len(thruster_config) == 0:
raise RuntimeError('Thruster configuration not available')
self.thruster_config = parse_nested_params_to_dict(self.thruster_config, '.', True)
# Check if all necessary thruster model parameter are available
thruster_params = ['conversion_fcn_params', 'conversion_fcn',
'topic_prefix', 'topic_suffix', 'frame_base', 'max_thrust']
for p in thruster_params:
if p not in self.thruster_config:
raise RuntimeError(
'Parameter <%s> for thruster conversion function is missing' % p)
# Setting up the thruster topic name
self.thruster_topic = build_topic_name(self.namespace,
self.thruster_config['topic_prefix'], 0,
self.thruster_config['topic_suffix'])
self.thruster = None
# Retrieve the fin configuration if available
fin_config = self.get_parameters_by_prefix('fin_config')
if len(fin_config) == 0:
raise RuntimeError('Fin configuration is not available')
self.fin_config = parse_nested_params_to_dict(self.fin_config, '.', True)
# Check if all necessary fin parameters are available
fin_params = ['fluid_density', 'lift_coefficient', 'fin_area',
'topic_prefix', 'topic_suffix', 'frame_base']
for p in fin_params:
if p not in self.fin_config:
raise RuntimeError(
'Parameter <%s> for fin configuration is missing' % p)
self.fin_lower_limit = -np.pi / 2
if 'lower_limit' in self.fin_config:
self.fin_lower_limit = self.fin_config['lower_limit']
self.fin_upper_limit = np.pi / 2
if 'upper_limit' in self.fin_config:
self.fin_upper_limit = self.fin_config['upper_limit']
if self.fin_config['lower_limit'] >= self.fin_config['upper_limit']:
raise RuntimeError('Fin angle limits are invalid')
self.fins = dict()
self.n_fins = 0
if not self.find_actuators():
raise RuntimeError('No thruster and/or fins found')
# =========================================================================
def find_actuators(self):
"""Calculate the control allocation matrix, if one is not given."""
self.ready = False
self.get_logger().infos('ControlAllocator: updating thruster poses')
base = '%s/%s' % (self.namespace, self.base_link)
frame = '%s/%s%d' % (self.namespace, self.thruster_config['frame_base'], 0)
self.get_logger().info('Lookup: Thruster transform found %s -> %s' % (base, frame))
trans = self.tf_buffer.lookup_transform(base, frame, rclpy.time.Time(), rclpy.time.Duration(seconds=1))
pos = np.array([trans.transform.translation.x,
trans.transform.translation.y,
trans.transform.translation.z])
quat = np.array([trans.transform.rotation.x,
trans.transform.rotation.y,
trans.transform.rotation.z,
trans.transform.rotation.w])
self.get_logger().info('Thruster transform found %s -> %s' % (base, frame))
self.get_logger().info('pos=' + str(pos))
self.get_logger().info('rot=' + str(quat))
# Read transformation from thruster
#params = {key: val.value for key, val in params.items()}
self.thruster = Thruster.create_thruster(
self.thruster_config['conversion_fcn'], 0,
self.thruster_topic, pos, quat,
**self.thruster_config['conversion_fcn_params'])
for i in range(self.MAX_FINS):
try:
frame = '%s/%s%d' % (self.namespace, self.fin_config['frame_base'], i)
self.get_logger().info('Lookup: Fin transform found %s -> %s' % (base, frame))
trans = self.tf_buffer.lookup_transform(base, frame, rclpy.time.Time(), rclpy.time.Duration(seconds=1))
pos = np.array([trans.transform.translation.x,
trans.transform.translation.y,
trans.transform.translation.z])
quat = np.array([trans.transform.rotation.x,
trans.transform.rotation.y,
trans.transform.rotation.z,
trans.transform.rotation.w])
self.get_logger().info('Fin transform found %s -> %s' % (base, frame))
self.get_logger().info('pos=' + str(pos))
self.get_logger().info('quat=' + str(quat))
fin_topic = build_topic_name(self.namespace,
self.fin_config['topic_prefix'], i, self.fin_config['topic_suffix'])
self.fins[i] = FinModel(
i,
pos,
quat,
fin_topic,
self)
except (tf2.LookupException, tf2.ConnectivityException, tf2.ExtrapolationException):
self.get_logger().info('Could not get transform from %s to %s ' % (base, frame))
break
self.n_fins = len(self.fins.keys())
self.get_logger().info('# fins found: %d' % len(self.fins.keys()))
for i in range(self.n_fins):
self.get_logger().info(i)
self.get_logger().info(self.fins[i].pos)
self.get_logger().info(self.fins[i].rot)
self.ready = True
return True
# =========================================================================
def compute_control_force(self, thrust, delta, u):
actuator_model = self.thruster.tam_column.reshape((6, 1)) * thrust
for i in self.fins:
f_lift = (0.5 * self.fin_config['fluid_density'] *
self.fin_config['lift_coefficient'] * self.fin_config['fin_area'] *
delta[i] * u**2)
tau = np.zeros(6)
tau[0:3] = f_lift * self.fins[i].lift_vector
tau[3::] = np.cross(self.fins[i].pos, f_lift)
actuator_model += tau
return actuator_model
# =========================================================================
def publish_commands(self, command):
self.thruster.publish_command(command[0])
for i in range(self.n_fins):
self.fins[i].publish_command(command[i + 1])
# =========================================================================
def build_topic_name(self, namespace, topic_prefix, id, topic_prefix):
return '/%s/%s/id_%d/%s' % (namespace, topic_prefix, 0, topic_suffix)
| 43.866109 | 119 | 0.591473 |
acef6c4ff09c5b6f886e9ab36d1a006706cf91f1 | 21,648 | py | Python | test/test_ip4_vrf_multi_instance.py | B4dM4n/vpp | 3459ece6da90627b161e2128b5926f1e58e7db65 | [
"Apache-2.0"
] | 751 | 2017-07-13T06:16:46.000Z | 2022-03-30T09:14:35.000Z | test/test_ip4_vrf_multi_instance.py | B4dM4n/vpp | 3459ece6da90627b161e2128b5926f1e58e7db65 | [
"Apache-2.0"
] | 15 | 2018-03-19T15:20:07.000Z | 2022-03-18T19:48:21.000Z | test/test_ip4_vrf_multi_instance.py | B4dM4n/vpp | 3459ece6da90627b161e2128b5926f1e58e7db65 | [
"Apache-2.0"
] | 479 | 2017-07-13T06:17:26.000Z | 2022-03-31T18:20:43.000Z | #!/usr/bin/env python3
"""IP4 VRF Multi-instance Test Case HLD:
**NOTES:**
- higher number of pg-ip4 interfaces causes problems => only 15 pg-ip4 \
interfaces in 5 VRFs are tested
- jumbo packets in configuration with 15 pg-ip4 interfaces leads to \
problems too
**config 1**
- add 15 pg-ip4 interfaces
- configure 5 hosts per pg-ip4 interface
- configure 4 VRFs
- add 3 pg-ip4 interfaces per VRF
**test 1**
- send IP4 packets between all pg-ip4 interfaces in all VRF groups
**verify 1**
- check VRF data by parsing output of ip_fib_dump API command
- all packets received correctly in case of pg-ip4 interfaces in the
same VRF
- no packet received in case of pg-ip4 interfaces not in VRF
- no packet received in case of pg-ip4 interfaces in different VRFs
**config 2**
- reset 2 VRFs
**test 2**
- send IP4 packets between all pg-ip4 interfaces in all VRF groups
**verify 2**
- all packets received correctly in case of pg-ip4 interfaces in the
same VRF
- no packet received in case of pg-ip4 interfaces not in VRF
- no packet received in case of pg-ip4 interfaces in different VRFs
**config 3**
- add 1 of reset VRFs and 1 new VRF
**test 3**
- send IP4 packets between all pg-ip4 interfaces in all VRF groups
**verify 3**
- check VRF data by parsing output of ip_fib_dump API command
- all packets received correctly in case of pg-ip4 interfaces in the
same VRF
- no packet received in case of pg-ip4 interfaces not in VRF
- no packet received in case of pg-ip4 interfaces in different VRFs
**config 4**
- reset all created VRFs
**test 4**
- send IP4 packets between all pg-ip4 interfaces in all VRF groups
**verify 4**
- check VRF data by parsing output of ip_fib_dump API command
- all packets received correctly in case of pg-ip4 interfaces in the
same VRF
- no packet received in case of pg-ip4 interfaces not in VRF
- no packet received in case of pg-ip4 interfaces in different VRFs
"""
import unittest
import random
import socket
import scapy.compat
from scapy.packet import Raw
from scapy.layers.l2 import Ether, ARP
from scapy.layers.inet import IP, UDP
from framework import VppTestCase, VppTestRunner
from util import ppp
from vrf import VRFState
def is_ipv4_misc(p):
    """ Is packet one of uninteresting IPv4 broadcasts? """
    # ARP traffic is background noise for these tests and is filtered out.
    return True if p.haslayer(ARP) else False
class TestIp4VrfMultiInst(VppTestCase):
""" IP4 VRF Multi-instance Test Case """
@classmethod
def setUpClass(cls):
"""
Perform standard class setup (defined by class method setUpClass in
class VppTestCase) before running the test case, set test case related
variables and configure VPP.
"""
super(TestIp4VrfMultiInst, cls).setUpClass()
# Test variables
cls.hosts_per_pg = 5
cls.nr_of_vrfs = 5
cls.pg_ifs_per_vrf = 3
try:
# Create pg interfaces
cls.create_pg_interfaces(
range(cls.nr_of_vrfs * cls.pg_ifs_per_vrf))
# Packet flows mapping pg0 -> pg1, pg2 etc.
cls.flows = dict()
for i in range(len(cls.pg_interfaces)):
multiplicand = i // cls.pg_ifs_per_vrf
pg_list = [
cls.pg_interfaces[multiplicand * cls.pg_ifs_per_vrf + j]
for j in range(cls.pg_ifs_per_vrf)
if (multiplicand * cls.pg_ifs_per_vrf + j) != i]
cls.flows[cls.pg_interfaces[i]] = pg_list
# Packet sizes - jumbo packet (9018 bytes) skipped
cls.pg_if_packet_sizes = [64, 512, 1518]
# Set up all interfaces
for pg_if in cls.pg_interfaces:
pg_if.admin_up()
pg_if.generate_remote_hosts(cls.hosts_per_pg)
# Create list of VRFs
cls.vrf_list = list()
# Create list of reset VRFs
cls.vrf_reset_list = list()
# Create list of pg_interfaces in VRFs
cls.pg_in_vrf = list()
# Create list of pg_interfaces not in VRFs
cls.pg_not_in_vrf = [pg_if for pg_if in cls.pg_interfaces]
# Create mapping of pg_interfaces to VRF IDs
cls.pg_if_sets = dict()
for i in range(cls.nr_of_vrfs):
set_id = i + 1
pg_list = [
cls.pg_interfaces[i * cls.pg_ifs_per_vrf + j]
for j in range(cls.pg_ifs_per_vrf)]
cls.pg_if_sets[set_id] = pg_list
except Exception:
super(TestIp4VrfMultiInst, cls).tearDownClass()
raise
@classmethod
def tearDownClass(cls):
super(TestIp4VrfMultiInst, cls).tearDownClass()
def setUp(self):
"""
Clear trace and packet infos before running each test.
"""
super(TestIp4VrfMultiInst, self).setUp()
self.reset_packet_infos()
def tearDown(self):
"""
Show various debug prints after each test.
"""
super(TestIp4VrfMultiInst, self).tearDown()
def show_commands_at_teardown(self):
self.logger.info(self.vapi.ppcli("show ip fib"))
self.logger.info(self.vapi.ppcli("show ip4 neighbors"))
    def _assign_interfaces(self, vrf_id, if_set_id):
        """Bind one set of pg interfaces to an IPv4 VRF.

        Moves every pg interface of interface set *if_set_id* into FIB table
        *vrf_id*, keeps the pg_in_vrf / pg_not_in_vrf bookkeeping lists in
        sync, and (re)applies the IPv4 address and neighbor configuration.
        """
        for i in range(self.pg_ifs_per_vrf):
            pg_if = self.pg_if_sets[if_set_id][i]
            pg_if.set_table_ip4(vrf_id)
            self.logger.info("pg-interface %s added to IPv4 VRF ID %d"
                             % (pg_if.name, vrf_id))
            # Track membership so the verification steps know which
            # interfaces should (not) receive traffic.
            if pg_if not in self.pg_in_vrf:
                self.pg_in_vrf.append(pg_if)
            if pg_if in self.pg_not_in_vrf:
                self.pg_not_in_vrf.remove(pg_if)
            # IPv4 config must be re-applied after the table change.
            pg_if.config_ip4()
            pg_if.configure_ipv4_neighbors()
    def create_vrf_and_assign_interfaces(self, count, start=1):
        """
        Create required number of FIB tables / VRFs, put 3 pg-ip4 interfaces
        to every FIB table / VRF.

        :param int count: Number of FIB tables / VRFs to be created.
        :param int start: Starting number of the FIB table / VRF ID. \
        (Default value = 1)
        """
        for i in range(count):
            vrf_id = i + start
            self.vapi.ip_table_add_del(is_add=1, table={'table_id': vrf_id})
            self.logger.info("IPv4 VRF ID %d created" % vrf_id)
            # Keep the created/reset bookkeeping lists consistent.
            if vrf_id not in self.vrf_list:
                self.vrf_list.append(vrf_id)
            if vrf_id in self.vrf_reset_list:
                self.vrf_reset_list.remove(vrf_id)
            # Interface set ID equals the VRF ID in this fixed mapping.
            self._assign_interfaces(vrf_id, vrf_id)
        self.logger.debug(self.vapi.ppcli("show ip fib"))
        self.logger.debug(self.vapi.ppcli("show ip4 neighbors"))
def create_vrf_by_id_and_assign_interfaces(self, set_id,
vrf_id=0xffffffff):
"""
Create a FIB table / VRF by vrf_id, put 3 pg-ip4 interfaces
to FIB table / VRF.
:param int vrf_id: Required table ID / VRF ID. \
(Default value = 0xffffffff, ID will be selected automatically)
"""
ret = self.vapi.ip_table_allocate(table={'table_id': vrf_id})
vrf_id = ret.table.table_id
self.logger.info("IPv4 VRF ID %d created" % vrf_id)
if vrf_id not in self.vrf_list:
self.vrf_list.append(vrf_id)
if vrf_id in self.vrf_reset_list:
self.vrf_reset_list.remove(vrf_id)
self._assign_interfaces(vrf_id, set_id)
self.logger.debug(self.vapi.ppcli("show ip fib"))
self.logger.debug(self.vapi.ppcli("show ip4 neighbors"))
return vrf_id
    def reset_vrf_and_remove_from_vrf_list(self, vrf_id, if_set_id=None):
        """
        Reset required FIB table / VRF, unconfigure its interfaces and remove
        it from the VRF list.

        :param int vrf_id: The FIB table / VRF ID to be reset.
        :param int if_set_id: Interface set bound to the VRF; defaults to
            *vrf_id* (the fixed mapping used by most tests).
        """
        if if_set_id is None:
            if_set_id = vrf_id
        self.vapi.ip_table_flush(table={'table_id': vrf_id})
        # Move the VRF from the configured list to the reset list.
        if vrf_id in self.vrf_list:
            self.vrf_list.remove(vrf_id)
        if vrf_id not in self.vrf_reset_list:
            self.vrf_reset_list.append(vrf_id)
        for j in range(self.pg_ifs_per_vrf):
            pg_if = self.pg_if_sets[if_set_id][j]
            pg_if.unconfig_ip4()
            # Interfaces leave the VRF and go back to the "not in VRF" pool.
            if pg_if in self.pg_in_vrf:
                self.pg_in_vrf.remove(pg_if)
            if pg_if not in self.pg_not_in_vrf:
                self.pg_not_in_vrf.append(pg_if)
        self.logger.info("IPv4 VRF ID %d reset finished" % vrf_id)
        self.logger.debug(self.vapi.ppcli("show ip fib"))
        self.logger.debug(self.vapi.ppcli("show ip neighbors"))
        # Finally delete the (now empty) table itself.
        self.vapi.ip_table_add_del(is_add=0, table={'table_id': vrf_id})
    def create_stream(self, src_if, packet_sizes):
        """
        Create input packet stream for defined interface using hosts list.

        One UDP packet is generated from a random remote host on *src_if*
        towards every remote host of every destination interface in the same
        flow group.

        :param object src_if: Interface to create packet stream for.
        :param list packet_sizes: List of required packet sizes.
        :return: Stream of packets.
        """
        pkts = []
        src_hosts = src_if.remote_hosts
        for dst_if in self.flows[src_if]:
            for dst_host in dst_if.remote_hosts:
                src_host = random.choice(src_hosts)
                # The payload encodes src/dst indices so verify_capture()
                # can match each received packet to its origin.
                pkt_info = self.create_packet_info(src_if, dst_if)
                payload = self.info_to_payload(pkt_info)
                p = (Ether(dst=src_if.local_mac, src=src_host.mac) /
                     IP(src=src_host.ip4, dst=dst_host.ip4) /
                     UDP(sport=1234, dport=1234) /
                     Raw(payload))
                # Keep a copy before padding so verification compares the
                # original header fields.
                pkt_info.data = p.copy()
                size = random.choice(packet_sizes)
                self.extend_packet(p, size)
                pkts.append(p)
        self.logger.debug("Input stream created for port %s. Length: %u pkt(s)"
                          % (src_if.name, len(pkts)))
        return pkts
def create_stream_crosswise_vrf(self, src_if, vrf_id, packet_sizes):
"""
Create input packet stream for negative test for leaking across
different VRFs for defined interface using hosts list.
:param object src_if: Interface to create packet stream for.
:param int vrf_id: The FIB table / VRF ID where src_if is assigned.
:param list packet_sizes: List of required packet sizes.
:return: Stream of packets.
"""
pkts = []
src_hosts = src_if.remote_hosts
vrf_lst = list(self.vrf_list)
vrf_lst.remove(vrf_id)
for vrf in vrf_lst:
for dst_if in self.pg_if_sets[vrf]:
for dst_host in dst_if.remote_hosts:
src_host = random.choice(src_hosts)
pkt_info = self.create_packet_info(src_if, dst_if)
payload = self.info_to_payload(pkt_info)
p = (Ether(dst=src_if.local_mac, src=src_host.mac) /
IP(src=src_host.ip4, dst=dst_host.ip4) /
UDP(sport=1234, dport=1234) /
Raw(payload))
pkt_info.data = p.copy()
size = random.choice(packet_sizes)
self.extend_packet(p, size)
pkts.append(p)
self.logger.debug("Input stream created for port %s. Length: %u pkt(s)"
% (src_if.name, len(pkts)))
return pkts
def verify_capture(self, pg_if, capture):
"""
Verify captured input packet stream for defined interface.
:param object pg_if: Interface to verify captured packet stream for.
:param list capture: Captured packet stream.
"""
last_info = dict()
for i in self.pg_interfaces:
last_info[i.sw_if_index] = None
dst_sw_if_index = pg_if.sw_if_index
for packet in capture:
try:
ip = packet[IP]
udp = packet[UDP]
payload_info = self.payload_to_info(packet[Raw])
packet_index = payload_info.index
self.assertEqual(payload_info.dst, dst_sw_if_index)
self.logger.debug("Got packet on port %s: src=%u (id=%u)" %
(pg_if.name, payload_info.src, packet_index))
next_info = self.get_next_packet_info_for_interface2(
payload_info.src, dst_sw_if_index,
last_info[payload_info.src])
last_info[payload_info.src] = next_info
self.assertIsNotNone(next_info)
self.assertEqual(packet_index, next_info.index)
saved_packet = next_info.data
# Check standard fields
self.assertEqual(ip.src, saved_packet[IP].src)
self.assertEqual(ip.dst, saved_packet[IP].dst)
self.assertEqual(udp.sport, saved_packet[UDP].sport)
self.assertEqual(udp.dport, saved_packet[UDP].dport)
except:
self.logger.error(ppp("Unexpected or invalid packet:", packet))
raise
for i in self.pg_interfaces:
remaining_packet = self.get_next_packet_info_for_interface2(
i, dst_sw_if_index, last_info[i.sw_if_index])
self.assertIsNone(
remaining_packet,
"Port %u: Packet expected from source %u didn't arrive" %
(dst_sw_if_index, i.sw_if_index))
    def verify_vrf(self, vrf_id, if_set_id=None):
        """
        Check if the FIB table / VRF ID is configured.

        :param int vrf_id: The FIB table / VRF ID to be verified.
        :param int if_set_id: Interface set whose host routes are expected in
            the VRF; defaults to *vrf_id*.
        :return: VRFState.configured if host routes are present,
            VRFState.reset if the FIB exists but holds no host routes,
            VRFState.not_configured otherwise.
        """
        if if_set_id is None:
            if_set_id = vrf_id
        ip_fib_dump = self.vapi.ip_route_dump(vrf_id)
        # Non-empty dump means the FIB table exists at all.
        vrf_exist = len(ip_fib_dump)
        vrf_count = 0
        for ip_fib_details in ip_fib_dump:
            addr = ip_fib_details.route.prefix.network_address
            found = False
            # Count FIB entries that match a configured remote host address.
            for pg_if in self.pg_if_sets[if_set_id]:
                if found:
                    break
                for host in pg_if.remote_hosts:
                    if str(addr) == host.ip4:
                        vrf_count += 1
                        found = True
                        break
        if not vrf_exist and vrf_count == 0:
            self.logger.info("IPv4 VRF ID %d is not configured" % vrf_id)
            return VRFState.not_configured
        elif vrf_exist and vrf_count == 0:
            self.logger.info("IPv4 VRF ID %d has been reset" % vrf_id)
            return VRFState.reset
        else:
            self.logger.info("IPv4 VRF ID %d is configured" % vrf_id)
            return VRFState.configured
def run_verify_test(self):
"""
Create packet streams for all configured pg interfaces, send all \
prepared packet streams and verify that:
- all packets received correctly on all pg-ip4 interfaces assigned
to VRFs
- no packet received on all pg-ip4 interfaces not assigned to VRFs
:raise RuntimeError: If no packet captured on pg-ip4 interface assigned
to VRF or if any packet is captured on pg-ip4 interface not
assigned to VRF.
"""
# Test
# Create incoming packet streams for packet-generator interfaces
for pg_if in self.pg_interfaces:
pkts = self.create_stream(pg_if, self.pg_if_packet_sizes)
pg_if.add_stream(pkts)
# Enable packet capture and start packet sending
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
# Verify
# Verify outgoing packet streams per packet-generator interface
for pg_if in self.pg_interfaces:
if pg_if in self.pg_in_vrf:
capture = pg_if.get_capture(remark="interface is in VRF")
self.verify_capture(pg_if, capture)
elif pg_if in self.pg_not_in_vrf:
pg_if.assert_nothing_captured(remark="interface is not in VRF",
filter_out_fn=is_ipv4_misc)
self.logger.debug("No capture for interface %s" % pg_if.name)
else:
raise Exception("Unknown interface: %s" % pg_if.name)
def run_crosswise_vrf_test(self):
"""
Create packet streams for every pg-ip4 interface in VRF towards all
pg-ip4 interfaces in other VRFs, send all prepared packet streams and
verify that:
- no packet received on all configured pg-ip4 interfaces
:raise RuntimeError: If any packet is captured on any pg-ip4 interface.
"""
# Test
# Create incoming packet streams for packet-generator interfaces
for vrf_id in self.vrf_list:
for pg_if in self.pg_if_sets[vrf_id]:
pkts = self.create_stream_crosswise_vrf(
pg_if, vrf_id, self.pg_if_packet_sizes)
pg_if.add_stream(pkts)
# Enable packet capture and start packet sending
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
# Verify
# Verify outgoing packet streams per packet-generator interface
for pg_if in self.pg_interfaces:
pg_if.assert_nothing_captured(remark="interface is in other VRF",
filter_out_fn=is_ipv4_misc)
self.logger.debug("No capture for interface %s" % pg_if.name)
def test_ip4_vrf_01(self):
""" IP4 VRF Multi-instance test 1 - create 4 VRFs
"""
# Config 1
# Create 4 VRFs
self.create_vrf_and_assign_interfaces(4)
# Verify 1
for vrf_id in self.vrf_list:
self.assert_equal(self.verify_vrf(vrf_id),
VRFState.configured, VRFState)
# Test 1
self.run_verify_test()
self.run_crosswise_vrf_test()
def test_ip4_vrf_02(self):
""" IP4 VRF Multi-instance test 2 - reset 2 VRFs
"""
# Config 2
# Reset 2 VRFs
self.reset_vrf_and_remove_from_vrf_list(1)
self.reset_vrf_and_remove_from_vrf_list(2)
# Verify 2
for vrf_id in self.vrf_reset_list:
self.assert_equal(self.verify_vrf(vrf_id),
VRFState.reset, VRFState)
for vrf_id in self.vrf_list:
self.assert_equal(self.verify_vrf(vrf_id),
VRFState.configured, VRFState)
# Test 2
self.run_verify_test()
self.run_crosswise_vrf_test()
def test_ip4_vrf_03(self):
""" IP4 VRF Multi-instance 3 - add 2 VRFs
"""
# Config 3
# Add 1 of reset VRFs and 1 new VRF
self.create_vrf_and_assign_interfaces(1)
self.create_vrf_and_assign_interfaces(1, start=5)
# Verify 3
for vrf_id in self.vrf_reset_list:
self.assert_equal(self.verify_vrf(vrf_id),
VRFState.reset, VRFState)
for vrf_id in self.vrf_list:
self.assert_equal(self.verify_vrf(vrf_id),
VRFState.configured, VRFState)
# Test 3
self.run_verify_test()
self.run_crosswise_vrf_test()
def test_ip4_vrf_04(self):
""" IP4 VRF Multi-instance test 4 - reset 4 VRFs
"""
# Config 4
# Reset all VRFs (i.e. no VRF except VRF=0 configured)
for i in range(len(self.vrf_list)):
self.reset_vrf_and_remove_from_vrf_list(self.vrf_list[0])
# Verify 4
for vrf_id in self.vrf_reset_list:
self.assert_equal(self.verify_vrf(vrf_id),
VRFState.reset, VRFState)
vrf_list_length = len(self.vrf_list)
self.assertEqual(
vrf_list_length, 0,
"List of configured VRFs is not empty: %s != 0" % vrf_list_length)
# Test 4
self.run_verify_test()
self.run_crosswise_vrf_test()
def test_ip4_vrf_05(self):
""" IP4 VRF Multi-instance test 5 - id allocation
"""
# Config 5
# Create several VRFs
# Set vrf_id manually first
self.create_vrf_by_id_and_assign_interfaces(1, 1)
# Set vrf_id automatically a few times
auto_vrf_id = [
self.create_vrf_by_id_and_assign_interfaces(i) for i in range(2, 5)
]
# Verify 5
self.assert_equal(self.verify_vrf(1, 1), VRFState.configured, VRFState)
for i, vrf in enumerate(auto_vrf_id):
self.assert_equal(self.verify_vrf(vrf, i+2),
VRFState.configured, VRFState)
# Test 5
self.run_verify_test()
# Config 5.1
# Reset VRFs
self.reset_vrf_and_remove_from_vrf_list(1)
for i, vrf in enumerate(auto_vrf_id):
self.reset_vrf_and_remove_from_vrf_list(vrf, i+2)
# Verify 5.1
self.assert_equal(self.verify_vrf(1, 1), VRFState.reset, VRFState)
for i, vrf in enumerate(auto_vrf_id):
self.assert_equal(self.verify_vrf(vrf, i+2),
VRFState.reset, VRFState)
vrf_list_length = len(self.vrf_list)
self.assertEqual(
vrf_list_length, 0,
"List of configured VRFs is not empty: %s != 0" % vrf_list_length)
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
| 37.978947 | 79 | 0.597238 |
acef6d1d7c37792488eae3f285e85bd99b71aab8 | 5,413 | py | Python | data/RandomDataModule.py | Jabb0/FastFlow3D | cdc2a547268b85d0c851cf87786d80fcde4e8487 | [
"MIT"
] | 6 | 2021-10-14T03:30:32.000Z | 2022-03-25T07:16:03.000Z | data/RandomDataModule.py | Jabb0/FastFlow3D | cdc2a547268b85d0c851cf87786d80fcde4e8487 | [
"MIT"
] | 2 | 2021-10-08T09:06:24.000Z | 2022-03-26T10:37:22.000Z | data/RandomDataModule.py | Jabb0/FastFlow3D | cdc2a547268b85d0c851cf87786d80fcde4e8487 | [
"MIT"
] | null | null | null | from pathlib import Path
from typing import Optional, Union, List, Dict
import pytorch_lightning as pl
from torch.utils.data import DataLoader
from .RandomDataset import RandomDataset
from .util import ApplyPillarization, drop_points_function
class RandomDataModule(pl.LightningDataModule):
"""
Data module to prepare and load the waymo dataset.
Using a data module streamlines the data loading and preprocessing process.
"""
    def __init__(self, dataset_directory,
                 # These parameters are specific to the dataset
                 grid_cell_size, x_min, x_max, y_min, y_max, z_min, z_max,
                 batch_size: int = 32,
                 has_test=False,
                 num_workers=1):
        """Store dataset geometry/loader settings and build the transforms.

        :param dataset_directory: Root directory of the dataset.
        :param grid_cell_size: Edge length of one pillar grid cell.
        :param x_min, x_max, y_min, y_max, z_min, z_max: Point cloud crop box.
        :param batch_size: Batch size for all data loaders.
        :param has_test: Whether a test split should be created in setup().
        :param num_workers: Worker processes per data loader.
        """
        super(RandomDataModule, self).__init__()
        self._dataset_directory = Path(dataset_directory)
        self._batch_size = batch_size
        # Splits are created lazily in setup().
        self._train_ = None
        self._val_ = None
        self._test_ = None
        # This is a transformation class that applies to pillarization
        self._pillarization_transform = ApplyPillarization(grid_cell_size=grid_cell_size, x_min=x_min,
                                                           y_min=y_min, z_min=z_min, z_max=z_max)
        # This returns a function that removes points that should not be included in the pillarization.
        # It also removes the labels if given.
        self._drop_points_function = drop_points_function(x_min=x_min,
                                                          x_max=x_max, y_min=y_min, y_max=y_max,
                                                          z_min=z_min, z_max=z_max)
        self._has_test = has_test
        self._num_workers = num_workers
        # Only required for this dataset type
        self.z_max = z_max
        self.z_min = z_min
        self.y_max = y_max
        self.y_min = y_min
        self.x_max = x_max
        self.x_min = x_min
def prepare_data(self) -> None:
"""
Preprocessing of the data only called on 1 GPU.
Download and process the datasets here. E.g., tokenization.
Everything that is not random and only necessary once.
This is used to download the dataset to a local storage for example.
Later the dataset is then loaded by every worker in the setup() method.
:return: None
"""
# No need to download stuff
pass
def setup(self, stage: Optional[str] = None) -> None:
"""
Setup of the datasets. Called on every GPU in distributed training.
Do splits and build model internals here.
:param stage: either 'fit', 'validate', 'test' or 'predict'
:return: None
"""
# The Dataset will apply a transformation to each pointcloud
# This transformation consists of a pillarization
self._train_ = RandomDataset(x_max=self.x_max, x_min=self.x_min, y_max=self.y_max, y_min=self.y_min,
z_max=self.z_max, z_min=self.z_min,
# This part is actually necessary to prepare the data
point_cloud_transform=self._pillarization_transform,
drop_invalid_point_function=self._drop_points_function)
self._val_ = RandomDataset(x_max=self.x_max, x_min=self.x_min, y_max=self.y_max, y_min=self.y_min,
z_max=self.z_max, z_min=self.z_min,
# This part is actually necessary to prepare the data
point_cloud_transform=self._pillarization_transform,
drop_invalid_point_function=self._drop_points_function)
if self._has_test:
self._test_ = RandomDataset(x_max=self.x_max, x_min=self.x_min, y_max=self.y_max, y_min=self.y_min,
z_max=self.z_max, z_min=self.z_min,
# This part is actually necessary to prepare the data
point_cloud_transform=self._pillarization_transform,
drop_invalid_point_function=self._drop_points_function)
def train_dataloader(self) -> Union[DataLoader, List[DataLoader], Dict[str, DataLoader]]:
"""
Return a data loader for training
:return: the dataloader to use
"""
return DataLoader(self._train_, self._batch_size, num_workers=self._num_workers,
collate_fn=custom_collate)
def val_dataloader(self) -> Union[DataLoader, List[DataLoader], Dict[str, DataLoader]]:
"""
Return a data loader for validation
:return: the dataloader to use
"""
return DataLoader(self._val_, self._batch_size, shuffle=False, num_workers=self._num_workers,
collate_fn=custom_collate)
def test_dataloader(self) -> Union[DataLoader, List[DataLoader], Dict[str, DataLoader]]:
"""
Return a data loader for testing
:return: the dataloader to use
"""
if not self._has_test:
raise RuntimeError("No test dataset specified. Maybe set has_test=True in DataModule init.")
return DataLoader(self._test_, self._batch_size, shuffle=False, num_workers=self._num_workers,
collate_fn=custom_collate)
| 47.482456 | 111 | 0.605764 |
acef6d32ad7ed50c04d7403872043092976c2bb8 | 689 | py | Python | setup.py | serjtroshin/PLBART | 58e5de3041a2fc8b98e54648c6489fb3c23db9cb | [
"MIT"
] | null | null | null | setup.py | serjtroshin/PLBART | 58e5de3041a2fc8b98e54648c6489fb3c23db9cb | [
"MIT"
] | null | null | null | setup.py | serjtroshin/PLBART | 58e5de3041a2fc8b98e54648c6489fb3c23db9cb | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Packaging script for PLBART (program understanding/generation pre-training).
from setuptools import setup, find_packages
import subprocess  # NOTE(review): unused in this file — confirm before removing.
import sys  # NOTE(review): unused in this file — confirm before removing.
with open('README.md') as f:
    readme = f.read()
with open('LICENSE') as f:
    # NOTE(review): this shadows the builtin `license` and feeds the full
    # LICENSE text to setup(license=...), which expects a short identifier
    # such as "MIT" — confirm intent.
    license = f.read()
setup(
    name='PLBART',
    version='0.1.0',
    description='Unified Pre-training for Program Understanding and Generation',
    long_description=readme,
    license=license,
    python_requires='>=3.6',
    packages=find_packages("."),
    # Pinned runtime dependencies.
    install_requires=[
        "tree-sitter==0.19.0",
        "sentencepiece==0.1.96",
        "sacrebleu==1.2.11",
    ]
)
| 22.225806 | 80 | 0.66328 |
acef6d6f13a40d7653d2d4741120e62e2b27e44e | 524 | py | Python | Code/creating_resource_params.py | notha99y/Satellite-Scheduling | 6231eccf353f37ba643a7e37aa60525355f5d005 | [
"MIT"
] | 14 | 2018-04-06T22:36:30.000Z | 2022-02-15T02:36:58.000Z | Code/creating_resource_params.py | notha99y/Satellite-Scheduling | 6231eccf353f37ba643a7e37aa60525355f5d005 | [
"MIT"
] | null | null | null | Code/creating_resource_params.py | notha99y/Satellite-Scheduling | 6231eccf353f37ba643a7e37aa60525355f5d005 | [
"MIT"
] | 4 | 2018-04-06T22:36:57.000Z | 2022-02-15T02:37:00.000Z | from random import random
import math
# Number of satellite parameter rows to emit.
num = 1
# Write the satellite parameter CSV.  A `with` block guarantees the file is
# flushed and closed — the original code called `file.close` WITHOUT
# parentheses, which is a no-op and never actually closed either file.
with open('sat_params.csv', 'w') as file:
    file.write('name,attitude,ave_angular_speed,payload,memory,max_memory,lat,longi,roll,pitch,yaw,altitude\n')
    for i in range(num):
        file.write('T1,1,0.0628,EO,0,8.0,0.0,0.0,0.0,0.0,0.0,550.0\n')
# Number of repetitions of the ground-station row pair below.
num2 = 1
# Write the ground-station parameter CSV (same closing guarantee as above).
with open('gs_params.csv', 'w') as file:
    file.write('name,lat,longi\n')
    for i in range(num2):
        file.write('CRISP,1.3,103.8\n')
        file.write('GREEN,1.3,283.8\n')
        # file.write('ORANGE,-3,90.0\n')
| 26.2 | 108 | 0.652672 |
acef6f23c391b2337d5c8418fb7edee4ccd3abb6 | 3,795 | py | Python | src/transformers/sagemaker/training_args_sm.py | JadeMaveric/transformers | fb2b89840bf2ab9f74702bf83af8ddf92b61efb3 | [
"Apache-2.0"
] | 2 | 2021-04-18T07:58:07.000Z | 2021-07-14T01:50:45.000Z | src/transformers/sagemaker/training_args_sm.py | JadeMaveric/transformers | fb2b89840bf2ab9f74702bf83af8ddf92b61efb3 | [
"Apache-2.0"
] | 2 | 2021-06-22T23:35:09.000Z | 2022-02-22T21:40:11.000Z | src/transformers/sagemaker/training_args_sm.py | JadeMaveric/transformers | fb2b89840bf2ab9f74702bf83af8ddf92b61efb3 | [
"Apache-2.0"
] | 1 | 2021-12-27T04:49:35.000Z | 2021-12-27T04:49:35.000Z | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib.util
from dataclasses import dataclass, field
import torch
from transformers.file_utils import cached_property, is_sagemaker_distributed_available
from transformers.training_args import TrainingArguments
from transformers.utils import logging
logger = logging.get_logger(__name__)
def is_smdistributed_available():
    """Return ``True`` when SageMaker's ``smdistributed`` package is importable."""
    spec = importlib.util.find_spec("smdistributed")
    return spec is not None
# Import the model-parallel runtime lazily so this module can still be loaded
# on machines where SageMaker's smdistributed package is not installed.
if is_smdistributed_available():
    import smdistributed.modelparallel.torch as smp
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    """TrainingArguments variant aware of SageMaker's distributed runtimes."""
    # Raw argument string forwarded by the SageMaker launcher; a non-empty
    # value signals that model parallelism (smp) should be used.
    mp_parameters: str = field(
        default="", metadata={"help": "Used by the SageMaker launcher to send mp-specific args."}
    )
    def __post_init__(self):
        super().__post_init__()
        # Initialize the model-parallel runtime eagerly when requested.
        if is_smdistributed_available() and self.mp_parameters != "":
            smp.init()
    @cached_property
    def _setup_devices(self) -> "torch.device":
        # Selects the torch device (and initializes a process group when
        # needed), preferring SageMaker runtimes over plain torch.distributed.
        logger.info("PyTorch: setting up devices")
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_smdistributed_available() and self.mp_parameters != "":
            # SageMaker model parallelism: one GPU per local rank.
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_distributed_available():
            # SageMaker data parallelism supplies its own backend.
            import smdistributed.dataparallel.torch.distributed as dist
            dist.init_process_group()
            self.local_rank = dist.get_local_rank()
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            torch.distributed.init_process_group(backend="nccl")
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        if device.type == "cuda":
            torch.cuda.set_device(device)
        return device
    @property
    def world_size(self):
        # Under model parallelism the effective world size is the data-parallel size.
        if is_smdistributed_available() and self.mp_parameters != "":
            return smp.dp_size()
        return super().world_size
    @property
    def place_model_on_device(self):
        # smp manages device placement itself, so the Trainer must not move the model.
        return not (is_smdistributed_available() and self.mp_parameters != "")
    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
| 37.574257 | 118 | 0.67668 |
acef71a7ffb810f502fd0c463f34e41c2ef8d189 | 14,018 | py | Python | django/views/generic/date_based.py | yarko/django | 90b6240c8753ece3e52cafc37e1088b0646b843f | [
"BSD-3-Clause"
] | 4 | 2015-08-27T22:03:47.000Z | 2017-09-04T08:13:44.000Z | django/views/generic/date_based.py | mradziej/django | 5d38965743a369981c9a738a298f467f854a2919 | [
"BSD-3-Clause"
] | 1 | 2022-02-11T15:33:31.000Z | 2022-02-11T15:33:31.000Z | django/views/generic/date_based.py | mradziej/django | 5d38965743a369981c9a738a298f467f854a2919 | [
"BSD-3-Clause"
] | 6 | 2017-06-26T07:30:22.000Z | 2019-01-27T10:47:53.000Z | import datetime
import time
from django.template import loader, RequestContext
from django.core.exceptions import ObjectDoesNotExist
from django.core.xheaders import populate_xheaders
from django.db.models.fields import DateTimeField
from django.http import Http404, HttpResponse
import warnings
# Emitted once at import time: these function-based generic views are the
# pre-1.3 Django API, superseded by class-based generic views.
warnings.warn(
    'Function-based generic views have been deprecated; use class-based views instead.',
    DeprecationWarning
)
def archive_index(request, queryset, date_field, num_latest=15,
                  template_name=None, template_loader=loader,
                  extra_context=None, allow_empty=True, context_processors=None,
                  mimetype=None, allow_future=False, template_object_name='latest'):
    """
    Generic top-level archive of date-based objects.
    Templates: ``<app_label>/<model_name>_archive.html``
    Context:
        date_list
            List of years
        latest
            Latest N (defaults to 15) objects by date
    """
    if extra_context is None: extra_context = {}
    model = queryset.model
    # Hide objects dated in the future unless the caller opts in.
    if not allow_future:
        queryset = queryset.filter(**{'%s__lte' % date_field: datetime.datetime.now()})
    # dates() yields ascending years; reverse so the newest year comes first.
    date_list = queryset.dates(date_field, 'year')[::-1]
    if not date_list and not allow_empty:
        raise Http404("No %s available" % model._meta.verbose_name)
    if date_list and num_latest:
        latest = queryset.order_by('-'+date_field)[:num_latest]
    else:
        latest = None
    if not template_name:
        template_name = "%s/%s_archive.html" % (model._meta.app_label, model._meta.object_name.lower())
    t = template_loader.get_template(template_name)
    c = RequestContext(request, {
        'date_list' : date_list,
        template_object_name : latest,
    }, context_processors)
    # Callables in extra_context are evaluated now, at render-preparation time.
    for key, value in extra_context.items():
        if callable(value):
            c[key] = value()
        else:
            c[key] = value
    return HttpResponse(t.render(c), mimetype=mimetype)
def archive_year(request, year, queryset, date_field, template_name=None,
                 template_loader=loader, extra_context=None, allow_empty=False,
                 context_processors=None, template_object_name='object', mimetype=None,
                 make_object_list=False, allow_future=False):
    """
    Generic yearly archive view.
    Templates: ``<app_label>/<model_name>_archive_year.html``
    Context:
        date_list
            List of months in this year with objects
        year
            This year
        object_list
            List of objects published in the given month
            (Only available if make_object_list argument is True)
    """
    if extra_context is None: extra_context = {}
    model = queryset.model
    now = datetime.datetime.now()
    lookup_kwargs = {'%s__year' % date_field: year}
    # Only bother to check current date if the year isn't in the past and future objects aren't requested.
    if int(year) >= now.year and not allow_future:
        lookup_kwargs['%s__lte' % date_field] = now
    date_list = queryset.filter(**lookup_kwargs).dates(date_field, 'month')
    if not date_list and not allow_empty:
        raise Http404
    if make_object_list:
        object_list = queryset.filter(**lookup_kwargs)
    else:
        # Keep the context key present (empty) when the list was not requested.
        object_list = []
    if not template_name:
        template_name = "%s/%s_archive_year.html" % (model._meta.app_label, model._meta.object_name.lower())
    t = template_loader.get_template(template_name)
    c = RequestContext(request, {
        'date_list': date_list,
        'year': year,
        '%s_list' % template_object_name: object_list,
    }, context_processors)
    for key, value in extra_context.items():
        if callable(value):
            c[key] = value()
        else:
            c[key] = value
    return HttpResponse(t.render(c), mimetype=mimetype)
def archive_month(request, year, month, queryset, date_field,
                  month_format='%b', template_name=None, template_loader=loader,
                  extra_context=None, allow_empty=False, context_processors=None,
                  template_object_name='object', mimetype=None, allow_future=False):
    """
    Generic monthly archive view.
    Templates: ``<app_label>/<model_name>_archive_month.html``
    Context:
        date_list:
            List of days in this month with objects
        month:
            (date) this month
        next_month:
            (date) the first day of the next month, or None if the next month is in the future
        previous_month:
            (date) the first day of the previous month
        object_list:
            list of objects published in the given month
    """
    if extra_context is None: extra_context = {}
    try:
        # Parse year+month according to the caller's month_format (e.g. '%b' -> 'jan').
        tt = time.strptime("%s-%s" % (year, month), '%s-%s' % ('%Y', month_format))
        date = datetime.date(*tt[:3])
    except ValueError:
        raise Http404
    model = queryset.model
    now = datetime.datetime.now()
    # Calculate first and last day of month, for use in a date-range lookup.
    first_day = date.replace(day=1)
    if first_day.month == 12:
        last_day = first_day.replace(year=first_day.year + 1, month=1)
    else:
        last_day = first_day.replace(month=first_day.month + 1)
    lookup_kwargs = {
        '%s__gte' % date_field: first_day,
        '%s__lt' % date_field: last_day,
    }
    # Only bother to check current date if the month isn't in the past and future objects are requested.
    if last_day >= now.date() and not allow_future:
        lookup_kwargs['%s__lte' % date_field] = now
    object_list = queryset.filter(**lookup_kwargs)
    date_list = object_list.dates(date_field, 'day')
    if not object_list and not allow_empty:
        raise Http404
    # Calculate the next month, if applicable.
    if allow_future:
        next_month = last_day
    elif last_day <= datetime.date.today():
        next_month = last_day
    else:
        next_month = None
    # Calculate the previous month
    if first_day.month == 1:
        previous_month = first_day.replace(year=first_day.year-1,month=12)
    else:
        previous_month = first_day.replace(month=first_day.month-1)
    if not template_name:
        template_name = "%s/%s_archive_month.html" % (model._meta.app_label, model._meta.object_name.lower())
    t = template_loader.get_template(template_name)
    c = RequestContext(request, {
        'date_list': date_list,
        '%s_list' % template_object_name: object_list,
        'month': date,
        'next_month': next_month,
        'previous_month': previous_month,
    }, context_processors)
    for key, value in extra_context.items():
        if callable(value):
            c[key] = value()
        else:
            c[key] = value
    return HttpResponse(t.render(c), mimetype=mimetype)
def archive_week(request, year, week, queryset, date_field,
                 template_name=None, template_loader=loader,
                 extra_context=None, allow_empty=True, context_processors=None,
                 template_object_name='object', mimetype=None, allow_future=False):
    """
    Generic weekly archive view.
    Templates: ``<app_label>/<model_name>_archive_week.html``
    Context:
        week:
            (date) this week
        object_list:
            list of objects published in the given week
    """
    if extra_context is None: extra_context = {}
    try:
        # '%U' weeks start on Sunday; '-0-' pins the weekday to Sunday.
        tt = time.strptime(year+'-0-'+week, '%Y-%w-%U')
        date = datetime.date(*tt[:3])
    except ValueError:
        raise Http404
    model = queryset.model
    now = datetime.datetime.now()
    # Calculate first and last day of week, for use in a date-range lookup.
    first_day = date
    last_day = date + datetime.timedelta(days=7)
    lookup_kwargs = {
        '%s__gte' % date_field: first_day,
        '%s__lt' % date_field: last_day,
    }
    # Only bother to check current date if the week isn't in the past and future objects aren't requested.
    if last_day >= now.date() and not allow_future:
        lookup_kwargs['%s__lte' % date_field] = now
    object_list = queryset.filter(**lookup_kwargs)
    if not object_list and not allow_empty:
        raise Http404
    if not template_name:
        template_name = "%s/%s_archive_week.html" % (model._meta.app_label, model._meta.object_name.lower())
    t = template_loader.get_template(template_name)
    # Fix: forward context_processors to RequestContext, matching every other
    # date-based view in this module (the parameter was previously accepted
    # but silently ignored).
    c = RequestContext(request, {
        '%s_list' % template_object_name: object_list,
        'week': date,
    }, context_processors)
    for key, value in extra_context.items():
        if callable(value):
            c[key] = value()
        else:
            c[key] = value
    return HttpResponse(t.render(c), mimetype=mimetype)
def archive_day(request, year, month, day, queryset, date_field,
                month_format='%b', day_format='%d', template_name=None,
                template_loader=loader, extra_context=None, allow_empty=False,
                context_processors=None, template_object_name='object',
                mimetype=None, allow_future=False):
    """
    Generic daily archive view.
    Templates: ``<app_label>/<model_name>_archive_day.html``
    Context:
        object_list:
            list of objects published that day
        day:
            (datetime) the day
        previous_day
            (datetime) the previous day
        next_day
            (datetime) the next day, or None if the current day is today
    """
    if extra_context is None: extra_context = {}
    try:
        tt = time.strptime('%s-%s-%s' % (year, month, day),
                           '%s-%s-%s' % ('%Y', month_format, day_format))
        date = datetime.date(*tt[:3])
    except ValueError:
        raise Http404
    model = queryset.model
    now = datetime.datetime.now()
    # A DateTimeField needs a full-day range; a plain DateField matches exactly.
    if isinstance(model._meta.get_field(date_field), DateTimeField):
        lookup_kwargs = {'%s__range' % date_field: (datetime.datetime.combine(date, datetime.time.min), datetime.datetime.combine(date, datetime.time.max))}
    else:
        lookup_kwargs = {date_field: date}
    # Only bother to check current date if the date isn't in the past and future objects aren't requested.
    if date >= now.date() and not allow_future:
        lookup_kwargs['%s__lte' % date_field] = now
    object_list = queryset.filter(**lookup_kwargs)
    if not allow_empty and not object_list:
        raise Http404
    # Calculate the next day, if applicable.
    if allow_future:
        next_day = date + datetime.timedelta(days=1)
    elif date < datetime.date.today():
        next_day = date + datetime.timedelta(days=1)
    else:
        next_day = None
    if not template_name:
        template_name = "%s/%s_archive_day.html" % (model._meta.app_label, model._meta.object_name.lower())
    t = template_loader.get_template(template_name)
    c = RequestContext(request, {
        '%s_list' % template_object_name: object_list,
        'day': date,
        'previous_day': date - datetime.timedelta(days=1),
        'next_day': next_day,
    }, context_processors)
    for key, value in extra_context.items():
        if callable(value):
            c[key] = value()
        else:
            c[key] = value
    return HttpResponse(t.render(c), mimetype=mimetype)
def archive_today(request, **kwargs):
    """
    Generic daily archive view for today. Same as archive_day view.
    """
    # Overwrite any caller-supplied year/month/day with today's components,
    # formatted the way archive_day's default formats expect.
    today = datetime.date.today()
    kwargs['year'] = str(today.year)
    kwargs['month'] = today.strftime('%b').lower()
    kwargs['day'] = str(today.day)
    return archive_day(request, **kwargs)
def object_detail(request, year, month, day, queryset, date_field,
                  month_format='%b', day_format='%d', object_id=None, slug=None,
                  slug_field='slug', template_name=None, template_name_field=None,
                  template_loader=loader, extra_context=None, context_processors=None,
                  template_object_name='object', mimetype=None, allow_future=False):
    """
    Generic detail view from year/month/day/slug or year/month/day/id structure.
    Templates: ``<app_label>/<model_name>_detail.html``
    Context:
        object:
            the object to be detailed
    """
    if extra_context is None: extra_context = {}
    try:
        tt = time.strptime('%s-%s-%s' % (year, month, day),
                           '%s-%s-%s' % ('%Y', month_format, day_format))
        date = datetime.date(*tt[:3])
    except ValueError:
        raise Http404
    model = queryset.model
    now = datetime.datetime.now()
    # A DateTimeField needs a full-day range; a plain DateField matches exactly.
    if isinstance(model._meta.get_field(date_field), DateTimeField):
        lookup_kwargs = {'%s__range' % date_field: (datetime.datetime.combine(date, datetime.time.min), datetime.datetime.combine(date, datetime.time.max))}
    else:
        lookup_kwargs = {date_field: date}
    # Only bother to check current date if the date isn't in the past and future objects aren't requested.
    if date >= now.date() and not allow_future:
        lookup_kwargs['%s__lte' % date_field] = now
    # object_id wins over slug when both are provided.
    if object_id:
        lookup_kwargs['%s__exact' % model._meta.pk.name] = object_id
    elif slug and slug_field:
        lookup_kwargs['%s__exact' % slug_field] = slug
    else:
        raise AttributeError("Generic detail view must be called with either an object_id or a slug/slugfield")
    try:
        obj = queryset.get(**lookup_kwargs)
    except ObjectDoesNotExist:
        raise Http404("No %s found for" % model._meta.verbose_name)
    if not template_name:
        template_name = "%s/%s_detail.html" % (model._meta.app_label, model._meta.object_name.lower())
    # A per-object template name (from template_name_field) takes precedence.
    if template_name_field:
        template_name_list = [getattr(obj, template_name_field), template_name]
        t = template_loader.select_template(template_name_list)
    else:
        t = template_loader.get_template(template_name)
    c = RequestContext(request, {
        template_object_name: obj,
    }, context_processors)
    for key, value in extra_context.items():
        if callable(value):
            c[key] = value()
        else:
            c[key] = value
    response = HttpResponse(t.render(c), mimetype=mimetype)
    populate_xheaders(request, response, model, getattr(obj, obj._meta.pk.name))
    return response
| 37.281915 | 156 | 0.652376 |
acef71ea091d64d50a7d0f51b2dad0434f06d5a1 | 5,632 | py | Python | lessons/WebDevelopment/BackEndWorkspaceFiles/3_flask+plotly+pandas_example/wrangling_scripts/wrangle_data.py | HIP70890/DSND_Term2 | fcd5d8233ce68fa20d1f530d4295a86ea6f346d1 | [
"MIT"
] | null | null | null | lessons/WebDevelopment/BackEndWorkspaceFiles/3_flask+plotly+pandas_example/wrangling_scripts/wrangle_data.py | HIP70890/DSND_Term2 | fcd5d8233ce68fa20d1f530d4295a86ea6f346d1 | [
"MIT"
] | null | null | null | lessons/WebDevelopment/BackEndWorkspaceFiles/3_flask+plotly+pandas_example/wrangling_scripts/wrangle_data.py | HIP70890/DSND_Term2 | fcd5d8233ce68fa20d1f530d4295a86ea6f346d1 | [
"MIT"
] | null | null | null | import pandas as pd
import plotly.graph_objs as go
def cleandata(dataset, keepcolumns = ['Country Name', '1990', '2015'], value_variables = ['1990', '2015']):
    """Clean world bank data for a visualizaiton dashboard
    Keeps data range of dates in keep_columns variable and data for the top 10 economies
    Reorients the columns into a year, country and value
    Args:
        dataset (str): name of the csv data file
        keepcolumns (list of str): columns to retain ('Country Name' plus year columns)
        value_variables (list of str): year columns to melt into long format
    Returns:
        pandas.DataFrame: long-format frame with columns country, year, variable
        (the original docstring incorrectly claimed the result was saved to csv)
    """
    # NOTE(review): the mutable default arguments are only read, never mutated,
    # so sharing them across calls is harmless here.
    df = pd.read_csv(dataset, skiprows=4)
    # Keep only the columns of interest (years and country name)
    df = df[keepcolumns]
    top10country = ['United States', 'China', 'Japan', 'Germany', 'United Kingdom', 'India', 'France', 'Brazil', 'Italy', 'Canada']
    df = df[df['Country Name'].isin(top10country)]
    # melt year columns and convert year to date time
    df_melt = df.melt(id_vars='Country Name', value_vars = value_variables)
    df_melt.columns = ['country','year', 'variable']
    df_melt['year'] = df_melt['year'].astype('datetime64[ns]').dt.year
    # output clean csv file
    return df_melt
def return_figures():
    """Creates four plotly visualizations
    Args:
        None
    Returns:
        list (dict): list containing the four plotly visualizations
    """
    # first chart plots arable land from 1990 to 2015 in top 10 economies
    # as a line chart
    graph_one = []
    df = cleandata('data/API_AG.LND.ARBL.HA.PC_DS2_en_csv_v2.csv')
    df.columns = ['country', 'year', 'hectaresarablelandperperson']
    df.sort_values('hectaresarablelandperperson', ascending=False, inplace=True)
    countrylist = df.country.unique().tolist()
    for country in countrylist:
        x_val = df[df['country'] == country].year.tolist()
        y_val = df[df['country'] == country].hectaresarablelandperperson.tolist()
        graph_one.append(
            go.Scatter(
                x=x_val,
                y=y_val,
                mode='lines',
                name=country
            )
        )
    layout_one = dict(title='Change in Hectares Arable Land <br> per Person 1990 to 2015',
                      xaxis=dict(title='Year',
                                 autotick=False, tick0=1990, dtick=25),
                      yaxis=dict(title='Hectares'),
                      )
    # second chart plots arable land for 2015 as a bar chart
    graph_two = []
    df = cleandata('data/API_AG.LND.ARBL.HA.PC_DS2_en_csv_v2.csv')
    df.columns = ['country', 'year', 'hectaresarablelandperperson']
    df.sort_values('hectaresarablelandperperson', ascending=False, inplace=True)
    df = df[df['year'] == 2015]
    graph_two.append(
        go.Bar(
            x=df.country.tolist(),
            y=df.hectaresarablelandperperson.tolist(),
        )
    )
    layout_two = dict(title='Hectares Arable Land per Person in 2015',
                      xaxis=dict(title='Country',),
                      yaxis=dict(title='Hectares per person'),
                      )
    # third chart plots percent of population that is rural from 1990 to 2015
    graph_three = []
    df = cleandata('data/API_SP.RUR.TOTL.ZS_DS2_en_csv_v2_9948275.csv')
    df.columns = ['country', 'year', 'percentrural']
    df.sort_values('percentrural', ascending=False, inplace=True)
    for country in countrylist:
        x_val = df[df['country'] == country].year.tolist()
        y_val = df[df['country'] == country].percentrural.tolist()
        graph_three.append(
            go.Scatter(
                x=x_val,
                y=y_val,
                mode='lines',
                name=country
            )
        )
    layout_three = dict(title='Change in Rural Population <br> (Percent of Total Population)',
                        xaxis=dict(title='Year',
                                   autotick=False, tick0=1990, dtick=25),
                        yaxis=dict(title='Percent'),
                        )
    # fourth chart shows rural population vs forested area
    graph_four = []
    valuevariables = [str(x) for x in range(1995, 2016)]
    keepcolumns = [str(x) for x in range(1995, 2016)]
    keepcolumns.insert(0, 'Country Name')
    df_one = cleandata('data/API_SP.RUR.TOTL_DS2_en_csv_v2_9914824.csv', keepcolumns, valuevariables)
    df_two = cleandata('data/API_AG.LND.FRST.K2_DS2_en_csv_v2_9910393.csv', keepcolumns, valuevariables)
    df_one.columns = ['country', 'year', 'variable']
    df_two.columns = ['country', 'year', 'variable']
    df = df_one.merge(df_two, on=['country', 'year'])
    for country in countrylist:
        x_val = df[df['country'] == country].variable_x.tolist()
        y_val = df[df['country'] == country].variable_y.tolist()
        years = df[df['country'] == country].year.tolist()
        country_label = df[df['country'] == country].country.tolist()
        # Bug fix: the original inner loop (`for country, year in zip(...)`)
        # clobbered both the outer loop variable `country` and the `year`
        # list; fresh names keep the hover text identical without the aliasing.
        text = [str(label) + ' ' + str(yr) for label, yr in zip(country_label, years)]
        graph_four.append(
            go.Scatter(
                x=x_val,
                y=y_val,
                mode='markers',
                text=text,
                name=country,
                textposition='top left'
            )
        )
    layout_four = dict(title='Rural Population versus <br> Forested Area (Square Km) 1990-2015',
                       xaxis=dict(title='Rural Population'),
                       yaxis=dict(title='Forest Area (square km)'),
                       )
    # append all charts to the figures list
    figures = []
    figures.append(dict(data=graph_one, layout=layout_one))
    figures.append(dict(data=graph_two, layout=layout_two))
    figures.append(dict(data=graph_three, layout=layout_three))
    figures.append(dict(data=graph_four, layout=layout_four))
    return figures
acef72c740d720df78285207df323b4d09da78a6 | 1,004 | py | Python | app/config.py | accmi/words-api-py | ae96b2124899d58017cb2716d09de3e6a1add550 | [
"MIT"
] | null | null | null | app/config.py | accmi/words-api-py | ae96b2124899d58017cb2716d09de3e6a1add550 | [
"MIT"
] | null | null | null | app/config.py | accmi/words-api-py | ae96b2124899d58017cb2716d09de3e6a1add550 | [
"MIT"
] | null | null | null | import os
from dotenv import load_dotenv
class AppConfig:
    """Collects configuration from the environment (.env) and installs it on a Flask app."""
    def __init__(self, app):
        # Populate os.environ from a local .env file, if present.
        load_dotenv()
        self.app = app
        self.USER = os.getenv('POSTGRES_USER')
        self.PASSWORD = os.getenv('POSTGRES_PASSWORD')
        self.DB_NAME = os.getenv('POSTGRES_DB')
        self.DB_HOST = os.getenv('POSTGRES_HOST')
        self.DB_PORT = os.getenv('POSTGRES_PORT')
        # NOTE(review): embeds the raw DB password and disables SSL
        # (sslmode=disable) — keep this string out of logs; confirm the
        # app-to-DB network is trusted.
        self.connection_string = f'postgresql://{self.USER}:{self.PASSWORD}@{self.DB_HOST}:{self.DB_PORT}/{self.DB_NAME}?sslmode=disable'
        self.SECRET_KEY = os.getenv('SECRET_KEY')
        self.PORT = os.getenv('PORT')
        self.HOST = os.getenv('HOST')
        # Apply everything to the Flask app immediately on construction.
        self.install()
    def install(self):
        # Copy the collected settings onto the Flask application object.
        self.app.secret_key = self.SECRET_KEY
        self.app.config['SQLALCHEMY_DATABASE_URI'] = self.connection_string
        self.app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
        # Flask-JWT: sign-in endpoint and the JSON key used as the username.
        self.app.config['JWT_AUTH_URL_RULE'] = '/api/signin'
        self.app.config['JWT_AUTH_USERNAME_KEY'] = 'email'
acef74631cd03ae9849c6ee83b1bfff67c28c33a | 7,058 | py | Python | jsonschema/_format.py | apiraino/jsonschema | b07d0f1d893f4a21008e0c8922959ddcf0614b73 | [
"MIT"
] | null | null | null | jsonschema/_format.py | apiraino/jsonschema | b07d0f1d893f4a21008e0c8922959ddcf0614b73 | [
"MIT"
] | null | null | null | jsonschema/_format.py | apiraino/jsonschema | b07d0f1d893f4a21008e0c8922959ddcf0614b73 | [
"MIT"
] | null | null | null | import datetime
import re
import socket
from jsonschema.compat import str_types
from jsonschema.exceptions import FormatError
class FormatChecker(object):
    """
    A ``format`` property checker.
    JSON Schema does not mandate that the ``format`` property actually do any
    validation. If validation is desired however, instances of this class can
    be hooked into validators to enable format validation.
    `FormatChecker` objects always return ``True`` when asked about
    formats that they do not know how to validate.
    To check a custom format using a function that takes an instance and
    returns a ``bool``, use the `FormatChecker.checks` or
    `FormatChecker.cls_checks` decorators.
    Arguments:
        formats (~collections.Iterable):
            The known formats to validate. This argument can be used to
            limit which formats will be used during validation.
    """
    # Class-level registry mapping format name -> (checker function, exception
    # types treated as "invalid"). Populated module-wide via cls_checks.
    checkers = {}
    def __init__(self, formats=None):
        if formats is None:
            # Copy so per-instance registrations don't mutate the class registry.
            self.checkers = self.checkers.copy()
        else:
            # Restrict to the requested subset; unknown names raise KeyError.
            self.checkers = dict((k, self.checkers[k]) for k in formats)
    def checks(self, format, raises=()):
        """
        Register a decorated function as validating a new format.
        Arguments:
            format (str):
                The format that the decorated function will check.
            raises (Exception):
                The exception(s) raised by the decorated function when an
                invalid instance is found.
                The exception object will be accessible as the
                `jsonschema.exceptions.ValidationError.cause` attribute of the
                resulting validation error.
        """
        def _checks(func):
            self.checkers[format] = (func, raises)
            return func
        return _checks
    # Same registration helper bound to the class, so module-level decorators
    # populate the shared class-level registry.
    cls_checks = classmethod(checks)
    def check(self, instance, format):
        """
        Check whether the instance conforms to the given format.
        Arguments:
            instance (*any primitive type*, i.e. str, number, bool):
                The instance to check
            format (str):
                The format that instance should conform to
        Raises:
            FormatError: if the instance does not conform to ``format``
        """
        # Unknown formats pass silently, per the JSON Schema specification.
        if format not in self.checkers:
            return
        func, raises = self.checkers[format]
        result, cause = None, None
        try:
            result = func(instance)
        except raises as e:
            # Declared exception types mean "invalid"; keep them as the cause.
            cause = e
        if not result:
            raise FormatError(
                "%r is not a %r" % (instance, format), cause=cause,
            )
    def conforms(self, instance, format):
        """
        Check whether the instance conforms to the given format.
        Arguments:
            instance (*any primitive type*, i.e. str, number, bool):
                The instance to check
            format (str):
                The format that instance should conform to
        Returns:
            bool: whether it conformed
        """
        try:
            self.check(instance, format)
        except FormatError:
            return False
        else:
            return True
# Per-draft lists of format names registered below; consumed at module bottom
# to build the default draft3/draft4 checkers.
_draft_checkers = {"draft3": [], "draft4": []}
def _checks_drafts(both=None, draft3=None, draft4=None, raises=()):
    """Decorator factory registering a checker under draft-specific names.
    ``both`` registers the same name for draft 3 and draft 4; ``draft3`` /
    ``draft4`` override it per draft. ``raises`` lists exception types the
    checker may raise for invalid input.
    """
    draft3 = draft3 or both
    draft4 = draft4 or both
    def wrap(func):
        # Record the name per draft and register on the shared class registry.
        if draft3:
            _draft_checkers["draft3"].append(draft3)
            func = FormatChecker.cls_checks(draft3, raises)(func)
        if draft4:
            _draft_checkers["draft4"].append(draft4)
            func = FormatChecker.cls_checks(draft4, raises)(func)
        return func
    return wrap
@_checks_drafts("email")
def is_email(instance):
    """Minimal email check: any string containing an ``@``; non-strings pass."""
    if not isinstance(instance, str_types):
        return True
    return instance.find("@") != -1
_ipv4_re = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$")
@_checks_drafts(draft3="ip-address", draft4="ipv4")
def is_ipv4(instance):
    """Dotted-quad IPv4 check: four 1-3 digit groups, each in 0..255."""
    if not isinstance(instance, str_types):
        return True
    if _ipv4_re.match(instance) is None:
        return False
    for octet in instance.split("."):
        if not 0 <= int(octet) <= 255:
            return False
    return True
# ipv6 support requires socket.inet_pton, which is missing on some platforms
# (notably older Windows builds); the format is simply unregistered there.
if hasattr(socket, "inet_pton"):
    @_checks_drafts("ipv6", raises=socket.error)
    def is_ipv6(instance):
        if not isinstance(instance, str_types):
            return True
        # inet_pton raises socket.error for malformed addresses.
        return socket.inet_pton(socket.AF_INET6, instance)
_host_name_re = re.compile(r"^[A-Za-z0-9][A-Za-z0-9\.\-]{1,255}$")
@_checks_drafts(draft3="host-name", draft4="hostname")
def is_host_name(instance):
    """Hostname check: overall character/length pattern plus a 63-char cap per label."""
    if not isinstance(instance, str_types):
        return True
    if _host_name_re.match(instance) is None:
        return False
    return all(len(label) <= 63 for label in instance.split("."))
try:
    import rfc3987
except ImportError:
    # Without rfc3987 the "uri" format simply goes unchecked.
    pass
else:
    @_checks_drafts("uri", raises=ValueError)
    def is_uri(instance):
        if isinstance(instance, str_types):
            # rfc3987.parse raises ValueError on invalid URIs.
            return rfc3987.parse(instance, rule="URI")
        return True
try:
    import strict_rfc3339
except ImportError:
    # Fall back to isodate when strict_rfc3339 is unavailable;
    # with neither installed, "date-time" goes unchecked.
    try:
        import isodate
    except ImportError:
        pass
    else:
        @_checks_drafts("date-time", raises=(ValueError, isodate.ISO8601Error))
        def is_datetime(instance):
            if isinstance(instance, str_types):
                return isodate.parse_datetime(instance)
            return True
else:
    @_checks_drafts("date-time")
    def is_datetime(instance):
        if isinstance(instance, str_types):
            return strict_rfc3339.validate_rfc3339(instance)
        return True
@_checks_drafts("regex", raises=re.error)
def is_regex(instance):
    # Strings must compile as a regex; anything else trivially conforms.
    if isinstance(instance, str_types):
        return re.compile(instance)
    return True
@_checks_drafts(draft3="date", raises=ValueError)
def is_date(instance):
    # strptime raises ValueError for anything other than YYYY-MM-DD.
    if isinstance(instance, str_types):
        return datetime.datetime.strptime(instance, "%Y-%m-%d")
    return True
@_checks_drafts(draft3="time", raises=ValueError)
def is_time(instance):
    # strptime raises ValueError for anything other than HH:MM:SS.
    if isinstance(instance, str_types):
        return datetime.datetime.strptime(instance, "%H:%M:%S")
    return True
try:
    import webcolors
except ImportError:
    # Without webcolors the "color" format goes unchecked.
    pass
else:
    def is_css_color_code(instance):
        # normalize_hex raises ValueError/TypeError for non-hex input.
        return webcolors.normalize_hex(instance)
    @_checks_drafts(draft3="color", raises=(ValueError, TypeError))
    def is_css21_color(instance):
        # Non-strings and CSS 2.1 color names conform outright.
        if not isinstance(instance, str_types):
            return True
        if instance.lower() in webcolors.css21_names_to_hex:
            return True
        return is_css_color_code(instance)
    def is_css3_color(instance):
        # Accept CSS3 color names, otherwise require a hex code.
        if instance.lower() in webcolors.css3_names_to_hex:
            return True
        return is_css_color_code(instance)
# Ready-made checkers covering every format registered above, one per draft.
draft3_format_checker = FormatChecker(_draft_checkers["draft3"])
draft4_format_checker = FormatChecker(_draft_checkers["draft4"])
| 25.948529 | 79 | 0.626806 |
acef753f9715afb332d9f3dffad03552493a0173 | 1,456 | py | Python | papermerge/contrib/admin/urls.py | amo13/papermerge | d188acb01c7e2e7086d216cd496e65030d48ae52 | [
"Apache-2.0"
] | 1 | 2020-09-28T06:04:38.000Z | 2020-09-28T06:04:38.000Z | papermerge/contrib/admin/urls.py | amo13/papermerge | d188acb01c7e2e7086d216cd496e65030d48ae52 | [
"Apache-2.0"
] | null | null | null | papermerge/contrib/admin/urls.py | amo13/papermerge | d188acb01c7e2e7086d216cd496e65030d48ae52 | [
"Apache-2.0"
] | 1 | 2020-11-17T16:20:05.000Z | 2020-11-17T16:20:05.000Z | from django.urls import path
from papermerge.contrib.admin import views
# URL namespace: reverse these routes as 'admin:<name>'.
app_name = 'admin'
# Routes for the admin UI. Handlers live in papermerge.contrib.admin.views;
# list pages use *ListView classes, edit pages use *ChangeView classes.
urlpatterns = [
    # Browsing and search
    path(
        '', views.browse, name="index"
    ),
    path(
        'inbox/', views.inbox_view, name="inbox"
    ),
    path(
        'browse', views.browse, name="browse"
    ),
    path(
        'search', views.search, name="search"
    ),
    # Logs
    path(
        'logs', views.LogsListView.as_view(), name="logs"
    ),
    path(
        'log/<int:id>/change',
        views.LogChangeView.as_view(),
        name="log_change"
    ),
    # Tags
    path(
        'tags', views.TagsListView.as_view(), name="tags"
    ),
    path(
        'tag/',
        views.TagView.as_view(),
        name="tag"
    ),
    path(
        'tag/<int:id>/change',
        views.TagChangeView.as_view(), name='tag_change'
    ),
    # Groups
    path(
        'groups/',
        views.GroupsListView.as_view(),
        name='groups'
    ),
    path(
        'group/',
        views.GroupView.as_view(),
        name='group'
    ),
    path(
        'group/<int:id>/change',
        views.GroupChangeView.as_view(),
        name='group_change'
    ),
    # Automates
    path(
        'automates/',
        views.AutomatesListView.as_view(),
        name='automates'
    ),
    path(
        'automate/',
        views.AutomateView.as_view(),
        name='automate'
    ),
    path(
        'automate/<int:id>/change',
        views.AutomateChangeView.as_view(),
        name='automate_change'
    ),
]
| 20.507042 | 57 | 0.517857 |
acef75463a72d9d6dadec9079defd225eb166a6e | 258 | py | Python | probability/calculations/calculation_types/simple_calculation.py | vahndi/probability | 6ddf88e6f3d947c96b879e426030f60eb5cb2d59 | [
"MIT"
] | 2 | 2020-02-21T00:47:03.000Z | 2020-09-22T19:00:48.000Z | probability/calculations/calculation_types/simple_calculation.py | vahndi/probability | 6ddf88e6f3d947c96b879e426030f60eb5cb2d59 | [
"MIT"
] | 52 | 2020-01-16T16:05:08.000Z | 2022-02-24T15:10:10.000Z | probability/calculations/calculation_types/simple_calculation.py | vahndi/probability | 6ddf88e6f3d947c96b879e426030f60eb5cb2d59 | [
"MIT"
] | null | null | null | from probability.calculations.mixins import ProbabilityCalculationMixin
class SimpleCalculation(
    ProbabilityCalculationMixin,
    object
):
    """
    Base class for SampleCalculation and ValueCalculation.
    Carries no behavior of its own; it exists so such calculations can
    be identified with ``isinstance`` checks.
    """
    pass
| 18.428571 | 71 | 0.744186 |
acef755426a715085d10f76791001ef4600eb7f5 | 20 | py | Python | examples/__init__.py | samerhaj/python-redfish | 34b77e064a1059176414e327541d25d5e045f87d | [
"Apache-2.0"
] | null | null | null | examples/__init__.py | samerhaj/python-redfish | 34b77e064a1059176414e327541d25d5e045f87d | [
"Apache-2.0"
] | null | null | null | examples/__init__.py | samerhaj/python-redfish | 34b77e064a1059176414e327541d25d5e045f87d | [
"Apache-2.0"
] | null | null | null | __author__ = 'deva'
| 10 | 19 | 0.7 |
acef760f94db5781d8310f8805accfb99d4b4280 | 633 | py | Python | settings.py | Arrisio/dvmn-async-06-filter-news | 4dbac26974f95dc427fb6e44370500edd844cf42 | [
"MIT"
] | null | null | null | settings.py | Arrisio/dvmn-async-06-filter-news | 4dbac26974f95dc427fb6e44370500edd844cf42 | [
"MIT"
] | null | null | null | settings.py | Arrisio/dvmn-async-06-filter-news | 4dbac26974f95dc427fb6e44370500edd844cf42 | [
"MIT"
] | null | null | null | CHARGED_WORDS_FILE_PATH = "charged_dict.zip"
# Timeouts for fetching and processing news -- presumably seconds; TODO confirm units.
FETCH_NEWS_TIMEOUT = 5
PROCESS_NEWS_TIMEOUT = 5
# Sample article URLs for test runs.
# NOTE(review): the first and third entries are identical -- intentional?
TEST_ARTICLE_URLS = [
    "https://inosmi.ru/social/20210625/249988253.html",
    "https://inosmi.ru/politic/20210625/249990364.html",
    "https://inosmi.ru/social/20210625/249988253.html",
    "https://inosmi.ru/politic/20210625/249989092.html",
    "https://inosmi.ru/economic/20210625/249987698.html",
    "https://inosmi.ru/politic/20210625/249990025.html",
]
# A very large plain-text document -- presumably used to exercise timeouts; TODO confirm.
SOME_LARGE_TEXT_URL = "https://dvmn.org/media/filer_public/51/83/51830f54-7ec7-4702-847b-c5790ed3724c/gogol_nikolay_taras_bulba_-_bookscafenet.txt"
MAX_URL_PER_REQUEST = 10
| 39.5625 | 147 | 0.769352 |
acef76a61da877d03e0c1d28e956dcc1b9e437d8 | 4,519 | py | Python | lelof1py/definitions.py | timorama82/lelo-f1-python-sdk | 491136013588ce94c2e2f27e7335190b7d1040ae | [
"MIT"
] | 1 | 2021-11-17T22:45:10.000Z | 2021-11-17T22:45:10.000Z | lelof1py/definitions.py | timorama82/lelo-f1-python-sdk | 491136013588ce94c2e2f27e7335190b7d1040ae | [
"MIT"
] | null | null | null | lelof1py/definitions.py | timorama82/lelo-f1-python-sdk | 491136013588ce94c2e2f27e7335190b7d1040ae | [
"MIT"
] | null | null | null |
class Constants:
	'''
	Application constants and logger names.
	Exposing the logger names allows easy logging configuration.
	'''
	# Root logger name; the remaining logger names are derived from it.
	LOGGER_NAME = 'lelo-f1-sdk-client'
	LOGGER_IO_NAME = LOGGER_NAME + '.io'
	LOGGER_CALLBACK_NAME = LOGGER_NAME + '.notification'
	LOGGER_SYNC_NAME = LOGGER_NAME + '.sync'
	LOGGER_SOCKET_SERVER_NAME = LOGGER_NAME + '.socket-server'
	LOGGER_FS_NAME = LOGGER_NAME + '.fs'
	# Name the device advertises over Bluetooth LE.
	ADVERTISING_DEVICE_NAME = 'F1s'
class Characteristics:
	'''
	Contains characteristics identifiers (UUIDs) for the device.
	The "reads [...]" comments below record raw values observed during
	development -- presumably from one specific device; treat as samples.
	'''
	KEY_STATE = '00000a0f-0000-1000-8000-00805f9b34fb'
	MOTOR_CONTROL = '0000fff1-0000-1000-8000-00805f9b34fb'
	MANUFACTURER_NAME = '00002a29-0000-1000-8000-00805f9b34fb'
	MODEL_NUMBER = '00002a24-0000-1000-8000-00805f9b34fb'
	HARDWARE_REVISION = '00002a27-0000-1000-8000-00805f9b34fb'
	FIRMWARE_REVISION = '00002a26-0000-1000-8000-00805f9b34fb'
	SOFTWARE_REVISION = '00002a28-0000-1000-8000-00805f9b34fb'
	MAC_ADDRESS = '00000a06-0000-1000-8000-00805f9b34fb'
	SERIAL_NUMBER = '00000a05-0000-1000-8000-00805f9b34fb'
	BATTERY_LEVEL = '00002a19-0000-1000-8000-00805f9b34fb'
	MOTOR_WORK_ON_TOUCH = '00000aa5-0000-1000-8000-00805f9b34fb'
	VIBRATOR_SETTING = '00000a0d-0000-1000-8000-00805f9b34fb'
	WAKE_UP = '00000aa1-0000-1000-8000-00805f9b34fb'
	HALL = '00000aa3-0000-1000-8000-00805f9b34fb'
	LENGTH = '00000a0b-0000-1000-8000-00805f9b34fb'
	ACCELEROMETER = '00000a0c-0000-1000-8000-00805f9b34fb'
	PRESSURE = '00000a0a-0000-1000-8000-00805f9b34fb'
	BUTTON = '00000aa4-0000-1000-8000-00805f9b34fb'
	USER_RECORD = '00000a04-0000-1000-8000-00805f9b34fb'
	CHIP_ID = '00000a07-0000-1000-8000-00805f9b34fb'
	# unreadable (err. 2)
	BATTERY_VOLTAGE = '00000a00-0000-1000-8000-00805f9b34fb'
	OTA = '00000a08-0000-1000-8000-00805f9b34fb'
	# reads [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
	ACTIVATE = '00000a0e-0000-1000-8000-00805f9b34fb'
	# reads [0]
	ACCELEROMETER_CONTROL = '00000aa0-0000-1000-8000-00805f9b34fb'
	# reads [0]
	HALL_CONTROL = '00000aa2-0000-1000-8000-00805f9b34fb'
	# ServiceName: GenericAccess
	# CharacteristicName: DeviceName
	# reads [70, 49, 115] "F1s"
	GENERIC_ACCESS_DEVICE_NAME = '00002a00-0000-1000-8000-00805f9b34fb'
	# ServiceName: GenericAccess
	# CharacteristicName: Appearance
	# reads [0, 0]
	GENERIC_ACCESS_APPEARANCE = '00002a01-0000-1000-8000-00805f9b34fb'
	# ServiceName: GenericAccess
	# CharacteristicName: PeripheralPreferredConnectionParameters
	# reads [80, 0, 160, 0, 0, 0, 232, 3]
	GENERIC_ACCESS_PERIPHERAL_PREFERRED_CONNECTION_PARAMETERS = '00002a04-0000-1000-8000-00805f9b34fb'
	# ServiceName: DeviceInformation
	# CharacteristicName: SystemId
	# reads [238, 91, 69, 0, 0, 227, 100, 196]
	DEVICE_INFORMATION_SYSTEM_ID = '00002a23-0000-1000-8000-00805f9b34fb'
	# ServiceName: DeviceInformation
	# CharacteristicName: SerialNumberString
	# reads [83, 101, 114, 105, 97, 108, 32, 78, 117, 109, 98, 101, 114] "Serial Number"
	DEVICE_INFORMATION_SERIAL_NUMBER_STRING = '00002a25-0000-1000-8000-00805f9b34fb'
	# ServiceName: DeviceInformation
	# CharacteristicName: Ieee11073_20601RegulatoryCertificationDataList
	# reads [254, 0, 101, 120, 112, 101, 114, 105, 109, 101, 110, 116, 97, 108]
	DEVICE_INFORMATION_IEEE11073 = '00002a2a-0000-1000-8000-00805f9b34fb'
	# ServiceName: DeviceInformation
	# CharacteristicName: PnpId
	# reads [1, 13, 0, 0, 0, 16, 1]
	DEVICE_INFORMATION_PNP_ID = '00002a50-0000-1000-8000-00805f9b34fb'
class Services:
	'''
	Contains services identifiers (UUIDs) for the device.
	Unused at the moment.
	'''
	GENERIC_ACCESS_PROFILE = '00001800-0000-1000-8000-00805f9b34fb'
	GENERIC_ATTRIBUTE_PROFILE = '00001801-0000-1000-8000-00805f9b34fb'
	DEVICE_INFORMATION = '0000180a-0000-1000-8000-00805f9b34fb'
	VENDOR_SPECIFIC = '0000fff0-0000-1000-8000-00805f9b34fb'
	BATTERY_SERVICE = '0000180f-0000-1000-8000-00805f9b34fb'
class CruiseControlStatus:
	'''
	Alias for Cruise Control status.
	For internal use only: value is translated to boolean when accessed from client methods.
	Note that the value ENABLE_AND_RESET supports write only.
	'''
	DISABLED = 0x00
	ENABLED = 0x01
	ENABLE_AND_RESET = 0x02
class WakeUp:
	'''
	Alias for quick Wake-Up status.
	For internal use only: value is translated to boolean when accessed from client methods.
	'''
	DISABLED = 0x00
	ENABLED = 0x01
class Buttons:
	'''
	Alias for buttons status.
	'''
	NONE_PRESSED = 0x03
	CENTRAL = 0x00
	PLUS = 0x01
	MINUS = 0x02
class ConnectionProfile:
	'''
	Holds information on connected device.
	'''
	# All fields default to None -- presumably populated by the client
	# after discovery/connection; TODO confirm against caller.
	address = None
	uuid = None
	name = None
acef7b14bce5e2102c7da7c7242b58804ce0b107 | 35,761 | py | Python | pygments/lexers/configs.py | KenKundert/pygments | abd14ab63c7201ed4b8511f8ae4d219f884fc5e7 | [
"BSD-2-Clause"
] | null | null | null | pygments/lexers/configs.py | KenKundert/pygments | abd14ab63c7201ed4b8511f8ae4d219f884fc5e7 | [
"BSD-2-Clause"
] | null | null | null | pygments/lexers/configs.py | KenKundert/pygments | abd14ab63c7201ed4b8511f8ae4d219f884fc5e7 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
pygments.lexers.configs
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for configuration file formats.
:copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, default, words, bygroups, include, using
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace, Literal, Generic, Error
from pygments.lexers.shell import BashLexer
from pygments.lexers.data import JsonLexer
__all__ = ['IniLexer', 'RegeditLexer', 'PropertiesLexer', 'KconfigLexer',
'Cfengine3Lexer', 'ApacheConfLexer', 'SquidConfLexer',
'NginxConfLexer', 'LighttpdConfLexer', 'DockerLexer',
'TerraformLexer', 'TermcapLexer', 'TerminfoLexer',
'PkgConfigLexer', 'PacmanConfLexer', 'AugeasLexer', 'TOMLLexer',
'NestedTextLexer', 'SingularityLexer']
class IniLexer(RegexLexer):
    """
    Lexer for configuration files in INI style.
    """
    name = 'INI'
    aliases = ['ini', 'cfg', 'dosini']
    filenames = ['*.ini', '*.cfg', '*.inf']
    mimetypes = ['text/x-ini', 'text/inf']
    tokens = {
        'root': [
            (r'\s+', Text),
            (r'[;#].*', Comment.Single),
            # [section] header
            (r'\[.*?\]$', Keyword),
            # key = value; the value may continue on indented lines
            (r'(.*?)([ \t]*)(=)([ \t]*)(.*(?:\n[ \t].+)*)',
             bygroups(Name.Attribute, Text, Operator, Text, String)),
            # standalone option, supported by some INI parsers
            (r'(.+?)$', Name.Attribute),
        ],
    }
    def analyse_text(text):
        # Guess INI when the whole first line is a [section] header.
        npos = text.find('\n')
        if npos < 3:
            return False
        return text[0] == '[' and text[npos-1] == ']'
class RegeditLexer(RegexLexer):
    """
    Lexer for `Windows Registry
    <http://en.wikipedia.org/wiki/Windows_Registry#.REG_files>`_ files produced
    by regedit.
    .. versionadded:: 1.6
    """
    name = 'reg'
    aliases = ['registry']
    filenames = ['*.reg']
    mimetypes = ['text/x-windows-registry']
    tokens = {
        'root': [
            (r'Windows Registry Editor.*', Text),
            (r'\s+', Text),
            (r'[;#].*', Comment.Single),
            # [HKEY_...] key path; a leading "-" deletes the key
            (r'(\[)(-?)(HKEY_[A-Z_]+)(.*?\])$',
             bygroups(Keyword, Operator, Name.Builtin, Keyword)),
            # String keys, which obey somewhat normal escaping
            (r'("(?:\\"|\\\\|[^"])+")([ \t]*)(=)([ \t]*)',
             bygroups(Name.Attribute, Text, Operator, Text),
             'value'),
            # Bare keys (includes @)
            (r'(.*?)([ \t]*)(=)([ \t]*)',
             bygroups(Name.Attribute, Text, Operator, Text),
             'value'),
        ],
        'value': [
            (r'-', Operator, '#pop'),  # delete value
            (r'(dword|hex(?:\([0-9a-fA-F]\))?)(:)([0-9a-fA-F,]+)',
             bygroups(Name.Variable, Punctuation, Number), '#pop'),
            # As far as I know, .reg files do not support line continuation.
            (r'.+', String, '#pop'),
            default('#pop'),
        ]
    }
    def analyse_text(text):
        return text.startswith('Windows Registry Editor')
class PropertiesLexer(RegexLexer):
    """
    Lexer for configuration files in Java's properties format.
    Note: trailing whitespace counts as part of the value as per spec.
    .. versionadded:: 1.4
    """
    name = 'Properties'
    aliases = ['properties', 'jproperties']
    filenames = ['*.properties']
    mimetypes = ['text/x-java-properties']
    tokens = {
        'root': [
            # "key value" with only whitespace as separator
            (r'^(\w+)([ \t])(\w+\s*)$', bygroups(Name.Attribute, Text, String)),
            # key alone on a line (escaped whitespace allowed)
            (r'^\w+(\\[ \t]\w*)*$', Name.Attribute),
            (r'(^ *)([#!].*)', bygroups(Text, Comment)),
            # More controversial comments
            (r'(^ *)((?:;|//).*)', bygroups(Text, Comment)),
            # key = value / key: value, with backslash line continuation
            (r'(.*?)([ \t]*)([=:])([ \t]*)(.*(?:(?<=\\)\n.*)*)',
             bygroups(Name.Attribute, Text, Operator, Text, String)),
            (r'\s', Text),
        ],
    }
def _rx_indent(level):
# Kconfig *always* interprets a tab as 8 spaces, so this is the default.
# Edit this if you are in an environment where KconfigLexer gets expanded
# input (tabs expanded to spaces) and the expansion tab width is != 8,
# e.g. in connection with Trac (trac.ini, [mimeviewer], tab_width).
# Value range here is 2 <= {tab_width} <= 8.
tab_width = 8
# Regex matching a given indentation {level}, assuming that indentation is
# a multiple of {tab_width}. In other cases there might be problems.
if tab_width == 2:
space_repeat = '+'
else:
space_repeat = '{1,%d}' % (tab_width - 1)
if level == 1:
level_repeat = ''
else:
level_repeat = '{%s}' % level
return r'(?:\t| %s\t| {%s})%s.*\n' % (space_repeat, tab_width, level_repeat)
class KconfigLexer(RegexLexer):
    """
    For Linux-style Kconfig files.
    .. versionadded:: 1.6
    """
    name = 'Kconfig'
    aliases = ['kconfig', 'menuconfig', 'linux-config', 'kernel-config']
    # Adjust this if new kconfig file names appear in your environment
    filenames = ['Kconfig*', '*Config.in*', 'external.in*',
                 'standard-modules.in']
    mimetypes = ['text/x-kconfig']
    # No re.MULTILINE, indentation-aware help text needs line-by-line handling
    flags = 0
    # The two helpers below run at class-creation time to build the token table.
    def call_indent(level):
        # If indentation >= {level} is detected, enter state 'indent{level}'
        return (_rx_indent(level), String.Doc, 'indent%s' % level)
    def do_indent(level):
        # Print paragraphs of indentation level >= {level} as String.Doc,
        # ignoring blank lines. Then return to 'root' state.
        return [
            (_rx_indent(level), String.Doc),
            (r'\s*\n', Text),
            default('#pop:2')
        ]
    tokens = {
        'root': [
            (r'\s+', Text),
            (r'#.*?\n', Comment.Single),
            (words((
                'mainmenu', 'config', 'menuconfig', 'choice', 'endchoice',
                'comment', 'menu', 'endmenu', 'visible if', 'if', 'endif',
                'source', 'prompt', 'select', 'depends on', 'default',
                'range', 'option'), suffix=r'\b'),
             Keyword),
            (r'(---help---|help)[\t ]*\n', Keyword, 'help'),
            (r'(bool|tristate|string|hex|int|defconfig_list|modules|env)\b',
             Name.Builtin),
            (r'[!=&|]', Operator),
            (r'[()]', Punctuation),
            (r'[0-9]+', Number.Integer),
            (r"'(''|[^'])*'", String.Single),
            (r'"(""|[^"])*"', String.Double),
            (r'\S+', Text),
        ],
        # Help text is indented, multi-line and ends when a lower indentation
        # level is detected.
        'help': [
            # Skip blank lines after help token, if any
            (r'\s*\n', Text),
            # Determine the first help line's indentation level heuristically(!).
            # Attention: this is not perfect, but works for 99% of "normal"
            # indentation schemes up to a max. indentation level of 7.
            call_indent(7),
            call_indent(6),
            call_indent(5),
            call_indent(4),
            call_indent(3),
            call_indent(2),
            call_indent(1),
            default('#pop'),  # for incomplete help sections without text
        ],
        # Handle text for indentation levels 7 to 1
        'indent7': do_indent(7),
        'indent6': do_indent(6),
        'indent5': do_indent(5),
        'indent4': do_indent(4),
        'indent3': do_indent(3),
        'indent2': do_indent(2),
        'indent1': do_indent(1),
    }
class Cfengine3Lexer(RegexLexer):
    """
    Lexer for `CFEngine3 <http://cfengine.org>`_ policy files.
    .. versionadded:: 1.5
    """
    name = 'CFEngine3'
    aliases = ['cfengine3', 'cf3']
    filenames = ['*.cf']
    mimetypes = []
    tokens = {
        'root': [
            (r'#.*?\n', Comment),
            # "body ... control" header
            (r'(body)(\s+)(\S+)(\s+)(control)',
             bygroups(Keyword, Text, Keyword, Text, Keyword)),
            # body/bundle with an argument list
            (r'(body|bundle)(\s+)(\S+)(\s+)(\w+)(\()',
             bygroups(Keyword, Text, Keyword, Text, Name.Function, Punctuation),
             'arglist'),
            (r'(body|bundle)(\s+)(\S+)(\s+)(\w+)',
             bygroups(Keyword, Text, Keyword, Text, Name.Function)),
            # typed variable definitions: "name" string => ...
            (r'(")([^"]+)(")(\s+)(string|slist|int|real)(\s*)(=>)(\s*)',
             bygroups(Punctuation, Name.Variable, Punctuation,
                      Text, Keyword.Type, Text, Operator, Text)),
            (r'(\S+)(\s*)(=>)(\s*)',
             bygroups(Keyword.Reserved, Text, Operator, Text)),
            (r'"', String, 'string'),
            (r'(\w+)(\()', bygroups(Name.Function, Punctuation)),
            # class expressions ending in "::"
            (r'([\w.!&|()]+)(::)', bygroups(Name.Class, Punctuation)),
            (r'(\w+)(:)', bygroups(Keyword.Declaration, Punctuation)),
            (r'@[{(][^)}]+[})]', Name.Variable),
            (r'[(){},;]', Punctuation),
            (r'=>', Operator),
            (r'->', Operator),
            (r'\d+\.\d+', Number.Float),
            (r'\d+', Number.Integer),
            (r'\w+', Name.Function),
            (r'\s+', Text),
        ],
        'string': [
            (r'\$[{(]', String.Interpol, 'interpol'),
            (r'\\.', String.Escape),
            (r'"', String, '#pop'),
            (r'\n', String),
            (r'.', String),
        ],
        'interpol': [
            (r'\$[{(]', String.Interpol, '#push'),
            (r'[})]', String.Interpol, '#pop'),
            (r'[^${()}]+', String.Interpol),
        ],
        'arglist': [
            (r'\)', Punctuation, '#pop'),
            (r',', Punctuation),
            (r'\w+', Name.Variable),
            (r'\s+', Text),
        ],
    }
class ApacheConfLexer(RegexLexer):
    """
    Lexer for configuration files following the Apache config file
    format.
    .. versionadded:: 0.6
    """
    name = 'ApacheConf'
    aliases = ['apacheconf', 'aconf', 'apache']
    filenames = ['.htaccess', 'apache.conf', 'apache2.conf']
    mimetypes = ['text/x-apacheconf']
    flags = re.MULTILINE | re.IGNORECASE
    tokens = {
        'root': [
            (r'\s+', Text),
            # comments, including backslash-continued ones
            (r'#(.*\\\n)+.*$|(#.*?)$', Comment),
            # <Section args> tags
            (r'(<[^\s>]+)(?:(\s+)(.*))?(>)',
             bygroups(Name.Tag, Text, String, Name.Tag)),
            # directive name, then switch to value parsing
            (r'[a-z]\w*', Name.Builtin, 'value'),
            (r'\.+', Text),
        ],
        'value': [
            (r'\\\n', Text),
            (r'$', Text, '#pop'),
            (r'\\', Text),
            (r'[^\S\n]+', Text),
            (r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
            (r'\d+', Number),
            (r'/([*a-z0-9][*\w./-]+)', String.Other),
            (r'(on|off|none|any|all|double|email|dns|min|minimal|'
             r'os|productonly|full|emerg|alert|crit|error|warn|'
             r'notice|info|debug|registry|script|inetd|standalone|'
             r'user|group)\b', Keyword),
            (r'"([^"\\]*(?:\\(.|\n)[^"\\]*)*)"', String.Double),
            (r'[^\s"\\]+', Text)
        ],
    }
class SquidConfLexer(RegexLexer):
    """
    Lexer for `squid <http://www.squid-cache.org/>`_ configuration files.
    .. versionadded:: 0.9
    """
    name = 'SquidConf'
    aliases = ['squidconf', 'squid.conf', 'squid']
    filenames = ['squid.conf']
    mimetypes = ['text/x-squidconf']
    flags = re.IGNORECASE
    # Directive names highlighted as Keyword.
    # NOTE(review): "cahce_replacement_policy" below looks like a typo of
    # "cache_replacement_policy" -- confirm before changing, since fixing it
    # changes which directives get keyword highlighting.
    keywords = (
        "access_log", "acl", "always_direct", "announce_host",
        "announce_period", "announce_port", "announce_to", "anonymize_headers",
        "append_domain", "as_whois_server", "auth_param_basic",
        "authenticate_children", "authenticate_program", "authenticate_ttl",
        "broken_posts", "buffered_logs", "cache_access_log", "cache_announce",
        "cache_dir", "cache_dns_program", "cache_effective_group",
        "cache_effective_user", "cache_host", "cache_host_acl",
        "cache_host_domain", "cache_log", "cache_mem", "cache_mem_high",
        "cache_mem_low", "cache_mgr", "cachemgr_passwd", "cache_peer",
        "cache_peer_access", "cahce_replacement_policy", "cache_stoplist",
        "cache_stoplist_pattern", "cache_store_log", "cache_swap",
        "cache_swap_high", "cache_swap_log", "cache_swap_low", "client_db",
        "client_lifetime", "client_netmask", "connect_timeout", "coredump_dir",
        "dead_peer_timeout", "debug_options", "delay_access", "delay_class",
        "delay_initial_bucket_level", "delay_parameters", "delay_pools",
        "deny_info", "dns_children", "dns_defnames", "dns_nameservers",
        "dns_testnames", "emulate_httpd_log", "err_html_text",
        "fake_user_agent", "firewall_ip", "forwarded_for", "forward_snmpd_port",
        "fqdncache_size", "ftpget_options", "ftpget_program", "ftp_list_width",
        "ftp_passive", "ftp_user", "half_closed_clients", "header_access",
        "header_replace", "hierarchy_stoplist", "high_response_time_warning",
        "high_page_fault_warning", "hosts_file", "htcp_port", "http_access",
        "http_anonymizer", "httpd_accel", "httpd_accel_host",
        "httpd_accel_port", "httpd_accel_uses_host_header",
        "httpd_accel_with_proxy", "http_port", "http_reply_access",
        "icp_access", "icp_hit_stale", "icp_port", "icp_query_timeout",
        "ident_lookup", "ident_lookup_access", "ident_timeout",
        "incoming_http_average", "incoming_icp_average", "inside_firewall",
        "ipcache_high", "ipcache_low", "ipcache_size", "local_domain",
        "local_ip", "logfile_rotate", "log_fqdn", "log_icp_queries",
        "log_mime_hdrs", "maximum_object_size", "maximum_single_addr_tries",
        "mcast_groups", "mcast_icp_query_timeout", "mcast_miss_addr",
        "mcast_miss_encode_key", "mcast_miss_port", "memory_pools",
        "memory_pools_limit", "memory_replacement_policy", "mime_table",
        "min_http_poll_cnt", "min_icp_poll_cnt", "minimum_direct_hops",
        "minimum_object_size", "minimum_retry_timeout", "miss_access",
        "negative_dns_ttl", "negative_ttl", "neighbor_timeout",
        "neighbor_type_domain", "netdb_high", "netdb_low", "netdb_ping_period",
        "netdb_ping_rate", "never_direct", "no_cache", "passthrough_proxy",
        "pconn_timeout", "pid_filename", "pinger_program", "positive_dns_ttl",
        "prefer_direct", "proxy_auth", "proxy_auth_realm", "query_icmp",
        "quick_abort", "quick_abort_max", "quick_abort_min",
        "quick_abort_pct", "range_offset_limit", "read_timeout",
        "redirect_children", "redirect_program",
        "redirect_rewrites_host_header", "reference_age",
        "refresh_pattern", "reload_into_ims", "request_body_max_size",
        "request_size", "request_timeout", "shutdown_lifetime",
        "single_parent_bypass", "siteselect_timeout", "snmp_access",
        "snmp_incoming_address", "snmp_port", "source_ping", "ssl_proxy",
        "store_avg_object_size", "store_objects_per_bucket",
        "strip_query_terms", "swap_level1_dirs", "swap_level2_dirs",
        "tcp_incoming_address", "tcp_outgoing_address", "tcp_recv_bufsize",
        "test_reachability", "udp_hit_obj", "udp_hit_obj_size",
        "udp_incoming_address", "udp_outgoing_address", "unique_hostname",
        "unlinkd_program", "uri_whitespace", "useragent_log",
        "visible_hostname", "wais_relay", "wais_relay_host", "wais_relay_port",
    )
    # Option values highlighted as Name.Constant.
    opts = (
        "proxy-only", "weight", "ttl", "no-query", "default", "round-robin",
        "multicast-responder", "on", "off", "all", "deny", "allow", "via",
        "parent", "no-digest", "heap", "lru", "realm", "children", "q1", "q2",
        "credentialsttl", "none", "disable", "offline_toggle", "diskd",
    )
    actions = (
        "shutdown", "info", "parameter", "server_list", "client_list",
        r'squid.conf',
    )
    actions_stats = (
        "objects", "vm_objects", "utilization", "ipcache", "fqdncache", "dns",
        "redirector", "io", "reply_headers", "filedescriptors", "netdb",
    )
    actions_log = ("status", "enable", "disable", "clear")
    acls = (
        "url_regex", "urlpath_regex", "referer_regex", "port", "proto",
        "req_mime_type", "rep_mime_type", "method", "browser", "user", "src",
        "dst", "time", "dstdomain", "ident", "snmp_community",
    )
    # Matches either a dotted-quad IPv4 address or an IPv6 address.
    ip_re = (
        r'(?:(?:(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|0x0*[0-9a-f]{1,2}|'
        r'0+[1-3]?[0-7]{0,2})(?:\.(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|'
        r'0x0*[0-9a-f]{1,2}|0+[1-3]?[0-7]{0,2})){3})|(?!.*::.*::)(?:(?!:)|'
        r':(?=:))(?:[0-9a-f]{0,4}(?:(?<=::)|(?<!::):)){6}(?:[0-9a-f]{0,4}'
        r'(?:(?<=::)|(?<!::):)[0-9a-f]{0,4}(?:(?<=::)|(?<!:)|(?<=:)(?<!::):)|'
        r'(?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-4]|2[0-4]\d|1\d\d|'
        r'[1-9]?\d)){3}))'
    )
    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'#', Comment, 'comment'),
            (words(keywords, prefix=r'\b', suffix=r'\b'), Keyword),
            (words(opts, prefix=r'\b', suffix=r'\b'), Name.Constant),
            # Actions
            (words(actions, prefix=r'\b', suffix=r'\b'), String),
            (words(actions_stats, prefix=r'stats/', suffix=r'\b'), String),
            (words(actions_log, prefix=r'log/', suffix=r'='), String),
            (words(acls, prefix=r'\b', suffix=r'\b'), Keyword),
            (ip_re + r'(?:/(?:' + ip_re + r'|\b\d+\b))?', Number.Float),
            (r'(?:\b\d+\b(?:-\b\d+|%)?)', Number),
            (r'\S+', Text),
        ],
        'comment': [
            (r'\s*TAG:.*', String.Escape, '#pop'),
            (r'.+', Comment, '#pop'),
            default('#pop'),
        ],
    }
class NginxConfLexer(RegexLexer):
    """
    Lexer for `Nginx <http://nginx.net/>`_ configuration files.
    .. versionadded:: 0.11
    """
    name = 'Nginx configuration file'
    aliases = ['nginx']
    filenames = ['nginx.conf']
    mimetypes = ['text/x-nginx-conf']
    tokens = {
        'root': [
            (r'(include)(\s+)([^\s;]+)', bygroups(Keyword, Text, Name)),
            # directive name, then parse its arguments in 'stmt'
            (r'[^\s;#]+', Keyword, 'stmt'),
            include('base'),
        ],
        'block': [
            (r'\}', Punctuation, '#pop:2'),
            (r'[^\s;#]+', Keyword.Namespace, 'stmt'),
            include('base'),
        ],
        'stmt': [
            (r'\{', Punctuation, 'block'),
            (r';', Punctuation, '#pop'),
            include('base'),
        ],
        'base': [
            (r'#.*\n', Comment.Single),
            (r'on|off', Name.Constant),
            (r'\$[^\s;#()]+', Name.Variable),
            (r'([a-z0-9.-]+)(:)([0-9]+)',
             bygroups(Name, Punctuation, Number.Integer)),
            (r'[a-z-]+/[a-z-+]+', String),  # mimetype
            # (r'[a-zA-Z._-]+', Keyword),
            (r'[0-9]+[km]?\b', Number.Integer),
            (r'(~)(\s*)([^\s{]+)', bygroups(Punctuation, Text, String.Regex)),
            (r'[:=~]', Punctuation),
            (r'[^\s;#{}$]+', String),  # catch all
            (r'/[^\s;#]*', Name),  # pathname
            (r'\s+', Text),
            (r'[$;]', Text),  # leftover characters
        ],
    }
class LighttpdConfLexer(RegexLexer):
    """
    Lexer for `Lighttpd <http://lighttpd.net/>`_ configuration files.
    .. versionadded:: 0.11
    """
    name = 'Lighttpd configuration file'
    aliases = ['lighty', 'lighttpd']
    filenames = []
    mimetypes = ['text/x-lighttpd-conf']
    tokens = {
        'root': [
            (r'#.*\n', Comment.Single),
            (r'/\S*', Name),  # pathname
            (r'[a-zA-Z._-]+', Keyword),
            (r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
            (r'[0-9]+', Number),
            (r'=>|=~|\+=|==|=|\+', Operator),
            (r'\$[A-Z]+', Name.Builtin),
            (r'[(){}\[\],]', Punctuation),
            (r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double),
            (r'\s+', Text),
        ],
    }
class DockerLexer(RegexLexer):
    """
    Lexer for `Docker <http://docker.io>`_ configuration files.
    .. versionadded:: 2.0
    """
    name = 'Docker'
    aliases = ['docker', 'dockerfile']
    filenames = ['Dockerfile', '*.docker']
    mimetypes = ['text/x-dockerfile-config']
    # Instructions whose arguments are plain strings vs. shell commands.
    _keywords = (r'(?:MAINTAINER|EXPOSE|WORKDIR|USER|STOPSIGNAL)')
    _bash_keywords = (r'(?:RUN|CMD|ENTRYPOINT|ENV|ARG|LABEL|ADD|COPY)')
    _lb = r'(?:\s*\\?\s*)'  # dockerfile line break regex
    flags = re.IGNORECASE | re.MULTILINE
    tokens = {
        'root': [
            (r'#.*', Comment),
            (r'(FROM)([ \t]*)(\S*)([ \t]*)(?:(AS)([ \t]*)(\S*))?',
             bygroups(Keyword, Text, String, Text, Keyword, Text, String)),
            (r'(ONBUILD)(%s)' % (_lb,), bygroups(Keyword, using(BashLexer))),
            (r'(HEALTHCHECK)((%s--\w+=\w+%s)*)' % (_lb, _lb),
             bygroups(Keyword, using(BashLexer))),
            # exec-form (JSON array) arguments
            (r'(VOLUME|ENTRYPOINT|CMD|SHELL)(%s)(\[.*?\])' % (_lb,),
             bygroups(Keyword, using(BashLexer), using(JsonLexer))),
            (r'(LABEL|ENV|ARG)((%s\w+=\w+%s)*)' % (_lb, _lb),
             bygroups(Keyword, using(BashLexer))),
            (r'(%s|VOLUME)\b(.*)' % (_keywords), bygroups(Keyword, String)),
            (r'(%s)' % (_bash_keywords,), Keyword),
            # anything else (with continuations) is highlighted as shell
            (r'(.*\\\n)*.+', using(BashLexer)),
        ]
    }
class TerraformLexer(RegexLexer):
    """
    Lexer for `terraform .tf files <https://www.terraform.io/>`_.
    .. versionadded:: 2.1
    """
    name = 'Terraform'
    aliases = ['terraform', 'tf']
    filenames = ['*.tf']
    mimetypes = ['application/x-tf', 'application/x-terraform']
    embedded_keywords = ('ingress', 'egress', 'listener', 'default',
                         'connection', 'alias', 'terraform', 'tags', 'vars',
                         'config', 'lifecycle', 'timeouts')
    tokens = {
        'root': [
            include('string'),
            include('punctuation'),
            include('curly'),
            include('basic'),
            include('whitespace'),
            (r'[0-9]+', Number),
        ],
        'basic': [
            (words(('true', 'false'), prefix=r'\b', suffix=r'\b'), Keyword.Type),
            (r'\s*/\*', Comment.Multiline, 'comment'),
            (r'\s*#.*\n', Comment.Single),
            (r'(.*?)(\s*)(=)', bygroups(Name.Attribute, Text, Operator)),
            # top-level block kinds, followed by their label(s)
            (words(('variable', 'resource', 'provider', 'provisioner', 'module',
                    'backend', 'data', 'output'), prefix=r'\b', suffix=r'\b'),
             Keyword.Reserved, 'function'),
            (words(embedded_keywords, prefix=r'\b', suffix=r'\b'),
             Keyword.Declaration),
            (r'\$\{', String.Interpol, 'var_builtin'),
        ],
        'function': [
            (r'(\s+)(".*")(\s+)', bygroups(Text, String, Text)),
            include('punctuation'),
            include('curly'),
        ],
        'var_builtin': [
            (r'\$\{', String.Interpol, '#push'),
            (words(('concat', 'file', 'join', 'lookup', 'element'),
                   prefix=r'\b', suffix=r'\b'), Name.Builtin),
            include('string'),
            include('punctuation'),
            (r'\s+', Text),
            (r'\}', String.Interpol, '#pop'),
        ],
        'string': [
            (r'(".*")', bygroups(String.Double)),
        ],
        'punctuation': [
            (r'[\[\](),.]', Punctuation),
        ],
        # Keep this separate from punctuation - we sometimes want to use different
        # Tokens for { }
        'curly': [
            (r'\{', Text.Punctuation),
            (r'\}', Text.Punctuation),
        ],
        'comment': [
            (r'[^*/]', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline)
        ],
        'whitespace': [
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text),
        ],
    }
class TermcapLexer(RegexLexer):
    """
    Lexer for termcap database source.
    This is very simple and minimal.
    .. versionadded:: 2.1
    """
    name = 'Termcap'
    aliases = ['termcap']
    filenames = ['termcap', 'termcap.src']
    mimetypes = []
    # NOTE:
    # * multiline with trailing backslash
    # * separator is ':'
    # * to embed colon as data, we must use \072
    # * space after separator is not allowed (maybe)
    tokens = {
        'root': [
            (r'^#.*$', Comment),
            (r'^[^\s#:|]+', Name.Tag, 'names'),
        ],
        'names': [
            (r'\n', Text, '#pop'),
            (r':', Punctuation, 'defs'),
            (r'\|', Punctuation),
            (r'[^:|]+', Name.Attribute),
        ],
        'defs': [
            (r'\\\n[ \t]*', Text),
            (r'\n[ \t]*', Text, '#pop:2'),
            (r'(#)([0-9]+)', bygroups(Operator, Number)),
            (r'=', Operator, 'data'),
            (r':', Punctuation),
            (r'[^\s:=#]+', Name.Class),
        ],
        'data': [
            (r'\\072', Literal),
            (r':', Punctuation, '#pop'),
            (r'[^:\\]+', Literal),  # for performance
            (r'.', Literal),
        ],
    }
class TerminfoLexer(RegexLexer):
    """
    Lexer for terminfo database source.
    This is very simple and minimal.
    .. versionadded:: 2.1
    """
    name = 'Terminfo'
    aliases = ['terminfo']
    filenames = ['terminfo', 'terminfo.src']
    mimetypes = []
    # NOTE:
    # * multiline with leading whitespace
    # * separator is ','
    # * to embed comma as data, we can use \,
    # * space after separator is allowed
    tokens = {
        # Entry point: comment lines, or the first name of an entry.
        'root': [
            (r'^#.*$', Comment),
            (r'^[^\s#,|]+', Name.Tag, 'names'),
        ],
        # Alternative names, '|'-separated; the first ',' starts the
        # capability definitions.
        'names': [
            (r'\n', Text, '#pop'),
            (r'(,)([ \t]*)', bygroups(Punctuation, Text), 'defs'),
            (r'\|', Punctuation),
            (r'[^,|]+', Name.Attribute),
        ],
        # Capability definitions; continuation lines are indented, a
        # newline without indent ends the entry ('#pop:2').
        'defs': [
            (r'\n[ \t]+', Text),
            (r'\n', Text, '#pop:2'),
            (r'(#)([0-9]+)', bygroups(Operator, Number)),
            (r'=', Operator, 'data'),
            (r'(,)([ \t]*)', bygroups(Punctuation, Text)),
            (r'[^\s,=#]+', Name.Class),
        ],
        # String capability value; '\,' embeds a literal comma, a bare
        # ',' terminates the value.
        'data': [
            (r'\\[,\\]', Literal),
            (r'(,)([ \t]*)', bygroups(Punctuation, Text), '#pop'),
            (r'[^\\,]+', Literal),  # for performance
            (r'.', Literal),
        ],
    }
class PkgConfigLexer(RegexLexer):
    """
    Lexer for `pkg-config
    <http://www.freedesktop.org/wiki/Software/pkg-config/>`_
    (see also `manual page <http://linux.die.net/man/1/pkg-config>`_).
    .. versionadded:: 2.1
    """
    name = 'PkgConfig'
    aliases = ['pkgconfig']
    filenames = ['*.pc']
    mimetypes = []
    tokens = {
        'root': [
            (r'#.*$', Comment.Single),
            # variable definitions
            (r'^(\w+)(=)', bygroups(Name.Attribute, Operator)),
            # keyword lines (e.g. "Cflags:"); value is lexed in 'spvalue'
            (r'^([\w.]+)(:)',
             bygroups(Name.Tag, Punctuation), 'spvalue'),
            # variable references
            include('interp'),
            # fallback
            (r'[^${}#=:\n.]+', Text),
            (r'.', Text),
        ],
        # "${var}" interpolation, shared by 'root' and 'spvalue'.
        'interp': [
            # you can escape literal "$" as "$$"
            (r'\$\$', Text),
            # variable references
            (r'\$\{', String.Interpol, 'curly'),
        ],
        # Inside a "${...}" reference.
        'curly': [
            (r'\}', String.Interpol, '#pop'),
            (r'\w+', Name.Attribute),
        ],
        # Value of a keyword line; ends at newline or a trailing comment.
        'spvalue': [
            include('interp'),
            (r'#.*$', Comment.Single, '#pop'),
            (r'\n', Text, '#pop'),
            # fallback
            (r'[^${}#\n]+', Text),
            (r'.', Text),
        ],
    }
class PacmanConfLexer(RegexLexer):
    """
    Lexer for `pacman.conf
    <https://www.archlinux.org/pacman/pacman.conf.5.html>`_.
    Actually, IniLexer works almost fine for this format,
    but it yields error tokens. That is because pacman.conf has
    a form without assignment like:
        UseSyslog
        Color
        TotalDownload
        CheckSpace
        VerbosePkgLists
    These are flags to switch on.
    .. versionadded:: 2.1
    """
    name = 'PacmanConf'
    aliases = ['pacmanconf']
    filenames = ['pacman.conf']
    mimetypes = []
    tokens = {
        'root': [
            # comment
            (r'#.*$', Comment.Single),
            # section header
            (r'^\s*\[.*?\]\s*$', Keyword),
            # variable definitions
            # (Leading space is allowed...)
            (r'(\w+)(\s*)(=)',
             bygroups(Name.Attribute, Text, Operator)),
            # flags to on
            (r'^(\s*)(\w+)(\s*)$',
             bygroups(Text, Name.Attribute, Text)),
            # built-in special values
            (words((
                '$repo',  # repository
                '$arch',  # architecture
                '%o',     # outfile
                '%u',     # url
            ), suffix=r'\b'),
             Name.Variable),
            # fallback
            (r'.', Text),
        ],
    }
class AugeasLexer(RegexLexer):
    """
    Lexer for `Augeas <http://augeas.net>`_.
    .. versionadded:: 2.4
    """
    name = 'Augeas'
    aliases = ['augeas']
    filenames = ['*.aug']
    tokens = {
        'root': [
            # module / let declarations
            (r'(module)(\s*)([^\s=]+)', bygroups(Keyword.Namespace, Text, Name.Namespace)),
            (r'(let)(\s*)([^\s=]+)', bygroups(Keyword.Declaration, Text, Name.Variable)),
            # built-in lens/primitive keywords
            (r'(del|store|value|counter|seq|key|label|autoload|incl|excl|transform|test|get|put)(\s+)', bygroups(Name.Builtin, Text)),
            # typed parameter, e.g. "(x:lens)"
            (r'(\()([^:]+)(\:)(unit|string|regexp|lens|tree|filter)(\))', bygroups(Punctuation, Name.Variable, Punctuation, Keyword.Type, Punctuation)),
            (r'\(\*', Comment.Multiline, 'comment'),  # "(* ... *)" comments
            (r'[*+\-.;=?|]', Operator),
            (r'[()\[\]{}]', Operator),
            (r'"', String.Double, 'string'),
            (r'\/', String.Regex, 'regex'),
            # qualified reference, e.g. "Module.lens"
            (r'([A-Z]\w*)(\.)(\w+)', bygroups(Name.Namespace, Punctuation, Name.Variable)),
            (r'.', Name.Variable),
            (r'\s', Text),
        ],
        'string': [
            (r'\\.', String.Escape),
            (r'[^"]', String.Double),
            (r'"', String.Double, '#pop'),
        ],
        'regex': [
            (r'\\.', String.Escape),
            (r'[^/]', String.Regex),
            (r'\/', String.Regex, '#pop'),
        ],
        # Comments nest: "(*" pushes, "*)" pops.
        'comment': [
            (r'[^*)]', Comment.Multiline),
            (r'\(\*', Comment.Multiline, '#push'),
            (r'\*\)', Comment.Multiline, '#pop'),
            (r'[)*]', Comment.Multiline)
        ],
    }
class TOMLLexer(RegexLexer):
    """
    Lexer for `TOML <https://github.com/toml-lang/toml>`_, a simple language
    for config files.
    .. versionadded:: 2.4
    """
    name = 'TOML'
    aliases = ['toml']
    filenames = ['*.toml', 'Pipfile', 'poetry.lock']
    tokens = {
        'root': [
            # Basics, comments, strings
            (r'\s+', Text),
            (r'#.*?$', Comment.Single),
            # Basic string
            (r'"(\\\\|\\"|[^"])*"', String),
            # Literal string
            (r'\'\'\'(.*)\'\'\'', String),
            (r'\'[^\']*\'', String),
            # NOTE(review): the '$' anchor means booleans/tables followed by
            # trailing content on the same line will not match these rules.
            (r'(true|false)$', Keyword.Constant),
            (r'[a-zA-Z_][\w\-]*', Name),
            (r'\[.*?\]$', Keyword),
            # Datetime
            # TODO this needs to be expanded, as TOML is rather flexible:
            # https://github.com/toml-lang/toml#offset-date-time
            (r'\d{4}-\d{2}-\d{2}(?:T| )\d{2}:\d{2}:\d{2}(?:Z|[-+]\d{2}:\d{2})', Number.Integer),
            # Numbers
            (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
            (r'\d+[eE][+-]?[0-9]+j?', Number.Float),
            # Handle +-inf, +-infinity, +-nan
            (r'[+-]?(?:(inf(?:inity)?)|nan)', Number.Float),
            (r'[+-]?\d+', Number.Integer),
            # Punctuation
            (r'[]{}:(),;[]', Punctuation),
            (r'\.', Punctuation),
            # Operators
            (r'=', Operator)
        ]
    }
class NestedTextLexer(RegexLexer):
    """
    Lexer for `NestedText <https://nestedtext.org>`_, a human-friendly data
    format.
    .. versionadded:: 2.9
    """
    name = 'NestedText'
    aliases = ['nestedtext', 'nt']
    filenames = ['*.nt']
    tokens = {
        # Line-oriented root state: '>' multiline strings, '-' list items,
        # ':' multiline keys, and 'key: value' dict entries; '{' / '[' open
        # the inline (JSON-like) forms.
        'root': [
            (r'^(\s*)(#.*)$', bygroups(Text, Comment)),
            (r'^(\s*)(\{)', bygroups(Text, Punctuation), 'inline_dict'),
            (r'^(\s*)(\[)', bygroups(Text, Punctuation), 'inline_list'),
            (r'^(\s*)(>)$', bygroups(Text, Punctuation)),
            (r'^(\s*)(> )(.*?)(\s*)$', bygroups(Text, Punctuation, String, Whitespace)),
            (r'^(\s*)(-)$', bygroups(Text, Punctuation)),
            (r'^(\s*)(- )(.*?)(\s*)$', bygroups(Text, Punctuation, String, Whitespace)),
            (r'^(\s*)(:)$', bygroups(Text, Punctuation)),
            (r'^(\s*)(: )(.*?)(\s*)$', bygroups(Text, Punctuation, Name.Tag, Whitespace)),
            (r'^(\s*)([^\{\[].+?)(:)$', bygroups(Text, Name.Tag, Punctuation)),
            (r'^(\s*)([^\{\[].+?)(: )(.*?)(\s*)$', bygroups(Text, Name.Tag, Punctuation, String, Whitespace)),
        ],
        # Inline "[a, b, ...]"; a newline inside is an error.
        'inline_list': [
            include('whitespace'),
            (r'[^\{\}\[\],\s]', String),
            include('inline_value'),
            (r',', Punctuation),
            (r'\]', Punctuation, '#pop'),
            (r'\n', Error, '#pop'),
        ],
        # Inline "{k: v, ...}"; ':' switches to the value state.
        'inline_dict': [
            include('whitespace'),
            (r'[^\{\}\[\],:\s]', Name.Tag),
            (r':', Punctuation, 'inline_dict_value'),
            (r'\}', Punctuation, '#pop'),
            (r'\n', Error, '#pop'),
        ],
        # Value inside an inline dict; '}' pops both value and dict states.
        'inline_dict_value': [
            include('whitespace'),
            (r'[^\{\}\[\],:\s]', String),
            include('inline_value'),
            (r',', Punctuation, '#pop'),
            (r'\}', Punctuation, '#pop:2'),
        ],
        # Nested inline containers.
        'inline_value': [
            include('whitespace'),
            (r'\{', Punctuation, 'inline_dict'),
            (r'\[', Punctuation, 'inline_list'),
        ],
        'whitespace': [
            (r'\s+', Text),
        ],
    }
class SingularityLexer(RegexLexer):
    """
    Lexer for `Singularity definition files
    <https://www.sylabs.io/guides/3.0/user-guide/definition_files.html>`_.
    .. versionadded:: 2.6
    """
    name = 'Singularity'
    aliases = ['singularity']
    filenames = ['*.def', 'Singularity']
    flags = re.IGNORECASE | re.MULTILINE | re.DOTALL
    # Header keywords ("Bootstrap:", "From:", ...), section headers
    # ("%post", ...) and per-app sections ("%appinstall", ...).
    _headers = r'^(\s*)(bootstrap|from|osversion|mirrorurl|include|registry|namespace|includecmd)(:)'
    _section = r'^%(?:pre|post|setup|environment|help|labels|test|runscript|files|startscript)\b'
    _appsect = r'^%app(?:install|help|run|labels|env|test|files)\b'
    tokens = {
        'root': [
            (_section, Generic.Heading, 'script'),
            (_appsect, Generic.Heading, 'script'),
            (_headers, bygroups(Text, Keyword, Text)),
            (r'\s*#.*?\n', Comment),
            (r'\b(([0-9]+\.?[0-9]*)|(\.[0-9]+))\b', Number),
            (r'(?!^\s*%).', Text),
        ],
        # Section bodies are shell; delegate everything up to the next
        # "%section" (or EOF) to the Bash lexer.
        'script': [
            (r'(.+?(?=^\s*%))|(.*)', using(BashLexer), '#pop'),
        ],
    }
    def analyse_text(text):
        """This is a quite simple script file, but there are a few keywords
        which seem unique to this language."""
        result = 0
        if re.search(r'\b(?:osversion|includecmd|mirrorurl)\b', text, re.IGNORECASE):
            result += 0.5
        if re.search(SingularityLexer._section[1:], text):
            result += 0.49
        return result
| 34.286673 | 152 | 0.484718 |
acef7bd7c090baf40812d566f071d7217c96da36 | 1,881 | py | Python | vdb/eth_tester_debug_backend.py | sambacha/vyper-debug | 7e90e77a765121874491c8d1a81108c3d52ab797 | [
"MIT"
] | null | null | null | vdb/eth_tester_debug_backend.py | sambacha/vyper-debug | 7e90e77a765121874491c8d1a81108c3d52ab797 | [
"MIT"
] | null | null | null | vdb/eth_tester_debug_backend.py | sambacha/vyper-debug | 7e90e77a765121874491c8d1a81108c3d52ab797 | [
"MIT"
] | null | null | null | from eth.chains.base import MiningChain
from eth.db import get_db_backend
from eth.vm.forks.byzantium import ByzantiumVM
from eth.vm.forks.byzantium.state import ByzantiumState
from vdb.debug_computation import DebugComputation
from eth_tester.backends.pyevm.main import (
get_default_genesis_params,
generate_genesis_state_for_keys,
get_default_account_keys,
PyEVMBackend,
)
class DebugState(ByzantiumState):
    # Byzantium state that executes transactions with the debug computation.
    computation_class = DebugComputation
class DebugVM(ByzantiumVM):
    # Byzantium VM whose state class hooks in the debugger.
    _state_class = DebugState  # type: Type[BaseState]
def _setup_tester_chain(genesis_params=None, genesis_state=None, num_accounts=None):
    """Build a mining chain wired to the debug VM.

    Returns an ``(account_keys, chain)`` tuple.  ``genesis_params`` and
    ``genesis_state`` fall back to the eth-tester defaults when ``None``.
    Previously both arguments were silently discarded and the defaults were
    always used, so values passed through ``reset_to_genesis`` had no effect.
    ``num_accounts`` is forwarded to the default account-key generator.
    """
    class DebugNoProofVM(DebugVM):
        """Byzantium VM rules, without validating any miner proof of work"""
        @classmethod
        def validate_seal(cls, header):
            # Skip PoW validation so test blocks can be sealed instantly.
            pass
    class MainnetTesterNoProofChain(MiningChain):
        vm_configuration = ((0, DebugNoProofVM),)
    if genesis_params is None:
        genesis_params = get_default_genesis_params()
    account_keys = get_default_account_keys(quantity=num_accounts)
    if genesis_state is None:
        genesis_state = generate_genesis_state_for_keys(account_keys)
    base_db = get_db_backend()
    chain = MainnetTesterNoProofChain.from_genesis(
        base_db, genesis_params, genesis_state
    )
    return account_keys, chain
class PyEVMDebugBackend(PyEVMBackend):
    """eth-tester py-evm backend that runs transactions under the debug VM."""
    def __init__(self,):
        super().__init__()
    def reset_to_genesis(
        self, genesis_params=None, genesis_state=None, num_accounts=None
    ):
        # Rebuild the chain from genesis using the debug VM chain factory.
        # NOTE(review): arguments are forwarded to _setup_tester_chain, which
        # currently substitutes its own defaults — verify before relying on
        # custom genesis values here.
        self.account_keys, self.chain = _setup_tester_chain(
            genesis_params, genesis_state, num_accounts
        )
def set_debug_info(source_code, source_map, stdin=None, stdout=None):
    """Attach source/debug context to DebugComputation as class attributes."""
    debug_attrs = {
        "source_code": source_code,
        "source_map": source_map,
        "stdin": stdin,
        "stdout": stdout,
    }
    for attr_name, attr_value in debug_attrs.items():
        setattr(DebugComputation, attr_name, attr_value)
| 29.390625 | 76 | 0.748006 |
acef7c944cb24b7495403fd9cba51a452d8e9bdd | 4,324 | py | Python | 2017-11-04-pycon/2-example-django-api/app/settings/base.py | pavlov99/presentations | c2b4402f8c12c0f08a338fdb9ecb45deb444afaf | [
"MIT"
] | 12 | 2017-10-19T05:43:21.000Z | 2021-03-24T17:04:02.000Z | 2017-11-04-pycon/2-example-django-api/app/settings/base.py | pavlov99/presentations | c2b4402f8c12c0f08a338fdb9ecb45deb444afaf | [
"MIT"
] | null | null | null | 2017-11-04-pycon/2-example-django-api/app/settings/base.py | pavlov99/presentations | c2b4402f8c12c0f08a338fdb9ecb45deb444afaf | [
"MIT"
] | null | null | null | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 1.10.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import logging
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*odz92bouip^e8kupu6x1hbn9ga64!0$71dm^mze6rb++_(+th'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
OAUTH2_PROVIDER = {
'ACCESS_TOKEN_EXPIRE_SECONDS': 3600
}
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
# Logging
# =======
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'root': {
'level': 'DEBUG',
'handlers': []
},
'formatters': {
'simple': {
'format': '%(asctime)s [%(levelname)s] \t%(message)s'
},
'verbose': {
'format': '%(asctime)s %(levelname)s [%(name)s] %(message)s'
}
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
},
'loggers': {
'': {
'handlers': ['console'],
'level': 'INFO',
},
'app.api': {
'handlers': ['console'],
'level': 'INFO'
},
'jsonrpc': {
'handlers': ['console'],
'level': 'INFO'
},
}
}
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%d.%m %H:%M:%S',
)
logging.info("Base settings loaded.")
| 24.292135 | 91 | 0.621415 |
acef7d227fb043f720b2f6eac3f9b9b5374356c6 | 7,809 | py | Python | examples/pwr_run/checkpointing/throughput/feedback_inverse/job34.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | examples/pwr_run/checkpointing/throughput/feedback_inverse/job34.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | examples/pwr_run/checkpointing/throughput/feedback_inverse/job34.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | """
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.resnet import ResNet50, ResNet101, ResNet152
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 64
args_lr = 0.0006
args_model = 'resnet50'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_feedback/' + job_name + '*'
total_epochs = 50
starting_epoch = 0
# first step is to update the PID
pid = os.getpid()
message = job_name + ' pid ' + str(pid) # 'job50 pid 3333'
send_signal.send(args.node, 10002, message)
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
message = job_name + ' b_end'
send_signal.send(args.node, 10002, message)
model = keras.models.load_model(save_file)
message = job_name + ' c_end'
send_signal.send(args.node, 10002, message)
else:
print('train from start')
model = models.Sequential()
if '50' in args_model:
base_model = ResNet50(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '101' in args_model:
base_model = ResNet101(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '152' in args_model:
base_model = ResNet152(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
#model.add(layers.UpSampling2D((2,2)))
#model.add(layers.UpSampling2D((2,2)))
#model.add(layers.UpSampling2D((2,2)))
model.add(base_model)
model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
    """SIGTERM handler: report wasted epoch time, checkpoint the model,
    notify the scheduler, and exit.

    Reads the module-level globals ``epoch_begin_time``, ``job_name``,
    ``save_files``, ``model`` and ``current_epoch``.
    """
    # first record the wasted epoch time (time spent in the epoch that was
    # interrupted; 0 means no epoch has started yet)
    global epoch_begin_time
    if epoch_begin_time == 0:
        epoch_waste_time = 0
    else:
        epoch_waste_time = int(time.time() - epoch_begin_time)
    message = job_name + ' waste ' + str(epoch_waste_time)  # e.g. 'job50 waste 100'
    if epoch_waste_time > 0:
        send_signal.send(args.node, 10002, message)
    print('checkpointing the model triggered by kill -15 signal')
    # delete whatever checkpoint already exists so only the newest survives
    for f in glob.glob(save_files):
        os.remove(f)
    model.save('/scratch/li.baol/checkpoint_feedback/' + job_name + '_' + str(current_epoch) + '.h5')
    print ('(SIGTERM) terminating the process')
    # tell the scheduler a checkpoint was written before exiting
    message = job_name + ' checkpoint'
    send_signal.send(args.node, 10002, message)
    sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
first_epoch_start = 0
class PrintEpoch(keras.callbacks.Callback):
    """Keras callback that reports per-epoch progress to the scheduler node.

    Updates the module-level globals ``current_epoch``, ``epoch_begin_time``
    and ``first_epoch_start`` that the SIGTERM handler relies on.
    """
    def on_epoch_begin(self, epoch, logs=None):
        global current_epoch, first_epoch_start
        current_epoch = epoch
        print('current epoch ' + str(current_epoch))
        global epoch_begin_time
        # Timestamp used by terminateProcess to compute wasted epoch time.
        epoch_begin_time = time.time()
        if epoch == starting_epoch and args.resume:
            first_epoch_start = time.time()
            # 'd_end' marks the end of the checkpoint-restore phase.
            message = job_name + ' d_end'
            send_signal.send(args.node, 10002, message)
        elif epoch == starting_epoch:
            first_epoch_start = time.time()
        if epoch == starting_epoch:
            # send signal to indicate checkpoint is qualified
            message = job_name + ' ckpt_qual'
            send_signal.send(args.node, 10002, message)
    def on_epoch_end(self, epoch, logs=None):
        if epoch == starting_epoch:
            # Report duration of the first epoch after (re)start.
            first_epoch_time = int(time.time() - first_epoch_start)
            message = job_name + ' 1st_epoch ' + str(first_epoch_time)
            send_signal.send(args.node, 10002, message)
        # Fraction of the (halved) total epoch budget completed so far.
        progress = round((epoch+1) / round(total_epochs/2), 2)
        message = job_name + ' completion ' + str(progress)
        send_signal.send(args.node, 10002, message)
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
| 32.949367 | 118 | 0.691254 |
acef7dd7451bc927acc7025b4b705a460d09b4fe | 556 | py | Python | env/Lib/site-packages/plotly/validators/sunburst/marker/colorbar/_showticksuffix.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 11,750 | 2015-10-12T07:03:39.000Z | 2022-03-31T20:43:15.000Z | venv/Lib/site-packages/plotly/validators/sunburst/marker/colorbar/_showticksuffix.py | wakisalvador/constructed-misdirection | 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e | [
"Unlicense"
] | 2,951 | 2015-10-12T00:41:25.000Z | 2022-03-31T22:19:26.000Z | venv/Lib/site-packages/plotly/validators/sunburst/marker/colorbar/_showticksuffix.py | wakisalvador/constructed-misdirection | 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e | [
"Unlicense"
] | 2,623 | 2015-10-15T14:40:27.000Z | 2022-03-28T16:05:50.000Z | import _plotly_utils.basevalidators
class ShowticksuffixValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for ``sunburst.marker.colorbar.showticksuffix``.

    Auto-generated plotly property validator: accepts one of
    'all', 'first', 'last' or 'none'.
    """
    def __init__(
        self,
        plotly_name="showticksuffix",
        parent_name="sunburst.marker.colorbar",
        **kwargs
    ):
        super(ShowticksuffixValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # kwargs may override the defaults below
            edit_type=kwargs.pop("edit_type", "colorbars"),
            values=kwargs.pop("values", ["all", "first", "last", "none"]),
            **kwargs
        )
| 30.888889 | 80 | 0.627698 |
acef7f08ac2e767540a1a6300f137ea5a8bf313f | 1,556 | py | Python | third_party/catapult/dashboard/dashboard/pinpoint/handlers/migrate.py | zipated/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | dashboard/dashboard/pinpoint/handlers/migrate.py | dajaffe/catapult | d89bc5ae795c6a8f3cb7489653c9b8f8803111a8 | [
"BSD-3-Clause"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | dashboard/dashboard/pinpoint/handlers/migrate.py | dajaffe/catapult | d89bc5ae795c6a8f3cb7489653c9b8f8803111a8 | [
"BSD-3-Clause"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
import json
import webapp2
from google.appengine.api import taskqueue
from google.appengine.datastore import datastore_query
from google.appengine.ext import ndb
from dashboard.common import stored_object
from dashboard.pinpoint.models import job
_BATCH_SIZE = 10
_STATUS_KEY = 'job_migration_status'
class Migrate(webapp2.RequestHandler):
  """Re-puts Job entities in batches via the task queue.

  GET returns the current migration status as JSON; POST starts the
  migration or, when one is in progress, processes the next batch.
  """
  def get(self):
    # Report the persisted status, or an empty object if idle.
    self.response.write(json.dumps(stored_object.Get(_STATUS_KEY) or {}))
  def post(self):
    # NDB requires '== None' (not 'is None') to build the filter node.
    query = job.Job.query(job.Job.task == None)
    status = stored_object.Get(_STATUS_KEY)
    if not status:
      self._Start(query)
      self.get()
      return
    self._Migrate(query, status)
    self.get()
  def _Start(self, query):
    """Record the initial status and enqueue the first batch task."""
    status = {
        'count': 0,
        'started': datetime.datetime.now().isoformat(),
        'total': query.count(),
    }
    stored_object.Set(_STATUS_KEY, status)
    taskqueue.add(url='/api/migrate')
  def _Migrate(self, query, status):
    """Re-put one batch; requeue with a cursor while more remain."""
    cursor = datastore_query.Cursor(urlsafe=self.request.get('cursor'))
    jobs, next_cursor, more = query.fetch_page(_BATCH_SIZE, start_cursor=cursor)
    # Re-putting migrates entities to the current model definition.
    ndb.put_multi(jobs)
    if more:
      status['count'] += len(jobs)
      stored_object.Set(_STATUS_KEY, status)
      params = {'cursor': next_cursor.urlsafe()}
      taskqueue.add(url='/api/migrate', params=params)
    else:
      # Done: clear the status marker.
      stored_object.Set(_STATUS_KEY, None)
| 26.372881 | 80 | 0.703085 |
acef7f1d7ff561d02983ead79891bf46400d7c71 | 1,150 | py | Python | argostranslate/settings.py | thomas536/argos-translate | b76810815b75ccdb1cdc25830b3333d3ff41468f | [
"MIT"
] | 1 | 2021-01-12T12:51:43.000Z | 2021-01-12T12:51:43.000Z | argostranslate/settings.py | thomas536/argos-translate | b76810815b75ccdb1cdc25830b3333d3ff41468f | [
"MIT"
] | null | null | null | argostranslate/settings.py | thomas536/argos-translate | b76810815b75ccdb1cdc25830b3333d3ff41468f | [
"MIT"
] | null | null | null | from pathlib import Path
import os
data_dir = Path.home() / '.argos-translate'
if 'SNAP' in os.environ:
data_dir = Path(os.environ['SNAP_USER_DATA']) / '.argos-translate'
package_data_dir = data_dir / 'packages'
# Will search all of these directories for packages
package_dirs = [package_data_dir]
if 'SNAP' in os.environ:
# Packages bundled with snap
snap_package_dir = Path(os.environ['SNAP']) / 'snap_custom' / 'packages'
if os.path.isdir(snap_package_dir):
package_dirs.append(snap_package_dir)
# Packages loaded from a content snap
content_snap_packages = Path(os.environ['SNAP']) / 'snap_custom' / 'content_snap_packages'
if os.path.isdir(content_snap_packages):
for package_dir in content_snap_packages.iterdir():
if package_dir.is_dir():
package_dirs.append(package_dir)
if 'ARGOS_TRANSLATE_PACKAGE_DIR' in os.environ:
package_dirs.append(Path(os.environ[
'ARGOS_TRANSLATE_PACKAGE_DIR']))
about_text = """
Argos Translate is an open source neural machine
translation application created by Argos Open
Technologies, LLC (www.argosopentech.com).
"""
| 34.848485 | 94 | 0.722609 |
acef7f9e0a5f929cdcbddf0dab2e9c84d08a31ec | 962 | py | Python | src/helpers/webdriver_factory.py | oluiscabral/10fastfingers-faketyper | 1e75e3ecb6d8337add5af5281ad34bdaeb9037cb | [
"MIT"
] | null | null | null | src/helpers/webdriver_factory.py | oluiscabral/10fastfingers-faketyper | 1e75e3ecb6d8337add5af5281ad34bdaeb9037cb | [
"MIT"
] | null | null | null | src/helpers/webdriver_factory.py | oluiscabral/10fastfingers-faketyper | 1e75e3ecb6d8337add5af5281ad34bdaeb9037cb | [
"MIT"
] | null | null | null | '''
@author: oluiscabral
'''
from selenium.webdriver.remote.webdriver import WebDriver
from selenium import webdriver
from helpers.webdriver_common import WebdriverCommon
class WebdriverFactory:
    """Creates the first available Selenium WebDriver (Chrome, then Firefox)."""
    # A tuple (not a set) so the preference order is deterministic:
    # previously iteration order over the set was arbitrary per process.
    BROWSERS = (
        (webdriver.Chrome, webdriver.ChromeOptions()),
        (webdriver.Firefox, webdriver.FirefoxOptions())
    )
    @staticmethod
    def create(headless: bool = True) -> WebDriver:
        """Return a driver for the first browser that starts successfully.

        Raises Exception when no supported browser/driver is available.
        """
        for driver_cls, options_template in WebdriverFactory.BROWSERS:
            # Fresh options per attempt: mutating the shared class-level
            # template (e.g. set_headless) would leak across calls.
            options = type(options_template)()
            try:
                return WebdriverFactory._get_webdriver_to_os(driver_cls, options, headless)
            except Exception:
                # This browser/driver is not installed or failed to start;
                # fall through to the next candidate.
                continue
        raise Exception("Could not find any compatible browser.")
    @staticmethod
    def _get_webdriver_to_os(web_driver: WebDriver, options, headless: bool) -> WebDriver:
        """Instantiate *web_driver* with the bundled executable path."""
        if headless:
            options.set_headless()
        ret = web_driver(executable_path=WebdriverCommon.get_path(web_driver), options=options)
        return ret
| 33.172414 | 95 | 0.685031 |
acef7fc2442df2a59bb4458017dcf3653e4daa8c | 7,684 | py | Python | supervisor/api/supervisor.py | janiversen/supervisor | 890313701c37eb4a14b870b361729491c1ed20aa | [
"Apache-2.0"
] | 597 | 2017-04-27T15:10:08.000Z | 2019-12-18T16:02:57.000Z | supervisor/api/supervisor.py | janiversen/supervisor | 890313701c37eb4a14b870b361729491c1ed20aa | [
"Apache-2.0"
] | 799 | 2017-05-02T00:26:07.000Z | 2019-12-18T21:40:18.000Z | supervisor/api/supervisor.py | janiversen/supervisor | 890313701c37eb4a14b870b361729491c1ed20aa | [
"Apache-2.0"
] | 173 | 2017-04-26T17:03:42.000Z | 2019-12-15T10:41:57.000Z | """Init file for Supervisor Supervisor RESTful API."""
import asyncio
import logging
from typing import Any, Awaitable
from aiohttp import web
import voluptuous as vol
from ..const import (
ATTR_ADDONS,
ATTR_ADDONS_REPOSITORIES,
ATTR_ARCH,
ATTR_BLK_READ,
ATTR_BLK_WRITE,
ATTR_CHANNEL,
ATTR_CONTENT_TRUST,
ATTR_CPU_PERCENT,
ATTR_DEBUG,
ATTR_DEBUG_BLOCK,
ATTR_DESCRIPTON,
ATTR_DIAGNOSTICS,
ATTR_FORCE_SECURITY,
ATTR_HEALTHY,
ATTR_ICON,
ATTR_IP_ADDRESS,
ATTR_LOGGING,
ATTR_LOGO,
ATTR_MEMORY_LIMIT,
ATTR_MEMORY_PERCENT,
ATTR_MEMORY_USAGE,
ATTR_NAME,
ATTR_NETWORK_RX,
ATTR_NETWORK_TX,
ATTR_REPOSITORY,
ATTR_SLUG,
ATTR_STATE,
ATTR_SUPPORTED,
ATTR_TIMEZONE,
ATTR_UPDATE_AVAILABLE,
ATTR_VERSION,
ATTR_VERSION_LATEST,
ATTR_WAIT_BOOT,
CONTENT_TYPE_BINARY,
LogLevel,
UpdateChannel,
)
from ..coresys import CoreSysAttributes
from ..exceptions import APIError
from ..utils.validate import validate_timezone
from ..validate import repositories, version_tag, wait_boot
from .utils import api_process, api_process_raw, api_validate
_LOGGER: logging.Logger = logging.getLogger(__name__)
# pylint: disable=no-value-for-parameter
SCHEMA_OPTIONS = vol.Schema(
{
vol.Optional(ATTR_CHANNEL): vol.Coerce(UpdateChannel),
vol.Optional(ATTR_ADDONS_REPOSITORIES): repositories,
vol.Optional(ATTR_TIMEZONE): validate_timezone,
vol.Optional(ATTR_WAIT_BOOT): wait_boot,
vol.Optional(ATTR_LOGGING): vol.Coerce(LogLevel),
vol.Optional(ATTR_DEBUG): vol.Boolean(),
vol.Optional(ATTR_DEBUG_BLOCK): vol.Boolean(),
vol.Optional(ATTR_DIAGNOSTICS): vol.Boolean(),
vol.Optional(ATTR_CONTENT_TRUST): vol.Boolean(),
vol.Optional(ATTR_FORCE_SECURITY): vol.Boolean(),
}
)
SCHEMA_VERSION = vol.Schema({vol.Optional(ATTR_VERSION): version_tag})
class APISupervisor(CoreSysAttributes):
"""Handle RESTful API for Supervisor functions."""
    @api_process
    async def ping(self, request):
        """Return ok for signal that the API is ready."""
        # Always truthy: reaching this handler at all means the API is up.
        return True
@api_process
async def info(self, request: web.Request) -> dict[str, Any]:
"""Return host information."""
list_addons = []
for addon in self.sys_addons.installed:
list_addons.append(
{
ATTR_NAME: addon.name,
ATTR_SLUG: addon.slug,
ATTR_DESCRIPTON: addon.description,
ATTR_STATE: addon.state,
ATTR_VERSION: addon.version,
ATTR_VERSION_LATEST: addon.latest_version,
ATTR_UPDATE_AVAILABLE: addon.need_update,
ATTR_REPOSITORY: addon.repository,
ATTR_ICON: addon.with_icon,
ATTR_LOGO: addon.with_logo,
}
)
return {
ATTR_VERSION: self.sys_supervisor.version,
ATTR_VERSION_LATEST: self.sys_supervisor.latest_version,
ATTR_UPDATE_AVAILABLE: self.sys_supervisor.need_update,
ATTR_CHANNEL: self.sys_updater.channel,
ATTR_ARCH: self.sys_supervisor.arch,
ATTR_SUPPORTED: self.sys_core.supported,
ATTR_HEALTHY: self.sys_core.healthy,
ATTR_IP_ADDRESS: str(self.sys_supervisor.ip_address),
ATTR_WAIT_BOOT: self.sys_config.wait_boot,
ATTR_TIMEZONE: self.sys_config.timezone,
ATTR_LOGGING: self.sys_config.logging,
ATTR_DEBUG: self.sys_config.debug,
ATTR_DEBUG_BLOCK: self.sys_config.debug_block,
ATTR_DIAGNOSTICS: self.sys_config.diagnostics,
ATTR_ADDONS: list_addons,
ATTR_ADDONS_REPOSITORIES: self.sys_config.addons_repositories,
}
@api_process
async def options(self, request: web.Request) -> None:
"""Set Supervisor options."""
body = await api_validate(SCHEMA_OPTIONS, request)
if ATTR_CHANNEL in body:
self.sys_updater.channel = body[ATTR_CHANNEL]
if ATTR_TIMEZONE in body:
self.sys_config.timezone = body[ATTR_TIMEZONE]
if ATTR_WAIT_BOOT in body:
self.sys_config.wait_boot = body[ATTR_WAIT_BOOT]
if ATTR_DEBUG in body:
self.sys_config.debug = body[ATTR_DEBUG]
if ATTR_DEBUG_BLOCK in body:
self.sys_config.debug_block = body[ATTR_DEBUG_BLOCK]
if ATTR_DIAGNOSTICS in body:
self.sys_config.diagnostics = body[ATTR_DIAGNOSTICS]
self.sys_dbus.agent.diagnostics = body[ATTR_DIAGNOSTICS]
if ATTR_LOGGING in body:
self.sys_config.logging = body[ATTR_LOGGING]
# REMOVE: 2021.7
if ATTR_CONTENT_TRUST in body:
self.sys_security.content_trust = body[ATTR_CONTENT_TRUST]
# REMOVE: 2021.7
if ATTR_FORCE_SECURITY in body:
self.sys_security.force = body[ATTR_FORCE_SECURITY]
# Save changes before processing addons in case of errors
self.sys_updater.save_data()
self.sys_config.save_data()
if ATTR_ADDONS_REPOSITORIES in body:
await asyncio.shield(
self.sys_store.update_repositories(set(body[ATTR_ADDONS_REPOSITORIES]))
)
await self.sys_resolution.evaluate.evaluate_system()
@api_process
async def stats(self, request: web.Request) -> dict[str, Any]:
"""Return resource information."""
stats = await self.sys_supervisor.stats()
return {
ATTR_CPU_PERCENT: stats.cpu_percent,
ATTR_MEMORY_USAGE: stats.memory_usage,
ATTR_MEMORY_LIMIT: stats.memory_limit,
ATTR_MEMORY_PERCENT: stats.memory_percent,
ATTR_NETWORK_RX: stats.network_rx,
ATTR_NETWORK_TX: stats.network_tx,
ATTR_BLK_READ: stats.blk_read,
ATTR_BLK_WRITE: stats.blk_write,
}
@api_process
async def update(self, request: web.Request) -> None:
"""Update Supervisor OS."""
body = await api_validate(SCHEMA_VERSION, request)
# This option is useless outside of DEV
if not self.sys_dev and not self.sys_supervisor.need_update:
raise APIError(
f"No supervisor update available - {self.sys_supervisor.version!s}"
)
if self.sys_dev:
version = body.get(ATTR_VERSION, self.sys_updater.version_supervisor)
else:
version = self.sys_updater.version_supervisor
await asyncio.shield(self.sys_supervisor.update(version))
@api_process
def reload(self, request: web.Request) -> Awaitable[None]:
"""Reload add-ons, configuration, etc."""
return asyncio.shield(
asyncio.wait(
[
self.sys_updater.reload(),
self.sys_homeassistant.secrets.reload(),
self.sys_resolution.evaluate.evaluate_system(),
]
)
)
@api_process
def repair(self, request: web.Request) -> Awaitable[None]:
"""Try to repair the local setup / overlayfs."""
return asyncio.shield(self.sys_core.repair())
@api_process
def restart(self, request: web.Request) -> Awaitable[None]:
"""Soft restart Supervisor."""
return asyncio.shield(self.sys_supervisor.restart())
@api_process_raw(CONTENT_TYPE_BINARY)
def logs(self, request: web.Request) -> Awaitable[bytes]:
"""Return supervisor Docker logs."""
return self.sys_supervisor.logs()
| 33.701754 | 87 | 0.646148 |
acef806eae40bc7e13d2308b6fc50df58345a092 | 4,646 | py | Python | strategies/naive_intelligence/naive_intelligence.py | mu-zhao/Liars_dice | 413686e9dce567659b1967c51b993583a3b20c88 | [
"MIT"
] | 2 | 2021-09-22T04:14:22.000Z | 2021-09-22T04:40:26.000Z | strategies/naive_intelligence/naive_intelligence.py | mu-zhao/Liars_dice | 413686e9dce567659b1967c51b993583a3b20c88 | [
"MIT"
] | null | null | null | strategies/naive_intelligence/naive_intelligence.py | mu-zhao/Liars_dice | 413686e9dce567659b1967c51b993583a3b20c88 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
from strategies.simulation import initial_bid_candidates,Simulation,get_bid_candidate,reward,squared_power,linear_power
from IPython.display import display, HTML
def good_choice(res, pay, rollout):
    """Select one action from three candidate responses.

    When all three candidates differ, score each multi-element candidate by
    ``candidate[0] * (1 + (candidate[1] == 0)) - (rollout + rollout[0])[candidate[1]]``
    and return the lowest-scoring one; otherwise return the majority candidate
    (or the second one when no two agree). ``pay`` is accepted for interface
    compatibility but not used.
    """
    if len(set(res)) == 3:
        shifted = rollout + rollout[0]
        scores = {}
        for candidate in res:
            if len(candidate) > 1:
                bonus = 1 + (candidate[1] == 0)
                scores[candidate] = candidate[0] * bonus - shifted[candidate[1]]
        return list(min(scores, key=scores.get))
    if res[0] in (res[1], res[2]):
        return list(res[0])
    return list(res[1])
def two_player_game(last_bid,rollout):
    """Placeholder for a dedicated two-player endgame policy.

    Referenced only from a commented-out branch in ``NaiveIntelligence.bid``;
    not implemented yet, so it currently returns ``None``.
    """
    pass
def time_limit(t_limit, dice):
    """Return the product of the factorials of ``dice[1:]``.

    Fixes two defects in the original: the computed product was never
    returned (the function always yielded ``None``), and it went through the
    deprecated ``np.math`` alias, which is removed in NumPy 2.0.

    Parameters
    ----------
    t_limit :
        Currently unused; kept so existing call sites remain valid.
    dice : sequence of int
        Per-player dice counts; the first entry is skipped, matching the
        original loop over ``dice[1:]``.

    Returns
    -------
    int
        ``prod(factorial(i) for i in dice[1:])``.
    """
    # Local import: the module-level import block lives outside this function.
    from math import factorial
    num = 1
    for count in dice[1:]:
        num *= factorial(count)
    return num
class NaiveIntelligence:
    """Liar's-dice agent that scores responses via Monte-Carlo simulation.

    The agent compares the payoff of calling "liar", calling "spot on", and
    each candidate higher bid, using ``Simulation`` under three different
    opponent-response assumptions, then picks a joint suggestion with
    ``good_choice``.
    """
    def __init__(self,aggresive=0.8,simulation_time_limit=2,num_limit=2000,response_principle=0,utility=squared_power,
                 blur=False,call_level=1/3,bayes_dist=True,simple_minded=True,advisor=False):
        # aggresive: higher values push the agent toward riskier bids.
        # simulation_time_limit: wall-clock budget (seconds) per Simulation run.
        # advisor: when True, display a per-option suggestion table via IPython.
        self.time_limit=simulation_time_limit
        self.aggresiveness=aggresive
        self.expected_power=[]
        self.response_principle=response_principle
        self.num_lim=num_limit
        self.judgement=blur
        self.bayes_dist=bayes_dist
        self.utility=utility
        self.simple_minded=simple_minded
        self.advisor=advisor
        # Rows 0-1 hold the call-liar / spot-on payoffs; rows 2-4 hold the
        # simulated suggestions under each response assumption; the last row
        # records the joint decision.
        self.suggestion=pd.DataFrame(columns=['response','dice lost','relative power','error'],
                            index=['call liar','call spot on','suggestion:reasonalbe call assumption',
                            'suggestion: naive call assumption',
                            'suggestion: simple call assumption','joint suggestion'])
    def bid(self,player_id,rollout,private_dist,ck):
        """Return the agent's action given its roll and the common knowledge ``ck``.

        Returns ``[0]`` for "call liar", ``[1]`` for "spot on", or a two-element
        bid ``[quantity, face]``.
        """
        belief_dist=ck.get_all_common_belief(player_id)
        player_in_game_dice=ck.get_player_in_game_dice(player_id)
        # if len(player_in_game_dice)==2: #two player game
        # return two_player_game(rollout,ck.las)
        if ck.last_bid is None:
            # Opening move: no previous bid to challenge, only bid candidates.
            bid_candidate=initial_bid_candidates(rollout,ck.get_total_dice(),self.aggresiveness)
            max_payoff=-1
            response=None
        else:
            # Probability the last bid is a lie / exactly correct, from the
            # private count distribution.
            p_liar=private_dist[ck.last_bid[0],ck.last_bid[1]]
            if ck.last_bid[0]>=ck.get_total_dice():
                p_spot_on=p_liar
            else:
                p_spot_on=p_liar-private_dist[ck.last_bid[0]+1,ck.last_bid[1]]
            if self.simple_minded:
                # Simple payoff: expected dice lost only.
                payoff_call_liar=-p_liar
                payoff_spot_on=p_spot_on-1
            else:
                # Full payoff: expected change in relative power via reward().
                payoff_call_liar=reward(player_in_game_dice,squared_power,True)*(1-p_liar)+reward(player_in_game_dice,squared_power)*p_liar
                payoff_spot_on=reward(player_in_game_dice,squared_power,True,True)*p_spot_on+reward(player_in_game_dice,squared_power,spot_on=True)*(1-p_spot_on)
            if self.advisor:
                self.suggestion.loc[:2,'dice lost':'relative power']=np.array([[-p_liar,p_spot_on-1],[payoff_call_liar,payoff_spot_on]]).T
            if payoff_call_liar>payoff_spot_on:
                max_payoff=payoff_call_liar
                response=[0]
            else:
                max_payoff=payoff_spot_on
                response=[1]
            bid_candidate=get_bid_candidate(ck.last_bid,private_dist,1/2+self.aggresiveness/2)
        if len(bid_candidate)>0:
            # Simulate each candidate bid under all three response principles
            # and keep whichever beats the best direct call.
            good_response=[]
            good_payoff=np.zeros(3)
            for i in range(3):
                self.response_principle=i
                simulation=Simulation(bid_candidate,rollout,player_in_game_dice,belief_dist,self.response_principle,
                            self.time_limit,self.judgement,self.bayes_dist,utility_f=self.utility,simple_minded=self.simple_minded)
                res,payoff,dice_lost,error=simulation.simulation_result()
                #print(res,payoff,dice_lost,error)
                if self.advisor:
                    self.suggestion.iloc[i+2]=np.array([res,dice_lost,payoff,error])
                #print(res,payoff)
                if payoff>max_payoff:
                    good_response.append(tuple(res))
                    good_payoff[i]=payoff
                else:
                    good_response.append(tuple(response))
                    good_payoff[i]=max_payoff
            # Combine the three per-principle picks into one decision.
            response=good_choice(good_response,good_payoff,rollout)
        self.suggestion.iloc[-1]['response']=response
        if self.advisor:
            display(self.suggestion)
        return response
    def reset(self):
        """Reset per-game state (no-op: this agent keeps no game state)."""
        pass
| 40.754386 | 161 | 0.603315 |
acef80f213b60e6c5d98cb971357d5b03e310608 | 174 | py | Python | config.py | diceroll123/reddit-overview.widget | 5a38c6b93d836094fecfdd4f69b9ef74c14b941d | [
"WTFPL"
] | 3 | 2017-12-21T07:40:31.000Z | 2020-01-16T08:17:56.000Z | config.py | diceroll123/reddit-overview.widget | 5a38c6b93d836094fecfdd4f69b9ef74c14b941d | [
"WTFPL"
] | null | null | null | config.py | diceroll123/reddit-overview.widget | 5a38c6b93d836094fecfdd4f69b9ef74c14b941d | [
"WTFPL"
] | null | null | null | client_id = ''
client_secret = ''
# usernames and/or subreddits to keep an eye on.
usernames = ['diceroll123']
subreddits = ['android', 'science', 'technology', 'unixporn']
| 24.857143 | 61 | 0.695402 |
acef8173a48233b786616b5d35a7d2f41fcc9faf | 1,454 | py | Python | roengine/gui/progress_bar.py | ROTARTSI82/RoEngine | d739893e90b4f2e8a7f5a8b2e7b441929d4da7a3 | [
"Apache-2.0"
] | 1 | 2021-12-17T12:18:02.000Z | 2021-12-17T12:18:02.000Z | roengine/gui/progress_bar.py | ROTARTSI82/RoEngine | d739893e90b4f2e8a7f5a8b2e7b441929d4da7a3 | [
"Apache-2.0"
] | 1 | 2018-12-19T17:11:02.000Z | 2018-12-19T17:11:02.000Z | roengine/gui/progress_bar.py | ROTARTSI82/RoEngine | d739893e90b4f2e8a7f5a8b2e7b441929d4da7a3 | [
"Apache-2.0"
] | null | null | null | # -*- coding: UTF-8 -*-
import pygame
__all__ = ["ProgressBar"]
class ProgressBar(pygame.sprite.Sprite):
def __init__(self, val_range, val, size, width, colors=((255, 0, 0), (0, 0, 0))):
pygame.sprite.Sprite.__init__(self)
self.range = val_range
self.val = val
self.size = size
self.width = width
self.cols = colors
self.image = pygame.Surface([self.size[0] + self.width[0] * 2, self.size[1] + self.width[1] * 2])
self.rate = size[0] / float(val_range[1] - val_range[0])
self.bar_width = (val - val_range[0]) * self.rate
self.bar = pygame.Surface([self.bar_width, size[1]])
self.image.fill(colors[1])
self.bar.fill(colors[0])
self.image.blit(self.bar, self.width)
self.rect = self.image.get_rect()
def update(self):
self.val = min(self.range[1], max(self.range[0], self.val))
self.image = pygame.Surface([self.size[0] + self.width[0] * 2, self.size[1] + self.width[1] * 2])
oldcenter = self.rect.center
self.rate = self.size[0] / float(self.range[1] - self.range[0])
self.bar_width = (self.val - self.range[0]) * self.rate
self.bar = pygame.Surface([self.bar_width, self.size[1]])
self.image.fill(self.cols[1])
self.bar.fill(self.cols[0])
self.image.blit(self.bar, self.width)
self.rect = self.image.get_rect()
self.rect.center = oldcenter
| 38.263158 | 105 | 0.594911 |
acef81a955265fb38c55eb7211d93897e7030150 | 176 | py | Python | main.py | Omnia-Beyond/Password-Generator | 7c0a7b0cf85dbb6908f6231eb9ccd69af365eb11 | [
"MIT"
] | null | null | null | main.py | Omnia-Beyond/Password-Generator | 7c0a7b0cf85dbb6908f6231eb9ccd69af365eb11 | [
"MIT"
] | null | null | null | main.py | Omnia-Beyond/Password-Generator | 7c0a7b0cf85dbb6908f6231eb9ccd69af365eb11 | [
"MIT"
] | null | null | null | #PASSWORD GENERATOR v1.0
#Developer: Matteo Sensi
#Designer: Christian Alessandri
from App import App
if __name__ == "__main__":
    # Instantiate the GUI and enter its event loop; mainloop() blocks until
    # the window is closed.
    app = App()
    app.mainloop()
| 17.6 | 33 | 0.670455 |
acef828824c63452bda227475cc932b01f7facca | 1,352 | py | Python | heron/shell/src/python/handlers/pmaphandler.py | Munyola/incubator-heron | 4aa106c6eaef9c60ed2d692e41998adda8115e6f | [
"Apache-2.0"
] | 2 | 2016-07-04T07:10:31.000Z | 2018-03-28T16:59:02.000Z | heron/shell/src/python/handlers/pmaphandler.py | Munyola/incubator-heron | 4aa106c6eaef9c60ed2d692e41998adda8115e6f | [
"Apache-2.0"
] | 1 | 2019-05-08T22:30:16.000Z | 2019-05-08T22:30:16.000Z | heron/shell/src/python/handlers/pmaphandler.py | Munyola/incubator-heron | 4aa106c6eaef9c60ed2d692e41998adda8115e6f | [
"Apache-2.0"
] | 1 | 2017-06-05T17:55:45.000Z | 2017-06-05T17:55:45.000Z | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
''' pmaphandler.py '''
import json
import tornado.web
from heron.shell.src.python import utils
class PmapHandler(tornado.web.RequestHandler):
  """
  Responsible for reporting memory map of a process given its pid.
  """

  @tornado.web.asynchronous
  def get(self, pid):
    ''' get method: run ``pmap`` for *pid* and return its output as JSON. '''
    body = utils.str_cmd(['pmap', '-pXX', pid], None, None)
    # The original assigned ``self.content_type``, which Tornado's
    # RequestHandler never reads; set_header() is the documented way to
    # set the response content type.
    self.set_header("Content-Type", "application/json")
    self.write(json.dumps(body))
    self.finish()
| 32.97561 | 66 | 0.725592 |
acef82c3612490f1e5de9337f171a20d33901528 | 12,157 | py | Python | QDE/training/run_training_2gates.py | oxquantum-repo/drl_for_quantum_measurement | a02a8f3a7c5b40458f440a63355932409c66921c | [
"MIT"
] | 5 | 2021-05-18T01:07:04.000Z | 2022-01-29T13:31:18.000Z | QDE/training/run_training_2gates.py | oxquantum-repo/drl_for_quantum_measurement | a02a8f3a7c5b40458f440a63355932409c66921c | [
"MIT"
] | null | null | null | QDE/training/run_training_2gates.py | oxquantum-repo/drl_for_quantum_measurement | a02a8f3a7c5b40458f440a63355932409c66921c | [
"MIT"
] | 1 | 2021-05-18T01:07:20.000Z | 2021-05-18T01:07:20.000Z | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 25 14:35:17 2019
@author: Vu
"""
import sys
sys.path.append('../')
sys.path.append('../../')
sys.path.append('../utilities')
sys.path.append('../environments')
sys.path.append('../data')
from tqdm import tqdm
sys.path.append('../testing_code')
from utility_plot_arrow import plot_arrow_to_file
import numpy as np
import tensorflow as tf
import logging
from datetime import datetime
logging.basicConfig(level=logging.DEBUG,format='%(process)d-%(levelname)s-%(message)s')
import matplotlib.pyplot as plt
import random
#from prioritized_experience_replay import Memory
from environment_2d import Quantum_T4_2D
import pickle
import os
from print_trajectories_policies import print_trajectory_from_location,final_policy_on_test,get_value_state_on_test
from play_episodes import play_train_episode, play_test_episode_from_location,burn_in_experience
from drl_models import Dueling_DQN_PER_2D
# --- Hyper-parameters for the state representation -------------------------
IM_SIZE = 2 #80
N_CHANEL=9 # this is the representation of a block by 9 blocks
K = 6 #env.action_space.n
import warnings
warnings.filterwarnings("ignore")
#FILE_NAME="T4_scan_data_res_480_win_480"
#FILE_NAME="T4_scan_data_res_350_win_350"
'''File_Name_List = ["T4_scan_data_res_320_win_320", "T4_scan_data_res_350_win_350",
                  "T4_scan_data_res_400_win_400_sep", "T4_scan_data_res_480_win_480"]'''
File_Name_List = ["rotated_T4_scan_data_res_320_win_320", "rotated_T4_scan_data_res_350_win_350",
                  "rotated_T4_scan_data_res_400_win_400_sep", "rotated_T4_scan_data_res_480_win_480"]
# Build one Quantum_T4_2D environment per scan file and save diagnostic
# images (raw scan, pre-classifier threshold, CNN prediction, final labels).
n_env = len(File_Name_List)
env_list=[0]*n_env
for n in range(n_env):
    env_list[n] = Quantum_T4_2D(File_Name_List[n],isRepeat=True,offset=2.0e-10)
    env_list[n].id = n
    plt.imshow(env_list[n].image)
    plt.title(File_Name_List[n])
    plt.colorbar()
    plt.savefig(File_Name_List[n]+'.png',transparent = True)
    plt.show()
    plt.imshow(env_list[n].threshold_test)
    plt.title(File_Name_List[n] +" Pre-classify")
    plt.colorbar()
    plt.savefig(File_Name_List[n] + '_pre_classifier.png', transparent=True)
    plt.show()
    plt.imshow(env_list[n].prediction)
    plt.title(File_Name_List[n] +" CNN")
    plt.colorbar()
    plt.savefig(File_Name_List[n] + '_cnn_prediction.png', transparent=True)
    plt.show()
    plt.imshow(env_list[n].isquantum)
    plt.title(File_Name_List[n] +" Classification")
    plt.colorbar()
    plt.savefig(File_Name_List[n] + '_classification.png', transparent=True)
    plt.show()
#env1 = env_list[0]
#env2 = env_list[1]
#print(env_list[0])
# this is for printing purpose
# Tick-label ranges for the two gate axes used when plotting trajectories.
initial_gate_c5_c9=[ -570.,-940]
window=350
myxrange=np.linspace(initial_gate_c5_c9[1]-window/2,initial_gate_c5_c9[1]+window/2,4).astype(int)
myyrange=np.linspace(initial_gate_c5_c9[0]-window/2,initial_gate_c5_c9[0]+window/2,4).astype(int)
myxrange=myxrange[::-1]
myyrange=myyrange[::-1]
# Fix all RNG seeds for reproducibility, then start from a fresh TF graph.
np.random.seed(1)
random.seed(1)
tf.set_random_seed(1)
tf.reset_default_graph()
# create multiple environment
starting_pixel_loc_list=[[20,340],[320,15]]
#starting_pixel_loc_list=[[100,100],[100,200],[150,200],[50,450],[80,480],[350,50],[390,50],[320,180],[395,195],[350,15]]
n_env=len(starting_pixel_loc_list)
# --- DQN architecture / training hyper-parameters --------------------------
D = env_list[0].D
K = env_list[0].K
hidden_layer_sizes = [128,64,32]
gamma = 0.5
#batch_sz = 32
#num_episodes =10100
num_episodes = 9000
total_t = 0
experience_replay_buffer = []
# Per-episode training statistics (filled in during the loop below).
episode_rewards = np.zeros(num_episodes)
myloss = np.zeros(num_episodes)
last_100_avg=np.zeros(num_episodes)
last_100_avg_step=np.zeros(num_episodes)
num_steps=np.zeros(num_episodes)
episode_rewards_Test=[]
num_steps_Test=[]
episode_rewards_Test_B=[]
episode_rewards_Test_SC=[]
episode_rewards_Test_SD=[]
num_steps_Test_B=[]
num_steps_Test_SC=[]
num_steps_Test_SD=[]
# epsilon
# Linear epsilon-greedy schedule: decays from 1.0 toward 0.1.
eps = 1.0
eps_min = 0.1
eps_change = (eps - eps_min) / (3*num_episodes)
# number of random test
batch_sz=32
count=0
model = Dueling_DQN_PER_2D(D=D,K=K,batch_sz=batch_sz,hidden_layer_sizes=hidden_layer_sizes,
                   gamma=gamma, lr=2.3e-6, N_CHANEL=N_CHANEL,IM_SIZE=IM_SIZE,scope="DDQN")
init = tf.global_variables_initializer()
sess = tf.InteractiveSession()
def make_session(n):
    """Create a TF InteractiveSession limited to *n* parallel op threads."""
    thread_config = tf.ConfigProto(
        inter_op_parallelism_threads=n,
        intra_op_parallelism_threads=n,
    )
    return tf.InteractiveSession(config=thread_config)
#cpu_count = os.cpu_count()
#sess = make_session(cpu_count)
sess.run(init)
model.set_session(sess)
# Create models
# Set the logs writer to the folder /tmp/tensorflow_logs
summary_writer = tf.summary.FileWriter('../logs/2d', graph=sess.graph)
print("Populating experience replay buffer...")
# Fixed test start pixels: 10 on a barrier, 10 on a short circuit, 10 in the
# single/double-dot region (see the inline comments).
starting_loc_test=[[50,40],[40,200],[30,200],[40,340],[50,340],[50,340],[15,320],[35,345],[20,320],[30,340], # barrier
                   [340,5],[340,10],[320,15],[295,20],[265,15],[340,25],[340,30],[310,25],[285,40],[275,25], # short circut
                   [250,250],[200,200],[180,180],[160,165],[195,195],[230,240],[220,210],[190,210],[190,185],[215,225]]
nTest=len(starting_loc_test)
# Snapshots of the learned policy / value map, appended every 500 episodes.
optimal_policy_list=[]
optimal_policy_list_2=[]
optimal_val_list=[]
optimal_val_list_2=[]
value_state_map_list=[]
count_found_target=0
'''for i in range(20): # burn in
    c=burn_in_experience( env_list, experience_replay_buffer, model,MaxStep=50)
    count_found_target+=c'''
logging.debug("Found Target {:d}/20".format(count_found_target))
start = datetime.now()
# Play a number of episodes and learn!
for i in tqdm(range(num_episodes)):
    total_t, episode_rewards[i], duration, num_steps[i], time_per_step, eps,myloss[i], summary_writer = play_train_episode(env_list,
                total_t,i,experience_replay_buffer,model,gamma,batch_sz, eps,eps_change,eps_min,summary_writer,MaxStep=300)
    last_100_avg[i] = episode_rewards[max(0, i - 100):i + 1].mean()
    last_100_avg_step[i] = num_steps[max(0, i - 100):i + 1].mean()
    # Every 500 episodes: log progress, evaluate from the fixed test starts,
    # snapshot the policy/value maps, and top up the replay buffer.
    if i%500==0:
        logging.debug("Epi:", i,"Duration:", duration,"#steps:", num_steps[i],"Reward:", episode_rewards[i],\
            "Train time/step:", "%.3f" % time_per_step,"Avg Reward (Last 100):", "%.3f" % last_100_avg[i], "Eps:", "%.3f" % eps )
        # create another test screnario
        # where we will start at other location (not the middle)
        temp_reward=[0]*nTest
        temp_step=[0]*nTest
        location_state_list_multiple=[0]*nTest
        for jj in range(nTest):
            #id_env=ii%2
            # Pick one of the two training environments at random per test.
            rand = random.random()
            if rand > 0.5:
                newenv = env_list[0]
            else:
                newenv = env_list[1]
            temp_reward[jj], temp_step[jj], visit_map,location_state_list_multiple[jj],newenv, position_list_x, position_list_y = \
                play_test_episode_from_location(newenv,model ,eps,MaxStep=300)
            if i==100000:
                print_trajectory_from_location(newenv,location_state_list_multiple[jj], idx=jj,
                        myxlabel="Gate A",myxrange=myxrange,myylabel="Gate B",myyrange=myyrange,strFolder="../plot/t4_small/",filetype="png")
                #export pickle
                strTest="../plot/t4_small/location_state_list_multiple_2d_{}.pickle".format(jj)
                pickle_out = open(strTest,"wb")
                pickle.dump(location_state_list_multiple, pickle_out)
                pickle_out.close()
        print("Optimal Policy on Test: 0:Up \t 1:Down \t 2:Left \t 3:Right \t 4:Down Right \t 5: Up Left")
        optimal_policy,val_pol,optimal_policy_2,val_pol2=final_policy_on_test(newenv, model,starting_loc_test[0])
        optimal_policy_list.append(optimal_policy)
        optimal_val_list.append(val_pol)
        optimal_policy_list_2.append(optimal_policy_2)
        optimal_val_list_2.append(val_pol2)
        print(optimal_policy)
        #print(optimal_policy_2)
        count_found_target=0
        for uu in range(15): # burn in
            c=burn_in_experience( env_list, experience_replay_buffer, model,MaxStep=50)
            count_found_target+=c
        print("Burnin Exp: Found Target {:d}/15".format(count_found_target))
        value_state_map=get_value_state_on_test(model,newenv)
        value_state_map_list.append(value_state_map)
        # Split the 30 test results back into their three start-region groups.
        episode_rewards_Test_B.append(temp_reward[0:10])
        episode_rewards_Test_SC.append(temp_reward[10:20])
        episode_rewards_Test_SD.append(temp_reward[20:30])
        num_steps_Test_B.append(temp_step[0:10])
        num_steps_Test_SC.append(temp_step[10:20])
        num_steps_Test_SD.append(temp_step[20:30])
        print("Barrier reward Test:",episode_rewards_Test_B[-1]," #step Test:",num_steps_Test_B[-1])
        print("SC reward Test:",episode_rewards_Test_SC[-1]," #step Test:",num_steps_Test_SC[-1])
        print("SD reward Test:",episode_rewards_Test_SD[-1]," #step Test:",num_steps_Test_SD[-1])
# Persist the trained model and report total wall-clock training time.
saver = tf.train.Saver()
save_path = saver.save(sess, "../logs/2d/save_models/2d_mean_std")
end = datetime.now()
time_taken = end - start
print("TIME TAKEN", time_taken)
print("TIME TAKEN (s)", time_taken.total_seconds())
def _plot_series(series, title, ylabel, savepath, xlabel='Episode'):
    """Plot one training curve, show it, and save the figure to *savepath*.

    Replaces nine copies of the same seven-line matplotlib boilerplate.
    Note: the original passed the misspelled kwarg ``box_inches``; the
    matplotlib option is ``bbox_inches`` (tight cropping of the saved file).
    """
    fig = plt.figure()
    plt.plot(series)
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.show()
    fig.savefig(savepath, bbox_inches="tight")

_plot_series(np.log(myloss), 'Training Loss', 'Log of Loss', "fig/b2/TrainingLoss64.pdf")

# Running mean (window 100) of the log-loss, for a smoother trend curve.
logloss=np.log(myloss)
ave_logloss=[np.mean(logloss[max(0,i-100):i+1]) for i in range(len(logloss))]
_plot_series(ave_logloss, 'Average Training Loss', 'Log of Loss',
             "fig/b2/TrainingAverageLoss64.pdf")
_plot_series(episode_rewards, 'Training Reward', 'Reward',
             "fig/b2/TrainingReward64.pdf")
_plot_series(last_100_avg, 'Training Average Reward', 'Average Reward',
             "fig/b2/TrainingReward_Ave64.pdf")
_plot_series(last_100_avg[2000:], 'Training Average Reward from 2000...', 'Average Reward',
             "fig/b2/TrainingReward_Ave2000_64.pdf")
_plot_series(num_steps, 'Number of Training Steps', 'Step',
             "fig/b2/TrainingStep.pdf")
_plot_series(last_100_avg_step, 'Average of Training Step', 'Average Steps',
             "fig/b2/TrainingAveStep64.pdf")
_plot_series(episode_rewards_Test_B, 'Average Reward Test Barrier', 'Average Reward',
             "fig/b2/TestAveReward64_B.pdf")
_plot_series(num_steps_Test_B, 'Number of Test Steps Barrier', 'Average Step',
             "fig/b2/TestAveStep64_B.pdf")
# Persist every training curve and diagnostic snapshot for offline analysis.
output=[myloss,episode_rewards,last_100_avg,num_steps,last_100_avg_step
        ,episode_rewards_Test_B,num_steps_Test_B,episode_rewards_Test_SC,num_steps_Test_SC,
        episode_rewards_Test_SD,num_steps_Test_SD, optimal_policy_list, optimal_val_list,
        optimal_policy_list_2,optimal_val_list_2,value_state_map_list]
pickle.dump( output, open( "results/result_2d_T4_small.p", "wb" ) )
# Tick ranges for labelling the gate axes (recomputed for the plots below).
initial_gate_c5_c9=[ -570., -940]
window=350
myxrange=np.linspace(initial_gate_c5_c9[1]-window/2,initial_gate_c5_c9[1]+window/2,4).astype(int)
myyrange=np.linspace(initial_gate_c5_c9[0]-window/2,initial_gate_c5_c9[0]+window/2,4).astype(int)
myxrange=myxrange[::-1]
myyrange=myyrange[::-1]
# Commented out: arrow-policy plot and per-snapshot value maps, kept for reference.
'''plot_arrow_to_file(newenv,optimal_policy_list, optimal_val_list,
                   optimal_policy_list_2,optimal_val_list_2,"action_plot",myxlabel="Gate A",
                   myxrange=myxrange,myyrange=myyrange,myylabel="Gate B")
'''
'''for ii,value in enumerate(value_state_map_list):
    fig=plt.figure()
    plt.imshow(value)
    plt.colorbar()
    plt.show()'''
acef82eeec258c26f11560759c443a8706015627 | 6,032 | py | Python | i3/bar.py | kyrias/dotfiles | 564effbbc8e14ee4c2d1bc1e449c0658e7c5a6ad | [
"ISC"
] | 7 | 2018-03-20T16:00:41.000Z | 2022-02-04T03:14:18.000Z | i3/bar.py | kyrias/dotfiles | 564effbbc8e14ee4c2d1bc1e449c0658e7c5a6ad | [
"ISC"
] | null | null | null | i3/bar.py | kyrias/dotfiles | 564effbbc8e14ee4c2d1bc1e449c0658e7c5a6ad | [
"ISC"
] | null | null | null | # -*- coding: utf-8 -*-
###
# Dependencies:
#
# i3pystatus
# netifaces
# colour
import socket
from i3pystatus import Status
hostname = socket.gethostname()
status = Status(standalone=True)
status.register("clock",
color="#CDC0B0",
format="<span font_features=\"zero, ss01, tnum\">%Y-%m-%d %H:%M:%S%z</span>",
hints={"markup": "pango"})
if hostname == "hydrogen.kyriasis.com":
status.register("battery",
color="#CDC0B0",
full_color="#7CFC00",
charging_color="#7CFC00",
critical_color="#EE4000",
format="⚡0 {percentage:.2f}% {remaining:%E%hh:%Mm}{status}",
alert=True,
alert_percentage=5,
status={
"DIS": "↓",
"CHR": "↑",
"FULL": "=",
},
battery_ident="BAT0",)
status.register("battery",
color="#CDC0B0",
full_color="#7CFC00",
charging_color="#7CFC00",
critical_color="#EE4000",
format="⚡1 {percentage:.2f}% {remaining:%E%hh:%Mm}{status}",
alert=True,
alert_percentage=5,
status={
"DIS": "↓",
"CHR": "↑",
"FULL": "=",
},
battery_ident="BAT1",)
elif hostname.startswith("lithium"):
status.register("battery",
color="#CDC0B0",
full_color="#7CFC00",
charging_color="#7CFC00",
critical_color="#EE4000",
format="⚡0 {percentage:.2f}% {remaining:%E%hh:%Mm}{status}",
alert=True,
alert_percentage=5,
status={
"DIS": "↓",
"CHR": "↑",
"FULL": "=",
},
battery_ident="BAT0",)
status.register("battery",
color="#CDC0B0",
full_color="#7CFC00",
charging_color="#7CFC00",
critical_color="#EE4000",
format="⚡1 {percentage:.2f}% {remaining:%E%hh:%Mm}{status}",
alert=True,
alert_percentage=5,
status={
"DIS": "↓",
"CHR": "↑",
"FULL": "=",
},
battery_ident="BAT1",)
else:
status.register("battery",
color="#CDC0B0",
full_color="#7CFC00",
charging_color="#7CFC00",
critical_color="#EE4000",
format="⚡ {percentage:.2f}% {remaining:%E%hh:%Mm}{status}",
alert=True,
alert_percentage=5,
status={
"DIS": "↓",
"CHR": "↑",
"FULL": "=",
},
battery_ident="BAT0",)
status.register("temp",
color="#CDC0B0",
format="{Package_id_0}°C {Core_0_bar}{Core_1_bar}",
hints={"markup": "pango"},
lm_sensors_enabled=True)
status.register("pulseaudio",
color_unmuted="#CDC0B0",
color_muted="#EE4000",
format="♪ {volume}%",)
status.register("backlight",
color="#CDC0B0",
backlight="intel_backlight",
format="🔆 {percentage}% ({brightness}/{max_brightness})")
if hostname == "zorg.kyriasis.com":
status.register("network",
color_up="#7CFC00",
color_down="#EE4000",
interface="wlp4s0",
format_up="{essid:.10s}: {v4cidr} {quality:3.0f}%",)
status.register("network",
color_up="#7CFC00",
color_down="#EE4000",
interface="enp0s25",
format_up="{interface}: {v4cidr}")
elif hostname == "tirxu.kyriasis.com":
status.register("network",
color_up="#7CFC00",
color_down="#EE4000",
interface="wlp4s0",
format_up="{essid:.10s}: {v4cidr} {quality:3.0f}%",)
status.register("network",
color_up="#7CFC00",
color_down="#EE4000",
interface="enp0s20u3u1u3",
format_up="{interface}: {v4cidr}")
elif hostname == "hydrogen.kyriasis.com":
status.register("network",
color_up="#7CFC00",
color_down="#EE4000",
interface="wlp4s0",
format_up="{essid:.10s}: {v4cidr} {quality:3.0f}%",)
status.register("network",
color_up="#7CFC00",
color_down="#EE4000",
interface="enp0s31f6",
format_up="{interface}: {v4cidr}")
elif hostname.startswith('lithium'):
status.register("network",
color_up="#7CFC00",
color_down="#EE4000",
interface="wlp3s0",
format_up="{essid:.10s}: {v4cidr} {quality:3.0f}%",)
status.register("network",
color_up="#7CFC00",
color_down="#EE4000",
interface="enp0s31f6",
format_up="{interface}: {v4cidr}")
status.register("disk",
color="#CDC0B0",
path="/boot",
divisor=1024**2,
format="/boot {avail}M",)
status.register("disk",
color="#CDC0B0",
path="/",
format="/ {avail}G",)
status.run()
| 32.085106 | 93 | 0.413959 |
acef8351a9aeee97e55f96dcc7b360660d49aa7c | 4,880 | py | Python | tests/ml_utils/test_summary_performance_metrics_classification.py | jameshtwose/jmspack | b226519c1b8a0007f3d59eb8117234e63194d745 | [
"BSD-3-Clause"
] | null | null | null | tests/ml_utils/test_summary_performance_metrics_classification.py | jameshtwose/jmspack | b226519c1b8a0007f3d59eb8117234e63194d745 | [
"BSD-3-Clause"
] | 4 | 2021-03-21T14:46:19.000Z | 2021-12-21T09:33:56.000Z | tests/ml_utils/test_summary_performance_metrics_classification.py | jameshtwose/jmspack | b226519c1b8a0007f3d59eb8117234e63194d745 | [
"BSD-3-Clause"
] | null | null | null | import pytest
import seaborn as sns
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from jmspack.ml_utils import summary_performance_metrics_classification
@pytest.fixture
def df_iris():
    """Example data set: the binary (virginica/versicolor) subset of iris."""
    raw = sns.load_dataset("iris")
    subset = raw[raw["species"].isin(["virginica", "versicolor"])]
    # Encode the two remaining species labels as integer category codes.
    return subset.assign(
        species=lambda frame: frame["species"].astype("category").cat.codes
    )
class TestPerformanceMetricClassification:
"""Test for the function summary_performance_metrics_classification"""
def test_returns_expected_values(self, df_iris):
X = df_iris.drop("species", axis=1)
y = df_iris["species"]
clf = LogisticRegression().fit(X, y)
summary_df = summary_performance_metrics_classification(
model=clf, X_test=X, y_true=y, bootstraps=100, fold_size=1000
)
assert summary_df["TN"].iloc[0] == pytest.approx(47)
assert summary_df["FP"].iloc[0] == pytest.approx(3)
assert summary_df["FN"].iloc[0] == pytest.approx(1)
assert summary_df["TP"].iloc[0] == pytest.approx(49)
assert summary_df["Accuracy"].iloc[0] == pytest.approx(0.96)
assert summary_df["Balanced Accuracy"].iloc[0] == pytest.approx(0.96)
assert summary_df["Prevalence"].iloc[0] == pytest.approx(0.5)
assert summary_df["Sensitivity"].iloc[0] == pytest.approx(0.98)
assert summary_df["Specificity"].iloc[0] == pytest.approx(0.94)
assert summary_df["PPV"].iloc[0] == pytest.approx(0.942)
assert summary_df["NPV"].iloc[0] == pytest.approx(0.979)
assert summary_df["auc"].iloc[0] == pytest.approx(0.995)
assert (
summary_df["Mean AUC (CI 5%-95%)"].iloc[0] == "0.997 (95% CI 0.997-0.997)"
)
assert summary_df["F1"].iloc[0] == pytest.approx(0.961)
def test_returns_expected_values_SVC_probability_False(self, df_iris):
X = df_iris.drop("species", axis=1)
y = df_iris["species"]
clf = SVC(probability=False).fit(X, y)
summary_df = summary_performance_metrics_classification(
model=clf, X_test=X, y_true=y, bootstraps=100, fold_size=1000
)
assert summary_df["TN"].iloc[0] == pytest.approx(48)
assert summary_df["FP"].iloc[0] == pytest.approx(2)
assert summary_df["FN"].iloc[0] == pytest.approx(2)
assert summary_df["TP"].iloc[0] == pytest.approx(48)
assert summary_df["Accuracy"].iloc[0] == pytest.approx(0.96)
assert summary_df["Balanced Accuracy"].iloc[0] == pytest.approx(0.96)
assert summary_df["Prevalence"].iloc[0] == pytest.approx(0.5)
assert summary_df["Sensitivity"].iloc[0] == pytest.approx(0.96)
assert summary_df["Specificity"].iloc[0] == pytest.approx(0.96)
assert summary_df["PPV"].iloc[0] == pytest.approx(0.96)
assert summary_df["NPV"].iloc[0] == pytest.approx(0.96)
assert summary_df["auc"].iloc[0] == pytest.approx(0.96)
assert (
summary_df["Mean AUC (CI 5%-95%)"].iloc[0] == "0.970 (95% CI 0.970-0.970)"
)
assert summary_df["F1"].iloc[0] == pytest.approx(0.96)
def test_returns_expected_values_SVC_probability_True(self, df_iris):
X = df_iris.drop("species", axis=1)
y = df_iris["species"]
clf = SVC(probability=True).fit(X, y)
summary_df = summary_performance_metrics_classification(
model=clf, X_test=X, y_true=y, bootstraps=100, fold_size=1000
)
assert summary_df["TN"].iloc[0] == pytest.approx(48)
assert summary_df["FP"].iloc[0] == pytest.approx(2)
assert summary_df["FN"].iloc[0] == pytest.approx(2)
assert summary_df["TP"].iloc[0] == pytest.approx(48)
assert summary_df["Accuracy"].iloc[0] == pytest.approx(0.96)
assert summary_df["Balanced Accuracy"].iloc[0] == pytest.approx(0.96)
assert summary_df["Prevalence"].iloc[0] == pytest.approx(0.5)
assert summary_df["Sensitivity"].iloc[0] == pytest.approx(0.96)
assert summary_df["Specificity"].iloc[0] == pytest.approx(0.96)
assert summary_df["PPV"].iloc[0] == pytest.approx(0.96)
assert summary_df["NPV"].iloc[0] == pytest.approx(0.96)
assert summary_df["auc"].iloc[0] == pytest.approx(0.995)
assert (
summary_df["Mean AUC (CI 5%-95%)"].iloc[0] == "0.997 (95% CI 0.997-0.997)"
)
assert summary_df["F1"].iloc[0] == pytest.approx(0.96)
    def test_warning_predict_proba(self, df_iris):
        """A model without predict_proba must trigger a UserWarning."""
        X = df_iris.drop("species", axis=1)
        y = df_iris["species"]
        # probability=False means SVC exposes no predict_proba method.
        clf = SVC(probability=False).fit(X, y)
        with pytest.warns(UserWarning):
            _ = summary_performance_metrics_classification(
                model=clf, X_test=X, y_true=y, bootstraps=100, fold_size=1000
            )
| 43.185841 | 86 | 0.629303 |
acef8369e110a1b16b7f87336621ceab5c74641c | 582 | py | Python | mk006-is_identity_matrix.py | karakose77/udacity-cs101-intro-to-computer-science-exercises-and-projects | 5d41d5274f01887f20c6fe82b9214305f4e81e36 | [
"MIT"
] | null | null | null | mk006-is_identity_matrix.py | karakose77/udacity-cs101-intro-to-computer-science-exercises-and-projects | 5d41d5274f01887f20c6fe82b9214305f4e81e36 | [
"MIT"
] | null | null | null | mk006-is_identity_matrix.py | karakose77/udacity-cs101-intro-to-computer-science-exercises-and-projects | 5d41d5274f01887f20c6fe82b9214305f4e81e36 | [
"MIT"
] | null | null | null | # Given a list of lists representing a n * n matrix as input,
# define a procedure that returns True if the input is an identity matrix
# and False otherwise.
def is_identity_matrix(L):
    """
    Returns True if the input matrix is an identity matrix, False otherwise.

    A matrix (list of row lists) is an identity matrix when it is square,
    every diagonal entry equals 1 and every off-diagonal entry equals 0.
    The empty matrix is treated as the trivial 0x0 identity.

    Fixes over the previous version: returns a real bool instead of an
    int built up with `*=`, checks every row's length (not just the
    first), exits early on the first mismatch, and no longer raises
    IndexError for an empty input.
    """
    n = len(L)
    for i, row in enumerate(L):
        # A non-square matrix can never be an identity matrix.
        if len(row) != n:
            return False
        for j, value in enumerate(row):
            # Diagonal entries must be 1, everything else must be 0.
            if value != (1 if i == j else 0):
                return False
    return True
# Demo: a 3x3 identity matrix, so this prints True.
print(is_identity_matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]]))
acef84f82c7d4e80ef6b98c09384791044156b25 | 855 | py | Python | DVC/data/UVG/convert.py | YOULLNEVERWA/PyTorchVideoCompression | 48b57298c86557d151627dc3ef8a2db8ab613654 | [
"MIT"
] | null | null | null | DVC/data/UVG/convert.py | YOULLNEVERWA/PyTorchVideoCompression | 48b57298c86557d151627dc3ef8a2db8ab613654 | [
"MIT"
] | null | null | null | DVC/data/UVG/convert.py | YOULLNEVERWA/PyTorchVideoCompression | 48b57298c86557d151627dc3ef8a2db8ab613654 | [
"MIT"
] | null | null | null | import os
# Number of UVG test sequences to convert.
num = 7
# Raw YUV sources (1920x1024, 120 fps, 4:2:0, 8-bit) expected under videos_crop/.
video_name = ['Beauty_1920x1024_120fps_420_8bit_YUV.yuv', 'HoneyBee_1920x1024_120fps_420_8bit_YUV.yuv', 'ReadySteadyGo_1920x1024_120fps_420_8bit_YUV.yuv', 'YachtRide_1920x1024_120fps_420_8bit_YUV.yuv', 'Bosphorus_1920x1024_120fps_420_8bit_YUV.yuv', 'Jockey_1920x1024_120fps_420_8bit_YUV.yuv', 'ShakeNDry_1920x1024_120fps_420_8bit_YUV.yuv']
# Short sequence names used as per-sequence output directories under images/.
short = ['Beauty', 'HoneyBee', 'ReadySteadyGo', 'YachtRide', 'Bosphorus', 'Jockey', 'ShakeNDry']
for i in range(num):
    saveroot = 'images/' + short[i]
    # %03d is expanded by ffmpeg into the frame counter (im001.png, im002.png, ...).
    savepath = 'images/' + short[i] + '/im%03d.png'
    if not os.path.exists(saveroot):
        os.makedirs(saveroot)
    # Echo the exact ffmpeg command before running it for easier debugging.
    print('ffmpeg -y -pix_fmt yuv420p -s 1920x1024 -i ' + 'videos_crop/' + video_name[i] + ' ' + savepath)
    os.system('ffmpeg -y -pix_fmt yuv420p -s 1920x1024 -i ' + 'videos_crop/' + video_name[i] + ' ' + savepath)
| 61.071429 | 341 | 0.725146 |
acef8550f2c00ee9b012a0873ff79f557c958c65 | 2,881 | py | Python | data_structures_and_algorithms_commented/quick_sort.py | ErlendKH/data_structures_and_algorithms | 909a28c65e28c07bf170b7e3785bbf02f4ad182f | [
"CNRI-Python"
] | null | null | null | data_structures_and_algorithms_commented/quick_sort.py | ErlendKH/data_structures_and_algorithms | 909a28c65e28c07bf170b7e3785bbf02f4ad182f | [
"CNRI-Python"
] | null | null | null | data_structures_and_algorithms_commented/quick_sort.py | ErlendKH/data_structures_and_algorithms | 909a28c65e28c07bf170b7e3785bbf02f4ad182f | [
"CNRI-Python"
] | null | null | null |
#
def swap(my_list, index1, index2):
    """Exchange the elements at index1 and index2 of my_list in place."""
    my_list[index1], my_list[index2] = my_list[index2], my_list[index1]
# pivot index = 0
# end_index = length - 1
# last index of the list's length, so length - 1.
def pivot(my_list, pivot_index, end_index):
    """Partition my_list[pivot_index:end_index + 1] around its first element.

    Every element smaller than the pivot value ends up to the pivot's left,
    the rest to its right. Returns the index where the pivot finally lands.
    """
    boundary = pivot_index
    for scan in range(pivot_index + 1, end_index + 1):
        if my_list[scan] < my_list[pivot_index]:
            # Grow the "smaller than pivot" region and pull the element in.
            boundary += 1
            my_list[boundary], my_list[scan] = my_list[scan], my_list[boundary]
    # Drop the pivot into its sorted position at the region boundary.
    my_list[pivot_index], my_list[boundary] = my_list[boundary], my_list[pivot_index]
    return boundary
###
# So in the example:
# left = 0
# right = 6 (length of list - 1)
def quick_sort_helper(my_list, left, right):
    """Recursively quicksort my_list[left:right + 1] in place; returns my_list."""
    if left >= right:
        # Slice of zero or one element: already sorted.
        return my_list
    split = pivot(my_list, left, right)
    # The element at `split` is in its final place; sort both remaining halves.
    quick_sort_helper(my_list, left, split - 1)
    quick_sort_helper(my_list, split + 1, right)
    return my_list
# For not needing to pass beginning and end index
def quick_sort(my_list):
    """Sort my_list in place with quicksort and return it (convenience wrapper)."""
    last_index = len(my_list) - 1
    return quick_sort_helper(my_list, 0, last_index)
# Demo run: prints the fully sorted list.
print(quick_sort([4,6,1,7,3,2,5])) # [1, 2, 3, 4, 5, 6, 7]
# So, based on the printed debug messages of pivot and quick_sort:
# Initial quick_sort_helper([list], 0, 6):
# quick_sort | left: 0
# quick_sort | right: 6
# pivot | pivot_index: 0
# pivot | end_index: 6
# pivot | swap_index: 3
# So it quick sorts the left side first.
# index 0 to 2:
# quick_sort | left: 0
# quick_sort | right: 2
# pivot | pivot_index: 0
# pivot | end_index: 2
# pivot | swap_index: 1
# Here, left is not less than right, so left side is done.
# quick_sort | left: 0
# quick_sort | right: 0
# quick_sort | left: 2
# quick_sort | right: 2
# Quick-sorting the right side -- index 4 to 6:
# quick_sort | left: 4
# quick_sort | right: 6
# pivot | pivot_index: 4
# pivot | end_index: 6
# pivot | swap_index: 5
# This time, left is not less than right on the right side
# of the pivot, so this breaks the second recursion.
# quick_sort | left: 4
# quick_sort | right: 4
# quick_sort | left: 6
# quick_sort | right: 6
# Finally, the sorted list is returned.
# [1, 2, 3, 4, 5, 6, 7]
| 26.675926 | 67 | 0.621659 |
acef85601384ec5ed7be67cf974028f368cb6387 | 873 | py | Python | scripts/rtdc_scripts/plot_rtdc_image/plot_rtdc_image.py | GuckLab/Code-Sharing-Python | b82bd5b63ade26c71e424c2f23711542a148b343 | [
"MIT"
] | null | null | null | scripts/rtdc_scripts/plot_rtdc_image/plot_rtdc_image.py | GuckLab/Code-Sharing-Python | b82bd5b63ade26c71e424c2f23711542a148b343 | [
"MIT"
] | 3 | 2021-08-05T13:00:43.000Z | 2021-11-15T14:58:14.000Z | scripts/rtdc_scripts/plot_rtdc_image/plot_rtdc_image.py | GuckLab/Code-Sharing-Python | b82bd5b63ade26c71e424c2f23711542a148b343 | [
"MIT"
] | 1 | 2021-08-04T12:41:59.000Z | 2021-08-04T12:41:59.000Z | """
Function for plotting an rtdc image
This is just an example function. It isn't very useful.
"""
# import modules/packages at the top of the script
# remember to make a requirements.txt file with the package versions
import dclab
import matplotlib.pyplot as plt
# write your tool (function, class)
# remember to run flake8 on your script before uploading it
def plot_rtdc_image(rtdc_ds, image_n):
    """Plot the nth image in an rtdc dataset

    Opens a matplotlib window non-blocking, keeps it up for about
    3 seconds, then closes it again.

    Parameters
    ----------
    rtdc_ds : rtdc dataset
    image_n : int
        The index of the image you wish to plot
    """
    fig, ax = plt.subplots(1, 1, figsize=(9, 5))
    ax.imshow(rtdc_ds["image"][image_n])
    # Non-blocking show plus pause so the script continues after ~3 s.
    plt.show(block=False)
    plt.pause(3)
    plt.close()
# example use of the above function
# NOTE(review): the string looks like a DCOR dataset identifier that dclab
# resolves remotely — presumably requires network access; confirm.
ds = dclab.new_dataset("fb719fb2-bd9f-817a-7d70-f4002af916f0")
plot_rtdc_image(rtdc_ds=ds, image_n=5)
| 23.594595 | 68 | 0.69874 |
acef856fb849232455eee5e4fc38c84906d20090 | 902 | py | Python | darts/__init__.py | muliliao/darts | 2b5f5c3aa81c6962f4d0d2ba5f280d42f5dc5eb0 | [
"Apache-2.0"
] | null | null | null | darts/__init__.py | muliliao/darts | 2b5f5c3aa81c6962f4d0d2ba5f280d42f5dc5eb0 | [
"Apache-2.0"
] | null | null | null | darts/__init__.py | muliliao/darts | 2b5f5c3aa81c6962f4d0d2ba5f280d42f5dc5eb0 | [
"Apache-2.0"
] | null | null | null | """
darts
-----
"""
from .timeseries import TimeSeries
import matplotlib as mpl
from matplotlib import cycler
__version__ = '0.9.0'
# Color cycle applied to all matplotlib axes created after import.
colors = cycler(color=['black', '003DFD', 'b512b8', '11a9ba', '0d780f', 'f77f07', 'ba0f0f'])
# Package-wide matplotlib style (spines off, light grid, small ticks).
u8plots_mplstyle = {
'font.family' : 'sans serif',
'axes.edgecolor' : 'black',
'axes.grid' : True,
'axes.labelcolor': '#333333',
'axes.labelweight' : 600,
'axes.linewidth' : 1,
'axes.prop_cycle' : colors,
'axes.spines.top' : False,
'axes.spines.right' : False,
'axes.spines.bottom' : False,
'axes.spines.left' : False,
'grid.color' : '#dedede',
'legend.frameon' : False,
'lines.linewidth' : 1.3,
'xtick.bottom' : False,
'xtick.color': '#333333',
'xtick.labelsize':'small',
'ytick.color': '#333333',
'ytick.labelsize':'small',
# NOTE(review): duplicate key — 'xtick.bottom' already appears above with the
# same value, so this entry is redundant (harmless, but could be removed).
'xtick.bottom' : False,
}
# Install the style globally as soon as the package is imported.
mpl.rcParams.update(u8plots_mplstyle)
| 22.55 | 92 | 0.613082 |
acef85fdd49331259bf9b10ae0fbc9ed2e6ed516 | 4,894 | py | Python | modules/helpers.py | chucknado/zlo | 39b666c2c4e819205b4d82ac9d27e9ef9be0b9ff | [
"MIT"
] | 2 | 2020-01-17T14:52:43.000Z | 2020-05-14T08:05:20.000Z | modules/helpers.py | chucknado/zlo | 39b666c2c4e819205b4d82ac9d27e9ef9be0b9ff | [
"MIT"
] | 1 | 2020-05-13T17:15:09.000Z | 2020-05-13T17:15:09.000Z | modules/helpers.py | chucknado/zlo | 39b666c2c4e819205b4d82ac9d27e9ef9be0b9ff | [
"MIT"
] | null | null | null | import json
import configparser
from pathlib import Path
from bs4 import BeautifulSoup, Comment
from modules.api import get_resource_list
def get_path_setting(name=''):
    """
    Gets a path specified in the Files section of the settings.ini file.
    :param name: One of the variable names in the FILES section of settings.ini
    :return: Path object from the pathlib library
    """
    parser = configparser.ConfigParser()
    parser.read('settings.ini')
    try:
        raw_value = parser['PATHS'][name]
    except KeyError:
        print(f'\'{name}\' is not a valid argument for get_path(). Exiting.')
        exit()
    path = Path(raw_value)
    if not path.exists():
        print('The path in settings.ini does not exist on your system. Exiting.')
        exit()
    return path
def get_aws_setting(name=''):
    """
    Gets a setting specified in the AWS section of the settings.ini file.
    :param name: One of the variable names in the AWS section of settings.ini
    :return: String
    """
    parser = configparser.ConfigParser()
    parser.read('settings.ini')
    try:
        return parser['AWS'][name]
    except KeyError:
        print(f'\'{name}\' is not a valid argument for get_aws_path(). Exiting.')
        exit()
def get_image_skip_list():
    """Return the image file names listed in <data>/image_skip_list.txt, one per line."""
    skip_file = get_path_setting('data') / 'image_skip_list.txt'
    return skip_file.read_text().splitlines()
def write_json(file, data):
    """Serialize *data* to *file* (a Path) as UTF-8 JSON, indented, keys sorted."""
    with file.open(mode='w', encoding='utf-8') as handle:
        return json.dump(data, handle, indent=2, sort_keys=True)
def read_json(file):
    """Load and return the JSON content of *file* (a Path)."""
    with file.open(mode='r') as handle:
        return json.load(handle)
def create_tree_from_api(response):
    """
    Returns a BeautifulSoup tree object from the HTML returned by the HC API
    :param response: Response from the Articles API containing the article. Converted to Dict from JSON
    :return: A tree object. Returns None if the parsed tree lacks an html or body element.
    """
    body = '<html>' + response['body'] + '</html>'  # to parse all the file (prevent `<p> </p>` None-type errors)
    tree = BeautifulSoup(body, 'lxml')
    if tree.html is None or tree.body is None:
        print('{}: tree.html or tree.body is None'.format(response['id']))
        return None
    # Remove every HTML comment node from the tree.
    comments = tree.find_all(text=lambda text: isinstance(text, Comment))
    [comment.extract() for comment in comments]
    # Build a <head><meta charset="utf-8"/></head> and place it before <body>.
    head = tree.new_tag('head')
    meta = tree.new_tag('meta')
    meta['charset'] = 'utf-8'
    head.append(meta)
    tree.body.insert_before(head)
    # Prepend the article title as an <h1> at the top of the body.
    h1 = tree.new_tag('h1')
    h1.string = response['title']
    tree.body.insert(0, h1)
    return tree
def create_tree_from_file(path):
    """Parse the UTF-8 HTML file at *path* and return a BeautifulSoup tree."""
    return BeautifulSoup(path.read_text(encoding='utf-8'), 'lxml')
def get_article_markup(tree):
    """
    Serializes the parsed tree to a string for writing to file.
    :param tree: A BeautifulSoup tree object
    :return: String prefixed with an XML declaration
    """
    declaration = '<?xml version="1.0" encoding="UTF-8"?>\n'
    return declaration + str(tree)
def get_article_images(tree):
    """Return file names of S3-hosted doc images in *tree*, minus skip-listed ones."""
    skip_names = get_image_skip_list()
    names = []
    for img in tree.find_all('img'):
        src_path = Path(img['src'])
        # Only collect images served from the documentation S3 bucket.
        hosted = 'zen-marketing-documentation.s3.amazonaws.com/docs/' in str(src_path)
        if hosted and src_path.name not in skip_names:
            names.append(src_path.name)
    return names
def get_article_image_names(handoff_name, handoff_manifest, article_list):
    """Collect image names referenced by the handoff HTML files of the listed articles."""
    handoff_root = get_path_setting('handoffs')
    collected = []
    for entry in handoff_manifest:
        # Only process manifest entries whose article id was requested.
        if entry['id'] not in article_list:
            continue
        html_file = handoff_root / handoff_name / entry['hc'] / 'articles' / '{}.html'.format(entry['id'])
        tree = create_tree_from_file(html_file)
        collected.extend(get_article_images(tree))
    return collected
def get_http_method(article_id, article_locale, hc):
    """
    Check if any missing translations of the article exist. Use post for them, otherwise put.
    :param article_id:
    :param article_locale:
    :param hc:
    :return: 'post' when the locale's translation is missing, else 'put'
    """
    url = ('https://{}.zendesk.com/api/v2/help_center'.format(hc)
           + '/articles/{}/translations/missing.json'.format(article_id))
    missing_locales = get_resource_list(url, list_name='locales', paginate=False)
    if missing_locales is False:
        print('\nError getting missing translations for {}. Exiting.'.format(article_id))
        exit()
    # A missing translation must be created (post); an existing one updated (put).
    return 'post' if article_locale in missing_locales else 'put'
| 32.197368 | 113 | 0.663465 |
acef8638f3119c8d2f32c8b46e7095d245f2892f | 1,441 | py | Python | app/control_hadoop_logout.py | TanmayC2001/Serverin-Devops-Integration | 5aeb0ea249a8e87c4a8aa12c0c7165636ce9d44d | [
"MIT"
] | null | null | null | app/control_hadoop_logout.py | TanmayC2001/Serverin-Devops-Integration | 5aeb0ea249a8e87c4a8aa12c0c7165636ce9d44d | [
"MIT"
] | null | null | null | app/control_hadoop_logout.py | TanmayC2001/Serverin-Devops-Integration | 5aeb0ea249a8e87c4a8aa12c0c7165636ce9d44d | [
"MIT"
] | null | null | null | # Server - Controller Node termination script (Logout Script)
import boto3
import boto3.session
from loaders import BarLoader
# Progress indicator displayed while AWS calls are in flight.
loader = BarLoader()
# terminate instance
# boto3 session bound to the 'hadoop' AWS CLI profile.
hadoop_session = boto3.Session(profile_name='hadoop')
def terminate_instance(instance_id):
    """Stop and then terminate the EC2 instances whose ids are in *instance_id* (a list)."""
    loader.start()
    ec2 = hadoop_session.resource('ec2')
    # Stop the instances first, then terminate the same set.
    ec2.instances.filter(InstanceIds=instance_id).stop()
    ec2.instances.filter(InstanceIds=instance_id).terminate()
    loader.stop()
    print("Hadoop Controller Instance terminated successfully\n")
# 1 EC2 instances status check
def status_check():
    """Find running hadoop-cluster EC2 instances and terminate them.

    Looks up instances tagged Controller-hadoop / NameNode / DataNode and
    collects the ids of every one in the 'running' state. If any are found
    they are passed to terminate_instance(); otherwise nothing is touched.

    Fixes over the previous version: the id list used to be re-created
    inside the loop (so only the last running instance was ever kept), and
    print(instance_id) after the loop raised NameError when no instance
    was running.
    """
    print("Status Check Hadoop Controller............\n")
    loader.start()
    conn = hadoop_session.resource('ec2')
    instances = conn.instances.filter(
        Filters=[{'Name': 'tag:Name', 'Values': ['Controller-hadoop', 'NameNode', 'DataNode']}])
    # Accumulate ids of all running instances (initialized once, before the loop).
    instance_id = []
    for instance in instances:
        if instance.state["Name"] == "running":
            print('Instance exists')
            instance_id.append(str(instance.id))
            print(instance_id)
    loader.stop()
    if instance_id:
        print("These Are Instance ID's")
        print(instance_id)
        terminate_instance(instance_id)
    else:
        print("Instance dont exist............\n")
# status_check()
| 30.020833 | 97 | 0.639833 |
acef86790dedf8c0e85a3e7ff62c805375e6008b | 6,493 | py | Python | pong.py | HuttNerd/pgzero-pong | 7801dee2c8572f713ed28f9f9c6e2956b6ad1484 | [
"MIT"
] | null | null | null | pong.py | HuttNerd/pgzero-pong | 7801dee2c8572f713ed28f9f9c6e2956b6ad1484 | [
"MIT"
] | null | null | null | pong.py | HuttNerd/pgzero-pong | 7801dee2c8572f713ed28f9f9c6e2956b6ad1484 | [
"MIT"
] | null | null | null | import pgzrun
from math import sin, cos, radians
from time import sleep
#setup the constants
TITLE = "Pygame Zero Pong"
WIDTH = 1000
HEIGHT = 800
BALLSPEED = 10   # initial ball speed in pixels per frame
PADDLESPEED = 8  # player paddle speed in pixels per frame
MAXBOUNCEANGLE = 54  # maximum deflection (degrees) off a paddle hit
GAMELENGTH = 11  # goals needed to win a game
# Mutable game state: 0 = title screen, 1 = 1-player, 2 = 2-player, 3 = winner screen.
gamemode = 0
winner = " "
# When True, update() sleeps briefly so players can prepare for the serve.
hold = False
def reset_game(angle):
    """Center the ball and paddles and serve the ball at *angle* degrees
    (0 serves toward the right player, 180 toward the left)."""
    global hold
    #setup ball properties
    ball.pos = WIDTH / 2, HEIGHT / 2
    # Track position as floats so sub-pixel velocity accumulates correctly.
    ball.x_float = float(ball.x)
    ball.y_float = float(ball.y)
    ball.angle = angle
    ball.x_vel = BALLSPEED * cos(radians(ball.angle))
    ball.y_vel = BALLSPEED * sin(radians(ball.angle))
    ball.speed = BALLSPEED
    ball.strokes = 0
    #position the paddles
    pad1.pos = 30, HEIGHT / 2
    pad2.pos = WIDTH - 30, HEIGHT / 2
    # Tells the game to pause in update()
    hold = True
#create a rectangle of the playing area
screenRect = Rect(20, 60, WIDTH - 40, HEIGHT - 120)
#create ball
ball = Actor('ball')
#create paddles
pad1 = Actor('paddle')
pad2 = Actor('paddle')
#reset the game (serve toward the left player)
reset_game(180)
#setup the goals: [left player score, right player score]
goals = [0, 0]
def draw():
    """Render the current frame: court borders plus the screen for the
    active game mode (title, winner announcement, or live play)."""
    screen.clear()
    # Top and bottom court borders.
    screen.draw.filled_rect(Rect((20, 32),(WIDTH-40, 16)), (255,255,255))
    screen.draw.filled_rect(Rect((20, HEIGHT-48),(WIDTH-40, 16)), (255,255,255))
    if gamemode == 0:
        # Title screen with mode-selection instructions.
        screen.draw.text("PONG", center=(WIDTH // 2, (HEIGHT // 2)-64), fontname="lcd", fontsize=128)
        screen.draw.text("Press 1 for 1-player game\nPress 2 for 2-player game\n\nKeys: L-player - Q & A, R-player - K & M",
                         midtop=(WIDTH // 2, 480), fontname="lcd", fontsize=36)
        return
    if gamemode == 3:
        # Winner announcement screen.
        screen.draw.text(winner + " Wins", center=(WIDTH // 2, (HEIGHT // 2)-64), fontname="lcd", fontsize=100)
        screen.draw.text("Press 1 for 1-player game\nPress 2 for 2-player game",
                         midtop=(WIDTH // 2, 480), fontname="lcd", fontsize=36)
        return
    # Live play: center line, scores, ball (hidden during serve pause), paddles.
    screen.blit('middots', (500-8, 48))
    screen.draw.text(str(goals[0]), midtop=(250, 80), fontname="lcd", fontsize=72)
    screen.draw.text(str(goals[1]), midtop=(750, 80), fontname="lcd", fontsize=72)
    if not hold:
        ball.draw()
    pad1.draw()
    pad2.draw()
def computer_move():
    """Drive the left paddle (pad1) in 1-player mode: recenter while the
    ball moves away, track the ball while it approaches."""
    if ball.x_vel >= 0:
        #If ball is moving away from paddle, center bat
        if pad1.y < (HEIGHT/2):
            pad1.y += 4
        elif pad1.y > (HEIGHT/2):
            pad1.y -= 4
    #if ball is moving towards bat, track its movement.
    elif ball.x_vel < 0:
        # Tracking speed (7) is slower than the player's PADDLESPEED (8),
        # which keeps the computer beatable.
        if pad1.y < ball.y:
            pad1.y += 7
        else:
            pad1.y -= 7
def update_speed(ball):
    """Register one paddle stroke and recompute the ball's velocity components.

    From the 9th stroke onward the speed increases by 1 on every third stroke,
    so long rallies gradually get faster.
    """
    ball.strokes += 1
    if ball.strokes > 8 and ball.strokes % 3 == 0:
        ball.speed += 1
    angle_rad = radians(ball.angle)
    ball.x_vel = ball.speed * cos(angle_rad)
    ball.y_vel = ball.speed * sin(angle_rad)
def update():
    """Per-frame game logic: mode switching, paddle movement, ball physics,
    goal detection, and paddle-bounce handling."""
    global goals, gamemode, winner, hold
    # pause to let player(s) prepare
    if hold:
        sleep(2)
        hold = False
    # handle game screens, mode 0 is startup screen, mode 3 is winner announcement screen
    if gamemode == 0 or gamemode == 3:
        if keyboard.K_1 or keyboard.KP_1:
            gamemode = 1
            #reset the game
            reset_game(180)
            #setup the goals
            goals = [0, 0]
        if keyboard.K_2 or keyboard.KP_2:
            gamemode = 2
            reset_game(180)
            goals = [0, 0]
        return
    #move the paddles
    if gamemode == 1:
        #in 1-player mode, let the computer operate paddle 1
        computer_move()
    if gamemode == 2:
        #in 2-player mode, let the player operate paddle 1
        if keyboard.q and pad1.top > 48:
            pad1.top -= PADDLESPEED
        if keyboard.a and pad1.bottom < HEIGHT-48:
            pad1.top += PADDLESPEED
    #in all modes, let the player operate paddle 2
    if keyboard.k and pad2.top > 48:
        pad2.top -= PADDLESPEED
    if keyboard.m and pad2.bottom < HEIGHT-48:
        pad2.top += PADDLESPEED
    #move the ball (keep float position so sub-pixel velocity accumulates)
    ball_old_x = ball.x_float
    ball_old_y = ball.y_float
    ball.x_float = ball.x_float + ball.x_vel
    ball.y_float = ball.y_float + ball.y_vel
    ball.x = int(round(ball.x_float))
    ball.y = int(round(ball.y_float))
    #move the ball back to where it was?
    reset_ball = False
    #has the ball left the screen?
    if not screenRect.contains(ball._rect):
        #did it hit the top or bottom?
        if ball.top < 32 or ball.bottom > HEIGHT-32:
            ball.y_vel *= -1
            reset_ball = True
        #it must have hit the side
        else:
            if ball.left < 10:
                # Left edge: right player (index 1) scores; serve toward the loser.
                print("Player 2 goal")
                goals[1] += 1
                reset_game(180)
                print("Score {} : {}".format(goals[0], goals[1]))
                if goals[1] == GAMELENGTH:
                    if gamemode == 1:
                        winner = "Player"
                    if gamemode == 2:
                        winner = "Player 2"
                    gamemode = 3
                return
            elif ball.right > WIDTH - 10:
                # Right edge: left player (index 0) scores.
                print("player 1 goal")
                goals[0] += 1
                reset_game(0)
                print("Score {} : {}".format(goals[0], goals[1]))
                if goals[0] == GAMELENGTH:
                    if gamemode == 1:
                        winner = "Computer"
                    if gamemode == 2:
                        winner = "Player 1"
                    gamemode = 3
                return
    #has the ball hit a paddle
    if pad1.colliderect(ball):
        #work out the bounce angle: farther from paddle center = steeper deflection
        bounce_angle = ((ball.y - pad1.y) / (pad1.height / 2)) * MAXBOUNCEANGLE
        ball.angle = max(0 - MAXBOUNCEANGLE, min(MAXBOUNCEANGLE, bounce_angle))
        #work out the ball velocity
        update_speed(ball)
        reset_ball = True
    elif pad2.colliderect(ball):
        bounce_angle = 180 - (((ball.y - pad2.y) / (pad2.height / 2)) * MAXBOUNCEANGLE)
        ball.angle = max(180 - MAXBOUNCEANGLE, min(180 + MAXBOUNCEANGLE, bounce_angle))
        update_speed(ball)
        reset_ball = True
    if reset_ball:
        ball.x_float = ball_old_x + ball.x_vel  # The second term prevents the ball from sticking to the paddle
        ball.y_float = ball_old_y + ball.y_vel  # The second term prevents the ball from sticking to the paddle
        ball.x = int(round(ball.x_float))
        ball.y = int(round(ball.y_float))
# Hand control to Pygame Zero's game loop (blocks until the window closes).
pgzrun.go()
| 31.673171 | 124 | 0.5643 |
acef86a03bbe3648fa61ad8d9c0c920f87def456 | 48,276 | py | Python | SCons/Tool/__init__.py | fire/scons | f5f5f99d447bd00e0f2202beddb9d86bf0417589 | [
"MIT"
] | null | null | null | SCons/Tool/__init__.py | fire/scons | f5f5f99d447bd00e0f2202beddb9d86bf0417589 | [
"MIT"
] | null | null | null | SCons/Tool/__init__.py | fire/scons | f5f5f99d447bd00e0f2202beddb9d86bf0417589 | [
"MIT"
] | null | null | null | """SCons.Tool
SCons tool selection.
This looks for modules that define a callable object that can modify
a construction environment as appropriate for a given tool (or tool
chain).
Note that because this subsystem just *selects* a callable that can
modify a construction environment, it's possible for people to define
their own "tool specification" in an arbitrary callable function. No
one needs to use or tie in to this subsystem in order to roll their own
tool definition.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import sys
import os
from collections.abc import Callable
import importlib.util
import SCons.Builder
import SCons.Errors
import SCons.Node.FS
import SCons.Scanner
import SCons.Scanner.C
import SCons.Scanner.D
import SCons.Scanner.LaTeX
import SCons.Scanner.Prog
import SCons.Scanner.SWIG
# Extra directories searched for tool modules in addition to a Tool's own toolpath.
DefaultToolpath = []
# Singleton scanner instances shared by the builders below.
CScanner = SCons.Scanner.C.CScanner()
DScanner = SCons.Scanner.D.DScanner()
LaTeXScanner = SCons.Scanner.LaTeX.LaTeXScanner()
PDFLaTeXScanner = SCons.Scanner.LaTeX.PDFLaTeXScanner()
ProgramScanner = SCons.Scanner.Prog.ProgramScanner()
SourceFileScanner = SCons.Scanner.Base({}, name='SourceFileScanner')
SWIGScanner = SCons.Scanner.SWIG.SWIGScanner()
# Suffix lists mapping source-file extensions to the scanner that handles them.
CSuffixes = [".c", ".C", ".cxx", ".cpp", ".c++", ".cc",
             ".h", ".H", ".hxx", ".hpp", ".hh",
             ".F", ".fpp", ".FPP",
             ".m", ".mm",
             ".S", ".spp", ".SPP", ".sx"]
DSuffixes = ['.d']
IDLSuffixes = [".idl", ".IDL"]
LaTeXSuffixes = [".tex", ".ltx", ".latex"]
SWIGSuffixes = ['.i']
for suffix in CSuffixes:
    SourceFileScanner.add_scanner(suffix, CScanner)
for suffix in DSuffixes:
    SourceFileScanner.add_scanner(suffix, DScanner)
for suffix in SWIGSuffixes:
    SourceFileScanner.add_scanner(suffix, SWIGScanner)
# FIXME: what should be done here? Two scanners scan the same extensions,
# but look for different files, e.g., "picture.eps" vs. "picture.pdf".
# The builders for DVI and PDF explicitly reference their scanners
# I think that means this is not needed???
for suffix in LaTeXSuffixes:
    SourceFileScanner.add_scanner(suffix, LaTeXScanner)
    SourceFileScanner.add_scanner(suffix, PDFLaTeXScanner)
# Tool aliases are needed for those tools whose module names also
# occur in the python standard library. This causes module shadowing and
# can break using python library functions under python3
TOOL_ALIASES = {
    'gettext': 'gettext_tool',
    'clang++': 'clangxx',
}
class Tool:
    """Represents one SCons tool: locates its module on the toolpath (or in
    SCons.Tool), imports it, and exposes the module's generate()/exists()
    entry points (plus options(), when the module defines it)."""
    def __init__(self, name, toolpath=None, **kw):
        # Avoid the mutable-default-argument trap for toolpath.
        if toolpath is None:
            toolpath = []
        # Rename if there's a TOOL_ALIAS for this tool
        self.name = TOOL_ALIASES.get(name, name)
        self.toolpath = toolpath + DefaultToolpath
        # remember these so we can merge them into the call
        self.init_kw = kw
        # Importing the module happens eagerly, at construction time.
        module = self._tool_module()
        self.generate = module.generate
        self.exists = module.exists
        if hasattr(module, 'options'):
            self.options = module.options
    def _load_dotted_module_py2(self, short_name, full_name, searchpaths=None):
        """Legacy Python-2-style loader for dotted tool names, using imp;
        walks each name component, narrowing the search path as it goes."""
        import imp
        splitname = short_name.split('.')
        index = 0  # NOTE(review): unused variable, kept for byte-compatibility
        srchpths = searchpaths
        for item in splitname:
            file, path, desc = imp.find_module(item, srchpths)
            mod = imp.load_module(full_name, file, path, desc)
            srchpths = [path]
        return mod, file
    def _tool_module(self):
        """Locate and import this tool's module.

        Search order: (1) explicit toolpath entries, as a module file or a
        package directory; (2) the SCons.Tool package itself; (3) legacy
        fallbacks (imp-based dotted loading, then zipimport). Temporarily
        prepends the toolpath to sys.path and restores it afterwards.
        Raises SCons.Errors.SConsEnvironmentError when nothing is found.
        """
        oldpythonpath = sys.path
        sys.path = self.toolpath + sys.path
        # sys.stderr.write("Tool:%s\nPATH:%s\n"%(self.name,sys.path))
        # From: http://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path/67692#67692
        # import importlib.util
        # spec = importlib.util.spec_from_file_location("module.name", "/path/to/file.py")
        # foo = importlib.util.module_from_spec(spec)
        # spec.loader.exec_module(foo)
        # foo.MyClass()
        # Py 3 code
        # sys.stderr.write("toolpath:%s\n" % self.toolpath)
        # sys.stderr.write("SCONS.TOOL path:%s\n" % sys.modules['SCons.Tool'].__path__)
        debug = False
        spec = None
        found_name = self.name
        add_to_scons_tools_namespace = False
        # Pass 1: look for the tool on the explicit toolpath, either as a
        # single .py file or as a package directory with __init__.py.
        for path in self.toolpath:
            sepname = self.name.replace('.', os.path.sep)
            file_path = os.path.join(path, "%s.py" % sepname)
            file_package = os.path.join(path, sepname)
            if debug: sys.stderr.write("Trying:%s %s\n" % (file_path, file_package))
            if os.path.isfile(file_path):
                spec = importlib.util.spec_from_file_location(self.name, file_path)
                if debug: print("file_Path:%s FOUND" % file_path)
                break
            elif os.path.isdir(file_package):
                file_package = os.path.join(file_package, '__init__.py')
                spec = importlib.util.spec_from_file_location(self.name, file_package)
                if debug: print("PACKAGE:%s Found" % file_package)
                break
            else:
                continue
        # Pass 2: fall back to the bundled SCons.Tool package.
        if spec is None:
            if debug: sys.stderr.write("NO SPEC :%s\n" % self.name)
            spec = importlib.util.find_spec("." + self.name, package='SCons.Tool')
            if spec:
                found_name = 'SCons.Tool.' + self.name
                add_to_scons_tools_namespace = True
            if debug: sys.stderr.write("Spec Found? .%s :%s\n" % (self.name, spec))
        if spec is None:
            error_string = "No module named %s" % self.name
            raise SCons.Errors.SConsEnvironmentError(error_string)
        module = importlib.util.module_from_spec(spec)
        if module is None:
            if debug: print("MODULE IS NONE:%s" % self.name)
            error_string = "No module named %s" % self.name
            raise SCons.Errors.SConsEnvironmentError(error_string)
        # Don't reload a tool we already loaded.
        sys_modules_value = sys.modules.get(found_name, False)
        found_module = None
        if sys_modules_value and sys_modules_value.__file__ == spec.origin:
            found_module = sys.modules[found_name]
        else:
            # Not sure what to do in the case that there already
            # exists sys.modules[self.name] but the source file is
            # different.. ?
            module = spec.loader.load_module(spec.name)
            sys.modules[found_name] = module
            if add_to_scons_tools_namespace:
                # If we found it in SCons.Tool, then add it to the module
                setattr(SCons.Tool, self.name, module)
            found_module = module
        if found_module is not None:
            sys.path = oldpythonpath
            return found_module
        # Pass 3: legacy fallbacks — sys.modules cache, imp-based dotted
        # loading, and finally zipimport from the SCons.Tool package path.
        sys.path = oldpythonpath
        full_name = 'SCons.Tool.' + self.name
        try:
            return sys.modules[full_name]
        except KeyError:
            try:
                smpath = sys.modules['SCons.Tool'].__path__
                try:
                    module, file = self._load_dotted_module_py2(self.name, full_name, smpath)
                    setattr(SCons.Tool, self.name, module)
                    if file:
                        file.close()
                    return module
                except ImportError as e:
                    if str(e) != "No module named %s" % self.name:
                        raise SCons.Errors.SConsEnvironmentError(e)
                    try:
                        import zipimport
                        importer = zipimport.zipimporter(sys.modules['SCons.Tool'].__path__[0])
                        module = importer.load_module(full_name)
                        setattr(SCons.Tool, self.name, module)
                        return module
                    except ImportError as e:
                        m = "No tool named '%s': %s" % (self.name, e)
                        raise SCons.Errors.SConsEnvironmentError(m)
            except ImportError as e:
                m = "No tool named '%s': %s" % (self.name, e)
                raise SCons.Errors.SConsEnvironmentError(m)
    def __call__(self, env, *args, **kw):
        """Apply the tool to a construction environment: records the tool in
        env['TOOLS'], wires up tool options if present, then calls generate()."""
        if self.init_kw is not None:
            # Merge call kws into init kws;
            # but don't bash self.init_kw.
            if kw is not None:
                call_kw = kw
                kw = self.init_kw.copy()
                kw.update(call_kw)
            else:
                kw = self.init_kw
        env.Append(TOOLS=[self.name])
        if hasattr(self, 'options'):
            import SCons.Variables
            if 'options' not in env:
                from SCons.Script import ARGUMENTS
                env['options'] = SCons.Variables.Variables(args=ARGUMENTS)
            opts = env['options']
            self.options(opts)
            opts.Update(env)
        self.generate(env, *args, **kw)
    def __str__(self):
        return self.name
##########################################################################
# Create common executable program / library / object builders
def createProgBuilder(env):
    """This is a utility function that creates the Program
    Builder in an Environment if it is not there already.

    If it is already there, we return the existing one.
    """
    try:
        return env['BUILDERS']['Program']
    except KeyError:
        # Imported lazily to avoid a circular import at module load time.
        import SCons.Defaults
        builder = SCons.Builder.Builder(action=SCons.Defaults.LinkAction,
                                        emitter='$PROGEMITTER',
                                        prefix='$PROGPREFIX',
                                        suffix='$PROGSUFFIX',
                                        src_suffix='$OBJSUFFIX',
                                        src_builder='Object',
                                        target_scanner=ProgramScanner)
        env['BUILDERS']['Program'] = builder
        return builder
def createStaticLibBuilder(env):
    """This is a utility function that creates the StaticLibrary
    Builder in an Environment if it is not there already.

    If it is already there, we return the existing one.
    """
    try:
        return env['BUILDERS']['StaticLibrary']
    except KeyError:
        actions = [SCons.Action.Action("$ARCOM", "$ARCOMSTR")]
        # Run ranlib after ar when a ranlib tool is configured or detectable.
        if env.get('RANLIB', False) or env.Detect('ranlib'):
            actions.append(SCons.Action.Action("$RANLIBCOM", "$RANLIBCOMSTR"))
        builder = SCons.Builder.Builder(action=actions,
                                        emitter='$LIBEMITTER',
                                        prefix='$LIBPREFIX',
                                        suffix='$LIBSUFFIX',
                                        src_suffix='$OBJSUFFIX',
                                        src_builder='StaticObject')
        # 'Library' is kept as an alias for 'StaticLibrary'.
        env['BUILDERS']['StaticLibrary'] = builder
        env['BUILDERS']['Library'] = builder
        return builder
def _call_linker_cb(env, callback, args, result=None):
    """Returns the result of env['LINKCALLBACKS'][callback](*args)
    if env['LINKCALLBACKS'] is a dictionary and env['LINKCALLBACKS'][callback]
    is callable. If these conditions are not met, return the value provided as
    the *result* argument. This function is mainly used for generating library
    info such as versioned suffixes, symlink maps, sonames etc. by delegating
    the core job to callbacks configured by current linker tool"""
    Verbose = False
    if Verbose:
        print('_call_linker_cb: args=%r' % args)
        print('_call_linker_cb: callback=%r' % callback)
    try:
        cbfun = env['LINKCALLBACKS'][callback]
    except (KeyError, TypeError):
        # Missing key or a non-mapping LINKCALLBACKS: fall through and
        # return the caller-supplied default result.
        if Verbose:
            print('_call_linker_cb: env["LINKCALLBACKS"][%r] not found or can not be used' % callback)
        pass
    else:
        if Verbose:
            print('_call_linker_cb: env["LINKCALLBACKS"][%r] found' % callback)
            print('_call_linker_cb: env["LINKCALLBACKS"][%r]=%r' % (callback, cbfun))
        if isinstance(cbfun, Callable):
            # Only a callable entry overrides the default result.
            if Verbose:
                print('_call_linker_cb: env["LINKCALLBACKS"][%r] is callable' % callback)
            result = cbfun(env, *args)
    return result
def _call_env_subst(env, string, *args, **kw):
kw2 = {}
for k in ('raw', 'target', 'source', 'conv', 'executor'):
try:
kw2[k] = kw[k]
except KeyError:
pass
return env.subst(string, *args, **kw2)
class _ShLibInfoSupport:
    """Policy object answering SharedLibrary ($SHLIB*) naming queries."""

    @property
    def libtype(self):
        return 'ShLib'

    def get_lib_prefix(self, env, *args, **kw):
        return _call_env_subst(env, '$SHLIBPREFIX', *args, **kw)

    def get_lib_suffix(self, env, *args, **kw):
        return _call_env_subst(env, '$SHLIBSUFFIX', *args, **kw)

    def get_lib_version(self, env, *args, **kw):
        return _call_env_subst(env, '$SHLIBVERSION', *args, **kw)

    def get_lib_noversionsymlinks(self, env, *args, **kw):
        return _call_env_subst(env, '$SHLIBNOVERSIONSYMLINKS', *args, **kw)
class _LdModInfoSupport:
    """Policy object answering LoadableModule ($LDMODULE*) naming queries."""

    @property
    def libtype(self):
        return 'LdMod'

    def get_lib_prefix(self, env, *args, **kw):
        return _call_env_subst(env, '$LDMODULEPREFIX', *args, **kw)

    def get_lib_suffix(self, env, *args, **kw):
        return _call_env_subst(env, '$LDMODULESUFFIX', *args, **kw)

    def get_lib_version(self, env, *args, **kw):
        return _call_env_subst(env, '$LDMODULEVERSION', *args, **kw)

    def get_lib_noversionsymlinks(self, env, *args, **kw):
        return _call_env_subst(env, '$LDMODULENOVERSIONSYMLINKS', *args, **kw)
class _ImpLibInfoSupport:
    """Policy object answering import-library ($IMPLIB*) naming queries.

    Import libraries accompany a shared library or loadable module, so
    several queries fall back to the accompanied library type's settings
    (passed in as ``kw['implib_libtype']``).
    """

    @property
    def libtype(self):
        return 'ImpLib'

    def get_lib_prefix(self, env, *args, **kw):
        return _call_env_subst(env, '$IMPLIBPREFIX', *args, **kw)

    def get_lib_suffix(self, env, *args, **kw):
        return _call_env_subst(env, '$IMPLIBSUFFIX', *args, **kw)

    def get_lib_version(self, env, *args, **kw):
        version = _call_env_subst(env, '$IMPLIBVERSION', *args, **kw)
        if not version:
            # No explicit $IMPLIBVERSION: inherit the version of the
            # library type this import library accompanies.
            try:
                lt = kw['implib_libtype']
            except KeyError:
                pass
            else:
                if lt == 'ShLib':
                    version = _call_env_subst(env, '$SHLIBVERSION', *args, **kw)
                elif lt == 'LdMod':
                    version = _call_env_subst(env, '$LDMODULEVERSION', *args, **kw)
        return version

    def get_lib_noversionsymlinks(self, env, *args, **kw):
        disable = None
        try:
            # Existence check only; the value itself is substituted below.
            env['IMPLIBNOVERSIONSYMLINKS']
        except KeyError:
            # Not set: inherit the accompanied library type's setting.
            try:
                lt = kw['implib_libtype']
            except KeyError:
                pass
            else:
                if lt == 'ShLib':
                    disable = _call_env_subst(env, '$SHLIBNOVERSIONSYMLINKS', *args, **kw)
                elif lt == 'LdMod':
                    disable = _call_env_subst(env, '$LDMODULENOVERSIONSYMLINKS', *args, **kw)
        else:
            disable = _call_env_subst(env, '$IMPLIBNOVERSIONSYMLINKS', *args, **kw)
        return disable
class _LibInfoGeneratorBase:
    """Generator base class for library-related info such as suffixes for
    versioned libraries, symlink maps, sonames etc. It handles commonities
    of SharedLibrary and LoadableModule
    """
    # Maps a libtype tag to the policy class that answers naming queries.
    _support_classes = {'ShLib': _ShLibInfoSupport,
                        'LdMod': _LdModInfoSupport,
                        'ImpLib': _ImpLibInfoSupport}

    def __init__(self, libtype, infoname):
        # The libtype property setter below picks the support class.
        self.libtype = libtype
        # infoname is e.g. 'Prefix', 'Suffix', 'Symlinks', 'Name', 'Soname'.
        self.infoname = infoname

    @property
    def libtype(self):
        return self._support.libtype

    @libtype.setter
    def libtype(self, libtype):
        try:
            support_class = self._support_classes[libtype]
        except KeyError:
            raise ValueError('unsupported libtype %r' % libtype)
        self._support = support_class()

    def get_lib_prefix(self, env, *args, **kw):
        return self._support.get_lib_prefix(env, *args, **kw)

    def get_lib_suffix(self, env, *args, **kw):
        return self._support.get_lib_suffix(env, *args, **kw)

    def get_lib_version(self, env, *args, **kw):
        return self._support.get_lib_version(env, *args, **kw)

    def get_lib_noversionsymlinks(self, env, *args, **kw):
        return self._support.get_lib_noversionsymlinks(env, *args, **kw)

    # Returns name of generator linker callback that shall be used to generate
    # our info for a versioned library. For example, if our libtype is 'ShLib'
    # and infoname is 'Prefix', it would return 'VersionedShLibPrefix'.
    def get_versioned_lib_info_generator(self, **kw):
        try:
            libtype = kw['generator_libtype']
        except KeyError:
            libtype = self.libtype
        return 'Versioned%s%s' % (libtype, self.infoname)

    def generate_versioned_lib_info(self, env, args, result=None, **kw):
        # Delegate to the linker-tool callback (see _call_linker_cb);
        # *result* is returned unchanged when no callback is configured.
        callback = self.get_versioned_lib_info_generator(**kw)
        return _call_linker_cb(env, callback, args, result)
class _LibPrefixGenerator(_LibInfoGeneratorBase):
    """Callable prefix generator, used as ``target_prefix`` by the
    SharedLibrary and LoadableModule builders."""

    def __init__(self, libtype):
        super(_LibPrefixGenerator, self).__init__(libtype, 'Prefix')

    def __call__(self, env, sources=None, **kw):
        verbose = False
        kw2 = kw
        if sources and 'source' not in kw:
            kw2 = dict(kw, source=sources)
        prefix = self.get_lib_prefix(env, **kw2)
        if verbose:
            print("_LibPrefixGenerator: input prefix=%r" % prefix)
        version = self.get_lib_version(env, **kw2)
        if verbose:
            print("_LibPrefixGenerator: version=%r" % version)
        if version:
            # Let the configured linker tool rewrite the prefix for
            # versioned library builds.
            prefix = self.generate_versioned_lib_info(
                env, [prefix, version], prefix, **kw2)
        if verbose:
            print("_LibPrefixGenerator: return prefix=%r" % prefix)
        return prefix
# Ready-to-use prefix generators, one per supported library type.
ShLibPrefixGenerator = _LibPrefixGenerator('ShLib')
LdModPrefixGenerator = _LibPrefixGenerator('LdMod')
ImpLibPrefixGenerator = _LibPrefixGenerator('ImpLib')
class _LibSuffixGenerator(_LibInfoGeneratorBase):
    """Callable suffix generator, used as ``target_suffix`` by the
    SharedLibrary and LoadableModule builders."""

    def __init__(self, libtype):
        super(_LibSuffixGenerator, self).__init__(libtype, 'Suffix')

    def __call__(self, env, sources=None, **kw):
        verbose = False
        kw2 = kw
        if sources and 'source' not in kw:
            kw2 = dict(kw, source=sources)
        suffix = self.get_lib_suffix(env, **kw2)
        if verbose:
            print("_LibSuffixGenerator: input suffix=%r" % suffix)
        version = self.get_lib_version(env, **kw2)
        if verbose:
            print("_LibSuffixGenerator: version=%r" % version)
        if version:
            # Let the configured linker tool rewrite the suffix for
            # versioned library builds (e.g. '.so' -> '.so.0.1.2').
            suffix = self.generate_versioned_lib_info(
                env, [suffix, version], suffix, **kw2)
        if verbose:
            print("_LibSuffixGenerator: return suffix=%r" % suffix)
        return suffix
# Ready-to-use suffix generators, one per supported library type.
ShLibSuffixGenerator = _LibSuffixGenerator('ShLib')
LdModSuffixGenerator = _LibSuffixGenerator('LdMod')
ImpLibSuffixGenerator = _LibSuffixGenerator('ImpLib')
class _LibSymlinkGenerator(_LibInfoGeneratorBase):
    """Library symlink map generator. It generates a list of symlinks that
    should be created by SharedLibrary or LoadableModule builders"""

    def __init__(self, libtype):
        super(_LibSymlinkGenerator, self).__init__(libtype, 'Symlinks')

    def __call__(self, env, libnode, **kw):
        Verbose = False
        if libnode and 'target' not in kw:
            kw2 = kw.copy()
            kw2['target'] = libnode
        else:
            kw2 = kw
        if Verbose:
            print("_LibSymLinkGenerator: libnode=%r" % libnode.get_path())
        symlinks = None
        version = self.get_lib_version(env, **kw2)
        disable = self.get_lib_noversionsymlinks(env, **kw2)
        if Verbose:
            print('_LibSymlinkGenerator: version=%r' % version)
            print('_LibSymlinkGenerator: disable=%r' % disable)
        # Symlinks are only needed for versioned libraries, and may be
        # explicitly disabled via the $*NOVERSIONSYMLINKS variables.
        if version and not disable:
            prefix = self.get_lib_prefix(env, **kw2)
            suffix = self.get_lib_suffix(env, **kw2)
            symlinks = self.generate_versioned_lib_info(
                env, [libnode, version, prefix, suffix], **kw2)
        if Verbose:
            print('_LibSymlinkGenerator: return symlinks=%r' % StringizeLibSymlinks(symlinks))
        return symlinks
# Ready-to-use symlink-map generators, one per supported library type.
ShLibSymlinkGenerator = _LibSymlinkGenerator('ShLib')
LdModSymlinkGenerator = _LibSymlinkGenerator('LdMod')
ImpLibSymlinkGenerator = _LibSymlinkGenerator('ImpLib')
class _LibNameGenerator(_LibInfoGeneratorBase):
    """Generates "unmangled" library name from a library file node.

    Generally, it's thought to revert modifications done by prefix/suffix
    generators (_LibPrefixGenerator/_LibSuffixGenerator) used by a library
    builder. For example, on gnulink the suffix generator used by SharedLibrary
    builder appends $SHLIBVERSION to $SHLIBSUFFIX producing node name which
    ends with "$SHLIBSUFFIX.$SHLIBVERSION". Correspondingly, the implementation
    of _LibNameGenerator replaces "$SHLIBSUFFIX.$SHLIBVERSION" with
    "$SHLIBSUFFIX" in the node's basename. So that, if $SHLIBSUFFIX is ".so",
    $SHLIBVERSION is "0.1.2" and the node path is "/foo/bar/libfoo.so.0.1.2",
    the _LibNameGenerator shall return "libfoo.so". Other link tools may
    implement it's own way of library name unmangling.
    """

    def __init__(self, libtype):
        super(_LibNameGenerator, self).__init__(libtype, 'Name')

    def __call__(self, env, libnode, **kw):
        """Returns "demangled" library name"""
        Verbose = False
        if libnode and 'target' not in kw:
            kw2 = kw.copy()
            kw2['target'] = libnode
        else:
            kw2 = kw
        if Verbose:
            print("_LibNameGenerator: libnode=%r" % libnode.get_path())
        version = self.get_lib_version(env, **kw2)
        if Verbose:
            print('_LibNameGenerator: version=%r' % version)
        name = None
        if version:
            prefix = self.get_lib_prefix(env, **kw2)
            suffix = self.get_lib_suffix(env, **kw2)
            name = self.generate_versioned_lib_info(
                env, [libnode, version, prefix, suffix], **kw2)
        # Fall back to the node's plain basename when unversioned, or when
        # no linker callback produced a demangled name.
        if not name:
            name = os.path.basename(libnode.get_path())
        if Verbose:
            print('_LibNameGenerator: return name=%r' % name)
        return name
# Ready-to-use "unmangled name" generators, one per supported library type.
ShLibNameGenerator = _LibNameGenerator('ShLib')
LdModNameGenerator = _LibNameGenerator('LdMod')
ImpLibNameGenerator = _LibNameGenerator('ImpLib')
class _LibSonameGenerator(_LibInfoGeneratorBase):
    """Library soname generator. Returns library soname (e.g. libfoo.so.0) for
    a given node (e.g. /foo/bar/libfoo.so.0.1.2)"""

    def __init__(self, libtype):
        super(_LibSonameGenerator, self).__init__(libtype, 'Soname')

    def __call__(self, env, libnode, **kw):
        """Returns a SONAME based on a shared library's node path"""
        Verbose = False
        if libnode and 'target' not in kw:
            kw2 = kw.copy()
            kw2['target'] = libnode
        else:
            kw2 = kw
        if Verbose:
            print("_LibSonameGenerator: libnode=%r" % libnode.get_path())
        # An explicit $SONAME always wins.
        soname = _call_env_subst(env, '$SONAME', **kw2)
        if not soname:
            version = self.get_lib_version(env, **kw2)
            if Verbose:
                print("_LibSonameGenerator: version=%r" % version)
            if version:
                prefix = self.get_lib_prefix(env, **kw2)
                suffix = self.get_lib_suffix(env, **kw2)
                soname = self.generate_versioned_lib_info(
                    env, [libnode, version, prefix, suffix], **kw2)
            if not soname:
                # fallback to library name (as returned by appropriate _LibNameGenerator)
                soname = _LibNameGenerator(self.libtype)(env, libnode)
                if Verbose:
                    print("_LibSonameGenerator: FALLBACK: soname=%r" % soname)
        if Verbose:
            print("_LibSonameGenerator: return soname=%r" % soname)
        return soname
# Ready-to-use soname generators (import libraries have no soname).
ShLibSonameGenerator = _LibSonameGenerator('ShLib')
LdModSonameGenerator = _LibSonameGenerator('LdMod')
def StringizeLibSymlinks(symlinks):
    """Convert a list of (node, node) pairs to a list of (path, path)
    string pairs.  Used mainly for debugging.  Anything that is not such
    a list is returned unchanged."""
    if not SCons.Util.is_List(symlinks):
        return symlinks
    try:
        return [(link.get_path(), target.get_path())
                for link, target in symlinks]
    except (TypeError, ValueError):
        # Elements were not unpackable pairs; hand back the input as-is.
        return symlinks
def EmitLibSymlinks(env, symlinks, libnode, **kw):
    """Used by emitters to handle (shared/versioned) library symlinks"""
    Verbose = False

    # nodes involved in process... all symlinks + library
    nodes = list(set([x for x, y in symlinks] + [libnode]))

    clean_targets = kw.get('clean_targets', [])
    if not SCons.Util.is_List(clean_targets):
        clean_targets = [clean_targets]

    for link, linktgt in symlinks:
        env.SideEffect(link, linktgt)
        if Verbose:
            print("EmitLibSymlinks: SideEffect(%r,%r)" % (link.get_path(), linktgt.get_path()))
        # When linktgt is cleaned, also remove every other involved node
        # (the symlinks and the library itself).
        clean_list = [x for x in nodes if x != linktgt]
        env.Clean(list(set([linktgt] + clean_targets)), clean_list)
        if Verbose:
            print("EmitLibSymlinks: Clean(%r,%r)" % (linktgt.get_path(), [x.get_path() for x in clean_list]))
def CreateLibSymlinks(env, symlinks):
    """Physically creates symlinks. The symlinks argument must be a list in
    form [ (link, linktarget), ... ], where link and linktarget are SCons
    nodes.

    Always returns 0.
    """
    Verbose = False
    for link, linktgt in symlinks:
        # Make the symlink target relative to the link's directory.
        linktgt = link.get_dir().rel_path(linktgt)
        link = link.get_path()
        if Verbose:
            print("CreateLibSymlinks: preparing to add symlink %r -> %r" % (link, linktgt))
        # Delete the (previously created) symlink if exists. Let only symlinks
        # to be deleted to prevent accidental deletion of source files...
        if env.fs.islink(link):
            env.fs.unlink(link)
            if Verbose:
                print("CreateLibSymlinks: removed old symlink %r" % link)
        # If a file or directory exists with the same name as link, an OSError
        # will be thrown, which should be enough, I think.
        env.fs.symlink(linktgt, link)
        if Verbose:
            print("CreateLibSymlinks: add symlink %r -> %r" % (link, linktgt))
    return 0
def LibSymlinksActionFunction(target, source, env):
    """Action function: create the symlinks recorded on each target node
    (in ``tgt.attributes.shliblinks``, if any).  Always returns 0."""
    for tgt in target:
        links = getattr(getattr(tgt, 'attributes', None), 'shliblinks', None)
        if links:
            CreateLibSymlinks(env, links)
    return 0
def LibSymlinksStrFun(target, source, env, *args):
    """Action string function: describe the symlinks that would be created
    for each target node, or return None when there is nothing to do."""
    entries = []
    for tgt in target:
        links = getattr(getattr(tgt, 'attributes', None), 'shliblinks', None)
        if not links:
            continue
        entry = "Create symlinks for: %r" % tgt.get_path()
        try:
            linkstr = ', '.join(["%r->%r" % (k, v)
                                 for k, v in StringizeLibSymlinks(links)])
        except (KeyError, ValueError):
            pass
        else:
            entry += ": %s" % linkstr
        entries.append(entry)
    if not entries:
        return None
    return "\n".join(entries)
# Action shared by the SharedLibrary/LoadableModule builders to create
# version symlinks after linking (with a human-readable description).
LibSymlinksAction = SCons.Action.Action(LibSymlinksActionFunction, LibSymlinksStrFun)
def createSharedLibBuilder(env):
    """This is a utility function that creates the SharedLibrary
    Builder in an Environment if it is not there already.

    If it is already there, we return the existing one.
    """
    try:
        shared_lib = env['BUILDERS']['SharedLibrary']
    except KeyError:
        import SCons.Defaults
        # SharedCheck validates the request, ShLinkAction links, and
        # LibSymlinksAction creates version symlinks afterwards.
        action_list = [SCons.Defaults.SharedCheck,
                       SCons.Defaults.ShLinkAction,
                       LibSymlinksAction]
        shared_lib = SCons.Builder.Builder(action=action_list,
                                           emitter="$SHLIBEMITTER",
                                           prefix=ShLibPrefixGenerator,
                                           suffix=ShLibSuffixGenerator,
                                           target_scanner=ProgramScanner,
                                           src_suffix='$SHOBJSUFFIX',
                                           src_builder='SharedObject')
        env['BUILDERS']['SharedLibrary'] = shared_lib
    return shared_lib
def createLoadableModuleBuilder(env):
    """This is a utility function that creates the LoadableModule
    Builder in an Environment if it is not there already.

    If it is already there, we return the existing one.
    """
    try:
        ld_module = env['BUILDERS']['LoadableModule']
    except KeyError:
        import SCons.Defaults
        # Same pipeline as SharedLibrary, but with the module link action.
        action_list = [SCons.Defaults.SharedCheck,
                       SCons.Defaults.LdModuleLinkAction,
                       LibSymlinksAction]
        ld_module = SCons.Builder.Builder(action=action_list,
                                          emitter="$LDMODULEEMITTER",
                                          prefix=LdModPrefixGenerator,
                                          suffix=LdModSuffixGenerator,
                                          target_scanner=ProgramScanner,
                                          src_suffix='$SHOBJSUFFIX',
                                          src_builder='SharedObject')
        env['BUILDERS']['LoadableModule'] = ld_module
    return ld_module
def createObjBuilders(env):
    """This is a utility function that creates the StaticObject
    and SharedObject Builders in an Environment if they
    are not there already.

    If they are there already, we return the existing ones.

    This is a separate function because soooo many Tools
    use this functionality.

    The return is a 2-tuple of (StaticObject, SharedObject)
    """
    try:
        static_obj = env['BUILDERS']['StaticObject']
    except KeyError:
        # action/emitter start empty; each compiler Tool registers its
        # own entries per source suffix.
        static_obj = SCons.Builder.Builder(action={},
                                           emitter={},
                                           prefix='$OBJPREFIX',
                                           suffix='$OBJSUFFIX',
                                           src_builder=['CFile', 'CXXFile'],
                                           source_scanner=SourceFileScanner,
                                           single_source=1)
        env['BUILDERS']['StaticObject'] = static_obj
        # 'Object' is a historical alias for StaticObject.
        env['BUILDERS']['Object'] = static_obj

    try:
        shared_obj = env['BUILDERS']['SharedObject']
    except KeyError:
        shared_obj = SCons.Builder.Builder(action={},
                                           emitter={},
                                           prefix='$SHOBJPREFIX',
                                           suffix='$SHOBJSUFFIX',
                                           src_builder=['CFile', 'CXXFile'],
                                           source_scanner=SourceFileScanner,
                                           single_source=1)
        env['BUILDERS']['SharedObject'] = shared_obj

    return (static_obj, shared_obj)
def createCFileBuilders(env):
    """This is a utility function that creates the CFile/CXXFile
    Builders in an Environment if they
    are not there already.

    If they are there already, we return the existing ones.

    This is a separate function because soooo many Tools
    use this functionality.

    The return is a 2-tuple of (CFile, CXXFile)
    """
    try:
        c_file = env['BUILDERS']['CFile']
    except KeyError:
        c_file = SCons.Builder.Builder(action={},
                                       emitter={},
                                       suffix={None: '$CFILESUFFIX'})
        env['BUILDERS']['CFile'] = c_file
        # SetDefault so a user/tool-provided value is not overridden.
        env.SetDefault(CFILESUFFIX='.c')

    try:
        cxx_file = env['BUILDERS']['CXXFile']
    except KeyError:
        cxx_file = SCons.Builder.Builder(action={},
                                         emitter={},
                                         suffix={None: '$CXXFILESUFFIX'})
        env['BUILDERS']['CXXFile'] = cxx_file
        env.SetDefault(CXXFILESUFFIX='.cc')

    return (c_file, cxx_file)
##########################################################################
# Create common Java builders
def CreateJarBuilder(env):
    """The Jar builder expects a list of class files
    which it can package into a jar file.

    The jar tool provides an interface for passing other types
    of java files such as .java, directories or swig interfaces
    and will build them to class files in which it can package
    into the jar.

    Returns the existing JarFile builder when already registered.
    """
    try:
        java_jar = env['BUILDERS']['JarFile']
    except KeyError:
        fs = SCons.Node.FS.get_default_fs()
        jar_com = SCons.Action.Action('$JARCOM', '$JARCOMSTR')
        java_jar = SCons.Builder.Builder(action=jar_com,
                                         suffix='$JARSUFFIX',
                                         src_suffix='$JAVACLASSSUFFIX',
                                         src_builder='JavaClassFile',
                                         source_factory=fs.Entry)
        env['BUILDERS']['JarFile'] = java_jar
    return java_jar
def CreateJavaHBuilder(env):
    """Create (or return the existing) JavaH builder for *env*."""
    try:
        java_javah = env['BUILDERS']['JavaH']
    except KeyError:
        fs = SCons.Node.FS.get_default_fs()
        java_javah_com = SCons.Action.Action('$JAVAHCOM', '$JAVAHCOMSTR')
        java_javah = SCons.Builder.Builder(action=java_javah_com,
                                           src_suffix='$JAVACLASSSUFFIX',
                                           target_factory=fs.Entry,
                                           source_factory=fs.File,
                                           src_builder='JavaClassFile')
        env['BUILDERS']['JavaH'] = java_javah
    return java_javah
def CreateJavaClassFileBuilder(env):
    """Create (or return the existing) JavaClassFile builder for *env*."""
    try:
        java_class_file = env['BUILDERS']['JavaClassFile']
    except KeyError:
        fs = SCons.Node.FS.get_default_fs()
        javac_com = SCons.Action.Action('$JAVACCOM', '$JAVACCOMSTR')
        java_class_file = SCons.Builder.Builder(action=javac_com,
                                                emitter={},
                                                # suffix = '$JAVACLASSSUFFIX',
                                                src_suffix='$JAVASUFFIX',
                                                src_builder=['JavaFile'],
                                                target_factory=fs.Entry,
                                                source_factory=fs.File)
        env['BUILDERS']['JavaClassFile'] = java_class_file
    return java_class_file
def CreateJavaClassDirBuilder(env):
    """Create (or return the existing) JavaClassDir builder for *env*.

    Builds whole directories of class files from directories of sources.
    """
    try:
        java_class_dir = env['BUILDERS']['JavaClassDir']
    except KeyError:
        fs = SCons.Node.FS.get_default_fs()
        javac_com = SCons.Action.Action('$JAVACCOM', '$JAVACCOMSTR')
        java_class_dir = SCons.Builder.Builder(action=javac_com,
                                               emitter={},
                                               target_factory=fs.Dir,
                                               source_factory=fs.Dir)
        env['BUILDERS']['JavaClassDir'] = java_class_dir
    return java_class_dir
def CreateJavaFileBuilder(env):
    """Create (or return the existing) JavaFile builder for *env*.

    The action/emitter maps start empty; generator tools (e.g. swig)
    register their own entries per source suffix.
    """
    try:
        java_file = env['BUILDERS']['JavaFile']
    except KeyError:
        java_file = SCons.Builder.Builder(action={},
                                          emitter={},
                                          suffix={None: '$JAVASUFFIX'})
        env['BUILDERS']['JavaFile'] = java_file
        env['JAVASUFFIX'] = '.java'
    return java_file
class ToolInitializerMethod:
    """
    This is added to a construction environment in place of a
    method(s) normally called for a Builder (env.Object, env.StaticObject,
    etc.).  When called, it has its associated ToolInitializer
    object search the specified list of tools and apply the first
    one that exists to the construction environment.  It then calls
    whatever builder was (presumably) added to the construction
    environment in place of this particular instance.
    """

    def __init__(self, name, initializer):
        """
        :param name: the Builder method name this instance stands in for.
        :param initializer: the associated ToolInitializer object.

        Note: we store the tool name as __name__ so it can be used by
        the class that attaches this to a construction environment.
        """
        self.__name__ = name
        self.initializer = initializer

    def get_builder(self, env):
        """
        Returns the appropriate real Builder for this method name
        after having the associated ToolInitializer object apply
        the appropriate Tool module.  Returns None when no Tool
        replaced this placeholder with a real Builder.
        """
        # Applying the tools may replace this placeholder attribute on
        # env with a real Builder method; fetch the attribute afterwards.
        # (An earlier pre-apply getattr here was dead code and was removed.)
        self.initializer.apply_tools(env)

        builder = getattr(env, self.__name__)
        if builder is self:
            # There was no Builder added, which means no valid Tool
            # for this name was found (or possibly there's a mismatch
            # between the name we were called by and the Builder name
            # added by the Tool module).
            return None

        self.initializer.remove_methods(env)

        return builder

    def __call__(self, env, *args, **kw):
        """Invoke the real Builder, or return empty targets/sources when
        no Tool could supply one."""
        builder = self.get_builder(env)
        if builder is None:
            return [], []
        return builder(*args, **kw)
class ToolInitializer:
    """
    A class for delayed initialization of Tools modules.

    Instances of this class associate a list of Tool modules with
    a list of Builder method names that will be added by those Tool
    modules.  As part of instantiating this object for a particular
    construction environment, we also add the appropriate
    ToolInitializerMethod objects for the various Builder methods
    that we want to use to delay Tool searches until necessary.
    """

    def __init__(self, env, tools, names):
        if not SCons.Util.is_List(tools):
            tools = [tools]
        if not SCons.Util.is_List(names):
            names = [names]
        self.env = env
        self.tools = tools
        self.names = names
        self.methods = {}
        # Attach a lazy placeholder method to env for each Builder name.
        for name in names:
            method = ToolInitializerMethod(name, self)
            self.methods[name] = method
            env.AddMethod(method)

    def remove_methods(self, env):
        """
        Removes the methods that were added by the tool initialization
        so we no longer copy and re-bind them when the construction
        environment gets cloned.
        """
        for method in self.methods.values():
            env.RemoveMethod(method)

    def apply_tools(self, env):
        """
        Searches the list of associated Tool modules for one that
        exists, and applies that to the construction environment.
        """
        for t in self.tools:
            tool = SCons.Tool.Tool(t)
            if tool.exists(env):
                env.Tool(tool)
                return

        # If we fall through here, there was no tool module found.
        # This is where we can put an informative error message
        # about the inability to find the tool.   We'll start doing
        # this as we cut over more pre-defined Builder+Tools to use
        # the ToolInitializer class.
def Initializers(env):
    """Install the lazily-initialized Install/InstallAs/InstallVersionedLib
    methods on *env*; the 'install' tool is only loaded on first use."""
    ToolInitializer(env, ['install'], ['_InternalInstall', '_InternalInstallAs', '_InternalInstallVersionedLib'])

    def Install(self, *args, **kw):
        return self._InternalInstall(*args, **kw)

    def InstallAs(self, *args, **kw):
        return self._InternalInstallAs(*args, **kw)

    def InstallVersionedLib(self, *args, **kw):
        return self._InternalInstallVersionedLib(*args, **kw)

    env.AddMethod(Install)
    env.AddMethod(InstallAs)
    env.AddMethod(InstallVersionedLib)
def FindTool(tools, env):
    """Return the name of the first tool in *tools* that exists for
    *env*, or None when none of them do."""
    for name in tools:
        if Tool(name).exists(env):
            return name
    return None
def FindAllTools(tools, env):
    """Return the subset of *tools* (order preserved) whose Tool modules
    exist for *env*."""
    return [name for name in tools if Tool(name).exists(env)]
def tool_list(platform, env):
    """Return the default list of tool names for *platform*.

    Picks one linker, C, C++, Fortran and D compiler, assembler and
    archiver (the first candidate that exists wins, else the platform's
    first preference), plus every auxiliary tool that exists here.
    """

    other_plat_tools = []
    # XXX this logic about what tool to prefer on which platform
    #     should be moved into either the platform files or
    #     the tool files themselves.
    # The search orders here are described in the man page.  If you
    # change these search orders, update the man page as well.
    if str(platform) == 'win32':
        "prefer Microsoft tools on Windows"
        linkers = ['mslink', 'gnulink', 'ilink', 'linkloc', 'ilink32']
        c_compilers = ['msvc', 'mingw', 'gcc', 'intelc', 'icl', 'icc', 'cc', 'bcc32']
        cxx_compilers = ['msvc', 'intelc', 'icc', 'g++', 'cxx', 'bcc32']
        assemblers = ['masm', 'nasm', 'gas', '386asm']
        fortran_compilers = ['gfortran', 'g77', 'ifl', 'cvf', 'f95', 'f90', 'fortran']
        ars = ['mslib', 'ar', 'tlib']
        other_plat_tools = ['msvs', 'midl']
    elif str(platform) == 'os2':
        "prefer IBM tools on OS/2"
        linkers = ['ilink', 'gnulink', ]  # 'mslink']
        c_compilers = ['icc', 'gcc', ]  # 'msvc', 'cc']
        cxx_compilers = ['icc', 'g++', ]  # 'msvc', 'cxx']
        assemblers = ['nasm', ]  # 'masm', 'gas']
        fortran_compilers = ['ifl', 'g77']
        ars = ['ar', ]  # 'mslib']
    elif str(platform) == 'irix':
        "prefer MIPSPro on IRIX"
        linkers = ['sgilink', 'gnulink']
        c_compilers = ['sgicc', 'gcc', 'cc']
        cxx_compilers = ['sgicxx', 'g++', 'cxx']
        assemblers = ['as', 'gas']
        fortran_compilers = ['f95', 'f90', 'f77', 'g77', 'fortran']
        ars = ['sgiar']
    elif str(platform) == 'sunos':
        "prefer Forte tools on SunOS"
        linkers = ['sunlink', 'gnulink']
        c_compilers = ['suncc', 'gcc', 'cc']
        cxx_compilers = ['suncxx', 'g++', 'cxx']
        assemblers = ['as', 'gas']
        fortran_compilers = ['sunf95', 'sunf90', 'sunf77', 'f95', 'f90', 'f77',
                             'gfortran', 'g77', 'fortran']
        ars = ['sunar']
    elif str(platform) == 'hpux':
        "prefer aCC tools on HP-UX"
        linkers = ['hplink', 'gnulink']
        c_compilers = ['hpcc', 'gcc', 'cc']
        cxx_compilers = ['hpcxx', 'g++', 'cxx']
        assemblers = ['as', 'gas']
        fortran_compilers = ['f95', 'f90', 'f77', 'g77', 'fortran']
        ars = ['ar']
    elif str(platform) == 'aix':
        "prefer AIX Visual Age tools on AIX"
        linkers = ['aixlink', 'gnulink']
        c_compilers = ['aixcc', 'gcc', 'cc']
        cxx_compilers = ['aixcxx', 'g++', 'cxx']
        assemblers = ['as', 'gas']
        fortran_compilers = ['f95', 'f90', 'aixf77', 'g77', 'fortran']
        ars = ['ar']
    elif str(platform) == 'darwin':
        "prefer GNU tools on Mac OS X, except for some linkers and IBM tools"
        linkers = ['applelink', 'gnulink']
        c_compilers = ['gcc', 'cc']
        cxx_compilers = ['g++', 'cxx']
        assemblers = ['as']
        fortran_compilers = ['gfortran', 'f95', 'f90', 'g77']
        ars = ['ar']
    elif str(platform) == 'cygwin':
        "prefer GNU tools on Cygwin, except for a platform-specific linker"
        linkers = ['cyglink', 'mslink', 'ilink']
        c_compilers = ['gcc', 'msvc', 'intelc', 'icc', 'cc']
        cxx_compilers = ['g++', 'msvc', 'intelc', 'icc', 'cxx']
        assemblers = ['gas', 'nasm', 'masm']
        fortran_compilers = ['gfortran', 'g77', 'ifort', 'ifl', 'f95', 'f90', 'f77']
        ars = ['ar', 'mslib']
    else:
        "prefer GNU tools on all other platforms"
        linkers = ['gnulink', 'ilink']
        c_compilers = ['gcc', 'intelc', 'icc', 'cc']
        cxx_compilers = ['g++', 'intelc', 'icc', 'cxx']
        assemblers = ['gas', 'nasm', 'masm']
        fortran_compilers = ['gfortran', 'g77', 'ifort', 'ifl', 'f95', 'f90', 'f77']
        ars = ['ar', ]

    if not str(platform) == 'win32':
        other_plat_tools += ['m4', 'rpm']

    c_compiler = FindTool(c_compilers, env) or c_compilers[0]

    # XXX this logic about what tool provides what should somehow be
    #     moved into the tool files themselves.
    if c_compiler and c_compiler == 'mingw':
        # MinGW contains a linker, C compiler, C++ compiler,
        # Fortran compiler, archiver and assembler:
        cxx_compiler = None
        linker = None
        assembler = None
        fortran_compiler = None
        ar = None
    else:
        # Don't use g++ if the C compiler has built-in C++ support:
        if c_compiler in ('msvc', 'intelc', 'icc'):
            cxx_compiler = None
        else:
            cxx_compiler = FindTool(cxx_compilers, env) or cxx_compilers[0]
        linker = FindTool(linkers, env) or linkers[0]
        assembler = FindTool(assemblers, env) or assemblers[0]
        fortran_compiler = FindTool(fortran_compilers, env) or fortran_compilers[0]
        ar = FindTool(ars, env) or ars[0]

    d_compilers = ['dmd', 'ldc', 'gdc']
    d_compiler = FindTool(d_compilers, env) or d_compilers[0]

    other_tools = FindAllTools(other_plat_tools + [
        # TODO: merge 'install' into 'filesystem' and
        # make 'filesystem' the default
        'filesystem',
        'wix',  # 'midl', 'msvs',
        # Parser generators
        'lex', 'yacc',
        # Foreign function interface
        'rpcgen', 'swig',
        # Java
        'jar', 'javac', 'javah', 'rmic',
        # TeX
        'dvipdf', 'dvips', 'gs',
        'tex', 'latex', 'pdflatex', 'pdftex',
        # Archivers
        'tar', 'zip',
        # File builders (text)
        'textfile',
    ], env)

    # Falsy entries (the None slots of the mingw branch) are dropped.
    tools = [
        linker,
        c_compiler,
        cxx_compiler,
        fortran_compiler,
        assembler,
        ar,
        d_compiler,
    ] + other_tools

    return [x for x in tools if x]
def find_program_path(env, key_program, default_paths=None):
    """
    Find the location of a tool using various means.

    Mainly for windows where tools aren't all installed in /usr/bin, etc.

    :param env: Current Construction Environment.
    :param key_program: Tool to locate.
    :param default_paths: List of additional paths this tool might be found in.
    :returns: path to the tool, or None when it cannot be found anywhere.
    """
    # First search in the SCons path
    path = env.WhereIs(key_program)
    if path:
        return path

    # Then in the OS path
    path = SCons.Util.WhereIs(key_program)
    if path:
        return path

    # Finally, add the defaults and check again. Do not change
    # ['ENV']['PATH'] permanently, the caller can do that if needed.
    if default_paths is None:
        return path
    save_path = env['ENV']['PATH']
    for p in default_paths:
        env.AppendENVPath('PATH', p)
    path = env.WhereIs(key_program)
    # Restore the original PATH regardless of the outcome.
    env['ENV']['PATH'] = save_path
    return path
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 36.325056 | 113 | 0.590293 |
acef86c05b883b00b6e130e3c67bbbb3e09618a2 | 11,526 | py | Python | gluon/packages/dal/pydal/dialects/postgre.py | guadaltech/web2py-ruben | 45e0f4f316774e707a3075f23e3f8b9fed00c387 | [
"BSD-3-Clause"
] | null | null | null | gluon/packages/dal/pydal/dialects/postgre.py | guadaltech/web2py-ruben | 45e0f4f316774e707a3075f23e3f8b9fed00c387 | [
"BSD-3-Clause"
] | null | null | null | gluon/packages/dal/pydal/dialects/postgre.py | guadaltech/web2py-ruben | 45e0f4f316774e707a3075f23e3f8b9fed00c387 | [
"BSD-3-Clause"
] | null | null | null | from ..adapters.postgres import Postgre, PostgreNew, PostgreBoolean
from ..helpers.methods import varquote_aux
from ..objects import Expression
from .base import SQLDialect
from . import dialects, sqltype_for, register_expression
@dialects.register_for(Postgre)
class PostgreDialect(SQLDialect):
# SQL literals PostgreSQL uses for boolean TRUE/FALSE expressions.
true_exp = "TRUE"
false_exp = "FALSE"
@sqltype_for('blob')
def type_blob(self):
    # PostgreSQL stores binary data as BYTEA.
    return 'BYTEA'
@sqltype_for('bigint')
def type_bigint(self):
    # 64-bit integer column type.
    return 'BIGINT'
@sqltype_for('double')
def type_double(self):
    # FLOAT8 == double precision in PostgreSQL.
    return 'FLOAT8'
@sqltype_for('id')
def type_id(self):
    # Auto-incrementing 32-bit primary key.
    return 'SERIAL PRIMARY KEY'
@sqltype_for('big-id')
def type_big_id(self):
    # Auto-incrementing 64-bit primary key.
    return 'BIGSERIAL PRIMARY KEY'
@sqltype_for('big-reference')
def type_big_reference(self):
    # 64-bit foreign key; placeholders are filled in by the table builder.
    return 'BIGINT REFERENCES %(foreign_key)s ' + \
        'ON DELETE %(on_delete_action)s %(null)s %(unique)s'
@sqltype_for('reference TFK')
def type_reference_tfk(self):
    # Table-level (possibly composite) foreign-key constraint clause.
    return ' CONSTRAINT "FK_%(constraint_name)s_PK" FOREIGN KEY ' + \
        '(%(field_name)s) REFERENCES %(foreign_table)s' + \
        '(%(foreign_key)s) ON DELETE %(on_delete_action)s'
@sqltype_for('geometry')
def type_geometry(self):
    # PostGIS planar geometry type.
    return 'GEOMETRY'
@sqltype_for('geography')
def type_geography(self):
    # PostGIS geodetic (lat/lon) type.
    return 'GEOGRAPHY'
def varquote(self, val):
    # Double-quote the identifier when (and only when) it needs quoting.
    return varquote_aux(val, '"%s"')
def sequence_name(self, tablename):
    # PostgreSQL names SERIAL-backed sequences <table>_id_seq.
    return self.quote('%s_id_seq' % tablename)
def insert(self, table, fields, values, returning=None):
    """Render an INSERT statement, optionally with a RETURNING clause."""
    ret = 'RETURNING %s' % returning if returning else ''
    return 'INSERT INTO %s(%s) VALUES (%s)%s;' % (table, fields, values, ret)
@property
def random(self):
    # PostgreSQL spells the random-ordering function RANDOM().
    return 'RANDOM()'
def add(self, first, second, query_env={}):
    """Render addition; for text-like column types render SQL string
    concatenation (||) instead of arithmetic +."""
    t = first.type
    if t in ('text', 'string', 'password', 'json', 'jsonb', 'upload', 'blob'):
        return '(%s || %s)' % (
            self.expand(first, query_env=query_env),
            self.expand(second, first.type, query_env=query_env))
    else:
        return '(%s + %s)' % (
            self.expand(first, query_env=query_env),
            self.expand(second, first.type, query_env=query_env))
def regexp(self, first, second, query_env={}):
    # '~' is PostgreSQL's case-sensitive regular-expression match operator.
    return '(%s ~ %s)' % (
        self.expand(first, query_env=query_env),
        self.expand(second, 'string', query_env=query_env))
def like(self, first, second, escape=None, query_env={}):
    """Render a case-sensitive LIKE comparison.

    When *second* is a plain value (not an Expression) and no escape
    character was given, backslash is used and any backslashes in the
    pattern are doubled.  Non-text columns are CAST to CHAR(length) first.
    """
    if isinstance(second, Expression):
        second = self.expand(second, 'string', query_env=query_env)
    else:
        second = self.expand(second, 'string', query_env=query_env)
        if escape is None:
            escape = '\\'
            second = second.replace(escape, escape * 2)
    # NOTE(review): when *second* is an Expression and escape is None,
    # the clause renders as ESCAPE 'None' -- looks suspicious; confirm
    # against upstream pydal before changing.
    if first.type not in ('string', 'text', 'json', 'jsonb'):
        return "(%s LIKE %s ESCAPE '%s')" % (
            self.cast(self.expand(first, query_env=query_env),
                      'CHAR(%s)' % first.length), second, escape)
    return "(%s LIKE %s ESCAPE '%s')" % (
        self.expand(first, query_env=query_env), second, escape)
def ilike(self, first, second, escape=None, query_env={}):
    """Render a case-insensitive ILIKE comparison (PostgreSQL extension).

    Same escaping rules as ``like``; non-text columns (other than
    list:string) are CAST to CHAR(length) first.
    """
    if isinstance(second, Expression):
        second = self.expand(second, 'string', query_env=query_env)
    else:
        second = self.expand(second, 'string', query_env=query_env)
        if escape is None:
            escape = '\\'
            second = second.replace(escape, escape * 2)
    if first.type not in ('string', 'text', 'json', 'jsonb', 'list:string'):
        return "(%s ILIKE %s ESCAPE '%s')" % (
            self.cast(self.expand(first, query_env=query_env),
                      'CHAR(%s)' % first.length), second, escape)
    return "(%s ILIKE %s ESCAPE '%s')" % (
        self.expand(first, query_env=query_env), second, escape)
def drop_table(self, table, mode):
    """Render DROP TABLE statement(s); *mode* must be '', 'restrict'
    or 'cascade'."""
    if mode not in ('restrict', 'cascade', ''):
        raise ValueError('Invalid mode: %s' % mode)
    return ['DROP TABLE %s %s;' % (table._rname, mode)]
def create_index(self, name, table, expressions, unique=False, where=None):
    """Render CREATE [UNIQUE] INDEX, optionally partial (WHERE clause)."""
    uniq = ' UNIQUE' if unique else ''
    whr = ''
    if where:
        whr = ' %s' % self.where(where)
    # index_expander adjusts how field expressions are rendered inside
    # the index column list.
    with self.adapter.index_expander():
        rv = 'CREATE%s INDEX %s ON %s (%s)%s;' % (
            uniq, self.quote(name), table._rname, ','.join(
                self.expand(field) for field in expressions), whr)
    return rv
def st_asgeojson(self, first, second, query_env={}):
    # *second* is a dict of GeoJSON options: version, precision, options.
    return 'ST_AsGeoJSON(%s,%s,%s,%s)' % (
        second['version'], self.expand(first, query_env=query_env),
        second['precision'], second['options'])
    def st_astext(self, first, query_env={}):
        """Render ST_AsText over the expanded geometry expression."""
        return 'ST_AsText(%s)' % self.expand(first, query_env=query_env)
    def st_x(self, first, query_env={}):
        """Render ST_X (x coordinate of a point)."""
        return 'ST_X(%s)' % (self.expand(first, query_env=query_env))
    def st_y(self, first, query_env={}):
        """Render ST_Y (y coordinate of a point)."""
        return 'ST_Y(%s)' % (self.expand(first, query_env=query_env))
    def st_contains(self, first, second, query_env={}):
        """Render ST_Contains; ``second`` is coerced to ``first``'s type."""
        return 'ST_Contains(%s,%s)' % (
            self.expand(first, query_env=query_env),
            self.expand(second, first.type, query_env=query_env))
    def st_distance(self, first, second, query_env={}):
        """Render ST_Distance between two geometries of the same type."""
        return 'ST_Distance(%s,%s)' % (
            self.expand(first, query_env=query_env),
            self.expand(second, first.type, query_env=query_env))
    def st_equals(self, first, second, query_env={}):
        """Render ST_Equals (geometry equality test)."""
        return 'ST_Equals(%s,%s)' % (
            self.expand(first, query_env=query_env),
            self.expand(second, first.type, query_env=query_env))
    def st_intersects(self, first, second, query_env={}):
        """Render ST_Intersects (geometries share any space)."""
        return 'ST_Intersects(%s,%s)' % (
            self.expand(first, query_env=query_env),
            self.expand(second, first.type, query_env=query_env))
    def st_overlaps(self, first, second, query_env={}):
        """Render ST_Overlaps (geometries overlap without containment)."""
        return 'ST_Overlaps(%s,%s)' % (
            self.expand(first, query_env=query_env),
            self.expand(second, first.type, query_env=query_env))
    def st_simplify(self, first, second, query_env={}):
        """Render ST_Simplify; ``second`` is the tolerance (double)."""
        return 'ST_Simplify(%s,%s)' % (
            self.expand(first, query_env=query_env),
            self.expand(second, 'double', query_env=query_env))
    def st_simplifypreservetopology(self, first, second, query_env={}):
        """Render ST_SimplifyPreserveTopology; ``second`` is the tolerance."""
        return 'ST_SimplifyPreserveTopology(%s,%s)' % (
            self.expand(first, query_env=query_env),
            self.expand(second, 'double', query_env=query_env))
    def st_touches(self, first, second, query_env={}):
        """Render ST_Touches (boundaries meet, interiors do not)."""
        return 'ST_Touches(%s,%s)' % (
            self.expand(first, query_env=query_env),
            self.expand(second, first.type, query_env=query_env))
    def st_within(self, first, second, query_env={}):
        """Render ST_Within (first lies completely inside second)."""
        return 'ST_Within(%s,%s)' % (
            self.expand(first, query_env=query_env),
            self.expand(second, first.type, query_env=query_env))
    def st_dwithin(self, first, tup, query_env={}):
        """Render ST_DWithin; ``tup`` is ``(other_geometry, distance)``."""
        return 'ST_DWithin(%s,%s,%s)' % (
            self.expand(first, query_env=query_env),
            self.expand(tup[0], first.type, query_env=query_env),
            self.expand(tup[1], 'double', query_env=query_env))
    @register_expression('doy')
    def extract_doy(self, expr):
        """EXTRACT(doy): day of the year as an integer."""
        return Expression(expr.db, self.extract, expr, 'doy', 'integer')
    @register_expression('dow')
    def extract_dow(self, expr):
        """EXTRACT(dow): day of the week (Sunday-based) as an integer."""
        return Expression(expr.db, self.extract, expr, 'dow', 'integer')
    @register_expression('isodow')
    def extract_isodow(self, expr):
        """EXTRACT(isodow): ISO day of the week (Monday-based) as an integer."""
        return Expression(expr.db, self.extract, expr, 'isodow', 'integer')
    @register_expression('isoyear')
    def extract_isoyear(self, expr):
        """EXTRACT(isoyear): ISO 8601 week-numbering year as an integer."""
        return Expression(expr.db, self.extract, expr, 'isoyear', 'integer')
    @register_expression('quarter')
    def extract_quarter(self, expr):
        """EXTRACT(quarter): quarter of the year (1-4) as an integer."""
        return Expression(expr.db, self.extract, expr, 'quarter', 'integer')
    @register_expression('week')
    def extract_week(self, expr):
        """EXTRACT(week): ISO week number as an integer."""
        return Expression(expr.db, self.extract, expr, 'week', 'integer')
    @register_expression('decade')
    def extract_decade(self, expr):
        """EXTRACT(decade): year divided by 10 as an integer."""
        return Expression(expr.db, self.extract, expr, 'decade', 'integer')
    @register_expression('century')
    def extract_century(self, expr):
        """EXTRACT(century): century number as an integer."""
        return Expression(expr.db, self.extract, expr, 'century', 'integer')
@register_expression('millenium')
def extract_millenium(self, expr):
return Expression(expr.db, self.extract, expr, 'millenium', 'integer')
class PostgreDialectJSON(PostgreDialect):
    """PostgreSQL dialect that maps pydal's json/jsonb field types onto
    the native JSON/JSONB column types."""
    @sqltype_for('json')
    def type_json(self):
        return 'JSON'
    @sqltype_for('jsonb')
    def type_jsonb(self):
        return 'JSONB'
@dialects.register_for(PostgreNew)
class PostgreDialectArrays(PostgreDialect):
    """Dialect registered for PostgreNew that stores pydal's list types
    as native PostgreSQL arrays and adapts contains/eq/ilike to them."""
    @sqltype_for('list:integer')
    def type_list_integer(self):
        # list:integer columns become native BIGINT arrays.
        return 'BIGINT[]'
    @sqltype_for('list:string')
    def type_list_string(self):
        # list:string columns become native TEXT arrays.
        return 'TEXT[]'
    @sqltype_for('list:reference')
    def type_list_reference(self):
        # References are stored like integers, hence BIGINT arrays.
        return 'BIGINT[]'
    def any(self, val, query_env={}):
        """Wrap the expanded value in ANY(...) for array comparison."""
        return "ANY(%s)" % self.expand(val, query_env=query_env)
    def contains(self, first, second, case_sensitive=True, query_env={}):
        """Membership test: for array columns, compare the needle against
        ANY(column) instead of the default substring LIKE."""
        if first.type.startswith('list:'):
            f = self.expand(second, 'string', query_env=query_env)
            s = self.any(first, query_env)
            if case_sensitive is True:
                return self.eq(f, s)
            return self.ilike(f, s, escape='\\', query_env=query_env)
        return super(PostgreDialectArrays, self).contains(
            first, second, case_sensitive=case_sensitive, query_env=query_env)
    def ilike(self, first, second, escape=None, query_env={}):
        # NOTE(review): when called from contains() above, `first` is an
        # already-rendered SQL string, so `'type' not in first` is a
        # substring test used to distinguish plain strings from
        # Field/Expression objects -- fragile; confirm no rendered SQL
        # fragment can legitimately contain the substring 'type'.
        if first and 'type' not in first:
            args = (first, self.expand(second, query_env=query_env))
            return '(%s ILIKE %s)' % args
        return super(PostgreDialectArrays, self).ilike(
            first, second, escape=escape, query_env=query_env)
    def eq(self, first, second=None, query_env={}):
        # Same string-vs-expression discrimination trick as ilike().
        if first and 'type' not in first:
            return '(%s = %s)' % (first,
                self.expand(second, query_env=query_env))
        return super(PostgreDialectArrays, self).eq(first, second, query_env)
class PostgreDialectArraysJSON(PostgreDialectArrays):
    """Array-typed dialect variant with native JSON/JSONB columns."""
    @sqltype_for('json')
    def type_json(self):
        return 'JSON'
    @sqltype_for('jsonb')
    def type_jsonb(self):
        return 'JSONB'
@dialects.register_for(PostgreBoolean)
class PostgreDialectBoolean(PostgreDialectArrays):
    """Array-typed dialect that stores booleans as native BOOLEAN
    columns instead of the default CHAR(1) encoding."""
    @sqltype_for('boolean')
    def type_boolean(self):
        return 'BOOLEAN'
class PostgreDialectBooleanJSON(PostgreDialectBoolean):
    """Boolean+array dialect variant with native JSON/JSONB columns."""
    @sqltype_for('json')
    def type_json(self):
        return 'JSON'
    @sqltype_for('jsonb')
    def type_jsonb(self):
        return 'JSONB'
| 37.422078 | 83 | 0.594656 |
acef872eda36880b3afb2e35e8084f1969d93a60 | 8,578 | py | Python | examples/rllib.py | isgeles/SMARTS | 423275123ae4aab8b7d409140d82b50555a5267c | [
"MIT"
] | 1 | 2021-05-19T06:19:41.000Z | 2021-05-19T06:19:41.000Z | examples/rllib.py | isgeles/SMARTS | 423275123ae4aab8b7d409140d82b50555a5267c | [
"MIT"
] | 12 | 2021-08-25T16:17:20.000Z | 2022-03-12T01:00:37.000Z | examples/rllib.py | isgeles/SMARTS | 423275123ae4aab8b7d409140d82b50555a5267c | [
"MIT"
] | null | null | null | import argparse
import logging
import multiprocessing
import random
from datetime import timedelta
from os import stat
from pathlib import Path
from typing import Dict
import numpy as np
from ray import tune
from ray.rllib.agents.callbacks import DefaultCallbacks
from ray.rllib.env.base_env import BaseEnv
from ray.rllib.evaluation.episode import MultiAgentEpisode
from ray.rllib.evaluation.rollout_worker import RolloutWorker
from ray.rllib.policy.policy import Policy
from ray.rllib.utils.typing import PolicyID
from ray.tune.schedulers import PopulationBasedTraining
import smarts
from examples.rllib_agent import TrainingModel, rllib_agent
from smarts.core.utils.file import copy_tree
from smarts.env.rllib_hiway_env import RLlibHiWayEnv
logging.basicConfig(level=logging.INFO)
# Add custom metrics to your tensorboard using these callbacks
# See: https://ray.readthedocs.io/en/latest/rllib-training.html#callbacks-and-custom-metrics
class Callbacks(DefaultCallbacks):
    """RLlib callbacks that record the ego vehicle's speed every step and
    report its per-episode mean as a custom TensorBoard metric."""
    @staticmethod
    def on_episode_start(
        worker: RolloutWorker,
        base_env: BaseEnv,
        policies: Dict[PolicyID, Policy],
        episode: MultiAgentEpisode,
        env_index: int,
        **kwargs,
    ):
        # Fresh per-episode buffer for the speed samples.
        episode.user_data["ego_speed"] = []
    @staticmethod
    def on_episode_step(
        worker: RolloutWorker,
        base_env: BaseEnv,
        episode: MultiAgentEpisode,
        env_index: int,
        **kwargs,
    ):
        # NOTE(review): reaches into the private `_agent_to_last_obs`
        # mapping to pick one (arbitrary) agent id -- confirm this is
        # stable across Ray versions.
        single_agent_id = list(episode._agent_to_last_obs)[0]
        obs = episode.last_raw_obs_for(single_agent_id)
        episode.user_data["ego_speed"].append(obs["speed"])
    @staticmethod
    def on_episode_end(
        worker: RolloutWorker,
        base_env: BaseEnv,
        policies: Dict[PolicyID, Policy],
        episode: MultiAgentEpisode,
        env_index: int,
        **kwargs,
    ):
        # Aggregate collected speeds and surface them via custom_metrics
        # so they show up in TensorBoard.
        mean_ego_speed = np.mean(episode.user_data["ego_speed"])
        print(
            f"ep. {episode.episode_id:<12} ended;"
            f" length={episode.length:<6}"
            f" mean_ego_speed={mean_ego_speed:.2f}"
        )
        episode.custom_metrics["mean_ego_speed"] = mean_ego_speed
def explore(config):
    """Post-mutation hook for PBT: guarantee that one training batch
    spans at least two rollout fragments so SGD always has enough
    timesteps. Mutates ``config`` in place and returns it.
    """
    minimum = config["rollout_fragment_length"] * 2
    if config["train_batch_size"] < minimum:
        config["train_batch_size"] = minimum
    return config
def main(
    scenario,
    headless,
    time_total_s,
    rollout_fragment_length,
    train_batch_size,
    seed,
    num_samples,
    num_agents,
    num_workers,
    resume_training,
    result_dir,
    checkpoint_num,
    save_model_path,
):
    """Run PBT-scheduled RLlib PG training on a SMARTS scenario and copy
    the best trial's exported model to ``save_model_path``."""
    # Bug fix: the old message interpolated `train_batch_size.__name__`,
    # which raises AttributeError (ints have no __name__) instead of the
    # intended AssertionError when the check fails.
    assert train_batch_size > 0, "train_batch_size cannot be less than 1."
    if rollout_fragment_length > train_batch_size:
        rollout_fragment_length = train_batch_size
    pbt = PopulationBasedTraining(
        time_attr="time_total_s",
        metric="episode_reward_mean",
        mode="max",
        perturbation_interval=300,
        resample_probability=0.25,
        # Specifies the mutations of these hyperparams
        # See: `ray.rllib.agents.trainer.COMMON_CONFIG` for common hyperparams
        hyperparam_mutations={
            "lr": [1e-3, 5e-4, 1e-4, 5e-5, 1e-5],
            "rollout_fragment_length": lambda: rollout_fragment_length,
            "train_batch_size": lambda: train_batch_size,
        },
        # Specifies additional mutations after hyperparam_mutations is applied
        custom_explore_fn=explore,
    )
    # XXX: There is a bug in Ray where we can only export a trained model if
    # the policy it's attached to is named 'default_policy'.
    # See: https://github.com/ray-project/ray/issues/5339
    rllib_policies = {
        "default_policy": (
            None,
            rllib_agent["observation_space"],
            rllib_agent["action_space"],
            {"model": {"custom_model": TrainingModel.NAME}},
        )
    }
    smarts.core.seed(seed)
    tune_config = {
        "env": RLlibHiWayEnv,
        "log_level": "WARN",
        "num_workers": num_workers,
        "env_config": {
            "seed": tune.sample_from(lambda spec: random.randint(0, 300)),
            "scenarios": [str(Path(scenario).expanduser().resolve().absolute())],
            "headless": headless,
            "agent_specs": {
                f"AGENT-{i}": rllib_agent["agent_spec"] for i in range(num_agents)
            },
        },
        "multiagent": {"policies": rllib_policies},
        "callbacks": Callbacks,
    }
    experiment_name = "rllib_example_multi"
    result_dir = Path(result_dir).expanduser().resolve().absolute()
    if checkpoint_num:
        checkpoint = str(
            result_dir / f"checkpoint_{checkpoint_num}" / f"checkpoint-{checkpoint_num}"
        )
    else:
        checkpoint = None
    print(f"Checkpointing at {str(result_dir)}")
    analysis = tune.run(
        "PG",
        name=experiment_name,
        stop={"time_total_s": time_total_s},
        checkpoint_freq=1,
        checkpoint_at_end=True,
        local_dir=str(result_dir),
        resume=resume_training,
        restore=checkpoint,
        max_failures=3,
        num_samples=num_samples,
        export_formats=["model", "checkpoint"],
        config=tune_config,
        scheduler=pbt,
    )
    print(analysis.dataframe().head())
    # Copy the best trial's exported model next to this script.
    best_logdir = Path(analysis.get_best_logdir("episode_reward_max", mode="max"))
    model_path = best_logdir / "model"
    copy_tree(str(model_path), save_model_path, overwrite=True)
    print(f"Wrote model to: {save_model_path}")
if __name__ == "__main__":
    # Command-line interface mirroring main()'s keyword signature.
    parser = argparse.ArgumentParser("rllib-example")
    parser.add_argument(
        "scenario",
        help="Scenario to run (see scenarios/ for some samples you can use)",
        type=str,
    )
    parser.add_argument(
        "--headless",
        action="store_true",
        default=False,
        help="Run simulation in headless mode",
    )
    parser.add_argument(
        "--num_samples",
        type=int,
        default=1,
        help="Number of times to sample from hyperparameter space",
    )
    parser.add_argument(
        "--rollout_fragment_length",
        type=int,
        default=200,
        help="Episodes are divided into fragments of this many steps for each rollout. In this example this will be ensured to be `1=<rollout_fragment_length<=train_batch_size`",
    )
    parser.add_argument(
        "--train_batch_size",
        type=int,
        default=2000,
        help="The training batch size. This value must be > 0.",
    )
    parser.add_argument(
        "--time_total_s",
        type=int,
        default=1 * 60 * 60,  # 1 hour
        help="Total time in seconds to run the simulation for. This is a rough end time as it will be checked per training batch.",
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=42,
        help="The base random seed to use, intended to be mixed with --num_samples",
    )
    parser.add_argument(
        "--num_agents", type=int, default=2, help="Number of agents (one per policy)"
    )
    parser.add_argument(
        "--num_workers",
        type=int,
        default=(multiprocessing.cpu_count() // 2 + 1),
        help="Number of workers (defaults to use all system cores)",
    )
    parser.add_argument(
        "--resume_training",
        default=False,
        action="store_true",
        help="Resume the last trained example",
    )
    parser.add_argument(
        "--result_dir",
        type=str,
        default="~/ray_results",
        help="Directory containing results",
    )
    parser.add_argument(
        "--checkpoint_num", type=int, default=None, help="Checkpoint number"
    )
    # Default model destination: a "model" directory beside this script.
    save_model_path = str(Path(__file__).expanduser().resolve().parent / "model")
    parser.add_argument(
        "--save_model_path",
        type=str,
        default=save_model_path,
        help="Destination path of where to copy the model when training is over",
    )
    args = parser.parse_args()
    main(
        scenario=args.scenario,
        headless=args.headless,
        time_total_s=args.time_total_s,
        rollout_fragment_length=args.rollout_fragment_length,
        train_batch_size=args.train_batch_size,
        seed=args.seed,
        num_samples=args.num_samples,
        num_agents=args.num_agents,
        num_workers=args.num_workers,
        resume_training=args.resume_training,
        result_dir=args.result_dir,
        checkpoint_num=args.checkpoint_num,
        save_model_path=args.save_model_path,
    )
| 31.421245 | 178 | 0.645838 |
acef877ab169a43e2b369b9326e5e349b2ae9686 | 7,645 | py | Python | fine/controllers/admin.py | finron/finepy | 93e0fda1a4fbda62a4b591856e25c8f24126941c | [
"BSD-3-Clause"
] | null | null | null | fine/controllers/admin.py | finron/finepy | 93e0fda1a4fbda62a4b591856e25c8f24126941c | [
"BSD-3-Clause"
] | null | null | null | fine/controllers/admin.py | finron/finepy | 93e0fda1a4fbda62a4b591856e25c8f24126941c | [
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
'''
admin.py
~~~~~~~~
'''
from datetime import datetime
from flask import (render_template, Blueprint, request,
url_for, current_app, redirect)
from flask_login import login_required
from fine import db
from fine.models import (Post, PostTag, User, Tag, Link,
Comment)
from fine.lib.util import remove_html_tag
from fine.lib.decorators import admin_required
bp = Blueprint('admin', __name__)
@bp.route('/admin', methods=['GET'])
@login_required
@admin_required
def index():
    # The admin landing page has no content of its own; send the
    # administrator straight to the post list.
    return redirect(url_for('.posts'))
@bp.route('/admin/post', methods=['GET', 'POST'])
@bp.route('/admin/post/<int:id>', methods=['GET'])
@bp.route('/admin/post/edit/<int:id>', methods=['GET', 'POST'])
@login_required
@admin_required
def edit_post(id=None):
    """Show (GET) or create/update (POST) a blog post, including its tags.

    GET renders the edit form (an empty Post when no id is given).
    POST upserts the post, links any new comma-separated tags to it,
    bumps tag weights and redirects to the post list.
    """
    if request.method == 'GET':
        if id:
            post = Post.query.get_or_404(id)
            return render_template('admin/post.html', post=post)
        post = Post()
        return render_template('admin/post.html', post=post)
    else:
        form = request.form
        if id:
            post = Post.query.get_or_404(id)
        else:
            post = Post()
        is_privacy = 'privacy' in form
        tagname_list = form.get('post_tags', '')
        # NOTE(review): for a brand-new post, `post.id` is still None here
        # (the post is only added/committed at the end), so the PostTag
        # rows below are created with post_id=None -- confirm whether the
        # post should be committed first.
        post_id = post.id
        t_query = Tag.query
        # NOTE(review): pt_query is never used.
        pt_query = PostTag.query
        for tagname in tagname_list.split(','):
            # import pdb; pdb.set_trace()
            tagname = tagname.strip()
            if not tagname:
                continue
            if post.has_tag(tagname):
                continue
            # Reuse an existing tag or create it on the fly.
            tag = t_query.filter(Tag.name==tagname).first()
            if tag:
                tag_id = tag.id
            else:
                tag = Tag(name=tagname)
                db.session.add(tag)
                db.session.commit()
                tag_id = tag.id
            # Tag weight counts how many posts use it (drives tag clouds).
            if not tag.weight:
                tag.weight = 0
            tag.weight += 1
            pt = PostTag(post_id=post_id,
                tag_id=tag_id)
            db.session.add(pt)
            db.session.commit()
        # NOTE(review): the fallback default is the literal string 'None',
        # not the None object -- presumably intentional, but verify.
        content = form.get('post_content', 'None')
        post.body = content
        # First 140 characters of the stripped body serve as the summary.
        content_summary = remove_html_tag(content)[:140]
        post.body_html = content_summary
        post.title =form.get('post_title', 'None')
        post.privacy = is_privacy
        post.post_time = datetime.utcnow()
        db.session.add(post)
        db.session.commit()
        return redirect('/admin/posts')
@bp.route('/admin/comment', methods=['GET', 'POST'])
@bp.route('/admin/comment/<int:id>', methods=['GET'])
@bp.route('/admin/comment/edit/<int:id>', methods=['GET', 'POST'])
@login_required
@admin_required
def edit_comment(id=None):
    """Show (GET) or update (POST) a single comment.

    GET without an id renders an empty comment form; POST requires an
    existing comment id (comments cannot be created from the admin).
    """
    if request.method == 'GET':
        if id:
            comment = Comment.query.get_or_404(id)
            return render_template('admin/comment.html', comment=comment)
        comment = Comment()
        return render_template('admin/comment.html', comment=comment)
    # Bug fix: POST without an id previously fell through to
    # `comment.body = ...` with `comment` unbound, raising NameError.
    if not id:
        return redirect('/admin/comments')
    comment = Comment.query.get_or_404(id)
    comment.body = request.form.get('comment_body', 'None')
    db.session.commit()
    # Bug fix: redirect to the list *route*; '/admin/comments.html' is a
    # template path, not a registered URL, and would 404.
    return redirect('/admin/comments')
@bp.route('/admin/link', methods=['GET', 'POST'])
@bp.route('/admin/link/<int:id>', methods=['GET'])
@bp.route('/admin/link/edit/<int:id>', methods=['GET', 'POST'])
@login_required
@admin_required
def edit_link(id=None):
    """Show (GET) or create/update (POST) a blogroll link."""
    if request.method == 'GET':
        if id:
            link = Link.query.get_or_404(id)
            return render_template('admin/link.html', link=link)
        link = Link()
        return render_template('admin/link.html', link=link)
    form = request.form
    if id:
        link = Link.query.get_or_404(id)
    else:
        link = Link()
    link.name = form.get('link_name', 'None')
    link.url = form.get('link_url', 'None')
    # Weight orders links in the sidebar; fall back to 1 on bad input.
    try:
        link_weight = int(form.get('link_weight', 1))
    except (TypeError, ValueError):
        # Narrowed from a bare `except`, which also swallowed e.g.
        # KeyboardInterrupt.
        link_weight = 1
    link.weight = link_weight
    link.note = form.get('link_note', 'None')
    db.session.add(link)
    db.session.commit()
    # Bug fix: previously redirected to the relative template path
    # 'admin/links.html'; the registered list route is '/admin/links'.
    return redirect('/admin/links')
@bp.route('/admin/posts', methods=['GET', 'POST'])
@login_required
@admin_required
def posts():
    """List all posts, newest first, paginated for the admin panel."""
    page = request.args.get('page', 1, type=int)
    pagination = (Post.query
                  .order_by(Post.post_time.desc())
                  .paginate(page,
                            per_page=current_app.config['FINEPY_POSTS_PER_PAGE'],
                            error_out=False))
    return render_template('admin/posts.html', posts=pagination.items,
                           pagination=pagination,
                           tab_menu='posts')
@bp.route('/admin/comments', methods=['GET'])
@login_required
@admin_required
def comments():
    """List all comments, newest first, paginated for the admin panel."""
    page = request.args.get('page', 1, type=int)
    pagination = (Comment.query
                  .order_by(Comment.create_time.desc())
                  .paginate(page,
                            per_page=current_app.config['FINEPY_POSTS_PER_PAGE'],
                            error_out=False))
    return render_template('admin/comments.html', comments=pagination.items,
                           pagination=pagination,
                           tab_menu='comments')
@bp.route('/admin/users', methods=['GET', 'POST'])
@login_required
@admin_required
def users():
    """List all user accounts by ascending id, paginated."""
    page = request.args.get('page', 1, type=int)
    pagination = (User.query
                  .order_by(User.id.asc())
                  .paginate(page,
                            per_page=current_app.config['FINEPY_POSTS_PER_PAGE'],
                            error_out=False))
    return render_template('admin/users.html', users=pagination.items,
                           pagination=pagination,
                           tab_menu='users')
@bp.route('/admin/links', methods=['GET', 'POST'])
@login_required
@admin_required
def links():
    """List all blogroll links, heaviest weight first, paginated."""
    page = request.args.get('page', 1, type=int)
    pagination = (Link.query
                  .order_by(Link.weight.desc())
                  .paginate(page,
                            per_page=current_app.config['FINEPY_POSTS_PER_PAGE'],
                            error_out=False))
    return render_template('admin/links.html', links=pagination.items,
                           pagination=pagination,
                           tab_menu='links')
@bp.route('/admin/user', methods=['GET', 'POST'])
@bp.route('/admin/user/<int:id>', methods=['GET'])
@bp.route('/admin/user/edit/<int:id>', methods=['GET', 'POST'])
@login_required
@admin_required
def edit_user(id=None):
    """Show (GET) or create/update (POST) a user account."""
    if request.method == 'GET':
        if id:
            user = User.query.get_or_404(id)
            return render_template('admin/user.html', user=user)
        user = User()
        return render_template('admin/user.html', user=user)
    form = request.form
    if id:
        user = User.query.get_or_404(id)
    else:
        user = User()
    user.username = form.get('username', 'None')
    user.email = form.get('email', 'None')
    user.name = form.get('name')
    user.confirmed = 'confirmed' in form
    # Refresh the cached gravatar hash after a possible email change.
    user.avatar_hash = user.gravatar()
    db.session.add(user)
    db.session.commit()
    # Bug fix: previously rendered 'admin/users.html' without the `users`
    # and `pagination` context that template requires; redirect to the
    # list route instead (matching edit_post's behaviour).
    return redirect('/admin/users')
@bp.route('/admin/logs', methods=['GET', 'POST'])
@login_required
@admin_required
def logs():
    # Placeholder: the admin log viewer is not implemented yet.
    return ""
@bp.route('/admin/settings', methods=['GET', 'POST'])
@login_required
@admin_required
def settings():
    # Placeholder: the admin settings page is not implemented yet.
    return ""
@bp.route('/admin/', methods=['GET', 'POST'])
@login_required
@admin_required
def search():
    # Placeholder: admin search is not implemented yet.
    # NOTE(review): the route '/admin/' nearly shadows index()'s '/admin';
    # confirm the trailing-slash URL is really meant to map here.
    return ""
acef8892c3a2e85d3ff5c4bbe0a026cfe864da24 | 11,722 | py | Python | ablation_train.py | wandoucao/BDCN | 1062f5bf04cd9484c548af2c435773d7bf870ec5 | [
"MIT"
] | 19 | 2019-07-10T08:20:43.000Z | 2022-03-23T12:07:43.000Z | ablation_train.py | wandoucao/BDCN | 1062f5bf04cd9484c548af2c435773d7bf870ec5 | [
"MIT"
] | 4 | 2019-07-07T01:06:47.000Z | 2019-12-31T08:16:59.000Z | ablation_train.py | wandoucao/BDCN | 1062f5bf04cd9484c548af2c435773d7bf870ec5 | [
"MIT"
] | 6 | 2019-07-20T08:30:54.000Z | 2021-12-17T07:25:03.000Z | import numpy as np
import torch
import torch.optim as optim
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
import argparse
import time
import re
import os
import sys
import ablation
from datasets.dataset import Data
import cfg
import log
def adjust_learning_rate(optimizer, steps, step_size, gamma=0.1, logger=None):
    """Multiply every parameter group's learning rate by ``gamma``.

    The caller invokes this once every ``step_size`` iterations, so the
    overall schedule is geometric decay. (The previous docstring --
    "decayed by 10 every 30 epochs" -- was copy-pasted and wrong.)

    :param optimizer: optimizer whose ``param_groups`` are updated in place
    :param steps: current iteration count (kept for interface
        compatibility; unused by the purely multiplicative decay below)
    :param step_size: decay interval (unused here for the same reason)
    :param gamma: multiplicative decay factor
    :param logger: optional logger; each group's new lr is logged when given
    """
    for param_group in optimizer.param_groups:
        param_group['lr'] = param_group['lr'] * gamma
        if logger:
            logger.info('%s: %s' % (param_group['name'], param_group['lr']))
def cross_entropy_loss2d(inputs, targets, cuda=False, balance=1.1):
    """Class-balanced binary cross entropy over 2-D edge maps.

    :param inputs: raw logits, shape (n, 1, h, w)
    :param targets: labels, shape (n, 1, h, w); pixels equal to 1 are
        edges, 0 are background, any other value is ignored (weight 0)
    :param cuda: move the weight tensor to GPU when True
    :param balance: extra multiplier on the background weight
    :return: summed weighted BCE loss (scalar tensor)
    """
    t = targets.cpu().data.numpy()
    # Per-sample positive/negative counts, vectorized (replaces the old
    # Python loop over the batch).
    pos = (t == 1).sum(axis=(1, 2, 3), keepdims=True).astype(np.float64)
    neg = (t == 0).sum(axis=(1, 2, 3), keepdims=True).astype(np.float64)
    # Guard against a sample with no valid (0/1) pixels at all, which
    # previously divided by zero.
    valid = np.maximum(pos + neg, 1.0)
    # Edge pixels are weighted by the background fraction and vice versa,
    # so both classes contribute comparably despite edge sparsity.
    weights = np.zeros_like(t, dtype=np.float64)
    weights[t == 1] = np.broadcast_to(neg * 1. / valid, t.shape)[t == 1]
    weights[t == 0] = np.broadcast_to(pos * balance / valid, t.shape)[t == 0]
    weights = torch.Tensor(weights)
    if cuda:
        weights = weights.cuda()
    weights = Variable(weights)
    # torch.sigmoid replaces the deprecated F.sigmoid.
    inputs = torch.sigmoid(inputs)
    loss = nn.BCELoss(weights, size_average=False)(inputs, targets)
    return loss
def train(model, args):
    """Train ``model`` on the configured dataset with per-layer learning
    rates, iteration-based LR decay and periodic snapshots.

    Python 2 era code: note ``xrange`` and ``loss.data[0]`` (old torch).
    """
    data_root = cfg.config[args.dataset]['data_root']
    data_lst = cfg.config[args.dataset]['data_lst']
    if 'Multicue' in args.dataset:
        # Multicue ships k split files; pick the one for this run.
        data_lst = data_lst % args.k
    mean_bgr = np.array(cfg.config[args.dataset]['mean_bgr'])
    yita = args.yita if args.yita else cfg.config[args.dataset]['yita']
    crop_size = args.crop_size
    train_img = Data(data_root, data_lst, yita, mean_bgr=mean_bgr, crop_size=crop_size)
    trainloader = torch.utils.data.DataLoader(train_img,
        batch_size=args.batch_size, shuffle=True, num_workers=5)
    params_dict = dict(model.named_parameters())
    base_lr = args.base_lr
    weight_decay = args.weight_decay
    logger = args.logger
    params = []
    # Assign per-layer lr / weight-decay multipliers by parameter name,
    # mirroring the original Caffe HED/BDCN solver configuration.
    for key, v in params_dict.items():
        if re.match(r'conv[1-5]_[1-3]_down', key):
            if 'weight' in key:
                params += [{'params': v, 'lr': base_lr*0.1, 'weight_decay': weight_decay*1, 'name': key}]
            elif 'bias' in key:
                params += [{'params': v, 'lr': base_lr*0.2, 'weight_decay': weight_decay*0, 'name': key}]
        elif re.match(r'.*conv[1-4]_[1-3]', key):
            if 'weight' in key:
                params += [{'params': v, 'lr': base_lr*1, 'weight_decay': weight_decay*1, 'name': key}]
            elif 'bias' in key:
                params += [{'params': v, 'lr': base_lr*2, 'weight_decay': weight_decay*0, 'name': key}]
        elif re.match(r'.*conv5_[1-3]', key):
            if 'weight' in key:
                params += [{'params': v, 'lr': base_lr*100, 'weight_decay': weight_decay*1, 'name': key}]
            elif 'bias' in key:
                params += [{'params': v, 'lr': base_lr*200, 'weight_decay': weight_decay*0, 'name': key}]
        elif re.match(r'score_dsn[1-5]', key):
            if 'weight' in key:
                params += [{'params': v, 'lr': base_lr*0.01, 'weight_decay': weight_decay*1, 'name': key}]
            elif 'bias' in key:
                params += [{'params': v, 'lr': base_lr*0.02, 'weight_decay': weight_decay*0, 'name': key}]
        elif re.match(r'upsample_[248](_5)?', key):
            # Upsampling layers are frozen (bilinear init, lr 0).
            if 'weight' in key:
                params += [{'params': v, 'lr': base_lr*0, 'weight_decay': weight_decay*0, 'name': key}]
            elif 'bias' in key:
                params += [{'params': v, 'lr': base_lr*0, 'weight_decay': weight_decay*0, 'name': key}]
        elif re.match(r'.*msblock[1-5]_[1-3]\.conv', key):
            if 'weight' in key:
                params += [{'params': v, 'lr': base_lr*1, 'weight_decay': weight_decay*1, 'name': key}]
            elif 'bias' in key:
                params += [{'params': v, 'lr': base_lr*2, 'weight_decay': weight_decay*0, 'name': key}]
        else:
            if 'weight' in key:
                params += [{'params': v, 'lr': base_lr*0.001, 'weight_decay': weight_decay*1, 'name': key}]
            elif 'bias' in key:
                params += [{'params': v, 'lr': base_lr*0.002, 'weight_decay': weight_decay*0, 'name': key}]
    optimizer = torch.optim.SGD(params, momentum=args.momentum,
        lr=args.base_lr, weight_decay=args.weight_decay)
    start_step = 1
    mean_loss = []
    cur = 0
    pos = 0
    data_iter = iter(trainloader)
    iter_per_epoch = len(trainloader)
    logger.info('*'*40)
    logger.info('train images in all are %d ' % iter_per_epoch)
    logger.info('*'*40)
    start_time = time.time()
    if args.cuda:
        model.cuda()
    if args.resume:
        # NOTE(review): only the step counter and optimizer state are
        # restored here; the model weights ('param' in the commented-out
        # checkpoint below) are never loaded -- confirm whether
        # model.load_state_dict(state['param']) is missing.
        logger.info('resume from %s' % args.resume)
        state = torch.load(args.resume)
        start_step = state['step']
        optimizer.load_state_dict(state['solver'])
    model.train()
    # Effective batch size: gradients accumulate over iter_size mini-batches.
    batch_size = args.iter_size * args.batch_size
    for step in xrange(start_step, args.max_iter + 1):
        optimizer.zero_grad()
        batch_loss = 0
        for i in xrange(args.iter_size):
            if cur == iter_per_epoch:
                # Epoch boundary: restart the data iterator.
                cur = 0
                data_iter = iter(trainloader)
            images, labels = next(data_iter)
            if args.cuda:
                images, labels = images.cuda(), labels.cuda()
            images, labels = Variable(images), Variable(labels)
            out = model(images)
            loss = 0
            # Deep-supervision: side outputs plus the final fused output.
            for k in range(len(out) - 1):
                loss += args.side_weight*cross_entropy_loss2d(out[k], labels, args.cuda, args.balance)/batch_size
            loss += args.fuse_weight*cross_entropy_loss2d(out[-1], labels, args.cuda, args.balance)/batch_size
            loss.backward()
            batch_loss += loss.data[0]
            cur += 1
        # update parameter
        optimizer.step()
        # Smoothed loss kept in a fixed-size ring buffer.
        if len(mean_loss) < args.average_loss:
            mean_loss.append(batch_loss)
        else:
            mean_loss[pos] = batch_loss
            pos = (pos + 1) % args.average_loss
        if step % args.step_size == 0:
            adjust_learning_rate(optimizer, step, args.step_size)
        if step % args.snapshots == 0:
            torch.save(model.state_dict(), '%s/bdcn_%d.pth' % (args.param_dir, step))
            # state = {'step': step+1,'param':model.state_dict(),'solver':optimizer.state_dict()}
            # torch.save(state, '%s/bdcn_%d.pth.tar' % (args.param_dir, step))
        if step % args.display == 0:
            tm = time.time() - start_time
            logger.info('iter: %d, lr: %e, loss: %f, time using: %f(%fs/iter)' % (step,
                optimizer.param_groups[0]['lr'], np.mean(mean_loss), tm, tm/args.display))
            start_time = time.time()
def main():
    """Parse CLI args, configure logging and GPU env, build the ablation
    BDCN model and launch training."""
    args = parse_args()
    logger = log.get_logger(args.log)
    args.logger = logger
    logger.info('*'*80)
    logger.info('the args are the below')
    logger.info('*'*80)
    for x in args.__dict__:
        logger.info(x+','+str(args.__dict__[x]))
    logger.info(cfg.config[args.dataset])
    logger.info('*'*80)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    if not os.path.exists(args.param_dir):
        os.mkdir(args.param_dir)
    # NOTE: `long` (and `xrange` in train()) make this a Python 2 script.
    torch.manual_seed(long(time.time()))
    model = ablation.BDCN(pretrain=args.pretrain, logger=logger,
        ms=args.ms, block=args.block, bdcn=not args.no_bdcn, direction=args.dir,
        k=args.num_conv, rate=args.rate)
    if args.complete_pretrain:
        # Optionally fine-tune from a fully trained checkpoint.
        model.load_state_dict(torch.load(args.complete_pretrain))
    logger.info(model)
    train(model, args)
def parse_args():
    """Build and evaluate the command-line interface for ablation training.

    :return: an ``argparse.Namespace`` with all training hyperparameters.
    """
    parser = argparse.ArgumentParser(description='Train BDCN for different args')
    parser.add_argument('-d', '--dataset', type=str, choices=cfg.config.keys(),
        default='bsds500', help='The dataset to train')
    parser.add_argument('--param-dir', type=str, default='params',
        help='the directory to store the params')
    parser.add_argument('--lr', dest='base_lr', type=float, default=1e-6,
        help='the base learning rate of model')
    parser.add_argument('-m', '--momentum', type=float, default=0.9,
        help='the momentum')
    parser.add_argument('-c', '--cuda', action='store_true',
        help='whether use gpu to train network')
    parser.add_argument('-g', '--gpu', type=str, default='0',
        help='the gpu id to train net')
    parser.add_argument('--weight-decay', type=float, default=0.0002,
        help='the weight_decay of net')
    parser.add_argument('-r', '--resume', type=str, default=None,
        help='whether resume from some, default is None')
    parser.add_argument('-p', '--pretrain', type=str, default=None,
        help='init net from pretrained model default is None')
    parser.add_argument('--max-iter', type=int, default=40000,
        help='max iters to train network, default is 40000')
    parser.add_argument('--iter-size', type=int, default=10,
        help='iter size equal to the batch size, default 10')
    parser.add_argument('--average-loss', type=int, default=50,
        help='smoothed loss, default is 50')
    parser.add_argument('-s', '--snapshots', type=int, default=1000,
        help='how many iters to store the params, default is 1000')
    parser.add_argument('--step-size', type=int, default=10000,
        help='the number of iters to decrease the learning rate, default is 10000')
    parser.add_argument('--display', type=int, default=20,
        help='how many iters display one time, default is 20')
    parser.add_argument('-b', '--balance', type=float, default=1.1,
        help='the parameter to balance the neg and pos, default is 1.1')
    parser.add_argument('-l', '--log', type=str, default='log.txt',
        help='the file to store log, default is log.txt')
    parser.add_argument('-k', type=int, default=1,
        help='the k-th split set of multicue')
    parser.add_argument('--batch-size', type=int, default=1,
        help='batch size of one iteration, default 1')
    parser.add_argument('--crop-size', type=int, default=None,
        help='the size of image to crop, default not crop')
    parser.add_argument('--yita', type=float, default=None,
        help='the param to operate gt, default is data in the config file')
    parser.add_argument('--complete-pretrain', type=str, default=None,
        help='finetune on the complete_pretrain, default None')
    parser.add_argument('--side-weight', type=float, default=0.5,
        help='the loss weight of sideout, default 0.5')
    parser.add_argument('--fuse-weight', type=float, default=1.1,
        help='the loss weight of fuse, default 1.1')
    # Ablation-specific switches below.
    parser.add_argument('--ms', action='store_true',
        help='whether employ the ms blocks, default False')
    parser.add_argument('--block', type=int, default=5,
        help='how many blocks of the model, default 5')
    parser.add_argument('--no-bdcn', action='store_true',
        help='whether to employ our policy to train the model, default False')
    parser.add_argument('--dir', type=str, choices=['both', 's2d', 'd2s'], default='both',
        help='the direction of cascade, default both')
    parser.add_argument('--num-conv', type=int, choices=[0,1,2,3,4], default=3,
        help='the number of convolution of SEB, default 3')
    parser.add_argument('--rate', type=int, default=4,
        help='the dilation rate of scale enhancement block, default 4')
    return parser.parse_args()
if __name__ == '__main__':
    # Entry point: parse CLI args, build the model and start training.
    main()
acef88e74bd745a7a57090235c40f38fd3c9d0f4 | 17,601 | py | Python | model/mshpfnl.py | psychopa4/MSHPFNL | cce6392a7e94bd6ad809d0c39277aaea618fbf7c | [
"MIT"
] | 12 | 2020-09-02T02:31:08.000Z | 2021-11-08T07:56:44.000Z | model/mshpfnl.py | ljw1000000/mjy-MSHPFNL | cce6392a7e94bd6ad809d0c39277aaea618fbf7c | [
"MIT"
] | 3 | 2021-03-03T05:20:03.000Z | 2021-07-16T13:01:30.000Z | model/mshpfnl.py | ljw1000000/mjy-MSHPFNL | cce6392a7e94bd6ad809d0c39277aaea618fbf7c | [
"MIT"
] | 3 | 2020-12-07T03:46:19.000Z | 2021-03-03T08:10:17.000Z | import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
from os.path import join,exists
import glob
import random
import numpy as np
from PIL import Image
import scipy
import time
import os
from tensorflow.python.layers.convolutional import Conv2D,conv2d
from utils import NonLocalBlock, DownSample, DownSample_4D, BLUR, get_num_params, cv2_imread, cv2_imsave, automkdir
from tqdm import tqdm,trange
from model.base_model import VSR
'''This is the official TensorFlow code of MSHPFNL (A Progressive Fusion Generative Adversarial Network for Realistic and Consistent Video Super-Resolution).
The code is mainly based on https://github.com/psychopa4/PFNL.
'''
class HybridConv(object):
    """Two parallel convolutions -- one dilated, one ordinary -- whose
    feature maps are concatenated and fused by a 1x1 convolution."""

    def __init__(self, mf=32, ks=3, ds=1, dr=2, activation=None, ki=None, name='HybridConv'):
        super(HybridConv, self).__init__()
        common = dict(strides=ds, padding='same', activation=activation,
                      kernel_initializer=ki)
        # Dilated branch captures a wider receptive field.
        self.dconv = Conv2D(mf, ks, dilation_rate=dr, name=name + '/d', **common)
        # Plain branch keeps local detail.
        self.conv = Conv2D(mf, ks, dilation_rate=1, name=name + '/c', **common)
        # 1x1 fusion over the concatenated branches.
        self.merge = Conv2D(mf * 2, 1, dilation_rate=1, name=name + '/m', **common)
    def __call__(self, x):
        dilated = self.dconv(x)
        plain = self.conv(x)
        return self.merge(tf.concat([dilated, plain], -1))
class ReSample(object):
    """Pixel-shuffle style re-sampling followed by a convolution.

    A ``scale`` below 1 packs space into channels (downsampling); a
    scale of 1 or more unpacks channels into space (upsampling).
    """

    def __init__(self, mf=32, ks=1, activation=None, ki=None, name='ReSample'):
        super(ReSample, self).__init__()
        self.conv = Conv2D(mf, ks, strides=1, padding='same',
                           activation=activation, kernel_initializer=ki,
                           name=name + '/Conv2D0')
    def __call__(self, x, scale=1):
        # NOTE(review): a scale of exactly 1 reaches depth_to_space with
        # block size 1, which TF rejects -- confirm callers never pass 1.
        if scale < 1:
            resampled = tf.space_to_depth(x, int(1. / scale))
        else:
            resampled = tf.depth_to_space(x, scale)
        return self.conv(resampled)
class ReSample_S(object):
    """Like :class:`ReSample`, but convolves *before* the pixel shuffle
    instead of after it.
    """
    def __init__(self, mf=32, ks=1, activation=None, ki=None, name='ReSample'):
        super(ReSample_S, self).__init__()
        self.conv = Conv2D(mf, ks, strides=1, padding='same', activation=activation,
                           kernel_initializer=ki, name=name + '/Conv2D0')

    def __call__(self, x, scale=1):
        """Convolve ``x`` and then change its spatial resolution by ``scale``."""
        features = self.conv(x)
        if scale < 1:
            return tf.space_to_depth(features, int(1. / scale))
        return tf.depth_to_space(features, scale)
class MSPFRB(object):
    """Multi-Scale Progressive Fusion Residual Block.

    Operates on a pair ``[x, x_dp]`` where ``x`` is a list of per-frame
    feature maps at full resolution and ``x_dp`` is a single downsampled
    feature map. Both paths exchange information through a shared fused
    representation and end with a scaled (0.1) residual connection.
    """
    def __init__(self, mf=32, num_frame=3, scale=1, ks=3, ds=1, dr=1 , activation=None, ki=None, name='MSPFRB'):
        super(MSPFRB, self).__init__()
        # Bookkeeping: base feature count, frame count, inter-path scale, activation.
        self.bf=mf
        self.nf=num_frame
        self.scale=scale
        self.act=activation
        # conv0: per-frame feature extraction; conv1: 1x1 merge of all frames;
        # conv2: per-frame refinement after fusion.
        self.conv0=Conv2D(mf, ks, strides=ds, dilation_rate=1, padding='same', activation=activation, kernel_initializer=ki, name=name+'/Conv2D0')
        self.conv1=Conv2D(mf, 1, strides=ds, dilation_rate=1, padding='same', activation=activation, kernel_initializer=ki, name=name+'/Conv2D1')
        self.conv2=Conv2D(mf, 3, strides=ds, dilation_rate=1, padding='same', activation=activation, kernel_initializer=ki, name=name+'/Conv2D2')
        # Hybrid (dilated + plain) conv applied to the fused base features.
        self.hybridconv=HybridConv(mf//2, ks=3, ds=1, dr=2 , activation=activation, ki=ki, name=name+'/HybridConv')
        # NOTE: the downsampled path deliberately re-uses the SAME layer
        # objects, so weights are shared between the two resolutions.
        self.conv0_dp=self.conv0
        self.conv2_dp=self.conv2
        # Cross-resolution exchange: up brings x_dp to full resolution,
        # dp sends fused features back down.
        self.up=ReSample(mf, ks=1, activation=activation, ki=ki, name=name+'/UpSample')
        self.dp=ReSample_S(mf//4, ks=1, activation=activation, ki=ki, name=name+'/DownSample')
    def __call__(self, x_mix):
        # x_mix[0]: list of per-frame features; x_mix[1]: downsampled features.
        x=x_mix[0]
        x_dp=x_mix[1]
        x1=[self.conv0(i) for i in x]
        x1_dp=self.conv0_dp(x_dp)
        # Lift the low-resolution features to full resolution for fusion.
        x1_dp_up=self.up(x1_dp, self.scale)
        # Fuse all frames plus the upsampled LR path into one base map.
        base=self.conv1(tf.concat(x1+[x1_dp_up], -1))
        base=self.hybridconv(base)
        base_dp=self.dp(base, 1./self.scale)
        # Re-distribute the fused base to every frame and to the LR path.
        x2=[tf.concat([base,i], -1) for i in x1]
        x2_dp=tf.concat([base_dp,x1_dp], -1)
        x2=[self.conv2(i) for i in x2]
        x2_dp=self.conv2_dp(x2_dp)
        # Residual connection scaled by 0.1 on both paths.
        return [[tf.add(x[i],x2[i]*0.1) for i in range(len(x))], tf.add(x_dp,x2_dp*0.1)]
class MSHPFNL(VSR):
    """MSHPFNL video super-resolution generator (TensorFlow 1.x graph mode).

    Stacks a non-local block plus ``num_block`` MSPFRBs over a window of
    ``num_frames`` low-resolution frames and predicts the x4 super-resolved
    center frame as a residual on top of a bicubic upsample.
    """
    def __init__(self):
        # --- model / training hyper-parameters -------------------------------
        self.num_frames=7                      # temporal window size
        self.scale=4                           # super-resolution factor
        self.in_size=32                        # LR training patch size
        self.gt_size=self.in_size*self.scale   # corresponding HR patch size
        self.eval_in_size=[128,240]            # LR (h, w) used for evaluation
        self.batch_size=16
        self.eval_basz=4                       # evaluation batch size
        self.learning_rate=1e-3
        self.end_lr=1e-4                       # final lr of the polynomial decay
        self.reload=True                       # resume from checkpoint if present
        self.max_step=int(2.e5+1)
        self.decay_step=1.2e5
        # --- file-system locations -------------------------------------------
        self.train_dir='./data/filelist_train.txt'
        self.eval_dir='./data/filelist_val.txt'
        self.save_dir='./checkpoint/mshpfnl'
        self.log_dir='./mshpfnl.txt'
    def forward(self, x):
        """Build the SR graph for ``x`` of shape [batch, num_frames, h, w, 3]
        and return the predicted HR center frame, shape [batch, 1, h*4, w*4, 3]."""
        mf=64                       # base feature-map count
        dk=3                        # kernel size inside the MSPFRBs
        activate=tf.nn.leaky_relu
        num_block=40                # number of stacked MSPFRBs
        n,f1,w,h,c=x.shape
        ki=tf.contrib.layers.xavier_initializer()
        ds=1
        with tf.variable_scope('nlvsr',reuse=tf.AUTO_REUSE) as scope:
            conv0=Conv2D(mf, 5, strides=ds, padding='same', activation=activate, kernel_initializer=ki, name='conv0')
            blocks=[MSPFRB(mf, num_frame=self.num_frames, scale=2, ks=dk, ds=1, dr=1 , activation=activate, ki=ki, name='MSPFRB{}'.format(i)) for i in range(num_block)]
            convmerge1=Conv2D(48, 3, strides=ds, padding='same', activation=activate, kernel_initializer=ki, name='convmerge1')
            convmerge2=Conv2D(12, 3, strides=ds, padding='same', activation=None, kernel_initializer=ki, name='convmerge2')
            # predp feeds the half-resolution path; afterup brings it back up.
            predp=ReSample(mf, ks=3, activation=activate, ki=ki, name='DownSample')
            afterup=ReSample(mf, ks=3, activation=activate, ki=ki, name='UpSample')
            # Non-local attention over all frames at half resolution,
            # added back to the input as a residual.
            inp0=[x[:,i,:,:,:] for i in range(f1)]
            inp0=tf.concat(inp0, axis=-1)
            inp1=tf.space_to_depth(inp0,2)
            inp1=NonLocalBlock(inp1,int(c)*self.num_frames*4,sub_sample=1,nltype=1,scope='nlrb_{}'.format(0))
            inp1=tf.depth_to_space(inp1,2)
            inp0+=inp1
            inp0=tf.split(inp0, num_or_size_splits=self.num_frames, axis=-1)
            # Half-resolution feature path shared across all frames.
            x_dp=predp(tf.concat(inp0, -1), 1./2)
            inp0=[conv0(f) for f in inp0]
            # Bicubic upsample of the center frame (method=2) as the base image.
            bic=tf.image.resize_images(x[:,self.num_frames//2,:,:,:],[w*self.scale,h*self.scale],method=2)
            x_mix=[inp0, x_dp]
            for i in range(num_block):
                x_mix=blocks[i](x_mix)
            x, x_dp=x_mix
            x_dp_up=afterup(x_dp, 2)
            # Merge all frame features plus the upsampled LR path, then
            # two depth-to-space steps realize the total x4 upscaling.
            merge=tf.concat(x+[x_dp_up],axis=-1)
            merge=convmerge1(merge)
            large1=tf.depth_to_space(merge,2)
            out1=convmerge2(large1)
            out=tf.depth_to_space(out1,2)
        # Network output is a residual on top of the bicubic baseline.
        return tf.stack([bic+out], axis=1,name='out')
    def build(self):
        """Create placeholders, the train/eval forward graphs and losses."""
        in_h,in_w=self.eval_in_size
        H = tf.placeholder(tf.float32, shape=[None, 1, None, None, 3], name='H_truth')
        L_train = tf.placeholder(tf.float32, shape=[self.batch_size, self.num_frames, self.in_size, self.in_size, 3], name='L_train')
        L_eval = tf.placeholder(tf.float32, shape=[self.eval_basz, self.num_frames, in_h, in_w, 3], name='L_eval')
        SR_train = self.forward(L_train)
        SR_eval = self.forward(L_eval)
        # Charbonnier loss (smooth L1); 1e-6 avoids a zero-gradient at 0.
        loss=tf.reduce_mean(tf.sqrt((SR_train-H)**2+1e-6))
        eval_mse=tf.reduce_mean((SR_eval-H) ** 2, axis=[2,3,4])
        self.loss, self.eval_mse= loss, eval_mse
        self.L, self.L_eval, self.H, self.SR = L_train, L_eval, H, SR_train
    def eval(self):
        """Evaluate PSNR/MSE on the validation list and append a line to the log."""
        print('Evaluating ...')
        if not hasattr(self, 'sess'):
            sess = tf.Session()
            self.load(sess, self.save_dir)
        else:
            sess = self.sess
        border=8                      # crop borders before comparison
        in_h,in_w=self.eval_in_size
        out_h = in_h*self.scale #512
        out_w = in_w*self.scale #960
        bd=border//self.scale
        eval_gt = tf.placeholder(tf.float32, [None, self.num_frames, out_h, out_w, 3])
        # LR inputs are generated on the fly by blurring + downsampling GT.
        eval_inp=DownSample(eval_gt, BLUR, scale=self.scale)
        filenames=open(self.eval_dir, 'rt').read().splitlines()#sorted(glob.glob(join(self.eval_dir,'*')))
        gt_list=[sorted(glob.glob(join(f,'truth','*.png'))) for f in filenames]
        center=15                     # first evaluated frame index; step 32 below
        batch_gt = []
        batch_cnt=0
        mse_acc=None
        for gtlist in gt_list:
            max_frame=len(gtlist)
            for idx0 in range(center, max_frame, 32):
                # Build the temporal window around idx0, clamped to valid frames.
                index=np.array([i for i in range(idx0-self.num_frames//2,idx0+self.num_frames//2+1)])
                index=np.clip(index,0,max_frame-1).tolist()
                gt=[cv2_imread(gtlist[i]) for i in index]
                gt = [i[border:out_h+border, border:out_w+border, :].astype(np.float32) / 255.0 for i in gt]
                batch_gt.append(np.stack(gt, axis=0))
                if len(batch_gt) == self.eval_basz:
                    batch_gt = np.stack(batch_gt, 0)
                    batch_lr=sess.run(eval_inp,feed_dict={eval_gt:batch_gt})
                    mse_val=sess.run(self.eval_mse,feed_dict={self.L_eval:batch_lr, self.H:batch_gt[:,self.num_frames//2:self.num_frames//2+1]})
                    if mse_acc is None:
                        mse_acc = mse_val
                    else:
                        mse_acc = np.concatenate([mse_acc, mse_val], axis=0)
                    batch_gt = []
                    print('\tEval batch {} - {} ...'.format(batch_cnt, batch_cnt + self.eval_basz))
                    batch_cnt+=self.eval_basz
        # NOTE(review): if fewer than eval_basz windows were collected,
        # mse_acc stays None and the lines below would fail — confirm the
        # validation list always yields at least one full batch.
        psnr_acc = 10 * np.log10(1.0 / mse_acc)
        mse_avg = np.mean(mse_acc, axis=0)
        psnr_avg = np.mean(psnr_acc, axis=0)
        for i in range(mse_avg.shape[0]):
            tf.summary.scalar('val_mse{}'.format(i), tf.convert_to_tensor(mse_avg[i], dtype=tf.float32))
        print('Eval PSNR: {}, MSE: {}'.format(psnr_avg, mse_avg))
        # write to log file
        with open(self.log_dir, 'a+') as f:
            # Truncate (not round) to 6 decimals before logging.
            mse_avg=(mse_avg*1e6).astype(np.int64)/(1e6)
            psnr_avg=(psnr_avg*1e6).astype(np.int64)/(1e6)
            f.write('{'+'"Iter": {} , "PSNR": {}, "MSE": {}'.format(sess.run(self.global_step), psnr_avg.tolist(), mse_avg.tolist())+'}\n')
    def train(self):
        """Main training loop: Adam + polynomial lr decay, periodic save/eval."""
        LR, HR= self.single_input_producer()
        global_step=tf.Variable(initial_value=0, trainable=False)
        self.global_step=global_step
        self.build()
        lr= tf.train.polynomial_decay(self.learning_rate, global_step, self.decay_step, end_learning_rate=self.end_lr, power=1.)
        vars_all=tf.trainable_variables()
        print('Params num of all:',get_num_params(vars_all))
        training_op = tf.train.AdamOptimizer(lr).minimize(self.loss, var_list=vars_all, global_step=global_step)
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
        #sess=tf.Session()
        self.sess=sess
        sess.run(tf.global_variables_initializer())
        self.saver = tf.train.Saver(max_to_keep=50, keep_checkpoint_every_n_hours=1)
        if self.reload:
            self.load(sess, self.save_dir)
        # Input pipeline uses TF queue runners (TF1 style).
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        cost_time=0
        start_time=time.time()
        gs=sess.run(global_step)
        for step in range(sess.run(global_step), self.max_step):
            if step>gs and step%20==0:
                print(time.strftime("%Y-%m-%d %H:%M:%S",time.localtime()),'Step:{}, loss:{}'.format(step,loss_v))
            if step % 500 == 0:
                if step>gs:
                    self.save(sess, self.save_dir, step)
                    cost_time=time.time()-start_time
                    print('cost {}s.'.format(cost_time))
                self.eval()
                cost_time=time.time()-start_time
                start_time=time.time()
                print('cost {}s.'.format(cost_time))
            lr1,hr=sess.run([LR,HR])
            _,loss_v=sess.run([training_op,self.loss],feed_dict={self.L:lr1, self.H:hr})
            # Crude divergence guard: abort if the loss explodes after warm-up.
            if step>500 and loss_v>10:
                print('Model collapsed with loss={}'.format(loss_v))
                break
    def test_video_truth(self, path, name='result', reuse=False, part=50):
        """Super-resolve one sequence from its GT frames (LR is synthesized).

        ``part`` is the number of chunks the sequence is split into;
        ``reuse=True`` skips session/graph re-initialization on later calls.
        """
        save_path=join(path,name)
        automkdir(save_path)
        inp_path=join(path,'truth')
        imgs=sorted(glob.glob(join(inp_path,'*.png')))
        max_frame=len(imgs)
        imgs=np.array([cv2_imread(i) for i in imgs])/255.
        if part>max_frame:
            part=max_frame
        # num_once = frames processed per session run (ceil division).
        if max_frame%part ==0 :
            num_once=max_frame//part
        else:
            num_once=max_frame//part+1
        h,w,c=imgs[0].shape
        L_test = tf.placeholder(tf.float32, shape=[num_once, self.num_frames, h//self.scale, w//self.scale, 3], name='L_test')
        SR_test=self.forward(L_test)
        if not reuse:
            # First call: build the GT->LR degradation graph and a session.
            self.img_hr=tf.placeholder(tf.float32, shape=[None, None, None, 3], name='H_truth')
            self.img_lr=DownSample_4D(self.img_hr, BLUR, scale=self.scale)
            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            sess = tf.Session(config=config)
            #sess=tf.Session()
            self.sess=sess
            sess.run(tf.global_variables_initializer())
            self.saver = tf.train.Saver(max_to_keep=100, keep_checkpoint_every_n_hours=1)
            self.load(sess, self.save_dir)
        lrs=self.sess.run(self.img_lr,feed_dict={self.img_hr:imgs})
        # Build a sliding temporal window (clamped at sequence ends) per frame.
        lr_list=[]
        max_frame=lrs.shape[0]
        for i in range(max_frame):
            index=np.array([i for i in range(i-self.num_frames//2,i+self.num_frames//2+1)])
            index=np.clip(index,0,max_frame-1).tolist()
            lr_list.append(np.array([lrs[j] for j in index]))
        lr_list=np.array(lr_list)
        print('Save at {}'.format(save_path))
        print('{} Inputs With Shape {}'.format(lrs.shape[0],lrs.shape[1:]))
        h,w,c=lrs.shape[1:]
        all_time=[]
        for i in trange(part):
            st_time=time.time()
            sr=self.sess.run(SR_test,feed_dict={L_test : lr_list[i*num_once:(i+1)*num_once]})
            all_time.append(time.time()-st_time)
            for j in range(sr.shape[0]):
                img=sr[j][0]*255.
                img=np.clip(img,0,255)
                img=np.round(img,0).astype(np.uint8)
                cv2_imsave(join(save_path, '{:0>4}.png'.format(i*num_once+j)),img)
        # NOTE(review): the conversion below is duplicated inside the if —
        # harmless but redundant.
        all_time=np.array(all_time)
        if max_frame>0:
            all_time=np.array(all_time)
            print('spent {} s in total and {} s in average'.format(np.sum(all_time),np.mean(all_time[1:])))
    def test_video_lr(self, path, name='result', reuse=False, part=50):
        """Super-resolve one sequence directly from pre-generated LR frames
        (read from ``<path>/blur<scale>``). Mirrors test_video_truth otherwise."""
        save_path=join(path,name)
        automkdir(save_path)
        inp_path=join(path,'blur{}'.format(self.scale))
        imgs=sorted(glob.glob(join(inp_path,'*.png')))
        max_frame=len(imgs)
        lrs=np.array([cv2_imread(i) for i in imgs])/255.
        if part>max_frame:
            part=max_frame
        # num_once = frames processed per session run (ceil division).
        if max_frame%part ==0 :
            num_once=max_frame//part
        else:
            num_once=max_frame//part+1
        h,w,c=lrs[0].shape
        L_test = tf.placeholder(tf.float32, shape=[num_once, self.num_frames, h, w, 3], name='L_test')
        SR_test=self.forward(L_test)
        if not reuse:
            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            sess = tf.Session(config=config)
            #sess=tf.Session()
            self.sess=sess
            sess.run(tf.global_variables_initializer())
            self.saver = tf.train.Saver(max_to_keep=100, keep_checkpoint_every_n_hours=1)
            self.load(sess, self.save_dir)
        # Build a sliding temporal window (clamped at sequence ends) per frame.
        lr_list=[]
        max_frame=lrs.shape[0]
        for i in range(max_frame):
            index=np.array([i for i in range(i-self.num_frames//2,i+self.num_frames//2+1)])
            index=np.clip(index,0,max_frame-1).tolist()
            lr_list.append(np.array([lrs[j] for j in index]))
        lr_list=np.array(lr_list)
        print('Save at {}'.format(save_path))
        print('{} Inputs With Shape {}'.format(lrs.shape[0],lrs.shape[1:]))
        h,w,c=lrs.shape[1:]
        all_time=[]
        for i in trange(part):
            st_time=time.time()
            sr=self.sess.run(SR_test,feed_dict={L_test : lr_list[i*num_once:(i+1)*num_once]})
            all_time.append(time.time()-st_time)
            for j in range(sr.shape[0]):
                img=sr[j][0]*255.
                img=np.clip(img,0,255)
                img=np.round(img,0).astype(np.uint8)
                cv2_imsave(join(save_path, '{:0>4}.png'.format(i*num_once+j)),img)
        # NOTE(review): duplicated np.array conversion, same as test_video_truth.
        all_time=np.array(all_time)
        if max_frame>0:
            all_time=np.array(all_time)
            print('spent {} s in total and {} s in average'.format(np.sum(all_time),np.mean(all_time[1:])))
    def testvideos(self, path='/dev/f/data/video/test2/udm10', start=0, name='mshpfnl'):
        """Run test_video_truth over every sequence directory under ``path``,
        starting at index ``start``; the session is built once and reused."""
        kind=sorted(glob.glob(join(path,'*')))
        kind=[k for k in kind if os.path.isdir(k)]
        reuse=False
        for k in kind:
            idx=kind.index(k)
            if idx>=start:
                if idx>start:
                    reuse=True
                datapath=join(path,k)
                self.test_video_truth(datapath, name=name, reuse=reuse, part=1000)
if __name__=='__main__':
    # Script entry point: build the model and start training.
    model=MSHPFNL()
    model.train()
    #model.testvideos()
| 42.00716 | 168 | 0.591103 |
acef89bfa2f5ebc11b11d86e0356c4b8c7552b0e | 572 | py | Python | python/RPiSense/Convert.py | dbullockphd/RPiSense | 787a72992969992c9e6efa080800024de1294097 | [
"MIT"
] | null | null | null | python/RPiSense/Convert.py | dbullockphd/RPiSense | 787a72992969992c9e6efa080800024de1294097 | [
"MIT"
] | null | null | null | python/RPiSense/Convert.py | dbullockphd/RPiSense | 787a72992969992c9e6efa080800024de1294097 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# https://github.com/dbullockphd/RPiSense
def C2F (C):
    """Convert a temperature from Celsius to Fahrenheit.

    :param C: (`float`)
        Temperature in degrees Celsius.
    :return:
        - **F** (`float`) -- Temperature in degrees Fahrenheit.
    """
    return 9.*C/5. + 32
def mbar2inHg (mbar):
    """Convert a pressure reading from millibars to inches of mercury.

    :param mbar: (`float`)
        Pressure in millibars.
    :return:
        - **inHg** (`float`) -- Pressure in inches of mercury.
    """
    return mbar * 0.029530
| 16.823529 | 66 | 0.557692 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.