file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
environment.py | """Module with code to be run before and after certain events during the testing."""
import json
import datetime
import subprocess
import os.path
import contextlib
from behave.log_capture import capture
import docker
import requests
import time
from src.s3interface import S3Interface
import logging
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logger = logging.getLogger(__file__)
_THIS_DIR = os.path.dirname(os.path.abspath(__file__))
_REPO_DIR = os.path.dirname(os.path.dirname(_THIS_DIR))
# The following API endpoint is used to check if the system is started
_API_ENDPOINT = 'api/v1'
# The following endpoint is used to get the access token from OSIO AUTH service
_AUTH_ENDPOINT = "/api/token/refresh"
# Ports used by various services
_FABRIC8_ANALYTICS_SERVER = 32000
_FABRIC8_ANALYTICS_JOBS = 34000
_FABRIC8_GREMLIN_SERVICE = 80
_FABRIC8_LICENSE_SERVICE = 80
# Endpoint for jobs debug API
_JOBS_DEBUG_API = _API_ENDPOINT + "/debug"
# Default timeout values for the stack analysis and component analysis endpoints
_DEFAULT_STACK_ANALYSIS_TIMEOUT = 1200
_DEFAULT_COMPONENT_ANALYSIS_TIMEOUT = 1200
def _make_compose_name(suffix='.yml'):
return os.path.join(_REPO_DIR, 'docker-compose' + suffix)
def _set_default_compose_path(context):
base_compose = _make_compose_name()
test_specific_compose = _make_compose_name(".integration-tests.yml")
# Extra containers are added as needed by integration setup commands
context.docker_compose_path = [base_compose, test_specific_compose]
# WARNING: make sure behave uses pytest improved asserts
# Behave runner uses behave.runner.exec_file function to read, compile
# and exec code of environment file and step files *in this order*.
# Therefore we provide a new implementation here, which uses pytest's
# _pytest.assertion.rewrite to rewrite the bytecode with pytest's
# improved asserts.
# This means that when behave tries to load steps, it will use our exec_file.
# => SUCCESS
# Don't ask how long it took me to figure this out.
import behave.runner
def exec_file(filename, globals=None, locals=None):
"""Execute the specified file, optionaly setup its context by using globals and locals."""
if globals is None:
globals = {}
if locals is None:
locals = globals
locals['__file__'] = filename
from py import path
from _pytest import config
from _pytest.assertion import rewrite
f = path.local(filename)
config = config._prepareconfig([], [])
source_stat, code = rewrite._rewrite_test(config, f)
logger.debug('filename: {} source_stat: {} code: {}'.format(filename, source_stat, code))
exec(code, globals, locals)
behave.runner.exec_file = exec_file
# *** end this madness
def _make_compose_command(context, *args):
cmd = ['docker-compose']
for compose_file in context.docker_compose_path:
cmd.append('-f')
cmd.append(compose_file)
cmd.extend(args)
logger.info(cmd)
return cmd
def _start_system(context):
if context.docker_compose_path:
cmd = _make_compose_command(context, 'up', '--no-build', '-d')
else:
cmd = ['kubectl', 'create', '-f', context.kubernetes_dir_path]
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
def _make_compose_teardown_callback(context, services):
cmds = []
cmds.append(_make_compose_command(context, 'kill', *services))
cmds.append(_make_compose_command(context, 'rm', '-fv', *services))
def teardown_services():
for cmd in cmds:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
return teardown_services
def _run_command_in_service(context, service, command):
"""Start the specified service.
Service is started via `docker-compose run`;
command is list of strs
"""
if context.docker_compose_path:
cmd = _make_compose_command(context, 'run', '--rm', '-d', service)
cmd.extend(command)
else:
raise Exception("not implemented")
try:
# universal_newlines decodes output on Python 3.x
output = subprocess.check_output(cmd, universal_newlines=True).strip()
logger.info(output)
return output
except subprocess.CalledProcessError as ex:
logger.exception(ex.output)
raise
def _exec_command_in_container(client, container, command):
"""Run the specified command in container.
equiv of `docker exec`, command is str
"""
exec_id = client.exec_create(container, command)
output = client.exec_start(exec_id).decode('utf-8')
logger.info(output)
return output
def _get_k8s_volumes_to_delete():
# universal_newlines decodes output on Python 3.x
out = subprocess.check_output(['kubectl', 'get', 'pods', '-o', 'json'], universal_newlines=True)
j = json.loads(out)
volumes = []
for pod in j['items']:
pod_vols = pod['spec'].get('volumes', [])
for pod_vol in pod_vols:
if 'hostPath' in pod_vol:
volumes.append(pod_vol['hostPath']['path'])
return volumes
def _dump_server_logs(context, tail=None):
if context.docker_compose_path:
cmd = _make_compose_command(context, 'logs')
if tail is not None:
cmd.append('--tail={:d}'.format(tail))
subprocess.check_call(cmd, stderr=subprocess.STDOUT)
else:
pass # No current support for dumping logs under k8s
def _teardown_system(context):
cmds = []
if context.docker_compose_path:
cmds.append(_make_compose_command(context, 'kill'))
cmds.append(_make_compose_command(context, 'rm', '-fv'))
if hasattr(context, "container"):
cmds.append(['docker', "kill", context.container])
cmds.append(['docker', "rm", "-fv", "--rm-all", context.container])
_set_default_compose_path(context)
else:
cmds.append(['kubectl', 'delete', '--ignore-not-found', '-f', context.kubernetes_dir_path])
volumes = _get_k8s_volumes_to_delete()
for volume in volumes:
# TODO: the sudo thing is not very nice, but...
cmds.append(['sudo', 'rm', '-rf', volume])
cmds.append(['sudo', 'mkdir', volume])
for cmd in cmds:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
def _post_startup(context, started_all, wait_for_server):
"""Post startup actions."""
if started_all:
# let's give the whole system a while to breathe
time.sleep(float(context.config.userdata.get('breath_time', 5)))
else:
raise Exception('Server failed to start in under {s} seconds'.
format(s=wait_for_server))
def _wait_for_system(context, wait_for_server=60):
start = datetime.datetime.utcnow()
wait_till = start + datetime.timedelta(seconds=wait_for_server)
# try to wait for server to start for some time
while datetime.datetime.utcnow() < wait_till:
time.sleep(1)
started_all = False
if context.kubernetes_dir_path:
res = json.loads(subprocess.check_output(['kubectl', 'get', 'pods', '-o', 'json']))
for pod in res['items']:
status = pod.get('status', {})
conditions = status.get('conditions', [])
phase = status.get('phase', '')
if status == {}:
continue
if phase != 'Running':
continue
for condition in conditions:
if condition['type'] == 'Ready' and condition['status'] != 'True':
continue
# if we got here, then everything is running
started_all = True
break
else:
if _is_running(context):
started_all = True
break
_post_startup(context, started_all, wait_for_server)
def _wait_for_api(context, wait_for_service, check_function):
for _ in range(wait_for_service):
if check_function(context):
break
time.sleep(1)
else:
raise Exception('Timeout waiting for the API service')
def _wait_for_jobs_debug_api_service(context, wait_for_service=60):
_wait_for_api(context, wait_for_service, _is_jobs_debug_api_running)
def _wait_for_component_search_service(context, wait_for_service=60):
_wait_for_api(context, wait_for_service, _is_component_search_service_running)
def _wait_for_master_tag_list_service(context, wait_for_service=60):
_wait_for_api(context, wait_for_service, _is_master_tag_list_service_running)
def _wait_for_get_untagged_component_service(context, wait_for_service=60):
_wait_for_api(context, wait_for_service, _is_get_untagged_component_service_running)
def _restart_system(context, wait_for_server=60):
# NOTE: it does make sense to restart the local system only
if context.running_locally:
try:
_teardown_system(context)
_start_system(context)
_wait_for_system(context, wait_for_server)
except subprocess.CalledProcessError as e:
raise Exception('Failed to restart system. Command "{c}" failed:\n{o}'.
format(c=' '.join(e.cmd), o=e.output))
def _is_api_running(url, accepted_codes=None):
accepted_codes = accepted_codes or {200, 401}
try:
res = requests.get(url)
if res.status_code in accepted_codes:
return True
except requests.exceptions.ConnectionError:
pass
return False
def _is_3scale_staging_running(threescale_url, accepted_codes={200, 401}):
|
def _is_3scale_preview_running(context, accepted_codes={200, 403, 401}):
try:
res = requests.post(context.threescale_preview_url)
if res.status_code in accepted_codes:
return True
except requests.exceptions.ConnectionError:
pass
return False
def _is_backbone_api_running(backbone_api_url, accepted_codes={200}):
try:
url = '%s/api/v1/readiness' % backbone_api_url
res = requests.get(url)
if res.status_code in accepted_codes:
return True
except requests.exceptions.ConnectionError:
pass
return False
def _is_gemini_api_running(gemini_api_url, accepted_codes={200}):
try:
url = '%s/api/v1/readiness' % gemini_api_url
res = requests.get(url)
if res.status_code in accepted_codes:
return True
except requests.exceptions.ConnectionError:
pass
return False
def _is_api_running_post(url):
try:
res = requests.post(url)
if res.status_code in {200, 401}:
return True
except requests.exceptions.ConnectionError:
pass
return False
def _is_running(context):
return _is_api_running(context.coreapi_url + _API_ENDPOINT) and \
_is_api_running(context.jobs_api_url + _API_ENDPOINT) and \
_is_api_running(context.gremlin_url, {400})
def _is_jobs_debug_api_running(context):
return _is_api_running(context.jobs_api_url + _JOBS_DEBUG_API +
"/analyses-report?ecosystem=maven")
def _is_component_search_service_running(context):
return _is_api_running(context.coreapi_url + _API_ENDPOINT +
"/component-search/any-component")
def _is_master_tag_list_service_running(context):
return _is_api_running(context.coreapi_url + _API_ENDPOINT +
"/master-tags/maven")
def _is_get_untagged_component_service_running(context):
return _is_api_running_post(context.coreapi_url + _API_ENDPOINT +
"/get-next-component/maven")
def _read_boolean_setting(context, setting_name):
setting = context.config.userdata.get(setting_name, '').lower()
if setting in ('1', 'yes', 'true', 'on'):
return True
if setting in ('', '0', 'no', 'false', 'off'):
return False
msg = '{!r} is not a valid option for boolean setting {!r}'
raise ValueError(msg.format(setting, setting_name))
def _add_slash(url):
if url and not url.endswith('/'):
url += '/'
return url
def _get_api_url(context, attribute, port):
return _add_slash(context.config.userdata.get(attribute,
'http://localhost:{port}/'.format(port=port)))
def _send_json_file(endpoint, filename, custom_headers=None):
"""Send the JSON file to the selected API endpoint.
The optional custom header is used (given it is provided).
"""
headers = {'Content-Type': 'application/json',
'Accept': 'application/json'}
if custom_headers is not None:
headers.update(custom_headers)
with open(filename) as json_data:
response = requests.post(endpoint, data=json_data, headers=headers)
return response
def _check_env_for_remote_tests(env_var_name):
if os.environ.get(env_var_name):
logger.info("Note: {e} environment variable is specified, but tests are "
"still run locally\n"
"Check other values required to run tests against existing "
"deployent".format(e=env_var_name))
def _missing_api_token_warning(env_var_name):
if os.environ.get(env_var_name):
logger.info("OK: {name} environment is set and will be used as "
"authorization token".format(name=env_var_name))
else:
logger.info("Warning: the {name} environment variable is not"
" set.\n"
"Most tests that require authorization will probably fail".format(
name=env_var_name))
def _check_api_tokens_presence():
# we need RECOMMENDER_API_TOKEN or RECOMMENDER_REFRESH_TOKEN to be set
if not os.environ.get("RECOMMENDER_REFRESH_TOKEN"):
_missing_api_token_warning("RECOMMENDER_API_TOKEN")
else:
_missing_api_token_warning("RECOMMENDER_REFRESH_TOKEN")
_missing_api_token_warning("JOB_API_TOKEN")
def _check_env_var_presence_s3_db(env_var_name):
"""Check if given environment variable exist.
Check the existence of environment variable needed to connect to the
AWS S3 database.
"""
if os.environ.get(env_var_name) is None:
logger.info("Warning: the {name} environment variable is not set.\n"
"All tests that access AWS S3 database will fail\n".format(
name=env_var_name))
def _parse_int_env_var(env_var_name):
val = os.environ.get(env_var_name)
try:
return int(val)
except (TypeError, ValueError):
return None
def _read_url_from_env_var(env_var_name):
return _add_slash(os.environ.get(env_var_name, None))
def check_test_environment(context, coreapi_url):
"""Check the test environent - whether tests are run locally or in Docker."""
if context.running_locally:
logger.info("Note: integration tests are running localy via docker-compose")
if coreapi_url:
_check_env_for_remote_tests("F8A_API_URL")
_check_env_for_remote_tests("F8A_JOB_API_URL")
_check_env_for_remote_tests("F8A_GEMINI_API_URL")
else:
logger.info("Note: integration tests are running against existing deployment")
_check_api_tokens_presence()
def _running_locally(coreapi_url, jobs_api_url):
"""Check if tests are running locally."""
return not (coreapi_url and jobs_api_url)
def _get_url(context, actual, attribute_name, port):
"""Get the URL + port for the selected service."""
return actual or _get_api_url(context, attribute_name, port)
def check_token_structure(data):
"""Check the basic structure of response with access token."""
assert "token" in data
token_structure = data["token"]
assert "access_token" in token_structure
assert "token_type" in token_structure
assert "expires_in" in token_structure
def before_all(context):
"""Perform the setup before the first event."""
context.config.setup_logging()
context.start_system = _start_system
context.teardown_system = _teardown_system
context.restart_system = _restart_system
context.run_command_in_service = _run_command_in_service
context.exec_command_in_container = _exec_command_in_container
context.is_running = _is_running
context.is_jobs_debug_api_running = _is_jobs_debug_api_running
context.is_component_search_service_running = _is_component_search_service_running
context.is_master_tag_list_service_running = _is_master_tag_list_service_running
context.wait_for_master_tag_list_service = _wait_for_master_tag_list_service
context.is_get_untagged_component_service_running = _is_get_untagged_component_service_running
context.wait_for_get_untagged_component_service = _wait_for_get_untagged_component_service
context.send_json_file = _send_json_file
context.wait_for_jobs_debug_api_service = _wait_for_jobs_debug_api_service
context.wait_for_component_search_service = _wait_for_component_search_service
context.is_3scale_staging_running = _is_3scale_staging_running
context.is_3scale_preview_running = _is_3scale_preview_running
context.is_backbone_api_running = _is_backbone_api_running
context.is_gemini_api_running = _is_gemini_api_running
# Configure container logging
context.dump_logs = _read_boolean_setting(context, 'dump_logs')
tail_logs = int(context.config.userdata.get('tail_logs', 0))
dump_errors = _read_boolean_setting(context, 'dump_errors')
if tail_logs:
dump_errors = True
else:
tail_logs = 50
context.dump_errors = dump_errors
context.tail_logs = tail_logs
# Configure system under test
context.kubernetes_dir_path = context.config.userdata.get('kubernetes_dir', None)
if context.kubernetes_dir_path is not None:
context.docker_compose_path = None
else:
# If we're not running Kubernetes, use the local Docker Compose setup
_set_default_compose_path(context)
# for now, we just assume we know what compose file looks like (what services need what images)
context.images = {}
context.images['bayesian/bayesian-api'] = context.config.userdata.get(
'coreapi_server_image',
'registry.devshift.net/bayesian/bayesian-api')
context.images['bayesian/cucos-worker'] = context.config.userdata.get(
'coreapi_worker_image',
'registry.devshift.net/bayesian/cucos-worker')
core_v2_api_url = _read_url_from_env_var('F8A_API_V2_URL')
coreapi_url = _read_url_from_env_var('F8A_API_URL')
core_v2_api_url = _read_url_from_env_var('F8A_API_V2_URL')
valid_synk_token = os.environ.get("SNYK_TOKEN")
uuid = os.environ.get("REGISTERED_USER_UUID")
jobs_api_url = _read_url_from_env_var('F8A_JOB_API_URL')
gremlin_url = _read_url_from_env_var('F8A_GREMLIN_URL')
threescale_url = _read_url_from_env_var('F8A_3SCALE_URL')
threescale_preview_url = _read_url_from_env_var('F8A_THREE_SCALE_PREVIEW_URL')
backbone_api_url = _read_url_from_env_var('F8A_BACKBONE_API_URL')
service_id = _read_url_from_env_var('F8A_SERVICE_ID')
gemini_api_url = _read_url_from_env_var('F8A_GEMINI_API_URL')
license_service_url = _read_url_from_env_var('F8A_LICENSE_SERVICE_URL')
context.running_locally = _running_locally(coreapi_url, jobs_api_url)
check_test_environment(context, coreapi_url)
context.coreapi_url = _get_url(context, coreapi_url, 'coreapi_url', _FABRIC8_ANALYTICS_SERVER)
context.core_v2_api_url = core_v2_api_url
context.jobs_api_url = _get_url(context, jobs_api_url, 'jobs_api_url', _FABRIC8_ANALYTICS_JOBS)
context.gremlin_url = _get_url(context, gremlin_url, "gremlin_url", _FABRIC8_GREMLIN_SERVICE)
context.license_service_url = _get_url(context, license_service_url, 'license_service_url',
_FABRIC8_LICENSE_SERVICE)
context.core_v2_api_url = core_v2_api_url
context.threescale_url = threescale_url
context.valid_synk_token = valid_synk_token
context.uuid = uuid
context.threescale_preview_url = threescale_preview_url
context.backbone_api_url = backbone_api_url
context.service_id = service_id
context.gemini_api_url = gemini_api_url
# we can retrieve access token by using refresh/offline token
# informations needed to access S3 database from tests
_check_env_var_presence_s3_db('AWS_ACCESS_KEY_ID')
_check_env_var_presence_s3_db('AWS_SECRET_ACCESS_KEY')
_check_env_var_presence_s3_db('S3_REGION_NAME')
aws_access_key_id = os.environ.get('AWS_ACCESS_KEY_ID')
aws_secret_access_key = os.environ.get('AWS_SECRET_ACCESS_KEY')
s3_region_name = os.environ.get('S3_REGION_NAME')
deployment_prefix = os.environ.get('DEPLOYMENT_PREFIX', 'STAGE')
context.reports_bucket = os.environ.get('DEVELOPER_ANALYTICS_REPORTS_BUCKET')
context.s3interface = S3Interface(aws_access_key_id, aws_secret_access_key,
s3_region_name, deployment_prefix)
context.client = None
# timeout values can be overwritten by environment variables
stack_analysis_timeout = _parse_int_env_var('F8A_STACK_ANALYSIS_TIMEOUT')
component_analysis_timeout = _parse_int_env_var('F8A_COMPONENT_ANALYSIS_TIMEOUT')
context.stack_analysis_timeout = stack_analysis_timeout or _DEFAULT_STACK_ANALYSIS_TIMEOUT
context.component_analysis_timeout = component_analysis_timeout \
or _DEFAULT_COMPONENT_ANALYSIS_TIMEOUT
if context.running_locally:
context.client = docker.AutoVersionClient()
for desired, actual in context.images.items():
desired = 'registry.devshift.net/' + desired
if desired != actual:
context.client.tag(actual, desired, force=True)
# Specify the analyses checked for when looking for "complete" results
def _get_expected_component_analyses(ecosystem):
common = context.EXPECTED_COMPONENT_ANALYSES
specific = context.ECOSYSTEM_DEPENDENT_ANALYSES.get(ecosystem, set())
return common | specific
context.get_expected_component_analyses = _get_expected_component_analyses
def _compare_analysis_sets(actual, expected):
unreliable = context.UNRELIABLE_ANALYSES
missing = expected - actual - unreliable
unexpected = actual - expected - unreliable
return missing, unexpected
context.compare_analysis_sets = _compare_analysis_sets
context.EXPECTED_COMPONENT_ANALYSES = {
'metadata', 'source_licenses', 'digests',
'dependency_snapshot', 'code_metrics'
# The follower workers are currently disabled by default:
# 'static_analysis', 'binary_data', 'languages', 'crypto_algorithms'
}
# Analyses that are only executed for particular language ecosystems
context.ECOSYSTEM_DEPENDENT_ANALYSES = dict()
# Results that use a nonstandard format, so we don't check for the
# standard "status", "summary", and "details" keys
context.NONSTANDARD_ANALYSIS_FORMATS = set()
# Analyses that are just plain unreliable and so need to be excluded from
# consideration when determining whether or not an analysis is complete
context.UNRELIABLE_ANALYSES = {
'github_details', # if no github api token provided
'security_issues' # needs Snyk vulndb in S3
}
@capture
def before_scenario(context, scenario):
"""Perform the setup before each scenario is run."""
context.resource_manager = contextlib.ExitStack()
@capture
def after_scenario(context, scenario):
"""Perform the cleanup after each scenario is run."""
if context.running_locally:
if context.dump_logs or context.dump_errors and scenario.status == "failed":
try:
_dump_server_logs(context, int(context.tail_logs))
except subprocess.CalledProcessError as e:
raise Exception('Failed to dump server logs. Command "{c}" failed:\n{o}'.
format(c=' '.join(e.cmd), o=e.output))
# Clean up resources (which may destroy some container logs)
context.resource_manager.close()
@capture
def after_all(context):
"""Perform the cleanup after the last event."""
if context.running_locally:
try:
_teardown_system(context)
except subprocess.CalledProcessError as e:
raise Exception('Failed to teardown system. Command "{c}" failed:\n{o}'.
format(c=' '.join(e.cmd), o=e.output))
| try:
res = requests.post(threescale_url)
if res.status_code in accepted_codes:
return True
except requests.exceptions.ConnectionError:
pass
return False | identifier_body |
environment.py | """Module with code to be run before and after certain events during the testing."""
import json
import datetime
import subprocess
import os.path
import contextlib
from behave.log_capture import capture
import docker
import requests
import time
from src.s3interface import S3Interface
import logging
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logger = logging.getLogger(__file__)
_THIS_DIR = os.path.dirname(os.path.abspath(__file__))
_REPO_DIR = os.path.dirname(os.path.dirname(_THIS_DIR))
# The following API endpoint is used to check if the system is started
_API_ENDPOINT = 'api/v1'
# The following endpoint is used to get the access token from OSIO AUTH service
_AUTH_ENDPOINT = "/api/token/refresh"
# Ports used by various services
_FABRIC8_ANALYTICS_SERVER = 32000
_FABRIC8_ANALYTICS_JOBS = 34000
_FABRIC8_GREMLIN_SERVICE = 80
_FABRIC8_LICENSE_SERVICE = 80
# Endpoint for jobs debug API
_JOBS_DEBUG_API = _API_ENDPOINT + "/debug"
# Default timeout values for the stack analysis and component analysis endpoints
_DEFAULT_STACK_ANALYSIS_TIMEOUT = 1200
_DEFAULT_COMPONENT_ANALYSIS_TIMEOUT = 1200
def _make_compose_name(suffix='.yml'):
return os.path.join(_REPO_DIR, 'docker-compose' + suffix)
def _set_default_compose_path(context):
base_compose = _make_compose_name()
test_specific_compose = _make_compose_name(".integration-tests.yml")
# Extra containers are added as needed by integration setup commands
context.docker_compose_path = [base_compose, test_specific_compose]
# WARNING: make sure behave uses pytest improved asserts
# Behave runner uses behave.runner.exec_file function to read, compile
# and exec code of environment file and step files *in this order*.
# Therefore we provide a new implementation here, which uses pytest's
# _pytest.assertion.rewrite to rewrite the bytecode with pytest's
# improved asserts.
# This means that when behave tries to load steps, it will use our exec_file.
# => SUCCESS
# Don't ask how long it took me to figure this out.
import behave.runner
def exec_file(filename, globals=None, locals=None):
"""Execute the specified file, optionaly setup its context by using globals and locals."""
if globals is None:
globals = {}
if locals is None:
locals = globals
locals['__file__'] = filename
from py import path
from _pytest import config
from _pytest.assertion import rewrite
f = path.local(filename)
config = config._prepareconfig([], [])
source_stat, code = rewrite._rewrite_test(config, f)
logger.debug('filename: {} source_stat: {} code: {}'.format(filename, source_stat, code))
exec(code, globals, locals)
behave.runner.exec_file = exec_file
# *** end this madness
def _make_compose_command(context, *args):
cmd = ['docker-compose']
for compose_file in context.docker_compose_path:
cmd.append('-f')
cmd.append(compose_file)
cmd.extend(args)
logger.info(cmd)
return cmd
def _start_system(context):
if context.docker_compose_path:
cmd = _make_compose_command(context, 'up', '--no-build', '-d')
else:
cmd = ['kubectl', 'create', '-f', context.kubernetes_dir_path]
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
def _make_compose_teardown_callback(context, services):
cmds = []
cmds.append(_make_compose_command(context, 'kill', *services))
cmds.append(_make_compose_command(context, 'rm', '-fv', *services))
def teardown_services():
for cmd in cmds:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
return teardown_services
def _run_command_in_service(context, service, command):
"""Start the specified service.
Service is started via `docker-compose run`;
command is list of strs
"""
if context.docker_compose_path:
cmd = _make_compose_command(context, 'run', '--rm', '-d', service)
cmd.extend(command)
else:
raise Exception("not implemented")
try:
# universal_newlines decodes output on Python 3.x
output = subprocess.check_output(cmd, universal_newlines=True).strip()
logger.info(output)
return output
except subprocess.CalledProcessError as ex:
logger.exception(ex.output)
raise
def _exec_command_in_container(client, container, command):
"""Run the specified command in container.
equiv of `docker exec`, command is str
"""
exec_id = client.exec_create(container, command)
output = client.exec_start(exec_id).decode('utf-8')
logger.info(output)
return output
def _get_k8s_volumes_to_delete():
# universal_newlines decodes output on Python 3.x
out = subprocess.check_output(['kubectl', 'get', 'pods', '-o', 'json'], universal_newlines=True)
j = json.loads(out)
volumes = []
for pod in j['items']:
pod_vols = pod['spec'].get('volumes', [])
for pod_vol in pod_vols:
if 'hostPath' in pod_vol:
volumes.append(pod_vol['hostPath']['path'])
return volumes
def _dump_server_logs(context, tail=None):
if context.docker_compose_path:
cmd = _make_compose_command(context, 'logs')
if tail is not None:
cmd.append('--tail={:d}'.format(tail))
subprocess.check_call(cmd, stderr=subprocess.STDOUT)
else:
pass # No current support for dumping logs under k8s
def _teardown_system(context):
cmds = []
if context.docker_compose_path:
cmds.append(_make_compose_command(context, 'kill'))
cmds.append(_make_compose_command(context, 'rm', '-fv'))
if hasattr(context, "container"):
cmds.append(['docker', "kill", context.container])
cmds.append(['docker', "rm", "-fv", "--rm-all", context.container])
_set_default_compose_path(context)
else:
cmds.append(['kubectl', 'delete', '--ignore-not-found', '-f', context.kubernetes_dir_path])
volumes = _get_k8s_volumes_to_delete()
for volume in volumes:
# TODO: the sudo thing is not very nice, but...
cmds.append(['sudo', 'rm', '-rf', volume])
cmds.append(['sudo', 'mkdir', volume])
for cmd in cmds:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
def _post_startup(context, started_all, wait_for_server):
"""Post startup actions."""
if started_all:
# let's give the whole system a while to breathe
time.sleep(float(context.config.userdata.get('breath_time', 5)))
else:
raise Exception('Server failed to start in under {s} seconds'.
format(s=wait_for_server))
def _wait_for_system(context, wait_for_server=60):
start = datetime.datetime.utcnow()
wait_till = start + datetime.timedelta(seconds=wait_for_server)
# try to wait for server to start for some time
while datetime.datetime.utcnow() < wait_till:
time.sleep(1)
started_all = False
if context.kubernetes_dir_path:
res = json.loads(subprocess.check_output(['kubectl', 'get', 'pods', '-o', 'json']))
for pod in res['items']:
status = pod.get('status', {})
conditions = status.get('conditions', [])
phase = status.get('phase', '')
if status == {}:
continue
if phase != 'Running':
continue
for condition in conditions:
if condition['type'] == 'Ready' and condition['status'] != 'True':
continue
# if we got here, then everything is running
started_all = True
break
else:
if _is_running(context):
started_all = True
break
_post_startup(context, started_all, wait_for_server)
def _wait_for_api(context, wait_for_service, check_function):
for _ in range(wait_for_service):
if check_function(context):
break
time.sleep(1)
else:
raise Exception('Timeout waiting for the API service')
def _wait_for_jobs_debug_api_service(context, wait_for_service=60):
_wait_for_api(context, wait_for_service, _is_jobs_debug_api_running)
def _wait_for_component_search_service(context, wait_for_service=60):
_wait_for_api(context, wait_for_service, _is_component_search_service_running)
def _wait_for_master_tag_list_service(context, wait_for_service=60):
_wait_for_api(context, wait_for_service, _is_master_tag_list_service_running)
def _wait_for_get_untagged_component_service(context, wait_for_service=60):
_wait_for_api(context, wait_for_service, _is_get_untagged_component_service_running)
def _restart_system(context, wait_for_server=60):
# NOTE: it does make sense to restart the local system only
if context.running_locally:
try:
_teardown_system(context)
_start_system(context)
_wait_for_system(context, wait_for_server)
except subprocess.CalledProcessError as e:
raise Exception('Failed to restart system. Command "{c}" failed:\n{o}'.
format(c=' '.join(e.cmd), o=e.output))
def _is_api_running(url, accepted_codes=None):
accepted_codes = accepted_codes or {200, 401}
try:
res = requests.get(url)
if res.status_code in accepted_codes:
return True
except requests.exceptions.ConnectionError:
pass
return False
def _is_3scale_staging_running(threescale_url, accepted_codes={200, 401}):
try:
res = requests.post(threescale_url)
if res.status_code in accepted_codes:
return True
except requests.exceptions.ConnectionError:
pass
return False
def _is_3scale_preview_running(context, accepted_codes={200, 403, 401}):
try:
res = requests.post(context.threescale_preview_url)
if res.status_code in accepted_codes:
return True
except requests.exceptions.ConnectionError:
pass
return False
def _is_backbone_api_running(backbone_api_url, accepted_codes={200}):
try:
url = '%s/api/v1/readiness' % backbone_api_url
res = requests.get(url)
if res.status_code in accepted_codes:
return True
except requests.exceptions.ConnectionError:
pass
return False
def _is_gemini_api_running(gemini_api_url, accepted_codes={200}):
try:
url = '%s/api/v1/readiness' % gemini_api_url
res = requests.get(url)
if res.status_code in accepted_codes:
return True
except requests.exceptions.ConnectionError:
pass
return False
def _is_api_running_post(url):
try:
res = requests.post(url)
if res.status_code in {200, 401}:
return True
except requests.exceptions.ConnectionError:
pass
return False
def _is_running(context):
return _is_api_running(context.coreapi_url + _API_ENDPOINT) and \
_is_api_running(context.jobs_api_url + _API_ENDPOINT) and \
_is_api_running(context.gremlin_url, {400})
def _is_jobs_debug_api_running(context):
return _is_api_running(context.jobs_api_url + _JOBS_DEBUG_API +
"/analyses-report?ecosystem=maven")
def _is_component_search_service_running(context):
return _is_api_running(context.coreapi_url + _API_ENDPOINT +
"/component-search/any-component")
def _is_master_tag_list_service_running(context):
return _is_api_running(context.coreapi_url + _API_ENDPOINT +
"/master-tags/maven")
def _is_get_untagged_component_service_running(context):
return _is_api_running_post(context.coreapi_url + _API_ENDPOINT +
"/get-next-component/maven")
def _read_boolean_setting(context, setting_name):
setting = context.config.userdata.get(setting_name, '').lower()
if setting in ('1', 'yes', 'true', 'on'):
return True
if setting in ('', '0', 'no', 'false', 'off'):
return False
msg = '{!r} is not a valid option for boolean setting {!r}'
raise ValueError(msg.format(setting, setting_name))
def _add_slash(url):
if url and not url.endswith('/'):
url += '/'
return url
def _get_api_url(context, attribute, port):
return _add_slash(context.config.userdata.get(attribute,
'http://localhost:{port}/'.format(port=port)))
def _send_json_file(endpoint, filename, custom_headers=None):
"""Send the JSON file to the selected API endpoint.
The optional custom header is used (given it is provided).
"""
headers = {'Content-Type': 'application/json',
'Accept': 'application/json'}
if custom_headers is not None:
headers.update(custom_headers)
with open(filename) as json_data:
response = requests.post(endpoint, data=json_data, headers=headers)
return response
def _check_env_for_remote_tests(env_var_name):
if os.environ.get(env_var_name):
logger.info("Note: {e} environment variable is specified, but tests are "
"still run locally\n"
"Check other values required to run tests against existing "
"deployent".format(e=env_var_name)) |
def _missing_api_token_warning(env_var_name):
if os.environ.get(env_var_name):
logger.info("OK: {name} environment is set and will be used as "
"authorization token".format(name=env_var_name))
else:
logger.info("Warning: the {name} environment variable is not"
" set.\n"
"Most tests that require authorization will probably fail".format(
name=env_var_name))
def _check_api_tokens_presence():
# we need RECOMMENDER_API_TOKEN or RECOMMENDER_REFRESH_TOKEN to be set
if not os.environ.get("RECOMMENDER_REFRESH_TOKEN"):
_missing_api_token_warning("RECOMMENDER_API_TOKEN")
else:
_missing_api_token_warning("RECOMMENDER_REFRESH_TOKEN")
_missing_api_token_warning("JOB_API_TOKEN")
def _check_env_var_presence_s3_db(env_var_name):
"""Check if given environment variable exist.
Check the existence of environment variable needed to connect to the
AWS S3 database.
"""
if os.environ.get(env_var_name) is None:
logger.info("Warning: the {name} environment variable is not set.\n"
"All tests that access AWS S3 database will fail\n".format(
name=env_var_name))
def _parse_int_env_var(env_var_name):
val = os.environ.get(env_var_name)
try:
return int(val)
except (TypeError, ValueError):
return None
def _read_url_from_env_var(env_var_name):
return _add_slash(os.environ.get(env_var_name, None))
def check_test_environment(context, coreapi_url):
"""Check the test environent - whether tests are run locally or in Docker."""
if context.running_locally:
logger.info("Note: integration tests are running localy via docker-compose")
if coreapi_url:
_check_env_for_remote_tests("F8A_API_URL")
_check_env_for_remote_tests("F8A_JOB_API_URL")
_check_env_for_remote_tests("F8A_GEMINI_API_URL")
else:
logger.info("Note: integration tests are running against existing deployment")
_check_api_tokens_presence()
def _running_locally(coreapi_url, jobs_api_url):
"""Check if tests are running locally."""
return not (coreapi_url and jobs_api_url)
def _get_url(context, actual, attribute_name, port):
"""Get the URL + port for the selected service."""
return actual or _get_api_url(context, attribute_name, port)
def check_token_structure(data):
"""Check the basic structure of response with access token."""
assert "token" in data
token_structure = data["token"]
assert "access_token" in token_structure
assert "token_type" in token_structure
assert "expires_in" in token_structure
def before_all(context):
"""Perform the setup before the first event."""
context.config.setup_logging()
context.start_system = _start_system
context.teardown_system = _teardown_system
context.restart_system = _restart_system
context.run_command_in_service = _run_command_in_service
context.exec_command_in_container = _exec_command_in_container
context.is_running = _is_running
context.is_jobs_debug_api_running = _is_jobs_debug_api_running
context.is_component_search_service_running = _is_component_search_service_running
context.is_master_tag_list_service_running = _is_master_tag_list_service_running
context.wait_for_master_tag_list_service = _wait_for_master_tag_list_service
context.is_get_untagged_component_service_running = _is_get_untagged_component_service_running
context.wait_for_get_untagged_component_service = _wait_for_get_untagged_component_service
context.send_json_file = _send_json_file
context.wait_for_jobs_debug_api_service = _wait_for_jobs_debug_api_service
context.wait_for_component_search_service = _wait_for_component_search_service
context.is_3scale_staging_running = _is_3scale_staging_running
context.is_3scale_preview_running = _is_3scale_preview_running
context.is_backbone_api_running = _is_backbone_api_running
context.is_gemini_api_running = _is_gemini_api_running
# Configure container logging
context.dump_logs = _read_boolean_setting(context, 'dump_logs')
tail_logs = int(context.config.userdata.get('tail_logs', 0))
dump_errors = _read_boolean_setting(context, 'dump_errors')
if tail_logs:
dump_errors = True
else:
tail_logs = 50
context.dump_errors = dump_errors
context.tail_logs = tail_logs
# Configure system under test
context.kubernetes_dir_path = context.config.userdata.get('kubernetes_dir', None)
if context.kubernetes_dir_path is not None:
context.docker_compose_path = None
else:
# If we're not running Kubernetes, use the local Docker Compose setup
_set_default_compose_path(context)
# for now, we just assume we know what compose file looks like (what services need what images)
context.images = {}
context.images['bayesian/bayesian-api'] = context.config.userdata.get(
'coreapi_server_image',
'registry.devshift.net/bayesian/bayesian-api')
context.images['bayesian/cucos-worker'] = context.config.userdata.get(
'coreapi_worker_image',
'registry.devshift.net/bayesian/cucos-worker')
core_v2_api_url = _read_url_from_env_var('F8A_API_V2_URL')
coreapi_url = _read_url_from_env_var('F8A_API_URL')
core_v2_api_url = _read_url_from_env_var('F8A_API_V2_URL')
valid_synk_token = os.environ.get("SNYK_TOKEN")
uuid = os.environ.get("REGISTERED_USER_UUID")
jobs_api_url = _read_url_from_env_var('F8A_JOB_API_URL')
gremlin_url = _read_url_from_env_var('F8A_GREMLIN_URL')
threescale_url = _read_url_from_env_var('F8A_3SCALE_URL')
threescale_preview_url = _read_url_from_env_var('F8A_THREE_SCALE_PREVIEW_URL')
backbone_api_url = _read_url_from_env_var('F8A_BACKBONE_API_URL')
service_id = _read_url_from_env_var('F8A_SERVICE_ID')
gemini_api_url = _read_url_from_env_var('F8A_GEMINI_API_URL')
license_service_url = _read_url_from_env_var('F8A_LICENSE_SERVICE_URL')
context.running_locally = _running_locally(coreapi_url, jobs_api_url)
check_test_environment(context, coreapi_url)
context.coreapi_url = _get_url(context, coreapi_url, 'coreapi_url', _FABRIC8_ANALYTICS_SERVER)
context.core_v2_api_url = core_v2_api_url
context.jobs_api_url = _get_url(context, jobs_api_url, 'jobs_api_url', _FABRIC8_ANALYTICS_JOBS)
context.gremlin_url = _get_url(context, gremlin_url, "gremlin_url", _FABRIC8_GREMLIN_SERVICE)
context.license_service_url = _get_url(context, license_service_url, 'license_service_url',
_FABRIC8_LICENSE_SERVICE)
context.core_v2_api_url = core_v2_api_url
context.threescale_url = threescale_url
context.valid_synk_token = valid_synk_token
context.uuid = uuid
context.threescale_preview_url = threescale_preview_url
context.backbone_api_url = backbone_api_url
context.service_id = service_id
context.gemini_api_url = gemini_api_url
# we can retrieve access token by using refresh/offline token
# informations needed to access S3 database from tests
_check_env_var_presence_s3_db('AWS_ACCESS_KEY_ID')
_check_env_var_presence_s3_db('AWS_SECRET_ACCESS_KEY')
_check_env_var_presence_s3_db('S3_REGION_NAME')
aws_access_key_id = os.environ.get('AWS_ACCESS_KEY_ID')
aws_secret_access_key = os.environ.get('AWS_SECRET_ACCESS_KEY')
s3_region_name = os.environ.get('S3_REGION_NAME')
deployment_prefix = os.environ.get('DEPLOYMENT_PREFIX', 'STAGE')
context.reports_bucket = os.environ.get('DEVELOPER_ANALYTICS_REPORTS_BUCKET')
context.s3interface = S3Interface(aws_access_key_id, aws_secret_access_key,
s3_region_name, deployment_prefix)
context.client = None
# timeout values can be overwritten by environment variables
stack_analysis_timeout = _parse_int_env_var('F8A_STACK_ANALYSIS_TIMEOUT')
component_analysis_timeout = _parse_int_env_var('F8A_COMPONENT_ANALYSIS_TIMEOUT')
context.stack_analysis_timeout = stack_analysis_timeout or _DEFAULT_STACK_ANALYSIS_TIMEOUT
context.component_analysis_timeout = component_analysis_timeout \
or _DEFAULT_COMPONENT_ANALYSIS_TIMEOUT
if context.running_locally:
context.client = docker.AutoVersionClient()
for desired, actual in context.images.items():
desired = 'registry.devshift.net/' + desired
if desired != actual:
context.client.tag(actual, desired, force=True)
# Specify the analyses checked for when looking for "complete" results
def _get_expected_component_analyses(ecosystem):
common = context.EXPECTED_COMPONENT_ANALYSES
specific = context.ECOSYSTEM_DEPENDENT_ANALYSES.get(ecosystem, set())
return common | specific
context.get_expected_component_analyses = _get_expected_component_analyses
def _compare_analysis_sets(actual, expected):
unreliable = context.UNRELIABLE_ANALYSES
missing = expected - actual - unreliable
unexpected = actual - expected - unreliable
return missing, unexpected
context.compare_analysis_sets = _compare_analysis_sets
context.EXPECTED_COMPONENT_ANALYSES = {
'metadata', 'source_licenses', 'digests',
'dependency_snapshot', 'code_metrics'
# The follower workers are currently disabled by default:
# 'static_analysis', 'binary_data', 'languages', 'crypto_algorithms'
}
# Analyses that are only executed for particular language ecosystems
context.ECOSYSTEM_DEPENDENT_ANALYSES = dict()
# Results that use a nonstandard format, so we don't check for the
# standard "status", "summary", and "details" keys
context.NONSTANDARD_ANALYSIS_FORMATS = set()
# Analyses that are just plain unreliable and so need to be excluded from
# consideration when determining whether or not an analysis is complete
context.UNRELIABLE_ANALYSES = {
'github_details', # if no github api token provided
'security_issues' # needs Snyk vulndb in S3
}
@capture
def before_scenario(context, scenario):
"""Perform the setup before each scenario is run."""
context.resource_manager = contextlib.ExitStack()
@capture
def after_scenario(context, scenario):
"""Perform the cleanup after each scenario is run."""
if context.running_locally:
if context.dump_logs or context.dump_errors and scenario.status == "failed":
try:
_dump_server_logs(context, int(context.tail_logs))
except subprocess.CalledProcessError as e:
raise Exception('Failed to dump server logs. Command "{c}" failed:\n{o}'.
format(c=' '.join(e.cmd), o=e.output))
# Clean up resources (which may destroy some container logs)
context.resource_manager.close()
@capture
def after_all(context):
"""Perform the cleanup after the last event."""
if context.running_locally:
try:
_teardown_system(context)
except subprocess.CalledProcessError as e:
raise Exception('Failed to teardown system. Command "{c}" failed:\n{o}'.
format(c=' '.join(e.cmd), o=e.output)) | random_line_split | |
environment.py | """Module with code to be run before and after certain events during the testing."""
import json
import datetime
import subprocess
import os.path
import contextlib
from behave.log_capture import capture
import docker
import requests
import time
from src.s3interface import S3Interface
import logging
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logger = logging.getLogger(__file__)
_THIS_DIR = os.path.dirname(os.path.abspath(__file__))
_REPO_DIR = os.path.dirname(os.path.dirname(_THIS_DIR))
# The following API endpoint is used to check if the system is started
_API_ENDPOINT = 'api/v1'
# The following endpoint is used to get the access token from OSIO AUTH service
_AUTH_ENDPOINT = "/api/token/refresh"
# Ports used by various services
_FABRIC8_ANALYTICS_SERVER = 32000
_FABRIC8_ANALYTICS_JOBS = 34000
_FABRIC8_GREMLIN_SERVICE = 80
_FABRIC8_LICENSE_SERVICE = 80
# Endpoint for jobs debug API
_JOBS_DEBUG_API = _API_ENDPOINT + "/debug"
# Default timeout values for the stack analysis and component analysis endpoints
_DEFAULT_STACK_ANALYSIS_TIMEOUT = 1200
_DEFAULT_COMPONENT_ANALYSIS_TIMEOUT = 1200
def _make_compose_name(suffix='.yml'):
return os.path.join(_REPO_DIR, 'docker-compose' + suffix)
def _set_default_compose_path(context):
base_compose = _make_compose_name()
test_specific_compose = _make_compose_name(".integration-tests.yml")
# Extra containers are added as needed by integration setup commands
context.docker_compose_path = [base_compose, test_specific_compose]
# WARNING: make sure behave uses pytest improved asserts
# Behave runner uses behave.runner.exec_file function to read, compile
# and exec code of environment file and step files *in this order*.
# Therefore we provide a new implementation here, which uses pytest's
# _pytest.assertion.rewrite to rewrite the bytecode with pytest's
# improved asserts.
# This means that when behave tries to load steps, it will use our exec_file.
# => SUCCESS
# Don't ask how long it took me to figure this out.
import behave.runner
def exec_file(filename, globals=None, locals=None):
"""Execute the specified file, optionaly setup its context by using globals and locals."""
if globals is None:
globals = {}
if locals is None:
locals = globals
locals['__file__'] = filename
from py import path
from _pytest import config
from _pytest.assertion import rewrite
f = path.local(filename)
config = config._prepareconfig([], [])
source_stat, code = rewrite._rewrite_test(config, f)
logger.debug('filename: {} source_stat: {} code: {}'.format(filename, source_stat, code))
exec(code, globals, locals)
behave.runner.exec_file = exec_file
# *** end this madness
def _make_compose_command(context, *args):
cmd = ['docker-compose']
for compose_file in context.docker_compose_path:
cmd.append('-f')
cmd.append(compose_file)
cmd.extend(args)
logger.info(cmd)
return cmd
def _start_system(context):
if context.docker_compose_path:
cmd = _make_compose_command(context, 'up', '--no-build', '-d')
else:
cmd = ['kubectl', 'create', '-f', context.kubernetes_dir_path]
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
def _make_compose_teardown_callback(context, services):
cmds = []
cmds.append(_make_compose_command(context, 'kill', *services))
cmds.append(_make_compose_command(context, 'rm', '-fv', *services))
def teardown_services():
for cmd in cmds:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
return teardown_services
def _run_command_in_service(context, service, command):
"""Start the specified service.
Service is started via `docker-compose run`;
command is list of strs
"""
if context.docker_compose_path:
cmd = _make_compose_command(context, 'run', '--rm', '-d', service)
cmd.extend(command)
else:
raise Exception("not implemented")
try:
# universal_newlines decodes output on Python 3.x
output = subprocess.check_output(cmd, universal_newlines=True).strip()
logger.info(output)
return output
except subprocess.CalledProcessError as ex:
logger.exception(ex.output)
raise
def _exec_command_in_container(client, container, command):
"""Run the specified command in container.
equiv of `docker exec`, command is str
"""
exec_id = client.exec_create(container, command)
output = client.exec_start(exec_id).decode('utf-8')
logger.info(output)
return output
def _get_k8s_volumes_to_delete():
# universal_newlines decodes output on Python 3.x
out = subprocess.check_output(['kubectl', 'get', 'pods', '-o', 'json'], universal_newlines=True)
j = json.loads(out)
volumes = []
for pod in j['items']:
pod_vols = pod['spec'].get('volumes', [])
for pod_vol in pod_vols:
if 'hostPath' in pod_vol:
volumes.append(pod_vol['hostPath']['path'])
return volumes
def _dump_server_logs(context, tail=None):
if context.docker_compose_path:
cmd = _make_compose_command(context, 'logs')
if tail is not None:
cmd.append('--tail={:d}'.format(tail))
subprocess.check_call(cmd, stderr=subprocess.STDOUT)
else:
pass # No current support for dumping logs under k8s
def _teardown_system(context):
cmds = []
if context.docker_compose_path:
cmds.append(_make_compose_command(context, 'kill'))
cmds.append(_make_compose_command(context, 'rm', '-fv'))
if hasattr(context, "container"):
cmds.append(['docker', "kill", context.container])
cmds.append(['docker', "rm", "-fv", "--rm-all", context.container])
_set_default_compose_path(context)
else:
cmds.append(['kubectl', 'delete', '--ignore-not-found', '-f', context.kubernetes_dir_path])
volumes = _get_k8s_volumes_to_delete()
for volume in volumes:
# TODO: the sudo thing is not very nice, but...
cmds.append(['sudo', 'rm', '-rf', volume])
cmds.append(['sudo', 'mkdir', volume])
for cmd in cmds:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
def _post_startup(context, started_all, wait_for_server):
"""Post startup actions."""
if started_all:
# let's give the whole system a while to breathe
time.sleep(float(context.config.userdata.get('breath_time', 5)))
else:
raise Exception('Server failed to start in under {s} seconds'.
format(s=wait_for_server))
def _wait_for_system(context, wait_for_server=60):
start = datetime.datetime.utcnow()
wait_till = start + datetime.timedelta(seconds=wait_for_server)
# try to wait for server to start for some time
while datetime.datetime.utcnow() < wait_till:
time.sleep(1)
started_all = False
if context.kubernetes_dir_path:
res = json.loads(subprocess.check_output(['kubectl', 'get', 'pods', '-o', 'json']))
for pod in res['items']:
status = pod.get('status', {})
conditions = status.get('conditions', [])
phase = status.get('phase', '')
if status == {}:
continue
if phase != 'Running':
continue
for condition in conditions:
if condition['type'] == 'Ready' and condition['status'] != 'True':
continue
# if we got here, then everything is running
started_all = True
break
else:
if _is_running(context):
started_all = True
break
_post_startup(context, started_all, wait_for_server)
def _wait_for_api(context, wait_for_service, check_function):
for _ in range(wait_for_service):
if check_function(context):
break
time.sleep(1)
else:
raise Exception('Timeout waiting for the API service')
def _wait_for_jobs_debug_api_service(context, wait_for_service=60):
_wait_for_api(context, wait_for_service, _is_jobs_debug_api_running)
def _wait_for_component_search_service(context, wait_for_service=60):
_wait_for_api(context, wait_for_service, _is_component_search_service_running)
def _wait_for_master_tag_list_service(context, wait_for_service=60):
_wait_for_api(context, wait_for_service, _is_master_tag_list_service_running)
def _wait_for_get_untagged_component_service(context, wait_for_service=60):
_wait_for_api(context, wait_for_service, _is_get_untagged_component_service_running)
def _restart_system(context, wait_for_server=60):
# NOTE: it does make sense to restart the local system only
if context.running_locally:
try:
_teardown_system(context)
_start_system(context)
_wait_for_system(context, wait_for_server)
except subprocess.CalledProcessError as e:
raise Exception('Failed to restart system. Command "{c}" failed:\n{o}'.
format(c=' '.join(e.cmd), o=e.output))
def _is_api_running(url, accepted_codes=None):
accepted_codes = accepted_codes or {200, 401}
try:
res = requests.get(url)
if res.status_code in accepted_codes:
return True
except requests.exceptions.ConnectionError:
pass
return False
def _is_3scale_staging_running(threescale_url, accepted_codes={200, 401}):
try:
res = requests.post(threescale_url)
if res.status_code in accepted_codes:
return True
except requests.exceptions.ConnectionError:
pass
return False
def _is_3scale_preview_running(context, accepted_codes={200, 403, 401}):
try:
res = requests.post(context.threescale_preview_url)
if res.status_code in accepted_codes:
return True
except requests.exceptions.ConnectionError:
pass
return False
def _is_backbone_api_running(backbone_api_url, accepted_codes={200}):
try:
url = '%s/api/v1/readiness' % backbone_api_url
res = requests.get(url)
if res.status_code in accepted_codes:
return True
except requests.exceptions.ConnectionError:
pass
return False
def | (gemini_api_url, accepted_codes={200}):
try:
url = '%s/api/v1/readiness' % gemini_api_url
res = requests.get(url)
if res.status_code in accepted_codes:
return True
except requests.exceptions.ConnectionError:
pass
return False
def _is_api_running_post(url):
try:
res = requests.post(url)
if res.status_code in {200, 401}:
return True
except requests.exceptions.ConnectionError:
pass
return False
def _is_running(context):
return _is_api_running(context.coreapi_url + _API_ENDPOINT) and \
_is_api_running(context.jobs_api_url + _API_ENDPOINT) and \
_is_api_running(context.gremlin_url, {400})
def _is_jobs_debug_api_running(context):
return _is_api_running(context.jobs_api_url + _JOBS_DEBUG_API +
"/analyses-report?ecosystem=maven")
def _is_component_search_service_running(context):
return _is_api_running(context.coreapi_url + _API_ENDPOINT +
"/component-search/any-component")
def _is_master_tag_list_service_running(context):
return _is_api_running(context.coreapi_url + _API_ENDPOINT +
"/master-tags/maven")
def _is_get_untagged_component_service_running(context):
return _is_api_running_post(context.coreapi_url + _API_ENDPOINT +
"/get-next-component/maven")
def _read_boolean_setting(context, setting_name):
setting = context.config.userdata.get(setting_name, '').lower()
if setting in ('1', 'yes', 'true', 'on'):
return True
if setting in ('', '0', 'no', 'false', 'off'):
return False
msg = '{!r} is not a valid option for boolean setting {!r}'
raise ValueError(msg.format(setting, setting_name))
def _add_slash(url):
if url and not url.endswith('/'):
url += '/'
return url
def _get_api_url(context, attribute, port):
return _add_slash(context.config.userdata.get(attribute,
'http://localhost:{port}/'.format(port=port)))
def _send_json_file(endpoint, filename, custom_headers=None):
"""Send the JSON file to the selected API endpoint.
The optional custom header is used (given it is provided).
"""
headers = {'Content-Type': 'application/json',
'Accept': 'application/json'}
if custom_headers is not None:
headers.update(custom_headers)
with open(filename) as json_data:
response = requests.post(endpoint, data=json_data, headers=headers)
return response
def _check_env_for_remote_tests(env_var_name):
if os.environ.get(env_var_name):
logger.info("Note: {e} environment variable is specified, but tests are "
"still run locally\n"
"Check other values required to run tests against existing "
"deployent".format(e=env_var_name))
def _missing_api_token_warning(env_var_name):
if os.environ.get(env_var_name):
logger.info("OK: {name} environment is set and will be used as "
"authorization token".format(name=env_var_name))
else:
logger.info("Warning: the {name} environment variable is not"
" set.\n"
"Most tests that require authorization will probably fail".format(
name=env_var_name))
def _check_api_tokens_presence():
# we need RECOMMENDER_API_TOKEN or RECOMMENDER_REFRESH_TOKEN to be set
if not os.environ.get("RECOMMENDER_REFRESH_TOKEN"):
_missing_api_token_warning("RECOMMENDER_API_TOKEN")
else:
_missing_api_token_warning("RECOMMENDER_REFRESH_TOKEN")
_missing_api_token_warning("JOB_API_TOKEN")
def _check_env_var_presence_s3_db(env_var_name):
"""Check if given environment variable exist.
Check the existence of environment variable needed to connect to the
AWS S3 database.
"""
if os.environ.get(env_var_name) is None:
logger.info("Warning: the {name} environment variable is not set.\n"
"All tests that access AWS S3 database will fail\n".format(
name=env_var_name))
def _parse_int_env_var(env_var_name):
val = os.environ.get(env_var_name)
try:
return int(val)
except (TypeError, ValueError):
return None
def _read_url_from_env_var(env_var_name):
return _add_slash(os.environ.get(env_var_name, None))
def check_test_environment(context, coreapi_url):
"""Check the test environent - whether tests are run locally or in Docker."""
if context.running_locally:
logger.info("Note: integration tests are running localy via docker-compose")
if coreapi_url:
_check_env_for_remote_tests("F8A_API_URL")
_check_env_for_remote_tests("F8A_JOB_API_URL")
_check_env_for_remote_tests("F8A_GEMINI_API_URL")
else:
logger.info("Note: integration tests are running against existing deployment")
_check_api_tokens_presence()
def _running_locally(coreapi_url, jobs_api_url):
"""Check if tests are running locally."""
return not (coreapi_url and jobs_api_url)
def _get_url(context, actual, attribute_name, port):
"""Get the URL + port for the selected service."""
return actual or _get_api_url(context, attribute_name, port)
def check_token_structure(data):
"""Check the basic structure of response with access token."""
assert "token" in data
token_structure = data["token"]
assert "access_token" in token_structure
assert "token_type" in token_structure
assert "expires_in" in token_structure
def before_all(context):
"""Perform the setup before the first event."""
context.config.setup_logging()
context.start_system = _start_system
context.teardown_system = _teardown_system
context.restart_system = _restart_system
context.run_command_in_service = _run_command_in_service
context.exec_command_in_container = _exec_command_in_container
context.is_running = _is_running
context.is_jobs_debug_api_running = _is_jobs_debug_api_running
context.is_component_search_service_running = _is_component_search_service_running
context.is_master_tag_list_service_running = _is_master_tag_list_service_running
context.wait_for_master_tag_list_service = _wait_for_master_tag_list_service
context.is_get_untagged_component_service_running = _is_get_untagged_component_service_running
context.wait_for_get_untagged_component_service = _wait_for_get_untagged_component_service
context.send_json_file = _send_json_file
context.wait_for_jobs_debug_api_service = _wait_for_jobs_debug_api_service
context.wait_for_component_search_service = _wait_for_component_search_service
context.is_3scale_staging_running = _is_3scale_staging_running
context.is_3scale_preview_running = _is_3scale_preview_running
context.is_backbone_api_running = _is_backbone_api_running
context.is_gemini_api_running = _is_gemini_api_running
# Configure container logging
context.dump_logs = _read_boolean_setting(context, 'dump_logs')
tail_logs = int(context.config.userdata.get('tail_logs', 0))
dump_errors = _read_boolean_setting(context, 'dump_errors')
if tail_logs:
dump_errors = True
else:
tail_logs = 50
context.dump_errors = dump_errors
context.tail_logs = tail_logs
# Configure system under test
context.kubernetes_dir_path = context.config.userdata.get('kubernetes_dir', None)
if context.kubernetes_dir_path is not None:
context.docker_compose_path = None
else:
# If we're not running Kubernetes, use the local Docker Compose setup
_set_default_compose_path(context)
# for now, we just assume we know what compose file looks like (what services need what images)
context.images = {}
context.images['bayesian/bayesian-api'] = context.config.userdata.get(
'coreapi_server_image',
'registry.devshift.net/bayesian/bayesian-api')
context.images['bayesian/cucos-worker'] = context.config.userdata.get(
'coreapi_worker_image',
'registry.devshift.net/bayesian/cucos-worker')
core_v2_api_url = _read_url_from_env_var('F8A_API_V2_URL')
coreapi_url = _read_url_from_env_var('F8A_API_URL')
core_v2_api_url = _read_url_from_env_var('F8A_API_V2_URL')
valid_synk_token = os.environ.get("SNYK_TOKEN")
uuid = os.environ.get("REGISTERED_USER_UUID")
jobs_api_url = _read_url_from_env_var('F8A_JOB_API_URL')
gremlin_url = _read_url_from_env_var('F8A_GREMLIN_URL')
threescale_url = _read_url_from_env_var('F8A_3SCALE_URL')
threescale_preview_url = _read_url_from_env_var('F8A_THREE_SCALE_PREVIEW_URL')
backbone_api_url = _read_url_from_env_var('F8A_BACKBONE_API_URL')
service_id = _read_url_from_env_var('F8A_SERVICE_ID')
gemini_api_url = _read_url_from_env_var('F8A_GEMINI_API_URL')
license_service_url = _read_url_from_env_var('F8A_LICENSE_SERVICE_URL')
context.running_locally = _running_locally(coreapi_url, jobs_api_url)
check_test_environment(context, coreapi_url)
context.coreapi_url = _get_url(context, coreapi_url, 'coreapi_url', _FABRIC8_ANALYTICS_SERVER)
context.core_v2_api_url = core_v2_api_url
context.jobs_api_url = _get_url(context, jobs_api_url, 'jobs_api_url', _FABRIC8_ANALYTICS_JOBS)
context.gremlin_url = _get_url(context, gremlin_url, "gremlin_url", _FABRIC8_GREMLIN_SERVICE)
context.license_service_url = _get_url(context, license_service_url, 'license_service_url',
_FABRIC8_LICENSE_SERVICE)
context.core_v2_api_url = core_v2_api_url
context.threescale_url = threescale_url
context.valid_synk_token = valid_synk_token
context.uuid = uuid
context.threescale_preview_url = threescale_preview_url
context.backbone_api_url = backbone_api_url
context.service_id = service_id
context.gemini_api_url = gemini_api_url
# we can retrieve access token by using refresh/offline token
# informations needed to access S3 database from tests
_check_env_var_presence_s3_db('AWS_ACCESS_KEY_ID')
_check_env_var_presence_s3_db('AWS_SECRET_ACCESS_KEY')
_check_env_var_presence_s3_db('S3_REGION_NAME')
aws_access_key_id = os.environ.get('AWS_ACCESS_KEY_ID')
aws_secret_access_key = os.environ.get('AWS_SECRET_ACCESS_KEY')
s3_region_name = os.environ.get('S3_REGION_NAME')
deployment_prefix = os.environ.get('DEPLOYMENT_PREFIX', 'STAGE')
context.reports_bucket = os.environ.get('DEVELOPER_ANALYTICS_REPORTS_BUCKET')
context.s3interface = S3Interface(aws_access_key_id, aws_secret_access_key,
s3_region_name, deployment_prefix)
context.client = None
# timeout values can be overwritten by environment variables
stack_analysis_timeout = _parse_int_env_var('F8A_STACK_ANALYSIS_TIMEOUT')
component_analysis_timeout = _parse_int_env_var('F8A_COMPONENT_ANALYSIS_TIMEOUT')
context.stack_analysis_timeout = stack_analysis_timeout or _DEFAULT_STACK_ANALYSIS_TIMEOUT
context.component_analysis_timeout = component_analysis_timeout \
or _DEFAULT_COMPONENT_ANALYSIS_TIMEOUT
if context.running_locally:
context.client = docker.AutoVersionClient()
for desired, actual in context.images.items():
desired = 'registry.devshift.net/' + desired
if desired != actual:
context.client.tag(actual, desired, force=True)
# Specify the analyses checked for when looking for "complete" results
def _get_expected_component_analyses(ecosystem):
common = context.EXPECTED_COMPONENT_ANALYSES
specific = context.ECOSYSTEM_DEPENDENT_ANALYSES.get(ecosystem, set())
return common | specific
context.get_expected_component_analyses = _get_expected_component_analyses
def _compare_analysis_sets(actual, expected):
unreliable = context.UNRELIABLE_ANALYSES
missing = expected - actual - unreliable
unexpected = actual - expected - unreliable
return missing, unexpected
context.compare_analysis_sets = _compare_analysis_sets
context.EXPECTED_COMPONENT_ANALYSES = {
'metadata', 'source_licenses', 'digests',
'dependency_snapshot', 'code_metrics'
# The follower workers are currently disabled by default:
# 'static_analysis', 'binary_data', 'languages', 'crypto_algorithms'
}
# Analyses that are only executed for particular language ecosystems
context.ECOSYSTEM_DEPENDENT_ANALYSES = dict()
# Results that use a nonstandard format, so we don't check for the
# standard "status", "summary", and "details" keys
context.NONSTANDARD_ANALYSIS_FORMATS = set()
# Analyses that are just plain unreliable and so need to be excluded from
# consideration when determining whether or not an analysis is complete
context.UNRELIABLE_ANALYSES = {
'github_details', # if no github api token provided
'security_issues' # needs Snyk vulndb in S3
}
@capture
def before_scenario(context, scenario):
"""Perform the setup before each scenario is run."""
context.resource_manager = contextlib.ExitStack()
@capture
def after_scenario(context, scenario):
"""Perform the cleanup after each scenario is run."""
if context.running_locally:
if context.dump_logs or context.dump_errors and scenario.status == "failed":
try:
_dump_server_logs(context, int(context.tail_logs))
except subprocess.CalledProcessError as e:
raise Exception('Failed to dump server logs. Command "{c}" failed:\n{o}'.
format(c=' '.join(e.cmd), o=e.output))
# Clean up resources (which may destroy some container logs)
context.resource_manager.close()
@capture
def after_all(context):
"""Perform the cleanup after the last event."""
if context.running_locally:
try:
_teardown_system(context)
except subprocess.CalledProcessError as e:
raise Exception('Failed to teardown system. Command "{c}" failed:\n{o}'.
format(c=' '.join(e.cmd), o=e.output))
| _is_gemini_api_running | identifier_name |
reactor.go | package v1
import (
"fmt"
"time"
"github.com/gogo/protobuf/proto"
"github.com/tendermint/tendermint/behaviour"
bc "github.com/tendermint/tendermint/blockchain"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/p2p"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
)
const (
// BlockchainChannel is a channel for blocks and status updates (`BlockStore` height)
BlockchainChannel = byte(0x40)
trySyncIntervalMS = 10
trySendIntervalMS = 10
// ask for best height every 10s
statusUpdateIntervalSeconds = 10
)
var (
// Maximum number of requests that can be pending per peer, i.e. for which requests have been sent but blocks
// have not been received.
maxRequestsPerPeer = 20
// Maximum number of block requests for the reactor, pending or for which blocks have been received.
maxNumRequests = 64
)
type consensusReactor interface {
// for when we switch from blockchain reactor and fast sync to
// the consensus machine
SwitchToConsensus(state sm.State, skipWAL bool)
}
// BlockchainReactor handles long-term catchup syncing.
type BlockchainReactor struct {
p2p.BaseReactor
initialState sm.State // immutable
state sm.State
blockExec *sm.BlockExecutor
store *store.BlockStore
fastSync bool
stateSynced bool
fsm *BcReactorFSM
blocksSynced uint64
// Receive goroutine forwards messages to this channel to be processed in the context of the poolRoutine.
messagesForFSMCh chan bcReactorMessage
// Switch goroutine may send RemovePeer to the blockchain reactor. This is an error message that is relayed
// to this channel to be processed in the context of the poolRoutine.
errorsForFSMCh chan bcReactorMessage
// This channel is used by the FSM and indirectly the block pool to report errors to the blockchain reactor and
// the switch.
eventsFromFSMCh chan bcFsmMessage
swReporter *behaviour.SwitchReporter
}
// NewBlockchainReactor returns new reactor instance.
func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore,
fastSync bool) *BlockchainReactor {
if state.LastBlockHeight != store.Height() {
panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight,
store.Height()))
}
const capacity = 1000
eventsFromFSMCh := make(chan bcFsmMessage, capacity)
messagesForFSMCh := make(chan bcReactorMessage, capacity)
errorsForFSMCh := make(chan bcReactorMessage, capacity)
startHeight := store.Height() + 1
if startHeight == 1 {
startHeight = state.InitialHeight
}
bcR := &BlockchainReactor{
initialState: state,
state: state,
blockExec: blockExec,
fastSync: fastSync,
store: store,
messagesForFSMCh: messagesForFSMCh,
eventsFromFSMCh: eventsFromFSMCh,
errorsForFSMCh: errorsForFSMCh,
}
fsm := NewFSM(startHeight, bcR)
bcR.fsm = fsm
bcR.BaseReactor = *p2p.NewBaseReactor("BlockchainReactor", bcR)
// bcR.swReporter = behaviour.NewSwitchReporter(bcR.BaseReactor.Switch)
return bcR
}
// bcReactorMessage is used by the reactor to send messages to the FSM.
type bcReactorMessage struct {
event bReactorEvent
data bReactorEventData
}
type bFsmEvent uint
const (
// message type events
peerErrorEv = iota + 1
syncFinishedEv
)
type bFsmEventData struct {
peerID p2p.ID
err error
}
// bcFsmMessage is used by the FSM to send messages to the reactor
type bcFsmMessage struct {
event bFsmEvent
data bFsmEventData
}
// SetLogger implements service.Service by setting the logger on reactor and pool.
func (bcR *BlockchainReactor) SetLogger(l log.Logger) {
bcR.BaseService.Logger = l
bcR.fsm.SetLogger(l)
}
// OnStart implements service.Service.
func (bcR *BlockchainReactor) OnStart() error {
bcR.swReporter = behaviour.NewSwitchReporter(bcR.BaseReactor.Switch)
if bcR.fastSync {
go bcR.poolRoutine()
}
return nil
}
// OnStop implements service.Service.
func (bcR *BlockchainReactor) OnStop() {
_ = bcR.Stop()
}
// SwitchToFastSync is called by the state sync reactor when switching to fast sync.
func (bcR *BlockchainReactor) SwitchToFastSync(state sm.State) error {
bcR.fastSync = true
bcR.initialState = state
bcR.state = state
bcR.stateSynced = true
bcR.fsm = NewFSM(state.LastBlockHeight+1, bcR)
bcR.fsm.SetLogger(bcR.Logger)
go bcR.poolRoutine()
return nil
}
// GetChannels implements Reactor
func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
return []*p2p.ChannelDescriptor{
{
ID: BlockchainChannel,
Priority: 10,
SendQueueCapacity: 2000,
RecvBufferCapacity: 50 * 4096,
RecvMessageCapacity: bc.MaxMsgSize,
MessageType: &bcproto.Message{},
},
}
}
// AddPeer implements Reactor by sending our state to peer.
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.StatusResponse{
Base: bcR.store.Base(),
Height: bcR.store.Height(),
},
}, bcR.Logger)
// it's OK if send fails. will try later in poolRoutine
// peer is added to the pool once we receive the first
// bcStatusResponseMessage from the peer and call pool.updatePeer()
}
// sendBlockToPeer loads a block and sends it to the requesting peer.
// If the block doesn't exist a bcNoBlockResponseMessage is sent.
// If all nodes are honest, no node should be requesting for a block that doesn't exist.
func (bcR *BlockchainReactor) sendBlockToPeer(msg *bcproto.BlockRequest,
src p2p.Peer) (queued bool) {
block := bcR.store.LoadBlock(msg.Height)
if block != nil {
pbbi, err := block.ToProto()
if err != nil {
bcR.Logger.Error("Could not send block message to peer", "err", err)
return false
}
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.BlockResponse{Block: pbbi},
}, bcR.Logger)
}
bcR.Logger.Info("peer asking for a block we don't have", "src", src, "height", msg.Height)
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.NoBlockResponse{Height: msg.Height},
}, bcR.Logger)
}
func (bcR *BlockchainReactor) sendStatusResponseToPeer(msg *bcproto.StatusRequest, src p2p.Peer) (queued bool) {
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.StatusResponse{
Base: bcR.store.Base(),
Height: bcR.store.Height(),
},
}, bcR.Logger)
}
// RemovePeer implements Reactor by removing peer from the pool.
func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
msgData := bcReactorMessage{
event: peerRemoveEv,
data: bReactorEventData{
peerID: peer.ID(),
err: errSwitchRemovesPeer,
},
}
bcR.errorsForFSMCh <- msgData
}
// Receive implements Reactor by handling 4 types of messages (look below).
func (bcR *BlockchainReactor) ReceiveEnvelope(e p2p.Envelope) {
if err := bc.ValidateMsg(e.Message); err != nil {
bcR.Logger.Error("peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
_ = bcR.swReporter.Report(behaviour.BadMessage(e.Src.ID(), err.Error()))
return
}
bcR.Logger.Debug("Receive", "src", e.Src, "chID", e.ChannelID, "msg", e.Message)
switch msg := e.Message.(type) {
case *bcproto.BlockRequest:
if queued := bcR.sendBlockToPeer(msg, e.Src); !queued {
// Unfortunately not queued since the queue is full.
bcR.Logger.Error("Could not send block message to peer", "src", e.Src, "height", msg.Height)
}
case *bcproto.StatusRequest:
// Send peer our state.
if queued := bcR.sendStatusResponseToPeer(msg, e.Src); !queued {
// Unfortunately not queued since the queue is full.
bcR.Logger.Error("Could not send status message to peer", "src", e.Src)
}
case *bcproto.BlockResponse:
bi, err := types.BlockFromProto(msg.Block)
if err != nil {
bcR.Logger.Error("error transition block from protobuf", "err", err)
return
}
msgForFSM := bcReactorMessage{
event: blockResponseEv,
data: bReactorEventData{
peerID: e.Src.ID(),
height: bi.Height,
block: bi,
length: msg.Size(),
},
}
bcR.Logger.Info("Received", "src", e.Src, "height", bi.Height)
bcR.messagesForFSMCh <- msgForFSM
case *bcproto.NoBlockResponse:
msgForFSM := bcReactorMessage{
event: noBlockResponseEv,
data: bReactorEventData{
peerID: e.Src.ID(),
height: msg.Height,
},
}
bcR.Logger.Debug("Peer does not have requested block", "peer", e.Src, "height", msg.Height)
bcR.messagesForFSMCh <- msgForFSM
case *bcproto.StatusResponse:
// Got a peer status. Unverified.
msgForFSM := bcReactorMessage{
event: statusResponseEv,
data: bReactorEventData{
peerID: e.Src.ID(),
height: msg.Height,
length: msg.Size(),
},
}
bcR.messagesForFSMCh <- msgForFSM
default:
bcR.Logger.Error(fmt.Sprintf("unknown message type %T", msg))
}
}
func (bcR *BlockchainReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) |
// processBlocksRoutine processes blocks until signlaed to stop over the stopProcessing channel
func (bcR *BlockchainReactor) processBlocksRoutine(stopProcessing chan struct{}) {
processReceivedBlockTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
doProcessBlockCh := make(chan struct{}, 1)
lastHundred := time.Now()
lastRate := 0.0
ForLoop:
for {
select {
case <-stopProcessing:
bcR.Logger.Info("finishing block execution")
break ForLoop
case <-processReceivedBlockTicker.C: // try to execute blocks
select {
case doProcessBlockCh <- struct{}{}:
default:
}
case <-doProcessBlockCh:
for {
err := bcR.processBlock()
if err == errMissingBlock {
break
}
// Notify FSM of block processing result.
msgForFSM := bcReactorMessage{
event: processedBlockEv,
data: bReactorEventData{
err: err,
},
}
_ = bcR.fsm.Handle(&msgForFSM)
if err != nil {
break
}
bcR.blocksSynced++
if bcR.blocksSynced%100 == 0 {
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
height, maxPeerHeight := bcR.fsm.Status()
bcR.Logger.Info("Fast Sync Rate", "height", height,
"max_peer_height", maxPeerHeight, "blocks/s", lastRate)
lastHundred = time.Now()
}
}
}
}
}
// poolRoutine receives and handles messages from the Receive() routine and from the FSM.
func (bcR *BlockchainReactor) poolRoutine() {
bcR.fsm.Start()
sendBlockRequestTicker := time.NewTicker(trySendIntervalMS * time.Millisecond)
statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second)
stopProcessing := make(chan struct{}, 1)
go bcR.processBlocksRoutine(stopProcessing)
ForLoop:
for {
select {
case <-sendBlockRequestTicker.C:
if !bcR.fsm.NeedsBlocks() {
continue
}
_ = bcR.fsm.Handle(&bcReactorMessage{
event: makeRequestsEv,
data: bReactorEventData{
maxNumRequests: maxNumRequests}})
case <-statusUpdateTicker.C:
// Ask for status updates.
go bcR.sendStatusRequest()
case msg := <-bcR.messagesForFSMCh:
// Sent from the Receive() routine when status (statusResponseEv) and
// block (blockResponseEv) response events are received
_ = bcR.fsm.Handle(&msg)
case msg := <-bcR.errorsForFSMCh:
// Sent from the switch.RemovePeer() routine (RemovePeerEv) and
// FSM state timer expiry routine (stateTimeoutEv).
_ = bcR.fsm.Handle(&msg)
case msg := <-bcR.eventsFromFSMCh:
switch msg.event {
case syncFinishedEv:
stopProcessing <- struct{}{}
// Sent from the FSM when it enters finished state.
break ForLoop
case peerErrorEv:
// Sent from the FSM when it detects peer error
bcR.reportPeerErrorToSwitch(msg.data.err, msg.data.peerID)
if msg.data.err == errNoPeerResponse {
// Sent from the peer timeout handler routine
_ = bcR.fsm.Handle(&bcReactorMessage{
event: peerRemoveEv,
data: bReactorEventData{
peerID: msg.data.peerID,
err: msg.data.err,
},
})
}
// else {
// For slow peers, or errors due to blocks received from wrong peer
// the FSM had already removed the peers
// }
default:
bcR.Logger.Error("Event from FSM not supported", "type", msg.event)
}
case <-bcR.Quit():
break ForLoop
}
}
}
func (bcR *BlockchainReactor) reportPeerErrorToSwitch(err error, peerID p2p.ID) {
peer := bcR.Switch.Peers().Get(peerID)
if peer != nil {
_ = bcR.swReporter.Report(behaviour.BadMessage(peerID, err.Error()))
}
}
func (bcR *BlockchainReactor) processBlock() error {
first, second, err := bcR.fsm.FirstTwoBlocks()
if err != nil {
// We need both to sync the first block.
return err
}
chainID := bcR.initialState.ChainID
firstParts := first.MakePartSet(types.BlockPartSizeBytes)
firstPartSetHeader := firstParts.Header()
firstID := types.BlockID{Hash: first.Hash(), PartSetHeader: firstPartSetHeader}
// Finally, verify the first block using the second's commit
// NOTE: we can probably make this more efficient, but note that calling
// first.Hash() doesn't verify the tx contents, so MakePartSet() is
// currently necessary.
err = bcR.state.Validators.VerifyCommitLight(chainID, firstID, first.Height, second.LastCommit)
if err != nil {
bcR.Logger.Error("error during commit verification", "err", err,
"first", first.Height, "second", second.Height)
return errBlockVerificationFailure
}
bcR.store.SaveBlock(first, firstParts, second.LastCommit)
bcR.state, _, err = bcR.blockExec.ApplyBlock(bcR.state, firstID, first)
if err != nil {
panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
}
return nil
}
// Implements bcRNotifier
// sendStatusRequest broadcasts `BlockStore` height.
func (bcR *BlockchainReactor) sendStatusRequest() {
bcR.Switch.BroadcastEnvelope(p2p.Envelope{
ChannelID: BlockchainChannel,
Message: &bcproto.StatusRequest{},
})
}
// Implements bcRNotifier
// BlockRequest sends `BlockRequest` height.
func (bcR *BlockchainReactor) sendBlockRequest(peerID p2p.ID, height int64) error {
peer := bcR.Switch.Peers().Get(peerID)
if peer == nil {
return errNilPeerForBlockRequest
}
queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.BlockRequest{Height: height},
}, bcR.Logger)
if !queued {
return errSendQueueFull
}
return nil
}
// Implements bcRNotifier
func (bcR *BlockchainReactor) switchToConsensus() {
conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
if ok {
conR.SwitchToConsensus(bcR.state, bcR.blocksSynced > 0 || bcR.stateSynced)
bcR.eventsFromFSMCh <- bcFsmMessage{event: syncFinishedEv}
}
// else {
// Should only happen during testing.
// }
}
// Implements bcRNotifier
// Called by FSM and pool:
// - pool calls when it detects slow peer or when peer times out
// - FSM calls when:
// - adding a block (addBlock) fails
// - reactor processing of a block reports failure and FSM sends back the peers of first and second blocks
func (bcR *BlockchainReactor) sendPeerError(err error, peerID p2p.ID) {
bcR.Logger.Info("sendPeerError:", "peer", peerID, "error", err)
msgData := bcFsmMessage{
event: peerErrorEv,
data: bFsmEventData{
peerID: peerID,
err: err,
},
}
bcR.eventsFromFSMCh <- msgData
}
// Implements bcRNotifier
func (bcR *BlockchainReactor) resetStateTimer(name string, timer **time.Timer, timeout time.Duration) {
if timer == nil {
panic("nil timer pointer parameter")
}
if *timer == nil {
*timer = time.AfterFunc(timeout, func() {
msg := bcReactorMessage{
event: stateTimeoutEv,
data: bReactorEventData{
stateName: name,
},
}
bcR.errorsForFSMCh <- msg
})
} else {
(*timer).Reset(timeout)
}
}
| {
msg := &bcproto.Message{}
err := proto.Unmarshal(msgBytes, msg)
if err != nil {
panic(err)
}
uw, err := msg.Unwrap()
if err != nil {
panic(err)
}
bcR.ReceiveEnvelope(p2p.Envelope{
ChannelID: chID,
Src: peer,
Message: uw,
})
} | identifier_body |
reactor.go | package v1
import (
"fmt"
"time"
"github.com/gogo/protobuf/proto"
"github.com/tendermint/tendermint/behaviour"
bc "github.com/tendermint/tendermint/blockchain"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/p2p"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
)
const (
// BlockchainChannel is a channel for blocks and status updates (`BlockStore` height)
BlockchainChannel = byte(0x40)
trySyncIntervalMS = 10
trySendIntervalMS = 10
// ask for best height every 10s
statusUpdateIntervalSeconds = 10
)
var (
// Maximum number of requests that can be pending per peer, i.e. for which requests have been sent but blocks
// have not been received.
maxRequestsPerPeer = 20
// Maximum number of block requests for the reactor, pending or for which blocks have been received.
maxNumRequests = 64
)
type consensusReactor interface {
// for when we switch from blockchain reactor and fast sync to
// the consensus machine
SwitchToConsensus(state sm.State, skipWAL bool)
}
// BlockchainReactor handles long-term catchup syncing.
type BlockchainReactor struct {
p2p.BaseReactor
initialState sm.State // immutable
state sm.State
blockExec *sm.BlockExecutor
store *store.BlockStore
fastSync bool
stateSynced bool
fsm *BcReactorFSM
blocksSynced uint64
// Receive goroutine forwards messages to this channel to be processed in the context of the poolRoutine.
messagesForFSMCh chan bcReactorMessage
// Switch goroutine may send RemovePeer to the blockchain reactor. This is an error message that is relayed
// to this channel to be processed in the context of the poolRoutine.
errorsForFSMCh chan bcReactorMessage
// This channel is used by the FSM and indirectly the block pool to report errors to the blockchain reactor and
// the switch.
eventsFromFSMCh chan bcFsmMessage
swReporter *behaviour.SwitchReporter
}
// NewBlockchainReactor returns new reactor instance.
func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore,
fastSync bool) *BlockchainReactor {
if state.LastBlockHeight != store.Height() {
panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight,
store.Height()))
}
const capacity = 1000
eventsFromFSMCh := make(chan bcFsmMessage, capacity)
messagesForFSMCh := make(chan bcReactorMessage, capacity)
errorsForFSMCh := make(chan bcReactorMessage, capacity)
startHeight := store.Height() + 1
if startHeight == 1 {
startHeight = state.InitialHeight
}
bcR := &BlockchainReactor{
initialState: state,
state: state,
blockExec: blockExec,
fastSync: fastSync,
store: store,
messagesForFSMCh: messagesForFSMCh,
eventsFromFSMCh: eventsFromFSMCh,
errorsForFSMCh: errorsForFSMCh,
}
fsm := NewFSM(startHeight, bcR)
bcR.fsm = fsm
bcR.BaseReactor = *p2p.NewBaseReactor("BlockchainReactor", bcR)
// bcR.swReporter = behaviour.NewSwitchReporter(bcR.BaseReactor.Switch)
return bcR
}
// bcReactorMessage is used by the reactor to send messages to the FSM.
type bcReactorMessage struct {
event bReactorEvent
data bReactorEventData
}
type bFsmEvent uint
const (
// message type events
peerErrorEv = iota + 1
syncFinishedEv
)
type bFsmEventData struct {
peerID p2p.ID
err error
}
// bcFsmMessage is used by the FSM to send messages to the reactor
type bcFsmMessage struct {
event bFsmEvent
data bFsmEventData
}
// SetLogger implements service.Service by setting the logger on reactor and pool.
func (bcR *BlockchainReactor) SetLogger(l log.Logger) {
bcR.BaseService.Logger = l
bcR.fsm.SetLogger(l)
}
// OnStart implements service.Service.
func (bcR *BlockchainReactor) OnStart() error {
bcR.swReporter = behaviour.NewSwitchReporter(bcR.BaseReactor.Switch)
if bcR.fastSync {
go bcR.poolRoutine()
}
return nil
}
// OnStop implements service.Service.
func (bcR *BlockchainReactor) OnStop() {
_ = bcR.Stop()
}
// SwitchToFastSync is called by the state sync reactor when switching to fast sync.
func (bcR *BlockchainReactor) SwitchToFastSync(state sm.State) error {
bcR.fastSync = true
bcR.initialState = state
bcR.state = state
bcR.stateSynced = true
bcR.fsm = NewFSM(state.LastBlockHeight+1, bcR)
bcR.fsm.SetLogger(bcR.Logger)
go bcR.poolRoutine()
return nil
}
// GetChannels implements Reactor
func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
return []*p2p.ChannelDescriptor{
{
ID: BlockchainChannel,
Priority: 10,
SendQueueCapacity: 2000,
RecvBufferCapacity: 50 * 4096,
RecvMessageCapacity: bc.MaxMsgSize,
MessageType: &bcproto.Message{},
},
}
}
// AddPeer implements Reactor by sending our state to peer.
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.StatusResponse{
Base: bcR.store.Base(),
Height: bcR.store.Height(),
},
}, bcR.Logger)
// it's OK if send fails. will try later in poolRoutine
// peer is added to the pool once we receive the first
// bcStatusResponseMessage from the peer and call pool.updatePeer()
}
// sendBlockToPeer loads a block and sends it to the requesting peer.
// If the block doesn't exist a bcNoBlockResponseMessage is sent.
// If all nodes are honest, no node should be requesting for a block that doesn't exist.
func (bcR *BlockchainReactor) sendBlockToPeer(msg *bcproto.BlockRequest,
src p2p.Peer) (queued bool) {
block := bcR.store.LoadBlock(msg.Height)
if block != nil {
pbbi, err := block.ToProto()
if err != nil {
bcR.Logger.Error("Could not send block message to peer", "err", err)
return false
}
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.BlockResponse{Block: pbbi},
}, bcR.Logger)
}
bcR.Logger.Info("peer asking for a block we don't have", "src", src, "height", msg.Height)
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.NoBlockResponse{Height: msg.Height},
}, bcR.Logger)
}
func (bcR *BlockchainReactor) sendStatusResponseToPeer(msg *bcproto.StatusRequest, src p2p.Peer) (queued bool) {
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.StatusResponse{
Base: bcR.store.Base(),
Height: bcR.store.Height(),
},
}, bcR.Logger)
}
// RemovePeer implements Reactor by removing peer from the pool.
func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
msgData := bcReactorMessage{
event: peerRemoveEv,
data: bReactorEventData{
peerID: peer.ID(),
err: errSwitchRemovesPeer,
},
}
bcR.errorsForFSMCh <- msgData
}
// Receive implements Reactor by handling 4 types of messages (look below).
func (bcR *BlockchainReactor) ReceiveEnvelope(e p2p.Envelope) {
if err := bc.ValidateMsg(e.Message); err != nil {
bcR.Logger.Error("peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
_ = bcR.swReporter.Report(behaviour.BadMessage(e.Src.ID(), err.Error()))
return
}
bcR.Logger.Debug("Receive", "src", e.Src, "chID", e.ChannelID, "msg", e.Message)
switch msg := e.Message.(type) {
case *bcproto.BlockRequest:
if queued := bcR.sendBlockToPeer(msg, e.Src); !queued {
// Unfortunately not queued since the queue is full.
bcR.Logger.Error("Could not send block message to peer", "src", e.Src, "height", msg.Height)
}
case *bcproto.StatusRequest:
// Send peer our state.
if queued := bcR.sendStatusResponseToPeer(msg, e.Src); !queued {
// Unfortunately not queued since the queue is full.
bcR.Logger.Error("Could not send status message to peer", "src", e.Src)
}
case *bcproto.BlockResponse:
bi, err := types.BlockFromProto(msg.Block)
if err != nil |
msgForFSM := bcReactorMessage{
event: blockResponseEv,
data: bReactorEventData{
peerID: e.Src.ID(),
height: bi.Height,
block: bi,
length: msg.Size(),
},
}
bcR.Logger.Info("Received", "src", e.Src, "height", bi.Height)
bcR.messagesForFSMCh <- msgForFSM
case *bcproto.NoBlockResponse:
msgForFSM := bcReactorMessage{
event: noBlockResponseEv,
data: bReactorEventData{
peerID: e.Src.ID(),
height: msg.Height,
},
}
bcR.Logger.Debug("Peer does not have requested block", "peer", e.Src, "height", msg.Height)
bcR.messagesForFSMCh <- msgForFSM
case *bcproto.StatusResponse:
// Got a peer status. Unverified.
msgForFSM := bcReactorMessage{
event: statusResponseEv,
data: bReactorEventData{
peerID: e.Src.ID(),
height: msg.Height,
length: msg.Size(),
},
}
bcR.messagesForFSMCh <- msgForFSM
default:
bcR.Logger.Error(fmt.Sprintf("unknown message type %T", msg))
}
}
func (bcR *BlockchainReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
msg := &bcproto.Message{}
err := proto.Unmarshal(msgBytes, msg)
if err != nil {
panic(err)
}
uw, err := msg.Unwrap()
if err != nil {
panic(err)
}
bcR.ReceiveEnvelope(p2p.Envelope{
ChannelID: chID,
Src: peer,
Message: uw,
})
}
// processBlocksRoutine processes blocks until signlaed to stop over the stopProcessing channel
func (bcR *BlockchainReactor) processBlocksRoutine(stopProcessing chan struct{}) {
processReceivedBlockTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
doProcessBlockCh := make(chan struct{}, 1)
lastHundred := time.Now()
lastRate := 0.0
ForLoop:
for {
select {
case <-stopProcessing:
bcR.Logger.Info("finishing block execution")
break ForLoop
case <-processReceivedBlockTicker.C: // try to execute blocks
select {
case doProcessBlockCh <- struct{}{}:
default:
}
case <-doProcessBlockCh:
for {
err := bcR.processBlock()
if err == errMissingBlock {
break
}
// Notify FSM of block processing result.
msgForFSM := bcReactorMessage{
event: processedBlockEv,
data: bReactorEventData{
err: err,
},
}
_ = bcR.fsm.Handle(&msgForFSM)
if err != nil {
break
}
bcR.blocksSynced++
if bcR.blocksSynced%100 == 0 {
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
height, maxPeerHeight := bcR.fsm.Status()
bcR.Logger.Info("Fast Sync Rate", "height", height,
"max_peer_height", maxPeerHeight, "blocks/s", lastRate)
lastHundred = time.Now()
}
}
}
}
}
// poolRoutine receives and handles messages from the Receive() routine and from the FSM.
func (bcR *BlockchainReactor) poolRoutine() {
bcR.fsm.Start()
sendBlockRequestTicker := time.NewTicker(trySendIntervalMS * time.Millisecond)
statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second)
stopProcessing := make(chan struct{}, 1)
go bcR.processBlocksRoutine(stopProcessing)
ForLoop:
for {
select {
case <-sendBlockRequestTicker.C:
if !bcR.fsm.NeedsBlocks() {
continue
}
_ = bcR.fsm.Handle(&bcReactorMessage{
event: makeRequestsEv,
data: bReactorEventData{
maxNumRequests: maxNumRequests}})
case <-statusUpdateTicker.C:
// Ask for status updates.
go bcR.sendStatusRequest()
case msg := <-bcR.messagesForFSMCh:
// Sent from the Receive() routine when status (statusResponseEv) and
// block (blockResponseEv) response events are received
_ = bcR.fsm.Handle(&msg)
case msg := <-bcR.errorsForFSMCh:
// Sent from the switch.RemovePeer() routine (RemovePeerEv) and
// FSM state timer expiry routine (stateTimeoutEv).
_ = bcR.fsm.Handle(&msg)
case msg := <-bcR.eventsFromFSMCh:
switch msg.event {
case syncFinishedEv:
stopProcessing <- struct{}{}
// Sent from the FSM when it enters finished state.
break ForLoop
case peerErrorEv:
// Sent from the FSM when it detects peer error
bcR.reportPeerErrorToSwitch(msg.data.err, msg.data.peerID)
if msg.data.err == errNoPeerResponse {
// Sent from the peer timeout handler routine
_ = bcR.fsm.Handle(&bcReactorMessage{
event: peerRemoveEv,
data: bReactorEventData{
peerID: msg.data.peerID,
err: msg.data.err,
},
})
}
// else {
// For slow peers, or errors due to blocks received from wrong peer
// the FSM had already removed the peers
// }
default:
bcR.Logger.Error("Event from FSM not supported", "type", msg.event)
}
case <-bcR.Quit():
break ForLoop
}
}
}
func (bcR *BlockchainReactor) reportPeerErrorToSwitch(err error, peerID p2p.ID) {
peer := bcR.Switch.Peers().Get(peerID)
if peer != nil {
_ = bcR.swReporter.Report(behaviour.BadMessage(peerID, err.Error()))
}
}
func (bcR *BlockchainReactor) processBlock() error {
first, second, err := bcR.fsm.FirstTwoBlocks()
if err != nil {
// We need both to sync the first block.
return err
}
chainID := bcR.initialState.ChainID
firstParts := first.MakePartSet(types.BlockPartSizeBytes)
firstPartSetHeader := firstParts.Header()
firstID := types.BlockID{Hash: first.Hash(), PartSetHeader: firstPartSetHeader}
// Finally, verify the first block using the second's commit
// NOTE: we can probably make this more efficient, but note that calling
// first.Hash() doesn't verify the tx contents, so MakePartSet() is
// currently necessary.
err = bcR.state.Validators.VerifyCommitLight(chainID, firstID, first.Height, second.LastCommit)
if err != nil {
bcR.Logger.Error("error during commit verification", "err", err,
"first", first.Height, "second", second.Height)
return errBlockVerificationFailure
}
bcR.store.SaveBlock(first, firstParts, second.LastCommit)
bcR.state, _, err = bcR.blockExec.ApplyBlock(bcR.state, firstID, first)
if err != nil {
panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
}
return nil
}
// Implements bcRNotifier
// sendStatusRequest broadcasts `BlockStore` height.
func (bcR *BlockchainReactor) sendStatusRequest() {
bcR.Switch.BroadcastEnvelope(p2p.Envelope{
ChannelID: BlockchainChannel,
Message: &bcproto.StatusRequest{},
})
}
// Implements bcRNotifier
// BlockRequest sends `BlockRequest` height.
func (bcR *BlockchainReactor) sendBlockRequest(peerID p2p.ID, height int64) error {
peer := bcR.Switch.Peers().Get(peerID)
if peer == nil {
return errNilPeerForBlockRequest
}
queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.BlockRequest{Height: height},
}, bcR.Logger)
if !queued {
return errSendQueueFull
}
return nil
}
// Implements bcRNotifier
func (bcR *BlockchainReactor) switchToConsensus() {
conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
if ok {
conR.SwitchToConsensus(bcR.state, bcR.blocksSynced > 0 || bcR.stateSynced)
bcR.eventsFromFSMCh <- bcFsmMessage{event: syncFinishedEv}
}
// else {
// Should only happen during testing.
// }
}
// Implements bcRNotifier
// Called by FSM and pool:
// - pool calls when it detects slow peer or when peer times out
// - FSM calls when:
// - adding a block (addBlock) fails
// - reactor processing of a block reports failure and FSM sends back the peers of first and second blocks
func (bcR *BlockchainReactor) sendPeerError(err error, peerID p2p.ID) {
bcR.Logger.Info("sendPeerError:", "peer", peerID, "error", err)
msgData := bcFsmMessage{
event: peerErrorEv,
data: bFsmEventData{
peerID: peerID,
err: err,
},
}
bcR.eventsFromFSMCh <- msgData
}
// Implements bcRNotifier
func (bcR *BlockchainReactor) resetStateTimer(name string, timer **time.Timer, timeout time.Duration) {
if timer == nil {
panic("nil timer pointer parameter")
}
if *timer == nil {
*timer = time.AfterFunc(timeout, func() {
msg := bcReactorMessage{
event: stateTimeoutEv,
data: bReactorEventData{
stateName: name,
},
}
bcR.errorsForFSMCh <- msg
})
} else {
(*timer).Reset(timeout)
}
}
| {
bcR.Logger.Error("error transition block from protobuf", "err", err)
return
} | conditional_block |
reactor.go | package v1
import (
"fmt"
"time"
"github.com/gogo/protobuf/proto"
"github.com/tendermint/tendermint/behaviour"
bc "github.com/tendermint/tendermint/blockchain"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/p2p"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
)
const (
// BlockchainChannel is a channel for blocks and status updates (`BlockStore` height)
BlockchainChannel = byte(0x40)
trySyncIntervalMS = 10
trySendIntervalMS = 10
// ask for best height every 10s
statusUpdateIntervalSeconds = 10
)
var (
// Maximum number of requests that can be pending per peer, i.e. for which requests have been sent but blocks
// have not been received.
maxRequestsPerPeer = 20
// Maximum number of block requests for the reactor, pending or for which blocks have been received.
maxNumRequests = 64
)
type consensusReactor interface {
// for when we switch from blockchain reactor and fast sync to
// the consensus machine
SwitchToConsensus(state sm.State, skipWAL bool)
}
// BlockchainReactor handles long-term catchup syncing.
type BlockchainReactor struct {
p2p.BaseReactor
initialState sm.State // immutable
state sm.State
blockExec *sm.BlockExecutor
store *store.BlockStore
fastSync bool
stateSynced bool
fsm *BcReactorFSM
blocksSynced uint64
// Receive goroutine forwards messages to this channel to be processed in the context of the poolRoutine.
messagesForFSMCh chan bcReactorMessage
// Switch goroutine may send RemovePeer to the blockchain reactor. This is an error message that is relayed
// to this channel to be processed in the context of the poolRoutine.
errorsForFSMCh chan bcReactorMessage
// This channel is used by the FSM and indirectly the block pool to report errors to the blockchain reactor and
// the switch.
eventsFromFSMCh chan bcFsmMessage
swReporter *behaviour.SwitchReporter
}
// NewBlockchainReactor returns new reactor instance.
func | (state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore,
fastSync bool) *BlockchainReactor {
if state.LastBlockHeight != store.Height() {
panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight,
store.Height()))
}
const capacity = 1000
eventsFromFSMCh := make(chan bcFsmMessage, capacity)
messagesForFSMCh := make(chan bcReactorMessage, capacity)
errorsForFSMCh := make(chan bcReactorMessage, capacity)
startHeight := store.Height() + 1
if startHeight == 1 {
startHeight = state.InitialHeight
}
bcR := &BlockchainReactor{
initialState: state,
state: state,
blockExec: blockExec,
fastSync: fastSync,
store: store,
messagesForFSMCh: messagesForFSMCh,
eventsFromFSMCh: eventsFromFSMCh,
errorsForFSMCh: errorsForFSMCh,
}
fsm := NewFSM(startHeight, bcR)
bcR.fsm = fsm
bcR.BaseReactor = *p2p.NewBaseReactor("BlockchainReactor", bcR)
// bcR.swReporter = behaviour.NewSwitchReporter(bcR.BaseReactor.Switch)
return bcR
}
// bcReactorMessage is used by the reactor to send messages to the FSM.
type bcReactorMessage struct {
event bReactorEvent
data bReactorEventData
}
type bFsmEvent uint
const (
// message type events
peerErrorEv = iota + 1
syncFinishedEv
)
type bFsmEventData struct {
peerID p2p.ID
err error
}
// bcFsmMessage is used by the FSM to send messages to the reactor
type bcFsmMessage struct {
event bFsmEvent
data bFsmEventData
}
// SetLogger implements service.Service by setting the logger on reactor and pool.
func (bcR *BlockchainReactor) SetLogger(l log.Logger) {
bcR.BaseService.Logger = l
bcR.fsm.SetLogger(l)
}
// OnStart implements service.Service.
func (bcR *BlockchainReactor) OnStart() error {
bcR.swReporter = behaviour.NewSwitchReporter(bcR.BaseReactor.Switch)
if bcR.fastSync {
go bcR.poolRoutine()
}
return nil
}
// OnStop implements service.Service.
func (bcR *BlockchainReactor) OnStop() {
_ = bcR.Stop()
}
// SwitchToFastSync is called by the state sync reactor when switching to fast sync.
func (bcR *BlockchainReactor) SwitchToFastSync(state sm.State) error {
bcR.fastSync = true
bcR.initialState = state
bcR.state = state
bcR.stateSynced = true
bcR.fsm = NewFSM(state.LastBlockHeight+1, bcR)
bcR.fsm.SetLogger(bcR.Logger)
go bcR.poolRoutine()
return nil
}
// GetChannels implements Reactor
func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
return []*p2p.ChannelDescriptor{
{
ID: BlockchainChannel,
Priority: 10,
SendQueueCapacity: 2000,
RecvBufferCapacity: 50 * 4096,
RecvMessageCapacity: bc.MaxMsgSize,
MessageType: &bcproto.Message{},
},
}
}
// AddPeer implements Reactor by sending our state to peer.
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.StatusResponse{
Base: bcR.store.Base(),
Height: bcR.store.Height(),
},
}, bcR.Logger)
// it's OK if send fails. will try later in poolRoutine
// peer is added to the pool once we receive the first
// bcStatusResponseMessage from the peer and call pool.updatePeer()
}
// sendBlockToPeer loads a block and sends it to the requesting peer.
// If the block doesn't exist a bcNoBlockResponseMessage is sent.
// If all nodes are honest, no node should be requesting for a block that doesn't exist.
func (bcR *BlockchainReactor) sendBlockToPeer(msg *bcproto.BlockRequest,
src p2p.Peer) (queued bool) {
block := bcR.store.LoadBlock(msg.Height)
if block != nil {
pbbi, err := block.ToProto()
if err != nil {
bcR.Logger.Error("Could not send block message to peer", "err", err)
return false
}
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.BlockResponse{Block: pbbi},
}, bcR.Logger)
}
bcR.Logger.Info("peer asking for a block we don't have", "src", src, "height", msg.Height)
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.NoBlockResponse{Height: msg.Height},
}, bcR.Logger)
}
func (bcR *BlockchainReactor) sendStatusResponseToPeer(msg *bcproto.StatusRequest, src p2p.Peer) (queued bool) {
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.StatusResponse{
Base: bcR.store.Base(),
Height: bcR.store.Height(),
},
}, bcR.Logger)
}
// RemovePeer implements Reactor by removing peer from the pool.
func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
msgData := bcReactorMessage{
event: peerRemoveEv,
data: bReactorEventData{
peerID: peer.ID(),
err: errSwitchRemovesPeer,
},
}
bcR.errorsForFSMCh <- msgData
}
// Receive implements Reactor by handling 4 types of messages (look below).
func (bcR *BlockchainReactor) ReceiveEnvelope(e p2p.Envelope) {
if err := bc.ValidateMsg(e.Message); err != nil {
bcR.Logger.Error("peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
_ = bcR.swReporter.Report(behaviour.BadMessage(e.Src.ID(), err.Error()))
return
}
bcR.Logger.Debug("Receive", "src", e.Src, "chID", e.ChannelID, "msg", e.Message)
switch msg := e.Message.(type) {
case *bcproto.BlockRequest:
if queued := bcR.sendBlockToPeer(msg, e.Src); !queued {
// Unfortunately not queued since the queue is full.
bcR.Logger.Error("Could not send block message to peer", "src", e.Src, "height", msg.Height)
}
case *bcproto.StatusRequest:
// Send peer our state.
if queued := bcR.sendStatusResponseToPeer(msg, e.Src); !queued {
// Unfortunately not queued since the queue is full.
bcR.Logger.Error("Could not send status message to peer", "src", e.Src)
}
case *bcproto.BlockResponse:
bi, err := types.BlockFromProto(msg.Block)
if err != nil {
bcR.Logger.Error("error transition block from protobuf", "err", err)
return
}
msgForFSM := bcReactorMessage{
event: blockResponseEv,
data: bReactorEventData{
peerID: e.Src.ID(),
height: bi.Height,
block: bi,
length: msg.Size(),
},
}
bcR.Logger.Info("Received", "src", e.Src, "height", bi.Height)
bcR.messagesForFSMCh <- msgForFSM
case *bcproto.NoBlockResponse:
msgForFSM := bcReactorMessage{
event: noBlockResponseEv,
data: bReactorEventData{
peerID: e.Src.ID(),
height: msg.Height,
},
}
bcR.Logger.Debug("Peer does not have requested block", "peer", e.Src, "height", msg.Height)
bcR.messagesForFSMCh <- msgForFSM
case *bcproto.StatusResponse:
// Got a peer status. Unverified.
msgForFSM := bcReactorMessage{
event: statusResponseEv,
data: bReactorEventData{
peerID: e.Src.ID(),
height: msg.Height,
length: msg.Size(),
},
}
bcR.messagesForFSMCh <- msgForFSM
default:
bcR.Logger.Error(fmt.Sprintf("unknown message type %T", msg))
}
}
func (bcR *BlockchainReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
msg := &bcproto.Message{}
err := proto.Unmarshal(msgBytes, msg)
if err != nil {
panic(err)
}
uw, err := msg.Unwrap()
if err != nil {
panic(err)
}
bcR.ReceiveEnvelope(p2p.Envelope{
ChannelID: chID,
Src: peer,
Message: uw,
})
}
// processBlocksRoutine processes blocks until signlaed to stop over the stopProcessing channel
func (bcR *BlockchainReactor) processBlocksRoutine(stopProcessing chan struct{}) {
processReceivedBlockTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
doProcessBlockCh := make(chan struct{}, 1)
lastHundred := time.Now()
lastRate := 0.0
ForLoop:
for {
select {
case <-stopProcessing:
bcR.Logger.Info("finishing block execution")
break ForLoop
case <-processReceivedBlockTicker.C: // try to execute blocks
select {
case doProcessBlockCh <- struct{}{}:
default:
}
case <-doProcessBlockCh:
for {
err := bcR.processBlock()
if err == errMissingBlock {
break
}
// Notify FSM of block processing result.
msgForFSM := bcReactorMessage{
event: processedBlockEv,
data: bReactorEventData{
err: err,
},
}
_ = bcR.fsm.Handle(&msgForFSM)
if err != nil {
break
}
bcR.blocksSynced++
if bcR.blocksSynced%100 == 0 {
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
height, maxPeerHeight := bcR.fsm.Status()
bcR.Logger.Info("Fast Sync Rate", "height", height,
"max_peer_height", maxPeerHeight, "blocks/s", lastRate)
lastHundred = time.Now()
}
}
}
}
}
// poolRoutine receives and handles messages from the Receive() routine and from the FSM.
func (bcR *BlockchainReactor) poolRoutine() {
bcR.fsm.Start()
sendBlockRequestTicker := time.NewTicker(trySendIntervalMS * time.Millisecond)
statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second)
stopProcessing := make(chan struct{}, 1)
go bcR.processBlocksRoutine(stopProcessing)
ForLoop:
for {
select {
case <-sendBlockRequestTicker.C:
if !bcR.fsm.NeedsBlocks() {
continue
}
_ = bcR.fsm.Handle(&bcReactorMessage{
event: makeRequestsEv,
data: bReactorEventData{
maxNumRequests: maxNumRequests}})
case <-statusUpdateTicker.C:
// Ask for status updates.
go bcR.sendStatusRequest()
case msg := <-bcR.messagesForFSMCh:
// Sent from the Receive() routine when status (statusResponseEv) and
// block (blockResponseEv) response events are received
_ = bcR.fsm.Handle(&msg)
case msg := <-bcR.errorsForFSMCh:
// Sent from the switch.RemovePeer() routine (RemovePeerEv) and
// FSM state timer expiry routine (stateTimeoutEv).
_ = bcR.fsm.Handle(&msg)
case msg := <-bcR.eventsFromFSMCh:
switch msg.event {
case syncFinishedEv:
stopProcessing <- struct{}{}
// Sent from the FSM when it enters finished state.
break ForLoop
case peerErrorEv:
// Sent from the FSM when it detects peer error
bcR.reportPeerErrorToSwitch(msg.data.err, msg.data.peerID)
if msg.data.err == errNoPeerResponse {
// Sent from the peer timeout handler routine
_ = bcR.fsm.Handle(&bcReactorMessage{
event: peerRemoveEv,
data: bReactorEventData{
peerID: msg.data.peerID,
err: msg.data.err,
},
})
}
// else {
// For slow peers, or errors due to blocks received from wrong peer
// the FSM had already removed the peers
// }
default:
bcR.Logger.Error("Event from FSM not supported", "type", msg.event)
}
case <-bcR.Quit():
break ForLoop
}
}
}
func (bcR *BlockchainReactor) reportPeerErrorToSwitch(err error, peerID p2p.ID) {
peer := bcR.Switch.Peers().Get(peerID)
if peer != nil {
_ = bcR.swReporter.Report(behaviour.BadMessage(peerID, err.Error()))
}
}
func (bcR *BlockchainReactor) processBlock() error {
first, second, err := bcR.fsm.FirstTwoBlocks()
if err != nil {
// We need both to sync the first block.
return err
}
chainID := bcR.initialState.ChainID
firstParts := first.MakePartSet(types.BlockPartSizeBytes)
firstPartSetHeader := firstParts.Header()
firstID := types.BlockID{Hash: first.Hash(), PartSetHeader: firstPartSetHeader}
// Finally, verify the first block using the second's commit
// NOTE: we can probably make this more efficient, but note that calling
// first.Hash() doesn't verify the tx contents, so MakePartSet() is
// currently necessary.
err = bcR.state.Validators.VerifyCommitLight(chainID, firstID, first.Height, second.LastCommit)
if err != nil {
bcR.Logger.Error("error during commit verification", "err", err,
"first", first.Height, "second", second.Height)
return errBlockVerificationFailure
}
bcR.store.SaveBlock(first, firstParts, second.LastCommit)
bcR.state, _, err = bcR.blockExec.ApplyBlock(bcR.state, firstID, first)
if err != nil {
panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
}
return nil
}
// Implements bcRNotifier
// sendStatusRequest broadcasts `BlockStore` height.
func (bcR *BlockchainReactor) sendStatusRequest() {
bcR.Switch.BroadcastEnvelope(p2p.Envelope{
ChannelID: BlockchainChannel,
Message: &bcproto.StatusRequest{},
})
}
// Implements bcRNotifier
// BlockRequest sends `BlockRequest` height.
func (bcR *BlockchainReactor) sendBlockRequest(peerID p2p.ID, height int64) error {
peer := bcR.Switch.Peers().Get(peerID)
if peer == nil {
return errNilPeerForBlockRequest
}
queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.BlockRequest{Height: height},
}, bcR.Logger)
if !queued {
return errSendQueueFull
}
return nil
}
// Implements bcRNotifier
func (bcR *BlockchainReactor) switchToConsensus() {
conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
if ok {
conR.SwitchToConsensus(bcR.state, bcR.blocksSynced > 0 || bcR.stateSynced)
bcR.eventsFromFSMCh <- bcFsmMessage{event: syncFinishedEv}
}
// else {
// Should only happen during testing.
// }
}
// Implements bcRNotifier
// Called by FSM and pool:
// - pool calls when it detects slow peer or when peer times out
// - FSM calls when:
// - adding a block (addBlock) fails
// - reactor processing of a block reports failure and FSM sends back the peers of first and second blocks
func (bcR *BlockchainReactor) sendPeerError(err error, peerID p2p.ID) {
bcR.Logger.Info("sendPeerError:", "peer", peerID, "error", err)
msgData := bcFsmMessage{
event: peerErrorEv,
data: bFsmEventData{
peerID: peerID,
err: err,
},
}
bcR.eventsFromFSMCh <- msgData
}
// Implements bcRNotifier
func (bcR *BlockchainReactor) resetStateTimer(name string, timer **time.Timer, timeout time.Duration) {
if timer == nil {
panic("nil timer pointer parameter")
}
if *timer == nil {
*timer = time.AfterFunc(timeout, func() {
msg := bcReactorMessage{
event: stateTimeoutEv,
data: bReactorEventData{
stateName: name,
},
}
bcR.errorsForFSMCh <- msg
})
} else {
(*timer).Reset(timeout)
}
}
| NewBlockchainReactor | identifier_name |
reactor.go | package v1
import (
"fmt"
"time"
"github.com/gogo/protobuf/proto"
"github.com/tendermint/tendermint/behaviour"
bc "github.com/tendermint/tendermint/blockchain"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/p2p"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
)
const (
// BlockchainChannel is a channel for blocks and status updates (`BlockStore` height)
BlockchainChannel = byte(0x40)
trySyncIntervalMS = 10
trySendIntervalMS = 10
// ask for best height every 10s
statusUpdateIntervalSeconds = 10
)
var (
// Maximum number of requests that can be pending per peer, i.e. for which requests have been sent but blocks
// have not been received.
maxRequestsPerPeer = 20
// Maximum number of block requests for the reactor, pending or for which blocks have been received.
maxNumRequests = 64
)
type consensusReactor interface {
// for when we switch from blockchain reactor and fast sync to
// the consensus machine
SwitchToConsensus(state sm.State, skipWAL bool)
}
// BlockchainReactor handles long-term catchup syncing.
type BlockchainReactor struct {
p2p.BaseReactor
initialState sm.State // immutable
state sm.State
blockExec *sm.BlockExecutor
store *store.BlockStore
fastSync bool
stateSynced bool
fsm *BcReactorFSM
blocksSynced uint64
// Receive goroutine forwards messages to this channel to be processed in the context of the poolRoutine.
messagesForFSMCh chan bcReactorMessage
// Switch goroutine may send RemovePeer to the blockchain reactor. This is an error message that is relayed
// to this channel to be processed in the context of the poolRoutine.
errorsForFSMCh chan bcReactorMessage
// This channel is used by the FSM and indirectly the block pool to report errors to the blockchain reactor and
// the switch.
eventsFromFSMCh chan bcFsmMessage
swReporter *behaviour.SwitchReporter
}
// NewBlockchainReactor returns new reactor instance.
func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore,
fastSync bool) *BlockchainReactor {
if state.LastBlockHeight != store.Height() {
panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight,
store.Height()))
}
const capacity = 1000
eventsFromFSMCh := make(chan bcFsmMessage, capacity)
messagesForFSMCh := make(chan bcReactorMessage, capacity)
errorsForFSMCh := make(chan bcReactorMessage, capacity)
startHeight := store.Height() + 1
if startHeight == 1 {
startHeight = state.InitialHeight
}
bcR := &BlockchainReactor{
initialState: state,
state: state,
blockExec: blockExec,
fastSync: fastSync,
store: store,
messagesForFSMCh: messagesForFSMCh,
eventsFromFSMCh: eventsFromFSMCh,
errorsForFSMCh: errorsForFSMCh,
}
fsm := NewFSM(startHeight, bcR)
bcR.fsm = fsm
bcR.BaseReactor = *p2p.NewBaseReactor("BlockchainReactor", bcR)
// bcR.swReporter = behaviour.NewSwitchReporter(bcR.BaseReactor.Switch)
return bcR
}
// bcReactorMessage is used by the reactor to send messages to the FSM.
type bcReactorMessage struct {
event bReactorEvent
data bReactorEventData
}
type bFsmEvent uint
const (
// message type events
peerErrorEv = iota + 1
syncFinishedEv
)
type bFsmEventData struct {
peerID p2p.ID
err error
}
// bcFsmMessage is used by the FSM to send messages to the reactor
type bcFsmMessage struct {
event bFsmEvent
data bFsmEventData
}
// SetLogger implements service.Service by setting the logger on reactor and pool.
func (bcR *BlockchainReactor) SetLogger(l log.Logger) {
bcR.BaseService.Logger = l
bcR.fsm.SetLogger(l)
}
// OnStart implements service.Service.
func (bcR *BlockchainReactor) OnStart() error {
bcR.swReporter = behaviour.NewSwitchReporter(bcR.BaseReactor.Switch)
if bcR.fastSync {
go bcR.poolRoutine()
}
return nil
}
// OnStop implements service.Service.
func (bcR *BlockchainReactor) OnStop() {
_ = bcR.Stop()
}
// SwitchToFastSync is called by the state sync reactor when switching to fast sync.
func (bcR *BlockchainReactor) SwitchToFastSync(state sm.State) error {
bcR.fastSync = true
bcR.initialState = state
bcR.state = state
bcR.stateSynced = true
bcR.fsm = NewFSM(state.LastBlockHeight+1, bcR)
bcR.fsm.SetLogger(bcR.Logger)
go bcR.poolRoutine()
return nil
}
// GetChannels implements Reactor
func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
return []*p2p.ChannelDescriptor{
{
ID: BlockchainChannel,
Priority: 10,
SendQueueCapacity: 2000,
RecvBufferCapacity: 50 * 4096,
RecvMessageCapacity: bc.MaxMsgSize,
MessageType: &bcproto.Message{},
},
}
}
// AddPeer implements Reactor by sending our state to peer.
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.StatusResponse{
Base: bcR.store.Base(),
Height: bcR.store.Height(),
},
}, bcR.Logger)
// it's OK if send fails. will try later in poolRoutine
// peer is added to the pool once we receive the first
// bcStatusResponseMessage from the peer and call pool.updatePeer()
}
// sendBlockToPeer loads a block and sends it to the requesting peer.
// If the block doesn't exist a bcNoBlockResponseMessage is sent.
// If all nodes are honest, no node should be requesting for a block that doesn't exist.
func (bcR *BlockchainReactor) sendBlockToPeer(msg *bcproto.BlockRequest,
src p2p.Peer) (queued bool) {
block := bcR.store.LoadBlock(msg.Height)
if block != nil {
pbbi, err := block.ToProto()
if err != nil {
bcR.Logger.Error("Could not send block message to peer", "err", err)
return false
}
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.BlockResponse{Block: pbbi},
}, bcR.Logger)
}
bcR.Logger.Info("peer asking for a block we don't have", "src", src, "height", msg.Height)
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.NoBlockResponse{Height: msg.Height},
}, bcR.Logger)
}
func (bcR *BlockchainReactor) sendStatusResponseToPeer(msg *bcproto.StatusRequest, src p2p.Peer) (queued bool) {
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.StatusResponse{
Base: bcR.store.Base(),
Height: bcR.store.Height(),
},
}, bcR.Logger)
}
// RemovePeer implements Reactor by removing peer from the pool.
func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
msgData := bcReactorMessage{
event: peerRemoveEv,
data: bReactorEventData{
peerID: peer.ID(),
err: errSwitchRemovesPeer,
},
}
bcR.errorsForFSMCh <- msgData
}
// Receive implements Reactor by handling 4 types of messages (look below).
func (bcR *BlockchainReactor) ReceiveEnvelope(e p2p.Envelope) {
if err := bc.ValidateMsg(e.Message); err != nil {
bcR.Logger.Error("peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
_ = bcR.swReporter.Report(behaviour.BadMessage(e.Src.ID(), err.Error()))
return
}
bcR.Logger.Debug("Receive", "src", e.Src, "chID", e.ChannelID, "msg", e.Message)
switch msg := e.Message.(type) {
case *bcproto.BlockRequest:
if queued := bcR.sendBlockToPeer(msg, e.Src); !queued {
// Unfortunately not queued since the queue is full.
bcR.Logger.Error("Could not send block message to peer", "src", e.Src, "height", msg.Height)
}
case *bcproto.StatusRequest:
// Send peer our state.
if queued := bcR.sendStatusResponseToPeer(msg, e.Src); !queued {
// Unfortunately not queued since the queue is full.
bcR.Logger.Error("Could not send status message to peer", "src", e.Src)
}
case *bcproto.BlockResponse:
bi, err := types.BlockFromProto(msg.Block)
if err != nil {
bcR.Logger.Error("error transition block from protobuf", "err", err)
return
}
msgForFSM := bcReactorMessage{
event: blockResponseEv,
data: bReactorEventData{
peerID: e.Src.ID(),
height: bi.Height,
block: bi,
length: msg.Size(),
},
}
bcR.Logger.Info("Received", "src", e.Src, "height", bi.Height)
bcR.messagesForFSMCh <- msgForFSM
case *bcproto.NoBlockResponse:
msgForFSM := bcReactorMessage{
event: noBlockResponseEv,
data: bReactorEventData{
peerID: e.Src.ID(),
height: msg.Height,
},
}
bcR.Logger.Debug("Peer does not have requested block", "peer", e.Src, "height", msg.Height)
bcR.messagesForFSMCh <- msgForFSM
case *bcproto.StatusResponse:
// Got a peer status. Unverified.
msgForFSM := bcReactorMessage{
event: statusResponseEv,
data: bReactorEventData{
peerID: e.Src.ID(),
height: msg.Height,
length: msg.Size(),
},
}
bcR.messagesForFSMCh <- msgForFSM
default:
bcR.Logger.Error(fmt.Sprintf("unknown message type %T", msg))
}
}
func (bcR *BlockchainReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
msg := &bcproto.Message{}
err := proto.Unmarshal(msgBytes, msg)
if err != nil {
panic(err)
}
uw, err := msg.Unwrap()
if err != nil {
panic(err)
}
bcR.ReceiveEnvelope(p2p.Envelope{
ChannelID: chID,
Src: peer,
Message: uw,
})
}
// processBlocksRoutine processes blocks until signlaed to stop over the stopProcessing channel
func (bcR *BlockchainReactor) processBlocksRoutine(stopProcessing chan struct{}) {
processReceivedBlockTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
doProcessBlockCh := make(chan struct{}, 1)
lastHundred := time.Now()
lastRate := 0.0
ForLoop:
for {
select {
case <-stopProcessing:
bcR.Logger.Info("finishing block execution")
break ForLoop
case <-processReceivedBlockTicker.C: // try to execute blocks
select {
case doProcessBlockCh <- struct{}{}:
default:
}
case <-doProcessBlockCh:
for {
err := bcR.processBlock()
if err == errMissingBlock {
break
}
// Notify FSM of block processing result.
msgForFSM := bcReactorMessage{
event: processedBlockEv,
data: bReactorEventData{
err: err,
},
}
_ = bcR.fsm.Handle(&msgForFSM)
if err != nil {
break
}
bcR.blocksSynced++
if bcR.blocksSynced%100 == 0 {
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
height, maxPeerHeight := bcR.fsm.Status()
bcR.Logger.Info("Fast Sync Rate", "height", height,
"max_peer_height", maxPeerHeight, "blocks/s", lastRate)
lastHundred = time.Now()
}
}
}
}
}
// poolRoutine receives and handles messages from the Receive() routine and from the FSM.
func (bcR *BlockchainReactor) poolRoutine() {
bcR.fsm.Start()
sendBlockRequestTicker := time.NewTicker(trySendIntervalMS * time.Millisecond)
statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second)
stopProcessing := make(chan struct{}, 1)
go bcR.processBlocksRoutine(stopProcessing)
ForLoop:
for {
select {
case <-sendBlockRequestTicker.C:
if !bcR.fsm.NeedsBlocks() {
continue
}
_ = bcR.fsm.Handle(&bcReactorMessage{
event: makeRequestsEv,
data: bReactorEventData{
maxNumRequests: maxNumRequests}})
case <-statusUpdateTicker.C:
// Ask for status updates.
go bcR.sendStatusRequest()
case msg := <-bcR.messagesForFSMCh:
// Sent from the Receive() routine when status (statusResponseEv) and
// block (blockResponseEv) response events are received
_ = bcR.fsm.Handle(&msg)
case msg := <-bcR.errorsForFSMCh:
// Sent from the switch.RemovePeer() routine (RemovePeerEv) and
// FSM state timer expiry routine (stateTimeoutEv).
_ = bcR.fsm.Handle(&msg)
case msg := <-bcR.eventsFromFSMCh:
switch msg.event {
case syncFinishedEv:
stopProcessing <- struct{}{}
// Sent from the FSM when it enters finished state.
break ForLoop
case peerErrorEv:
// Sent from the FSM when it detects peer error
bcR.reportPeerErrorToSwitch(msg.data.err, msg.data.peerID)
if msg.data.err == errNoPeerResponse {
// Sent from the peer timeout handler routine
_ = bcR.fsm.Handle(&bcReactorMessage{
event: peerRemoveEv,
data: bReactorEventData{
peerID: msg.data.peerID,
err: msg.data.err,
},
})
}
// else {
// For slow peers, or errors due to blocks received from wrong peer | default:
bcR.Logger.Error("Event from FSM not supported", "type", msg.event)
}
case <-bcR.Quit():
break ForLoop
}
}
}
func (bcR *BlockchainReactor) reportPeerErrorToSwitch(err error, peerID p2p.ID) {
peer := bcR.Switch.Peers().Get(peerID)
if peer != nil {
_ = bcR.swReporter.Report(behaviour.BadMessage(peerID, err.Error()))
}
}
func (bcR *BlockchainReactor) processBlock() error {
first, second, err := bcR.fsm.FirstTwoBlocks()
if err != nil {
// We need both to sync the first block.
return err
}
chainID := bcR.initialState.ChainID
firstParts := first.MakePartSet(types.BlockPartSizeBytes)
firstPartSetHeader := firstParts.Header()
firstID := types.BlockID{Hash: first.Hash(), PartSetHeader: firstPartSetHeader}
// Finally, verify the first block using the second's commit
// NOTE: we can probably make this more efficient, but note that calling
// first.Hash() doesn't verify the tx contents, so MakePartSet() is
// currently necessary.
err = bcR.state.Validators.VerifyCommitLight(chainID, firstID, first.Height, second.LastCommit)
if err != nil {
bcR.Logger.Error("error during commit verification", "err", err,
"first", first.Height, "second", second.Height)
return errBlockVerificationFailure
}
bcR.store.SaveBlock(first, firstParts, second.LastCommit)
bcR.state, _, err = bcR.blockExec.ApplyBlock(bcR.state, firstID, first)
if err != nil {
panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
}
return nil
}
// Implements bcRNotifier
// sendStatusRequest broadcasts `BlockStore` height.
func (bcR *BlockchainReactor) sendStatusRequest() {
bcR.Switch.BroadcastEnvelope(p2p.Envelope{
ChannelID: BlockchainChannel,
Message: &bcproto.StatusRequest{},
})
}
// Implements bcRNotifier
// BlockRequest sends `BlockRequest` height.
func (bcR *BlockchainReactor) sendBlockRequest(peerID p2p.ID, height int64) error {
peer := bcR.Switch.Peers().Get(peerID)
if peer == nil {
return errNilPeerForBlockRequest
}
queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.BlockRequest{Height: height},
}, bcR.Logger)
if !queued {
return errSendQueueFull
}
return nil
}
// Implements bcRNotifier
func (bcR *BlockchainReactor) switchToConsensus() {
conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
if ok {
conR.SwitchToConsensus(bcR.state, bcR.blocksSynced > 0 || bcR.stateSynced)
bcR.eventsFromFSMCh <- bcFsmMessage{event: syncFinishedEv}
}
// else {
// Should only happen during testing.
// }
}
// Implements bcRNotifier
// Called by FSM and pool:
// - pool calls when it detects slow peer or when peer times out
// - FSM calls when:
// - adding a block (addBlock) fails
// - reactor processing of a block reports failure and FSM sends back the peers of first and second blocks
func (bcR *BlockchainReactor) sendPeerError(err error, peerID p2p.ID) {
bcR.Logger.Info("sendPeerError:", "peer", peerID, "error", err)
msgData := bcFsmMessage{
event: peerErrorEv,
data: bFsmEventData{
peerID: peerID,
err: err,
},
}
bcR.eventsFromFSMCh <- msgData
}
// Implements bcRNotifier
func (bcR *BlockchainReactor) resetStateTimer(name string, timer **time.Timer, timeout time.Duration) {
if timer == nil {
panic("nil timer pointer parameter")
}
if *timer == nil {
*timer = time.AfterFunc(timeout, func() {
msg := bcReactorMessage{
event: stateTimeoutEv,
data: bReactorEventData{
stateName: name,
},
}
bcR.errorsForFSMCh <- msg
})
} else {
(*timer).Reset(timeout)
}
} | // the FSM had already removed the peers
// } | random_line_split |
mod.rs | use std::collections::{HashMap, HashSet};
use std::process::exit;
use ansi_term::Colour::{Red, Green, Cyan};
use petgraph::{
graph::{EdgeIndex, NodeIndex},
Directed, Graph, Incoming, Outgoing,
};
/// The edge of a DelfGraph is a DelfEdge
pub mod edge;
/// The node of a DelfGraph is a DelfObject
pub mod object;
use crate::storage::{get_connection, DelfStorageConnection};
use crate::DelfYamls;
/// The DelfGraph is the core structure for delf's functionality. It contains the algorithm to traverse the graph, as well as metadata to perform the deletions.
#[derive(Debug)]
pub struct DelfGraph {
pub(crate) nodes: HashMap<String, NodeIndex>,
pub(crate) edges: HashMap<String, EdgeIndex>,
graph: Graph<object::DelfObject, edge::DelfEdge, Directed>,
storages: HashMap<String, Box<dyn DelfStorageConnection>>,
}
impl DelfGraph {
/// Create a new DelfGraph from a schema and a config. See [yaml_rust](../../yaml_rust/index.html) for information on creating the Yaml structs, or alternately use the helper functions: [read_files](../fn.read_files.html), [read_yamls](../fn.read_yamls.html) for constructing a DelfGraph from either paths or `&str` of yaml.
pub fn new(yamls: &DelfYamls) -> DelfGraph {
let schema = &yamls.schema;
let config = &yamls.config;
let mut edges_to_insert = Vec::new();
let mut nodes = HashMap::<String, NodeIndex>::new();
let mut edges = HashMap::<String, EdgeIndex>::new();
let mut graph = Graph::<object::DelfObject, edge::DelfEdge>::new();
// each yaml is an object
for yaml in schema.iter() {
let obj_name = String::from(yaml["object_type"]["name"].as_str().unwrap());
let obj_node = object::DelfObject::from(&yaml["object_type"]);
let node_id = graph.add_node(obj_node);
nodes.insert(obj_name.clone(), node_id);
// need to make sure all the nodes exist before edges can be added to the graph
for e in yaml["object_type"]["edge_types"].as_vec().unwrap().iter() {
let delf_edge = edge::DelfEdge::from(e);
edges_to_insert.push((obj_name.clone(), delf_edge));
}
}
// add all the edges to the graph
for (from, e) in edges_to_insert.iter_mut() {
if !nodes.contains_key(&e.to.object_type) {
eprintln!("Error creating edge {:#?}: No object with name {:#?}", e.name, e.to.object_type);
exit(1);
}
let edge_id = graph.add_edge(nodes[from], nodes[&e.to.object_type], e.clone());
edges.insert(String::from(&e.name), edge_id);
}
// create the storage map
let mut storages = HashMap::<String, Box<dyn DelfStorageConnection>>::new();
for yaml in config.iter() {
for storage in yaml["storages"].as_vec().unwrap().iter() {
let storage_name = String::from(storage["name"].as_str().unwrap());
storages.insert(
storage_name,
get_connection(
storage["plugin"].as_str().unwrap(),
storage["url"].as_str().unwrap(),
),
);
}
}
return DelfGraph {
nodes,
edges,
graph,
storages,
};
}
/// Pretty print the graph's contents.
pub fn print(&self) {
println!("{:#?}", self.graph);
}
/// Given an edge name, get the corresponding DelfEdge
pub fn get_edge(&self, edge_name: &String) -> &edge::DelfEdge {
let edge_id = self.edges.get(edge_name).unwrap();
return self.graph.edge_weight(*edge_id).unwrap();
}
/// Given an edge name and the ids of the to/from object instances, delete the edge
pub fn delete_edge(&self, edge_name: &String, from_id: &String, to_id: &String) {
let e = self.get_edge(edge_name);
e.delete_one(from_id, to_id, self);
}
/// Given an object name, get the corresponding DelfObject
pub fn | (&self, object_name: &String) -> &object::DelfObject {
let object_id = self.nodes.get(object_name).unwrap();
return self.graph.node_weight(*object_id).unwrap();
}
/// Given the object name and the id of the instance, delete the object
pub fn delete_object(&self, object_name: &String, id: &String) {
self._delete_object(object_name, id, None);
}
fn _delete_object(
&self,
object_name: &String,
id: &String,
from_edge: Option<&edge::DelfEdge>,
) {
let obj = self.get_object(object_name);
let deleted = obj.delete(id, from_edge, &self.storages);
if deleted {
let edges = self.graph.edges_directed(self.nodes[&obj.name], Outgoing);
for e in edges {
e.weight().delete_all(id, &obj.id_type, self);
}
}
}
/// Validate that the objects and edges described in the schema exist in the corresponding storage as expected. Additionally, ensure that all objects in the graph are reachable by traversal via `deep` or `refcount` edges starting at an object with deletion type of `directly`, `directly_only`, `short_ttl`, or `not_deleted`. This ensures that all objects are deletable and accounted for.
pub fn validate(&self) {
println!("\u{1f50d} {}", Cyan.bold().paint("Validating DelF graph..."));
let mut errs = Vec::new();
let mut passed = true;
for (_, node_id) in self.nodes.iter() {
match self.graph
.node_weight(*node_id)
.unwrap()
.validate(&self.storages) {
Err(e) => errs.push(e),
_ => ()
}
}
if errs.len() > 0 {
passed = false;
println!("\u{274c} {}", Red.paint("Not all objects found in storage"));
for err in errs.drain(..) {
println!(" {}", err);
}
} else {
println!("\u{2705} {}", Green.paint("Objects exist in storage"));
}
for (_, edge_id) in self.edges.iter() {
match self.graph.edge_weight(*edge_id).unwrap().validate(self) {
Err(e) => errs.push(e),
_ => ()
}
}
if errs.len() > 0 {
passed = false;
println!("\u{274c} {}", Red.paint("Not all edges found in storage"));
for err in errs.drain(..) {
println!(" {}", err);
}
} else {
println!("\u{2705} {}", Green.paint("Edges exist in storage"));
}
match self.reachability_analysis() {
Err(e) => errs.push(e),
_ => ()
}
if errs.len() > 0 {
passed = false;
println!("\u{274c} {}", Red.paint("Not all objects deletable"));
for err in errs.drain(..) {
println!(" {}", err);
}
} else {
println!("\u{2705} {}", Green.paint("All objects deletable"));
}
if passed {
println!("\u{1F680} {} \u{1F680}", Green.bold().paint("Validation successful!"));
} else {
println!("\u{26a0} {} \u{26a0}", Red.bold().paint("Validation errors found"));
}
}
// Starting from a directly deletable (or excepted) node, ensure all ndoes are reached.
fn reachability_analysis(&self) -> Result<(), String> {
let mut visited_nodes = HashSet::new();
for (_, node_id) in self.nodes.iter() {
let obj = self.graph.node_weight(*node_id).unwrap();
match obj.deletion {
object::DeleteType::ShortTTL
| object::DeleteType::Directly
| object::DeleteType::DirectlyOnly
| object::DeleteType::NotDeleted => {
// this object is a starting point in traversal, start traversal
self.visit_node(&obj.name, &mut visited_nodes);
}
_ => (),
}
}
if visited_nodes.len() != self.nodes.len() {
let node_set: HashSet<String> = self.nodes.keys().cloned().collect();
return Err(format!(
"Not all objects are deletable: {:?}",
node_set.difference(&visited_nodes)
));
} else {
return Ok(());
}
}
// Recursively visit all un-visited nodes that are connected via depp or refcounte edges from the starting node with the passed in name
fn visit_node(&self, name: &String, visited_nodes: &mut HashSet<String>) {
visited_nodes.insert(name.clone());
let edges = self.graph.edges_directed(self.nodes[name], Outgoing);
for e in edges {
let ew = e.weight();
match ew.deletion {
edge::DeleteType::Deep | edge::DeleteType::RefCount => {
if !visited_nodes.contains(&ew.to.object_type) {
self.visit_node(&ew.to.object_type, visited_nodes);
}
}
_ => (),
}
}
}
// find all the inbound edges for a given object
fn get_inbound_edges(&self, obj: &object::DelfObject) -> Vec<&edge::DelfEdge> {
let object_id = self.nodes.get(&obj.name).unwrap();
let edges = self.graph.edges_directed(*object_id, Incoming);
let mut res = Vec::new();
for edge in edges {
res.push(edge.weight());
}
return res;
}
/// Check all objects in the DelfGraph with the deletion type of `short_ttl` if there are instances of the object which are past their expiration time. If so, delete the objects.
pub fn check_short_ttl(&self) {
for (_, node_id) in self.nodes.iter() {
let obj = self.graph.node_weight(*node_id).unwrap();
for obj_id in obj.check_short_ttl(&self.storages).iter() {
self.delete_object(&obj.name, obj_id);
}
}
}
}
| get_object | identifier_name |
mod.rs | use std::collections::{HashMap, HashSet};
use std::process::exit;
use ansi_term::Colour::{Red, Green, Cyan};
use petgraph::{
graph::{EdgeIndex, NodeIndex},
Directed, Graph, Incoming, Outgoing,
};
/// The edge of a DelfGraph is a DelfEdge
pub mod edge;
/// The node of a DelfGraph is a DelfObject
pub mod object;
use crate::storage::{get_connection, DelfStorageConnection};
use crate::DelfYamls;
/// The DelfGraph is the core structure for delf's functionality. It contains the algorithm to traverse the graph, as well as metadata to perform the deletions.
#[derive(Debug)]
pub struct DelfGraph {
pub(crate) nodes: HashMap<String, NodeIndex>,
pub(crate) edges: HashMap<String, EdgeIndex>,
graph: Graph<object::DelfObject, edge::DelfEdge, Directed>,
storages: HashMap<String, Box<dyn DelfStorageConnection>>,
}
impl DelfGraph {
/// Create a new DelfGraph from a schema and a config. See [yaml_rust](../../yaml_rust/index.html) for information on creating the Yaml structs, or alternately use the helper functions: [read_files](../fn.read_files.html), [read_yamls](../fn.read_yamls.html) for constructing a DelfGraph from either paths or `&str` of yaml.
pub fn new(yamls: &DelfYamls) -> DelfGraph {
let schema = &yamls.schema;
let config = &yamls.config;
let mut edges_to_insert = Vec::new();
let mut nodes = HashMap::<String, NodeIndex>::new();
let mut edges = HashMap::<String, EdgeIndex>::new();
let mut graph = Graph::<object::DelfObject, edge::DelfEdge>::new();
// each yaml is an object
for yaml in schema.iter() {
let obj_name = String::from(yaml["object_type"]["name"].as_str().unwrap());
let obj_node = object::DelfObject::from(&yaml["object_type"]);
let node_id = graph.add_node(obj_node);
nodes.insert(obj_name.clone(), node_id);
// need to make sure all the nodes exist before edges can be added to the graph
for e in yaml["object_type"]["edge_types"].as_vec().unwrap().iter() {
let delf_edge = edge::DelfEdge::from(e);
edges_to_insert.push((obj_name.clone(), delf_edge));
}
}
// add all the edges to the graph
for (from, e) in edges_to_insert.iter_mut() {
if !nodes.contains_key(&e.to.object_type) {
eprintln!("Error creating edge {:#?}: No object with name {:#?}", e.name, e.to.object_type);
exit(1);
}
let edge_id = graph.add_edge(nodes[from], nodes[&e.to.object_type], e.clone());
edges.insert(String::from(&e.name), edge_id);
}
// create the storage map
let mut storages = HashMap::<String, Box<dyn DelfStorageConnection>>::new();
for yaml in config.iter() {
for storage in yaml["storages"].as_vec().unwrap().iter() {
let storage_name = String::from(storage["name"].as_str().unwrap());
storages.insert(
storage_name,
get_connection(
storage["plugin"].as_str().unwrap(),
storage["url"].as_str().unwrap(),
),
);
}
}
return DelfGraph {
nodes,
edges,
graph,
storages,
};
}
/// Pretty print the graph's contents.
pub fn print(&self) {
println!("{:#?}", self.graph);
}
/// Given an edge name, get the corresponding DelfEdge
pub fn get_edge(&self, edge_name: &String) -> &edge::DelfEdge {
let edge_id = self.edges.get(edge_name).unwrap();
return self.graph.edge_weight(*edge_id).unwrap();
}
/// Given an edge name and the ids of the to/from object instances, delete the edge
pub fn delete_edge(&self, edge_name: &String, from_id: &String, to_id: &String) {
let e = self.get_edge(edge_name);
e.delete_one(from_id, to_id, self);
}
/// Given an object name, get the corresponding DelfObject
pub fn get_object(&self, object_name: &String) -> &object::DelfObject {
let object_id = self.nodes.get(object_name).unwrap();
return self.graph.node_weight(*object_id).unwrap();
}
/// Given the object name and the id of the instance, delete the object
pub fn delete_object(&self, object_name: &String, id: &String) {
self._delete_object(object_name, id, None);
}
fn _delete_object(
&self,
object_name: &String,
id: &String,
from_edge: Option<&edge::DelfEdge>,
) {
let obj = self.get_object(object_name);
let deleted = obj.delete(id, from_edge, &self.storages);
if deleted {
let edges = self.graph.edges_directed(self.nodes[&obj.name], Outgoing);
for e in edges {
e.weight().delete_all(id, &obj.id_type, self);
}
}
}
/// Validate that the objects and edges described in the schema exist in the corresponding storage as expected. Additionally, ensure that all objects in the graph are reachable by traversal via `deep` or `refcount` edges starting at an object with deletion type of `directly`, `directly_only`, `short_ttl`, or `not_deleted`. This ensures that all objects are deletable and accounted for.
pub fn validate(&self) {
println!("\u{1f50d} {}", Cyan.bold().paint("Validating DelF graph..."));
let mut errs = Vec::new();
let mut passed = true;
for (_, node_id) in self.nodes.iter() {
match self.graph
.node_weight(*node_id)
.unwrap()
.validate(&self.storages) {
Err(e) => errs.push(e),
_ => ()
}
}
if errs.len() > 0 {
passed = false;
println!("\u{274c} {}", Red.paint("Not all objects found in storage"));
for err in errs.drain(..) {
println!(" {}", err);
}
} else {
println!("\u{2705} {}", Green.paint("Objects exist in storage"));
}
for (_, edge_id) in self.edges.iter() {
match self.graph.edge_weight(*edge_id).unwrap().validate(self) {
Err(e) => errs.push(e),
_ => ()
}
}
if errs.len() > 0 {
passed = false;
println!("\u{274c} {}", Red.paint("Not all edges found in storage"));
for err in errs.drain(..) {
println!(" {}", err);
}
} else {
println!("\u{2705} {}", Green.paint("Edges exist in storage"));
}
match self.reachability_analysis() {
Err(e) => errs.push(e),
_ => ()
}
if errs.len() > 0 {
passed = false;
println!("\u{274c} {}", Red.paint("Not all objects deletable"));
for err in errs.drain(..) {
println!(" {}", err);
}
} else {
println!("\u{2705} {}", Green.paint("All objects deletable"));
}
if passed {
println!("\u{1F680} {} \u{1F680}", Green.bold().paint("Validation successful!"));
} else {
println!("\u{26a0} {} \u{26a0}", Red.bold().paint("Validation errors found"));
}
}
| for (_, node_id) in self.nodes.iter() {
let obj = self.graph.node_weight(*node_id).unwrap();
match obj.deletion {
object::DeleteType::ShortTTL
| object::DeleteType::Directly
| object::DeleteType::DirectlyOnly
| object::DeleteType::NotDeleted => {
// this object is a starting point in traversal, start traversal
self.visit_node(&obj.name, &mut visited_nodes);
}
_ => (),
}
}
if visited_nodes.len() != self.nodes.len() {
let node_set: HashSet<String> = self.nodes.keys().cloned().collect();
return Err(format!(
"Not all objects are deletable: {:?}",
node_set.difference(&visited_nodes)
));
} else {
return Ok(());
}
}
// Recursively visit all un-visited nodes that are connected via depp or refcounte edges from the starting node with the passed in name
fn visit_node(&self, name: &String, visited_nodes: &mut HashSet<String>) {
visited_nodes.insert(name.clone());
let edges = self.graph.edges_directed(self.nodes[name], Outgoing);
for e in edges {
let ew = e.weight();
match ew.deletion {
edge::DeleteType::Deep | edge::DeleteType::RefCount => {
if !visited_nodes.contains(&ew.to.object_type) {
self.visit_node(&ew.to.object_type, visited_nodes);
}
}
_ => (),
}
}
}
// find all the inbound edges for a given object
fn get_inbound_edges(&self, obj: &object::DelfObject) -> Vec<&edge::DelfEdge> {
let object_id = self.nodes.get(&obj.name).unwrap();
let edges = self.graph.edges_directed(*object_id, Incoming);
let mut res = Vec::new();
for edge in edges {
res.push(edge.weight());
}
return res;
}
/// Check all objects in the DelfGraph with the deletion type of `short_ttl` if there are instances of the object which are past their expiration time. If so, delete the objects.
pub fn check_short_ttl(&self) {
for (_, node_id) in self.nodes.iter() {
let obj = self.graph.node_weight(*node_id).unwrap();
for obj_id in obj.check_short_ttl(&self.storages).iter() {
self.delete_object(&obj.name, obj_id);
}
}
}
} | // Starting from a directly deletable (or excepted) node, ensure all ndoes are reached.
fn reachability_analysis(&self) -> Result<(), String> {
let mut visited_nodes = HashSet::new(); | random_line_split |
mod.rs | use std::collections::{HashMap, HashSet};
use std::process::exit;
use ansi_term::Colour::{Red, Green, Cyan};
use petgraph::{
graph::{EdgeIndex, NodeIndex},
Directed, Graph, Incoming, Outgoing,
};
/// The edge of a DelfGraph is a DelfEdge
pub mod edge;
/// The node of a DelfGraph is a DelfObject
pub mod object;
use crate::storage::{get_connection, DelfStorageConnection};
use crate::DelfYamls;
/// The DelfGraph is the core structure for delf's functionality. It contains the algorithm to traverse the graph, as well as metadata to perform the deletions.
#[derive(Debug)]
pub struct DelfGraph {
pub(crate) nodes: HashMap<String, NodeIndex>,
pub(crate) edges: HashMap<String, EdgeIndex>,
graph: Graph<object::DelfObject, edge::DelfEdge, Directed>,
storages: HashMap<String, Box<dyn DelfStorageConnection>>,
}
impl DelfGraph {
/// Create a new DelfGraph from a schema and a config. See [yaml_rust](../../yaml_rust/index.html) for information on creating the Yaml structs, or alternately use the helper functions: [read_files](../fn.read_files.html), [read_yamls](../fn.read_yamls.html) for constructing a DelfGraph from either paths or `&str` of yaml.
pub fn new(yamls: &DelfYamls) -> DelfGraph |
/// Pretty print the graph's contents.
pub fn print(&self) {
println!("{:#?}", self.graph);
}
/// Given an edge name, get the corresponding DelfEdge
pub fn get_edge(&self, edge_name: &String) -> &edge::DelfEdge {
let edge_id = self.edges.get(edge_name).unwrap();
return self.graph.edge_weight(*edge_id).unwrap();
}
/// Given an edge name and the ids of the to/from object instances, delete the edge
pub fn delete_edge(&self, edge_name: &String, from_id: &String, to_id: &String) {
let e = self.get_edge(edge_name);
e.delete_one(from_id, to_id, self);
}
/// Given an object name, get the corresponding DelfObject
pub fn get_object(&self, object_name: &String) -> &object::DelfObject {
let object_id = self.nodes.get(object_name).unwrap();
return self.graph.node_weight(*object_id).unwrap();
}
/// Given the object name and the id of the instance, delete the object
pub fn delete_object(&self, object_name: &String, id: &String) {
self._delete_object(object_name, id, None);
}
fn _delete_object(
&self,
object_name: &String,
id: &String,
from_edge: Option<&edge::DelfEdge>,
) {
let obj = self.get_object(object_name);
let deleted = obj.delete(id, from_edge, &self.storages);
if deleted {
let edges = self.graph.edges_directed(self.nodes[&obj.name], Outgoing);
for e in edges {
e.weight().delete_all(id, &obj.id_type, self);
}
}
}
/// Validate that the objects and edges described in the schema exist in the corresponding storage as expected. Additionally, ensure that all objects in the graph are reachable by traversal via `deep` or `refcount` edges starting at an object with deletion type of `directly`, `directly_only`, `short_ttl`, or `not_deleted`. This ensures that all objects are deletable and accounted for.
pub fn validate(&self) {
println!("\u{1f50d} {}", Cyan.bold().paint("Validating DelF graph..."));
let mut errs = Vec::new();
let mut passed = true;
for (_, node_id) in self.nodes.iter() {
match self.graph
.node_weight(*node_id)
.unwrap()
.validate(&self.storages) {
Err(e) => errs.push(e),
_ => ()
}
}
if errs.len() > 0 {
passed = false;
println!("\u{274c} {}", Red.paint("Not all objects found in storage"));
for err in errs.drain(..) {
println!(" {}", err);
}
} else {
println!("\u{2705} {}", Green.paint("Objects exist in storage"));
}
for (_, edge_id) in self.edges.iter() {
match self.graph.edge_weight(*edge_id).unwrap().validate(self) {
Err(e) => errs.push(e),
_ => ()
}
}
if errs.len() > 0 {
passed = false;
println!("\u{274c} {}", Red.paint("Not all edges found in storage"));
for err in errs.drain(..) {
println!(" {}", err);
}
} else {
println!("\u{2705} {}", Green.paint("Edges exist in storage"));
}
match self.reachability_analysis() {
Err(e) => errs.push(e),
_ => ()
}
if errs.len() > 0 {
passed = false;
println!("\u{274c} {}", Red.paint("Not all objects deletable"));
for err in errs.drain(..) {
println!(" {}", err);
}
} else {
println!("\u{2705} {}", Green.paint("All objects deletable"));
}
if passed {
println!("\u{1F680} {} \u{1F680}", Green.bold().paint("Validation successful!"));
} else {
println!("\u{26a0} {} \u{26a0}", Red.bold().paint("Validation errors found"));
}
}
// Starting from a directly deletable (or excepted) node, ensure all ndoes are reached.
fn reachability_analysis(&self) -> Result<(), String> {
let mut visited_nodes = HashSet::new();
for (_, node_id) in self.nodes.iter() {
let obj = self.graph.node_weight(*node_id).unwrap();
match obj.deletion {
object::DeleteType::ShortTTL
| object::DeleteType::Directly
| object::DeleteType::DirectlyOnly
| object::DeleteType::NotDeleted => {
// this object is a starting point in traversal, start traversal
self.visit_node(&obj.name, &mut visited_nodes);
}
_ => (),
}
}
if visited_nodes.len() != self.nodes.len() {
let node_set: HashSet<String> = self.nodes.keys().cloned().collect();
return Err(format!(
"Not all objects are deletable: {:?}",
node_set.difference(&visited_nodes)
));
} else {
return Ok(());
}
}
// Recursively visit all un-visited nodes that are connected via depp or refcounte edges from the starting node with the passed in name
fn visit_node(&self, name: &String, visited_nodes: &mut HashSet<String>) {
visited_nodes.insert(name.clone());
let edges = self.graph.edges_directed(self.nodes[name], Outgoing);
for e in edges {
let ew = e.weight();
match ew.deletion {
edge::DeleteType::Deep | edge::DeleteType::RefCount => {
if !visited_nodes.contains(&ew.to.object_type) {
self.visit_node(&ew.to.object_type, visited_nodes);
}
}
_ => (),
}
}
}
// find all the inbound edges for a given object
fn get_inbound_edges(&self, obj: &object::DelfObject) -> Vec<&edge::DelfEdge> {
let object_id = self.nodes.get(&obj.name).unwrap();
let edges = self.graph.edges_directed(*object_id, Incoming);
let mut res = Vec::new();
for edge in edges {
res.push(edge.weight());
}
return res;
}
/// Check all objects in the DelfGraph with the deletion type of `short_ttl` if there are instances of the object which are past their expiration time. If so, delete the objects.
pub fn check_short_ttl(&self) {
for (_, node_id) in self.nodes.iter() {
let obj = self.graph.node_weight(*node_id).unwrap();
for obj_id in obj.check_short_ttl(&self.storages).iter() {
self.delete_object(&obj.name, obj_id);
}
}
}
}
| {
let schema = &yamls.schema;
let config = &yamls.config;
let mut edges_to_insert = Vec::new();
let mut nodes = HashMap::<String, NodeIndex>::new();
let mut edges = HashMap::<String, EdgeIndex>::new();
let mut graph = Graph::<object::DelfObject, edge::DelfEdge>::new();
// each yaml is an object
for yaml in schema.iter() {
let obj_name = String::from(yaml["object_type"]["name"].as_str().unwrap());
let obj_node = object::DelfObject::from(&yaml["object_type"]);
let node_id = graph.add_node(obj_node);
nodes.insert(obj_name.clone(), node_id);
// need to make sure all the nodes exist before edges can be added to the graph
for e in yaml["object_type"]["edge_types"].as_vec().unwrap().iter() {
let delf_edge = edge::DelfEdge::from(e);
edges_to_insert.push((obj_name.clone(), delf_edge));
}
}
// add all the edges to the graph
for (from, e) in edges_to_insert.iter_mut() {
if !nodes.contains_key(&e.to.object_type) {
eprintln!("Error creating edge {:#?}: No object with name {:#?}", e.name, e.to.object_type);
exit(1);
}
let edge_id = graph.add_edge(nodes[from], nodes[&e.to.object_type], e.clone());
edges.insert(String::from(&e.name), edge_id);
}
// create the storage map
let mut storages = HashMap::<String, Box<dyn DelfStorageConnection>>::new();
for yaml in config.iter() {
for storage in yaml["storages"].as_vec().unwrap().iter() {
let storage_name = String::from(storage["name"].as_str().unwrap());
storages.insert(
storage_name,
get_connection(
storage["plugin"].as_str().unwrap(),
storage["url"].as_str().unwrap(),
),
);
}
}
return DelfGraph {
nodes,
edges,
graph,
storages,
};
} | identifier_body |
exec.go | package runner
import (
"bufio"
"errors"
"fmt"
"io"
"log"
"os"
"os/exec"
"sync"
"syscall"
"time"
"github.com/solovev/orange-app-runner/system"
"github.com/solovev/orange-app-runner/util"
)
// RunProcess запускает основной процесс отслеживания
func RunProcess(cfg *util.Config) (int, error) {
util.Debug("Starting process: %s %v", cfg.ProcessPath, cfg.ProcessArgs)
if len(cfg.ProcessPath) == 0 {
return -1, errors.New("ProcessPath isn't specified")
}
cmd := exec.Command(cfg.ProcessPath, cfg.ProcessArgs...)
// Передаем в параметры переменные среды
cmd.Env = cfg.Environment
homeDir, err := util.GetProcessHomeDirectory(cfg.HomeDirectory)
if err != nil {
return -1, fmt.Errorf("Unable to create home directory \"%s\": %v", cfg.HomeDirectory, err)
}
cmd.Dir = homeDir
cmd.SysProcAttr = &syscall.SysProcAttr{}
// Атрибутом "ptrace" сообщаем системе, что будем отслеживать действия процесса.
cmd.SysProcAttr.Ptrace = true
// "Убиваем" всех потомков процесса при его смерти.
cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL
// Если текущий пользователь имеет привелегии администратора, то эмулируем запуск
// под другим пользователем, указанным в параметре "-l"
if cfg.User != system.GetCurrentUserName() {
uid, gid, err := system.FindUser(cfg.User)
if err != nil {
return -1, err
}
util.Debug("Set process credential to %s [UID: %d, GID: %d]", cfg.User, uid, gid)
cmd.SysProcAttr.Credential = &syscall.Credential{Uid: uid, Gid: gid}
}
wg := &sync.WaitGroup{}
wg.Add(2)
stdout, err := cmd.StdoutPipe()
if err != nil {
return -1, err
}
// Если параметр "-o" был указан, то перенаправляем stdout запущенного процесса в файл
// И, если не указан параметр "-q", еще в нашу консоль.
if len(cfg.OutputFile) > 0 {
var outputFile *os.File
outputFile, err = util.CreateFile(cfg.OutputFile)
if err != nil {
return -1, fmt.Errorf("Unable to create \"%s\": %v", cfg.OutputFile, err)
}
defer outputFile.Close()
go fromPipe(cfg, stdout, outputFile, wg)
} else {
// Параметр "-o" не был указан, перенаправляем stdout только в консоль.
go fromPipe(cfg, stdout, nil, wg)
}
stderr, err := cmd.StderrPipe()
if err != nil {
return -1, err
}
// Если параметр "-e" был указан, то перенаправляем stderr запущенного процесса в файл
// И, если не указан параметр "-q", еще в нашу консоль.
if len(cfg.ErrorFile) > 0 {
var errorFile *os.File
errorFile, err = util.CreateFile(cfg.ErrorFile)
if err != nil {
return -1, fmt.Errorf("Unable to create \"%s\": %v", cfg.ErrorFile, err)
}
defer errorFile.Close()
go fromPipe(cfg, stderr, errorFile, wg)
} else {
// Параметр "-e" не был указан, перенаправляем stderr только в консоль.
go fromPipe(cfg, stderr, nil, wg)
}
// Если указан параметр "-i", то stdin'ом процесса является указанный файл.
if len(cfg.InputFile) > 0 {
var inputFile *os.File
inputFile, err = util.OpenFile(cfg.InputFile)
if err != nil {
return -1, fmt.Errorf("Unable to open \"%s\": %v", cfg.InputFile, err)
}
defer inputFile.Close()
cmd.Stdin = inputFile
} else {
// Если не указан параметр "-i", stdin'ом процесса является консоль
cmd.Stdin = os.Stdin
}
// Запускаем процесс
err = cmd.Start()
if err != nil {
return -1, err
}
// Убеждаемся, что после выхода из функции (runProcess) запущенный процесс завершится.
defer cmd.Process.Kill()
pid := cmd.Process.Pid
util.Debug("Process id: %d", pid)
// Если указан параметр "-1", процесс будет выполнятся только на 1ом ядре процессора.
if len(cfg.Affinity) > 0 {
set, err := system.SetAffinity(cfg.Affinity, pid)
if err != nil {
return 0, err
}
util.Debug("Processor affinity was set to: %v", set)
}
// Если указан параметр "-s", создаем файл сбора статистики.
var storeFile *os.File
if len(cfg.StoreFile) > 0 {
storeFile, err = util.OpenFile(cfg.StoreFile)
if err != nil {
return -1, fmt.Errorf("Unable to open storage file: %v", err)
}
defer storeFile.Close()
}
// Начинаем отслеживать потребление ресурсов в отдельном потоке.
go measureUsage(cfg, storeFile, cmd.Process)
// В отдельном потоке начинаем отслеживать время жизни процесса, если указан "-t".
timeLimit := cfg.TimeLimit.Value()
if timeLimit > 0 {
go func() {
select {
case <-time.After(timeLimit):
checkError(cmd.Process, fmt.Errorf("Time limit [%s] exceeded", cfg.TimeLimit.String()))
}
}()
}
// Т.к. атрибут "ptrace" включен, то, после запуска, начинаем ждать пока
// процесс изменит свой статус (остановится, завершится, подаст сигнал и т.д.)
var ws syscall.WaitStatus
waitPid, err := syscall.Wait4(-1, &ws, syscall.WALL, nil)
if err != nil {
return -1, fmt.Errorf("Error [syscall.Wait4] for \"%s\": %v", cfg.BaseName, err)
}
if waitPid != pid {
return -1, fmt.Errorf("Error [syscall.Wait4]: First waited PID (%d) not equal to \"%s\" PID (%d)", waitPid, cfg.BaseName, pid)
}
// Ptrace-параметры
options := syscall.PTRACE_O_TRACEFORK
options |= syscall.PTRACE_O_TRACEVFORK
options |= syscall.PTRACE_O_TRACECLONE
options |= syscall.PTRACE_O_TRACEEXIT
options |= syscall.PTRACE_O_TRACEEXEC
parentPid := 0
// Начинаем рекурсивно отслеживать поведение запущенного процесса и его потомков.
// Пример. Если запущенный процесс (указанный в параметре "oar") создал потомка_1, то
// начинаем отслеживать этого потомка_1, потомок_1 тоже может создать потомка (потомка_2),
// в новой итерации начинаем отслеживать этого нового потомка (потомка_2).
// Если потомок_2 больше не создает новых потомков, то следущая итерация цикла
// будет принадлежать его родителю (потомку_1), если потомок_1 также не создает потомков,
// переходим к главному процессу (запущенному через "oar").
// Первая (и последняя) итерация будет отслеживать наш запущенный процесс, остальные - потомков.
// Creation 0 | Main Process Exit 5 | - - - - - Main Process
// 1 | - Child_1 4 | - - - Child_1
// 2 | - - - Child_2 3 | Child_2
for {
// После того как процесс-потомок остановился (после syscall.Wait4)
// Передаем ему ptrace-параметры, которые заставят его останавливаться
// в тех случаях, когда он начинает создавать дочерний процесс.
syscall.PtraceSetOptions(waitPid, options)
syscall.PtraceCont(waitPid, 0)
parentPid = waitPid
// Снова ждем пока процесс-потомок изменит свой статус, теперь это может быть
// не только остановка, завершение, сигналирование, но и создание дочернего процесса.
waitPid, err = syscall.Wait4(-1, &ws, syscall.WALL, nil)
if err != nil {
return -1, fmt.Errorf("Error [syscall.Wait4] for [PID: %d, PPID %d]: %v", waitPid, parentPid, err)
}
command := system.GetProcessCommand(waitPid)
util.Debug("Waited PID: %d, PPID: %d, CMD: %s", waitPid, parentPid, command)
// Проверяем, завершился ли процесс-потомок
if ws.Exited() {
util.Debug(" - Process [PID: %d] finished", waitPid)
// Если завершенный процесс-потомок является нашим запущенным процессом
// (процессом, указанным в параметре "oar"), то ломаем цикл for,
// и выходим из функции (runProcess) с кодом выхода ws.ExitStatus()
if waitPid == pid {
break
}
// Если нет, переходим к его родителю.
continue
}
if ws.Signaled() {
util.Debug(" - Process [PID: %d] signaled: %v", waitPid, ws)
continue
}
sigtrap := uint32(syscall.SIGTRAP)
sigsegv := uint32(syscall.SIGSEGV)
// Если причиной изменения статуса является создание дочернего процесса:
// Если параметер "-Xacp" не установлен, то после попытки создать дочерний процесс
// функция (runProcess) завершится, сработает defer cmd.Process.Kill()
// (процесс будет убит), а т.к. в атрибутах запуска процесса стоит
// Pdeathsig: syscall.SIGKILL, то будут убиты все созданные потомки.
if !cfg.AllowCreateProcesses {
switch uint32(ws) >> 8 {
case sigtrap | (syscall.PTRACE_EVENT_CLONE << 8):
// Для создания отдельного потока, процесс создает потомка,
// параметр "-Xamt" разрешает многопоточность.
if !cfg.MultiThreadedProcess {
return -1, fmt.Errorf("Process attempt to clone himself")
}
clonePid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("Unable to retrieve id of cloned process: %v", err)
}
util.Debug("Process [%d] just maked clone [%d]", waitPid, clonePid)
case sigtrap | (syscall.PTRACE_EVENT_FORK << 8):
fallthrough
case sigtrap | (syscall.PTRACE_EVENT_VFORK << 8):
fallthrough
case sigtrap | (syscall.PTRACE_EVENT_VFORK_DONE << 8):
fallthrough
case sigtrap | (syscall.PTRACE_EVENT_EXEC << 8):
return -1, fmt.Errorf("Attempt to create new process")
case sigsegv:
return -1, fmt.Errorf("Segmentation fault! [PID %d, PPID %d]", waitPid, parentPid)
}
// Если параметер "-Xacp" установлен, то просто выводим инфу о созданных потомках
} else {
switch uint32(ws) >> 8 {
case sigtrap | (syscall.PTRACE_EVENT_EXIT << 8):
util.Debug(" - Detected exit event.")
case sigtrap | (syscall.PTRACE_EVENT_CLONE << 8):
nPid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("- [PTRACE_EVENT_CLONE] Ptrace event message retrieval failed: %v", err)
}
util.Debug("- Ptrace clone [%d] event detected", nPid)
case sigtrap | (syscall.PTRACE_EVENT_FORK << 8):
nPid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("- [PTRACE_EVENT_FORK] Ptrace event message retrieval failed: %v", err)
}
util.Debug("- Ptrace fork [%d] event detected", nPid)
case sigtrap | (syscall.PTRACE_EVENT_VFORK << 8):
nPid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("- [PTRACE_EVENT_VFORK] Ptrace event message retrieval failed: %v", err)
}
util.Debug("- Ptrace vfork [%d] event detected", nPid)
case sigtrap | (syscall.PTRACE_EVENT_VFORK_DONE << 8):
nPid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("- [PTRACE_EVENT_VFORK_DONE] Ptrace event message retrieval failed: %v", err)
}
util.Debug("- Ptrace vfork done [%d] event detected", nPid)
case sigtrap | (syscall.PTRACE_EVENT_EXEC << 8):
util.Debug("- Ptrace exec event detected")
case sigtrap | (0x80 << 8): // PTRACE_EVENT_STOP
util.Debug("- Ptrace stop event detected")
case sigtrap:
util.Debug("- Sigtrap detected")
case uint32(syscall.SIGCHLD):
util.Debug("- Sigchld detected")
case uint32(syscall.SIGSTOP):
util.Debug("- Sigstop detected")
case sigsegv:
util.Debug("- Sigsegv detected.")
return -1, fmt.Errorf("Segmentation fault! [PID %d, PPID %d]", waitPid, parentPid)
default:
util.Debug(" - Process [%d] stopped for unknown reasons [Status %v, Signal %d]", waitPid, ws, ws.StopSignal())
}
}
}
// Ждем, пока функции перенаправления stdout и stderr в файл/консоль,
// запущенные в отдельных потоках, закончат свою работу |
func measureUsage(cfg *util.Config, storage *os.File, process *os.Process) {
// Проверяем, не завершился ли процесс до того, как мы начнем считать потребление ресурсов.
if _, err := os.Stat(fmt.Sprintf("/proc/%d/stat", process.Pid)); err == nil {
// Потребление CPU в % считается по такой формуле:
// consumtion = (cores * (ptA - ptB) * 100) / (ttA - ttB)
// Где cores - Количество используемых ядер процессорa
// ptA - Потребляемое время cpu процессом в момент времени А
// ptB - Потребляемое время cpu процессом в момент времени B
// ttA - Нагруженность процессора (общее время) в момент A
// ttB - Нагруженность процессора (общее время) в момент B
// Замер А позже замера B (A > B)
ptB, _, err := system.GetProcessStats(process.Pid)
checkError(process, err)
ttB, err := system.GetTotalCPUTime()
checkError(process, err)
cores, err := system.GetCPUCount(process.Pid)
checkError(process, err)
util.Debug("Process using %d cpu cores.", cores)
idle := 0
idleLimit := cfg.IdleLimit.Seconds()
// Проводим замер каждую секунду? работы программы
ticker := time.NewTicker(time.Second)
for {
select {
case <-ticker.C:
ttA, err := system.GetTotalCPUTime()
checkError(process, err)
ptA, processMemory, err := system.GetProcessStats(process.Pid)
checkError(process, err)
// Расчитываем потребление CPU
load := float64(uint64(cores)*(ptA-ptB)) / float64(ttA-ttB)
if idleLimit > 0 {
// Если потребление CPU меньше чем допустимая нагрузка
// увеличиваем счетчик простоя (idle)
if cfg.RequiredLoad.Value() > load {
idle++
} else {
idle = 0
}
}
stringMemory := util.StringifyMemory(processMemory)
stringLoad := util.StringifyLoad(load)
util.Debug(" - [Memory: %s/%s, Load: %s/%s]", stringMemory, cfg.MemoryLimit.String(), stringLoad, cfg.RequiredLoad.String())
// Записываем полученные данные о потреблении ресурсов в файл, указанный в "-s".
if storage != nil {
storage.WriteString(fmt.Sprintf("%s,%f,%d\n", time.Now().Format("15:04:05"), load, processMemory))
err = storage.Sync()
checkError(process, err)
}
// Проверка на превышение указанных лимитов (если параметры были указаны)
if idleLimit > 0 && idle >= idleLimit {
checkError(process, fmt.Errorf("Idle time limit [%d] exceeded", cfg.IdleLimit.Seconds()))
}
memoryLimit := cfg.MemoryLimit.Value()
if memoryLimit > 0 && processMemory > memoryLimit {
checkError(process, fmt.Errorf("Memory limit [%s] exceeded", cfg.MemoryLimit.String()))
}
ptB = ptA
ttB = ttA
}
}
}
}
func checkError(process *os.Process, err error) {
if err != nil {
log.Printf("Process killed from subthread. Cause: %v\n", err)
if process != nil {
process.Kill() // Catch the error?
}
os.Exit(0)
}
}
func fromPipe(cfg *util.Config, r io.Reader, f *os.File, wg *sync.WaitGroup) {
defer wg.Done()
scanner := bufio.NewScanner(r)
for scanner.Scan() {
text := scanner.Text()
if len(text) == 0 {
continue
}
if !cfg.Quiet {
log.Printf(util.Bold("[%s]: %s"), cfg.BaseName, text)
}
if f != nil {
f.WriteString(fmt.Sprintf("%s\n", text))
}
}
if err := scanner.Err(); err != nil {
checkError(nil, fmt.Errorf("Pipe handling error: %v", err))
}
} | wg.Wait()
return ws.ExitStatus(), nil
} | random_line_split |
exec.go | package runner
import (
"bufio"
"errors"
"fmt"
"io"
"log"
"os"
"os/exec"
"sync"
"syscall"
"time"
"github.com/solovev/orange-app-runner/system"
"github.com/solovev/orange-app-runner/util"
)
// RunProcess запускает основной процесс отслеживания
func RunProcess(cfg *util.Config) (int, error) {
util.Debug("Starting process: %s %v", cfg.ProcessPath, cfg.ProcessArgs)
if len(cfg.ProcessPath) == 0 {
return -1, errors.New("ProcessPath isn't specified")
}
cmd := exec.Command(cfg.ProcessPath, cfg.ProcessArgs...)
// Передаем в параметры переменные среды
cmd.Env = cfg.Environment
homeDir, err := util.GetProcessHomeDirectory(cfg.HomeDirectory)
if err != nil {
return -1, fmt.Errorf("Unable to create home directory \"%s\": %v", cfg.HomeDirectory, err)
}
cmd.Dir = homeDir
cmd.SysProcAttr = &syscall.SysProcAttr{}
// Атрибутом "ptrace" сообщаем системе, что будем отслеживать действия процесса.
cmd.SysProcAttr.Ptrace = true
// "Убиваем" всех потомков процесса при его смерти.
cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL
// Если текущий пользователь имеет привелегии администратора, то эмулируем запуск
// под другим пользователем, указанным в параметре "-l"
if cfg.User != system.GetCurrentUserName() {
uid, gid, err := system.FindUser(cfg.User)
if err != nil {
return -1, err
}
util.Debug("Set process credential to %s [UID: %d, GID: %d]", cfg.User, uid, gid)
cmd.SysProcAttr.Credential = &syscall.Credential{Uid: uid, Gid: gid}
}
wg := &sync.WaitGroup{}
wg.Add(2)
stdout, err := cmd.StdoutPipe()
if err != nil {
return -1, err
}
// Если параметр "-o" был указан, то перенаправляем stdout запущенного процесса в файл
// И, если не указан параметр "-q", еще в нашу консоль.
if len(cfg.OutputFile) > 0 {
var outputFile *os.File
outputFile, err = util.CreateFile(cfg.OutputFile)
if err != nil {
return -1, fmt.Errorf("Unable to create \"%s\": %v", cfg.OutputFile, err)
}
defer outputFile.Close()
go fromPipe(cfg, stdout, outputFile, wg)
} else {
// Параметр "-o" не был указан, перенаправляем stdout только в консоль.
go fromPipe(cfg, stdout, nil, wg)
}
stderr, err := cmd.StderrPipe()
if err != nil {
return -1, err
}
// Если параметр "-e" был указан, то перенаправляем stderr запущенного процесса в файл
// И, если не указан параметр "-q", еще в нашу консоль.
if len(cfg.ErrorFile) > 0 {
var errorFile *os.File
errorFile, err = util.CreateFile(cfg.ErrorFile)
if err != nil {
return -1, fmt.Errorf("Unable to create \"%s\": %v", cfg.ErrorFile, err)
}
defer errorFile.Close()
go fromPipe(cfg, stderr, errorFile, wg)
} else {
// Параметр "-e" не был указан, перенаправляем stderr только в консоль.
go fromPipe(cfg, stderr, nil, wg)
}
// Если указан параметр "-i", то stdin'ом процесса является указанный файл.
if len(cfg.InputFile) > 0 {
var inputFile *os.File
inputFile, err = util.OpenFile(cfg.InputFile)
if err != nil {
return -1, fmt.Errorf("Unable to open \"%s\": %v", cfg.InputFile, err)
}
defer inputFile.Close()
cmd.Stdin = inputFile
} else {
// Если не указан параметр "-i", stdin'ом процесса является консоль
cmd.Stdin = os.Stdin
}
// Запускаем процесс
err = cmd.Start()
if err != nil {
return -1, err
}
// Убеждаемся, что после выхода из функции (runProcess) запущенный процесс завершится.
defer cmd.Process.Kill()
pid := cmd.Process.Pid
util.Debug("Process id: %d", pid)
// Если указан параметр "-1", процесс будет выполнятся только на 1ом ядре процессора.
if len(cfg.Affinity) > 0 {
set, err := system.SetAffinity(cfg.Affinity, pid)
if err != nil {
return 0, err
}
util.Debug("Processor affinity was set to: %v", set)
}
// Если указан параметр "-s", создаем файл сбора статистики.
var storeFile *os.File
if len(cfg.StoreFile) > 0 {
storeFile, err = util.OpenFile(cfg.StoreFile)
if err != nil {
return -1, fmt.Errorf("Unable to open storage file: %v", err)
}
defer storeFile.Close()
}
// Начинаем отслеживать потребление ресурсов в отдельном потоке.
go measureUsage(cfg, storeFile, cmd.Process)
// В отдельном потоке начинаем отслеживать время жизни процесса, если указан "-t".
timeLimit := cfg.TimeLimit.Value()
if timeLimit > 0 {
go func() {
select {
case <-time.After(timeLimit):
checkError(cmd.Process, fmt.Errorf("Time limit [%s] exceeded", cfg.TimeLimit.String()))
}
}()
}
// Т.к. атрибут "ptrace" включен, то, после запуска, начинаем ждать пока
// процесс изменит свой статус (остановится, завершится, подаст сигнал и т.д.)
var ws syscall.WaitStatus
waitPid, err := syscall.Wait4(-1, &ws, syscall.WALL, nil)
if err != nil {
return -1, fmt.Errorf("Error [syscall.Wait4] for \"%s\": %v", cfg.BaseName, err)
}
if waitPid != pid {
return -1, fmt.Errorf("Error [syscall.Wait4]: First waited PID (%d) not equal to \"%s\" PID (%d)", waitPid, cfg.BaseName, pid)
}
// Ptrace-параметры
options := syscall.PTRACE_O_TRACEFORK
options |= syscall.PTRACE_O_TRACEVFORK
options |= syscall.PTRACE_O_TRACECLONE
options |= syscall.PTRACE_O_TRACEEXIT
options |= syscall.PTRACE_O_TRACEEXEC
parentPid := 0
// Начинаем рекурсивно отслеживать поведение запущенного процесса и его потомков.
// Пример. Если запущенный процесс (указанный в параметре "oar") создал потомка_1, то
// начинаем отслеживать этого потомка_1, потомок_1 тоже может создать потомка (потомка_2),
// в новой итерации начинаем отслеживать этого нового потомка (потомка_2).
// Если потомок_2 больше не создает новых потомков, то следущая итерация цикла
// будет принадлежать его родителю (потомку_1), если потомок_1 также не создает потомков,
// переходим к главному процессу (запущенному через "oar").
// Первая (и последняя) итерация будет отслеживать наш запущенный процесс, остальные - потомков.
// Creation 0 | Main Process Exit 5 | - - - - - Main Process
// 1 | - Child_1 4 | - - - Child_1
// 2 | - - - Child_2 3 | Child_2
for {
// После того как процесс-потомок остановился (после syscall.Wait4)
// Передаем ему ptrace-параметры, которые заставят его останавливаться
// в тех случаях, когда он начинает создавать дочерний процесс.
syscall.PtraceSetOptions(waitPid, options)
syscall.PtraceCont(waitPid, 0)
parentPid = waitPid
// Снова ждем пока процесс-потомок изменит свой статус, теперь это может быть
// не только остановка, завершение, сигналирование, но и создание дочернего процесса.
waitPid, err = syscall.Wait4(-1, &ws, syscall.WALL, nil)
if err != nil {
return -1, fmt.Errorf("Error [syscall.Wait4] for [PID: %d, PPID %d]: %v", waitPid, parentPid, err)
}
command := system.GetProcessCommand(waitPid)
util.Debug("Waited PID: %d, PPID: %d, CMD: %s", waitPid, parentPid, command)
// Проверяем, завершился ли процесс-потомок
if ws.Exited() {
util.Debug(" - Process [PID: %d] finished", waitPid)
// Если завершенный процесс-потомок является нашим запущенным процессом
// (процессом, указанным в параметре "oar"), то ломаем цикл for,
// и выходим из функции (runProcess) с кодом выхода ws.ExitStatus()
if waitPid == pid {
break
}
// Если нет, переходим к его родителю.
continue
}
if ws.Signaled() {
util.Debug(" - Process [PID: %d] signaled: %v", waitPid, ws)
continue
}
sigtrap := uint32(syscall.SIGTRAP)
sigsegv := uint32(syscall.SIGSEGV)
// Если причиной изменения статуса является создание дочернего процесса:
// Если параметер "-Xacp" не установлен, то после попытки создать дочерний процесс
// функция (runProcess) завершится, сработает defer cmd.Process.Kill()
// (процесс будет убит), а т.к. в атрибутах запуска процесса стоит
// Pdeathsig: syscall.SIGKILL, то будут убиты все созданные потомки.
if !cfg.AllowCreateProcesses {
switch uint32(ws) >> 8 {
case sigtrap | (syscall.PTRACE_EVENT_CLONE << 8):
// Для создания отдельного потока, процесс создает потомка,
// параметр "-Xamt" разрешает многопоточность.
if !cfg.MultiThreadedProcess {
return -1, fmt.Errorf("Process attempt to clone himself")
}
clonePid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("Unable to retrieve id of cloned process: %v", err)
}
util.Debug("Process [%d] just maked clone [%d]", waitPid, clonePid)
case sigtrap | (syscall.PTRACE_EVENT_FORK << 8):
fallthrough
case sigtrap | (syscall.PTRACE_EVENT_VFORK << 8):
fallthrough
case sigtrap | (syscall.PTRACE_EVENT_VFORK_DONE << 8):
fallthrough
case sigtrap | (syscall.PTRACE_EVENT_EXEC << 8):
return -1, fmt.Errorf("Attempt to create new process")
case sigsegv:
return -1, fmt.Errorf("Segmentation fault! [PID %d, PPID %d]", waitPid, parentPid)
}
// Если параметер "-Xacp" установлен, то просто выводим инфу о созданных потомках
} else {
switch uint32(ws) >> 8 {
case sigtrap | (syscall.PTRACE_EVENT_EXIT << 8):
util.Debug(" - Detected exit event.")
case sigtrap | (syscall.PTRACE_EVENT_CLONE << 8):
nPid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("- [PTRACE_EVENT_CLONE] Ptrace event message retrieval failed: %v", err)
}
util.Debug("- Ptrace clone [%d] event detected", nPid)
case sigtrap | (syscall.PTRACE_EVENT_FORK << 8):
nPid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("- [PTRACE_EVENT_FORK] Ptrace event message retrieval failed: %v", err)
}
util.Debug("- Ptrace fork [%d] event detected", nPid)
case sigtrap | (syscall.PTRACE_EVENT_VFORK << 8):
nPid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("- [PTRACE_EVENT_VFORK] Ptrace event message retrieval failed: %v", err)
}
util.Debug("- Ptrace vfork [%d] event detected", nPid)
case sigtrap | (syscall.PTRACE_EVENT_VFORK_DONE << 8):
nPid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("- [PTRACE_EVENT_VFORK_DONE] Ptrace event message retrieval failed: %v", err)
}
util.Debug("- Ptrace vfork done [%d] event detected", nPid)
case sigtrap | (syscall.PTRACE_EVENT_EXEC << 8):
util.Debug("- Ptrace exec event detected")
case sigtrap | (0x80 << 8): // PTRACE_EVENT_STOP
util.Debug("- Ptrace stop event detected")
case sigtrap:
util.Debug("- Sigtrap detected")
case uint32(syscall.SIGCHLD):
util.Debug("- Sigchld detected")
case uint32(syscall.SIGSTOP):
util.Debug("- Sigstop detected")
case sigsegv:
util.Debug("- Sigsegv detected.")
return -1, fmt.Errorf("Segmentation fault! [PID %d, PPID %d]", waitPid, parentPid)
default:
util.Debug(" - Process [%d] stopped for unknown reasons [Status %v, Signal %d]", waitPid, ws, ws.StopSignal())
}
}
}
// Ждем, пока функции перенаправления stdout и stderr в файл/консоль,
// запущенные в отдельных потоках, закончат свою работу
wg.Wait()
return ws.ExitStatus(), nil
}
func measureUsage(cfg *util.Config, storage *os.File, process *os.Process) {
// Проверяем, не завершился ли процесс до того, как мы начнем считать потребление ресурсов.
if _, err := os.Stat(fmt.Sprintf("/proc/%d/stat", process.Pid)); err == nil {
// Потребление CPU в % считается по такой формуле:
// consumtion = (cores * (ptA - ptB) * 100) / (ttA - ttB)
// Где cores - Количество используемых ядер процессорa
// ptA - Потребляемое время cpu процессом в момент времени А
// ptB - Потребляемое время cpu процессом в момент времени B
// ttA - Нагруженность процессора (общее время) в момент A
// ttB - Нагруженность процессора (общее время) в момент B
// Замер А позже замера B (A > B)
ptB, _, err := system.GetProcessStats(process.Pid)
checkError(process, err)
ttB, err := system.GetTotalCPUTime()
checkError(process, err)
cores, err := system.GetCPUCount(process.Pid)
checkError(process, err)
util.Debug("Process using %d cpu cores.", cores)
idle := 0
idleLimit := cfg.IdleLimit.Seconds()
// Проводим замер каждую секунду? работы программы
ticker := time.NewTicker(time.Second)
for {
select {
case <-ticker.C:
ttA, err := system.GetTotalCPUTime()
checkError(process, err)
ptA, processMemory, err := system.GetProcessStats(process.Pid)
checkError(process, err)
// Расчитываем потребление CPU
load := float64(uint64(cores)*(ptA-ptB)) / float64(ttA-ttB)
if idleLimit > 0 {
// Если потребление CPU меньше чем допустимая нагрузка
// увеличиваем счетчик простоя (idle)
if cfg.RequiredLoad.Value() > load {
idle++
} else {
idle = 0
}
}
stringMemory := util.StringifyMemory(processMemory)
stringLoad := util.StringifyLoad(load)
util.Debug(" - [Memory: %s/%s, Load: %s/%s]", stringMemory, cfg.MemoryLimit.String(), stringLoad, cfg.RequiredLoad.String())
// Записываем полученные данные о потреблении ресурсов в файл, указанный в "-s".
if storage != nil {
storage.WriteString(fmt.Sprintf("%s,%f,%d\n", time.Now().Format("15:04:05"), load, processMemory))
err = storage.Sync()
checkError(process, err)
}
// Проверка на превышение указанных лимитов (если параметры были указаны)
if idleLimit > 0 && idle >= idleLimit {
checkError(process, fmt.Errorf("Idle time limit [%d] exceeded", cfg.IdleLimit.Seconds()))
}
memoryLimit := cfg.MemoryLimit.Value()
if memoryLimit > 0 && processMemory > memoryLimit {
checkError(process, fmt.Errorf("Memory limit [%s] exceeded", cfg.MemoryLimit.String()))
}
ptB = ptA
ttB = ttA
}
}
}
}
func checkError(process *os.Process, err error) {
if err != nil {
log.Printf("Process killed from subthread. Cause: %v\n", err)
if process != nil {
process.Kill() // Catch the error?
}
os.Exit(0)
}
}
func fromPipe(cfg *util.Config, r io.Reader, f *os.File, wg *sync.WaitGroup) {
defer wg.Done()
scanner := bufio.NewScanner(r)
for scanner.Scan() {
text := scanner.Text()
if len(text) == 0 {
continue
}
if !cfg.Quiet {
log.Printf(util.Bold("[%s]: %s"), cfg.BaseName, text)
}
if f != nil {
f.WriteString(fmt.Sprintf("%s\n", text))
}
}
if err := scanner.Err(); err != nil {
checkError(nil, fmt.Errorf("Pipe handling error: %v", err))
}
}
| conditional_block | ||
exec.go | package runner
import (
"bufio"
"errors"
"fmt"
"io"
"log"
"os"
"os/exec"
"sync"
"syscall"
"time"
"github.com/solovev/orange-app-runner/system"
"github.com/solovev/orange-app-runner/util"
)
// RunProcess запускает основной процесс отслеживания
func RunProcess(cfg *util.Config) (int, error) {
util.Debug("Starting process: %s %v", cfg.ProcessPath, cfg.ProcessArgs)
if len(cfg.ProcessPath) == 0 {
return -1, errors.New("ProcessPath isn't specified")
}
cmd := exec.Command(cfg.ProcessPath, cfg.ProcessArgs...)
// Передаем в параметры переменные среды
cmd.Env = cfg.Environment
homeDir, err := util.GetProcessHomeDirectory(cfg.HomeDirectory)
if err != nil {
return -1, fmt.Errorf("Unable to create home directory \"%s\": %v", cfg.HomeDirectory, err)
}
cmd.Dir = homeDir
cmd.SysProcAttr = &syscall.SysProcAttr{}
// Атрибутом "ptrace" сообщаем системе, что будем отслеживать действия процесса.
cmd.SysProcAttr.Ptrace = true
// "Убиваем" всех потомков процесса при его смерти.
cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL
// Если текущий пользователь имеет привелегии администратора, то эмулируем запуск
// под другим пользователем, указанным в параметре "-l"
if cfg.User != system.GetCurrentUserName() {
uid, gid, err := system.FindUser(cfg.User)
if err != nil {
return -1, err
}
util.Debug("Set process credential to %s [UID: %d, GID: %d]", cfg.User, uid, gid)
cmd.SysProcAttr.Credential = &syscall.Credential{Uid: uid, Gid: gid}
}
wg := &sync.WaitGroup{}
wg.Add(2)
stdout, err := cmd.StdoutPipe()
if err != nil {
return -1, err
}
// Если параметр "-o" был указан, то перенаправляем stdout запущенного процесса в файл
// И, если не указан параметр "-q", еще в нашу консоль.
if len(cfg.OutputFile) > 0 {
var outputFile *os.File
outputFile, err = util.CreateFile(cfg.OutputFile)
if err != nil {
return -1, fmt.Errorf("Unable to create \"%s\": %v", cfg.OutputFile, err)
}
defer outputFile.Close()
go fromPipe(cfg, stdout, outputFile, wg)
} else {
// Параметр "-o" не был указан, перенаправляем stdout только в консоль.
go fromPipe(cfg, stdout, nil, wg)
}
stderr, err := cmd.StderrPipe()
if err != nil {
return -1, err
}
// Если параметр "-e" был указан, то перенаправляем stderr запущенного процесса в файл
// И, если не указан параметр "-q", еще в нашу консоль.
if len(cfg.ErrorFile) > 0 {
var errorFile *os.File
errorFile, err = util.CreateFile(cfg.ErrorFile)
if err != nil {
return -1, fmt.Errorf("Unable to create \"%s\": %v", cfg.ErrorFile, err)
}
defer errorFile.Close()
go fromPipe(cfg, stderr, errorFile, wg)
} else {
// Параметр "-e" не был указан, перенаправляем stderr только в консоль.
go fromPipe(cfg, stderr, nil, wg)
}
// Если указан параметр "-i", то stdin'ом процесса является указанный файл.
if len(cfg.InputFile) > 0 {
var inputFile *os.File
inputFile, err = util.OpenFile(cfg.InputFile)
if err != nil {
return -1, fmt.Errorf("Unable to open \"%s\": %v", cfg.InputFile, err)
}
defer inputFile.Close()
cmd.Stdin = inputFile
} else {
// Если не указан параметр "-i", stdin'ом процесса является консоль
cmd.Stdin = os.Stdin
}
// Запускаем процесс
err = cmd.Start()
if err != nil {
return -1, err
}
// Убеждаемся, что после выхода из функции (runProcess) запущенный процесс завершится.
defer cmd.Process.Kill()
pid := cmd.Process.Pid
util.Debug("Process id: %d", pid)
// Если указан параметр "-1", процесс будет выполнятся только на 1ом ядре процессора.
if len(cfg.Affinity) > 0 {
set, err := system.SetAffinity(cfg.Affinity, pid)
if err != nil {
return 0, err
}
util.Debug("Processor affinity was set to: %v", set)
}
// Если указан параметр "-s", создаем файл сбора статистики.
var storeFile *os.File
if len(cfg.StoreFile) > 0 {
storeFile, err = util.OpenFile(cfg.StoreFile)
if err != nil {
return -1, fmt.Errorf("Unable to open storage file: %v", err)
}
defer storeFile.Close()
}
// Начинаем отслеживать потребление ресурсов в отдельном потоке.
go measureUsage(cfg, storeFile, cmd.Process)
// В отдельном потоке начинаем отслеживать время жизни процесса, если указан "-t".
timeLimit := cfg.TimeLimit.Value()
if timeLimit > 0 {
go func() {
select {
case <-time.After(timeLimit):
checkError(cmd.Process, fmt.Errorf("Time limit [%s] exceeded", cfg.TimeLimit.String()))
}
}()
}
// Т.к. атрибут "ptrace" включен, то, после запуска, начинаем ждать пока
// процесс изменит свой статус (остановится, завершится, подаст сигнал и т.д.)
var ws syscall.WaitStatus
waitPid, err := syscall.Wait4(-1, &ws, syscall.WALL, nil)
if err != nil {
return -1, fmt.Errorf("Error [syscall.Wait4] for \"%s\": %v", cfg.BaseName, err)
}
if waitPid != pid {
return -1, fmt.Errorf("Error [syscall.Wait4]: First waited PID (%d) not equal to \"%s\" PID (%d)", waitPid, cfg.BaseName, pid)
}
// Ptrace-параметры
options := syscall.PTRACE_O_TRACEFORK
options |= syscall.PTRACE_O_TRACEVFORK
options |= syscall.PTRACE_O_TRACECLONE
options |= syscall.PTRACE_O_TRACEEXIT
options |= syscall.PTRACE_O_TRACEEXEC
parentPid := 0
// Начинаем рекурсивно отслеживать поведение запущенного процесса и его потомков.
// Пример. Если запущенный процесс (указанный в параметре "oar") создал потомка_1, то
// начинаем отслеживать этого потомка_1, потомок_1 тоже может создать потомка (потомка_2),
// в новой итерации начинаем отслеживать этого нового потомка (потомка_2).
// Если потомок_2 больше не создает новых потомков, то следущая итерация цикла
// будет принадлежать его родителю (потомку_1), если потомок_1 также не создает потомков,
// переходим к главному процессу (запущенному через "oar").
// Первая (и последняя) итерация будет отслеживать наш запущенный процесс, остальные - потомков.
// Creation 0 | Main Process Exit 5 | - - - - - Main Process
// 1 | - Child_1 4 | - - - Child_1
// 2 | - - - Child_2 3 | Child_2
for {
// После того как процесс-потомок остановился (после syscall.Wait4)
// Передаем ему ptrace-параметры, которые заставят его останавливаться
// в тех случаях, когда он начинает создавать дочерний процесс.
syscall.PtraceSetOptions(waitPid, options)
syscall.PtraceCont(waitPid, 0)
parentPid = waitPid
// Снова ждем пока процесс-потомок изменит свой статус, теперь это может быть
// не только остановка, завершение, сигналирование, но и создание дочернего процесса.
waitPid, err = syscall.Wait4(-1, &ws, syscall.WALL, nil)
if err != nil {
return -1, fmt.Errorf("Error [syscall.Wait4] for [PID: %d, PPID %d]: %v", waitPid, parentPid, err)
}
command := system.GetProcessCommand(waitPid)
util.Debug("Waited PID: %d, PPID: %d, CMD: %s", waitPid, parentPid, command)
// Проверяем, завершился ли процесс-потомок
if ws.Exited() {
util.Debug(" - Process [PID: %d] finished", waitPid)
// Если завершенный процесс-потомок является нашим запущенным процессом
// (процессом, указанным в параметре "oar"), то ломаем цикл for,
// и выходим из функции (runProcess) с кодом выхода ws.ExitStatus()
if waitPid == pid {
break
}
// Если нет, переходим к его родителю.
continue
}
if ws.Signaled() {
util.Debug(" - Process [PID: %d] signaled: %v", waitPid, ws)
continue
}
sigtrap := uint32(syscall.SIGTRAP)
sigsegv := uint32(syscall.SIGSEGV)
// Если причиной изменения статуса является создание дочернего процесса:
// Если параметер "-Xacp" не установлен, то после попытки создать дочерний процесс
// функция (runProcess) завершится, сработает defer cmd.Process.Kill()
// (процесс будет убит), а т.к. в атрибутах запуска процесса стоит
// Pdeathsig: syscall.SIGKILL, то будут убиты все созданные потомки.
if !cfg.AllowCreateProcesses {
switch uint32(ws) >> 8 {
case sigtrap | (syscall.PTRACE_EVENT_CLONE << 8):
// Для создания отдельного потока, процесс создает потомка,
// параметр "-Xamt" разрешает многопоточность.
if !cfg.MultiThreadedProcess {
return -1, fmt.Errorf("Process attempt to clone himself")
}
clonePid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("Unable to retrieve id of cloned process: %v", err)
}
util.Debug("Process [%d] just maked clone [%d]", waitPid, clonePid)
case sigtrap | (syscall.PTRACE_EVENT_FORK << 8):
fallthrough
case sigtrap | (syscall.PTRACE_EVENT_VFORK << 8):
fallthrough
case sigtrap | (syscall.PTRACE_EVENT_VFORK_DONE << 8):
fallthrough
case sigtrap | (syscall.PTRACE_EVENT_EXEC << 8):
return -1, fmt.Errorf("Attempt to create new process")
case sigsegv:
return -1, fmt.Errorf("Segmentation fault! [PID %d, PPID %d]", waitPid, parentPid)
}
// Если параметер "-Xacp" установлен, то просто выводим инфу о созданных потомках
} else {
switch uint32(ws) >> 8 {
case sigtrap | (syscall.PTRACE_EVENT_EXIT << 8):
util.Debug(" - Detected exit event.")
case sigtrap | (syscall.PTRACE_EVENT_CLONE << 8):
nPid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("- [PTRACE_EVENT_CLONE] Ptrace event message retrieval failed: %v", err)
}
util.Debug("- Ptrace clone [%d] event detected", nPid)
case sigtrap | (syscall.PTRACE_EVENT_FORK << 8):
nPid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("- [PTRACE_EVENT_FORK] Ptrace event message retrieval failed: %v", err)
}
util.Debug("- Ptrace fork [%d] event detected", nPid)
case sigtrap | (syscall.PTRACE_EVENT_VFORK << 8):
nPid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("- [PTRACE_EVENT_VFORK] Ptrace event message retrieval failed: %v", err)
}
util.Debug("- Ptrace vfork [%d] event detected", nPid)
case sigtrap | (syscall.PTRACE_EVENT_VFORK_DONE << 8):
nPid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("- [PTRACE_EVENT_VFORK_DONE] Ptrace event message retrieval failed: %v", err)
}
util.Debug("- Ptrace vfork done [%d] event detected", nPid)
case sigtrap | (syscall.PTRACE_EVENT_EXEC << 8):
util.Debug("- Ptrace exec event detected")
case sigtrap | (0x80 << 8): // PTRACE_EVENT_STOP
util.Debug("- Ptrace stop event detected")
case sigtrap:
util.Debug("- Sigtrap detected")
case uint32(syscall.SIGCHLD):
util.Debug("- Sigchld detected")
case uint32(syscall.SIGSTOP):
util.Debug("- Sigstop detected")
case sigsegv:
util.Debug("- Sigsegv detected.")
return -1, fmt.Errorf("Segmentation fault! [PID %d, PPID %d]", waitPid, parentPid)
default:
util.Debug(" - Process [%d] stopped for unknown reasons [Status %v, Signal %d]", waitPid, ws, ws.StopSignal())
}
}
}
// Ждем, пока функции перенаправления stdout и stderr в файл/консоль,
// запущенные в отдельных потоках, закончат свою работу
wg.Wait()
return ws.ExitStatus(), nil
}
func measureUsage(cfg *util.Config, storage *os.File, process *os.Process) {
// Проверяем, не завершился ли процесс до того, как мы начнем считать потребление ресурсов.
if _, err := os.Stat(fmt.Sprintf("/proc/%d/stat", process.Pid)); err == nil {
// Потребление CPU в % считается по такой формуле:
// consumtion = (cores * (ptA - ptB) * 100) / (ttA - ttB)
// Где cores - Количество используемых ядер процессорa
// ptA - Потребляемое время cpu процессом в момент времени А
// ptB - Потребляемое время cpu процессом в момент времени B
// ttA - Нагруженность процессора (общее время) в момент A
// ttB - Нагруженность процессора (общее время) в момент B
// Замер А позже замера B (A > B)
ptB, _, err := system.GetProcessStats(process.Pid)
checkError(process, err)
ttB, err := system.GetTotalCPUTime()
checkError(process, err)
cores, err := system.GetCPUCount(process.Pid)
checkError(process, err)
util.Debug("Process using %d cpu cores.", cores)
idle := 0
idleLimit := cfg.IdleLimit.Seconds()
// Проводим замер каждую секунду? работы программы
ticker := time.NewTicker(time.Second)
for {
select {
case <-ticker.C:
ttA, err := system.GetTotalCPUTime()
checkError(process, err)
ptA, processMemory, err := system.GetProcessStats(process.Pid)
checkError(process, err)
// Расчитываем потребление CPU
load := float64(uint64(cores)*(ptA-ptB)) / float64(ttA-ttB)
if idleLimit > 0 {
// Если потребление CPU меньше чем допустимая нагрузка
// увеличиваем счетчик простоя (idle)
if cfg.RequiredLoad.Value() > load {
idle++
} else {
idle = 0
}
}
stringMemory := util.StringifyMemory(processMemory)
stringLoad := util.StringifyLoad(load)
util.Debug(" - [Memory: %s/%s, Load: %s/%s]", stringMemory, cfg.MemoryLimit.String(), stringLoad, cfg.RequiredLoad.String())
// Записываем полученные данные о потреблении ресурсов в файл, указанный в "-s".
if storage != nil {
storage.WriteString(fmt.Sprintf("%s,%f,%d\n", time.Now().Format("15:04:05"), load, processMemory))
err = storage.Sync()
checkError(process, err)
}
// Проверка на превышение указанных лимитов (если параметры были указаны)
if idleLimit > 0 && idle >= idleLimit {
checkError(process, fmt.Errorf("Idle time limit [%d] exceeded", cfg.IdleLimit.Seconds()))
}
memoryLimit := cfg.MemoryLimit.Value()
if memoryLimit > 0 && processMemory > memoryLimit {
checkError(process, fmt.Errorf("Memory limit [%s] exceeded", cfg.MemoryLimit.String()))
}
ptB = ptA
ttB = ttA
}
}
}
}
func checkError(process *os.Process, err error) {
if err != nil {
log.Printf("Process killed from subthread. Cause: %v\n", err)
if process != nil {
process.Kill() // Catch the error?
}
os.Exit(0)
}
}
func fromPipe(cfg *util.Config, r io.Reader, f *os.File, wg *sync.WaitGroup) {
defer wg.Done()
scanner := bufio.NewScanner(r)
for scanner.Scan() {
text := scanner.Text()
if len(text) == 0 {
continue
}
if !cfg.Quiet {
log.Printf(util.Bold("[%s]: %s"), cfg.BaseName, text)
}
if f != nil {
f.WriteString(fmt.Sprintf("%s\n", text))
}
}
if err := scanner.Err(); err != nil {
checkError(nil, fmt.Errorf("Pipe handling error: %v", err))
}
}
| identifier_body | ||
exec.go | package runner
import (
"bufio"
"errors"
"fmt"
"io"
"log"
"os"
"os/exec"
"sync"
"syscall"
"time"
"github.com/solovev/orange-app-runner/system"
"github.com/solovev/orange-app-runner/util"
)
// RunProcess запускает основной процесс отслеживания
func RunProcess(cfg *util.Config) (int, error) {
util.Debug("Starting process: %s %v", cfg.ProcessPath, cfg.ProcessArgs)
if len(cfg.ProcessPath) == 0 {
return -1, errors.New("ProcessPath isn't specified")
}
cmd := exec.Command(cfg.ProcessPath, cfg.ProcessArgs...)
// Передаем в параметры переменные среды
cmd.Env = cfg.Environment
homeDir, err := util.GetProcessHomeDirectory(cfg.HomeDirectory)
if err != nil {
return -1, fmt.Errorf("Unable to create home directory \"%s\": %v", cfg.HomeDirectory, err)
}
cmd.Dir = homeDir
cmd.SysProcAttr = &syscall.SysProcAttr{}
// Атрибутом "ptrace" сообщаем системе, что будем отслеживать действия процесса.
cmd.SysProcAttr.Ptrace = true
// "Убиваем" всех потомков процесса при его смерти.
cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL
// Если текущий пользователь имеет привелегии администратора, то эмулируем запуск
// под другим пользователем, указанным в параметре "-l"
if cfg.User != system.GetCurrentUserName() {
uid, gid, err := system.FindUser(cfg.User)
if err != nil {
return -1, err
}
util.Debug("Set process credential to %s [UID: %d, GID: %d]", cfg.User, uid, gid)
cmd.SysProcAttr.Credential = &syscall.Credential{Uid: uid, Gid: gid}
}
wg := &sync.WaitGroup{}
wg.Add(2)
stdout, err := cmd.StdoutPipe()
if err != nil {
return -1, err
}
// Если параметр "-o" был указан, то перенаправляем stdout запущенного процесса в файл
// И, если не указан параметр "-q", еще в нашу консоль.
if len(cfg.OutputFile) > 0 {
var outputFile *os.File
outputFile, err = util.CreateFile(cfg.OutputFile)
if err != nil {
return -1, fmt.Errorf("Unable to create \"%s\": %v", cfg.OutputFile, err)
}
defer outputFile.Close()
go fromPipe(cfg, stdout, outputFile, wg)
} else {
// Параметр "-o" не был указан, перенаправляем stdout только в консоль.
go fromPipe(cfg, stdout, nil, wg)
}
stderr, err := cmd.StderrPipe()
if err != nil {
return -1, err
}
// Если параметр "-e" был указан, то перенаправляем stderr запущенного процесса в файл
// И, если не указан параметр "-q", еще в нашу консоль.
if len(cfg.ErrorFile) > 0 {
var errorFile *os.File
errorFile, err = util.CreateFile(cfg.ErrorFile)
if err != nil {
return -1, fmt.Errorf("Unable to create \"%s\": %v", cfg.ErrorFile, err)
}
defer errorFile.Close()
go fromPipe(cfg, stderr, errorFile, wg)
} else {
// Параметр "-e" не был указан, перенаправляем stderr только в консоль.
go fromPipe(cfg, stderr, nil, wg)
}
// Если указан параметр "-i", то stdin'ом процесса является указанный файл.
if len(cfg.InputFile) > 0 {
var inputFile *os.File
inputFile, err = util.OpenFile(cfg.InputFile)
if err != nil {
return -1, fmt.Errorf("Unable to open \"%s\": %v", cfg.InputFile, err)
}
defer inputFile.Close()
cmd.Stdin = inputFile
} else {
// Если не указан параметр "-i", stdin'ом процесса является консоль
cmd.Stdin = os.Stdin
}
// Запускаем процесс
err = cmd.Start()
if err != nil {
return -1, err
}
// Убеждаемся, что после выхода из функции (runProcess) запущенный процесс завершится.
defer cmd.Process.Kill()
pid := cmd.Process.Pid
util.Debug("Process id: %d", pid)
// Если указан параметр "-1", процесс будет выполнятся только на 1ом ядре процессора.
if len(cfg.Affinity) > 0 {
set, err := system.SetAffinity(cfg.Affinity, pid)
if err != nil {
return 0, err
}
util.Debug("Processor affinity was set to: %v", set)
}
// Если указан параметр "-s", создаем файл сбора статистики.
var storeFile *os.File
if len(cfg.StoreFile) > 0 {
storeFile, err = util.OpenFile(cfg.StoreFile)
if err != nil {
return -1, fmt.Errorf("Unable to open storage file: %v", err)
}
defer storeFile.Close()
}
// Начинаем отслеживать потребление ресурсов в отдельном потоке.
go measureUsage(cfg, storeFile, cmd.Process)
// В отдельном потоке начинаем отслеживать время жизни процесса, если указан "-t".
timeLimit := cfg.TimeLimit.Value()
if timeLimit > 0 {
go func() {
select {
case <-time.After(timeLimit):
checkError(cmd.Process, fmt.Errorf("Time limit [%s] exceeded", cfg.TimeLimit.String()))
}
}()
}
// Т.к. атрибут "ptrace" включен, то, после запуска, начинаем ждать пока
// процесс изменит свой статус (остановится, завершится, подаст сигнал и т.д.)
var ws syscall.WaitStatus
waitPid, err := syscall.Wait4(-1, &ws, syscall.WALL, nil)
if err != nil {
return -1, fmt.Errorf("Error [syscall.Wait4] for \"%s\": %v", cfg.BaseName, err)
}
if waitPid != pid {
return -1, fmt.Errorf("Error [syscall.Wait4]: First waited PID (%d) not equal to \"%s\" PID (%d)", waitPid, cfg.BaseName, pid)
}
// Ptrace-параметры
options := syscall.PTRACE_O_TRACEFORK
options |= syscall.PTRACE_O_TRACEVFORK
options |= syscall.PTRACE_O_TRACECLONE
options |= syscall.PTRACE_O_TRACEEXIT
options |= syscall.PTRACE_O_TRACEEXEC
parentPid := 0
// Начинаем рекурсивно отслеживать поведение запущенного процесса и его потомков.
// Пример. Если запущенный процесс (указанный в параметре "oar") создал потомка_1, то
// начинаем отслеживать этого потомка_1, потомок_1 тоже может создать потомка (потомка_2),
// в новой итерации начинаем отслеживать этого нового потомка (потомка_2).
// Если потомок_2 больше не создает новых потомков, то следущая итерация цикла
// будет принадлежать его родителю (потомку_1), если потомок_1 также не создает потомков,
// переходим к главному процессу (запущенному через "oar").
// Первая (и последняя) итерация будет отслеживать наш запущенный процесс, остальные - потомков.
// Creation 0 | Main Process Exit 5 | - - - - - Main Process
// 1 | - Child_1 4 | - - - Child_1
// 2 | - - - Child_2 3 | Child_2
for {
// После того как процесс-потомок остановился (после syscall.Wait4)
// Передаем ему ptrace-параметры, которые заставят его останавливаться
// в тех случаях, когда он начинает создавать дочерний процесс.
syscall.PtraceSetOptions(waitPid, options)
syscall.PtraceCont(waitPid, 0)
parentPid = waitPid
// Снова ждем пока процесс-потомок изменит свой статус, теперь это может быть
// не только остановка, завершение, сигналирование, но и создание дочернего процесса.
waitPid, err = syscall.Wait4(-1, &ws, syscall.WALL, nil)
if err != nil {
return -1, fmt.Errorf("Error [syscall.Wait4] for [PID: %d, PPID %d]: %v", waitPid, parentPid, err)
}
command := system.GetProcessCommand(waitPid)
util.Debug("Waited PID: %d, PPID: %d, CMD: %s", waitPid, parentPid, command)
// Проверяем, завершился ли процесс-потомок
if ws.Exited() {
util.Debug(" - Process [PID: %d] finished", waitPid)
// Если завершенный процесс-потомок является нашим запущенным процессом
// (процессом, указанным в параметре "oar"), то ломаем цикл for,
// и выходим из функции (runProcess) с кодом выхода ws.ExitStatus()
if waitPid == pid {
break
}
// Если нет, переходим к его родителю.
continue
}
if ws.Signaled() {
util.Debug(" - Process [PID: %d] signaled: %v", waitPid, ws)
continue
}
sigtrap := uint32(syscall.SIGTRAP)
sigsegv := uint32(syscall.SIGSEGV)
// Если причиной изменения статуса является создание дочернего процесса:
// Если параметер "-Xacp" не установлен, то после попытки создать дочерний процесс
// функция (runProcess) завершится, сработает defer cmd.Process.Kill()
// (процесс будет убит), а т.к. в атрибутах запуска процесса стоит
// Pdeathsig: syscall.SIGKILL, то будут убиты все созданные потомки.
if !cfg.AllowCreateProcesses {
switch uint32(ws) >> 8 {
case sigtrap | (syscall.PTRACE_EVENT_CLONE << 8):
// Для создания отдельного потока, процесс создает потомка,
// параметр "-Xamt" разрешает многопоточность.
if !cfg.MultiThreadedProcess {
return -1, fmt.Errorf("Process attempt to clone himself")
}
clonePid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("Unable to retrieve id of cloned process: %v", err)
}
util.Debug("Process [%d] just maked clone [%d]", waitPid, clonePid)
case sigtrap | (syscall.PTRACE_EVENT_FORK << 8):
fallthrough
case sigtrap | (syscall.PTRACE_EVENT_VFORK << 8):
fallthrough
case sigtrap | (syscall.PTRACE_EVENT_VFORK_DONE << 8):
fallthrough
case sigtrap | (syscall.PTRACE_EVENT_EXEC << 8):
return -1, fmt.Errorf("Attempt to create new process")
case sigsegv:
return -1, fmt.Errorf("Segmentation fault! [PID %d, PPID %d]", waitPid, parentPid)
}
// Если параметер "-Xacp" установлен, то просто выводим инфу о созданных потомках
} else {
switch uint32(ws) >> 8 {
case sigtrap | (syscall.PTRACE_EVENT_EXIT << 8):
util.Debug(" - Detected exit event.")
case sigtrap | (syscall.PTRACE_EVENT_CLONE << 8):
nPid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("- [PTRACE_EVENT_CLONE] Ptrace event message retrieval failed: %v", err)
}
util.Debug("- Ptrace clone [%d] event detected", nPid)
case sigtrap | (syscall.PTRACE_EVENT_FORK << 8):
nPid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("- [PTRACE_EVENT_FORK] Ptrace event message retrieval failed: %v", err)
}
util.Debug("- Ptrace fork [%d] event detected", nPid)
case sigtrap | (syscall.PTRACE_EVENT_VFORK << 8):
nPid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("- [PTRACE_EVENT_VFORK] Ptrace event message retrieval failed: %v", err)
}
util.Debug("- Ptrace vfork [%d] event detected", nPid)
case sigtrap | (syscall.PTRACE_EVENT_VFORK_DONE << 8):
nPid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("- [PTRACE_EVENT_VFORK_DONE] Ptrace event message retrieval failed: %v", err)
}
util.Debug("- Ptrace vfork done [%d] event detected", nPid)
case sigtrap | (syscall.PTRACE_EVENT_EXEC << 8):
util.Debug("- Ptrace exec event detected")
case sigtrap | (0x80 << 8): // PTRACE_EVENT_STOP
util.Debug("- Ptrace stop event detected")
case sigtrap:
util.Debug("- Sigtrap detected")
case uint32(syscall.SIGCHLD):
util.Debug("- Sigchld detected")
case uint32(syscall.SIGSTOP):
util.Debug("- Sigstop detected")
case sigsegv:
util.Debug("- Sigsegv detected.")
return -1, fmt.Errorf("Segmentation fault! [PID %d, PPID %d]", waitPid, parentPid)
default:
util.Debug(" - Process [%d] stopped for unknown reasons [Status %v, Signal %d]", waitPid, ws, ws.StopSignal())
}
}
}
// Ждем, пока функции перенаправления stdout и stderr в файл/консоль,
// запущенные в отдельных потоках, закончат свою работу
wg.Wait()
return ws.ExitStatus(), nil
}
func measureUsage(cfg *util.Config, storage *os.File, process *os.Process) {
// Проверяем, не завершился ли процесс до того, как мы начнем считать потребление ресурсов.
if _, err := os.Stat(fmt.Sprintf("/proc/%d/stat", process.Pid)); err == nil {
// Потребление CPU в % считается по такой формуле:
// consumtion = (cores * (ptA - ptB) * 100) / (ttA - ttB)
// Где cores - Количество используемых ядер процессорa
// ptA - Потребляемое время cpu процессом в момент времени А
// ptB - Потребляемое время cpu процессом в момент времени B
// ttA - Нагруженность процессора (общее время) в момент A
// ttB - Нагруженность процессора (общее время) в момент B
// Замер А позже замера B (A > B)
ptB, _, err := system.GetProcessStats(process.Pid)
checkError(process, err)
ttB, err := system.GetTotalCPUTime()
checkError(process, err)
cores, err := system.GetCPUCount(process.Pid)
checkError(process, err)
util.Debug("Process using %d cpu cores.", cores)
idle := 0
idleLimit := cfg.IdleLimit.Seconds()
// Проводим замер каждую секунду? работы программы
ticker := time.NewTicker(time.Second)
for {
select {
case <-ticker.C:
ttA, err := system.GetTotalCPUTime()
checkError(process, err)
ptA, processMemory, err := system.GetProcessStats(process.Pid)
checkError(process, err)
// Расчитываем потребление CPU
load := float64(uint64(cores)*(ptA-ptB)) / float64(ttA-ttB)
if idleLimit > 0 {
// Если потребление CPU меньше чем допустимая нагрузка
// увеличиваем счетчик простоя (idle)
if cfg.RequiredLoad.Value() > load {
idle++
} else {
idle = 0
}
}
stringMemory := util.StringifyMemory(processMemory)
stringLoad := util.StringifyLoad(load)
util.Debug(" - [Memory: %s/%s, Load: %s/%s]", stringMemory, cfg.MemoryLimit.String(), stringLoad, cfg.RequiredLoad.String())
// Записываем полученные данные о потреблении ресурсов в файл, указанный в "-s".
if storage != nil {
storage.WriteString(fmt.Sprintf("%s,%f,%d\n", time.Now().Format("15:04:05"), load, processMemory))
err = storage.Sync()
checkError(process, err)
}
// Проверка на превышение указанных лимитов (если параметры были указаны)
if idleLimit > 0 && idle >= idleLimit {
checkError(process, fmt.Errorf("Idle time limit [%d] exceeded", cfg.IdleLimit.Seconds()))
}
memoryLimit := cfg.MemoryLimit.Value()
if memoryLimit > 0 && processMemory > memoryLimit {
checkError(process, fmt.Errorf("Memory limit [%s] exceeded", cfg.MemoryLimit.String()))
}
ptB = ptA
ttB = ttA
}
}
}
}
func checkError(process *os.Process, err error) {
if err != nil {
log.Printf("Process killed from subthread. Cause: %v\n", err)
if process != nil {
process.Kill() // Catch the error?
}
os.Exit(0)
}
}
func fromPipe(cfg *util.Config, r io.Reader, f *os.File, wg *sync.WaitGroup) {
defer wg.Done()
scanner := bufio.NewScanner(r)
for scanner.Scan() {
text := scanner.Text()
if len(text) == 0 {
continue
}
if !cfg.Quiet {
log.Printf(util.Bold("[%s]: %s"), cfg.BaseName, text)
}
if f != nil {
f.WriteString(fmt.Sprintf("%s\n", text))
}
}
if err := scanner.Err(); err != nil {
checkError(nil, fmt.Errorf("Pipe handling error: %v", err))
}
}
| identifier_name | ||
create-docker-context-for-node-component.js | #!/usr/bin/env node
const childProcess = require("child_process");
const fse = require("fs-extra");
const path = require("path");
const process = require("process");
const yargs = require("yargs");
const _ = require("lodash");
const isSubDir = require("is-subdir");
const {
getVersions,
getTags,
getName,
getRepository
} = require("./docker-util");
// --- cache dependencies data from package.json
const packageDependencyDataCache = {};
const argv = yargs
.options({
build: {
description: "Pipe the Docker context straight to Docker.",
type: "boolean",
default: false
},
tag: {
description:
'The tag to pass to "docker build". This parameter is only used if --build is specified. If the value of this parameter is `auto`, a tag name is automatically created from NPM configuration.',
type: "string",
default: "auto"
},
repository: {
description:
"The repository to use in auto tag generation. Will default to '', i.e. dockerhub unless --local is set. Requires --tag=auto",
type: "string",
default: process.env.MAGDA_DOCKER_REPOSITORY
},
name: {
description:
"The package name to use in auto tag generation. Will default to ''. Used to override the docker nanme config in package.json during the auto tagging. Requires --tag=auto",
type: "string",
default: process.env.MAGDA_DOCKER_NAME
},
version: {
description:
"The version(s) to use in auto tag generation. Will default to the current version in package.json. Requires --tag=auto",
type: "string",
array: true,
default: process.env.MAGDA_DOCKER_VERSION
},
output: {
description:
"The output path and filename for the Docker context .tar file.",
type: "string"
},
local: {
description:
"Build for a local Kubernetes container registry. This parameter is only used if --build is specified.",
type: "boolean",
default: false
},
push: {
description:
"Push the build image to the docker registry. This parameter is only used if --build is specified.",
type: "boolean",
default: false
},
platform: {
description:
"A list of platform that the docker image build should target. Specify this value will enable multi-arch image build.",
type: "string"
},
noCache: {
description: "Disable the cache during the docker image build.",
type: "boolean",
default: false
},
cacheFromVersion: {
description:
"Version to cache from when building, using the --cache-from field in docker. Will use the same repository and name. Using this options causes the image to be pulled before build.",
type: "string"
}
})
// Because 'version is a default yargs thing we need to specifically override its normal parsing.
.version(false)
.array("version")
.help().argv;
if (!argv.build && !argv.output) {
console.log("Either --build or --output <filename> must be specified.");
process.exit(1);
}
if (argv.platform && !argv.push) {
console.log(
"When --platform is specified, --push must be specified as well as multi-arch image can only be pushed to remote registry."
);
process.exit(1);
}
if (argv.noCache && argv.cacheFromVersion) {
console.log("When --noCache=true, --cacheFromVersion can't be specified.");
process.exit(1);
}
const componentSrcDir = path.resolve(process.cwd());
const dockerContextDir = fse.mkdtempSync(
path.resolve(__dirname, "..", "docker-context-")
);
const componentDestDir = path.resolve(dockerContextDir, "component");
fse.emptyDirSync(dockerContextDir);
fse.ensureDirSync(componentDestDir);
preparePackage(componentSrcDir, componentDestDir);
const tar = process.platform === "darwin" ? "gtar" : "tar";
// Docker and ConEmu (an otherwise excellent console for Windows) don't get along.
// See: https://github.com/Maximus5/ConEmu/issues/958 and https://github.com/moby/moby/issues/28814
// So if we're running under ConEmu, we need to add an extra -cur_console:i parameter to disable
// ConEmu's hooks and also set ConEmuANSI to OFF so Docker doesn't do anything drastic.
const env = Object.assign({}, process.env);
const extraParameters = [];
if (env.ConEmuANSI === "ON") {
env.ConEmuANSI = "OFF";
extraParameters.push("-cur_console:i");
}
updateDockerFile(componentSrcDir, componentDestDir);
if (argv.build) {
const cacheFromImage =
argv.cacheFromVersion &&
getRepository(argv.local, argv.repository) +
getName(argv.name) +
":" +
argv.cacheFromVersion;
if (cacheFromImage) {
// Pull this image into the docker daemon - if it fails we don't care, we'll just go from scratch.
const dockerPullProcess = childProcess.spawnSync(
"docker",
[...extraParameters, "pull", cacheFromImage],
{
stdio: "inherit",
env: env
}
);
wrapConsoleOutput(dockerPullProcess);
}
const tarProcess = childProcess.spawn(
tar,
[...extraParameters, "--dereference", "-czf", "-", "*"],
{
cwd: dockerContextDir,
stdio: ["inherit", "pipe", "inherit"],
env: env,
shell: true
}
);
const tags = getTags(
argv.tag,
argv.local,
argv.repository,
argv.version,
argv.name
);
const tagArgs = tags
.map((tag) => ["-t", tag])
.reduce((soFar, tagArgs) => soFar.concat(tagArgs), []);
const cacheFromArgs = cacheFromImage
? ["--cache-from", cacheFromImage]
: [];
const dockerProcess = childProcess.spawn(
"docker",
[
...extraParameters,
...(argv.platform ? ["buildx"] : []),
"build",
...tagArgs,
...cacheFromArgs,
...(argv.noCache ? ["--no-cache"] : []),
...(argv.platform ? ["--platform", argv.platform, "--push"] : []),
"-f",
`./component/Dockerfile`,
"-"
],
{
stdio: ["pipe", "inherit", "inherit"],
env: env
}
);
wrapConsoleOutput(dockerProcess);
dockerProcess.on("close", (code) => {
fse.removeSync(dockerContextDir);
if (code === 0 && argv.push && !argv.platform) {
if (tags.length === 0) {
console.error("Can not push an image without a tag.");
process.exit(1);
}
// Stop if there's a code !== 0
tags.every((tag) => {
const process = childProcess.spawnSync(
"docker",
["push", tag],
{
stdio: "inherit"
}
);
code = process.status;
return code === 0;
});
}
process.exit(code);
});
tarProcess.on("close", (code) => {
dockerProcess.stdin.end();
});
tarProcess.stdout.on("data", (data) => {
dockerProcess.stdin.write(data);
});
} else if (argv.output) {
const outputPath = path.resolve(process.cwd(), argv.output);
const outputTar = fse.openSync(outputPath, "w", 0o644);
const tarProcess = childProcess.spawn(
tar,
["--dereference", "-czf", "-", "*"],
{
cwd: dockerContextDir,
stdio: ["inherit", outputTar, "inherit"],
env: env,
shell: true
}
);
tarProcess.on("close", (code) => {
fse.closeSync(outputTar);
console.log(tarProcess.status);
fse.removeSync(dockerContextDir);
});
}
function updateDockerFile(sourceDir, destDir) {
const tags = getVersions(argv.local, argv.version);
const repository = getRepository(argv.local, argv.repository);
const dockerFileContents = fse.readFileSync(
path.resolve(sourceDir, "Dockerfile"),
"utf-8"
);
const replacedDockerFileContents = dockerFileContents
// Add a repository if this is a magda image
.replace(
/FROM .*(magda-[^:\s]+)(:[^\s]+)/,
"FROM " + repository + "$1" + (tags[0] ? ":" + tags[0] : "$2")
);
fse.writeFileSync(
path.resolve(destDir, "Dockerfile"),
replacedDockerFileContents,
"utf-8"
);
}
function preparePackage(packageDir, destDir) {
const packageJson = require(path.join(packageDir, "package.json"));
const dockerIncludesFromPackageJson =
packageJson.config &&
packageJson.config.docker &&
packageJson.config.docker.include;
let dockerIncludes;
if (!dockerIncludesFromPackageJson) {
console.log(
`WARNING: Package ${packageDir} does not have a config.docker.include key in package.json, so all of its files will be included in the docker image.`
);
dockerIncludes = fse.readdirSync(packageDir);
} else if (dockerIncludesFromPackageJson.trim() === "*") {
dockerIncludes = fse.readdirSync(packageDir);
} else {
if (dockerIncludesFromPackageJson.indexOf("*") >= 0) {
throw new Error(
"Sorry, wildcards are not currently supported in config.docker.include."
);
}
dockerIncludes = dockerIncludesFromPackageJson
.split(" ")
.filter((include) => include.length > 0);
}
dockerIncludes
.filter((include) => include !== "Dockerfile") // Filter out the dockerfile because we'll manually copy over a modified version.
.forEach(function (include) {
const src = path.resolve(packageDir, include);
const dest = path.resolve(destDir, include);
if (include === "node_modules") {
fse.ensureDirSync(dest);
const env = Object.create(process.env);
env.NODE_ENV = "production";
const productionPackages = _.uniqBy(
getPackageList(packageDir, path.resolve(packageDir, "..")),
(package) => package.path
);
prepareNodeModules(src, dest, productionPackages);
return;
}
try {
// On Windows we can't create symlinks to files without special permissions.
// So just copy the file instead. Usually creating directory junctions is
// fine without special permissions, but fall back on copying in the unlikely
// event that fails, too.
const type = fse.statSync(src).isFile() ? "file" : "junction";
fse.ensureSymlinkSync(src, dest, type);
} catch (e) {
fse.copySync(src, dest);
}
});
}
function prepareNodeModules(packageDir, destDir, productionPackages) {
productionPackages.forEach((src) => {
const relativePath = path.relative(packageDir, src.path);
const dest = path.resolve(destDir, relativePath);
const srcPath = path.resolve(packageDir, relativePath);
// console.log("src " + srcPath + " to " + dest);
try {
const stat = fse.lstatSync(srcPath);
const type = stat.isFile() ? "file" : "junction";
fse.ensureSymlinkSync(srcPath, dest, type);
} catch (e) {
fse.copySync(srcPath, dest);
}
});
}
function getPackageList(packagePath, packageSearchRoot, resolvedSoFar = {}) {
const dependencies = getPackageDependencies(packagePath);
const result = [];
if (!dependencies || !dependencies.length) {
return result;
}
dependencies.forEach(function (dependencyName) {
const dependencyNamePath = dependencyName.replace(/\//g, path.sep);
let currentBaseDir = packagePath;
let dependencyDir;
do {
dependencyDir = path.resolve(
currentBaseDir,
"node_modules",
dependencyNamePath
);
if (
currentBaseDir === packageSearchRoot ||
isSubDir(currentBaseDir, packageSearchRoot)
) {
// --- will not look for packages outside project root directory
break;
}
// Does this directory exist? If not, imitate node's module resolution by walking
// up the directory tree.
currentBaseDir = path.resolve(currentBaseDir, "..");
} while (!fse.existsSync(dependencyDir));
if (!fse.existsSync(dependencyDir)) {
throw new Error(
"Could not find path for " +
dependencyName +
" @ " +
packagePath
);
}
// If we haven't already seen this
if (!resolvedSoFar[dependencyDir]) {
result.push({ name: dependencyName, path: dependencyDir });
// Now that we've added this package to the list to resolve, add all its children.
const childPackageResult = getPackageList(
dependencyDir,
packageSearchRoot,
{ ...resolvedSoFar, [dependencyDir]: true }
);
Array.prototype.push.apply(result, childPackageResult);
}
});
return result;
}
function | (packagePath) {
const packageJsonPath = path.resolve(packagePath, "package.json");
if (packageDependencyDataCache[packageJsonPath]) {
return packageDependencyDataCache[packageJsonPath];
}
const pkgData = fse.readJSONSync(packageJsonPath);
const depData = pkgData["dependencies"];
if (!depData) {
packageDependencyDataCache[packageJsonPath] = [];
} else {
packageDependencyDataCache[packageJsonPath] = Object.keys(depData);
}
return packageDependencyDataCache[packageJsonPath];
}
function wrapConsoleOutput(process) {
if (process.stdout) {
process.stdout.on("data", (data) => {
console.log(data.toString());
});
}
if (process.stderr) {
process.stderr.on("data", (data) => {
console.error(data.toString());
});
}
}
| getPackageDependencies | identifier_name |
create-docker-context-for-node-component.js | #!/usr/bin/env node
const childProcess = require("child_process");
const fse = require("fs-extra");
const path = require("path");
const process = require("process");
const yargs = require("yargs");
const _ = require("lodash");
const isSubDir = require("is-subdir");
const {
getVersions,
getTags,
getName,
getRepository
} = require("./docker-util");
// --- cache dependencies data from package.json
const packageDependencyDataCache = {};
const argv = yargs
.options({
build: {
description: "Pipe the Docker context straight to Docker.",
type: "boolean",
default: false
},
tag: {
description:
'The tag to pass to "docker build". This parameter is only used if --build is specified. If the value of this parameter is `auto`, a tag name is automatically created from NPM configuration.',
type: "string",
default: "auto"
},
repository: {
description:
"The repository to use in auto tag generation. Will default to '', i.e. dockerhub unless --local is set. Requires --tag=auto",
type: "string",
default: process.env.MAGDA_DOCKER_REPOSITORY
},
name: {
description:
"The package name to use in auto tag generation. Will default to ''. Used to override the docker nanme config in package.json during the auto tagging. Requires --tag=auto",
type: "string",
default: process.env.MAGDA_DOCKER_NAME
},
version: {
description:
"The version(s) to use in auto tag generation. Will default to the current version in package.json. Requires --tag=auto",
type: "string",
array: true,
default: process.env.MAGDA_DOCKER_VERSION
},
output: {
description:
"The output path and filename for the Docker context .tar file.",
type: "string"
},
local: {
description:
"Build for a local Kubernetes container registry. This parameter is only used if --build is specified.",
type: "boolean",
default: false
},
push: {
description:
"Push the build image to the docker registry. This parameter is only used if --build is specified.",
type: "boolean",
default: false
},
platform: {
description:
"A list of platform that the docker image build should target. Specify this value will enable multi-arch image build.",
type: "string"
},
noCache: {
description: "Disable the cache during the docker image build.",
type: "boolean",
default: false
},
cacheFromVersion: {
description:
"Version to cache from when building, using the --cache-from field in docker. Will use the same repository and name. Using this options causes the image to be pulled before build.",
type: "string"
}
})
// Because 'version is a default yargs thing we need to specifically override its normal parsing.
.version(false)
.array("version")
.help().argv;
if (!argv.build && !argv.output) {
console.log("Either --build or --output <filename> must be specified.");
process.exit(1);
}
if (argv.platform && !argv.push) {
console.log(
"When --platform is specified, --push must be specified as well as multi-arch image can only be pushed to remote registry."
);
process.exit(1);
}
if (argv.noCache && argv.cacheFromVersion) {
console.log("When --noCache=true, --cacheFromVersion can't be specified.");
process.exit(1);
}
const componentSrcDir = path.resolve(process.cwd());
const dockerContextDir = fse.mkdtempSync(
path.resolve(__dirname, "..", "docker-context-")
);
const componentDestDir = path.resolve(dockerContextDir, "component");
fse.emptyDirSync(dockerContextDir);
fse.ensureDirSync(componentDestDir);
preparePackage(componentSrcDir, componentDestDir);
const tar = process.platform === "darwin" ? "gtar" : "tar";
// Docker and ConEmu (an otherwise excellent console for Windows) don't get along.
// See: https://github.com/Maximus5/ConEmu/issues/958 and https://github.com/moby/moby/issues/28814
// So if we're running under ConEmu, we need to add an extra -cur_console:i parameter to disable
// ConEmu's hooks and also set ConEmuANSI to OFF so Docker doesn't do anything drastic.
const env = Object.assign({}, process.env);
const extraParameters = [];
if (env.ConEmuANSI === "ON") {
env.ConEmuANSI = "OFF";
extraParameters.push("-cur_console:i");
}
updateDockerFile(componentSrcDir, componentDestDir);
if (argv.build) {
const cacheFromImage =
argv.cacheFromVersion &&
getRepository(argv.local, argv.repository) +
getName(argv.name) +
":" +
argv.cacheFromVersion;
if (cacheFromImage) {
// Pull this image into the docker daemon - if it fails we don't care, we'll just go from scratch.
const dockerPullProcess = childProcess.spawnSync(
"docker",
[...extraParameters, "pull", cacheFromImage],
{
stdio: "inherit",
env: env
}
);
wrapConsoleOutput(dockerPullProcess);
}
const tarProcess = childProcess.spawn(
tar,
[...extraParameters, "--dereference", "-czf", "-", "*"],
{
cwd: dockerContextDir,
stdio: ["inherit", "pipe", "inherit"],
env: env,
shell: true
}
);
const tags = getTags(
argv.tag,
argv.local,
argv.repository,
argv.version,
argv.name
);
const tagArgs = tags
.map((tag) => ["-t", tag])
.reduce((soFar, tagArgs) => soFar.concat(tagArgs), []);
const cacheFromArgs = cacheFromImage
? ["--cache-from", cacheFromImage]
: [];
const dockerProcess = childProcess.spawn(
"docker",
[
...extraParameters,
...(argv.platform ? ["buildx"] : []),
"build",
...tagArgs,
...cacheFromArgs,
...(argv.noCache ? ["--no-cache"] : []),
...(argv.platform ? ["--platform", argv.platform, "--push"] : []),
"-f",
`./component/Dockerfile`,
"-"
],
{
stdio: ["pipe", "inherit", "inherit"],
env: env
}
);
wrapConsoleOutput(dockerProcess);
dockerProcess.on("close", (code) => {
fse.removeSync(dockerContextDir);
if (code === 0 && argv.push && !argv.platform) {
if (tags.length === 0) {
console.error("Can not push an image without a tag.");
process.exit(1);
}
// Stop if there's a code !== 0
tags.every((tag) => {
const process = childProcess.spawnSync(
"docker",
["push", tag],
{
stdio: "inherit"
}
);
code = process.status;
return code === 0;
});
}
process.exit(code);
});
tarProcess.on("close", (code) => {
dockerProcess.stdin.end();
});
tarProcess.stdout.on("data", (data) => {
dockerProcess.stdin.write(data);
});
} else if (argv.output) {
const outputPath = path.resolve(process.cwd(), argv.output);
const outputTar = fse.openSync(outputPath, "w", 0o644);
const tarProcess = childProcess.spawn(
tar,
["--dereference", "-czf", "-", "*"],
{
cwd: dockerContextDir,
stdio: ["inherit", outputTar, "inherit"],
env: env,
shell: true
}
);
tarProcess.on("close", (code) => {
fse.closeSync(outputTar);
console.log(tarProcess.status);
fse.removeSync(dockerContextDir);
});
}
function updateDockerFile(sourceDir, destDir) {
const tags = getVersions(argv.local, argv.version);
const repository = getRepository(argv.local, argv.repository);
const dockerFileContents = fse.readFileSync(
path.resolve(sourceDir, "Dockerfile"),
"utf-8"
);
const replacedDockerFileContents = dockerFileContents
// Add a repository if this is a magda image
.replace(
/FROM .*(magda-[^:\s]+)(:[^\s]+)/,
"FROM " + repository + "$1" + (tags[0] ? ":" + tags[0] : "$2")
);
fse.writeFileSync(
path.resolve(destDir, "Dockerfile"),
replacedDockerFileContents,
"utf-8"
);
}
function preparePackage(packageDir, destDir) {
const packageJson = require(path.join(packageDir, "package.json"));
const dockerIncludesFromPackageJson =
packageJson.config &&
packageJson.config.docker &&
packageJson.config.docker.include;
let dockerIncludes;
if (!dockerIncludesFromPackageJson) {
console.log(
`WARNING: Package ${packageDir} does not have a config.docker.include key in package.json, so all of its files will be included in the docker image.`
);
dockerIncludes = fse.readdirSync(packageDir);
} else if (dockerIncludesFromPackageJson.trim() === "*") {
dockerIncludes = fse.readdirSync(packageDir);
} else {
if (dockerIncludesFromPackageJson.indexOf("*") >= 0) {
throw new Error(
"Sorry, wildcards are not currently supported in config.docker.include."
);
}
dockerIncludes = dockerIncludesFromPackageJson
.split(" ")
.filter((include) => include.length > 0);
}
dockerIncludes
.filter((include) => include !== "Dockerfile") // Filter out the dockerfile because we'll manually copy over a modified version.
.forEach(function (include) {
const src = path.resolve(packageDir, include);
const dest = path.resolve(destDir, include);
if (include === "node_modules") {
fse.ensureDirSync(dest);
const env = Object.create(process.env);
env.NODE_ENV = "production";
const productionPackages = _.uniqBy(
getPackageList(packageDir, path.resolve(packageDir, "..")),
(package) => package.path
);
prepareNodeModules(src, dest, productionPackages);
return;
}
try {
// On Windows we can't create symlinks to files without special permissions.
// So just copy the file instead. Usually creating directory junctions is
// fine without special permissions, but fall back on copying in the unlikely
// event that fails, too.
const type = fse.statSync(src).isFile() ? "file" : "junction";
fse.ensureSymlinkSync(src, dest, type);
} catch (e) {
fse.copySync(src, dest);
}
});
}
function prepareNodeModules(packageDir, destDir, productionPackages) {
productionPackages.forEach((src) => {
const relativePath = path.relative(packageDir, src.path);
const dest = path.resolve(destDir, relativePath);
const srcPath = path.resolve(packageDir, relativePath);
// console.log("src " + srcPath + " to " + dest);
try {
const stat = fse.lstatSync(srcPath);
const type = stat.isFile() ? "file" : "junction";
fse.ensureSymlinkSync(srcPath, dest, type);
} catch (e) {
fse.copySync(srcPath, dest);
}
});
}
function getPackageList(packagePath, packageSearchRoot, resolvedSoFar = {}) {
const dependencies = getPackageDependencies(packagePath);
const result = [];
if (!dependencies || !dependencies.length) |
dependencies.forEach(function (dependencyName) {
const dependencyNamePath = dependencyName.replace(/\//g, path.sep);
let currentBaseDir = packagePath;
let dependencyDir;
do {
dependencyDir = path.resolve(
currentBaseDir,
"node_modules",
dependencyNamePath
);
if (
currentBaseDir === packageSearchRoot ||
isSubDir(currentBaseDir, packageSearchRoot)
) {
// --- will not look for packages outside project root directory
break;
}
// Does this directory exist? If not, imitate node's module resolution by walking
// up the directory tree.
currentBaseDir = path.resolve(currentBaseDir, "..");
} while (!fse.existsSync(dependencyDir));
if (!fse.existsSync(dependencyDir)) {
throw new Error(
"Could not find path for " +
dependencyName +
" @ " +
packagePath
);
}
// If we haven't already seen this
if (!resolvedSoFar[dependencyDir]) {
result.push({ name: dependencyName, path: dependencyDir });
// Now that we've added this package to the list to resolve, add all its children.
const childPackageResult = getPackageList(
dependencyDir,
packageSearchRoot,
{ ...resolvedSoFar, [dependencyDir]: true }
);
Array.prototype.push.apply(result, childPackageResult);
}
});
return result;
}
function getPackageDependencies(packagePath) {
const packageJsonPath = path.resolve(packagePath, "package.json");
if (packageDependencyDataCache[packageJsonPath]) {
return packageDependencyDataCache[packageJsonPath];
}
const pkgData = fse.readJSONSync(packageJsonPath);
const depData = pkgData["dependencies"];
if (!depData) {
packageDependencyDataCache[packageJsonPath] = [];
} else {
packageDependencyDataCache[packageJsonPath] = Object.keys(depData);
}
return packageDependencyDataCache[packageJsonPath];
}
function wrapConsoleOutput(process) {
if (process.stdout) {
process.stdout.on("data", (data) => {
console.log(data.toString());
});
}
if (process.stderr) {
process.stderr.on("data", (data) => {
console.error(data.toString());
});
}
}
| {
return result;
} | conditional_block |
create-docker-context-for-node-component.js | #!/usr/bin/env node
const childProcess = require("child_process");
const fse = require("fs-extra");
const path = require("path");
const process = require("process");
const yargs = require("yargs");
const _ = require("lodash");
const isSubDir = require("is-subdir");
const {
getVersions,
getTags,
getName,
getRepository
} = require("./docker-util");
// --- cache dependencies data from package.json
const packageDependencyDataCache = {};
const argv = yargs
.options({
build: {
description: "Pipe the Docker context straight to Docker.",
type: "boolean",
default: false
},
tag: {
description:
'The tag to pass to "docker build". This parameter is only used if --build is specified. If the value of this parameter is `auto`, a tag name is automatically created from NPM configuration.',
type: "string",
default: "auto"
},
repository: {
description:
"The repository to use in auto tag generation. Will default to '', i.e. dockerhub unless --local is set. Requires --tag=auto",
type: "string",
default: process.env.MAGDA_DOCKER_REPOSITORY
},
name: {
description:
"The package name to use in auto tag generation. Will default to ''. Used to override the docker nanme config in package.json during the auto tagging. Requires --tag=auto",
type: "string",
default: process.env.MAGDA_DOCKER_NAME
},
version: {
description:
"The version(s) to use in auto tag generation. Will default to the current version in package.json. Requires --tag=auto",
type: "string",
array: true,
default: process.env.MAGDA_DOCKER_VERSION
},
output: {
description:
"The output path and filename for the Docker context .tar file.",
type: "string"
},
local: {
description:
"Build for a local Kubernetes container registry. This parameter is only used if --build is specified.",
type: "boolean",
default: false
},
push: {
description:
"Push the build image to the docker registry. This parameter is only used if --build is specified.",
type: "boolean",
default: false
},
platform: {
description:
"A list of platform that the docker image build should target. Specify this value will enable multi-arch image build.",
type: "string"
},
noCache: {
description: "Disable the cache during the docker image build.",
type: "boolean",
default: false
},
cacheFromVersion: {
description:
"Version to cache from when building, using the --cache-from field in docker. Will use the same repository and name. Using this options causes the image to be pulled before build.",
type: "string"
}
})
// Because 'version is a default yargs thing we need to specifically override its normal parsing.
.version(false)
.array("version")
.help().argv;
if (!argv.build && !argv.output) {
console.log("Either --build or --output <filename> must be specified.");
process.exit(1);
}
if (argv.platform && !argv.push) {
console.log(
"When --platform is specified, --push must be specified as well as multi-arch image can only be pushed to remote registry."
);
process.exit(1);
}
if (argv.noCache && argv.cacheFromVersion) {
console.log("When --noCache=true, --cacheFromVersion can't be specified.");
process.exit(1);
}
const componentSrcDir = path.resolve(process.cwd());
const dockerContextDir = fse.mkdtempSync(
path.resolve(__dirname, "..", "docker-context-")
);
const componentDestDir = path.resolve(dockerContextDir, "component");
fse.emptyDirSync(dockerContextDir);
fse.ensureDirSync(componentDestDir);
preparePackage(componentSrcDir, componentDestDir);
const tar = process.platform === "darwin" ? "gtar" : "tar";
// Docker and ConEmu (an otherwise excellent console for Windows) don't get along.
// See: https://github.com/Maximus5/ConEmu/issues/958 and https://github.com/moby/moby/issues/28814
// So if we're running under ConEmu, we need to add an extra -cur_console:i parameter to disable
// ConEmu's hooks and also set ConEmuANSI to OFF so Docker doesn't do anything drastic.
const env = Object.assign({}, process.env);
const extraParameters = [];
if (env.ConEmuANSI === "ON") {
env.ConEmuANSI = "OFF";
extraParameters.push("-cur_console:i");
}
updateDockerFile(componentSrcDir, componentDestDir);
if (argv.build) {
const cacheFromImage =
argv.cacheFromVersion &&
getRepository(argv.local, argv.repository) +
getName(argv.name) +
":" +
argv.cacheFromVersion;
if (cacheFromImage) {
// Pull this image into the docker daemon - if it fails we don't care, we'll just go from scratch.
const dockerPullProcess = childProcess.spawnSync(
"docker",
[...extraParameters, "pull", cacheFromImage],
{
stdio: "inherit",
env: env
}
);
wrapConsoleOutput(dockerPullProcess);
}
const tarProcess = childProcess.spawn(
tar,
[...extraParameters, "--dereference", "-czf", "-", "*"],
{
cwd: dockerContextDir,
stdio: ["inherit", "pipe", "inherit"],
env: env,
shell: true
}
);
const tags = getTags(
argv.tag,
argv.local,
argv.repository,
argv.version,
argv.name
);
const tagArgs = tags
.map((tag) => ["-t", tag])
.reduce((soFar, tagArgs) => soFar.concat(tagArgs), []);
const cacheFromArgs = cacheFromImage
? ["--cache-from", cacheFromImage]
: [];
const dockerProcess = childProcess.spawn(
"docker",
[
...extraParameters,
...(argv.platform ? ["buildx"] : []),
"build",
...tagArgs,
...cacheFromArgs,
...(argv.noCache ? ["--no-cache"] : []),
...(argv.platform ? ["--platform", argv.platform, "--push"] : []),
"-f",
`./component/Dockerfile`,
"-"
],
{
stdio: ["pipe", "inherit", "inherit"],
env: env
}
);
wrapConsoleOutput(dockerProcess);
dockerProcess.on("close", (code) => {
fse.removeSync(dockerContextDir);
if (code === 0 && argv.push && !argv.platform) {
if (tags.length === 0) {
console.error("Can not push an image without a tag.");
process.exit(1);
}
// Stop if there's a code !== 0
tags.every((tag) => {
const process = childProcess.spawnSync(
"docker",
["push", tag],
{
stdio: "inherit"
}
);
code = process.status;
return code === 0;
});
}
process.exit(code);
});
tarProcess.on("close", (code) => {
dockerProcess.stdin.end();
});
tarProcess.stdout.on("data", (data) => {
dockerProcess.stdin.write(data);
});
} else if (argv.output) {
const outputPath = path.resolve(process.cwd(), argv.output);
const outputTar = fse.openSync(outputPath, "w", 0o644);
const tarProcess = childProcess.spawn(
tar,
["--dereference", "-czf", "-", "*"],
{
cwd: dockerContextDir,
stdio: ["inherit", outputTar, "inherit"],
env: env,
shell: true
}
);
tarProcess.on("close", (code) => {
fse.closeSync(outputTar);
console.log(tarProcess.status);
fse.removeSync(dockerContextDir);
});
}
function updateDockerFile(sourceDir, destDir) {
const tags = getVersions(argv.local, argv.version);
const repository = getRepository(argv.local, argv.repository);
const dockerFileContents = fse.readFileSync(
path.resolve(sourceDir, "Dockerfile"),
"utf-8"
);
const replacedDockerFileContents = dockerFileContents
// Add a repository if this is a magda image
.replace(
/FROM .*(magda-[^:\s]+)(:[^\s]+)/,
"FROM " + repository + "$1" + (tags[0] ? ":" + tags[0] : "$2")
);
fse.writeFileSync(
path.resolve(destDir, "Dockerfile"),
replacedDockerFileContents,
"utf-8"
);
}
function preparePackage(packageDir, destDir) {
const packageJson = require(path.join(packageDir, "package.json"));
const dockerIncludesFromPackageJson =
packageJson.config &&
packageJson.config.docker &&
packageJson.config.docker.include;
let dockerIncludes;
if (!dockerIncludesFromPackageJson) {
console.log(
`WARNING: Package ${packageDir} does not have a config.docker.include key in package.json, so all of its files will be included in the docker image.`
);
dockerIncludes = fse.readdirSync(packageDir);
} else if (dockerIncludesFromPackageJson.trim() === "*") {
dockerIncludes = fse.readdirSync(packageDir);
} else {
if (dockerIncludesFromPackageJson.indexOf("*") >= 0) {
throw new Error(
"Sorry, wildcards are not currently supported in config.docker.include."
);
}
dockerIncludes = dockerIncludesFromPackageJson
.split(" ")
.filter((include) => include.length > 0);
}
dockerIncludes
.filter((include) => include !== "Dockerfile") // Filter out the dockerfile because we'll manually copy over a modified version.
.forEach(function (include) {
const src = path.resolve(packageDir, include);
const dest = path.resolve(destDir, include);
if (include === "node_modules") {
fse.ensureDirSync(dest);
const env = Object.create(process.env);
env.NODE_ENV = "production";
const productionPackages = _.uniqBy(
getPackageList(packageDir, path.resolve(packageDir, "..")),
(package) => package.path
);
prepareNodeModules(src, dest, productionPackages);
return;
}
try {
// On Windows we can't create symlinks to files without special permissions.
// So just copy the file instead. Usually creating directory junctions is
// fine without special permissions, but fall back on copying in the unlikely
// event that fails, too.
const type = fse.statSync(src).isFile() ? "file" : "junction";
fse.ensureSymlinkSync(src, dest, type);
} catch (e) {
fse.copySync(src, dest);
}
});
}
function prepareNodeModules(packageDir, destDir, productionPackages) {
productionPackages.forEach((src) => {
const relativePath = path.relative(packageDir, src.path);
const dest = path.resolve(destDir, relativePath);
const srcPath = path.resolve(packageDir, relativePath);
// console.log("src " + srcPath + " to " + dest);
try {
const stat = fse.lstatSync(srcPath);
const type = stat.isFile() ? "file" : "junction";
fse.ensureSymlinkSync(srcPath, dest, type);
} catch (e) {
fse.copySync(srcPath, dest);
}
});
}
function getPackageList(packagePath, packageSearchRoot, resolvedSoFar = {}) {
const dependencies = getPackageDependencies(packagePath);
const result = [];
if (!dependencies || !dependencies.length) {
return result;
}
dependencies.forEach(function (dependencyName) {
const dependencyNamePath = dependencyName.replace(/\//g, path.sep);
let currentBaseDir = packagePath;
let dependencyDir;
do {
dependencyDir = path.resolve(
currentBaseDir,
"node_modules",
dependencyNamePath
);
if (
currentBaseDir === packageSearchRoot ||
isSubDir(currentBaseDir, packageSearchRoot)
) {
// --- will not look for packages outside project root directory
break;
}
// Does this directory exist? If not, imitate node's module resolution by walking
// up the directory tree.
currentBaseDir = path.resolve(currentBaseDir, "..");
} while (!fse.existsSync(dependencyDir));
if (!fse.existsSync(dependencyDir)) {
throw new Error(
"Could not find path for " +
dependencyName +
" @ " +
packagePath
);
}
// If we haven't already seen this
if (!resolvedSoFar[dependencyDir]) {
result.push({ name: dependencyName, path: dependencyDir });
// Now that we've added this package to the list to resolve, add all its children.
const childPackageResult = getPackageList(
dependencyDir,
packageSearchRoot,
{ ...resolvedSoFar, [dependencyDir]: true }
);
Array.prototype.push.apply(result, childPackageResult);
}
});
return result;
}
function getPackageDependencies(packagePath) {
const packageJsonPath = path.resolve(packagePath, "package.json");
if (packageDependencyDataCache[packageJsonPath]) {
return packageDependencyDataCache[packageJsonPath];
}
const pkgData = fse.readJSONSync(packageJsonPath);
const depData = pkgData["dependencies"];
if (!depData) {
packageDependencyDataCache[packageJsonPath] = [];
} else {
packageDependencyDataCache[packageJsonPath] = Object.keys(depData);
}
return packageDependencyDataCache[packageJsonPath];
}
function wrapConsoleOutput(process) | {
if (process.stdout) {
process.stdout.on("data", (data) => {
console.log(data.toString());
});
}
if (process.stderr) {
process.stderr.on("data", (data) => {
console.error(data.toString());
});
}
} | identifier_body | |
create-docker-context-for-node-component.js | #!/usr/bin/env node
const childProcess = require("child_process");
const fse = require("fs-extra");
const path = require("path");
const process = require("process");
const yargs = require("yargs");
const _ = require("lodash");
const isSubDir = require("is-subdir");
const {
getVersions,
getTags,
getName,
getRepository
} = require("./docker-util");
// --- cache dependencies data from package.json
const packageDependencyDataCache = {};
const argv = yargs
.options({
build: {
description: "Pipe the Docker context straight to Docker.",
type: "boolean",
default: false
},
tag: {
description:
'The tag to pass to "docker build". This parameter is only used if --build is specified. If the value of this parameter is `auto`, a tag name is automatically created from NPM configuration.',
type: "string",
default: "auto"
},
repository: {
description:
"The repository to use in auto tag generation. Will default to '', i.e. dockerhub unless --local is set. Requires --tag=auto",
type: "string",
default: process.env.MAGDA_DOCKER_REPOSITORY
},
name: {
description:
"The package name to use in auto tag generation. Will default to ''. Used to override the docker nanme config in package.json during the auto tagging. Requires --tag=auto",
type: "string",
default: process.env.MAGDA_DOCKER_NAME
},
version: {
description:
"The version(s) to use in auto tag generation. Will default to the current version in package.json. Requires --tag=auto",
type: "string",
array: true,
default: process.env.MAGDA_DOCKER_VERSION
},
output: {
description:
"The output path and filename for the Docker context .tar file.",
type: "string"
},
local: {
description:
"Build for a local Kubernetes container registry. This parameter is only used if --build is specified.",
type: "boolean",
default: false
},
push: {
description:
"Push the build image to the docker registry. This parameter is only used if --build is specified.",
type: "boolean",
default: false
},
platform: {
description:
"A list of platform that the docker image build should target. Specify this value will enable multi-arch image build.",
type: "string"
},
noCache: {
description: "Disable the cache during the docker image build.",
type: "boolean",
default: false
},
cacheFromVersion: {
description:
"Version to cache from when building, using the --cache-from field in docker. Will use the same repository and name. Using this options causes the image to be pulled before build.",
type: "string"
}
})
// Because 'version is a default yargs thing we need to specifically override its normal parsing.
.version(false)
.array("version")
.help().argv;
if (!argv.build && !argv.output) {
console.log("Either --build or --output <filename> must be specified.");
process.exit(1);
}
if (argv.platform && !argv.push) {
console.log(
"When --platform is specified, --push must be specified as well as multi-arch image can only be pushed to remote registry."
);
process.exit(1);
}
if (argv.noCache && argv.cacheFromVersion) {
console.log("When --noCache=true, --cacheFromVersion can't be specified.");
process.exit(1);
}
const componentSrcDir = path.resolve(process.cwd());
const dockerContextDir = fse.mkdtempSync(
path.resolve(__dirname, "..", "docker-context-")
);
const componentDestDir = path.resolve(dockerContextDir, "component");
fse.emptyDirSync(dockerContextDir);
fse.ensureDirSync(componentDestDir);
preparePackage(componentSrcDir, componentDestDir);
const tar = process.platform === "darwin" ? "gtar" : "tar";
// Docker and ConEmu (an otherwise excellent console for Windows) don't get along.
// See: https://github.com/Maximus5/ConEmu/issues/958 and https://github.com/moby/moby/issues/28814
// So if we're running under ConEmu, we need to add an extra -cur_console:i parameter to disable
// ConEmu's hooks and also set ConEmuANSI to OFF so Docker doesn't do anything drastic.
const env = Object.assign({}, process.env);
const extraParameters = [];
if (env.ConEmuANSI === "ON") {
env.ConEmuANSI = "OFF";
extraParameters.push("-cur_console:i");
}
updateDockerFile(componentSrcDir, componentDestDir);
if (argv.build) {
const cacheFromImage =
argv.cacheFromVersion &&
getRepository(argv.local, argv.repository) +
getName(argv.name) +
":" +
argv.cacheFromVersion;
if (cacheFromImage) {
// Pull this image into the docker daemon - if it fails we don't care, we'll just go from scratch.
const dockerPullProcess = childProcess.spawnSync(
"docker",
[...extraParameters, "pull", cacheFromImage],
{
stdio: "inherit",
env: env
}
);
wrapConsoleOutput(dockerPullProcess);
}
const tarProcess = childProcess.spawn(
tar,
[...extraParameters, "--dereference", "-czf", "-", "*"],
{
cwd: dockerContextDir,
stdio: ["inherit", "pipe", "inherit"],
env: env,
shell: true
}
);
const tags = getTags(
argv.tag,
argv.local,
argv.repository,
argv.version,
argv.name
);
const tagArgs = tags
.map((tag) => ["-t", tag])
.reduce((soFar, tagArgs) => soFar.concat(tagArgs), []);
const cacheFromArgs = cacheFromImage
? ["--cache-from", cacheFromImage]
: [];
const dockerProcess = childProcess.spawn(
"docker",
[
...extraParameters,
...(argv.platform ? ["buildx"] : []),
"build",
...tagArgs,
...cacheFromArgs,
...(argv.noCache ? ["--no-cache"] : []),
...(argv.platform ? ["--platform", argv.platform, "--push"] : []),
"-f",
`./component/Dockerfile`,
"-"
],
{
stdio: ["pipe", "inherit", "inherit"],
env: env
}
);
wrapConsoleOutput(dockerProcess);
dockerProcess.on("close", (code) => {
fse.removeSync(dockerContextDir);
if (code === 0 && argv.push && !argv.platform) {
if (tags.length === 0) {
console.error("Can not push an image without a tag.");
process.exit(1);
}
// Stop if there's a code !== 0
tags.every((tag) => {
const process = childProcess.spawnSync(
"docker",
["push", tag],
{
stdio: "inherit"
}
);
code = process.status;
return code === 0;
});
}
process.exit(code);
});
tarProcess.on("close", (code) => {
dockerProcess.stdin.end();
});
tarProcess.stdout.on("data", (data) => {
dockerProcess.stdin.write(data);
});
} else if (argv.output) {
const outputPath = path.resolve(process.cwd(), argv.output);
const outputTar = fse.openSync(outputPath, "w", 0o644);
const tarProcess = childProcess.spawn(
tar,
["--dereference", "-czf", "-", "*"],
{
cwd: dockerContextDir,
stdio: ["inherit", outputTar, "inherit"],
env: env,
shell: true
}
);
tarProcess.on("close", (code) => {
fse.closeSync(outputTar);
console.log(tarProcess.status);
fse.removeSync(dockerContextDir);
});
}
function updateDockerFile(sourceDir, destDir) {
const tags = getVersions(argv.local, argv.version);
const repository = getRepository(argv.local, argv.repository);
const dockerFileContents = fse.readFileSync(
path.resolve(sourceDir, "Dockerfile"),
"utf-8"
);
const replacedDockerFileContents = dockerFileContents
// Add a repository if this is a magda image
.replace(
/FROM .*(magda-[^:\s]+)(:[^\s]+)/,
"FROM " + repository + "$1" + (tags[0] ? ":" + tags[0] : "$2")
);
fse.writeFileSync(
path.resolve(destDir, "Dockerfile"),
replacedDockerFileContents,
"utf-8"
);
}
function preparePackage(packageDir, destDir) {
const packageJson = require(path.join(packageDir, "package.json"));
const dockerIncludesFromPackageJson =
packageJson.config &&
packageJson.config.docker &&
packageJson.config.docker.include;
let dockerIncludes;
if (!dockerIncludesFromPackageJson) {
console.log(
`WARNING: Package ${packageDir} does not have a config.docker.include key in package.json, so all of its files will be included in the docker image.`
);
dockerIncludes = fse.readdirSync(packageDir);
} else if (dockerIncludesFromPackageJson.trim() === "*") {
dockerIncludes = fse.readdirSync(packageDir);
} else {
if (dockerIncludesFromPackageJson.indexOf("*") >= 0) {
throw new Error(
"Sorry, wildcards are not currently supported in config.docker.include."
);
}
dockerIncludes = dockerIncludesFromPackageJson
.split(" ")
.filter((include) => include.length > 0);
}
dockerIncludes
.filter((include) => include !== "Dockerfile") // Filter out the dockerfile because we'll manually copy over a modified version.
.forEach(function (include) {
const src = path.resolve(packageDir, include);
const dest = path.resolve(destDir, include);
if (include === "node_modules") {
fse.ensureDirSync(dest);
const env = Object.create(process.env);
env.NODE_ENV = "production";
const productionPackages = _.uniqBy( |
return;
}
try {
// On Windows we can't create symlinks to files without special permissions.
// So just copy the file instead. Usually creating directory junctions is
// fine without special permissions, but fall back on copying in the unlikely
// event that fails, too.
const type = fse.statSync(src).isFile() ? "file" : "junction";
fse.ensureSymlinkSync(src, dest, type);
} catch (e) {
fse.copySync(src, dest);
}
});
}
function prepareNodeModules(packageDir, destDir, productionPackages) {
productionPackages.forEach((src) => {
const relativePath = path.relative(packageDir, src.path);
const dest = path.resolve(destDir, relativePath);
const srcPath = path.resolve(packageDir, relativePath);
// console.log("src " + srcPath + " to " + dest);
try {
const stat = fse.lstatSync(srcPath);
const type = stat.isFile() ? "file" : "junction";
fse.ensureSymlinkSync(srcPath, dest, type);
} catch (e) {
fse.copySync(srcPath, dest);
}
});
}
function getPackageList(packagePath, packageSearchRoot, resolvedSoFar = {}) {
const dependencies = getPackageDependencies(packagePath);
const result = [];
if (!dependencies || !dependencies.length) {
return result;
}
dependencies.forEach(function (dependencyName) {
const dependencyNamePath = dependencyName.replace(/\//g, path.sep);
let currentBaseDir = packagePath;
let dependencyDir;
do {
dependencyDir = path.resolve(
currentBaseDir,
"node_modules",
dependencyNamePath
);
if (
currentBaseDir === packageSearchRoot ||
isSubDir(currentBaseDir, packageSearchRoot)
) {
// --- will not look for packages outside project root directory
break;
}
// Does this directory exist? If not, imitate node's module resolution by walking
// up the directory tree.
currentBaseDir = path.resolve(currentBaseDir, "..");
} while (!fse.existsSync(dependencyDir));
if (!fse.existsSync(dependencyDir)) {
throw new Error(
"Could not find path for " +
dependencyName +
" @ " +
packagePath
);
}
// If we haven't already seen this
if (!resolvedSoFar[dependencyDir]) {
result.push({ name: dependencyName, path: dependencyDir });
// Now that we've added this package to the list to resolve, add all its children.
const childPackageResult = getPackageList(
dependencyDir,
packageSearchRoot,
{ ...resolvedSoFar, [dependencyDir]: true }
);
Array.prototype.push.apply(result, childPackageResult);
}
});
return result;
}
function getPackageDependencies(packagePath) {
const packageJsonPath = path.resolve(packagePath, "package.json");
if (packageDependencyDataCache[packageJsonPath]) {
return packageDependencyDataCache[packageJsonPath];
}
const pkgData = fse.readJSONSync(packageJsonPath);
const depData = pkgData["dependencies"];
if (!depData) {
packageDependencyDataCache[packageJsonPath] = [];
} else {
packageDependencyDataCache[packageJsonPath] = Object.keys(depData);
}
return packageDependencyDataCache[packageJsonPath];
}
function wrapConsoleOutput(process) {
if (process.stdout) {
process.stdout.on("data", (data) => {
console.log(data.toString());
});
}
if (process.stderr) {
process.stderr.on("data", (data) => {
console.error(data.toString());
});
}
} | getPackageList(packageDir, path.resolve(packageDir, "..")),
(package) => package.path
);
prepareNodeModules(src, dest, productionPackages); | random_line_split |
main.py | import socket
import re
import math
import time
from datetime import datetime, timedelta
import random
import numpy
import RPi.GPIO as GPIO
import cv2
import numpy as np
import base64
from Class_Def import *
from module import *
#from open_cv import *
HOST = '192.168.0.118'
PORT = 9000
font = cv2.FONT_HERSHEY_COMPLEX
Unit_horizon = []
Unit_vertical = []
Unit_hpass = []
Unit_vpass = []
p1=[]
p2=[]
p3=[]
p4=[]
lot = 0
hStandard = 0.0
vStandard = 0.0
count = 0
Hadjust = 0
Vadjust = 0
### open_cv 스크립트 변수들
p_list = []
dot1=[]
dot2=[]
dot3=[]
dot4=[]
imgstring = ''
temperature = 0
### open_cv 스크립트 변수들 2020-11-03 옮김
hunpassCount = 0
vunpassCount = 0
a=0
TotalunpassCount = 0
time1 = 0
time2 = 0
dateGap = 0
def opencapture(count):
global img
global imgstring
cap = cv2.VideoCapture(-1)
while True:
ret, img = cap.read() # Read 결과와 frame
img = img[170:450, 40:600]
cv2.imshow("Battery_live", img)
cv2.waitKey(1)
if not ret:
break
print("Image " + str(count) + "saved")
file = '/home/pi/project/' + str(count) + '.jpg'
#file = '/home/pi/project/' + 'CaptureTest'+'.jpg'
cv2.imwrite(file, img)
img2 = cv2.imread(file, cv2.IMREAD_COLOR)
img3 = cv2.imread(file, cv2.IMREAD_GRAYSCALE)
_, threshold = cv2.threshold(img3, 150, 255, cv2.THRESH_BINARY)
contours, _ = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
p_list.clear()
for cnt in contours:
epsilon = 0.02 * cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, epsilon, True)
cv2.drawContours(img2, [approx], 0, (0, 0, 255), 1)
n = approx.ravel()
i = 0
for j in n:
if (i % 2 == 0):
x = n[i]
y = n[i + 1]
string = str(x) + ", " + str(y)
cv2.putText(img2, string, (x, y), font, 0.5, (0, 0, 255))
string_list = string.split(",")
string_list = list(map(int, string_list))
p_list.append(string_list)
i = i + 1
#time.sleep(0.5)
if len(p_list)==4:
cv2.imshow('Battery_(x, y)', img2)
cv2.imwrite(file, img2)
res = cv2.resize(img2,(762,461))
cv2.imwrite(file, res)
image = open(file,'rb')
image_read = image.read()
image_64_encode = base64.encodebytes(image_read)
imgstring = b'<sof>' + image_64_encode + b'<eof>'
return p_list
else:
p_list.clear()
continue
break
def calculate(count): #start를 calculate로 | global imgstring
global hStandard
global vStandard
global TotalunpassCount
global dateGap
Result_horizon, Result_vertical = Dot_Distance(p1[count][0], p1[count][1], p2[count][0], p2[count][1], p3[count][0], p3[count][1], p4[count][0], p4[count][1])
Unit_horizon.append(Result_horizon)
Unit_vertical.append(Result_vertical)
Result_hpass = Unit_Defect(hStandard, Result_horizon)
Result_vpass = Unit_Defect(vStandard, Result_vertical)
temperature = read_temp()
#temperature = '26.5'
if Result_hpass == 1 and Result_vpass == 1:
led_on(led2) #green
setServoPos1(10)
time.sleep(1)
setServoPos2(165)
time.sleep(1)
elif Result_hpass == 1 and Result_vpass == 0: #세로만 불량
led_on(led3) #red
setServoPos1(10)
time.sleep(1)
setServoPos2(125)
TotalunpassCount+=1
time.sleep(1)
elif Result_hpass == 0 and Result_vpass == 1: #가로만 불량
led_on(led3) #red
setServoPos1(45)
time.sleep(1)
setServoPos2(165)
TotalunpassCount+=1
time.sleep(1)
elif Result_hpass == 0 and Result_vpass == 0: #가로, 세로 둘다 불량
led_on(led3) #red
setServoPos1(155)
time.sleep(1)
setServoPos2(80)
TotalunpassCount+=1
time.sleep(1)
time.sleep(0.3)
#Result_pass
Unit_hpass.append(Result_hpass)
Unit_vpass.append(Result_vpass)
print('TOCUnit_no'+str(count+1)+","+'Unit_horizon'+str(Result_horizon)+","+'Unit_vertical'+str(Result_vertical)+","+'Unit_hpass'+str(Result_hpass)+","+'Unit_vpass'+str(Result_vpass)+","+'TEMP'+str(temperature)+","+'Unit_date'+str(Date)+","+'GO')
send_dp = 'TOCUnit_no'+str(count+1)+","+'Unit_horizon'+str(Result_horizon)+","+'Unit_vertical'+str(Result_vertical)+","+'Unit_hpass'+str(Result_hpass)+","+'Unit_vpass'+str(Result_vpass)+","+'TEMP'+str(temperature)+","+'Unit_date'+str(Date)+","+'GO'
s.send(send_dp.encode('UTF-8'))
time.sleep(0.5)
s.send(imgstring)
time.sleep(0.3)
if count == (lot-1):
#AQL_pass
AQL_hpass = AQL_Chart(Unit_hpass, Sample_Letter(lot))
AQL_vpass = AQL_Chart(Unit_vpass, Sample_Letter(lot))
#Deviation
HDeviation = Sigma(Unit_horizon)
VDeviation = Sigma(Unit_vertical)
#Mean
HMean = Avg(Unit_horizon)
VMean = Avg(Unit_vertical)
#Cp
HCp = PCA(HDeviation, hStandard)
VCp = PCA(VDeviation, vStandard)
Hadjust = adjust(Unit_horizon, hStandard, lot)
Vadjust = adjust(Unit_vertical, vStandard, lot)
hunpassCount, hDefectrate = CountRate(Unit_hpass, lot)
vunpassCount, vDefectrate = CountRate(Unit_vpass, lot)
TotalDefectrate = round(TotalunpassCount/lot * 100, 1)
time2 = time.time()
Gap = time2-time1
dateGap = convert_seconds_to_kor_time(Gap)
print('TOCAQL_hpass'+str(AQL_hpass)+","+'AQL_vpass'+str(AQL_vpass)+","+'Sigmah'+str(HDeviation)+","+'Sigmav'+str(VDeviation)+","+'Meanh'+str(HMean)+","+'Meanv'+str(VMean)+","+'Cph' + str(HCp)+","+'Cpv' + str(VCp)+ "," +'hunpassCount'+str(hunpassCount)+","+'vunpassCount'+str(vunpassCount)+","+'hDefectrate' + str(hDefectrate)+","+'vDefectrate' + str(vDefectrate)+","+'Hadjust' + str(Hadjust)+","+'Vadjust' + str(Vadjust)+","+'TotalunpassCount'+str(TotalunpassCount)+","+'TotalDefectrate' + str(TotalDefectrate)+","+'Date' + str(Date)+","+'dateGap' + str(dateGap)+","+'lot' + str(lot)+","+'Model'+str(Model))
send_Final = 'TOCAQL_hpass'+str(AQL_hpass)+","+'AQL_vpass'+str(AQL_vpass)+","+'Sigmah'+str(HDeviation)+","+'Sigmav'+str(VDeviation)+","+'Meanh'+str(HMean)+","+'Meanv'+str(VMean)+","+'Cph'+str(HCp)+","+'Cpv'+str(VCp)+"," +'hunpassCount'+str(hunpassCount)+","+'vunpassCount'+str(vunpassCount)+","+'hDefectrate'+str(hDefectrate)+","+'vDefectrate'+str(vDefectrate)+","+'Hadjust'+str(Hadjust)+","+'Vadjust'+str(Vadjust)+","+'TotalunpassCount'+str(TotalunpassCount)+","+'TotalDefectrate'+str(TotalDefectrate)+","+'Date' + str(Date)+","+'dateGap'+str(dateGap)+","+'lot'+str(lot)+","+'Model'+str(Model)
time.sleep(3)
s.send(send_Final.encode('UTF-8'))
return
if __name__ == "__main__":
s = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
s.connect( ( HOST, PORT ) )
print('Hi! I am client')
try:
while True:
if a == 0:
recv_msg = s.recv(1024).decode('UTF-8')
if recv_msg.startswith('TOR'):
recv_msg = recv_msg.replace('TORDatesStand,','').split(',')
print(recv_msg)
Model = str(recv_msg[0])
print(Model)
lot = int(recv_msg[1])
print(lot)
hStandard = float(recv_msg[2])
print(hStandard)
vStandard = float(recv_msg[3])
print(vStandard)
Date = int(recv_msg[4])
print(Date)
a=1
recv_msg = ''
elif a == 1:
time1 = time.time()
setServoPos1(90)
time.sleep(1)
setServoPos2(80)
time.sleep(1)
while True:
main_conveyor_On(con1_port)
main_conveyor_On(con2_port)
time.sleep(2)
while True:
d1 = Sonar(sig1)
if d1 <= 4.5:
main_conveyor_Off(con1_port)
break
else:
continue
led_on(led1)
led_off(led2)
led_off(led3)
dot1, dot2, dot3, dot4 = opencapture(count)
a, b, c, d = dot1234(dot1, dot2, dot3, dot4)
temp = [dot1, dot2, dot3, dot4]
for i in range(4):
if i == a:
p1.append(temp[i])
elif i == b:
p2.append(temp[i])
elif i == c:
p3.append(temp[i])
elif i == d:
p4.append(temp[i])
calculate(count)
led_off(led1)
count+=1
if count == lot:
main_conveyor_On(con1_port)
time.sleep(11)
led_off(led2)
led_off(led3)
main_conveyor_Off(con1_port)
main_conveyor_Off(con2_port)
setServoPos1(90)
time.sleep(1)
setServoPos2(80)
time.sleep(1)
Unit_horizon = []
Unit_vertical = []
Unit_hpass = []
Unit_vpass = []
p1=[]
p2=[]
p3=[]
p4=[]
count=0
a=0
TotalunpassCount = 0
TotalDefectrate = 0
print(recv_msg)
break
except (ValueError, KeyboardInterrupt):
print( '[ Server Message ( Exception ) : non numeric' )
print( '[ Server Message ( Exception ) : close client connection' )
GPIO.cleanup()
s.close()
finally:
GPIO.cleanup()
s.close()
| 바꿈
| identifier_name |
main.py | import socket
import re
import math
import time
from datetime import datetime, timedelta
import random
import numpy
import RPi.GPIO as GPIO
import cv2
import numpy as np
import base64
from Class_Def import *
from module import *
#from open_cv import *
HOST = '192.168.0.118'
PORT = 9000
font = cv2.FONT_HERSHEY_COMPLEX
Unit_horizon = []
Unit_vertical = []
Unit_hpass = []
Unit_vpass = []
p1=[]
p2=[]
p3=[]
p4=[]
lot = 0
| count = 0
Hadjust = 0
Vadjust = 0
### open_cv 스크립트 변수들
p_list = []
dot1=[]
dot2=[]
dot3=[]
dot4=[]
imgstring = ''
temperature = 0
### open_cv 스크립트 변수들 2020-11-03 옮김
hunpassCount = 0
vunpassCount = 0
a=0
TotalunpassCount = 0
time1 = 0
time2 = 0
dateGap = 0
def opencapture(count):
global img
global imgstring
cap = cv2.VideoCapture(-1)
while True:
ret, img = cap.read() # Read 결과와 frame
img = img[170:450, 40:600]
cv2.imshow("Battery_live", img)
cv2.waitKey(1)
if not ret:
break
print("Image " + str(count) + "saved")
file = '/home/pi/project/' + str(count) + '.jpg'
#file = '/home/pi/project/' + 'CaptureTest'+'.jpg'
cv2.imwrite(file, img)
img2 = cv2.imread(file, cv2.IMREAD_COLOR)
img3 = cv2.imread(file, cv2.IMREAD_GRAYSCALE)
_, threshold = cv2.threshold(img3, 150, 255, cv2.THRESH_BINARY)
contours, _ = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
p_list.clear()
for cnt in contours:
epsilon = 0.02 * cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, epsilon, True)
cv2.drawContours(img2, [approx], 0, (0, 0, 255), 1)
n = approx.ravel()
i = 0
for j in n:
if (i % 2 == 0):
x = n[i]
y = n[i + 1]
string = str(x) + ", " + str(y)
cv2.putText(img2, string, (x, y), font, 0.5, (0, 0, 255))
string_list = string.split(",")
string_list = list(map(int, string_list))
p_list.append(string_list)
i = i + 1
#time.sleep(0.5)
if len(p_list)==4:
cv2.imshow('Battery_(x, y)', img2)
cv2.imwrite(file, img2)
res = cv2.resize(img2,(762,461))
cv2.imwrite(file, res)
image = open(file,'rb')
image_read = image.read()
image_64_encode = base64.encodebytes(image_read)
imgstring = b'<sof>' + image_64_encode + b'<eof>'
return p_list
else:
p_list.clear()
continue
break
def calculate(count): #start를 calculate로 바꿈
global imgstring
global hStandard
global vStandard
global TotalunpassCount
global dateGap
Result_horizon, Result_vertical = Dot_Distance(p1[count][0], p1[count][1], p2[count][0], p2[count][1], p3[count][0], p3[count][1], p4[count][0], p4[count][1])
Unit_horizon.append(Result_horizon)
Unit_vertical.append(Result_vertical)
Result_hpass = Unit_Defect(hStandard, Result_horizon)
Result_vpass = Unit_Defect(vStandard, Result_vertical)
temperature = read_temp()
#temperature = '26.5'
if Result_hpass == 1 and Result_vpass == 1:
led_on(led2) #green
setServoPos1(10)
time.sleep(1)
setServoPos2(165)
time.sleep(1)
elif Result_hpass == 1 and Result_vpass == 0: #세로만 불량
led_on(led3) #red
setServoPos1(10)
time.sleep(1)
setServoPos2(125)
TotalunpassCount+=1
time.sleep(1)
elif Result_hpass == 0 and Result_vpass == 1: #가로만 불량
led_on(led3) #red
setServoPos1(45)
time.sleep(1)
setServoPos2(165)
TotalunpassCount+=1
time.sleep(1)
elif Result_hpass == 0 and Result_vpass == 0: #가로, 세로 둘다 불량
led_on(led3) #red
setServoPos1(155)
time.sleep(1)
setServoPos2(80)
TotalunpassCount+=1
time.sleep(1)
time.sleep(0.3)
#Result_pass
Unit_hpass.append(Result_hpass)
Unit_vpass.append(Result_vpass)
print('TOCUnit_no'+str(count+1)+","+'Unit_horizon'+str(Result_horizon)+","+'Unit_vertical'+str(Result_vertical)+","+'Unit_hpass'+str(Result_hpass)+","+'Unit_vpass'+str(Result_vpass)+","+'TEMP'+str(temperature)+","+'Unit_date'+str(Date)+","+'GO')
send_dp = 'TOCUnit_no'+str(count+1)+","+'Unit_horizon'+str(Result_horizon)+","+'Unit_vertical'+str(Result_vertical)+","+'Unit_hpass'+str(Result_hpass)+","+'Unit_vpass'+str(Result_vpass)+","+'TEMP'+str(temperature)+","+'Unit_date'+str(Date)+","+'GO'
s.send(send_dp.encode('UTF-8'))
time.sleep(0.5)
s.send(imgstring)
time.sleep(0.3)
if count == (lot-1):
#AQL_pass
AQL_hpass = AQL_Chart(Unit_hpass, Sample_Letter(lot))
AQL_vpass = AQL_Chart(Unit_vpass, Sample_Letter(lot))
#Deviation
HDeviation = Sigma(Unit_horizon)
VDeviation = Sigma(Unit_vertical)
#Mean
HMean = Avg(Unit_horizon)
VMean = Avg(Unit_vertical)
#Cp
HCp = PCA(HDeviation, hStandard)
VCp = PCA(VDeviation, vStandard)
Hadjust = adjust(Unit_horizon, hStandard, lot)
Vadjust = adjust(Unit_vertical, vStandard, lot)
hunpassCount, hDefectrate = CountRate(Unit_hpass, lot)
vunpassCount, vDefectrate = CountRate(Unit_vpass, lot)
TotalDefectrate = round(TotalunpassCount/lot * 100, 1)
time2 = time.time()
Gap = time2-time1
dateGap = convert_seconds_to_kor_time(Gap)
print('TOCAQL_hpass'+str(AQL_hpass)+","+'AQL_vpass'+str(AQL_vpass)+","+'Sigmah'+str(HDeviation)+","+'Sigmav'+str(VDeviation)+","+'Meanh'+str(HMean)+","+'Meanv'+str(VMean)+","+'Cph' + str(HCp)+","+'Cpv' + str(VCp)+ "," +'hunpassCount'+str(hunpassCount)+","+'vunpassCount'+str(vunpassCount)+","+'hDefectrate' + str(hDefectrate)+","+'vDefectrate' + str(vDefectrate)+","+'Hadjust' + str(Hadjust)+","+'Vadjust' + str(Vadjust)+","+'TotalunpassCount'+str(TotalunpassCount)+","+'TotalDefectrate' + str(TotalDefectrate)+","+'Date' + str(Date)+","+'dateGap' + str(dateGap)+","+'lot' + str(lot)+","+'Model'+str(Model))
send_Final = 'TOCAQL_hpass'+str(AQL_hpass)+","+'AQL_vpass'+str(AQL_vpass)+","+'Sigmah'+str(HDeviation)+","+'Sigmav'+str(VDeviation)+","+'Meanh'+str(HMean)+","+'Meanv'+str(VMean)+","+'Cph'+str(HCp)+","+'Cpv'+str(VCp)+"," +'hunpassCount'+str(hunpassCount)+","+'vunpassCount'+str(vunpassCount)+","+'hDefectrate'+str(hDefectrate)+","+'vDefectrate'+str(vDefectrate)+","+'Hadjust'+str(Hadjust)+","+'Vadjust'+str(Vadjust)+","+'TotalunpassCount'+str(TotalunpassCount)+","+'TotalDefectrate'+str(TotalDefectrate)+","+'Date' + str(Date)+","+'dateGap'+str(dateGap)+","+'lot'+str(lot)+","+'Model'+str(Model)
time.sleep(3)
s.send(send_Final.encode('UTF-8'))
return
if __name__ == "__main__":
s = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
s.connect( ( HOST, PORT ) )
print('Hi! I am client')
try:
while True:
if a == 0:
recv_msg = s.recv(1024).decode('UTF-8')
if recv_msg.startswith('TOR'):
recv_msg = recv_msg.replace('TORDatesStand,','').split(',')
print(recv_msg)
Model = str(recv_msg[0])
print(Model)
lot = int(recv_msg[1])
print(lot)
hStandard = float(recv_msg[2])
print(hStandard)
vStandard = float(recv_msg[3])
print(vStandard)
Date = int(recv_msg[4])
print(Date)
a=1
recv_msg = ''
elif a == 1:
time1 = time.time()
setServoPos1(90)
time.sleep(1)
setServoPos2(80)
time.sleep(1)
while True:
main_conveyor_On(con1_port)
main_conveyor_On(con2_port)
time.sleep(2)
while True:
d1 = Sonar(sig1)
if d1 <= 4.5:
main_conveyor_Off(con1_port)
break
else:
continue
led_on(led1)
led_off(led2)
led_off(led3)
dot1, dot2, dot3, dot4 = opencapture(count)
a, b, c, d = dot1234(dot1, dot2, dot3, dot4)
temp = [dot1, dot2, dot3, dot4]
for i in range(4):
if i == a:
p1.append(temp[i])
elif i == b:
p2.append(temp[i])
elif i == c:
p3.append(temp[i])
elif i == d:
p4.append(temp[i])
calculate(count)
led_off(led1)
count+=1
if count == lot:
main_conveyor_On(con1_port)
time.sleep(11)
led_off(led2)
led_off(led3)
main_conveyor_Off(con1_port)
main_conveyor_Off(con2_port)
setServoPos1(90)
time.sleep(1)
setServoPos2(80)
time.sleep(1)
Unit_horizon = []
Unit_vertical = []
Unit_hpass = []
Unit_vpass = []
p1=[]
p2=[]
p3=[]
p4=[]
count=0
a=0
TotalunpassCount = 0
TotalDefectrate = 0
print(recv_msg)
break
except (ValueError, KeyboardInterrupt):
print( '[ Server Message ( Exception ) : non numeric' )
print( '[ Server Message ( Exception ) : close client connection' )
GPIO.cleanup()
s.close()
finally:
GPIO.cleanup()
s.close() | hStandard = 0.0
vStandard = 0.0
| random_line_split |
main.py | import socket
import re
import math
import time
from datetime import datetime, timedelta
import random
import numpy
import RPi.GPIO as GPIO
import cv2
import numpy as np
import base64
from Class_Def import *
from module import *
#from open_cv import *
HOST = '192.168.0.118'
PORT = 9000
font = cv2.FONT_HERSHEY_COMPLEX
Unit_horizon = []
Unit_vertical = []
Unit_hpass = []
Unit_vpass = []
p1=[]
p2=[]
p3=[]
p4=[]
lot = 0
hStandard = 0.0
vStandard = 0.0
count = 0
Hadjust = 0
Vadjust = 0
### open_cv 스크립트 변수들
p_list = []
dot1=[]
dot2=[]
dot3=[]
dot4=[]
imgstring = ''
temperature = 0
### open_cv 스크립트 변수들 2020-11-03 옮김
hunpassCount = 0
vunpassCount = 0
a=0
TotalunpassCount = 0
time1 = 0
time2 = 0
dateGap = 0
def opencapture(count):
global img
global imgstring
cap = cv2.VideoCapture(-1)
while True:
ret, img = cap.read() # Read 결과와 frame
img = img[170:450, 40:600]
cv2.imshow("Battery_live", img)
cv2.waitKey(1)
if not ret:
break
print("Image " + str(count) + "saved")
file = '/home/pi/project/' + str(count) + '.jpg'
#file = '/home/pi/project/' + 'CaptureTest'+'.jpg'
cv2.imwrite(file, img)
img2 = cv2.imread(file, cv2.IMREAD_COLOR)
img3 = cv2.imread(file, cv2.IMREAD_GRAYSCALE)
_, threshold = cv2.threshold(img3, 150, 255, cv2.THRESH_BINARY)
contours, _ = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
p_list.clear()
for cnt in contours:
epsilon = 0.02 * cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, epsilon, True)
cv2.drawContours(img2, [approx], 0, (0, 0, 255), 1)
n = approx.ravel()
i = 0
for j in n:
if (i % 2 == 0):
x = n[i]
y = n[i + 1]
string = str(x) + ", " + str(y)
cv2.putText(img2, string, (x, y), font, 0.5, (0, 0, 255))
string_list = string.split(",")
string_list = list(map(int, string_list))
p_list.append(string_list)
i = i + 1
#time.sleep(0.5)
if len(p_list)==4:
cv2.imshow('Battery_(x, y)', img2)
cv2.imwrite(file, img2)
res = cv2.resize(img2,(762,461))
cv2.imwrite(file, res)
image = open(file,'rb')
image_read = image.read()
image_64_encode = base64.encodebytes(image_read)
imgstring = b'<sof>' + image_64_encode + b'<eof>'
return p_list
else:
p_list.clear()
continue
break
def calculate(count): #start를 calculate로 바꿈
global imgstring
global hStandard
gl | CK_STREAM )
s.connect( ( HOST, PORT ) )
print('Hi! I am client')
try:
while True:
if a == 0:
recv_msg = s.recv(1024).decode('UTF-8')
if recv_msg.startswith('TOR'):
recv_msg = recv_msg.replace('TORDatesStand,','').split(',')
print(recv_msg)
Model = str(recv_msg[0])
print(Model)
lot = int(recv_msg[1])
print(lot)
hStandard = float(recv_msg[2])
print(hStandard)
vStandard = float(recv_msg[3])
print(vStandard)
Date = int(recv_msg[4])
print(Date)
a=1
recv_msg = ''
elif a == 1:
time1 = time.time()
setServoPos1(90)
time.sleep(1)
setServoPos2(80)
time.sleep(1)
while True:
main_conveyor_On(con1_port)
main_conveyor_On(con2_port)
time.sleep(2)
while True:
d1 = Sonar(sig1)
if d1 <= 4.5:
main_conveyor_Off(con1_port)
break
else:
continue
led_on(led1)
led_off(led2)
led_off(led3)
dot1, dot2, dot3, dot4 = opencapture(count)
a, b, c, d = dot1234(dot1, dot2, dot3, dot4)
temp = [dot1, dot2, dot3, dot4]
for i in range(4):
if i == a:
p1.append(temp[i])
elif i == b:
p2.append(temp[i])
elif i == c:
p3.append(temp[i])
elif i == d:
p4.append(temp[i])
calculate(count)
led_off(led1)
count+=1
if count == lot:
main_conveyor_On(con1_port)
time.sleep(11)
led_off(led2)
led_off(led3)
main_conveyor_Off(con1_port)
main_conveyor_Off(con2_port)
setServoPos1(90)
time.sleep(1)
setServoPos2(80)
time.sleep(1)
Unit_horizon = []
Unit_vertical = []
Unit_hpass = []
Unit_vpass = []
p1=[]
p2=[]
p3=[]
p4=[]
count=0
a=0
TotalunpassCount = 0
TotalDefectrate = 0
print(recv_msg)
break
except (ValueError, KeyboardInterrupt):
print( '[ Server Message ( Exception ) : non numeric' )
print( '[ Server Message ( Exception ) : close client connection' )
GPIO.cleanup()
s.close()
finally:
GPIO.cleanup()
s.close()
| obal vStandard
global TotalunpassCount
global dateGap
Result_horizon, Result_vertical = Dot_Distance(p1[count][0], p1[count][1], p2[count][0], p2[count][1], p3[count][0], p3[count][1], p4[count][0], p4[count][1])
Unit_horizon.append(Result_horizon)
Unit_vertical.append(Result_vertical)
Result_hpass = Unit_Defect(hStandard, Result_horizon)
Result_vpass = Unit_Defect(vStandard, Result_vertical)
temperature = read_temp()
#temperature = '26.5'
if Result_hpass == 1 and Result_vpass == 1:
led_on(led2) #green
setServoPos1(10)
time.sleep(1)
setServoPos2(165)
time.sleep(1)
elif Result_hpass == 1 and Result_vpass == 0: #세로만 불량
led_on(led3) #red
setServoPos1(10)
time.sleep(1)
setServoPos2(125)
TotalunpassCount+=1
time.sleep(1)
elif Result_hpass == 0 and Result_vpass == 1: #가로만 불량
led_on(led3) #red
setServoPos1(45)
time.sleep(1)
setServoPos2(165)
TotalunpassCount+=1
time.sleep(1)
elif Result_hpass == 0 and Result_vpass == 0: #가로, 세로 둘다 불량
led_on(led3) #red
setServoPos1(155)
time.sleep(1)
setServoPos2(80)
TotalunpassCount+=1
time.sleep(1)
time.sleep(0.3)
#Result_pass
Unit_hpass.append(Result_hpass)
Unit_vpass.append(Result_vpass)
print('TOCUnit_no'+str(count+1)+","+'Unit_horizon'+str(Result_horizon)+","+'Unit_vertical'+str(Result_vertical)+","+'Unit_hpass'+str(Result_hpass)+","+'Unit_vpass'+str(Result_vpass)+","+'TEMP'+str(temperature)+","+'Unit_date'+str(Date)+","+'GO')
send_dp = 'TOCUnit_no'+str(count+1)+","+'Unit_horizon'+str(Result_horizon)+","+'Unit_vertical'+str(Result_vertical)+","+'Unit_hpass'+str(Result_hpass)+","+'Unit_vpass'+str(Result_vpass)+","+'TEMP'+str(temperature)+","+'Unit_date'+str(Date)+","+'GO'
s.send(send_dp.encode('UTF-8'))
time.sleep(0.5)
s.send(imgstring)
time.sleep(0.3)
if count == (lot-1):
#AQL_pass
AQL_hpass = AQL_Chart(Unit_hpass, Sample_Letter(lot))
AQL_vpass = AQL_Chart(Unit_vpass, Sample_Letter(lot))
#Deviation
HDeviation = Sigma(Unit_horizon)
VDeviation = Sigma(Unit_vertical)
#Mean
HMean = Avg(Unit_horizon)
VMean = Avg(Unit_vertical)
#Cp
HCp = PCA(HDeviation, hStandard)
VCp = PCA(VDeviation, vStandard)
Hadjust = adjust(Unit_horizon, hStandard, lot)
Vadjust = adjust(Unit_vertical, vStandard, lot)
hunpassCount, hDefectrate = CountRate(Unit_hpass, lot)
vunpassCount, vDefectrate = CountRate(Unit_vpass, lot)
TotalDefectrate = round(TotalunpassCount/lot * 100, 1)
time2 = time.time()
Gap = time2-time1
dateGap = convert_seconds_to_kor_time(Gap)
print('TOCAQL_hpass'+str(AQL_hpass)+","+'AQL_vpass'+str(AQL_vpass)+","+'Sigmah'+str(HDeviation)+","+'Sigmav'+str(VDeviation)+","+'Meanh'+str(HMean)+","+'Meanv'+str(VMean)+","+'Cph' + str(HCp)+","+'Cpv' + str(VCp)+ "," +'hunpassCount'+str(hunpassCount)+","+'vunpassCount'+str(vunpassCount)+","+'hDefectrate' + str(hDefectrate)+","+'vDefectrate' + str(vDefectrate)+","+'Hadjust' + str(Hadjust)+","+'Vadjust' + str(Vadjust)+","+'TotalunpassCount'+str(TotalunpassCount)+","+'TotalDefectrate' + str(TotalDefectrate)+","+'Date' + str(Date)+","+'dateGap' + str(dateGap)+","+'lot' + str(lot)+","+'Model'+str(Model))
send_Final = 'TOCAQL_hpass'+str(AQL_hpass)+","+'AQL_vpass'+str(AQL_vpass)+","+'Sigmah'+str(HDeviation)+","+'Sigmav'+str(VDeviation)+","+'Meanh'+str(HMean)+","+'Meanv'+str(VMean)+","+'Cph'+str(HCp)+","+'Cpv'+str(VCp)+"," +'hunpassCount'+str(hunpassCount)+","+'vunpassCount'+str(vunpassCount)+","+'hDefectrate'+str(hDefectrate)+","+'vDefectrate'+str(vDefectrate)+","+'Hadjust'+str(Hadjust)+","+'Vadjust'+str(Vadjust)+","+'TotalunpassCount'+str(TotalunpassCount)+","+'TotalDefectrate'+str(TotalDefectrate)+","+'Date' + str(Date)+","+'dateGap'+str(dateGap)+","+'lot'+str(lot)+","+'Model'+str(Model)
time.sleep(3)
s.send(send_Final.encode('UTF-8'))
return
if __name__ == "__main__":
s = socket.socket( socket.AF_INET, socket.SO | identifier_body |
main.py | import socket
import re
import math
import time
from datetime import datetime, timedelta
import random
import numpy
import RPi.GPIO as GPIO
import cv2
import numpy as np
import base64
from Class_Def import *
from module import *
#from open_cv import *
HOST = '192.168.0.118'
PORT = 9000
font = cv2.FONT_HERSHEY_COMPLEX
Unit_horizon = []
Unit_vertical = []
Unit_hpass = []
Unit_vpass = []
p1=[]
p2=[]
p3=[]
p4=[]
lot = 0
hStandard = 0.0
vStandard = 0.0
count = 0
Hadjust = 0
Vadjust = 0
### open_cv 스크립트 변수들
p_list = []
dot1=[]
dot2=[]
dot3=[]
dot4=[]
imgstring = ''
temperature = 0
### open_cv 스크립트 변수들 2020-11-03 옮김
hunpassCount = 0
vunpassCount = 0
a=0
TotalunpassCount = 0
time1 = 0
time2 = 0
dateGap = 0
def opencapture(count):
global img
global imgstring
cap = cv2.VideoCapture(-1)
while True:
ret, img = cap.read() # Read 결과와 frame
img = img[170:450, 40:600]
cv2.imshow("Battery_live", img)
cv2.waitKey(1)
if not ret:
break
print("Image " + str(count) + "saved")
file = '/home/pi/project/' + str(count) + '.jpg'
#file = '/home/pi/project/' + 'CaptureTest'+'.jpg'
cv2.imwrite(file, img)
img2 = cv2.imread(file, cv2.IMREAD_COLOR)
img3 = cv2.imread(file, cv2.IMREAD_GRAYSCALE)
_, threshold = cv2.threshold(img3, 150, 255, cv2.THRESH_BINARY)
contours, _ = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
p_list.clear()
for cnt in contours:
epsilon = 0.02 * cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, epsilon, True)
cv2.drawContours(img2, [approx], 0, (0, 0, 255), 1)
n = approx.ravel()
i = 0
for j in n:
if (i % 2 == 0):
x = n[i]
y = n[i + 1]
string = str(x) + ", " + str(y)
cv2.putText(img2, string, (x, y), font, 0.5, (0, 0, 255))
string_list = string.split(",")
string_list = list(map(int, string_list))
p_list.append(string_list)
i = i + 1
#time.sleep(0.5)
if len(p_list)==4:
cv2.imshow('Battery_(x, y)', img2)
cv2.imwrite(file, img2)
res = cv2.resize(img2,(762,461))
cv2.imwrite(file, res)
image = open(file,'rb')
image_read = image.read()
image_64_encode = base64.encodebytes(image_read)
imgstring = b'<sof>' + image_64_encode + b'<eof>'
return p_list
else:
p_list.clear()
continue
break
def calculate(count): #start를 calculate로 바꿈
global imgstring
global hStandard
global vStandard
global TotalunpassCount
global dateGap
Result_horizon, Result_vertical = Dot_Distance(p1[count][0], p1[count][1], p2[count][0], p2[count][1], p3[count][0], p3[count][1], p4[count][0], p4[count][1])
Unit_horizon.append(Result_horizon)
Unit_vertical.append(Result_vertical)
Result_hpass = Unit_Defect(hStandard, Result_horizon)
Result_vpass = Unit_Defect(vStandard, Result_vertical)
temperature = read_temp()
#temperature = '26.5'
if Result_hpass == 1 and Result_vpass == 1:
led_on(led2) #green
setServoPos1(10)
time.sleep(1)
setServoPos2(165)
time.sleep(1)
elif Result_hpass == 1 and Result_vpass == 0: #세로만 불량
led_on(led3) #red
setServoPos1(10)
time.sleep(1)
setServoPos2(125)
TotalunpassCount+=1
time.sleep(1)
elif Result_hpass == 0 and Result_vpass == 1: #가로만 불량
led_on(led3) #red
setServoPos1(45)
time.sleep(1)
setServoPos2(165)
TotalunpassCount+=1
time.sleep(1)
elif Result_hpass == 0 and Result_vpass == 0: #가로, 세로 둘다 불량
led_on(led3) #red
setServoPos1(155)
time.sleep(1)
setServoPos2(80)
TotalunpassCount+=1
time.sleep(1)
time.sleep(0.3)
#Result_pass
Unit_hpass.append(Result_hpass)
Unit_vpass.append(Result_vpass)
print('TOCUnit_no'+str(count+1)+","+'Unit_horizon'+str(Result_horizon)+","+'Unit_vertical'+str(Result_vertical)+","+'Unit_hpass'+str(Result_hpass)+","+'Unit_vpass'+str(Result_vpass)+","+'TEMP'+str(temperature)+","+'Unit_date'+str(Date)+","+'GO')
send_dp = 'TOCUnit_no'+str(count+1)+","+'Unit_horizon'+str(Result_horizon)+","+'Unit_vertical'+str(Result_vertical)+","+'Unit_hpass'+str(Result_hpass)+","+'Unit_vpass'+str(Result_vpass)+","+'TEMP'+str(temperature)+","+'Unit_date'+str(Date)+","+'GO'
s.send(send_dp.encode('UTF-8'))
time.sleep(0.5)
s.send(imgstring)
time.sleep(0.3)
if count == (lot-1):
#AQL_pass
AQL_hpass = AQL_Chart(Unit_hpass, Sample_Letter(lot))
AQL_vpass = AQL_Chart(Unit_vpass, Sample_Letter(lot))
#Deviation
HDeviation = Sigma(Unit_horizon)
VDeviation = Sigma(Unit_vertical)
#Mean
HMean = Avg(Unit_horizon)
VMean = Avg(Unit_vertical)
#Cp
HCp = PCA(HDeviation, hStandard)
VCp = PCA(VDeviation, vStandard)
Hadjust = adjust(Unit_horizon, hStandard, lot)
Vadjust = adjust(Unit_vertical, vStandard, lot)
hunpassCount, hDefectrate = CountRate(Unit_hpass, lot)
vunpassCount, vDefectrate = CountRate(Unit_vpass, lot)
TotalDefectrate = round(TotalunpassCount/lot * 100, 1)
time2 = time.time()
Gap = time2-time1
dateGap = convert_seconds_to_kor_time(Gap)
print('TOCAQL_hpass'+str(AQL_hpass)+","+'AQL_vpass'+str(AQL_vpass)+","+'Sigmah'+str(HDeviation)+","+'Sigmav'+str(VDeviation)+","+'Meanh'+str(HMean)+","+'Meanv'+str(VMean)+","+'Cph' + str(HCp)+","+'Cpv' + str(VCp)+ "," +'hunpassCount'+str(hunpassCount)+","+'vunpassCount'+str(vunpassCount)+","+'hDefectrate' + str(hDefectrate)+","+'vDefectrate' + str(vDefectrate)+","+'Hadjust' + str(Hadjust)+","+'Vadjust' + str(Vadjust)+","+'TotalunpassCount'+str(TotalunpassCount)+","+'TotalDefectrate' + str(TotalDefectrate)+","+'Date' + str(Date)+","+'dateGap' + str(dateGap)+","+'lot' + str(lot)+","+'Model'+str(Model))
send_Final = 'TOCAQL_hpass'+str(AQL_hpass)+","+'AQL_vpass'+str(AQL_vpass)+","+'Sigmah'+str(HDeviation)+","+'Sigmav'+str(VDeviation)+","+'Meanh'+str(HMean)+","+'Meanv'+str(VMean)+","+'Cph'+str(HCp)+","+'Cpv'+str(VCp)+"," +'hunpassCount'+str(hunpassCount)+","+'vunpassCount'+str(vunpassCount)+","+'hDefectrate'+str(hDefectrate)+","+'vDefectrate'+str(vDefectrate)+","+'Hadjust'+str(Hadjust)+","+'Vadjust'+str(Vadjust)+","+'TotalunpassCount'+str(TotalunpassCount)+","+'TotalDefectrate'+str(TotalDefectrate)+","+'Date' + str(Date)+","+'dateGap'+str(dateGap)+","+'lot'+str(lot)+","+'Model'+str(Model)
time.sleep(3)
s.send(send_Final.encode('UTF-8'))
return
if __name__ == "__main__":
s = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
s.connect( ( HOST, PORT ) )
print('Hi! I am client')
try:
while True:
if a == 0:
recv_msg = s.recv(1024).decode('UTF-8')
if recv_msg.startswith('TOR'):
recv_msg = recv_msg.replace('TORDatesStand,','').split(',')
print(recv_msg)
Model = str(recv_msg[0])
print(Model)
lot = int(recv_msg[1])
print(lot)
hStandard = float(recv_msg[2])
print(hStandard)
vStandard = float(recv_msg[3])
print(vStandard)
Date = int(recv_msg[4])
print(Date)
a=1
recv_msg = ''
elif a == 1:
time1 = time.time()
setServoPos1(90)
time.sleep(1)
setServoPos2(80)
time.sleep(1)
while True:
main_conveyor_On(con1_port)
main_conveyor_On(con2_port)
time.sleep(2)
while True:
d1 = Sonar(sig1)
if d1 <= 4.5:
main_conveyor_Off(con1_port)
break
else:
continue
led_on(led1)
led_off(led2)
led_off(led3)
dot1, dot2, dot3, dot4 = opencapture(count)
a, b, c, d = dot1234(dot1, dot2, dot3, dot4)
temp = [dot1, dot2, dot3, dot4]
for i in range(4):
if i == a:
p1.append(temp[i])
| count+=1
if count == lot:
main_conveyor_On(con1_port)
time.sleep(11)
led_off(led2)
led_off(led3)
main_conveyor_Off(con1_port)
main_conveyor_Off(con2_port)
setServoPos1(90)
time.sleep(1)
setServoPos2(80)
time.sleep(1)
Unit_horizon = []
Unit_vertical = []
Unit_hpass = []
Unit_vpass = []
p1=[]
p2=[]
p3=[]
p4=[]
count=0
a=0
TotalunpassCount = 0
TotalDefectrate = 0
print(recv_msg)
break
except (ValueError, KeyboardInterrupt):
print( '[ Server Message ( Exception ) : non numeric' )
print( '[ Server Message ( Exception ) : close client connection' )
GPIO.cleanup()
s.close()
finally:
GPIO.cleanup()
s.close()
| elif i == b:
p2.append(temp[i])
elif i == c:
p3.append(temp[i])
elif i == d:
p4.append(temp[i])
calculate(count)
led_off(led1)
| conditional_block |
client.go | //Docket Client
//Author: Sivamani Varun
//Gopher Gala
package main
//push
//pull
//-h[ost]
//-p[ort]
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"github.com/alecthomas/kingpin"
"github.com/fsouza/go-dockerclient"
"io"
"io/ioutil"
"log"
"mime/multipart"
"net/http"
"net/url"
"os"
"os/exec"
"path/filepath"
"sync"
"regexp"
"strconv"
"strings"
"time"
)
var (
host = kingpin.Flag("host", "Set host of docket registry.").Short('h').Default("http://127.0.0.1").String()
port = kingpin.Flag("port", "Set port of docket registry.").Short('p').Default("8004").String()
location = kingpin.Flag("location", "Set location to store torrents and tarballs.").Short('l').Default("/tmp/docket").String()
push = kingpin.Command("push", "Push to the docket registry.")
pushImage = push.Arg("push", "Image to push.").Required().String()
pull = kingpin.Command("pull", "pull to the docket registry.")
pullImage = pull.Arg("pull", "Image to pull.").Required().String()
imagesCmd = kingpin.Command("images", "display images in the docket registry.")
imageFlag = imagesCmd.Flag("images", "display images in the docket registry.").Bool()
)
var flag bool
type RootFS struct{
Type string
Layers []string
}
type ManifestFile struct {
Id string
RepoTags interface{}
RepoDigests interface{}
Parent string
Comment string
Created string
Container string
ContainerConfig interface{}
DockerVersion string
Author string
Config interface{}
Architecture string
Os string
Size string
VirtualSize string
GraphicDriver interface{}
RootFS RootFS
Metadata string
}
func track(start time.Time, name string) {
elapsed := time.Since(start)
elapsed = elapsed
log.Printf("\n\n%s took %s\n\n", name, elapsed)
}
// Creates a new tarball upload http request to the Docket registry
func uploadFile(params map[string]string, paramName, path string) (*http.Request, error) {
uri := *host + ":" + *port + "/images"
file, err := os.Open(path)
if err != nil {
return nil, err
}
defer file.Close()
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
part, err := writer.CreateFormFile(paramName, filepath.Base(path))
if err != nil {
return nil, err
}
_, err = io.Copy(part, file)
err = writer.Close()
if err != nil {
return nil, err
}
request, err := http.NewRequest("POST", uri, body)
if err != nil {
return nil, err
}
request.Header.Add("Content-Type", writer.FormDataContentType())
for key, val := range params {
fmt.Println("key = ", key, " val = ", val)
request.Header.Add(key, val)
}
return request, nil
}
func applyPush(image string) error {
defer track(time.Now(), "Image Push")
reg, err := regexp.Compile("[^A-Za-z0-9]+")
if err != nil {
return err
}
loc := *location
if _, err := os.Stat(loc); os.IsNotExist(err) {
os.Mkdir(loc, 0644)
}
endpoint := "unix:///var/run/docker.sock"
client, _ := docker.NewClient(endpoint)
imgs, _ := client.ListImages(docker.ListImagesOptions{All: false})
found := false
imageId := ""
filePath := ""
created := ""
for _, img := range imgs {
tags := img.RepoTags
for _, tag := range tags {
if tag == image {
found = true
imageId = img.ID
created = strconv.FormatInt(img.Created, 10)
fmt.Println("Found image: ", image)
fmt.Println("ID: ", img.ID)
fmt.Println("RepoTags: ", img.RepoTags)
fmt.Println("Created: ", img.Created)
fmt.Println("Size: ", img.Size)
fmt.Println("VirtualSize: ", img.VirtualSize)
fmt.Println("ParentId: ", img.ParentID)
safeImageName := reg.ReplaceAllString(image, "_")
s := []string{loc, "/", imageId, "_", safeImageName, ".tar"}
filePath = strings.Join(s, "")
break
}
}
}
if !found {
return errors.New("Sorry the image could not be found.")
}
//os.Remove("temp.json")
//Run export command
//command invocation
//run docker command save to tar ball in location
fmt.Println("Exporting image to tarball...")
cmd := fmt.Sprintf("sudo docker save %s > %s", image, filePath)
_, err1 := exec.Command("sh", "-c", cmd).Output()
if err1 != nil {
return err1
}
fmt.Println("Successively exported tarball...")
//make post request with contents of tarball to docket registry
imageParams := map[string]string{
"image": image,
"id": imageId,
"created": created,
//"layers": layer,
}
//Adapted from http://matt.aimonetti.net/posts/2013/07/01/golang-multipart-file-upload-example/ (C) Matt Aimonetti
request, err := uploadFile(imageParams, "file", filePath)
if err != nil {
log.Fatal(err)
}
uploadClient := &http.Client{}
resp, err := uploadClient.Do(request)
if err != nil {
log.Fatal(err)
} else {
body := &bytes.Buffer{}
_, err := body.ReadFrom(resp.Body)
if err != nil {
log.Fatal(err)
}
resp.Body.Close()
if resp.StatusCode != 200 {
return errors.New("Failed to push image...")
}
}
fmt.Println("Successfully uploaded image: ", image, " to the Docket registry.")
os.Remove(filePath)
return nil
}
//Adapted from https://github.com/thbar/golang-playground/blob/master/download-files.go
func downloadFromUrl(url string, fileName string) (err error) |
func applyPull(image string) error {
defer track(time.Now(), "Image Pull")
reg, err := regexp.Compile("[^A-Za-z0-9]+")
if err != nil {
return err
}
loc := *location
if _, err := os.Stat(loc); os.IsNotExist(err) {
os.Mkdir(loc, 0644)
}
safeImageName := reg.ReplaceAllString(image, "_")
filePath := loc + "/"
fileName := filePath + safeImageName + ".torrent"
fmt.Print("\n\n@@@@ Pulling image ---> ", safeImageName, " @@@@\n\nDownloading meta data file\n")
//Download torrent file
queryParam := map[string]string{
"image": image,
}
queryParamJson, _ := json.Marshal(queryParam)
metaUrl := *host + ":" + *port + "/images?q=" + url.QueryEscape(string(queryParamJson))
response, err3 := http.Get(metaUrl)
if err3 != nil {
fmt.Println("Failed to query image metadata endpoint")
return err3
}
if response.StatusCode != 200 {
fmt.Println("Failed to get image metadata")
return errors.New("Failed to get images metadata...")
}
defer response.Body.Close()
metaJson, err4 := ioutil.ReadAll(response.Body)
if err4 != nil {
fmt.Println("Failed to get image metadata json")
return errors.New("Failed to get image metadata json")
}
fmt.Println("\nSuccessfully Fetched meta data file")
var queryObj map[string]interface{}
if err := json.Unmarshal([]byte(metaJson), &queryObj); err != nil {
return errors.New("Failed to decode images metadata json...")
}
tarballNameInterface := queryObj["fileName"]
tarballName := tarballNameInterface.(string)
layers := (queryObj["layers"]).(string)
result := strings.Split(tarballName, "_")
locToImg := loc+"/" + result[0] + "/"
if _, errI := os.Stat(locToImg); os.IsNotExist(errI) {
os.Mkdir(locToImg, 0644)
}
jsonFile := (queryObj["jsonFile"]).(string)
d1 := []byte(jsonFile)
jsonFileName := locToImg+strings.Split(result[0], ":")[1]+".json"
err2 := ioutil.WriteFile(jsonFileName, d1, 0644)
if err2 != nil {
fmt.Println("Error copying json file")
return errors.New("Error copying json file")
}
metadata := (queryObj["metadata"]).(string)
d2 := []byte(metadata)
errM := ioutil.WriteFile(locToImg+"manifest.json", d2, 0644)
if errM != nil {
fmt.Println("Error copying manifest file")
return errors.New("Error copying manifest file")
}
repository := (queryObj["repository"]).(string)
d3 := []byte(repository)
errR := ioutil.WriteFile(locToImg+"repositories", d3, 0644)
if errR != nil {
fmt.Println("Error copying repository file")
return errors.New("Error copying repository file")
}
layerList := strings.Split(layers, ",")
flag = false
layerMap := (queryObj["layerMap"]).(string)
layersShaList :=strings.Split(layerMap,",")
layerShaMap := make(map[string]string)
nonExistingList := make([]string,0)
for i := 0; i < len(layersShaList); i++ {
layersArray :=strings.Split(layersShaList[i],":")
layerShaMap[layersArray[0]] = layersArray[1]
}
var wg sync.WaitGroup
fmt.Print("\n\nFinding the missing layers in the image\n")
for i := 0; i < len(layerList); i++ {
layerVal := layerList[i]
grepCmd := "sudo find /var/lib/docker/image -name " + layerShaMap[layerVal]
cmd := exec.Command("sh", "-c", grepCmd)
grepOutput, err1 := cmd.CombinedOutput()
if err1 != nil {
fmt.Printf("ERROR GREPING: %s", err1)
}
if len(grepOutput) <= 0 {
fmt.Print("\nlayer -> ", layerList[i], " not present")
nonExistingList = append(nonExistingList, layerList[i])
}
}
//if len(nonExistingList) == 0{
// fmt.Print("\n\nImage already loaded at machine\n\n")
// return nil
//}
fmt.Print("\n\nDownloading torrent file for each missing layer in the image\n")
for i := 0; i < len(layerList); i++ {
wg.Add(1)
fmt.Print("\nlayer -> ", layerList[i])
go func(layerList []string,i int){
queryPrm := map[string]string{
"image": layerList[i],
}
queryPrmJson, _ := json.Marshal(queryPrm)
url := *host + ":" + *port + "/torrents?q=" + url.QueryEscape(string(queryPrmJson))
err1 := downloadFromUrl(url, filePath+layerList[i]+".tar.torrent")
if err1 != nil {
flag = true
fmt.Println("\n\nTorrent file missing at registry for layer--> ", layerList[i],". Will be downloading the entire image")
}
defer wg.Done()
}(layerList, i)
}
wg.Wait()
if flag == true{
fmt.Println("\n\nDownloading the torrent file for original image: ", safeImageName)
url := *host + ":" + *port + "/torrents?q=" + url.QueryEscape(string(queryParamJson))
err1 := downloadFromUrl(url, fileName)
if err1 != nil {
fmt.Println("Failed to pull image")
return err
}
fmt.Println("\nDownloading the entire image from registry: ", safeImageName)
ctorrentCmd := fmt.Sprintf("cd %s && sudo ctorrent -e 0 %s", filePath, fileName)
cmd := exec.Command("bash", "-c", ctorrentCmd)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Run()
//TODO:Replace filename with that from metadata
tarballPath := filePath + tarballName
//Load the downloaded tarball
fmt.Println("\n\nLoading Image...")
importCmd := fmt.Sprintf("sudo docker load -i %s", tarballPath)
_, err2 := exec.Command("sh", "-c", importCmd).Output()
if err2 != nil {
fmt.Printf("Failed to load image into docker!")
return err2
}
fmt.Printf("\nSuccessfively pulled image: ", safeImageName,"\n\n")
return nil
} else{
flag = true
var wg1 sync.WaitGroup
fmt.Print("\n\nDownloading each image layer\n")
for i := 0; i < len(layerList); i++ {
wg1.Add(1)
go func(layerList []string, i int){
fmt.Print("\nDownloading layer-> ",layerList[i])
ctorrentCmd := fmt.Sprintf("cd %s && sudo ctorrent -e 0 %s", filePath, layerList[i]+".tar.torrent >/dev/null 2>&1")
cmd := exec.Command("bash", "-c", ctorrentCmd)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Run()
fmt.Print("\nExtracting layer-> ",layerList[i])
cmdStr1 := "sudo tar -C "+locToImg + " -xvf "+ filePath+layerList[i]+".tar"
_ , errT := exec.Command("sh", "-c", cmdStr1, " >/dev/null 2>&1").Output()
if errT != nil {
flag = false
}
defer wg1.Done()
}(layerList, i)
}
wg1.Wait()
if flag == false{
fmt.Printf("\nERROR in downloading layers\n")
}
//Load the downloaded tarball
os.Chdir(filePath)
fmt.Println("\n\nLoading Image....\n")
importCmd := fmt.Sprintf("sudo tar -cC %s . | docker load", result[0])
_, err2 := exec.Command("sh", "-c", importCmd).Output()
if err2 != nil {
fmt.Printf("Failed to load image into docker!", err2)
}else {
os.Remove(loc)
fmt.Print("\n@@@@@ Successfively pulled image: ", safeImageName, " @@@@@")
}
}
fmt.Print("\n\n")
return nil
}
func applyImages() error {
imagesUrl := *host + ":" + *port + "/images/all"
//TODO:Get metadata GET /images?q={"image":}
response, err3 := http.Get(imagesUrl)
if err3 != nil {
fmt.Println("Failed to query images list endpoint")
return err3
}
if response.StatusCode != 200 {
fmt.Println("Failed to get images list")
return errors.New("Failed to get images list...")
}
defer response.Body.Close()
imagesList, err4 := ioutil.ReadAll(response.Body)
if err4 != nil {
fmt.Println("Failed to get images list")
return errors.New("Failed to get images list")
}
fmt.Println(string(imagesList))
return nil
}
func main() {
kingpin.CommandLine.Help = "Docket Client"
switch kingpin.Parse() {
case "push":
kingpin.FatalIfError(applyPush(*pushImage), "Pushing of image failed")
case "pull":
kingpin.FatalIfError(applyPull((*pullImage)), "Pulling of image failed")
case "images":
kingpin.FatalIfError(applyImages(), "Listing of images failed")
}
}
| {
output, err := os.Create(fileName)
if err != nil {
fmt.Println("\nError while creating", fileName, "-", err)
return err
}
defer output.Close()
response, err := http.Get(url)
if err != nil {
fmt.Println("\nError while downloading", url, "-", err)
return err
}
if response.StatusCode != 200 {
fmt.Println("\nFailed to pull image")
return errors.New("Failed to pull image...")
}
defer response.Body.Close()
n, err := io.Copy(output, response.Body)
if err != nil {
fmt.Println("\nError while downloading", url, "-", err)
return err
}
//fmt.Println(n, "bytes downloaded.")
//Hack: trivial check to ensure if file downloaded is not too small
if n < 100 {
return errors.New("Failed to pull image...")
}
return nil
} | identifier_body |
client.go | //Docket Client
//Author: Sivamani Varun
//Gopher Gala
package main
//push
//pull
//-h[ost]
//-p[ort]
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"github.com/alecthomas/kingpin"
"github.com/fsouza/go-dockerclient"
"io"
"io/ioutil"
"log"
"mime/multipart"
"net/http"
"net/url"
"os"
"os/exec"
"path/filepath"
"sync"
"regexp"
"strconv"
"strings"
"time"
)
var (
host = kingpin.Flag("host", "Set host of docket registry.").Short('h').Default("http://127.0.0.1").String()
port = kingpin.Flag("port", "Set port of docket registry.").Short('p').Default("8004").String()
location = kingpin.Flag("location", "Set location to store torrents and tarballs.").Short('l').Default("/tmp/docket").String()
push = kingpin.Command("push", "Push to the docket registry.")
pushImage = push.Arg("push", "Image to push.").Required().String()
pull = kingpin.Command("pull", "pull to the docket registry.")
pullImage = pull.Arg("pull", "Image to pull.").Required().String()
imagesCmd = kingpin.Command("images", "display images in the docket registry.")
imageFlag = imagesCmd.Flag("images", "display images in the docket registry.").Bool()
)
var flag bool
type RootFS struct{
Type string
Layers []string
}
type ManifestFile struct {
Id string
RepoTags interface{}
RepoDigests interface{}
Parent string
Comment string
Created string
Container string
ContainerConfig interface{}
DockerVersion string
Author string
Config interface{}
Architecture string
Os string
Size string
VirtualSize string
GraphicDriver interface{}
RootFS RootFS
Metadata string
}
func track(start time.Time, name string) {
elapsed := time.Since(start)
elapsed = elapsed
log.Printf("\n\n%s took %s\n\n", name, elapsed)
}
// Creates a new tarball upload http request to the Docket registry
func uploadFile(params map[string]string, paramName, path string) (*http.Request, error) {
uri := *host + ":" + *port + "/images"
file, err := os.Open(path)
if err != nil {
return nil, err
}
defer file.Close()
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
part, err := writer.CreateFormFile(paramName, filepath.Base(path))
if err != nil {
return nil, err
}
_, err = io.Copy(part, file)
err = writer.Close()
if err != nil {
return nil, err
}
request, err := http.NewRequest("POST", uri, body)
if err != nil {
return nil, err
}
request.Header.Add("Content-Type", writer.FormDataContentType())
for key, val := range params {
fmt.Println("key = ", key, " val = ", val)
request.Header.Add(key, val)
}
return request, nil
}
func applyPush(image string) error {
defer track(time.Now(), "Image Push")
reg, err := regexp.Compile("[^A-Za-z0-9]+")
if err != nil {
return err
}
loc := *location
if _, err := os.Stat(loc); os.IsNotExist(err) {
os.Mkdir(loc, 0644)
}
endpoint := "unix:///var/run/docker.sock"
client, _ := docker.NewClient(endpoint)
imgs, _ := client.ListImages(docker.ListImagesOptions{All: false})
found := false
imageId := ""
filePath := ""
created := ""
for _, img := range imgs {
tags := img.RepoTags
for _, tag := range tags {
if tag == image {
found = true
imageId = img.ID
created = strconv.FormatInt(img.Created, 10)
fmt.Println("Found image: ", image)
fmt.Println("ID: ", img.ID)
fmt.Println("RepoTags: ", img.RepoTags)
fmt.Println("Created: ", img.Created)
fmt.Println("Size: ", img.Size)
fmt.Println("VirtualSize: ", img.VirtualSize)
fmt.Println("ParentId: ", img.ParentID)
safeImageName := reg.ReplaceAllString(image, "_")
s := []string{loc, "/", imageId, "_", safeImageName, ".tar"}
filePath = strings.Join(s, "")
break
}
}
}
if !found {
return errors.New("Sorry the image could not be found.")
}
//os.Remove("temp.json")
//Run export command
//command invocation
//run docker command save to tar ball in location
fmt.Println("Exporting image to tarball...")
cmd := fmt.Sprintf("sudo docker save %s > %s", image, filePath)
_, err1 := exec.Command("sh", "-c", cmd).Output()
if err1 != nil {
return err1
}
fmt.Println("Successively exported tarball...")
//make post request with contents of tarball to docket registry
imageParams := map[string]string{
"image": image,
"id": imageId,
"created": created,
//"layers": layer,
}
//Adapted from http://matt.aimonetti.net/posts/2013/07/01/golang-multipart-file-upload-example/ (C) Matt Aimonetti
request, err := uploadFile(imageParams, "file", filePath)
if err != nil {
log.Fatal(err)
}
uploadClient := &http.Client{}
resp, err := uploadClient.Do(request)
if err != nil {
log.Fatal(err)
} else {
body := &bytes.Buffer{}
_, err := body.ReadFrom(resp.Body)
if err != nil {
log.Fatal(err)
}
resp.Body.Close()
if resp.StatusCode != 200 {
return errors.New("Failed to push image...")
}
}
fmt.Println("Successfully uploaded image: ", image, " to the Docket registry.")
os.Remove(filePath)
return nil
}
//Adapted from https://github.com/thbar/golang-playground/blob/master/download-files.go
func downloadFromUrl(url string, fileName string) (err error) {
output, err := os.Create(fileName)
if err != nil {
fmt.Println("\nError while creating", fileName, "-", err)
return err
}
defer output.Close()
response, err := http.Get(url)
if err != nil {
fmt.Println("\nError while downloading", url, "-", err)
return err
}
if response.StatusCode != 200 {
fmt.Println("\nFailed to pull image")
return errors.New("Failed to pull image...")
}
defer response.Body.Close()
n, err := io.Copy(output, response.Body)
if err != nil {
fmt.Println("\nError while downloading", url, "-", err)
return err
}
//fmt.Println(n, "bytes downloaded.")
//Hack: trivial check to ensure if file downloaded is not too small
if n < 100 {
return errors.New("Failed to pull image...")
}
return nil
}
func applyPull(image string) error {
defer track(time.Now(), "Image Pull")
reg, err := regexp.Compile("[^A-Za-z0-9]+")
if err != nil {
return err
}
loc := *location
if _, err := os.Stat(loc); os.IsNotExist(err) {
os.Mkdir(loc, 0644)
}
safeImageName := reg.ReplaceAllString(image, "_")
filePath := loc + "/"
fileName := filePath + safeImageName + ".torrent"
fmt.Print("\n\n@@@@ Pulling image ---> ", safeImageName, " @@@@\n\nDownloading meta data file\n")
//Download torrent file
queryParam := map[string]string{
"image": image,
}
queryParamJson, _ := json.Marshal(queryParam)
metaUrl := *host + ":" + *port + "/images?q=" + url.QueryEscape(string(queryParamJson))
response, err3 := http.Get(metaUrl)
if err3 != nil {
fmt.Println("Failed to query image metadata endpoint")
return err3
}
if response.StatusCode != 200 {
fmt.Println("Failed to get image metadata")
return errors.New("Failed to get images metadata...")
}
defer response.Body.Close()
metaJson, err4 := ioutil.ReadAll(response.Body)
if err4 != nil {
fmt.Println("Failed to get image metadata json")
return errors.New("Failed to get image metadata json")
}
fmt.Println("\nSuccessfully Fetched meta data file")
var queryObj map[string]interface{}
if err := json.Unmarshal([]byte(metaJson), &queryObj); err != nil {
return errors.New("Failed to decode images metadata json...")
}
tarballNameInterface := queryObj["fileName"]
tarballName := tarballNameInterface.(string)
layers := (queryObj["layers"]).(string)
result := strings.Split(tarballName, "_")
locToImg := loc+"/" + result[0] + "/"
if _, errI := os.Stat(locToImg); os.IsNotExist(errI) {
os.Mkdir(locToImg, 0644)
}
jsonFile := (queryObj["jsonFile"]).(string)
d1 := []byte(jsonFile)
jsonFileName := locToImg+strings.Split(result[0], ":")[1]+".json"
err2 := ioutil.WriteFile(jsonFileName, d1, 0644)
if err2 != nil {
fmt.Println("Error copying json file")
return errors.New("Error copying json file")
}
metadata := (queryObj["metadata"]).(string)
d2 := []byte(metadata)
errM := ioutil.WriteFile(locToImg+"manifest.json", d2, 0644)
if errM != nil {
fmt.Println("Error copying manifest file")
return errors.New("Error copying manifest file")
}
repository := (queryObj["repository"]).(string)
d3 := []byte(repository)
errR := ioutil.WriteFile(locToImg+"repositories", d3, 0644)
if errR != nil {
fmt.Println("Error copying repository file")
return errors.New("Error copying repository file")
}
layerList := strings.Split(layers, ",")
flag = false
layerMap := (queryObj["layerMap"]).(string)
layersShaList :=strings.Split(layerMap,",")
layerShaMap := make(map[string]string)
nonExistingList := make([]string,0)
for i := 0; i < len(layersShaList); i++ {
layersArray :=strings.Split(layersShaList[i],":")
layerShaMap[layersArray[0]] = layersArray[1]
}
var wg sync.WaitGroup
fmt.Print("\n\nFinding the missing layers in the image\n")
for i := 0; i < len(layerList); i++ {
layerVal := layerList[i]
grepCmd := "sudo find /var/lib/docker/image -name " + layerShaMap[layerVal]
cmd := exec.Command("sh", "-c", grepCmd)
grepOutput, err1 := cmd.CombinedOutput()
if err1 != nil {
fmt.Printf("ERROR GREPING: %s", err1)
}
if len(grepOutput) <= 0 {
fmt.Print("\nlayer -> ", layerList[i], " not present")
nonExistingList = append(nonExistingList, layerList[i])
}
}
//if len(nonExistingList) == 0{
// fmt.Print("\n\nImage already loaded at machine\n\n")
// return nil
//}
fmt.Print("\n\nDownloading torrent file for each missing layer in the image\n")
for i := 0; i < len(layerList); i++ {
wg.Add(1)
fmt.Print("\nlayer -> ", layerList[i])
go func(layerList []string,i int){
queryPrm := map[string]string{
"image": layerList[i],
}
queryPrmJson, _ := json.Marshal(queryPrm)
url := *host + ":" + *port + "/torrents?q=" + url.QueryEscape(string(queryPrmJson))
err1 := downloadFromUrl(url, filePath+layerList[i]+".tar.torrent")
if err1 != nil {
flag = true
fmt.Println("\n\nTorrent file missing at registry for layer--> ", layerList[i],". Will be downloading the entire image")
}
defer wg.Done()
}(layerList, i)
}
wg.Wait()
if flag == true{
fmt.Println("\n\nDownloading the torrent file for original image: ", safeImageName)
url := *host + ":" + *port + "/torrents?q=" + url.QueryEscape(string(queryParamJson))
err1 := downloadFromUrl(url, fileName)
if err1 != nil {
fmt.Println("Failed to pull image")
return err
}
fmt.Println("\nDownloading the entire image from registry: ", safeImageName)
ctorrentCmd := fmt.Sprintf("cd %s && sudo ctorrent -e 0 %s", filePath, fileName)
cmd := exec.Command("bash", "-c", ctorrentCmd)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Run()
//TODO:Replace filename with that from metadata
tarballPath := filePath + tarballName
//Load the downloaded tarball
fmt.Println("\n\nLoading Image...")
importCmd := fmt.Sprintf("sudo docker load -i %s", tarballPath)
_, err2 := exec.Command("sh", "-c", importCmd).Output()
if err2 != nil {
fmt.Printf("Failed to load image into docker!")
return err2
}
fmt.Printf("\nSuccessfively pulled image: ", safeImageName,"\n\n")
return nil
} else{
flag = true
var wg1 sync.WaitGroup
fmt.Print("\n\nDownloading each image layer\n")
for i := 0; i < len(layerList); i++ {
wg1.Add(1)
go func(layerList []string, i int){
fmt.Print("\nDownloading layer-> ",layerList[i])
ctorrentCmd := fmt.Sprintf("cd %s && sudo ctorrent -e 0 %s", filePath, layerList[i]+".tar.torrent >/dev/null 2>&1")
cmd := exec.Command("bash", "-c", ctorrentCmd)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Run()
fmt.Print("\nExtracting layer-> ",layerList[i])
cmdStr1 := "sudo tar -C "+locToImg + " -xvf "+ filePath+layerList[i]+".tar"
_ , errT := exec.Command("sh", "-c", cmdStr1, " >/dev/null 2>&1").Output()
if errT != nil {
flag = false
}
defer wg1.Done()
}(layerList, i)
}
wg1.Wait()
if flag == false |
//Load the downloaded tarball
os.Chdir(filePath)
fmt.Println("\n\nLoading Image....\n")
importCmd := fmt.Sprintf("sudo tar -cC %s . | docker load", result[0])
_, err2 := exec.Command("sh", "-c", importCmd).Output()
if err2 != nil {
fmt.Printf("Failed to load image into docker!", err2)
}else {
os.Remove(loc)
fmt.Print("\n@@@@@ Successfively pulled image: ", safeImageName, " @@@@@")
}
}
fmt.Print("\n\n")
return nil
}
func applyImages() error {
imagesUrl := *host + ":" + *port + "/images/all"
//TODO:Get metadata GET /images?q={"image":}
response, err3 := http.Get(imagesUrl)
if err3 != nil {
fmt.Println("Failed to query images list endpoint")
return err3
}
if response.StatusCode != 200 {
fmt.Println("Failed to get images list")
return errors.New("Failed to get images list...")
}
defer response.Body.Close()
imagesList, err4 := ioutil.ReadAll(response.Body)
if err4 != nil {
fmt.Println("Failed to get images list")
return errors.New("Failed to get images list")
}
fmt.Println(string(imagesList))
return nil
}
func main() {
kingpin.CommandLine.Help = "Docket Client"
switch kingpin.Parse() {
case "push":
kingpin.FatalIfError(applyPush(*pushImage), "Pushing of image failed")
case "pull":
kingpin.FatalIfError(applyPull((*pullImage)), "Pulling of image failed")
case "images":
kingpin.FatalIfError(applyImages(), "Listing of images failed")
}
}
| {
fmt.Printf("\nERROR in downloading layers\n")
} | conditional_block |
client.go | //Docket Client
//Author: Sivamani Varun
//Gopher Gala
package main
//push
//pull
//-h[ost]
//-p[ort]
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"github.com/alecthomas/kingpin"
"github.com/fsouza/go-dockerclient"
"io"
"io/ioutil"
"log"
"mime/multipart"
"net/http"
"net/url"
"os"
"os/exec"
"path/filepath"
"sync"
"regexp"
"strconv"
"strings"
"time"
)
var (
host = kingpin.Flag("host", "Set host of docket registry.").Short('h').Default("http://127.0.0.1").String()
port = kingpin.Flag("port", "Set port of docket registry.").Short('p').Default("8004").String()
location = kingpin.Flag("location", "Set location to store torrents and tarballs.").Short('l').Default("/tmp/docket").String()
push = kingpin.Command("push", "Push to the docket registry.")
pushImage = push.Arg("push", "Image to push.").Required().String()
pull = kingpin.Command("pull", "pull to the docket registry.")
pullImage = pull.Arg("pull", "Image to pull.").Required().String()
imagesCmd = kingpin.Command("images", "display images in the docket registry.")
imageFlag = imagesCmd.Flag("images", "display images in the docket registry.").Bool()
)
var flag bool
type RootFS struct{
Type string
Layers []string
}
type ManifestFile struct {
Id string
RepoTags interface{}
RepoDigests interface{}
Parent string
Comment string
Created string
Container string
ContainerConfig interface{}
DockerVersion string
Author string
Config interface{}
Architecture string
Os string
Size string
VirtualSize string
GraphicDriver interface{}
RootFS RootFS
Metadata string
}
func track(start time.Time, name string) {
elapsed := time.Since(start)
elapsed = elapsed
log.Printf("\n\n%s took %s\n\n", name, elapsed)
}
// Creates a new tarball upload http request to the Docket registry
func | (params map[string]string, paramName, path string) (*http.Request, error) {
uri := *host + ":" + *port + "/images"
file, err := os.Open(path)
if err != nil {
return nil, err
}
defer file.Close()
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
part, err := writer.CreateFormFile(paramName, filepath.Base(path))
if err != nil {
return nil, err
}
_, err = io.Copy(part, file)
err = writer.Close()
if err != nil {
return nil, err
}
request, err := http.NewRequest("POST", uri, body)
if err != nil {
return nil, err
}
request.Header.Add("Content-Type", writer.FormDataContentType())
for key, val := range params {
fmt.Println("key = ", key, " val = ", val)
request.Header.Add(key, val)
}
return request, nil
}
func applyPush(image string) error {
defer track(time.Now(), "Image Push")
reg, err := regexp.Compile("[^A-Za-z0-9]+")
if err != nil {
return err
}
loc := *location
if _, err := os.Stat(loc); os.IsNotExist(err) {
os.Mkdir(loc, 0644)
}
endpoint := "unix:///var/run/docker.sock"
client, _ := docker.NewClient(endpoint)
imgs, _ := client.ListImages(docker.ListImagesOptions{All: false})
found := false
imageId := ""
filePath := ""
created := ""
for _, img := range imgs {
tags := img.RepoTags
for _, tag := range tags {
if tag == image {
found = true
imageId = img.ID
created = strconv.FormatInt(img.Created, 10)
fmt.Println("Found image: ", image)
fmt.Println("ID: ", img.ID)
fmt.Println("RepoTags: ", img.RepoTags)
fmt.Println("Created: ", img.Created)
fmt.Println("Size: ", img.Size)
fmt.Println("VirtualSize: ", img.VirtualSize)
fmt.Println("ParentId: ", img.ParentID)
safeImageName := reg.ReplaceAllString(image, "_")
s := []string{loc, "/", imageId, "_", safeImageName, ".tar"}
filePath = strings.Join(s, "")
break
}
}
}
if !found {
return errors.New("Sorry the image could not be found.")
}
//os.Remove("temp.json")
//Run export command
//command invocation
//run docker command save to tar ball in location
fmt.Println("Exporting image to tarball...")
cmd := fmt.Sprintf("sudo docker save %s > %s", image, filePath)
_, err1 := exec.Command("sh", "-c", cmd).Output()
if err1 != nil {
return err1
}
fmt.Println("Successively exported tarball...")
//make post request with contents of tarball to docket registry
imageParams := map[string]string{
"image": image,
"id": imageId,
"created": created,
//"layers": layer,
}
//Adapted from http://matt.aimonetti.net/posts/2013/07/01/golang-multipart-file-upload-example/ (C) Matt Aimonetti
request, err := uploadFile(imageParams, "file", filePath)
if err != nil {
log.Fatal(err)
}
uploadClient := &http.Client{}
resp, err := uploadClient.Do(request)
if err != nil {
log.Fatal(err)
} else {
body := &bytes.Buffer{}
_, err := body.ReadFrom(resp.Body)
if err != nil {
log.Fatal(err)
}
resp.Body.Close()
if resp.StatusCode != 200 {
return errors.New("Failed to push image...")
}
}
fmt.Println("Successfully uploaded image: ", image, " to the Docket registry.")
os.Remove(filePath)
return nil
}
//Adapted from https://github.com/thbar/golang-playground/blob/master/download-files.go
func downloadFromUrl(url string, fileName string) (err error) {
output, err := os.Create(fileName)
if err != nil {
fmt.Println("\nError while creating", fileName, "-", err)
return err
}
defer output.Close()
response, err := http.Get(url)
if err != nil {
fmt.Println("\nError while downloading", url, "-", err)
return err
}
if response.StatusCode != 200 {
fmt.Println("\nFailed to pull image")
return errors.New("Failed to pull image...")
}
defer response.Body.Close()
n, err := io.Copy(output, response.Body)
if err != nil {
fmt.Println("\nError while downloading", url, "-", err)
return err
}
//fmt.Println(n, "bytes downloaded.")
//Hack: trivial check to ensure if file downloaded is not too small
if n < 100 {
return errors.New("Failed to pull image...")
}
return nil
}
func applyPull(image string) error {
defer track(time.Now(), "Image Pull")
reg, err := regexp.Compile("[^A-Za-z0-9]+")
if err != nil {
return err
}
loc := *location
if _, err := os.Stat(loc); os.IsNotExist(err) {
os.Mkdir(loc, 0644)
}
safeImageName := reg.ReplaceAllString(image, "_")
filePath := loc + "/"
fileName := filePath + safeImageName + ".torrent"
fmt.Print("\n\n@@@@ Pulling image ---> ", safeImageName, " @@@@\n\nDownloading meta data file\n")
//Download torrent file
queryParam := map[string]string{
"image": image,
}
queryParamJson, _ := json.Marshal(queryParam)
metaUrl := *host + ":" + *port + "/images?q=" + url.QueryEscape(string(queryParamJson))
response, err3 := http.Get(metaUrl)
if err3 != nil {
fmt.Println("Failed to query image metadata endpoint")
return err3
}
if response.StatusCode != 200 {
fmt.Println("Failed to get image metadata")
return errors.New("Failed to get images metadata...")
}
defer response.Body.Close()
metaJson, err4 := ioutil.ReadAll(response.Body)
if err4 != nil {
fmt.Println("Failed to get image metadata json")
return errors.New("Failed to get image metadata json")
}
fmt.Println("\nSuccessfully Fetched meta data file")
var queryObj map[string]interface{}
if err := json.Unmarshal([]byte(metaJson), &queryObj); err != nil {
return errors.New("Failed to decode images metadata json...")
}
tarballNameInterface := queryObj["fileName"]
tarballName := tarballNameInterface.(string)
layers := (queryObj["layers"]).(string)
result := strings.Split(tarballName, "_")
locToImg := loc+"/" + result[0] + "/"
if _, errI := os.Stat(locToImg); os.IsNotExist(errI) {
os.Mkdir(locToImg, 0644)
}
jsonFile := (queryObj["jsonFile"]).(string)
d1 := []byte(jsonFile)
jsonFileName := locToImg+strings.Split(result[0], ":")[1]+".json"
err2 := ioutil.WriteFile(jsonFileName, d1, 0644)
if err2 != nil {
fmt.Println("Error copying json file")
return errors.New("Error copying json file")
}
metadata := (queryObj["metadata"]).(string)
d2 := []byte(metadata)
errM := ioutil.WriteFile(locToImg+"manifest.json", d2, 0644)
if errM != nil {
fmt.Println("Error copying manifest file")
return errors.New("Error copying manifest file")
}
repository := (queryObj["repository"]).(string)
d3 := []byte(repository)
errR := ioutil.WriteFile(locToImg+"repositories", d3, 0644)
if errR != nil {
fmt.Println("Error copying repository file")
return errors.New("Error copying repository file")
}
layerList := strings.Split(layers, ",")
flag = false
layerMap := (queryObj["layerMap"]).(string)
layersShaList :=strings.Split(layerMap,",")
layerShaMap := make(map[string]string)
nonExistingList := make([]string,0)
for i := 0; i < len(layersShaList); i++ {
layersArray :=strings.Split(layersShaList[i],":")
layerShaMap[layersArray[0]] = layersArray[1]
}
var wg sync.WaitGroup
fmt.Print("\n\nFinding the missing layers in the image\n")
for i := 0; i < len(layerList); i++ {
layerVal := layerList[i]
grepCmd := "sudo find /var/lib/docker/image -name " + layerShaMap[layerVal]
cmd := exec.Command("sh", "-c", grepCmd)
grepOutput, err1 := cmd.CombinedOutput()
if err1 != nil {
fmt.Printf("ERROR GREPING: %s", err1)
}
if len(grepOutput) <= 0 {
fmt.Print("\nlayer -> ", layerList[i], " not present")
nonExistingList = append(nonExistingList, layerList[i])
}
}
//if len(nonExistingList) == 0{
// fmt.Print("\n\nImage already loaded at machine\n\n")
// return nil
//}
fmt.Print("\n\nDownloading torrent file for each missing layer in the image\n")
for i := 0; i < len(layerList); i++ {
wg.Add(1)
fmt.Print("\nlayer -> ", layerList[i])
go func(layerList []string,i int){
queryPrm := map[string]string{
"image": layerList[i],
}
queryPrmJson, _ := json.Marshal(queryPrm)
url := *host + ":" + *port + "/torrents?q=" + url.QueryEscape(string(queryPrmJson))
err1 := downloadFromUrl(url, filePath+layerList[i]+".tar.torrent")
if err1 != nil {
flag = true
fmt.Println("\n\nTorrent file missing at registry for layer--> ", layerList[i],". Will be downloading the entire image")
}
defer wg.Done()
}(layerList, i)
}
wg.Wait()
if flag == true{
fmt.Println("\n\nDownloading the torrent file for original image: ", safeImageName)
url := *host + ":" + *port + "/torrents?q=" + url.QueryEscape(string(queryParamJson))
err1 := downloadFromUrl(url, fileName)
if err1 != nil {
fmt.Println("Failed to pull image")
return err
}
fmt.Println("\nDownloading the entire image from registry: ", safeImageName)
ctorrentCmd := fmt.Sprintf("cd %s && sudo ctorrent -e 0 %s", filePath, fileName)
cmd := exec.Command("bash", "-c", ctorrentCmd)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Run()
//TODO:Replace filename with that from metadata
tarballPath := filePath + tarballName
//Load the downloaded tarball
fmt.Println("\n\nLoading Image...")
importCmd := fmt.Sprintf("sudo docker load -i %s", tarballPath)
_, err2 := exec.Command("sh", "-c", importCmd).Output()
if err2 != nil {
fmt.Printf("Failed to load image into docker!")
return err2
}
fmt.Printf("\nSuccessfively pulled image: ", safeImageName,"\n\n")
return nil
} else{
flag = true
var wg1 sync.WaitGroup
fmt.Print("\n\nDownloading each image layer\n")
for i := 0; i < len(layerList); i++ {
wg1.Add(1)
go func(layerList []string, i int){
fmt.Print("\nDownloading layer-> ",layerList[i])
ctorrentCmd := fmt.Sprintf("cd %s && sudo ctorrent -e 0 %s", filePath, layerList[i]+".tar.torrent >/dev/null 2>&1")
cmd := exec.Command("bash", "-c", ctorrentCmd)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Run()
fmt.Print("\nExtracting layer-> ",layerList[i])
cmdStr1 := "sudo tar -C "+locToImg + " -xvf "+ filePath+layerList[i]+".tar"
_ , errT := exec.Command("sh", "-c", cmdStr1, " >/dev/null 2>&1").Output()
if errT != nil {
flag = false
}
defer wg1.Done()
}(layerList, i)
}
wg1.Wait()
if flag == false{
fmt.Printf("\nERROR in downloading layers\n")
}
//Load the downloaded tarball
os.Chdir(filePath)
fmt.Println("\n\nLoading Image....\n")
importCmd := fmt.Sprintf("sudo tar -cC %s . | docker load", result[0])
_, err2 := exec.Command("sh", "-c", importCmd).Output()
if err2 != nil {
fmt.Printf("Failed to load image into docker!", err2)
}else {
os.Remove(loc)
fmt.Print("\n@@@@@ Successfively pulled image: ", safeImageName, " @@@@@")
}
}
fmt.Print("\n\n")
return nil
}
func applyImages() error {
imagesUrl := *host + ":" + *port + "/images/all"
//TODO:Get metadata GET /images?q={"image":}
response, err3 := http.Get(imagesUrl)
if err3 != nil {
fmt.Println("Failed to query images list endpoint")
return err3
}
if response.StatusCode != 200 {
fmt.Println("Failed to get images list")
return errors.New("Failed to get images list...")
}
defer response.Body.Close()
imagesList, err4 := ioutil.ReadAll(response.Body)
if err4 != nil {
fmt.Println("Failed to get images list")
return errors.New("Failed to get images list")
}
fmt.Println(string(imagesList))
return nil
}
func main() {
kingpin.CommandLine.Help = "Docket Client"
switch kingpin.Parse() {
case "push":
kingpin.FatalIfError(applyPush(*pushImage), "Pushing of image failed")
case "pull":
kingpin.FatalIfError(applyPull((*pullImage)), "Pulling of image failed")
case "images":
kingpin.FatalIfError(applyImages(), "Listing of images failed")
}
}
| uploadFile | identifier_name |
client.go | //Docket Client
//Author: Sivamani Varun
//Gopher Gala
package main
//push
//pull
//-h[ost]
//-p[ort]
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"github.com/alecthomas/kingpin"
"github.com/fsouza/go-dockerclient" | "net/url"
"os"
"os/exec"
"path/filepath"
"sync"
"regexp"
"strconv"
"strings"
"time"
)
var (
host = kingpin.Flag("host", "Set host of docket registry.").Short('h').Default("http://127.0.0.1").String()
port = kingpin.Flag("port", "Set port of docket registry.").Short('p').Default("8004").String()
location = kingpin.Flag("location", "Set location to store torrents and tarballs.").Short('l').Default("/tmp/docket").String()
push = kingpin.Command("push", "Push to the docket registry.")
pushImage = push.Arg("push", "Image to push.").Required().String()
pull = kingpin.Command("pull", "pull to the docket registry.")
pullImage = pull.Arg("pull", "Image to pull.").Required().String()
imagesCmd = kingpin.Command("images", "display images in the docket registry.")
imageFlag = imagesCmd.Flag("images", "display images in the docket registry.").Bool()
)
var flag bool
type RootFS struct{
Type string
Layers []string
}
type ManifestFile struct {
Id string
RepoTags interface{}
RepoDigests interface{}
Parent string
Comment string
Created string
Container string
ContainerConfig interface{}
DockerVersion string
Author string
Config interface{}
Architecture string
Os string
Size string
VirtualSize string
GraphicDriver interface{}
RootFS RootFS
Metadata string
}
func track(start time.Time, name string) {
elapsed := time.Since(start)
elapsed = elapsed
log.Printf("\n\n%s took %s\n\n", name, elapsed)
}
// Creates a new tarball upload http request to the Docket registry
func uploadFile(params map[string]string, paramName, path string) (*http.Request, error) {
uri := *host + ":" + *port + "/images"
file, err := os.Open(path)
if err != nil {
return nil, err
}
defer file.Close()
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
part, err := writer.CreateFormFile(paramName, filepath.Base(path))
if err != nil {
return nil, err
}
_, err = io.Copy(part, file)
err = writer.Close()
if err != nil {
return nil, err
}
request, err := http.NewRequest("POST", uri, body)
if err != nil {
return nil, err
}
request.Header.Add("Content-Type", writer.FormDataContentType())
for key, val := range params {
fmt.Println("key = ", key, " val = ", val)
request.Header.Add(key, val)
}
return request, nil
}
func applyPush(image string) error {
defer track(time.Now(), "Image Push")
reg, err := regexp.Compile("[^A-Za-z0-9]+")
if err != nil {
return err
}
loc := *location
if _, err := os.Stat(loc); os.IsNotExist(err) {
os.Mkdir(loc, 0644)
}
endpoint := "unix:///var/run/docker.sock"
client, _ := docker.NewClient(endpoint)
imgs, _ := client.ListImages(docker.ListImagesOptions{All: false})
found := false
imageId := ""
filePath := ""
created := ""
for _, img := range imgs {
tags := img.RepoTags
for _, tag := range tags {
if tag == image {
found = true
imageId = img.ID
created = strconv.FormatInt(img.Created, 10)
fmt.Println("Found image: ", image)
fmt.Println("ID: ", img.ID)
fmt.Println("RepoTags: ", img.RepoTags)
fmt.Println("Created: ", img.Created)
fmt.Println("Size: ", img.Size)
fmt.Println("VirtualSize: ", img.VirtualSize)
fmt.Println("ParentId: ", img.ParentID)
safeImageName := reg.ReplaceAllString(image, "_")
s := []string{loc, "/", imageId, "_", safeImageName, ".tar"}
filePath = strings.Join(s, "")
break
}
}
}
if !found {
return errors.New("Sorry the image could not be found.")
}
//os.Remove("temp.json")
//Run export command
//command invocation
//run docker command save to tar ball in location
fmt.Println("Exporting image to tarball...")
cmd := fmt.Sprintf("sudo docker save %s > %s", image, filePath)
_, err1 := exec.Command("sh", "-c", cmd).Output()
if err1 != nil {
return err1
}
fmt.Println("Successively exported tarball...")
//make post request with contents of tarball to docket registry
imageParams := map[string]string{
"image": image,
"id": imageId,
"created": created,
//"layers": layer,
}
//Adapted from http://matt.aimonetti.net/posts/2013/07/01/golang-multipart-file-upload-example/ (C) Matt Aimonetti
request, err := uploadFile(imageParams, "file", filePath)
if err != nil {
log.Fatal(err)
}
uploadClient := &http.Client{}
resp, err := uploadClient.Do(request)
if err != nil {
log.Fatal(err)
} else {
body := &bytes.Buffer{}
_, err := body.ReadFrom(resp.Body)
if err != nil {
log.Fatal(err)
}
resp.Body.Close()
if resp.StatusCode != 200 {
return errors.New("Failed to push image...")
}
}
fmt.Println("Successfully uploaded image: ", image, " to the Docket registry.")
os.Remove(filePath)
return nil
}
//Adapted from https://github.com/thbar/golang-playground/blob/master/download-files.go
func downloadFromUrl(url string, fileName string) (err error) {
output, err := os.Create(fileName)
if err != nil {
fmt.Println("\nError while creating", fileName, "-", err)
return err
}
defer output.Close()
response, err := http.Get(url)
if err != nil {
fmt.Println("\nError while downloading", url, "-", err)
return err
}
if response.StatusCode != 200 {
fmt.Println("\nFailed to pull image")
return errors.New("Failed to pull image...")
}
defer response.Body.Close()
n, err := io.Copy(output, response.Body)
if err != nil {
fmt.Println("\nError while downloading", url, "-", err)
return err
}
//fmt.Println(n, "bytes downloaded.")
//Hack: trivial check to ensure if file downloaded is not too small
if n < 100 {
return errors.New("Failed to pull image...")
}
return nil
}
func applyPull(image string) error {
defer track(time.Now(), "Image Pull")
reg, err := regexp.Compile("[^A-Za-z0-9]+")
if err != nil {
return err
}
loc := *location
if _, err := os.Stat(loc); os.IsNotExist(err) {
os.Mkdir(loc, 0644)
}
safeImageName := reg.ReplaceAllString(image, "_")
filePath := loc + "/"
fileName := filePath + safeImageName + ".torrent"
fmt.Print("\n\n@@@@ Pulling image ---> ", safeImageName, " @@@@\n\nDownloading meta data file\n")
//Download torrent file
queryParam := map[string]string{
"image": image,
}
queryParamJson, _ := json.Marshal(queryParam)
metaUrl := *host + ":" + *port + "/images?q=" + url.QueryEscape(string(queryParamJson))
response, err3 := http.Get(metaUrl)
if err3 != nil {
fmt.Println("Failed to query image metadata endpoint")
return err3
}
if response.StatusCode != 200 {
fmt.Println("Failed to get image metadata")
return errors.New("Failed to get images metadata...")
}
defer response.Body.Close()
metaJson, err4 := ioutil.ReadAll(response.Body)
if err4 != nil {
fmt.Println("Failed to get image metadata json")
return errors.New("Failed to get image metadata json")
}
fmt.Println("\nSuccessfully Fetched meta data file")
var queryObj map[string]interface{}
if err := json.Unmarshal([]byte(metaJson), &queryObj); err != nil {
return errors.New("Failed to decode images metadata json...")
}
tarballNameInterface := queryObj["fileName"]
tarballName := tarballNameInterface.(string)
layers := (queryObj["layers"]).(string)
result := strings.Split(tarballName, "_")
locToImg := loc+"/" + result[0] + "/"
if _, errI := os.Stat(locToImg); os.IsNotExist(errI) {
os.Mkdir(locToImg, 0644)
}
jsonFile := (queryObj["jsonFile"]).(string)
d1 := []byte(jsonFile)
jsonFileName := locToImg+strings.Split(result[0], ":")[1]+".json"
err2 := ioutil.WriteFile(jsonFileName, d1, 0644)
if err2 != nil {
fmt.Println("Error copying json file")
return errors.New("Error copying json file")
}
metadata := (queryObj["metadata"]).(string)
d2 := []byte(metadata)
errM := ioutil.WriteFile(locToImg+"manifest.json", d2, 0644)
if errM != nil {
fmt.Println("Error copying manifest file")
return errors.New("Error copying manifest file")
}
repository := (queryObj["repository"]).(string)
d3 := []byte(repository)
errR := ioutil.WriteFile(locToImg+"repositories", d3, 0644)
if errR != nil {
fmt.Println("Error copying repository file")
return errors.New("Error copying repository file")
}
layerList := strings.Split(layers, ",")
flag = false
layerMap := (queryObj["layerMap"]).(string)
layersShaList :=strings.Split(layerMap,",")
layerShaMap := make(map[string]string)
nonExistingList := make([]string,0)
for i := 0; i < len(layersShaList); i++ {
layersArray :=strings.Split(layersShaList[i],":")
layerShaMap[layersArray[0]] = layersArray[1]
}
var wg sync.WaitGroup
fmt.Print("\n\nFinding the missing layers in the image\n")
for i := 0; i < len(layerList); i++ {
layerVal := layerList[i]
grepCmd := "sudo find /var/lib/docker/image -name " + layerShaMap[layerVal]
cmd := exec.Command("sh", "-c", grepCmd)
grepOutput, err1 := cmd.CombinedOutput()
if err1 != nil {
fmt.Printf("ERROR GREPING: %s", err1)
}
if len(grepOutput) <= 0 {
fmt.Print("\nlayer -> ", layerList[i], " not present")
nonExistingList = append(nonExistingList, layerList[i])
}
}
//if len(nonExistingList) == 0{
// fmt.Print("\n\nImage already loaded at machine\n\n")
// return nil
//}
fmt.Print("\n\nDownloading torrent file for each missing layer in the image\n")
for i := 0; i < len(layerList); i++ {
wg.Add(1)
fmt.Print("\nlayer -> ", layerList[i])
go func(layerList []string,i int){
queryPrm := map[string]string{
"image": layerList[i],
}
queryPrmJson, _ := json.Marshal(queryPrm)
url := *host + ":" + *port + "/torrents?q=" + url.QueryEscape(string(queryPrmJson))
err1 := downloadFromUrl(url, filePath+layerList[i]+".tar.torrent")
if err1 != nil {
flag = true
fmt.Println("\n\nTorrent file missing at registry for layer--> ", layerList[i],". Will be downloading the entire image")
}
defer wg.Done()
}(layerList, i)
}
wg.Wait()
if flag == true{
fmt.Println("\n\nDownloading the torrent file for original image: ", safeImageName)
url := *host + ":" + *port + "/torrents?q=" + url.QueryEscape(string(queryParamJson))
err1 := downloadFromUrl(url, fileName)
if err1 != nil {
fmt.Println("Failed to pull image")
return err
}
fmt.Println("\nDownloading the entire image from registry: ", safeImageName)
ctorrentCmd := fmt.Sprintf("cd %s && sudo ctorrent -e 0 %s", filePath, fileName)
cmd := exec.Command("bash", "-c", ctorrentCmd)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Run()
//TODO:Replace filename with that from metadata
tarballPath := filePath + tarballName
//Load the downloaded tarball
fmt.Println("\n\nLoading Image...")
importCmd := fmt.Sprintf("sudo docker load -i %s", tarballPath)
_, err2 := exec.Command("sh", "-c", importCmd).Output()
if err2 != nil {
fmt.Printf("Failed to load image into docker!")
return err2
}
fmt.Printf("\nSuccessfively pulled image: ", safeImageName,"\n\n")
return nil
} else{
flag = true
var wg1 sync.WaitGroup
fmt.Print("\n\nDownloading each image layer\n")
for i := 0; i < len(layerList); i++ {
wg1.Add(1)
go func(layerList []string, i int){
fmt.Print("\nDownloading layer-> ",layerList[i])
ctorrentCmd := fmt.Sprintf("cd %s && sudo ctorrent -e 0 %s", filePath, layerList[i]+".tar.torrent >/dev/null 2>&1")
cmd := exec.Command("bash", "-c", ctorrentCmd)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Run()
fmt.Print("\nExtracting layer-> ",layerList[i])
cmdStr1 := "sudo tar -C "+locToImg + " -xvf "+ filePath+layerList[i]+".tar"
_ , errT := exec.Command("sh", "-c", cmdStr1, " >/dev/null 2>&1").Output()
if errT != nil {
flag = false
}
defer wg1.Done()
}(layerList, i)
}
wg1.Wait()
if flag == false{
fmt.Printf("\nERROR in downloading layers\n")
}
//Load the downloaded tarball
os.Chdir(filePath)
fmt.Println("\n\nLoading Image....\n")
importCmd := fmt.Sprintf("sudo tar -cC %s . | docker load", result[0])
_, err2 := exec.Command("sh", "-c", importCmd).Output()
if err2 != nil {
fmt.Printf("Failed to load image into docker!", err2)
}else {
os.Remove(loc)
fmt.Print("\n@@@@@ Successfively pulled image: ", safeImageName, " @@@@@")
}
}
fmt.Print("\n\n")
return nil
}
func applyImages() error {
imagesUrl := *host + ":" + *port + "/images/all"
//TODO:Get metadata GET /images?q={"image":}
response, err3 := http.Get(imagesUrl)
if err3 != nil {
fmt.Println("Failed to query images list endpoint")
return err3
}
if response.StatusCode != 200 {
fmt.Println("Failed to get images list")
return errors.New("Failed to get images list...")
}
defer response.Body.Close()
imagesList, err4 := ioutil.ReadAll(response.Body)
if err4 != nil {
fmt.Println("Failed to get images list")
return errors.New("Failed to get images list")
}
fmt.Println(string(imagesList))
return nil
}
func main() {
kingpin.CommandLine.Help = "Docket Client"
switch kingpin.Parse() {
case "push":
kingpin.FatalIfError(applyPush(*pushImage), "Pushing of image failed")
case "pull":
kingpin.FatalIfError(applyPull((*pullImage)), "Pulling of image failed")
case "images":
kingpin.FatalIfError(applyImages(), "Listing of images failed")
}
} | "io"
"io/ioutil"
"log"
"mime/multipart"
"net/http" | random_line_split |
context.rs | // Copyright (C) 2018-2020 Sebastian Dröge <sebastian@centricular.com>
// Copyright (C) 2019-2022 François Laignel <fengalin@free.fr>
//
// Take a look at the license at the top of the repository in the LICENSE file.
use futures::prelude::*;
use gst::glib::once_cell::sync::Lazy;
use std::collections::HashMap;
use std::io;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::{self, Poll};
use std::time::Duration;
use super::{Handle, HandleWeak, JoinHandle, Scheduler, SubTaskOutput, TaskId};
use crate::runtime::RUNTIME_CAT;
// We are bound to using `sync` for the `runtime` `Mutex`es. Attempts to use `async` `Mutex`es
// lead to the following issues:
//
// * `CONTEXTS`: can't `spawn` a `Future` when called from a `Context` thread via `ffi`.
// * `timers`: can't automatically `remove` the timer from `BinaryHeap` because `async drop`
// is not available.
// * `task_queues`: can't `add` a pending task when called from a `Context` thread via `ffi`.
//
// Also, we want to be able to `acquire` a `Context` outside of an `async` context.
// These `Mutex`es must be `lock`ed for a short period.
static CONTEXTS: Lazy<Mutex<HashMap<Arc<str>, ContextWeak>>> =
Lazy::new(|| Mutex::new(HashMap::new()));
/// Blocks on `future` in one way or another if possible.
///
/// IO & time related `Future`s must be handled within their own [`Context`].
/// Wait for the result using a [`JoinHandle`] or a `channel`.
///
/// If there's currently an active `Context` with a task, then the future is only queued up as a
/// pending sub task for that task.
///
/// Otherwise the current thread is blocking and the passed in future is executed.
///
/// Note that you must not pass any futures here that wait for the currently active task in one way
/// or another as this would deadlock!
#[track_caller]
pub fn block_on_or_add_sub_task<Fut>(future: Fut) -> Option<Fut::Output>
where
Fut: Future + Send + 'static,
Fut::Output: Send + 'static,
{
if let Some((cur_context, cur_task_id)) = Context::current_task() {
gst::debug!(
RUNTIME_CAT,
"Adding subtask to task {:?} on context {}",
cur_task_id,
cur_context.name()
);
let _ = cur_context.add_sub_task(cur_task_id, async move {
future.await;
Ok(())
});
return None;
}
// Not running in a Context thread so we can block
Some(block_on(future))
}
/// Blocks on `future`.
///
/// IO & time related `Future`s must be handled within their own [`Context`].
/// Wait for the result using a [`JoinHandle`] or a `channel`.
///
/// The current thread is blocking and the passed in future is executed.
///
/// # Panics
///
/// This function panics if called within a [`Context`] thread.
#[track_caller]
pub fn block_on<Fut>(future: Fut) -> Fut::Output
where
Fut: Future + Send + 'static,
Fut::Output: Send + 'static,
{
if let Some(context) = Context::current() {
let msg = format!("Attempt to block within Context {}", context.name());
gst::error!(RUNTIME_CAT, "{}", msg);
panic!("{}", msg);
}
// Not running in a Context thread so we can block
gst::debug!(RUNTIME_CAT, "Blocking on new dummy context");
Scheduler::block_on(future)
}
/// Yields execution back to the runtime.
#[inline]
pub fn yield_now() -> YieldNow {
YieldNow::default()
}
#[derive(Debug, Default)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct YieldNow(bool);
impl Future for YieldNow {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
if !self.0 {
self.0 = true;
cx.waker().wake_by_ref();
Poll::Pending
} else {
Poll::Ready(())
}
}
}
#[derive(Clone, Debug)]
pub struct ContextWeak(HandleWeak);
impl ContextWeak {
pub fn upgrade(&self) -> Option<Context> {
self.0.upgrade().map(Context)
}
}
/// A `threadshare` `runtime` `Context`.
///
/// The `Context` provides low-level asynchronous processing features to
/// multiplex task execution on a single thread.
///
/// `Element` implementations should use [`PadSrc`] and [`PadSink`] which
/// provide high-level features.
///
/// [`PadSrc`]: ../pad/struct.PadSrc.html
/// [`PadSink`]: ../pad/struct.PadSink.html
#[derive(Clone, Debug)]
pub struct Context(Handle);
impl PartialEq for Context {
fn eq(&self, other: &Self) -> bool {
self.0.eq(&other.0)
}
}
impl Eq for Context {}
impl Context {
pub fn acquire(context_name: &str, wait: Duration) -> Result<Self, io::Error> {
assert_ne!(context_name, Scheduler::DUMMY_NAME);
let mut contexts = CONTEXTS.lock().unwrap();
if let Some(context_weak) = contexts.get(context_name) {
if let Some(context) = context_weak.upgrade() {
gst::debug!(RUNTIME_CAT, "Joining Context '{}'", context.name());
return Ok(context);
}
}
let context = Context(Scheduler::start(context_name, wait));
contexts.insert(context_name.into(), context.downgrade());
gst::debug!(
RUNTIME_CAT,
"New Context '{}' throttling {:?}",
context.name(),
wait,
);
Ok(context)
}
pub fn downgrade(&self) -> ContextWeak {
ContextWeak(self.0.downgrade())
}
pub fn name(&self) -> &str {
self.0.context_name()
}
// FIXME this could be renamed as max_throttling
// but then, all elements should also change their
// wait variables and properties to max_throttling.
pub fn wait_duration(&self) -> Duration {
self.0.max_throttling()
}
/// Total duration the scheduler spent parked.
///
/// This is only useful for performance evaluation.
#[cfg(feature = "tuning")]
pub fn parked_duration(&self) -> Duration {
self.0.parked_duration()
}
/// Returns `true` if a `Context` is running on current thread.
pub fn is_context_thread() -> bool {
Scheduler::is_scheduler_thread()
}
/// Returns the `Context` running on current thread, if any.
pub fn current() -> Option<Context> {
Scheduler::current().map(Context)
}
/// Returns the `TaskId` running on current thread, if any.
pub fn current_task() -> Option<(Context, TaskId)> {
Scheduler::current().map(|scheduler| {
// Context users always operate on a Task
(Context(scheduler), TaskId::current().unwrap())
})
} | /// of a [`Context`].
///
/// # Panic
///
/// This will block current thread and would panic if run
/// from the [`Context`].
#[track_caller]
pub fn enter<'a, F, O>(&'a self, f: F) -> O
where
F: FnOnce() -> O + Send + 'a,
O: Send + 'a,
{
if let Some(cur) = Context::current().as_ref() {
if cur == self {
panic!(
"Attempt to enter Context {} within itself, this would deadlock",
self.name()
);
} else {
gst::warning!(
RUNTIME_CAT,
"Entering Context {} within {}",
self.name(),
cur.name()
);
}
} else {
gst::debug!(RUNTIME_CAT, "Entering Context {}", self.name());
}
self.0.enter(f)
}
pub fn spawn<Fut>(&self, future: Fut) -> JoinHandle<Fut::Output>
where
Fut: Future + Send + 'static,
Fut::Output: Send + 'static,
{
self.0.spawn(future)
}
pub fn spawn_and_unpark<Fut>(&self, future: Fut) -> JoinHandle<Fut::Output>
where
Fut: Future + Send + 'static,
Fut::Output: Send + 'static,
{
self.0.spawn_and_unpark(future)
}
/// Forces the scheduler to unpark.
///
/// This is not needed by elements implementors as they are
/// supposed to call [`Self::spawn_and_unpark`] when needed.
/// However, it's useful for lower level implementations such as
/// `runtime::Task` so as to make sure the iteration loop yields
/// as soon as possible when a transition is requested.
pub(in crate::runtime) fn unpark(&self) {
self.0.unpark();
}
pub fn add_sub_task<T>(&self, task_id: TaskId, sub_task: T) -> Result<(), T>
where
T: Future<Output = SubTaskOutput> + Send + 'static,
{
self.0.add_sub_task(task_id, sub_task)
}
pub async fn drain_sub_tasks() -> SubTaskOutput {
let (ctx, task_id) = match Context::current_task() {
Some(task) => task,
None => return Ok(()),
};
ctx.0.drain_sub_tasks(task_id).await
}
}
impl From<Handle> for Context {
fn from(handle: Handle) -> Self {
Context(handle)
}
}
#[cfg(test)]
mod tests {
use futures::channel::mpsc;
use futures::lock::Mutex;
use futures::prelude::*;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::sync::Arc;
use std::time::{Duration, Instant};
use super::super::Scheduler;
use super::Context;
use crate::runtime::Async;
type Item = i32;
const SLEEP_DURATION_MS: u64 = 2;
const SLEEP_DURATION: Duration = Duration::from_millis(SLEEP_DURATION_MS);
const DELAY: Duration = Duration::from_millis(SLEEP_DURATION_MS * 10);
#[test]
fn block_on_task_id() {
gst::init().unwrap();
assert!(!Context::is_context_thread());
crate::runtime::executor::block_on(async {
let (ctx, task_id) = Context::current_task().unwrap();
assert_eq!(ctx.name(), Scheduler::DUMMY_NAME);
assert_eq!(task_id, super::TaskId(0));
let res = ctx.add_sub_task(task_id, async move {
let (_ctx, task_id) = Context::current_task().unwrap();
assert_eq!(task_id, super::TaskId(0));
Ok(())
});
assert!(res.is_ok());
assert!(Context::is_context_thread());
});
assert!(!Context::is_context_thread());
}
#[test]
fn block_on_timer() {
gst::init().unwrap();
let elapsed = crate::runtime::executor::block_on(async {
let now = Instant::now();
crate::runtime::timer::delay_for(DELAY).await;
now.elapsed()
});
assert!(elapsed >= DELAY);
}
#[test]
fn context_task_id() {
use super::TaskId;
gst::init().unwrap();
let context = Context::acquire("context_task_id", SLEEP_DURATION).unwrap();
let join_handle = context.spawn(async {
let (ctx, task_id) = Context::current_task().unwrap();
assert_eq!(ctx.name(), "context_task_id");
assert_eq!(task_id, TaskId(0));
});
futures::executor::block_on(join_handle).unwrap();
// TaskId(0) is vacant again
let ctx_weak = context.downgrade();
let join_handle = context.spawn(async move {
let (ctx, task_id) = Context::current_task().unwrap();
assert_eq!(task_id, TaskId(0));
let res = ctx.add_sub_task(task_id, async move {
let (_ctx, task_id) = Context::current_task().unwrap();
assert_eq!(task_id, TaskId(0));
Ok(())
});
assert!(res.is_ok());
ctx_weak
.upgrade()
.unwrap()
.spawn(async {
let (ctx, task_id) = Context::current_task().unwrap();
assert_eq!(task_id, TaskId(1));
let res = ctx.add_sub_task(task_id, async move {
let (_ctx, task_id) = Context::current_task().unwrap();
assert_eq!(task_id, TaskId(1));
Ok(())
});
assert!(res.is_ok());
assert!(Context::drain_sub_tasks().await.is_ok());
let (_ctx, task_id) = Context::current_task().unwrap();
assert_eq!(task_id, TaskId(1));
})
.await
.unwrap();
assert!(Context::drain_sub_tasks().await.is_ok());
let (_ctx, task_id) = Context::current_task().unwrap();
assert_eq!(task_id, TaskId(0));
});
futures::executor::block_on(join_handle).unwrap();
}
#[test]
fn drain_sub_tasks() {
// Setup
gst::init().unwrap();
let context = Context::acquire("drain_sub_tasks", SLEEP_DURATION).unwrap();
let join_handle = context.spawn(async {
let (sender, mut receiver) = mpsc::channel(1);
let sender: Arc<Mutex<mpsc::Sender<Item>>> = Arc::new(Mutex::new(sender));
let add_sub_task = move |item| {
let sender = sender.clone();
Context::current_task()
.ok_or(())
.and_then(|(ctx, task_id)| {
ctx.add_sub_task(task_id, async move {
sender
.lock()
.await
.send(item)
.await
.map_err(|_| gst::FlowError::Error)
})
.map_err(drop)
})
};
// Tests
// Drain empty queue
let drain_fut = Context::drain_sub_tasks();
drain_fut.await.unwrap();
// Add a subtask
add_sub_task(0).unwrap();
// Check that it was not executed yet
receiver.try_next().unwrap_err();
// Drain it now and check that it was executed
let drain_fut = Context::drain_sub_tasks();
drain_fut.await.unwrap();
assert_eq!(receiver.try_next().unwrap(), Some(0));
// Add another task and check that it's not executed yet
add_sub_task(1).unwrap();
receiver.try_next().unwrap_err();
// Return the receiver
receiver
});
let mut receiver = futures::executor::block_on(join_handle).unwrap();
// The last sub task should be simply dropped at this point
match receiver.try_next() {
Ok(None) | Err(_) => (),
other => panic!("Unexpected {other:?}"),
}
}
#[test]
fn block_on_from_sync() {
gst::init().unwrap();
let context = Context::acquire("block_on_from_sync", SLEEP_DURATION).unwrap();
let bytes_sent = crate::runtime::executor::block_on(context.spawn(async {
let saddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 5001);
let socket = Async::<UdpSocket>::bind(saddr).unwrap();
let saddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 4001);
socket.send_to(&[0; 10], saddr).await.unwrap()
}))
.unwrap();
assert_eq!(bytes_sent, 10);
let elapsed = crate::runtime::executor::block_on(context.spawn(async {
let start = Instant::now();
crate::runtime::timer::delay_for(DELAY).await;
start.elapsed()
}))
.unwrap();
// Due to throttling, `Delay` may be fired earlier
assert!(elapsed + SLEEP_DURATION / 2 >= DELAY);
}
#[test]
#[should_panic]
fn block_on_from_context() {
gst::init().unwrap();
let context = Context::acquire("block_on_from_context", SLEEP_DURATION).unwrap();
// Panic: attempt to `runtime::executor::block_on` within a `Context` thread
let join_handle = context.spawn(async {
crate::runtime::executor::block_on(crate::runtime::timer::delay_for(DELAY));
});
// Panic: task has failed
// (enforced by `async-task`, see comment in `Future` impl for `JoinHandle`).
futures::executor::block_on(join_handle).unwrap_err();
}
#[test]
fn enter_context_from_scheduler() {
gst::init().unwrap();
let elapsed = crate::runtime::executor::block_on(async {
let context = Context::acquire("enter_context_from_executor", SLEEP_DURATION).unwrap();
let socket = context
.enter(|| {
let saddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 5002);
Async::<UdpSocket>::bind(saddr)
})
.unwrap();
let saddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 4002);
let bytes_sent = socket.send_to(&[0; 10], saddr).await.unwrap();
assert_eq!(bytes_sent, 10);
let (start, timer) =
context.enter(|| (Instant::now(), crate::runtime::timer::delay_for(DELAY)));
timer.await;
start.elapsed()
});
// Due to throttling, `Delay` may be fired earlier
assert!(elapsed + SLEEP_DURATION / 2 >= DELAY);
}
#[test]
fn enter_context_from_sync() {
gst::init().unwrap();
let context = Context::acquire("enter_context_from_sync", SLEEP_DURATION).unwrap();
let socket = context
.enter(|| {
let saddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 5003);
Async::<UdpSocket>::bind(saddr)
})
.unwrap();
let saddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 4003);
let bytes_sent = futures::executor::block_on(socket.send_to(&[0; 10], saddr)).unwrap();
assert_eq!(bytes_sent, 10);
let (start, timer) =
context.enter(|| (Instant::now(), crate::runtime::timer::delay_for(DELAY)));
let elapsed = crate::runtime::executor::block_on(async move {
timer.await;
start.elapsed()
});
// Due to throttling, `Delay` may be fired earlier
assert!(elapsed + SLEEP_DURATION / 2 >= DELAY);
}
} |
/// Executes the provided function relatively to this [`Context`].
///
/// Usefull to initialize i/o sources and timers from outside | random_line_split |
context.rs | // Copyright (C) 2018-2020 Sebastian Dröge <sebastian@centricular.com>
// Copyright (C) 2019-2022 François Laignel <fengalin@free.fr>
//
// Take a look at the license at the top of the repository in the LICENSE file.
use futures::prelude::*;
use gst::glib::once_cell::sync::Lazy;
use std::collections::HashMap;
use std::io;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::{self, Poll};
use std::time::Duration;
use super::{Handle, HandleWeak, JoinHandle, Scheduler, SubTaskOutput, TaskId};
use crate::runtime::RUNTIME_CAT;
// We are bound to using `sync` for the `runtime` `Mutex`es. Attempts to use `async` `Mutex`es
// lead to the following issues:
//
// * `CONTEXTS`: can't `spawn` a `Future` when called from a `Context` thread via `ffi`.
// * `timers`: can't automatically `remove` the timer from `BinaryHeap` because `async drop`
// is not available.
// * `task_queues`: can't `add` a pending task when called from a `Context` thread via `ffi`.
//
// Also, we want to be able to `acquire` a `Context` outside of an `async` context.
// These `Mutex`es must be `lock`ed for a short period.
static CONTEXTS: Lazy<Mutex<HashMap<Arc<str>, ContextWeak>>> =
Lazy::new(|| Mutex::new(HashMap::new()));
/// Blocks on `future` in one way or another if possible.
///
/// IO & time related `Future`s must be handled within their own [`Context`].
/// Wait for the result using a [`JoinHandle`] or a `channel`.
///
/// If there's currently an active `Context` with a task, then the future is only queued up as a
/// pending sub task for that task.
///
/// Otherwise the current thread is blocking and the passed in future is executed.
///
/// Note that you must not pass any futures here that wait for the currently active task in one way
/// or another as this would deadlock!
#[track_caller]
pub fn block_on_or_add_sub_task<Fut>(future: Fut) -> Option<Fut::Output>
where
Fut: Future + Send + 'static,
Fut::Output: Send + 'static,
{
if let Some((cur_context, cur_task_id)) = Context::current_task() {
gst::debug!(
RUNTIME_CAT,
"Adding subtask to task {:?} on context {}",
cur_task_id,
cur_context.name()
);
let _ = cur_context.add_sub_task(cur_task_id, async move {
future.await;
Ok(())
});
return None;
}
// Not running in a Context thread so we can block
Some(block_on(future))
}
/// Blocks on `future`.
///
/// IO & time related `Future`s must be handled within their own [`Context`].
/// Wait for the result using a [`JoinHandle`] or a `channel`.
///
/// The current thread is blocking and the passed in future is executed.
///
/// # Panics
///
/// This function panics if called within a [`Context`] thread.
#[track_caller]
pub fn block_on<Fut>(future: Fut) -> Fut::Output
where
Fut: Future + Send + 'static,
Fut::Output: Send + 'static,
{
if let Some(context) = Context::current() {
let msg = format!("Attempt to block within Context {}", context.name());
gst::error!(RUNTIME_CAT, "{}", msg);
panic!("{}", msg);
}
// Not running in a Context thread so we can block
gst::debug!(RUNTIME_CAT, "Blocking on new dummy context");
Scheduler::block_on(future)
}
/// Yields execution back to the runtime.
#[inline]
pub fn yield_now() -> YieldNow {
YieldNow::default()
}
#[derive(Debug, Default)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct YieldNow(bool);
impl Future for YieldNow {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
if !self.0 {
self.0 = true;
cx.waker().wake_by_ref();
Poll::Pending
} else {
Poll::Ready(())
}
}
}
#[derive(Clone, Debug)]
pub struct ContextWeak(HandleWeak);
impl ContextWeak {
pub fn upgrade(&self) -> Option<Context> {
self.0.upgrade().map(Context)
}
}
/// A `threadshare` `runtime` `Context`.
///
/// The `Context` provides low-level asynchronous processing features to
/// multiplex task execution on a single thread.
///
/// `Element` implementations should use [`PadSrc`] and [`PadSink`] which
/// provide high-level features.
///
/// [`PadSrc`]: ../pad/struct.PadSrc.html
/// [`PadSink`]: ../pad/struct.PadSink.html
#[derive(Clone, Debug)]
pub struct Context(Handle);
impl PartialEq for Context {
fn eq(&self, other: &Self) -> bool {
self.0.eq(&other.0)
}
}
impl Eq for Context {}
impl Context {
pub fn acquire(context_name: &str, wait: Duration) -> Result<Self, io::Error> {
assert_ne!(context_name, Scheduler::DUMMY_NAME);
let mut contexts = CONTEXTS.lock().unwrap();
if let Some(context_weak) = contexts.get(context_name) {
if let Some(context) = context_weak.upgrade() {
gst::debug!(RUNTIME_CAT, "Joining Context '{}'", context.name());
return Ok(context);
}
}
let context = Context(Scheduler::start(context_name, wait));
contexts.insert(context_name.into(), context.downgrade());
gst::debug!(
RUNTIME_CAT,
"New Context '{}' throttling {:?}",
context.name(),
wait,
);
Ok(context)
}
pub fn downgrade(&self) -> ContextWeak {
ContextWeak(self.0.downgrade())
}
pub fn name(&self) -> &str {
self.0.context_name()
}
// FIXME this could be renamed as max_throttling
// but then, all elements should also change their
// wait variables and properties to max_throttling.
pub fn wait_duration(&self) -> Duration {
self.0.max_throttling()
}
/// Total duration the scheduler spent parked.
///
/// This is only useful for performance evaluation.
#[cfg(feature = "tuning")]
pub fn parked_duration(&self) -> Duration {
self.0.parked_duration()
}
/// Returns `true` if a `Context` is running on current thread.
pub fn is_context_thread() -> bool {
Scheduler::is_scheduler_thread()
}
/// Returns the `Context` running on current thread, if any.
pub fn current() -> Option<Context> {
Scheduler::current().map(Context)
}
/// Returns the `TaskId` running on current thread, if any.
pub fn current_task() -> Option<(Context, TaskId)> {
Scheduler::current().map(|scheduler| {
// Context users always operate on a Task
(Context(scheduler), TaskId::current().unwrap())
})
}
/// Executes the provided function relatively to this [`Context`].
///
/// Usefull to initialize i/o sources and timers from outside
/// of a [`Context`].
///
/// # Panic
///
/// This will block current thread and would panic if run
/// from the [`Context`].
#[track_caller]
pub fn enter<'a, F, O>(&'a self, f: F) -> O
where
F: FnOnce() -> O + Send + 'a,
O: Send + 'a,
{
if let Some(cur) = Context::current().as_ref() {
if cur == self {
panic!(
"Attempt to enter Context {} within itself, this would deadlock",
self.name()
);
} else {
| } else {
gst::debug!(RUNTIME_CAT, "Entering Context {}", self.name());
}
self.0.enter(f)
}
pub fn spawn<Fut>(&self, future: Fut) -> JoinHandle<Fut::Output>
where
Fut: Future + Send + 'static,
Fut::Output: Send + 'static,
{
self.0.spawn(future)
}
pub fn spawn_and_unpark<Fut>(&self, future: Fut) -> JoinHandle<Fut::Output>
where
Fut: Future + Send + 'static,
Fut::Output: Send + 'static,
{
self.0.spawn_and_unpark(future)
}
/// Forces the scheduler to unpark.
///
/// This is not needed by elements implementors as they are
/// supposed to call [`Self::spawn_and_unpark`] when needed.
/// However, it's useful for lower level implementations such as
/// `runtime::Task` so as to make sure the iteration loop yields
/// as soon as possible when a transition is requested.
pub(in crate::runtime) fn unpark(&self) {
self.0.unpark();
}
pub fn add_sub_task<T>(&self, task_id: TaskId, sub_task: T) -> Result<(), T>
where
T: Future<Output = SubTaskOutput> + Send + 'static,
{
self.0.add_sub_task(task_id, sub_task)
}
pub async fn drain_sub_tasks() -> SubTaskOutput {
let (ctx, task_id) = match Context::current_task() {
Some(task) => task,
None => return Ok(()),
};
ctx.0.drain_sub_tasks(task_id).await
}
}
impl From<Handle> for Context {
fn from(handle: Handle) -> Self {
Context(handle)
}
}
#[cfg(test)]
mod tests {
use futures::channel::mpsc;
use futures::lock::Mutex;
use futures::prelude::*;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::sync::Arc;
use std::time::{Duration, Instant};
use super::super::Scheduler;
use super::Context;
use crate::runtime::Async;
type Item = i32;
const SLEEP_DURATION_MS: u64 = 2;
const SLEEP_DURATION: Duration = Duration::from_millis(SLEEP_DURATION_MS);
const DELAY: Duration = Duration::from_millis(SLEEP_DURATION_MS * 10);
#[test]
fn block_on_task_id() {
gst::init().unwrap();
assert!(!Context::is_context_thread());
crate::runtime::executor::block_on(async {
let (ctx, task_id) = Context::current_task().unwrap();
assert_eq!(ctx.name(), Scheduler::DUMMY_NAME);
assert_eq!(task_id, super::TaskId(0));
let res = ctx.add_sub_task(task_id, async move {
let (_ctx, task_id) = Context::current_task().unwrap();
assert_eq!(task_id, super::TaskId(0));
Ok(())
});
assert!(res.is_ok());
assert!(Context::is_context_thread());
});
assert!(!Context::is_context_thread());
}
#[test]
fn block_on_timer() {
gst::init().unwrap();
let elapsed = crate::runtime::executor::block_on(async {
let now = Instant::now();
crate::runtime::timer::delay_for(DELAY).await;
now.elapsed()
});
assert!(elapsed >= DELAY);
}
#[test]
fn context_task_id() {
use super::TaskId;
gst::init().unwrap();
let context = Context::acquire("context_task_id", SLEEP_DURATION).unwrap();
let join_handle = context.spawn(async {
let (ctx, task_id) = Context::current_task().unwrap();
assert_eq!(ctx.name(), "context_task_id");
assert_eq!(task_id, TaskId(0));
});
futures::executor::block_on(join_handle).unwrap();
// TaskId(0) is vacant again
let ctx_weak = context.downgrade();
let join_handle = context.spawn(async move {
let (ctx, task_id) = Context::current_task().unwrap();
assert_eq!(task_id, TaskId(0));
let res = ctx.add_sub_task(task_id, async move {
let (_ctx, task_id) = Context::current_task().unwrap();
assert_eq!(task_id, TaskId(0));
Ok(())
});
assert!(res.is_ok());
ctx_weak
.upgrade()
.unwrap()
.spawn(async {
let (ctx, task_id) = Context::current_task().unwrap();
assert_eq!(task_id, TaskId(1));
let res = ctx.add_sub_task(task_id, async move {
let (_ctx, task_id) = Context::current_task().unwrap();
assert_eq!(task_id, TaskId(1));
Ok(())
});
assert!(res.is_ok());
assert!(Context::drain_sub_tasks().await.is_ok());
let (_ctx, task_id) = Context::current_task().unwrap();
assert_eq!(task_id, TaskId(1));
})
.await
.unwrap();
assert!(Context::drain_sub_tasks().await.is_ok());
let (_ctx, task_id) = Context::current_task().unwrap();
assert_eq!(task_id, TaskId(0));
});
futures::executor::block_on(join_handle).unwrap();
}
#[test]
fn drain_sub_tasks() {
// Setup
gst::init().unwrap();
let context = Context::acquire("drain_sub_tasks", SLEEP_DURATION).unwrap();
let join_handle = context.spawn(async {
let (sender, mut receiver) = mpsc::channel(1);
let sender: Arc<Mutex<mpsc::Sender<Item>>> = Arc::new(Mutex::new(sender));
let add_sub_task = move |item| {
let sender = sender.clone();
Context::current_task()
.ok_or(())
.and_then(|(ctx, task_id)| {
ctx.add_sub_task(task_id, async move {
sender
.lock()
.await
.send(item)
.await
.map_err(|_| gst::FlowError::Error)
})
.map_err(drop)
})
};
// Tests
// Drain empty queue
let drain_fut = Context::drain_sub_tasks();
drain_fut.await.unwrap();
// Add a subtask
add_sub_task(0).unwrap();
// Check that it was not executed yet
receiver.try_next().unwrap_err();
// Drain it now and check that it was executed
let drain_fut = Context::drain_sub_tasks();
drain_fut.await.unwrap();
assert_eq!(receiver.try_next().unwrap(), Some(0));
// Add another task and check that it's not executed yet
add_sub_task(1).unwrap();
receiver.try_next().unwrap_err();
// Return the receiver
receiver
});
let mut receiver = futures::executor::block_on(join_handle).unwrap();
// The last sub task should be simply dropped at this point
match receiver.try_next() {
Ok(None) | Err(_) => (),
other => panic!("Unexpected {other:?}"),
}
}
#[test]
fn block_on_from_sync() {
gst::init().unwrap();
let context = Context::acquire("block_on_from_sync", SLEEP_DURATION).unwrap();
let bytes_sent = crate::runtime::executor::block_on(context.spawn(async {
let saddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 5001);
let socket = Async::<UdpSocket>::bind(saddr).unwrap();
let saddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 4001);
socket.send_to(&[0; 10], saddr).await.unwrap()
}))
.unwrap();
assert_eq!(bytes_sent, 10);
let elapsed = crate::runtime::executor::block_on(context.spawn(async {
let start = Instant::now();
crate::runtime::timer::delay_for(DELAY).await;
start.elapsed()
}))
.unwrap();
// Due to throttling, `Delay` may be fired earlier
assert!(elapsed + SLEEP_DURATION / 2 >= DELAY);
}
#[test]
#[should_panic]
fn block_on_from_context() {
gst::init().unwrap();
let context = Context::acquire("block_on_from_context", SLEEP_DURATION).unwrap();
// Panic: attempt to `runtime::executor::block_on` within a `Context` thread
let join_handle = context.spawn(async {
crate::runtime::executor::block_on(crate::runtime::timer::delay_for(DELAY));
});
// Panic: task has failed
// (enforced by `async-task`, see comment in `Future` impl for `JoinHandle`).
futures::executor::block_on(join_handle).unwrap_err();
}
#[test]
fn enter_context_from_scheduler() {
gst::init().unwrap();
let elapsed = crate::runtime::executor::block_on(async {
let context = Context::acquire("enter_context_from_executor", SLEEP_DURATION).unwrap();
let socket = context
.enter(|| {
let saddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 5002);
Async::<UdpSocket>::bind(saddr)
})
.unwrap();
let saddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 4002);
let bytes_sent = socket.send_to(&[0; 10], saddr).await.unwrap();
assert_eq!(bytes_sent, 10);
let (start, timer) =
context.enter(|| (Instant::now(), crate::runtime::timer::delay_for(DELAY)));
timer.await;
start.elapsed()
});
// Due to throttling, `Delay` may be fired earlier
assert!(elapsed + SLEEP_DURATION / 2 >= DELAY);
}
#[test]
fn enter_context_from_sync() {
gst::init().unwrap();
let context = Context::acquire("enter_context_from_sync", SLEEP_DURATION).unwrap();
let socket = context
.enter(|| {
let saddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 5003);
Async::<UdpSocket>::bind(saddr)
})
.unwrap();
let saddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 4003);
let bytes_sent = futures::executor::block_on(socket.send_to(&[0; 10], saddr)).unwrap();
assert_eq!(bytes_sent, 10);
let (start, timer) =
context.enter(|| (Instant::now(), crate::runtime::timer::delay_for(DELAY)));
let elapsed = crate::runtime::executor::block_on(async move {
timer.await;
start.elapsed()
});
// Due to throttling, `Delay` may be fired earlier
assert!(elapsed + SLEEP_DURATION / 2 >= DELAY);
}
}
| gst::warning!(
RUNTIME_CAT,
"Entering Context {} within {}",
self.name(),
cur.name()
);
}
| conditional_block |
context.rs | // Copyright (C) 2018-2020 Sebastian Dröge <sebastian@centricular.com>
// Copyright (C) 2019-2022 François Laignel <fengalin@free.fr>
//
// Take a look at the license at the top of the repository in the LICENSE file.
use futures::prelude::*;
use gst::glib::once_cell::sync::Lazy;
use std::collections::HashMap;
use std::io;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::{self, Poll};
use std::time::Duration;
use super::{Handle, HandleWeak, JoinHandle, Scheduler, SubTaskOutput, TaskId};
use crate::runtime::RUNTIME_CAT;
// We are bound to using `sync` for the `runtime` `Mutex`es. Attempts to use `async` `Mutex`es
// lead to the following issues:
//
// * `CONTEXTS`: can't `spawn` a `Future` when called from a `Context` thread via `ffi`.
// * `timers`: can't automatically `remove` the timer from `BinaryHeap` because `async drop`
// is not available.
// * `task_queues`: can't `add` a pending task when called from a `Context` thread via `ffi`.
//
// Also, we want to be able to `acquire` a `Context` outside of an `async` context.
// These `Mutex`es must be `lock`ed for a short period.
static CONTEXTS: Lazy<Mutex<HashMap<Arc<str>, ContextWeak>>> =
Lazy::new(|| Mutex::new(HashMap::new()));
/// Blocks on `future` in one way or another if possible.
///
/// IO & time related `Future`s must be handled within their own [`Context`].
/// Wait for the result using a [`JoinHandle`] or a `channel`.
///
/// If there's currently an active `Context` with a task, then the future is only queued up as a
/// pending sub task for that task.
///
/// Otherwise the current thread is blocking and the passed in future is executed.
///
/// Note that you must not pass any futures here that wait for the currently active task in one way
/// or another as this would deadlock!
#[track_caller]
pub fn block_on_or_add_sub_task<Fut>(future: Fut) -> Option<Fut::Output>
where
Fut: Future + Send + 'static,
Fut::Output: Send + 'static,
{
if let Some((cur_context, cur_task_id)) = Context::current_task() {
gst::debug!(
RUNTIME_CAT,
"Adding subtask to task {:?} on context {}",
cur_task_id,
cur_context.name()
);
let _ = cur_context.add_sub_task(cur_task_id, async move {
future.await;
Ok(())
});
return None;
}
// Not running in a Context thread so we can block
Some(block_on(future))
}
/// Blocks on `future`.
///
/// IO & time related `Future`s must be handled within their own [`Context`].
/// Wait for the result using a [`JoinHandle`] or a `channel`.
///
/// The current thread is blocking and the passed in future is executed.
///
/// # Panics
///
/// This function panics if called within a [`Context`] thread.
#[track_caller]
pub fn block_on<Fut>(future: Fut) -> Fut::Output
where
Fut: Future + Send + 'static,
Fut::Output: Send + 'static,
{
if let Some(context) = Context::current() {
let msg = format!("Attempt to block within Context {}", context.name());
gst::error!(RUNTIME_CAT, "{}", msg);
panic!("{}", msg);
}
// Not running in a Context thread so we can block
gst::debug!(RUNTIME_CAT, "Blocking on new dummy context");
Scheduler::block_on(future)
}
/// Yields execution back to the runtime.
#[inline]
pub fn yield_now() -> YieldNow {
YieldNow::default()
}
#[derive(Debug, Default)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct YieldNow(bool);
impl Future for YieldNow {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
if !self.0 {
self.0 = true;
cx.waker().wake_by_ref();
Poll::Pending
} else {
Poll::Ready(())
}
}
}
#[derive(Clone, Debug)]
pub struct ContextWeak(HandleWeak);
impl ContextWeak {
pub fn upgrade(&self) -> Option<Context> {
self.0.upgrade().map(Context)
}
}
/// A `threadshare` `runtime` `Context`.
///
/// The `Context` provides low-level asynchronous processing features to
/// multiplex task execution on a single thread.
///
/// `Element` implementations should use [`PadSrc`] and [`PadSink`] which
/// provide high-level features.
///
/// [`PadSrc`]: ../pad/struct.PadSrc.html
/// [`PadSink`]: ../pad/struct.PadSink.html
#[derive(Clone, Debug)]
pub struct Context(Handle);
impl PartialEq for Context {
fn eq(&self, other: &Self) -> bool {
self.0.eq(&other.0)
}
}
impl Eq for Context {}
impl Context {
pub fn acquire(context_name: &str, wait: Duration) -> Result<Self, io::Error> {
assert_ne!(context_name, Scheduler::DUMMY_NAME);
let mut contexts = CONTEXTS.lock().unwrap();
if let Some(context_weak) = contexts.get(context_name) {
if let Some(context) = context_weak.upgrade() {
gst::debug!(RUNTIME_CAT, "Joining Context '{}'", context.name());
return Ok(context);
}
}
let context = Context(Scheduler::start(context_name, wait));
contexts.insert(context_name.into(), context.downgrade());
gst::debug!(
RUNTIME_CAT,
"New Context '{}' throttling {:?}",
context.name(),
wait,
);
Ok(context)
}
pub fn downgrade(&self) -> ContextWeak {
ContextWeak(self.0.downgrade())
}
pub fn name(&self) -> &str {
self.0.context_name()
}
// FIXME this could be renamed as max_throttling
// but then, all elements should also change their
// wait variables and properties to max_throttling.
pub fn wait_duration(&self) -> Duration {
self.0.max_throttling()
}
/// Total duration the scheduler spent parked.
///
/// This is only useful for performance evaluation.
#[cfg(feature = "tuning")]
pub fn parked_duration(&self) -> Duration {
self.0.parked_duration()
}
/// Returns `true` if a `Context` is running on current thread.
pub fn is_context_thread() -> bool {
Scheduler::is_scheduler_thread()
}
/// Returns the `Context` running on current thread, if any.
pub fn current() -> Option<Context> {
Scheduler::current().map(Context)
}
/// Returns the `TaskId` running on current thread, if any.
pub fn current_task() -> Option<(Context, TaskId)> {
Scheduler::current().map(|scheduler| {
// Context users always operate on a Task
(Context(scheduler), TaskId::current().unwrap())
})
}
/// Executes the provided function relatively to this [`Context`].
///
/// Usefull to initialize i/o sources and timers from outside
/// of a [`Context`].
///
/// # Panic
///
/// This will block current thread and would panic if run
/// from the [`Context`].
#[track_caller]
pub fn enter<'a, F, O>(&'a self, f: F) -> O
where
F: FnOnce() -> O + Send + 'a,
O: Send + 'a,
{
| pub fn spawn<Fut>(&self, future: Fut) -> JoinHandle<Fut::Output>
where
Fut: Future + Send + 'static,
Fut::Output: Send + 'static,
{
self.0.spawn(future)
}
pub fn spawn_and_unpark<Fut>(&self, future: Fut) -> JoinHandle<Fut::Output>
where
Fut: Future + Send + 'static,
Fut::Output: Send + 'static,
{
self.0.spawn_and_unpark(future)
}
/// Forces the scheduler to unpark.
///
/// This is not needed by elements implementors as they are
/// supposed to call [`Self::spawn_and_unpark`] when needed.
/// However, it's useful for lower level implementations such as
/// `runtime::Task` so as to make sure the iteration loop yields
/// as soon as possible when a transition is requested.
pub(in crate::runtime) fn unpark(&self) {
self.0.unpark();
}
pub fn add_sub_task<T>(&self, task_id: TaskId, sub_task: T) -> Result<(), T>
where
T: Future<Output = SubTaskOutput> + Send + 'static,
{
self.0.add_sub_task(task_id, sub_task)
}
pub async fn drain_sub_tasks() -> SubTaskOutput {
let (ctx, task_id) = match Context::current_task() {
Some(task) => task,
None => return Ok(()),
};
ctx.0.drain_sub_tasks(task_id).await
}
}
impl From<Handle> for Context {
fn from(handle: Handle) -> Self {
Context(handle)
}
}
#[cfg(test)]
mod tests {
use futures::channel::mpsc;
use futures::lock::Mutex;
use futures::prelude::*;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::sync::Arc;
use std::time::{Duration, Instant};
use super::super::Scheduler;
use super::Context;
use crate::runtime::Async;
type Item = i32;
const SLEEP_DURATION_MS: u64 = 2;
const SLEEP_DURATION: Duration = Duration::from_millis(SLEEP_DURATION_MS);
const DELAY: Duration = Duration::from_millis(SLEEP_DURATION_MS * 10);
#[test]
fn block_on_task_id() {
gst::init().unwrap();
assert!(!Context::is_context_thread());
crate::runtime::executor::block_on(async {
let (ctx, task_id) = Context::current_task().unwrap();
assert_eq!(ctx.name(), Scheduler::DUMMY_NAME);
assert_eq!(task_id, super::TaskId(0));
let res = ctx.add_sub_task(task_id, async move {
let (_ctx, task_id) = Context::current_task().unwrap();
assert_eq!(task_id, super::TaskId(0));
Ok(())
});
assert!(res.is_ok());
assert!(Context::is_context_thread());
});
assert!(!Context::is_context_thread());
}
#[test]
fn block_on_timer() {
gst::init().unwrap();
let elapsed = crate::runtime::executor::block_on(async {
let now = Instant::now();
crate::runtime::timer::delay_for(DELAY).await;
now.elapsed()
});
assert!(elapsed >= DELAY);
}
#[test]
fn context_task_id() {
use super::TaskId;
gst::init().unwrap();
let context = Context::acquire("context_task_id", SLEEP_DURATION).unwrap();
let join_handle = context.spawn(async {
let (ctx, task_id) = Context::current_task().unwrap();
assert_eq!(ctx.name(), "context_task_id");
assert_eq!(task_id, TaskId(0));
});
futures::executor::block_on(join_handle).unwrap();
// TaskId(0) is vacant again
let ctx_weak = context.downgrade();
let join_handle = context.spawn(async move {
let (ctx, task_id) = Context::current_task().unwrap();
assert_eq!(task_id, TaskId(0));
let res = ctx.add_sub_task(task_id, async move {
let (_ctx, task_id) = Context::current_task().unwrap();
assert_eq!(task_id, TaskId(0));
Ok(())
});
assert!(res.is_ok());
ctx_weak
.upgrade()
.unwrap()
.spawn(async {
let (ctx, task_id) = Context::current_task().unwrap();
assert_eq!(task_id, TaskId(1));
let res = ctx.add_sub_task(task_id, async move {
let (_ctx, task_id) = Context::current_task().unwrap();
assert_eq!(task_id, TaskId(1));
Ok(())
});
assert!(res.is_ok());
assert!(Context::drain_sub_tasks().await.is_ok());
let (_ctx, task_id) = Context::current_task().unwrap();
assert_eq!(task_id, TaskId(1));
})
.await
.unwrap();
assert!(Context::drain_sub_tasks().await.is_ok());
let (_ctx, task_id) = Context::current_task().unwrap();
assert_eq!(task_id, TaskId(0));
});
futures::executor::block_on(join_handle).unwrap();
}
#[test]
fn drain_sub_tasks() {
// Setup
gst::init().unwrap();
let context = Context::acquire("drain_sub_tasks", SLEEP_DURATION).unwrap();
let join_handle = context.spawn(async {
let (sender, mut receiver) = mpsc::channel(1);
let sender: Arc<Mutex<mpsc::Sender<Item>>> = Arc::new(Mutex::new(sender));
let add_sub_task = move |item| {
let sender = sender.clone();
Context::current_task()
.ok_or(())
.and_then(|(ctx, task_id)| {
ctx.add_sub_task(task_id, async move {
sender
.lock()
.await
.send(item)
.await
.map_err(|_| gst::FlowError::Error)
})
.map_err(drop)
})
};
// Tests
// Drain empty queue
let drain_fut = Context::drain_sub_tasks();
drain_fut.await.unwrap();
// Add a subtask
add_sub_task(0).unwrap();
// Check that it was not executed yet
receiver.try_next().unwrap_err();
// Drain it now and check that it was executed
let drain_fut = Context::drain_sub_tasks();
drain_fut.await.unwrap();
assert_eq!(receiver.try_next().unwrap(), Some(0));
// Add another task and check that it's not executed yet
add_sub_task(1).unwrap();
receiver.try_next().unwrap_err();
// Return the receiver
receiver
});
let mut receiver = futures::executor::block_on(join_handle).unwrap();
// The last sub task should be simply dropped at this point
match receiver.try_next() {
Ok(None) | Err(_) => (),
other => panic!("Unexpected {other:?}"),
}
}
#[test]
fn block_on_from_sync() {
gst::init().unwrap();
let context = Context::acquire("block_on_from_sync", SLEEP_DURATION).unwrap();
let bytes_sent = crate::runtime::executor::block_on(context.spawn(async {
let saddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 5001);
let socket = Async::<UdpSocket>::bind(saddr).unwrap();
let saddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 4001);
socket.send_to(&[0; 10], saddr).await.unwrap()
}))
.unwrap();
assert_eq!(bytes_sent, 10);
let elapsed = crate::runtime::executor::block_on(context.spawn(async {
let start = Instant::now();
crate::runtime::timer::delay_for(DELAY).await;
start.elapsed()
}))
.unwrap();
// Due to throttling, `Delay` may be fired earlier
assert!(elapsed + SLEEP_DURATION / 2 >= DELAY);
}
#[test]
#[should_panic]
fn block_on_from_context() {
gst::init().unwrap();
let context = Context::acquire("block_on_from_context", SLEEP_DURATION).unwrap();
// Panic: attempt to `runtime::executor::block_on` within a `Context` thread
let join_handle = context.spawn(async {
crate::runtime::executor::block_on(crate::runtime::timer::delay_for(DELAY));
});
// Panic: task has failed
// (enforced by `async-task`, see comment in `Future` impl for `JoinHandle`).
futures::executor::block_on(join_handle).unwrap_err();
}
#[test]
fn enter_context_from_scheduler() {
gst::init().unwrap();
let elapsed = crate::runtime::executor::block_on(async {
let context = Context::acquire("enter_context_from_executor", SLEEP_DURATION).unwrap();
let socket = context
.enter(|| {
let saddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 5002);
Async::<UdpSocket>::bind(saddr)
})
.unwrap();
let saddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 4002);
let bytes_sent = socket.send_to(&[0; 10], saddr).await.unwrap();
assert_eq!(bytes_sent, 10);
let (start, timer) =
context.enter(|| (Instant::now(), crate::runtime::timer::delay_for(DELAY)));
timer.await;
start.elapsed()
});
// Due to throttling, `Delay` may be fired earlier
assert!(elapsed + SLEEP_DURATION / 2 >= DELAY);
}
#[test]
fn enter_context_from_sync() {
gst::init().unwrap();
let context = Context::acquire("enter_context_from_sync", SLEEP_DURATION).unwrap();
let socket = context
.enter(|| {
let saddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 5003);
Async::<UdpSocket>::bind(saddr)
})
.unwrap();
let saddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 4003);
let bytes_sent = futures::executor::block_on(socket.send_to(&[0; 10], saddr)).unwrap();
assert_eq!(bytes_sent, 10);
let (start, timer) =
context.enter(|| (Instant::now(), crate::runtime::timer::delay_for(DELAY)));
let elapsed = crate::runtime::executor::block_on(async move {
timer.await;
start.elapsed()
});
// Due to throttling, `Delay` may be fired earlier
assert!(elapsed + SLEEP_DURATION / 2 >= DELAY);
}
}
| if let Some(cur) = Context::current().as_ref() {
if cur == self {
panic!(
"Attempt to enter Context {} within itself, this would deadlock",
self.name()
);
} else {
gst::warning!(
RUNTIME_CAT,
"Entering Context {} within {}",
self.name(),
cur.name()
);
}
} else {
gst::debug!(RUNTIME_CAT, "Entering Context {}", self.name());
}
self.0.enter(f)
}
| identifier_body |
context.rs | // Copyright (C) 2018-2020 Sebastian Dröge <sebastian@centricular.com>
// Copyright (C) 2019-2022 François Laignel <fengalin@free.fr>
//
// Take a look at the license at the top of the repository in the LICENSE file.
use futures::prelude::*;
use gst::glib::once_cell::sync::Lazy;
use std::collections::HashMap;
use std::io;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::{self, Poll};
use std::time::Duration;
use super::{Handle, HandleWeak, JoinHandle, Scheduler, SubTaskOutput, TaskId};
use crate::runtime::RUNTIME_CAT;
// We are bound to using `sync` for the `runtime` `Mutex`es. Attempts to use `async` `Mutex`es
// lead to the following issues:
//
// * `CONTEXTS`: can't `spawn` a `Future` when called from a `Context` thread via `ffi`.
// * `timers`: can't automatically `remove` the timer from `BinaryHeap` because `async drop`
// is not available.
// * `task_queues`: can't `add` a pending task when called from a `Context` thread via `ffi`.
//
// Also, we want to be able to `acquire` a `Context` outside of an `async` context.
// These `Mutex`es must be `lock`ed for a short period.
static CONTEXTS: Lazy<Mutex<HashMap<Arc<str>, ContextWeak>>> =
Lazy::new(|| Mutex::new(HashMap::new()));
/// Blocks on `future` in one way or another if possible.
///
/// IO & time related `Future`s must be handled within their own [`Context`].
/// Wait for the result using a [`JoinHandle`] or a `channel`.
///
/// If there's currently an active `Context` with a task, then the future is only queued up as a
/// pending sub task for that task.
///
/// Otherwise the current thread is blocking and the passed in future is executed.
///
/// Note that you must not pass any futures here that wait for the currently active task in one way
/// or another as this would deadlock!
#[track_caller]
pub fn block_on_or_add_sub_task<Fut>(future: Fut) -> Option<Fut::Output>
where
Fut: Future + Send + 'static,
Fut::Output: Send + 'static,
{
if let Some((cur_context, cur_task_id)) = Context::current_task() {
gst::debug!(
RUNTIME_CAT,
"Adding subtask to task {:?} on context {}",
cur_task_id,
cur_context.name()
);
let _ = cur_context.add_sub_task(cur_task_id, async move {
future.await;
Ok(())
});
return None;
}
// Not running in a Context thread so we can block
Some(block_on(future))
}
/// Blocks on `future`.
///
/// IO & time related `Future`s must be handled within their own [`Context`].
/// Wait for the result using a [`JoinHandle`] or a `channel`.
///
/// The current thread is blocking and the passed in future is executed.
///
/// # Panics
///
/// This function panics if called within a [`Context`] thread.
#[track_caller]
pub fn block_on<Fut>(future: Fut) -> Fut::Output
where
Fut: Future + Send + 'static,
Fut::Output: Send + 'static,
{
if let Some(context) = Context::current() {
let msg = format!("Attempt to block within Context {}", context.name());
gst::error!(RUNTIME_CAT, "{}", msg);
panic!("{}", msg);
}
// Not running in a Context thread so we can block
gst::debug!(RUNTIME_CAT, "Blocking on new dummy context");
Scheduler::block_on(future)
}
/// Yields execution back to the runtime.
#[inline]
pub fn yield_now() -> YieldNow {
YieldNow::default()
}
#[derive(Debug, Default)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct YieldNow(bool);
impl Future for YieldNow {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
if !self.0 {
self.0 = true;
cx.waker().wake_by_ref();
Poll::Pending
} else {
Poll::Ready(())
}
}
}
#[derive(Clone, Debug)]
pub struct ContextWeak(HandleWeak);
impl ContextWeak {
pub fn upgrade(&self) -> Option<Context> {
self.0.upgrade().map(Context)
}
}
/// A `threadshare` `runtime` `Context`.
///
/// The `Context` provides low-level asynchronous processing features to
/// multiplex task execution on a single thread.
///
/// `Element` implementations should use [`PadSrc`] and [`PadSink`] which
/// provide high-level features.
///
/// [`PadSrc`]: ../pad/struct.PadSrc.html
/// [`PadSink`]: ../pad/struct.PadSink.html
#[derive(Clone, Debug)]
pub struct Context(Handle);
impl PartialEq for Context {
fn eq(&self, other: &Self) -> bool {
self.0.eq(&other.0)
}
}
impl Eq for Context {}
impl Context {
pub fn acquire(context_name: &str, wait: Duration) -> Result<Self, io::Error> {
assert_ne!(context_name, Scheduler::DUMMY_NAME);
let mut contexts = CONTEXTS.lock().unwrap();
if let Some(context_weak) = contexts.get(context_name) {
if let Some(context) = context_weak.upgrade() {
gst::debug!(RUNTIME_CAT, "Joining Context '{}'", context.name());
return Ok(context);
}
}
let context = Context(Scheduler::start(context_name, wait));
contexts.insert(context_name.into(), context.downgrade());
gst::debug!(
RUNTIME_CAT,
"New Context '{}' throttling {:?}",
context.name(),
wait,
);
Ok(context)
}
pub fn downgrade(&self) -> ContextWeak {
ContextWeak(self.0.downgrade())
}
pub fn name(&self) -> &str {
self.0.context_name()
}
// FIXME this could be renamed as max_throttling
// but then, all elements should also change their
// wait variables and properties to max_throttling.
pub fn wait_duration(&self) -> Duration {
self.0.max_throttling()
}
/// Total duration the scheduler spent parked.
///
/// This is only useful for performance evaluation.
#[cfg(feature = "tuning")]
pub fn parked_duration(&self) -> Duration {
self.0.parked_duration()
}
/// Returns `true` if a `Context` is running on current thread.
pub fn is_context_thread() -> bool {
Scheduler::is_scheduler_thread()
}
/// Returns the `Context` running on current thread, if any.
pub fn current() -> Option<Context> {
Scheduler::current().map(Context)
}
/// Returns the `TaskId` running on current thread, if any.
pub fn current_task() -> Option<(Context, TaskId)> {
Scheduler::current().map(|scheduler| {
// Context users always operate on a Task
(Context(scheduler), TaskId::current().unwrap())
})
}
/// Executes the provided function relatively to this [`Context`].
///
/// Usefull to initialize i/o sources and timers from outside
/// of a [`Context`].
///
/// # Panic
///
/// This will block current thread and would panic if run
/// from the [`Context`].
#[track_caller]
pub fn enter<'a, F, O>(&'a self, f: F) -> O
where
F: FnOnce() -> O + Send + 'a,
O: Send + 'a,
{
if let Some(cur) = Context::current().as_ref() {
if cur == self {
panic!(
"Attempt to enter Context {} within itself, this would deadlock",
self.name()
);
} else {
gst::warning!(
RUNTIME_CAT,
"Entering Context {} within {}",
self.name(),
cur.name()
);
}
} else {
gst::debug!(RUNTIME_CAT, "Entering Context {}", self.name());
}
self.0.enter(f)
}
pub fn spawn<Fut>(&self, future: Fut) -> JoinHandle<Fut::Output>
where
Fut: Future + Send + 'static,
Fut::Output: Send + 'static,
{
self.0.spawn(future)
}
pub fn spawn_and_unpark<Fut>(&self, future: Fut) -> JoinHandle<Fut::Output>
where
Fut: Future + Send + 'static,
Fut::Output: Send + 'static,
{
self.0.spawn_and_unpark(future)
}
/// Forces the scheduler to unpark.
///
/// This is not needed by elements implementors as they are
/// supposed to call [`Self::spawn_and_unpark`] when needed.
/// However, it's useful for lower level implementations such as
/// `runtime::Task` so as to make sure the iteration loop yields
/// as soon as possible when a transition is requested.
pub(in crate::runtime) fn unpark(&self) {
self.0.unpark();
}
pub fn add_sub_task<T>(&self, task_id: TaskId, sub_task: T) -> Result<(), T>
where
T: Future<Output = SubTaskOutput> + Send + 'static,
{
self.0.add_sub_task(task_id, sub_task)
}
pub async fn drain_sub_tasks() -> SubTaskOutput {
let (ctx, task_id) = match Context::current_task() {
Some(task) => task,
None => return Ok(()),
};
ctx.0.drain_sub_tasks(task_id).await
}
}
impl From<Handle> for Context {
fn fr | andle: Handle) -> Self {
Context(handle)
}
}
#[cfg(test)]
mod tests {
use futures::channel::mpsc;
use futures::lock::Mutex;
use futures::prelude::*;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::sync::Arc;
use std::time::{Duration, Instant};
use super::super::Scheduler;
use super::Context;
use crate::runtime::Async;
type Item = i32;
const SLEEP_DURATION_MS: u64 = 2;
const SLEEP_DURATION: Duration = Duration::from_millis(SLEEP_DURATION_MS);
const DELAY: Duration = Duration::from_millis(SLEEP_DURATION_MS * 10);
#[test]
fn block_on_task_id() {
gst::init().unwrap();
assert!(!Context::is_context_thread());
crate::runtime::executor::block_on(async {
let (ctx, task_id) = Context::current_task().unwrap();
assert_eq!(ctx.name(), Scheduler::DUMMY_NAME);
assert_eq!(task_id, super::TaskId(0));
let res = ctx.add_sub_task(task_id, async move {
let (_ctx, task_id) = Context::current_task().unwrap();
assert_eq!(task_id, super::TaskId(0));
Ok(())
});
assert!(res.is_ok());
assert!(Context::is_context_thread());
});
assert!(!Context::is_context_thread());
}
#[test]
fn block_on_timer() {
gst::init().unwrap();
let elapsed = crate::runtime::executor::block_on(async {
let now = Instant::now();
crate::runtime::timer::delay_for(DELAY).await;
now.elapsed()
});
assert!(elapsed >= DELAY);
}
#[test]
fn context_task_id() {
use super::TaskId;
gst::init().unwrap();
let context = Context::acquire("context_task_id", SLEEP_DURATION).unwrap();
let join_handle = context.spawn(async {
let (ctx, task_id) = Context::current_task().unwrap();
assert_eq!(ctx.name(), "context_task_id");
assert_eq!(task_id, TaskId(0));
});
futures::executor::block_on(join_handle).unwrap();
// TaskId(0) is vacant again
let ctx_weak = context.downgrade();
let join_handle = context.spawn(async move {
let (ctx, task_id) = Context::current_task().unwrap();
assert_eq!(task_id, TaskId(0));
let res = ctx.add_sub_task(task_id, async move {
let (_ctx, task_id) = Context::current_task().unwrap();
assert_eq!(task_id, TaskId(0));
Ok(())
});
assert!(res.is_ok());
ctx_weak
.upgrade()
.unwrap()
.spawn(async {
let (ctx, task_id) = Context::current_task().unwrap();
assert_eq!(task_id, TaskId(1));
let res = ctx.add_sub_task(task_id, async move {
let (_ctx, task_id) = Context::current_task().unwrap();
assert_eq!(task_id, TaskId(1));
Ok(())
});
assert!(res.is_ok());
assert!(Context::drain_sub_tasks().await.is_ok());
let (_ctx, task_id) = Context::current_task().unwrap();
assert_eq!(task_id, TaskId(1));
})
.await
.unwrap();
assert!(Context::drain_sub_tasks().await.is_ok());
let (_ctx, task_id) = Context::current_task().unwrap();
assert_eq!(task_id, TaskId(0));
});
futures::executor::block_on(join_handle).unwrap();
}
#[test]
fn drain_sub_tasks() {
// Setup
gst::init().unwrap();
let context = Context::acquire("drain_sub_tasks", SLEEP_DURATION).unwrap();
let join_handle = context.spawn(async {
let (sender, mut receiver) = mpsc::channel(1);
let sender: Arc<Mutex<mpsc::Sender<Item>>> = Arc::new(Mutex::new(sender));
let add_sub_task = move |item| {
let sender = sender.clone();
Context::current_task()
.ok_or(())
.and_then(|(ctx, task_id)| {
ctx.add_sub_task(task_id, async move {
sender
.lock()
.await
.send(item)
.await
.map_err(|_| gst::FlowError::Error)
})
.map_err(drop)
})
};
// Tests
// Drain empty queue
let drain_fut = Context::drain_sub_tasks();
drain_fut.await.unwrap();
// Add a subtask
add_sub_task(0).unwrap();
// Check that it was not executed yet
receiver.try_next().unwrap_err();
// Drain it now and check that it was executed
let drain_fut = Context::drain_sub_tasks();
drain_fut.await.unwrap();
assert_eq!(receiver.try_next().unwrap(), Some(0));
// Add another task and check that it's not executed yet
add_sub_task(1).unwrap();
receiver.try_next().unwrap_err();
// Return the receiver
receiver
});
let mut receiver = futures::executor::block_on(join_handle).unwrap();
// The last sub task should be simply dropped at this point
match receiver.try_next() {
Ok(None) | Err(_) => (),
other => panic!("Unexpected {other:?}"),
}
}
#[test]
fn block_on_from_sync() {
gst::init().unwrap();
let context = Context::acquire("block_on_from_sync", SLEEP_DURATION).unwrap();
let bytes_sent = crate::runtime::executor::block_on(context.spawn(async {
let saddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 5001);
let socket = Async::<UdpSocket>::bind(saddr).unwrap();
let saddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 4001);
socket.send_to(&[0; 10], saddr).await.unwrap()
}))
.unwrap();
assert_eq!(bytes_sent, 10);
let elapsed = crate::runtime::executor::block_on(context.spawn(async {
let start = Instant::now();
crate::runtime::timer::delay_for(DELAY).await;
start.elapsed()
}))
.unwrap();
// Due to throttling, `Delay` may be fired earlier
assert!(elapsed + SLEEP_DURATION / 2 >= DELAY);
}
#[test]
#[should_panic]
fn block_on_from_context() {
gst::init().unwrap();
let context = Context::acquire("block_on_from_context", SLEEP_DURATION).unwrap();
// Panic: attempt to `runtime::executor::block_on` within a `Context` thread
let join_handle = context.spawn(async {
crate::runtime::executor::block_on(crate::runtime::timer::delay_for(DELAY));
});
// Panic: task has failed
// (enforced by `async-task`, see comment in `Future` impl for `JoinHandle`).
futures::executor::block_on(join_handle).unwrap_err();
}
#[test]
fn enter_context_from_scheduler() {
gst::init().unwrap();
let elapsed = crate::runtime::executor::block_on(async {
let context = Context::acquire("enter_context_from_executor", SLEEP_DURATION).unwrap();
let socket = context
.enter(|| {
let saddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 5002);
Async::<UdpSocket>::bind(saddr)
})
.unwrap();
let saddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 4002);
let bytes_sent = socket.send_to(&[0; 10], saddr).await.unwrap();
assert_eq!(bytes_sent, 10);
let (start, timer) =
context.enter(|| (Instant::now(), crate::runtime::timer::delay_for(DELAY)));
timer.await;
start.elapsed()
});
// Due to throttling, `Delay` may be fired earlier
assert!(elapsed + SLEEP_DURATION / 2 >= DELAY);
}
#[test]
fn enter_context_from_sync() {
gst::init().unwrap();
let context = Context::acquire("enter_context_from_sync", SLEEP_DURATION).unwrap();
let socket = context
.enter(|| {
let saddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 5003);
Async::<UdpSocket>::bind(saddr)
})
.unwrap();
let saddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 4003);
let bytes_sent = futures::executor::block_on(socket.send_to(&[0; 10], saddr)).unwrap();
assert_eq!(bytes_sent, 10);
let (start, timer) =
context.enter(|| (Instant::now(), crate::runtime::timer::delay_for(DELAY)));
let elapsed = crate::runtime::executor::block_on(async move {
timer.await;
start.elapsed()
});
// Due to throttling, `Delay` may be fired earlier
assert!(elapsed + SLEEP_DURATION / 2 >= DELAY);
}
}
| om(h | identifier_name |
core.go | // Package natyla ...
// Natyla - FullStack API/Cache/Store
//
// 2014 - Fernando Scasserra - twitter: @fersca.
//
// Natyla is a persistance cache system written in golang that performs in constant time.
// It keeps a MAP to store the object internally, and a Double Linked list to purge the LRU elements.
//
// LRU updates are done in backgrounds gorutines.
// LRU and MAP modifications are performed through channels in order to keep them synchronized.
// Bytes stored are counted in order to limit the amount of memory used by the application.
//
// Core Module
// Manage the internal Memory Access, LRU, concurrency and Swapping
//
package natyla
import (
"container/list"
"encoding/json"
"errors"
"fmt"
"math/rand"
"runtime"
"strings"
"sync/atomic"
"time"
)
//Create the list to support the LRU List
var lruList *list.List
//Max byte in memory (Key + Data), today set to 100KB
var maxMemBytes int64
var memBytes int64
//const pointerLen int = 4+8 //Bytes of pointer in 32bits machines plus int64 for the key of element in hashmemBytes
var cacheNotFound = true
//Channel to sync the List, map
var lisChan chan int
//LRUChan to sync the LRU purge
var LRUChan chan int
//chennel to acces to the collection map
var collectionChan chan int
//Print information
const enablePrint bool = true
//Create the map that stores the list of collectionsge
var collections map[string]collectionChannel
var config map[string]interface{}
const readWrite = "read-write"
// Init the system variables
func init() {
//Welcome Message
fmt.Println("------------------------------------------------------------------")
fmt.Println("Starting Natyla...")
fmt.Println("Version: 1.02")
//Set the thread quantity based on the number of CPU's
coreNum := runtime.NumCPU()
fmt.Println("Number of cores: ", coreNum)
//read the config file
readConfig()
//create the data directory
createDataDir()
//set max memory form config
maxMemBytes, _ = config["memory"].(json.Number).Int64()
fmt.Println("Max memory defined as: ", maxMemBytes/1024/1024, " Mbytes")
runtime.GOMAXPROCS(coreNum)
//Create a new doble-linked list to act as LRU
lruList = list.New()
//Create the channels
lisChan = make(chan int, 1)
LRUChan = make(chan int, 1)
collectionChan = make(chan int, 1)
collections = make(map[string]collectionChannel)
//Read collections from disk
nRead := readAllFromDisk()
fmt.Println("Read", nRead, "entries from disk")
fmt.Println("Ready, API Listening on http://localhost:8080, Telnet on port 8081")
fmt.Println("------------------------------------------------------------------")
}
// Start the webserver
func Start() {
//Start the console
go console()
//Start the rest API
restAPI()
}
// Convert a Json string to a map
func convertJSONToMap(valor string) (map[string]interface{}, error) {
//Create the Json element
d := json.NewDecoder(strings.NewReader(valor))
d.UseNumber()
var f interface{}
err := d.Decode(&f)
if err != nil {
return nil, err
}
//transform it to a map
m := f.(map[string]interface{})
return m, nil
}
//Create a token for the specified user
func createToken(value string) ([]byte, error) {
m, err := convertJSONToMap(value)
if err != nil {
return nil, err
}
if m["scope"] == nil || !(m["scope"] == "read-only" || m["scope"] == readWrite) {
return nil, errors.New("Invalid scope, try with read-only or read-write")
}
now := time.Now().UnixNano()
r := rand.New(rand.NewSource(now))
id := r.Int63()
m["id"] = id
b, err := json.Marshal(m)
return b, err
}
// Create the element in the collection
func createElement(col string, id string, valor string, saveToDisk bool, deleted bool) (string, error) {
//create the list element
var elemento *list.Element
b := []byte(valor)
if deleted == false {
//Create the Json element
m, err := convertJSONToMap(valor)
if err != nil {
return "", err
}
//Checks the data tye of the ID field
switch m["id"].(type) {
case json.Number:
//id = strconv.FormatFloat(m["id"].(float64),'f',-1,64)
id = m["id"].(json.Number).String()
case string:
id = m["id"].(string)
default:
return "", errors.New("invalid_id")
}
//Add the value to the list and get the pointer to the node
n := &node{m, false, false}
lisChan <- 1
elemento = lruList.PushFront(n)
<-lisChan
} else {
//if not found cache is disabled
if cacheNotFound == false {
return id, nil
}
fmt.Println("Creating node as deleted: ", col, id)
//create the node as deleted
n := &node{nil, false, true}
elemento = &list.Element{Value: n}
}
//get the collection-channel relation
cc := collections[col]
createDir := false
if cc.Mapa == nil {
fmt.Println("Creating new collection: ", col)
//Create the new map and the new channel
var newMapa map[string]*list.Element
var newMapChann chan int
newMapa = make(map[string]*list.Element)
newMapChann = make(chan int, 1)
newCC := collectionChannel{newMapa, newMapChann}
newCC.Mapa[id] = elemento
//The collection doesn't exist, create one
collectionChan <- 1
collections[col] = newCC
<-collectionChan
createDir = true
} else {
fmt.Println("Using collection: ", col)
//Save the node in the map
cc.Canal <- 1
cc.Mapa[id] = elemento
<-cc.Canal
}
//if we are creating a deleted node, do not save it to disk
if deleted == false {
//Increase the memory counter in a diffetet gorutinie, save to disk and purge LRU
go func() {
//Increments the memory counter (Key + Value in LRU + len of col name, + Key in MAP)
fmt.Println("Suma 1: ", int64(len(b)), " --> ", string(b))
atomic.AddInt64(&memBytes, int64(len(b)))
if enablePrint {
fmt.Println("Inc Bytes: ", memBytes)
}
//Save the Json to disk, if it is not already on disk
if saveToDisk == true {
saveJSONToDisk(createDir, col, id, valor)
}
//Purge de LRU
purgeLRU()
}()
}
return id, nil
}
// Get the element from the Map and push the element to the first position of the LRU-List
func getElement(col string, id string) ([]byte, error) {
cc := collections[col]
//Get the element from the map
elemento := cc.Mapa[id]
//checks if the element exists in the cache
if elemento == nil {
fmt.Println("Elemento not in memory, reading disk, ID: ", id)
//read the disk
content, er := readJSONFromDisK(col, id)
//if file doesnt exists cache the not found and return nil
if er != nil {
//create the element and set it as deleted
createElement(col, id, "", false, true) // set as deleted and do not save to disk
} else {
//Create the element from the disk content
_, err := createElement(col, id, string(content), false, false) // set to not save to disk
if err != nil {
return nil, errors.New("Invalid Disk JSON")
}
}
//call get element again (recursively)
return getElement(col, id)
}
//If the Not-found is cached, return false directely
if elemento.Value.(*node).Deleted == true {
fmt.Println("Not-Found cached detected on getting, ID: ", id)
return nil, nil
}
//Move the element to the front of the LRU-List using a gorutine
go moveFront(elemento)
//Check if the element is mark as swapped
if elemento.Value.(*node).Swap == true {
//Read the swapped json from disk
b, _ := readJSONFromDisK(col, id)
//TODO: read if there was an error and do something...
m, err := convertJSONToMap(string(b))
if err != nil {
return nil, err
}
//save the map in the node, mark it as un-swapped
elemento.Value = &node{m, false, false}
//increase de memory counter
fmt.Println("Suma 2: ", int64(len(b)), " --> ", string(b))
atomic.AddInt64(&memBytes, int64(len(b)))
//as we have load content from disk, we have to purge LRU
go purgeLRU()
}
//Return the element
b, err := json.Marshal(elemento.Value.(*node).V)
return b, err
}
// Get the number of elements
func getElements(col string) ([]byte, error) {
cc := collections[col]
b, err := json.Marshal(len(cc.Mapa))
return b, err
}
// Purge the LRU List deleting the last element
func purgeLRU() {
LRUChan <- 1
//Checks the memory limit and decrease it if it's necessary
for atomic.LoadInt64(&memBytes) > maxMemBytes {
//sync this procedure
lisChan <- 1
//Print Message
fmt.Println(memBytes, " - ", maxMemBytes, "dif: ", memBytes-maxMemBytes)
fmt.Println("Max memory reached! swapping", memBytes)
fmt.Println("LRU Elements: ", lruList.Len())
//Get the last element and remove it. Sync is not needed because nothing
//happens if the element is moved in the middle of this rutine, at last it will be removed
lastElement := lruList.Back()
if lastElement == nil {
fmt.Println("Empty LRU")
//unsync
<-lisChan
return
}
//Remove the element from the LRU
deleteElementFromLRU(lastElement)
//Mark the node as swapped
lastElement.Value.(*node).Deleted = false
lastElement.Value.(*node).Swap = true
lastElement.Value.(*node).V = nil
//Print a purge
if enablePrint {
fmt.Println("Purge Done: ", memBytes)
}
//unsync
<-lisChan
}
<-LRUChan | }
// Move the element to the front of the LRU, because it was readed or updated
func moveFront(elemento *list.Element) {
//Move the element
lisChan <- 1
lruList.MoveToFront(elemento)
<-lisChan
if enablePrint {
fmt.Println("LRU Updated")
}
}
// Delete the element from the disk, and if its enable, cache the not-found
func deleteElement(col string, clave string) bool {
//Get the element collection
cc := collections[col]
//Get the element from the map
elemento := cc.Mapa[clave]
//checks if the element exists in the cache
if elemento != nil {
//if it is marked as deleted, return a not-found directly without checking the disk
if elemento.Value.(*node).Deleted == true {
fmt.Println("Not-Found cached detected on deleting, ID: ", clave)
return false
}
//the node was not previously deleted....so exists in the disk
//if not-found cache is enabled, mark the element as deleted
if cacheNotFound == true {
//created a new node and asign it to the element
elemento.Value = &node{nil, false, true}
fmt.Println("Caching Not-found for, ID: ", clave)
} else {
//if it is not enabled, delete the element from the memory
cc.Canal <- 1
delete(cc.Mapa, clave)
<-cc.Canal
}
//In both cases, remove the element from the list and from disk in a separated gorutine
go func() {
lisChan <- 1
deleteElementFromLRU(elemento)
<-lisChan
deleteJSONFromDisk(col, clave)
//Print message
if enablePrint {
fmt.Println("Delete successfull, ID: ", clave)
}
}()
} else {
fmt.Println("Delete element not in memory, ID: ", clave)
//Create a new element with the key in the cache, to save a not-found if it is enable
createElement(col, clave, "", false, true)
//Check is the element exist in the disk
err := deleteJSONFromDisk(col, clave)
//if exists, direcly remove it and return true
//if it not exist return false (because it was not found)
if err == nil {
return true
}
return false
}
return true
}
// Delete the element from de LRU and decrement the counters
func deleteElementFromLRU(elemento *list.Element) {
//Decrement the byte counter, decrease the Key * 2 + Value
n := (*elemento).Value.(*node)
b, _ := json.Marshal(n.V)
fmt.Println("b: ", string(b))
fmt.Println("Resta: ", int64(len(b)))
atomic.AddInt64(&memBytes, -int64(len(b)))
//Delete the element in the LRU List
lruList.Remove(elemento)
fmt.Println("Dec Bytes: ", len(b))
} | random_line_split | |
core.go | // Package natyla ...
// Natyla - FullStack API/Cache/Store
//
// 2014 - Fernando Scasserra - twitter: @fersca.
//
// Natyla is a persistance cache system written in golang that performs in constant time.
// It keeps a MAP to store the object internally, and a Double Linked list to purge the LRU elements.
//
// LRU updates are done in backgrounds gorutines.
// LRU and MAP modifications are performed through channels in order to keep them synchronized.
// Bytes stored are counted in order to limit the amount of memory used by the application.
//
// Core Module
// Manage the internal Memory Access, LRU, concurrency and Swapping
//
package natyla
import (
"container/list"
"encoding/json"
"errors"
"fmt"
"math/rand"
"runtime"
"strings"
"sync/atomic"
"time"
)
//Create the list to support the LRU List
var lruList *list.List
//Max byte in memory (Key + Data), today set to 100KB
var maxMemBytes int64
var memBytes int64
//const pointerLen int = 4+8 //Bytes of pointer in 32bits machines plus int64 for the key of element in hashmemBytes
var cacheNotFound = true
//Channel to sync the List, map
var lisChan chan int
//LRUChan to sync the LRU purge
var LRUChan chan int
//chennel to acces to the collection map
var collectionChan chan int
//Print information
const enablePrint bool = true
//Create the map that stores the list of collectionsge
var collections map[string]collectionChannel
var config map[string]interface{}
const readWrite = "read-write"
// Init the system variables
func init() {
//Welcome Message
fmt.Println("------------------------------------------------------------------")
fmt.Println("Starting Natyla...")
fmt.Println("Version: 1.02")
//Set the thread quantity based on the number of CPU's
coreNum := runtime.NumCPU()
fmt.Println("Number of cores: ", coreNum)
//read the config file
readConfig()
//create the data directory
createDataDir()
//set max memory form config
maxMemBytes, _ = config["memory"].(json.Number).Int64()
fmt.Println("Max memory defined as: ", maxMemBytes/1024/1024, " Mbytes")
runtime.GOMAXPROCS(coreNum)
//Create a new doble-linked list to act as LRU
lruList = list.New()
//Create the channels
lisChan = make(chan int, 1)
LRUChan = make(chan int, 1)
collectionChan = make(chan int, 1)
collections = make(map[string]collectionChannel)
//Read collections from disk
nRead := readAllFromDisk()
fmt.Println("Read", nRead, "entries from disk")
fmt.Println("Ready, API Listening on http://localhost:8080, Telnet on port 8081")
fmt.Println("------------------------------------------------------------------")
}
// Start the webserver
func Start() {
//Start the console
go console()
//Start the rest API
restAPI()
}
// Convert a Json string to a map
func | (valor string) (map[string]interface{}, error) {
//Create the Json element
d := json.NewDecoder(strings.NewReader(valor))
d.UseNumber()
var f interface{}
err := d.Decode(&f)
if err != nil {
return nil, err
}
//transform it to a map
m := f.(map[string]interface{})
return m, nil
}
//Create a token for the specified user
func createToken(value string) ([]byte, error) {
m, err := convertJSONToMap(value)
if err != nil {
return nil, err
}
if m["scope"] == nil || !(m["scope"] == "read-only" || m["scope"] == readWrite) {
return nil, errors.New("Invalid scope, try with read-only or read-write")
}
now := time.Now().UnixNano()
r := rand.New(rand.NewSource(now))
id := r.Int63()
m["id"] = id
b, err := json.Marshal(m)
return b, err
}
// Create the element in the collection
func createElement(col string, id string, valor string, saveToDisk bool, deleted bool) (string, error) {
//create the list element
var elemento *list.Element
b := []byte(valor)
if deleted == false {
//Create the Json element
m, err := convertJSONToMap(valor)
if err != nil {
return "", err
}
//Checks the data tye of the ID field
switch m["id"].(type) {
case json.Number:
//id = strconv.FormatFloat(m["id"].(float64),'f',-1,64)
id = m["id"].(json.Number).String()
case string:
id = m["id"].(string)
default:
return "", errors.New("invalid_id")
}
//Add the value to the list and get the pointer to the node
n := &node{m, false, false}
lisChan <- 1
elemento = lruList.PushFront(n)
<-lisChan
} else {
//if not found cache is disabled
if cacheNotFound == false {
return id, nil
}
fmt.Println("Creating node as deleted: ", col, id)
//create the node as deleted
n := &node{nil, false, true}
elemento = &list.Element{Value: n}
}
//get the collection-channel relation
cc := collections[col]
createDir := false
if cc.Mapa == nil {
fmt.Println("Creating new collection: ", col)
//Create the new map and the new channel
var newMapa map[string]*list.Element
var newMapChann chan int
newMapa = make(map[string]*list.Element)
newMapChann = make(chan int, 1)
newCC := collectionChannel{newMapa, newMapChann}
newCC.Mapa[id] = elemento
//The collection doesn't exist, create one
collectionChan <- 1
collections[col] = newCC
<-collectionChan
createDir = true
} else {
fmt.Println("Using collection: ", col)
//Save the node in the map
cc.Canal <- 1
cc.Mapa[id] = elemento
<-cc.Canal
}
//if we are creating a deleted node, do not save it to disk
if deleted == false {
//Increase the memory counter in a diffetet gorutinie, save to disk and purge LRU
go func() {
//Increments the memory counter (Key + Value in LRU + len of col name, + Key in MAP)
fmt.Println("Suma 1: ", int64(len(b)), " --> ", string(b))
atomic.AddInt64(&memBytes, int64(len(b)))
if enablePrint {
fmt.Println("Inc Bytes: ", memBytes)
}
//Save the Json to disk, if it is not already on disk
if saveToDisk == true {
saveJSONToDisk(createDir, col, id, valor)
}
//Purge de LRU
purgeLRU()
}()
}
return id, nil
}
// Get the element from the Map and push the element to the first position of the LRU-List
func getElement(col string, id string) ([]byte, error) {
cc := collections[col]
//Get the element from the map
elemento := cc.Mapa[id]
//checks if the element exists in the cache
if elemento == nil {
fmt.Println("Elemento not in memory, reading disk, ID: ", id)
//read the disk
content, er := readJSONFromDisK(col, id)
//if file doesnt exists cache the not found and return nil
if er != nil {
//create the element and set it as deleted
createElement(col, id, "", false, true) // set as deleted and do not save to disk
} else {
//Create the element from the disk content
_, err := createElement(col, id, string(content), false, false) // set to not save to disk
if err != nil {
return nil, errors.New("Invalid Disk JSON")
}
}
//call get element again (recursively)
return getElement(col, id)
}
//If the Not-found is cached, return false directely
if elemento.Value.(*node).Deleted == true {
fmt.Println("Not-Found cached detected on getting, ID: ", id)
return nil, nil
}
//Move the element to the front of the LRU-List using a gorutine
go moveFront(elemento)
//Check if the element is mark as swapped
if elemento.Value.(*node).Swap == true {
//Read the swapped json from disk
b, _ := readJSONFromDisK(col, id)
//TODO: read if there was an error and do something...
m, err := convertJSONToMap(string(b))
if err != nil {
return nil, err
}
//save the map in the node, mark it as un-swapped
elemento.Value = &node{m, false, false}
//increase de memory counter
fmt.Println("Suma 2: ", int64(len(b)), " --> ", string(b))
atomic.AddInt64(&memBytes, int64(len(b)))
//as we have load content from disk, we have to purge LRU
go purgeLRU()
}
//Return the element
b, err := json.Marshal(elemento.Value.(*node).V)
return b, err
}
// Get the number of elements
func getElements(col string) ([]byte, error) {
cc := collections[col]
b, err := json.Marshal(len(cc.Mapa))
return b, err
}
// Purge the LRU List deleting the last element
func purgeLRU() {
LRUChan <- 1
//Checks the memory limit and decrease it if it's necessary
for atomic.LoadInt64(&memBytes) > maxMemBytes {
//sync this procedure
lisChan <- 1
//Print Message
fmt.Println(memBytes, " - ", maxMemBytes, "dif: ", memBytes-maxMemBytes)
fmt.Println("Max memory reached! swapping", memBytes)
fmt.Println("LRU Elements: ", lruList.Len())
//Get the last element and remove it. Sync is not needed because nothing
//happens if the element is moved in the middle of this rutine, at last it will be removed
lastElement := lruList.Back()
if lastElement == nil {
fmt.Println("Empty LRU")
//unsync
<-lisChan
return
}
//Remove the element from the LRU
deleteElementFromLRU(lastElement)
//Mark the node as swapped
lastElement.Value.(*node).Deleted = false
lastElement.Value.(*node).Swap = true
lastElement.Value.(*node).V = nil
//Print a purge
if enablePrint {
fmt.Println("Purge Done: ", memBytes)
}
//unsync
<-lisChan
}
<-LRUChan
}
// Move the element to the front of the LRU, because it was readed or updated
func moveFront(elemento *list.Element) {
//Move the element
lisChan <- 1
lruList.MoveToFront(elemento)
<-lisChan
if enablePrint {
fmt.Println("LRU Updated")
}
}
// Delete the element from the disk, and if its enable, cache the not-found
func deleteElement(col string, clave string) bool {
//Get the element collection
cc := collections[col]
//Get the element from the map
elemento := cc.Mapa[clave]
//checks if the element exists in the cache
if elemento != nil {
//if it is marked as deleted, return a not-found directly without checking the disk
if elemento.Value.(*node).Deleted == true {
fmt.Println("Not-Found cached detected on deleting, ID: ", clave)
return false
}
//the node was not previously deleted....so exists in the disk
//if not-found cache is enabled, mark the element as deleted
if cacheNotFound == true {
//created a new node and asign it to the element
elemento.Value = &node{nil, false, true}
fmt.Println("Caching Not-found for, ID: ", clave)
} else {
//if it is not enabled, delete the element from the memory
cc.Canal <- 1
delete(cc.Mapa, clave)
<-cc.Canal
}
//In both cases, remove the element from the list and from disk in a separated gorutine
go func() {
lisChan <- 1
deleteElementFromLRU(elemento)
<-lisChan
deleteJSONFromDisk(col, clave)
//Print message
if enablePrint {
fmt.Println("Delete successfull, ID: ", clave)
}
}()
} else {
fmt.Println("Delete element not in memory, ID: ", clave)
//Create a new element with the key in the cache, to save a not-found if it is enable
createElement(col, clave, "", false, true)
//Check is the element exist in the disk
err := deleteJSONFromDisk(col, clave)
//if exists, direcly remove it and return true
//if it not exist return false (because it was not found)
if err == nil {
return true
}
return false
}
return true
}
// Delete the element from de LRU and decrement the counters
func deleteElementFromLRU(elemento *list.Element) {
//Decrement the byte counter, decrease the Key * 2 + Value
n := (*elemento).Value.(*node)
b, _ := json.Marshal(n.V)
fmt.Println("b: ", string(b))
fmt.Println("Resta: ", int64(len(b)))
atomic.AddInt64(&memBytes, -int64(len(b)))
//Delete the element in the LRU List
lruList.Remove(elemento)
fmt.Println("Dec Bytes: ", len(b))
}
| convertJSONToMap | identifier_name |
core.go | // Package natyla ...
// Natyla - FullStack API/Cache/Store
//
// 2014 - Fernando Scasserra - twitter: @fersca.
//
// Natyla is a persistance cache system written in golang that performs in constant time.
// It keeps a MAP to store the object internally, and a Double Linked list to purge the LRU elements.
//
// LRU updates are done in backgrounds gorutines.
// LRU and MAP modifications are performed through channels in order to keep them synchronized.
// Bytes stored are counted in order to limit the amount of memory used by the application.
//
// Core Module
// Manage the internal Memory Access, LRU, concurrency and Swapping
//
package natyla
import (
"container/list"
"encoding/json"
"errors"
"fmt"
"math/rand"
"runtime"
"strings"
"sync/atomic"
"time"
)
//Create the list to support the LRU List
var lruList *list.List
//Max byte in memory (Key + Data), today set to 100KB
var maxMemBytes int64
var memBytes int64
//const pointerLen int = 4+8 //Bytes of pointer in 32bits machines plus int64 for the key of element in hashmemBytes
var cacheNotFound = true
//Channel to sync the List, map
var lisChan chan int
//LRUChan to sync the LRU purge
var LRUChan chan int
//chennel to acces to the collection map
var collectionChan chan int
//Print information
const enablePrint bool = true
//Create the map that stores the list of collectionsge
var collections map[string]collectionChannel
var config map[string]interface{}
const readWrite = "read-write"
// Init the system variables
func init() {
//Welcome Message
fmt.Println("------------------------------------------------------------------")
fmt.Println("Starting Natyla...")
fmt.Println("Version: 1.02")
//Set the thread quantity based on the number of CPU's
coreNum := runtime.NumCPU()
fmt.Println("Number of cores: ", coreNum)
//read the config file
readConfig()
//create the data directory
createDataDir()
//set max memory form config
maxMemBytes, _ = config["memory"].(json.Number).Int64()
fmt.Println("Max memory defined as: ", maxMemBytes/1024/1024, " Mbytes")
runtime.GOMAXPROCS(coreNum)
//Create a new doble-linked list to act as LRU
lruList = list.New()
//Create the channels
lisChan = make(chan int, 1)
LRUChan = make(chan int, 1)
collectionChan = make(chan int, 1)
collections = make(map[string]collectionChannel)
//Read collections from disk
nRead := readAllFromDisk()
fmt.Println("Read", nRead, "entries from disk")
fmt.Println("Ready, API Listening on http://localhost:8080, Telnet on port 8081")
fmt.Println("------------------------------------------------------------------")
}
// Start the webserver
func Start() {
//Start the console
go console()
//Start the rest API
restAPI()
}
// Convert a Json string to a map
func convertJSONToMap(valor string) (map[string]interface{}, error) {
//Create the Json element
d := json.NewDecoder(strings.NewReader(valor))
d.UseNumber()
var f interface{}
err := d.Decode(&f)
if err != nil {
return nil, err
}
//transform it to a map
m := f.(map[string]interface{})
return m, nil
}
//Create a token for the specified user
func createToken(value string) ([]byte, error) {
m, err := convertJSONToMap(value)
if err != nil {
return nil, err
}
if m["scope"] == nil || !(m["scope"] == "read-only" || m["scope"] == readWrite) {
return nil, errors.New("Invalid scope, try with read-only or read-write")
}
now := time.Now().UnixNano()
r := rand.New(rand.NewSource(now))
id := r.Int63()
m["id"] = id
b, err := json.Marshal(m)
return b, err
}
// Create the element in the collection
func createElement(col string, id string, valor string, saveToDisk bool, deleted bool) (string, error) {
//create the list element
var elemento *list.Element
b := []byte(valor)
if deleted == false {
//Create the Json element
m, err := convertJSONToMap(valor)
if err != nil {
return "", err
}
//Checks the data tye of the ID field
switch m["id"].(type) {
case json.Number:
//id = strconv.FormatFloat(m["id"].(float64),'f',-1,64)
id = m["id"].(json.Number).String()
case string:
id = m["id"].(string)
default:
return "", errors.New("invalid_id")
}
//Add the value to the list and get the pointer to the node
n := &node{m, false, false}
lisChan <- 1
elemento = lruList.PushFront(n)
<-lisChan
} else {
//if not found cache is disabled
if cacheNotFound == false {
return id, nil
}
fmt.Println("Creating node as deleted: ", col, id)
//create the node as deleted
n := &node{nil, false, true}
elemento = &list.Element{Value: n}
}
//get the collection-channel relation
cc := collections[col]
createDir := false
if cc.Mapa == nil {
fmt.Println("Creating new collection: ", col)
//Create the new map and the new channel
var newMapa map[string]*list.Element
var newMapChann chan int
newMapa = make(map[string]*list.Element)
newMapChann = make(chan int, 1)
newCC := collectionChannel{newMapa, newMapChann}
newCC.Mapa[id] = elemento
//The collection doesn't exist, create one
collectionChan <- 1
collections[col] = newCC
<-collectionChan
createDir = true
} else {
fmt.Println("Using collection: ", col)
//Save the node in the map
cc.Canal <- 1
cc.Mapa[id] = elemento
<-cc.Canal
}
//if we are creating a deleted node, do not save it to disk
if deleted == false {
//Increase the memory counter in a diffetet gorutinie, save to disk and purge LRU
go func() {
//Increments the memory counter (Key + Value in LRU + len of col name, + Key in MAP)
fmt.Println("Suma 1: ", int64(len(b)), " --> ", string(b))
atomic.AddInt64(&memBytes, int64(len(b)))
if enablePrint {
fmt.Println("Inc Bytes: ", memBytes)
}
//Save the Json to disk, if it is not already on disk
if saveToDisk == true {
saveJSONToDisk(createDir, col, id, valor)
}
//Purge de LRU
purgeLRU()
}()
}
return id, nil
}
// Get the element from the Map and push the element to the first position of the LRU-List
func getElement(col string, id string) ([]byte, error) {
cc := collections[col]
//Get the element from the map
elemento := cc.Mapa[id]
//checks if the element exists in the cache
if elemento == nil {
fmt.Println("Elemento not in memory, reading disk, ID: ", id)
//read the disk
content, er := readJSONFromDisK(col, id)
//if file doesnt exists cache the not found and return nil
if er != nil {
//create the element and set it as deleted
createElement(col, id, "", false, true) // set as deleted and do not save to disk
} else {
//Create the element from the disk content
_, err := createElement(col, id, string(content), false, false) // set to not save to disk
if err != nil {
return nil, errors.New("Invalid Disk JSON")
}
}
//call get element again (recursively)
return getElement(col, id)
}
//If the Not-found is cached, return false directely
if elemento.Value.(*node).Deleted == true {
fmt.Println("Not-Found cached detected on getting, ID: ", id)
return nil, nil
}
//Move the element to the front of the LRU-List using a gorutine
go moveFront(elemento)
//Check if the element is mark as swapped
if elemento.Value.(*node).Swap == true {
//Read the swapped json from disk
b, _ := readJSONFromDisK(col, id)
//TODO: read if there was an error and do something...
m, err := convertJSONToMap(string(b))
if err != nil {
return nil, err
}
//save the map in the node, mark it as un-swapped
elemento.Value = &node{m, false, false}
//increase de memory counter
fmt.Println("Suma 2: ", int64(len(b)), " --> ", string(b))
atomic.AddInt64(&memBytes, int64(len(b)))
//as we have load content from disk, we have to purge LRU
go purgeLRU()
}
//Return the element
b, err := json.Marshal(elemento.Value.(*node).V)
return b, err
}
// Get the number of elements
func getElements(col string) ([]byte, error) {
cc := collections[col]
b, err := json.Marshal(len(cc.Mapa))
return b, err
}
// Purge the LRU List deleting the last element
func purgeLRU() {
LRUChan <- 1
//Checks the memory limit and decrease it if it's necessary
for atomic.LoadInt64(&memBytes) > maxMemBytes {
//sync this procedure
lisChan <- 1
//Print Message
fmt.Println(memBytes, " - ", maxMemBytes, "dif: ", memBytes-maxMemBytes)
fmt.Println("Max memory reached! swapping", memBytes)
fmt.Println("LRU Elements: ", lruList.Len())
//Get the last element and remove it. Sync is not needed because nothing
//happens if the element is moved in the middle of this rutine, at last it will be removed
lastElement := lruList.Back()
if lastElement == nil {
fmt.Println("Empty LRU")
//unsync
<-lisChan
return
}
//Remove the element from the LRU
deleteElementFromLRU(lastElement)
//Mark the node as swapped
lastElement.Value.(*node).Deleted = false
lastElement.Value.(*node).Swap = true
lastElement.Value.(*node).V = nil
//Print a purge
if enablePrint {
fmt.Println("Purge Done: ", memBytes)
}
//unsync
<-lisChan
}
<-LRUChan
}
// Move the element to the front of the LRU, because it was readed or updated
func moveFront(elemento *list.Element) |
// Delete the element from the disk, and if its enable, cache the not-found
func deleteElement(col string, clave string) bool {
//Get the element collection
cc := collections[col]
//Get the element from the map
elemento := cc.Mapa[clave]
//checks if the element exists in the cache
if elemento != nil {
//if it is marked as deleted, return a not-found directly without checking the disk
if elemento.Value.(*node).Deleted == true {
fmt.Println("Not-Found cached detected on deleting, ID: ", clave)
return false
}
//the node was not previously deleted....so exists in the disk
//if not-found cache is enabled, mark the element as deleted
if cacheNotFound == true {
//created a new node and asign it to the element
elemento.Value = &node{nil, false, true}
fmt.Println("Caching Not-found for, ID: ", clave)
} else {
//if it is not enabled, delete the element from the memory
cc.Canal <- 1
delete(cc.Mapa, clave)
<-cc.Canal
}
//In both cases, remove the element from the list and from disk in a separated gorutine
go func() {
lisChan <- 1
deleteElementFromLRU(elemento)
<-lisChan
deleteJSONFromDisk(col, clave)
//Print message
if enablePrint {
fmt.Println("Delete successfull, ID: ", clave)
}
}()
} else {
fmt.Println("Delete element not in memory, ID: ", clave)
//Create a new element with the key in the cache, to save a not-found if it is enable
createElement(col, clave, "", false, true)
//Check is the element exist in the disk
err := deleteJSONFromDisk(col, clave)
//if exists, direcly remove it and return true
//if it not exist return false (because it was not found)
if err == nil {
return true
}
return false
}
return true
}
// Delete the element from de LRU and decrement the counters
func deleteElementFromLRU(elemento *list.Element) {
//Decrement the byte counter, decrease the Key * 2 + Value
n := (*elemento).Value.(*node)
b, _ := json.Marshal(n.V)
fmt.Println("b: ", string(b))
fmt.Println("Resta: ", int64(len(b)))
atomic.AddInt64(&memBytes, -int64(len(b)))
//Delete the element in the LRU List
lruList.Remove(elemento)
fmt.Println("Dec Bytes: ", len(b))
}
| {
//Move the element
lisChan <- 1
lruList.MoveToFront(elemento)
<-lisChan
if enablePrint {
fmt.Println("LRU Updated")
}
} | identifier_body |
core.go | // Package natyla ...
// Natyla - FullStack API/Cache/Store
//
// 2014 - Fernando Scasserra - twitter: @fersca.
//
// Natyla is a persistance cache system written in golang that performs in constant time.
// It keeps a MAP to store the object internally, and a Double Linked list to purge the LRU elements.
//
// LRU updates are done in backgrounds gorutines.
// LRU and MAP modifications are performed through channels in order to keep them synchronized.
// Bytes stored are counted in order to limit the amount of memory used by the application.
//
// Core Module
// Manage the internal Memory Access, LRU, concurrency and Swapping
//
package natyla
import (
"container/list"
"encoding/json"
"errors"
"fmt"
"math/rand"
"runtime"
"strings"
"sync/atomic"
"time"
)
//Create the list to support the LRU List
var lruList *list.List
//Max byte in memory (Key + Data), today set to 100KB
var maxMemBytes int64
var memBytes int64
//const pointerLen int = 4+8 //Bytes of pointer in 32bits machines plus int64 for the key of element in hashmemBytes
var cacheNotFound = true
//Channel to sync the List, map
var lisChan chan int
//LRUChan to sync the LRU purge
var LRUChan chan int
//chennel to acces to the collection map
var collectionChan chan int
//Print information
const enablePrint bool = true
//Create the map that stores the list of collectionsge
var collections map[string]collectionChannel
var config map[string]interface{}
const readWrite = "read-write"
// Init the system variables
func init() {
//Welcome Message
fmt.Println("------------------------------------------------------------------")
fmt.Println("Starting Natyla...")
fmt.Println("Version: 1.02")
//Set the thread quantity based on the number of CPU's
coreNum := runtime.NumCPU()
fmt.Println("Number of cores: ", coreNum)
//read the config file
readConfig()
//create the data directory
createDataDir()
//set max memory form config
maxMemBytes, _ = config["memory"].(json.Number).Int64()
fmt.Println("Max memory defined as: ", maxMemBytes/1024/1024, " Mbytes")
runtime.GOMAXPROCS(coreNum)
//Create a new doble-linked list to act as LRU
lruList = list.New()
//Create the channels
lisChan = make(chan int, 1)
LRUChan = make(chan int, 1)
collectionChan = make(chan int, 1)
collections = make(map[string]collectionChannel)
//Read collections from disk
nRead := readAllFromDisk()
fmt.Println("Read", nRead, "entries from disk")
fmt.Println("Ready, API Listening on http://localhost:8080, Telnet on port 8081")
fmt.Println("------------------------------------------------------------------")
}
// Start the webserver
func Start() {
//Start the console
go console()
//Start the rest API
restAPI()
}
// Convert a Json string to a map
func convertJSONToMap(valor string) (map[string]interface{}, error) {
//Create the Json element
d := json.NewDecoder(strings.NewReader(valor))
d.UseNumber()
var f interface{}
err := d.Decode(&f)
if err != nil {
return nil, err
}
//transform it to a map
m := f.(map[string]interface{})
return m, nil
}
//Create a token for the specified user
func createToken(value string) ([]byte, error) {
m, err := convertJSONToMap(value)
if err != nil {
return nil, err
}
if m["scope"] == nil || !(m["scope"] == "read-only" || m["scope"] == readWrite) {
return nil, errors.New("Invalid scope, try with read-only or read-write")
}
now := time.Now().UnixNano()
r := rand.New(rand.NewSource(now))
id := r.Int63()
m["id"] = id
b, err := json.Marshal(m)
return b, err
}
// Create the element in the collection
func createElement(col string, id string, valor string, saveToDisk bool, deleted bool) (string, error) {
//create the list element
var elemento *list.Element
b := []byte(valor)
if deleted == false {
//Create the Json element
m, err := convertJSONToMap(valor)
if err != nil {
return "", err
}
//Checks the data tye of the ID field
switch m["id"].(type) {
case json.Number:
//id = strconv.FormatFloat(m["id"].(float64),'f',-1,64)
id = m["id"].(json.Number).String()
case string:
id = m["id"].(string)
default:
return "", errors.New("invalid_id")
}
//Add the value to the list and get the pointer to the node
n := &node{m, false, false}
lisChan <- 1
elemento = lruList.PushFront(n)
<-lisChan
} else {
//if not found cache is disabled
if cacheNotFound == false {
return id, nil
}
fmt.Println("Creating node as deleted: ", col, id)
//create the node as deleted
n := &node{nil, false, true}
elemento = &list.Element{Value: n}
}
//get the collection-channel relation
cc := collections[col]
createDir := false
if cc.Mapa == nil {
fmt.Println("Creating new collection: ", col)
//Create the new map and the new channel
var newMapa map[string]*list.Element
var newMapChann chan int
newMapa = make(map[string]*list.Element)
newMapChann = make(chan int, 1)
newCC := collectionChannel{newMapa, newMapChann}
newCC.Mapa[id] = elemento
//The collection doesn't exist, create one
collectionChan <- 1
collections[col] = newCC
<-collectionChan
createDir = true
} else {
fmt.Println("Using collection: ", col)
//Save the node in the map
cc.Canal <- 1
cc.Mapa[id] = elemento
<-cc.Canal
}
//if we are creating a deleted node, do not save it to disk
if deleted == false {
//Increase the memory counter in a diffetet gorutinie, save to disk and purge LRU
go func() {
//Increments the memory counter (Key + Value in LRU + len of col name, + Key in MAP)
fmt.Println("Suma 1: ", int64(len(b)), " --> ", string(b))
atomic.AddInt64(&memBytes, int64(len(b)))
if enablePrint {
fmt.Println("Inc Bytes: ", memBytes)
}
//Save the Json to disk, if it is not already on disk
if saveToDisk == true {
saveJSONToDisk(createDir, col, id, valor)
}
//Purge de LRU
purgeLRU()
}()
}
return id, nil
}
// Get the element from the Map and push the element to the first position of the LRU-List
func getElement(col string, id string) ([]byte, error) {
cc := collections[col]
//Get the element from the map
elemento := cc.Mapa[id]
//checks if the element exists in the cache
if elemento == nil {
fmt.Println("Elemento not in memory, reading disk, ID: ", id)
//read the disk
content, er := readJSONFromDisK(col, id)
//if file doesnt exists cache the not found and return nil
if er != nil {
//create the element and set it as deleted
createElement(col, id, "", false, true) // set as deleted and do not save to disk
} else {
//Create the element from the disk content
_, err := createElement(col, id, string(content), false, false) // set to not save to disk
if err != nil {
return nil, errors.New("Invalid Disk JSON")
}
}
//call get element again (recursively)
return getElement(col, id)
}
//If the Not-found is cached, return false directely
if elemento.Value.(*node).Deleted == true {
fmt.Println("Not-Found cached detected on getting, ID: ", id)
return nil, nil
}
//Move the element to the front of the LRU-List using a gorutine
go moveFront(elemento)
//Check if the element is mark as swapped
if elemento.Value.(*node).Swap == true {
//Read the swapped json from disk
b, _ := readJSONFromDisK(col, id)
//TODO: read if there was an error and do something...
m, err := convertJSONToMap(string(b))
if err != nil {
return nil, err
}
//save the map in the node, mark it as un-swapped
elemento.Value = &node{m, false, false}
//increase de memory counter
fmt.Println("Suma 2: ", int64(len(b)), " --> ", string(b))
atomic.AddInt64(&memBytes, int64(len(b)))
//as we have load content from disk, we have to purge LRU
go purgeLRU()
}
//Return the element
b, err := json.Marshal(elemento.Value.(*node).V)
return b, err
}
// Get the number of elements
func getElements(col string) ([]byte, error) {
cc := collections[col]
b, err := json.Marshal(len(cc.Mapa))
return b, err
}
// Purge the LRU List deleting the last element
func purgeLRU() {
LRUChan <- 1
//Checks the memory limit and decrease it if it's necessary
for atomic.LoadInt64(&memBytes) > maxMemBytes {
//sync this procedure
lisChan <- 1
//Print Message
fmt.Println(memBytes, " - ", maxMemBytes, "dif: ", memBytes-maxMemBytes)
fmt.Println("Max memory reached! swapping", memBytes)
fmt.Println("LRU Elements: ", lruList.Len())
//Get the last element and remove it. Sync is not needed because nothing
//happens if the element is moved in the middle of this rutine, at last it will be removed
lastElement := lruList.Back()
if lastElement == nil {
fmt.Println("Empty LRU")
//unsync
<-lisChan
return
}
//Remove the element from the LRU
deleteElementFromLRU(lastElement)
//Mark the node as swapped
lastElement.Value.(*node).Deleted = false
lastElement.Value.(*node).Swap = true
lastElement.Value.(*node).V = nil
//Print a purge
if enablePrint |
//unsync
<-lisChan
}
<-LRUChan
}
// Move the element to the front of the LRU, because it was readed or updated
func moveFront(elemento *list.Element) {
//Move the element
lisChan <- 1
lruList.MoveToFront(elemento)
<-lisChan
if enablePrint {
fmt.Println("LRU Updated")
}
}
// Delete the element from the disk, and if its enable, cache the not-found
func deleteElement(col string, clave string) bool {
//Get the element collection
cc := collections[col]
//Get the element from the map
elemento := cc.Mapa[clave]
//checks if the element exists in the cache
if elemento != nil {
//if it is marked as deleted, return a not-found directly without checking the disk
if elemento.Value.(*node).Deleted == true {
fmt.Println("Not-Found cached detected on deleting, ID: ", clave)
return false
}
//the node was not previously deleted....so exists in the disk
//if not-found cache is enabled, mark the element as deleted
if cacheNotFound == true {
//created a new node and asign it to the element
elemento.Value = &node{nil, false, true}
fmt.Println("Caching Not-found for, ID: ", clave)
} else {
//if it is not enabled, delete the element from the memory
cc.Canal <- 1
delete(cc.Mapa, clave)
<-cc.Canal
}
//In both cases, remove the element from the list and from disk in a separated gorutine
go func() {
lisChan <- 1
deleteElementFromLRU(elemento)
<-lisChan
deleteJSONFromDisk(col, clave)
//Print message
if enablePrint {
fmt.Println("Delete successfull, ID: ", clave)
}
}()
} else {
fmt.Println("Delete element not in memory, ID: ", clave)
//Create a new element with the key in the cache, to save a not-found if it is enable
createElement(col, clave, "", false, true)
//Check is the element exist in the disk
err := deleteJSONFromDisk(col, clave)
//if exists, direcly remove it and return true
//if it not exist return false (because it was not found)
if err == nil {
return true
}
return false
}
return true
}
// Delete the element from de LRU and decrement the counters
func deleteElementFromLRU(elemento *list.Element) {
//Decrement the byte counter, decrease the Key * 2 + Value
n := (*elemento).Value.(*node)
b, _ := json.Marshal(n.V)
fmt.Println("b: ", string(b))
fmt.Println("Resta: ", int64(len(b)))
atomic.AddInt64(&memBytes, -int64(len(b)))
//Delete the element in the LRU List
lruList.Remove(elemento)
fmt.Println("Dec Bytes: ", len(b))
}
| {
fmt.Println("Purge Done: ", memBytes)
} | conditional_block |
define.kChartDataManager.js | ;(function(namespace){
var util = namespace.util;
/**
* 刷新K线图数据时,相邻两次刷新操作之间的时间间隔。单位:毫秒
* @type {Number}
*/
var refreshGap = 2000;
var maxPageSize = 300;
/**
* @typedef {Object} KData
*/
/**
* 获取K线图数据
* @param {String} symbol 产品代码
* @param {Function} onsuccess 获取成功后执行的方法
* @param {Function} onerror 获取失败时执行的方法
*/
var getKChartData = function(symbol, type, count, date, onsuccess, onerror){
var url = "http://dt.jctytech.com/stock.php?u=17305&sign=86469c04bda0a04b9f26a2b458134cb4&stamp=1525435040897&type=kline&symbol=" + symbol;
if(null != date && (date = String(date).trim()) != "")
url += "&et=" + date;
if(null != type && (type = String(type).trim()) != "")
url += "&line=" + type;
if(null != count && (count = String(count).trim()) != "")
url += "&num=" + count;
var xhr = new XMLHttpRequest();
xhr.open("POST", url);
xhr.onreadystatechange = function(){
if(this.readyState != 4)
return;
var responseText = this.responseText;
if(this.status != 200)
return onerror && onerror(this.status, responseText);
try{
var resp = JSON.parse(responseText);
}catch(e){
console.error("Invalid response while retrieving k chart data");
return;
}
resp = resp || [];
resp = resp.reverse();
onsuccess && onsuccess(resp);
};
xhr.send();
};
/**
* 以既有的K线图数据为基准,过滤给定的K线图数据列表,使其移除重复的数据
* @param {KData[]} baseList 既有的K线图数据
* @param {KData[]} list 待过滤重复数据的K线图数据列表
*/
var filterDuplicateKData = function(baseList, list){
if(!Array.isArray(baseList) || !Array.isArray(list))
return list;
return list.filter(function(d){
return !baseList.some(function(k){
return k.Date == d.Date;
});
});
};
/**
* 将给定的K线图数据融合至目标列表中。如果不存在,则按顺序插入,否则覆盖既有取值
* @param {KData[]} targetList 目标列表
* @param {KData[]} list 待融合的列表
* @returns {KData[]}
*/
var mergeKDataList = function(targetList, list){
targetList = targetList || [];
list = list || [];
/* 按照时间升序排序 */
targetList = targetList.sort(function(a, b){
return a.Date > b.Date? 1: -1;
});
list.forEach(function(m){
var existingIndex = -1;
for(var i = 0; i < targetList.length; i++)
if(targetList[i].Date == m.Date){
existingIndex = i;
break;
}
if(-1 != existingIndex){
targetList[existingIndex] = m;
}else{
var index1 = null,/* 最大的,小于目标的索引 */
index2 = null;/* 最小的,大于目标的索引 */
for(var i = 0; i < targetList.length; i++){
var sec = targetList[i].Date;
if(sec > m.Date && null == index2)
index2 = i;
if(sec <= m.Date)
index1 = i;
}
if(index1 == null)
targetList.unshift(m);
else if(index2 == null)
targetList.push(m);
else
targetList = targetList.slice(0, index1 + 1).concat(m).concat(targetList.slice(index2));
}
});
return targetList;
};
/**
* 扫描给定的K线图数据,并计算涨跌量、涨跌幅、MA指标等,并将其附加至原始数据上
* @param {KData[]} kLineDataList K线图数据数组
*/
var scanAndEnrichKLineData = function(kLineDataList){
for(var i = 0; i < kLineDataList.length; i++){
var itm = kLineDataList[i];
// var previousClosePrice = kLineDataList[i - 1].closePrice;
/* 计算涨跌量和涨跌幅 */
if(null == itm.closePriceChangeAmount || null == itm.closePriceChangeRate){
var tmp = +itm.Open;
itm.closePriceChangeAmount = +itm.Close - tmp;
itm.closePriceChangeRate = itm.closePriceChangeAmount / (tmp || 1);
}
/** K线图支持的MA指标,如MA5、MA10等 */
var maIndices = [5, 10, 20, 30];
/* 计算MA */
maIndices.forEach(function(num){
var key = "ma" + num;
if(i < num - 1){
delete itm[key];
return;
}
if(key in itm)
return;
var sum = 0;
for(var j = i; j > i - num; j--)
sum += Number(kLineDataList[j].Close);
itm[key] = sum / num;
});
}
return kLineDataList;
};
var strigifyKDataList = function(list){
return list.map(function(t){
return t.Date;
});
};
/**
* K线图数据容器
* @param {String} symbol 产品代码
* @param {String} kType K线类型
* @constructor
*/
var KChartData = function(symbol, kType){
Object.defineProperty(this, "symbol", {value: symbol, configurable: false, writable: false});
Object.defineProperty(this, "kType", {value: kType, configurable: false, writable: false});
/* K线图数据 */
var dataList = [];
/**
* 可见数据的结束索引距离时间最晚的K线图数据的位移
* @type {number}
*/
var visibleListEndIndexOffsetFromLatest = 0;
/**
* 最新的K线图数据发生变化时执行的方法
* @type {Function[]}
*/
var latestKDataChangeListeners = [];
/**
* 最新的K线图数据刷新定时器
*/
var refreshTimer = null;
/**
* 获取K线图数据列表(自动消除重复数据)
* @returns {KData[]}
*/
this.getKDataList = function(){
return dataList;
};
/**
* 添加监听器:“最新的K线图数据发生变化”
* @param {Function} listener 要添加的监听器
* @param {Boolean} [trigggerOnlyWhenLatestKDataIsVisible=false] 是否仅当最新K线图数据可见时才出发
* @returns {KChartData}
*/
this.addLatestKDataChangeListener = function(listener, trigggerOnlyWhenLatestKDataIsVisible){
if(latestKDataChangeListeners.indexOf(listener) != -1)
return this;
if(arguments.lenth < 2)
trigggerOnlyWhenLatestKDataIsVisible = false;
listener.trigggerOnlyWhenLatestKDataIsVisible = trigggerOnlyWhenLatestKDataIsVisible;/* 暂存标记,供调用使用 */
latestKDataChangeListeners.push(listener);
return this;
};
/**
* 移除监听器:“最新的K线图数据发生变化”
* @param {Function} listener 要移除的监听器
* @returns {KChartData}
*/
this.removeLatestKDataChangeListener = function(listener){
var index = latestKDataChangeListeners.indexOf(listener);
if(index == -1)
return this;
latestKDataChangeListeners.splice(index, 1);
return this;
};
/**
* 在既有数据列表的末尾追加给定的K线图数据
* @param {KData[]} list 要在末尾处追加的K线图数据
* @returns {KChartData}
*/
this.appendKDataList = function(list){
if(!Array.isArray(list))
return this;
var oldLen = dataList.length;
dataList = mergeKDataList(dataList, list);
scanAndEnrichKLineData(dataList);
var newLen = dataList.length;
var lenDelta = newLen - oldLen;
/* 保持可见数据的一致性。等于0时,表示没有位移,需要继续保持为“没有位移” */
if(visibleListEndIndexOffsetFromLatest > 0)
visibleListEndIndexOffsetFromLatest += lenDelta;
return this;
};
/**
* 在既有数据列表的开始位置追加给定的K线图数据
* @param {KData[]} list 要在开始位置追加的K线图数据
* @returns {KChartData}
*/
this.prependKDataList = function(list){
if(!Array.isArray(list))
return this;
dataList = mergeKDataList(dataList, list);
scanAndEnrichKLineData(dataList);
return this;
};
/**
* 获取时间上最早的K线图数据
*/
this.getEarliestKData = function(){
if(dataList.length > 0)
return dataList[0];
return null;
};
/**
* 获取时间上最早的K线图数据产生的时间的时间戳。单位:秒
* @returns {Integer} 时间上最早的K线图数据产生的时间的时间戳。单位:秒
*/
this.getEarliestKDataSeconds = function(){
var earliestData = this.getEarliestKData();
if(null == earliestData)
return null;
return earliestData.Date;
};
/**
* 从时间最晚的数据开始,截断超出指定尺寸的数据,从而只保留最新的N条数据
* @param {Integer} sizeToKeep 要保留的K线图数据个数
*/
this.truncateKDataFromLatest = function(sizeToKeep){
/* 重置“可见数据的结束索引距离时间最晚的K线图数据的位移” */
this.resetVisibleKDataListEndIndexOffsetFromLatest();
var len = dataList.length;
if(len <= sizeToKeep)
return this;
dataList = dataList.slice(len - sizeToKeep);
return this;
};
/**
* 查询并更新K线图数据
* @returns {KChartData}
*/
this.queryAndUpdateLatestKData = function(){
var self = this;
getKChartData(symbol, kType, 20, null, function(resp){
var list = resp || [];
var oldLatestData = dataList.length > 0? dataList[dataList.length - 1]: null;
self.appendKDataList(list);
var newLatestData = list.length > 0? list[list.length - 1]: null;
if(JSON.stringify(oldLatestData) != newLatestData){
latestKDataChangeListeners.forEach(function(listener){
if(listener.trigggerOnlyWhenLatestKDataIsVisible && visibleListEndIndexOffsetFromLatest > 0)
return;
if(typeof listener == "function")
try{listener();}catch(e){console.error(e, e.stack);}
});
}
});
return this;
};
/**
* 停止K线图数据的周期性更新(Http通道)
* @returns {KChartData}
*/
this.stopPeriodicallyUpdateByHttp = function(){
clearInterval(refreshTimer);
refreshTimer = null;
return this;
};
/**
* 开始K线图数据的周期性更新(Http通道)
* @returns {KChartData}
*/
this.startPeriodicallyUpdateByHttp = function(){
if(null != refreshTimer){
return this;
}
var self = this;
self.queryAndUpdateLatestKData();
refreshTimer = setInterval(function(){
self.queryAndUpdateLatestKData();
}, refreshGap);
return this;
};
/**
* 获取可见的K线图数据列表
* @param {Integer} [count=maxPageSize] 要获取的数据尺寸
* @returns {KData[]}
*/
this.getVisibleKDataList = function(count){
if(arguments.length < 1)
count = maxPageSize;
var list = dataList;
var len = list.length;
visibleListEndIndexOffsetFromLatest = Math.max(visibleListEndIndexOffsetFromLatest, 0);
visibleListEndIndexOffsetFromLatest = Math.min(visibleListEndIndexOffsetFromLatest, len);
var arr = [];
if(len == 0 || visibleListEndIndexOffsetFromLatest >= len)
return arr;
var endIndex = len - 1 - visibleListEndIndexOffsetFromLatest;/* min: 0, max: len - 1 */
for(var i = endIndex, j = 0; i >= 0 && j < count; i--, j++)
arr.unshift(list[i]);
return arr;
};
/**
* 重置“可见数据的结束索引距离时间最晚的K线图数据的位移”为0
* @returns {KChartData}
*/
this.resetVisibleKDataListEndIndexOffsetFromLatest = function(){
visibleListEndIndexOffsetFromLatest = 0;
return this;
};
/**
* 更新“可见数据的结束索引距离时间最晚的K线图数据的位移”
* @param {Integer} offset 位移在既有基础上的偏移量
* @returns {KChartData}
*/
this.updateVisibleKDataListEndIndexOffsetFromLatest = function(offset){
visibleListEndIndexOffsetFromLatest += offset;
var len = dataList.length;
visibleListEndIndexOffsetFromLatest = Math.max(visibleListEndIndexOffsetFromLatest, 0);
visibleListEndIndexOffsetFromLatest = Math.min(visibleListEndIndexOffsetFromLatest, len);
return this;
};
/**
* 设置“可见数据的结束索引距离时间最晚的K线图数据的位移”
* @param {Integer} offset 新的位移
* @returns {KChartData}
*/
this.setVisibleKDataListEndIndexOffsetFromLatest = function(offset){
visibleListEndIndexOffsetFromLatest = offset;
var len = dataList.length;
visibleListEndIndexOffsetFromLatest = Math.max(visibleListEndIndexOffsetFromLatest, 0);
visibleListEndIndexOffsetFromLatest = Math.min(visibleListEndIndexOffsetFromLatest, len);
return this;
};
/**
* 根据当前的“可见数据的结束索引距离时间最晚的K线图数据的位移”,计算并返回“不可见的,时间比可见数据更早的K线图数据”的个数
* @param {Integer} [count=maxPageSize] 可见数据的数据尺寸
* @returns {Integer}
*/
this.getInvisibleEarlierKDataListLength = function(count){
count = count || maxPageSize;
var len = dataList.length;
var visibleAreaEndIndex = len - 1 - visibleListEndIndexOffsetFromLatest;/* min: 0, max: len - 1 */
var visibleAreaBeginIndex = visibleAreaEndIndex - (count - 1);
if(visibleAreaBeginIndex <= 0)
return 0;
return visibleAreaBeginIndex;
};
/**
* 根据当前的“可见数据的结束索引距离时间最晚的K线图数据的位移”,计算并返回“不可见的,时间比可见数据更晚的K线图数据”的个数
* @returns {Integer}
*/
this.getInvisibleLaterKDataListLength = function(){
return visibleListEndIndexOffsetFromLatest;
};
/**
* 状态重置
* @param {Integer} [sizeToKeep=maxPageSize] 要保留的最新的K线图数据的尺寸
* @returns {KChartData}
*/
this.reset = function(sizeToKeep){
if(arguments.length < 1)
sizeToKeep = maxPageSize;
if(sizeToKeep > 0)
this.truncateKDataFromLatest(sizeToKeep);
else
dataList = [];
this.resetVisibleKDataListEndIndexOffsetFromLatest();
return this;
};
};
/**
* @constructor
* K线图数据管理器,用于实现K线图数据的加载等
* @param {String} symbol 产品代码
* @param {String} kType K线类型
*/
var KChartDataManager = function(symbol, kType){
Object.defineProperty(this, "symbol", {value: symbol, configurable: false, writable: false});
Object.defineProperty(this, "kType", {value: kType, configurable: false, writable: false});
var kChartData = new KChartData(symbol, kType);
/** 是否正在加载K线图数据(互斥锁,用于保证同一时刻只有一个加载请求在执行) */
var isLoadingKData = false;
/** 是否所有历史数据均已加载完成 */
var isAllEarlierKDataLoaded = false;
var self = this;
/**
* 获取关联的K线图数据实例
* @returns {KChartData}
*/
this.getKChartData = function(){
return kChartData;
};
/**
* 加载更早的K线图数据
* @param {Integer} [count=maxPageSize] 要获取的数据尺寸
* @param {JsonObject} [ops] 控制选项
* @param {Function} [ops.callback] 获取到K线图数据后执行的方法
* @param {Function} [ops.action4NoMoreEarlierData] 可用数据不足,且历史数据加载完毕(没有更多历史数据)时执行的方法
* @returns {KChartDataManager}
*/
var loadEarlierKData = function(count, ops){
count = count || maxPageSize;
ops = util.setDftValue(ops, {
callback: null,
action4NoMoreEarlierData: null,
});
var loadedList = [];
var execCallback = function(){
kChartData.prependKDataList(loadedList);
var list = loadedList.slice(Math.max(loadedList.length - count, 0));
if(typeof ops.callback == "function")
ops.callback(list);
};
/**
* 执行K线图数据加载。
* 因为服务端限定了单次返回的最大数据量,所以客户端需要不断加载,直至累计的数据量满足业务调用方所需的数据量为止
*/
var doLoad = function(){
var loadedListEarliestSeconds = 0 == loadedList.length? null: loadedList[0].Date;
var kChartDataEarliestSeconds = kChartData.getEarliestKDataSeconds();
var endTime = loadedListEarliestSeconds || kChartDataEarliestSeconds || 0;
getKChartData(symbol, kType, count, endTime, function(resp){
var list = resp || [];
var obtainedListLen = list.length;
list = filterDuplicateKData(loadedList, list);
loadedList = list.concat(loadedList);
if(loadedList.length >= count){/* 数据量满足 */
execCallback();
}else{/* 数据量不足,需继续加载 */
if(obtainedListLen < count){/* 不会有更多历史数据 */
isAllEarlierKDataLoaded = true;
execCallback();
if(typeof ops.action4NoMoreEarlierData == "function")
ops.action4NoMoreEarlierData();
}else/* 继续加载 */
doLoad();
}
}, function(){
execCallback();
});
};
doLoad();
return self;
};
/**
* 获取可见的K线图数据列表。特性:
* 1. 优先使用本地数据执行回调方法(ops.callback),如果本地数据不足,则尝试加载历史数据,并在有更多历史数据时,再次执行回调方法
* 2. 如果本地数据的个数C不满足:C >= 1.5 * CV,则自动加载历史数据。其中CV为业务调用方索取的数据个数
*
* @param {Integer} [count=maxPageSize] 要获取的数据尺寸
* @param {JsonObject} [ops] 控制选项
* @param {Function} [ops.callback] 获取到K线图数据后执行的回调方法
* @param {Function} [ops.action4NoMoreEarlierData] 可用数据不足,且历史数据加载完毕(没有更多历史数据)时执行的方法
* @returns {KChartDataManager}
*/
this.getVisibleKDataList = function(count, ops){
count = count || maxPageSize;
ops = util.setDftValue(ops, {
callback: null,
action4NoMoreEarlierData: null,
});
var list = kChartData.getVisibleKDataList(count);
var invisibleEarlierKDataListLength = kChartData.getInvisibleEarlierKDataListLength(count);
var isLocalDataSufficient = list.length >= count;
var ifNeedToLoadEarlierData = !isLocalDataSufficient || (invisibleEarlierKDataListLength < count / 2);
var self = this;
var len = list.length;
var callbackTriggered = false;
if(0 != len){
if(typeof ops.callback == "function"){
console.log("Exec callback for the first time", strigifyKDataList(list));
ops.callback(list);
callbackTriggered = true;
}
}else
console.debug("No local data exist to exec callback");
if(ifNeedToLoadEarlierData && !isLoadingKData && !isAllEarlierKDataLoaded){
console.debug("Loading earlier data.", list.length, count);
isLoadingKData = true;
loadEarlierKData(count, {
callback: function(list){
isLoadingKData = false;
if(isLocalDataSufficient){
return;
}
console.log("Trying to get new k data list of count: " + count, strigifyKDataList(kChartData.getKDataList()));
var newList = kChartData.getVisibleKDataList(count);
if(!callbackTriggered || newList.length != len){
if(typeof ops.callback == "function"){
console.log("Exec callback for the second time", strigifyKDataList(newList));
ops.callback(newList);
}
}
},
action4NoMoreEarlierData: ops.action4NoMoreEarlierData
});
}
return this;
};
/**
* 更新“可见数据的结束索引距离时间最晚的K线图数据的位移”。如果偏移量为正,且没有更多历史数据,则忽略本次操作 | this.updateVisibleKDataListEndIndexOffsetFromLatest = function(visibleKDataCount, offset){
var currentOffset = kChartData.getInvisibleLaterKDataListLength();
var newOffset = currentOffset + offset;
var maxOffset = Math.max(kChartData.getKDataList().length - visibleKDataCount, 0);
if(isAllEarlierKDataLoaded && newOffset > maxOffset){
newOffset = maxOffset;
kChartData.setVisibleKDataListEndIndexOffsetFromLatest(newOffset);
}else
kChartData.updateVisibleKDataListEndIndexOffsetFromLatest(offset);
return this;
};
/**
* 状态重置
* @param {Integer} [sizeToKeep=maxPageSize] 要保留的最新的K线图数据的尺寸
* @returns {KChartDataManager}
*/
this.reset = function(sizeToKeep){
isLoadingKData = false;
isAllEarlierKDataLoaded = false;
kChartData.reset(sizeToKeep);
return this;
};
};
/**
* 获取指定产品代码对应的实例。如果实例不存在,则自动创建一个
* @param {String} symbol 产品代码
* @param {String} kType K线类型
*/
KChartDataManager.ofSymbolAndType = (function(){
/**
* 所有已经创建的实例
* @type {Object.<String, KChartDataManager>}
*/
var managerInstances = {};
return function(symbol, kType){
var k = symbol + "_" + kType;
var instance = managerInstances[k];
if(null == instance){
instance = new KChartDataManager(symbol, kType);
managerInstances[k] = instance;
}
return instance;
};
})();
namespace.KChartDataManager = KChartDataManager;
})(window); | * @param {Integer} visibleKDataCount 可见数据量
* @param {Integer} offset 位移在既有基础上的偏移量
* @returns {KChartData}
*/ | random_line_split |
app.py | import flask
import json
import uuid
import jwt
import smooch
from flask import Flask, request, Response
from redis import Redis
from smooch.rest import ApiException
from promise import Promise
from persistence import Persistence
app = Flask(__name__)
app.secret_key = str(uuid.uuid4())
# redis = Redis(host='redis', port=6379)
db = Persistence()
redis = db.db
# redis.flushall()
# Load environment variables
with open('env-vars.json') as env_vars_json:
data = json.load(env_vars_json)
APP_ID = data['SMOOCH_APP_ID']
KEY_ID = data['SMOOCH_KEY_ID']
SECRET = data['SMOOCH_SECRET']
def generate_jwt_token():
token_bytes = jwt.encode({
'scope': 'app'
},
SECRET,
algorithm='HS256',
headers={
'kid': KEY_ID
})
return token_bytes.decode('utf-8')
# Configure API key authorization: jwt
smooch.configuration.api_key['Authorization'] = generate_jwt_token()
smooch.configuration.api_key_prefix['Authorization'] = 'Bearer'
# create an instance of the API class
api_instance = smooch.ConversationApi()
app_create_body = smooch.AppCreate() # AppCreate | Body for a createApp request.
### Health Checks
# IMAGES
def image(text):
if text == "Bible":
return "http://static7.bigstockphoto.com/thumbs/8/1/3/small2/318707.jpg"
elif text == "Easter":
return "http://www.beliefnet.com/columnists//deaconsbench/files/import/assets_c/2010/04/jesus-cross-thumb-400x528-12594.jpg"
elif text == "Budgeting":
return "https://www.aiadallas.org/media/uploads/event-images/budget_thumbnail.png"
elif text == "Spending":
return "http://thumbnails.billiondigital.com/297/151/1151297/1151253_small_checkboard.jpg"
elif text == "Talk":
return "https://rfclipart.com/image/thumbnail/22-f6-00/small-coffee-cup-Download-Free-Vector-File-EPS-677.jpg"
else:
return ""
# PING
@app.route('/')
def ping():
# Pull all database data and log on screen
all_messages = json.dumps([eval(redis.get(key).decode('utf8')) for key in redis.scan_iter("messages-*")])
return Response(all_messages, status=200)
def postText(text):
message = smooch.MessagePost(role='appMaker', type='text')
message.text = text
return message
def postTextWithReplies(text, replies):
message = smooch.MessagePost(role='appMaker', type='text')
message.text = text
actions = []
for reply in replies:
actions.append(smooch.Action(type='reply', text=reply, payload=reply))
message.actions = actions
return message
def postTextWithListReplies(text, replies):
message = smooch.MessagePost(role='appMaker', type='text')
message.text = text
actions = []
for reply in replies:
actions.append(smooch.Action(type='postback', text=reply, payload=reply))
message.actions = actions
return message
def postImage(uri):
message = smooch.MessagePost(role='appMaker', type='image')
message.media_url = uri
return message
def postFile(uri):
message = smooch.MessagePost(role='appMaker', type='file')
message.media_url = uri
return message
def postCarousel(list):
message = smooch.MessagePost(role='appMaker', type='carousel')
items = []
for item in list:
actions = []
actions.append(smooch.Action(type='postback', text=item, payload=item))
part = smooch.MessageItem(title=item, actions=actions)
part.media_url = image(item)
part.size = 'compact'
items.append(part)
message.items = items
return message
def handle_message(user_id, text):
if text == "Help":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Just say Hi, we can talk about Jesus or Money."))
elif text == "Talk":
api_response = api_instance.post_message(APP_ID, user_id,
postText("https://capuk.org/connect/contact-us"))
elif text == "Hello" or text == "Hey" or text == "Hi":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithReplies("What do you want to chat about?", ['Jesus', 'Money', 'Rachel']))
elif text == "hello" or text == "hey" or text == "hi":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithReplies("How do you feel about money?", [':D',':)',':(',':@']))
### JESUS ###
elif text == "Jesus":
api_response = api_instance.post_message(APP_ID, user_id,
postCarousel(['Bible', 'Easter', 'Talk']))
elif text == "Bible":
api_response = api_instance.post_message(APP_ID, user_id,
postText("https://www.desiringgod.org/articles/how-to-read-the-bible-for-yourself"))
elif text == "Easter":
api_response = api_instance.post_message(APP_ID, user_id,
postText("http://www.st-helens.org.uk/internationals/who-is-jesus"))
### MONEY ###
elif text == "Money":
api_response = api_instance.post_message(APP_ID, user_id,
postCarousel(['Budgeting', 'Spending', 'Talk']))
elif text == "Budgeting":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithListReplies("Are you happy to tell me about your budget?", ['Regular Budget', 'Weekly Allowance Budget']))
elif text == "Spending":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithListReplies("Are you happy to tell me about your spend?", ['Regular Spend', 'Weekly Allowance Spend']))
### BUDGET ###
elif text == "Regular Budget":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it!"))
api_response = api_instance.post_message(APP_ID, user_id,
postText("Ok, how much is your rent or mortgage?"))
elif text == "Weekly Allowance Budget":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it!"))
api_response = api_instance.post_message(APP_ID, user_id,
postText("Ok, how much is your weekly allowance budget?"))
elif text == "Weekly Allowance Spend":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Regular Spend":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! What did you spend money on?"))
api_response = api_instance.post_message(APP_ID, user_id,
postCarousel(['Home', 'Living', 'Travel', 'Family', 'Leisure', 'Future', 'Giving', 'Repayments']))
elif text == "Home":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Living":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Travel":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Family":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Leisure":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Future":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
| elif text == "Giving":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Repayments":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Yes please":
api_response = api_instance.post_message(APP_ID, user_id,
postCarousel(['Budgeting', 'Spending', 'Talk']))
elif text == "I'm ok":
api_response = api_instance.post_message(APP_ID, user_id,
postText(":)"))
### RACHEL ###
elif text == "Rachel":
api_response = api_instance.post_message(APP_ID, user_id,
postFile("http://rachelschallenge.org/media/media_press_kit/Code_of_ethics.pdf"))
### EMOJI ###
elif text == ":D":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Have you thought about helping other people cope better? https://capuk.org"))
elif text == ":)":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Have you thought about helping other people cope better? https://capuk.org"))
elif text == ":(":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithReplies("You're not alone! I'm your budget buddy. I can help you cope better.", ['Yes please', 'I\'m ok']))
elif text == ":@":
api_response = api_instance.post_message(APP_ID, user_id,
postText("You're not alone!"))
elif text == "":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Speachless"))
else:
api_response = api_instance.post_message(APP_ID, user_id,
postText("I haven't learned that one yet"))
# Request handling logic
def parse_request_data(request_data):
body = json.loads(request_data)
user_id = body['appUser']['_id']
if body['trigger'] == 'message:appUser':
for message in body['messages']:
handle_message(user_id, message['text'])
elif body['trigger'] == 'postback':
for postback in body['postbacks']:
handle_message(user_id, postback['action']['payload'])
'''
# Persist message to database
author_id = body['messages'][0]['authorId']
message = body['messages'][0]['text']
persistence_data = None
existing_author_info = None
# if key exists then load its data
if db.key_exists(author_id):
existing_author_db_data = db.load_messages(author_id)
existing_author_info = json.loads(existing_author_db_data)
db.delete_messages(author_id) # remove key since its being retrieved and stored in memory
# Update db key if pre-existing
if isinstance(existing_author_info, (list)):
existing_author_info.append(message) # Problem here as it overrites list
persistence_data = json.dumps(existing_author_info)
print(persistence_data)
# perhaps delete that exists since we have retrieved its data in memory?
else:
persistence_data = json.dumps([message])
# Then finally save...
db.save_message(author_id, persistence_data)
'''
@app.route('/messages', methods=["POST"])
def handle_messages():
# print(request.get_json())
# Delay bot response and return immediate response
# to 'fix' facebook issue - https://chatbotsmagazine.com/listicle-of-things-missing-from-facebook-messenger-chatbot-platforms-documentation-d1d50922ef15
Promise.resolve(parse_request_data(request.get_data()))
return Response('ok', status=200) | random_line_split | |
app.py | import flask
import json
import uuid
import jwt
import smooch
from flask import Flask, request, Response
from redis import Redis
from smooch.rest import ApiException
from promise import Promise
from persistence import Persistence
app = Flask(__name__)
app.secret_key = str(uuid.uuid4())
# redis = Redis(host='redis', port=6379)
db = Persistence()
redis = db.db
# redis.flushall()
# Load environment variables
with open('env-vars.json') as env_vars_json:
data = json.load(env_vars_json)
APP_ID = data['SMOOCH_APP_ID']
KEY_ID = data['SMOOCH_KEY_ID']
SECRET = data['SMOOCH_SECRET']
def generate_jwt_token():
token_bytes = jwt.encode({
'scope': 'app'
},
SECRET,
algorithm='HS256',
headers={
'kid': KEY_ID
})
return token_bytes.decode('utf-8')
# Configure API key authorization: jwt
smooch.configuration.api_key['Authorization'] = generate_jwt_token()
smooch.configuration.api_key_prefix['Authorization'] = 'Bearer'
# create an instance of the API class
api_instance = smooch.ConversationApi()
app_create_body = smooch.AppCreate() # AppCreate | Body for a createApp request.
### Health Checks
# IMAGES
def image(text):
if text == "Bible":
return "http://static7.bigstockphoto.com/thumbs/8/1/3/small2/318707.jpg"
elif text == "Easter":
return "http://www.beliefnet.com/columnists//deaconsbench/files/import/assets_c/2010/04/jesus-cross-thumb-400x528-12594.jpg"
elif text == "Budgeting":
return "https://www.aiadallas.org/media/uploads/event-images/budget_thumbnail.png"
elif text == "Spending":
return "http://thumbnails.billiondigital.com/297/151/1151297/1151253_small_checkboard.jpg"
elif text == "Talk":
return "https://rfclipart.com/image/thumbnail/22-f6-00/small-coffee-cup-Download-Free-Vector-File-EPS-677.jpg"
else:
return ""
# PING
@app.route('/')
def ping():
# Pull all database data and log on screen
all_messages = json.dumps([eval(redis.get(key).decode('utf8')) for key in redis.scan_iter("messages-*")])
return Response(all_messages, status=200)
def postText(text):
message = smooch.MessagePost(role='appMaker', type='text')
message.text = text
return message
def postTextWithReplies(text, replies):
message = smooch.MessagePost(role='appMaker', type='text')
message.text = text
actions = []
for reply in replies:
actions.append(smooch.Action(type='reply', text=reply, payload=reply))
message.actions = actions
return message
def postTextWithListReplies(text, replies):
message = smooch.MessagePost(role='appMaker', type='text')
message.text = text
actions = []
for reply in replies:
actions.append(smooch.Action(type='postback', text=reply, payload=reply))
message.actions = actions
return message
def postImage(uri):
message = smooch.MessagePost(role='appMaker', type='image')
message.media_url = uri
return message
def postFile(uri):
message = smooch.MessagePost(role='appMaker', type='file')
message.media_url = uri
return message
def postCarousel(list):
message = smooch.MessagePost(role='appMaker', type='carousel')
items = []
for item in list:
actions = []
actions.append(smooch.Action(type='postback', text=item, payload=item))
part = smooch.MessageItem(title=item, actions=actions)
part.media_url = image(item)
part.size = 'compact'
items.append(part)
message.items = items
return message
def handle_message(user_id, text):
if text == "Help":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Just say Hi, we can talk about Jesus or Money."))
elif text == "Talk":
api_response = api_instance.post_message(APP_ID, user_id,
postText("https://capuk.org/connect/contact-us"))
elif text == "Hello" or text == "Hey" or text == "Hi":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithReplies("What do you want to chat about?", ['Jesus', 'Money', 'Rachel']))
elif text == "hello" or text == "hey" or text == "hi":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithReplies("How do you feel about money?", [':D',':)',':(',':@']))
### JESUS ###
elif text == "Jesus":
api_response = api_instance.post_message(APP_ID, user_id,
postCarousel(['Bible', 'Easter', 'Talk']))
elif text == "Bible":
api_response = api_instance.post_message(APP_ID, user_id,
postText("https://www.desiringgod.org/articles/how-to-read-the-bible-for-yourself"))
elif text == "Easter":
api_response = api_instance.post_message(APP_ID, user_id,
postText("http://www.st-helens.org.uk/internationals/who-is-jesus"))
### MONEY ###
elif text == "Money":
api_response = api_instance.post_message(APP_ID, user_id,
postCarousel(['Budgeting', 'Spending', 'Talk']))
elif text == "Budgeting":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithListReplies("Are you happy to tell me about your budget?", ['Regular Budget', 'Weekly Allowance Budget']))
elif text == "Spending":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithListReplies("Are you happy to tell me about your spend?", ['Regular Spend', 'Weekly Allowance Spend']))
### BUDGET ###
elif text == "Regular Budget":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it!"))
api_response = api_instance.post_message(APP_ID, user_id,
postText("Ok, how much is your rent or mortgage?"))
elif text == "Weekly Allowance Budget":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it!"))
api_response = api_instance.post_message(APP_ID, user_id,
postText("Ok, how much is your weekly allowance budget?"))
elif text == "Weekly Allowance Spend":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Regular Spend":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! What did you spend money on?"))
api_response = api_instance.post_message(APP_ID, user_id,
postCarousel(['Home', 'Living', 'Travel', 'Family', 'Leisure', 'Future', 'Giving', 'Repayments']))
elif text == "Home":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Living":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Travel":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Family":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Leisure":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Future":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Giving":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Repayments":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Yes please":
api_response = api_instance.post_message(APP_ID, user_id,
postCarousel(['Budgeting', 'Spending', 'Talk']))
elif text == "I'm ok":
api_response = api_instance.post_message(APP_ID, user_id,
postText(":)"))
### RACHEL ###
elif text == "Rachel":
api_response = api_instance.post_message(APP_ID, user_id,
postFile("http://rachelschallenge.org/media/media_press_kit/Code_of_ethics.pdf"))
### EMOJI ###
elif text == ":D":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Have you thought about helping other people cope better? https://capuk.org"))
elif text == ":)":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Have you thought about helping other people cope better? https://capuk.org"))
elif text == ":(":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithReplies("You're not alone! I'm your budget buddy. I can help you cope better.", ['Yes please', 'I\'m ok']))
elif text == ":@":
api_response = api_instance.post_message(APP_ID, user_id,
postText("You're not alone!"))
elif text == "":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Speachless"))
else:
api_response = api_instance.post_message(APP_ID, user_id,
postText("I haven't learned that one yet"))
# Request handling logic
def | (request_data):
body = json.loads(request_data)
user_id = body['appUser']['_id']
if body['trigger'] == 'message:appUser':
for message in body['messages']:
handle_message(user_id, message['text'])
elif body['trigger'] == 'postback':
for postback in body['postbacks']:
handle_message(user_id, postback['action']['payload'])
'''
# Persist message to database
author_id = body['messages'][0]['authorId']
message = body['messages'][0]['text']
persistence_data = None
existing_author_info = None
# if key exists then load its data
if db.key_exists(author_id):
existing_author_db_data = db.load_messages(author_id)
existing_author_info = json.loads(existing_author_db_data)
db.delete_messages(author_id) # remove key since its being retrieved and stored in memory
# Update db key if pre-existing
if isinstance(existing_author_info, (list)):
existing_author_info.append(message) # Problem here as it overrites list
persistence_data = json.dumps(existing_author_info)
print(persistence_data)
# perhaps delete that exists since we have retrieved its data in memory?
else:
persistence_data = json.dumps([message])
# Then finally save...
db.save_message(author_id, persistence_data)
'''
@app.route('/messages', methods=["POST"])
def handle_messages():
# print(request.get_json())
# Delay bot response and return immediate response
# to 'fix' facebook issue - https://chatbotsmagazine.com/listicle-of-things-missing-from-facebook-messenger-chatbot-platforms-documentation-d1d50922ef15
Promise.resolve(parse_request_data(request.get_data()))
return Response('ok', status=200) | parse_request_data | identifier_name |
app.py | import flask
import json
import uuid
import jwt
import smooch
from flask import Flask, request, Response
from redis import Redis
from smooch.rest import ApiException
from promise import Promise
from persistence import Persistence
app = Flask(__name__)
app.secret_key = str(uuid.uuid4())
# redis = Redis(host='redis', port=6379)
db = Persistence()
redis = db.db
# redis.flushall()
# Load environment variables
with open('env-vars.json') as env_vars_json:
data = json.load(env_vars_json)
APP_ID = data['SMOOCH_APP_ID']
KEY_ID = data['SMOOCH_KEY_ID']
SECRET = data['SMOOCH_SECRET']
def generate_jwt_token():
token_bytes = jwt.encode({
'scope': 'app'
},
SECRET,
algorithm='HS256',
headers={
'kid': KEY_ID
})
return token_bytes.decode('utf-8')
# Configure API key authorization: jwt
smooch.configuration.api_key['Authorization'] = generate_jwt_token()
smooch.configuration.api_key_prefix['Authorization'] = 'Bearer'
# create an instance of the API class
api_instance = smooch.ConversationApi()
app_create_body = smooch.AppCreate() # AppCreate | Body for a createApp request.
### Health Checks
# IMAGES
def image(text):
if text == "Bible":
return "http://static7.bigstockphoto.com/thumbs/8/1/3/small2/318707.jpg"
elif text == "Easter":
return "http://www.beliefnet.com/columnists//deaconsbench/files/import/assets_c/2010/04/jesus-cross-thumb-400x528-12594.jpg"
elif text == "Budgeting":
return "https://www.aiadallas.org/media/uploads/event-images/budget_thumbnail.png"
elif text == "Spending":
return "http://thumbnails.billiondigital.com/297/151/1151297/1151253_small_checkboard.jpg"
elif text == "Talk":
return "https://rfclipart.com/image/thumbnail/22-f6-00/small-coffee-cup-Download-Free-Vector-File-EPS-677.jpg"
else:
return ""
# PING
@app.route('/')
def ping():
# Pull all database data and log on screen
all_messages = json.dumps([eval(redis.get(key).decode('utf8')) for key in redis.scan_iter("messages-*")])
return Response(all_messages, status=200)
def postText(text):
message = smooch.MessagePost(role='appMaker', type='text')
message.text = text
return message
def postTextWithReplies(text, replies):
message = smooch.MessagePost(role='appMaker', type='text')
message.text = text
actions = []
for reply in replies:
actions.append(smooch.Action(type='reply', text=reply, payload=reply))
message.actions = actions
return message
def postTextWithListReplies(text, replies):
message = smooch.MessagePost(role='appMaker', type='text')
message.text = text
actions = []
for reply in replies:
actions.append(smooch.Action(type='postback', text=reply, payload=reply))
message.actions = actions
return message
def postImage(uri):
message = smooch.MessagePost(role='appMaker', type='image')
message.media_url = uri
return message
def postFile(uri):
message = smooch.MessagePost(role='appMaker', type='file')
message.media_url = uri
return message
def postCarousel(list):
message = smooch.MessagePost(role='appMaker', type='carousel')
items = []
for item in list:
actions = []
actions.append(smooch.Action(type='postback', text=item, payload=item))
part = smooch.MessageItem(title=item, actions=actions)
part.media_url = image(item)
part.size = 'compact'
items.append(part)
message.items = items
return message
def handle_message(user_id, text):
if text == "Help":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Just say Hi, we can talk about Jesus or Money."))
elif text == "Talk":
api_response = api_instance.post_message(APP_ID, user_id,
postText("https://capuk.org/connect/contact-us"))
elif text == "Hello" or text == "Hey" or text == "Hi":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithReplies("What do you want to chat about?", ['Jesus', 'Money', 'Rachel']))
elif text == "hello" or text == "hey" or text == "hi":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithReplies("How do you feel about money?", [':D',':)',':(',':@']))
### JESUS ###
elif text == "Jesus":
api_response = api_instance.post_message(APP_ID, user_id,
postCarousel(['Bible', 'Easter', 'Talk']))
elif text == "Bible":
api_response = api_instance.post_message(APP_ID, user_id,
postText("https://www.desiringgod.org/articles/how-to-read-the-bible-for-yourself"))
elif text == "Easter":
api_response = api_instance.post_message(APP_ID, user_id,
postText("http://www.st-helens.org.uk/internationals/who-is-jesus"))
### MONEY ###
elif text == "Money":
api_response = api_instance.post_message(APP_ID, user_id,
postCarousel(['Budgeting', 'Spending', 'Talk']))
elif text == "Budgeting":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithListReplies("Are you happy to tell me about your budget?", ['Regular Budget', 'Weekly Allowance Budget']))
elif text == "Spending":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithListReplies("Are you happy to tell me about your spend?", ['Regular Spend', 'Weekly Allowance Spend']))
### BUDGET ###
elif text == "Regular Budget":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it!"))
api_response = api_instance.post_message(APP_ID, user_id,
postText("Ok, how much is your rent or mortgage?"))
elif text == "Weekly Allowance Budget":
|
elif text == "Weekly Allowance Spend":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Regular Spend":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! What did you spend money on?"))
api_response = api_instance.post_message(APP_ID, user_id,
postCarousel(['Home', 'Living', 'Travel', 'Family', 'Leisure', 'Future', 'Giving', 'Repayments']))
elif text == "Home":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Living":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Travel":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Family":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Leisure":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Future":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Giving":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Repayments":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Yes please":
api_response = api_instance.post_message(APP_ID, user_id,
postCarousel(['Budgeting', 'Spending', 'Talk']))
elif text == "I'm ok":
api_response = api_instance.post_message(APP_ID, user_id,
postText(":)"))
### RACHEL ###
elif text == "Rachel":
api_response = api_instance.post_message(APP_ID, user_id,
postFile("http://rachelschallenge.org/media/media_press_kit/Code_of_ethics.pdf"))
### EMOJI ###
elif text == ":D":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Have you thought about helping other people cope better? https://capuk.org"))
elif text == ":)":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Have you thought about helping other people cope better? https://capuk.org"))
elif text == ":(":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithReplies("You're not alone! I'm your budget buddy. I can help you cope better.", ['Yes please', 'I\'m ok']))
elif text == ":@":
api_response = api_instance.post_message(APP_ID, user_id,
postText("You're not alone!"))
elif text == "":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Speachless"))
else:
api_response = api_instance.post_message(APP_ID, user_id,
postText("I haven't learned that one yet"))
# Request handling logic
def parse_request_data(request_data):
body = json.loads(request_data)
user_id = body['appUser']['_id']
if body['trigger'] == 'message:appUser':
for message in body['messages']:
handle_message(user_id, message['text'])
elif body['trigger'] == 'postback':
for postback in body['postbacks']:
handle_message(user_id, postback['action']['payload'])
'''
# Persist message to database
author_id = body['messages'][0]['authorId']
message = body['messages'][0]['text']
persistence_data = None
existing_author_info = None
# if key exists then load its data
if db.key_exists(author_id):
existing_author_db_data = db.load_messages(author_id)
existing_author_info = json.loads(existing_author_db_data)
db.delete_messages(author_id) # remove key since its being retrieved and stored in memory
# Update db key if pre-existing
if isinstance(existing_author_info, (list)):
existing_author_info.append(message) # Problem here as it overrites list
persistence_data = json.dumps(existing_author_info)
print(persistence_data)
# perhaps delete that exists since we have retrieved its data in memory?
else:
persistence_data = json.dumps([message])
# Then finally save...
db.save_message(author_id, persistence_data)
'''
@app.route('/messages', methods=["POST"])
def handle_messages():
# print(request.get_json())
# Delay bot response and return immediate response
# to 'fix' facebook issue - https://chatbotsmagazine.com/listicle-of-things-missing-from-facebook-messenger-chatbot-platforms-documentation-d1d50922ef15
Promise.resolve(parse_request_data(request.get_data()))
return Response('ok', status=200) | api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it!"))
api_response = api_instance.post_message(APP_ID, user_id,
postText("Ok, how much is your weekly allowance budget?")) | conditional_block |
app.py | import flask
import json
import uuid
import jwt
import smooch
from flask import Flask, request, Response
from redis import Redis
from smooch.rest import ApiException
from promise import Promise
from persistence import Persistence
app = Flask(__name__)
app.secret_key = str(uuid.uuid4())
# redis = Redis(host='redis', port=6379)
db = Persistence()
redis = db.db
# redis.flushall()
# Load environment variables
with open('env-vars.json') as env_vars_json:
data = json.load(env_vars_json)
APP_ID = data['SMOOCH_APP_ID']
KEY_ID = data['SMOOCH_KEY_ID']
SECRET = data['SMOOCH_SECRET']
def generate_jwt_token():
token_bytes = jwt.encode({
'scope': 'app'
},
SECRET,
algorithm='HS256',
headers={
'kid': KEY_ID
})
return token_bytes.decode('utf-8')
# Configure API key authorization: jwt
smooch.configuration.api_key['Authorization'] = generate_jwt_token()
smooch.configuration.api_key_prefix['Authorization'] = 'Bearer'
# create an instance of the API class
api_instance = smooch.ConversationApi()
app_create_body = smooch.AppCreate() # AppCreate | Body for a createApp request.
### Health Checks
# IMAGES
def image(text):
if text == "Bible":
return "http://static7.bigstockphoto.com/thumbs/8/1/3/small2/318707.jpg"
elif text == "Easter":
return "http://www.beliefnet.com/columnists//deaconsbench/files/import/assets_c/2010/04/jesus-cross-thumb-400x528-12594.jpg"
elif text == "Budgeting":
return "https://www.aiadallas.org/media/uploads/event-images/budget_thumbnail.png"
elif text == "Spending":
return "http://thumbnails.billiondigital.com/297/151/1151297/1151253_small_checkboard.jpg"
elif text == "Talk":
return "https://rfclipart.com/image/thumbnail/22-f6-00/small-coffee-cup-Download-Free-Vector-File-EPS-677.jpg"
else:
return ""
# PING
@app.route('/')
def ping():
# Pull all database data and log on screen
all_messages = json.dumps([eval(redis.get(key).decode('utf8')) for key in redis.scan_iter("messages-*")])
return Response(all_messages, status=200)
def postText(text):
message = smooch.MessagePost(role='appMaker', type='text')
message.text = text
return message
def postTextWithReplies(text, replies):
message = smooch.MessagePost(role='appMaker', type='text')
message.text = text
actions = []
for reply in replies:
actions.append(smooch.Action(type='reply', text=reply, payload=reply))
message.actions = actions
return message
def postTextWithListReplies(text, replies):
message = smooch.MessagePost(role='appMaker', type='text')
message.text = text
actions = []
for reply in replies:
actions.append(smooch.Action(type='postback', text=reply, payload=reply))
message.actions = actions
return message
def postImage(uri):
message = smooch.MessagePost(role='appMaker', type='image')
message.media_url = uri
return message
def postFile(uri):
|
def postCarousel(list):
message = smooch.MessagePost(role='appMaker', type='carousel')
items = []
for item in list:
actions = []
actions.append(smooch.Action(type='postback', text=item, payload=item))
part = smooch.MessageItem(title=item, actions=actions)
part.media_url = image(item)
part.size = 'compact'
items.append(part)
message.items = items
return message
def handle_message(user_id, text):
if text == "Help":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Just say Hi, we can talk about Jesus or Money."))
elif text == "Talk":
api_response = api_instance.post_message(APP_ID, user_id,
postText("https://capuk.org/connect/contact-us"))
elif text == "Hello" or text == "Hey" or text == "Hi":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithReplies("What do you want to chat about?", ['Jesus', 'Money', 'Rachel']))
elif text == "hello" or text == "hey" or text == "hi":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithReplies("How do you feel about money?", [':D',':)',':(',':@']))
### JESUS ###
elif text == "Jesus":
api_response = api_instance.post_message(APP_ID, user_id,
postCarousel(['Bible', 'Easter', 'Talk']))
elif text == "Bible":
api_response = api_instance.post_message(APP_ID, user_id,
postText("https://www.desiringgod.org/articles/how-to-read-the-bible-for-yourself"))
elif text == "Easter":
api_response = api_instance.post_message(APP_ID, user_id,
postText("http://www.st-helens.org.uk/internationals/who-is-jesus"))
### MONEY ###
elif text == "Money":
api_response = api_instance.post_message(APP_ID, user_id,
postCarousel(['Budgeting', 'Spending', 'Talk']))
elif text == "Budgeting":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithListReplies("Are you happy to tell me about your budget?", ['Regular Budget', 'Weekly Allowance Budget']))
elif text == "Spending":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithListReplies("Are you happy to tell me about your spend?", ['Regular Spend', 'Weekly Allowance Spend']))
### BUDGET ###
elif text == "Regular Budget":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it!"))
api_response = api_instance.post_message(APP_ID, user_id,
postText("Ok, how much is your rent or mortgage?"))
elif text == "Weekly Allowance Budget":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it!"))
api_response = api_instance.post_message(APP_ID, user_id,
postText("Ok, how much is your weekly allowance budget?"))
elif text == "Weekly Allowance Spend":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Regular Spend":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! What did you spend money on?"))
api_response = api_instance.post_message(APP_ID, user_id,
postCarousel(['Home', 'Living', 'Travel', 'Family', 'Leisure', 'Future', 'Giving', 'Repayments']))
elif text == "Home":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Living":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Travel":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Family":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Leisure":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Future":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Giving":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Repayments":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Yes please":
api_response = api_instance.post_message(APP_ID, user_id,
postCarousel(['Budgeting', 'Spending', 'Talk']))
elif text == "I'm ok":
api_response = api_instance.post_message(APP_ID, user_id,
postText(":)"))
### RACHEL ###
elif text == "Rachel":
api_response = api_instance.post_message(APP_ID, user_id,
postFile("http://rachelschallenge.org/media/media_press_kit/Code_of_ethics.pdf"))
### EMOJI ###
elif text == ":D":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Have you thought about helping other people cope better? https://capuk.org"))
elif text == ":)":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Have you thought about helping other people cope better? https://capuk.org"))
elif text == ":(":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithReplies("You're not alone! I'm your budget buddy. I can help you cope better.", ['Yes please', 'I\'m ok']))
elif text == ":@":
api_response = api_instance.post_message(APP_ID, user_id,
postText("You're not alone!"))
elif text == "":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Speachless"))
else:
api_response = api_instance.post_message(APP_ID, user_id,
postText("I haven't learned that one yet"))
# Request handling logic
def parse_request_data(request_data):
body = json.loads(request_data)
user_id = body['appUser']['_id']
if body['trigger'] == 'message:appUser':
for message in body['messages']:
handle_message(user_id, message['text'])
elif body['trigger'] == 'postback':
for postback in body['postbacks']:
handle_message(user_id, postback['action']['payload'])
'''
# Persist message to database
author_id = body['messages'][0]['authorId']
message = body['messages'][0]['text']
persistence_data = None
existing_author_info = None
# if key exists then load its data
if db.key_exists(author_id):
existing_author_db_data = db.load_messages(author_id)
existing_author_info = json.loads(existing_author_db_data)
db.delete_messages(author_id) # remove key since its being retrieved and stored in memory
# Update db key if pre-existing
if isinstance(existing_author_info, (list)):
existing_author_info.append(message) # Problem here as it overrites list
persistence_data = json.dumps(existing_author_info)
print(persistence_data)
# perhaps delete that exists since we have retrieved its data in memory?
else:
persistence_data = json.dumps([message])
# Then finally save...
db.save_message(author_id, persistence_data)
'''
@app.route('/messages', methods=["POST"])
def handle_messages():
# print(request.get_json())
# Delay bot response and return immediate response
# to 'fix' facebook issue - https://chatbotsmagazine.com/listicle-of-things-missing-from-facebook-messenger-chatbot-platforms-documentation-d1d50922ef15
Promise.resolve(parse_request_data(request.get_data()))
return Response('ok', status=200) | message = smooch.MessagePost(role='appMaker', type='file')
message.media_url = uri
return message | identifier_body |
google_spreadsheets.rs | use std::collections::{HashMap, HashSet};
use std::convert::TryFrom;
use std::sync::Arc;
use datafusion::arrow::array::{ArrayRef, BooleanArray, PrimitiveArray, StringArray};
use datafusion::arrow::datatypes::{DataType, Field, Schema};
use datafusion::arrow::datatypes::{Float64Type, Int64Type};
use datafusion::arrow::record_batch::RecordBatch;
use regex::Regex;
use reqwest::Client;
use serde_derive::Deserialize;
use uriparse::URIReference;
use crate::error::ColumnQError;
use crate::table::{TableOptionGoogleSpreasheet, TableSource};
// steps
// * Activate the Google Sheets API in the Google API Console.
//
// * Create service account: https://console.developers.google.com/apis/api/sheets.googleapis.com/credentials?project=roapi-302505
// * create key and save the json format somewhere safe
// * Share spreadsheet with service account
#[derive(Deserialize, Debug)]
struct SheetProperties {
#[serde(rename = "sheetId")]
sheet_id: usize,
title: String,
index: usize,
// other unused attributes:
//
// "sheetType": "GRID",
// "gridProperties": {
// "rowCount": 1000,
// "columnCount": 28
// }
//
// see: https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/sheets#SheetProperties
}
#[derive(Deserialize, Debug)]
struct Sheet {
properties: SheetProperties,
// for all available fields, see:
// https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#Spreadsheet
}
#[derive(Deserialize, Debug)]
struct Spreadsheets {
sheets: Vec<Sheet>,
// other unused attributes:
// * spreadsheetId
// * properties
// * spreadsheetUrl
//
// see: https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets
}
#[allow(dead_code)]
#[derive(Deserialize, Debug)]
struct SpreadsheetValues {
range: String,
#[serde(rename = "majorDimension")]
major_dimension: String,
values: Vec<Vec<String>>,
}
// TODO: should we support optional column?
fn infer_value_type(v: &str) -> DataType {
// match order matters
match v {
// TODO: support Date64 and Time64
_ if v.parse::<i64>().is_ok() => DataType::Int64,
_ if v.parse::<f64>().is_ok() => DataType::Float64,
_ => match v.to_lowercase().as_str() {
"false" | "true" => DataType::Boolean,
_ => DataType::Utf8,
},
}
}
// util wrapper for calling google spreadsheet API
async fn gs_api_get(token: &str, url: &str) -> Result<reqwest::Response, ColumnQError> {
Client::builder()
.build()
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Failed to initialize HTTP client: {}",
e.to_string()
))
})?
.get(url)
.bearer_auth(token)
.send()
.await
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Failed to send API request: {}",
e.to_string()
))
})
}
fn coerce_type(l: DataType, r: DataType) -> DataType {
match (l, r) {
(DataType::Boolean, DataType::Boolean) => DataType::Boolean,
(DataType::Date32, DataType::Date32) => DataType::Date32,
(DataType::Date64, DataType::Date64)
| (DataType::Date64, DataType::Date32)
| (DataType::Date32, DataType::Date64) => DataType::Date64,
(DataType::Int64, DataType::Int64) => DataType::Int64,
(DataType::Float64, DataType::Float64)
| (DataType::Float64, DataType::Int64)
| (DataType::Int64, DataType::Float64) => DataType::Float64,
_ => DataType::Utf8,
}
}
fn infer_schema(rows: &[Vec<String>]) -> Schema {
let mut col_types: HashMap<&str, HashSet<DataType>> = HashMap::new();
let col_names = &rows[0];
rows.iter().skip(1).for_each(|row| {
row.iter().enumerate().for_each(|(i, col_val)| {
let col_name = &col_names[i];
let col_type = infer_value_type(col_val);
let entry = col_types.entry(col_name).or_insert_with(HashSet::new);
entry.insert(col_type);
});
});
let fields: Vec<Field> = col_names
.iter()
.map(|col_name| {
let set = col_types.entry(col_name).or_insert_with(|| {
// TODO: this should never happen, maybe we should use panic instead?
let mut set = HashSet::new();
set.insert(DataType::Utf8);
set
});
let mut dt_iter = set.iter().cloned();
let dt_init = dt_iter.next().unwrap_or(DataType::Utf8);
let dt = dt_iter.fold(dt_init, coerce_type);
// normalize column name by replacing space with under score
Field::new(&col_name.replace(" ", "_"), dt, false)
})
.collect();
Schema::new(fields)
}
fn parse_boolean(s: &str) -> bool {
s.eq_ignore_ascii_case("true")
}
fn sheet_values_to_record_batch(values: &[Vec<String>]) -> Result<RecordBatch, ColumnQError> {
let schema = infer_schema(values);
let arrays = schema
.fields()
.iter()
.enumerate()
.map(|(i, field)| {
// skip header row
let rows_iter = values.iter().skip(1);
Ok(match field.data_type() {
DataType::Boolean => Arc::new(
rows_iter
.map(|row| Some(parse_boolean(&row[i])))
.collect::<BooleanArray>(),
) as ArrayRef,
DataType::Int64 => Arc::new(
rows_iter
.map(|row| {
Ok(Some(row[i].parse::<i64>().map_err(|_| {
ColumnQError::GoogleSpreadsheets(format!(
"Expect int64 value, got: {}",
row[i]
))
})?))
})
.collect::<Result<PrimitiveArray<Int64Type>, ColumnQError>>()?,
) as ArrayRef,
DataType::Float64 => Arc::new(
rows_iter
.map(|row| {
Ok(Some(row[i].parse::<f64>().map_err(|_| {
ColumnQError::GoogleSpreadsheets(format!(
"Expect float64 value, got: {}",
row[i]
))
})?))
})
.collect::<Result<PrimitiveArray<Float64Type>, ColumnQError>>()?,
) as ArrayRef,
_ => Arc::new(rows_iter.map(|row| Some(&row[i])).collect::<StringArray>())
as ArrayRef,
})
})
.collect::<Result<Vec<ArrayRef>, ColumnQError>>()?;
Ok(RecordBatch::try_new(Arc::new(schema), arrays)?)
}
async fn fetch_auth_token(
opt: &TableOptionGoogleSpreasheet,
) -> Result<yup_oauth2::AccessToken, ColumnQError> {
// Read application creds from a file.The clientsecret file contains JSON like
// `{"installed":{"client_id": ... }}`
let creds = yup_oauth2::read_service_account_key(&opt.application_secret_path)
.await
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Error reading application secret from disk: {}",
e.to_string()
))
})?;
let sa = yup_oauth2::ServiceAccountAuthenticator::builder(creds)
.build()
.await
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Error building service account authenticator: {}",
e.to_string()
))
})?;
let scopes = &["https://www.googleapis.com/auth/spreadsheets.readonly"];
sa.token(scopes).await.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!("Failed to obtain OAuth2 token: {}", e.to_string()))
})
}
async fn resolve_sheet_title<'a, 'b, 'c, 'd>(
token: &'a str,
spreadsheet_id: &'b str,
uri: &'c URIReference<'d>,
) -> Result<String, ColumnQError> {
// look up sheet title by sheet id through API
let resp = gs_api_get(
token,
&format!(
"https://sheets.googleapis.com/v4/spreadsheets/{}",
spreadsheet_id
),
)
.await?
.error_for_status()
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Failed to resolve sheet title from API: {}",
e.to_string()
))
})?;
let spreadsheets = resp.json::<Spreadsheets>().await.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!("Failed to parse API response: {}", e.to_string()))
})?;
// when sheet id is not specified from config, try to parse it from URI
let sheet_id: Option<usize> = match uri.fragment() {
// if sheeit id is specified within the URI in the format of #gid=x
Some(fragment) => {
let s = fragment.as_str();
let parts: Vec<&str> = s.split('=').collect();
match parts.len() {
2 => match parts[0] {
"gid" => parts[1].parse().ok(),
_ => None,
},
_ => None,
}
}
None => None,
};
let sheet = match sheet_id {
Some(id) => spreadsheets
.sheets
.iter()
.find(|s| s.properties.sheet_id == id)
.ok_or_else(|| ColumnQError::GoogleSpreadsheets(format!("Invalid sheet id {}", id)))?,
// no sheet id specified, default to the first sheet
None => spreadsheets
.sheets
.iter()
.find(|s| s.properties.index == 0)
.ok_or_else(|| ColumnQError::GoogleSpreadsheets("spreadsheets is empty".to_string()))?,
};
Ok(sheet.properties.title.clone())
}
pub async fn to_mem_table(
t: &TableSource,
) -> Result<datafusion::datasource::MemTable, ColumnQError> {
lazy_static! {
static ref RE_GOOGLE_SHEET: Regex =
Regex::new(r"https://docs.google.com/spreadsheets/d/(.+)").unwrap();
}
let uri_str = t.get_uri_str();
if RE_GOOGLE_SHEET.captures(uri_str).is_none() |
let uri = URIReference::try_from(uri_str)?;
let spreadsheet_id = uri.path().segments()[2].as_str();
let opt = t
.option
.as_ref()
.ok_or(ColumnQError::MissingOption)?
.as_google_spreadsheet()?;
let token = fetch_auth_token(opt).await?;
let token_str = token.as_str();
let sheet_title = match &opt.sheet_title {
Some(t) => t.clone(),
None => resolve_sheet_title(token_str, spreadsheet_id, &uri).await?,
};
let resp = gs_api_get(
token_str,
&format!(
"https://sheets.googleapis.com/v4/spreadsheets/{}/values/{}",
spreadsheet_id, sheet_title,
),
)
.await?
.error_for_status()
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Failed to load sheet value from API: {}",
e.to_string()
))
})?;
let sheet = resp.json::<SpreadsheetValues>().await.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!("Failed to parse API response: {}", e.to_string()))
})?;
let batch = sheet_values_to_record_batch(&sheet.values)?;
let schema_ref = batch.schema();
let partitions = vec![vec![batch]];
Ok(datafusion::datasource::MemTable::try_new(
schema_ref, partitions,
)?)
}
#[cfg(test)]
mod tests {
use super::*;
use datafusion::arrow::array::{BooleanArray, Int64Array};
fn row(raw: &[&str]) -> Vec<String> {
raw.iter().map(|s| s.to_string()).collect()
}
fn property_sheet() -> SpreadsheetValues {
SpreadsheetValues {
range: "Properties!A1:AB1000".to_string(),
major_dimension: "ROWS".to_string(),
values: vec![
row(&[
"Address",
"Image",
"Landlord",
"Bed",
"Bath",
"Occupied",
"Monthly Rent",
"Lease Expiration Date",
"Days Until Expiration",
]),
row(&[
"Bothell, WA",
"https://a.com/1.jpeg",
"Roger",
"3",
"2",
"FALSE",
"$2,000",
"10/23/2020",
"Expired",
]),
row(&[
"Mill Creek, WA",
"https://a.com/2.jpeg",
"Sam",
"3",
"3",
"TRUE",
"$3,500",
"8/4/2021",
"193",
]),
row(&[
"Fremont, WA",
"",
"Daniel",
"5",
"3",
"FALSE",
"$4,500",
"7/13/2019",
"Expired",
]),
row(&[
"Shoreline, WA",
"https://a.com/3.jpeg",
"Roger",
"1",
"1",
"TRUE",
"$1,200",
"12/9/2021",
"320",
]),
],
}
}
#[test]
fn schema_inference() {
let sheet = property_sheet();
let schema = infer_schema(&sheet.values);
assert_eq!(
schema,
Schema::new(vec![
Field::new("Address", DataType::Utf8, false),
Field::new("Image", DataType::Utf8, false),
Field::new("Landlord", DataType::Utf8, false),
Field::new("Bed", DataType::Int64, false),
Field::new("Bath", DataType::Int64, false),
Field::new("Occupied", DataType::Boolean, false),
Field::new("Monthly_Rent", DataType::Utf8, false),
Field::new("Lease_Expiration_Date", DataType::Utf8, false),
Field::new("Days_Until_Expiration", DataType::Utf8, false),
])
);
}
#[test]
fn sheetvalue_to_record_batch() -> anyhow::Result<()> {
let sheet = property_sheet();
let batch = sheet_values_to_record_batch(&sheet.values)?;
assert_eq!(batch.num_columns(), 9);
assert_eq!(
batch.column(3).as_ref(),
Arc::new(Int64Array::from(vec![3, 3, 5, 1])).as_ref(),
);
assert_eq!(
batch.column(5).as_ref(),
Arc::new(BooleanArray::from(vec![false, true, false, true])).as_ref(),
);
assert_eq!(
batch.column(2).as_ref(),
Arc::new(StringArray::from(vec!["Roger", "Sam", "Daniel", "Roger"])).as_ref(),
);
Ok(())
}
}
| {
return Err(ColumnQError::InvalidUri(uri_str.to_string()));
} | conditional_block |
google_spreadsheets.rs | use std::collections::{HashMap, HashSet};
use std::convert::TryFrom;
use std::sync::Arc;
use datafusion::arrow::array::{ArrayRef, BooleanArray, PrimitiveArray, StringArray};
use datafusion::arrow::datatypes::{DataType, Field, Schema};
use datafusion::arrow::datatypes::{Float64Type, Int64Type};
use datafusion::arrow::record_batch::RecordBatch;
use regex::Regex;
use reqwest::Client;
use serde_derive::Deserialize;
use uriparse::URIReference;
use crate::error::ColumnQError;
use crate::table::{TableOptionGoogleSpreasheet, TableSource};
// steps
// * Activate the Google Sheets API in the Google API Console.
//
// * Create service account: https://console.developers.google.com/apis/api/sheets.googleapis.com/credentials?project=roapi-302505
// * create key and save the json format somewhere safe
// * Share spreadsheet with service account
#[derive(Deserialize, Debug)]
struct SheetProperties {
#[serde(rename = "sheetId")]
sheet_id: usize,
title: String,
index: usize,
// other unused attributes:
//
// "sheetType": "GRID",
// "gridProperties": {
// "rowCount": 1000,
// "columnCount": 28
// }
//
// see: https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/sheets#SheetProperties
}
#[derive(Deserialize, Debug)]
struct Sheet {
properties: SheetProperties,
// for all available fields, see:
// https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#Spreadsheet
}
#[derive(Deserialize, Debug)]
struct Spreadsheets {
sheets: Vec<Sheet>,
// other unused attributes:
// * spreadsheetId
// * properties
// * spreadsheetUrl
//
// see: https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets
}
#[allow(dead_code)]
#[derive(Deserialize, Debug)]
struct SpreadsheetValues {
range: String,
#[serde(rename = "majorDimension")]
major_dimension: String,
values: Vec<Vec<String>>,
}
// TODO: should we support optional column?
fn infer_value_type(v: &str) -> DataType {
// match order matters
match v {
// TODO: support Date64 and Time64
_ if v.parse::<i64>().is_ok() => DataType::Int64,
_ if v.parse::<f64>().is_ok() => DataType::Float64,
_ => match v.to_lowercase().as_str() {
"false" | "true" => DataType::Boolean,
_ => DataType::Utf8,
},
}
}
// util wrapper for calling google spreadsheet API
async fn gs_api_get(token: &str, url: &str) -> Result<reqwest::Response, ColumnQError> {
Client::builder()
.build()
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Failed to initialize HTTP client: {}",
e.to_string()
))
})?
.get(url)
.bearer_auth(token)
.send()
.await
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Failed to send API request: {}",
e.to_string()
))
})
}
fn coerce_type(l: DataType, r: DataType) -> DataType {
match (l, r) {
(DataType::Boolean, DataType::Boolean) => DataType::Boolean,
(DataType::Date32, DataType::Date32) => DataType::Date32,
(DataType::Date64, DataType::Date64)
| (DataType::Date64, DataType::Date32)
| (DataType::Date32, DataType::Date64) => DataType::Date64,
(DataType::Int64, DataType::Int64) => DataType::Int64,
(DataType::Float64, DataType::Float64)
| (DataType::Float64, DataType::Int64)
| (DataType::Int64, DataType::Float64) => DataType::Float64,
_ => DataType::Utf8,
}
}
fn infer_schema(rows: &[Vec<String>]) -> Schema {
let mut col_types: HashMap<&str, HashSet<DataType>> = HashMap::new();
let col_names = &rows[0];
rows.iter().skip(1).for_each(|row| {
row.iter().enumerate().for_each(|(i, col_val)| {
let col_name = &col_names[i];
let col_type = infer_value_type(col_val);
let entry = col_types.entry(col_name).or_insert_with(HashSet::new);
entry.insert(col_type);
});
}); | let fields: Vec<Field> = col_names
.iter()
.map(|col_name| {
let set = col_types.entry(col_name).or_insert_with(|| {
// TODO: this should never happen, maybe we should use panic instead?
let mut set = HashSet::new();
set.insert(DataType::Utf8);
set
});
let mut dt_iter = set.iter().cloned();
let dt_init = dt_iter.next().unwrap_or(DataType::Utf8);
let dt = dt_iter.fold(dt_init, coerce_type);
// normalize column name by replacing space with under score
Field::new(&col_name.replace(" ", "_"), dt, false)
})
.collect();
Schema::new(fields)
}
fn parse_boolean(s: &str) -> bool {
s.eq_ignore_ascii_case("true")
}
fn sheet_values_to_record_batch(values: &[Vec<String>]) -> Result<RecordBatch, ColumnQError> {
let schema = infer_schema(values);
let arrays = schema
.fields()
.iter()
.enumerate()
.map(|(i, field)| {
// skip header row
let rows_iter = values.iter().skip(1);
Ok(match field.data_type() {
DataType::Boolean => Arc::new(
rows_iter
.map(|row| Some(parse_boolean(&row[i])))
.collect::<BooleanArray>(),
) as ArrayRef,
DataType::Int64 => Arc::new(
rows_iter
.map(|row| {
Ok(Some(row[i].parse::<i64>().map_err(|_| {
ColumnQError::GoogleSpreadsheets(format!(
"Expect int64 value, got: {}",
row[i]
))
})?))
})
.collect::<Result<PrimitiveArray<Int64Type>, ColumnQError>>()?,
) as ArrayRef,
DataType::Float64 => Arc::new(
rows_iter
.map(|row| {
Ok(Some(row[i].parse::<f64>().map_err(|_| {
ColumnQError::GoogleSpreadsheets(format!(
"Expect float64 value, got: {}",
row[i]
))
})?))
})
.collect::<Result<PrimitiveArray<Float64Type>, ColumnQError>>()?,
) as ArrayRef,
_ => Arc::new(rows_iter.map(|row| Some(&row[i])).collect::<StringArray>())
as ArrayRef,
})
})
.collect::<Result<Vec<ArrayRef>, ColumnQError>>()?;
Ok(RecordBatch::try_new(Arc::new(schema), arrays)?)
}
async fn fetch_auth_token(
opt: &TableOptionGoogleSpreasheet,
) -> Result<yup_oauth2::AccessToken, ColumnQError> {
// Read application creds from a file.The clientsecret file contains JSON like
// `{"installed":{"client_id": ... }}`
let creds = yup_oauth2::read_service_account_key(&opt.application_secret_path)
.await
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Error reading application secret from disk: {}",
e.to_string()
))
})?;
let sa = yup_oauth2::ServiceAccountAuthenticator::builder(creds)
.build()
.await
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Error building service account authenticator: {}",
e.to_string()
))
})?;
let scopes = &["https://www.googleapis.com/auth/spreadsheets.readonly"];
sa.token(scopes).await.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!("Failed to obtain OAuth2 token: {}", e.to_string()))
})
}
async fn resolve_sheet_title<'a, 'b, 'c, 'd>(
token: &'a str,
spreadsheet_id: &'b str,
uri: &'c URIReference<'d>,
) -> Result<String, ColumnQError> {
// look up sheet title by sheet id through API
let resp = gs_api_get(
token,
&format!(
"https://sheets.googleapis.com/v4/spreadsheets/{}",
spreadsheet_id
),
)
.await?
.error_for_status()
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Failed to resolve sheet title from API: {}",
e.to_string()
))
})?;
let spreadsheets = resp.json::<Spreadsheets>().await.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!("Failed to parse API response: {}", e.to_string()))
})?;
// when sheet id is not specified from config, try to parse it from URI
let sheet_id: Option<usize> = match uri.fragment() {
// if sheeit id is specified within the URI in the format of #gid=x
Some(fragment) => {
let s = fragment.as_str();
let parts: Vec<&str> = s.split('=').collect();
match parts.len() {
2 => match parts[0] {
"gid" => parts[1].parse().ok(),
_ => None,
},
_ => None,
}
}
None => None,
};
let sheet = match sheet_id {
Some(id) => spreadsheets
.sheets
.iter()
.find(|s| s.properties.sheet_id == id)
.ok_or_else(|| ColumnQError::GoogleSpreadsheets(format!("Invalid sheet id {}", id)))?,
// no sheet id specified, default to the first sheet
None => spreadsheets
.sheets
.iter()
.find(|s| s.properties.index == 0)
.ok_or_else(|| ColumnQError::GoogleSpreadsheets("spreadsheets is empty".to_string()))?,
};
Ok(sheet.properties.title.clone())
}
pub async fn to_mem_table(
t: &TableSource,
) -> Result<datafusion::datasource::MemTable, ColumnQError> {
lazy_static! {
static ref RE_GOOGLE_SHEET: Regex =
Regex::new(r"https://docs.google.com/spreadsheets/d/(.+)").unwrap();
}
let uri_str = t.get_uri_str();
if RE_GOOGLE_SHEET.captures(uri_str).is_none() {
return Err(ColumnQError::InvalidUri(uri_str.to_string()));
}
let uri = URIReference::try_from(uri_str)?;
let spreadsheet_id = uri.path().segments()[2].as_str();
let opt = t
.option
.as_ref()
.ok_or(ColumnQError::MissingOption)?
.as_google_spreadsheet()?;
let token = fetch_auth_token(opt).await?;
let token_str = token.as_str();
let sheet_title = match &opt.sheet_title {
Some(t) => t.clone(),
None => resolve_sheet_title(token_str, spreadsheet_id, &uri).await?,
};
let resp = gs_api_get(
token_str,
&format!(
"https://sheets.googleapis.com/v4/spreadsheets/{}/values/{}",
spreadsheet_id, sheet_title,
),
)
.await?
.error_for_status()
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Failed to load sheet value from API: {}",
e.to_string()
))
})?;
let sheet = resp.json::<SpreadsheetValues>().await.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!("Failed to parse API response: {}", e.to_string()))
})?;
let batch = sheet_values_to_record_batch(&sheet.values)?;
let schema_ref = batch.schema();
let partitions = vec![vec![batch]];
Ok(datafusion::datasource::MemTable::try_new(
schema_ref, partitions,
)?)
}
#[cfg(test)]
mod tests {
use super::*;
use datafusion::arrow::array::{BooleanArray, Int64Array};
fn row(raw: &[&str]) -> Vec<String> {
raw.iter().map(|s| s.to_string()).collect()
}
fn property_sheet() -> SpreadsheetValues {
SpreadsheetValues {
range: "Properties!A1:AB1000".to_string(),
major_dimension: "ROWS".to_string(),
values: vec![
row(&[
"Address",
"Image",
"Landlord",
"Bed",
"Bath",
"Occupied",
"Monthly Rent",
"Lease Expiration Date",
"Days Until Expiration",
]),
row(&[
"Bothell, WA",
"https://a.com/1.jpeg",
"Roger",
"3",
"2",
"FALSE",
"$2,000",
"10/23/2020",
"Expired",
]),
row(&[
"Mill Creek, WA",
"https://a.com/2.jpeg",
"Sam",
"3",
"3",
"TRUE",
"$3,500",
"8/4/2021",
"193",
]),
row(&[
"Fremont, WA",
"",
"Daniel",
"5",
"3",
"FALSE",
"$4,500",
"7/13/2019",
"Expired",
]),
row(&[
"Shoreline, WA",
"https://a.com/3.jpeg",
"Roger",
"1",
"1",
"TRUE",
"$1,200",
"12/9/2021",
"320",
]),
],
}
}
#[test]
fn schema_inference() {
let sheet = property_sheet();
let schema = infer_schema(&sheet.values);
assert_eq!(
schema,
Schema::new(vec![
Field::new("Address", DataType::Utf8, false),
Field::new("Image", DataType::Utf8, false),
Field::new("Landlord", DataType::Utf8, false),
Field::new("Bed", DataType::Int64, false),
Field::new("Bath", DataType::Int64, false),
Field::new("Occupied", DataType::Boolean, false),
Field::new("Monthly_Rent", DataType::Utf8, false),
Field::new("Lease_Expiration_Date", DataType::Utf8, false),
Field::new("Days_Until_Expiration", DataType::Utf8, false),
])
);
}
#[test]
fn sheetvalue_to_record_batch() -> anyhow::Result<()> {
let sheet = property_sheet();
let batch = sheet_values_to_record_batch(&sheet.values)?;
assert_eq!(batch.num_columns(), 9);
assert_eq!(
batch.column(3).as_ref(),
Arc::new(Int64Array::from(vec![3, 3, 5, 1])).as_ref(),
);
assert_eq!(
batch.column(5).as_ref(),
Arc::new(BooleanArray::from(vec![false, true, false, true])).as_ref(),
);
assert_eq!(
batch.column(2).as_ref(),
Arc::new(StringArray::from(vec!["Roger", "Sam", "Daniel", "Roger"])).as_ref(),
);
Ok(())
}
} | random_line_split | |
google_spreadsheets.rs | use std::collections::{HashMap, HashSet};
use std::convert::TryFrom;
use std::sync::Arc;
use datafusion::arrow::array::{ArrayRef, BooleanArray, PrimitiveArray, StringArray};
use datafusion::arrow::datatypes::{DataType, Field, Schema};
use datafusion::arrow::datatypes::{Float64Type, Int64Type};
use datafusion::arrow::record_batch::RecordBatch;
use regex::Regex;
use reqwest::Client;
use serde_derive::Deserialize;
use uriparse::URIReference;
use crate::error::ColumnQError;
use crate::table::{TableOptionGoogleSpreasheet, TableSource};
// steps
// * Activate the Google Sheets API in the Google API Console.
//
// * Create service account: https://console.developers.google.com/apis/api/sheets.googleapis.com/credentials?project=roapi-302505
// * create key and save the json format somewhere safe
// * Share spreadsheet with service account
#[derive(Deserialize, Debug)]
struct SheetProperties {
#[serde(rename = "sheetId")]
sheet_id: usize,
title: String,
index: usize,
// other unused attributes:
//
// "sheetType": "GRID",
// "gridProperties": {
// "rowCount": 1000,
// "columnCount": 28
// }
//
// see: https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/sheets#SheetProperties
}
#[derive(Deserialize, Debug)]
struct Sheet {
properties: SheetProperties,
// for all available fields, see:
// https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#Spreadsheet
}
#[derive(Deserialize, Debug)]
struct Spreadsheets {
sheets: Vec<Sheet>,
// other unused attributes:
// * spreadsheetId
// * properties
// * spreadsheetUrl
//
// see: https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets
}
#[allow(dead_code)]
#[derive(Deserialize, Debug)]
struct SpreadsheetValues {
range: String,
#[serde(rename = "majorDimension")]
major_dimension: String,
values: Vec<Vec<String>>,
}
// TODO: should we support optional column?
fn infer_value_type(v: &str) -> DataType {
// match order matters
match v {
// TODO: support Date64 and Time64
_ if v.parse::<i64>().is_ok() => DataType::Int64,
_ if v.parse::<f64>().is_ok() => DataType::Float64,
_ => match v.to_lowercase().as_str() {
"false" | "true" => DataType::Boolean,
_ => DataType::Utf8,
},
}
}
// util wrapper for calling google spreadsheet API
async fn gs_api_get(token: &str, url: &str) -> Result<reqwest::Response, ColumnQError> |
fn coerce_type(l: DataType, r: DataType) -> DataType {
match (l, r) {
(DataType::Boolean, DataType::Boolean) => DataType::Boolean,
(DataType::Date32, DataType::Date32) => DataType::Date32,
(DataType::Date64, DataType::Date64)
| (DataType::Date64, DataType::Date32)
| (DataType::Date32, DataType::Date64) => DataType::Date64,
(DataType::Int64, DataType::Int64) => DataType::Int64,
(DataType::Float64, DataType::Float64)
| (DataType::Float64, DataType::Int64)
| (DataType::Int64, DataType::Float64) => DataType::Float64,
_ => DataType::Utf8,
}
}
fn infer_schema(rows: &[Vec<String>]) -> Schema {
let mut col_types: HashMap<&str, HashSet<DataType>> = HashMap::new();
let col_names = &rows[0];
rows.iter().skip(1).for_each(|row| {
row.iter().enumerate().for_each(|(i, col_val)| {
let col_name = &col_names[i];
let col_type = infer_value_type(col_val);
let entry = col_types.entry(col_name).or_insert_with(HashSet::new);
entry.insert(col_type);
});
});
let fields: Vec<Field> = col_names
.iter()
.map(|col_name| {
let set = col_types.entry(col_name).or_insert_with(|| {
// TODO: this should never happen, maybe we should use panic instead?
let mut set = HashSet::new();
set.insert(DataType::Utf8);
set
});
let mut dt_iter = set.iter().cloned();
let dt_init = dt_iter.next().unwrap_or(DataType::Utf8);
let dt = dt_iter.fold(dt_init, coerce_type);
// normalize column name by replacing space with under score
Field::new(&col_name.replace(" ", "_"), dt, false)
})
.collect();
Schema::new(fields)
}
fn parse_boolean(s: &str) -> bool {
s.eq_ignore_ascii_case("true")
}
fn sheet_values_to_record_batch(values: &[Vec<String>]) -> Result<RecordBatch, ColumnQError> {
let schema = infer_schema(values);
let arrays = schema
.fields()
.iter()
.enumerate()
.map(|(i, field)| {
// skip header row
let rows_iter = values.iter().skip(1);
Ok(match field.data_type() {
DataType::Boolean => Arc::new(
rows_iter
.map(|row| Some(parse_boolean(&row[i])))
.collect::<BooleanArray>(),
) as ArrayRef,
DataType::Int64 => Arc::new(
rows_iter
.map(|row| {
Ok(Some(row[i].parse::<i64>().map_err(|_| {
ColumnQError::GoogleSpreadsheets(format!(
"Expect int64 value, got: {}",
row[i]
))
})?))
})
.collect::<Result<PrimitiveArray<Int64Type>, ColumnQError>>()?,
) as ArrayRef,
DataType::Float64 => Arc::new(
rows_iter
.map(|row| {
Ok(Some(row[i].parse::<f64>().map_err(|_| {
ColumnQError::GoogleSpreadsheets(format!(
"Expect float64 value, got: {}",
row[i]
))
})?))
})
.collect::<Result<PrimitiveArray<Float64Type>, ColumnQError>>()?,
) as ArrayRef,
_ => Arc::new(rows_iter.map(|row| Some(&row[i])).collect::<StringArray>())
as ArrayRef,
})
})
.collect::<Result<Vec<ArrayRef>, ColumnQError>>()?;
Ok(RecordBatch::try_new(Arc::new(schema), arrays)?)
}
async fn fetch_auth_token(
opt: &TableOptionGoogleSpreasheet,
) -> Result<yup_oauth2::AccessToken, ColumnQError> {
// Read application creds from a file.The clientsecret file contains JSON like
// `{"installed":{"client_id": ... }}`
let creds = yup_oauth2::read_service_account_key(&opt.application_secret_path)
.await
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Error reading application secret from disk: {}",
e.to_string()
))
})?;
let sa = yup_oauth2::ServiceAccountAuthenticator::builder(creds)
.build()
.await
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Error building service account authenticator: {}",
e.to_string()
))
})?;
let scopes = &["https://www.googleapis.com/auth/spreadsheets.readonly"];
sa.token(scopes).await.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!("Failed to obtain OAuth2 token: {}", e.to_string()))
})
}
async fn resolve_sheet_title<'a, 'b, 'c, 'd>(
token: &'a str,
spreadsheet_id: &'b str,
uri: &'c URIReference<'d>,
) -> Result<String, ColumnQError> {
// look up sheet title by sheet id through API
let resp = gs_api_get(
token,
&format!(
"https://sheets.googleapis.com/v4/spreadsheets/{}",
spreadsheet_id
),
)
.await?
.error_for_status()
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Failed to resolve sheet title from API: {}",
e.to_string()
))
})?;
let spreadsheets = resp.json::<Spreadsheets>().await.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!("Failed to parse API response: {}", e.to_string()))
})?;
// when sheet id is not specified from config, try to parse it from URI
let sheet_id: Option<usize> = match uri.fragment() {
// if sheeit id is specified within the URI in the format of #gid=x
Some(fragment) => {
let s = fragment.as_str();
let parts: Vec<&str> = s.split('=').collect();
match parts.len() {
2 => match parts[0] {
"gid" => parts[1].parse().ok(),
_ => None,
},
_ => None,
}
}
None => None,
};
let sheet = match sheet_id {
Some(id) => spreadsheets
.sheets
.iter()
.find(|s| s.properties.sheet_id == id)
.ok_or_else(|| ColumnQError::GoogleSpreadsheets(format!("Invalid sheet id {}", id)))?,
// no sheet id specified, default to the first sheet
None => spreadsheets
.sheets
.iter()
.find(|s| s.properties.index == 0)
.ok_or_else(|| ColumnQError::GoogleSpreadsheets("spreadsheets is empty".to_string()))?,
};
Ok(sheet.properties.title.clone())
}
pub async fn to_mem_table(
t: &TableSource,
) -> Result<datafusion::datasource::MemTable, ColumnQError> {
lazy_static! {
static ref RE_GOOGLE_SHEET: Regex =
Regex::new(r"https://docs.google.com/spreadsheets/d/(.+)").unwrap();
}
let uri_str = t.get_uri_str();
if RE_GOOGLE_SHEET.captures(uri_str).is_none() {
return Err(ColumnQError::InvalidUri(uri_str.to_string()));
}
let uri = URIReference::try_from(uri_str)?;
let spreadsheet_id = uri.path().segments()[2].as_str();
let opt = t
.option
.as_ref()
.ok_or(ColumnQError::MissingOption)?
.as_google_spreadsheet()?;
let token = fetch_auth_token(opt).await?;
let token_str = token.as_str();
let sheet_title = match &opt.sheet_title {
Some(t) => t.clone(),
None => resolve_sheet_title(token_str, spreadsheet_id, &uri).await?,
};
let resp = gs_api_get(
token_str,
&format!(
"https://sheets.googleapis.com/v4/spreadsheets/{}/values/{}",
spreadsheet_id, sheet_title,
),
)
.await?
.error_for_status()
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Failed to load sheet value from API: {}",
e.to_string()
))
})?;
let sheet = resp.json::<SpreadsheetValues>().await.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!("Failed to parse API response: {}", e.to_string()))
})?;
let batch = sheet_values_to_record_batch(&sheet.values)?;
let schema_ref = batch.schema();
let partitions = vec![vec![batch]];
Ok(datafusion::datasource::MemTable::try_new(
schema_ref, partitions,
)?)
}
#[cfg(test)]
mod tests {
use super::*;
use datafusion::arrow::array::{BooleanArray, Int64Array};
fn row(raw: &[&str]) -> Vec<String> {
raw.iter().map(|s| s.to_string()).collect()
}
fn property_sheet() -> SpreadsheetValues {
SpreadsheetValues {
range: "Properties!A1:AB1000".to_string(),
major_dimension: "ROWS".to_string(),
values: vec![
row(&[
"Address",
"Image",
"Landlord",
"Bed",
"Bath",
"Occupied",
"Monthly Rent",
"Lease Expiration Date",
"Days Until Expiration",
]),
row(&[
"Bothell, WA",
"https://a.com/1.jpeg",
"Roger",
"3",
"2",
"FALSE",
"$2,000",
"10/23/2020",
"Expired",
]),
row(&[
"Mill Creek, WA",
"https://a.com/2.jpeg",
"Sam",
"3",
"3",
"TRUE",
"$3,500",
"8/4/2021",
"193",
]),
row(&[
"Fremont, WA",
"",
"Daniel",
"5",
"3",
"FALSE",
"$4,500",
"7/13/2019",
"Expired",
]),
row(&[
"Shoreline, WA",
"https://a.com/3.jpeg",
"Roger",
"1",
"1",
"TRUE",
"$1,200",
"12/9/2021",
"320",
]),
],
}
}
#[test]
fn schema_inference() {
let sheet = property_sheet();
let schema = infer_schema(&sheet.values);
assert_eq!(
schema,
Schema::new(vec![
Field::new("Address", DataType::Utf8, false),
Field::new("Image", DataType::Utf8, false),
Field::new("Landlord", DataType::Utf8, false),
Field::new("Bed", DataType::Int64, false),
Field::new("Bath", DataType::Int64, false),
Field::new("Occupied", DataType::Boolean, false),
Field::new("Monthly_Rent", DataType::Utf8, false),
Field::new("Lease_Expiration_Date", DataType::Utf8, false),
Field::new("Days_Until_Expiration", DataType::Utf8, false),
])
);
}
#[test]
fn sheetvalue_to_record_batch() -> anyhow::Result<()> {
let sheet = property_sheet();
let batch = sheet_values_to_record_batch(&sheet.values)?;
assert_eq!(batch.num_columns(), 9);
assert_eq!(
batch.column(3).as_ref(),
Arc::new(Int64Array::from(vec![3, 3, 5, 1])).as_ref(),
);
assert_eq!(
batch.column(5).as_ref(),
Arc::new(BooleanArray::from(vec![false, true, false, true])).as_ref(),
);
assert_eq!(
batch.column(2).as_ref(),
Arc::new(StringArray::from(vec!["Roger", "Sam", "Daniel", "Roger"])).as_ref(),
);
Ok(())
}
}
| {
Client::builder()
.build()
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Failed to initialize HTTP client: {}",
e.to_string()
))
})?
.get(url)
.bearer_auth(token)
.send()
.await
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Failed to send API request: {}",
e.to_string()
))
})
} | identifier_body |
google_spreadsheets.rs | use std::collections::{HashMap, HashSet};
use std::convert::TryFrom;
use std::sync::Arc;
use datafusion::arrow::array::{ArrayRef, BooleanArray, PrimitiveArray, StringArray};
use datafusion::arrow::datatypes::{DataType, Field, Schema};
use datafusion::arrow::datatypes::{Float64Type, Int64Type};
use datafusion::arrow::record_batch::RecordBatch;
use regex::Regex;
use reqwest::Client;
use serde_derive::Deserialize;
use uriparse::URIReference;
use crate::error::ColumnQError;
use crate::table::{TableOptionGoogleSpreasheet, TableSource};
// steps
// * Activate the Google Sheets API in the Google API Console.
//
// * Create service account: https://console.developers.google.com/apis/api/sheets.googleapis.com/credentials?project=roapi-302505
// * create key and save the json format somewhere safe
// * Share spreadsheet with service account
#[derive(Deserialize, Debug)]
struct SheetProperties {
#[serde(rename = "sheetId")]
sheet_id: usize,
title: String,
index: usize,
// other unused attributes:
//
// "sheetType": "GRID",
// "gridProperties": {
// "rowCount": 1000,
// "columnCount": 28
// }
//
// see: https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/sheets#SheetProperties
}
#[derive(Deserialize, Debug)]
struct Sheet {
properties: SheetProperties,
// for all available fields, see:
// https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#Spreadsheet
}
#[derive(Deserialize, Debug)]
struct Spreadsheets {
sheets: Vec<Sheet>,
// other unused attributes:
// * spreadsheetId
// * properties
// * spreadsheetUrl
//
// see: https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets
}
#[allow(dead_code)]
#[derive(Deserialize, Debug)]
struct SpreadsheetValues {
range: String,
#[serde(rename = "majorDimension")]
major_dimension: String,
values: Vec<Vec<String>>,
}
// TODO: should we support optional column?
fn infer_value_type(v: &str) -> DataType {
// match order matters
match v {
// TODO: support Date64 and Time64
_ if v.parse::<i64>().is_ok() => DataType::Int64,
_ if v.parse::<f64>().is_ok() => DataType::Float64,
_ => match v.to_lowercase().as_str() {
"false" | "true" => DataType::Boolean,
_ => DataType::Utf8,
},
}
}
// util wrapper for calling google spreadsheet API
async fn gs_api_get(token: &str, url: &str) -> Result<reqwest::Response, ColumnQError> {
Client::builder()
.build()
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Failed to initialize HTTP client: {}",
e.to_string()
))
})?
.get(url)
.bearer_auth(token)
.send()
.await
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Failed to send API request: {}",
e.to_string()
))
})
}
fn coerce_type(l: DataType, r: DataType) -> DataType {
match (l, r) {
(DataType::Boolean, DataType::Boolean) => DataType::Boolean,
(DataType::Date32, DataType::Date32) => DataType::Date32,
(DataType::Date64, DataType::Date64)
| (DataType::Date64, DataType::Date32)
| (DataType::Date32, DataType::Date64) => DataType::Date64,
(DataType::Int64, DataType::Int64) => DataType::Int64,
(DataType::Float64, DataType::Float64)
| (DataType::Float64, DataType::Int64)
| (DataType::Int64, DataType::Float64) => DataType::Float64,
_ => DataType::Utf8,
}
}
fn infer_schema(rows: &[Vec<String>]) -> Schema {
let mut col_types: HashMap<&str, HashSet<DataType>> = HashMap::new();
let col_names = &rows[0];
rows.iter().skip(1).for_each(|row| {
row.iter().enumerate().for_each(|(i, col_val)| {
let col_name = &col_names[i];
let col_type = infer_value_type(col_val);
let entry = col_types.entry(col_name).or_insert_with(HashSet::new);
entry.insert(col_type);
});
});
let fields: Vec<Field> = col_names
.iter()
.map(|col_name| {
let set = col_types.entry(col_name).or_insert_with(|| {
// TODO: this should never happen, maybe we should use panic instead?
let mut set = HashSet::new();
set.insert(DataType::Utf8);
set
});
let mut dt_iter = set.iter().cloned();
let dt_init = dt_iter.next().unwrap_or(DataType::Utf8);
let dt = dt_iter.fold(dt_init, coerce_type);
// normalize column name by replacing space with under score
Field::new(&col_name.replace(" ", "_"), dt, false)
})
.collect();
Schema::new(fields)
}
fn | (s: &str) -> bool {
s.eq_ignore_ascii_case("true")
}
fn sheet_values_to_record_batch(values: &[Vec<String>]) -> Result<RecordBatch, ColumnQError> {
let schema = infer_schema(values);
let arrays = schema
.fields()
.iter()
.enumerate()
.map(|(i, field)| {
// skip header row
let rows_iter = values.iter().skip(1);
Ok(match field.data_type() {
DataType::Boolean => Arc::new(
rows_iter
.map(|row| Some(parse_boolean(&row[i])))
.collect::<BooleanArray>(),
) as ArrayRef,
DataType::Int64 => Arc::new(
rows_iter
.map(|row| {
Ok(Some(row[i].parse::<i64>().map_err(|_| {
ColumnQError::GoogleSpreadsheets(format!(
"Expect int64 value, got: {}",
row[i]
))
})?))
})
.collect::<Result<PrimitiveArray<Int64Type>, ColumnQError>>()?,
) as ArrayRef,
DataType::Float64 => Arc::new(
rows_iter
.map(|row| {
Ok(Some(row[i].parse::<f64>().map_err(|_| {
ColumnQError::GoogleSpreadsheets(format!(
"Expect float64 value, got: {}",
row[i]
))
})?))
})
.collect::<Result<PrimitiveArray<Float64Type>, ColumnQError>>()?,
) as ArrayRef,
_ => Arc::new(rows_iter.map(|row| Some(&row[i])).collect::<StringArray>())
as ArrayRef,
})
})
.collect::<Result<Vec<ArrayRef>, ColumnQError>>()?;
Ok(RecordBatch::try_new(Arc::new(schema), arrays)?)
}
async fn fetch_auth_token(
opt: &TableOptionGoogleSpreasheet,
) -> Result<yup_oauth2::AccessToken, ColumnQError> {
// Read application creds from a file.The clientsecret file contains JSON like
// `{"installed":{"client_id": ... }}`
let creds = yup_oauth2::read_service_account_key(&opt.application_secret_path)
.await
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Error reading application secret from disk: {}",
e.to_string()
))
})?;
let sa = yup_oauth2::ServiceAccountAuthenticator::builder(creds)
.build()
.await
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Error building service account authenticator: {}",
e.to_string()
))
})?;
let scopes = &["https://www.googleapis.com/auth/spreadsheets.readonly"];
sa.token(scopes).await.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!("Failed to obtain OAuth2 token: {}", e.to_string()))
})
}
async fn resolve_sheet_title<'a, 'b, 'c, 'd>(
token: &'a str,
spreadsheet_id: &'b str,
uri: &'c URIReference<'d>,
) -> Result<String, ColumnQError> {
// look up sheet title by sheet id through API
let resp = gs_api_get(
token,
&format!(
"https://sheets.googleapis.com/v4/spreadsheets/{}",
spreadsheet_id
),
)
.await?
.error_for_status()
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Failed to resolve sheet title from API: {}",
e.to_string()
))
})?;
let spreadsheets = resp.json::<Spreadsheets>().await.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!("Failed to parse API response: {}", e.to_string()))
})?;
// when sheet id is not specified from config, try to parse it from URI
let sheet_id: Option<usize> = match uri.fragment() {
// if sheeit id is specified within the URI in the format of #gid=x
Some(fragment) => {
let s = fragment.as_str();
let parts: Vec<&str> = s.split('=').collect();
match parts.len() {
2 => match parts[0] {
"gid" => parts[1].parse().ok(),
_ => None,
},
_ => None,
}
}
None => None,
};
let sheet = match sheet_id {
Some(id) => spreadsheets
.sheets
.iter()
.find(|s| s.properties.sheet_id == id)
.ok_or_else(|| ColumnQError::GoogleSpreadsheets(format!("Invalid sheet id {}", id)))?,
// no sheet id specified, default to the first sheet
None => spreadsheets
.sheets
.iter()
.find(|s| s.properties.index == 0)
.ok_or_else(|| ColumnQError::GoogleSpreadsheets("spreadsheets is empty".to_string()))?,
};
Ok(sheet.properties.title.clone())
}
pub async fn to_mem_table(
t: &TableSource,
) -> Result<datafusion::datasource::MemTable, ColumnQError> {
lazy_static! {
static ref RE_GOOGLE_SHEET: Regex =
Regex::new(r"https://docs.google.com/spreadsheets/d/(.+)").unwrap();
}
let uri_str = t.get_uri_str();
if RE_GOOGLE_SHEET.captures(uri_str).is_none() {
return Err(ColumnQError::InvalidUri(uri_str.to_string()));
}
let uri = URIReference::try_from(uri_str)?;
let spreadsheet_id = uri.path().segments()[2].as_str();
let opt = t
.option
.as_ref()
.ok_or(ColumnQError::MissingOption)?
.as_google_spreadsheet()?;
let token = fetch_auth_token(opt).await?;
let token_str = token.as_str();
let sheet_title = match &opt.sheet_title {
Some(t) => t.clone(),
None => resolve_sheet_title(token_str, spreadsheet_id, &uri).await?,
};
let resp = gs_api_get(
token_str,
&format!(
"https://sheets.googleapis.com/v4/spreadsheets/{}/values/{}",
spreadsheet_id, sheet_title,
),
)
.await?
.error_for_status()
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Failed to load sheet value from API: {}",
e.to_string()
))
})?;
let sheet = resp.json::<SpreadsheetValues>().await.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!("Failed to parse API response: {}", e.to_string()))
})?;
let batch = sheet_values_to_record_batch(&sheet.values)?;
let schema_ref = batch.schema();
let partitions = vec![vec![batch]];
Ok(datafusion::datasource::MemTable::try_new(
schema_ref, partitions,
)?)
}
#[cfg(test)]
mod tests {
use super::*;
use datafusion::arrow::array::{BooleanArray, Int64Array};
fn row(raw: &[&str]) -> Vec<String> {
raw.iter().map(|s| s.to_string()).collect()
}
fn property_sheet() -> SpreadsheetValues {
SpreadsheetValues {
range: "Properties!A1:AB1000".to_string(),
major_dimension: "ROWS".to_string(),
values: vec![
row(&[
"Address",
"Image",
"Landlord",
"Bed",
"Bath",
"Occupied",
"Monthly Rent",
"Lease Expiration Date",
"Days Until Expiration",
]),
row(&[
"Bothell, WA",
"https://a.com/1.jpeg",
"Roger",
"3",
"2",
"FALSE",
"$2,000",
"10/23/2020",
"Expired",
]),
row(&[
"Mill Creek, WA",
"https://a.com/2.jpeg",
"Sam",
"3",
"3",
"TRUE",
"$3,500",
"8/4/2021",
"193",
]),
row(&[
"Fremont, WA",
"",
"Daniel",
"5",
"3",
"FALSE",
"$4,500",
"7/13/2019",
"Expired",
]),
row(&[
"Shoreline, WA",
"https://a.com/3.jpeg",
"Roger",
"1",
"1",
"TRUE",
"$1,200",
"12/9/2021",
"320",
]),
],
}
}
#[test]
fn schema_inference() {
let sheet = property_sheet();
let schema = infer_schema(&sheet.values);
assert_eq!(
schema,
Schema::new(vec![
Field::new("Address", DataType::Utf8, false),
Field::new("Image", DataType::Utf8, false),
Field::new("Landlord", DataType::Utf8, false),
Field::new("Bed", DataType::Int64, false),
Field::new("Bath", DataType::Int64, false),
Field::new("Occupied", DataType::Boolean, false),
Field::new("Monthly_Rent", DataType::Utf8, false),
Field::new("Lease_Expiration_Date", DataType::Utf8, false),
Field::new("Days_Until_Expiration", DataType::Utf8, false),
])
);
}
#[test]
fn sheetvalue_to_record_batch() -> anyhow::Result<()> {
let sheet = property_sheet();
let batch = sheet_values_to_record_batch(&sheet.values)?;
assert_eq!(batch.num_columns(), 9);
assert_eq!(
batch.column(3).as_ref(),
Arc::new(Int64Array::from(vec![3, 3, 5, 1])).as_ref(),
);
assert_eq!(
batch.column(5).as_ref(),
Arc::new(BooleanArray::from(vec![false, true, false, true])).as_ref(),
);
assert_eq!(
batch.column(2).as_ref(),
Arc::new(StringArray::from(vec!["Roger", "Sam", "Daniel", "Roger"])).as_ref(),
);
Ok(())
}
}
| parse_boolean | identifier_name |
end2end_test.go | package tests
import (
"bytes"
"context"
"crypto/rand"
"encoding/hex"
"fmt"
"os"
"reflect"
"testing"
"time"
"github.com/amenzhinsky/iothub/iotdevice"
"github.com/amenzhinsky/iothub/iotdevice/transport"
"github.com/amenzhinsky/iothub/iotdevice/transport/mqtt"
"github.com/amenzhinsky/iothub/iotservice"
)
func TestEnd2End(t *testing.T) {
cs := os.Getenv("TEST_IOTHUB_SERVICE_CONNECTION_STRING")
if cs == "" {
t.Fatal("$TEST_IOTHUB_SERVICE_CONNECTION_STRING is empty")
}
sc, err := iotservice.NewFromConnectionString(cs) // iotservice.WithLogger(logger.New(logger.LevelDebug, nil)),
if err != nil {
t.Fatal(err)
}
defer sc.Close()
// create devices with all possible authentication types
_, err = sc.DeleteDevices(context.Background(), []*iotservice.Device{
{DeviceID: "golang-iothub-sas"},
{DeviceID: "golang-iothub-self-signed"},
{DeviceID: "golang-iothub-ca"},
}, true)
if err != nil {
t.Fatal(err)
}
result, err := sc.CreateDevices(context.Background(), []*iotservice.Device{{
DeviceID: "golang-iothub-sas",
Authentication: &iotservice.Authentication{
Type: iotservice.AuthSAS,
},
}, {
DeviceID: "golang-iothub-self-signed",
Authentication: &iotservice.Authentication{
Type: iotservice.AuthSelfSigned,
X509Thumbprint: &iotservice.X509Thumbprint{
PrimaryThumbprint: "443ABB6DEA8F93D5987D31D2607BE2931217752C",
SecondaryThumbprint: "443ABB6DEA8F93D5987D31D2607BE2931217752C",
},
},
}, {
DeviceID: "golang-iothub-ca",
Authentication: &iotservice.Authentication{
Type: iotservice.AuthCA,
},
}})
if err != nil {
t.Fatal(err)
}
if !result.IsSuccessful {
t.Fatalf("couldn't create devices: %v", result.Errors)
}
for name, mktransport := range map[string]func() transport.Transport{
"mqtt": func() transport.Transport { return mqtt.New() },
"mqtt-ws": func() transport.Transport { return mqtt.New(mqtt.WithWebSocket(true)) },
// TODO: "amqp": func() transport.Transport { return amqp.New() },
// TODO: "http": func() transport.Transport { return http.New() },
} {
mktransport := mktransport
t.Run(name, func(t *testing.T) {
for auth, suite := range map[string]struct {
init func(transport transport.Transport) (*iotdevice.Client, error)
only string
}{
// TODO: ca authentication
"x509": {
func(transport transport.Transport) (*iotdevice.Client, error) {
return iotdevice.NewFromX509FromFile(
transport,
"golang-iothub-self-signed",
sc.HostName(),
"testdata/device.crt",
"testdata/device.key",
)
},
"DeviceToCloud", // just need to check access
},
"sas": {
func(transport transport.Transport) (*iotdevice.Client, error) {
device, err := sc.GetDevice(context.Background(), "golang-iothub-sas")
if err != nil {
return nil, err
}
dcs, err := sc.DeviceConnectionString(device, false)
if err != nil {
t.Fatal(err)
}
return iotdevice.NewFromConnectionString(transport, dcs)
},
"*",
},
} {
for name, test := range map[string]func(*testing.T, *iotservice.Client, *iotdevice.Client){
"DeviceToCloud": testDeviceToCloud,
"CloudToDevice": testCloudToDevice,
"DirectMethod": testDirectMethod,
"UpdateDeviceTwin": testUpdateTwin,
"SubscribeTwin": testSubscribeTwin,
} {
if suite.only != "*" && suite.only != name {
continue
}
test, suite, mktransport := test, suite, mktransport
t.Run(auth+"/"+name, func(t *testing.T) {
dc, err := suite.init(mktransport())
if err != nil {
t.Fatal(err)
}
defer dc.Close()
if err := dc.Connect(context.Background()); err != nil {
t.Fatal(err)
}
test(t, sc, dc)
})
}
}
})
}
}
func testDeviceToCloud(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
evsc := make(chan *iotservice.Event, 1)
errc := make(chan error, 2)
go func() {
errc <- sc.SubscribeEvents(context.Background(), func(ev *iotservice.Event) error {
if ev.ConnectionDeviceID == dc.DeviceID() {
evsc <- ev
}
return nil
})
}()
payload := []byte("hello")
props := map[string]string{"a": "a", "b": "b"}
done := make(chan struct{})
defer close(done)
// send events until one of them is received
go func() {
for {
if err := dc.SendEvent(context.Background(), payload,
iotdevice.WithSendMessageID(genID()),
iotdevice.WithSendCorrelationID(genID()),
iotdevice.WithSendProperties(props),
iotdevice.WithSendCreationTime(time.Now().Add(-24*time.Hour)),
); err != nil {
errc <- err
break
}
select {
case <-done:
case <-time.After(500 * time.Millisecond):
}
}
}()
select {
case msg := <-evsc:
if msg.MessageID == "" {
t.Error("MessageID is empty")
}
if msg.CorrelationID == "" {
t.Error("CorrelationID is empty")
}
if msg.ConnectionDeviceID != dc.DeviceID() {
t.Errorf("ConnectionDeviceID = %q, want %q", msg.ConnectionDeviceID, dc.DeviceID())
}
if msg.ConnectionDeviceGenerationID == "" {
t.Error("ConnectionDeviceGenerationID is empty")
}
if msg.ConnectionAuthMethod == nil {
t.Error("ConnectionAuthMethod is nil")
}
if msg.MessageSource == "" {
t.Error("MessageSource is empty")
}
if msg.EnqueuedTime.IsZero() {
t.Error("EnqueuedTime is zero")
}
if !bytes.Equal(msg.Payload, payload) {
t.Errorf("Payload = %v, want %v", msg.Payload, payload)
}
if msg.Properties["iothub-creation-time-utc"] == "" {
t.Error("iothub-creation-time-utc missing")
}
testProperties(t, msg.Properties, props)
case err := <-errc:
t.Fatal(err)
case <-time.After(10 * time.Second):
t.Fatal("d2c timed out")
}
}
func testProperties(t *testing.T, got, want map[string]string) {
t.Helper()
for k, v := range want {
x, ok := got[k]
if !ok || x != v {
t.Errorf("Properties = %v, want %v", got, want)
return
}
}
}
func testCloudToDevice(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
fbsc := make(chan *iotservice.Feedback, 1)
errc := make(chan error, 3)
sub, err := dc.SubscribeEvents(context.Background())
if err != nil {
t.Fatal(err)
}
// subscribe to feedback and report first registered message id
go func() {
errc <- sc.SubscribeFeedback(context.Background(), func(fb *iotservice.Feedback) error {
fbsc <- fb
return nil
})
}()
payload := []byte("hello")
props := map[string]string{"a": "a", "b": "b"}
uid := "golang-iothub"
mid := genID()
if err := sc.SendEvent(context.Background(), dc.DeviceID(), payload,
iotservice.WithSendAck(iotservice.AckFull),
iotservice.WithSendProperties(props),
iotservice.WithSendUserID(uid),
iotservice.WithSendMessageID(mid),
iotservice.WithSendCorrelationID(genID()),
iotservice.WithSendExpiryTime(time.Now().Add(5*time.Second)),
); err != nil {
errc <- err
return
}
for {
select {
case msg := <-sub.C():
if msg.MessageID != mid {
continue
}
// validate event feedback
Outer:
for {
select {
case fb := <-fbsc:
if fb.OriginalMessageID != mid {
continue
}
if fb.StatusCode != "Success" {
t.Errorf("feedback status = %q, want %q", fb.StatusCode, "Success")
}
break Outer
case <-time.After(15 * time.Second):
t.Log("feedback timed out, probably due to eventhub batching")
break Outer
}
}
// validate message content
if msg.To == "" {
t.Error("To is empty")
}
if msg.UserID != uid {
t.Errorf("UserID = %q, want %q", msg.UserID, uid)
}
if !bytes.Equal(msg.Payload, payload) {
t.Errorf("Payload = %v, want %v", msg.Payload, payload)
}
if msg.MessageID == "" {
t.Error("MessageID is empty")
}
if msg.CorrelationID == "" {
t.Error("CorrelationID is empty")
}
if msg.ExpiryTime.IsZero() {
t.Error("ExpiryTime is zero")
}
testProperties(t, msg.Properties, props)
return
case err := <-errc:
t.Fatal(err)
case <-time.After(10 * time.Second):
t.Fatal("c2d timed out")
}
}
}
func testUpdateTwin(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
// update state and keep track of version
s := fmt.Sprintf("%d", time.Now().UnixNano())
v, err := dc.UpdateTwinState(context.Background(), map[string]interface{}{
"ts": s,
})
if err != nil {
t.Fatal(err)
}
_, r, err := dc.RetrieveTwinState(context.Background())
if err != nil {
t.Fatal(err)
}
if v != r.Version() {
t.Errorf("update-twin version = %d, want %d", r.Version(), v)
}
if r["ts"] != s {
t.Errorf("update-twin parameter = %q, want %q", r["ts"], s)
}
}
// TODO: very flaky
func testSubscribeTwin(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
sub, err := dc.SubscribeTwinUpdates(context.Background())
if err != nil |
defer dc.UnsubscribeTwinUpdates(sub)
// TODO: hacky, but reduces flakiness
time.Sleep(time.Second)
twin, err := sc.UpdateDeviceTwin(context.Background(), &iotservice.Twin{
DeviceID: dc.DeviceID(),
Tags: map[string]interface{}{
"test-device": true,
},
Properties: &iotservice.Properties{
Desired: map[string]interface{}{
"test-prop": time.Now().UnixNano() / 1000,
},
},
})
if err != nil {
t.Fatal(err)
}
select {
case state := <-sub.C():
if state["$version"] != twin.Properties.Desired["$version"] {
t.Errorf("version = %d, want %d", state["$version"], twin.Properties.Desired["$version"])
}
if state["test-prop"] != twin.Properties.Desired["test-prop"] {
t.Errorf("test-prop = %q, want %q", state["test-prop"], twin.Properties.Desired["test-prop"])
}
case <-time.After(10 * time.Second):
t.Fatal("SubscribeTwinUpdates timed out")
}
}
func testDirectMethod(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
if err := dc.RegisterMethod(
context.Background(),
"sum",
func(v map[string]interface{}) (int, map[string]interface{}, error) {
return 222, map[string]interface{}{
"result": v["a"].(float64) + v["b"].(float64),
}, nil
},
); err != nil {
t.Fatal(err)
}
resc := make(chan *iotservice.MethodResult, 1)
errc := make(chan error, 2)
go func() {
v, err := sc.CallDeviceMethod(context.Background(), dc.DeviceID(), &iotservice.MethodCall{
MethodName: "sum",
ConnectTimeout: 5,
ResponseTimeout: 5,
Payload: map[string]interface{}{
"a": 1.5,
"b": 3,
},
})
if err != nil {
errc <- err
}
resc <- v
}()
select {
case v := <-resc:
w := &iotservice.MethodResult{
Status: 222,
Payload: map[string]interface{}{
"result": 4.5,
},
}
if !reflect.DeepEqual(v, w) {
t.Errorf("direct-method result = %v, want %v", v, w)
}
case err := <-errc:
t.Fatal(err)
}
}
func genID() string {
b := make([]byte, 16)
if _, err := rand.Read(b); err != nil {
panic(err)
}
return hex.EncodeToString(b)
}
| {
t.Fatal(err)
} | conditional_block |
end2end_test.go | package tests
import (
"bytes"
"context"
"crypto/rand"
"encoding/hex"
"fmt"
"os"
"reflect"
"testing"
"time"
"github.com/amenzhinsky/iothub/iotdevice"
"github.com/amenzhinsky/iothub/iotdevice/transport"
"github.com/amenzhinsky/iothub/iotdevice/transport/mqtt"
"github.com/amenzhinsky/iothub/iotservice"
)
func TestEnd2End(t *testing.T) {
cs := os.Getenv("TEST_IOTHUB_SERVICE_CONNECTION_STRING")
if cs == "" {
t.Fatal("$TEST_IOTHUB_SERVICE_CONNECTION_STRING is empty")
}
sc, err := iotservice.NewFromConnectionString(cs) // iotservice.WithLogger(logger.New(logger.LevelDebug, nil)),
if err != nil {
t.Fatal(err)
}
defer sc.Close()
// create devices with all possible authentication types
_, err = sc.DeleteDevices(context.Background(), []*iotservice.Device{
{DeviceID: "golang-iothub-sas"},
{DeviceID: "golang-iothub-self-signed"},
{DeviceID: "golang-iothub-ca"},
}, true)
if err != nil {
t.Fatal(err)
}
result, err := sc.CreateDevices(context.Background(), []*iotservice.Device{{
DeviceID: "golang-iothub-sas",
Authentication: &iotservice.Authentication{
Type: iotservice.AuthSAS,
},
}, {
DeviceID: "golang-iothub-self-signed",
Authentication: &iotservice.Authentication{
Type: iotservice.AuthSelfSigned,
X509Thumbprint: &iotservice.X509Thumbprint{
PrimaryThumbprint: "443ABB6DEA8F93D5987D31D2607BE2931217752C",
SecondaryThumbprint: "443ABB6DEA8F93D5987D31D2607BE2931217752C",
},
},
}, {
DeviceID: "golang-iothub-ca",
Authentication: &iotservice.Authentication{
Type: iotservice.AuthCA,
},
}})
if err != nil {
t.Fatal(err)
}
if !result.IsSuccessful {
t.Fatalf("couldn't create devices: %v", result.Errors)
}
for name, mktransport := range map[string]func() transport.Transport{
"mqtt": func() transport.Transport { return mqtt.New() },
"mqtt-ws": func() transport.Transport { return mqtt.New(mqtt.WithWebSocket(true)) },
// TODO: "amqp": func() transport.Transport { return amqp.New() },
// TODO: "http": func() transport.Transport { return http.New() },
} {
mktransport := mktransport
t.Run(name, func(t *testing.T) {
for auth, suite := range map[string]struct {
init func(transport transport.Transport) (*iotdevice.Client, error)
only string
}{
// TODO: ca authentication
"x509": {
func(transport transport.Transport) (*iotdevice.Client, error) {
return iotdevice.NewFromX509FromFile(
transport,
"golang-iothub-self-signed",
sc.HostName(),
"testdata/device.crt",
"testdata/device.key",
)
},
"DeviceToCloud", // just need to check access
},
"sas": {
func(transport transport.Transport) (*iotdevice.Client, error) {
device, err := sc.GetDevice(context.Background(), "golang-iothub-sas")
if err != nil {
return nil, err
}
dcs, err := sc.DeviceConnectionString(device, false)
if err != nil {
t.Fatal(err)
}
return iotdevice.NewFromConnectionString(transport, dcs)
},
"*",
},
} {
for name, test := range map[string]func(*testing.T, *iotservice.Client, *iotdevice.Client){
"DeviceToCloud": testDeviceToCloud,
"CloudToDevice": testCloudToDevice,
"DirectMethod": testDirectMethod,
"UpdateDeviceTwin": testUpdateTwin,
"SubscribeTwin": testSubscribeTwin,
} {
if suite.only != "*" && suite.only != name {
continue
}
test, suite, mktransport := test, suite, mktransport
t.Run(auth+"/"+name, func(t *testing.T) {
dc, err := suite.init(mktransport())
if err != nil {
t.Fatal(err)
}
defer dc.Close()
if err := dc.Connect(context.Background()); err != nil {
t.Fatal(err)
}
test(t, sc, dc)
})
}
}
})
}
}
func testDeviceToCloud(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
evsc := make(chan *iotservice.Event, 1)
errc := make(chan error, 2)
go func() {
errc <- sc.SubscribeEvents(context.Background(), func(ev *iotservice.Event) error {
if ev.ConnectionDeviceID == dc.DeviceID() {
evsc <- ev
}
return nil
})
}()
payload := []byte("hello")
props := map[string]string{"a": "a", "b": "b"}
done := make(chan struct{})
defer close(done)
// send events until one of them is received
go func() {
for {
if err := dc.SendEvent(context.Background(), payload,
iotdevice.WithSendMessageID(genID()),
iotdevice.WithSendCorrelationID(genID()),
iotdevice.WithSendProperties(props),
iotdevice.WithSendCreationTime(time.Now().Add(-24*time.Hour)),
); err != nil {
errc <- err
break
}
select {
case <-done:
case <-time.After(500 * time.Millisecond):
}
}
}()
select {
case msg := <-evsc:
if msg.MessageID == "" {
t.Error("MessageID is empty")
}
if msg.CorrelationID == "" {
t.Error("CorrelationID is empty")
}
if msg.ConnectionDeviceID != dc.DeviceID() {
t.Errorf("ConnectionDeviceID = %q, want %q", msg.ConnectionDeviceID, dc.DeviceID())
}
if msg.ConnectionDeviceGenerationID == "" {
t.Error("ConnectionDeviceGenerationID is empty")
}
if msg.ConnectionAuthMethod == nil {
t.Error("ConnectionAuthMethod is nil")
}
if msg.MessageSource == "" {
t.Error("MessageSource is empty")
}
if msg.EnqueuedTime.IsZero() {
t.Error("EnqueuedTime is zero")
}
if !bytes.Equal(msg.Payload, payload) {
t.Errorf("Payload = %v, want %v", msg.Payload, payload)
}
if msg.Properties["iothub-creation-time-utc"] == "" {
t.Error("iothub-creation-time-utc missing")
}
testProperties(t, msg.Properties, props)
case err := <-errc:
t.Fatal(err)
case <-time.After(10 * time.Second):
t.Fatal("d2c timed out")
}
}
func testProperties(t *testing.T, got, want map[string]string) {
t.Helper()
for k, v := range want {
x, ok := got[k]
if !ok || x != v {
t.Errorf("Properties = %v, want %v", got, want)
return
}
}
}
func testCloudToDevice(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
fbsc := make(chan *iotservice.Feedback, 1)
errc := make(chan error, 3)
sub, err := dc.SubscribeEvents(context.Background())
if err != nil {
t.Fatal(err)
}
// subscribe to feedback and report first registered message id
go func() {
errc <- sc.SubscribeFeedback(context.Background(), func(fb *iotservice.Feedback) error {
fbsc <- fb
return nil
})
}()
payload := []byte("hello")
props := map[string]string{"a": "a", "b": "b"}
uid := "golang-iothub"
mid := genID()
if err := sc.SendEvent(context.Background(), dc.DeviceID(), payload,
iotservice.WithSendAck(iotservice.AckFull),
iotservice.WithSendProperties(props),
iotservice.WithSendUserID(uid),
iotservice.WithSendMessageID(mid),
iotservice.WithSendCorrelationID(genID()),
iotservice.WithSendExpiryTime(time.Now().Add(5*time.Second)),
); err != nil {
errc <- err
return
}
for {
select {
case msg := <-sub.C():
if msg.MessageID != mid {
continue
}
// validate event feedback
Outer:
for {
select {
case fb := <-fbsc:
if fb.OriginalMessageID != mid {
continue
}
if fb.StatusCode != "Success" {
t.Errorf("feedback status = %q, want %q", fb.StatusCode, "Success")
}
break Outer
case <-time.After(15 * time.Second):
t.Log("feedback timed out, probably due to eventhub batching")
break Outer
}
}
// validate message content
if msg.To == "" {
t.Error("To is empty")
}
if msg.UserID != uid {
t.Errorf("UserID = %q, want %q", msg.UserID, uid)
}
if !bytes.Equal(msg.Payload, payload) {
t.Errorf("Payload = %v, want %v", msg.Payload, payload)
}
if msg.MessageID == "" {
t.Error("MessageID is empty")
}
if msg.CorrelationID == "" {
t.Error("CorrelationID is empty")
}
if msg.ExpiryTime.IsZero() {
t.Error("ExpiryTime is zero")
}
testProperties(t, msg.Properties, props)
return
case err := <-errc:
t.Fatal(err)
case <-time.After(10 * time.Second):
t.Fatal("c2d timed out")
}
}
}
func testUpdateTwin(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
// update state and keep track of version
s := fmt.Sprintf("%d", time.Now().UnixNano())
v, err := dc.UpdateTwinState(context.Background(), map[string]interface{}{
"ts": s,
})
if err != nil {
t.Fatal(err)
}
_, r, err := dc.RetrieveTwinState(context.Background())
if err != nil {
t.Fatal(err)
}
if v != r.Version() {
t.Errorf("update-twin version = %d, want %d", r.Version(), v)
}
if r["ts"] != s {
t.Errorf("update-twin parameter = %q, want %q", r["ts"], s)
}
}
// TODO: very flaky
func testSubscribeTwin(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
sub, err := dc.SubscribeTwinUpdates(context.Background())
if err != nil {
t.Fatal(err)
}
defer dc.UnsubscribeTwinUpdates(sub)
// TODO: hacky, but reduces flakiness
time.Sleep(time.Second)
twin, err := sc.UpdateDeviceTwin(context.Background(), &iotservice.Twin{
DeviceID: dc.DeviceID(),
Tags: map[string]interface{}{
"test-device": true,
},
Properties: &iotservice.Properties{
Desired: map[string]interface{}{
"test-prop": time.Now().UnixNano() / 1000,
},
},
})
if err != nil {
t.Fatal(err)
}
select {
case state := <-sub.C():
if state["$version"] != twin.Properties.Desired["$version"] {
t.Errorf("version = %d, want %d", state["$version"], twin.Properties.Desired["$version"])
}
if state["test-prop"] != twin.Properties.Desired["test-prop"] {
t.Errorf("test-prop = %q, want %q", state["test-prop"], twin.Properties.Desired["test-prop"])
}
case <-time.After(10 * time.Second):
t.Fatal("SubscribeTwinUpdates timed out")
}
}
func testDirectMethod(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
if err := dc.RegisterMethod(
context.Background(),
"sum",
func(v map[string]interface{}) (int, map[string]interface{}, error) {
return 222, map[string]interface{}{
"result": v["a"].(float64) + v["b"].(float64),
}, nil
},
); err != nil {
t.Fatal(err)
}
resc := make(chan *iotservice.MethodResult, 1)
errc := make(chan error, 2)
go func() {
v, err := sc.CallDeviceMethod(context.Background(), dc.DeviceID(), &iotservice.MethodCall{
MethodName: "sum",
ConnectTimeout: 5,
ResponseTimeout: 5,
Payload: map[string]interface{}{
"a": 1.5,
"b": 3,
},
})
if err != nil {
errc <- err
}
resc <- v
}()
select {
case v := <-resc:
w := &iotservice.MethodResult{
Status: 222,
Payload: map[string]interface{}{
"result": 4.5,
},
}
if !reflect.DeepEqual(v, w) {
t.Errorf("direct-method result = %v, want %v", v, w)
}
case err := <-errc:
t.Fatal(err)
}
}
func | () string {
b := make([]byte, 16)
if _, err := rand.Read(b); err != nil {
panic(err)
}
return hex.EncodeToString(b)
}
| genID | identifier_name |
end2end_test.go | package tests
import (
"bytes"
"context"
"crypto/rand"
"encoding/hex"
"fmt"
"os"
"reflect"
"testing"
"time"
"github.com/amenzhinsky/iothub/iotdevice"
"github.com/amenzhinsky/iothub/iotdevice/transport"
"github.com/amenzhinsky/iothub/iotdevice/transport/mqtt"
"github.com/amenzhinsky/iothub/iotservice"
)
func TestEnd2End(t *testing.T) {
cs := os.Getenv("TEST_IOTHUB_SERVICE_CONNECTION_STRING")
if cs == "" {
t.Fatal("$TEST_IOTHUB_SERVICE_CONNECTION_STRING is empty")
}
sc, err := iotservice.NewFromConnectionString(cs) // iotservice.WithLogger(logger.New(logger.LevelDebug, nil)),
if err != nil {
t.Fatal(err)
}
defer sc.Close()
// create devices with all possible authentication types
_, err = sc.DeleteDevices(context.Background(), []*iotservice.Device{
{DeviceID: "golang-iothub-sas"},
{DeviceID: "golang-iothub-self-signed"},
{DeviceID: "golang-iothub-ca"},
}, true)
if err != nil {
t.Fatal(err)
}
result, err := sc.CreateDevices(context.Background(), []*iotservice.Device{{
DeviceID: "golang-iothub-sas",
Authentication: &iotservice.Authentication{
Type: iotservice.AuthSAS,
},
}, {
DeviceID: "golang-iothub-self-signed",
Authentication: &iotservice.Authentication{
Type: iotservice.AuthSelfSigned,
X509Thumbprint: &iotservice.X509Thumbprint{
PrimaryThumbprint: "443ABB6DEA8F93D5987D31D2607BE2931217752C",
SecondaryThumbprint: "443ABB6DEA8F93D5987D31D2607BE2931217752C",
},
},
}, {
DeviceID: "golang-iothub-ca",
Authentication: &iotservice.Authentication{
Type: iotservice.AuthCA,
},
}})
if err != nil {
t.Fatal(err)
}
if !result.IsSuccessful {
t.Fatalf("couldn't create devices: %v", result.Errors)
}
for name, mktransport := range map[string]func() transport.Transport{
"mqtt": func() transport.Transport { return mqtt.New() },
"mqtt-ws": func() transport.Transport { return mqtt.New(mqtt.WithWebSocket(true)) },
// TODO: "amqp": func() transport.Transport { return amqp.New() },
// TODO: "http": func() transport.Transport { return http.New() },
} {
mktransport := mktransport
t.Run(name, func(t *testing.T) {
for auth, suite := range map[string]struct {
init func(transport transport.Transport) (*iotdevice.Client, error)
only string
}{
// TODO: ca authentication
"x509": {
func(transport transport.Transport) (*iotdevice.Client, error) {
return iotdevice.NewFromX509FromFile(
transport,
"golang-iothub-self-signed",
sc.HostName(),
"testdata/device.crt",
"testdata/device.key",
)
},
"DeviceToCloud", // just need to check access
},
"sas": {
func(transport transport.Transport) (*iotdevice.Client, error) {
device, err := sc.GetDevice(context.Background(), "golang-iothub-sas")
if err != nil {
return nil, err
}
dcs, err := sc.DeviceConnectionString(device, false)
if err != nil {
t.Fatal(err)
}
return iotdevice.NewFromConnectionString(transport, dcs)
},
"*",
},
} {
for name, test := range map[string]func(*testing.T, *iotservice.Client, *iotdevice.Client){
"DeviceToCloud": testDeviceToCloud,
"CloudToDevice": testCloudToDevice,
"DirectMethod": testDirectMethod,
"UpdateDeviceTwin": testUpdateTwin,
"SubscribeTwin": testSubscribeTwin,
} {
if suite.only != "*" && suite.only != name {
continue
}
test, suite, mktransport := test, suite, mktransport
t.Run(auth+"/"+name, func(t *testing.T) {
dc, err := suite.init(mktransport())
if err != nil {
t.Fatal(err)
}
defer dc.Close()
if err := dc.Connect(context.Background()); err != nil {
t.Fatal(err)
}
test(t, sc, dc)
})
}
}
})
}
}
func testDeviceToCloud(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
evsc := make(chan *iotservice.Event, 1)
errc := make(chan error, 2)
go func() {
errc <- sc.SubscribeEvents(context.Background(), func(ev *iotservice.Event) error {
if ev.ConnectionDeviceID == dc.DeviceID() {
evsc <- ev
}
return nil
})
}()
payload := []byte("hello")
props := map[string]string{"a": "a", "b": "b"}
done := make(chan struct{})
defer close(done)
// send events until one of them is received
go func() {
for {
if err := dc.SendEvent(context.Background(), payload,
iotdevice.WithSendMessageID(genID()),
iotdevice.WithSendCorrelationID(genID()),
iotdevice.WithSendProperties(props),
iotdevice.WithSendCreationTime(time.Now().Add(-24*time.Hour)),
); err != nil {
errc <- err
break
}
select {
case <-done:
case <-time.After(500 * time.Millisecond):
}
}
}()
select {
case msg := <-evsc:
if msg.MessageID == "" {
t.Error("MessageID is empty")
}
if msg.CorrelationID == "" {
t.Error("CorrelationID is empty")
}
if msg.ConnectionDeviceID != dc.DeviceID() {
t.Errorf("ConnectionDeviceID = %q, want %q", msg.ConnectionDeviceID, dc.DeviceID())
}
if msg.ConnectionDeviceGenerationID == "" {
t.Error("ConnectionDeviceGenerationID is empty")
}
if msg.ConnectionAuthMethod == nil {
t.Error("ConnectionAuthMethod is nil")
}
if msg.MessageSource == "" {
t.Error("MessageSource is empty")
}
if msg.EnqueuedTime.IsZero() {
t.Error("EnqueuedTime is zero")
}
if !bytes.Equal(msg.Payload, payload) {
t.Errorf("Payload = %v, want %v", msg.Payload, payload)
}
if msg.Properties["iothub-creation-time-utc"] == "" {
t.Error("iothub-creation-time-utc missing")
}
testProperties(t, msg.Properties, props)
case err := <-errc:
t.Fatal(err)
case <-time.After(10 * time.Second):
t.Fatal("d2c timed out")
}
}
func testProperties(t *testing.T, got, want map[string]string) {
t.Helper()
for k, v := range want {
x, ok := got[k]
if !ok || x != v {
t.Errorf("Properties = %v, want %v", got, want)
return
}
}
}
func testCloudToDevice(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
fbsc := make(chan *iotservice.Feedback, 1)
errc := make(chan error, 3)
sub, err := dc.SubscribeEvents(context.Background())
if err != nil {
t.Fatal(err)
}
// subscribe to feedback and report first registered message id
go func() {
errc <- sc.SubscribeFeedback(context.Background(), func(fb *iotservice.Feedback) error {
fbsc <- fb
return nil
})
}()
payload := []byte("hello")
props := map[string]string{"a": "a", "b": "b"}
uid := "golang-iothub"
mid := genID()
if err := sc.SendEvent(context.Background(), dc.DeviceID(), payload,
iotservice.WithSendAck(iotservice.AckFull),
iotservice.WithSendProperties(props),
iotservice.WithSendUserID(uid),
iotservice.WithSendMessageID(mid),
iotservice.WithSendCorrelationID(genID()),
iotservice.WithSendExpiryTime(time.Now().Add(5*time.Second)),
); err != nil {
errc <- err
return
}
for {
select {
case msg := <-sub.C():
if msg.MessageID != mid {
continue
}
// validate event feedback
Outer:
for {
select {
case fb := <-fbsc:
if fb.OriginalMessageID != mid {
continue
}
if fb.StatusCode != "Success" {
t.Errorf("feedback status = %q, want %q", fb.StatusCode, "Success")
}
break Outer
case <-time.After(15 * time.Second):
t.Log("feedback timed out, probably due to eventhub batching")
break Outer
}
}
// validate message content
if msg.To == "" {
t.Error("To is empty")
}
if msg.UserID != uid {
t.Errorf("UserID = %q, want %q", msg.UserID, uid)
}
if !bytes.Equal(msg.Payload, payload) {
t.Errorf("Payload = %v, want %v", msg.Payload, payload)
}
if msg.MessageID == "" {
t.Error("MessageID is empty")
}
if msg.CorrelationID == "" {
t.Error("CorrelationID is empty")
}
if msg.ExpiryTime.IsZero() {
t.Error("ExpiryTime is zero")
}
testProperties(t, msg.Properties, props)
return
case err := <-errc:
t.Fatal(err)
case <-time.After(10 * time.Second):
t.Fatal("c2d timed out")
}
}
}
func testUpdateTwin(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
// update state and keep track of version
s := fmt.Sprintf("%d", time.Now().UnixNano())
v, err := dc.UpdateTwinState(context.Background(), map[string]interface{}{
"ts": s,
})
if err != nil {
t.Fatal(err)
}
_, r, err := dc.RetrieveTwinState(context.Background())
if err != nil {
t.Fatal(err)
}
if v != r.Version() {
t.Errorf("update-twin version = %d, want %d", r.Version(), v)
}
if r["ts"] != s {
t.Errorf("update-twin parameter = %q, want %q", r["ts"], s)
}
}
// TODO: very flaky
func testSubscribeTwin(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
sub, err := dc.SubscribeTwinUpdates(context.Background())
if err != nil {
t.Fatal(err)
}
defer dc.UnsubscribeTwinUpdates(sub)
// TODO: hacky, but reduces flakiness
time.Sleep(time.Second)
twin, err := sc.UpdateDeviceTwin(context.Background(), &iotservice.Twin{
DeviceID: dc.DeviceID(),
Tags: map[string]interface{}{
"test-device": true,
},
Properties: &iotservice.Properties{
Desired: map[string]interface{}{
"test-prop": time.Now().UnixNano() / 1000,
},
},
})
if err != nil {
t.Fatal(err) | if state["$version"] != twin.Properties.Desired["$version"] {
t.Errorf("version = %d, want %d", state["$version"], twin.Properties.Desired["$version"])
}
if state["test-prop"] != twin.Properties.Desired["test-prop"] {
t.Errorf("test-prop = %q, want %q", state["test-prop"], twin.Properties.Desired["test-prop"])
}
case <-time.After(10 * time.Second):
t.Fatal("SubscribeTwinUpdates timed out")
}
}
func testDirectMethod(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
if err := dc.RegisterMethod(
context.Background(),
"sum",
func(v map[string]interface{}) (int, map[string]interface{}, error) {
return 222, map[string]interface{}{
"result": v["a"].(float64) + v["b"].(float64),
}, nil
},
); err != nil {
t.Fatal(err)
}
resc := make(chan *iotservice.MethodResult, 1)
errc := make(chan error, 2)
go func() {
v, err := sc.CallDeviceMethod(context.Background(), dc.DeviceID(), &iotservice.MethodCall{
MethodName: "sum",
ConnectTimeout: 5,
ResponseTimeout: 5,
Payload: map[string]interface{}{
"a": 1.5,
"b": 3,
},
})
if err != nil {
errc <- err
}
resc <- v
}()
select {
case v := <-resc:
w := &iotservice.MethodResult{
Status: 222,
Payload: map[string]interface{}{
"result": 4.5,
},
}
if !reflect.DeepEqual(v, w) {
t.Errorf("direct-method result = %v, want %v", v, w)
}
case err := <-errc:
t.Fatal(err)
}
}
func genID() string {
b := make([]byte, 16)
if _, err := rand.Read(b); err != nil {
panic(err)
}
return hex.EncodeToString(b)
} | }
select {
case state := <-sub.C(): | random_line_split |
end2end_test.go | package tests
import (
"bytes"
"context"
"crypto/rand"
"encoding/hex"
"fmt"
"os"
"reflect"
"testing"
"time"
"github.com/amenzhinsky/iothub/iotdevice"
"github.com/amenzhinsky/iothub/iotdevice/transport"
"github.com/amenzhinsky/iothub/iotdevice/transport/mqtt"
"github.com/amenzhinsky/iothub/iotservice"
)
func TestEnd2End(t *testing.T) {
cs := os.Getenv("TEST_IOTHUB_SERVICE_CONNECTION_STRING")
if cs == "" {
t.Fatal("$TEST_IOTHUB_SERVICE_CONNECTION_STRING is empty")
}
sc, err := iotservice.NewFromConnectionString(cs) // iotservice.WithLogger(logger.New(logger.LevelDebug, nil)),
if err != nil {
t.Fatal(err)
}
defer sc.Close()
// create devices with all possible authentication types
_, err = sc.DeleteDevices(context.Background(), []*iotservice.Device{
{DeviceID: "golang-iothub-sas"},
{DeviceID: "golang-iothub-self-signed"},
{DeviceID: "golang-iothub-ca"},
}, true)
if err != nil {
t.Fatal(err)
}
result, err := sc.CreateDevices(context.Background(), []*iotservice.Device{{
DeviceID: "golang-iothub-sas",
Authentication: &iotservice.Authentication{
Type: iotservice.AuthSAS,
},
}, {
DeviceID: "golang-iothub-self-signed",
Authentication: &iotservice.Authentication{
Type: iotservice.AuthSelfSigned,
X509Thumbprint: &iotservice.X509Thumbprint{
PrimaryThumbprint: "443ABB6DEA8F93D5987D31D2607BE2931217752C",
SecondaryThumbprint: "443ABB6DEA8F93D5987D31D2607BE2931217752C",
},
},
}, {
DeviceID: "golang-iothub-ca",
Authentication: &iotservice.Authentication{
Type: iotservice.AuthCA,
},
}})
if err != nil {
t.Fatal(err)
}
if !result.IsSuccessful {
t.Fatalf("couldn't create devices: %v", result.Errors)
}
for name, mktransport := range map[string]func() transport.Transport{
"mqtt": func() transport.Transport { return mqtt.New() },
"mqtt-ws": func() transport.Transport { return mqtt.New(mqtt.WithWebSocket(true)) },
// TODO: "amqp": func() transport.Transport { return amqp.New() },
// TODO: "http": func() transport.Transport { return http.New() },
} {
mktransport := mktransport
t.Run(name, func(t *testing.T) {
for auth, suite := range map[string]struct {
init func(transport transport.Transport) (*iotdevice.Client, error)
only string
}{
// TODO: ca authentication
"x509": {
func(transport transport.Transport) (*iotdevice.Client, error) {
return iotdevice.NewFromX509FromFile(
transport,
"golang-iothub-self-signed",
sc.HostName(),
"testdata/device.crt",
"testdata/device.key",
)
},
"DeviceToCloud", // just need to check access
},
"sas": {
func(transport transport.Transport) (*iotdevice.Client, error) {
device, err := sc.GetDevice(context.Background(), "golang-iothub-sas")
if err != nil {
return nil, err
}
dcs, err := sc.DeviceConnectionString(device, false)
if err != nil {
t.Fatal(err)
}
return iotdevice.NewFromConnectionString(transport, dcs)
},
"*",
},
} {
for name, test := range map[string]func(*testing.T, *iotservice.Client, *iotdevice.Client){
"DeviceToCloud": testDeviceToCloud,
"CloudToDevice": testCloudToDevice,
"DirectMethod": testDirectMethod,
"UpdateDeviceTwin": testUpdateTwin,
"SubscribeTwin": testSubscribeTwin,
} {
if suite.only != "*" && suite.only != name {
continue
}
test, suite, mktransport := test, suite, mktransport
t.Run(auth+"/"+name, func(t *testing.T) {
dc, err := suite.init(mktransport())
if err != nil {
t.Fatal(err)
}
defer dc.Close()
if err := dc.Connect(context.Background()); err != nil {
t.Fatal(err)
}
test(t, sc, dc)
})
}
}
})
}
}
func testDeviceToCloud(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
evsc := make(chan *iotservice.Event, 1)
errc := make(chan error, 2)
go func() {
errc <- sc.SubscribeEvents(context.Background(), func(ev *iotservice.Event) error {
if ev.ConnectionDeviceID == dc.DeviceID() {
evsc <- ev
}
return nil
})
}()
payload := []byte("hello")
props := map[string]string{"a": "a", "b": "b"}
done := make(chan struct{})
defer close(done)
// send events until one of them is received
go func() {
for {
if err := dc.SendEvent(context.Background(), payload,
iotdevice.WithSendMessageID(genID()),
iotdevice.WithSendCorrelationID(genID()),
iotdevice.WithSendProperties(props),
iotdevice.WithSendCreationTime(time.Now().Add(-24*time.Hour)),
); err != nil {
errc <- err
break
}
select {
case <-done:
case <-time.After(500 * time.Millisecond):
}
}
}()
select {
case msg := <-evsc:
if msg.MessageID == "" {
t.Error("MessageID is empty")
}
if msg.CorrelationID == "" {
t.Error("CorrelationID is empty")
}
if msg.ConnectionDeviceID != dc.DeviceID() {
t.Errorf("ConnectionDeviceID = %q, want %q", msg.ConnectionDeviceID, dc.DeviceID())
}
if msg.ConnectionDeviceGenerationID == "" {
t.Error("ConnectionDeviceGenerationID is empty")
}
if msg.ConnectionAuthMethod == nil {
t.Error("ConnectionAuthMethod is nil")
}
if msg.MessageSource == "" {
t.Error("MessageSource is empty")
}
if msg.EnqueuedTime.IsZero() {
t.Error("EnqueuedTime is zero")
}
if !bytes.Equal(msg.Payload, payload) {
t.Errorf("Payload = %v, want %v", msg.Payload, payload)
}
if msg.Properties["iothub-creation-time-utc"] == "" {
t.Error("iothub-creation-time-utc missing")
}
testProperties(t, msg.Properties, props)
case err := <-errc:
t.Fatal(err)
case <-time.After(10 * time.Second):
t.Fatal("d2c timed out")
}
}
func testProperties(t *testing.T, got, want map[string]string) {
t.Helper()
for k, v := range want {
x, ok := got[k]
if !ok || x != v {
t.Errorf("Properties = %v, want %v", got, want)
return
}
}
}
func testCloudToDevice(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
fbsc := make(chan *iotservice.Feedback, 1)
errc := make(chan error, 3)
sub, err := dc.SubscribeEvents(context.Background())
if err != nil {
t.Fatal(err)
}
// subscribe to feedback and report first registered message id
go func() {
errc <- sc.SubscribeFeedback(context.Background(), func(fb *iotservice.Feedback) error {
fbsc <- fb
return nil
})
}()
payload := []byte("hello")
props := map[string]string{"a": "a", "b": "b"}
uid := "golang-iothub"
mid := genID()
if err := sc.SendEvent(context.Background(), dc.DeviceID(), payload,
iotservice.WithSendAck(iotservice.AckFull),
iotservice.WithSendProperties(props),
iotservice.WithSendUserID(uid),
iotservice.WithSendMessageID(mid),
iotservice.WithSendCorrelationID(genID()),
iotservice.WithSendExpiryTime(time.Now().Add(5*time.Second)),
); err != nil {
errc <- err
return
}
for {
select {
case msg := <-sub.C():
if msg.MessageID != mid {
continue
}
// validate event feedback
Outer:
for {
select {
case fb := <-fbsc:
if fb.OriginalMessageID != mid {
continue
}
if fb.StatusCode != "Success" {
t.Errorf("feedback status = %q, want %q", fb.StatusCode, "Success")
}
break Outer
case <-time.After(15 * time.Second):
t.Log("feedback timed out, probably due to eventhub batching")
break Outer
}
}
// validate message content
if msg.To == "" {
t.Error("To is empty")
}
if msg.UserID != uid {
t.Errorf("UserID = %q, want %q", msg.UserID, uid)
}
if !bytes.Equal(msg.Payload, payload) {
t.Errorf("Payload = %v, want %v", msg.Payload, payload)
}
if msg.MessageID == "" {
t.Error("MessageID is empty")
}
if msg.CorrelationID == "" {
t.Error("CorrelationID is empty")
}
if msg.ExpiryTime.IsZero() {
t.Error("ExpiryTime is zero")
}
testProperties(t, msg.Properties, props)
return
case err := <-errc:
t.Fatal(err)
case <-time.After(10 * time.Second):
t.Fatal("c2d timed out")
}
}
}
func testUpdateTwin(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
// update state and keep track of version
s := fmt.Sprintf("%d", time.Now().UnixNano())
v, err := dc.UpdateTwinState(context.Background(), map[string]interface{}{
"ts": s,
})
if err != nil {
t.Fatal(err)
}
_, r, err := dc.RetrieveTwinState(context.Background())
if err != nil {
t.Fatal(err)
}
if v != r.Version() {
t.Errorf("update-twin version = %d, want %d", r.Version(), v)
}
if r["ts"] != s {
t.Errorf("update-twin parameter = %q, want %q", r["ts"], s)
}
}
// TODO: very flaky
func testSubscribeTwin(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
sub, err := dc.SubscribeTwinUpdates(context.Background())
if err != nil {
t.Fatal(err)
}
defer dc.UnsubscribeTwinUpdates(sub)
// TODO: hacky, but reduces flakiness
time.Sleep(time.Second)
twin, err := sc.UpdateDeviceTwin(context.Background(), &iotservice.Twin{
DeviceID: dc.DeviceID(),
Tags: map[string]interface{}{
"test-device": true,
},
Properties: &iotservice.Properties{
Desired: map[string]interface{}{
"test-prop": time.Now().UnixNano() / 1000,
},
},
})
if err != nil {
t.Fatal(err)
}
select {
case state := <-sub.C():
if state["$version"] != twin.Properties.Desired["$version"] {
t.Errorf("version = %d, want %d", state["$version"], twin.Properties.Desired["$version"])
}
if state["test-prop"] != twin.Properties.Desired["test-prop"] {
t.Errorf("test-prop = %q, want %q", state["test-prop"], twin.Properties.Desired["test-prop"])
}
case <-time.After(10 * time.Second):
t.Fatal("SubscribeTwinUpdates timed out")
}
}
func testDirectMethod(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) |
func genID() string {
b := make([]byte, 16)
if _, err := rand.Read(b); err != nil {
panic(err)
}
return hex.EncodeToString(b)
}
| {
if err := dc.RegisterMethod(
context.Background(),
"sum",
func(v map[string]interface{}) (int, map[string]interface{}, error) {
return 222, map[string]interface{}{
"result": v["a"].(float64) + v["b"].(float64),
}, nil
},
); err != nil {
t.Fatal(err)
}
resc := make(chan *iotservice.MethodResult, 1)
errc := make(chan error, 2)
go func() {
v, err := sc.CallDeviceMethod(context.Background(), dc.DeviceID(), &iotservice.MethodCall{
MethodName: "sum",
ConnectTimeout: 5,
ResponseTimeout: 5,
Payload: map[string]interface{}{
"a": 1.5,
"b": 3,
},
})
if err != nil {
errc <- err
}
resc <- v
}()
select {
case v := <-resc:
w := &iotservice.MethodResult{
Status: 222,
Payload: map[string]interface{}{
"result": 4.5,
},
}
if !reflect.DeepEqual(v, w) {
t.Errorf("direct-method result = %v, want %v", v, w)
}
case err := <-errc:
t.Fatal(err)
}
} | identifier_body |
build_ionospheric_model.py | #!/usr/bin/env python
"""build_ionospheric_model.py: module is dedicated to build foF2 model from fitacf data."""
__author__ = "Chakraborty, S."
__copyright__ = "Copyright 2020, SuperDARN@VT"
__credits__ = []
__license__ = "MIT"
__version__ = "1.0."
__maintainer__ = "Chakraborty, S."
__email__ = "shibaji7@vt.edu"
__status__ = "Research"
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import os
import datetime as dt
import pandas as pd
import numpy as np
from astropy import modeling
from scipy.optimize import curve_fit
from get_sd_data import *
def smooth(x,window_len=51,window="hanning"):
if x.ndim != 1: raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_len: raise ValueError("Input vector needs to be bigger than window size.")
if window_len<3: return x
if not window in ["flat", "hanning", "hamming", "bartlett", "blackman"]: raise ValueError("Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
s = np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]
if window == "flat": w = numpy.ones(window_len,"d")
else: w = eval("np."+window+"(window_len)")
y = np.convolve(w/w.sum(),s,mode="valid")
d = window_len - 1
y = y[int(d/2):-int(d/2)]
return y
def fit_lambda(du, power_drop, tfreq, elv, xlabel="srange", ylabel="p_l", plot=True, fname="images/_out.png"):
x, y = du[xlabel], du[ylabel]
def | (xx, a0, c0, s0):
return a0 * (1/(s0*(np.sqrt(2*np.pi))))*(np.exp((-1.0/2.0)*(((xx-c0)/s0)**2)))
def _a_gaussian(xx, a0, c0, s0, sp ):
return _1gaussian(xx, a0, c0, s0 ) * (0.5 + (np.arctan(sp*(xx-c0))/np.pi))
def opt(f, x0, y0, p0):
popt, pcov = curve_fit(f, x0, y0, p0=p0)
perr = np.sqrt(np.diag(pcov))
return popt, perr
def estimate_skip_distance(popt):
xx = np.linspace(0,popt[1],30000)
yy = _a_gaussian(xx, popt_ag[0], popt_ag[1], popt_ag[2], popt_ag[3])
print("\n Power drop {}".format(power_drop))
sd = np.round(xx[np.argmin(np.abs(yy - power_drop))], 1)
return sd
def estimate_skip_distance_1D_params(a0, c0, s0):
xx = np.linspace(0,c0,30000)
yy = _1gaussian(xx, a0, c0, s0)
print("\n Power drop {}".format(power_drop))
sd = np.round(xx[np.argmin(np.abs(yy - power_drop))], 1)
return sd
try:
popt_g, _ = opt(_1gaussian, x, y, p0=[np.max(y), np.mean(x), np.std(x)])
popt_ag, _ = opt(_a_gaussian, x, y, p0=[np.max(y), np.mean(x), np.std(x), 0.1])
skip_distance = estimate_skip_distance(popt_ag)
if plot:
fig = plt.figure(dpi=120,figsize=(3,3))
ax = fig.add_subplot(111)
ax.bar(x, y, width=30, color="y", ec="k", lw=0.3)
xx = np.arange(3500)
ax.plot(xx, _1gaussian(xx, popt_g[0], popt_g[1], popt_g[2]), color="b", lw=0.8, )
ax.plot(xx, _a_gaussian(xx, popt_ag[0], popt_ag[1], popt_ag[2], popt_ag[3]), color="r", lw=0.8, )
ax.text(0.8,0.8,r"$x_0$={} km".format(skip_distance),ha="center", va="center",transform=ax.transAxes)
ax.text(0.2,0.8,r"$\delta_0={}^o$".format(elv),ha="center", va="center",transform=ax.transAxes)
ax.axvline(skip_distance, color="cyan",lw=0.8)
ax.set_xlabel("Slant Range, km")
ax.set_ylabel("Power, db")
ax.set_xlim(0, 3500)
ax.set_ylim(0, 30)
ax.set_title(r"$foF_2^o$={} MHz".format(tfreq))
fig.savefig(fname, bbox_inches="tight")
plt.close()
except:
import traceback
traceback.print_exc()
mx, mean, std = np.max(y), np.mean(x), np.std(x)
skip_distance = estimate_skip_distance_1D_params(mx, mean, std)
fig = plt.figure(dpi=120,figsize=(3,3))
ax = fig.add_subplot(111)
ax.bar(x, y, width=30, color="y", ec="k", lw=0.3)
xx = np.arange(3500)
ax.plot(xx, _1gaussian(xx, mx, mean, std), color="b", lw=0.8, )
ax.text(0.8,0.8,r"$x_0$={} km".format(skip_distance),ha="center", va="center",transform=ax.transAxes)
ax.text(0.2,0.8,r"$\delta_0={}^o$".format(elv),ha="center", va="center",transform=ax.transAxes)
ax.axvline(skip_distance, color="cyan",lw=0.8)
ax.set_xlabel("Slant Range, km")
ax.set_ylabel("Power, db")
ax.set_xlim(0, 3500)
ax.set_ylim(0, 30)
ax.set_title(r"$foF_2^o$={} MHz".format(tfreq))
fig.savefig(fname.replace(".png", "_e.png"), bbox_inches="tight")
plt.close()
return skip_distance
def build_oblique_foF2_observed_by_radar(rad="cvw", dates=[dt.datetime(2017,8,21), dt.datetime(2017,8,21)], bmnum=11, scan_num=1,
remove_first_range=800, remove_last_range=2500, power_drop=10., plot=True):
""" Estimate MUF from the GS power """
csv_fname = "data/{rad}_{start}_{end}.csv".format(rad=rad, start=dates[0].strftime("%Y.%m.%d.%H.%M"),
end=dates[1].strftime("%Y.%m.%d.%H.%M"))
if not os.path.exists(csv_fname):
fdata = FetchData( rad, [dates[0], dates[1]] )
s_params = ["noise.sky", "tfreq", "frang", "rsep", "scan", "bmnum"]
v_params = ["slist", "v", "w_l", "p_l", "elv"]
_, scans = fdata.fetch_data(by="scan", s_params=s_params, v_params=v_params)
beams = []
# Resacle tx_frequency and estimate slant range
for scan in scans:
for beam in scan.beams:
if len(beam.slist) > 0:
setattr(beam, "slant_range", beam.frang + np.array(beam.slist.tolist()) * beam.rsep)
setattr(beam, "tfreq", np.round(beam.tfreq/1e3,1))
beams.append(beam)
# Extract oblique foF2 or MUF scan by scan
print("\n Data will be averaged over {} scans".format(scan_num))
print(" Processing beam {}".format(bmnum))
print(" Remove first range {}".format(remove_first_range))
print(" Remove last range {}".format(remove_last_range))
print(" Power drop {}".format(power_drop))
skip_distance, o_foF2, time_start, time_end = [], [], [], []
for i in range(len(scans)-scan_num):
rscan = scans[i:i+scan_num]
p_l, srange, tfrq, angle = [], [], [], []
for scan in rscan:
for beam in scan.beams:
if beam.bmnum == bmnum:
if len(beam.slist) > 0:
p_l.extend(beam.p_l.tolist())
srange.extend(beam.slant_range.tolist())
tfrq.append(beam.tfreq)
if type(beam.elv) is list: angle.extend(beam.elv)
else: angle.extend(beam.elv.tolist())
du = pd.DataFrame()
du["p_l"], du["srange"] = p_l, srange
du = du[(du.srange>remove_first_range) & (du.srange<remove_last_range)]
fname = "images/{}.png".format(rscan[0].stime.strftime("%Y-%m-%d-%H-%M"))
if len(du) > 0:
sd = fit_lambda(du, power_drop, tfreq=np.mean(tfrq), elv=np.mean(angle).round(1),
xlabel="srange", ylabel="p_l", fname=fname, plot=plot)
if sd != np.nan:
skip_distance.append(sd)
o_foF2.append(np.mean(tfrq))
time_start.append(rscan[0].stime)
time_end.append(rscan[-1].stime)
df = pd.DataFrame()
df["skip_distance"], df["o_foF2"], df["time_start"], df["time_end"] = skip_distance, o_foF2, time_start, time_end
df.to_csv(csv_fname, index=False, header=True)
else: df = pd.read_csv(csv_fname, parse_dates=["time_start", "time_end"])
print(" Header:\n",df.head())
return df
def build_occultation_functions(rad, dates, time_range, bmnum=11, scan_num=1, remove_first_range=500, remove_last_range=2500,
power_drop=10., plot=True):
df = build_oblique_foF2_observed_by_radar(rad, dates, bmnum, scan_num, remove_first_range, remove_last_range, power_drop, plot)
upper = df[df.time_start <= time_range[0]]
lower = df[df.time_start >= time_range[1]]
def plot_rays(ax, u, color="r", ms=1, alpha=0.6, lw=1.5, wl=51):
midnight = dates[0].replace(hour=0, minute=0, second=0, microsecond=0)
sd = np.array(u.skip_distance)
sd[sd<=200.] = np.nan
u.skip_distance = sd
dfx = u.interpolate(method="polynomial", order=1)
print(" Modified(intp.) Header:\n",dfx.head())
ax.plot(u.time_start, u.skip_distance, color+"o", lw=lw, markersize=ms, alpha=alpha)
ax.plot(dfx.time_start, smooth(dfx.skip_distance, window_len=wl), lw=lw, color="k", ls="--")
secs, vals = [(t.to_pydatetime()-midnight).seconds for t in dfx.time_start], smooth(dfx.skip_distance, window_len=wl).tolist()
return secs, vals
def estimate_interpolated(eupper, elower):
middle_time = time_range[0] + dt.timedelta(seconds=(time_range[1]-time_range[0]).total_seconds())
midnight = dates[0].replace(hour=0, minute=0, second=0, microsecond=0)
middle_seconds = (middle_time-midnight).seconds
print(midnight, middle_time, middle_seconds)
start_seconds, end_seconds = middle_seconds-(30*60), middle_seconds+(30*60)
from scipy import interpolate
# x = eupper[0] + elower[0]
# y = eupper[1] + elower[1]
# fnc = interpolate.interp1d(x, y, kind="cubic")
# midnight = dates[0].replace(hour=0, minute=0, second=0, microsecond=0)
# new_secs = np.arange(np.min(x), np.max(x))
# new_dates = [midnight + dt.timedelta(seconds=int(s)) for s in new_secs]
# print(new_dates[0], new_dates[-1])
# print(np.min(x), np.max(x), np.min(new_secs), np.max(new_secs))
# Y = fnc(new_secs)
return #Y, new_dates
fig = plt.figure(figsize=(4,3), dpi=120)
ax = fig.add_subplot(111)
fmt = mdates.DateFormatter("%H:%M")
ax.xaxis.set_major_formatter(fmt)
est_upper = plot_rays(ax, upper, "r", wl=21)
est_lower = plot_rays(ax, lower, "b", wl=21)
estimate_interpolated(est_upper, est_lower)
#ax.plot(new_dates, Y, lw=0.6, color="gray")
ax.set_ylabel("Skip Distance (km)")
ax.set_xlabel("Time (UT)")
ax.set_xlim(dates[0], dates[1])
ax.set_ylim(1000,3000)
fig.autofmt_xdate()
fig.savefig("images/ocultation.png", bbox_inches="tight")
return
def occultation_functions(z, a1=np.pi/2, a2=1):
fn_sech = lambda x, a0: 0.5*(2/(np.exp(a0*x) + np.exp(-a0*x)))
fn_tanh = lambda x, a0: 1-np.tanh(a0*x)**2
fig = plt.figure(figsize=(4,3), dpi=120)
ax = fig.add_subplot(111)
ax.plot(z, fn_sech(z,a1), lw=0.7, color="r", label="Sech")
ax.plot(z, fn_tanh(z,a2), lw=0.7, color="b", label="Tanh")
ax.set_ylim(0,1)
ax.set_xlim(z[0],z[-1])
ax.legend(loc=3)
ax.set_ylabel("Occultation (%)")
ax.set_xlabel("Time till tolality (Hours)")
fig.savefig("images/ocultation_model.png", bbox_inches="tight")
return
if __name__ == "__main__":
#build_oblique_foF2_observed_by_radar(dates=[dt.datetime(2017,8,21,15), dt.datetime(2017,8,21,20)])
#occultation_functions(np.linspace(-5,5,1+3600*10))
#build_occultation_functions(rad="cvw", dates=[dt.datetime(2017,8,21,15), dt.datetime(2017,8,21,19)])
#build_occultation_functions(rad="gbr", bmnum=7, dates=[dt.datetime(2021,6,9,9), dt.datetime(2021,6,9,10,30)],
# remove_first_range=1000, remove_last_range=2500)
build_occultation_functions(rad="gbr", bmnum=7, dates=[dt.datetime(2021,6,10,8), dt.datetime(2021,6,10,11)],
remove_first_range=1000, remove_last_range=2500, power_drop=5.,
time_range=[dt.datetime(2021,6,10,9,40), dt.datetime(2021,6,10,10,10)])
occultation_functions(np.linspace(-5,5,1+3600*10))
pass
| _1gaussian | identifier_name |
build_ionospheric_model.py | #!/usr/bin/env python
"""build_ionospheric_model.py: module is dedicated to build foF2 model from fitacf data."""
__author__ = "Chakraborty, S."
__copyright__ = "Copyright 2020, SuperDARN@VT"
__credits__ = []
__license__ = "MIT"
__version__ = "1.0."
__maintainer__ = "Chakraborty, S."
__email__ = "shibaji7@vt.edu"
__status__ = "Research"
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import os
import datetime as dt
import pandas as pd
import numpy as np
from astropy import modeling
from scipy.optimize import curve_fit
from get_sd_data import *
def smooth(x,window_len=51,window="hanning"):
if x.ndim != 1: raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_len: raise ValueError("Input vector needs to be bigger than window size.")
if window_len<3: return x
if not window in ["flat", "hanning", "hamming", "bartlett", "blackman"]: raise ValueError("Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
s = np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]
if window == "flat": w = numpy.ones(window_len,"d")
else: w = eval("np."+window+"(window_len)")
y = np.convolve(w/w.sum(),s,mode="valid")
d = window_len - 1
y = y[int(d/2):-int(d/2)]
return y
def fit_lambda(du, power_drop, tfreq, elv, xlabel="srange", ylabel="p_l", plot=True, fname="images/_out.png"):
x, y = du[xlabel], du[ylabel]
def _1gaussian(xx, a0, c0, s0):
return a0 * (1/(s0*(np.sqrt(2*np.pi))))*(np.exp((-1.0/2.0)*(((xx-c0)/s0)**2)))
def _a_gaussian(xx, a0, c0, s0, sp ):
|
def opt(f, x0, y0, p0):
popt, pcov = curve_fit(f, x0, y0, p0=p0)
perr = np.sqrt(np.diag(pcov))
return popt, perr
def estimate_skip_distance(popt):
xx = np.linspace(0,popt[1],30000)
yy = _a_gaussian(xx, popt_ag[0], popt_ag[1], popt_ag[2], popt_ag[3])
print("\n Power drop {}".format(power_drop))
sd = np.round(xx[np.argmin(np.abs(yy - power_drop))], 1)
return sd
def estimate_skip_distance_1D_params(a0, c0, s0):
xx = np.linspace(0,c0,30000)
yy = _1gaussian(xx, a0, c0, s0)
print("\n Power drop {}".format(power_drop))
sd = np.round(xx[np.argmin(np.abs(yy - power_drop))], 1)
return sd
try:
popt_g, _ = opt(_1gaussian, x, y, p0=[np.max(y), np.mean(x), np.std(x)])
popt_ag, _ = opt(_a_gaussian, x, y, p0=[np.max(y), np.mean(x), np.std(x), 0.1])
skip_distance = estimate_skip_distance(popt_ag)
if plot:
fig = plt.figure(dpi=120,figsize=(3,3))
ax = fig.add_subplot(111)
ax.bar(x, y, width=30, color="y", ec="k", lw=0.3)
xx = np.arange(3500)
ax.plot(xx, _1gaussian(xx, popt_g[0], popt_g[1], popt_g[2]), color="b", lw=0.8, )
ax.plot(xx, _a_gaussian(xx, popt_ag[0], popt_ag[1], popt_ag[2], popt_ag[3]), color="r", lw=0.8, )
ax.text(0.8,0.8,r"$x_0$={} km".format(skip_distance),ha="center", va="center",transform=ax.transAxes)
ax.text(0.2,0.8,r"$\delta_0={}^o$".format(elv),ha="center", va="center",transform=ax.transAxes)
ax.axvline(skip_distance, color="cyan",lw=0.8)
ax.set_xlabel("Slant Range, km")
ax.set_ylabel("Power, db")
ax.set_xlim(0, 3500)
ax.set_ylim(0, 30)
ax.set_title(r"$foF_2^o$={} MHz".format(tfreq))
fig.savefig(fname, bbox_inches="tight")
plt.close()
except:
import traceback
traceback.print_exc()
mx, mean, std = np.max(y), np.mean(x), np.std(x)
skip_distance = estimate_skip_distance_1D_params(mx, mean, std)
fig = plt.figure(dpi=120,figsize=(3,3))
ax = fig.add_subplot(111)
ax.bar(x, y, width=30, color="y", ec="k", lw=0.3)
xx = np.arange(3500)
ax.plot(xx, _1gaussian(xx, mx, mean, std), color="b", lw=0.8, )
ax.text(0.8,0.8,r"$x_0$={} km".format(skip_distance),ha="center", va="center",transform=ax.transAxes)
ax.text(0.2,0.8,r"$\delta_0={}^o$".format(elv),ha="center", va="center",transform=ax.transAxes)
ax.axvline(skip_distance, color="cyan",lw=0.8)
ax.set_xlabel("Slant Range, km")
ax.set_ylabel("Power, db")
ax.set_xlim(0, 3500)
ax.set_ylim(0, 30)
ax.set_title(r"$foF_2^o$={} MHz".format(tfreq))
fig.savefig(fname.replace(".png", "_e.png"), bbox_inches="tight")
plt.close()
return skip_distance
def build_oblique_foF2_observed_by_radar(rad="cvw", dates=[dt.datetime(2017,8,21), dt.datetime(2017,8,21)], bmnum=11, scan_num=1,
remove_first_range=800, remove_last_range=2500, power_drop=10., plot=True):
""" Estimate MUF from the GS power """
csv_fname = "data/{rad}_{start}_{end}.csv".format(rad=rad, start=dates[0].strftime("%Y.%m.%d.%H.%M"),
end=dates[1].strftime("%Y.%m.%d.%H.%M"))
if not os.path.exists(csv_fname):
fdata = FetchData( rad, [dates[0], dates[1]] )
s_params = ["noise.sky", "tfreq", "frang", "rsep", "scan", "bmnum"]
v_params = ["slist", "v", "w_l", "p_l", "elv"]
_, scans = fdata.fetch_data(by="scan", s_params=s_params, v_params=v_params)
beams = []
# Resacle tx_frequency and estimate slant range
for scan in scans:
for beam in scan.beams:
if len(beam.slist) > 0:
setattr(beam, "slant_range", beam.frang + np.array(beam.slist.tolist()) * beam.rsep)
setattr(beam, "tfreq", np.round(beam.tfreq/1e3,1))
beams.append(beam)
# Extract oblique foF2 or MUF scan by scan
print("\n Data will be averaged over {} scans".format(scan_num))
print(" Processing beam {}".format(bmnum))
print(" Remove first range {}".format(remove_first_range))
print(" Remove last range {}".format(remove_last_range))
print(" Power drop {}".format(power_drop))
skip_distance, o_foF2, time_start, time_end = [], [], [], []
for i in range(len(scans)-scan_num):
rscan = scans[i:i+scan_num]
p_l, srange, tfrq, angle = [], [], [], []
for scan in rscan:
for beam in scan.beams:
if beam.bmnum == bmnum:
if len(beam.slist) > 0:
p_l.extend(beam.p_l.tolist())
srange.extend(beam.slant_range.tolist())
tfrq.append(beam.tfreq)
if type(beam.elv) is list: angle.extend(beam.elv)
else: angle.extend(beam.elv.tolist())
du = pd.DataFrame()
du["p_l"], du["srange"] = p_l, srange
du = du[(du.srange>remove_first_range) & (du.srange<remove_last_range)]
fname = "images/{}.png".format(rscan[0].stime.strftime("%Y-%m-%d-%H-%M"))
if len(du) > 0:
sd = fit_lambda(du, power_drop, tfreq=np.mean(tfrq), elv=np.mean(angle).round(1),
xlabel="srange", ylabel="p_l", fname=fname, plot=plot)
if sd != np.nan:
skip_distance.append(sd)
o_foF2.append(np.mean(tfrq))
time_start.append(rscan[0].stime)
time_end.append(rscan[-1].stime)
df = pd.DataFrame()
df["skip_distance"], df["o_foF2"], df["time_start"], df["time_end"] = skip_distance, o_foF2, time_start, time_end
df.to_csv(csv_fname, index=False, header=True)
else: df = pd.read_csv(csv_fname, parse_dates=["time_start", "time_end"])
print(" Header:\n",df.head())
return df
def build_occultation_functions(rad, dates, time_range, bmnum=11, scan_num=1, remove_first_range=500, remove_last_range=2500,
power_drop=10., plot=True):
df = build_oblique_foF2_observed_by_radar(rad, dates, bmnum, scan_num, remove_first_range, remove_last_range, power_drop, plot)
upper = df[df.time_start <= time_range[0]]
lower = df[df.time_start >= time_range[1]]
def plot_rays(ax, u, color="r", ms=1, alpha=0.6, lw=1.5, wl=51):
midnight = dates[0].replace(hour=0, minute=0, second=0, microsecond=0)
sd = np.array(u.skip_distance)
sd[sd<=200.] = np.nan
u.skip_distance = sd
dfx = u.interpolate(method="polynomial", order=1)
print(" Modified(intp.) Header:\n",dfx.head())
ax.plot(u.time_start, u.skip_distance, color+"o", lw=lw, markersize=ms, alpha=alpha)
ax.plot(dfx.time_start, smooth(dfx.skip_distance, window_len=wl), lw=lw, color="k", ls="--")
secs, vals = [(t.to_pydatetime()-midnight).seconds for t in dfx.time_start], smooth(dfx.skip_distance, window_len=wl).tolist()
return secs, vals
def estimate_interpolated(eupper, elower):
middle_time = time_range[0] + dt.timedelta(seconds=(time_range[1]-time_range[0]).total_seconds())
midnight = dates[0].replace(hour=0, minute=0, second=0, microsecond=0)
middle_seconds = (middle_time-midnight).seconds
print(midnight, middle_time, middle_seconds)
start_seconds, end_seconds = middle_seconds-(30*60), middle_seconds+(30*60)
from scipy import interpolate
# x = eupper[0] + elower[0]
# y = eupper[1] + elower[1]
# fnc = interpolate.interp1d(x, y, kind="cubic")
# midnight = dates[0].replace(hour=0, minute=0, second=0, microsecond=0)
# new_secs = np.arange(np.min(x), np.max(x))
# new_dates = [midnight + dt.timedelta(seconds=int(s)) for s in new_secs]
# print(new_dates[0], new_dates[-1])
# print(np.min(x), np.max(x), np.min(new_secs), np.max(new_secs))
# Y = fnc(new_secs)
return #Y, new_dates
fig = plt.figure(figsize=(4,3), dpi=120)
ax = fig.add_subplot(111)
fmt = mdates.DateFormatter("%H:%M")
ax.xaxis.set_major_formatter(fmt)
est_upper = plot_rays(ax, upper, "r", wl=21)
est_lower = plot_rays(ax, lower, "b", wl=21)
estimate_interpolated(est_upper, est_lower)
#ax.plot(new_dates, Y, lw=0.6, color="gray")
ax.set_ylabel("Skip Distance (km)")
ax.set_xlabel("Time (UT)")
ax.set_xlim(dates[0], dates[1])
ax.set_ylim(1000,3000)
fig.autofmt_xdate()
fig.savefig("images/ocultation.png", bbox_inches="tight")
return
def occultation_functions(z, a1=np.pi/2, a2=1):
fn_sech = lambda x, a0: 0.5*(2/(np.exp(a0*x) + np.exp(-a0*x)))
fn_tanh = lambda x, a0: 1-np.tanh(a0*x)**2
fig = plt.figure(figsize=(4,3), dpi=120)
ax = fig.add_subplot(111)
ax.plot(z, fn_sech(z,a1), lw=0.7, color="r", label="Sech")
ax.plot(z, fn_tanh(z,a2), lw=0.7, color="b", label="Tanh")
ax.set_ylim(0,1)
ax.set_xlim(z[0],z[-1])
ax.legend(loc=3)
ax.set_ylabel("Occultation (%)")
ax.set_xlabel("Time till tolality (Hours)")
fig.savefig("images/ocultation_model.png", bbox_inches="tight")
return
if __name__ == "__main__":
#build_oblique_foF2_observed_by_radar(dates=[dt.datetime(2017,8,21,15), dt.datetime(2017,8,21,20)])
#occultation_functions(np.linspace(-5,5,1+3600*10))
#build_occultation_functions(rad="cvw", dates=[dt.datetime(2017,8,21,15), dt.datetime(2017,8,21,19)])
#build_occultation_functions(rad="gbr", bmnum=7, dates=[dt.datetime(2021,6,9,9), dt.datetime(2021,6,9,10,30)],
# remove_first_range=1000, remove_last_range=2500)
build_occultation_functions(rad="gbr", bmnum=7, dates=[dt.datetime(2021,6,10,8), dt.datetime(2021,6,10,11)],
remove_first_range=1000, remove_last_range=2500, power_drop=5.,
time_range=[dt.datetime(2021,6,10,9,40), dt.datetime(2021,6,10,10,10)])
occultation_functions(np.linspace(-5,5,1+3600*10))
pass
| return _1gaussian(xx, a0, c0, s0 ) * (0.5 + (np.arctan(sp*(xx-c0))/np.pi)) | identifier_body |
build_ionospheric_model.py | #!/usr/bin/env python
"""build_ionospheric_model.py: module is dedicated to build foF2 model from fitacf data."""
__author__ = "Chakraborty, S."
__copyright__ = "Copyright 2020, SuperDARN@VT"
__credits__ = []
__license__ = "MIT"
__version__ = "1.0."
__maintainer__ = "Chakraborty, S."
__email__ = "shibaji7@vt.edu"
__status__ = "Research"
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import os
import datetime as dt
import pandas as pd
import numpy as np
from astropy import modeling
from scipy.optimize import curve_fit
from get_sd_data import *
def smooth(x,window_len=51,window="hanning"):
if x.ndim != 1: raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_len: raise ValueError("Input vector needs to be bigger than window size.")
if window_len<3: return x
if not window in ["flat", "hanning", "hamming", "bartlett", "blackman"]: raise ValueError("Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
s = np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]
if window == "flat": w = numpy.ones(window_len,"d")
else: w = eval("np."+window+"(window_len)")
y = np.convolve(w/w.sum(),s,mode="valid")
d = window_len - 1
y = y[int(d/2):-int(d/2)]
return y
def fit_lambda(du, power_drop, tfreq, elv, xlabel="srange", ylabel="p_l", plot=True, fname="images/_out.png"):
x, y = du[xlabel], du[ylabel]
def _1gaussian(xx, a0, c0, s0):
return a0 * (1/(s0*(np.sqrt(2*np.pi))))*(np.exp((-1.0/2.0)*(((xx-c0)/s0)**2)))
def _a_gaussian(xx, a0, c0, s0, sp ):
return _1gaussian(xx, a0, c0, s0 ) * (0.5 + (np.arctan(sp*(xx-c0))/np.pi))
def opt(f, x0, y0, p0):
popt, pcov = curve_fit(f, x0, y0, p0=p0)
perr = np.sqrt(np.diag(pcov))
return popt, perr
def estimate_skip_distance(popt):
xx = np.linspace(0,popt[1],30000)
yy = _a_gaussian(xx, popt_ag[0], popt_ag[1], popt_ag[2], popt_ag[3])
print("\n Power drop {}".format(power_drop))
sd = np.round(xx[np.argmin(np.abs(yy - power_drop))], 1)
return sd
def estimate_skip_distance_1D_params(a0, c0, s0):
xx = np.linspace(0,c0,30000)
yy = _1gaussian(xx, a0, c0, s0)
print("\n Power drop {}".format(power_drop))
sd = np.round(xx[np.argmin(np.abs(yy - power_drop))], 1)
return sd
try:
popt_g, _ = opt(_1gaussian, x, y, p0=[np.max(y), np.mean(x), np.std(x)])
popt_ag, _ = opt(_a_gaussian, x, y, p0=[np.max(y), np.mean(x), np.std(x), 0.1])
skip_distance = estimate_skip_distance(popt_ag)
if plot:
fig = plt.figure(dpi=120,figsize=(3,3))
ax = fig.add_subplot(111)
ax.bar(x, y, width=30, color="y", ec="k", lw=0.3)
xx = np.arange(3500)
ax.plot(xx, _1gaussian(xx, popt_g[0], popt_g[1], popt_g[2]), color="b", lw=0.8, )
ax.plot(xx, _a_gaussian(xx, popt_ag[0], popt_ag[1], popt_ag[2], popt_ag[3]), color="r", lw=0.8, )
ax.text(0.8,0.8,r"$x_0$={} km".format(skip_distance),ha="center", va="center",transform=ax.transAxes)
ax.text(0.2,0.8,r"$\delta_0={}^o$".format(elv),ha="center", va="center",transform=ax.transAxes)
ax.axvline(skip_distance, color="cyan",lw=0.8)
ax.set_xlabel("Slant Range, km")
ax.set_ylabel("Power, db")
ax.set_xlim(0, 3500)
ax.set_ylim(0, 30)
ax.set_title(r"$foF_2^o$={} MHz".format(tfreq))
fig.savefig(fname, bbox_inches="tight")
plt.close()
except:
import traceback
traceback.print_exc()
mx, mean, std = np.max(y), np.mean(x), np.std(x)
skip_distance = estimate_skip_distance_1D_params(mx, mean, std)
fig = plt.figure(dpi=120,figsize=(3,3))
ax = fig.add_subplot(111)
ax.bar(x, y, width=30, color="y", ec="k", lw=0.3)
xx = np.arange(3500)
ax.plot(xx, _1gaussian(xx, mx, mean, std), color="b", lw=0.8, )
ax.text(0.8,0.8,r"$x_0$={} km".format(skip_distance),ha="center", va="center",transform=ax.transAxes)
ax.text(0.2,0.8,r"$\delta_0={}^o$".format(elv),ha="center", va="center",transform=ax.transAxes)
ax.axvline(skip_distance, color="cyan",lw=0.8)
ax.set_xlabel("Slant Range, km")
ax.set_ylabel("Power, db")
ax.set_xlim(0, 3500)
ax.set_ylim(0, 30)
ax.set_title(r"$foF_2^o$={} MHz".format(tfreq))
fig.savefig(fname.replace(".png", "_e.png"), bbox_inches="tight")
plt.close()
return skip_distance
def build_oblique_foF2_observed_by_radar(rad="cvw", dates=[dt.datetime(2017,8,21), dt.datetime(2017,8,21)], bmnum=11, scan_num=1,
remove_first_range=800, remove_last_range=2500, power_drop=10., plot=True):
""" Estimate MUF from the GS power """
csv_fname = "data/{rad}_{start}_{end}.csv".format(rad=rad, start=dates[0].strftime("%Y.%m.%d.%H.%M"),
end=dates[1].strftime("%Y.%m.%d.%H.%M"))
if not os.path.exists(csv_fname):
fdata = FetchData( rad, [dates[0], dates[1]] )
s_params = ["noise.sky", "tfreq", "frang", "rsep", "scan", "bmnum"]
v_params = ["slist", "v", "w_l", "p_l", "elv"]
_, scans = fdata.fetch_data(by="scan", s_params=s_params, v_params=v_params)
beams = []
# Resacle tx_frequency and estimate slant range
for scan in scans: | for beam in scan.beams:
if len(beam.slist) > 0:
setattr(beam, "slant_range", beam.frang + np.array(beam.slist.tolist()) * beam.rsep)
setattr(beam, "tfreq", np.round(beam.tfreq/1e3,1))
beams.append(beam)
# Extract oblique foF2 or MUF scan by scan
print("\n Data will be averaged over {} scans".format(scan_num))
print(" Processing beam {}".format(bmnum))
print(" Remove first range {}".format(remove_first_range))
print(" Remove last range {}".format(remove_last_range))
print(" Power drop {}".format(power_drop))
skip_distance, o_foF2, time_start, time_end = [], [], [], []
for i in range(len(scans)-scan_num):
rscan = scans[i:i+scan_num]
p_l, srange, tfrq, angle = [], [], [], []
for scan in rscan:
for beam in scan.beams:
if beam.bmnum == bmnum:
if len(beam.slist) > 0:
p_l.extend(beam.p_l.tolist())
srange.extend(beam.slant_range.tolist())
tfrq.append(beam.tfreq)
if type(beam.elv) is list: angle.extend(beam.elv)
else: angle.extend(beam.elv.tolist())
du = pd.DataFrame()
du["p_l"], du["srange"] = p_l, srange
du = du[(du.srange>remove_first_range) & (du.srange<remove_last_range)]
fname = "images/{}.png".format(rscan[0].stime.strftime("%Y-%m-%d-%H-%M"))
if len(du) > 0:
sd = fit_lambda(du, power_drop, tfreq=np.mean(tfrq), elv=np.mean(angle).round(1),
xlabel="srange", ylabel="p_l", fname=fname, plot=plot)
if sd != np.nan:
skip_distance.append(sd)
o_foF2.append(np.mean(tfrq))
time_start.append(rscan[0].stime)
time_end.append(rscan[-1].stime)
df = pd.DataFrame()
df["skip_distance"], df["o_foF2"], df["time_start"], df["time_end"] = skip_distance, o_foF2, time_start, time_end
df.to_csv(csv_fname, index=False, header=True)
else: df = pd.read_csv(csv_fname, parse_dates=["time_start", "time_end"])
print(" Header:\n",df.head())
return df
def build_occultation_functions(rad, dates, time_range, bmnum=11, scan_num=1, remove_first_range=500, remove_last_range=2500,
power_drop=10., plot=True):
df = build_oblique_foF2_observed_by_radar(rad, dates, bmnum, scan_num, remove_first_range, remove_last_range, power_drop, plot)
upper = df[df.time_start <= time_range[0]]
lower = df[df.time_start >= time_range[1]]
def plot_rays(ax, u, color="r", ms=1, alpha=0.6, lw=1.5, wl=51):
midnight = dates[0].replace(hour=0, minute=0, second=0, microsecond=0)
sd = np.array(u.skip_distance)
sd[sd<=200.] = np.nan
u.skip_distance = sd
dfx = u.interpolate(method="polynomial", order=1)
print(" Modified(intp.) Header:\n",dfx.head())
ax.plot(u.time_start, u.skip_distance, color+"o", lw=lw, markersize=ms, alpha=alpha)
ax.plot(dfx.time_start, smooth(dfx.skip_distance, window_len=wl), lw=lw, color="k", ls="--")
secs, vals = [(t.to_pydatetime()-midnight).seconds for t in dfx.time_start], smooth(dfx.skip_distance, window_len=wl).tolist()
return secs, vals
def estimate_interpolated(eupper, elower):
middle_time = time_range[0] + dt.timedelta(seconds=(time_range[1]-time_range[0]).total_seconds())
midnight = dates[0].replace(hour=0, minute=0, second=0, microsecond=0)
middle_seconds = (middle_time-midnight).seconds
print(midnight, middle_time, middle_seconds)
start_seconds, end_seconds = middle_seconds-(30*60), middle_seconds+(30*60)
from scipy import interpolate
# x = eupper[0] + elower[0]
# y = eupper[1] + elower[1]
# fnc = interpolate.interp1d(x, y, kind="cubic")
# midnight = dates[0].replace(hour=0, minute=0, second=0, microsecond=0)
# new_secs = np.arange(np.min(x), np.max(x))
# new_dates = [midnight + dt.timedelta(seconds=int(s)) for s in new_secs]
# print(new_dates[0], new_dates[-1])
# print(np.min(x), np.max(x), np.min(new_secs), np.max(new_secs))
# Y = fnc(new_secs)
return #Y, new_dates
fig = plt.figure(figsize=(4,3), dpi=120)
ax = fig.add_subplot(111)
fmt = mdates.DateFormatter("%H:%M")
ax.xaxis.set_major_formatter(fmt)
est_upper = plot_rays(ax, upper, "r", wl=21)
est_lower = plot_rays(ax, lower, "b", wl=21)
estimate_interpolated(est_upper, est_lower)
#ax.plot(new_dates, Y, lw=0.6, color="gray")
ax.set_ylabel("Skip Distance (km)")
ax.set_xlabel("Time (UT)")
ax.set_xlim(dates[0], dates[1])
ax.set_ylim(1000,3000)
fig.autofmt_xdate()
fig.savefig("images/ocultation.png", bbox_inches="tight")
return
def occultation_functions(z, a1=np.pi/2, a2=1):
fn_sech = lambda x, a0: 0.5*(2/(np.exp(a0*x) + np.exp(-a0*x)))
fn_tanh = lambda x, a0: 1-np.tanh(a0*x)**2
fig = plt.figure(figsize=(4,3), dpi=120)
ax = fig.add_subplot(111)
ax.plot(z, fn_sech(z,a1), lw=0.7, color="r", label="Sech")
ax.plot(z, fn_tanh(z,a2), lw=0.7, color="b", label="Tanh")
ax.set_ylim(0,1)
ax.set_xlim(z[0],z[-1])
ax.legend(loc=3)
ax.set_ylabel("Occultation (%)")
ax.set_xlabel("Time till tolality (Hours)")
fig.savefig("images/ocultation_model.png", bbox_inches="tight")
return
if __name__ == "__main__":
#build_oblique_foF2_observed_by_radar(dates=[dt.datetime(2017,8,21,15), dt.datetime(2017,8,21,20)])
#occultation_functions(np.linspace(-5,5,1+3600*10))
#build_occultation_functions(rad="cvw", dates=[dt.datetime(2017,8,21,15), dt.datetime(2017,8,21,19)])
#build_occultation_functions(rad="gbr", bmnum=7, dates=[dt.datetime(2021,6,9,9), dt.datetime(2021,6,9,10,30)],
# remove_first_range=1000, remove_last_range=2500)
build_occultation_functions(rad="gbr", bmnum=7, dates=[dt.datetime(2021,6,10,8), dt.datetime(2021,6,10,11)],
remove_first_range=1000, remove_last_range=2500, power_drop=5.,
time_range=[dt.datetime(2021,6,10,9,40), dt.datetime(2021,6,10,10,10)])
occultation_functions(np.linspace(-5,5,1+3600*10))
pass | random_line_split | |
build_ionospheric_model.py | #!/usr/bin/env python
"""build_ionospheric_model.py: module is dedicated to build foF2 model from fitacf data."""
__author__ = "Chakraborty, S."
__copyright__ = "Copyright 2020, SuperDARN@VT"
__credits__ = []
__license__ = "MIT"
__version__ = "1.0."
__maintainer__ = "Chakraborty, S."
__email__ = "shibaji7@vt.edu"
__status__ = "Research"
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import os
import datetime as dt
import pandas as pd
import numpy as np
from astropy import modeling
from scipy.optimize import curve_fit
from get_sd_data import *
def smooth(x,window_len=51,window="hanning"):
if x.ndim != 1: raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_len: raise ValueError("Input vector needs to be bigger than window size.")
if window_len<3: return x
if not window in ["flat", "hanning", "hamming", "bartlett", "blackman"]: raise ValueError("Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
s = np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]
if window == "flat": w = numpy.ones(window_len,"d")
else: w = eval("np."+window+"(window_len)")
y = np.convolve(w/w.sum(),s,mode="valid")
d = window_len - 1
y = y[int(d/2):-int(d/2)]
return y
def fit_lambda(du, power_drop, tfreq, elv, xlabel="srange", ylabel="p_l", plot=True, fname="images/_out.png"):
x, y = du[xlabel], du[ylabel]
def _1gaussian(xx, a0, c0, s0):
return a0 * (1/(s0*(np.sqrt(2*np.pi))))*(np.exp((-1.0/2.0)*(((xx-c0)/s0)**2)))
def _a_gaussian(xx, a0, c0, s0, sp ):
return _1gaussian(xx, a0, c0, s0 ) * (0.5 + (np.arctan(sp*(xx-c0))/np.pi))
def opt(f, x0, y0, p0):
popt, pcov = curve_fit(f, x0, y0, p0=p0)
perr = np.sqrt(np.diag(pcov))
return popt, perr
def estimate_skip_distance(popt):
xx = np.linspace(0,popt[1],30000)
yy = _a_gaussian(xx, popt_ag[0], popt_ag[1], popt_ag[2], popt_ag[3])
print("\n Power drop {}".format(power_drop))
sd = np.round(xx[np.argmin(np.abs(yy - power_drop))], 1)
return sd
def estimate_skip_distance_1D_params(a0, c0, s0):
xx = np.linspace(0,c0,30000)
yy = _1gaussian(xx, a0, c0, s0)
print("\n Power drop {}".format(power_drop))
sd = np.round(xx[np.argmin(np.abs(yy - power_drop))], 1)
return sd
try:
popt_g, _ = opt(_1gaussian, x, y, p0=[np.max(y), np.mean(x), np.std(x)])
popt_ag, _ = opt(_a_gaussian, x, y, p0=[np.max(y), np.mean(x), np.std(x), 0.1])
skip_distance = estimate_skip_distance(popt_ag)
if plot:
fig = plt.figure(dpi=120,figsize=(3,3))
ax = fig.add_subplot(111)
ax.bar(x, y, width=30, color="y", ec="k", lw=0.3)
xx = np.arange(3500)
ax.plot(xx, _1gaussian(xx, popt_g[0], popt_g[1], popt_g[2]), color="b", lw=0.8, )
ax.plot(xx, _a_gaussian(xx, popt_ag[0], popt_ag[1], popt_ag[2], popt_ag[3]), color="r", lw=0.8, )
ax.text(0.8,0.8,r"$x_0$={} km".format(skip_distance),ha="center", va="center",transform=ax.transAxes)
ax.text(0.2,0.8,r"$\delta_0={}^o$".format(elv),ha="center", va="center",transform=ax.transAxes)
ax.axvline(skip_distance, color="cyan",lw=0.8)
ax.set_xlabel("Slant Range, km")
ax.set_ylabel("Power, db")
ax.set_xlim(0, 3500)
ax.set_ylim(0, 30)
ax.set_title(r"$foF_2^o$={} MHz".format(tfreq))
fig.savefig(fname, bbox_inches="tight")
plt.close()
except:
import traceback
traceback.print_exc()
mx, mean, std = np.max(y), np.mean(x), np.std(x)
skip_distance = estimate_skip_distance_1D_params(mx, mean, std)
fig = plt.figure(dpi=120,figsize=(3,3))
ax = fig.add_subplot(111)
ax.bar(x, y, width=30, color="y", ec="k", lw=0.3)
xx = np.arange(3500)
ax.plot(xx, _1gaussian(xx, mx, mean, std), color="b", lw=0.8, )
ax.text(0.8,0.8,r"$x_0$={} km".format(skip_distance),ha="center", va="center",transform=ax.transAxes)
ax.text(0.2,0.8,r"$\delta_0={}^o$".format(elv),ha="center", va="center",transform=ax.transAxes)
ax.axvline(skip_distance, color="cyan",lw=0.8)
ax.set_xlabel("Slant Range, km")
ax.set_ylabel("Power, db")
ax.set_xlim(0, 3500)
ax.set_ylim(0, 30)
ax.set_title(r"$foF_2^o$={} MHz".format(tfreq))
fig.savefig(fname.replace(".png", "_e.png"), bbox_inches="tight")
plt.close()
return skip_distance
def build_oblique_foF2_observed_by_radar(rad="cvw", dates=[dt.datetime(2017,8,21), dt.datetime(2017,8,21)], bmnum=11, scan_num=1,
remove_first_range=800, remove_last_range=2500, power_drop=10., plot=True):
""" Estimate MUF from the GS power """
csv_fname = "data/{rad}_{start}_{end}.csv".format(rad=rad, start=dates[0].strftime("%Y.%m.%d.%H.%M"),
end=dates[1].strftime("%Y.%m.%d.%H.%M"))
if not os.path.exists(csv_fname):
fdata = FetchData( rad, [dates[0], dates[1]] )
s_params = ["noise.sky", "tfreq", "frang", "rsep", "scan", "bmnum"]
v_params = ["slist", "v", "w_l", "p_l", "elv"]
_, scans = fdata.fetch_data(by="scan", s_params=s_params, v_params=v_params)
beams = []
# Resacle tx_frequency and estimate slant range
for scan in scans:
for beam in scan.beams:
if len(beam.slist) > 0:
setattr(beam, "slant_range", beam.frang + np.array(beam.slist.tolist()) * beam.rsep)
setattr(beam, "tfreq", np.round(beam.tfreq/1e3,1))
beams.append(beam)
# Extract oblique foF2 or MUF scan by scan
print("\n Data will be averaged over {} scans".format(scan_num))
print(" Processing beam {}".format(bmnum))
print(" Remove first range {}".format(remove_first_range))
print(" Remove last range {}".format(remove_last_range))
print(" Power drop {}".format(power_drop))
skip_distance, o_foF2, time_start, time_end = [], [], [], []
for i in range(len(scans)-scan_num):
rscan = scans[i:i+scan_num]
p_l, srange, tfrq, angle = [], [], [], []
for scan in rscan:
for beam in scan.beams:
if beam.bmnum == bmnum:
|
du = pd.DataFrame()
du["p_l"], du["srange"] = p_l, srange
du = du[(du.srange>remove_first_range) & (du.srange<remove_last_range)]
fname = "images/{}.png".format(rscan[0].stime.strftime("%Y-%m-%d-%H-%M"))
if len(du) > 0:
sd = fit_lambda(du, power_drop, tfreq=np.mean(tfrq), elv=np.mean(angle).round(1),
xlabel="srange", ylabel="p_l", fname=fname, plot=plot)
if sd != np.nan:
skip_distance.append(sd)
o_foF2.append(np.mean(tfrq))
time_start.append(rscan[0].stime)
time_end.append(rscan[-1].stime)
df = pd.DataFrame()
df["skip_distance"], df["o_foF2"], df["time_start"], df["time_end"] = skip_distance, o_foF2, time_start, time_end
df.to_csv(csv_fname, index=False, header=True)
else: df = pd.read_csv(csv_fname, parse_dates=["time_start", "time_end"])
print(" Header:\n",df.head())
return df
def build_occultation_functions(rad, dates, time_range, bmnum=11, scan_num=1, remove_first_range=500, remove_last_range=2500,
power_drop=10., plot=True):
df = build_oblique_foF2_observed_by_radar(rad, dates, bmnum, scan_num, remove_first_range, remove_last_range, power_drop, plot)
upper = df[df.time_start <= time_range[0]]
lower = df[df.time_start >= time_range[1]]
def plot_rays(ax, u, color="r", ms=1, alpha=0.6, lw=1.5, wl=51):
midnight = dates[0].replace(hour=0, minute=0, second=0, microsecond=0)
sd = np.array(u.skip_distance)
sd[sd<=200.] = np.nan
u.skip_distance = sd
dfx = u.interpolate(method="polynomial", order=1)
print(" Modified(intp.) Header:\n",dfx.head())
ax.plot(u.time_start, u.skip_distance, color+"o", lw=lw, markersize=ms, alpha=alpha)
ax.plot(dfx.time_start, smooth(dfx.skip_distance, window_len=wl), lw=lw, color="k", ls="--")
secs, vals = [(t.to_pydatetime()-midnight).seconds for t in dfx.time_start], smooth(dfx.skip_distance, window_len=wl).tolist()
return secs, vals
def estimate_interpolated(eupper, elower):
middle_time = time_range[0] + dt.timedelta(seconds=(time_range[1]-time_range[0]).total_seconds())
midnight = dates[0].replace(hour=0, minute=0, second=0, microsecond=0)
middle_seconds = (middle_time-midnight).seconds
print(midnight, middle_time, middle_seconds)
start_seconds, end_seconds = middle_seconds-(30*60), middle_seconds+(30*60)
from scipy import interpolate
# x = eupper[0] + elower[0]
# y = eupper[1] + elower[1]
# fnc = interpolate.interp1d(x, y, kind="cubic")
# midnight = dates[0].replace(hour=0, minute=0, second=0, microsecond=0)
# new_secs = np.arange(np.min(x), np.max(x))
# new_dates = [midnight + dt.timedelta(seconds=int(s)) for s in new_secs]
# print(new_dates[0], new_dates[-1])
# print(np.min(x), np.max(x), np.min(new_secs), np.max(new_secs))
# Y = fnc(new_secs)
return #Y, new_dates
fig = plt.figure(figsize=(4,3), dpi=120)
ax = fig.add_subplot(111)
fmt = mdates.DateFormatter("%H:%M")
ax.xaxis.set_major_formatter(fmt)
est_upper = plot_rays(ax, upper, "r", wl=21)
est_lower = plot_rays(ax, lower, "b", wl=21)
estimate_interpolated(est_upper, est_lower)
#ax.plot(new_dates, Y, lw=0.6, color="gray")
ax.set_ylabel("Skip Distance (km)")
ax.set_xlabel("Time (UT)")
ax.set_xlim(dates[0], dates[1])
ax.set_ylim(1000,3000)
fig.autofmt_xdate()
fig.savefig("images/ocultation.png", bbox_inches="tight")
return
def occultation_functions(z, a1=np.pi/2, a2=1):
fn_sech = lambda x, a0: 0.5*(2/(np.exp(a0*x) + np.exp(-a0*x)))
fn_tanh = lambda x, a0: 1-np.tanh(a0*x)**2
fig = plt.figure(figsize=(4,3), dpi=120)
ax = fig.add_subplot(111)
ax.plot(z, fn_sech(z,a1), lw=0.7, color="r", label="Sech")
ax.plot(z, fn_tanh(z,a2), lw=0.7, color="b", label="Tanh")
ax.set_ylim(0,1)
ax.set_xlim(z[0],z[-1])
ax.legend(loc=3)
ax.set_ylabel("Occultation (%)")
ax.set_xlabel("Time till tolality (Hours)")
fig.savefig("images/ocultation_model.png", bbox_inches="tight")
return
if __name__ == "__main__":
#build_oblique_foF2_observed_by_radar(dates=[dt.datetime(2017,8,21,15), dt.datetime(2017,8,21,20)])
#occultation_functions(np.linspace(-5,5,1+3600*10))
#build_occultation_functions(rad="cvw", dates=[dt.datetime(2017,8,21,15), dt.datetime(2017,8,21,19)])
#build_occultation_functions(rad="gbr", bmnum=7, dates=[dt.datetime(2021,6,9,9), dt.datetime(2021,6,9,10,30)],
# remove_first_range=1000, remove_last_range=2500)
build_occultation_functions(rad="gbr", bmnum=7, dates=[dt.datetime(2021,6,10,8), dt.datetime(2021,6,10,11)],
remove_first_range=1000, remove_last_range=2500, power_drop=5.,
time_range=[dt.datetime(2021,6,10,9,40), dt.datetime(2021,6,10,10,10)])
occultation_functions(np.linspace(-5,5,1+3600*10))
pass
| if len(beam.slist) > 0:
p_l.extend(beam.p_l.tolist())
srange.extend(beam.slant_range.tolist())
tfrq.append(beam.tfreq)
if type(beam.elv) is list: angle.extend(beam.elv)
else: angle.extend(beam.elv.tolist()) | conditional_block |
actionitems.ts | import { Component } from '@angular/core';
import { IonicPage, NavController, NavParams, PopoverController } from 'ionic-angular';
import { TopbarComponent } from '../../components/topbar/topbar';
import { AssessmentService } from '../../services/assessment.service';
import { GoogleAnalytics } from '../../application/helpers/GoogleAnalytics';
import { ReportInfoCardComponent } from "../../components/report-info-card/report-info-card";
// import { Ng2TableModule } from 'ng2-table/ng2-table';
// import { NgTableComponent, NgTableFilteringDirective, NgTablePagingDirective, NgTableSortingDirective } from 'ng2-table/ng2-table';
import * as XLSX from 'xlsx';
import { QuestionsPage } from '../questions/questions';
import { Apollo } from "apollo-angular";
import gql from "graphql-tag";
var assessmentQuery = gql`
query assessment($_id: String) {
assessment(_id: $_id) {
targetMRL
questions {
mrLevel
questionText
threadName
subThreadName
currentAnswer
questionId
answers {
when
who
risk
consequence
likelihood
what
reason
assumptionsNo
notesNo
answer
# technical
# schedule
# cost
}
}
files {
name
questionId
url
}
}
}
`
@IonicPage()
@Component({
selector: 'page-actionitems',
templateUrl: 'actionitems.html',
})
export class ActionitemsPage {
public data:any;
filterList: any = {};
unfilteredQuestions: any;
autoFilter = false;
public rows:Array<any> = [];
public columns:Array<any> = [
{title: 'Thread', name: 'threadName', filtering: {filterString: '', placeholder: 'Filter by thread'}},
{title: 'Subthread', name: 'subThreadName', filtering: {filterString: '', placeholder: 'Filter by subthread'}},
{title: 'Question', name: 'questionText', filtering: {filterString: '', placeholder: 'Filter by question'}},
// {title: 'Answer', name: 'currentAnswer', filtering: {filterString: '', placeholder: 'Filter by answer'}},
{title: 'Action', name: 'what', filtering: {filterString: '', placeholder: 'Filter by action'}},
{title: 'Due', name: 'when', filtering: {filterString: '', placeholder: 'Filter by due date'}, sort: 'asc'},
{title: 'Owner', name: 'who', filtering: {filterString: '', placeholder: 'Filter by owner'}},
{title: 'Risk Level', name: 'risk', filtering: {filterString: '', placeholder: 'Filter by risk level'}}
];
public page:number = 1;
public itemsPerPage:number = 10;
public maxSize:number = 5;
public numPages:number = 1;
public length:number = 0;
public config:any = {
paging: true,
sorting: {columns: this.columns},
filtering: {filterString: ''},
className: ['table-striped', 'table-bordered']
};
no: any;
assessmentId: any;
private attachments: any;
pageName: any = "Action Items";
assessmentIdFromParams: any;
constructor( private apollo: Apollo,
public navCtrl: NavController,
public navParams: NavParams,
public popOver: PopoverController,
private assessmentService: AssessmentService) {
this.assessmentIdFromParams = navParams.data.assessmentId;
console.log(this.assessmentIdFromParams);
this.autoFilter = navParams.data.autoFilter;
}
async ngOnInit() {
this.assessmentId = await this.assessmentService.getCurrentAssessmentId();
this.apollo.watchQuery({
query: assessmentQuery,
variables: {_id: this.assessmentId},
fetchPolicy: "network-only"
}).valueChanges
.subscribe(data => {
this.no = (<any>data.data).assessment.questions.filter( a => {
if (a.answers.length > 0 ) {
return a.answers[a.answers.length - 1].answer == "No"
}
});
var targetMRL = (<any>data.data).assessment.targetMRL;
this.attachments = (<any>data.data).assessment.files;
var newData:Array<any> = [];
console.log(this.no);
this.no.forEach( (element) => {
var newObj:any = {};
newObj.threadName = "" + element.threadName;
newObj.subThreadName = "" + element.subThreadName;
newObj.questionText = "" + element.questionText;
// newObj.currentAnswer = "" + element.answers[element.answers.length - 1].answer;
newObj.what = "" + element.answers[element.answers.length - 1].what;
newObj.when = this.formatDate( element.answers[element.answers.length - 1].when);
newObj.who = "" + element.answers[element.answers.length - 1].who;
newObj.level = "" + element.mrLevel;
var cur = element.answers[element.answers.length - 1];
newObj.risk = "" + this.calculateRiskScore(cur.likelihood, cur.consequence);
newData.push(newObj);
});
this.data = newData;
this.unfilteredQuestions = newData;
if (this.autoFilter){
console.log('here');
this.filterList.filterMRL = targetMRL;
console.log(targetMRL)
this.data = this.unfilteredQuestions.filter(question => {
if (question.level == targetMRL){
return question;
}
});
} else {
this.data = this.unfilteredQuestions;
}
// console.log(this.data);
// this.length = this.data.length;
// this.onChangeTable(this.config);
});
}
/**
* @purpose: format data as a sortable string for table
* @input: date: a new Date() parsable string
* @output: string, format YYYY-MM-DD
*/
formatDate(date){
if ( date ) {
return new Date(date).toISOString().substr(0,10);
} else {
return '';
}
}
filterTheList(){
console.log(this.filterList.filterMRL)
if (this.filterList.filterMRL && this.filterList.filterMRL != 0) {
var filteredQuestions = this.unfilteredQuestions.filter(question => {
if (question.level == this.filterList.filterMRL) {
// console.log('here')
return question
}
});
console.log(filteredQuestions);
this.data = filteredQuestions;
} else {
this.data = this.unfilteredQuestions;
}
}
clearFilter() {
this.filterList.filterMRL = 0;
this.filterTheList();
}
public saveXLS() {
var headers = this.columns.map(c => c.title);
var values = this.no.map(nq => {
return [
nq.threadName,
nq.subThreadName,
nq.questionText,
// nq.answers[nq.answers.length - 1].answer,
nq.answers[nq.answers.length - 1].what,
nq.answers[nq.answers.length - 1].when,
nq.answers[nq.answers.length - 1].who,
this.calculateRiskScore(nq.answers[nq.answers.length - 1].likelihood, nq.answers[nq.answers.length - 1].consequence)
];
})
var worksheet = [headers, ...values];
var ws = XLSX.utils.aoa_to_sheet(worksheet);
var wb = XLSX.utils.book_new();
XLSX.utils.book_append_sheet(wb, ws, 'Action Items');
/* save to file */
XLSX.writeFile(wb, 'action_items.xlsx');
}
public changePage(page:any, data:Array<any> = this.data):Array<any> {
let start = (page.page - 1) * page.itemsPerPage;
let end = page.itemsPerPage > -1 ? (start + page.itemsPerPage) : data.length;
return data.slice(start, end);
}
public changeSort(data:any, config:any):any {
if (!config.sorting) {
return data;
}
let columns = this.config.sorting.columns || [];
let columnName:string = void 0;
let sort:string = void 0;
for (let i = 0; i < columns.length; i++) {
if (columns[i].sort !== '' && columns[i].sort !== false) {
columnName = columns[i].name;
sort = columns[i].sort;
}
}
if (!columnName) {
return data;
}
// simple sorting
return data.sort((previous:any, current:any) => {
if (previous[columnName] > current[columnName]) {
return sort === 'desc' ? -1 : 1;
} else if (previous[columnName] < current[columnName]) {
return sort === 'asc' ? -1 : 1;
}
return 0;
});
}
public changeFilter(data:any, config:any):any {
let filteredData:Array<any> = data;
this.columns.forEach((column:any) => {
if (column.filtering) {
filteredData = filteredData.filter((item:any) => {
return item[column.name].match(column.filtering.filterString);
});
}
});
if (!config.filtering) {
return filteredData;
}
if (config.filtering.columnName) {
return filteredData.filter((item:any) =>
item[config.filtering.columnName].match(this.config.filtering.filterString));
}
let tempArray:Array<any> = [];
filteredData.forEach((item:any) => {
let flag = false;
this.columns.forEach((column:any) => {
if (item[column.name].toString().match(this.config.filtering.filterString)) {
flag = true;
}
});
if (flag) {
tempArray.push(item);
}
});
filteredData = tempArray;
return filteredData;
}
public onChangeTable(config:any, page:any = {page: this.page, itemsPerPage: this.itemsPerPage}):any {
if (config.filtering) {
Object.assign(this.config.filtering, config.filtering);
}
if (config.sorting) {
Object.assign(this.config.sorting, config.sorting);
}
let filteredData = this.changeFilter(this.data, this.config);
let sortedData = this.changeSort(filteredData, this.config);
this.rows = page && config.paging ? this.changePage(page, sortedData) : sortedData;
this.length = sortedData.length;
}
public onCellClick(data: any): any {
console.log(data);
}
unique = (item, index, array) => array.indexOf(item) == index
ionViewWillEnter() {
GoogleAnalytics.trackPage("actionitems");
}
displayRisks(q) {
var risks = [];
q.technical ? risks.push("Technical") : null
q.schedule ? risks.push("Schedule") : null
q.cost ? risks.push("Cost") : null
return risks.join(", ") || "none";
}
getAttachments(q) {
return this.attachments.filter(a => a.questionId == q.questionId );
}
| (questionId) {
this.navCtrl.push(QuestionsPage, {
questionId: questionId
});
}
public calculateRiskScore(likelihood, consequence) {
// preventing off by one errors, with nulls.
// values should always be 1-5
var riskMatrix = [
[ null ],
[ null, 1, 3, 5, 8, 12],
[ null, 2, 7, 11, 14, 17],
[ null, 4, 10, 15, 19, 21],
[ null, 6, 12, 18, 22, 24],
[ null, 9, 16, 20, 23, 25]
];
if ( likelihood && consequence ) {
// value is the same as the index, b/c we put nulls in the matrix
var likelihoodIndex = Number(likelihood);
var consequenceIndex = Number(consequence);
// var name = selectedBox.className.replace(/ selected/g, '')
// selectedBox.className = `${name} selected`;
return riskMatrix[likelihoodIndex][consequenceIndex];
} else {
return "";
}
}
}
| navToQuestion | identifier_name |
actionitems.ts | import { Component } from '@angular/core';
import { IonicPage, NavController, NavParams, PopoverController } from 'ionic-angular';
import { TopbarComponent } from '../../components/topbar/topbar';
import { AssessmentService } from '../../services/assessment.service';
import { GoogleAnalytics } from '../../application/helpers/GoogleAnalytics';
import { ReportInfoCardComponent } from "../../components/report-info-card/report-info-card";
// import { Ng2TableModule } from 'ng2-table/ng2-table';
// import { NgTableComponent, NgTableFilteringDirective, NgTablePagingDirective, NgTableSortingDirective } from 'ng2-table/ng2-table';
import * as XLSX from 'xlsx';
import { QuestionsPage } from '../questions/questions';
import { Apollo } from "apollo-angular";
import gql from "graphql-tag";
var assessmentQuery = gql`
query assessment($_id: String) {
assessment(_id: $_id) {
targetMRL
questions {
mrLevel
questionText
threadName
subThreadName
currentAnswer
questionId
answers {
when
who
risk
consequence
likelihood
what
reason
assumptionsNo
notesNo
answer
# technical
# schedule
# cost
}
}
files {
name
questionId
url
}
}
}
`
@IonicPage()
@Component({
selector: 'page-actionitems',
templateUrl: 'actionitems.html',
})
export class ActionitemsPage {
public data:any;
filterList: any = {};
unfilteredQuestions: any;
autoFilter = false;
public rows:Array<any> = [];
public columns:Array<any> = [
{title: 'Thread', name: 'threadName', filtering: {filterString: '', placeholder: 'Filter by thread'}},
{title: 'Subthread', name: 'subThreadName', filtering: {filterString: '', placeholder: 'Filter by subthread'}},
{title: 'Question', name: 'questionText', filtering: {filterString: '', placeholder: 'Filter by question'}},
// {title: 'Answer', name: 'currentAnswer', filtering: {filterString: '', placeholder: 'Filter by answer'}},
{title: 'Action', name: 'what', filtering: {filterString: '', placeholder: 'Filter by action'}},
{title: 'Due', name: 'when', filtering: {filterString: '', placeholder: 'Filter by due date'}, sort: 'asc'},
{title: 'Owner', name: 'who', filtering: {filterString: '', placeholder: 'Filter by owner'}},
{title: 'Risk Level', name: 'risk', filtering: {filterString: '', placeholder: 'Filter by risk level'}}
];
public page:number = 1;
public itemsPerPage:number = 10;
public maxSize:number = 5;
public numPages:number = 1;
public length:number = 0;
public config:any = {
paging: true,
sorting: {columns: this.columns},
filtering: {filterString: ''},
className: ['table-striped', 'table-bordered']
};
no: any;
assessmentId: any;
private attachments: any;
pageName: any = "Action Items";
assessmentIdFromParams: any;
constructor( private apollo: Apollo,
public navCtrl: NavController,
public navParams: NavParams,
public popOver: PopoverController,
private assessmentService: AssessmentService) {
this.assessmentIdFromParams = navParams.data.assessmentId;
console.log(this.assessmentIdFromParams);
this.autoFilter = navParams.data.autoFilter;
}
async ngOnInit() {
this.assessmentId = await this.assessmentService.getCurrentAssessmentId();
this.apollo.watchQuery({
query: assessmentQuery,
variables: {_id: this.assessmentId},
fetchPolicy: "network-only"
}).valueChanges
.subscribe(data => {
this.no = (<any>data.data).assessment.questions.filter( a => {
if (a.answers.length > 0 ) {
return a.answers[a.answers.length - 1].answer == "No"
}
});
var targetMRL = (<any>data.data).assessment.targetMRL;
this.attachments = (<any>data.data).assessment.files;
var newData:Array<any> = [];
console.log(this.no);
this.no.forEach( (element) => {
var newObj:any = {};
newObj.threadName = "" + element.threadName;
newObj.subThreadName = "" + element.subThreadName;
newObj.questionText = "" + element.questionText;
// newObj.currentAnswer = "" + element.answers[element.answers.length - 1].answer;
newObj.what = "" + element.answers[element.answers.length - 1].what;
newObj.when = this.formatDate( element.answers[element.answers.length - 1].when);
newObj.who = "" + element.answers[element.answers.length - 1].who;
newObj.level = "" + element.mrLevel;
var cur = element.answers[element.answers.length - 1];
newObj.risk = "" + this.calculateRiskScore(cur.likelihood, cur.consequence);
newData.push(newObj);
});
this.data = newData;
this.unfilteredQuestions = newData;
if (this.autoFilter){
console.log('here');
this.filterList.filterMRL = targetMRL;
console.log(targetMRL)
this.data = this.unfilteredQuestions.filter(question => {
if (question.level == targetMRL){
return question;
}
});
} else {
this.data = this.unfilteredQuestions;
}
// console.log(this.data);
// this.length = this.data.length;
// this.onChangeTable(this.config);
});
}
/**
* @purpose: format data as a sortable string for table
* @input: date: a new Date() parsable string
* @output: string, format YYYY-MM-DD
*/
formatDate(date){
if ( date ) {
return new Date(date).toISOString().substr(0,10);
} else {
return '';
}
}
filterTheList(){
console.log(this.filterList.filterMRL)
if (this.filterList.filterMRL && this.filterList.filterMRL != 0) {
var filteredQuestions = this.unfilteredQuestions.filter(question => {
if (question.level == this.filterList.filterMRL) {
// console.log('here')
return question
}
});
console.log(filteredQuestions);
this.data = filteredQuestions;
} else {
this.data = this.unfilteredQuestions;
}
}
clearFilter() {
this.filterList.filterMRL = 0;
this.filterTheList();
}
public saveXLS() {
var headers = this.columns.map(c => c.title);
var values = this.no.map(nq => {
return [
nq.threadName,
nq.subThreadName,
nq.questionText,
// nq.answers[nq.answers.length - 1].answer,
nq.answers[nq.answers.length - 1].what,
nq.answers[nq.answers.length - 1].when,
nq.answers[nq.answers.length - 1].who,
this.calculateRiskScore(nq.answers[nq.answers.length - 1].likelihood, nq.answers[nq.answers.length - 1].consequence)
];
})
var worksheet = [headers, ...values];
var ws = XLSX.utils.aoa_to_sheet(worksheet);
var wb = XLSX.utils.book_new();
XLSX.utils.book_append_sheet(wb, ws, 'Action Items');
/* save to file */
XLSX.writeFile(wb, 'action_items.xlsx');
}
public changePage(page:any, data:Array<any> = this.data):Array<any> {
let start = (page.page - 1) * page.itemsPerPage;
let end = page.itemsPerPage > -1 ? (start + page.itemsPerPage) : data.length;
return data.slice(start, end);
}
public changeSort(data:any, config:any):any {
if (!config.sorting) {
return data;
}
let columns = this.config.sorting.columns || [];
let columnName:string = void 0;
let sort:string = void 0;
for (let i = 0; i < columns.length; i++) {
if (columns[i].sort !== '' && columns[i].sort !== false) {
columnName = columns[i].name;
sort = columns[i].sort;
}
}
if (!columnName) {
return data;
}
// simple sorting
return data.sort((previous:any, current:any) => {
if (previous[columnName] > current[columnName]) {
return sort === 'desc' ? -1 : 1;
} else if (previous[columnName] < current[columnName]) {
return sort === 'asc' ? -1 : 1;
}
return 0;
});
}
public changeFilter(data:any, config:any):any {
let filteredData:Array<any> = data;
this.columns.forEach((column:any) => {
if (column.filtering) {
filteredData = filteredData.filter((item:any) => {
return item[column.name].match(column.filtering.filterString);
});
}
});
if (!config.filtering) {
return filteredData;
}
if (config.filtering.columnName) {
return filteredData.filter((item:any) =>
item[config.filtering.columnName].match(this.config.filtering.filterString));
}
let tempArray:Array<any> = [];
filteredData.forEach((item:any) => {
let flag = false;
this.columns.forEach((column:any) => {
if (item[column.name].toString().match(this.config.filtering.filterString)) {
flag = true;
}
});
if (flag) {
tempArray.push(item);
}
});
filteredData = tempArray;
return filteredData;
}
public onChangeTable(config:any, page:any = {page: this.page, itemsPerPage: this.itemsPerPage}):any {
if (config.filtering) {
Object.assign(this.config.filtering, config.filtering);
}
if (config.sorting) {
Object.assign(this.config.sorting, config.sorting);
}
let filteredData = this.changeFilter(this.data, this.config);
let sortedData = this.changeSort(filteredData, this.config);
this.rows = page && config.paging ? this.changePage(page, sortedData) : sortedData;
this.length = sortedData.length;
}
public onCellClick(data: any): any {
console.log(data);
}
unique = (item, index, array) => array.indexOf(item) == index
ionViewWillEnter() |
displayRisks(q) {
var risks = [];
q.technical ? risks.push("Technical") : null
q.schedule ? risks.push("Schedule") : null
q.cost ? risks.push("Cost") : null
return risks.join(", ") || "none";
}
getAttachments(q) {
return this.attachments.filter(a => a.questionId == q.questionId );
}
navToQuestion(questionId) {
this.navCtrl.push(QuestionsPage, {
questionId: questionId
});
}
public calculateRiskScore(likelihood, consequence) {
// preventing off by one errors, with nulls.
// values should always be 1-5
var riskMatrix = [
[ null ],
[ null, 1, 3, 5, 8, 12],
[ null, 2, 7, 11, 14, 17],
[ null, 4, 10, 15, 19, 21],
[ null, 6, 12, 18, 22, 24],
[ null, 9, 16, 20, 23, 25]
];
if ( likelihood && consequence ) {
// value is the same as the index, b/c we put nulls in the matrix
var likelihoodIndex = Number(likelihood);
var consequenceIndex = Number(consequence);
// var name = selectedBox.className.replace(/ selected/g, '')
// selectedBox.className = `${name} selected`;
return riskMatrix[likelihoodIndex][consequenceIndex];
} else {
return "";
}
}
}
| {
GoogleAnalytics.trackPage("actionitems");
} | identifier_body |
actionitems.ts | import { Component } from '@angular/core';
import { IonicPage, NavController, NavParams, PopoverController } from 'ionic-angular';
import { TopbarComponent } from '../../components/topbar/topbar';
import { AssessmentService } from '../../services/assessment.service';
import { GoogleAnalytics } from '../../application/helpers/GoogleAnalytics';
import { ReportInfoCardComponent } from "../../components/report-info-card/report-info-card";
// import { Ng2TableModule } from 'ng2-table/ng2-table';
// import { NgTableComponent, NgTableFilteringDirective, NgTablePagingDirective, NgTableSortingDirective } from 'ng2-table/ng2-table';
import * as XLSX from 'xlsx';
import { QuestionsPage } from '../questions/questions';
import { Apollo } from "apollo-angular";
import gql from "graphql-tag";
var assessmentQuery = gql`
query assessment($_id: String) {
assessment(_id: $_id) {
targetMRL
questions {
mrLevel
questionText
threadName
subThreadName
currentAnswer
questionId
answers {
when
who
risk
consequence
likelihood
what
reason
assumptionsNo
notesNo
answer
# technical
# schedule
# cost
}
}
files {
name
questionId
url
}
}
}
`
@IonicPage()
@Component({
selector: 'page-actionitems',
templateUrl: 'actionitems.html',
})
export class ActionitemsPage {
public data:any;
filterList: any = {};
unfilteredQuestions: any;
autoFilter = false;
public rows:Array<any> = [];
public columns:Array<any> = [
{title: 'Thread', name: 'threadName', filtering: {filterString: '', placeholder: 'Filter by thread'}},
{title: 'Subthread', name: 'subThreadName', filtering: {filterString: '', placeholder: 'Filter by subthread'}},
{title: 'Question', name: 'questionText', filtering: {filterString: '', placeholder: 'Filter by question'}},
// {title: 'Answer', name: 'currentAnswer', filtering: {filterString: '', placeholder: 'Filter by answer'}},
{title: 'Action', name: 'what', filtering: {filterString: '', placeholder: 'Filter by action'}},
{title: 'Due', name: 'when', filtering: {filterString: '', placeholder: 'Filter by due date'}, sort: 'asc'},
{title: 'Owner', name: 'who', filtering: {filterString: '', placeholder: 'Filter by owner'}},
{title: 'Risk Level', name: 'risk', filtering: {filterString: '', placeholder: 'Filter by risk level'}}
];
public page:number = 1;
public itemsPerPage:number = 10;
public maxSize:number = 5;
public numPages:number = 1;
public length:number = 0;
public config:any = {
paging: true,
sorting: {columns: this.columns},
filtering: {filterString: ''},
className: ['table-striped', 'table-bordered']
};
no: any;
assessmentId: any;
private attachments: any;
pageName: any = "Action Items";
assessmentIdFromParams: any;
constructor( private apollo: Apollo,
public navCtrl: NavController,
public navParams: NavParams,
public popOver: PopoverController,
private assessmentService: AssessmentService) {
this.assessmentIdFromParams = navParams.data.assessmentId;
console.log(this.assessmentIdFromParams);
this.autoFilter = navParams.data.autoFilter;
}
async ngOnInit() {
this.assessmentId = await this.assessmentService.getCurrentAssessmentId();
this.apollo.watchQuery({
query: assessmentQuery,
variables: {_id: this.assessmentId},
fetchPolicy: "network-only"
}).valueChanges
.subscribe(data => {
this.no = (<any>data.data).assessment.questions.filter( a => {
if (a.answers.length > 0 ) {
return a.answers[a.answers.length - 1].answer == "No"
}
});
var targetMRL = (<any>data.data).assessment.targetMRL;
this.attachments = (<any>data.data).assessment.files;
var newData:Array<any> = [];
console.log(this.no);
this.no.forEach( (element) => {
var newObj:any = {};
newObj.threadName = "" + element.threadName;
newObj.subThreadName = "" + element.subThreadName;
newObj.questionText = "" + element.questionText;
// newObj.currentAnswer = "" + element.answers[element.answers.length - 1].answer;
newObj.what = "" + element.answers[element.answers.length - 1].what;
newObj.when = this.formatDate( element.answers[element.answers.length - 1].when);
newObj.who = "" + element.answers[element.answers.length - 1].who;
newObj.level = "" + element.mrLevel;
var cur = element.answers[element.answers.length - 1];
newObj.risk = "" + this.calculateRiskScore(cur.likelihood, cur.consequence);
newData.push(newObj);
});
this.data = newData;
this.unfilteredQuestions = newData;
if (this.autoFilter){
console.log('here');
this.filterList.filterMRL = targetMRL;
console.log(targetMRL)
this.data = this.unfilteredQuestions.filter(question => {
if (question.level == targetMRL){
return question;
}
});
} else {
this.data = this.unfilteredQuestions;
}
// console.log(this.data);
// this.length = this.data.length;
// this.onChangeTable(this.config);
});
}
/**
* @purpose: format data as a sortable string for table
* @input: date: a new Date() parsable string
* @output: string, format YYYY-MM-DD
*/
formatDate(date){
if ( date ) {
return new Date(date).toISOString().substr(0,10);
} else {
return '';
}
}
filterTheList(){
console.log(this.filterList.filterMRL)
if (this.filterList.filterMRL && this.filterList.filterMRL != 0) {
var filteredQuestions = this.unfilteredQuestions.filter(question => {
if (question.level == this.filterList.filterMRL) {
// console.log('here')
return question
}
});
console.log(filteredQuestions);
this.data = filteredQuestions;
} else {
this.data = this.unfilteredQuestions;
}
}
clearFilter() {
this.filterList.filterMRL = 0;
this.filterTheList();
}
public saveXLS() {
var headers = this.columns.map(c => c.title);
var values = this.no.map(nq => {
return [
nq.threadName,
nq.subThreadName,
nq.questionText,
// nq.answers[nq.answers.length - 1].answer,
nq.answers[nq.answers.length - 1].what,
nq.answers[nq.answers.length - 1].when,
nq.answers[nq.answers.length - 1].who,
this.calculateRiskScore(nq.answers[nq.answers.length - 1].likelihood, nq.answers[nq.answers.length - 1].consequence)
];
})
var worksheet = [headers, ...values];
var ws = XLSX.utils.aoa_to_sheet(worksheet);
var wb = XLSX.utils.book_new();
XLSX.utils.book_append_sheet(wb, ws, 'Action Items');
/* save to file */
XLSX.writeFile(wb, 'action_items.xlsx');
}
public changePage(page:any, data:Array<any> = this.data):Array<any> {
let start = (page.page - 1) * page.itemsPerPage;
let end = page.itemsPerPage > -1 ? (start + page.itemsPerPage) : data.length;
return data.slice(start, end);
}
public changeSort(data:any, config:any):any {
if (!config.sorting) {
return data;
}
let columns = this.config.sorting.columns || [];
let columnName:string = void 0;
let sort:string = void 0;
for (let i = 0; i < columns.length; i++) {
if (columns[i].sort !== '' && columns[i].sort !== false) {
columnName = columns[i].name;
sort = columns[i].sort;
}
}
if (!columnName) {
return data;
}
// simple sorting
return data.sort((previous:any, current:any) => {
if (previous[columnName] > current[columnName]) {
return sort === 'desc' ? -1 : 1;
} else if (previous[columnName] < current[columnName]) {
return sort === 'asc' ? -1 : 1;
}
return 0;
});
}
public changeFilter(data:any, config:any):any {
let filteredData:Array<any> = data;
this.columns.forEach((column:any) => {
if (column.filtering) {
filteredData = filteredData.filter((item:any) => {
return item[column.name].match(column.filtering.filterString);
});
}
});
if (!config.filtering) {
return filteredData;
}
if (config.filtering.columnName) {
return filteredData.filter((item:any) =>
item[config.filtering.columnName].match(this.config.filtering.filterString));
}
let tempArray:Array<any> = [];
filteredData.forEach((item:any) => {
let flag = false;
this.columns.forEach((column:any) => {
if (item[column.name].toString().match(this.config.filtering.filterString)) {
flag = true;
}
});
if (flag) {
tempArray.push(item);
}
});
filteredData = tempArray; |
public onChangeTable(config:any, page:any = {page: this.page, itemsPerPage: this.itemsPerPage}):any {
if (config.filtering) {
Object.assign(this.config.filtering, config.filtering);
}
if (config.sorting) {
Object.assign(this.config.sorting, config.sorting);
}
let filteredData = this.changeFilter(this.data, this.config);
let sortedData = this.changeSort(filteredData, this.config);
this.rows = page && config.paging ? this.changePage(page, sortedData) : sortedData;
this.length = sortedData.length;
}
public onCellClick(data: any): any {
console.log(data);
}
unique = (item, index, array) => array.indexOf(item) == index
ionViewWillEnter() {
GoogleAnalytics.trackPage("actionitems");
}
displayRisks(q) {
var risks = [];
q.technical ? risks.push("Technical") : null
q.schedule ? risks.push("Schedule") : null
q.cost ? risks.push("Cost") : null
return risks.join(", ") || "none";
}
getAttachments(q) {
return this.attachments.filter(a => a.questionId == q.questionId );
}
navToQuestion(questionId) {
this.navCtrl.push(QuestionsPage, {
questionId: questionId
});
}
public calculateRiskScore(likelihood, consequence) {
// preventing off by one errors, with nulls.
// values should always be 1-5
var riskMatrix = [
[ null ],
[ null, 1, 3, 5, 8, 12],
[ null, 2, 7, 11, 14, 17],
[ null, 4, 10, 15, 19, 21],
[ null, 6, 12, 18, 22, 24],
[ null, 9, 16, 20, 23, 25]
];
if ( likelihood && consequence ) {
// value is the same as the index, b/c we put nulls in the matrix
var likelihoodIndex = Number(likelihood);
var consequenceIndex = Number(consequence);
// var name = selectedBox.className.replace(/ selected/g, '')
// selectedBox.className = `${name} selected`;
return riskMatrix[likelihoodIndex][consequenceIndex];
} else {
return "";
}
}
} |
return filteredData;
} | random_line_split |
actionitems.ts | import { Component } from '@angular/core';
import { IonicPage, NavController, NavParams, PopoverController } from 'ionic-angular';
import { TopbarComponent } from '../../components/topbar/topbar';
import { AssessmentService } from '../../services/assessment.service';
import { GoogleAnalytics } from '../../application/helpers/GoogleAnalytics';
import { ReportInfoCardComponent } from "../../components/report-info-card/report-info-card";
// import { Ng2TableModule } from 'ng2-table/ng2-table';
// import { NgTableComponent, NgTableFilteringDirective, NgTablePagingDirective, NgTableSortingDirective } from 'ng2-table/ng2-table';
import * as XLSX from 'xlsx';
import { QuestionsPage } from '../questions/questions';
import { Apollo } from "apollo-angular";
import gql from "graphql-tag";
var assessmentQuery = gql`
query assessment($_id: String) {
assessment(_id: $_id) {
targetMRL
questions {
mrLevel
questionText
threadName
subThreadName
currentAnswer
questionId
answers {
when
who
risk
consequence
likelihood
what
reason
assumptionsNo
notesNo
answer
# technical
# schedule
# cost
}
}
files {
name
questionId
url
}
}
}
`
@IonicPage()
@Component({
selector: 'page-actionitems',
templateUrl: 'actionitems.html',
})
export class ActionitemsPage {
public data:any;
filterList: any = {};
unfilteredQuestions: any;
autoFilter = false;
public rows:Array<any> = [];
public columns:Array<any> = [
{title: 'Thread', name: 'threadName', filtering: {filterString: '', placeholder: 'Filter by thread'}},
{title: 'Subthread', name: 'subThreadName', filtering: {filterString: '', placeholder: 'Filter by subthread'}},
{title: 'Question', name: 'questionText', filtering: {filterString: '', placeholder: 'Filter by question'}},
// {title: 'Answer', name: 'currentAnswer', filtering: {filterString: '', placeholder: 'Filter by answer'}},
{title: 'Action', name: 'what', filtering: {filterString: '', placeholder: 'Filter by action'}},
{title: 'Due', name: 'when', filtering: {filterString: '', placeholder: 'Filter by due date'}, sort: 'asc'},
{title: 'Owner', name: 'who', filtering: {filterString: '', placeholder: 'Filter by owner'}},
{title: 'Risk Level', name: 'risk', filtering: {filterString: '', placeholder: 'Filter by risk level'}}
];
public page:number = 1;
public itemsPerPage:number = 10;
public maxSize:number = 5;
public numPages:number = 1;
public length:number = 0;
public config:any = {
paging: true,
sorting: {columns: this.columns},
filtering: {filterString: ''},
className: ['table-striped', 'table-bordered']
};
no: any;
assessmentId: any;
private attachments: any;
pageName: any = "Action Items";
assessmentIdFromParams: any;
constructor( private apollo: Apollo,
public navCtrl: NavController,
public navParams: NavParams,
public popOver: PopoverController,
private assessmentService: AssessmentService) {
this.assessmentIdFromParams = navParams.data.assessmentId;
console.log(this.assessmentIdFromParams);
this.autoFilter = navParams.data.autoFilter;
}
async ngOnInit() {
this.assessmentId = await this.assessmentService.getCurrentAssessmentId();
this.apollo.watchQuery({
query: assessmentQuery,
variables: {_id: this.assessmentId},
fetchPolicy: "network-only"
}).valueChanges
.subscribe(data => {
this.no = (<any>data.data).assessment.questions.filter( a => {
if (a.answers.length > 0 ) |
});
var targetMRL = (<any>data.data).assessment.targetMRL;
this.attachments = (<any>data.data).assessment.files;
var newData:Array<any> = [];
console.log(this.no);
this.no.forEach( (element) => {
var newObj:any = {};
newObj.threadName = "" + element.threadName;
newObj.subThreadName = "" + element.subThreadName;
newObj.questionText = "" + element.questionText;
// newObj.currentAnswer = "" + element.answers[element.answers.length - 1].answer;
newObj.what = "" + element.answers[element.answers.length - 1].what;
newObj.when = this.formatDate( element.answers[element.answers.length - 1].when);
newObj.who = "" + element.answers[element.answers.length - 1].who;
newObj.level = "" + element.mrLevel;
var cur = element.answers[element.answers.length - 1];
newObj.risk = "" + this.calculateRiskScore(cur.likelihood, cur.consequence);
newData.push(newObj);
});
this.data = newData;
this.unfilteredQuestions = newData;
if (this.autoFilter){
console.log('here');
this.filterList.filterMRL = targetMRL;
console.log(targetMRL)
this.data = this.unfilteredQuestions.filter(question => {
if (question.level == targetMRL){
return question;
}
});
} else {
this.data = this.unfilteredQuestions;
}
// console.log(this.data);
// this.length = this.data.length;
// this.onChangeTable(this.config);
});
}
/**
* @purpose: format data as a sortable string for table
* @input: date: a new Date() parsable string
* @output: string, format YYYY-MM-DD
*/
formatDate(date){
if ( date ) {
return new Date(date).toISOString().substr(0,10);
} else {
return '';
}
}
filterTheList(){
console.log(this.filterList.filterMRL)
if (this.filterList.filterMRL && this.filterList.filterMRL != 0) {
var filteredQuestions = this.unfilteredQuestions.filter(question => {
if (question.level == this.filterList.filterMRL) {
// console.log('here')
return question
}
});
console.log(filteredQuestions);
this.data = filteredQuestions;
} else {
this.data = this.unfilteredQuestions;
}
}
clearFilter() {
this.filterList.filterMRL = 0;
this.filterTheList();
}
public saveXLS() {
var headers = this.columns.map(c => c.title);
var values = this.no.map(nq => {
return [
nq.threadName,
nq.subThreadName,
nq.questionText,
// nq.answers[nq.answers.length - 1].answer,
nq.answers[nq.answers.length - 1].what,
nq.answers[nq.answers.length - 1].when,
nq.answers[nq.answers.length - 1].who,
this.calculateRiskScore(nq.answers[nq.answers.length - 1].likelihood, nq.answers[nq.answers.length - 1].consequence)
];
})
var worksheet = [headers, ...values];
var ws = XLSX.utils.aoa_to_sheet(worksheet);
var wb = XLSX.utils.book_new();
XLSX.utils.book_append_sheet(wb, ws, 'Action Items');
/* save to file */
XLSX.writeFile(wb, 'action_items.xlsx');
}
public changePage(page:any, data:Array<any> = this.data):Array<any> {
let start = (page.page - 1) * page.itemsPerPage;
let end = page.itemsPerPage > -1 ? (start + page.itemsPerPage) : data.length;
return data.slice(start, end);
}
public changeSort(data:any, config:any):any {
if (!config.sorting) {
return data;
}
let columns = this.config.sorting.columns || [];
let columnName:string = void 0;
let sort:string = void 0;
for (let i = 0; i < columns.length; i++) {
if (columns[i].sort !== '' && columns[i].sort !== false) {
columnName = columns[i].name;
sort = columns[i].sort;
}
}
if (!columnName) {
return data;
}
// simple sorting
return data.sort((previous:any, current:any) => {
if (previous[columnName] > current[columnName]) {
return sort === 'desc' ? -1 : 1;
} else if (previous[columnName] < current[columnName]) {
return sort === 'asc' ? -1 : 1;
}
return 0;
});
}
public changeFilter(data:any, config:any):any {
let filteredData:Array<any> = data;
this.columns.forEach((column:any) => {
if (column.filtering) {
filteredData = filteredData.filter((item:any) => {
return item[column.name].match(column.filtering.filterString);
});
}
});
if (!config.filtering) {
return filteredData;
}
if (config.filtering.columnName) {
return filteredData.filter((item:any) =>
item[config.filtering.columnName].match(this.config.filtering.filterString));
}
let tempArray:Array<any> = [];
filteredData.forEach((item:any) => {
let flag = false;
this.columns.forEach((column:any) => {
if (item[column.name].toString().match(this.config.filtering.filterString)) {
flag = true;
}
});
if (flag) {
tempArray.push(item);
}
});
filteredData = tempArray;
return filteredData;
}
public onChangeTable(config:any, page:any = {page: this.page, itemsPerPage: this.itemsPerPage}):any {
if (config.filtering) {
Object.assign(this.config.filtering, config.filtering);
}
if (config.sorting) {
Object.assign(this.config.sorting, config.sorting);
}
let filteredData = this.changeFilter(this.data, this.config);
let sortedData = this.changeSort(filteredData, this.config);
this.rows = page && config.paging ? this.changePage(page, sortedData) : sortedData;
this.length = sortedData.length;
}
public onCellClick(data: any): any {
console.log(data);
}
unique = (item, index, array) => array.indexOf(item) == index
ionViewWillEnter() {
GoogleAnalytics.trackPage("actionitems");
}
displayRisks(q) {
var risks = [];
q.technical ? risks.push("Technical") : null
q.schedule ? risks.push("Schedule") : null
q.cost ? risks.push("Cost") : null
return risks.join(", ") || "none";
}
getAttachments(q) {
return this.attachments.filter(a => a.questionId == q.questionId );
}
navToQuestion(questionId) {
this.navCtrl.push(QuestionsPage, {
questionId: questionId
});
}
public calculateRiskScore(likelihood, consequence) {
// preventing off by one errors, with nulls.
// values should always be 1-5
var riskMatrix = [
[ null ],
[ null, 1, 3, 5, 8, 12],
[ null, 2, 7, 11, 14, 17],
[ null, 4, 10, 15, 19, 21],
[ null, 6, 12, 18, 22, 24],
[ null, 9, 16, 20, 23, 25]
];
if ( likelihood && consequence ) {
// value is the same as the index, b/c we put nulls in the matrix
var likelihoodIndex = Number(likelihood);
var consequenceIndex = Number(consequence);
// var name = selectedBox.className.replace(/ selected/g, '')
// selectedBox.className = `${name} selected`;
return riskMatrix[likelihoodIndex][consequenceIndex];
} else {
return "";
}
}
}
| {
return a.answers[a.answers.length - 1].answer == "No"
} | conditional_block |
pm.rs | //! Implementation of the power manager (PM) peripheral.
use bpm;
use bscif;
use core::cell::Cell;
use core::sync::atomic::Ordering;
use flashcalw;
use gpio;
use kernel::common::VolatileCell;
use scif;
#[repr(C)]
struct PmRegisters { | pbbsel: VolatileCell<u32>,
pbcsel: VolatileCell<u32>,
pbdsel: VolatileCell<u32>,
_reserved2: VolatileCell<u32>,
cpumask: VolatileCell<u32>, // 0x020
hsbmask: VolatileCell<u32>,
pbamask: VolatileCell<u32>,
pbbmask: VolatileCell<u32>,
pbcmask: VolatileCell<u32>,
pbdmask: VolatileCell<u32>,
_reserved3: [VolatileCell<u32>; 2],
pbadivmask: VolatileCell<u32>, // 0x040
_reserved4: [VolatileCell<u32>; 4],
cfdctrl: VolatileCell<u32>,
unlock: VolatileCell<u32>,
_reserved5: [VolatileCell<u32>; 25], // 0x60
ier: VolatileCell<u32>, // 0xC0
idr: VolatileCell<u32>,
imr: VolatileCell<u32>,
isr: VolatileCell<u32>,
icr: VolatileCell<u32>,
sr: VolatileCell<u32>,
_reserved6: [VolatileCell<u32>; 34], // 0x100
ppcr: VolatileCell<u32>, // 0x160
_reserved7: [VolatileCell<u32>; 7],
rcause: VolatileCell<u32>, // 0x180
wcause: VolatileCell<u32>,
awen: VolatileCell<u32>,
protctrl: VolatileCell<u32>,
_reserved8: VolatileCell<u32>,
fastsleep: VolatileCell<u32>,
_reserved9: [VolatileCell<u32>; 152],
config: VolatileCell<u32>, // 0x200
version: VolatileCell<u32>,
}
pub enum MainClock {
RCSYS,
OSC0,
PLL,
DFLL,
RC80M,
RCFAST,
RC1M,
}
#[derive(Copy, Clone, Debug)]
pub enum Clock {
HSB(HSBClock),
PBA(PBAClock),
PBB(PBBClock),
PBC(PBCClock),
PBD(PBDClock),
}
#[derive(Copy, Clone, Debug)]
pub enum HSBClock {
PDCA,
FLASHCALW,
FLASHCALWP,
USBC,
CRCCU,
APBA,
APBB,
APBC,
APBD,
AESA,
}
#[derive(Copy, Clone, Debug)]
pub enum PBAClock {
IISC,
SPI,
TC0,
TC1,
TWIM0,
TWIS0,
TWIM1,
TWIS1,
USART0,
USART1,
USART2,
USART3,
ADCIFE,
DACC,
ACIFC,
GLOC,
ABSACB,
TRNG,
PARC,
CATB,
NULL,
TWIM2,
TWIM3,
LCDCA,
}
#[derive(Copy, Clone, Debug)]
pub enum PBBClock {
FLASHCALW,
HRAMC1,
HMATRIX,
PDCA,
CRCCU,
USBC,
PEVC,
}
#[derive(Copy, Clone, Debug)]
pub enum PBCClock {
PM,
CHIPID,
SCIF,
FREQM,
GPIO,
}
#[derive(Copy, Clone, Debug)]
pub enum PBDClock {
BPM,
BSCIF,
AST,
WDT,
EIC,
PICOUART,
}
/// Frequency of the external oscillator. For the SAM4L, different
/// configurations are needed for different ranges of oscillator frequency, so
/// based on the input frequency, various configurations may need to change.
/// When additional oscillator frequencies are needed, they should be added
/// here and the `setup_system_clock` function should be modified to support
/// it.
#[derive(Copy, Clone, Debug)]
pub enum OscillatorFrequency {
/// 16 MHz external oscillator
Frequency16MHz,
}
/// Configuration for the startup time of the external oscillator. In practice
/// we have found that some boards work with a short startup time, while others
/// need a slow start in order to properly wake from sleep. In general, we find
/// that for systems that do not work, at fast speed, they will hang or panic
/// after several entries into WAIT mode.
#[derive(Copy, Clone, Debug)]
pub enum OscillatorStartup {
/// Use a fast startup. ~0.5 ms in practice.
FastStart,
/// Use a slow startup. ~8.9 ms in practice.
SlowStart,
}
/// Which source the system clock should be generated from. These are specified
/// as system clock source appended with the clock that it is sourced from
/// appended with the final frequency of the system. So for example, one option
/// is to use the DFLL sourced from the RC32K with a final frequency of 48 MHz.
///
/// When new options (either sources or final frequencies) are needed, they
/// should be added to this list, and then the `setup_system_clock` function
/// can be modified to support it. This is necessary because configurations
/// must be changed not just with the input source but also based on the
/// desired final frequency.
///
/// For options utilizing an external oscillator, the configurations for that
/// oscillator must also be provided.
#[derive(Copy, Clone, Debug)]
pub enum SystemClockSource {
/// Use the RCSYS clock (which the system starts up on anyways). Final
/// system frequency will be 115 kHz. Note that while this is the default,
/// Tock is NOT guaranteed to work on this setting and will likely fail.
RcsysAt115kHz,
/// Use the internal digital frequency locked loop (DFLL) sourced from
/// the internal RC32K clock. Note this typically requires calibration
/// of the RC32K to have a consistent clock. Final frequency of 48 MHz.
DfllRc32kAt48MHz,
/// Use an external crystal oscillator as the direct source for the
/// system clock. The final system frequency will match the frequency of
/// the external oscillator.
ExternalOscillator {
frequency: OscillatorFrequency,
startup_mode: OscillatorStartup,
},
/// Use an external crystal oscillator as the input to the internal phase
/// locked loop (PLL) for the system clock. This results in a final
/// frequency of 48 MHz.
PllExternalOscillatorAt48MHz {
frequency: OscillatorFrequency,
startup_mode: OscillatorStartup,
},
}
const PM_BASE: usize = 0x400E0000;
const HSB_MASK_OFFSET: u32 = 0x24;
const PBA_MASK_OFFSET: u32 = 0x28;
const PBB_MASK_OFFSET: u32 = 0x2C;
const PBC_MASK_OFFSET: u32 = 0x30;
const PBD_MASK_OFFSET: u32 = 0x34;
static mut PM_REGS: *mut PmRegisters = PM_BASE as *mut PmRegisters;
/// Contains state for the power management peripheral. This includes the
/// configurations for various system clocks and the final frequency that the
/// system is running at.
pub struct PowerManager {
/// Frequency at which the system clock is running.
system_frequency: Cell<u32>,
/// Clock source configuration
system_clock_source: Cell<SystemClockSource>,
}
pub static mut PM: PowerManager = PowerManager {
/// Set to the RCSYS frequency by default (115 kHz).
system_frequency: Cell::new(115000),
/// Set to the RCSYS by default.
system_clock_source: Cell::new(SystemClockSource::RcsysAt115kHz),
};
impl PowerManager {
/// Sets up the system clock. This should be called as one of the first
/// lines in the `reset_handler` within the platform's `main.rs`.
pub unsafe fn setup_system_clock(&self, clock_source: SystemClockSource) {
// save configuration
self.system_clock_source.set(clock_source);
// For now, always go to PS2 as it enables all core speeds
bpm::set_power_scaling(bpm::PowerScaling::PS2);
match clock_source {
SystemClockSource::RcsysAt115kHz => {
// no configurations necessary, already running off the RCSYS
self.system_frequency.set(115000);
}
SystemClockSource::DfllRc32kAt48MHz => {
configure_48mhz_dfll();
self.system_frequency.set(48000000);
}
SystemClockSource::ExternalOscillator {
frequency,
startup_mode,
} => {
configure_external_oscillator(frequency, startup_mode);
match frequency {
OscillatorFrequency::Frequency16MHz => self.system_frequency.set(16000000),
};
}
SystemClockSource::PllExternalOscillatorAt48MHz {
frequency,
startup_mode,
} => {
configure_external_oscillator_pll(frequency, startup_mode);
self.system_frequency.set(48000000);
}
}
}
}
unsafe fn unlock(register_offset: u32) {
(*PM_REGS).unlock.set(0xAA000000 | register_offset);
}
unsafe fn select_main_clock(clock: MainClock) {
unlock(0);
(*PM_REGS).mcctrl.set(clock as u32);
}
/// Configure the system clock to use the DFLL with the RC32K as the source.
/// Run at 48 MHz.
unsafe fn configure_48mhz_dfll() {
// Enable HCACHE
flashcalw::FLASH_CONTROLLER.enable_cache();
// start the dfll
scif::setup_dfll_rc32k_48mhz();
// Since we are running at a fast speed we have to set a clock delay
// for flash, as well as enable fast flash mode.
flashcalw::FLASH_CONTROLLER.enable_high_speed_flash();
// Choose the main clock
select_main_clock(MainClock::DFLL);
}
/// Configure the system clock to use the 16 MHz external crystal directly
unsafe fn configure_external_oscillator(
frequency: OscillatorFrequency,
startup_mode: OscillatorStartup,
) {
// Use the cache
flashcalw::FLASH_CONTROLLER.enable_cache();
// Need the 32k RC oscillator for things like BPM module and AST.
bscif::enable_rc32k();
// start the external oscillator
match frequency {
OscillatorFrequency::Frequency16MHz => {
match startup_mode {
OscillatorStartup::FastStart => scif::setup_osc_16mhz_fast_startup(),
OscillatorStartup::SlowStart => scif::setup_osc_16mhz_slow_startup(),
};
}
}
// Go to high speed flash mode
flashcalw::FLASH_CONTROLLER.enable_high_speed_flash();
// Set the main clock to be the external oscillator
select_main_clock(MainClock::OSC0);
}
/// Configure the system clock to use the PLL with the 16 MHz external crystal
unsafe fn configure_external_oscillator_pll(
frequency: OscillatorFrequency,
startup_mode: OscillatorStartup,
) {
// Use the cache
flashcalw::FLASH_CONTROLLER.enable_cache();
// Need the 32k RC oscillator for things like BPM module and AST.
bscif::enable_rc32k();
// start the external oscillator
match frequency {
OscillatorFrequency::Frequency16MHz => {
match startup_mode {
OscillatorStartup::FastStart => scif::setup_osc_16mhz_fast_startup(),
OscillatorStartup::SlowStart => scif::setup_osc_16mhz_slow_startup(),
};
}
}
// Setup the PLL
scif::setup_pll_osc_48mhz();
// Go to high speed flash mode
flashcalw::FLASH_CONTROLLER.enable_high_speed_flash();
// Set the main clock to be the PLL
select_main_clock(MainClock::PLL);
}
pub fn get_system_frequency() -> u32 {
unsafe { PM.system_frequency.get() }
}
/// Utility macro to modify clock mask registers
///
/// It takes one of two forms:
///
/// mask_clock!(CLOCK: pm_register | value)
///
/// which performs a logical-or on the existing register value, or
///
/// mask_clock!(CLOCK: pm_register & value)
///
/// which performs a logical-and.
///
/// CLOCK is one of HSB, PBA, PBB, PBC or PBD
///
/// pm_register is one of hsbmask, pbamask, pbbmask, pbcmask or pbdmask.
///
macro_rules! mask_clock {
($module:ident: $field:ident | $mask:expr) => ({
unlock(concat_idents!($module, _MASK_OFFSET));
let val = (*PM_REGS).$field.get() | ($mask);
(*PM_REGS).$field.set(val);
});
($module:ident: $field:ident & $mask:expr) => ({
unlock(concat_idents!($module, _MASK_OFFSET));
let val = (*PM_REGS).$field.get() & ($mask);
(*PM_REGS).$field.set(val);
});
}
/// Utility macro to get value of clock register. Used to check if a specific
/// clock is enabled or not. See above description of `make_clock!`.
macro_rules! get_clock {
($module:ident: $field:ident & $mask:expr) => ({
unlock(concat_idents!($module, _MASK_OFFSET));
((*PM_REGS).$field.get() & ($mask)) != 0
});
}
// Clock masks that allow us to go into deep sleep without disabling any active
// peripherals.
// FLASHCALW clocks and APBx clocks are allowed
//
// This is identical to the reset value of the HSBMASK except it allows the
// PicoCache RAM clock to be on as well.
const DEEP_SLEEP_HSBMASK: u32 = 0x1e6;
// No clocks allowed on PBA
const DEEP_SLEEP_PBAMASK: u32 = 0x0;
// FLASHCALW and HRAMC1 clocks allowed
//
// This is identical to the reset value of the PBBMASK except it allows the
// flash's HRAMC1 clock as well.
const DEEP_SLEEP_PBBMASK: u32 = 0x3;
/// Determines if the chip can safely go into deep sleep without preventing
/// currently active peripherals from operating.
///
/// We look at the PM's clock mask registers and compare them against a set of
/// known masks that include no peripherals that can't operate in deep
/// sleep (or that have no function during sleep). Specifically:
///
/// * HSB may only have clocks for the flash (and PicoCache) and APBx bridges on.
///
/// * PBA may not have _any_ clocks on.
///
/// * PBB may only have clocks for the flash and HRAMC1 (also flash related) on.
///
/// * PBC and PBD may have any clocks on.
///
/// This means it is the responsibility of each peripheral to disable it's clock
/// mask whenever it is idle.
///
/// We also special case GPIO (which is in PBCMASK), and just see if any interrupts are pending
/// through the INTERRUPT_COUNT variable.
pub fn deep_sleep_ready() -> bool {
unsafe {
(*PM_REGS).hsbmask.get() & !(DEEP_SLEEP_HSBMASK) == 0
&& (*PM_REGS).pbamask.get() & !(DEEP_SLEEP_PBAMASK) == 0
&& (*PM_REGS).pbbmask.get() & !(DEEP_SLEEP_PBBMASK) == 0
&& gpio::INTERRUPT_COUNT.load(Ordering::Relaxed) == 0
}
}
pub unsafe fn enable_clock(clock: Clock) {
match clock {
Clock::HSB(v) => mask_clock!(HSB: hsbmask | 1 << (v as u32)),
Clock::PBA(v) => mask_clock!(PBA: pbamask | 1 << (v as u32)),
Clock::PBB(v) => mask_clock!(PBB: pbbmask | 1 << (v as u32)),
Clock::PBC(v) => mask_clock!(PBC: pbcmask | 1 << (v as u32)),
Clock::PBD(v) => mask_clock!(PBD: pbdmask | 1 << (v as u32)),
}
}
pub unsafe fn disable_clock(clock: Clock) {
match clock {
Clock::HSB(v) => mask_clock!(HSB: hsbmask & !(1 << (v as u32))),
Clock::PBA(v) => mask_clock!(PBA: pbamask & !(1 << (v as u32))),
Clock::PBB(v) => mask_clock!(PBB: pbbmask & !(1 << (v as u32))),
Clock::PBC(v) => mask_clock!(PBC: pbcmask & !(1 << (v as u32))),
Clock::PBD(v) => mask_clock!(PBD: pbdmask & !(1 << (v as u32))),
}
}
pub unsafe fn is_clock_enabled(clock: Clock) -> bool {
match clock {
Clock::HSB(v) => get_clock!(HSB: hsbmask & (1 << (v as u32))),
Clock::PBA(v) => get_clock!(PBA: pbamask & (1 << (v as u32))),
Clock::PBB(v) => get_clock!(PBB: pbbmask & (1 << (v as u32))),
Clock::PBC(v) => get_clock!(PBC: pbcmask & (1 << (v as u32))),
Clock::PBD(v) => get_clock!(PBD: pbdmask & (1 << (v as u32))),
}
} | mcctrl: VolatileCell<u32>,
cpusel: VolatileCell<u32>,
_reserved1: VolatileCell<u32>,
pbasel: VolatileCell<u32>, | random_line_split |
pm.rs | //! Implementation of the power manager (PM) peripheral.
use bpm;
use bscif;
use core::cell::Cell;
use core::sync::atomic::Ordering;
use flashcalw;
use gpio;
use kernel::common::VolatileCell;
use scif;
#[repr(C)]
struct PmRegisters {
mcctrl: VolatileCell<u32>,
cpusel: VolatileCell<u32>,
_reserved1: VolatileCell<u32>,
pbasel: VolatileCell<u32>,
pbbsel: VolatileCell<u32>,
pbcsel: VolatileCell<u32>,
pbdsel: VolatileCell<u32>,
_reserved2: VolatileCell<u32>,
cpumask: VolatileCell<u32>, // 0x020
hsbmask: VolatileCell<u32>,
pbamask: VolatileCell<u32>,
pbbmask: VolatileCell<u32>,
pbcmask: VolatileCell<u32>,
pbdmask: VolatileCell<u32>,
_reserved3: [VolatileCell<u32>; 2],
pbadivmask: VolatileCell<u32>, // 0x040
_reserved4: [VolatileCell<u32>; 4],
cfdctrl: VolatileCell<u32>,
unlock: VolatileCell<u32>,
_reserved5: [VolatileCell<u32>; 25], // 0x60
ier: VolatileCell<u32>, // 0xC0
idr: VolatileCell<u32>,
imr: VolatileCell<u32>,
isr: VolatileCell<u32>,
icr: VolatileCell<u32>,
sr: VolatileCell<u32>,
_reserved6: [VolatileCell<u32>; 34], // 0x100
ppcr: VolatileCell<u32>, // 0x160
_reserved7: [VolatileCell<u32>; 7],
rcause: VolatileCell<u32>, // 0x180
wcause: VolatileCell<u32>,
awen: VolatileCell<u32>,
protctrl: VolatileCell<u32>,
_reserved8: VolatileCell<u32>,
fastsleep: VolatileCell<u32>,
_reserved9: [VolatileCell<u32>; 152],
config: VolatileCell<u32>, // 0x200
version: VolatileCell<u32>,
}
pub enum MainClock {
RCSYS,
OSC0,
PLL,
DFLL,
RC80M,
RCFAST,
RC1M,
}
#[derive(Copy, Clone, Debug)]
pub enum Clock {
HSB(HSBClock),
PBA(PBAClock),
PBB(PBBClock),
PBC(PBCClock),
PBD(PBDClock),
}
#[derive(Copy, Clone, Debug)]
pub enum HSBClock {
PDCA,
FLASHCALW,
FLASHCALWP,
USBC,
CRCCU,
APBA,
APBB,
APBC,
APBD,
AESA,
}
#[derive(Copy, Clone, Debug)]
pub enum PBAClock {
IISC,
SPI,
TC0,
TC1,
TWIM0,
TWIS0,
TWIM1,
TWIS1,
USART0,
USART1,
USART2,
USART3,
ADCIFE,
DACC,
ACIFC,
GLOC,
ABSACB,
TRNG,
PARC,
CATB,
NULL,
TWIM2,
TWIM3,
LCDCA,
}
#[derive(Copy, Clone, Debug)]
pub enum PBBClock {
FLASHCALW,
HRAMC1,
HMATRIX,
PDCA,
CRCCU,
USBC,
PEVC,
}
#[derive(Copy, Clone, Debug)]
pub enum PBCClock {
PM,
CHIPID,
SCIF,
FREQM,
GPIO,
}
#[derive(Copy, Clone, Debug)]
pub enum PBDClock {
BPM,
BSCIF,
AST,
WDT,
EIC,
PICOUART,
}
/// Frequency of the external oscillator. For the SAM4L, different
/// configurations are needed for different ranges of oscillator frequency, so
/// based on the input frequency, various configurations may need to change.
/// When additional oscillator frequencies are needed, they should be added
/// here and the `setup_system_clock` function should be modified to support
/// it.
#[derive(Copy, Clone, Debug)]
pub enum | {
/// 16 MHz external oscillator
Frequency16MHz,
}
/// Configuration for the startup time of the external oscillator. In practice
/// we have found that some boards work with a short startup time, while others
/// need a slow start in order to properly wake from sleep. In general, we find
/// that for systems that do not work, at fast speed, they will hang or panic
/// after several entries into WAIT mode.
#[derive(Copy, Clone, Debug)]
pub enum OscillatorStartup {
/// Use a fast startup. ~0.5 ms in practice.
FastStart,
/// Use a slow startup. ~8.9 ms in practice.
SlowStart,
}
/// Which source the system clock should be generated from. These are specified
/// as system clock source appended with the clock that it is sourced from
/// appended with the final frequency of the system. So for example, one option
/// is to use the DFLL sourced from the RC32K with a final frequency of 48 MHz.
///
/// When new options (either sources or final frequencies) are needed, they
/// should be added to this list, and then the `setup_system_clock` function
/// can be modified to support it. This is necessary because configurations
/// must be changed not just with the input source but also based on the
/// desired final frequency.
///
/// For options utilizing an external oscillator, the configurations for that
/// oscillator must also be provided.
#[derive(Copy, Clone, Debug)]
pub enum SystemClockSource {
/// Use the RCSYS clock (which the system starts up on anyways). Final
/// system frequency will be 115 kHz. Note that while this is the default,
/// Tock is NOT guaranteed to work on this setting and will likely fail.
RcsysAt115kHz,
/// Use the internal digital frequency locked loop (DFLL) sourced from
/// the internal RC32K clock. Note this typically requires calibration
/// of the RC32K to have a consistent clock. Final frequency of 48 MHz.
DfllRc32kAt48MHz,
/// Use an external crystal oscillator as the direct source for the
/// system clock. The final system frequency will match the frequency of
/// the external oscillator.
ExternalOscillator {
frequency: OscillatorFrequency,
startup_mode: OscillatorStartup,
},
/// Use an external crystal oscillator as the input to the internal phase
/// locked loop (PLL) for the system clock. This results in a final
/// frequency of 48 MHz.
PllExternalOscillatorAt48MHz {
frequency: OscillatorFrequency,
startup_mode: OscillatorStartup,
},
}
const PM_BASE: usize = 0x400E0000;
const HSB_MASK_OFFSET: u32 = 0x24;
const PBA_MASK_OFFSET: u32 = 0x28;
const PBB_MASK_OFFSET: u32 = 0x2C;
const PBC_MASK_OFFSET: u32 = 0x30;
const PBD_MASK_OFFSET: u32 = 0x34;
static mut PM_REGS: *mut PmRegisters = PM_BASE as *mut PmRegisters;
/// Contains state for the power management peripheral. This includes the
/// configurations for various system clocks and the final frequency that the
/// system is running at.
pub struct PowerManager {
/// Frequency at which the system clock is running.
system_frequency: Cell<u32>,
/// Clock source configuration
system_clock_source: Cell<SystemClockSource>,
}
pub static mut PM: PowerManager = PowerManager {
/// Set to the RCSYS frequency by default (115 kHz).
system_frequency: Cell::new(115000),
/// Set to the RCSYS by default.
system_clock_source: Cell::new(SystemClockSource::RcsysAt115kHz),
};
impl PowerManager {
/// Sets up the system clock. This should be called as one of the first
/// lines in the `reset_handler` within the platform's `main.rs`.
pub unsafe fn setup_system_clock(&self, clock_source: SystemClockSource) {
// save configuration
self.system_clock_source.set(clock_source);
// For now, always go to PS2 as it enables all core speeds
bpm::set_power_scaling(bpm::PowerScaling::PS2);
match clock_source {
SystemClockSource::RcsysAt115kHz => {
// no configurations necessary, already running off the RCSYS
self.system_frequency.set(115000);
}
SystemClockSource::DfllRc32kAt48MHz => {
configure_48mhz_dfll();
self.system_frequency.set(48000000);
}
SystemClockSource::ExternalOscillator {
frequency,
startup_mode,
} => {
configure_external_oscillator(frequency, startup_mode);
match frequency {
OscillatorFrequency::Frequency16MHz => self.system_frequency.set(16000000),
};
}
SystemClockSource::PllExternalOscillatorAt48MHz {
frequency,
startup_mode,
} => {
configure_external_oscillator_pll(frequency, startup_mode);
self.system_frequency.set(48000000);
}
}
}
}
unsafe fn unlock(register_offset: u32) {
(*PM_REGS).unlock.set(0xAA000000 | register_offset);
}
unsafe fn select_main_clock(clock: MainClock) {
unlock(0);
(*PM_REGS).mcctrl.set(clock as u32);
}
/// Configure the system clock to use the DFLL with the RC32K as the source.
/// Run at 48 MHz.
unsafe fn configure_48mhz_dfll() {
// Enable HCACHE
flashcalw::FLASH_CONTROLLER.enable_cache();
// start the dfll
scif::setup_dfll_rc32k_48mhz();
// Since we are running at a fast speed we have to set a clock delay
// for flash, as well as enable fast flash mode.
flashcalw::FLASH_CONTROLLER.enable_high_speed_flash();
// Choose the main clock
select_main_clock(MainClock::DFLL);
}
/// Configure the system clock to use the 16 MHz external crystal directly
unsafe fn configure_external_oscillator(
frequency: OscillatorFrequency,
startup_mode: OscillatorStartup,
) {
// Use the cache
flashcalw::FLASH_CONTROLLER.enable_cache();
// Need the 32k RC oscillator for things like BPM module and AST.
bscif::enable_rc32k();
// start the external oscillator
match frequency {
OscillatorFrequency::Frequency16MHz => {
match startup_mode {
OscillatorStartup::FastStart => scif::setup_osc_16mhz_fast_startup(),
OscillatorStartup::SlowStart => scif::setup_osc_16mhz_slow_startup(),
};
}
}
// Go to high speed flash mode
flashcalw::FLASH_CONTROLLER.enable_high_speed_flash();
// Set the main clock to be the external oscillator
select_main_clock(MainClock::OSC0);
}
/// Configure the system clock to use the PLL with the 16 MHz external crystal
unsafe fn configure_external_oscillator_pll(
frequency: OscillatorFrequency,
startup_mode: OscillatorStartup,
) {
// Use the cache
flashcalw::FLASH_CONTROLLER.enable_cache();
// Need the 32k RC oscillator for things like BPM module and AST.
bscif::enable_rc32k();
// start the external oscillator
match frequency {
OscillatorFrequency::Frequency16MHz => {
match startup_mode {
OscillatorStartup::FastStart => scif::setup_osc_16mhz_fast_startup(),
OscillatorStartup::SlowStart => scif::setup_osc_16mhz_slow_startup(),
};
}
}
// Setup the PLL
scif::setup_pll_osc_48mhz();
// Go to high speed flash mode
flashcalw::FLASH_CONTROLLER.enable_high_speed_flash();
// Set the main clock to be the PLL
select_main_clock(MainClock::PLL);
}
pub fn get_system_frequency() -> u32 {
unsafe { PM.system_frequency.get() }
}
/// Utility macro to modify clock mask registers
///
/// It takes one of two forms:
///
/// mask_clock!(CLOCK: pm_register | value)
///
/// which performs a logical-or on the existing register value, or
///
/// mask_clock!(CLOCK: pm_register & value)
///
/// which performs a logical-and.
///
/// CLOCK is one of HSB, PBA, PBB, PBC or PBD
///
/// pm_register is one of hsbmask, pbamask, pbbmask, pbcmask or pbdmask.
///
macro_rules! mask_clock {
($module:ident: $field:ident | $mask:expr) => ({
unlock(concat_idents!($module, _MASK_OFFSET));
let val = (*PM_REGS).$field.get() | ($mask);
(*PM_REGS).$field.set(val);
});
($module:ident: $field:ident & $mask:expr) => ({
unlock(concat_idents!($module, _MASK_OFFSET));
let val = (*PM_REGS).$field.get() & ($mask);
(*PM_REGS).$field.set(val);
});
}
/// Utility macro to get value of clock register. Used to check if a specific
/// clock is enabled or not. See above description of `make_clock!`.
macro_rules! get_clock {
($module:ident: $field:ident & $mask:expr) => ({
unlock(concat_idents!($module, _MASK_OFFSET));
((*PM_REGS).$field.get() & ($mask)) != 0
});
}
// Clock masks that allow us to go into deep sleep without disabling any active
// peripherals.
// FLASHCALW clocks and APBx clocks are allowed
//
// This is identical to the reset value of the HSBMASK except it allows the
// PicoCache RAM clock to be on as well.
const DEEP_SLEEP_HSBMASK: u32 = 0x1e6;
// No clocks allowed on PBA
const DEEP_SLEEP_PBAMASK: u32 = 0x0;
// FLASHCALW and HRAMC1 clocks allowed
//
// This is identical to the reset value of the PBBMASK except it allows the
// flash's HRAMC1 clock as well.
const DEEP_SLEEP_PBBMASK: u32 = 0x3;
/// Determines if the chip can safely go into deep sleep without preventing
/// currently active peripherals from operating.
///
/// We look at the PM's clock mask registers and compare them against a set of
/// known masks that include no peripherals that can't operate in deep
/// sleep (or that have no function during sleep). Specifically:
///
/// * HSB may only have clocks for the flash (and PicoCache) and APBx bridges on.
///
/// * PBA may not have _any_ clocks on.
///
/// * PBB may only have clocks for the flash and HRAMC1 (also flash related) on.
///
/// * PBC and PBD may have any clocks on.
///
/// This means it is the responsibility of each peripheral to disable it's clock
/// mask whenever it is idle.
///
/// We also special case GPIO (which is in PBCMASK), and just see if any interrupts are pending
/// through the INTERRUPT_COUNT variable.
pub fn deep_sleep_ready() -> bool {
unsafe {
(*PM_REGS).hsbmask.get() & !(DEEP_SLEEP_HSBMASK) == 0
&& (*PM_REGS).pbamask.get() & !(DEEP_SLEEP_PBAMASK) == 0
&& (*PM_REGS).pbbmask.get() & !(DEEP_SLEEP_PBBMASK) == 0
&& gpio::INTERRUPT_COUNT.load(Ordering::Relaxed) == 0
}
}
pub unsafe fn enable_clock(clock: Clock) {
match clock {
Clock::HSB(v) => mask_clock!(HSB: hsbmask | 1 << (v as u32)),
Clock::PBA(v) => mask_clock!(PBA: pbamask | 1 << (v as u32)),
Clock::PBB(v) => mask_clock!(PBB: pbbmask | 1 << (v as u32)),
Clock::PBC(v) => mask_clock!(PBC: pbcmask | 1 << (v as u32)),
Clock::PBD(v) => mask_clock!(PBD: pbdmask | 1 << (v as u32)),
}
}
pub unsafe fn disable_clock(clock: Clock) {
match clock {
Clock::HSB(v) => mask_clock!(HSB: hsbmask & !(1 << (v as u32))),
Clock::PBA(v) => mask_clock!(PBA: pbamask & !(1 << (v as u32))),
Clock::PBB(v) => mask_clock!(PBB: pbbmask & !(1 << (v as u32))),
Clock::PBC(v) => mask_clock!(PBC: pbcmask & !(1 << (v as u32))),
Clock::PBD(v) => mask_clock!(PBD: pbdmask & !(1 << (v as u32))),
}
}
pub unsafe fn is_clock_enabled(clock: Clock) -> bool {
match clock {
Clock::HSB(v) => get_clock!(HSB: hsbmask & (1 << (v as u32))),
Clock::PBA(v) => get_clock!(PBA: pbamask & (1 << (v as u32))),
Clock::PBB(v) => get_clock!(PBB: pbbmask & (1 << (v as u32))),
Clock::PBC(v) => get_clock!(PBC: pbcmask & (1 << (v as u32))),
Clock::PBD(v) => get_clock!(PBD: pbdmask & (1 << (v as u32))),
}
}
| OscillatorFrequency | identifier_name |
hal.py | import os
import ujson
import utime
import machine
# LEDs
STATUS = 0
SONAR = 1
LEFT_LINE = 2
RIGHT_LINE = 3
# Directions
STOP = 0
LEFT = 1
RIGHT = 2
SEARCH = 3
FORWARD = 4
BACKWARD = 5
class Sumorobot(object):
# Constructor
def __init__(self):
# Open and parse the config file
with open('config.json', 'r') as config_file:
self.config = ujson.load(config_file)
### PWMs
# Right & Left motor PWMs
self.pwm = {
LEFT: machine.PWM(machine.Pin(15), freq=50, duty=0),
RIGHT: machine.PWM(machine.Pin(4), freq=50, duty=0)
}
# Memorise previous servo speeds
self.prev_speed = {LEFT: 0, RIGHT: 0}
### LEDs
# Enable / Disable LED sensor feedback
self.sensor_feedback = True
# Bottom status LED
self.status_led = machine.Pin(self.config['status_led_pin'], machine.Pin.OUT)
# Bottom status LED is in reverse polarity
self.status_led.value(1)
# Sensor LEDs
self.sonar_led = machine.Pin(16, machine.Pin.OUT)
self.left_line_led = machine.Pin(17, machine.Pin.OUT)
self.right_line_led = machine.Pin(12, machine.Pin.OUT)
### Sonar
# To average sonar sensor value
self.sonar_score = 0
# Sonar distance sensor
self.echo = machine.Pin(14, machine.Pin.IN)
self.trigger = machine.Pin(27, machine.Pin.OUT)
### ADCs
# Battery gauge
self.bat_status = 4.3 # voltage
self.move_counter = 0
self.battery_level = 0 # percentage
self.adc_battery = machine.ADC(machine.Pin(32))
self.bat_charge = machine.Pin(25, machine.Pin.IN) # charging / not charging
# The pullups for the phototransistors
machine.Pin(19, machine.Pin.IN, machine.Pin.PULL_UP)
machine.Pin(23, machine.Pin.IN, machine.Pin.PULL_UP)
# The phototransistors
self.adc_line_left = machine.ADC(machine.Pin(34))
self.adc_line_right = machine.ADC(machine.Pin(33))
# Set reference voltage to 3.3V
self.adc_battery.atten(machine.ADC.ATTN_11DB)
self.adc_line_left.atten(machine.ADC.ATTN_11DB)
self.adc_line_right.atten(machine.ADC.ATTN_11DB)
# For terminating sleep and loops
self.terminate = False
# For search mode
self.search = False
self.last_line = LEFT
self.search_counter = 0
# Function to set LED states
def set_led(self, led, value):
# Turn the given LED on or off
if led == STATUS:
# Status LED is reverse polarity
self.status_led.value(0 if value else 1)
elif led == SONAR:
self.sonar_led.value(value)
elif led == LEFT_LINE:
self.left_line_led.value(value)
elif led == RIGHT_LINE:
self.right_line_led.value(value)
# Function to get battery level in percentage
def get_battery_level(self):
# When the SumoRobot is not moving
if self.prev_speed[LEFT] == 0 and self.prev_speed[RIGHT] == 0:
# Calculate battery voltage
battery_voltage = round(self.config['battery_coeff'] * (self.adc_battery.read() * 3.3 / 4096), 2)
# Map battery voltage to percentage
temp_battery_level = 0.0 + ((100.0 - 0.0) / (4.2 - 3.2)) * (battery_voltage - 3.2)
# When battery level changed more than 5 percent
if abs(self.battery_level - temp_battery_level) > 5:
# Update battery level
self.battery_level = round(temp_battery_level)
# Return the battery level in percentage
return min(100, max(0, self.battery_level))
# Function to get distance (cm) from the object in front of the SumoRobot
def get_sonar_value(self):
# Send a pulse
self.trigger.value(0)
utime.sleep_us(5)
self.trigger.value(1)
# Wait for the pulse and calculate the distance
return round((machine.time_pulse_us(self.echo, 1, 30000) / 2) / 29.1)
# Function to get boolean if there is something in front of the SumoRobot
def is_sonar(self):
# Get the sonar value
self.sonar_value = self.get_sonar_value()
# When the sonar value is small and the ping actually returned
if self.sonar_value < self.config['sonar_threshold'] and self.sonar_value > 0:
# When not maximum score
if self.sonar_score < 5:
# Increase the sonar score
self.sonar_score += 1
# When no sonar ping was returned
else:
# When not lowest score
if self.sonar_score > 0:
# Decrease the sonar score
self.sonar_score -= 1
# When the sensor saw something more than 2 times
value = True if self.sonar_score > 2 else False
return value
# Function to update the config file
def update_config_file(self):
# Update the config file
with open('config.part', 'w') as config_file:
config_file.write(ujson.dumps(self.config))
os.rename('config.part', 'config.json')
# Function to update line calibration and write it to the config file
def calibrate_line_values(self):
# Read the line sensor values
self.config['left_line_value'] = self.adc_line_left.read()
self.config['right_line_value'] = self.adc_line_right.read()
# Function to get light inensity from the phototransistors
def get_line(self, line):
# Check if the direction is valid
assert line in (LEFT, RIGHT)
# Return the given line sensor value
if line == LEFT:
return self.adc_line_left.read()
elif line == RIGHT:
return self.adc_line_right.read()
def is_line(self, line):
# Check if the direction is valid
assert line in (LEFT, RIGHT)
# Define config prefix
prefix = 'left' if line == LEFT else 'right'
# Check for line
value = abs(self.get_line(line) - self.config[prefix + '_line_value']) > self.config[prefix + '_line_threshold']
# Update last line direction if line was detected
self.last_line = value if value else self.last_line
# Return the given line sensor value
return value
def set_servo(self, servo, speed):
# Check if the direction is valid
assert servo in (LEFT, RIGHT)
# Check if the speed is valid
assert speed <= 100 and speed >= -100
# Reverse the speed for the right wheel
# So negative speeds make wheels go backward, positive forward
if servo == RIGHT:
speed = -speed
# Save the new speed
self.prev_speed[servo] = speed
# Set the given servo speed
if speed == 0:
self.pwm[servo].duty(0)
else:
# Define config prefix
prefix = 'left' if servo == LEFT else 'right'
# -100 ... 100 to min_tuning .. max_tuning
index = 0 if speed < 0 else 2
min_tuning = self.config[prefix + '_servo_calib'][index]
max_tuning = self.config[prefix + '_servo_calib'][index+1]
if speed < 0:
# Reverse the speed, so smaller negative numbers represent slower speeds and larger
# faster speeds
speed = -1 * (speed + 101)
speed = int((speed + 1) * (max_tuning - min_tuning) / -99 + min_tuning)
self.pwm[servo].duty(speed)
else:
speed = int(speed * (max_tuning - min_tuning) / 100 + min_tuning)
self.pwm[servo].duty(speed)
def move(self, dir):
# Check if the direction is valid
assert dir in (SEARCH, STOP, RIGHT, LEFT, BACKWARD, FORWARD)
# Go to the given direction
if dir == STOP:
self.set_servo(LEFT, 0)
self.set_servo(RIGHT, 0)
elif dir == LEFT:
self.set_servo(LEFT, -100)
self.set_servo(RIGHT, 100)
elif dir == RIGHT:
self.set_servo(LEFT, 100)
self.set_servo(RIGHT, -100)
elif dir == SEARCH:
# Change search mode after X seconds
if self.search_counter == 50:
self.search = not self.search
self.search_counter = 0
# When in search mode
if self.search:
self.move(FORWARD)
elif self.last_line == RIGHT:
self.move(LEFT)
else:
self.move(RIGHT)
# Increase search counter
self.search_counter += 1
elif dir == FORWARD:
self.set_servo(LEFT, 100)
self.set_servo(RIGHT, 100)
elif dir == BACKWARD:
self.set_servo(LEFT, -100)
self.set_servo(RIGHT, -100)
def update_sensor_feedback(self):
if self.sensor_feedback:
# Show sensor feedback trough LEDs
self.set_led(SONAR, self.is_sonar())
self.set_led(LEFT_LINE, self.is_line(LEFT))
self.set_led(RIGHT_LINE, self.is_line(RIGHT))
def get_sensor_scope(self):
# TODO: implement sensor value caching
|
def get_configuration_scope(self):
return str(self.config['sumorobot_name']) + ',' \
+ str(self.config['firmware_version']) + ',' \
+ str(self.config['left_line_value']) + ',' \
+ str(self.config['right_line_value']) + ',' \
+ str(self.config['left_line_threshold']) + ',' \
+ str(self.config['right_line_threshold']) + ',' \
+ str(self.config['sonar_threshold'])
def sleep(self, delay):
# Check for valid delay
assert delay > 0
# Split the delay into 50ms chunks
while delay:
# Check for forceful termination
if self.terminate:
# Terminate the delay
return
else:
utime.sleep_ms(50)
delay -= 50
| return str(self.get_sonar_value()) + ',' \
+ str(self.get_line(LEFT)) + ',' \
+ str(self.get_line(RIGHT)) + ',' \
+ str(self.bat_charge.value()) + ',' \
+ str(self.get_battery_level()) | identifier_body |
hal.py | import os
import ujson
import utime
import machine
# LEDs
STATUS = 0
SONAR = 1
LEFT_LINE = 2
RIGHT_LINE = 3
# Directions
STOP = 0
LEFT = 1
RIGHT = 2
SEARCH = 3
FORWARD = 4
BACKWARD = 5
class Sumorobot(object):
# Constructor
def __init__(self):
# Open and parse the config file
with open('config.json', 'r') as config_file:
self.config = ujson.load(config_file)
### PWMs
# Right & Left motor PWMs
self.pwm = {
LEFT: machine.PWM(machine.Pin(15), freq=50, duty=0),
RIGHT: machine.PWM(machine.Pin(4), freq=50, duty=0)
}
# Memorise previous servo speeds
self.prev_speed = {LEFT: 0, RIGHT: 0}
### LEDs
# Enable / Disable LED sensor feedback
self.sensor_feedback = True
# Bottom status LED
self.status_led = machine.Pin(self.config['status_led_pin'], machine.Pin.OUT)
# Bottom status LED is in reverse polarity
self.status_led.value(1)
# Sensor LEDs
self.sonar_led = machine.Pin(16, machine.Pin.OUT)
self.left_line_led = machine.Pin(17, machine.Pin.OUT)
self.right_line_led = machine.Pin(12, machine.Pin.OUT)
### Sonar
# To average sonar sensor value
self.sonar_score = 0
# Sonar distance sensor
self.echo = machine.Pin(14, machine.Pin.IN)
self.trigger = machine.Pin(27, machine.Pin.OUT)
### ADCs
# Battery gauge
self.bat_status = 4.3 # voltage
self.move_counter = 0
self.battery_level = 0 # percentage
self.adc_battery = machine.ADC(machine.Pin(32))
self.bat_charge = machine.Pin(25, machine.Pin.IN) # charging / not charging
# The pullups for the phototransistors
machine.Pin(19, machine.Pin.IN, machine.Pin.PULL_UP)
machine.Pin(23, machine.Pin.IN, machine.Pin.PULL_UP)
# The phototransistors
self.adc_line_left = machine.ADC(machine.Pin(34))
self.adc_line_right = machine.ADC(machine.Pin(33))
# Set reference voltage to 3.3V
self.adc_battery.atten(machine.ADC.ATTN_11DB)
self.adc_line_left.atten(machine.ADC.ATTN_11DB)
self.adc_line_right.atten(machine.ADC.ATTN_11DB)
# For terminating sleep and loops
self.terminate = False
# For search mode
self.search = False
self.last_line = LEFT
self.search_counter = 0
# Function to set LED states
def set_led(self, led, value):
# Turn the given LED on or off
if led == STATUS:
# Status LED is reverse polarity
self.status_led.value(0 if value else 1)
elif led == SONAR:
self.sonar_led.value(value)
elif led == LEFT_LINE:
self.left_line_led.value(value)
elif led == RIGHT_LINE:
self.right_line_led.value(value)
# Function to get battery level in percentage
def get_battery_level(self):
# When the SumoRobot is not moving
if self.prev_speed[LEFT] == 0 and self.prev_speed[RIGHT] == 0:
# Calculate battery voltage
battery_voltage = round(self.config['battery_coeff'] * (self.adc_battery.read() * 3.3 / 4096), 2)
# Map battery voltage to percentage
temp_battery_level = 0.0 + ((100.0 - 0.0) / (4.2 - 3.2)) * (battery_voltage - 3.2)
# When battery level changed more than 5 percent
if abs(self.battery_level - temp_battery_level) > 5:
# Update battery level
self.battery_level = round(temp_battery_level)
# Return the battery level in percentage
return min(100, max(0, self.battery_level))
# Function to get distance (cm) from the object in front of the SumoRobot
def get_sonar_value(self):
# Send a pulse
self.trigger.value(0)
utime.sleep_us(5)
self.trigger.value(1)
# Wait for the pulse and calculate the distance
return round((machine.time_pulse_us(self.echo, 1, 30000) / 2) / 29.1)
# Function to get boolean if there is something in front of the SumoRobot
def is_sonar(self):
# Get the sonar value
self.sonar_value = self.get_sonar_value()
# When the sonar value is small and the ping actually returned
if self.sonar_value < self.config['sonar_threshold'] and self.sonar_value > 0:
# When not maximum score
if self.sonar_score < 5:
# Increase the sonar score
self.sonar_score += 1
# When no sonar ping was returned
else:
# When not lowest score
if self.sonar_score > 0:
# Decrease the sonar score
self.sonar_score -= 1
# When the sensor saw something more than 2 times
value = True if self.sonar_score > 2 else False
return value
# Function to update the config file
def update_config_file(self):
# Update the config file
with open('config.part', 'w') as config_file:
config_file.write(ujson.dumps(self.config))
os.rename('config.part', 'config.json')
# Function to update line calibration and write it to the config file
def calibrate_line_values(self):
# Read the line sensor values
self.config['left_line_value'] = self.adc_line_left.read()
self.config['right_line_value'] = self.adc_line_right.read()
# Function to get light inensity from the phototransistors
def get_line(self, line):
# Check if the direction is valid
assert line in (LEFT, RIGHT)
# Return the given line sensor value
if line == LEFT:
return self.adc_line_left.read()
elif line == RIGHT:
return self.adc_line_right.read()
def is_line(self, line):
# Check if the direction is valid
assert line in (LEFT, RIGHT)
# Define config prefix
prefix = 'left' if line == LEFT else 'right'
# Check for line
value = abs(self.get_line(line) - self.config[prefix + '_line_value']) > self.config[prefix + '_line_threshold']
# Update last line direction if line was detected
self.last_line = value if value else self.last_line
# Return the given line sensor value
return value
def set_servo(self, servo, speed):
# Check if the direction is valid
assert servo in (LEFT, RIGHT)
# Check if the speed is valid
assert speed <= 100 and speed >= -100
# Reverse the speed for the right wheel
# So negative speeds make wheels go backward, positive forward
if servo == RIGHT:
speed = -speed
# Save the new speed
self.prev_speed[servo] = speed
# Set the given servo speed
if speed == 0:
self.pwm[servo].duty(0)
else:
# Define config prefix
prefix = 'left' if servo == LEFT else 'right'
# -100 ... 100 to min_tuning .. max_tuning
index = 0 if speed < 0 else 2
min_tuning = self.config[prefix + '_servo_calib'][index]
max_tuning = self.config[prefix + '_servo_calib'][index+1]
if speed < 0:
# Reverse the speed, so smaller negative numbers represent slower speeds and larger
# faster speeds
speed = -1 * (speed + 101)
speed = int((speed + 1) * (max_tuning - min_tuning) / -99 + min_tuning)
self.pwm[servo].duty(speed)
else:
speed = int(speed * (max_tuning - min_tuning) / 100 + min_tuning)
self.pwm[servo].duty(speed)
def move(self, dir):
# Check if the direction is valid
assert dir in (SEARCH, STOP, RIGHT, LEFT, BACKWARD, FORWARD)
# Go to the given direction
if dir == STOP:
self.set_servo(LEFT, 0)
self.set_servo(RIGHT, 0)
elif dir == LEFT:
self.set_servo(LEFT, -100)
self.set_servo(RIGHT, 100)
elif dir == RIGHT:
self.set_servo(LEFT, 100)
self.set_servo(RIGHT, -100)
elif dir == SEARCH:
# Change search mode after X seconds
if self.search_counter == 50:
self.search = not self.search
self.search_counter = 0
# When in search mode
if self.search:
self.move(FORWARD)
elif self.last_line == RIGHT:
self.move(LEFT)
else:
self.move(RIGHT)
# Increase search counter
self.search_counter += 1
elif dir == FORWARD:
self.set_servo(LEFT, 100)
self.set_servo(RIGHT, 100)
elif dir == BACKWARD:
self.set_servo(LEFT, -100)
self.set_servo(RIGHT, -100)
def update_sensor_feedback(self):
if self.sensor_feedback:
# Show sensor feedback trough LEDs
self.set_led(SONAR, self.is_sonar())
self.set_led(LEFT_LINE, self.is_line(LEFT))
self.set_led(RIGHT_LINE, self.is_line(RIGHT))
def | (self):
# TODO: implement sensor value caching
return str(self.get_sonar_value()) + ',' \
+ str(self.get_line(LEFT)) + ',' \
+ str(self.get_line(RIGHT)) + ',' \
+ str(self.bat_charge.value()) + ',' \
+ str(self.get_battery_level())
def get_configuration_scope(self):
return str(self.config['sumorobot_name']) + ',' \
+ str(self.config['firmware_version']) + ',' \
+ str(self.config['left_line_value']) + ',' \
+ str(self.config['right_line_value']) + ',' \
+ str(self.config['left_line_threshold']) + ',' \
+ str(self.config['right_line_threshold']) + ',' \
+ str(self.config['sonar_threshold'])
def sleep(self, delay):
# Check for valid delay
assert delay > 0
# Split the delay into 50ms chunks
while delay:
# Check for forceful termination
if self.terminate:
# Terminate the delay
return
else:
utime.sleep_ms(50)
delay -= 50
| get_sensor_scope | identifier_name |
hal.py | import os
import ujson
import utime
import machine
# LEDs
STATUS = 0
SONAR = 1 | LEFT_LINE = 2
RIGHT_LINE = 3
# Directions
STOP = 0
LEFT = 1
RIGHT = 2
SEARCH = 3
FORWARD = 4
BACKWARD = 5
class Sumorobot(object):
# Constructor
def __init__(self):
# Open and parse the config file
with open('config.json', 'r') as config_file:
self.config = ujson.load(config_file)
### PWMs
# Right & Left motor PWMs
self.pwm = {
LEFT: machine.PWM(machine.Pin(15), freq=50, duty=0),
RIGHT: machine.PWM(machine.Pin(4), freq=50, duty=0)
}
# Memorise previous servo speeds
self.prev_speed = {LEFT: 0, RIGHT: 0}
### LEDs
# Enable / Disable LED sensor feedback
self.sensor_feedback = True
# Bottom status LED
self.status_led = machine.Pin(self.config['status_led_pin'], machine.Pin.OUT)
# Bottom status LED is in reverse polarity
self.status_led.value(1)
# Sensor LEDs
self.sonar_led = machine.Pin(16, machine.Pin.OUT)
self.left_line_led = machine.Pin(17, machine.Pin.OUT)
self.right_line_led = machine.Pin(12, machine.Pin.OUT)
### Sonar
# To average sonar sensor value
self.sonar_score = 0
# Sonar distance sensor
self.echo = machine.Pin(14, machine.Pin.IN)
self.trigger = machine.Pin(27, machine.Pin.OUT)
### ADCs
# Battery gauge
self.bat_status = 4.3 # voltage
self.move_counter = 0
self.battery_level = 0 # percentage
self.adc_battery = machine.ADC(machine.Pin(32))
self.bat_charge = machine.Pin(25, machine.Pin.IN) # charging / not charging
# The pullups for the phototransistors
machine.Pin(19, machine.Pin.IN, machine.Pin.PULL_UP)
machine.Pin(23, machine.Pin.IN, machine.Pin.PULL_UP)
# The phototransistors
self.adc_line_left = machine.ADC(machine.Pin(34))
self.adc_line_right = machine.ADC(machine.Pin(33))
# Set reference voltage to 3.3V
self.adc_battery.atten(machine.ADC.ATTN_11DB)
self.adc_line_left.atten(machine.ADC.ATTN_11DB)
self.adc_line_right.atten(machine.ADC.ATTN_11DB)
# For terminating sleep and loops
self.terminate = False
# For search mode
self.search = False
self.last_line = LEFT
self.search_counter = 0
# Function to set LED states
def set_led(self, led, value):
# Turn the given LED on or off
if led == STATUS:
# Status LED is reverse polarity
self.status_led.value(0 if value else 1)
elif led == SONAR:
self.sonar_led.value(value)
elif led == LEFT_LINE:
self.left_line_led.value(value)
elif led == RIGHT_LINE:
self.right_line_led.value(value)
# Function to get battery level in percentage
def get_battery_level(self):
# When the SumoRobot is not moving
if self.prev_speed[LEFT] == 0 and self.prev_speed[RIGHT] == 0:
# Calculate battery voltage
battery_voltage = round(self.config['battery_coeff'] * (self.adc_battery.read() * 3.3 / 4096), 2)
# Map battery voltage to percentage
temp_battery_level = 0.0 + ((100.0 - 0.0) / (4.2 - 3.2)) * (battery_voltage - 3.2)
# When battery level changed more than 5 percent
if abs(self.battery_level - temp_battery_level) > 5:
# Update battery level
self.battery_level = round(temp_battery_level)
# Return the battery level in percentage
return min(100, max(0, self.battery_level))
# Function to get distance (cm) from the object in front of the SumoRobot
def get_sonar_value(self):
# Send a pulse
self.trigger.value(0)
utime.sleep_us(5)
self.trigger.value(1)
# Wait for the pulse and calculate the distance
return round((machine.time_pulse_us(self.echo, 1, 30000) / 2) / 29.1)
# Function to get boolean if there is something in front of the SumoRobot
def is_sonar(self):
# Get the sonar value
self.sonar_value = self.get_sonar_value()
# When the sonar value is small and the ping actually returned
if self.sonar_value < self.config['sonar_threshold'] and self.sonar_value > 0:
# When not maximum score
if self.sonar_score < 5:
# Increase the sonar score
self.sonar_score += 1
# When no sonar ping was returned
else:
# When not lowest score
if self.sonar_score > 0:
# Decrease the sonar score
self.sonar_score -= 1
# When the sensor saw something more than 2 times
value = True if self.sonar_score > 2 else False
return value
# Function to update the config file
def update_config_file(self):
# Update the config file
with open('config.part', 'w') as config_file:
config_file.write(ujson.dumps(self.config))
os.rename('config.part', 'config.json')
# Function to update line calibration and write it to the config file
def calibrate_line_values(self):
# Read the line sensor values
self.config['left_line_value'] = self.adc_line_left.read()
self.config['right_line_value'] = self.adc_line_right.read()
# Function to get light inensity from the phototransistors
def get_line(self, line):
# Check if the direction is valid
assert line in (LEFT, RIGHT)
# Return the given line sensor value
if line == LEFT:
return self.adc_line_left.read()
elif line == RIGHT:
return self.adc_line_right.read()
def is_line(self, line):
# Check if the direction is valid
assert line in (LEFT, RIGHT)
# Define config prefix
prefix = 'left' if line == LEFT else 'right'
# Check for line
value = abs(self.get_line(line) - self.config[prefix + '_line_value']) > self.config[prefix + '_line_threshold']
# Update last line direction if line was detected
self.last_line = value if value else self.last_line
# Return the given line sensor value
return value
def set_servo(self, servo, speed):
# Check if the direction is valid
assert servo in (LEFT, RIGHT)
# Check if the speed is valid
assert speed <= 100 and speed >= -100
# Reverse the speed for the right wheel
# So negative speeds make wheels go backward, positive forward
if servo == RIGHT:
speed = -speed
# Save the new speed
self.prev_speed[servo] = speed
# Set the given servo speed
if speed == 0:
self.pwm[servo].duty(0)
else:
# Define config prefix
prefix = 'left' if servo == LEFT else 'right'
# -100 ... 100 to min_tuning .. max_tuning
index = 0 if speed < 0 else 2
min_tuning = self.config[prefix + '_servo_calib'][index]
max_tuning = self.config[prefix + '_servo_calib'][index+1]
if speed < 0:
# Reverse the speed, so smaller negative numbers represent slower speeds and larger
# faster speeds
speed = -1 * (speed + 101)
speed = int((speed + 1) * (max_tuning - min_tuning) / -99 + min_tuning)
self.pwm[servo].duty(speed)
else:
speed = int(speed * (max_tuning - min_tuning) / 100 + min_tuning)
self.pwm[servo].duty(speed)
def move(self, dir):
# Check if the direction is valid
assert dir in (SEARCH, STOP, RIGHT, LEFT, BACKWARD, FORWARD)
# Go to the given direction
if dir == STOP:
self.set_servo(LEFT, 0)
self.set_servo(RIGHT, 0)
elif dir == LEFT:
self.set_servo(LEFT, -100)
self.set_servo(RIGHT, 100)
elif dir == RIGHT:
self.set_servo(LEFT, 100)
self.set_servo(RIGHT, -100)
elif dir == SEARCH:
# Change search mode after X seconds
if self.search_counter == 50:
self.search = not self.search
self.search_counter = 0
# When in search mode
if self.search:
self.move(FORWARD)
elif self.last_line == RIGHT:
self.move(LEFT)
else:
self.move(RIGHT)
# Increase search counter
self.search_counter += 1
elif dir == FORWARD:
self.set_servo(LEFT, 100)
self.set_servo(RIGHT, 100)
elif dir == BACKWARD:
self.set_servo(LEFT, -100)
self.set_servo(RIGHT, -100)
def update_sensor_feedback(self):
if self.sensor_feedback:
# Show sensor feedback trough LEDs
self.set_led(SONAR, self.is_sonar())
self.set_led(LEFT_LINE, self.is_line(LEFT))
self.set_led(RIGHT_LINE, self.is_line(RIGHT))
def get_sensor_scope(self):
# TODO: implement sensor value caching
return str(self.get_sonar_value()) + ',' \
+ str(self.get_line(LEFT)) + ',' \
+ str(self.get_line(RIGHT)) + ',' \
+ str(self.bat_charge.value()) + ',' \
+ str(self.get_battery_level())
def get_configuration_scope(self):
return str(self.config['sumorobot_name']) + ',' \
+ str(self.config['firmware_version']) + ',' \
+ str(self.config['left_line_value']) + ',' \
+ str(self.config['right_line_value']) + ',' \
+ str(self.config['left_line_threshold']) + ',' \
+ str(self.config['right_line_threshold']) + ',' \
+ str(self.config['sonar_threshold'])
def sleep(self, delay):
# Check for valid delay
assert delay > 0
# Split the delay into 50ms chunks
while delay:
# Check for forceful termination
if self.terminate:
# Terminate the delay
return
else:
utime.sleep_ms(50)
delay -= 50 | random_line_split | |
hal.py | import os
import ujson
import utime
import machine
# LEDs
STATUS = 0
SONAR = 1
LEFT_LINE = 2
RIGHT_LINE = 3
# Directions
STOP = 0
LEFT = 1
RIGHT = 2
SEARCH = 3
FORWARD = 4
BACKWARD = 5
class Sumorobot(object):
# Constructor
def __init__(self):
# Open and parse the config file
with open('config.json', 'r') as config_file:
self.config = ujson.load(config_file)
### PWMs
# Right & Left motor PWMs
self.pwm = {
LEFT: machine.PWM(machine.Pin(15), freq=50, duty=0),
RIGHT: machine.PWM(machine.Pin(4), freq=50, duty=0)
}
# Memorise previous servo speeds
self.prev_speed = {LEFT: 0, RIGHT: 0}
### LEDs
# Enable / Disable LED sensor feedback
self.sensor_feedback = True
# Bottom status LED
self.status_led = machine.Pin(self.config['status_led_pin'], machine.Pin.OUT)
# Bottom status LED is in reverse polarity
self.status_led.value(1)
# Sensor LEDs
self.sonar_led = machine.Pin(16, machine.Pin.OUT)
self.left_line_led = machine.Pin(17, machine.Pin.OUT)
self.right_line_led = machine.Pin(12, machine.Pin.OUT)
### Sonar
# To average sonar sensor value
self.sonar_score = 0
# Sonar distance sensor
self.echo = machine.Pin(14, machine.Pin.IN)
self.trigger = machine.Pin(27, machine.Pin.OUT)
### ADCs
# Battery gauge
self.bat_status = 4.3 # voltage
self.move_counter = 0
self.battery_level = 0 # percentage
self.adc_battery = machine.ADC(machine.Pin(32))
self.bat_charge = machine.Pin(25, machine.Pin.IN) # charging / not charging
# The pullups for the phototransistors
machine.Pin(19, machine.Pin.IN, machine.Pin.PULL_UP)
machine.Pin(23, machine.Pin.IN, machine.Pin.PULL_UP)
# The phototransistors
self.adc_line_left = machine.ADC(machine.Pin(34))
self.adc_line_right = machine.ADC(machine.Pin(33))
# Set reference voltage to 3.3V
self.adc_battery.atten(machine.ADC.ATTN_11DB)
self.adc_line_left.atten(machine.ADC.ATTN_11DB)
self.adc_line_right.atten(machine.ADC.ATTN_11DB)
# For terminating sleep and loops
self.terminate = False
# For search mode
self.search = False
self.last_line = LEFT
self.search_counter = 0
# Function to set LED states
def set_led(self, led, value):
# Turn the given LED on or off
if led == STATUS:
# Status LED is reverse polarity
self.status_led.value(0 if value else 1)
elif led == SONAR:
self.sonar_led.value(value)
elif led == LEFT_LINE:
self.left_line_led.value(value)
elif led == RIGHT_LINE:
self.right_line_led.value(value)
# Function to get battery level in percentage
def get_battery_level(self):
# When the SumoRobot is not moving
if self.prev_speed[LEFT] == 0 and self.prev_speed[RIGHT] == 0:
# Calculate battery voltage
battery_voltage = round(self.config['battery_coeff'] * (self.adc_battery.read() * 3.3 / 4096), 2)
# Map battery voltage to percentage
temp_battery_level = 0.0 + ((100.0 - 0.0) / (4.2 - 3.2)) * (battery_voltage - 3.2)
# When battery level changed more than 5 percent
if abs(self.battery_level - temp_battery_level) > 5:
# Update battery level
self.battery_level = round(temp_battery_level)
# Return the battery level in percentage
return min(100, max(0, self.battery_level))
# Function to get distance (cm) from the object in front of the SumoRobot
def get_sonar_value(self):
# Send a pulse
self.trigger.value(0)
utime.sleep_us(5)
self.trigger.value(1)
# Wait for the pulse and calculate the distance
return round((machine.time_pulse_us(self.echo, 1, 30000) / 2) / 29.1)
# Function to get boolean if there is something in front of the SumoRobot
def is_sonar(self):
# Get the sonar value
self.sonar_value = self.get_sonar_value()
# When the sonar value is small and the ping actually returned
if self.sonar_value < self.config['sonar_threshold'] and self.sonar_value > 0:
# When not maximum score
if self.sonar_score < 5:
# Increase the sonar score
self.sonar_score += 1
# When no sonar ping was returned
else:
# When not lowest score
if self.sonar_score > 0:
# Decrease the sonar score
self.sonar_score -= 1
# When the sensor saw something more than 2 times
value = True if self.sonar_score > 2 else False
return value
# Function to update the config file
def update_config_file(self):
# Update the config file
with open('config.part', 'w') as config_file:
config_file.write(ujson.dumps(self.config))
os.rename('config.part', 'config.json')
# Function to update line calibration and write it to the config file
def calibrate_line_values(self):
# Read the line sensor values
self.config['left_line_value'] = self.adc_line_left.read()
self.config['right_line_value'] = self.adc_line_right.read()
# Function to get light inensity from the phototransistors
def get_line(self, line):
# Check if the direction is valid
assert line in (LEFT, RIGHT)
# Return the given line sensor value
if line == LEFT:
return self.adc_line_left.read()
elif line == RIGHT:
return self.adc_line_right.read()
def is_line(self, line):
# Check if the direction is valid
assert line in (LEFT, RIGHT)
# Define config prefix
prefix = 'left' if line == LEFT else 'right'
# Check for line
value = abs(self.get_line(line) - self.config[prefix + '_line_value']) > self.config[prefix + '_line_threshold']
# Update last line direction if line was detected
self.last_line = value if value else self.last_line
# Return the given line sensor value
return value
def set_servo(self, servo, speed):
# Check if the direction is valid
assert servo in (LEFT, RIGHT)
# Check if the speed is valid
assert speed <= 100 and speed >= -100
# Reverse the speed for the right wheel
# So negative speeds make wheels go backward, positive forward
if servo == RIGHT:
speed = -speed
# Save the new speed
self.prev_speed[servo] = speed
# Set the given servo speed
if speed == 0:
self.pwm[servo].duty(0)
else:
# Define config prefix
prefix = 'left' if servo == LEFT else 'right'
# -100 ... 100 to min_tuning .. max_tuning
index = 0 if speed < 0 else 2
min_tuning = self.config[prefix + '_servo_calib'][index]
max_tuning = self.config[prefix + '_servo_calib'][index+1]
if speed < 0:
# Reverse the speed, so smaller negative numbers represent slower speeds and larger
# faster speeds
speed = -1 * (speed + 101)
speed = int((speed + 1) * (max_tuning - min_tuning) / -99 + min_tuning)
self.pwm[servo].duty(speed)
else:
speed = int(speed * (max_tuning - min_tuning) / 100 + min_tuning)
self.pwm[servo].duty(speed)
def move(self, dir):
# Check if the direction is valid
assert dir in (SEARCH, STOP, RIGHT, LEFT, BACKWARD, FORWARD)
# Go to the given direction
if dir == STOP:
self.set_servo(LEFT, 0)
self.set_servo(RIGHT, 0)
elif dir == LEFT:
self.set_servo(LEFT, -100)
self.set_servo(RIGHT, 100)
elif dir == RIGHT:
self.set_servo(LEFT, 100)
self.set_servo(RIGHT, -100)
elif dir == SEARCH:
# Change search mode after X seconds
if self.search_counter == 50:
self.search = not self.search
self.search_counter = 0
# When in search mode
if self.search:
self.move(FORWARD)
elif self.last_line == RIGHT:
|
else:
self.move(RIGHT)
# Increase search counter
self.search_counter += 1
elif dir == FORWARD:
self.set_servo(LEFT, 100)
self.set_servo(RIGHT, 100)
elif dir == BACKWARD:
self.set_servo(LEFT, -100)
self.set_servo(RIGHT, -100)
def update_sensor_feedback(self):
if self.sensor_feedback:
# Show sensor feedback trough LEDs
self.set_led(SONAR, self.is_sonar())
self.set_led(LEFT_LINE, self.is_line(LEFT))
self.set_led(RIGHT_LINE, self.is_line(RIGHT))
def get_sensor_scope(self):
# TODO: implement sensor value caching
return str(self.get_sonar_value()) + ',' \
+ str(self.get_line(LEFT)) + ',' \
+ str(self.get_line(RIGHT)) + ',' \
+ str(self.bat_charge.value()) + ',' \
+ str(self.get_battery_level())
def get_configuration_scope(self):
return str(self.config['sumorobot_name']) + ',' \
+ str(self.config['firmware_version']) + ',' \
+ str(self.config['left_line_value']) + ',' \
+ str(self.config['right_line_value']) + ',' \
+ str(self.config['left_line_threshold']) + ',' \
+ str(self.config['right_line_threshold']) + ',' \
+ str(self.config['sonar_threshold'])
def sleep(self, delay):
# Check for valid delay
assert delay > 0
# Split the delay into 50ms chunks
while delay:
# Check for forceful termination
if self.terminate:
# Terminate the delay
return
else:
utime.sleep_ms(50)
delay -= 50
| self.move(LEFT) | conditional_block |
bdplatfrom.js | //协议
if (!Array.prototype.shuffle) {
Array.prototype.shuffle = function () {
for (var j, x, i = this.length; i; j = parseInt(Math.random() * i), x = this[--i], this[i] = this[j], this[j] = x);
return this;
};
}
/**
* 百度SDK
*/
class bdplatform
{
/**
* 平台配置
*/
constructor() {
/*****平台配置******************************************** */
this.AppID = "17008570";
//正常标准广告
this.NormalAdunits = [
"6580232",
"6580230",
"6580229",
"6580225",
"6580224",
];
//弹窗
this.OtherAdunits = [
];
/*** 成功结算 */
| this.VideoAd1 = "6580222";
/*** 金币获取激励视频 */
this.VideoAd2 = "6580147";
/**签到双倍奖励视频 */
this.VideoAd3 = "6580146";
/**再来一份视频奖励 */
this.VideoAd4 = "6580145";
/**转盘获取次数奖励 */
this.VideoAd5 = "6580144";
/**转盘双倍奖励视频 */
this.VideoAd6= "6580143";
/**金币不足视频 */
this.VideoAd7= "6580142";
/**复活激励视频 */
this.VideoAd8= "6580010";
/**结算双倍奖励视频 */
this.VideoAd9= "6580006"; //OK
/************************************************* */
this.appSid="e3e02a8c";
let that = this;
let shareFunc = swan.onShareAppMessage;
shareFunc(() => ({
title: '炎炎夏日,你确定不来清凉一夏么?',
imageUrl: 'wxlocal/share1.jpg'
}));
//右上角menu转发
swan.showShareMenu({});
//是否开启震动
this.isOpenVibration = true;
this.shareFailCnt = 0;
this.adUnitId = null;
// this.lastShareDate = this.getLastShareDate();
// this.todayShareCount = this.getTodayShareCount();
}
/**登录 */
login(cb)
{
swan.login({
success:res =>
{
let data = {};
if (res.code)
{
data.code = 0;
data.wxcode = res.code;
console.log("bd.login sucess res.code="+res.code);
}
else
{
console.log('bd.login !' + res.errMsg);
data.code = -1;
data.msg = res.errMsg;
}
cb && cb(data);
},
fail(res)
{
console.log('bd.login fail !' + res.errMsg);
let data = {};
data.code = -1;
data.msg = res.errMsg;
cb && cb(data);
}
}
);
}
/**检查用户登录状态是否有效 */
checkSession()
{
swan.checkSession(
{
success: res => {
console.log('登录态有效');
},
fail: function ()
{
console.log('登录态无效');
}
});
}
/**获取小游戏启动参数 */
launchInfo()
{
return swan.getLaunchOptionsSync()
}
/**加载 */
startLoading(_callback)
{
let that = this;
that._loadingCallback = _callback;
}
onLoading(_percent)
{
let that = this;
if (that._loadingCallback) {
that._loadingCallback(_percent);
return true;
}
return false;
}
/**退出小游戏 */
exitMiniProgram()
{
console.log("退出小游戏");
}
/**跳转小程序 进入另一个 */
navigateToMiniProgram(_data)
{
if (swan.navigateToMiniProgram)
{
let successCallback = _data.success;
_data.success = function (res) {
window["UserData"].instance.removeNavAppId(_data.appId);
if (successCallback && typeof (successCallback) == "function")
{
successCallback(res);
}
}
swan.navigateToMiniProgram(_data);
}
}
/**跳转小程序 返回上一个小程序 暂时还没 等待验证 */
navigateBackMiniProgram(data)
{
let callback = data.success
data.success = function (res) {
if (successCallback && typeof (successCallback) == "function")
{
successCallback(true);
}
}
data.fail = function (res) {
if (successCallback && typeof (successCallback) == "function")
{
successCallback(false);
}
}
swan.navigateBackMiniProgram(data);
}
/**创建底部剧中 Banner adUnitId:标识 finishCallback:完成回调 */
createBannerAd(_adUnitId, _finishCallback)
{
if(swan.createBannerAd==null)
{
return null;
};
if (this.bannerAd)
{
this.bannerAd.destroy();
}
const {windowWidth,windowHeight,} = swan.getSystemInfoSync();
let self = this
var o = Math.floor;
var sysInfo = swan.getSystemInfoSync();
var r = o(1 * sysInfo.windowWidth), n = o((sysInfo.windowWidth - r) / 2), d = sysInfo.screenHeight;
var top = d - o(r / 3.4) - (d - r / 0.5625) / 2 + 10;
var bannerAd =swan.createBannerAd({
adUnitId: _adUnitId,
appSid:this.appSid,
style: {
left: n+40, //+ 向右偏移 -向左偏移
top: top+80, // +向下 偏移 -向上偏移
width: r-80
}
});
// 回调里再次调整尺寸,要确保不要触发死循环!!!
bannerAd.onResize(size =>
{
if (scale < 2) {
top = sysInfo.windowHeight - e.height-32;
} else {
top = sysInfo.windowHeight - e.height - 32;
}
if (bannerAd) {
bannerAd.style.top = top;
bannerAd.style.left = (sysInfo.windowWidth - bannerAd.style.realWidth) * 0.5;
}
});
bannerAd.onLoad(() =>
{
bannerAd.show().then(()=>
{
console.log("加载成功 开始展示");
_finishCallback && _finishCallback();
}).catch(err =>
{
console.log('广告组件出现问题', err);
})
});
this.bannerAd = bannerAd
return bannerAd
}
//关闭广告
closeBannerAd()
{
if (!this.bannerAd)
{
return;
}
this.bannerAd.destroy();
this.bannerAd = null;
this.adUnitId = null;
};
//设置广告的显示与隐藏
setBannerVisible(visible)
{
if (!this.bannerAd)
{
return;
}
if (visible)
{
this.bannerAd.show();
}
else
{
this.bannerAd.hide();
}
}
//视频广告 是否预加载 需要在plantfrom中添加
createRewardedVideoAd(_adUnitId, callback)
{
if(!swan.createRewardedVideoAd)
{
return;
};
//回调
let videoItemInfo =
{
adUnitId: _adUnitId,
appSid: this.appSid
}
if(!this.videoAdInfo)
{
this.videoAdInfo = {}
this.videoAdInfo.ins = swan.createRewardedVideoAd(videoItemInfo);
}
else
{
console.log(" 有视频广告信息");
this.videoAdInfo.ins.offError(this.videoAdInfo.error)
this.videoAdInfo.ins.offLoad(this.videoAdInfo.load)
this.videoAdInfo.ins.offClose(this.videoAdInfo.close)
}
let onLoad = function()
{
console.log('视频广告加载成功');
}
let onError = function(errCode, errMsg)
{
console.log(`createRewardedVideoAd errCode = ${errCode}, ${errMsg}`)
callback && callback(false)
}
let onClose = function(res)
{
if(res && res.isEnded)
{
console.log("视频看完了,发放奖励")
window["TaskAchMgr"].instance.refreshProgress(1, 2, 1);
callback && callback(true)
}
else
{
console.log("视频没看完")
callback && callback(false)
}
};
this.videoAdInfo.error = onError
this.videoAdInfo.load = onLoad
this.videoAdInfo.close = onClose
let videoAd = this.videoAdInfo.ins;
if (videoAd)
{
videoAd.load().then(()=>
{
console.log('广告显示成功');
videoAd.show().then(onLoad).catch(err=>{ videoAd.load().then(()=>
{
videoAd.show().then(()=>
{
console.log('广告显示成功');
});
});});
}).catch(err=>
{ console.log("视频加载失败 重新加载一边");
videoAd.load().then(()=>
{
videoAd.show().then(()=>
{
console.log('广告显示成功');
});
});
onsole.log('广告组件出现问题 重新加载一次', err);
})
videoAd.onError(onError)
videoAd.onClose(onClose)
}
return videoAd;
}
/**
* 百度授权
*/
authorize( call)
{
console.log("baidu authorize===");
function p()
{
//异步 同步
return new Promise(function(resolve, reject)
{
//设置 获取用户在该小游戏中已授权过的权限
swan.getSetting({
success:function(res)
{
if(res && res.userInfo)
{
console.log("小游戏中已授权过的权限")
call && call(true) //已经授权过了
}
else
{
resolve()
}
},
fail:function()
{
console.log("小游戏未授权过权限")
call && call(false)
},
})
})
}
p().then(function()
{
//授权小游戏使用某项功能或获取用户的某些数据
btnClick(e)
{
let scope = e.currentTarget.id;
swan.authorize({
scope,
success: res => {
swan.showToast({
title: '授权成功'
});
},
fail: err => {
swan.showToast({
title: '授权失败'
});
console.log('authorize fail', err);
}
});
}
})
};
/**小游戏返回前台 */
onShow(_callback)
{
swan.onShow(function (_param)
{
if (_callback) {
_callback(_param);
}
})
}
/**小游戏退到后台 */
onHide(_callback)
{
swan.onHide(function (_param)
{
if (_callback)
{
_callback(_param);
}
})
}
/**主动分享 */
onShare(call)
{
if(!call)
{
return ;
};
swan.shareAppMessage(
{
title: '炎炎夏日,你确定不来清凉一夏么?',//转发标题,不传则默认使用后台配置或当前小游戏的名称。
//imageUrl: 'wxlocal/share1.jpg',//转发显示图片的链接,可以是网络图片路径或本地图片文件路径或相对代码包根目录的图片文件路径,显示图片长宽比推荐 5:4
success()
{
console.log('分享成功');
console.log('分享成功');
// call && call(true)
//显示分享成功的消息提示框
swan.showToast({
title:"分享成功",
//duration:2000,
icon:"none"
})
},
fail(e) {
swan.showToast({
title:"分享失败",
//duration:2000,
icon:"none"
})
console.log('分享失败');
}
})
};
/**短震动 */
vibrateShort()
{
swan.vibrateShort();
}
/**长震动 */
vibrateLong()
{
swan.vibrateLong();
}
/**获取设备信息 同步 */
getSystemInfoSync()
{
let data= swan.getSystemInfoSync();
console.log('手机品牌信息:', data.brand);
}
/**显示Loading提示框 */
showLoading()
{
swan.showLoading(
{
title: title,
success()
{
call && call(true)
},
fail()
{
call && call(false)
}
})
};
/**隐藏 loading 提示框 */
hideLoading()
{
swan.hideLoading(
{
success()
{
console.log("隐藏加载提示框 成功");
},
fail()
{
console.log("隐藏加载提示框 失败");
}
})
};
/**视频组件控制 */
createVideo()
{
//swan.createVideoContext 创建并返回 video 上下文 videoContext 对象。通过 videoId 跟一个 video 组件绑定,通过它可以操作一个 video 组件
//console.log("创建视频")
};
/**退出视频控制 */
videoHide()
{
// VideoContext.play;
// VideoContext.pause;
// VideoContext.seek;
// VideoContext.sendDanmu;
// VideoContext.showStatusBar;
//console.log("退出视频控制");
};
/**获取玩家信息 */
getUserInfo()
{
let p = new Promise((resolve, reject) => {
swan.getUserInfo(
{
// withCredentials: false, //是否需要返回敏感数据
success: function (res)
{
var userInfo = res.userInfo
this.setData(
{
nickname: userInfo.nickName || '百度网友',
imageSrc: userInfo.avatarUrl || '../../images/avator.png',
nameColor: 'active'
});
resolve(userInfo);
},
fail:function (res)
{
console.log(err);
swan.showToast(
{
title: '请先授权'
});
}
})
});
p().then(function(res)
{
console.log("getUserInfo res ==== ", res)
});
};
/**是否支持卖量 */
isGeneralize()
{
return false;
};
/**是否只有视频 没有分享 */
isOnlyVideo()
{
return true;
}
/**是否至此录屏 */
isPlatformSupportRecord ()
{
return false;
}
//停止录屏
stopRecorder()
{
console.log("录屏功能");
}
// // 获取今日日期
// getTodayDate() {
// var myDate = new Date();
// var year = myDate.getFullYear();
// var month = myDate.getMonth() + 1;
// var date = myDate.getDate();
// var today = '' + year + '_' + month + '_' + date;
// return today;
// }
// // 分享日期
// getLastShareDate() {
// if (this.lastShareDate) return this.lastShareDate;
// var lastShareDate = window.localStorage.getItem('LastShareDate');
// if (lastShareDate === '' || lastShareDate === null || lastShareDate === undefined) {
// lastShareDate = '0';
// }
// return lastShareDate;
// }
// // 刷新分享日期
// updateLastShareDate() {
// var today = this.getTodayDate();
// if (this.lastShareDate !== today) {
// window.localStorage.setItem('LastShareDate', today);
// }
// }
// // 获取今日分享次数
// getTodayShareCount() {
// if (this.todayShareCount !== null && this.todayShareCount !== undefined) return this.todayShareCount;
// if (this.getLastShareDate() !== this.getTodayDate()) {
// this.updateLastShareDate();
// this.setTodayShareCount(0);
// return 0;
// }
// let cnt = window.localStorage.getItem('TodayShareCount');
// if (cnt === '' || cnt === null || cnt === undefined) {
// cnt = '0';
// }
// return parseInt(cnt);
// };
// // 设置分享次数
// setTodayShareCount(cnt) {
// this.todayShareCount = cnt;
// window.localStorage.setItem('TodayShareCount', '' + cnt);
// this.updateLastShareDate();
// }
}
window.platform = new bdplatform(); | /*** 离线双倍奖励激励视频 */ | random_line_split |
bdplatfrom.js |
//协议
if (!Array.prototype.shuffle) {
Array.prototype.shuffle = function () {
for (var j, x, i = this.length; i; j = parseInt(Math.random() * i), x = this[--i], this[i] = this[j], this[j] = x);
return this;
};
}
/**
* 百度SDK
*/
class bdplatform
{
/**
* 平台配置
*/
constructor() {
/*****平台配置******************************************** */
this.AppID = "17008570";
//正常标准广告
this.NormalAdunits = [
"6580232",
"6580230",
"6580229",
"6580225",
"6580224",
];
//弹窗
this.OtherAdunits = [
];
/*** 成功结算 */
/*** 离线双倍奖励激励视频 */
this.VideoAd1 = "6580222";
/*** 金币获取激励视频 */
this.VideoAd2 = "6580147";
/**签到双倍奖励视频 */
this.VideoAd3 = "6580146";
/**再来一份视频奖励 */
this.VideoAd4 = "6580145";
/**转盘获取次数奖励 */
this.VideoAd5 = "6580144";
/**转盘双倍奖励视频 */
this.VideoAd6= "6580143";
/**金币不足视频 */
this.VideoAd7= "6580142";
/**复活激励视频 */
this.VideoAd8= "6580010";
/**结算双倍奖励视频 */
this.VideoAd9= "6580006"; //OK
/************************************************* */
this.appSid="e3e02a8c";
let that = this;
let shareFunc = swan.onShareAppMessage;
shareFunc(() => ({
title: '炎炎夏日,你确定不来清凉一夏么?',
imageUrl: 'wxlocal/share1.jpg'
}));
//右上角menu转发
swan.showShareMenu({});
//是否开启震动
this.isOpenVibration = true;
this.shareFailCnt = 0;
this.adUnitId = null;
// this.lastShareDate = this.getLastShareDate();
// this.todayShareCount = this.getTodayShareCount();
}
/**登录 */
login(cb)
{
swan.login({
success:res =>
{
let data = {};
if (res.code)
{
data.code = 0;
data.wxcode = res.code;
console.log("bd.login sucess res.code="+res.code);
}
else
{
console.log('bd.login !' + res.errMsg);
data.code = -1;
data.msg = res.errMsg;
}
cb && cb(data);
},
fail(res)
{
console.log('bd.login fail !' + res.errMsg);
let data = {};
data.code = -1;
data.msg = res.errMsg;
cb && cb(data);
}
}
);
}
/**检查用户登录状态是否有效 */
checkSession()
{
swan.checkSession(
{
success: res => {
console.log('登录态有效');
},
fail: function ()
{
console.log('登录态无效');
}
});
}
/**获取小游戏启动参数 */
launchInfo()
{
return swan.getLaunchOptionsSync()
}
/**加载 */
startLoading(_callback)
{
let that = this;
that._loadingCallback = _callback;
}
onLoading(_percent)
{
let that = this;
if (that._loadingCallback) {
that._loadingCallback(_percent);
return true;
}
return false;
}
/**退出小游戏 */
exitMiniProgram()
{
console.log("退出小游戏");
}
/**跳转小程序 进入另一个 */
navigateToMiniProgram(_data)
{
if (swan.navigateToMiniProgram)
{
let successCallback = _data.success;
_data.success = function (res) {
window["UserData"].instance.removeNavAppId(_data.appId);
if (successCallback && typeof (successCallback) == "function")
{
successCallback(res);
}
}
swan.navigateToMiniProgram(_data);
}
}
/**跳转小程序 返回上一个小程序 暂时还没 等待验证 */
navigateBackMiniProgram(data)
{
let callback = data.success
data.success = function (res) {
if (successCallback && typeof (successCallback) == "function")
{
successCallback(true);
}
}
data.fail = function (res) {
if (successCallback && typeof (successCallback) == "function")
{
successCallback(false);
}
}
swan.navigateBackMiniProgram(data);
}
/**创建底部剧中 Banner adUnitId:标识 finishCallback:完成回调 */
createBannerAd(_adUnitId, _finishCallback)
{
if(swan.createBannerAd==null)
{
return null;
};
if (this.bannerAd)
{
this.bannerAd.destroy();
}
const {windowWidth,windowHeight,} = swan.getSystemInfoSync();
let self = this
var o = Math.floor;
var sysInfo = swan.getSystemInfoSync();
var r = o(1 * sysInfo.windowWidth), n = o((sysInfo.windowWidth - r) / 2), d = sysInfo.screenHeight;
var top = d - o(r / 3.4) - (d - r / 0.5625) / 2 + 10;
var bannerAd =swan.createBannerAd({
adUnitId: _adUnitId,
appSid:this.appSid,
style: {
left: n+40, //+ 向右偏移 -向左偏移
top: top+80, // +向下 偏移 -向上偏移
width: r-80
}
});
// 回调里再次调整尺寸,要确保不要触发死循环!!!
bannerAd.onResize(size =>
{
if (scale < 2) {
top = sysInfo.windowHeight - e.height-32;
} else {
top = sysInfo.windowHeight - e.height - 32;
}
if (bannerAd) {
bannerAd.style.top = top;
bannerAd.style.left = (sysInfo.windowWidth - bannerAd.style.realWidth) * 0.5;
}
});
bannerAd.onLoad(() =>
{
bannerAd.show().then(()=>
{
console.log("加载成功 开始展示");
_finishCallback && _finishCallback();
}).catch(err =>
{
console.log('广告组件出现问题', err);
})
});
this.bannerAd = bannerAd
return bannerAd
}
//关闭广告
closeBannerAd()
{
if (!this.bannerAd)
{
return;
}
this.bannerAd.destroy();
this.bannerAd = null;
this.adUnitId = null;
};
//设置广告的显示与隐藏
setBannerVisible(visible)
{
if (!this.bannerAd)
{
return;
}
if (visible)
{
this.bannerAd.show();
}
else
{
this.bannerAd.hide();
}
}
//视频广告 是否预加载 需要在plantfrom中添加
createRewardedVideoAd(_adUnitId, callback)
{
if(!swan.createRewardedVideoAd)
{
return;
};
//回调
let videoItemInfo =
{
adUnitId: _adUnitId,
appSid: this.appSid
}
if(!this.videoAdInfo)
{
this.videoAdInfo = {}
this.videoAdInfo.ins = swan.createRewardedVideoAd(videoItemInfo);
}
else
{
console.log(" 有视频广告信息");
this.videoAdInfo.ins.offError(this.videoAdInfo.error)
this.videoAdInfo.ins.offLoad(this.videoAdInfo.load)
this.videoAdInfo.ins.offClose(this.videoAdInfo.close)
}
let onLoad = function()
{
console.log('视频广告加载成功');
}
let onError = function(errCode, errMsg)
{
console.log(`createRewardedVideoAd errCode = ${errCode}, ${errMsg}`)
callback && callback(false)
}
let onClose = function(res)
{
if(res && res.isEnded)
{
console.log("视频看完了,发放奖励")
window["TaskAchMgr"].instance.refreshProgress(1, 2, 1);
callback && callback(true)
}
else
{
console.log("视频没看完")
callback && callback(false)
}
};
this.videoAdInfo.error = onError
this.videoAdInfo.load = onLoad
this.videoAdInfo.close = onClose
let videoAd = this.videoAdInfo.ins;
if (videoAd)
{
videoAd.load().then(()=>
{
console.log('广告显示成功');
videoAd.show().then(onLoad).catch(err=>{ videoAd.load().then(()=>
{
videoAd.show().then(()=>
{
console.log('广告显示成功');
});
});});
}).catch(err=>
{ console.log("视频加载失败 重新加载一边");
videoAd.load().then(()=>
{
videoAd.show().then(()=>
{
console.log('广告显示成功');
});
});
onsole.log('广告组件出现问题 重新加载一次', err);
})
videoAd.onError(onError)
videoAd.onClose(onClose)
}
return videoAd;
}
/**
* 百度授权
*/
authorize( call)
{
console.log("baidu authorize===");
function p()
{
//异步 同步
return new Promise(function(resolve, reject)
{
//设置 获取用户在该小游戏中已授权过的权限
swan.getSetting({
success:function(res)
{
if(res && res.userInfo)
{
console.log("小游戏中已授权过的权限")
call && call(true) //已经授权过了
}
else
{
resolve()
}
},
fail:function()
{
console.log("小游戏未授权过权限")
call && call(false)
},
})
})
}
p().then(function()
{
//授权小游戏使用某项功能或获取用户的某些数据
btnClick(e)
{
let scope = e.currentTarget.id;
swan.authorize({
scope,
success: res => {
swan.showToast({
title: '授权成功'
});
},
fail: err => {
swan.showToast({
title: '授权失败'
});
console.log('authorize fail', err);
}
});
}
})
};
/**小游戏返回前台 */
onShow(_callback)
{
swan.onShow(function (_param)
{
if (_callback) {
_callback(_param);
}
})
}
/**小游戏退到后台 */
onHide(_callback)
{
swan.onHide(function (_param)
{
if (_callback)
{
_callback(_param);
}
})
}
/**主动分享 */
onShare(call)
{
if(!call)
{
return ;
};
swan.shareAppMessage(
{
title: '炎炎夏日,你确定不来清凉一夏么?',//转发标题,不传则默认使用后台配置或当前小游戏的名称。
//imageUrl: 'wxlocal/share1.jpg',//转发显示图片的链接,可以是网络图片路径或本地图片文件路径或相对代码包根目录的图片文件路径,显示图片长宽比推荐 5:4
success()
{
console.log('分享成功');
console.log('分享成功');
// call && call(true)
//显示分享成功的消息提示框
swan.showToast({
title:"分享成功",
//duration:2000,
icon:"none"
})
},
fail(e) {
swan.showToast({
title:"分享失败",
//duration:2000,
icon:"none"
})
console.log('分享失败');
}
})
};
/**短震动 */
vibrateShort()
{
swan.vibrateShort();
}
/**长震动 */
vibrateLong()
{
swan.vibrateLong();
}
/**获取设备信息 同步 */
getSystemInfoSync()
{
let data= swan.getSystemInfoSync();
console.log('手机品牌信息:', data.brand);
}
/**显示Loading提示框 */
showLoading()
{
swan.showLoading(
{
title: title,
success()
{
call && call(true)
},
fail()
{
call && call(false)
}
})
};
/**隐藏 loading 提示框 */
hideLoading()
{
swan.hideLoading(
{
success()
{
console.log("隐藏加载提示框 成功");
},
fail()
{
console.log("隐藏加载提示框 失败");
}
})
};
/**视频组件控制 */
createVideo()
{
//swan.createVideoContext 创建并返回 video 上下文 videoContext 对象。通过 videoId 跟一个 video 组件绑定,通过它可以操作一个 video 组件
//console.log("创建视频")
};
/**退出视频控制 */
videoHide()
{
// VideoContext.play;
// VideoContext.pause;
// VideoContext.seek;
// VideoContext.sendDanmu;
// VideoContext.showStatusBar;
//console.log("退出视频控制");
};
/**获取玩家信息 */
getUserInfo()
{
let p = new Promise((resolve, reject) => {
swan.getUserInfo(
{
// withCredentials: false, //是否需要返回敏感数据
success: function (res)
{
var userInfo = res.userInfo
this.setData(
{
nickname: userInfo.nickName || '百度网友',
imageSrc: userInfo.avatarUrl || '../../images/avator.png',
nameColor: 'active'
});
resolve(userInfo);
},
fail:function (res)
{
console.log(err);
swan.showToast(
{
title: '请先授权'
});
}
})
});
p().then(function(res)
{
console.log("getUserInfo res ==== ", res)
});
};
/**是否支持卖量 */
isGeneralize()
{
return false;
};
/**是否只有视频 没有分享 */
isOnlyVideo()
{
return true;
}
/**是否至此录屏 */
isPlatformSupportRecord ()
{
return false;
}
//停止录屏
stopRecorder()
{
console.log("录屏功能");
}
// // 获取今日日期
// getTodayDate() {
// var myDate = new Date();
// | today;
// }
// // 分享日期
// getLastShareDate() {
// if (this.lastShareDate) return this.lastShareDate;
// var lastShareDate = window.localStorage.getItem('LastShareDate');
// if (lastShareDate === '' || lastShareDate === null || lastShareDate === undefined) {
// lastShareDate = '0';
// }
// return lastShareDate;
// }
// // 刷新分享日期
// updateLastShareDate() {
// var today = this.getTodayDate();
// if (this.lastShareDate !== today) {
// window.localStorage.setItem('LastShareDate', today);
// }
// }
// // 获取今日分享次数
// getTodayShareCount() {
// if (this.todayShareCount !== null && this.todayShareCount !== undefined) return this.todayShareCount;
// if (this.getLastShareDate() !== this.getTodayDate()) {
// this.updateLastShareDate();
// this.setTodayShareCount(0);
// return 0;
// }
// let cnt = window.localStorage.getItem('TodayShareCount');
// if (cnt === '' || cnt === null || cnt === undefined) {
// cnt = '0';
// }
// return parseInt(cnt);
// };
// // 设置分享次数
// setTodayShareCount(cnt) {
// this.todayShareCount = cnt;
// window.localStorage.setItem('TodayShareCount', '' + cnt);
// this.updateLastShareDate();
// }
}
window.platform = new bdplatform(); | var year = myDate.getFullYear();
// var month = myDate.getMonth() + 1;
// var date = myDate.getDate();
// var today = '' + year + '_' + month + '_' + date;
// return | identifier_body |
bdplatfrom.js |
//协议
if (!Array.prototype.shuffle) {
Array.prototype.shuffle = function () {
for (var j, x, i = this.length; i; j = parseInt(Math.random() * i), x = this[--i], this[i] = this[j], this[j] = x);
return this;
};
}
/**
* 百度SDK
*/
class bdplatform
{
/**
* 平台配置
*/
constructor() {
/*****平台配置******************************************** */
this.AppID = "17008570";
//正常标准广告
this.NormalAdunits = [
"6580232",
"6580230",
"6580229",
"6580225",
"6580224",
];
//弹窗
this.OtherAdunits = [
];
/*** 成功结算 */
/*** 离线双倍奖励激励视频 */
this.VideoAd1 = "6580222";
/*** 金币获取激励视频 */
this.VideoAd2 = "6580147";
/**签到双倍奖励视频 */
this.VideoAd3 = "6580146";
/**再来一份视频奖励 */
this.VideoAd4 = "6580145";
/**转盘获取次数奖励 */
this.VideoAd5 = "6580144";
/**转盘双倍奖励视频 */
this.VideoAd6= "6580143";
/**金币不足视频 */
this.VideoAd7= "6580142";
/**复活激励视频 */
this.VideoAd8= "6580010";
/**结算双倍奖励视频 */
this.VideoAd9= "6580006"; //OK
/************************************************* */
this.appSid="e3e02a8c";
let that = this;
let shareFunc = swan.onShareAppMessage;
shareFunc(() => ({
title: '炎炎夏日,你确定不来清凉一夏么?',
imageUrl: 'wxlocal/share1.jpg'
}));
//右上角menu转发
swan.showShareMenu({});
//是否开启震动
this.isOpenVibration = true;
this.shareFailCnt = 0;
this.adUnitId = null;
// this.lastShareDate = this.getLastShareDate();
// this.todayShareCount = this.getTodayShareCount();
}
/**登录 */
login(cb)
{
swan.login({
success:res =>
{
let data = {};
if (res.code)
{
data.code = 0;
data.wxcode = res.code;
console.log("bd.login sucess res.code="+res.code);
}
else
{
console.log('bd.login !' + res.errMsg) | fail(res)
{
console.log('bd.login fail !' + res.errMsg);
let data = {};
data.code = -1;
data.msg = res.errMsg;
cb && cb(data);
}
}
);
}
/**检查用户登录状态是否有效 */
checkSession()
{
swan.checkSession(
{
success: res => {
console.log('登录态有效');
},
fail: function ()
{
console.log('登录态无效');
}
});
}
/**获取小游戏启动参数 */
launchInfo()
{
return swan.getLaunchOptionsSync()
}
/**加载 */
startLoading(_callback)
{
let that = this;
that._loadingCallback = _callback;
}
onLoading(_percent)
{
let that = this;
if (that._loadingCallback) {
that._loadingCallback(_percent);
return true;
}
return false;
}
/**退出小游戏 */
exitMiniProgram()
{
console.log("退出小游戏");
}
/**跳转小程序 进入另一个 */
navigateToMiniProgram(_data)
{
if (swan.navigateToMiniProgram)
{
let successCallback = _data.success;
_data.success = function (res) {
window["UserData"].instance.removeNavAppId(_data.appId);
if (successCallback && typeof (successCallback) == "function")
{
successCallback(res);
}
}
swan.navigateToMiniProgram(_data);
}
}
/**跳转小程序 返回上一个小程序 暂时还没 等待验证 */
navigateBackMiniProgram(data)
{
let callback = data.success
data.success = function (res) {
if (successCallback && typeof (successCallback) == "function")
{
successCallback(true);
}
}
data.fail = function (res) {
if (successCallback && typeof (successCallback) == "function")
{
successCallback(false);
}
}
swan.navigateBackMiniProgram(data);
}
/**创建底部剧中 Banner adUnitId:标识 finishCallback:完成回调 */
createBannerAd(_adUnitId, _finishCallback)
{
if(swan.createBannerAd==null)
{
return null;
};
if (this.bannerAd)
{
this.bannerAd.destroy();
}
const {windowWidth,windowHeight,} = swan.getSystemInfoSync();
let self = this
var o = Math.floor;
var sysInfo = swan.getSystemInfoSync();
var r = o(1 * sysInfo.windowWidth), n = o((sysInfo.windowWidth - r) / 2), d = sysInfo.screenHeight;
var top = d - o(r / 3.4) - (d - r / 0.5625) / 2 + 10;
var bannerAd =swan.createBannerAd({
adUnitId: _adUnitId,
appSid:this.appSid,
style: {
left: n+40, //+ 向右偏移 -向左偏移
top: top+80, // +向下 偏移 -向上偏移
width: r-80
}
});
// 回调里再次调整尺寸,要确保不要触发死循环!!!
bannerAd.onResize(size =>
{
if (scale < 2) {
top = sysInfo.windowHeight - e.height-32;
} else {
top = sysInfo.windowHeight - e.height - 32;
}
if (bannerAd) {
bannerAd.style.top = top;
bannerAd.style.left = (sysInfo.windowWidth - bannerAd.style.realWidth) * 0.5;
}
});
bannerAd.onLoad(() =>
{
bannerAd.show().then(()=>
{
console.log("加载成功 开始展示");
_finishCallback && _finishCallback();
}).catch(err =>
{
console.log('广告组件出现问题', err);
})
});
this.bannerAd = bannerAd
return bannerAd
}
//关闭广告
closeBannerAd()
{
if (!this.bannerAd)
{
return;
}
this.bannerAd.destroy();
this.bannerAd = null;
this.adUnitId = null;
};
//设置广告的显示与隐藏
setBannerVisible(visible)
{
if (!this.bannerAd)
{
return;
}
if (visible)
{
this.bannerAd.show();
}
else
{
this.bannerAd.hide();
}
}
//视频广告 是否预加载 需要在plantfrom中添加
createRewardedVideoAd(_adUnitId, callback)
{
if(!swan.createRewardedVideoAd)
{
return;
};
//回调
let videoItemInfo =
{
adUnitId: _adUnitId,
appSid: this.appSid
}
if(!this.videoAdInfo)
{
this.videoAdInfo = {}
this.videoAdInfo.ins = swan.createRewardedVideoAd(videoItemInfo);
}
else
{
console.log(" 有视频广告信息");
this.videoAdInfo.ins.offError(this.videoAdInfo.error)
this.videoAdInfo.ins.offLoad(this.videoAdInfo.load)
this.videoAdInfo.ins.offClose(this.videoAdInfo.close)
}
let onLoad = function()
{
console.log('视频广告加载成功');
}
let onError = function(errCode, errMsg)
{
console.log(`createRewardedVideoAd errCode = ${errCode}, ${errMsg}`)
callback && callback(false)
}
let onClose = function(res)
{
if(res && res.isEnded)
{
console.log("视频看完了,发放奖励")
window["TaskAchMgr"].instance.refreshProgress(1, 2, 1);
callback && callback(true)
}
else
{
console.log("视频没看完")
callback && callback(false)
}
};
this.videoAdInfo.error = onError
this.videoAdInfo.load = onLoad
this.videoAdInfo.close = onClose
let videoAd = this.videoAdInfo.ins;
if (videoAd)
{
videoAd.load().then(()=>
{
console.log('广告显示成功');
videoAd.show().then(onLoad).catch(err=>{ videoAd.load().then(()=>
{
videoAd.show().then(()=>
{
console.log('广告显示成功');
});
});});
}).catch(err=>
{ console.log("视频加载失败 重新加载一边");
videoAd.load().then(()=>
{
videoAd.show().then(()=>
{
console.log('广告显示成功');
});
});
onsole.log('广告组件出现问题 重新加载一次', err);
})
videoAd.onError(onError)
videoAd.onClose(onClose)
}
return videoAd;
}
/**
* 百度授权
*/
authorize( call)
{
console.log("baidu authorize===");
function p()
{
//异步 同步
return new Promise(function(resolve, reject)
{
//设置 获取用户在该小游戏中已授权过的权限
swan.getSetting({
success:function(res)
{
if(res && res.userInfo)
{
console.log("小游戏中已授权过的权限")
call && call(true) //已经授权过了
}
else
{
resolve()
}
},
fail:function()
{
console.log("小游戏未授权过权限")
call && call(false)
},
})
})
}
p().then(function()
{
//授权小游戏使用某项功能或获取用户的某些数据
btnClick(e)
{
let scope = e.currentTarget.id;
swan.authorize({
scope,
success: res => {
swan.showToast({
title: '授权成功'
});
},
fail: err => {
swan.showToast({
title: '授权失败'
});
console.log('authorize fail', err);
}
});
}
})
};
/**小游戏返回前台 */
onShow(_callback)
{
swan.onShow(function (_param)
{
if (_callback) {
_callback(_param);
}
})
}
/**小游戏退到后台 */
onHide(_callback)
{
swan.onHide(function (_param)
{
if (_callback)
{
_callback(_param);
}
})
}
/**主动分享 */
onShare(call)
{
if(!call)
{
return ;
};
swan.shareAppMessage(
{
title: '炎炎夏日,你确定不来清凉一夏么?',//转发标题,不传则默认使用后台配置或当前小游戏的名称。
//imageUrl: 'wxlocal/share1.jpg',//转发显示图片的链接,可以是网络图片路径或本地图片文件路径或相对代码包根目录的图片文件路径,显示图片长宽比推荐 5:4
success()
{
console.log('分享成功');
console.log('分享成功');
// call && call(true)
//显示分享成功的消息提示框
swan.showToast({
title:"分享成功",
//duration:2000,
icon:"none"
})
},
fail(e) {
swan.showToast({
title:"分享失败",
//duration:2000,
icon:"none"
})
console.log('分享失败');
}
})
};
/**短震动 */
vibrateShort()
{
swan.vibrateShort();
}
/**长震动 */
vibrateLong()
{
swan.vibrateLong();
}
/**获取设备信息 同步 */
getSystemInfoSync()
{
let data= swan.getSystemInfoSync();
console.log('手机品牌信息:', data.brand);
}
/**显示Loading提示框 */
showLoading()
{
swan.showLoading(
{
title: title,
success()
{
call && call(true)
},
fail()
{
call && call(false)
}
})
};
/**隐藏 loading 提示框 */
hideLoading()
{
swan.hideLoading(
{
success()
{
console.log("隐藏加载提示框 成功");
},
fail()
{
console.log("隐藏加载提示框 失败");
}
})
};
/**视频组件控制 */
createVideo()
{
//swan.createVideoContext 创建并返回 video 上下文 videoContext 对象。通过 videoId 跟一个 video 组件绑定,通过它可以操作一个 video 组件
//console.log("创建视频")
};
/**退出视频控制 */
videoHide()
{
// VideoContext.play;
// VideoContext.pause;
// VideoContext.seek;
// VideoContext.sendDanmu;
// VideoContext.showStatusBar;
//console.log("退出视频控制");
};
/**获取玩家信息 */
getUserInfo()
{
let p = new Promise((resolve, reject) => {
swan.getUserInfo(
{
// withCredentials: false, //是否需要返回敏感数据
success: function (res)
{
var userInfo = res.userInfo
this.setData(
{
nickname: userInfo.nickName || '百度网友',
imageSrc: userInfo.avatarUrl || '../../images/avator.png',
nameColor: 'active'
});
resolve(userInfo);
},
fail:function (res)
{
console.log(err);
swan.showToast(
{
title: '请先授权'
});
}
})
});
p().then(function(res)
{
console.log("getUserInfo res ==== ", res)
});
};
/**是否支持卖量 */
isGeneralize()
{
return false;
};
/**是否只有视频 没有分享 */
isOnlyVideo()
{
return true;
}
/**是否至此录屏 */
isPlatformSupportRecord ()
{
return false;
}
//停止录屏
stopRecorder()
{
console.log("录屏功能");
}
// // 获取今日日期
// getTodayDate() {
// var myDate = new Date();
// var year = myDate.getFullYear();
// var month = myDate.getMonth() + 1;
// var date = myDate.getDate();
// var today = '' + year + '_' + month + '_' + date;
// return today;
// }
// // 分享日期
// getLastShareDate() {
// if (this.lastShareDate) return this.lastShareDate;
// var lastShareDate = window.localStorage.getItem('LastShareDate');
// if (lastShareDate === '' || lastShareDate === null || lastShareDate === undefined) {
// lastShareDate = '0';
// }
// return lastShareDate;
// }
// // 刷新分享日期
// updateLastShareDate() {
// var today = this.getTodayDate();
// if (this.lastShareDate !== today) {
// window.localStorage.setItem('LastShareDate', today);
// }
// }
// // 获取今日分享次数
// getTodayShareCount() {
// if (this.todayShareCount !== null && this.todayShareCount !== undefined) return this.todayShareCount;
// if (this.getLastShareDate() !== this.getTodayDate()) {
// this.updateLastShareDate();
// this.setTodayShareCount(0);
// return 0;
// }
// let cnt = window.localStorage.getItem('TodayShareCount');
// if (cnt === '' || cnt === null || cnt === undefined) {
// cnt = '0';
// }
// return parseInt(cnt);
// };
// // 设置分享次数
// setTodayShareCount(cnt) {
// this.todayShareCount = cnt;
// window.localStorage.setItem('TodayShareCount', '' + cnt);
// this.updateLastShareDate();
// }
}
window.platform = new bdplatform(); | ;
data.code = -1;
data.msg = res.errMsg;
}
cb && cb(data);
},
| conditional_block |
bdplatfrom.js |
//协议
if (!Array.prototype.shuffle) {
Array.prototype.shuffle = function () {
for (var j, x, i = this.length; i; j = parseInt(Math.random() * i), x = this[--i], this[i] = this[j], this[j] = x);
return this;
};
}
/**
* 百度SDK
*/
class bdplatform
{
/**
* 平台配置
*/
constructor() {
| 台配置******************************************** */
this.AppID = "17008570";
//正常标准广告
this.NormalAdunits = [
"6580232",
"6580230",
"6580229",
"6580225",
"6580224",
];
//弹窗
this.OtherAdunits = [
];
/*** 成功结算 */
/*** 离线双倍奖励激励视频 */
this.VideoAd1 = "6580222";
/*** 金币获取激励视频 */
this.VideoAd2 = "6580147";
/**签到双倍奖励视频 */
this.VideoAd3 = "6580146";
/**再来一份视频奖励 */
this.VideoAd4 = "6580145";
/**转盘获取次数奖励 */
this.VideoAd5 = "6580144";
/**转盘双倍奖励视频 */
this.VideoAd6= "6580143";
/**金币不足视频 */
this.VideoAd7= "6580142";
/**复活激励视频 */
this.VideoAd8= "6580010";
/**结算双倍奖励视频 */
this.VideoAd9= "6580006"; //OK
/************************************************* */
this.appSid="e3e02a8c";
let that = this;
let shareFunc = swan.onShareAppMessage;
shareFunc(() => ({
title: '炎炎夏日,你确定不来清凉一夏么?',
imageUrl: 'wxlocal/share1.jpg'
}));
//右上角menu转发
swan.showShareMenu({});
//是否开启震动
this.isOpenVibration = true;
this.shareFailCnt = 0;
this.adUnitId = null;
// this.lastShareDate = this.getLastShareDate();
// this.todayShareCount = this.getTodayShareCount();
}
/**登录 */
login(cb)
{
swan.login({
success:res =>
{
let data = {};
if (res.code)
{
data.code = 0;
data.wxcode = res.code;
console.log("bd.login sucess res.code="+res.code);
}
else
{
console.log('bd.login !' + res.errMsg);
data.code = -1;
data.msg = res.errMsg;
}
cb && cb(data);
},
fail(res)
{
console.log('bd.login fail !' + res.errMsg);
let data = {};
data.code = -1;
data.msg = res.errMsg;
cb && cb(data);
}
}
);
}
/**检查用户登录状态是否有效 */
checkSession()
{
swan.checkSession(
{
success: res => {
console.log('登录态有效');
},
fail: function ()
{
console.log('登录态无效');
}
});
}
/**获取小游戏启动参数 */
launchInfo()
{
return swan.getLaunchOptionsSync()
}
/**加载 */
startLoading(_callback)
{
let that = this;
that._loadingCallback = _callback;
}
onLoading(_percent)
{
let that = this;
if (that._loadingCallback) {
that._loadingCallback(_percent);
return true;
}
return false;
}
/**退出小游戏 */
exitMiniProgram()
{
console.log("退出小游戏");
}
/**跳转小程序 进入另一个 */
navigateToMiniProgram(_data)
{
if (swan.navigateToMiniProgram)
{
let successCallback = _data.success;
_data.success = function (res) {
window["UserData"].instance.removeNavAppId(_data.appId);
if (successCallback && typeof (successCallback) == "function")
{
successCallback(res);
}
}
swan.navigateToMiniProgram(_data);
}
}
/**跳转小程序 返回上一个小程序 暂时还没 等待验证 */
navigateBackMiniProgram(data)
{
let callback = data.success
data.success = function (res) {
if (successCallback && typeof (successCallback) == "function")
{
successCallback(true);
}
}
data.fail = function (res) {
if (successCallback && typeof (successCallback) == "function")
{
successCallback(false);
}
}
swan.navigateBackMiniProgram(data);
}
/**创建底部剧中 Banner adUnitId:标识 finishCallback:完成回调 */
createBannerAd(_adUnitId, _finishCallback)
{
if(swan.createBannerAd==null)
{
return null;
};
if (this.bannerAd)
{
this.bannerAd.destroy();
}
const {windowWidth,windowHeight,} = swan.getSystemInfoSync();
let self = this
var o = Math.floor;
var sysInfo = swan.getSystemInfoSync();
var r = o(1 * sysInfo.windowWidth), n = o((sysInfo.windowWidth - r) / 2), d = sysInfo.screenHeight;
var top = d - o(r / 3.4) - (d - r / 0.5625) / 2 + 10;
var bannerAd =swan.createBannerAd({
adUnitId: _adUnitId,
appSid:this.appSid,
style: {
left: n+40, //+ 向右偏移 -向左偏移
top: top+80, // +向下 偏移 -向上偏移
width: r-80
}
});
// 回调里再次调整尺寸,要确保不要触发死循环!!!
bannerAd.onResize(size =>
{
if (scale < 2) {
top = sysInfo.windowHeight - e.height-32;
} else {
top = sysInfo.windowHeight - e.height - 32;
}
if (bannerAd) {
bannerAd.style.top = top;
bannerAd.style.left = (sysInfo.windowWidth - bannerAd.style.realWidth) * 0.5;
}
});
bannerAd.onLoad(() =>
{
bannerAd.show().then(()=>
{
console.log("加载成功 开始展示");
_finishCallback && _finishCallback();
}).catch(err =>
{
console.log('广告组件出现问题', err);
})
});
this.bannerAd = bannerAd
return bannerAd
}
//关闭广告
closeBannerAd()
{
if (!this.bannerAd)
{
return;
}
this.bannerAd.destroy();
this.bannerAd = null;
this.adUnitId = null;
};
//设置广告的显示与隐藏
setBannerVisible(visible)
{
if (!this.bannerAd)
{
return;
}
if (visible)
{
this.bannerAd.show();
}
else
{
this.bannerAd.hide();
}
}
//视频广告 是否预加载 需要在plantfrom中添加
createRewardedVideoAd(_adUnitId, callback)
{
if(!swan.createRewardedVideoAd)
{
return;
};
//回调
let videoItemInfo =
{
adUnitId: _adUnitId,
appSid: this.appSid
}
if(!this.videoAdInfo)
{
this.videoAdInfo = {}
this.videoAdInfo.ins = swan.createRewardedVideoAd(videoItemInfo);
}
else
{
console.log(" 有视频广告信息");
this.videoAdInfo.ins.offError(this.videoAdInfo.error)
this.videoAdInfo.ins.offLoad(this.videoAdInfo.load)
this.videoAdInfo.ins.offClose(this.videoAdInfo.close)
}
let onLoad = function()
{
console.log('视频广告加载成功');
}
let onError = function(errCode, errMsg)
{
console.log(`createRewardedVideoAd errCode = ${errCode}, ${errMsg}`)
callback && callback(false)
}
let onClose = function(res)
{
if(res && res.isEnded)
{
console.log("视频看完了,发放奖励")
window["TaskAchMgr"].instance.refreshProgress(1, 2, 1);
callback && callback(true)
}
else
{
console.log("视频没看完")
callback && callback(false)
}
};
this.videoAdInfo.error = onError
this.videoAdInfo.load = onLoad
this.videoAdInfo.close = onClose
let videoAd = this.videoAdInfo.ins;
if (videoAd)
{
videoAd.load().then(()=>
{
console.log('广告显示成功');
videoAd.show().then(onLoad).catch(err=>{ videoAd.load().then(()=>
{
videoAd.show().then(()=>
{
console.log('广告显示成功');
});
});});
}).catch(err=>
{ console.log("视频加载失败 重新加载一边");
videoAd.load().then(()=>
{
videoAd.show().then(()=>
{
console.log('广告显示成功');
});
});
onsole.log('广告组件出现问题 重新加载一次', err);
})
videoAd.onError(onError)
videoAd.onClose(onClose)
}
return videoAd;
}
/**
* 百度授权
*/
authorize( call)
{
console.log("baidu authorize===");
function p()
{
//异步 同步
return new Promise(function(resolve, reject)
{
//设置 获取用户在该小游戏中已授权过的权限
swan.getSetting({
success:function(res)
{
if(res && res.userInfo)
{
console.log("小游戏中已授权过的权限")
call && call(true) //已经授权过了
}
else
{
resolve()
}
},
fail:function()
{
console.log("小游戏未授权过权限")
call && call(false)
},
})
})
}
p().then(function()
{
//授权小游戏使用某项功能或获取用户的某些数据
btnClick(e)
{
let scope = e.currentTarget.id;
swan.authorize({
scope,
success: res => {
swan.showToast({
title: '授权成功'
});
},
fail: err => {
swan.showToast({
title: '授权失败'
});
console.log('authorize fail', err);
}
});
}
})
};
/**小游戏返回前台 */
onShow(_callback)
{
swan.onShow(function (_param)
{
if (_callback) {
_callback(_param);
}
})
}
/**小游戏退到后台 */
onHide(_callback)
{
swan.onHide(function (_param)
{
if (_callback)
{
_callback(_param);
}
})
}
/**主动分享 */
onShare(call)
{
if(!call)
{
return ;
};
swan.shareAppMessage(
{
title: '炎炎夏日,你确定不来清凉一夏么?',//转发标题,不传则默认使用后台配置或当前小游戏的名称。
//imageUrl: 'wxlocal/share1.jpg',//转发显示图片的链接,可以是网络图片路径或本地图片文件路径或相对代码包根目录的图片文件路径,显示图片长宽比推荐 5:4
success()
{
console.log('分享成功');
console.log('分享成功');
// call && call(true)
//显示分享成功的消息提示框
swan.showToast({
title:"分享成功",
//duration:2000,
icon:"none"
})
},
fail(e) {
swan.showToast({
title:"分享失败",
//duration:2000,
icon:"none"
})
console.log('分享失败');
}
})
};
/**短震动 */
vibrateShort()
{
swan.vibrateShort();
}
/**长震动 */
vibrateLong()
{
swan.vibrateLong();
}
/**获取设备信息 同步 */
getSystemInfoSync()
{
let data= swan.getSystemInfoSync();
console.log('手机品牌信息:', data.brand);
}
/**显示Loading提示框 */
showLoading()
{
swan.showLoading(
{
title: title,
success()
{
call && call(true)
},
fail()
{
call && call(false)
}
})
};
/**隐藏 loading 提示框 */
hideLoading()
{
swan.hideLoading(
{
success()
{
console.log("隐藏加载提示框 成功");
},
fail()
{
console.log("隐藏加载提示框 失败");
}
})
};
/**视频组件控制 */
createVideo()
{
//swan.createVideoContext 创建并返回 video 上下文 videoContext 对象。通过 videoId 跟一个 video 组件绑定,通过它可以操作一个 video 组件
//console.log("创建视频")
};
/**退出视频控制 */
videoHide()
{
// VideoContext.play;
// VideoContext.pause;
// VideoContext.seek;
// VideoContext.sendDanmu;
// VideoContext.showStatusBar;
//console.log("退出视频控制");
};
/**获取玩家信息 */
getUserInfo()
{
let p = new Promise((resolve, reject) => {
swan.getUserInfo(
{
// withCredentials: false, //是否需要返回敏感数据
success: function (res)
{
var userInfo = res.userInfo
this.setData(
{
nickname: userInfo.nickName || '百度网友',
imageSrc: userInfo.avatarUrl || '../../images/avator.png',
nameColor: 'active'
});
resolve(userInfo);
},
fail:function (res)
{
console.log(err);
swan.showToast(
{
title: '请先授权'
});
}
})
});
p().then(function(res)
{
console.log("getUserInfo res ==== ", res)
});
};
/**是否支持卖量 */
isGeneralize()
{
return false;
};
/**是否只有视频 没有分享 */
isOnlyVideo()
{
return true;
}
/**是否至此录屏 */
isPlatformSupportRecord ()
{
return false;
}
//停止录屏
stopRecorder()
{
console.log("录屏功能");
}
// // 获取今日日期
// getTodayDate() {
// var myDate = new Date();
// var year = myDate.getFullYear();
// var month = myDate.getMonth() + 1;
// var date = myDate.getDate();
// var today = '' + year + '_' + month + '_' + date;
// return today;
// }
// // 分享日期
// getLastShareDate() {
// if (this.lastShareDate) return this.lastShareDate;
// var lastShareDate = window.localStorage.getItem('LastShareDate');
// if (lastShareDate === '' || lastShareDate === null || lastShareDate === undefined) {
// lastShareDate = '0';
// }
// return lastShareDate;
// }
// // 刷新分享日期
// updateLastShareDate() {
// var today = this.getTodayDate();
// if (this.lastShareDate !== today) {
// window.localStorage.setItem('LastShareDate', today);
// }
// }
// // 获取今日分享次数
// getTodayShareCount() {
// if (this.todayShareCount !== null && this.todayShareCount !== undefined) return this.todayShareCount;
// if (this.getLastShareDate() !== this.getTodayDate()) {
// this.updateLastShareDate();
// this.setTodayShareCount(0);
// return 0;
// }
// let cnt = window.localStorage.getItem('TodayShareCount');
// if (cnt === '' || cnt === null || cnt === undefined) {
// cnt = '0';
// }
// return parseInt(cnt);
// };
// // 设置分享次数
// setTodayShareCount(cnt) {
// this.todayShareCount = cnt;
// window.localStorage.setItem('TodayShareCount', '' + cnt);
// this.updateLastShareDate();
// }
}
window.platform = new bdplatform(); | /*****平 | identifier_name |
blockparse.py | #!/usr/bin/python3 -OO
'''
writing parser.cpp replacement in Python3
using ideas and code from
http://www.righto.com/2014/02/bitcoins-hard-way-using-raw-bitcoin.html,
http://www.righto.com/2014/02/bitcoin-mining-hard-way-algorithms.html,
https://bitcoin.org/en/developer-guide,
https://bitcoin.org/en/developer-reference,
and many other sources.
it won't work the same but has the same general purpose, to present block
files in a readable format.
'''
from __future__ import division, print_function
import sys, os, struct, binascii, logging, hashlib, re, time
from datetime import datetime
from glob import glob
# some Python3 to Python2 mappings
if bytes([65]) != b'A': # python2
class bytes(str):
def __new__(cls, initial=''):
if type(initial) == list:
joined = ''.join(map(chr, initial))
return super(bytes, cls).__new__(cls, joined)
else:
return super(bytes, cls).__new__(cls, initial)
def __repr__(self):
return 'b' + super(bytes, self).__repr__()
__str__ = __repr__
bytevalue = lambda byte: ord(byte)
bytevalues = lambda string: map(ord, string)
byte = chr
FileNotFoundError = IOError
else: # python3
bytevalue = lambda byte: byte
bytevalues = list
byte = lambda number: chr(number).encode('latin1')
LOGLEVEL = getattr(logging, os.getenv('LOGLEVEL', 'INFO'))
logging.getLogger().level=logging.DEBUG if __debug__ else LOGLEVEL
DEFAULT = sorted(glob(os.path.expanduser('~/.bitcoin/blocks/blk*.dat')))
MAGIC = {
'bitcoin': binascii.a2b_hex(b'F9BEB4D9'),
'dogecoin': binascii.a2b_hex(b'C0C0C0C0'),
'testnet': binascii.a2b_hex(b'FABFB5DA'),
'testnet3': binascii.a2b_hex(b'0B110907'),
'namecoin': binascii.a2b_hex(b'F9BEB4FE'),
'americancoin': binascii.a2b_hex(b'414D433A'),
}
VARINT = {
# struct format, offset, length
# remember in Python3 b'\xfd'[0] == 253
0xfd: ('<H', 1, 2),
0xfe: ('<L', 1, 4),
0xff: ('<Q', 1, 8),
}
# extend VARINT for Python2:
VARINT.update(dict((chr(n), l) for n, l in VARINT.items()))
UNPACKER = {
# fetch using len(bytestring)
1: 'B',
2: '<H',
4: '<L',
8: '<Q',
}
NULLBLOCK = b'\0' * 32 # pointed to by genesis block
def nextprefix(openfile):
'''
helper function for nextchunk
tries to read block prefix from an open file
'''
try:
prefix = openfile.read(8)
except AttributeError: # openfile is None
prefix = b''
return prefix
def nextchunk(blockfiles=None, minblock=0, maxblock=sys.maxsize, wait=True):
'''
generator that fetches and returns raw blocks out of blockfiles
with defaults, waits forever until terminated by signal
'''
minheight, maxheight = int(minblock), int(maxblock)
height = 0
reversemagic = dict([[value, key] for key, value in MAGIC.items()])
blockfiles = blockfiles or DEFAULT
fileindex = 0
currentfile = None
done = False
while True:
prefix = nextprefix(currentfile)
if prefix == b'':
try:
newfile = open(blockfiles[fileindex], 'rb')
fileindex += 1
if fileindex == len(blockfiles):
blockfiles.append(nextfile(blockfiles[-1]))
currentfile = newfile
except FileNotFoundError:
if not wait:
logging.info('end of current data, not waiting')
done = True
else:
logging.debug('waiting for %s to come online',
blockfiles[fileindex])
time.sleep(10)
continue
if done:
raise StopIteration('No more blocks at this time')
else:
magic = prefix[:4]
blocksize = struct.unpack('<L', prefix[4:])[0]
logging.debug('yielding block of size %d', blocksize)
yield prefix + currentfile.read(blocksize)
def nextfile(filename):
'''
returns "next" filename in series from numbered files e.g. blk0001.dat
>>> nextfile('blk0001.dat')
'blk0002.dat'
>>> try: nextfile('blk.dat')
... except: pass
>>> nextfile('00041')
'00042'
'''
pattern = r'^(?P<prefix>[^0-9]*)(?P<number>[0-9]+)(?P<suffix>[^0-9]*)$'
directory, filename = os.path.split(filename)
try:
match = re.compile(pattern).match(filename).groupdict()
except AttributeError as match_failed:
raise ValueError('No numeric pattern found in {}'.format(filename))
newnumber = '{number:0{width}}'.format(
number=int(match['number']) + 1,
width=len(match['number']))
filename = match['prefix'] + newnumber + match['suffix']
return os.path.join(directory, filename) if directory else filename
def nextblock(blockfiles=None, minblock=0, maxblock=sys.maxsize):
'''
generator that fetches and returns raw blocks out of blockfiles
'''
minheight, maxheight = int(minblock), int(maxblock)
height = 0
reversemagic = dict([[value, key] for key, value in MAGIC.items()])
blockfiles = blockfiles or DEFAULT
for blockfile in blockfiles:
magic = ''
index = 0
logging.debug('blockfile "%s" of blockfiles %s', blockfile, blockfiles)
with open(blockfile, 'rb') as datainput:
blockdata = datainput.read() # not necessarily very efficient
logging.warning('NOTE: "height" values shown are relative'
' to start of first file and may include'
' orphaned blocks')
while index < len(blockdata):
logging.debug('blockparser at index %d out of %d bytes',
index, len(blockdata))
magic = blockdata[index:index + 4]
blocksize = struct.unpack('<L', blockdata[index + 4:index + 8])[0]
blockheader = blockdata[index + 8:index + 88]
transactions = blockdata[index + 88:index + blocksize + 8]
index += blocksize + 8
if minheight <= height <= maxheight:
|
elif height > maxheight:
logging.debug('height %d > maxheight %d', height, maxheight)
break # still executes `height += 1` below!
else:
logging.debug('height: %d', height)
height += 1
logging.debug('height: %d, maxheight: %d', height, maxheight)
if height > maxheight:
break
def parse(blockfiles=None, minblock=0, maxblock=sys.maxsize):
'''
dump out block files
'''
minheight, maxheight = int(minblock), int(maxblock)
logging.debug('minheight: %d, maxheight: %d', minheight, maxheight)
height = 0
reversemagic = dict([[value, key] for key, value in MAGIC.items()])
blockfiles = blockfiles or DEFAULT
# if file was specified on commandline, make it into a list
for blockfile in blockfiles:
magic = ''
index = 0
with open(blockfile, 'rb') as datainput:
blockdata = datainput.read() # not necessarily very efficient
logging.warning('NOTE: "height" values shown are relative'
' to start of first file and may include'
' orphaned blocks')
while index < len(blockdata):
logging.debug('blockparser at index %d out of %d bytes',
index, len(blockdata))
magic = blockdata[index:index + 4]
blocksize = struct.unpack('<L', blockdata[index + 4:index + 8])[0]
blockheader = blockdata[index + 8:index + 88]
transactions = blockdata[index + 88:index + blocksize + 8]
index += blocksize + 8
if minheight <= height <= maxheight:
logging.info('height: %d', height)
logging.debug('magic: %s', binascii.b2a_hex(magic))
logging.info('block type: %s', reversemagic.get(
magic, 'unknown'))
logging.info('block size: %d', blocksize)
logging.info('block header: %r', blockheader)
parse_blockheader(blockheader)
logging.info('transactions (partial): %r', transactions[:80])
count, data = parse_transactions(transactions)
logging.info('transaction count: %d', count)
logging.debug('remaining data (partial): %r', data[:80])
elif height > maxheight:
logging.debug('height %d > maxheight %d', height, maxheight)
break # still executes `height += 1` below!
else:
logging.debug('height: %d', height)
height += 1
logging.debug('height: %d, maxheight: %d', height, maxheight)
if height > maxheight:
break
def parse_blockheader(blockheader):
'''
return contents of block header
'''
version = blockheader[:4]
previous = blockheader[4:36]
merkle_root = blockheader[36:68]
unix_time = blockheader[68:72]
nbits = blockheader[72:76]
nonce = blockheader[76:]
blockhash = get_hash(blockheader)
if len(nonce) != 4:
raise ValueError('Nonce wrong size: %d bytes' % len(nonce))
logging.info('block version: %s', show_long(version))
logging.info('previous block hash: %s', show_hash(previous))
logging.info('merkle root: %s', show_hash(merkle_root))
logging.info('unix time: %s', timestamp(unix_time))
logging.info('nbits: %r', to_hex(nbits))
logging.info('nonce: %s', to_hex(nonce))
logging.info('block hash: %s', show_hash(blockhash))
return version, previous, merkle_root, unix_time, nbits, nonce, blockhash
def to_long(bytestring):
'''
for unpacking 8, 16, 32, or 64-bit number
'''
return struct.unpack(UNPACKER[(len(bytestring))], bytestring)[0]
def show_long(bytestring):
'''
for displaying 32-bit number
'''
number = to_long(bytestring)
return '0x%08x (%d)' % (number, number)
def timestamp(bytestring):
'''
for displaying 32-bit number as UTC time
'''
return datetime.utcfromtimestamp(to_long(bytestring)).isoformat()
def to_hex(bytestring):
'''
for displaying bytes in hexadecimal
the str() and .decode() stuff is necessary to get an unadorned string
in both Python2 and Python3
to_hex('\x01\xff')
'ff01'
'''
logging.debug('to_hex bytestring: %r', bytestring)
return str(binascii.b2a_hex(bytestring).decode('utf8'))
def get_hash(bytestring, repeat=2):
'''
return sha256 hash digest of bytestring
default is to return hash of hash; for simple hash, pass `repeat=1`
'''
for iteration in range(repeat):
bytestring = hashlib.sha256(bytestring).digest()
return bytestring
def show_hash(bytestring):
'''
return a sha256 hash, or any other bytestring, reversed and hexlified
'''
return to_hex(bytestring[::-1])
def parse_transactions(data):
'''
return parsed transaction length and transactions
'''
transactions = []
rawcount, count, data = get_count(data)
for index in range(count):
raw_transaction, transaction, data = parse_transaction(data)
transactions.append(raw_transaction)
return count, data
def next_transaction(blockfiles=None, minblock=0, maxblock=sys.maxsize):
'''
iterates over each transaction in every input block
'''
logging.debug('blockfiles: %s', blockfiles)
blockfiles = blockfiles or DEFAULT
blocks = nextblock(blockfiles, minblock, maxblock)
for height, header, transactions in blocks:
rawcount, count, data = get_count(transactions)
for index in range(count):
raw_transaction, transaction, data = parse_transaction(data)
txhash = get_hash(raw_transaction)
yield height, txhash, transaction
class Node(object):
'''
tree node
'''
def __init__(self, parent=None, blockhash=None, blocktime=''):
self.parent = parent
self.blockhash = blockhash
self.blocktime = blocktime
def countback(self, searchblock=NULLBLOCK):
r'''
return list of nodes that ends with this block
if attempting to get "height", caller is responsible to zero-base
the result, counting the genesis block as height 0
>>> node = Node(None, NULLBLOCK) # not a real node
>>> node = Node(node, b'\0') # height 0, genesis block
>>> node = Node(node, b'\1') # height 1
>>> node = Node(node, b'\2') # height 2
>>> len(node.countback())
3
>>> len(node.countback(b'\0'))
2
>>> try:
... node.countback(None)
... except AttributeError:
... print('failed')
failed
'''
traversed = [self]
parent = self.parent
while parent.blockhash != searchblock:
#logging.debug('parent.blockhash: %s', show_hash(parent.blockhash))
traversed.insert(0, parent)
parent = parent.parent
return traversed
def __str__(self):
return "{'Node': {'hash': '%s', 'timestamp': '%s'}}" % (
show_hash(self.blockhash),
self.blocktime)
__repr__ = __str__
def reorder(blockfiles=None, minblock=0, maxblock=sys.maxsize):
'''
removes orphan blocks and corrects height
'''
logging.debug('blockfiles: %s', blockfiles)
blockfiles = blockfiles or DEFAULT
blocks = nextblock(blockfiles, minblock, maxblock)
lastnode = Node(None, NULLBLOCK)
chains = [[lastnode]]
logging.debug('chains: %s', chains)
chain = 0
for height, header, transactions in blocks:
parsed = parse_blockheader(header)
previous, blockhash = parsed[1], parsed[6]
blocktime = timestamp(parsed[3])
if previous != lastnode.blockhash:
logging.warning('reorder at block %s',
Node(None, blockhash, blocktime))
logging.debug('previous block should be: %s', show_hash(previous))
logging.info('lastnode: %s', lastnode)
found, count = None, 0
try:
logging.debug('assuming previous block in this same chain')
nodes = lastnode.countback(previous)
found = nodes[0].parent
logging.info('reorder found %s %d blocks back',
found, len(nodes) + 1)
chain = len(chains)
chains.append([])
except AttributeError:
logging.debug('searching other chains')
for chain in reversed(chains):
node = chain[-1]
if node.blockhash == previous:
logging.info('reorder found %s at end of another chain',
found)
found = node
chain = chains.index(chain)
for chain in reversed(chains):
found = ([node for node in chain
if node.blockhash == previous] + [None])[0]
if found is not None:
logging.info('reorder found %s in another chain',
found)
chain = len(chains)
chains.append([])
break
if found is None:
raise ValueError('Previous block %s not found', previous)
else:
lastnode = found
# sanity check on above programming
assert_true(previous == lastnode.blockhash)
node = Node(lastnode, blockhash, blocktime)
chains[chain].append(node)
logging.info('current chain: %d out of %d', chain, len(chains))
lastnode = node
nodes = chains[chain][-1].countback()
logging.info('final [real] height: %d out of %d', len(nodes) - 1, height)
print(nodes)
def parse_transaction(data):
'''
return parsed transaction
'''
version = data[:4]
raw_transaction = version
logging.info('transaction version: %s', show_long(version))
raw_in_count, in_count, data = get_count(data[4:])
logging.info('number of transaction inputs: %d', in_count)
raw_inputs, inputs, data = parse_inputs(in_count, data)
logging.debug('length of data after parse_inputs: %d', len(data))
raw_out_count, out_count, data = get_count(data)
logging.info('number of transaction outputs: %d', out_count)
raw_outputs, outputs, data = parse_outputs(out_count, data)
logging.debug('length of data after parse_outputs: %d', len(data))
raw_transaction += (raw_in_count + b''.join(raw_inputs) +
raw_out_count + b''.join(raw_outputs))
lock_time, data = data[:4], data[4:]
raw_transaction += lock_time
logging.info('lock time: %s', to_hex(lock_time))
logging.debug('raw transaction (%d bytes): %s',
len(raw_transaction), to_hex(raw_transaction))
transaction = [version, raw_in_count, inputs, raw_out_count,
outputs, lock_time]
logging.debug('raw transaction split: %s', transaction)
logging.info('transaction hash: %s', show_hash(get_hash(raw_transaction)))
return raw_transaction, transaction, data
def parse_inputs(count, data):
'''
return transaction inputs
'''
raw_inputs = []
inputs = []
for index in range(count):
logging.debug('parse_inputs: len(data): %d', len(data))
tx_input, input_split, data = parse_input(data)
raw_inputs.append(tx_input)
inputs.append(input_split)
return raw_inputs, inputs, data
def parse_outputs(count, data):
'''
return transaction outputs
'''
raw_outputs = []
outputs = []
for index in range(count):
tx_output, output_split, data = parse_output(data)
raw_outputs.append(tx_output)
outputs.append(output_split)
return raw_outputs, outputs, data
def parse_input(data):
'''
parse and return a single transaction input
'''
logging.debug('parse_input: len(data): %d', len(data))
previous_hash = data[:32]
logging.info('txin previous txout hash: %s', show_hash(previous_hash))
previous_index = data[32:36]
raw_input = data[:36]
logging.info('txin previous txout index: %s', show_long(previous_index))
raw_length, script_length, data = get_count(data[36:])
raw_input += raw_length
logging.debug('script_length: %d', script_length)
script, data = data[:script_length], data[script_length:]
raw_input += script
logging.info('txin script: %r', script)
sequence = data[:4]
logging.info('txin sequence number: %s', show_long(sequence))
raw_input += sequence
split_input = [previous_hash, previous_index, raw_length, script, sequence]
return raw_input, split_input, data[4:]
def parse_output(data):
'''
parse and return a single transaction output
'''
logging.debug('first part of output: %s', to_hex(data[:256]))
raw_output = raw_amount = data[:8]
value = to_long(raw_amount)
logging.info('txout value: %.8f', value / 100000000)
# script probably broken if amount is very high
if __debug__ and value > 100000000000000:
raise ValueError('Unusual value, is script broken?')
raw_length, script_length, data = get_count(data[8:])
script, data = data[:script_length], data[script_length:]
logging.info('txout script: %r', script)
raw_output += raw_length + script
output = [raw_amount, raw_length, script]
return raw_output, output, data
def get_count(data):
r'''
extract and decode VarInt count and return it with remainder of data
# the following failed (got 253) before VARINT dict was corrected
>>> get_count(b'\xfd@\x01\x04\xe3v@\x05\x99')[1]
320
>>> get_count(b'\xfdP\x01D\x87\x1c\x00\x00\x00')[1]
336
'''
logging.debug('get_count: next 9 data bytes: %r', data[:9])
packing, offset, length = VARINT.get(data[0], ('B', 0, 1))
logging.debug('packing: %s, offset: %d, length: %d',
packing, offset, length)
count = struct.unpack(packing, data[offset:offset + length])[0]
raw_count, data = data[:offset + length], data[offset + length:]
logging.debug('length of data after get_count: %d', len(data))
return raw_count, count, data
def varint_length(data):
r'''
create new VarInt count of raw data
>>> repr(varint_length('\0' * 512)).endswith("'\\xfd\\x00\\x02'")
True
'''
length = len(data)
if length < 0xfd:
return bytes([length])
elif length <= 0xffff:
return b'\xfd' + struct.pack('<H', length)
elif length <= 0xffffffff:
return b'\xfe' + struct.pack('<L', length)
else: # will throw struct.error if above quad range
return b'\xff' + struct.pack('<Q', length)
# make sure assertions work even if optimized
try:
assert 1 == 0 # check if running optimized
# the above would have raised an AssertionError if not
def assert_true(statement):
if not statement:
raise AssertionError
except AssertionError:
def assert_true(statement):
assert(statement)
if __name__ == '__main__':
blockparse = parse
COMMAND = os.path.splitext(os.path.split(sys.argv[0])[1])[0]
BLOCKFILES = [sys.argv[1]] if len(sys.argv) > 1 else DEFAULT
eval(COMMAND)(BLOCKFILES, *sys.argv[2:])
| logging.debug('height: %d', height)
logging.debug('magic: %s', binascii.b2a_hex(magic))
logging.debug('block type: %s', reversemagic.get(
magic, 'unknown'))
logging.debug('block size: %d', blocksize)
logging.debug('block header: %r', blockheader)
logging.debug('transactions (partial): %r', transactions[:80])
yield (height, blockheader, transactions) | conditional_block |
blockparse.py | #!/usr/bin/python3 -OO
'''
writing parser.cpp replacement in Python3
using ideas and code from
http://www.righto.com/2014/02/bitcoins-hard-way-using-raw-bitcoin.html,
http://www.righto.com/2014/02/bitcoin-mining-hard-way-algorithms.html,
https://bitcoin.org/en/developer-guide,
https://bitcoin.org/en/developer-reference,
and many other sources.
it won't work the same but has the same general purpose, to present block
files in a readable format.
'''
from __future__ import division, print_function
import sys, os, struct, binascii, logging, hashlib, re, time
from datetime import datetime
from glob import glob
# some Python3 to Python2 mappings
if bytes([65]) != b'A': # python2
class bytes(str):
def __new__(cls, initial=''):
if type(initial) == list:
joined = ''.join(map(chr, initial))
return super(bytes, cls).__new__(cls, joined)
else:
return super(bytes, cls).__new__(cls, initial)
def __repr__(self):
return 'b' + super(bytes, self).__repr__()
__str__ = __repr__
bytevalue = lambda byte: ord(byte)
bytevalues = lambda string: map(ord, string)
byte = chr
FileNotFoundError = IOError
else: # python3
bytevalue = lambda byte: byte
bytevalues = list
byte = lambda number: chr(number).encode('latin1')
LOGLEVEL = getattr(logging, os.getenv('LOGLEVEL', 'INFO'))
logging.getLogger().level=logging.DEBUG if __debug__ else LOGLEVEL
DEFAULT = sorted(glob(os.path.expanduser('~/.bitcoin/blocks/blk*.dat')))
MAGIC = {
'bitcoin': binascii.a2b_hex(b'F9BEB4D9'),
'dogecoin': binascii.a2b_hex(b'C0C0C0C0'),
'testnet': binascii.a2b_hex(b'FABFB5DA'),
'testnet3': binascii.a2b_hex(b'0B110907'),
'namecoin': binascii.a2b_hex(b'F9BEB4FE'),
'americancoin': binascii.a2b_hex(b'414D433A'),
}
VARINT = {
# struct format, offset, length
# remember in Python3 b'\xfd'[0] == 253
0xfd: ('<H', 1, 2),
0xfe: ('<L', 1, 4),
0xff: ('<Q', 1, 8),
}
# extend VARINT for Python2:
VARINT.update(dict((chr(n), l) for n, l in VARINT.items()))
UNPACKER = {
# fetch using len(bytestring)
1: 'B',
2: '<H',
4: '<L',
8: '<Q',
}
NULLBLOCK = b'\0' * 32 # pointed to by genesis block
def nextprefix(openfile):
'''
helper function for nextchunk
tries to read block prefix from an open file
'''
try:
prefix = openfile.read(8)
except AttributeError: # openfile is None
prefix = b''
return prefix
def nextchunk(blockfiles=None, minblock=0, maxblock=sys.maxsize, wait=True):
'''
generator that fetches and returns raw blocks out of blockfiles
with defaults, waits forever until terminated by signal
'''
minheight, maxheight = int(minblock), int(maxblock)
height = 0
reversemagic = dict([[value, key] for key, value in MAGIC.items()])
blockfiles = blockfiles or DEFAULT
fileindex = 0
currentfile = None
done = False
while True:
prefix = nextprefix(currentfile)
if prefix == b'':
try:
newfile = open(blockfiles[fileindex], 'rb')
fileindex += 1
if fileindex == len(blockfiles):
blockfiles.append(nextfile(blockfiles[-1]))
currentfile = newfile
except FileNotFoundError:
if not wait:
logging.info('end of current data, not waiting')
done = True
else:
logging.debug('waiting for %s to come online',
blockfiles[fileindex])
time.sleep(10)
continue
if done:
raise StopIteration('No more blocks at this time')
else:
magic = prefix[:4]
blocksize = struct.unpack('<L', prefix[4:])[0]
logging.debug('yielding block of size %d', blocksize)
yield prefix + currentfile.read(blocksize)
def nextfile(filename):
'''
returns "next" filename in series from numbered files e.g. blk0001.dat
>>> nextfile('blk0001.dat')
'blk0002.dat'
>>> try: nextfile('blk.dat')
... except: pass
>>> nextfile('00041')
'00042'
'''
pattern = r'^(?P<prefix>[^0-9]*)(?P<number>[0-9]+)(?P<suffix>[^0-9]*)$'
directory, filename = os.path.split(filename)
try:
match = re.compile(pattern).match(filename).groupdict()
except AttributeError as match_failed:
raise ValueError('No numeric pattern found in {}'.format(filename))
newnumber = '{number:0{width}}'.format(
number=int(match['number']) + 1,
width=len(match['number']))
filename = match['prefix'] + newnumber + match['suffix']
return os.path.join(directory, filename) if directory else filename
def nextblock(blockfiles=None, minblock=0, maxblock=sys.maxsize):
'''
generator that fetches and returns raw blocks out of blockfiles
'''
minheight, maxheight = int(minblock), int(maxblock)
height = 0
reversemagic = dict([[value, key] for key, value in MAGIC.items()])
blockfiles = blockfiles or DEFAULT
for blockfile in blockfiles:
magic = ''
index = 0
logging.debug('blockfile "%s" of blockfiles %s', blockfile, blockfiles)
with open(blockfile, 'rb') as datainput:
blockdata = datainput.read() # not necessarily very efficient
logging.warning('NOTE: "height" values shown are relative'
' to start of first file and may include'
' orphaned blocks')
while index < len(blockdata):
logging.debug('blockparser at index %d out of %d bytes',
index, len(blockdata))
magic = blockdata[index:index + 4]
blocksize = struct.unpack('<L', blockdata[index + 4:index + 8])[0]
blockheader = blockdata[index + 8:index + 88]
transactions = blockdata[index + 88:index + blocksize + 8]
index += blocksize + 8
if minheight <= height <= maxheight:
logging.debug('height: %d', height)
logging.debug('magic: %s', binascii.b2a_hex(magic))
logging.debug('block type: %s', reversemagic.get(
magic, 'unknown'))
logging.debug('block size: %d', blocksize)
logging.debug('block header: %r', blockheader)
logging.debug('transactions (partial): %r', transactions[:80])
yield (height, blockheader, transactions)
elif height > maxheight:
logging.debug('height %d > maxheight %d', height, maxheight)
break # still executes `height += 1` below!
else:
logging.debug('height: %d', height)
height += 1
logging.debug('height: %d, maxheight: %d', height, maxheight)
if height > maxheight:
break
def parse(blockfiles=None, minblock=0, maxblock=sys.maxsize):
'''
dump out block files
'''
minheight, maxheight = int(minblock), int(maxblock)
logging.debug('minheight: %d, maxheight: %d', minheight, maxheight)
height = 0
reversemagic = dict([[value, key] for key, value in MAGIC.items()])
blockfiles = blockfiles or DEFAULT
# if file was specified on commandline, make it into a list
for blockfile in blockfiles:
magic = ''
index = 0
with open(blockfile, 'rb') as datainput:
blockdata = datainput.read() # not necessarily very efficient
logging.warning('NOTE: "height" values shown are relative'
' to start of first file and may include'
' orphaned blocks')
while index < len(blockdata):
logging.debug('blockparser at index %d out of %d bytes',
index, len(blockdata))
magic = blockdata[index:index + 4]
blocksize = struct.unpack('<L', blockdata[index + 4:index + 8])[0]
blockheader = blockdata[index + 8:index + 88]
transactions = blockdata[index + 88:index + blocksize + 8]
index += blocksize + 8
if minheight <= height <= maxheight:
logging.info('height: %d', height)
logging.debug('magic: %s', binascii.b2a_hex(magic))
logging.info('block type: %s', reversemagic.get(
magic, 'unknown'))
logging.info('block size: %d', blocksize)
logging.info('block header: %r', blockheader)
parse_blockheader(blockheader)
logging.info('transactions (partial): %r', transactions[:80])
count, data = parse_transactions(transactions)
logging.info('transaction count: %d', count)
logging.debug('remaining data (partial): %r', data[:80])
elif height > maxheight:
logging.debug('height %d > maxheight %d', height, maxheight)
break # still executes `height += 1` below!
else:
logging.debug('height: %d', height)
height += 1
logging.debug('height: %d, maxheight: %d', height, maxheight)
if height > maxheight:
break
def parse_blockheader(blockheader):
'''
return contents of block header
'''
version = blockheader[:4]
previous = blockheader[4:36]
merkle_root = blockheader[36:68]
unix_time = blockheader[68:72]
nbits = blockheader[72:76]
nonce = blockheader[76:]
blockhash = get_hash(blockheader)
if len(nonce) != 4:
raise ValueError('Nonce wrong size: %d bytes' % len(nonce))
logging.info('block version: %s', show_long(version))
logging.info('previous block hash: %s', show_hash(previous))
logging.info('merkle root: %s', show_hash(merkle_root))
logging.info('unix time: %s', timestamp(unix_time))
logging.info('nbits: %r', to_hex(nbits))
logging.info('nonce: %s', to_hex(nonce))
logging.info('block hash: %s', show_hash(blockhash))
return version, previous, merkle_root, unix_time, nbits, nonce, blockhash
def to_long(bytestring):
'''
for unpacking 8, 16, 32, or 64-bit number
'''
return struct.unpack(UNPACKER[(len(bytestring))], bytestring)[0]
def show_long(bytestring):
'''
for displaying 32-bit number
'''
number = to_long(bytestring)
return '0x%08x (%d)' % (number, number)
def timestamp(bytestring):
'''
for displaying 32-bit number as UTC time
'''
return datetime.utcfromtimestamp(to_long(bytestring)).isoformat()
def to_hex(bytestring):
'''
for displaying bytes in hexadecimal
the str() and .decode() stuff is necessary to get an unadorned string
in both Python2 and Python3
to_hex('\x01\xff')
'ff01'
'''
logging.debug('to_hex bytestring: %r', bytestring)
return str(binascii.b2a_hex(bytestring).decode('utf8'))
def get_hash(bytestring, repeat=2):
'''
return sha256 hash digest of bytestring
default is to return hash of hash; for simple hash, pass `repeat=1`
'''
for iteration in range(repeat):
bytestring = hashlib.sha256(bytestring).digest()
return bytestring
def show_hash(bytestring):
'''
return a sha256 hash, or any other bytestring, reversed and hexlified
'''
return to_hex(bytestring[::-1])
def parse_transactions(data):
'''
return parsed transaction length and transactions
'''
transactions = []
rawcount, count, data = get_count(data)
for index in range(count):
raw_transaction, transaction, data = parse_transaction(data)
transactions.append(raw_transaction)
return count, data
def next_transaction(blockfiles=None, minblock=0, maxblock=sys.maxsize):
'''
iterates over each transaction in every input block
'''
logging.debug('blockfiles: %s', blockfiles)
blockfiles = blockfiles or DEFAULT
blocks = nextblock(blockfiles, minblock, maxblock)
for height, header, transactions in blocks:
rawcount, count, data = get_count(transactions)
for index in range(count):
raw_transaction, transaction, data = parse_transaction(data)
txhash = get_hash(raw_transaction)
yield height, txhash, transaction
class Node(object):
'''
tree node
'''
def __init__(self, parent=None, blockhash=None, blocktime=''):
self.parent = parent
self.blockhash = blockhash
self.blocktime = blocktime
def countback(self, searchblock=NULLBLOCK):
r'''
return list of nodes that ends with this block
if attempting to get "height", caller is responsible to zero-base
the result, counting the genesis block as height 0
>>> node = Node(None, NULLBLOCK) # not a real node
>>> node = Node(node, b'\0') # height 0, genesis block
>>> node = Node(node, b'\1') # height 1
>>> node = Node(node, b'\2') # height 2
>>> len(node.countback())
3
>>> len(node.countback(b'\0'))
2
>>> try:
... node.countback(None)
... except AttributeError:
... print('failed')
failed
'''
traversed = [self]
parent = self.parent
while parent.blockhash != searchblock:
#logging.debug('parent.blockhash: %s', show_hash(parent.blockhash))
traversed.insert(0, parent)
parent = parent.parent
return traversed
def __str__(self):
return "{'Node': {'hash': '%s', 'timestamp': '%s'}}" % (
show_hash(self.blockhash),
self.blocktime)
__repr__ = __str__
def reorder(blockfiles=None, minblock=0, maxblock=sys.maxsize):
'''
removes orphan blocks and corrects height
'''
logging.debug('blockfiles: %s', blockfiles)
blockfiles = blockfiles or DEFAULT
blocks = nextblock(blockfiles, minblock, maxblock)
lastnode = Node(None, NULLBLOCK)
chains = [[lastnode]]
logging.debug('chains: %s', chains)
chain = 0
for height, header, transactions in blocks:
parsed = parse_blockheader(header)
previous, blockhash = parsed[1], parsed[6]
blocktime = timestamp(parsed[3])
if previous != lastnode.blockhash:
logging.warning('reorder at block %s',
Node(None, blockhash, blocktime))
logging.debug('previous block should be: %s', show_hash(previous))
logging.info('lastnode: %s', lastnode)
found, count = None, 0
try:
logging.debug('assuming previous block in this same chain')
nodes = lastnode.countback(previous)
found = nodes[0].parent
logging.info('reorder found %s %d blocks back',
found, len(nodes) + 1)
chain = len(chains)
chains.append([])
except AttributeError:
logging.debug('searching other chains')
for chain in reversed(chains):
node = chain[-1]
if node.blockhash == previous:
logging.info('reorder found %s at end of another chain',
found)
found = node
chain = chains.index(chain)
for chain in reversed(chains):
found = ([node for node in chain
if node.blockhash == previous] + [None])[0]
if found is not None:
logging.info('reorder found %s in another chain',
found)
chain = len(chains)
chains.append([])
break
if found is None:
raise ValueError('Previous block %s not found', previous)
else:
lastnode = found
# sanity check on above programming
assert_true(previous == lastnode.blockhash)
node = Node(lastnode, blockhash, blocktime)
chains[chain].append(node)
logging.info('current chain: %d out of %d', chain, len(chains))
lastnode = node
nodes = chains[chain][-1].countback()
logging.info('final [real] height: %d out of %d', len(nodes) - 1, height)
print(nodes)
def parse_transaction(data):
'''
return parsed transaction
'''
version = data[:4]
raw_transaction = version
logging.info('transaction version: %s', show_long(version))
raw_in_count, in_count, data = get_count(data[4:])
logging.info('number of transaction inputs: %d', in_count)
raw_inputs, inputs, data = parse_inputs(in_count, data)
logging.debug('length of data after parse_inputs: %d', len(data))
raw_out_count, out_count, data = get_count(data)
logging.info('number of transaction outputs: %d', out_count)
raw_outputs, outputs, data = parse_outputs(out_count, data)
logging.debug('length of data after parse_outputs: %d', len(data))
raw_transaction += (raw_in_count + b''.join(raw_inputs) +
raw_out_count + b''.join(raw_outputs))
lock_time, data = data[:4], data[4:]
raw_transaction += lock_time
logging.info('lock time: %s', to_hex(lock_time))
logging.debug('raw transaction (%d bytes): %s',
len(raw_transaction), to_hex(raw_transaction))
transaction = [version, raw_in_count, inputs, raw_out_count,
outputs, lock_time]
logging.debug('raw transaction split: %s', transaction)
logging.info('transaction hash: %s', show_hash(get_hash(raw_transaction)))
return raw_transaction, transaction, data
def parse_inputs(count, data):
'''
return transaction inputs
'''
raw_inputs = []
inputs = []
for index in range(count):
logging.debug('parse_inputs: len(data): %d', len(data))
tx_input, input_split, data = parse_input(data)
raw_inputs.append(tx_input)
inputs.append(input_split)
return raw_inputs, inputs, data
def | (count, data):
'''
return transaction outputs
'''
raw_outputs = []
outputs = []
for index in range(count):
tx_output, output_split, data = parse_output(data)
raw_outputs.append(tx_output)
outputs.append(output_split)
return raw_outputs, outputs, data
def parse_input(data):
'''
parse and return a single transaction input
'''
logging.debug('parse_input: len(data): %d', len(data))
previous_hash = data[:32]
logging.info('txin previous txout hash: %s', show_hash(previous_hash))
previous_index = data[32:36]
raw_input = data[:36]
logging.info('txin previous txout index: %s', show_long(previous_index))
raw_length, script_length, data = get_count(data[36:])
raw_input += raw_length
logging.debug('script_length: %d', script_length)
script, data = data[:script_length], data[script_length:]
raw_input += script
logging.info('txin script: %r', script)
sequence = data[:4]
logging.info('txin sequence number: %s', show_long(sequence))
raw_input += sequence
split_input = [previous_hash, previous_index, raw_length, script, sequence]
return raw_input, split_input, data[4:]
def parse_output(data):
'''
parse and return a single transaction output
'''
logging.debug('first part of output: %s', to_hex(data[:256]))
raw_output = raw_amount = data[:8]
value = to_long(raw_amount)
logging.info('txout value: %.8f', value / 100000000)
# script probably broken if amount is very high
if __debug__ and value > 100000000000000:
raise ValueError('Unusual value, is script broken?')
raw_length, script_length, data = get_count(data[8:])
script, data = data[:script_length], data[script_length:]
logging.info('txout script: %r', script)
raw_output += raw_length + script
output = [raw_amount, raw_length, script]
return raw_output, output, data
def get_count(data):
r'''
extract and decode VarInt count and return it with remainder of data
# the following failed (got 253) before VARINT dict was corrected
>>> get_count(b'\xfd@\x01\x04\xe3v@\x05\x99')[1]
320
>>> get_count(b'\xfdP\x01D\x87\x1c\x00\x00\x00')[1]
336
'''
logging.debug('get_count: next 9 data bytes: %r', data[:9])
packing, offset, length = VARINT.get(data[0], ('B', 0, 1))
logging.debug('packing: %s, offset: %d, length: %d',
packing, offset, length)
count = struct.unpack(packing, data[offset:offset + length])[0]
raw_count, data = data[:offset + length], data[offset + length:]
logging.debug('length of data after get_count: %d', len(data))
return raw_count, count, data
def varint_length(data):
r'''
create new VarInt count of raw data
>>> repr(varint_length('\0' * 512)).endswith("'\\xfd\\x00\\x02'")
True
'''
length = len(data)
if length < 0xfd:
return bytes([length])
elif length <= 0xffff:
return b'\xfd' + struct.pack('<H', length)
elif length <= 0xffffffff:
return b'\xfe' + struct.pack('<L', length)
else: # will throw struct.error if above quad range
return b'\xff' + struct.pack('<Q', length)
# make sure assertions work even if optimized
try:
assert 1 == 0 # check if running optimized
# the above would have raised an AssertionError if not
def assert_true(statement):
if not statement:
raise AssertionError
except AssertionError:
def assert_true(statement):
assert(statement)
if __name__ == '__main__':
blockparse = parse
COMMAND = os.path.splitext(os.path.split(sys.argv[0])[1])[0]
BLOCKFILES = [sys.argv[1]] if len(sys.argv) > 1 else DEFAULT
eval(COMMAND)(BLOCKFILES, *sys.argv[2:])
| parse_outputs | identifier_name |
blockparse.py | #!/usr/bin/python3 -OO
'''
writing parser.cpp replacement in Python3
using ideas and code from
http://www.righto.com/2014/02/bitcoins-hard-way-using-raw-bitcoin.html,
http://www.righto.com/2014/02/bitcoin-mining-hard-way-algorithms.html,
https://bitcoin.org/en/developer-guide,
https://bitcoin.org/en/developer-reference,
and many other sources.
it won't work the same but has the same general purpose, to present block
files in a readable format.
'''
from __future__ import division, print_function
import sys, os, struct, binascii, logging, hashlib, re, time
from datetime import datetime
from glob import glob
# some Python3 to Python2 mappings
if bytes([65]) != b'A': # python2
class bytes(str):
def __new__(cls, initial=''):
if type(initial) == list:
joined = ''.join(map(chr, initial))
return super(bytes, cls).__new__(cls, joined)
else:
return super(bytes, cls).__new__(cls, initial)
def __repr__(self):
|
__str__ = __repr__
bytevalue = lambda byte: ord(byte)
bytevalues = lambda string: map(ord, string)
byte = chr
FileNotFoundError = IOError
else: # python3
bytevalue = lambda byte: byte
bytevalues = list
byte = lambda number: chr(number).encode('latin1')
LOGLEVEL = getattr(logging, os.getenv('LOGLEVEL', 'INFO'))
logging.getLogger().level=logging.DEBUG if __debug__ else LOGLEVEL
DEFAULT = sorted(glob(os.path.expanduser('~/.bitcoin/blocks/blk*.dat')))
MAGIC = {
'bitcoin': binascii.a2b_hex(b'F9BEB4D9'),
'dogecoin': binascii.a2b_hex(b'C0C0C0C0'),
'testnet': binascii.a2b_hex(b'FABFB5DA'),
'testnet3': binascii.a2b_hex(b'0B110907'),
'namecoin': binascii.a2b_hex(b'F9BEB4FE'),
'americancoin': binascii.a2b_hex(b'414D433A'),
}
VARINT = {
# struct format, offset, length
# remember in Python3 b'\xfd'[0] == 253
0xfd: ('<H', 1, 2),
0xfe: ('<L', 1, 4),
0xff: ('<Q', 1, 8),
}
# extend VARINT for Python2:
VARINT.update(dict((chr(n), l) for n, l in VARINT.items()))
UNPACKER = {
# fetch using len(bytestring)
1: 'B',
2: '<H',
4: '<L',
8: '<Q',
}
NULLBLOCK = b'\0' * 32 # pointed to by genesis block
def nextprefix(openfile):
'''
helper function for nextchunk
tries to read block prefix from an open file
'''
try:
prefix = openfile.read(8)
except AttributeError: # openfile is None
prefix = b''
return prefix
def nextchunk(blockfiles=None, minblock=0, maxblock=sys.maxsize, wait=True):
'''
generator that fetches and returns raw blocks out of blockfiles
with defaults, waits forever until terminated by signal
'''
minheight, maxheight = int(minblock), int(maxblock)
height = 0
reversemagic = dict([[value, key] for key, value in MAGIC.items()])
blockfiles = blockfiles or DEFAULT
fileindex = 0
currentfile = None
done = False
while True:
prefix = nextprefix(currentfile)
if prefix == b'':
try:
newfile = open(blockfiles[fileindex], 'rb')
fileindex += 1
if fileindex == len(blockfiles):
blockfiles.append(nextfile(blockfiles[-1]))
currentfile = newfile
except FileNotFoundError:
if not wait:
logging.info('end of current data, not waiting')
done = True
else:
logging.debug('waiting for %s to come online',
blockfiles[fileindex])
time.sleep(10)
continue
if done:
raise StopIteration('No more blocks at this time')
else:
magic = prefix[:4]
blocksize = struct.unpack('<L', prefix[4:])[0]
logging.debug('yielding block of size %d', blocksize)
yield prefix + currentfile.read(blocksize)
def nextfile(filename):
'''
returns "next" filename in series from numbered files e.g. blk0001.dat
>>> nextfile('blk0001.dat')
'blk0002.dat'
>>> try: nextfile('blk.dat')
... except: pass
>>> nextfile('00041')
'00042'
'''
pattern = r'^(?P<prefix>[^0-9]*)(?P<number>[0-9]+)(?P<suffix>[^0-9]*)$'
directory, filename = os.path.split(filename)
try:
match = re.compile(pattern).match(filename).groupdict()
except AttributeError as match_failed:
raise ValueError('No numeric pattern found in {}'.format(filename))
newnumber = '{number:0{width}}'.format(
number=int(match['number']) + 1,
width=len(match['number']))
filename = match['prefix'] + newnumber + match['suffix']
return os.path.join(directory, filename) if directory else filename
def nextblock(blockfiles=None, minblock=0, maxblock=sys.maxsize):
'''
generator that fetches and returns raw blocks out of blockfiles
'''
minheight, maxheight = int(minblock), int(maxblock)
height = 0
reversemagic = dict([[value, key] for key, value in MAGIC.items()])
blockfiles = blockfiles or DEFAULT
for blockfile in blockfiles:
magic = ''
index = 0
logging.debug('blockfile "%s" of blockfiles %s', blockfile, blockfiles)
with open(blockfile, 'rb') as datainput:
blockdata = datainput.read() # not necessarily very efficient
logging.warning('NOTE: "height" values shown are relative'
' to start of first file and may include'
' orphaned blocks')
while index < len(blockdata):
logging.debug('blockparser at index %d out of %d bytes',
index, len(blockdata))
magic = blockdata[index:index + 4]
blocksize = struct.unpack('<L', blockdata[index + 4:index + 8])[0]
blockheader = blockdata[index + 8:index + 88]
transactions = blockdata[index + 88:index + blocksize + 8]
index += blocksize + 8
if minheight <= height <= maxheight:
logging.debug('height: %d', height)
logging.debug('magic: %s', binascii.b2a_hex(magic))
logging.debug('block type: %s', reversemagic.get(
magic, 'unknown'))
logging.debug('block size: %d', blocksize)
logging.debug('block header: %r', blockheader)
logging.debug('transactions (partial): %r', transactions[:80])
yield (height, blockheader, transactions)
elif height > maxheight:
logging.debug('height %d > maxheight %d', height, maxheight)
break # still executes `height += 1` below!
else:
logging.debug('height: %d', height)
height += 1
logging.debug('height: %d, maxheight: %d', height, maxheight)
if height > maxheight:
break
def parse(blockfiles=None, minblock=0, maxblock=sys.maxsize):
'''
dump out block files
'''
minheight, maxheight = int(minblock), int(maxblock)
logging.debug('minheight: %d, maxheight: %d', minheight, maxheight)
height = 0
reversemagic = dict([[value, key] for key, value in MAGIC.items()])
blockfiles = blockfiles or DEFAULT
# if file was specified on commandline, make it into a list
for blockfile in blockfiles:
magic = ''
index = 0
with open(blockfile, 'rb') as datainput:
blockdata = datainput.read() # not necessarily very efficient
logging.warning('NOTE: "height" values shown are relative'
' to start of first file and may include'
' orphaned blocks')
while index < len(blockdata):
logging.debug('blockparser at index %d out of %d bytes',
index, len(blockdata))
magic = blockdata[index:index + 4]
blocksize = struct.unpack('<L', blockdata[index + 4:index + 8])[0]
blockheader = blockdata[index + 8:index + 88]
transactions = blockdata[index + 88:index + blocksize + 8]
index += blocksize + 8
if minheight <= height <= maxheight:
logging.info('height: %d', height)
logging.debug('magic: %s', binascii.b2a_hex(magic))
logging.info('block type: %s', reversemagic.get(
magic, 'unknown'))
logging.info('block size: %d', blocksize)
logging.info('block header: %r', blockheader)
parse_blockheader(blockheader)
logging.info('transactions (partial): %r', transactions[:80])
count, data = parse_transactions(transactions)
logging.info('transaction count: %d', count)
logging.debug('remaining data (partial): %r', data[:80])
elif height > maxheight:
logging.debug('height %d > maxheight %d', height, maxheight)
break # still executes `height += 1` below!
else:
logging.debug('height: %d', height)
height += 1
logging.debug('height: %d, maxheight: %d', height, maxheight)
if height > maxheight:
break
def parse_blockheader(blockheader):
'''
return contents of block header
'''
version = blockheader[:4]
previous = blockheader[4:36]
merkle_root = blockheader[36:68]
unix_time = blockheader[68:72]
nbits = blockheader[72:76]
nonce = blockheader[76:]
blockhash = get_hash(blockheader)
if len(nonce) != 4:
raise ValueError('Nonce wrong size: %d bytes' % len(nonce))
logging.info('block version: %s', show_long(version))
logging.info('previous block hash: %s', show_hash(previous))
logging.info('merkle root: %s', show_hash(merkle_root))
logging.info('unix time: %s', timestamp(unix_time))
logging.info('nbits: %r', to_hex(nbits))
logging.info('nonce: %s', to_hex(nonce))
logging.info('block hash: %s', show_hash(blockhash))
return version, previous, merkle_root, unix_time, nbits, nonce, blockhash
def to_long(bytestring):
'''
for unpacking 8, 16, 32, or 64-bit number
'''
return struct.unpack(UNPACKER[(len(bytestring))], bytestring)[0]
def show_long(bytestring):
'''
for displaying 32-bit number
'''
number = to_long(bytestring)
return '0x%08x (%d)' % (number, number)
def timestamp(bytestring):
'''
for displaying 32-bit number as UTC time
'''
return datetime.utcfromtimestamp(to_long(bytestring)).isoformat()
def to_hex(bytestring):
'''
for displaying bytes in hexadecimal
the str() and .decode() stuff is necessary to get an unadorned string
in both Python2 and Python3
to_hex('\x01\xff')
'ff01'
'''
logging.debug('to_hex bytestring: %r', bytestring)
return str(binascii.b2a_hex(bytestring).decode('utf8'))
def get_hash(bytestring, repeat=2):
'''
return sha256 hash digest of bytestring
default is to return hash of hash; for simple hash, pass `repeat=1`
'''
for iteration in range(repeat):
bytestring = hashlib.sha256(bytestring).digest()
return bytestring
def show_hash(bytestring):
'''
return a sha256 hash, or any other bytestring, reversed and hexlified
'''
return to_hex(bytestring[::-1])
def parse_transactions(data):
'''
return parsed transaction length and transactions
'''
transactions = []
rawcount, count, data = get_count(data)
for index in range(count):
raw_transaction, transaction, data = parse_transaction(data)
transactions.append(raw_transaction)
return count, data
def next_transaction(blockfiles=None, minblock=0, maxblock=sys.maxsize):
'''
iterates over each transaction in every input block
'''
logging.debug('blockfiles: %s', blockfiles)
blockfiles = blockfiles or DEFAULT
blocks = nextblock(blockfiles, minblock, maxblock)
for height, header, transactions in blocks:
rawcount, count, data = get_count(transactions)
for index in range(count):
raw_transaction, transaction, data = parse_transaction(data)
txhash = get_hash(raw_transaction)
yield height, txhash, transaction
class Node(object):
'''
tree node
'''
def __init__(self, parent=None, blockhash=None, blocktime=''):
self.parent = parent
self.blockhash = blockhash
self.blocktime = blocktime
def countback(self, searchblock=NULLBLOCK):
r'''
return list of nodes that ends with this block
if attempting to get "height", caller is responsible to zero-base
the result, counting the genesis block as height 0
>>> node = Node(None, NULLBLOCK) # not a real node
>>> node = Node(node, b'\0') # height 0, genesis block
>>> node = Node(node, b'\1') # height 1
>>> node = Node(node, b'\2') # height 2
>>> len(node.countback())
3
>>> len(node.countback(b'\0'))
2
>>> try:
... node.countback(None)
... except AttributeError:
... print('failed')
failed
'''
traversed = [self]
parent = self.parent
while parent.blockhash != searchblock:
#logging.debug('parent.blockhash: %s', show_hash(parent.blockhash))
traversed.insert(0, parent)
parent = parent.parent
return traversed
def __str__(self):
return "{'Node': {'hash': '%s', 'timestamp': '%s'}}" % (
show_hash(self.blockhash),
self.blocktime)
__repr__ = __str__
def reorder(blockfiles=None, minblock=0, maxblock=sys.maxsize):
'''
removes orphan blocks and corrects height
'''
logging.debug('blockfiles: %s', blockfiles)
blockfiles = blockfiles or DEFAULT
blocks = nextblock(blockfiles, minblock, maxblock)
lastnode = Node(None, NULLBLOCK)
chains = [[lastnode]]
logging.debug('chains: %s', chains)
chain = 0
for height, header, transactions in blocks:
parsed = parse_blockheader(header)
previous, blockhash = parsed[1], parsed[6]
blocktime = timestamp(parsed[3])
if previous != lastnode.blockhash:
logging.warning('reorder at block %s',
Node(None, blockhash, blocktime))
logging.debug('previous block should be: %s', show_hash(previous))
logging.info('lastnode: %s', lastnode)
found, count = None, 0
try:
logging.debug('assuming previous block in this same chain')
nodes = lastnode.countback(previous)
found = nodes[0].parent
logging.info('reorder found %s %d blocks back',
found, len(nodes) + 1)
chain = len(chains)
chains.append([])
except AttributeError:
logging.debug('searching other chains')
for chain in reversed(chains):
node = chain[-1]
if node.blockhash == previous:
logging.info('reorder found %s at end of another chain',
found)
found = node
chain = chains.index(chain)
for chain in reversed(chains):
found = ([node for node in chain
if node.blockhash == previous] + [None])[0]
if found is not None:
logging.info('reorder found %s in another chain',
found)
chain = len(chains)
chains.append([])
break
if found is None:
raise ValueError('Previous block %s not found', previous)
else:
lastnode = found
# sanity check on above programming
assert_true(previous == lastnode.blockhash)
node = Node(lastnode, blockhash, blocktime)
chains[chain].append(node)
logging.info('current chain: %d out of %d', chain, len(chains))
lastnode = node
nodes = chains[chain][-1].countback()
logging.info('final [real] height: %d out of %d', len(nodes) - 1, height)
print(nodes)
def parse_transaction(data):
'''
return parsed transaction
'''
version = data[:4]
raw_transaction = version
logging.info('transaction version: %s', show_long(version))
raw_in_count, in_count, data = get_count(data[4:])
logging.info('number of transaction inputs: %d', in_count)
raw_inputs, inputs, data = parse_inputs(in_count, data)
logging.debug('length of data after parse_inputs: %d', len(data))
raw_out_count, out_count, data = get_count(data)
logging.info('number of transaction outputs: %d', out_count)
raw_outputs, outputs, data = parse_outputs(out_count, data)
logging.debug('length of data after parse_outputs: %d', len(data))
raw_transaction += (raw_in_count + b''.join(raw_inputs) +
raw_out_count + b''.join(raw_outputs))
lock_time, data = data[:4], data[4:]
raw_transaction += lock_time
logging.info('lock time: %s', to_hex(lock_time))
logging.debug('raw transaction (%d bytes): %s',
len(raw_transaction), to_hex(raw_transaction))
transaction = [version, raw_in_count, inputs, raw_out_count,
outputs, lock_time]
logging.debug('raw transaction split: %s', transaction)
logging.info('transaction hash: %s', show_hash(get_hash(raw_transaction)))
return raw_transaction, transaction, data
def parse_inputs(count, data):
'''
return transaction inputs
'''
raw_inputs = []
inputs = []
for index in range(count):
logging.debug('parse_inputs: len(data): %d', len(data))
tx_input, input_split, data = parse_input(data)
raw_inputs.append(tx_input)
inputs.append(input_split)
return raw_inputs, inputs, data
def parse_outputs(count, data):
'''
return transaction outputs
'''
raw_outputs = []
outputs = []
for index in range(count):
tx_output, output_split, data = parse_output(data)
raw_outputs.append(tx_output)
outputs.append(output_split)
return raw_outputs, outputs, data
def parse_input(data):
'''
parse and return a single transaction input
'''
logging.debug('parse_input: len(data): %d', len(data))
previous_hash = data[:32]
logging.info('txin previous txout hash: %s', show_hash(previous_hash))
previous_index = data[32:36]
raw_input = data[:36]
logging.info('txin previous txout index: %s', show_long(previous_index))
raw_length, script_length, data = get_count(data[36:])
raw_input += raw_length
logging.debug('script_length: %d', script_length)
script, data = data[:script_length], data[script_length:]
raw_input += script
logging.info('txin script: %r', script)
sequence = data[:4]
logging.info('txin sequence number: %s', show_long(sequence))
raw_input += sequence
split_input = [previous_hash, previous_index, raw_length, script, sequence]
return raw_input, split_input, data[4:]
def parse_output(data):
'''
parse and return a single transaction output
'''
logging.debug('first part of output: %s', to_hex(data[:256]))
raw_output = raw_amount = data[:8]
value = to_long(raw_amount)
logging.info('txout value: %.8f', value / 100000000)
# script probably broken if amount is very high
if __debug__ and value > 100000000000000:
raise ValueError('Unusual value, is script broken?')
raw_length, script_length, data = get_count(data[8:])
script, data = data[:script_length], data[script_length:]
logging.info('txout script: %r', script)
raw_output += raw_length + script
output = [raw_amount, raw_length, script]
return raw_output, output, data
def get_count(data):
r'''
extract and decode VarInt count and return it with remainder of data
# the following failed (got 253) before VARINT dict was corrected
>>> get_count(b'\xfd@\x01\x04\xe3v@\x05\x99')[1]
320
>>> get_count(b'\xfdP\x01D\x87\x1c\x00\x00\x00')[1]
336
'''
logging.debug('get_count: next 9 data bytes: %r', data[:9])
packing, offset, length = VARINT.get(data[0], ('B', 0, 1))
logging.debug('packing: %s, offset: %d, length: %d',
packing, offset, length)
count = struct.unpack(packing, data[offset:offset + length])[0]
raw_count, data = data[:offset + length], data[offset + length:]
logging.debug('length of data after get_count: %d', len(data))
return raw_count, count, data
def varint_length(data):
r'''
create new VarInt count of raw data
>>> repr(varint_length('\0' * 512)).endswith("'\\xfd\\x00\\x02'")
True
'''
length = len(data)
if length < 0xfd:
return bytes([length])
elif length <= 0xffff:
return b'\xfd' + struct.pack('<H', length)
elif length <= 0xffffffff:
return b'\xfe' + struct.pack('<L', length)
else: # will throw struct.error if above quad range
return b'\xff' + struct.pack('<Q', length)
# make sure assertions work even if optimized
try:
assert 1 == 0 # check if running optimized
# the above would have raised an AssertionError if not
def assert_true(statement):
if not statement:
raise AssertionError
except AssertionError:
def assert_true(statement):
assert(statement)
if __name__ == '__main__':
blockparse = parse
COMMAND = os.path.splitext(os.path.split(sys.argv[0])[1])[0]
BLOCKFILES = [sys.argv[1]] if len(sys.argv) > 1 else DEFAULT
eval(COMMAND)(BLOCKFILES, *sys.argv[2:])
| return 'b' + super(bytes, self).__repr__() | identifier_body |
blockparse.py | #!/usr/bin/python3 -OO
'''
writing parser.cpp replacement in Python3
using ideas and code from
http://www.righto.com/2014/02/bitcoins-hard-way-using-raw-bitcoin.html,
http://www.righto.com/2014/02/bitcoin-mining-hard-way-algorithms.html,
https://bitcoin.org/en/developer-guide,
https://bitcoin.org/en/developer-reference,
and many other sources.
it won't work the same but has the same general purpose, to present block
files in a readable format.
'''
from __future__ import division, print_function
import sys, os, struct, binascii, logging, hashlib, re, time
from datetime import datetime
from glob import glob
# some Python3 to Python2 mappings
if bytes([65]) != b'A': # python2
class bytes(str):
def __new__(cls, initial=''):
if type(initial) == list:
joined = ''.join(map(chr, initial))
return super(bytes, cls).__new__(cls, joined)
else:
return super(bytes, cls).__new__(cls, initial)
def __repr__(self):
return 'b' + super(bytes, self).__repr__()
__str__ = __repr__
bytevalue = lambda byte: ord(byte)
bytevalues = lambda string: map(ord, string)
byte = chr
FileNotFoundError = IOError
else: # python3
bytevalue = lambda byte: byte
bytevalues = list
byte = lambda number: chr(number).encode('latin1')
LOGLEVEL = getattr(logging, os.getenv('LOGLEVEL', 'INFO'))
logging.getLogger().level=logging.DEBUG if __debug__ else LOGLEVEL
DEFAULT = sorted(glob(os.path.expanduser('~/.bitcoin/blocks/blk*.dat')))
MAGIC = {
'bitcoin': binascii.a2b_hex(b'F9BEB4D9'),
'dogecoin': binascii.a2b_hex(b'C0C0C0C0'),
'testnet': binascii.a2b_hex(b'FABFB5DA'),
'testnet3': binascii.a2b_hex(b'0B110907'),
'namecoin': binascii.a2b_hex(b'F9BEB4FE'),
'americancoin': binascii.a2b_hex(b'414D433A'),
}
VARINT = {
# struct format, offset, length
# remember in Python3 b'\xfd'[0] == 253
0xfd: ('<H', 1, 2),
0xfe: ('<L', 1, 4),
0xff: ('<Q', 1, 8),
}
# extend VARINT for Python2:
VARINT.update(dict((chr(n), l) for n, l in VARINT.items()))
UNPACKER = {
# fetch using len(bytestring)
1: 'B',
2: '<H',
4: '<L',
8: '<Q',
}
NULLBLOCK = b'\0' * 32 # pointed to by genesis block
def nextprefix(openfile):
'''
helper function for nextchunk
tries to read block prefix from an open file
'''
try:
prefix = openfile.read(8)
except AttributeError: # openfile is None
prefix = b''
return prefix
def nextchunk(blockfiles=None, minblock=0, maxblock=sys.maxsize, wait=True):
'''
generator that fetches and returns raw blocks out of blockfiles
with defaults, waits forever until terminated by signal
'''
minheight, maxheight = int(minblock), int(maxblock)
height = 0
reversemagic = dict([[value, key] for key, value in MAGIC.items()])
blockfiles = blockfiles or DEFAULT
fileindex = 0
currentfile = None
done = False
while True:
prefix = nextprefix(currentfile)
if prefix == b'':
try:
newfile = open(blockfiles[fileindex], 'rb')
fileindex += 1
if fileindex == len(blockfiles):
blockfiles.append(nextfile(blockfiles[-1]))
currentfile = newfile
except FileNotFoundError:
if not wait:
logging.info('end of current data, not waiting')
done = True
else:
logging.debug('waiting for %s to come online',
blockfiles[fileindex])
time.sleep(10)
continue
if done:
raise StopIteration('No more blocks at this time')
else:
magic = prefix[:4]
blocksize = struct.unpack('<L', prefix[4:])[0]
logging.debug('yielding block of size %d', blocksize)
yield prefix + currentfile.read(blocksize)
def nextfile(filename):
'''
returns "next" filename in series from numbered files e.g. blk0001.dat
>>> nextfile('blk0001.dat')
'blk0002.dat'
>>> try: nextfile('blk.dat')
... except: pass
>>> nextfile('00041')
'00042'
''' | try:
match = re.compile(pattern).match(filename).groupdict()
except AttributeError as match_failed:
raise ValueError('No numeric pattern found in {}'.format(filename))
newnumber = '{number:0{width}}'.format(
number=int(match['number']) + 1,
width=len(match['number']))
filename = match['prefix'] + newnumber + match['suffix']
return os.path.join(directory, filename) if directory else filename
def nextblock(blockfiles=None, minblock=0, maxblock=sys.maxsize):
'''
generator that fetches and returns raw blocks out of blockfiles
'''
minheight, maxheight = int(minblock), int(maxblock)
height = 0
reversemagic = dict([[value, key] for key, value in MAGIC.items()])
blockfiles = blockfiles or DEFAULT
for blockfile in blockfiles:
magic = ''
index = 0
logging.debug('blockfile "%s" of blockfiles %s', blockfile, blockfiles)
with open(blockfile, 'rb') as datainput:
blockdata = datainput.read() # not necessarily very efficient
logging.warning('NOTE: "height" values shown are relative'
' to start of first file and may include'
' orphaned blocks')
while index < len(blockdata):
logging.debug('blockparser at index %d out of %d bytes',
index, len(blockdata))
magic = blockdata[index:index + 4]
blocksize = struct.unpack('<L', blockdata[index + 4:index + 8])[0]
blockheader = blockdata[index + 8:index + 88]
transactions = blockdata[index + 88:index + blocksize + 8]
index += blocksize + 8
if minheight <= height <= maxheight:
logging.debug('height: %d', height)
logging.debug('magic: %s', binascii.b2a_hex(magic))
logging.debug('block type: %s', reversemagic.get(
magic, 'unknown'))
logging.debug('block size: %d', blocksize)
logging.debug('block header: %r', blockheader)
logging.debug('transactions (partial): %r', transactions[:80])
yield (height, blockheader, transactions)
elif height > maxheight:
logging.debug('height %d > maxheight %d', height, maxheight)
break # still executes `height += 1` below!
else:
logging.debug('height: %d', height)
height += 1
logging.debug('height: %d, maxheight: %d', height, maxheight)
if height > maxheight:
break
def parse(blockfiles=None, minblock=0, maxblock=sys.maxsize):
'''
dump out block files
'''
minheight, maxheight = int(minblock), int(maxblock)
logging.debug('minheight: %d, maxheight: %d', minheight, maxheight)
height = 0
reversemagic = dict([[value, key] for key, value in MAGIC.items()])
blockfiles = blockfiles or DEFAULT
# if file was specified on commandline, make it into a list
for blockfile in blockfiles:
magic = ''
index = 0
with open(blockfile, 'rb') as datainput:
blockdata = datainput.read() # not necessarily very efficient
logging.warning('NOTE: "height" values shown are relative'
' to start of first file and may include'
' orphaned blocks')
while index < len(blockdata):
logging.debug('blockparser at index %d out of %d bytes',
index, len(blockdata))
magic = blockdata[index:index + 4]
blocksize = struct.unpack('<L', blockdata[index + 4:index + 8])[0]
blockheader = blockdata[index + 8:index + 88]
transactions = blockdata[index + 88:index + blocksize + 8]
index += blocksize + 8
if minheight <= height <= maxheight:
logging.info('height: %d', height)
logging.debug('magic: %s', binascii.b2a_hex(magic))
logging.info('block type: %s', reversemagic.get(
magic, 'unknown'))
logging.info('block size: %d', blocksize)
logging.info('block header: %r', blockheader)
parse_blockheader(blockheader)
logging.info('transactions (partial): %r', transactions[:80])
count, data = parse_transactions(transactions)
logging.info('transaction count: %d', count)
logging.debug('remaining data (partial): %r', data[:80])
elif height > maxheight:
logging.debug('height %d > maxheight %d', height, maxheight)
break # still executes `height += 1` below!
else:
logging.debug('height: %d', height)
height += 1
logging.debug('height: %d, maxheight: %d', height, maxheight)
if height > maxheight:
break
def parse_blockheader(blockheader):
'''
return contents of block header
'''
version = blockheader[:4]
previous = blockheader[4:36]
merkle_root = blockheader[36:68]
unix_time = blockheader[68:72]
nbits = blockheader[72:76]
nonce = blockheader[76:]
blockhash = get_hash(blockheader)
if len(nonce) != 4:
raise ValueError('Nonce wrong size: %d bytes' % len(nonce))
logging.info('block version: %s', show_long(version))
logging.info('previous block hash: %s', show_hash(previous))
logging.info('merkle root: %s', show_hash(merkle_root))
logging.info('unix time: %s', timestamp(unix_time))
logging.info('nbits: %r', to_hex(nbits))
logging.info('nonce: %s', to_hex(nonce))
logging.info('block hash: %s', show_hash(blockhash))
return version, previous, merkle_root, unix_time, nbits, nonce, blockhash
def to_long(bytestring):
'''
for unpacking 8, 16, 32, or 64-bit number
'''
return struct.unpack(UNPACKER[(len(bytestring))], bytestring)[0]
def show_long(bytestring):
'''
for displaying 32-bit number
'''
number = to_long(bytestring)
return '0x%08x (%d)' % (number, number)
def timestamp(bytestring):
'''
for displaying 32-bit number as UTC time
'''
return datetime.utcfromtimestamp(to_long(bytestring)).isoformat()
def to_hex(bytestring):
'''
for displaying bytes in hexadecimal
the str() and .decode() stuff is necessary to get an unadorned string
in both Python2 and Python3
to_hex('\x01\xff')
'ff01'
'''
logging.debug('to_hex bytestring: %r', bytestring)
return str(binascii.b2a_hex(bytestring).decode('utf8'))
def get_hash(bytestring, repeat=2):
'''
return sha256 hash digest of bytestring
default is to return hash of hash; for simple hash, pass `repeat=1`
'''
for iteration in range(repeat):
bytestring = hashlib.sha256(bytestring).digest()
return bytestring
def show_hash(bytestring):
'''
return a sha256 hash, or any other bytestring, reversed and hexlified
'''
return to_hex(bytestring[::-1])
def parse_transactions(data):
'''
return parsed transaction length and transactions
'''
transactions = []
rawcount, count, data = get_count(data)
for index in range(count):
raw_transaction, transaction, data = parse_transaction(data)
transactions.append(raw_transaction)
return count, data
def next_transaction(blockfiles=None, minblock=0, maxblock=sys.maxsize):
'''
iterates over each transaction in every input block
'''
logging.debug('blockfiles: %s', blockfiles)
blockfiles = blockfiles or DEFAULT
blocks = nextblock(blockfiles, minblock, maxblock)
for height, header, transactions in blocks:
rawcount, count, data = get_count(transactions)
for index in range(count):
raw_transaction, transaction, data = parse_transaction(data)
txhash = get_hash(raw_transaction)
yield height, txhash, transaction
class Node(object):
'''
tree node
'''
def __init__(self, parent=None, blockhash=None, blocktime=''):
self.parent = parent
self.blockhash = blockhash
self.blocktime = blocktime
def countback(self, searchblock=NULLBLOCK):
r'''
return list of nodes that ends with this block
if attempting to get "height", caller is responsible to zero-base
the result, counting the genesis block as height 0
>>> node = Node(None, NULLBLOCK) # not a real node
>>> node = Node(node, b'\0') # height 0, genesis block
>>> node = Node(node, b'\1') # height 1
>>> node = Node(node, b'\2') # height 2
>>> len(node.countback())
3
>>> len(node.countback(b'\0'))
2
>>> try:
... node.countback(None)
... except AttributeError:
... print('failed')
failed
'''
traversed = [self]
parent = self.parent
while parent.blockhash != searchblock:
#logging.debug('parent.blockhash: %s', show_hash(parent.blockhash))
traversed.insert(0, parent)
parent = parent.parent
return traversed
def __str__(self):
return "{'Node': {'hash': '%s', 'timestamp': '%s'}}" % (
show_hash(self.blockhash),
self.blocktime)
__repr__ = __str__
def reorder(blockfiles=None, minblock=0, maxblock=sys.maxsize):
'''
removes orphan blocks and corrects height
'''
logging.debug('blockfiles: %s', blockfiles)
blockfiles = blockfiles or DEFAULT
blocks = nextblock(blockfiles, minblock, maxblock)
lastnode = Node(None, NULLBLOCK)
chains = [[lastnode]]
logging.debug('chains: %s', chains)
chain = 0
for height, header, transactions in blocks:
parsed = parse_blockheader(header)
previous, blockhash = parsed[1], parsed[6]
blocktime = timestamp(parsed[3])
if previous != lastnode.blockhash:
logging.warning('reorder at block %s',
Node(None, blockhash, blocktime))
logging.debug('previous block should be: %s', show_hash(previous))
logging.info('lastnode: %s', lastnode)
found, count = None, 0
try:
logging.debug('assuming previous block in this same chain')
nodes = lastnode.countback(previous)
found = nodes[0].parent
logging.info('reorder found %s %d blocks back',
found, len(nodes) + 1)
chain = len(chains)
chains.append([])
except AttributeError:
logging.debug('searching other chains')
for chain in reversed(chains):
node = chain[-1]
if node.blockhash == previous:
logging.info('reorder found %s at end of another chain',
found)
found = node
chain = chains.index(chain)
for chain in reversed(chains):
found = ([node for node in chain
if node.blockhash == previous] + [None])[0]
if found is not None:
logging.info('reorder found %s in another chain',
found)
chain = len(chains)
chains.append([])
break
if found is None:
raise ValueError('Previous block %s not found', previous)
else:
lastnode = found
# sanity check on above programming
assert_true(previous == lastnode.blockhash)
node = Node(lastnode, blockhash, blocktime)
chains[chain].append(node)
logging.info('current chain: %d out of %d', chain, len(chains))
lastnode = node
nodes = chains[chain][-1].countback()
logging.info('final [real] height: %d out of %d', len(nodes) - 1, height)
print(nodes)
def parse_transaction(data):
'''
return parsed transaction
'''
version = data[:4]
raw_transaction = version
logging.info('transaction version: %s', show_long(version))
raw_in_count, in_count, data = get_count(data[4:])
logging.info('number of transaction inputs: %d', in_count)
raw_inputs, inputs, data = parse_inputs(in_count, data)
logging.debug('length of data after parse_inputs: %d', len(data))
raw_out_count, out_count, data = get_count(data)
logging.info('number of transaction outputs: %d', out_count)
raw_outputs, outputs, data = parse_outputs(out_count, data)
logging.debug('length of data after parse_outputs: %d', len(data))
raw_transaction += (raw_in_count + b''.join(raw_inputs) +
raw_out_count + b''.join(raw_outputs))
lock_time, data = data[:4], data[4:]
raw_transaction += lock_time
logging.info('lock time: %s', to_hex(lock_time))
logging.debug('raw transaction (%d bytes): %s',
len(raw_transaction), to_hex(raw_transaction))
transaction = [version, raw_in_count, inputs, raw_out_count,
outputs, lock_time]
logging.debug('raw transaction split: %s', transaction)
logging.info('transaction hash: %s', show_hash(get_hash(raw_transaction)))
return raw_transaction, transaction, data
def parse_inputs(count, data):
'''
return transaction inputs
'''
raw_inputs = []
inputs = []
for index in range(count):
logging.debug('parse_inputs: len(data): %d', len(data))
tx_input, input_split, data = parse_input(data)
raw_inputs.append(tx_input)
inputs.append(input_split)
return raw_inputs, inputs, data
def parse_outputs(count, data):
'''
return transaction outputs
'''
raw_outputs = []
outputs = []
for index in range(count):
tx_output, output_split, data = parse_output(data)
raw_outputs.append(tx_output)
outputs.append(output_split)
return raw_outputs, outputs, data
def parse_input(data):
'''
parse and return a single transaction input
'''
logging.debug('parse_input: len(data): %d', len(data))
previous_hash = data[:32]
logging.info('txin previous txout hash: %s', show_hash(previous_hash))
previous_index = data[32:36]
raw_input = data[:36]
logging.info('txin previous txout index: %s', show_long(previous_index))
raw_length, script_length, data = get_count(data[36:])
raw_input += raw_length
logging.debug('script_length: %d', script_length)
script, data = data[:script_length], data[script_length:]
raw_input += script
logging.info('txin script: %r', script)
sequence = data[:4]
logging.info('txin sequence number: %s', show_long(sequence))
raw_input += sequence
split_input = [previous_hash, previous_index, raw_length, script, sequence]
return raw_input, split_input, data[4:]
def parse_output(data):
'''
parse and return a single transaction output
'''
logging.debug('first part of output: %s', to_hex(data[:256]))
raw_output = raw_amount = data[:8]
value = to_long(raw_amount)
logging.info('txout value: %.8f', value / 100000000)
# script probably broken if amount is very high
if __debug__ and value > 100000000000000:
raise ValueError('Unusual value, is script broken?')
raw_length, script_length, data = get_count(data[8:])
script, data = data[:script_length], data[script_length:]
logging.info('txout script: %r', script)
raw_output += raw_length + script
output = [raw_amount, raw_length, script]
return raw_output, output, data
def get_count(data):
r'''
extract and decode VarInt count and return it with remainder of data
# the following failed (got 253) before VARINT dict was corrected
>>> get_count(b'\xfd@\x01\x04\xe3v@\x05\x99')[1]
320
>>> get_count(b'\xfdP\x01D\x87\x1c\x00\x00\x00')[1]
336
'''
logging.debug('get_count: next 9 data bytes: %r', data[:9])
packing, offset, length = VARINT.get(data[0], ('B', 0, 1))
logging.debug('packing: %s, offset: %d, length: %d',
packing, offset, length)
count = struct.unpack(packing, data[offset:offset + length])[0]
raw_count, data = data[:offset + length], data[offset + length:]
logging.debug('length of data after get_count: %d', len(data))
return raw_count, count, data
def varint_length(data):
r'''
create new VarInt count of raw data
>>> repr(varint_length('\0' * 512)).endswith("'\\xfd\\x00\\x02'")
True
'''
length = len(data)
if length < 0xfd:
return bytes([length])
elif length <= 0xffff:
return b'\xfd' + struct.pack('<H', length)
elif length <= 0xffffffff:
return b'\xfe' + struct.pack('<L', length)
else: # will throw struct.error if above quad range
return b'\xff' + struct.pack('<Q', length)
# make sure assertions work even if optimized
try:
assert 1 == 0 # check if running optimized
# the above would have raised an AssertionError if not
def assert_true(statement):
if not statement:
raise AssertionError
except AssertionError:
def assert_true(statement):
assert(statement)
if __name__ == '__main__':
blockparse = parse
COMMAND = os.path.splitext(os.path.split(sys.argv[0])[1])[0]
BLOCKFILES = [sys.argv[1]] if len(sys.argv) > 1 else DEFAULT
eval(COMMAND)(BLOCKFILES, *sys.argv[2:]) | pattern = r'^(?P<prefix>[^0-9]*)(?P<number>[0-9]+)(?P<suffix>[^0-9]*)$'
directory, filename = os.path.split(filename) | random_line_split |
main.rs | /*
This tool is part of the WhiteboxTools geospatial analysis library.
Authors: Daniel Newman
Created: 22/06/2020
Last Modified: 22/06/2020
License: MIT
*/
use whitebox_raster::*;
use nalgebra::{Matrix5, RowVector5, Vector5};
use num_cpus;
use std::env;
use std::f64;
use std::io::{Error, ErrorKind};
use std::path;
use std::sync::mpsc;
use std::sync::Arc;
use std::thread;
use std::time::Instant;
use whitebox_common::utils::get_formatted_elapsed_time;
/// This tool is an implementation of the constrained quadratic regression algorithm
/// using a flexible window size described in Wood (1996). A quadratic surface is fit
/// to local areas of input DEM (`--dem`), defined by a filter size
/// (`--filter`) using least squares regression. Note that the model is constrained such
/// that it must pass through the cell at the center of the filter. This is accomplished
/// by representing all elevations relative to the center cell, and by making the equation
/// constant 0.
///
/// Surface derivatives are calculated from the coefficients of the local quadratic
/// surface once they are known. These include: Slope, Aspect, Profile convexity, Plan convexity,
/// Longitudinal curvature, Cross-sectional curvature, and Minimum profile convexity,
/// all as defined in Wood (1996). The goodness-of-fit (r-squared) of each local quadratic
/// model is also returned.
///
/// Due to the fact that large filter sizes require long processing times, and that
/// fitting the surface is the most time consuming part of the algorithm, all LSPs are
/// output every time this tool is run. The content of each output is described by the suffixes
/// of the output file names.
///
/// # Reference
/// Wood, J. (1996). The Geomorphological Characterisation of Digital Elevation Models. University
/// of Leicester.
///
/// # See Also
/// `Aspect`, `Slope`, `PlanCurvature`, `ProfileCurvature`
fn main() {
let args: Vec<String> = env::args().collect();
if args[1].trim() == "run" {
match run(&args) {
Ok(_) => {}
Err(e) => panic!("{:?}", e),
}
}
if args.len() <= 1 || args[1].trim() == "help" {
// print help
help();
}
if args[1].trim() == "version" {
// print version information
version();
}
}
fn help() {
let mut ext = "";
if cfg!(target_os = "windows") {
ext = ".exe";
}
let exe_name = &format!("local_quadratic_regression{}", ext);
let sep: String = path::MAIN_SEPARATOR.to_string();
let s = r#"
local_quadratic_regression Help
This tool is an implementation of the constrained quadratic regression algorithm
using a flexible window size described in Wood (1996)
The following commands are recognized:
help Prints help information.
run Runs the tool.
version Prints the tool version information.
The following flags can be used with the 'run' command:
-d, --dem Name of the input DEM raster file.
-o, --output Name of the output raster file.
--filter Edge length of the filter kernel.
Input/output file names can be fully qualified, or can rely on the working directory contained in
the WhiteboxTools settings.json file.
Example Usage:
>> .*EXE_NAME run --dem=DEM.tif --output=out_ras.tif --filter=15
"#
.replace("*", &sep)
.replace("EXE_NAME", exe_name);
println!("{}", s);
}
fn version() {
const VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION");
println!(
"local_quadratic_regression v{} by Dr. John B. Lindsay (c) 2021.",
VERSION.unwrap_or("Unknown version")
);
}
fn get_tool_name() -> String {
String::from("LocalQuadraticRegression") // This should be camel case and is a reference to the tool name.
}
fn run(args: &Vec<String>) -> Result<(), std::io::Error> {
let tool_name = get_tool_name();
let sep: String = path::MAIN_SEPARATOR.to_string();
// Read in the environment variables and get the necessary values
let configurations = whitebox_common::configs::get_configs()?;
let mut working_directory = configurations.working_directory.clone();
if !working_directory.is_empty() && !working_directory.ends_with(&sep) {
working_directory += &sep;
}
let mut input_file = String::new();
let mut output_file = String::new();
let mut filter_size = 3usize;
if args.len() == 0 {
return Err(Error::new(
ErrorKind::InvalidInput,
"Tool run with no parameters.",
));
}
for i in 0..args.len() {
let mut arg = args[i].replace("\"", "");
arg = arg.replace("\'", "");
let cmd = arg.split("="); // in case an equals sign was used
let vec = cmd.collect::<Vec<&str>>();
let mut keyval = false;
if vec.len() > 1 {
keyval = true;
}
let flag_val = vec[0].to_lowercase().replace("--", "-");
if flag_val == "-d" || flag_val == "-dem" {
if keyval {
input_file = vec[1].to_string();
} else {
input_file = args[i + 1].to_string();
}
} else if flag_val == "-o" || flag_val == "-output" {
if keyval {
output_file = vec[1].to_string();
} else {
output_file = args[i + 1].to_string();
}
} else if flag_val == "-filter" {
if keyval {
filter_size = vec[1]
.to_string()
.parse::<f32>()
.expect(&format!("Error parsing {}", flag_val))
as usize;
} else {
filter_size = args[i + 1]
.to_string()
.parse::<f32>()
.expect(&format!("Error parsing {}", flag_val))
as usize;
}
}
}
if filter_size < 3 { filter_size = 3; }
// The filter dimensions must be odd numbers such that there is a middle pixel
if (filter_size as f64 / 2f64).floor() == (filter_size as f64 / 2f64) {
filter_size += 1;
}
if configurations.verbose_mode {
let welcome_len = format!("* Welcome to {} *", tool_name).len().max(28);
// 28 = length of the 'Powered by' by statement.
println!("{}", "*".repeat(welcome_len));
println!("* Welcome to {} {}*", tool_name, " ".repeat(welcome_len - 15 - tool_name.len()));
println!("* Powered by WhiteboxTools {}*", " ".repeat(welcome_len - 28));
println!("* www.whiteboxgeo.com {}*", " ".repeat(welcome_len - 23));
println!("{}", "*".repeat(welcome_len));
}
let sep: String = path::MAIN_SEPARATOR.to_string();
let mut progress: usize;
let mut old_progress: usize = 1;
if !input_file.contains(&sep) && !input_file.contains("/") {
input_file = format!("{}{}", working_directory, input_file);
}
if !output_file.contains(&sep) && !output_file.contains("/") {
output_file = format!("{}{}", working_directory, output_file);
}
if configurations.verbose_mode {
println!("Reading data...")
};
let input = Arc::new(Raster::new(&input_file, "r")?);
let rows = input.configs.rows as isize;
let columns = input.configs.columns as isize;
let nodata = input.configs.nodata;
let resolution = input.configs.resolution_x; // assume square
let path_parts: Vec<&str> = output_file.rsplitn(2, ".").collect();
let mut outputs: [Raster; 8] = [
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "Slp", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "Asp", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "ProC", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "PlaC", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "LonC", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "CrsC", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "PrCM", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "GoF", &path_parts[0]), &input)
];
let start = Instant::now();
// no weights simplifies matrices
let offset = (filter_size - 1) / 2;
let num_cells = filter_size * filter_size;
// determine filter offsets
let mut dx = vec![0isize; num_cells];
let mut dy = vec![0isize; num_cells];
let mut idx = 0usize;
for i in 0..filter_size {
for j in 0..filter_size {
dx[idx] = (j - offset) as isize;
dy[idx] = (i - offset) as isize;
idx += 1;
}
}
let num_procs = num_cpus::get() as isize;
let (tx, rx) = mpsc::channel();
for tid in 0..num_procs {
let input = input.clone();
let dx = dx.clone();
let dy = dy.clone();
let tx = tx.clone();
// let a_decomp = a_decomp.clone();
thread::spawn(move || {
let mut z: f64;
let mut zi: f64;
for row in (0..rows).filter(|r| r % num_procs == tid) {
let mut slopes = vec![nodata; columns as usize];
let mut aspects = vec![nodata; columns as usize];
let mut prof_cs = vec![nodata; columns as usize];
let mut plan_cs = vec![nodata; columns as usize];
let mut long_cs = vec![nodata; columns as usize];
let mut cross_cs = vec![nodata; columns as usize];
let mut procmin_cs = vec![nodata; columns as usize];
let mut gofs = vec![nodata; columns as usize];
for col in 0..columns {
z = input[(row, col)];
if z != nodata {
let (mut zx2, mut zy2, mut zxy, mut zx, mut zy, mut _zw) = (0f64,0f64,0f64,0f64,0f64,0f64);
let (mut x2, mut x2y2, mut x4) = (0f64, 0f64, 0f64);
let mut num_valid = 0usize;
let (mut z_pred, mut z_act): (f64, f64);
let (mut sum_x, mut sum_y, mut sum_xy, mut sum_xx, mut sum_yy) = (0f64, 0f64, 0f64, 0f64, 0f64);
let (r, n): (f64, f64);
let mut xs = vec![];
let mut ys = vec![]; | for c in 0..num_cells {
zi = input[((row + dy[c] as isize), (col + dx[c] as isize))];
if zi != nodata {
xs.push(dx[c] as f64 * resolution);
ys.push(dy[c] as f64 * resolution);
zs.push(zi - z); // elevation relative to center
num_valid += 1;
}
}
if num_valid >= 8 {//6 { // need at least six samples
// compute sums
for i in 0..num_valid {
zx2 += zs[i] * xs[i].powi(2);
zy2 += zs[i] * ys[i].powi(2);
zxy += zs[i] * xs[i] * ys[i];
zx += zs[i] * xs[i];
zy += zs[i] * ys[i];
_zw += zs[i];
x2 += xs[i].powi(2);
x2y2 += xs[i].powi(2) * ys[i].powi(2);
x4 += xs[i].powi(4);
}
let a = Matrix5::from_rows(&[
RowVector5::new(x4, x2y2, 0f64, 0f64, 0f64),
RowVector5::new(x2y2, x4, 0f64, 0f64, 0f64),
RowVector5::new(0f64,0f64,x2y2, 0f64, 0f64),
RowVector5::new(0f64, 0f64, 0f64, x2, 0f64),
RowVector5::new(0f64, 0f64, 0f64, 0f64, x2),
]);
let b = Vector5::new(zx2, zy2, zxy, zx, zy);
let fitted_surface = Quadratic2d::from_normals_origin(a, b);
for i in 0..num_valid {
z_act = zs[i];
sum_x += z_act;
sum_xx += z_act * z_act;
z_pred = fitted_surface.solve(xs[i], ys[i]);
sum_y += z_pred;
sum_yy += z_pred * z_pred;
sum_xy += z_act * z_pred;
}
n = num_valid as f64;
let noom = n * sum_xy - (sum_x * sum_y);
let den = (n * sum_xx - (sum_x * sum_x)).sqrt() * ((n * sum_yy - (sum_y * sum_y)).sqrt());
if noom == 0f64 || den == 0f64 {
r = 0f64;
} else {
r = noom / den;
}
slopes[col as usize] = fitted_surface.slope();
aspects[col as usize] = fitted_surface.aspect();
prof_cs[col as usize] = fitted_surface.profile_convexity();
plan_cs[col as usize] = fitted_surface.plan_convexity();
long_cs[col as usize] = fitted_surface.longitudinal_curvature();
cross_cs[col as usize] = fitted_surface.cross_sectional_curvature();
procmin_cs[col as usize] = fitted_surface.min_prof_convexity();
gofs[col as usize] = r * r;
}
}
}
tx.send(
(row,
slopes,
aspects,
prof_cs,
plan_cs,
long_cs,
cross_cs,
procmin_cs,
gofs)
).unwrap();
}
});
}
for row in 0..rows {
let data = rx.recv().expect("Error receiving data from thread.");
outputs[0].set_row_data(data.0, data.1);
outputs[1].set_row_data(data.0, data.2);
outputs[2].set_row_data(data.0, data.3);
outputs[3].set_row_data(data.0, data.4);
outputs[4].set_row_data(data.0, data.5);
outputs[5].set_row_data(data.0, data.6);
outputs[6].set_row_data(data.0, data.7);
outputs[7].set_row_data(data.0, data.8);
if configurations.verbose_mode {
progress = (100.0_f64 * row as f64 / (rows - 1) as f64) as usize;
if progress != old_progress {
println!("Performing analysis: {}%", progress);
old_progress = progress;
}
}
}
let elapsed_time = get_formatted_elapsed_time(start);
if configurations.verbose_mode {
println!("Saving data...")
};
for o in 0..outputs.len() {
outputs[o].configs.palette = "grey.plt".to_string();
outputs[o].add_metadata_entry(format!(
"Created by whitebox_tools\' {} tool",
tool_name
));
outputs[o].add_metadata_entry(format!("Input file: {}", input_file));
outputs[o].add_metadata_entry(format!("Elapsed Time (excluding I/O): {}", elapsed_time));
let _ = match outputs[o].write() {
Ok(_) => {
if configurations.verbose_mode {
println!("Output file {:?} written", o+1);
}
}
Err(e) => return Err(e),
};
}
if configurations.verbose_mode {
println!(
"{}",
&format!("Elapsed Time (excluding I/O): {}", elapsed_time)
);
}
Ok(())
}
// Equation of a 2d quadratic model:
// z(x,y) = ax^2 + by^2 + cxy + dx + ey + f
#[derive(Default, Clone, Copy)]
struct Quadratic2d {
a: f64,
b: f64,
c: f64,
d: f64,
e: f64,
f: f64
}
impl Quadratic2d {
fn new(a: f64, b: f64, c: f64, d: f64, e: f64, f: f64) -> Quadratic2d {
Quadratic2d {
a: a,
b: b,
c: c,
d: d,
e: e,
f: f
}
}
// solves a system of normal equations ax = b
// fn from_normal_equations(a: Matrix6<f64>, b: Vector6<f64>) -> Quadratic2d {
// let decomp = a.lu();
// if decomp.is_invertible() {
// let x = decomp.solve(&b).expect("Linear resolution failed.");
// Quadratic2d::new(
// *x.get(0).unwrap(), // a
// *x.get(1).unwrap(), // b
// *x.get(2).unwrap(), // c
// *x.get(3).unwrap(), // d
// *x.get(4).unwrap(), // e
// *x.get(5).unwrap() // f
// )
// } else {
// Quadratic2d::new(0f64,0f64,0f64,0f64,0f64,0f64)
// }
// }
fn from_normals_origin(a: Matrix5<f64>, b: Vector5<f64>) -> Quadratic2d {
let decomp = a.lu();
if decomp.is_invertible() {
let x = decomp.solve(&b).expect("Linear resolution failed.");
Quadratic2d::new(
*x.get(0).unwrap(), // a
*x.get(1).unwrap(), // b
*x.get(2).unwrap(), // c
*x.get(3).unwrap(), // d
*x.get(4).unwrap(), // e
0f64, //f
)
} else {
Quadratic2d::new(0f64,0f64,0f64,0f64,0f64,0f64)
}
}
// fn from_decomposed_normals(
// decomp: LU<f64, nalgebra::base::dimension::U6, nalgebra::base::dimension::U6>,
// b: Vector6<f64>
// ) -> Quadratic2d {
// if decomp.is_invertible() {
// let x = decomp.solve(&b).expect("Linear resolution fialed.");
// Quadratic2d::new(
// *x.get(0).unwrap(), // a
// *x.get(1).unwrap(), // b
// *x.get(2).unwrap(), // c
// *x.get(3).unwrap(), // d
// *x.get(4).unwrap(), // e
// *x.get(5).unwrap() // f
// )
// } else {
// Quadratic2d::new(0f64,0f64,0f64,0f64,0f64,0f64)
// }
// }
fn slope(&self) -> f64 {
// (self.a*self.a + self.b*self.b).sqrt().atan().to_degrees()
(self.d*self.d + self.e*self.e).sqrt().atan()
}
fn aspect(&self) -> f64 {
if self.e == 0f64 || self.d == 0f64 {
0f64
} else {
(self.e / self.d).atan()
}
}
fn profile_convexity(&self) -> f64 {
let nu = -200f64 * ((self.a*self.d*self.d) + (self.b*self.e*self.e) + (self.c*self.d*self.e));
let de = ((self.e*self.e) + (self.d*self.d)) * (1f64 + (self.d*self.d) + (self.e*self.e)).powf(1.5);
if nu == 0f64 || de == 0f64 {
0f64
} else {
nu / de
}
}
fn plan_convexity(&self) -> f64 {
let nu = 200f64 * ((self.b*self.d*self.d) + (self.a*self.e*self.e) - (self.c*self.d*self.e));
let de = ((self.e*self.e) + (self.d*self.d)).powf(1.5);
if nu == 0f64 || de == 0f64 {
0f64
} else {
nu / de
}
}
fn longitudinal_curvature(&self) -> f64 {
let nu = (self.a*self.d*self.d) + (self.b*self.e*self.e) + (self.c*self.d*self.e);
let de = (self.d*self.d) + (self.e*self.e);
if nu == 0f64 || de == 0f64 {
0f64
} else{
-2f64*(nu / de)
}
}
fn cross_sectional_curvature(&self) -> f64 {
let nu = (self.b*self.d*self.d) + (self.a*self.e*self.e) - (self.c*self.d*self.e);
let de = (self.d*self.d) + (self.e*self.e);
if nu == 0f64 || de == 0f64 {
0f64
} else{
-2f64*(nu / de)
}
}
// fn max_prof_convexity(&self) -> f64 {
// (self.a * -1f64) - self.b + ((self.a - self.b).powi(2) + (self.c * self.c)).sqrt()
// }
fn min_prof_convexity(&self) -> f64 {
(self.a * -1f64) - self.b - ((self.a - self.b).powi(2) + (self.c * self.c)).sqrt()
}
fn solve(&self, x: f64, y: f64) -> f64 {
// z(x,y) = ax^2 + by^2 + cxy + dx + ey + f
return (self.a*(x*x)) + (self.b*(y*y)) + (self.c*(x*y)) + (self.d*x) + (self.e*y) + self.f
}
} | let mut zs = vec![];
| random_line_split |
main.rs | /*
This tool is part of the WhiteboxTools geospatial analysis library.
Authors: Daniel Newman
Created: 22/06/2020
Last Modified: 22/06/2020
License: MIT
*/
use whitebox_raster::*;
use nalgebra::{Matrix5, RowVector5, Vector5};
use num_cpus;
use std::env;
use std::f64;
use std::io::{Error, ErrorKind};
use std::path;
use std::sync::mpsc;
use std::sync::Arc;
use std::thread;
use std::time::Instant;
use whitebox_common::utils::get_formatted_elapsed_time;
/// This tool is an implementation of the constrained quadratic regression algorithm
/// using a flexible window size described in Wood (1996). A quadratic surface is fit
/// to local areas of input DEM (`--dem`), defined by a filter size
/// (`--filter`) using least squares regression. Note that the model is constrained such
/// that it must pass through the cell at the center of the filter. This is accomplished
/// by representing all elevations relative to the center cell, and by making the equation
/// constant 0.
///
/// Surface derivatives are calculated from the coefficients of the local quadratic
/// surface once they are known. These include: Slope, Aspect, Profile convexity, Plan convexity,
/// Longitudinal curvature, Cross-sectional curvature, and Minimum profile convexity,
/// all as defined in Wood (1996). The goodness-of-fit (r-squared) of each local quadratic
/// model is also returned.
///
/// Due to the fact that large filter sizes require long processing times, and that
/// fitting the surface is the most time consuming part of the algorithm, all LSPs are
/// output every time this tool is run. The content of each output is described by the suffixes
/// of the output file names.
///
/// # Reference
/// Wood, J. (1996). The Geomorphological Characterisation of Digital Elevation Models. University
/// of Leicester.
///
/// # See Also
/// `Aspect`, `Slope`, `PlanCurvature`, `ProfileCurvature`
fn main() {
let args: Vec<String> = env::args().collect();
if args[1].trim() == "run" {
match run(&args) {
Ok(_) => {}
Err(e) => panic!("{:?}", e),
}
}
if args.len() <= 1 || args[1].trim() == "help" {
// print help
help();
}
if args[1].trim() == "version" {
// print version information
version();
}
}
fn help() {
let mut ext = "";
if cfg!(target_os = "windows") {
ext = ".exe";
}
let exe_name = &format!("local_quadratic_regression{}", ext);
let sep: String = path::MAIN_SEPARATOR.to_string();
let s = r#"
local_quadratic_regression Help
This tool is an implementation of the constrained quadratic regression algorithm
using a flexible window size described in Wood (1996)
The following commands are recognized:
help Prints help information.
run Runs the tool.
version Prints the tool version information.
The following flags can be used with the 'run' command:
-d, --dem Name of the input DEM raster file.
-o, --output Name of the output raster file.
--filter Edge length of the filter kernel.
Input/output file names can be fully qualified, or can rely on the working directory contained in
the WhiteboxTools settings.json file.
Example Usage:
>> .*EXE_NAME run --dem=DEM.tif --output=out_ras.tif --filter=15
"#
.replace("*", &sep)
.replace("EXE_NAME", exe_name);
println!("{}", s);
}
fn version() {
const VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION");
println!(
"local_quadratic_regression v{} by Dr. John B. Lindsay (c) 2021.",
VERSION.unwrap_or("Unknown version")
);
}
fn get_tool_name() -> String {
String::from("LocalQuadraticRegression") // This should be camel case and is a reference to the tool name.
}
fn run(args: &Vec<String>) -> Result<(), std::io::Error> {
let tool_name = get_tool_name();
let sep: String = path::MAIN_SEPARATOR.to_string();
// Read in the environment variables and get the necessary values
let configurations = whitebox_common::configs::get_configs()?;
let mut working_directory = configurations.working_directory.clone();
if !working_directory.is_empty() && !working_directory.ends_with(&sep) {
working_directory += &sep;
}
let mut input_file = String::new();
let mut output_file = String::new();
let mut filter_size = 3usize;
if args.len() == 0 {
return Err(Error::new(
ErrorKind::InvalidInput,
"Tool run with no parameters.",
));
}
for i in 0..args.len() {
let mut arg = args[i].replace("\"", "");
arg = arg.replace("\'", "");
let cmd = arg.split("="); // in case an equals sign was used
let vec = cmd.collect::<Vec<&str>>();
let mut keyval = false;
if vec.len() > 1 {
keyval = true;
}
let flag_val = vec[0].to_lowercase().replace("--", "-");
if flag_val == "-d" || flag_val == "-dem" {
if keyval {
input_file = vec[1].to_string();
} else {
input_file = args[i + 1].to_string();
}
} else if flag_val == "-o" || flag_val == "-output" {
if keyval {
output_file = vec[1].to_string();
} else {
output_file = args[i + 1].to_string();
}
} else if flag_val == "-filter" {
if keyval {
filter_size = vec[1]
.to_string()
.parse::<f32>()
.expect(&format!("Error parsing {}", flag_val))
as usize;
} else {
filter_size = args[i + 1]
.to_string()
.parse::<f32>()
.expect(&format!("Error parsing {}", flag_val))
as usize;
}
}
}
if filter_size < 3 { filter_size = 3; }
// The filter dimensions must be odd numbers such that there is a middle pixel
if (filter_size as f64 / 2f64).floor() == (filter_size as f64 / 2f64) {
filter_size += 1;
}
if configurations.verbose_mode {
let welcome_len = format!("* Welcome to {} *", tool_name).len().max(28);
// 28 = length of the 'Powered by' by statement.
println!("{}", "*".repeat(welcome_len));
println!("* Welcome to {} {}*", tool_name, " ".repeat(welcome_len - 15 - tool_name.len()));
println!("* Powered by WhiteboxTools {}*", " ".repeat(welcome_len - 28));
println!("* www.whiteboxgeo.com {}*", " ".repeat(welcome_len - 23));
println!("{}", "*".repeat(welcome_len));
}
let sep: String = path::MAIN_SEPARATOR.to_string();
let mut progress: usize;
let mut old_progress: usize = 1;
if !input_file.contains(&sep) && !input_file.contains("/") {
input_file = format!("{}{}", working_directory, input_file);
}
if !output_file.contains(&sep) && !output_file.contains("/") {
output_file = format!("{}{}", working_directory, output_file);
}
if configurations.verbose_mode {
println!("Reading data...")
};
let input = Arc::new(Raster::new(&input_file, "r")?);
let rows = input.configs.rows as isize;
let columns = input.configs.columns as isize;
let nodata = input.configs.nodata;
let resolution = input.configs.resolution_x; // assume square
let path_parts: Vec<&str> = output_file.rsplitn(2, ".").collect();
let mut outputs: [Raster; 8] = [
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "Slp", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "Asp", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "ProC", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "PlaC", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "LonC", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "CrsC", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "PrCM", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "GoF", &path_parts[0]), &input)
];
let start = Instant::now();
// no weights simplifies matrices
let offset = (filter_size - 1) / 2;
let num_cells = filter_size * filter_size;
// determine filter offsets
let mut dx = vec![0isize; num_cells];
let mut dy = vec![0isize; num_cells];
let mut idx = 0usize;
for i in 0..filter_size {
for j in 0..filter_size {
dx[idx] = (j - offset) as isize;
dy[idx] = (i - offset) as isize;
idx += 1;
}
}
let num_procs = num_cpus::get() as isize;
let (tx, rx) = mpsc::channel();
for tid in 0..num_procs {
let input = input.clone();
let dx = dx.clone();
let dy = dy.clone();
let tx = tx.clone();
// let a_decomp = a_decomp.clone();
thread::spawn(move || {
let mut z: f64;
let mut zi: f64;
for row in (0..rows).filter(|r| r % num_procs == tid) {
let mut slopes = vec![nodata; columns as usize];
let mut aspects = vec![nodata; columns as usize];
let mut prof_cs = vec![nodata; columns as usize];
let mut plan_cs = vec![nodata; columns as usize];
let mut long_cs = vec![nodata; columns as usize];
let mut cross_cs = vec![nodata; columns as usize];
let mut procmin_cs = vec![nodata; columns as usize];
let mut gofs = vec![nodata; columns as usize];
for col in 0..columns {
z = input[(row, col)];
if z != nodata {
let (mut zx2, mut zy2, mut zxy, mut zx, mut zy, mut _zw) = (0f64,0f64,0f64,0f64,0f64,0f64);
let (mut x2, mut x2y2, mut x4) = (0f64, 0f64, 0f64);
let mut num_valid = 0usize;
let (mut z_pred, mut z_act): (f64, f64);
let (mut sum_x, mut sum_y, mut sum_xy, mut sum_xx, mut sum_yy) = (0f64, 0f64, 0f64, 0f64, 0f64);
let (r, n): (f64, f64);
let mut xs = vec![];
let mut ys = vec![];
let mut zs = vec![];
for c in 0..num_cells {
zi = input[((row + dy[c] as isize), (col + dx[c] as isize))];
if zi != nodata {
xs.push(dx[c] as f64 * resolution);
ys.push(dy[c] as f64 * resolution);
zs.push(zi - z); // elevation relative to center
num_valid += 1;
}
}
if num_valid >= 8 {//6 { // need at least six samples
// compute sums
for i in 0..num_valid {
zx2 += zs[i] * xs[i].powi(2);
zy2 += zs[i] * ys[i].powi(2);
zxy += zs[i] * xs[i] * ys[i];
zx += zs[i] * xs[i];
zy += zs[i] * ys[i];
_zw += zs[i];
x2 += xs[i].powi(2);
x2y2 += xs[i].powi(2) * ys[i].powi(2);
x4 += xs[i].powi(4);
}
let a = Matrix5::from_rows(&[
RowVector5::new(x4, x2y2, 0f64, 0f64, 0f64),
RowVector5::new(x2y2, x4, 0f64, 0f64, 0f64),
RowVector5::new(0f64,0f64,x2y2, 0f64, 0f64),
RowVector5::new(0f64, 0f64, 0f64, x2, 0f64),
RowVector5::new(0f64, 0f64, 0f64, 0f64, x2),
]);
let b = Vector5::new(zx2, zy2, zxy, zx, zy);
let fitted_surface = Quadratic2d::from_normals_origin(a, b);
for i in 0..num_valid {
z_act = zs[i];
sum_x += z_act;
sum_xx += z_act * z_act;
z_pred = fitted_surface.solve(xs[i], ys[i]);
sum_y += z_pred;
sum_yy += z_pred * z_pred;
sum_xy += z_act * z_pred;
}
n = num_valid as f64;
let noom = n * sum_xy - (sum_x * sum_y);
let den = (n * sum_xx - (sum_x * sum_x)).sqrt() * ((n * sum_yy - (sum_y * sum_y)).sqrt());
if noom == 0f64 || den == 0f64 {
r = 0f64;
} else {
r = noom / den;
}
slopes[col as usize] = fitted_surface.slope();
aspects[col as usize] = fitted_surface.aspect();
prof_cs[col as usize] = fitted_surface.profile_convexity();
plan_cs[col as usize] = fitted_surface.plan_convexity();
long_cs[col as usize] = fitted_surface.longitudinal_curvature();
cross_cs[col as usize] = fitted_surface.cross_sectional_curvature();
procmin_cs[col as usize] = fitted_surface.min_prof_convexity();
gofs[col as usize] = r * r;
}
}
}
tx.send(
(row,
slopes,
aspects,
prof_cs,
plan_cs,
long_cs,
cross_cs,
procmin_cs,
gofs)
).unwrap();
}
});
}
for row in 0..rows {
let data = rx.recv().expect("Error receiving data from thread.");
outputs[0].set_row_data(data.0, data.1);
outputs[1].set_row_data(data.0, data.2);
outputs[2].set_row_data(data.0, data.3);
outputs[3].set_row_data(data.0, data.4);
outputs[4].set_row_data(data.0, data.5);
outputs[5].set_row_data(data.0, data.6);
outputs[6].set_row_data(data.0, data.7);
outputs[7].set_row_data(data.0, data.8);
if configurations.verbose_mode {
progress = (100.0_f64 * row as f64 / (rows - 1) as f64) as usize;
if progress != old_progress {
println!("Performing analysis: {}%", progress);
old_progress = progress;
}
}
}
let elapsed_time = get_formatted_elapsed_time(start);
if configurations.verbose_mode {
println!("Saving data...")
};
for o in 0..outputs.len() {
outputs[o].configs.palette = "grey.plt".to_string();
outputs[o].add_metadata_entry(format!(
"Created by whitebox_tools\' {} tool",
tool_name
));
outputs[o].add_metadata_entry(format!("Input file: {}", input_file));
outputs[o].add_metadata_entry(format!("Elapsed Time (excluding I/O): {}", elapsed_time));
let _ = match outputs[o].write() {
Ok(_) => {
if configurations.verbose_mode {
println!("Output file {:?} written", o+1);
}
}
Err(e) => return Err(e),
};
}
if configurations.verbose_mode {
println!(
"{}",
&format!("Elapsed Time (excluding I/O): {}", elapsed_time)
);
}
Ok(())
}
// Equation of a 2d quadratic model:
// z(x,y) = ax^2 + by^2 + cxy + dx + ey + f
#[derive(Default, Clone, Copy)]
struct Quadratic2d {
a: f64,
b: f64,
c: f64,
d: f64,
e: f64,
f: f64
}
impl Quadratic2d {
fn new(a: f64, b: f64, c: f64, d: f64, e: f64, f: f64) -> Quadratic2d {
Quadratic2d {
a: a,
b: b,
c: c,
d: d,
e: e,
f: f
}
}
// solves a system of normal equations ax = b
// fn from_normal_equations(a: Matrix6<f64>, b: Vector6<f64>) -> Quadratic2d {
// let decomp = a.lu();
// if decomp.is_invertible() {
// let x = decomp.solve(&b).expect("Linear resolution failed.");
// Quadratic2d::new(
// *x.get(0).unwrap(), // a
// *x.get(1).unwrap(), // b
// *x.get(2).unwrap(), // c
// *x.get(3).unwrap(), // d
// *x.get(4).unwrap(), // e
// *x.get(5).unwrap() // f
// )
// } else {
// Quadratic2d::new(0f64,0f64,0f64,0f64,0f64,0f64)
// }
// }
fn from_normals_origin(a: Matrix5<f64>, b: Vector5<f64>) -> Quadratic2d {
let decomp = a.lu();
if decomp.is_invertible() {
let x = decomp.solve(&b).expect("Linear resolution failed.");
Quadratic2d::new(
*x.get(0).unwrap(), // a
*x.get(1).unwrap(), // b
*x.get(2).unwrap(), // c
*x.get(3).unwrap(), // d
*x.get(4).unwrap(), // e
0f64, //f
)
} else {
Quadratic2d::new(0f64,0f64,0f64,0f64,0f64,0f64)
}
}
// fn from_decomposed_normals(
// decomp: LU<f64, nalgebra::base::dimension::U6, nalgebra::base::dimension::U6>,
// b: Vector6<f64>
// ) -> Quadratic2d {
// if decomp.is_invertible() {
// let x = decomp.solve(&b).expect("Linear resolution fialed.");
// Quadratic2d::new(
// *x.get(0).unwrap(), // a
// *x.get(1).unwrap(), // b
// *x.get(2).unwrap(), // c
// *x.get(3).unwrap(), // d
// *x.get(4).unwrap(), // e
// *x.get(5).unwrap() // f
// )
// } else {
// Quadratic2d::new(0f64,0f64,0f64,0f64,0f64,0f64)
// }
// }
fn slope(&self) -> f64 {
// (self.a*self.a + self.b*self.b).sqrt().atan().to_degrees()
(self.d*self.d + self.e*self.e).sqrt().atan()
}
fn aspect(&self) -> f64 {
if self.e == 0f64 || self.d == 0f64 {
0f64
} else {
(self.e / self.d).atan()
}
}
fn profile_convexity(&self) -> f64 {
let nu = -200f64 * ((self.a*self.d*self.d) + (self.b*self.e*self.e) + (self.c*self.d*self.e));
let de = ((self.e*self.e) + (self.d*self.d)) * (1f64 + (self.d*self.d) + (self.e*self.e)).powf(1.5);
if nu == 0f64 || de == 0f64 | else {
nu / de
}
}
fn plan_convexity(&self) -> f64 {
let nu = 200f64 * ((self.b*self.d*self.d) + (self.a*self.e*self.e) - (self.c*self.d*self.e));
let de = ((self.e*self.e) + (self.d*self.d)).powf(1.5);
if nu == 0f64 || de == 0f64 {
0f64
} else {
nu / de
}
}
fn longitudinal_curvature(&self) -> f64 {
let nu = (self.a*self.d*self.d) + (self.b*self.e*self.e) + (self.c*self.d*self.e);
let de = (self.d*self.d) + (self.e*self.e);
if nu == 0f64 || de == 0f64 {
0f64
} else{
-2f64*(nu / de)
}
}
fn cross_sectional_curvature(&self) -> f64 {
let nu = (self.b*self.d*self.d) + (self.a*self.e*self.e) - (self.c*self.d*self.e);
let de = (self.d*self.d) + (self.e*self.e);
if nu == 0f64 || de == 0f64 {
0f64
} else{
-2f64*(nu / de)
}
}
// fn max_prof_convexity(&self) -> f64 {
// (self.a * -1f64) - self.b + ((self.a - self.b).powi(2) + (self.c * self.c)).sqrt()
// }
fn min_prof_convexity(&self) -> f64 {
(self.a * -1f64) - self.b - ((self.a - self.b).powi(2) + (self.c * self.c)).sqrt()
}
fn solve(&self, x: f64, y: f64) -> f64 {
// z(x,y) = ax^2 + by^2 + cxy + dx + ey + f
return (self.a*(x*x)) + (self.b*(y*y)) + (self.c*(x*y)) + (self.d*x) + (self.e*y) + self.f
}
}
| {
0f64
} | conditional_block |
main.rs | /*
This tool is part of the WhiteboxTools geospatial analysis library.
Authors: Daniel Newman
Created: 22/06/2020
Last Modified: 22/06/2020
License: MIT
*/
use whitebox_raster::*;
use nalgebra::{Matrix5, RowVector5, Vector5};
use num_cpus;
use std::env;
use std::f64;
use std::io::{Error, ErrorKind};
use std::path;
use std::sync::mpsc;
use std::sync::Arc;
use std::thread;
use std::time::Instant;
use whitebox_common::utils::get_formatted_elapsed_time;
/// This tool is an implementation of the constrained quadratic regression algorithm
/// using a flexible window size described in Wood (1996). A quadratic surface is fit
/// to local areas of input DEM (`--dem`), defined by a filter size
/// (`--filter`) using least squares regression. Note that the model is constrained such
/// that it must pass through the cell at the center of the filter. This is accomplished
/// by representing all elevations relative to the center cell, and by making the equation
/// constant 0.
///
/// Surface derivatives are calculated from the coefficients of the local quadratic
/// surface once they are known. These include: Slope, Aspect, Profile convexity, Plan convexity,
/// Longitudinal curvature, Cross-sectional curvature, and Minimum profile convexity,
/// all as defined in Wood (1996). The goodness-of-fit (r-squared) of each local quadratic
/// model is also returned.
///
/// Due to the fact that large filter sizes require long processing times, and that
/// fitting the surface is the most time consuming part of the algorithm, all LSPs are
/// output every time this tool is run. The content of each output is described by the suffixes
/// of the output file names.
///
/// # Reference
/// Wood, J. (1996). The Geomorphological Characterisation of Digital Elevation Models. University
/// of Leicester.
///
/// # See Also
/// `Aspect`, `Slope`, `PlanCurvature`, `ProfileCurvature`
fn main() {
let args: Vec<String> = env::args().collect();
if args[1].trim() == "run" {
match run(&args) {
Ok(_) => {}
Err(e) => panic!("{:?}", e),
}
}
if args.len() <= 1 || args[1].trim() == "help" {
// print help
help();
}
if args[1].trim() == "version" {
// print version information
version();
}
}
fn help() {
let mut ext = "";
if cfg!(target_os = "windows") {
ext = ".exe";
}
let exe_name = &format!("local_quadratic_regression{}", ext);
let sep: String = path::MAIN_SEPARATOR.to_string();
let s = r#"
local_quadratic_regression Help
This tool is an implementation of the constrained quadratic regression algorithm
using a flexible window size described in Wood (1996)
The following commands are recognized:
help Prints help information.
run Runs the tool.
version Prints the tool version information.
The following flags can be used with the 'run' command:
-d, --dem Name of the input DEM raster file.
-o, --output Name of the output raster file.
--filter Edge length of the filter kernel.
Input/output file names can be fully qualified, or can rely on the working directory contained in
the WhiteboxTools settings.json file.
Example Usage:
>> .*EXE_NAME run --dem=DEM.tif --output=out_ras.tif --filter=15
"#
.replace("*", &sep)
.replace("EXE_NAME", exe_name);
println!("{}", s);
}
fn version() {
const VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION");
println!(
"local_quadratic_regression v{} by Dr. John B. Lindsay (c) 2021.",
VERSION.unwrap_or("Unknown version")
);
}
fn get_tool_name() -> String {
String::from("LocalQuadraticRegression") // This should be camel case and is a reference to the tool name.
}
fn run(args: &Vec<String>) -> Result<(), std::io::Error> {
let tool_name = get_tool_name();
let sep: String = path::MAIN_SEPARATOR.to_string();
// Read in the environment variables and get the necessary values
let configurations = whitebox_common::configs::get_configs()?;
let mut working_directory = configurations.working_directory.clone();
if !working_directory.is_empty() && !working_directory.ends_with(&sep) {
working_directory += &sep;
}
let mut input_file = String::new();
let mut output_file = String::new();
let mut filter_size = 3usize;
if args.len() == 0 {
return Err(Error::new(
ErrorKind::InvalidInput,
"Tool run with no parameters.",
));
}
for i in 0..args.len() {
let mut arg = args[i].replace("\"", "");
arg = arg.replace("\'", "");
let cmd = arg.split("="); // in case an equals sign was used
let vec = cmd.collect::<Vec<&str>>();
let mut keyval = false;
if vec.len() > 1 {
keyval = true;
}
let flag_val = vec[0].to_lowercase().replace("--", "-");
if flag_val == "-d" || flag_val == "-dem" {
if keyval {
input_file = vec[1].to_string();
} else {
input_file = args[i + 1].to_string();
}
} else if flag_val == "-o" || flag_val == "-output" {
if keyval {
output_file = vec[1].to_string();
} else {
output_file = args[i + 1].to_string();
}
} else if flag_val == "-filter" {
if keyval {
filter_size = vec[1]
.to_string()
.parse::<f32>()
.expect(&format!("Error parsing {}", flag_val))
as usize;
} else {
filter_size = args[i + 1]
.to_string()
.parse::<f32>()
.expect(&format!("Error parsing {}", flag_val))
as usize;
}
}
}
if filter_size < 3 { filter_size = 3; }
// The filter dimensions must be odd numbers such that there is a middle pixel
if (filter_size as f64 / 2f64).floor() == (filter_size as f64 / 2f64) {
filter_size += 1;
}
if configurations.verbose_mode {
let welcome_len = format!("* Welcome to {} *", tool_name).len().max(28);
// 28 = length of the 'Powered by' by statement.
println!("{}", "*".repeat(welcome_len));
println!("* Welcome to {} {}*", tool_name, " ".repeat(welcome_len - 15 - tool_name.len()));
println!("* Powered by WhiteboxTools {}*", " ".repeat(welcome_len - 28));
println!("* www.whiteboxgeo.com {}*", " ".repeat(welcome_len - 23));
println!("{}", "*".repeat(welcome_len));
}
let sep: String = path::MAIN_SEPARATOR.to_string();
let mut progress: usize;
let mut old_progress: usize = 1;
if !input_file.contains(&sep) && !input_file.contains("/") {
input_file = format!("{}{}", working_directory, input_file);
}
if !output_file.contains(&sep) && !output_file.contains("/") {
output_file = format!("{}{}", working_directory, output_file);
}
if configurations.verbose_mode {
println!("Reading data...")
};
let input = Arc::new(Raster::new(&input_file, "r")?);
let rows = input.configs.rows as isize;
let columns = input.configs.columns as isize;
let nodata = input.configs.nodata;
let resolution = input.configs.resolution_x; // assume square
let path_parts: Vec<&str> = output_file.rsplitn(2, ".").collect();
let mut outputs: [Raster; 8] = [
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "Slp", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "Asp", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "ProC", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "PlaC", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "LonC", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "CrsC", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "PrCM", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "GoF", &path_parts[0]), &input)
];
let start = Instant::now();
// no weights simplifies matrices
let offset = (filter_size - 1) / 2;
let num_cells = filter_size * filter_size;
// determine filter offsets
let mut dx = vec![0isize; num_cells];
let mut dy = vec![0isize; num_cells];
let mut idx = 0usize;
for i in 0..filter_size {
for j in 0..filter_size {
dx[idx] = (j - offset) as isize;
dy[idx] = (i - offset) as isize;
idx += 1;
}
}
let num_procs = num_cpus::get() as isize;
let (tx, rx) = mpsc::channel();
for tid in 0..num_procs {
let input = input.clone();
let dx = dx.clone();
let dy = dy.clone();
let tx = tx.clone();
// let a_decomp = a_decomp.clone();
thread::spawn(move || {
let mut z: f64;
let mut zi: f64;
for row in (0..rows).filter(|r| r % num_procs == tid) {
let mut slopes = vec![nodata; columns as usize];
let mut aspects = vec![nodata; columns as usize];
let mut prof_cs = vec![nodata; columns as usize];
let mut plan_cs = vec![nodata; columns as usize];
let mut long_cs = vec![nodata; columns as usize];
let mut cross_cs = vec![nodata; columns as usize];
let mut procmin_cs = vec![nodata; columns as usize];
let mut gofs = vec![nodata; columns as usize];
for col in 0..columns {
z = input[(row, col)];
if z != nodata {
let (mut zx2, mut zy2, mut zxy, mut zx, mut zy, mut _zw) = (0f64,0f64,0f64,0f64,0f64,0f64);
let (mut x2, mut x2y2, mut x4) = (0f64, 0f64, 0f64);
let mut num_valid = 0usize;
let (mut z_pred, mut z_act): (f64, f64);
let (mut sum_x, mut sum_y, mut sum_xy, mut sum_xx, mut sum_yy) = (0f64, 0f64, 0f64, 0f64, 0f64);
let (r, n): (f64, f64);
let mut xs = vec![];
let mut ys = vec![];
let mut zs = vec![];
for c in 0..num_cells {
zi = input[((row + dy[c] as isize), (col + dx[c] as isize))];
if zi != nodata {
xs.push(dx[c] as f64 * resolution);
ys.push(dy[c] as f64 * resolution);
zs.push(zi - z); // elevation relative to center
num_valid += 1;
}
}
if num_valid >= 8 {//6 { // need at least six samples
// compute sums
for i in 0..num_valid {
zx2 += zs[i] * xs[i].powi(2);
zy2 += zs[i] * ys[i].powi(2);
zxy += zs[i] * xs[i] * ys[i];
zx += zs[i] * xs[i];
zy += zs[i] * ys[i];
_zw += zs[i];
x2 += xs[i].powi(2);
x2y2 += xs[i].powi(2) * ys[i].powi(2);
x4 += xs[i].powi(4);
}
let a = Matrix5::from_rows(&[
RowVector5::new(x4, x2y2, 0f64, 0f64, 0f64),
RowVector5::new(x2y2, x4, 0f64, 0f64, 0f64),
RowVector5::new(0f64,0f64,x2y2, 0f64, 0f64),
RowVector5::new(0f64, 0f64, 0f64, x2, 0f64),
RowVector5::new(0f64, 0f64, 0f64, 0f64, x2),
]);
let b = Vector5::new(zx2, zy2, zxy, zx, zy);
let fitted_surface = Quadratic2d::from_normals_origin(a, b);
for i in 0..num_valid {
z_act = zs[i];
sum_x += z_act;
sum_xx += z_act * z_act;
z_pred = fitted_surface.solve(xs[i], ys[i]);
sum_y += z_pred;
sum_yy += z_pred * z_pred;
sum_xy += z_act * z_pred;
}
n = num_valid as f64;
let noom = n * sum_xy - (sum_x * sum_y);
let den = (n * sum_xx - (sum_x * sum_x)).sqrt() * ((n * sum_yy - (sum_y * sum_y)).sqrt());
if noom == 0f64 || den == 0f64 {
r = 0f64;
} else {
r = noom / den;
}
slopes[col as usize] = fitted_surface.slope();
aspects[col as usize] = fitted_surface.aspect();
prof_cs[col as usize] = fitted_surface.profile_convexity();
plan_cs[col as usize] = fitted_surface.plan_convexity();
long_cs[col as usize] = fitted_surface.longitudinal_curvature();
cross_cs[col as usize] = fitted_surface.cross_sectional_curvature();
procmin_cs[col as usize] = fitted_surface.min_prof_convexity();
gofs[col as usize] = r * r;
}
}
}
tx.send(
(row,
slopes,
aspects,
prof_cs,
plan_cs,
long_cs,
cross_cs,
procmin_cs,
gofs)
).unwrap();
}
});
}
for row in 0..rows {
let data = rx.recv().expect("Error receiving data from thread.");
outputs[0].set_row_data(data.0, data.1);
outputs[1].set_row_data(data.0, data.2);
outputs[2].set_row_data(data.0, data.3);
outputs[3].set_row_data(data.0, data.4);
outputs[4].set_row_data(data.0, data.5);
outputs[5].set_row_data(data.0, data.6);
outputs[6].set_row_data(data.0, data.7);
outputs[7].set_row_data(data.0, data.8);
if configurations.verbose_mode {
progress = (100.0_f64 * row as f64 / (rows - 1) as f64) as usize;
if progress != old_progress {
println!("Performing analysis: {}%", progress);
old_progress = progress;
}
}
}
let elapsed_time = get_formatted_elapsed_time(start);
if configurations.verbose_mode {
println!("Saving data...")
};
for o in 0..outputs.len() {
outputs[o].configs.palette = "grey.plt".to_string();
outputs[o].add_metadata_entry(format!(
"Created by whitebox_tools\' {} tool",
tool_name
));
outputs[o].add_metadata_entry(format!("Input file: {}", input_file));
outputs[o].add_metadata_entry(format!("Elapsed Time (excluding I/O): {}", elapsed_time));
let _ = match outputs[o].write() {
Ok(_) => {
if configurations.verbose_mode {
println!("Output file {:?} written", o+1);
}
}
Err(e) => return Err(e),
};
}
if configurations.verbose_mode {
println!(
"{}",
&format!("Elapsed Time (excluding I/O): {}", elapsed_time)
);
}
Ok(())
}
// Equation of a 2d quadratic model:
// z(x,y) = ax^2 + by^2 + cxy + dx + ey + f
#[derive(Default, Clone, Copy)]
struct Quadratic2d {
a: f64,
b: f64,
c: f64,
d: f64,
e: f64,
f: f64
}
impl Quadratic2d {
fn new(a: f64, b: f64, c: f64, d: f64, e: f64, f: f64) -> Quadratic2d {
Quadratic2d {
a: a,
b: b,
c: c,
d: d,
e: e,
f: f
}
}
// solves a system of normal equations ax = b
// fn from_normal_equations(a: Matrix6<f64>, b: Vector6<f64>) -> Quadratic2d {
// let decomp = a.lu();
// if decomp.is_invertible() {
// let x = decomp.solve(&b).expect("Linear resolution failed.");
// Quadratic2d::new(
// *x.get(0).unwrap(), // a
// *x.get(1).unwrap(), // b
// *x.get(2).unwrap(), // c
// *x.get(3).unwrap(), // d
// *x.get(4).unwrap(), // e
// *x.get(5).unwrap() // f
// )
// } else {
// Quadratic2d::new(0f64,0f64,0f64,0f64,0f64,0f64)
// }
// }
fn from_normals_origin(a: Matrix5<f64>, b: Vector5<f64>) -> Quadratic2d {
let decomp = a.lu();
if decomp.is_invertible() {
let x = decomp.solve(&b).expect("Linear resolution failed.");
Quadratic2d::new(
*x.get(0).unwrap(), // a
*x.get(1).unwrap(), // b
*x.get(2).unwrap(), // c
*x.get(3).unwrap(), // d
*x.get(4).unwrap(), // e
0f64, //f
)
} else {
Quadratic2d::new(0f64,0f64,0f64,0f64,0f64,0f64)
}
}
// fn from_decomposed_normals(
// decomp: LU<f64, nalgebra::base::dimension::U6, nalgebra::base::dimension::U6>,
// b: Vector6<f64>
// ) -> Quadratic2d {
// if decomp.is_invertible() {
// let x = decomp.solve(&b).expect("Linear resolution fialed.");
// Quadratic2d::new(
// *x.get(0).unwrap(), // a
// *x.get(1).unwrap(), // b
// *x.get(2).unwrap(), // c
// *x.get(3).unwrap(), // d
// *x.get(4).unwrap(), // e
// *x.get(5).unwrap() // f
// )
// } else {
// Quadratic2d::new(0f64,0f64,0f64,0f64,0f64,0f64)
// }
// }
fn slope(&self) -> f64 {
// (self.a*self.a + self.b*self.b).sqrt().atan().to_degrees()
(self.d*self.d + self.e*self.e).sqrt().atan()
}
fn aspect(&self) -> f64 {
if self.e == 0f64 || self.d == 0f64 {
0f64
} else {
(self.e / self.d).atan()
}
}
fn profile_convexity(&self) -> f64 {
let nu = -200f64 * ((self.a*self.d*self.d) + (self.b*self.e*self.e) + (self.c*self.d*self.e));
let de = ((self.e*self.e) + (self.d*self.d)) * (1f64 + (self.d*self.d) + (self.e*self.e)).powf(1.5);
if nu == 0f64 || de == 0f64 {
0f64
} else {
nu / de
}
}
fn plan_convexity(&self) -> f64 {
let nu = 200f64 * ((self.b*self.d*self.d) + (self.a*self.e*self.e) - (self.c*self.d*self.e));
let de = ((self.e*self.e) + (self.d*self.d)).powf(1.5);
if nu == 0f64 || de == 0f64 {
0f64
} else {
nu / de
}
}
fn longitudinal_curvature(&self) -> f64 {
let nu = (self.a*self.d*self.d) + (self.b*self.e*self.e) + (self.c*self.d*self.e);
let de = (self.d*self.d) + (self.e*self.e);
if nu == 0f64 || de == 0f64 {
0f64
} else{
-2f64*(nu / de)
}
}
fn cross_sectional_curvature(&self) -> f64 {
let nu = (self.b*self.d*self.d) + (self.a*self.e*self.e) - (self.c*self.d*self.e);
let de = (self.d*self.d) + (self.e*self.e);
if nu == 0f64 || de == 0f64 {
0f64
} else{
-2f64*(nu / de)
}
}
// fn max_prof_convexity(&self) -> f64 {
// (self.a * -1f64) - self.b + ((self.a - self.b).powi(2) + (self.c * self.c)).sqrt()
// }
fn | (&self) -> f64 {
(self.a * -1f64) - self.b - ((self.a - self.b).powi(2) + (self.c * self.c)).sqrt()
}
fn solve(&self, x: f64, y: f64) -> f64 {
// z(x,y) = ax^2 + by^2 + cxy + dx + ey + f
return (self.a*(x*x)) + (self.b*(y*y)) + (self.c*(x*y)) + (self.d*x) + (self.e*y) + self.f
}
}
| min_prof_convexity | identifier_name |
main.rs | /*
This tool is part of the WhiteboxTools geospatial analysis library.
Authors: Daniel Newman
Created: 22/06/2020
Last Modified: 22/06/2020
License: MIT
*/
use whitebox_raster::*;
use nalgebra::{Matrix5, RowVector5, Vector5};
use num_cpus;
use std::env;
use std::f64;
use std::io::{Error, ErrorKind};
use std::path;
use std::sync::mpsc;
use std::sync::Arc;
use std::thread;
use std::time::Instant;
use whitebox_common::utils::get_formatted_elapsed_time;
/// This tool is an implementation of the constrained quadratic regression algorithm
/// using a flexible window size described in Wood (1996). A quadratic surface is fit
/// to local areas of input DEM (`--dem`), defined by a filter size
/// (`--filter`) using least squares regression. Note that the model is constrained such
/// that it must pass through the cell at the center of the filter. This is accomplished
/// by representing all elevations relative to the center cell, and by making the equation
/// constant 0.
///
/// Surface derivatives are calculated from the coefficients of the local quadratic
/// surface once they are known. These include: Slope, Aspect, Profile convexity, Plan convexity,
/// Longitudinal curvature, Cross-sectional curvature, and Minimum profile convexity,
/// all as defined in Wood (1996). The goodness-of-fit (r-squared) of each local quadratic
/// model is also returned.
///
/// Due to the fact that large filter sizes require long processing times, and that
/// fitting the surface is the most time consuming part of the algorithm, all LSPs are
/// output every time this tool is run. The content of each output is described by the suffixes
/// of the output file names.
///
/// # Reference
/// Wood, J. (1996). The Geomorphological Characterisation of Digital Elevation Models. University
/// of Leicester.
///
/// # See Also
/// `Aspect`, `Slope`, `PlanCurvature`, `ProfileCurvature`
fn main() {
let args: Vec<String> = env::args().collect();
if args[1].trim() == "run" {
match run(&args) {
Ok(_) => {}
Err(e) => panic!("{:?}", e),
}
}
if args.len() <= 1 || args[1].trim() == "help" {
// print help
help();
}
if args[1].trim() == "version" {
// print version information
version();
}
}
fn help() {
let mut ext = "";
if cfg!(target_os = "windows") {
ext = ".exe";
}
let exe_name = &format!("local_quadratic_regression{}", ext);
let sep: String = path::MAIN_SEPARATOR.to_string();
let s = r#"
local_quadratic_regression Help
This tool is an implementation of the constrained quadratic regression algorithm
using a flexible window size described in Wood (1996)
The following commands are recognized:
help Prints help information.
run Runs the tool.
version Prints the tool version information.
The following flags can be used with the 'run' command:
-d, --dem Name of the input DEM raster file.
-o, --output Name of the output raster file.
--filter Edge length of the filter kernel.
Input/output file names can be fully qualified, or can rely on the working directory contained in
the WhiteboxTools settings.json file.
Example Usage:
>> .*EXE_NAME run --dem=DEM.tif --output=out_ras.tif --filter=15
"#
.replace("*", &sep)
.replace("EXE_NAME", exe_name);
println!("{}", s);
}
fn version() {
const VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION");
println!(
"local_quadratic_regression v{} by Dr. John B. Lindsay (c) 2021.",
VERSION.unwrap_or("Unknown version")
);
}
fn get_tool_name() -> String {
String::from("LocalQuadraticRegression") // This should be camel case and is a reference to the tool name.
}
fn run(args: &Vec<String>) -> Result<(), std::io::Error> |
// Equation of a 2d quadratic model:
// z(x,y) = ax^2 + by^2 + cxy + dx + ey + f
#[derive(Default, Clone, Copy)]
struct Quadratic2d {
a: f64,
b: f64,
c: f64,
d: f64,
e: f64,
f: f64
}
impl Quadratic2d {
fn new(a: f64, b: f64, c: f64, d: f64, e: f64, f: f64) -> Quadratic2d {
Quadratic2d {
a: a,
b: b,
c: c,
d: d,
e: e,
f: f
}
}
// solves a system of normal equations ax = b
// fn from_normal_equations(a: Matrix6<f64>, b: Vector6<f64>) -> Quadratic2d {
// let decomp = a.lu();
// if decomp.is_invertible() {
// let x = decomp.solve(&b).expect("Linear resolution failed.");
// Quadratic2d::new(
// *x.get(0).unwrap(), // a
// *x.get(1).unwrap(), // b
// *x.get(2).unwrap(), // c
// *x.get(3).unwrap(), // d
// *x.get(4).unwrap(), // e
// *x.get(5).unwrap() // f
// )
// } else {
// Quadratic2d::new(0f64,0f64,0f64,0f64,0f64,0f64)
// }
// }
fn from_normals_origin(a: Matrix5<f64>, b: Vector5<f64>) -> Quadratic2d {
let decomp = a.lu();
if decomp.is_invertible() {
let x = decomp.solve(&b).expect("Linear resolution failed.");
Quadratic2d::new(
*x.get(0).unwrap(), // a
*x.get(1).unwrap(), // b
*x.get(2).unwrap(), // c
*x.get(3).unwrap(), // d
*x.get(4).unwrap(), // e
0f64, //f
)
} else {
Quadratic2d::new(0f64,0f64,0f64,0f64,0f64,0f64)
}
}
// fn from_decomposed_normals(
// decomp: LU<f64, nalgebra::base::dimension::U6, nalgebra::base::dimension::U6>,
// b: Vector6<f64>
// ) -> Quadratic2d {
// if decomp.is_invertible() {
// let x = decomp.solve(&b).expect("Linear resolution fialed.");
// Quadratic2d::new(
// *x.get(0).unwrap(), // a
// *x.get(1).unwrap(), // b
// *x.get(2).unwrap(), // c
// *x.get(3).unwrap(), // d
// *x.get(4).unwrap(), // e
// *x.get(5).unwrap() // f
// )
// } else {
// Quadratic2d::new(0f64,0f64,0f64,0f64,0f64,0f64)
// }
// }
fn slope(&self) -> f64 {
// (self.a*self.a + self.b*self.b).sqrt().atan().to_degrees()
(self.d*self.d + self.e*self.e).sqrt().atan()
}
fn aspect(&self) -> f64 {
if self.e == 0f64 || self.d == 0f64 {
0f64
} else {
(self.e / self.d).atan()
}
}
fn profile_convexity(&self) -> f64 {
let nu = -200f64 * ((self.a*self.d*self.d) + (self.b*self.e*self.e) + (self.c*self.d*self.e));
let de = ((self.e*self.e) + (self.d*self.d)) * (1f64 + (self.d*self.d) + (self.e*self.e)).powf(1.5);
if nu == 0f64 || de == 0f64 {
0f64
} else {
nu / de
}
}
fn plan_convexity(&self) -> f64 {
let nu = 200f64 * ((self.b*self.d*self.d) + (self.a*self.e*self.e) - (self.c*self.d*self.e));
let de = ((self.e*self.e) + (self.d*self.d)).powf(1.5);
if nu == 0f64 || de == 0f64 {
0f64
} else {
nu / de
}
}
fn longitudinal_curvature(&self) -> f64 {
let nu = (self.a*self.d*self.d) + (self.b*self.e*self.e) + (self.c*self.d*self.e);
let de = (self.d*self.d) + (self.e*self.e);
if nu == 0f64 || de == 0f64 {
0f64
} else{
-2f64*(nu / de)
}
}
fn cross_sectional_curvature(&self) -> f64 {
let nu = (self.b*self.d*self.d) + (self.a*self.e*self.e) - (self.c*self.d*self.e);
let de = (self.d*self.d) + (self.e*self.e);
if nu == 0f64 || de == 0f64 {
0f64
} else{
-2f64*(nu / de)
}
}
// fn max_prof_convexity(&self) -> f64 {
// (self.a * -1f64) - self.b + ((self.a - self.b).powi(2) + (self.c * self.c)).sqrt()
// }
fn min_prof_convexity(&self) -> f64 {
(self.a * -1f64) - self.b - ((self.a - self.b).powi(2) + (self.c * self.c)).sqrt()
}
fn solve(&self, x: f64, y: f64) -> f64 {
// z(x,y) = ax^2 + by^2 + cxy + dx + ey + f
return (self.a*(x*x)) + (self.b*(y*y)) + (self.c*(x*y)) + (self.d*x) + (self.e*y) + self.f
}
}
| {
let tool_name = get_tool_name();
let sep: String = path::MAIN_SEPARATOR.to_string();
// Read in the environment variables and get the necessary values
let configurations = whitebox_common::configs::get_configs()?;
let mut working_directory = configurations.working_directory.clone();
if !working_directory.is_empty() && !working_directory.ends_with(&sep) {
working_directory += &sep;
}
let mut input_file = String::new();
let mut output_file = String::new();
let mut filter_size = 3usize;
if args.len() == 0 {
return Err(Error::new(
ErrorKind::InvalidInput,
"Tool run with no parameters.",
));
}
for i in 0..args.len() {
let mut arg = args[i].replace("\"", "");
arg = arg.replace("\'", "");
let cmd = arg.split("="); // in case an equals sign was used
let vec = cmd.collect::<Vec<&str>>();
let mut keyval = false;
if vec.len() > 1 {
keyval = true;
}
let flag_val = vec[0].to_lowercase().replace("--", "-");
if flag_val == "-d" || flag_val == "-dem" {
if keyval {
input_file = vec[1].to_string();
} else {
input_file = args[i + 1].to_string();
}
} else if flag_val == "-o" || flag_val == "-output" {
if keyval {
output_file = vec[1].to_string();
} else {
output_file = args[i + 1].to_string();
}
} else if flag_val == "-filter" {
if keyval {
filter_size = vec[1]
.to_string()
.parse::<f32>()
.expect(&format!("Error parsing {}", flag_val))
as usize;
} else {
filter_size = args[i + 1]
.to_string()
.parse::<f32>()
.expect(&format!("Error parsing {}", flag_val))
as usize;
}
}
}
if filter_size < 3 { filter_size = 3; }
// The filter dimensions must be odd numbers such that there is a middle pixel
if (filter_size as f64 / 2f64).floor() == (filter_size as f64 / 2f64) {
filter_size += 1;
}
if configurations.verbose_mode {
let welcome_len = format!("* Welcome to {} *", tool_name).len().max(28);
// 28 = length of the 'Powered by' by statement.
println!("{}", "*".repeat(welcome_len));
println!("* Welcome to {} {}*", tool_name, " ".repeat(welcome_len - 15 - tool_name.len()));
println!("* Powered by WhiteboxTools {}*", " ".repeat(welcome_len - 28));
println!("* www.whiteboxgeo.com {}*", " ".repeat(welcome_len - 23));
println!("{}", "*".repeat(welcome_len));
}
let sep: String = path::MAIN_SEPARATOR.to_string();
let mut progress: usize;
let mut old_progress: usize = 1;
if !input_file.contains(&sep) && !input_file.contains("/") {
input_file = format!("{}{}", working_directory, input_file);
}
if !output_file.contains(&sep) && !output_file.contains("/") {
output_file = format!("{}{}", working_directory, output_file);
}
if configurations.verbose_mode {
println!("Reading data...")
};
let input = Arc::new(Raster::new(&input_file, "r")?);
let rows = input.configs.rows as isize;
let columns = input.configs.columns as isize;
let nodata = input.configs.nodata;
let resolution = input.configs.resolution_x; // assume square
let path_parts: Vec<&str> = output_file.rsplitn(2, ".").collect();
let mut outputs: [Raster; 8] = [
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "Slp", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "Asp", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "ProC", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "PlaC", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "LonC", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "CrsC", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "PrCM", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "GoF", &path_parts[0]), &input)
];
let start = Instant::now();
// no weights simplifies matrices
let offset = (filter_size - 1) / 2;
let num_cells = filter_size * filter_size;
// determine filter offsets
let mut dx = vec![0isize; num_cells];
let mut dy = vec![0isize; num_cells];
let mut idx = 0usize;
for i in 0..filter_size {
for j in 0..filter_size {
dx[idx] = (j - offset) as isize;
dy[idx] = (i - offset) as isize;
idx += 1;
}
}
let num_procs = num_cpus::get() as isize;
let (tx, rx) = mpsc::channel();
for tid in 0..num_procs {
let input = input.clone();
let dx = dx.clone();
let dy = dy.clone();
let tx = tx.clone();
// let a_decomp = a_decomp.clone();
thread::spawn(move || {
let mut z: f64;
let mut zi: f64;
for row in (0..rows).filter(|r| r % num_procs == tid) {
let mut slopes = vec![nodata; columns as usize];
let mut aspects = vec![nodata; columns as usize];
let mut prof_cs = vec![nodata; columns as usize];
let mut plan_cs = vec![nodata; columns as usize];
let mut long_cs = vec![nodata; columns as usize];
let mut cross_cs = vec![nodata; columns as usize];
let mut procmin_cs = vec![nodata; columns as usize];
let mut gofs = vec![nodata; columns as usize];
for col in 0..columns {
z = input[(row, col)];
if z != nodata {
let (mut zx2, mut zy2, mut zxy, mut zx, mut zy, mut _zw) = (0f64,0f64,0f64,0f64,0f64,0f64);
let (mut x2, mut x2y2, mut x4) = (0f64, 0f64, 0f64);
let mut num_valid = 0usize;
let (mut z_pred, mut z_act): (f64, f64);
let (mut sum_x, mut sum_y, mut sum_xy, mut sum_xx, mut sum_yy) = (0f64, 0f64, 0f64, 0f64, 0f64);
let (r, n): (f64, f64);
let mut xs = vec![];
let mut ys = vec![];
let mut zs = vec![];
for c in 0..num_cells {
zi = input[((row + dy[c] as isize), (col + dx[c] as isize))];
if zi != nodata {
xs.push(dx[c] as f64 * resolution);
ys.push(dy[c] as f64 * resolution);
zs.push(zi - z); // elevation relative to center
num_valid += 1;
}
}
if num_valid >= 8 {//6 { // need at least six samples
// compute sums
for i in 0..num_valid {
zx2 += zs[i] * xs[i].powi(2);
zy2 += zs[i] * ys[i].powi(2);
zxy += zs[i] * xs[i] * ys[i];
zx += zs[i] * xs[i];
zy += zs[i] * ys[i];
_zw += zs[i];
x2 += xs[i].powi(2);
x2y2 += xs[i].powi(2) * ys[i].powi(2);
x4 += xs[i].powi(4);
}
let a = Matrix5::from_rows(&[
RowVector5::new(x4, x2y2, 0f64, 0f64, 0f64),
RowVector5::new(x2y2, x4, 0f64, 0f64, 0f64),
RowVector5::new(0f64,0f64,x2y2, 0f64, 0f64),
RowVector5::new(0f64, 0f64, 0f64, x2, 0f64),
RowVector5::new(0f64, 0f64, 0f64, 0f64, x2),
]);
let b = Vector5::new(zx2, zy2, zxy, zx, zy);
let fitted_surface = Quadratic2d::from_normals_origin(a, b);
for i in 0..num_valid {
z_act = zs[i];
sum_x += z_act;
sum_xx += z_act * z_act;
z_pred = fitted_surface.solve(xs[i], ys[i]);
sum_y += z_pred;
sum_yy += z_pred * z_pred;
sum_xy += z_act * z_pred;
}
n = num_valid as f64;
let noom = n * sum_xy - (sum_x * sum_y);
let den = (n * sum_xx - (sum_x * sum_x)).sqrt() * ((n * sum_yy - (sum_y * sum_y)).sqrt());
if noom == 0f64 || den == 0f64 {
r = 0f64;
} else {
r = noom / den;
}
slopes[col as usize] = fitted_surface.slope();
aspects[col as usize] = fitted_surface.aspect();
prof_cs[col as usize] = fitted_surface.profile_convexity();
plan_cs[col as usize] = fitted_surface.plan_convexity();
long_cs[col as usize] = fitted_surface.longitudinal_curvature();
cross_cs[col as usize] = fitted_surface.cross_sectional_curvature();
procmin_cs[col as usize] = fitted_surface.min_prof_convexity();
gofs[col as usize] = r * r;
}
}
}
tx.send(
(row,
slopes,
aspects,
prof_cs,
plan_cs,
long_cs,
cross_cs,
procmin_cs,
gofs)
).unwrap();
}
});
}
for row in 0..rows {
let data = rx.recv().expect("Error receiving data from thread.");
outputs[0].set_row_data(data.0, data.1);
outputs[1].set_row_data(data.0, data.2);
outputs[2].set_row_data(data.0, data.3);
outputs[3].set_row_data(data.0, data.4);
outputs[4].set_row_data(data.0, data.5);
outputs[5].set_row_data(data.0, data.6);
outputs[6].set_row_data(data.0, data.7);
outputs[7].set_row_data(data.0, data.8);
if configurations.verbose_mode {
progress = (100.0_f64 * row as f64 / (rows - 1) as f64) as usize;
if progress != old_progress {
println!("Performing analysis: {}%", progress);
old_progress = progress;
}
}
}
let elapsed_time = get_formatted_elapsed_time(start);
if configurations.verbose_mode {
println!("Saving data...")
};
for o in 0..outputs.len() {
outputs[o].configs.palette = "grey.plt".to_string();
outputs[o].add_metadata_entry(format!(
"Created by whitebox_tools\' {} tool",
tool_name
));
outputs[o].add_metadata_entry(format!("Input file: {}", input_file));
outputs[o].add_metadata_entry(format!("Elapsed Time (excluding I/O): {}", elapsed_time));
let _ = match outputs[o].write() {
Ok(_) => {
if configurations.verbose_mode {
println!("Output file {:?} written", o+1);
}
}
Err(e) => return Err(e),
};
}
if configurations.verbose_mode {
println!(
"{}",
&format!("Elapsed Time (excluding I/O): {}", elapsed_time)
);
}
Ok(())
} | identifier_body |
Login.ts | import { basic, thread, thread as th, reflection, Common, ScopicCommand, bind, Api, net, encoding, Notification, collection } from "../lib/Q/sys/Corelib"
import { AI } from '../lib/q/sys/AI';
import { db } from '../lib/q/sys/db';
import { UI } from '../lib/q/sys/UI';
import { Controller, sdata } from '../lib/q/sys/System';
import { models } from "../abstract/Models";
//import { GetVars } from '../Desktop/Common';
//import { InitModule } from '../Desktop/Common';
//import { basics } from './extra/Basics';
import { eServices } from '../abstract/Services';
import { defs } from '../lib/q/sys/defs';
import { InitModule, GetVars } from "../abstract/extra/Common";
import { basics } from "../abstract/extra/Basics";
import { Components } from "../lib/q/sys/Components";
var key;
__global.ApiServer = new System.basics.Url(envirenment.isChromeApp ? 'http://127.0.0.1/': location.origin);
var onSigninStatChanged = new bind.EventListener<(v: boolean) => void>(key = Math.random() * 2099837662);
var GData: basics.vars;
GetVars((v) => { GData = v; return !true; });
declare var __LOCALSAVE__;
export namespace Apps {
interface CSVParserParam<T> {
name: string;
csvIndex: number;
prop: bind.DProperty<any, T>
}
export class AuthentificationApp extends UI.AuthApp {
public get RedirectApp() { return this.redirectApp; }
public set RedirectApp(v) { }
private _signupPage: UI.Page;
private _loginPage: UI.Page;
private user: models.Login;
constructor(private redirectApp: defs.UI.IApp) {
super(key,onSigninStatChanged);
window['auth'] = this;
GData.user.OnMessage(
(s: bind.PropBinding, ev: bind.EventArgs<boolean, models.Login>) => onSigninStatChanged.Invoke(key, [ev._new]));
onSigninStatChanged.On = (v) => {
if (v)
AuthentificationApp.Download();
else this.fullInitialize();
this.OnStatStatChanged.PInvok(key, [this, v]);
};
}
public Is | >(callback: (v: boolean, param: T) => void, param: T) {
callback(GData.user.IsLogged, param);
}
private createSignupPage() {
var p = new UI.Page(this, 'Signup', 'Signup');
p.OnInitialized = p => p.Add(new UI.TControl('Client.signup', GData.user));
this.AddPage(this._signupPage = p);
}
private createLoginPage() {
var p = new UI.Page(this, 'Login', 'Login');
p.OnInitialized = p => p.Add(new UI.TControl('Client.login', GData.user));
this.AddPage(this._loginPage = p);
}
private auth = thread.Dispatcher.cretaeJob(this.Login, [], this, true);
private autoAuth = thread.Dispatcher.cretaeJob(this._AutoLogin, [], this, true);
private AutoLogin() {
var ident =basic.Settings.get("Identification");
GData.user.Identification = ident;
thread.Dispatcher.Push(this.autoAuth);
}
private Login() {
var isl = GData.user.IsLogged;
Api.RiseApi("Auth", {
callback: (c, p) => {
if (!p || !GData.user.IsLogged) this.fullInitialize();
this.OnStatStatChanged.PInvok(key, [this, p]);
}, data: this
});
}
private _AutoLogin() {
var isl = GData.user.IsLogged;
Api.RiseApi("autoAuth", {
callback: (c, p) => {
if (!p || !GData.user.IsLogged) this.fullInitialize();
this.OnStatStatChanged.PInvok(key, [this, p]);
}, data: this
});
}
private static dx;
public static Download() {
Notification.fire('ReLoadUserSetting', [models.UserSetting.Default]);
if (this.dx) {
GData.spin.Pause(); return;
}
this.dx = true;
GData.__data.Clear();
Api.RiseApi('log', { callback: null, data: null });
GData.requester.Push(models.IsAdmin, new models.IsAdmin(), null, (s, r, iss) => {
GData.spin.Start("Wait a moment");
GData.requester.Push(models.Categories, GData.__data.Categories, null, (d, r) => { GData.spin.Message = "Categories"; GData.spin.Start("Wait a moment"); });
if (typeof __LOCALSAVE__ !== 'undefined')
GData.db.Get('Products').table.LoadTableFromDB(GData.__data.Products, () => {
GData.apis.Product.SmartUpdate(new Date(GData.db.Get('Products').info.LastUpdate || 0));
});
else {
GData.requester.Request(models.Products, "GETCSV", null, null, (pd, json, iss, req) => {
GData.__data.Products.FromCsv(req.Response);
});
//GData.requester.Push(models.Products, GData.__data.Products, null, (d, r) => { GData.spin.Message = "Products"; });
}
GData.requester.Push(models.Costumers, GData.__data.Costumers, null, (d, r) => { GData.spin.Message = "Costumers"; });
if (iss) GData.requester.Push(models.Agents, GData.__data.Agents, null, (d, r) => { GData.spin.Message = "Agents"; });
GData.requester.Push(models.IsAdmin, new models.IsAdmin(), null, (s, r, iss) => {
GData.spin.Pause();
Api.RiseApi('log', { callback: null, data: null });
});
});
}
public initialize() {
GData.requester.Request(models.IsSecured, "GET", undefined, undefined, (a, b, c) => {
__global.https = b as any;
this.AutoLogin();
});
}
private fullInitialize() {
if (this.finit) return;
if (this.IsInit) this._initialize();
else this.OnInitialized = t => t._initialize();
}
private _initialize() {
super.initialize();
this.finit = true;
this.createLoginPage();
this.createSignupPage();
initJobs.call(this);
this.SelectedPage = this._loginPage;
}
private finit: boolean;
public Signout() {
}
Logout() {
logout((islogout) => {
if (islogout) {
} else {
}
});
}
OnAttached() {
if (!this.isf) return this.isf = true;
this.fullInitialize();
}
isf;
OnDetached() {
}
}
}
function initJobs() {
bind.Register(new bind.Job('openlogin', null, null, null, (ji, e) => {
var dm = ji.dom;
dm.addEventListener('click', () => this.Open(this._loginPage))
}, null));
bind.Register(new bind.Job('login', null, null, null, (ji, e) => {
if (!GData.user.Client) GData.user.Client = new models.Client(0);
ji.dom.addEventListener('click', (() => { GData.spin.Start('login'); this.Login(); }).bind(ji))
}, null));
bind.Register(new bind.Job('opensignup', undefined, undefined, undefined, (ji, e) => {
var dm = ji.dom;
if (!GData.user.Client) GData.user.Client = new models.Client(0);
dm.addEventListener('click', () => {
this.Open(this._signupPage);
})
}, null));
bind.Register(new bind.Job('signup', () => {
}, null, null, (ji, e) => {
ji.addEventListener('click', 'click', (() => {
var t = ji.Scop;
var v = t.Value as models.Login;
v.ReGenerateEncPwd("eval(code)", v.Pwd);
GData.requester.Post(models.Signup, t.Value, null, (callback, p, iss) => {
if (iss)
var m = UI.Modal.ShowDialog('Signup', 'The Signup was successfully created .Please Send a message with your code to activate the account');
else {
}
});
}).bind(ji));
}, null));
bind.Register(new bind.Job('loggedjob', (ji) => {
var b = ji.Scop.Value as boolean;
var dm = ji.dom as HTMLElement;
if (b)
dm.innerText = 'YOU ARE LOGGED';
else {
dm.innerText = 'YOU ARE NOT LOGGED';
}
}, null, null, (j, e) => { }, null));
}
Api.RegisterApiCallback({
Name: "ReAuth", DoApiCallback: function (a, b, c) {
GData.spin.Start("Authenticating");
GData.user.Stat = 0;
GData.requester.Push(models.Login, GData.user, null, function (callback, s, iss) {
GData.spin.Pause();
if (iss) {
var login = callback.data as models.Login;
if (login.IsLogged) {
basic.Settings.set("Identification", login.Identification);
c.callback && c.callback(c, true);
return;
}
UI.InfoArea.push('<p class="text-center">Please Check Your <B>Password</B> AND <B>UserName</B></p>', false, 4000);
} else
UI.InfoArea.push('There no connection to server', false);
this.OnInitialized = (t) => t.fullInitialize();
c.callback && c.callback(c, false);
});
}, Owner: this
});
Api.RegisterApiCallback({
Name: "Auth", DoApiCallback: function (a, b, c) {
GData.user.Stat = 0;
function callback1(callback, s, iss) {
GData.spin.Pause();
if (iss) {
var login = callback.data as models.Login;
if (login.IsLogged) {
saveLoginData(login);
c.callback && c.callback(c, true);
return;
}
UI.InfoArea.push('<p class="text-center">Please Check Your <B>Password</B> AND <B>UserName</B></p>', false, 4000);
} else
UI.InfoArea.push('Error de connecter a server', false);
c.callback && c.callback(c, false);
}
GData.requester.Push(models.Login, GData.user, null, callback1);
}, Owner: this
});
function saveLoginData(login: models.Login) {
basic.Settings.set("Identification", login.Identification);
basic.Settings.set("LoginID", login.Id);
}
function loadLoginData(login: models.Login, alsoID: boolean) {
login.Identification = basic.Settings.get("Identification");
if (alsoID)
login.Id = basic.Settings.get("LoginID") || login.Id;
}
Api.RegisterApiCallback({
Name: "autoAuth", DoApiCallback: function (a, b, c) {
GData.user.Stat = 0;
function callback1(callback, s, iss) {
GData.spin.Pause();
if (iss) {
var login = callback.data as models.Login;
if (login.IsLogged) {
basic.Settings.set("Identification", login.Identification);
c.callback && c.callback(c, true);
return;
}
UI.InfoArea.push('<p class="text-center">Please Check Your <B>Password</B> AND <B>UserName</B></p>', false, 4000);
} else
UI.InfoArea.push('Error de connecter a server', false);
c.callback && c.callback(c, false);
}
GData.requester.Costume({ Method: 0, Url: '~checklogging' }, undefined, undefined, (e, rslt, succ, req) => {
if (rslt === true) {
GData.spin.Pause();
GData.user.FromJson({ CheckLogging: true }, encoding.SerializationContext.GlobalContext, true);
return;
}
loadLoginData(GData.user, true);
GData.requester.Request(models.Login, "AutoLogin", GData.user, GData.user as any,
(callback, s, iss) => {
if (iss) {
var login = callback.data as models.Login;
if (login.IsLogged) {
GData.spin.Pause();
saveLoginData(login);
c.callback && c.callback(c, true);
return;
}
}
GData.requester.Push(models.Login, GData.user, null, callback1);
});
}, undefined);
}, Owner: this
});
function logout(callback:(isLogout:boolean)=>void) {
GData.requester.Get(models.Login, GData.user, null, function(s, r: any, iss) {
if (!iss) {
callback(null);
} else
if (!GData.user.IsLogged) {
basic.Settings.set("Identification", "");
GData.user.Identification = undefined;
GData.user.Username = undefined;
GData.user.Pwd = undefined;
document.cookie = "id=;";
callback && callback(true);
} else {
//UI.Modal.ShowDialog("Signout failled !!!!", "Some thing happened when logout this session please contact the administrator if site", undefined, "Retry", "Cancel");
callback && callback(false);
}
}, function(r, t) {
r.Url = "/~Signout";
});
}
var lr;
declare type callback<T> = (thred: number, param: T) => void;
function myfunction<T>(onConnected: callback<T>, onSignOut: callback<T>, onConnectionLost: callback<T>, param: T) {
var intThread;
var c = new XMLHttpRequest();
var self = this;
c.onreadystatechange = function() {
if (this.readyState == 4)
if (this.status == 200 && this.responseText == "true")
if (this.responseText == "true")
return onConnected(intThread, param);
else if (this.responseText == "false")
return onSignOut(intThread, param);
else throw "Uknow stat";
else
onConnectionLost(intThread, param);
};
c.onerror = function() {
onConnectionLost(intThread, param);
};
intThread = setInterval(() => {
c.open('get', __global.GetApiAddress('/~CheckLogging'));
c.setRequestHeader('Access-Control-Allow-Origin', 'true');
try { c.send(); c.timeout = 10000; } catch (e) { }
}, 12000);
}
myfunction(
(t, p) => { },
(t, p) => { UI.Desktop.Current.OpenSignin(); },
(t, p) => { UI.Desktop.Current.OpenSignin(); },
this); | Logged<T | identifier_name |
Login.ts | import { basic, thread, thread as th, reflection, Common, ScopicCommand, bind, Api, net, encoding, Notification, collection } from "../lib/Q/sys/Corelib"
import { AI } from '../lib/q/sys/AI';
import { db } from '../lib/q/sys/db';
import { UI } from '../lib/q/sys/UI';
import { Controller, sdata } from '../lib/q/sys/System';
import { models } from "../abstract/Models";
//import { GetVars } from '../Desktop/Common';
//import { InitModule } from '../Desktop/Common';
//import { basics } from './extra/Basics';
import { eServices } from '../abstract/Services';
import { defs } from '../lib/q/sys/defs';
import { InitModule, GetVars } from "../abstract/extra/Common";
import { basics } from "../abstract/extra/Basics";
import { Components } from "../lib/q/sys/Components";
var key;
__global.ApiServer = new System.basics.Url(envirenment.isChromeApp ? 'http://127.0.0.1/': location.origin);
var onSigninStatChanged = new bind.EventListener<(v: boolean) => void>(key = Math.random() * 2099837662);
var GData: basics.vars;
GetVars((v) => { GData = v; return !true; });
declare var __LOCALSAVE__;
export namespace Apps {
interface CSVParserParam<T> {
name: string;
csvIndex: number;
prop: bind.DProperty<any, T>
}
export class AuthentificationApp extends UI.AuthApp {
public get RedirectApp() { return this.redirectApp; }
public set RedirectApp(v) { }
private _signupPage: UI.Page;
private _loginPage: UI.Page;
private user: models.Login;
constructor(private redirectApp: defs.UI.IApp) {
super(key,onSigninStatChanged);
window['auth'] = this;
GData.user.OnMessage(
(s: bind.PropBinding, ev: bind.EventArgs<boolean, models.Login>) => onSigninStatChanged.Invoke(key, [ev._new]));
onSigninStatChanged.On = (v) => {
if (v)
AuthentificationApp.Download();
else this.fullInitialize();
this.OnStatStatChanged.PInvok(key, [this, v]);
};
}
public IsLogged<T>(callback: (v: boolean, param: T) => void, param: T) {
callback(GData.user.IsLogged, param);
}
private createSignupPage() {
var p = new UI.Page(this, 'Signup', 'Signup');
p.OnInitialized = p => p.Add(new UI.TControl('Client.signup', GData.user));
this.AddPage(this._signupPage = p);
}
private createLoginPage() {
var p = new UI.Page(this, 'Login', 'Login');
p.OnInitialized = p => p.Add(new UI.TControl('Client.login', GData.user));
this.AddPage(this._loginPage = p);
}
private auth = thread.Dispatcher.cretaeJob(this.Login, [], this, true);
private autoAuth = thread.Dispatcher.cretaeJob(this._AutoLogin, [], this, true);
private AutoLogin() {
var ident =basic.Settings.get("Identification");
GData.user.Identification = ident;
thread.Dispatcher.Push(this.autoAuth);
}
private Login() {
var isl = GData.user.IsLogged;
Api.RiseApi("Auth", {
callback: (c, p) => {
if (!p || !GData.user.IsLogged) this.fullInitialize();
this.OnStatStatChanged.PInvok(key, [this, p]);
}, data: this
});
}
private _AutoLogin() {
var isl = GData.user.IsLogged;
Api.RiseApi("autoAuth", {
callback: (c, p) => {
if (!p || !GData.user.IsLogged) this.fullInitialize();
this.OnStatStatChanged.PInvok(key, [this, p]);
}, data: this
});
}
private static dx;
public static Download() { |
this.dx = true;
GData.__data.Clear();
Api.RiseApi('log', { callback: null, data: null });
GData.requester.Push(models.IsAdmin, new models.IsAdmin(), null, (s, r, iss) => {
GData.spin.Start("Wait a moment");
GData.requester.Push(models.Categories, GData.__data.Categories, null, (d, r) => { GData.spin.Message = "Categories"; GData.spin.Start("Wait a moment"); });
if (typeof __LOCALSAVE__ !== 'undefined')
GData.db.Get('Products').table.LoadTableFromDB(GData.__data.Products, () => {
GData.apis.Product.SmartUpdate(new Date(GData.db.Get('Products').info.LastUpdate || 0));
});
else {
GData.requester.Request(models.Products, "GETCSV", null, null, (pd, json, iss, req) => {
GData.__data.Products.FromCsv(req.Response);
});
//GData.requester.Push(models.Products, GData.__data.Products, null, (d, r) => { GData.spin.Message = "Products"; });
}
GData.requester.Push(models.Costumers, GData.__data.Costumers, null, (d, r) => { GData.spin.Message = "Costumers"; });
if (iss) GData.requester.Push(models.Agents, GData.__data.Agents, null, (d, r) => { GData.spin.Message = "Agents"; });
GData.requester.Push(models.IsAdmin, new models.IsAdmin(), null, (s, r, iss) => {
GData.spin.Pause();
Api.RiseApi('log', { callback: null, data: null });
});
});
}
public initialize() {
GData.requester.Request(models.IsSecured, "GET", undefined, undefined, (a, b, c) => {
__global.https = b as any;
this.AutoLogin();
});
}
private fullInitialize() {
if (this.finit) return;
if (this.IsInit) this._initialize();
else this.OnInitialized = t => t._initialize();
}
private _initialize() {
super.initialize();
this.finit = true;
this.createLoginPage();
this.createSignupPage();
initJobs.call(this);
this.SelectedPage = this._loginPage;
}
private finit: boolean;
public Signout() {
}
Logout() {
logout((islogout) => {
if (islogout) {
} else {
}
});
}
OnAttached() {
if (!this.isf) return this.isf = true;
this.fullInitialize();
}
isf;
OnDetached() {
}
}
}
function initJobs() {
bind.Register(new bind.Job('openlogin', null, null, null, (ji, e) => {
var dm = ji.dom;
dm.addEventListener('click', () => this.Open(this._loginPage))
}, null));
bind.Register(new bind.Job('login', null, null, null, (ji, e) => {
if (!GData.user.Client) GData.user.Client = new models.Client(0);
ji.dom.addEventListener('click', (() => { GData.spin.Start('login'); this.Login(); }).bind(ji))
}, null));
bind.Register(new bind.Job('opensignup', undefined, undefined, undefined, (ji, e) => {
var dm = ji.dom;
if (!GData.user.Client) GData.user.Client = new models.Client(0);
dm.addEventListener('click', () => {
this.Open(this._signupPage);
})
}, null));
bind.Register(new bind.Job('signup', () => {
}, null, null, (ji, e) => {
ji.addEventListener('click', 'click', (() => {
var t = ji.Scop;
var v = t.Value as models.Login;
v.ReGenerateEncPwd("eval(code)", v.Pwd);
GData.requester.Post(models.Signup, t.Value, null, (callback, p, iss) => {
if (iss)
var m = UI.Modal.ShowDialog('Signup', 'The Signup was successfully created .Please Send a message with your code to activate the account');
else {
}
});
}).bind(ji));
}, null));
bind.Register(new bind.Job('loggedjob', (ji) => {
var b = ji.Scop.Value as boolean;
var dm = ji.dom as HTMLElement;
if (b)
dm.innerText = 'YOU ARE LOGGED';
else {
dm.innerText = 'YOU ARE NOT LOGGED';
}
}, null, null, (j, e) => { }, null));
}
Api.RegisterApiCallback({
Name: "ReAuth", DoApiCallback: function (a, b, c) {
GData.spin.Start("Authenticating");
GData.user.Stat = 0;
GData.requester.Push(models.Login, GData.user, null, function (callback, s, iss) {
GData.spin.Pause();
if (iss) {
var login = callback.data as models.Login;
if (login.IsLogged) {
basic.Settings.set("Identification", login.Identification);
c.callback && c.callback(c, true);
return;
}
UI.InfoArea.push('<p class="text-center">Please Check Your <B>Password</B> AND <B>UserName</B></p>', false, 4000);
} else
UI.InfoArea.push('There no connection to server', false);
this.OnInitialized = (t) => t.fullInitialize();
c.callback && c.callback(c, false);
});
}, Owner: this
});
Api.RegisterApiCallback({
Name: "Auth", DoApiCallback: function (a, b, c) {
GData.user.Stat = 0;
function callback1(callback, s, iss) {
GData.spin.Pause();
if (iss) {
var login = callback.data as models.Login;
if (login.IsLogged) {
saveLoginData(login);
c.callback && c.callback(c, true);
return;
}
UI.InfoArea.push('<p class="text-center">Please Check Your <B>Password</B> AND <B>UserName</B></p>', false, 4000);
} else
UI.InfoArea.push('Error de connecter a server', false);
c.callback && c.callback(c, false);
}
GData.requester.Push(models.Login, GData.user, null, callback1);
}, Owner: this
});
function saveLoginData(login: models.Login) {
basic.Settings.set("Identification", login.Identification);
basic.Settings.set("LoginID", login.Id);
}
function loadLoginData(login: models.Login, alsoID: boolean) {
login.Identification = basic.Settings.get("Identification");
if (alsoID)
login.Id = basic.Settings.get("LoginID") || login.Id;
}
Api.RegisterApiCallback({
Name: "autoAuth", DoApiCallback: function (a, b, c) {
GData.user.Stat = 0;
function callback1(callback, s, iss) {
GData.spin.Pause();
if (iss) {
var login = callback.data as models.Login;
if (login.IsLogged) {
basic.Settings.set("Identification", login.Identification);
c.callback && c.callback(c, true);
return;
}
UI.InfoArea.push('<p class="text-center">Please Check Your <B>Password</B> AND <B>UserName</B></p>', false, 4000);
} else
UI.InfoArea.push('Error de connecter a server', false);
c.callback && c.callback(c, false);
}
GData.requester.Costume({ Method: 0, Url: '~checklogging' }, undefined, undefined, (e, rslt, succ, req) => {
if (rslt === true) {
GData.spin.Pause();
GData.user.FromJson({ CheckLogging: true }, encoding.SerializationContext.GlobalContext, true);
return;
}
loadLoginData(GData.user, true);
GData.requester.Request(models.Login, "AutoLogin", GData.user, GData.user as any,
(callback, s, iss) => {
if (iss) {
var login = callback.data as models.Login;
if (login.IsLogged) {
GData.spin.Pause();
saveLoginData(login);
c.callback && c.callback(c, true);
return;
}
}
GData.requester.Push(models.Login, GData.user, null, callback1);
});
}, undefined);
}, Owner: this
});
function logout(callback:(isLogout:boolean)=>void) {
GData.requester.Get(models.Login, GData.user, null, function(s, r: any, iss) {
if (!iss) {
callback(null);
} else
if (!GData.user.IsLogged) {
basic.Settings.set("Identification", "");
GData.user.Identification = undefined;
GData.user.Username = undefined;
GData.user.Pwd = undefined;
document.cookie = "id=;";
callback && callback(true);
} else {
//UI.Modal.ShowDialog("Signout failled !!!!", "Some thing happened when logout this session please contact the administrator if site", undefined, "Retry", "Cancel");
callback && callback(false);
}
}, function(r, t) {
r.Url = "/~Signout";
});
}
var lr;
declare type callback<T> = (thred: number, param: T) => void;
function myfunction<T>(onConnected: callback<T>, onSignOut: callback<T>, onConnectionLost: callback<T>, param: T) {
var intThread;
var c = new XMLHttpRequest();
var self = this;
c.onreadystatechange = function() {
if (this.readyState == 4)
if (this.status == 200 && this.responseText == "true")
if (this.responseText == "true")
return onConnected(intThread, param);
else if (this.responseText == "false")
return onSignOut(intThread, param);
else throw "Uknow stat";
else
onConnectionLost(intThread, param);
};
c.onerror = function() {
onConnectionLost(intThread, param);
};
intThread = setInterval(() => {
c.open('get', __global.GetApiAddress('/~CheckLogging'));
c.setRequestHeader('Access-Control-Allow-Origin', 'true');
try { c.send(); c.timeout = 10000; } catch (e) { }
}, 12000);
}
myfunction(
(t, p) => { },
(t, p) => { UI.Desktop.Current.OpenSignin(); },
(t, p) => { UI.Desktop.Current.OpenSignin(); },
this); | Notification.fire('ReLoadUserSetting', [models.UserSetting.Default]);
if (this.dx) {
GData.spin.Pause(); return;
} | random_line_split |
Login.ts | import { basic, thread, thread as th, reflection, Common, ScopicCommand, bind, Api, net, encoding, Notification, collection } from "../lib/Q/sys/Corelib"
import { AI } from '../lib/q/sys/AI';
import { db } from '../lib/q/sys/db';
import { UI } from '../lib/q/sys/UI';
import { Controller, sdata } from '../lib/q/sys/System';
import { models } from "../abstract/Models";
//import { GetVars } from '../Desktop/Common';
//import { InitModule } from '../Desktop/Common';
//import { basics } from './extra/Basics';
import { eServices } from '../abstract/Services';
import { defs } from '../lib/q/sys/defs';
import { InitModule, GetVars } from "../abstract/extra/Common";
import { basics } from "../abstract/extra/Basics";
import { Components } from "../lib/q/sys/Components";
var key;
__global.ApiServer = new System.basics.Url(envirenment.isChromeApp ? 'http://127.0.0.1/': location.origin);
var onSigninStatChanged = new bind.EventListener<(v: boolean) => void>(key = Math.random() * 2099837662);
var GData: basics.vars;
GetVars((v) => { GData = v; return !true; });
declare var __LOCALSAVE__;
export namespace Apps {
interface CSVParserParam<T> {
name: string;
csvIndex: number;
prop: bind.DProperty<any, T>
}
export class AuthentificationApp extends UI.AuthApp {
public get RedirectApp() { return this.redirectApp; }
public set RedirectApp(v) { }
private _signupPage: UI.Page;
private _loginPage: UI.Page;
private user: models.Login;
constructor(private redirectApp: defs.UI.IApp) {
super(key,onSigninStatChanged);
window['auth'] = this;
GData.user.OnMessage(
(s: bind.PropBinding, ev: bind.EventArgs<boolean, models.Login>) => onSigninStatChanged.Invoke(key, [ev._new]));
onSigninStatChanged.On = (v) => {
if (v)
AuthentificationApp.Download();
else this.fullInitialize();
this.OnStatStatChanged.PInvok(key, [this, v]);
};
}
public IsLogged<T>(callback: (v: boolean, param: T) => void, param: T) {
callback(GData.user.IsLogged, param);
}
private createSignupPage() {
| private createLoginPage() {
var p = new UI.Page(this, 'Login', 'Login');
p.OnInitialized = p => p.Add(new UI.TControl('Client.login', GData.user));
this.AddPage(this._loginPage = p);
}
private auth = thread.Dispatcher.cretaeJob(this.Login, [], this, true);
private autoAuth = thread.Dispatcher.cretaeJob(this._AutoLogin, [], this, true);
private AutoLogin() {
var ident =basic.Settings.get("Identification");
GData.user.Identification = ident;
thread.Dispatcher.Push(this.autoAuth);
}
private Login() {
var isl = GData.user.IsLogged;
Api.RiseApi("Auth", {
callback: (c, p) => {
if (!p || !GData.user.IsLogged) this.fullInitialize();
this.OnStatStatChanged.PInvok(key, [this, p]);
}, data: this
});
}
private _AutoLogin() {
var isl = GData.user.IsLogged;
Api.RiseApi("autoAuth", {
callback: (c, p) => {
if (!p || !GData.user.IsLogged) this.fullInitialize();
this.OnStatStatChanged.PInvok(key, [this, p]);
}, data: this
});
}
private static dx;
public static Download() {
Notification.fire('ReLoadUserSetting', [models.UserSetting.Default]);
if (this.dx) {
GData.spin.Pause(); return;
}
this.dx = true;
GData.__data.Clear();
Api.RiseApi('log', { callback: null, data: null });
GData.requester.Push(models.IsAdmin, new models.IsAdmin(), null, (s, r, iss) => {
GData.spin.Start("Wait a moment");
GData.requester.Push(models.Categories, GData.__data.Categories, null, (d, r) => { GData.spin.Message = "Categories"; GData.spin.Start("Wait a moment"); });
if (typeof __LOCALSAVE__ !== 'undefined')
GData.db.Get('Products').table.LoadTableFromDB(GData.__data.Products, () => {
GData.apis.Product.SmartUpdate(new Date(GData.db.Get('Products').info.LastUpdate || 0));
});
else {
GData.requester.Request(models.Products, "GETCSV", null, null, (pd, json, iss, req) => {
GData.__data.Products.FromCsv(req.Response);
});
//GData.requester.Push(models.Products, GData.__data.Products, null, (d, r) => { GData.spin.Message = "Products"; });
}
GData.requester.Push(models.Costumers, GData.__data.Costumers, null, (d, r) => { GData.spin.Message = "Costumers"; });
if (iss) GData.requester.Push(models.Agents, GData.__data.Agents, null, (d, r) => { GData.spin.Message = "Agents"; });
GData.requester.Push(models.IsAdmin, new models.IsAdmin(), null, (s, r, iss) => {
GData.spin.Pause();
Api.RiseApi('log', { callback: null, data: null });
});
});
}
public initialize() {
GData.requester.Request(models.IsSecured, "GET", undefined, undefined, (a, b, c) => {
__global.https = b as any;
this.AutoLogin();
});
}
private fullInitialize() {
if (this.finit) return;
if (this.IsInit) this._initialize();
else this.OnInitialized = t => t._initialize();
}
private _initialize() {
super.initialize();
this.finit = true;
this.createLoginPage();
this.createSignupPage();
initJobs.call(this);
this.SelectedPage = this._loginPage;
}
private finit: boolean;
public Signout() {
}
Logout() {
logout((islogout) => {
if (islogout) {
} else {
}
});
}
OnAttached() {
if (!this.isf) return this.isf = true;
this.fullInitialize();
}
isf;
OnDetached() {
}
}
}
function initJobs() {
bind.Register(new bind.Job('openlogin', null, null, null, (ji, e) => {
var dm = ji.dom;
dm.addEventListener('click', () => this.Open(this._loginPage))
}, null));
bind.Register(new bind.Job('login', null, null, null, (ji, e) => {
if (!GData.user.Client) GData.user.Client = new models.Client(0);
ji.dom.addEventListener('click', (() => { GData.spin.Start('login'); this.Login(); }).bind(ji))
}, null));
bind.Register(new bind.Job('opensignup', undefined, undefined, undefined, (ji, e) => {
var dm = ji.dom;
if (!GData.user.Client) GData.user.Client = new models.Client(0);
dm.addEventListener('click', () => {
this.Open(this._signupPage);
})
}, null));
bind.Register(new bind.Job('signup', () => {
}, null, null, (ji, e) => {
ji.addEventListener('click', 'click', (() => {
var t = ji.Scop;
var v = t.Value as models.Login;
v.ReGenerateEncPwd("eval(code)", v.Pwd);
GData.requester.Post(models.Signup, t.Value, null, (callback, p, iss) => {
if (iss)
var m = UI.Modal.ShowDialog('Signup', 'The Signup was successfully created .Please Send a message with your code to activate the account');
else {
}
});
}).bind(ji));
}, null));
bind.Register(new bind.Job('loggedjob', (ji) => {
var b = ji.Scop.Value as boolean;
var dm = ji.dom as HTMLElement;
if (b)
dm.innerText = 'YOU ARE LOGGED';
else {
dm.innerText = 'YOU ARE NOT LOGGED';
}
}, null, null, (j, e) => { }, null));
}
Api.RegisterApiCallback({
Name: "ReAuth", DoApiCallback: function (a, b, c) {
GData.spin.Start("Authenticating");
GData.user.Stat = 0;
GData.requester.Push(models.Login, GData.user, null, function (callback, s, iss) {
GData.spin.Pause();
if (iss) {
var login = callback.data as models.Login;
if (login.IsLogged) {
basic.Settings.set("Identification", login.Identification);
c.callback && c.callback(c, true);
return;
}
UI.InfoArea.push('<p class="text-center">Please Check Your <B>Password</B> AND <B>UserName</B></p>', false, 4000);
} else
UI.InfoArea.push('There no connection to server', false);
this.OnInitialized = (t) => t.fullInitialize();
c.callback && c.callback(c, false);
});
}, Owner: this
});
Api.RegisterApiCallback({
Name: "Auth", DoApiCallback: function (a, b, c) {
GData.user.Stat = 0;
function callback1(callback, s, iss) {
GData.spin.Pause();
if (iss) {
var login = callback.data as models.Login;
if (login.IsLogged) {
saveLoginData(login);
c.callback && c.callback(c, true);
return;
}
UI.InfoArea.push('<p class="text-center">Please Check Your <B>Password</B> AND <B>UserName</B></p>', false, 4000);
} else
UI.InfoArea.push('Error de connecter a server', false);
c.callback && c.callback(c, false);
}
GData.requester.Push(models.Login, GData.user, null, callback1);
}, Owner: this
});
function saveLoginData(login: models.Login) {
basic.Settings.set("Identification", login.Identification);
basic.Settings.set("LoginID", login.Id);
}
function loadLoginData(login: models.Login, alsoID: boolean) {
login.Identification = basic.Settings.get("Identification");
if (alsoID)
login.Id = basic.Settings.get("LoginID") || login.Id;
}
Api.RegisterApiCallback({
Name: "autoAuth", DoApiCallback: function (a, b, c) {
GData.user.Stat = 0;
function callback1(callback, s, iss) {
GData.spin.Pause();
if (iss) {
var login = callback.data as models.Login;
if (login.IsLogged) {
basic.Settings.set("Identification", login.Identification);
c.callback && c.callback(c, true);
return;
}
UI.InfoArea.push('<p class="text-center">Please Check Your <B>Password</B> AND <B>UserName</B></p>', false, 4000);
} else
UI.InfoArea.push('Error de connecter a server', false);
c.callback && c.callback(c, false);
}
GData.requester.Costume({ Method: 0, Url: '~checklogging' }, undefined, undefined, (e, rslt, succ, req) => {
if (rslt === true) {
GData.spin.Pause();
GData.user.FromJson({ CheckLogging: true }, encoding.SerializationContext.GlobalContext, true);
return;
}
loadLoginData(GData.user, true);
GData.requester.Request(models.Login, "AutoLogin", GData.user, GData.user as any,
(callback, s, iss) => {
if (iss) {
var login = callback.data as models.Login;
if (login.IsLogged) {
GData.spin.Pause();
saveLoginData(login);
c.callback && c.callback(c, true);
return;
}
}
GData.requester.Push(models.Login, GData.user, null, callback1);
});
}, undefined);
}, Owner: this
});
function logout(callback:(isLogout:boolean)=>void) {
GData.requester.Get(models.Login, GData.user, null, function(s, r: any, iss) {
if (!iss) {
callback(null);
} else
if (!GData.user.IsLogged) {
basic.Settings.set("Identification", "");
GData.user.Identification = undefined;
GData.user.Username = undefined;
GData.user.Pwd = undefined;
document.cookie = "id=;";
callback && callback(true);
} else {
//UI.Modal.ShowDialog("Signout failled !!!!", "Some thing happened when logout this session please contact the administrator if site", undefined, "Retry", "Cancel");
callback && callback(false);
}
}, function(r, t) {
r.Url = "/~Signout";
});
}
var lr;
declare type callback<T> = (thred: number, param: T) => void;
function myfunction<T>(onConnected: callback<T>, onSignOut: callback<T>, onConnectionLost: callback<T>, param: T) {
var intThread;
var c = new XMLHttpRequest();
var self = this;
c.onreadystatechange = function() {
if (this.readyState == 4)
if (this.status == 200 && this.responseText == "true")
if (this.responseText == "true")
return onConnected(intThread, param);
else if (this.responseText == "false")
return onSignOut(intThread, param);
else throw "Uknow stat";
else
onConnectionLost(intThread, param);
};
c.onerror = function() {
onConnectionLost(intThread, param);
};
intThread = setInterval(() => {
c.open('get', __global.GetApiAddress('/~CheckLogging'));
c.setRequestHeader('Access-Control-Allow-Origin', 'true');
try { c.send(); c.timeout = 10000; } catch (e) { }
}, 12000);
}
myfunction(
(t, p) => { },
(t, p) => { UI.Desktop.Current.OpenSignin(); },
(t, p) => { UI.Desktop.Current.OpenSignin(); },
this); | var p = new UI.Page(this, 'Signup', 'Signup');
p.OnInitialized = p => p.Add(new UI.TControl('Client.signup', GData.user));
this.AddPage(this._signupPage = p);
}
| identifier_body |
mod.rs | mod fifo;
mod background_fifo;
mod sprite_fifo;
use crate::bus::*;
use crate::cpu::{interrupt, InterruptType};
use crate::clock::ClockListener;
use std::fmt;
use std::cell::RefCell;
use std::rc::Rc;
use crate::graphics_driver::GraphicsDriver;
use crate::ppu::background_fifo::BackgroundFifo;
use crate::ppu::sprite_fifo::SpriteFifo;
pub const DISPLAY_WIDTH: u8 = 160;
pub const DISPLAY_HEIGHT: u8 = 144;
const PITCH: usize = DISPLAY_WIDTH as usize * DISPLAY_HEIGHT as usize;
const VIRTUAL_DISPLAY_HEIGHT: u8 = 154;
const LCDC_DISPLAY_ENABLE: u8 = 1 << 7;
const LCDC_WINDOW_TILE_MAP_SELECT: u8 = 1 << 6;
const LCDC_WINDOW_ENABLE: u8 = 1 << 5;
const LCDC_TILE_DATA_SELECT: u8 = 1 << 4;
const LCDC_TILE_MAP_SELECT: u8 = 1 << 3;
const LCDC_SPRITE_SIZE: u8 = 1 << 2; // 1: Double height
const LCDC_SPRITE_ENABLE: u8 = 1 << 1;
// const LCDC_BG_VS_WINDOW_PRIORITY: u8 = 1 << 1;
const MAX_SPRITES_PER_LINE: usize = 10;
const STAT_LYC_INTERRUPT: u8 = 1 << 6;
const STAT_OAM_INTERRUPT: u8 = 1 << 5;
const STAT_VBLANK_INTERRUPT: u8 = 1 << 4;
const STAT_HBLANK_INTERRUPT: u8 = 1 << 3;
const STAT_LYC_FLAG: u8 = 1 << 2;
const STAT_MODE_MASK: u8 = 0x03;
const VRAM_BASE_ADDRESS: Address = 0x8000;
const TILE_MAP_LO_BASE: Address = 0x1800; // VRAM Relative Address; Bus Address 0x9800;
const TILE_MAP_HI_BASE: Address = 0x1C00; // VRAM Relative Address; Bus Address 0x9C00;
const TILE_DATA_BLOCK_BASE: [Address; 3] = [0x0000, 0x0800, 0x1000]; // VRAM Relative Addresses
// Timings from "The Ultimate Game Boy Talk"
const OAM_CYCLES: u16 = 20;
const DRAW_CYCLES: u16 = 43;
const HBLANK_CYCLES: u16 = 51;
const VBLANK_LINE_CYCLES: u16 = 114;
const SCREEN_CYCLES: u16 = VBLANK_LINE_CYCLES * VIRTUAL_DISPLAY_HEIGHT as u16;
#[repr(u8)]
#[derive(Debug, Copy, Clone, PartialEq)]
enum Mode {
HBlank = 0,
VBlank,
OAM,
Draw,
}
#[derive(Debug, Copy, Clone)]
struct Point {
x: u16,
y: u16,
}
#[derive(Debug, Copy, Clone)]
pub struct Registers {
LCDC: Byte,
STAT: Byte,
SCY: Byte,
SCX: Byte,
// LX // Fake register specifying X position of renderer
LX: Byte,
LY: Byte,
LYC: Byte,
WY: Byte,
WX: Byte,
BGP: Byte,
OBP0: Byte,
OBP1: Byte,
dma_active: bool,
dma_address: Byte,
dma_counter: Byte,
}
#[derive(Debug)]
pub struct PPU {
on: bool,
mode: Mode,
clock: u16,
pixel_buffer: [u32; PITCH],
palette_buffer: [u32; 4],
render_flag: bool,
VRAM: [Byte; 0x2000],
OAM: [Byte; 0x100],
registers: Registers,
bgfifo: BackgroundFifo,
spfifo: SpriteFifo,
}
impl PPU {
pub fn new() -> Self {
Self {
on: false,
mode: Mode::VBlank,
clock: 0,
pixel_buffer: [0x00; PITCH],
palette_buffer: [0xFFFFFF, 0xC0C0C0, 0x404040, 0x000000],
render_flag: true,
VRAM: [0; 0x2000],
OAM: [0; 0x100],
registers: Registers {
LCDC: 0,
STAT: Mode::VBlank as u8,
SCY: 0,
SCX: 0,
LX: 0,
LY: 0,
LYC: 0,
WY: 0,
WX: 0,
BGP: 0,
OBP0: 0,
OBP1: 0,
dma_active: false,
dma_counter: 0,
dma_address: 0,
},
bgfifo: BackgroundFifo::new(),
spfifo: SpriteFifo::new(),
}
}
pub fn update<'a>(&mut self, driver: &mut (dyn GraphicsDriver + 'a)) {
if !self.render_flag {
return;
}
let start = crate::graphics_driver::Point { x: 0, y: 0 };
let end = crate::graphics_driver::Point {
x: DISPLAY_WIDTH as u16,
y: DISPLAY_HEIGHT as u16,
};
if !self.on {
let screen: [u32; PITCH] = [0; PITCH];
driver.render(&screen);
}
else {
driver.render(&self.pixel_buffer);
}
self.render_flag = false;
}
fn set_mode(&mut self, bus: &mut Bus, mode: Mode) {
self.mode = mode;
// Clear previous mode flag
self.registers.STAT &= 0xFF ^ STAT_MODE_MASK;
// Set current mode flag
self.registers.STAT |= mode as u8;
const INTERRUPT_SOURCE_FLAGS: [u8; 3] = [
STAT_HBLANK_INTERRUPT,
STAT_VBLANK_INTERRUPT,
STAT_OAM_INTERRUPT
];
match mode {
// Draw does not have an associated interrupt.
Mode::Draw => return,
Mode::VBlank => interrupt(bus, InterruptType::VBlank),
_ => {},
}
if self.registers.STAT & INTERRUPT_SOURCE_FLAGS[mode as usize] != 0 {
interrupt(bus, InterruptType::LCDStat);
}
}
pub fn reset(&mut self, bus: &mut Bus) {
self.set_mode(bus, Mode::OAM);
self.registers.LY = 0;
self.clock = 0;
}
}
impl fmt::Display for PPU {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write! {f,
concat! {
"PPU | MODE {:6?}\n",
" | LCDC {:02X} STAT {:02X}\n",
" | SCY {:02X} SCX {:02X}\n",
" | LY {:02X} LYC {:02X}\n",
" | LY {:02X} LX {:02X}\n",
" | WY {:02X} WX {:02X}\n\n",
"BGF | MODE {:?}\n"
},
self.mode,
self.registers.LCDC, self.registers.STAT, self.registers.SCY, self.registers.SCX,
self.registers.LY, self.registers.LYC, self.registers.LY, self.registers.LX,
self.registers.WY, self.registers.WX,
self.bgfifo.state,
}
}
}
impl BusListener for PPU {
fn bus_attach(&mut self) -> Vec<Attach> {
vec![
Attach::BlockRange(0x80, 0x9F), // VRAM
Attach::Block(0xFE), // OAM Sprite Memory (Note that OAM is only up to 0xFE9F)
Attach::RegisterRange(0x40, 0x4B), // LCD Position / Palettes / DMA Transfer Start Address
// Attach::Register(0x4F), // VRAM Bank Selector
// Attach::RegisterRange(0x51, 0x55), // HDMA 1-5
// Attach::RegisterRange(0x68, 0x6B), // CGB Palletes
]
}
fn bus_read(&self, address: Address) -> Byte {
// TODO: Prevent access during OAM or Draw.
match address {
0x8000..=0x9FFF => self.VRAM[(address - 0x8000) as usize],
0xFE00..=0xFE9F => self.OAM[(address - 0xFE00) as usize],
0xFEA0..=0xFEFF => 0, // This range is unusable
0xFF40 => self.registers.LCDC,
0xFF41 => self.registers.STAT,
0xFF42 => self.registers.SCY,
0xFF43 => self.registers.SCX,
//0xFF44 => 0x90, //DEBUG//
0xFF44 => self.registers.LY,
0xFF45 => self.registers.LYC,
0xFF46 => self.registers.dma_address,
0xFF47 => self.registers.BGP,
0xFF48 => self.registers.OBP0,
0xFF49 => self.registers.OBP1,
0xFF4A => self.registers.WY,
0xFF4B => self.registers.WX,
// 0xFF4F | 0xFF51..=0xFF55 | 0xFF68..=0xFF6B => 0x00, // TODO
_ => panic!("PPU Address ({:04X}) Not Implemented", address),
}
}
fn bus_write(&mut self, _bus: &mut Bus, address: Address, value: Byte) {
match address {
// 0xFF4F | 0xFF51..=0xFF55 | 0xFF68..=0xFF6B => return, // TODO
0xFEA0..=0xFEFF => return, // This range is unusable
0xFF41 => {
// Lower 3 bits of STAT are read-only mode indicators.
let stat = self.registers.STAT;
self.registers.STAT = (value & 0xF8) | (stat & 0x07);
return;
}
_ => {},
}
let ptr = match address {
0x8000..=0x9FFF => &mut self.VRAM[(address - 0x8000) as usize],
0xFE00..=0xFE9F => {
if self.mode == Mode::OAM || self.mode == Mode::Draw {
panic!("Illegal write to OAM table.");
}
&mut self.OAM[(address - 0xFE00) as usize]
},
0xFF40 => &mut self.registers.LCDC,
// 0xFF41 HANDLED ABOVE //
0xFF42 => &mut self.registers.SCY,
0xFF43 => &mut self.registers.SCX,
// 0xFF44 (LY) is READ ONLY //
0xFF45 => &mut self.registers.LYC,
0xFF47 => &mut self.registers.BGP,
0xFF48 => &mut self.registers.OBP0,
0xFF49 => &mut self.registers.OBP1,
0xFF4A => &mut self.registers.WY,
0xFF4B => &mut self.registers.WX,
// Writing to the DMA Transfer Register initializes transfer
0xFF46 => {
self.registers.dma_active = true;
self.registers.dma_counter = 0;
assert!(value <= 0xF1);
&mut self.registers.dma_address
},
_ => panic!("PPU Address ({:04X}) Not Implemented", address),
};
*ptr = value;
}
}
impl ClockListener for PPU {
fn callback(&mut self, bus: &mut Bus, cycles: u8) {
if self.registers.LCDC & LCDC_DISPLAY_ENABLE == 0 {
self.on = false;
self.clock += cycles as u16;
if SCREEN_CYCLES < self.clock {
self.clock -= SCREEN_CYCLES;
self.render_flag = true;
}
return;
}
else if !self.on {
self.reset(bus);
self.on = true;
}
// DMA Transfer Loop
for _ in 0..cycles {
// DMA may terminate in the middle of this loop.
if !self.registers.dma_active {
break;
}
let dma_counter = self.registers.dma_counter as u16;
let data = bus.read_byte(((self.registers.dma_address as Address) << 8) | dma_counter);
self.OAM[dma_counter as usize] = data;
|
self.clock += cycles as u16;
use Mode::*;
match self.mode {
OAM => {
for _ in 0..(cycles << 1) {
self.spfifo.scan_next_oam_table_entry(&self.OAM, &self.registers);
}
if self.clock < OAM_CYCLES {
return;
}
self.clock -= OAM_CYCLES;
self.set_mode(bus, Draw);
}
Draw => {
// Render cycle: Push pixels onto the screen.
for _ in 0..(cycles << 1) {
self.bgfifo.step(&self.VRAM, self.registers);
self.spfifo.step(&self.VRAM, self.registers);
for _ in 0..2 {
// TODO: Window Handling
if DISPLAY_WIDTH <= self.registers.LX {
break;
}
let mut pixel_index = 0u8;
match self.bgfifo.pop() {
None => break,
Some(index) => pixel_index = index,
}
let alt_palette_buffer: [u32; 4] = [0xFFFFFF, 0xCC0000, 0x440000, 0xFF0000];
let mut pixel = self.palette_buffer[pixel_index as usize];
// TODO: Sprite priority.
match self.spfifo.pop(self.registers.LX) {
None => {},
Some(index) => {
pixel_index = index;
pixel = alt_palette_buffer[pixel_index as usize];
},
}
let buffer_index = (self.registers.LY as u16 * DISPLAY_WIDTH as u16)
+ self.registers.LX as u16;
self.pixel_buffer[buffer_index as usize] = pixel;
self.registers.LX += 1;
}
}
if self.registers.LX < DISPLAY_WIDTH || self.clock < DRAW_CYCLES {
return;
}
self.clock -= DRAW_CYCLES;
self.set_mode(bus, HBlank);
}
HBlank => {
if self.clock < HBLANK_CYCLES {
return;
}
self.clock -= HBLANK_CYCLES;
if self.registers.LY == self.registers.LYC {
// Set the LYC flag
self.registers.STAT |= STAT_LYC_FLAG;
if self.registers.STAT & STAT_LYC_INTERRUPT != 0 {
interrupt(bus, InterruptType::LCDStat);
}
}
else {
// Clear the LYC flag.
self.registers.STAT &= 0xFF ^ STAT_LYC_FLAG;
}
self.bgfifo.reset(0);
self.spfifo.reset();
self.registers.LX = 0;
self.registers.LY += 1;
if self.registers.LY >= DISPLAY_HEIGHT {
self.set_mode(bus, VBlank);
}
else {
self.set_mode(bus, OAM);
}
}
VBlank => {
if self.clock < VBLANK_LINE_CYCLES {
return;
}
self.clock -= VBLANK_LINE_CYCLES;
self.registers.LY += 1;
if self.registers.LY < VIRTUAL_DISPLAY_HEIGHT {
return;
}
self.render_flag = true;
self.registers.LY = 0;
self.set_mode(bus, OAM);
}
}
}
} | self.registers.dma_counter += 1;
self.registers.dma_active = self.registers.dma_counter < DISPLAY_WIDTH;
} | random_line_split |
mod.rs | mod fifo;
mod background_fifo;
mod sprite_fifo;
use crate::bus::*;
use crate::cpu::{interrupt, InterruptType};
use crate::clock::ClockListener;
use std::fmt;
use std::cell::RefCell;
use std::rc::Rc;
use crate::graphics_driver::GraphicsDriver;
use crate::ppu::background_fifo::BackgroundFifo;
use crate::ppu::sprite_fifo::SpriteFifo;
pub const DISPLAY_WIDTH: u8 = 160;
pub const DISPLAY_HEIGHT: u8 = 144;
const PITCH: usize = DISPLAY_WIDTH as usize * DISPLAY_HEIGHT as usize;
const VIRTUAL_DISPLAY_HEIGHT: u8 = 154;
const LCDC_DISPLAY_ENABLE: u8 = 1 << 7;
const LCDC_WINDOW_TILE_MAP_SELECT: u8 = 1 << 6;
const LCDC_WINDOW_ENABLE: u8 = 1 << 5;
const LCDC_TILE_DATA_SELECT: u8 = 1 << 4;
const LCDC_TILE_MAP_SELECT: u8 = 1 << 3;
const LCDC_SPRITE_SIZE: u8 = 1 << 2; // 1: Double height
const LCDC_SPRITE_ENABLE: u8 = 1 << 1;
// const LCDC_BG_VS_WINDOW_PRIORITY: u8 = 1 << 1;
const MAX_SPRITES_PER_LINE: usize = 10;
const STAT_LYC_INTERRUPT: u8 = 1 << 6;
const STAT_OAM_INTERRUPT: u8 = 1 << 5;
const STAT_VBLANK_INTERRUPT: u8 = 1 << 4;
const STAT_HBLANK_INTERRUPT: u8 = 1 << 3;
const STAT_LYC_FLAG: u8 = 1 << 2;
const STAT_MODE_MASK: u8 = 0x03;
const VRAM_BASE_ADDRESS: Address = 0x8000;
const TILE_MAP_LO_BASE: Address = 0x1800; // VRAM Relative Address; Bus Address 0x9800;
const TILE_MAP_HI_BASE: Address = 0x1C00; // VRAM Relative Address; Bus Address 0x9C00;
const TILE_DATA_BLOCK_BASE: [Address; 3] = [0x0000, 0x0800, 0x1000]; // VRAM Relative Addresses
// Timings from "The Ultimate Game Boy Talk"
const OAM_CYCLES: u16 = 20;
const DRAW_CYCLES: u16 = 43;
const HBLANK_CYCLES: u16 = 51;
const VBLANK_LINE_CYCLES: u16 = 114;
const SCREEN_CYCLES: u16 = VBLANK_LINE_CYCLES * VIRTUAL_DISPLAY_HEIGHT as u16;
#[repr(u8)]
#[derive(Debug, Copy, Clone, PartialEq)]
enum Mode {
HBlank = 0,
VBlank,
OAM,
Draw,
}
#[derive(Debug, Copy, Clone)]
struct Point {
x: u16,
y: u16,
}
#[derive(Debug, Copy, Clone)]
pub struct Registers {
LCDC: Byte,
STAT: Byte,
SCY: Byte,
SCX: Byte,
// LX // Fake register specifying X position of renderer
LX: Byte,
LY: Byte,
LYC: Byte,
WY: Byte,
WX: Byte,
BGP: Byte,
OBP0: Byte,
OBP1: Byte,
dma_active: bool,
dma_address: Byte,
dma_counter: Byte,
}
#[derive(Debug)]
pub struct PPU {
on: bool,
mode: Mode,
clock: u16,
pixel_buffer: [u32; PITCH],
palette_buffer: [u32; 4],
render_flag: bool,
VRAM: [Byte; 0x2000],
OAM: [Byte; 0x100],
registers: Registers,
bgfifo: BackgroundFifo,
spfifo: SpriteFifo,
}
impl PPU {
pub fn | () -> Self {
Self {
on: false,
mode: Mode::VBlank,
clock: 0,
pixel_buffer: [0x00; PITCH],
palette_buffer: [0xFFFFFF, 0xC0C0C0, 0x404040, 0x000000],
render_flag: true,
VRAM: [0; 0x2000],
OAM: [0; 0x100],
registers: Registers {
LCDC: 0,
STAT: Mode::VBlank as u8,
SCY: 0,
SCX: 0,
LX: 0,
LY: 0,
LYC: 0,
WY: 0,
WX: 0,
BGP: 0,
OBP0: 0,
OBP1: 0,
dma_active: false,
dma_counter: 0,
dma_address: 0,
},
bgfifo: BackgroundFifo::new(),
spfifo: SpriteFifo::new(),
}
}
pub fn update<'a>(&mut self, driver: &mut (dyn GraphicsDriver + 'a)) {
if !self.render_flag {
return;
}
let start = crate::graphics_driver::Point { x: 0, y: 0 };
let end = crate::graphics_driver::Point {
x: DISPLAY_WIDTH as u16,
y: DISPLAY_HEIGHT as u16,
};
if !self.on {
let screen: [u32; PITCH] = [0; PITCH];
driver.render(&screen);
}
else {
driver.render(&self.pixel_buffer);
}
self.render_flag = false;
}
fn set_mode(&mut self, bus: &mut Bus, mode: Mode) {
self.mode = mode;
// Clear previous mode flag
self.registers.STAT &= 0xFF ^ STAT_MODE_MASK;
// Set current mode flag
self.registers.STAT |= mode as u8;
const INTERRUPT_SOURCE_FLAGS: [u8; 3] = [
STAT_HBLANK_INTERRUPT,
STAT_VBLANK_INTERRUPT,
STAT_OAM_INTERRUPT
];
match mode {
// Draw does not have an associated interrupt.
Mode::Draw => return,
Mode::VBlank => interrupt(bus, InterruptType::VBlank),
_ => {},
}
if self.registers.STAT & INTERRUPT_SOURCE_FLAGS[mode as usize] != 0 {
interrupt(bus, InterruptType::LCDStat);
}
}
pub fn reset(&mut self, bus: &mut Bus) {
self.set_mode(bus, Mode::OAM);
self.registers.LY = 0;
self.clock = 0;
}
}
impl fmt::Display for PPU {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write! {f,
concat! {
"PPU | MODE {:6?}\n",
" | LCDC {:02X} STAT {:02X}\n",
" | SCY {:02X} SCX {:02X}\n",
" | LY {:02X} LYC {:02X}\n",
" | LY {:02X} LX {:02X}\n",
" | WY {:02X} WX {:02X}\n\n",
"BGF | MODE {:?}\n"
},
self.mode,
self.registers.LCDC, self.registers.STAT, self.registers.SCY, self.registers.SCX,
self.registers.LY, self.registers.LYC, self.registers.LY, self.registers.LX,
self.registers.WY, self.registers.WX,
self.bgfifo.state,
}
}
}
impl BusListener for PPU {
fn bus_attach(&mut self) -> Vec<Attach> {
vec![
Attach::BlockRange(0x80, 0x9F), // VRAM
Attach::Block(0xFE), // OAM Sprite Memory (Note that OAM is only up to 0xFE9F)
Attach::RegisterRange(0x40, 0x4B), // LCD Position / Palettes / DMA Transfer Start Address
// Attach::Register(0x4F), // VRAM Bank Selector
// Attach::RegisterRange(0x51, 0x55), // HDMA 1-5
// Attach::RegisterRange(0x68, 0x6B), // CGB Palletes
]
}
fn bus_read(&self, address: Address) -> Byte {
// TODO: Prevent access during OAM or Draw.
match address {
0x8000..=0x9FFF => self.VRAM[(address - 0x8000) as usize],
0xFE00..=0xFE9F => self.OAM[(address - 0xFE00) as usize],
0xFEA0..=0xFEFF => 0, // This range is unusable
0xFF40 => self.registers.LCDC,
0xFF41 => self.registers.STAT,
0xFF42 => self.registers.SCY,
0xFF43 => self.registers.SCX,
//0xFF44 => 0x90, //DEBUG//
0xFF44 => self.registers.LY,
0xFF45 => self.registers.LYC,
0xFF46 => self.registers.dma_address,
0xFF47 => self.registers.BGP,
0xFF48 => self.registers.OBP0,
0xFF49 => self.registers.OBP1,
0xFF4A => self.registers.WY,
0xFF4B => self.registers.WX,
// 0xFF4F | 0xFF51..=0xFF55 | 0xFF68..=0xFF6B => 0x00, // TODO
_ => panic!("PPU Address ({:04X}) Not Implemented", address),
}
}
fn bus_write(&mut self, _bus: &mut Bus, address: Address, value: Byte) {
match address {
// 0xFF4F | 0xFF51..=0xFF55 | 0xFF68..=0xFF6B => return, // TODO
0xFEA0..=0xFEFF => return, // This range is unusable
0xFF41 => {
// Lower 3 bits of STAT are read-only mode indicators.
let stat = self.registers.STAT;
self.registers.STAT = (value & 0xF8) | (stat & 0x07);
return;
}
_ => {},
}
let ptr = match address {
0x8000..=0x9FFF => &mut self.VRAM[(address - 0x8000) as usize],
0xFE00..=0xFE9F => {
if self.mode == Mode::OAM || self.mode == Mode::Draw {
panic!("Illegal write to OAM table.");
}
&mut self.OAM[(address - 0xFE00) as usize]
},
0xFF40 => &mut self.registers.LCDC,
// 0xFF41 HANDLED ABOVE //
0xFF42 => &mut self.registers.SCY,
0xFF43 => &mut self.registers.SCX,
// 0xFF44 (LY) is READ ONLY //
0xFF45 => &mut self.registers.LYC,
0xFF47 => &mut self.registers.BGP,
0xFF48 => &mut self.registers.OBP0,
0xFF49 => &mut self.registers.OBP1,
0xFF4A => &mut self.registers.WY,
0xFF4B => &mut self.registers.WX,
// Writing to the DMA Transfer Register initializes transfer
0xFF46 => {
self.registers.dma_active = true;
self.registers.dma_counter = 0;
assert!(value <= 0xF1);
&mut self.registers.dma_address
},
_ => panic!("PPU Address ({:04X}) Not Implemented", address),
};
*ptr = value;
}
}
impl ClockListener for PPU {
fn callback(&mut self, bus: &mut Bus, cycles: u8) {
if self.registers.LCDC & LCDC_DISPLAY_ENABLE == 0 {
self.on = false;
self.clock += cycles as u16;
if SCREEN_CYCLES < self.clock {
self.clock -= SCREEN_CYCLES;
self.render_flag = true;
}
return;
}
else if !self.on {
self.reset(bus);
self.on = true;
}
// DMA Transfer Loop
for _ in 0..cycles {
// DMA may terminate in the middle of this loop.
if !self.registers.dma_active {
break;
}
let dma_counter = self.registers.dma_counter as u16;
let data = bus.read_byte(((self.registers.dma_address as Address) << 8) | dma_counter);
self.OAM[dma_counter as usize] = data;
self.registers.dma_counter += 1;
self.registers.dma_active = self.registers.dma_counter < DISPLAY_WIDTH;
}
self.clock += cycles as u16;
use Mode::*;
match self.mode {
OAM => {
for _ in 0..(cycles << 1) {
self.spfifo.scan_next_oam_table_entry(&self.OAM, &self.registers);
}
if self.clock < OAM_CYCLES {
return;
}
self.clock -= OAM_CYCLES;
self.set_mode(bus, Draw);
}
Draw => {
// Render cycle: Push pixels onto the screen.
for _ in 0..(cycles << 1) {
self.bgfifo.step(&self.VRAM, self.registers);
self.spfifo.step(&self.VRAM, self.registers);
for _ in 0..2 {
// TODO: Window Handling
if DISPLAY_WIDTH <= self.registers.LX {
break;
}
let mut pixel_index = 0u8;
match self.bgfifo.pop() {
None => break,
Some(index) => pixel_index = index,
}
let alt_palette_buffer: [u32; 4] = [0xFFFFFF, 0xCC0000, 0x440000, 0xFF0000];
let mut pixel = self.palette_buffer[pixel_index as usize];
// TODO: Sprite priority.
match self.spfifo.pop(self.registers.LX) {
None => {},
Some(index) => {
pixel_index = index;
pixel = alt_palette_buffer[pixel_index as usize];
},
}
let buffer_index = (self.registers.LY as u16 * DISPLAY_WIDTH as u16)
+ self.registers.LX as u16;
self.pixel_buffer[buffer_index as usize] = pixel;
self.registers.LX += 1;
}
}
if self.registers.LX < DISPLAY_WIDTH || self.clock < DRAW_CYCLES {
return;
}
self.clock -= DRAW_CYCLES;
self.set_mode(bus, HBlank);
}
HBlank => {
if self.clock < HBLANK_CYCLES {
return;
}
self.clock -= HBLANK_CYCLES;
if self.registers.LY == self.registers.LYC {
// Set the LYC flag
self.registers.STAT |= STAT_LYC_FLAG;
if self.registers.STAT & STAT_LYC_INTERRUPT != 0 {
interrupt(bus, InterruptType::LCDStat);
}
}
else {
// Clear the LYC flag.
self.registers.STAT &= 0xFF ^ STAT_LYC_FLAG;
}
self.bgfifo.reset(0);
self.spfifo.reset();
self.registers.LX = 0;
self.registers.LY += 1;
if self.registers.LY >= DISPLAY_HEIGHT {
self.set_mode(bus, VBlank);
}
else {
self.set_mode(bus, OAM);
}
}
VBlank => {
if self.clock < VBLANK_LINE_CYCLES {
return;
}
self.clock -= VBLANK_LINE_CYCLES;
self.registers.LY += 1;
if self.registers.LY < VIRTUAL_DISPLAY_HEIGHT {
return;
}
self.render_flag = true;
self.registers.LY = 0;
self.set_mode(bus, OAM);
}
}
}
}
| new | identifier_name |
addon.go | /*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package addon
import (
"context"
"encoding/json"
"fmt"
"net/url"
"path"
"path/filepath"
"strings"
"sync"
"time"
"cuelang.org/go/cue"
cueyaml "cuelang.org/go/encoding/yaml"
"github.com/google/go-github/v32/github"
"github.com/pkg/errors"
"golang.org/x/oauth2"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
k8syaml "k8s.io/apimachinery/pkg/runtime/serializer/yaml"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
common2 "github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/types"
utils2 "github.com/oam-dev/kubevela/pkg/controller/utils"
cuemodel "github.com/oam-dev/kubevela/pkg/cue/model"
"github.com/oam-dev/kubevela/pkg/cue/model/value"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/util"
"github.com/oam-dev/kubevela/pkg/utils"
"github.com/oam-dev/kubevela/pkg/utils/common"
)
const (
// ReadmeFileName is the addon readme file name
ReadmeFileName string = "readme.md"
// MetadataFileName is the addon meatadata.yaml file name
MetadataFileName string = "metadata.yaml"
// TemplateFileName is the addon template.yaml dir name
TemplateFileName string = "template.yaml"
// ResourcesDirName is the addon resources/ dir name
ResourcesDirName string = "resources"
// DefinitionsDirName is the addon definitions/ dir name
DefinitionsDirName string = "definitions"
)
// ListOptions contains flags mark what files should be read in an addon directory
type ListOptions struct {
GetDetail bool
GetDefinition bool
GetResource bool
GetParameter bool
GetTemplate bool
}
var (
// GetLevelOptions used when get or list addons
GetLevelOptions = ListOptions{GetDetail: true, GetDefinition: true, GetParameter: true}
// EnableLevelOptions used when enable addon
EnableLevelOptions = ListOptions{GetDetail: true, GetDefinition: true, GetResource: true, GetTemplate: true, GetParameter: true}
)
// aError is internal error type of addon
type aError error
var (
// ErrNotExist means addon not exists
ErrNotExist aError = errors.New("addon not exist")
)
// gitHelper helps get addon's file by git
type gitHelper struct {
Client *github.Client
Meta *utils.Content
}
// GitAddonSource defines the information about the Git as addon source
type GitAddonSource struct {
URL string `json:"url,omitempty" validate:"required"`
Path string `json:"path,omitempty"`
Token string `json:"token,omitempty"`
}
// asyncReader helps async read files of addon
type asyncReader struct {
addon *types.Addon
h *gitHelper
item *github.RepositoryContent
errChan chan error
}
// SetReadContent set which file to read
func (r *asyncReader) SetReadContent(content *github.RepositoryContent) {
r.item = content
}
// GetAddon get a addon info from GitAddonSource, can be used for get or enable
func GetAddon(name string, git *GitAddonSource, opt ListOptions) (*types.Addon, error) {
addon, err := getSingleAddonFromGit(git.URL, git.Path, name, git.Token, opt)
if err != nil {
return nil, err
}
return addon, nil
}
// ListAddons list addons' info from GitAddonSource
func ListAddons(git *GitAddonSource, opt ListOptions) ([]*types.Addon, error) {
gitAddons, err := getAddonsFromGit(git.URL, git.Path, git.Token, opt)
if err != nil {
return nil, err
}
return gitAddons, nil
}
func getAddonsFromGit(baseURL, dir, token string, opt ListOptions) ([]*types.Addon, error) {
var addons []*types.Addon
var err error
var wg sync.WaitGroup
errChan := make(chan error, 1)
gith, err := createGitHelper(baseURL, dir, token)
if err != nil {
return nil, err
}
_, items, err := gith.readRepo(gith.Meta.Path)
if err != nil {
return nil, err
}
for _, subItems := range items {
if subItems.GetType() != "dir" {
continue
}
wg.Add(1)
go func(item *github.RepositoryContent) {
defer wg.Done()
addonRes, err := getSingleAddonFromGit(baseURL, dir, item.GetName(), token, opt)
if err != nil {
errChan <- err
return
}
addons = append(addons, addonRes)
}(subItems)
}
wg.Wait()
if len(errChan) != 0 {
return nil, <-errChan
}
return addons, nil
}
func getSingleAddonFromGit(baseURL, dir, addonName, token string, opt ListOptions) (*types.Addon, error) {
var wg sync.WaitGroup
gith, err := createGitHelper(baseURL, path.Join(dir, addonName), token)
if err != nil {
return nil, err
}
_, items, err := gith.readRepo(gith.Meta.Path)
if err != nil {
return nil, err
}
reader := asyncReader{
addon: &types.Addon{},
h: gith,
errChan: make(chan error, 1),
}
for _, item := range items {
switch strings.ToLower(item.GetName()) {
case ReadmeFileName:
if !opt.GetDetail {
break
}
reader.SetReadContent(item)
wg.Add(1)
go readReadme(&wg, reader)
case MetadataFileName:
reader.SetReadContent(item)
wg.Add(1)
go readMetadata(&wg, reader)
case DefinitionsDirName:
if !opt.GetDefinition {
break
}
reader.SetReadContent(item)
wg.Add(1)
go readDefinitions(&wg, reader)
case ResourcesDirName:
if !opt.GetResource && !opt.GetParameter {
break
}
reader.SetReadContent(item)
wg.Add(1)
go readResources(&wg, reader)
case TemplateFileName:
if !opt.GetTemplate {
break
}
reader.SetReadContent(item)
wg.Add(1)
go readTemplate(&wg, reader)
}
}
wg.Wait()
if opt.GetParameter && reader.addon.Parameters != "" {
err = genAddonAPISchema(reader.addon)
if err != nil {
return nil, err
}
}
return reader.addon, nil
}
func readTemplate(wg *sync.WaitGroup, reader asyncReader) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
data, err := content.GetContent()
if err != nil {
reader.errChan <- err
return
}
dec := k8syaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme)
reader.addon.AppTemplate = &v1beta1.Application{}
_, _, err = dec.Decode([]byte(data), nil, reader.addon.AppTemplate)
if err != nil {
reader.errChan <- err
return
}
}
func readResources(wg *sync.WaitGroup, reader asyncReader) {
defer wg.Done()
dirPath := strings.Split(reader.item.GetPath(), "/")
dirPath, err := cutPathUntil(dirPath, ResourcesDirName)
if err != nil {
reader.errChan <- err
}
_, items, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
for _, item := range items {
switch item.GetType() {
case "file":
reader.SetReadContent(item)
wg.Add(1)
go readResFile(wg, reader, dirPath)
case "dir":
reader.SetReadContent(item)
wg.Add(1)
go readResources(wg, reader)
}
}
}
// readResFile read single resource file
func readResFile(wg *sync.WaitGroup, reader asyncReader, dirPath []string) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
b, err := content.GetContent()
if err != nil {
reader.errChan <- err
return
}
if reader.item.GetName() == "parameter.cue" {
reader.addon.Parameters = b
return
}
switch filepath.Ext(reader.item.GetName()) {
case ".cue":
reader.addon.CUETemplates = append(reader.addon.CUETemplates, types.AddonElementFile{Data: b, Name: reader.item.GetName(), Path: dirPath})
default:
reader.addon.YAMLTemplates = append(reader.addon.YAMLTemplates, types.AddonElementFile{Data: b, Name: reader.item.GetName(), Path: dirPath})
}
}
func readDefinitions(wg *sync.WaitGroup, reader asyncReader) {
defer wg.Done()
dirPath := strings.Split(reader.item.GetPath(), "/")
dirPath, err := cutPathUntil(dirPath, DefinitionsDirName)
if err != nil {
reader.errChan <- err
return
}
_, items, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
for _, item := range items {
switch item.GetType() {
case "file":
reader.SetReadContent(item)
wg.Add(1)
go readDefFile(wg, reader, dirPath)
case "dir":
reader.SetReadContent(item)
wg.Add(1)
go readDefinitions(wg, reader)
}
}
}
// readDefFile read single definition file
func readDefFile(wg *sync.WaitGroup, reader asyncReader, dirPath []string) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
b, err := content.GetContent()
if err != nil {
reader.errChan <- err
return
}
reader.addon.Definitions = append(reader.addon.Definitions, types.AddonElementFile{Data: b, Name: reader.item.GetName(), Path: dirPath})
}
func readMetadata(wg *sync.WaitGroup, reader asyncReader) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
b, err := content.GetContent()
if err != nil {
reader.errChan <- err
return
}
err = yaml.Unmarshal([]byte(b), &reader.addon.AddonMeta)
if err != nil {
reader.errChan <- err
return
}
}
func readReadme(wg *sync.WaitGroup, reader asyncReader) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
reader.addon.Detail, err = content.GetContent()
if err != nil {
reader.errChan <- err
return
}
}
func createGitHelper(baseURL, dir, token string) (*gitHelper, error) {
var ts oauth2.TokenSource
if token != "" {
ts = oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token})
}
tc := oauth2.NewClient(context.Background(), ts)
tc.Timeout = time.Second * 10
cli := github.NewClient(tc)
baseURL = strings.TrimSuffix(baseURL, ".git")
u, err := url.Parse(baseURL)
if err != nil {
return nil, errors.New("addon registry invalid")
}
u.Path = path.Join(u.Path, dir)
_, gitmeta, err := utils.Parse(u.String())
if err != nil {
return nil, errors.New("addon registry invalid")
}
return &gitHelper{
Client: cli,
Meta: gitmeta,
}, nil
}
func (h *gitHelper) readRepo(path string) (*github.RepositoryContent, []*github.RepositoryContent, error) |
func genAddonAPISchema(addonRes *types.Addon) error {
param, err := utils2.PrepareParameterCue(addonRes.Name, addonRes.Parameters)
if err != nil {
return err
}
var r cue.Runtime
cueInst, err := r.Compile("-", param)
if err != nil {
return err
}
data, err := common.GenOpenAPI(cueInst)
if err != nil {
return err
}
schema, err := utils2.ConvertOpenAPISchema2SwaggerObject(data)
if err != nil {
return err
}
utils2.FixOpenAPISchema("", schema)
addonRes.APISchema = schema
return nil
}
func cutPathUntil(path []string, end string) ([]string, error) {
for i, d := range path {
if d == end {
return path[i:], nil
}
}
return nil, errors.New("cut path fail, target directory name not found")
}
// RenderApplication render a K8s application
func RenderApplication(addon *types.Addon, args map[string]string) (*v1beta1.Application, []*unstructured.Unstructured, error) {
if args == nil {
args = map[string]string{}
}
app := addon.AppTemplate
if app == nil {
app = &v1beta1.Application{
TypeMeta: metav1.TypeMeta{APIVersion: "core.oam.dev/v1beta1", Kind: "Application"},
ObjectMeta: metav1.ObjectMeta{
Name: Convert2AppName(addon.Name),
Namespace: types.DefaultKubeVelaNS,
Labels: map[string]string{
oam.LabelAddonName: addon.Name,
},
},
Spec: v1beta1.ApplicationSpec{
Components: []common2.ApplicationComponent{},
},
}
}
app.Name = Convert2AppName(addon.Name)
app.Labels = util.MergeMapOverrideWithDst(app.Labels, map[string]string{oam.LabelAddonName: addon.Name})
if app.Spec.Workflow == nil {
app.Spec.Workflow = &v1beta1.Workflow{}
}
for _, namespace := range addon.NeedNamespace {
comp := common2.ApplicationComponent{
Type: "raw",
Name: fmt.Sprintf("%s-namespace", namespace),
Properties: util.Object2RawExtension(renderNamespace(namespace)),
}
app.Spec.Components = append(app.Spec.Components, comp)
}
for _, tmpl := range addon.YAMLTemplates {
comp, err := renderRawComponent(tmpl)
if err != nil {
return nil, nil, err
}
app.Spec.Components = append(app.Spec.Components, *comp)
}
for _, tmpl := range addon.CUETemplates {
comp, err := renderCUETemplate(tmpl, addon.Parameters, args)
if err != nil {
return nil, nil, ErrRenderCueTmpl
}
app.Spec.Components = append(app.Spec.Components, *comp)
}
var defObjs []*unstructured.Unstructured
if isDeployToRuntimeOnly(addon) {
// Runtime cluster mode needs to deploy definitions to control plane k8s.
for _, def := range addon.Definitions {
obj, err := renderObject(def)
if err != nil {
return nil, nil, err
}
defObjs = append(defObjs, obj)
}
if app.Spec.Workflow == nil {
app.Spec.Workflow = &v1beta1.Workflow{Steps: make([]v1beta1.WorkflowStep, 0)}
}
app.Spec.Workflow.Steps = append(app.Spec.Workflow.Steps,
v1beta1.WorkflowStep{
Name: "deploy-control-plane",
Type: "apply-application",
},
v1beta1.WorkflowStep{
Name: "deploy-runtime",
Type: "deploy2runtime",
})
} else {
for _, def := range addon.Definitions {
comp, err := renderRawComponent(def)
if err != nil {
return nil, nil, err
}
app.Spec.Components = append(app.Spec.Components, *comp)
}
}
return app, defObjs, nil
}
func isDeployToRuntimeOnly(addon *types.Addon) bool {
if addon.DeployTo == nil {
return false
}
return addon.DeployTo.RuntimeCluster
}
func renderObject(elem types.AddonElementFile) (*unstructured.Unstructured, error) {
obj := &unstructured.Unstructured{}
dec := k8syaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme)
_, _, err := dec.Decode([]byte(elem.Data), nil, obj)
if err != nil {
return nil, err
}
return obj, nil
}
func renderNamespace(namespace string) *unstructured.Unstructured {
u := &unstructured.Unstructured{}
u.SetAPIVersion("v1")
u.SetKind("Namespace")
u.SetName(namespace)
return u
}
// renderRawComponent will return a component in raw type from string
func renderRawComponent(elem types.AddonElementFile) (*common2.ApplicationComponent, error) {
baseRawComponent := common2.ApplicationComponent{
Type: "raw",
Name: strings.Join(append(elem.Path, elem.Name), "-"),
}
obj, err := renderObject(elem)
if err != nil {
return nil, err
}
baseRawComponent.Properties = util.Object2RawExtension(obj)
return &baseRawComponent, nil
}
// renderCUETemplate will return a component from cue template
func renderCUETemplate(elem types.AddonElementFile, parameters string, args map[string]string) (*common2.ApplicationComponent, error) {
bt, err := json.Marshal(args)
if err != nil {
return nil, err
}
var paramFile = cuemodel.ParameterFieldName + ": {}"
if string(bt) != "null" {
paramFile = fmt.Sprintf("%s: %s", cuemodel.ParameterFieldName, string(bt))
}
param := fmt.Sprintf("%s\n%s", paramFile, parameters)
v, err := value.NewValue(param, nil, "")
if err != nil {
return nil, err
}
out, err := v.LookupByScript(fmt.Sprintf("{%s}", elem.Data))
if err != nil {
return nil, err
}
compContent, err := out.LookupValue("output")
if err != nil {
return nil, err
}
b, err := cueyaml.Encode(compContent.CueValue())
if err != nil {
return nil, err
}
comp := common2.ApplicationComponent{
Name: strings.Join(append(elem.Path, elem.Name), "-"),
}
err = yaml.Unmarshal(b, &comp)
if err != nil {
return nil, err
}
return &comp, err
}
const addonAppPrefix = "addon-"
const addonSecPrefix = "addon-secret-"
// Convert2AppName -
func Convert2AppName(name string) string {
return addonAppPrefix + name
}
// Convert2AddonName -
func Convert2AddonName(name string) string {
return strings.TrimPrefix(name, addonAppPrefix)
}
// RenderArgsSecret TODO add desc
func RenderArgsSecret(addon *types.Addon, args map[string]string) *v1.Secret {
sec := v1.Secret{
TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "Secret"},
ObjectMeta: metav1.ObjectMeta{
Name: Convert2SecName(addon.Name),
Namespace: types.DefaultKubeVelaNS,
},
StringData: args,
Type: v1.SecretTypeOpaque,
}
return &sec
}
// Convert2SecName TODO add desc
func Convert2SecName(name string) string {
return addonSecPrefix + name
}
// CheckDependencies checks if addon's dependent addons is enabled
func CheckDependencies(ctx context.Context, clt client.Client, addon *types.Addon) bool {
var app v1beta1.Application
for _, dep := range addon.Dependencies {
err := clt.Get(ctx, client.ObjectKey{
Namespace: types.DefaultKubeVelaNS,
Name: Convert2AppName(dep.Name),
}, &app)
if err != nil {
return false
}
}
return true
}
| {
file, items, _, err := h.Client.Repositories.GetContents(context.Background(), h.Meta.Owner, h.Meta.Repo, path, nil)
if err != nil {
return nil, nil, WrapErrRateLimit(err)
}
return file, items, nil
} | identifier_body |
addon.go | /*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package addon
import (
"context"
"encoding/json"
"fmt"
"net/url"
"path"
"path/filepath"
"strings"
"sync"
"time"
"cuelang.org/go/cue"
cueyaml "cuelang.org/go/encoding/yaml"
"github.com/google/go-github/v32/github"
"github.com/pkg/errors"
"golang.org/x/oauth2"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
k8syaml "k8s.io/apimachinery/pkg/runtime/serializer/yaml"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
common2 "github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/types"
utils2 "github.com/oam-dev/kubevela/pkg/controller/utils"
cuemodel "github.com/oam-dev/kubevela/pkg/cue/model"
"github.com/oam-dev/kubevela/pkg/cue/model/value"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/util"
"github.com/oam-dev/kubevela/pkg/utils"
"github.com/oam-dev/kubevela/pkg/utils/common"
)
const (
// ReadmeFileName is the addon readme file name
ReadmeFileName string = "readme.md"
// MetadataFileName is the addon meatadata.yaml file name
MetadataFileName string = "metadata.yaml"
// TemplateFileName is the addon template.yaml dir name
TemplateFileName string = "template.yaml"
// ResourcesDirName is the addon resources/ dir name
ResourcesDirName string = "resources"
// DefinitionsDirName is the addon definitions/ dir name
DefinitionsDirName string = "definitions"
)
// ListOptions contains flags mark what files should be read in an addon directory
type ListOptions struct {
GetDetail bool
GetDefinition bool
GetResource bool
GetParameter bool
GetTemplate bool
}
var (
// GetLevelOptions used when get or list addons
GetLevelOptions = ListOptions{GetDetail: true, GetDefinition: true, GetParameter: true}
// EnableLevelOptions used when enable addon
EnableLevelOptions = ListOptions{GetDetail: true, GetDefinition: true, GetResource: true, GetTemplate: true, GetParameter: true}
)
// aError is internal error type of addon
type aError error
var (
// ErrNotExist means addon not exists
ErrNotExist aError = errors.New("addon not exist")
)
// gitHelper helps get addon's file by git
type gitHelper struct {
Client *github.Client
Meta *utils.Content
}
// GitAddonSource defines the information about the Git as addon source
type GitAddonSource struct {
URL string `json:"url,omitempty" validate:"required"`
Path string `json:"path,omitempty"`
Token string `json:"token,omitempty"`
}
// asyncReader helps async read files of addon
type asyncReader struct {
addon *types.Addon
h *gitHelper
item *github.RepositoryContent
errChan chan error
}
// SetReadContent set which file to read
func (r *asyncReader) SetReadContent(content *github.RepositoryContent) {
r.item = content
}
// GetAddon get a addon info from GitAddonSource, can be used for get or enable
func GetAddon(name string, git *GitAddonSource, opt ListOptions) (*types.Addon, error) {
addon, err := getSingleAddonFromGit(git.URL, git.Path, name, git.Token, opt)
if err != nil {
return nil, err
}
return addon, nil
}
// ListAddons list addons' info from GitAddonSource
func ListAddons(git *GitAddonSource, opt ListOptions) ([]*types.Addon, error) {
gitAddons, err := getAddonsFromGit(git.URL, git.Path, git.Token, opt)
if err != nil {
return nil, err
}
return gitAddons, nil
}
func getAddonsFromGit(baseURL, dir, token string, opt ListOptions) ([]*types.Addon, error) {
var addons []*types.Addon
var err error
var wg sync.WaitGroup
errChan := make(chan error, 1)
gith, err := createGitHelper(baseURL, dir, token)
if err != nil {
return nil, err
}
_, items, err := gith.readRepo(gith.Meta.Path)
if err != nil {
return nil, err
}
for _, subItems := range items {
if subItems.GetType() != "dir" {
continue
}
wg.Add(1)
go func(item *github.RepositoryContent) {
defer wg.Done()
addonRes, err := getSingleAddonFromGit(baseURL, dir, item.GetName(), token, opt)
if err != nil {
errChan <- err
return
}
addons = append(addons, addonRes)
}(subItems)
}
wg.Wait()
if len(errChan) != 0 {
return nil, <-errChan
}
return addons, nil
}
func getSingleAddonFromGit(baseURL, dir, addonName, token string, opt ListOptions) (*types.Addon, error) {
var wg sync.WaitGroup
gith, err := createGitHelper(baseURL, path.Join(dir, addonName), token)
if err != nil {
return nil, err
}
_, items, err := gith.readRepo(gith.Meta.Path)
if err != nil {
return nil, err
}
reader := asyncReader{
addon: &types.Addon{},
h: gith,
errChan: make(chan error, 1),
}
for _, item := range items {
switch strings.ToLower(item.GetName()) {
case ReadmeFileName:
if !opt.GetDetail {
break
}
reader.SetReadContent(item)
wg.Add(1)
go readReadme(&wg, reader)
case MetadataFileName:
reader.SetReadContent(item)
wg.Add(1)
go readMetadata(&wg, reader)
case DefinitionsDirName:
if !opt.GetDefinition {
break
}
reader.SetReadContent(item)
wg.Add(1)
go readDefinitions(&wg, reader)
case ResourcesDirName:
if !opt.GetResource && !opt.GetParameter {
break
}
reader.SetReadContent(item)
wg.Add(1)
go readResources(&wg, reader)
case TemplateFileName:
if !opt.GetTemplate {
break
}
reader.SetReadContent(item)
wg.Add(1)
go readTemplate(&wg, reader)
}
}
wg.Wait()
if opt.GetParameter && reader.addon.Parameters != "" {
err = genAddonAPISchema(reader.addon)
if err != nil {
return nil, err
}
}
return reader.addon, nil
}
func readTemplate(wg *sync.WaitGroup, reader asyncReader) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
data, err := content.GetContent()
if err != nil {
reader.errChan <- err
return
}
dec := k8syaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme)
reader.addon.AppTemplate = &v1beta1.Application{}
_, _, err = dec.Decode([]byte(data), nil, reader.addon.AppTemplate)
if err != nil {
reader.errChan <- err
return
}
}
func readResources(wg *sync.WaitGroup, reader asyncReader) {
defer wg.Done()
dirPath := strings.Split(reader.item.GetPath(), "/")
dirPath, err := cutPathUntil(dirPath, ResourcesDirName)
if err != nil {
reader.errChan <- err
}
_, items, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
for _, item := range items {
switch item.GetType() {
case "file":
reader.SetReadContent(item)
wg.Add(1)
go readResFile(wg, reader, dirPath)
case "dir":
reader.SetReadContent(item)
wg.Add(1)
go readResources(wg, reader)
}
}
}
// readResFile read single resource file
func | (wg *sync.WaitGroup, reader asyncReader, dirPath []string) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
b, err := content.GetContent()
if err != nil {
reader.errChan <- err
return
}
if reader.item.GetName() == "parameter.cue" {
reader.addon.Parameters = b
return
}
switch filepath.Ext(reader.item.GetName()) {
case ".cue":
reader.addon.CUETemplates = append(reader.addon.CUETemplates, types.AddonElementFile{Data: b, Name: reader.item.GetName(), Path: dirPath})
default:
reader.addon.YAMLTemplates = append(reader.addon.YAMLTemplates, types.AddonElementFile{Data: b, Name: reader.item.GetName(), Path: dirPath})
}
}
func readDefinitions(wg *sync.WaitGroup, reader asyncReader) {
defer wg.Done()
dirPath := strings.Split(reader.item.GetPath(), "/")
dirPath, err := cutPathUntil(dirPath, DefinitionsDirName)
if err != nil {
reader.errChan <- err
return
}
_, items, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
for _, item := range items {
switch item.GetType() {
case "file":
reader.SetReadContent(item)
wg.Add(1)
go readDefFile(wg, reader, dirPath)
case "dir":
reader.SetReadContent(item)
wg.Add(1)
go readDefinitions(wg, reader)
}
}
}
// readDefFile read single definition file
func readDefFile(wg *sync.WaitGroup, reader asyncReader, dirPath []string) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
b, err := content.GetContent()
if err != nil {
reader.errChan <- err
return
}
reader.addon.Definitions = append(reader.addon.Definitions, types.AddonElementFile{Data: b, Name: reader.item.GetName(), Path: dirPath})
}
func readMetadata(wg *sync.WaitGroup, reader asyncReader) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
b, err := content.GetContent()
if err != nil {
reader.errChan <- err
return
}
err = yaml.Unmarshal([]byte(b), &reader.addon.AddonMeta)
if err != nil {
reader.errChan <- err
return
}
}
func readReadme(wg *sync.WaitGroup, reader asyncReader) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
reader.addon.Detail, err = content.GetContent()
if err != nil {
reader.errChan <- err
return
}
}
func createGitHelper(baseURL, dir, token string) (*gitHelper, error) {
var ts oauth2.TokenSource
if token != "" {
ts = oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token})
}
tc := oauth2.NewClient(context.Background(), ts)
tc.Timeout = time.Second * 10
cli := github.NewClient(tc)
baseURL = strings.TrimSuffix(baseURL, ".git")
u, err := url.Parse(baseURL)
if err != nil {
return nil, errors.New("addon registry invalid")
}
u.Path = path.Join(u.Path, dir)
_, gitmeta, err := utils.Parse(u.String())
if err != nil {
return nil, errors.New("addon registry invalid")
}
return &gitHelper{
Client: cli,
Meta: gitmeta,
}, nil
}
func (h *gitHelper) readRepo(path string) (*github.RepositoryContent, []*github.RepositoryContent, error) {
file, items, _, err := h.Client.Repositories.GetContents(context.Background(), h.Meta.Owner, h.Meta.Repo, path, nil)
if err != nil {
return nil, nil, WrapErrRateLimit(err)
}
return file, items, nil
}
func genAddonAPISchema(addonRes *types.Addon) error {
param, err := utils2.PrepareParameterCue(addonRes.Name, addonRes.Parameters)
if err != nil {
return err
}
var r cue.Runtime
cueInst, err := r.Compile("-", param)
if err != nil {
return err
}
data, err := common.GenOpenAPI(cueInst)
if err != nil {
return err
}
schema, err := utils2.ConvertOpenAPISchema2SwaggerObject(data)
if err != nil {
return err
}
utils2.FixOpenAPISchema("", schema)
addonRes.APISchema = schema
return nil
}
func cutPathUntil(path []string, end string) ([]string, error) {
for i, d := range path {
if d == end {
return path[i:], nil
}
}
return nil, errors.New("cut path fail, target directory name not found")
}
// RenderApplication render a K8s application
func RenderApplication(addon *types.Addon, args map[string]string) (*v1beta1.Application, []*unstructured.Unstructured, error) {
if args == nil {
args = map[string]string{}
}
app := addon.AppTemplate
if app == nil {
app = &v1beta1.Application{
TypeMeta: metav1.TypeMeta{APIVersion: "core.oam.dev/v1beta1", Kind: "Application"},
ObjectMeta: metav1.ObjectMeta{
Name: Convert2AppName(addon.Name),
Namespace: types.DefaultKubeVelaNS,
Labels: map[string]string{
oam.LabelAddonName: addon.Name,
},
},
Spec: v1beta1.ApplicationSpec{
Components: []common2.ApplicationComponent{},
},
}
}
app.Name = Convert2AppName(addon.Name)
app.Labels = util.MergeMapOverrideWithDst(app.Labels, map[string]string{oam.LabelAddonName: addon.Name})
if app.Spec.Workflow == nil {
app.Spec.Workflow = &v1beta1.Workflow{}
}
for _, namespace := range addon.NeedNamespace {
comp := common2.ApplicationComponent{
Type: "raw",
Name: fmt.Sprintf("%s-namespace", namespace),
Properties: util.Object2RawExtension(renderNamespace(namespace)),
}
app.Spec.Components = append(app.Spec.Components, comp)
}
for _, tmpl := range addon.YAMLTemplates {
comp, err := renderRawComponent(tmpl)
if err != nil {
return nil, nil, err
}
app.Spec.Components = append(app.Spec.Components, *comp)
}
for _, tmpl := range addon.CUETemplates {
comp, err := renderCUETemplate(tmpl, addon.Parameters, args)
if err != nil {
return nil, nil, ErrRenderCueTmpl
}
app.Spec.Components = append(app.Spec.Components, *comp)
}
var defObjs []*unstructured.Unstructured
if isDeployToRuntimeOnly(addon) {
// Runtime cluster mode needs to deploy definitions to control plane k8s.
for _, def := range addon.Definitions {
obj, err := renderObject(def)
if err != nil {
return nil, nil, err
}
defObjs = append(defObjs, obj)
}
if app.Spec.Workflow == nil {
app.Spec.Workflow = &v1beta1.Workflow{Steps: make([]v1beta1.WorkflowStep, 0)}
}
app.Spec.Workflow.Steps = append(app.Spec.Workflow.Steps,
v1beta1.WorkflowStep{
Name: "deploy-control-plane",
Type: "apply-application",
},
v1beta1.WorkflowStep{
Name: "deploy-runtime",
Type: "deploy2runtime",
})
} else {
for _, def := range addon.Definitions {
comp, err := renderRawComponent(def)
if err != nil {
return nil, nil, err
}
app.Spec.Components = append(app.Spec.Components, *comp)
}
}
return app, defObjs, nil
}
func isDeployToRuntimeOnly(addon *types.Addon) bool {
if addon.DeployTo == nil {
return false
}
return addon.DeployTo.RuntimeCluster
}
func renderObject(elem types.AddonElementFile) (*unstructured.Unstructured, error) {
obj := &unstructured.Unstructured{}
dec := k8syaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme)
_, _, err := dec.Decode([]byte(elem.Data), nil, obj)
if err != nil {
return nil, err
}
return obj, nil
}
func renderNamespace(namespace string) *unstructured.Unstructured {
u := &unstructured.Unstructured{}
u.SetAPIVersion("v1")
u.SetKind("Namespace")
u.SetName(namespace)
return u
}
// renderRawComponent will return a component in raw type from string
func renderRawComponent(elem types.AddonElementFile) (*common2.ApplicationComponent, error) {
baseRawComponent := common2.ApplicationComponent{
Type: "raw",
Name: strings.Join(append(elem.Path, elem.Name), "-"),
}
obj, err := renderObject(elem)
if err != nil {
return nil, err
}
baseRawComponent.Properties = util.Object2RawExtension(obj)
return &baseRawComponent, nil
}
// renderCUETemplate will return a component from cue template
func renderCUETemplate(elem types.AddonElementFile, parameters string, args map[string]string) (*common2.ApplicationComponent, error) {
bt, err := json.Marshal(args)
if err != nil {
return nil, err
}
var paramFile = cuemodel.ParameterFieldName + ": {}"
if string(bt) != "null" {
paramFile = fmt.Sprintf("%s: %s", cuemodel.ParameterFieldName, string(bt))
}
param := fmt.Sprintf("%s\n%s", paramFile, parameters)
v, err := value.NewValue(param, nil, "")
if err != nil {
return nil, err
}
out, err := v.LookupByScript(fmt.Sprintf("{%s}", elem.Data))
if err != nil {
return nil, err
}
compContent, err := out.LookupValue("output")
if err != nil {
return nil, err
}
b, err := cueyaml.Encode(compContent.CueValue())
if err != nil {
return nil, err
}
comp := common2.ApplicationComponent{
Name: strings.Join(append(elem.Path, elem.Name), "-"),
}
err = yaml.Unmarshal(b, &comp)
if err != nil {
return nil, err
}
return &comp, err
}
const addonAppPrefix = "addon-"
const addonSecPrefix = "addon-secret-"
// Convert2AppName -
func Convert2AppName(name string) string {
return addonAppPrefix + name
}
// Convert2AddonName -
func Convert2AddonName(name string) string {
return strings.TrimPrefix(name, addonAppPrefix)
}
// RenderArgsSecret TODO add desc
func RenderArgsSecret(addon *types.Addon, args map[string]string) *v1.Secret {
sec := v1.Secret{
TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "Secret"},
ObjectMeta: metav1.ObjectMeta{
Name: Convert2SecName(addon.Name),
Namespace: types.DefaultKubeVelaNS,
},
StringData: args,
Type: v1.SecretTypeOpaque,
}
return &sec
}
// Convert2SecName TODO add desc
func Convert2SecName(name string) string {
return addonSecPrefix + name
}
// CheckDependencies checks if addon's dependent addons is enabled
func CheckDependencies(ctx context.Context, clt client.Client, addon *types.Addon) bool {
var app v1beta1.Application
for _, dep := range addon.Dependencies {
err := clt.Get(ctx, client.ObjectKey{
Namespace: types.DefaultKubeVelaNS,
Name: Convert2AppName(dep.Name),
}, &app)
if err != nil {
return false
}
}
return true
}
| readResFile | identifier_name |
addon.go | /*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package addon
import (
"context"
"encoding/json"
"fmt"
"net/url"
"path"
"path/filepath"
"strings"
"sync"
"time"
"cuelang.org/go/cue"
cueyaml "cuelang.org/go/encoding/yaml"
"github.com/google/go-github/v32/github"
"github.com/pkg/errors"
"golang.org/x/oauth2"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
k8syaml "k8s.io/apimachinery/pkg/runtime/serializer/yaml"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
common2 "github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/types"
utils2 "github.com/oam-dev/kubevela/pkg/controller/utils"
cuemodel "github.com/oam-dev/kubevela/pkg/cue/model"
"github.com/oam-dev/kubevela/pkg/cue/model/value"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/util"
"github.com/oam-dev/kubevela/pkg/utils"
"github.com/oam-dev/kubevela/pkg/utils/common"
)
const (
// ReadmeFileName is the addon readme file name
ReadmeFileName string = "readme.md"
// MetadataFileName is the addon meatadata.yaml file name
MetadataFileName string = "metadata.yaml"
// TemplateFileName is the addon template.yaml dir name
TemplateFileName string = "template.yaml"
// ResourcesDirName is the addon resources/ dir name
ResourcesDirName string = "resources"
// DefinitionsDirName is the addon definitions/ dir name
DefinitionsDirName string = "definitions"
)
// ListOptions contains flags mark what files should be read in an addon directory
type ListOptions struct {
GetDetail bool
GetDefinition bool
GetResource bool
GetParameter bool
GetTemplate bool
}
var (
// GetLevelOptions used when get or list addons
GetLevelOptions = ListOptions{GetDetail: true, GetDefinition: true, GetParameter: true}
// EnableLevelOptions used when enable addon
EnableLevelOptions = ListOptions{GetDetail: true, GetDefinition: true, GetResource: true, GetTemplate: true, GetParameter: true}
)
// aError is internal error type of addon
type aError error
var (
// ErrNotExist means addon not exists
ErrNotExist aError = errors.New("addon not exist")
)
// gitHelper helps get addon's file by git
type gitHelper struct {
Client *github.Client
Meta *utils.Content
}
// GitAddonSource defines the information about the Git as addon source
type GitAddonSource struct {
URL string `json:"url,omitempty" validate:"required"`
Path string `json:"path,omitempty"`
Token string `json:"token,omitempty"`
}
// asyncReader helps async read files of addon
type asyncReader struct {
addon *types.Addon
h *gitHelper
item *github.RepositoryContent
errChan chan error
}
// SetReadContent set which file to read
func (r *asyncReader) SetReadContent(content *github.RepositoryContent) {
r.item = content
}
// GetAddon get a addon info from GitAddonSource, can be used for get or enable
func GetAddon(name string, git *GitAddonSource, opt ListOptions) (*types.Addon, error) {
addon, err := getSingleAddonFromGit(git.URL, git.Path, name, git.Token, opt)
if err != nil {
return nil, err
}
return addon, nil
}
// ListAddons list addons' info from GitAddonSource
func ListAddons(git *GitAddonSource, opt ListOptions) ([]*types.Addon, error) {
gitAddons, err := getAddonsFromGit(git.URL, git.Path, git.Token, opt)
if err != nil {
return nil, err
}
return gitAddons, nil
}
func getAddonsFromGit(baseURL, dir, token string, opt ListOptions) ([]*types.Addon, error) {
var addons []*types.Addon
var err error
var wg sync.WaitGroup
errChan := make(chan error, 1)
gith, err := createGitHelper(baseURL, dir, token)
if err != nil {
return nil, err
}
_, items, err := gith.readRepo(gith.Meta.Path)
if err != nil {
return nil, err
}
for _, subItems := range items {
if subItems.GetType() != "dir" {
continue
}
wg.Add(1)
go func(item *github.RepositoryContent) {
defer wg.Done()
addonRes, err := getSingleAddonFromGit(baseURL, dir, item.GetName(), token, opt)
if err != nil {
errChan <- err
return
}
addons = append(addons, addonRes)
}(subItems)
}
wg.Wait()
if len(errChan) != 0 {
return nil, <-errChan
}
return addons, nil
}
func getSingleAddonFromGit(baseURL, dir, addonName, token string, opt ListOptions) (*types.Addon, error) {
var wg sync.WaitGroup
gith, err := createGitHelper(baseURL, path.Join(dir, addonName), token)
if err != nil {
return nil, err
}
_, items, err := gith.readRepo(gith.Meta.Path)
if err != nil {
return nil, err
}
reader := asyncReader{
addon: &types.Addon{},
h: gith,
errChan: make(chan error, 1),
}
for _, item := range items {
switch strings.ToLower(item.GetName()) {
case ReadmeFileName:
if !opt.GetDetail {
break
}
reader.SetReadContent(item)
wg.Add(1)
go readReadme(&wg, reader)
case MetadataFileName:
reader.SetReadContent(item)
wg.Add(1)
go readMetadata(&wg, reader)
case DefinitionsDirName:
if !opt.GetDefinition {
break
}
reader.SetReadContent(item)
wg.Add(1)
go readDefinitions(&wg, reader)
case ResourcesDirName:
if !opt.GetResource && !opt.GetParameter {
break
}
reader.SetReadContent(item)
wg.Add(1)
go readResources(&wg, reader)
case TemplateFileName:
if !opt.GetTemplate {
break
}
reader.SetReadContent(item)
wg.Add(1)
go readTemplate(&wg, reader)
}
}
wg.Wait()
if opt.GetParameter && reader.addon.Parameters != "" {
err = genAddonAPISchema(reader.addon)
if err != nil {
return nil, err
}
}
return reader.addon, nil
}
func readTemplate(wg *sync.WaitGroup, reader asyncReader) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
data, err := content.GetContent()
if err != nil {
reader.errChan <- err
return
}
dec := k8syaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme)
reader.addon.AppTemplate = &v1beta1.Application{}
_, _, err = dec.Decode([]byte(data), nil, reader.addon.AppTemplate)
if err != nil {
reader.errChan <- err
return
}
}
func readResources(wg *sync.WaitGroup, reader asyncReader) {
defer wg.Done()
dirPath := strings.Split(reader.item.GetPath(), "/")
dirPath, err := cutPathUntil(dirPath, ResourcesDirName)
if err != nil {
reader.errChan <- err
}
_, items, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
for _, item := range items {
switch item.GetType() {
case "file":
reader.SetReadContent(item)
wg.Add(1)
go readResFile(wg, reader, dirPath)
case "dir":
reader.SetReadContent(item)
wg.Add(1)
go readResources(wg, reader)
}
}
}
// readResFile read single resource file
func readResFile(wg *sync.WaitGroup, reader asyncReader, dirPath []string) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
b, err := content.GetContent()
if err != nil {
reader.errChan <- err
return
}
if reader.item.GetName() == "parameter.cue" {
reader.addon.Parameters = b
return
}
switch filepath.Ext(reader.item.GetName()) {
case ".cue":
reader.addon.CUETemplates = append(reader.addon.CUETemplates, types.AddonElementFile{Data: b, Name: reader.item.GetName(), Path: dirPath})
default:
reader.addon.YAMLTemplates = append(reader.addon.YAMLTemplates, types.AddonElementFile{Data: b, Name: reader.item.GetName(), Path: dirPath})
}
}
func readDefinitions(wg *sync.WaitGroup, reader asyncReader) {
defer wg.Done()
dirPath := strings.Split(reader.item.GetPath(), "/")
dirPath, err := cutPathUntil(dirPath, DefinitionsDirName)
if err != nil {
reader.errChan <- err
return
}
_, items, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
for _, item := range items {
switch item.GetType() {
case "file":
reader.SetReadContent(item)
wg.Add(1)
go readDefFile(wg, reader, dirPath)
case "dir":
reader.SetReadContent(item)
wg.Add(1)
go readDefinitions(wg, reader)
}
}
}
// readDefFile read single definition file
func readDefFile(wg *sync.WaitGroup, reader asyncReader, dirPath []string) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
b, err := content.GetContent()
if err != nil {
reader.errChan <- err
return
}
reader.addon.Definitions = append(reader.addon.Definitions, types.AddonElementFile{Data: b, Name: reader.item.GetName(), Path: dirPath})
}
func readMetadata(wg *sync.WaitGroup, reader asyncReader) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
b, err := content.GetContent()
if err != nil {
reader.errChan <- err
return
}
err = yaml.Unmarshal([]byte(b), &reader.addon.AddonMeta)
if err != nil {
reader.errChan <- err
return
}
}
func readReadme(wg *sync.WaitGroup, reader asyncReader) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
reader.addon.Detail, err = content.GetContent()
if err != nil {
reader.errChan <- err
return
}
}
func createGitHelper(baseURL, dir, token string) (*gitHelper, error) {
var ts oauth2.TokenSource
if token != "" {
ts = oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token})
}
tc := oauth2.NewClient(context.Background(), ts)
tc.Timeout = time.Second * 10
cli := github.NewClient(tc)
baseURL = strings.TrimSuffix(baseURL, ".git")
u, err := url.Parse(baseURL)
if err != nil {
return nil, errors.New("addon registry invalid")
}
u.Path = path.Join(u.Path, dir)
_, gitmeta, err := utils.Parse(u.String())
if err != nil {
return nil, errors.New("addon registry invalid")
}
return &gitHelper{
Client: cli,
Meta: gitmeta,
}, nil
}
func (h *gitHelper) readRepo(path string) (*github.RepositoryContent, []*github.RepositoryContent, error) {
file, items, _, err := h.Client.Repositories.GetContents(context.Background(), h.Meta.Owner, h.Meta.Repo, path, nil)
if err != nil {
return nil, nil, WrapErrRateLimit(err)
}
return file, items, nil
}
func genAddonAPISchema(addonRes *types.Addon) error {
param, err := utils2.PrepareParameterCue(addonRes.Name, addonRes.Parameters)
if err != nil {
return err
}
var r cue.Runtime
cueInst, err := r.Compile("-", param)
if err != nil {
return err
}
data, err := common.GenOpenAPI(cueInst)
if err != nil {
return err
}
schema, err := utils2.ConvertOpenAPISchema2SwaggerObject(data)
if err != nil {
return err
}
utils2.FixOpenAPISchema("", schema)
addonRes.APISchema = schema
return nil
}
func cutPathUntil(path []string, end string) ([]string, error) {
for i, d := range path {
if d == end {
return path[i:], nil
}
}
return nil, errors.New("cut path fail, target directory name not found")
}
// RenderApplication render a K8s application
func RenderApplication(addon *types.Addon, args map[string]string) (*v1beta1.Application, []*unstructured.Unstructured, error) {
if args == nil {
args = map[string]string{}
}
app := addon.AppTemplate
if app == nil {
app = &v1beta1.Application{
TypeMeta: metav1.TypeMeta{APIVersion: "core.oam.dev/v1beta1", Kind: "Application"},
ObjectMeta: metav1.ObjectMeta{
Name: Convert2AppName(addon.Name),
Namespace: types.DefaultKubeVelaNS,
Labels: map[string]string{
oam.LabelAddonName: addon.Name,
},
},
Spec: v1beta1.ApplicationSpec{
Components: []common2.ApplicationComponent{},
},
}
}
app.Name = Convert2AppName(addon.Name)
app.Labels = util.MergeMapOverrideWithDst(app.Labels, map[string]string{oam.LabelAddonName: addon.Name})
if app.Spec.Workflow == nil {
app.Spec.Workflow = &v1beta1.Workflow{}
}
for _, namespace := range addon.NeedNamespace {
comp := common2.ApplicationComponent{
Type: "raw",
Name: fmt.Sprintf("%s-namespace", namespace),
Properties: util.Object2RawExtension(renderNamespace(namespace)),
}
app.Spec.Components = append(app.Spec.Components, comp)
}
for _, tmpl := range addon.YAMLTemplates {
comp, err := renderRawComponent(tmpl)
if err != nil {
return nil, nil, err
}
app.Spec.Components = append(app.Spec.Components, *comp)
}
for _, tmpl := range addon.CUETemplates {
comp, err := renderCUETemplate(tmpl, addon.Parameters, args)
if err != nil {
return nil, nil, ErrRenderCueTmpl
}
app.Spec.Components = append(app.Spec.Components, *comp)
}
var defObjs []*unstructured.Unstructured
if isDeployToRuntimeOnly(addon) {
// Runtime cluster mode needs to deploy definitions to control plane k8s.
for _, def := range addon.Definitions {
obj, err := renderObject(def)
if err != nil {
return nil, nil, err
}
defObjs = append(defObjs, obj)
}
if app.Spec.Workflow == nil {
app.Spec.Workflow = &v1beta1.Workflow{Steps: make([]v1beta1.WorkflowStep, 0)}
}
app.Spec.Workflow.Steps = append(app.Spec.Workflow.Steps,
v1beta1.WorkflowStep{
Name: "deploy-control-plane",
Type: "apply-application",
},
v1beta1.WorkflowStep{
Name: "deploy-runtime",
Type: "deploy2runtime",
})
} else {
for _, def := range addon.Definitions {
comp, err := renderRawComponent(def)
if err != nil {
return nil, nil, err
}
app.Spec.Components = append(app.Spec.Components, *comp)
}
}
return app, defObjs, nil
}
func isDeployToRuntimeOnly(addon *types.Addon) bool {
if addon.DeployTo == nil {
return false
}
return addon.DeployTo.RuntimeCluster
}
func renderObject(elem types.AddonElementFile) (*unstructured.Unstructured, error) {
obj := &unstructured.Unstructured{}
dec := k8syaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme)
_, _, err := dec.Decode([]byte(elem.Data), nil, obj)
if err != nil {
return nil, err
}
return obj, nil
}
func renderNamespace(namespace string) *unstructured.Unstructured {
u := &unstructured.Unstructured{}
u.SetAPIVersion("v1")
u.SetKind("Namespace")
u.SetName(namespace)
return u
}
// renderRawComponent will return a component in raw type from string
func renderRawComponent(elem types.AddonElementFile) (*common2.ApplicationComponent, error) {
baseRawComponent := common2.ApplicationComponent{
Type: "raw",
Name: strings.Join(append(elem.Path, elem.Name), "-"),
}
obj, err := renderObject(elem)
if err != nil {
return nil, err
}
baseRawComponent.Properties = util.Object2RawExtension(obj)
return &baseRawComponent, nil
}
// renderCUETemplate will return a component from cue template
func renderCUETemplate(elem types.AddonElementFile, parameters string, args map[string]string) (*common2.ApplicationComponent, error) {
bt, err := json.Marshal(args)
if err != nil {
return nil, err
}
var paramFile = cuemodel.ParameterFieldName + ": {}"
if string(bt) != "null" {
paramFile = fmt.Sprintf("%s: %s", cuemodel.ParameterFieldName, string(bt))
}
param := fmt.Sprintf("%s\n%s", paramFile, parameters)
v, err := value.NewValue(param, nil, "")
if err != nil {
return nil, err
}
out, err := v.LookupByScript(fmt.Sprintf("{%s}", elem.Data))
if err != nil {
return nil, err
}
compContent, err := out.LookupValue("output")
if err != nil {
return nil, err
}
b, err := cueyaml.Encode(compContent.CueValue())
if err != nil {
return nil, err
}
comp := common2.ApplicationComponent{
Name: strings.Join(append(elem.Path, elem.Name), "-"),
}
err = yaml.Unmarshal(b, &comp)
if err != nil {
return nil, err
}
return &comp, err
}
const addonAppPrefix = "addon-"
const addonSecPrefix = "addon-secret-"
// Convert2AppName -
func Convert2AppName(name string) string {
return addonAppPrefix + name
}
// Convert2AddonName -
func Convert2AddonName(name string) string {
return strings.TrimPrefix(name, addonAppPrefix)
}
// RenderArgsSecret TODO add desc
func RenderArgsSecret(addon *types.Addon, args map[string]string) *v1.Secret {
sec := v1.Secret{
TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "Secret"},
ObjectMeta: metav1.ObjectMeta{
Name: Convert2SecName(addon.Name),
Namespace: types.DefaultKubeVelaNS,
},
StringData: args,
Type: v1.SecretTypeOpaque,
}
return &sec
}
// Convert2SecName TODO add desc
func Convert2SecName(name string) string {
return addonSecPrefix + name
}
// CheckDependencies checks if addon's dependent addons is enabled
func CheckDependencies(ctx context.Context, clt client.Client, addon *types.Addon) bool {
var app v1beta1.Application
for _, dep := range addon.Dependencies |
return true
}
| {
err := clt.Get(ctx, client.ObjectKey{
Namespace: types.DefaultKubeVelaNS,
Name: Convert2AppName(dep.Name),
}, &app)
if err != nil {
return false
}
} | conditional_block |
addon.go | /*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package addon
import (
"context"
"encoding/json"
"fmt"
"net/url"
"path"
"path/filepath"
"strings"
"sync"
"time"
"cuelang.org/go/cue"
cueyaml "cuelang.org/go/encoding/yaml"
"github.com/google/go-github/v32/github"
"github.com/pkg/errors"
"golang.org/x/oauth2"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
k8syaml "k8s.io/apimachinery/pkg/runtime/serializer/yaml"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
common2 "github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/types"
utils2 "github.com/oam-dev/kubevela/pkg/controller/utils"
cuemodel "github.com/oam-dev/kubevela/pkg/cue/model"
"github.com/oam-dev/kubevela/pkg/cue/model/value"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/util"
"github.com/oam-dev/kubevela/pkg/utils"
"github.com/oam-dev/kubevela/pkg/utils/common"
)
const (
// ReadmeFileName is the addon readme file name
ReadmeFileName string = "readme.md"
// MetadataFileName is the addon meatadata.yaml file name
MetadataFileName string = "metadata.yaml"
// TemplateFileName is the addon template.yaml dir name
TemplateFileName string = "template.yaml"
// ResourcesDirName is the addon resources/ dir name
ResourcesDirName string = "resources"
// DefinitionsDirName is the addon definitions/ dir name
DefinitionsDirName string = "definitions"
)
// ListOptions contains flags mark what files should be read in an addon directory
type ListOptions struct {
GetDetail bool
GetDefinition bool
GetResource bool
GetParameter bool
GetTemplate bool
}
var (
// GetLevelOptions used when get or list addons
GetLevelOptions = ListOptions{GetDetail: true, GetDefinition: true, GetParameter: true}
// EnableLevelOptions used when enable addon
EnableLevelOptions = ListOptions{GetDetail: true, GetDefinition: true, GetResource: true, GetTemplate: true, GetParameter: true}
)
// aError is internal error type of addon
type aError error
var (
// ErrNotExist means addon not exists
ErrNotExist aError = errors.New("addon not exist")
)
// gitHelper helps get addon's file by git
type gitHelper struct {
Client *github.Client
Meta *utils.Content
}
// GitAddonSource defines the information about the Git as addon source
type GitAddonSource struct {
URL string `json:"url,omitempty" validate:"required"`
Path string `json:"path,omitempty"`
Token string `json:"token,omitempty"`
}
// asyncReader helps async read files of addon
type asyncReader struct {
addon *types.Addon
h *gitHelper
item *github.RepositoryContent
errChan chan error
}
// SetReadContent set which file to read
func (r *asyncReader) SetReadContent(content *github.RepositoryContent) {
r.item = content
}
// GetAddon get a addon info from GitAddonSource, can be used for get or enable
func GetAddon(name string, git *GitAddonSource, opt ListOptions) (*types.Addon, error) {
addon, err := getSingleAddonFromGit(git.URL, git.Path, name, git.Token, opt)
if err != nil {
return nil, err
}
return addon, nil
}
// ListAddons list addons' info from GitAddonSource
func ListAddons(git *GitAddonSource, opt ListOptions) ([]*types.Addon, error) {
gitAddons, err := getAddonsFromGit(git.URL, git.Path, git.Token, opt)
if err != nil {
return nil, err
}
return gitAddons, nil
}
func getAddonsFromGit(baseURL, dir, token string, opt ListOptions) ([]*types.Addon, error) {
var addons []*types.Addon
var err error
var wg sync.WaitGroup
errChan := make(chan error, 1)
gith, err := createGitHelper(baseURL, dir, token)
if err != nil {
return nil, err
}
_, items, err := gith.readRepo(gith.Meta.Path)
if err != nil {
return nil, err
}
for _, subItems := range items {
if subItems.GetType() != "dir" {
continue
}
wg.Add(1)
go func(item *github.RepositoryContent) {
defer wg.Done()
addonRes, err := getSingleAddonFromGit(baseURL, dir, item.GetName(), token, opt)
if err != nil {
errChan <- err
return
}
addons = append(addons, addonRes)
}(subItems)
}
wg.Wait()
if len(errChan) != 0 {
return nil, <-errChan
}
return addons, nil
}
func getSingleAddonFromGit(baseURL, dir, addonName, token string, opt ListOptions) (*types.Addon, error) {
var wg sync.WaitGroup
gith, err := createGitHelper(baseURL, path.Join(dir, addonName), token)
if err != nil {
return nil, err
}
_, items, err := gith.readRepo(gith.Meta.Path)
if err != nil {
return nil, err
}
reader := asyncReader{
addon: &types.Addon{},
h: gith,
errChan: make(chan error, 1),
}
for _, item := range items {
switch strings.ToLower(item.GetName()) {
case ReadmeFileName:
if !opt.GetDetail {
break
}
reader.SetReadContent(item)
wg.Add(1)
go readReadme(&wg, reader)
case MetadataFileName:
reader.SetReadContent(item)
wg.Add(1)
go readMetadata(&wg, reader)
case DefinitionsDirName:
if !opt.GetDefinition {
break
}
reader.SetReadContent(item)
wg.Add(1)
go readDefinitions(&wg, reader)
case ResourcesDirName:
if !opt.GetResource && !opt.GetParameter {
break
}
reader.SetReadContent(item)
wg.Add(1)
go readResources(&wg, reader)
case TemplateFileName:
if !opt.GetTemplate {
break
}
reader.SetReadContent(item)
wg.Add(1)
go readTemplate(&wg, reader)
}
}
wg.Wait()
if opt.GetParameter && reader.addon.Parameters != "" {
err = genAddonAPISchema(reader.addon)
if err != nil {
return nil, err
}
}
return reader.addon, nil
}
func readTemplate(wg *sync.WaitGroup, reader asyncReader) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
data, err := content.GetContent()
if err != nil {
reader.errChan <- err
return
}
dec := k8syaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme)
reader.addon.AppTemplate = &v1beta1.Application{}
_, _, err = dec.Decode([]byte(data), nil, reader.addon.AppTemplate)
if err != nil {
reader.errChan <- err
return
}
}
func readResources(wg *sync.WaitGroup, reader asyncReader) { | dirPath, err := cutPathUntil(dirPath, ResourcesDirName)
if err != nil {
reader.errChan <- err
}
_, items, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
for _, item := range items {
switch item.GetType() {
case "file":
reader.SetReadContent(item)
wg.Add(1)
go readResFile(wg, reader, dirPath)
case "dir":
reader.SetReadContent(item)
wg.Add(1)
go readResources(wg, reader)
}
}
}
// readResFile read single resource file
func readResFile(wg *sync.WaitGroup, reader asyncReader, dirPath []string) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
b, err := content.GetContent()
if err != nil {
reader.errChan <- err
return
}
if reader.item.GetName() == "parameter.cue" {
reader.addon.Parameters = b
return
}
switch filepath.Ext(reader.item.GetName()) {
case ".cue":
reader.addon.CUETemplates = append(reader.addon.CUETemplates, types.AddonElementFile{Data: b, Name: reader.item.GetName(), Path: dirPath})
default:
reader.addon.YAMLTemplates = append(reader.addon.YAMLTemplates, types.AddonElementFile{Data: b, Name: reader.item.GetName(), Path: dirPath})
}
}
func readDefinitions(wg *sync.WaitGroup, reader asyncReader) {
defer wg.Done()
dirPath := strings.Split(reader.item.GetPath(), "/")
dirPath, err := cutPathUntil(dirPath, DefinitionsDirName)
if err != nil {
reader.errChan <- err
return
}
_, items, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
for _, item := range items {
switch item.GetType() {
case "file":
reader.SetReadContent(item)
wg.Add(1)
go readDefFile(wg, reader, dirPath)
case "dir":
reader.SetReadContent(item)
wg.Add(1)
go readDefinitions(wg, reader)
}
}
}
// readDefFile read single definition file
func readDefFile(wg *sync.WaitGroup, reader asyncReader, dirPath []string) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
b, err := content.GetContent()
if err != nil {
reader.errChan <- err
return
}
reader.addon.Definitions = append(reader.addon.Definitions, types.AddonElementFile{Data: b, Name: reader.item.GetName(), Path: dirPath})
}
func readMetadata(wg *sync.WaitGroup, reader asyncReader) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
b, err := content.GetContent()
if err != nil {
reader.errChan <- err
return
}
err = yaml.Unmarshal([]byte(b), &reader.addon.AddonMeta)
if err != nil {
reader.errChan <- err
return
}
}
func readReadme(wg *sync.WaitGroup, reader asyncReader) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
reader.addon.Detail, err = content.GetContent()
if err != nil {
reader.errChan <- err
return
}
}
func createGitHelper(baseURL, dir, token string) (*gitHelper, error) {
var ts oauth2.TokenSource
if token != "" {
ts = oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token})
}
tc := oauth2.NewClient(context.Background(), ts)
tc.Timeout = time.Second * 10
cli := github.NewClient(tc)
baseURL = strings.TrimSuffix(baseURL, ".git")
u, err := url.Parse(baseURL)
if err != nil {
return nil, errors.New("addon registry invalid")
}
u.Path = path.Join(u.Path, dir)
_, gitmeta, err := utils.Parse(u.String())
if err != nil {
return nil, errors.New("addon registry invalid")
}
return &gitHelper{
Client: cli,
Meta: gitmeta,
}, nil
}
func (h *gitHelper) readRepo(path string) (*github.RepositoryContent, []*github.RepositoryContent, error) {
file, items, _, err := h.Client.Repositories.GetContents(context.Background(), h.Meta.Owner, h.Meta.Repo, path, nil)
if err != nil {
return nil, nil, WrapErrRateLimit(err)
}
return file, items, nil
}
func genAddonAPISchema(addonRes *types.Addon) error {
param, err := utils2.PrepareParameterCue(addonRes.Name, addonRes.Parameters)
if err != nil {
return err
}
var r cue.Runtime
cueInst, err := r.Compile("-", param)
if err != nil {
return err
}
data, err := common.GenOpenAPI(cueInst)
if err != nil {
return err
}
schema, err := utils2.ConvertOpenAPISchema2SwaggerObject(data)
if err != nil {
return err
}
utils2.FixOpenAPISchema("", schema)
addonRes.APISchema = schema
return nil
}
func cutPathUntil(path []string, end string) ([]string, error) {
for i, d := range path {
if d == end {
return path[i:], nil
}
}
return nil, errors.New("cut path fail, target directory name not found")
}
// RenderApplication render a K8s application
func RenderApplication(addon *types.Addon, args map[string]string) (*v1beta1.Application, []*unstructured.Unstructured, error) {
if args == nil {
args = map[string]string{}
}
app := addon.AppTemplate
if app == nil {
app = &v1beta1.Application{
TypeMeta: metav1.TypeMeta{APIVersion: "core.oam.dev/v1beta1", Kind: "Application"},
ObjectMeta: metav1.ObjectMeta{
Name: Convert2AppName(addon.Name),
Namespace: types.DefaultKubeVelaNS,
Labels: map[string]string{
oam.LabelAddonName: addon.Name,
},
},
Spec: v1beta1.ApplicationSpec{
Components: []common2.ApplicationComponent{},
},
}
}
app.Name = Convert2AppName(addon.Name)
app.Labels = util.MergeMapOverrideWithDst(app.Labels, map[string]string{oam.LabelAddonName: addon.Name})
if app.Spec.Workflow == nil {
app.Spec.Workflow = &v1beta1.Workflow{}
}
for _, namespace := range addon.NeedNamespace {
comp := common2.ApplicationComponent{
Type: "raw",
Name: fmt.Sprintf("%s-namespace", namespace),
Properties: util.Object2RawExtension(renderNamespace(namespace)),
}
app.Spec.Components = append(app.Spec.Components, comp)
}
for _, tmpl := range addon.YAMLTemplates {
comp, err := renderRawComponent(tmpl)
if err != nil {
return nil, nil, err
}
app.Spec.Components = append(app.Spec.Components, *comp)
}
for _, tmpl := range addon.CUETemplates {
comp, err := renderCUETemplate(tmpl, addon.Parameters, args)
if err != nil {
return nil, nil, ErrRenderCueTmpl
}
app.Spec.Components = append(app.Spec.Components, *comp)
}
var defObjs []*unstructured.Unstructured
if isDeployToRuntimeOnly(addon) {
// Runtime cluster mode needs to deploy definitions to control plane k8s.
for _, def := range addon.Definitions {
obj, err := renderObject(def)
if err != nil {
return nil, nil, err
}
defObjs = append(defObjs, obj)
}
if app.Spec.Workflow == nil {
app.Spec.Workflow = &v1beta1.Workflow{Steps: make([]v1beta1.WorkflowStep, 0)}
}
app.Spec.Workflow.Steps = append(app.Spec.Workflow.Steps,
v1beta1.WorkflowStep{
Name: "deploy-control-plane",
Type: "apply-application",
},
v1beta1.WorkflowStep{
Name: "deploy-runtime",
Type: "deploy2runtime",
})
} else {
for _, def := range addon.Definitions {
comp, err := renderRawComponent(def)
if err != nil {
return nil, nil, err
}
app.Spec.Components = append(app.Spec.Components, *comp)
}
}
return app, defObjs, nil
}
func isDeployToRuntimeOnly(addon *types.Addon) bool {
if addon.DeployTo == nil {
return false
}
return addon.DeployTo.RuntimeCluster
}
func renderObject(elem types.AddonElementFile) (*unstructured.Unstructured, error) {
obj := &unstructured.Unstructured{}
dec := k8syaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme)
_, _, err := dec.Decode([]byte(elem.Data), nil, obj)
if err != nil {
return nil, err
}
return obj, nil
}
func renderNamespace(namespace string) *unstructured.Unstructured {
u := &unstructured.Unstructured{}
u.SetAPIVersion("v1")
u.SetKind("Namespace")
u.SetName(namespace)
return u
}
// renderRawComponent will return a component in raw type from string
func renderRawComponent(elem types.AddonElementFile) (*common2.ApplicationComponent, error) {
baseRawComponent := common2.ApplicationComponent{
Type: "raw",
Name: strings.Join(append(elem.Path, elem.Name), "-"),
}
obj, err := renderObject(elem)
if err != nil {
return nil, err
}
baseRawComponent.Properties = util.Object2RawExtension(obj)
return &baseRawComponent, nil
}
// renderCUETemplate will return a component from cue template
func renderCUETemplate(elem types.AddonElementFile, parameters string, args map[string]string) (*common2.ApplicationComponent, error) {
bt, err := json.Marshal(args)
if err != nil {
return nil, err
}
var paramFile = cuemodel.ParameterFieldName + ": {}"
if string(bt) != "null" {
paramFile = fmt.Sprintf("%s: %s", cuemodel.ParameterFieldName, string(bt))
}
param := fmt.Sprintf("%s\n%s", paramFile, parameters)
v, err := value.NewValue(param, nil, "")
if err != nil {
return nil, err
}
out, err := v.LookupByScript(fmt.Sprintf("{%s}", elem.Data))
if err != nil {
return nil, err
}
compContent, err := out.LookupValue("output")
if err != nil {
return nil, err
}
b, err := cueyaml.Encode(compContent.CueValue())
if err != nil {
return nil, err
}
comp := common2.ApplicationComponent{
Name: strings.Join(append(elem.Path, elem.Name), "-"),
}
err = yaml.Unmarshal(b, &comp)
if err != nil {
return nil, err
}
return &comp, err
}
const addonAppPrefix = "addon-"
const addonSecPrefix = "addon-secret-"
// Convert2AppName -
func Convert2AppName(name string) string {
return addonAppPrefix + name
}
// Convert2AddonName -
func Convert2AddonName(name string) string {
return strings.TrimPrefix(name, addonAppPrefix)
}
// RenderArgsSecret TODO add desc
func RenderArgsSecret(addon *types.Addon, args map[string]string) *v1.Secret {
sec := v1.Secret{
TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "Secret"},
ObjectMeta: metav1.ObjectMeta{
Name: Convert2SecName(addon.Name),
Namespace: types.DefaultKubeVelaNS,
},
StringData: args,
Type: v1.SecretTypeOpaque,
}
return &sec
}
// Convert2SecName TODO add desc
func Convert2SecName(name string) string {
return addonSecPrefix + name
}
// CheckDependencies checks if addon's dependent addons is enabled
func CheckDependencies(ctx context.Context, clt client.Client, addon *types.Addon) bool {
var app v1beta1.Application
for _, dep := range addon.Dependencies {
err := clt.Get(ctx, client.ObjectKey{
Namespace: types.DefaultKubeVelaNS,
Name: Convert2AppName(dep.Name),
}, &app)
if err != nil {
return false
}
}
return true
} | defer wg.Done()
dirPath := strings.Split(reader.item.GetPath(), "/") | random_line_split |
cryptologer.py | #file should have all ciphers as simple format... just text
#for rsa_dec, file should have text as n:<no...> \n c:<no....> e:<no....>
#same for rsa_enc...
#importing...
#import sys
#import re
import argparse
#import operator
#import math
#import requests
#import json
#import binascii
#import os
#import time
'''
def whereami():
with open('/root/directory.txt','r') as f1:
location=f1.read()
return location
location=whereami()'''
#appending file paths
'''
import Encrypting as enc
import Decrypting as dec
import Algorithms as alg
import rsa_types as RSA'''
'''
sys.path.append('%s/Encrypting/' % location)
sys.path.append('%s/Decrypting/' % location)
sys.path.append('%s/Algorithms/' % location)
sys.path.append('%s/rsa_types/' % location)'''
#file imports for encryption
'''
from caeser import julius,bruteforce
from vigenere_enc import encrypt
from affine_enc import aff_enc
from bacon_enc import steak
from railfence_enc import rail
from atbash import atb
from polybius_enc import tsquaree
from substitution_enc import substitute
from rsa_encryptor import rivest
from rot import rotate,rotate_brute
from skip import skip
from rot47 import rot47'''
#algorithms import
from alg.Ext_Euclid import reversing, printing
#file imports for decryption
'''
from vigenere_dec import impossible, withkey#import impossible, withkey
from affine_dec import fine
from bacon_dec import pork
from polybius_dec import psquaree
from railfence_dec import fence
from substitution_dec import manual
from simplersa import init
from Weiner import attack
from small_e import smallie
from internal_attack import company
from hastad import broadcast
from multi_cipher import multi
from boneh_durfee import example'''
#parsing starts
def initializeParser():
parser=argparse.ArgumentParser(description="Decryptor for Caeser, Vigenere, types of RSA and more...")
parser.add_argument("--decrypt","--dec","-d",help="Performs Decryption",action="store_true")
parser.add_argument("--encrypt","--enc","-e",help="Performs Encryption",action="store_true")
parser.add_argument("--sourcefile","--sf","-f",help="Input file with ciphertext",type=str)
parser.add_argument("--cipher","--cip","-c",help="Input cipher as test",type=str)
parser.add_argument("--key","-k",help="If the key is known (text for vignere, shift for caeser,ROT)",type=str)
parser.add_argument("--len","-l",help="User-defined max probable key length",type=str)
parser.add_argument("--caeser","-C",help="If the cipher is caeser cipher",action="store_true")
parser.add_argument("--vignere","-V",help="If the cipher is vignere cipher",action="store_true")
parser.add_argument("--affine","-A",help="If the cipher is affine cipher",action="store_true")
parser.add_argument("--bacon","-B",help="If the cipher is bacon cipher",action="store_true")
parser.add_argument("--polybius","-P",help="If the cipher is encrypted by a simple 6x6 polybius square",action="store_true")
parser.add_argument("--railfence","-F",help="If railfence encryption is used",action="store_true")
parser.add_argument("--skip","-K",help="If skip cipher is used",action="store_true")
parser.add_argument("--atbash","-T",help="If atbash rotation is done on the plaintext",action="store_true")
parser.add_argument("--rot","-O",help="If the cipher is any rotation cipher",action="store_true")
parser.add_argument("--rot47","-47",help="If the cipher is rotated by ROT47",action="store_true")
parser.add_argument("--substitution","-S",help="If the plaintext in encrypted using simple substitution cipher",action="store_true")
parser.add_argument("--rsa","-R",help="If the cipher is RSA related",action="store_true") #contains simple and multi_rsa
#parser.add_argument("--factordb","--fb","-O",help="Using factordb to crack the rsa",action="store_true")
parser.add_argument("--weiner","-W",help="Cracking RSA using Weiner attack",action="store_true")
parser.add_argument("--smalle","-E",help="Cracking RSA provided e is very small",action="store_true")
parser.add_argument("--internal","-I",help="If an internal attack for RSA is being performed",action="store_true")
parser.add_argument("--multi","-M",help="If the message has loads of encrypted ciphers",action="store_true")
#parser.add_argument("--fermat","-M",help="Fermat's attack on the RSA encrypted text",action="store_true")
#parser.add_argument("--twin","-N",help="If the RSA public is a product of twin prime, use this",action="store_true")
parser.add_argument("--chinese","-H",help="Using the Chinese Remainder Theorem for cracking RSA from e packets having the same n",action="store_true")
parser.add_argument("--boneh","-D",help="Using the famous boneh_durfee to calculate d, provided d< N^0.292",action="store_true")
return parser
def readfile(filename):
with open(filename,"r") as f1:
return f1.read()
def read_rsa(filename):
with open(filename,"r") as f2:
line=f2.readline()
e=0
while line:
symbol=line[0].lower()
if symbol=='n':
n=int(line[2:])
elif symbol=='e':
e=int(line[2:])
elif symbol=='c':
c=line[2:]
elif symbol=='m':
c=line[2:]
else:
raise Exception("the contents of the file can't be read properly")
break
line=f2.readline()
return n,e,c
def read_chinese(filename):
with open(filename,"r") as f3:
line=f3.readline()
while line:
symbol=line[:2].lower()
if symbol=='n1':
n1=int(line[3:])
elif symbol=='n2':
n2=int(line[3:])
elif symbol=='n3':
n3=int(line[3:])
elif symbol=='c1':
c1=int(line[3:])
elif symbol=='c2':
c2=int(line[3:])
elif symbol=='c3':
c3=int(line[3:])
else:
raise Exception("the contents of the file can't be read properly")
break
line=f3.readline()
return n1,n2,n3,c1,c2,c3
def main():
parser=initializeParser()
args=parser.parse_args()
if args.encrypt or args.decrypt:
if args.cipher!=None:
rawtext=args.cipher
else:
rawtext=readfile(args.sourcefile)
key=args.key
if args.encrypt:
plaintext=rawtext
if args.caeser:
from enc.caeser import julius
if args.key==None:
shift=int(input("enter shift:"))
else:
shift=int(key)
ciphertext=julius(plaintext,shift)
elif args.vignere:
from enc.vigenere_enc import encrypt
if args.key==None:
key=input("enter key:")
ciphertext=encrypt(plaintext,key)
elif args.affine:
from enc.affine_enc import aff_enc
ciphertext=aff_enc(plaintext)
elif args.bacon:
|
elif args.railfence:
from enc.railfence_enc import rail
ciphertext=rail(plaintext)
elif args.rot:
from enc.rot import rotate
if args.key==None:
shift=input("enter shift:")
ciphertext=rotate(plaintext,shift)
elif args.rot47:
from enc.rot47 import rot47
ciphertext=rot47(plaintext)
elif args.skip:
from enc.skip import skip
loop=int(input("Enter skip:"))
ciphertext=skip(plaintext,loop)
elif args.atbash:
from enc.atbash import atb
ciphertext=atb(plaintext)
elif args.polybius:
from enc.polybius_enc import tsquaree
ciphertext=tsquaree(plaintext)
elif args.substitution:
from enc.substitution_enc import substitute
ciphertext=substitute(plaintext)
elif args.rsa:
from enc.rsa_encryptor import rivest
n,e,m=read_rsa(args.sourcefile)
ciphertext=rivest(n,e,m)
display='message:'
try:
print("ciphertext:",ciphertext,end='')
except UnboundLocalError:
print("NullError: no ciphering technique mentioned")
elif args.decrypt:
ciphertext=rawtext
display='plaintext:'
if args.caeser:
from enc.caeser import julius,bruteforce
if args.key!=None:
plaintext=julius(ciphertext,-int(key))
else:
display=''
plaintext=bruteforce(ciphertext)
elif args.vignere:
from dec.vigenere_dec import impossible, withkey
length=args.len
if key!=None:
plaintext=withkey(ciphertext,key)
else:
plaintext=impossible(ciphertext,length)
elif args.affine:
from dec.affine_dec import fine
plaintext=fine(ciphertext)
elif args.bacon:
from dec.bacon_dec import pork
if args.key!=None:
plaintext=pork(ciphertext,key)
else:
plaintext=pork(ciphertext,0)
elif args.railfence:
from dec.railfence_dec import fence
length=args.len
if args.len!=None:
plaintext=fence(ciphertext,int(length))
else:
plaintext=fence(ciphertext,None)
display=''
elif args.skip:
from enc.skip import skip
plaintext=skip(ciphertext,None)
display=''
elif args.atbash:
from enc.atbash import atb
plaintext=atb(ciphertext)
elif args.rot:
from enc.rot import rotate,rotate_brute
if key!=None:
plaintext=rotate(ciphertext,key)
else:
plaintext=rotate_brute(ciphertext)
display=''
elif args.rot47:
from enc.rot47 import rot47
plaintext=rot47(ciphertext)
elif args.polybius:
from dec.polybius_dec import psquaree
plaintext=psquaree(ciphertext)
elif args.substitution:
from dec.substitution_dec import manual
plaintext=manual(ciphertext)
elif args.chinese:
from RSA.hastad import broadcast
n1,n2,n3,c1,c2,c3=read_chinese(args.sourcefile)
display='message:'
plaintext=printing(broadcast(n1,n2,n3,c1,c2,c3))
else:
n,e,c=read_rsa(args.sourcefile)
display='message:'
if not args.multi:
c=int(c)
if args.rsa:
from RSA.simplersa import init
plaintext=init(n,e,c)
elif args.weiner:
from RSA.Weiner import attack
plaintext=reversing(n,attack(e,n),c)
elif args.smalle:
from RSA.small_e import smallie
plaintext=printing(smallie(n,c))
elif args.internal:
from RSA.internal_attack import company
plaintext=company(n,e,c)
elif args.boneh:
from RSA.boneh_durfee import example
plaintext=reversing(n,example(n,e),c)
elif args.multi:
from RSA.multi_cipher import multi
arraytext=multi(n,e,c)
plaintext=''
for i in arraytext:
plaintext+=printing(i)
try:
print("%s" % display,plaintext,end='')
except:
print("NullError: no ciphering technique mentioned")
if __name__=="__main__":
main()
| from enc.bacon_enc import steak
ciphertext=steak(plaintext) | conditional_block |
cryptologer.py | #file should have all ciphers as simple format... just text
#for rsa_dec, file should have text as n:<no...> \n c:<no....> e:<no....>
#same for rsa_enc...
#importing...
#import sys
| #import operator
#import math
#import requests
#import json
#import binascii
#import os
#import time
'''
def whereami():
with open('/root/directory.txt','r') as f1:
location=f1.read()
return location
location=whereami()'''
#appending file paths
'''
import Encrypting as enc
import Decrypting as dec
import Algorithms as alg
import rsa_types as RSA'''
'''
sys.path.append('%s/Encrypting/' % location)
sys.path.append('%s/Decrypting/' % location)
sys.path.append('%s/Algorithms/' % location)
sys.path.append('%s/rsa_types/' % location)'''
#file imports for encryption
'''
from caeser import julius,bruteforce
from vigenere_enc import encrypt
from affine_enc import aff_enc
from bacon_enc import steak
from railfence_enc import rail
from atbash import atb
from polybius_enc import tsquaree
from substitution_enc import substitute
from rsa_encryptor import rivest
from rot import rotate,rotate_brute
from skip import skip
from rot47 import rot47'''
#algorithms import
from alg.Ext_Euclid import reversing, printing
#file imports for decryption
'''
from vigenere_dec import impossible, withkey#import impossible, withkey
from affine_dec import fine
from bacon_dec import pork
from polybius_dec import psquaree
from railfence_dec import fence
from substitution_dec import manual
from simplersa import init
from Weiner import attack
from small_e import smallie
from internal_attack import company
from hastad import broadcast
from multi_cipher import multi
from boneh_durfee import example'''
#parsing starts
def initializeParser():
parser=argparse.ArgumentParser(description="Decryptor for Caeser, Vigenere, types of RSA and more...")
parser.add_argument("--decrypt","--dec","-d",help="Performs Decryption",action="store_true")
parser.add_argument("--encrypt","--enc","-e",help="Performs Encryption",action="store_true")
parser.add_argument("--sourcefile","--sf","-f",help="Input file with ciphertext",type=str)
parser.add_argument("--cipher","--cip","-c",help="Input cipher as test",type=str)
parser.add_argument("--key","-k",help="If the key is known (text for vignere, shift for caeser,ROT)",type=str)
parser.add_argument("--len","-l",help="User-defined max probable key length",type=str)
parser.add_argument("--caeser","-C",help="If the cipher is caeser cipher",action="store_true")
parser.add_argument("--vignere","-V",help="If the cipher is vignere cipher",action="store_true")
parser.add_argument("--affine","-A",help="If the cipher is affine cipher",action="store_true")
parser.add_argument("--bacon","-B",help="If the cipher is bacon cipher",action="store_true")
parser.add_argument("--polybius","-P",help="If the cipher is encrypted by a simple 6x6 polybius square",action="store_true")
parser.add_argument("--railfence","-F",help="If railfence encryption is used",action="store_true")
parser.add_argument("--skip","-K",help="If skip cipher is used",action="store_true")
parser.add_argument("--atbash","-T",help="If atbash rotation is done on the plaintext",action="store_true")
parser.add_argument("--rot","-O",help="If the cipher is any rotation cipher",action="store_true")
parser.add_argument("--rot47","-47",help="If the cipher is rotated by ROT47",action="store_true")
parser.add_argument("--substitution","-S",help="If the plaintext in encrypted using simple substitution cipher",action="store_true")
parser.add_argument("--rsa","-R",help="If the cipher is RSA related",action="store_true") #contains simple and multi_rsa
#parser.add_argument("--factordb","--fb","-O",help="Using factordb to crack the rsa",action="store_true")
parser.add_argument("--weiner","-W",help="Cracking RSA using Weiner attack",action="store_true")
parser.add_argument("--smalle","-E",help="Cracking RSA provided e is very small",action="store_true")
parser.add_argument("--internal","-I",help="If an internal attack for RSA is being performed",action="store_true")
parser.add_argument("--multi","-M",help="If the message has loads of encrypted ciphers",action="store_true")
#parser.add_argument("--fermat","-M",help="Fermat's attack on the RSA encrypted text",action="store_true")
#parser.add_argument("--twin","-N",help="If the RSA public is a product of twin prime, use this",action="store_true")
parser.add_argument("--chinese","-H",help="Using the Chinese Remainder Theorem for cracking RSA from e packets having the same n",action="store_true")
parser.add_argument("--boneh","-D",help="Using the famous boneh_durfee to calculate d, provided d< N^0.292",action="store_true")
return parser
def readfile(filename):
with open(filename,"r") as f1:
return f1.read()
def read_rsa(filename):
with open(filename,"r") as f2:
line=f2.readline()
e=0
while line:
symbol=line[0].lower()
if symbol=='n':
n=int(line[2:])
elif symbol=='e':
e=int(line[2:])
elif symbol=='c':
c=line[2:]
elif symbol=='m':
c=line[2:]
else:
raise Exception("the contents of the file can't be read properly")
break
line=f2.readline()
return n,e,c
def read_chinese(filename):
with open(filename,"r") as f3:
line=f3.readline()
while line:
symbol=line[:2].lower()
if symbol=='n1':
n1=int(line[3:])
elif symbol=='n2':
n2=int(line[3:])
elif symbol=='n3':
n3=int(line[3:])
elif symbol=='c1':
c1=int(line[3:])
elif symbol=='c2':
c2=int(line[3:])
elif symbol=='c3':
c3=int(line[3:])
else:
raise Exception("the contents of the file can't be read properly")
break
line=f3.readline()
return n1,n2,n3,c1,c2,c3
def main():
parser=initializeParser()
args=parser.parse_args()
if args.encrypt or args.decrypt:
if args.cipher!=None:
rawtext=args.cipher
else:
rawtext=readfile(args.sourcefile)
key=args.key
if args.encrypt:
plaintext=rawtext
if args.caeser:
from enc.caeser import julius
if args.key==None:
shift=int(input("enter shift:"))
else:
shift=int(key)
ciphertext=julius(plaintext,shift)
elif args.vignere:
from enc.vigenere_enc import encrypt
if args.key==None:
key=input("enter key:")
ciphertext=encrypt(plaintext,key)
elif args.affine:
from enc.affine_enc import aff_enc
ciphertext=aff_enc(plaintext)
elif args.bacon:
from enc.bacon_enc import steak
ciphertext=steak(plaintext)
elif args.railfence:
from enc.railfence_enc import rail
ciphertext=rail(plaintext)
elif args.rot:
from enc.rot import rotate
if args.key==None:
shift=input("enter shift:")
ciphertext=rotate(plaintext,shift)
elif args.rot47:
from enc.rot47 import rot47
ciphertext=rot47(plaintext)
elif args.skip:
from enc.skip import skip
loop=int(input("Enter skip:"))
ciphertext=skip(plaintext,loop)
elif args.atbash:
from enc.atbash import atb
ciphertext=atb(plaintext)
elif args.polybius:
from enc.polybius_enc import tsquaree
ciphertext=tsquaree(plaintext)
elif args.substitution:
from enc.substitution_enc import substitute
ciphertext=substitute(plaintext)
elif args.rsa:
from enc.rsa_encryptor import rivest
n,e,m=read_rsa(args.sourcefile)
ciphertext=rivest(n,e,m)
display='message:'
try:
print("ciphertext:",ciphertext,end='')
except UnboundLocalError:
print("NullError: no ciphering technique mentioned")
elif args.decrypt:
ciphertext=rawtext
display='plaintext:'
if args.caeser:
from enc.caeser import julius,bruteforce
if args.key!=None:
plaintext=julius(ciphertext,-int(key))
else:
display=''
plaintext=bruteforce(ciphertext)
elif args.vignere:
from dec.vigenere_dec import impossible, withkey
length=args.len
if key!=None:
plaintext=withkey(ciphertext,key)
else:
plaintext=impossible(ciphertext,length)
elif args.affine:
from dec.affine_dec import fine
plaintext=fine(ciphertext)
elif args.bacon:
from dec.bacon_dec import pork
if args.key!=None:
plaintext=pork(ciphertext,key)
else:
plaintext=pork(ciphertext,0)
elif args.railfence:
from dec.railfence_dec import fence
length=args.len
if args.len!=None:
plaintext=fence(ciphertext,int(length))
else:
plaintext=fence(ciphertext,None)
display=''
elif args.skip:
from enc.skip import skip
plaintext=skip(ciphertext,None)
display=''
elif args.atbash:
from enc.atbash import atb
plaintext=atb(ciphertext)
elif args.rot:
from enc.rot import rotate,rotate_brute
if key!=None:
plaintext=rotate(ciphertext,key)
else:
plaintext=rotate_brute(ciphertext)
display=''
elif args.rot47:
from enc.rot47 import rot47
plaintext=rot47(ciphertext)
elif args.polybius:
from dec.polybius_dec import psquaree
plaintext=psquaree(ciphertext)
elif args.substitution:
from dec.substitution_dec import manual
plaintext=manual(ciphertext)
elif args.chinese:
from RSA.hastad import broadcast
n1,n2,n3,c1,c2,c3=read_chinese(args.sourcefile)
display='message:'
plaintext=printing(broadcast(n1,n2,n3,c1,c2,c3))
else:
n,e,c=read_rsa(args.sourcefile)
display='message:'
if not args.multi:
c=int(c)
if args.rsa:
from RSA.simplersa import init
plaintext=init(n,e,c)
elif args.weiner:
from RSA.Weiner import attack
plaintext=reversing(n,attack(e,n),c)
elif args.smalle:
from RSA.small_e import smallie
plaintext=printing(smallie(n,c))
elif args.internal:
from RSA.internal_attack import company
plaintext=company(n,e,c)
elif args.boneh:
from RSA.boneh_durfee import example
plaintext=reversing(n,example(n,e),c)
elif args.multi:
from RSA.multi_cipher import multi
arraytext=multi(n,e,c)
plaintext=''
for i in arraytext:
plaintext+=printing(i)
try:
print("%s" % display,plaintext,end='')
except:
print("NullError: no ciphering technique mentioned")
if __name__=="__main__":
main() | #import re
import argparse
| random_line_split |
cryptologer.py | #file should have all ciphers as simple format... just text
#for rsa_dec, file should have text as n:<no...> \n c:<no....> e:<no....>
#same for rsa_enc...
#importing...
#import sys
#import re
import argparse
#import operator
#import math
#import requests
#import json
#import binascii
#import os
#import time
'''
def whereami():
with open('/root/directory.txt','r') as f1:
location=f1.read()
return location
location=whereami()'''
#appending file paths
'''
import Encrypting as enc
import Decrypting as dec
import Algorithms as alg
import rsa_types as RSA'''
'''
sys.path.append('%s/Encrypting/' % location)
sys.path.append('%s/Decrypting/' % location)
sys.path.append('%s/Algorithms/' % location)
sys.path.append('%s/rsa_types/' % location)'''
#file imports for encryption
'''
from caeser import julius,bruteforce
from vigenere_enc import encrypt
from affine_enc import aff_enc
from bacon_enc import steak
from railfence_enc import rail
from atbash import atb
from polybius_enc import tsquaree
from substitution_enc import substitute
from rsa_encryptor import rivest
from rot import rotate,rotate_brute
from skip import skip
from rot47 import rot47'''
#algorithms import
from alg.Ext_Euclid import reversing, printing
#file imports for decryption
'''
from vigenere_dec import impossible, withkey#import impossible, withkey
from affine_dec import fine
from bacon_dec import pork
from polybius_dec import psquaree
from railfence_dec import fence
from substitution_dec import manual
from simplersa import init
from Weiner import attack
from small_e import smallie
from internal_attack import company
from hastad import broadcast
from multi_cipher import multi
from boneh_durfee import example'''
#parsing starts
def | ():
parser=argparse.ArgumentParser(description="Decryptor for Caeser, Vigenere, types of RSA and more...")
parser.add_argument("--decrypt","--dec","-d",help="Performs Decryption",action="store_true")
parser.add_argument("--encrypt","--enc","-e",help="Performs Encryption",action="store_true")
parser.add_argument("--sourcefile","--sf","-f",help="Input file with ciphertext",type=str)
parser.add_argument("--cipher","--cip","-c",help="Input cipher as test",type=str)
parser.add_argument("--key","-k",help="If the key is known (text for vignere, shift for caeser,ROT)",type=str)
parser.add_argument("--len","-l",help="User-defined max probable key length",type=str)
parser.add_argument("--caeser","-C",help="If the cipher is caeser cipher",action="store_true")
parser.add_argument("--vignere","-V",help="If the cipher is vignere cipher",action="store_true")
parser.add_argument("--affine","-A",help="If the cipher is affine cipher",action="store_true")
parser.add_argument("--bacon","-B",help="If the cipher is bacon cipher",action="store_true")
parser.add_argument("--polybius","-P",help="If the cipher is encrypted by a simple 6x6 polybius square",action="store_true")
parser.add_argument("--railfence","-F",help="If railfence encryption is used",action="store_true")
parser.add_argument("--skip","-K",help="If skip cipher is used",action="store_true")
parser.add_argument("--atbash","-T",help="If atbash rotation is done on the plaintext",action="store_true")
parser.add_argument("--rot","-O",help="If the cipher is any rotation cipher",action="store_true")
parser.add_argument("--rot47","-47",help="If the cipher is rotated by ROT47",action="store_true")
parser.add_argument("--substitution","-S",help="If the plaintext in encrypted using simple substitution cipher",action="store_true")
parser.add_argument("--rsa","-R",help="If the cipher is RSA related",action="store_true") #contains simple and multi_rsa
#parser.add_argument("--factordb","--fb","-O",help="Using factordb to crack the rsa",action="store_true")
parser.add_argument("--weiner","-W",help="Cracking RSA using Weiner attack",action="store_true")
parser.add_argument("--smalle","-E",help="Cracking RSA provided e is very small",action="store_true")
parser.add_argument("--internal","-I",help="If an internal attack for RSA is being performed",action="store_true")
parser.add_argument("--multi","-M",help="If the message has loads of encrypted ciphers",action="store_true")
#parser.add_argument("--fermat","-M",help="Fermat's attack on the RSA encrypted text",action="store_true")
#parser.add_argument("--twin","-N",help="If the RSA public is a product of twin prime, use this",action="store_true")
parser.add_argument("--chinese","-H",help="Using the Chinese Remainder Theorem for cracking RSA from e packets having the same n",action="store_true")
parser.add_argument("--boneh","-D",help="Using the famous boneh_durfee to calculate d, provided d< N^0.292",action="store_true")
return parser
def readfile(filename):
with open(filename,"r") as f1:
return f1.read()
def read_rsa(filename):
with open(filename,"r") as f2:
line=f2.readline()
e=0
while line:
symbol=line[0].lower()
if symbol=='n':
n=int(line[2:])
elif symbol=='e':
e=int(line[2:])
elif symbol=='c':
c=line[2:]
elif symbol=='m':
c=line[2:]
else:
raise Exception("the contents of the file can't be read properly")
break
line=f2.readline()
return n,e,c
def read_chinese(filename):
with open(filename,"r") as f3:
line=f3.readline()
while line:
symbol=line[:2].lower()
if symbol=='n1':
n1=int(line[3:])
elif symbol=='n2':
n2=int(line[3:])
elif symbol=='n3':
n3=int(line[3:])
elif symbol=='c1':
c1=int(line[3:])
elif symbol=='c2':
c2=int(line[3:])
elif symbol=='c3':
c3=int(line[3:])
else:
raise Exception("the contents of the file can't be read properly")
break
line=f3.readline()
return n1,n2,n3,c1,c2,c3
def main():
parser=initializeParser()
args=parser.parse_args()
if args.encrypt or args.decrypt:
if args.cipher!=None:
rawtext=args.cipher
else:
rawtext=readfile(args.sourcefile)
key=args.key
if args.encrypt:
plaintext=rawtext
if args.caeser:
from enc.caeser import julius
if args.key==None:
shift=int(input("enter shift:"))
else:
shift=int(key)
ciphertext=julius(plaintext,shift)
elif args.vignere:
from enc.vigenere_enc import encrypt
if args.key==None:
key=input("enter key:")
ciphertext=encrypt(plaintext,key)
elif args.affine:
from enc.affine_enc import aff_enc
ciphertext=aff_enc(plaintext)
elif args.bacon:
from enc.bacon_enc import steak
ciphertext=steak(plaintext)
elif args.railfence:
from enc.railfence_enc import rail
ciphertext=rail(plaintext)
elif args.rot:
from enc.rot import rotate
if args.key==None:
shift=input("enter shift:")
ciphertext=rotate(plaintext,shift)
elif args.rot47:
from enc.rot47 import rot47
ciphertext=rot47(plaintext)
elif args.skip:
from enc.skip import skip
loop=int(input("Enter skip:"))
ciphertext=skip(plaintext,loop)
elif args.atbash:
from enc.atbash import atb
ciphertext=atb(plaintext)
elif args.polybius:
from enc.polybius_enc import tsquaree
ciphertext=tsquaree(plaintext)
elif args.substitution:
from enc.substitution_enc import substitute
ciphertext=substitute(plaintext)
elif args.rsa:
from enc.rsa_encryptor import rivest
n,e,m=read_rsa(args.sourcefile)
ciphertext=rivest(n,e,m)
display='message:'
try:
print("ciphertext:",ciphertext,end='')
except UnboundLocalError:
print("NullError: no ciphering technique mentioned")
elif args.decrypt:
ciphertext=rawtext
display='plaintext:'
if args.caeser:
from enc.caeser import julius,bruteforce
if args.key!=None:
plaintext=julius(ciphertext,-int(key))
else:
display=''
plaintext=bruteforce(ciphertext)
elif args.vignere:
from dec.vigenere_dec import impossible, withkey
length=args.len
if key!=None:
plaintext=withkey(ciphertext,key)
else:
plaintext=impossible(ciphertext,length)
elif args.affine:
from dec.affine_dec import fine
plaintext=fine(ciphertext)
elif args.bacon:
from dec.bacon_dec import pork
if args.key!=None:
plaintext=pork(ciphertext,key)
else:
plaintext=pork(ciphertext,0)
elif args.railfence:
from dec.railfence_dec import fence
length=args.len
if args.len!=None:
plaintext=fence(ciphertext,int(length))
else:
plaintext=fence(ciphertext,None)
display=''
elif args.skip:
from enc.skip import skip
plaintext=skip(ciphertext,None)
display=''
elif args.atbash:
from enc.atbash import atb
plaintext=atb(ciphertext)
elif args.rot:
from enc.rot import rotate,rotate_brute
if key!=None:
plaintext=rotate(ciphertext,key)
else:
plaintext=rotate_brute(ciphertext)
display=''
elif args.rot47:
from enc.rot47 import rot47
plaintext=rot47(ciphertext)
elif args.polybius:
from dec.polybius_dec import psquaree
plaintext=psquaree(ciphertext)
elif args.substitution:
from dec.substitution_dec import manual
plaintext=manual(ciphertext)
elif args.chinese:
from RSA.hastad import broadcast
n1,n2,n3,c1,c2,c3=read_chinese(args.sourcefile)
display='message:'
plaintext=printing(broadcast(n1,n2,n3,c1,c2,c3))
else:
n,e,c=read_rsa(args.sourcefile)
display='message:'
if not args.multi:
c=int(c)
if args.rsa:
from RSA.simplersa import init
plaintext=init(n,e,c)
elif args.weiner:
from RSA.Weiner import attack
plaintext=reversing(n,attack(e,n),c)
elif args.smalle:
from RSA.small_e import smallie
plaintext=printing(smallie(n,c))
elif args.internal:
from RSA.internal_attack import company
plaintext=company(n,e,c)
elif args.boneh:
from RSA.boneh_durfee import example
plaintext=reversing(n,example(n,e),c)
elif args.multi:
from RSA.multi_cipher import multi
arraytext=multi(n,e,c)
plaintext=''
for i in arraytext:
plaintext+=printing(i)
try:
print("%s" % display,plaintext,end='')
except:
print("NullError: no ciphering technique mentioned")
if __name__=="__main__":
main()
| initializeParser | identifier_name |
cryptologer.py | #file should have all ciphers as simple format... just text
#for rsa_dec, file should have text as n:<no...> \n c:<no....> e:<no....>
#same for rsa_enc...
#importing...
#import sys
#import re
import argparse
#import operator
#import math
#import requests
#import json
#import binascii
#import os
#import time
'''
def whereami():
with open('/root/directory.txt','r') as f1:
location=f1.read()
return location
location=whereami()'''
#appending file paths
'''
import Encrypting as enc
import Decrypting as dec
import Algorithms as alg
import rsa_types as RSA'''
'''
sys.path.append('%s/Encrypting/' % location)
sys.path.append('%s/Decrypting/' % location)
sys.path.append('%s/Algorithms/' % location)
sys.path.append('%s/rsa_types/' % location)'''
#file imports for encryption
'''
from caeser import julius,bruteforce
from vigenere_enc import encrypt
from affine_enc import aff_enc
from bacon_enc import steak
from railfence_enc import rail
from atbash import atb
from polybius_enc import tsquaree
from substitution_enc import substitute
from rsa_encryptor import rivest
from rot import rotate,rotate_brute
from skip import skip
from rot47 import rot47'''
#algorithms import
from alg.Ext_Euclid import reversing, printing
#file imports for decryption
'''
from vigenere_dec import impossible, withkey#import impossible, withkey
from affine_dec import fine
from bacon_dec import pork
from polybius_dec import psquaree
from railfence_dec import fence
from substitution_dec import manual
from simplersa import init
from Weiner import attack
from small_e import smallie
from internal_attack import company
from hastad import broadcast
from multi_cipher import multi
from boneh_durfee import example'''
#parsing starts
def initializeParser():
parser=argparse.ArgumentParser(description="Decryptor for Caeser, Vigenere, types of RSA and more...")
parser.add_argument("--decrypt","--dec","-d",help="Performs Decryption",action="store_true")
parser.add_argument("--encrypt","--enc","-e",help="Performs Encryption",action="store_true")
parser.add_argument("--sourcefile","--sf","-f",help="Input file with ciphertext",type=str)
parser.add_argument("--cipher","--cip","-c",help="Input cipher as test",type=str)
parser.add_argument("--key","-k",help="If the key is known (text for vignere, shift for caeser,ROT)",type=str)
parser.add_argument("--len","-l",help="User-defined max probable key length",type=str)
parser.add_argument("--caeser","-C",help="If the cipher is caeser cipher",action="store_true")
parser.add_argument("--vignere","-V",help="If the cipher is vignere cipher",action="store_true")
parser.add_argument("--affine","-A",help="If the cipher is affine cipher",action="store_true")
parser.add_argument("--bacon","-B",help="If the cipher is bacon cipher",action="store_true")
parser.add_argument("--polybius","-P",help="If the cipher is encrypted by a simple 6x6 polybius square",action="store_true")
parser.add_argument("--railfence","-F",help="If railfence encryption is used",action="store_true")
parser.add_argument("--skip","-K",help="If skip cipher is used",action="store_true")
parser.add_argument("--atbash","-T",help="If atbash rotation is done on the plaintext",action="store_true")
parser.add_argument("--rot","-O",help="If the cipher is any rotation cipher",action="store_true")
parser.add_argument("--rot47","-47",help="If the cipher is rotated by ROT47",action="store_true")
parser.add_argument("--substitution","-S",help="If the plaintext in encrypted using simple substitution cipher",action="store_true")
parser.add_argument("--rsa","-R",help="If the cipher is RSA related",action="store_true") #contains simple and multi_rsa
#parser.add_argument("--factordb","--fb","-O",help="Using factordb to crack the rsa",action="store_true")
parser.add_argument("--weiner","-W",help="Cracking RSA using Weiner attack",action="store_true")
parser.add_argument("--smalle","-E",help="Cracking RSA provided e is very small",action="store_true")
parser.add_argument("--internal","-I",help="If an internal attack for RSA is being performed",action="store_true")
parser.add_argument("--multi","-M",help="If the message has loads of encrypted ciphers",action="store_true")
#parser.add_argument("--fermat","-M",help="Fermat's attack on the RSA encrypted text",action="store_true")
#parser.add_argument("--twin","-N",help="If the RSA public is a product of twin prime, use this",action="store_true")
parser.add_argument("--chinese","-H",help="Using the Chinese Remainder Theorem for cracking RSA from e packets having the same n",action="store_true")
parser.add_argument("--boneh","-D",help="Using the famous boneh_durfee to calculate d, provided d< N^0.292",action="store_true")
return parser
def readfile(filename):
with open(filename,"r") as f1:
return f1.read()
def read_rsa(filename):
|
def read_chinese(filename):
with open(filename,"r") as f3:
line=f3.readline()
while line:
symbol=line[:2].lower()
if symbol=='n1':
n1=int(line[3:])
elif symbol=='n2':
n2=int(line[3:])
elif symbol=='n3':
n3=int(line[3:])
elif symbol=='c1':
c1=int(line[3:])
elif symbol=='c2':
c2=int(line[3:])
elif symbol=='c3':
c3=int(line[3:])
else:
raise Exception("the contents of the file can't be read properly")
break
line=f3.readline()
return n1,n2,n3,c1,c2,c3
def main():
parser=initializeParser()
args=parser.parse_args()
if args.encrypt or args.decrypt:
if args.cipher!=None:
rawtext=args.cipher
else:
rawtext=readfile(args.sourcefile)
key=args.key
if args.encrypt:
plaintext=rawtext
if args.caeser:
from enc.caeser import julius
if args.key==None:
shift=int(input("enter shift:"))
else:
shift=int(key)
ciphertext=julius(plaintext,shift)
elif args.vignere:
from enc.vigenere_enc import encrypt
if args.key==None:
key=input("enter key:")
ciphertext=encrypt(plaintext,key)
elif args.affine:
from enc.affine_enc import aff_enc
ciphertext=aff_enc(plaintext)
elif args.bacon:
from enc.bacon_enc import steak
ciphertext=steak(plaintext)
elif args.railfence:
from enc.railfence_enc import rail
ciphertext=rail(plaintext)
elif args.rot:
from enc.rot import rotate
if args.key==None:
shift=input("enter shift:")
ciphertext=rotate(plaintext,shift)
elif args.rot47:
from enc.rot47 import rot47
ciphertext=rot47(plaintext)
elif args.skip:
from enc.skip import skip
loop=int(input("Enter skip:"))
ciphertext=skip(plaintext,loop)
elif args.atbash:
from enc.atbash import atb
ciphertext=atb(plaintext)
elif args.polybius:
from enc.polybius_enc import tsquaree
ciphertext=tsquaree(plaintext)
elif args.substitution:
from enc.substitution_enc import substitute
ciphertext=substitute(plaintext)
elif args.rsa:
from enc.rsa_encryptor import rivest
n,e,m=read_rsa(args.sourcefile)
ciphertext=rivest(n,e,m)
display='message:'
try:
print("ciphertext:",ciphertext,end='')
except UnboundLocalError:
print("NullError: no ciphering technique mentioned")
elif args.decrypt:
ciphertext=rawtext
display='plaintext:'
if args.caeser:
from enc.caeser import julius,bruteforce
if args.key!=None:
plaintext=julius(ciphertext,-int(key))
else:
display=''
plaintext=bruteforce(ciphertext)
elif args.vignere:
from dec.vigenere_dec import impossible, withkey
length=args.len
if key!=None:
plaintext=withkey(ciphertext,key)
else:
plaintext=impossible(ciphertext,length)
elif args.affine:
from dec.affine_dec import fine
plaintext=fine(ciphertext)
elif args.bacon:
from dec.bacon_dec import pork
if args.key!=None:
plaintext=pork(ciphertext,key)
else:
plaintext=pork(ciphertext,0)
elif args.railfence:
from dec.railfence_dec import fence
length=args.len
if args.len!=None:
plaintext=fence(ciphertext,int(length))
else:
plaintext=fence(ciphertext,None)
display=''
elif args.skip:
from enc.skip import skip
plaintext=skip(ciphertext,None)
display=''
elif args.atbash:
from enc.atbash import atb
plaintext=atb(ciphertext)
elif args.rot:
from enc.rot import rotate,rotate_brute
if key!=None:
plaintext=rotate(ciphertext,key)
else:
plaintext=rotate_brute(ciphertext)
display=''
elif args.rot47:
from enc.rot47 import rot47
plaintext=rot47(ciphertext)
elif args.polybius:
from dec.polybius_dec import psquaree
plaintext=psquaree(ciphertext)
elif args.substitution:
from dec.substitution_dec import manual
plaintext=manual(ciphertext)
elif args.chinese:
from RSA.hastad import broadcast
n1,n2,n3,c1,c2,c3=read_chinese(args.sourcefile)
display='message:'
plaintext=printing(broadcast(n1,n2,n3,c1,c2,c3))
else:
n,e,c=read_rsa(args.sourcefile)
display='message:'
if not args.multi:
c=int(c)
if args.rsa:
from RSA.simplersa import init
plaintext=init(n,e,c)
elif args.weiner:
from RSA.Weiner import attack
plaintext=reversing(n,attack(e,n),c)
elif args.smalle:
from RSA.small_e import smallie
plaintext=printing(smallie(n,c))
elif args.internal:
from RSA.internal_attack import company
plaintext=company(n,e,c)
elif args.boneh:
from RSA.boneh_durfee import example
plaintext=reversing(n,example(n,e),c)
elif args.multi:
from RSA.multi_cipher import multi
arraytext=multi(n,e,c)
plaintext=''
for i in arraytext:
plaintext+=printing(i)
try:
print("%s" % display,plaintext,end='')
except:
print("NullError: no ciphering technique mentioned")
if __name__=="__main__":
main()
| with open(filename,"r") as f2:
line=f2.readline()
e=0
while line:
symbol=line[0].lower()
if symbol=='n':
n=int(line[2:])
elif symbol=='e':
e=int(line[2:])
elif symbol=='c':
c=line[2:]
elif symbol=='m':
c=line[2:]
else:
raise Exception("the contents of the file can't be read properly")
break
line=f2.readline()
return n,e,c | identifier_body |
metadata.rs | // Claxon -- A FLAC decoding library in Rust
// Copyright 2014 Ruud van Asseldonk
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License has been included in the root of the repository.
//! The `metadata` module deals with metadata at the beginning of a FLAC stream.
use error::{Error, Result, fmt_err};
use input::ReadBytes;
use std::str;
use std::slice;
#[derive(Clone, Copy)]
struct MetadataBlockHeader {
is_last: bool,
block_type: u8,
length: u32,
}
/// The streaminfo metadata block, with important information about the stream.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct StreamInfo {
// TODO: "size" would better be called "duration" for clarity.
/// The minimum block size (in inter-channel samples) used in the stream.
///
/// This number is independent of the number of channels. To get the minimum
/// block duration in seconds, divide this by the sample rate.
pub min_block_size: u16,
/// The maximum block size (in inter-channel samples) used in the stream.
///
/// This number is independent of the number of channels. To get the
/// maximum block duration in seconds, divide by the sample rate. To avoid
/// allocations during decoding, a buffer of this size times the number of
/// channels can be allocated up front and passed into
/// `FrameReader::read_next_or_eof()`.
pub max_block_size: u16,
/// The minimum frame size (in bytes) used in the stream.
pub min_frame_size: Option<u32>,
/// The maximum frame size (in bytes) used in the stream.
pub max_frame_size: Option<u32>,
/// The sample rate in Hz.
pub sample_rate: u32,
/// The number of channels.
pub channels: u32,
/// The number of bits per sample.
pub bits_per_sample: u32,
/// The total number of inter-channel samples in the stream.
// TODO: rename to `duration` for clarity?
pub samples: Option<u64>,
/// MD5 signature of the unencoded audio data.
pub md5sum: [u8; 16],
}
/// A seek point in the seek table.
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct SeekPoint {
/// Sample number of the first sample in the target frame, or 2<sup>64</sup> - 1 for a placeholder.
pub sample: u64,
/// Offset in bytes from the first byte of the first frame header to the first byte of the
/// target frame's header.
pub offset: u64,
/// Number of samples in the target frame.
pub samples: u16,
}
/// A seek table to aid seeking in the stream.
pub struct SeekTable {
/// The seek points, sorted in ascending order by sample number.
#[allow(dead_code)] // TODO: Implement seeking.
seekpoints: Vec<SeekPoint>,
}
/// Vorbis comments, also known as FLAC tags (e.g. artist, title, etc.).
pub struct VorbisComment {
/// The “vendor string”, chosen by the encoder vendor.
///
/// This string usually contains the name and version of the program that
/// encoded the FLAC stream, such as `reference libFLAC 1.3.2 20170101`
/// or `Lavf57.25.100`.
pub vendor: String,
/// Name-value pairs of Vorbis comments, such as `ARTIST=Queen`.
///
/// This struct stores a raw low-level representation of tags. Use
/// `FlacReader::tags()` for a friendlier iterator. The tuple consists of
/// the string in `"NAME=value"` format, and the index of the `'='` into
/// that string.
///
/// The name is supposed to be interpreted case-insensitively, and is
/// guaranteed to consist of ASCII characters. Claxon does not normalize
/// the casing of the name. Use `metadata::GetTag` to do a case-insensitive
/// lookup.
///
/// Names need not be unique. For instance, multiple `ARTIST` comments might
/// be present on a collaboration track.
///
/// See <https://www.xiph.org/vorbis/doc/v-comment.html> for more details.
pub comments: Vec<(String, usize)>,
}
/// A metadata about the flac stream.
pub enum MetadataBlock {
/// A stream info block.
StreamInfo(StreamInfo),
/// A padding block (with no meaningful data).
Padding {
/// The number of padding bytes.
length: u32,
},
/// An application block with application-specific data.
Application {
/// The registered application ID.
id: u32,
/// The contents of the application block.
data: Vec<u8>,
},
/// A seek table block.
SeekTable(SeekTable),
/// A Vorbis comment block, also known as FLAC tags.
VorbisComment(VorbisComment),
/// A CUE sheet block.
CueSheet, // TODO
/// A picture block.
Picture, // TODO
/// A block with a reserved block type, not supported by this library.
Reserved,
}
/// Iterates over Vorbis comments (FLAC tags) in a FLAC stream.
///
/// See `FlacReader::tags()` for more details.
pub struct Tags<'a> {
/// The underlying iterator.
iter: slice::Iter<'a, (String, usize)>,
}
impl<'a> Tags<'a> {
/// Returns a new `Tags` iterator.
#[inline]
pub fn new(comments: &'a [(String, usize)]) -> Tags<'a> {
Tags {
iter: comments.iter(),
}
}
}
impl<'a> Iterator for Tags<'a> {
type Item = (&'a str, &'a str);
#[inline]
fn next(&mut self) -> Option<(&'a str, &'a str)> {
return self.iter.next().map(|&(ref comment, sep_idx)| {
(&comment[..sep_idx], &comment[sep_idx+1..])
})
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
}
impl<'a> ExactSizeIterator for Tags<'a> {}
/// Iterates over Vorbis comments looking for a specific one; returns its values as `&str`.
///
/// See `FlacReader::get_tag()` for more details.
pub struct GetTag<'a> {
/// The Vorbis comments to search through.
vorbis_comments: &'a [(String, usize)],
/// The tag to look for.
needle: &'a str,
/// The index of the (name, value) pair that should be inspected next.
index: usize,
}
impl<'a> GetTag<'a> {
/// Returns a new `GetTag` iterator.
#[inline]
pub fn new(vorbis_comments: &'a [(String, usize)], needle: &'a str) -> GetTag<'a> {
GetTag {
vorbis_comments: vorbis_comments,
needle: needle,
index: 0,
}
}
}
impl<'a> Iterator for GetTag<'a> {
type Item = &'a str;
#[inline]
fn next(&mut self) -> Option<&'a str> {
// This import is actually required on Rust 1.13.
#[allow(unused_imports)]
use std::ascii::AsciiExt;
while self.index < self.vorbis_comments.len() {
let (ref comment, sep_idx) = self.vorbis_comments[self.index];
self.index += 1;
if comment[..sep_idx].eq_ignore_ascii_case(self.needle) {
return Some(&comment[sep_idx + 1..])
}
}
return None
}
}
#[inline]
fn read_metadata_block_header<R: ReadBytes>(input: &mut R) -> Result<MetadataBlockHeader> {
let byte = try!(input.read_u8());
// The first bit specifies whether this is the last block, the next 7 bits
// specify the type of the metadata block to follow.
let is_last = (byte >> 7) == 1;
let block_type = byte & 0b0111_1111;
// The length field is 24 bits, or 3 bytes.
let length = try!(input.read_be_u24());
let header = MetadataBlockHeader {
is_last: is_last,
block_type: block_type,
length: length,
};
Ok(header)
}
/// Read a single metadata block header and body from the input.
///
/// When reading a regular flac stream, there is no need to use this function
/// directly; constructing a `FlacReader` will read the header and its metadata
/// blocks.
///
/// When a flac stream is embedded in a container format, this function can be
/// used to decode a single metadata block. For instance, the Ogg format embeds
/// metadata blocks including their header verbatim in packets. This function
/// can be used to decode that raw data.
#[inline]
pub fn read_metadata_block_with_header<R: ReadBytes>(input: &mut R)
-> Result<MetadataBlock> {
let header = try!(read_metadata_block_header(input));
read_metadata_block(input, header.block_type, header.length)
}
/// Read a single metadata block of the given type and length from the input.
///
/// When reading a regular flac stream, there is no need to use this function
/// directly; constructing a `FlacReader` will read the header and its metadata
/// blocks.
///
/// When a flac stream is embedded in a container format, this function can be
/// used to decode a single metadata block. For instance, the MP4 format sports
/// a “FLAC Specific Box” which contains the block type and the raw data. This
/// function can be used to decode that raw data.
#[inline]
pub fn read_metadata_block<R: ReadBytes>(input: &mut R,
block_type: u8,
length: u32)
-> Result<MetadataBlock> {
match block_type {
0 => {
// The streaminfo block has a fixed size of 34 bytes.
if length == 34 {
let streaminfo = try!(read_streaminfo_block(input));
Ok(MetadataBlock::StreamInfo(streaminfo))
} else {
fmt_err("invalid streaminfo metadata block length")
}
}
1 => {
try!(read_padding_block(input, length));
Ok(MetadataBlock::Padding { length: length })
}
2 => {
let (id, data) = try!(read_application_block(input, length));
Ok(MetadataBlock::Application {
id: id,
data: data,
})
}
3 => {
// TODO: implement seektable reading. For now, pretend it is padding.
try!(input.skip(length));
Ok(MetadataBlock::Padding { length: length })
}
4 => {
let vorbis_comment = try!(read_vorbis_comment_block(input, length));
Ok(MetadataBlock::VorbisComment(vorbis_comment))
}
5 => {
// TODO: implement CUE sheet reading. For now, pretend it is padding.
try!(input.skip(length));
Ok(MetadataBlock::Padding { length: length })
}
6 => {
// TODO: implement picture reading. For now, pretend it is padding.
try!(input.skip(length));
Ok(MetadataBlock::Padding { length: length })
}
127 => {
// This code is invalid to avoid confusion with a frame sync code.
fmt_err("invalid metadata block type")
}
_ => {
// Any other block type is 'reserved' at the moment of writing. The
// reference implementation reads it as an 'unknown' block. That is
// one way of handling it, but maybe there should be some kind of
// 'strict' mode (configurable at compile time?) so that this can
// be an error if desired.
try!(input.skip(length));
Ok(MetadataBlock::Reserved)
}
}
}
fn read_streaminfo_block<R: ReadBytes>(input: &mut R) -> Result<StreamInfo> {
let min_block_size = try!(input.read_be_u16());
let max_block_size = try!(input.read_be_u16());
// The frame size fields are 24 bits, or 3 bytes.
let min_frame_size = try!(input.read_be_u24());
let max_frame_size = try!(input.read_be_u24());
// Next up are 20 bits that determine the sample rate.
let sample_rate_msb = try!(input.read_be_u16());
let sample_rate_lsb = try!(input.read_u8());
// Stitch together the value from the first 16 bits,
// and then the 4 most significant bits of the next byte.
let sample_rate = (sample_rate_msb as u32) << 4 | (sample_rate_lsb as u32) >> 4;
// Next three bits are the number of channels - 1. Mask them out and add 1.
let n_channels_bps = sample_rate_lsb;
let n_channels = ((n_channels_bps >> 1) & 0b0000_0111) + 1;
// The final bit is the most significant of bits per sample - 1. Bits per
// sample - 1 is 5 bits in total.
let bps_msb = n_channels_bps & 1;
let bps_lsb_n_samples = try!(input.read_u8());
// Stitch together these values, add 1 because # - 1 is stored.
let bits_per_sample = (bps_msb << 4 | (bps_lsb_n_samples >> 4)) + 1;
// Number of samples in 36 bits, we have 4 already, 32 to go.
let n_samples_msb = bps_lsb_n_samples & 0b0000_1111;
let n_samples_lsb = try!(input.read_be_u32());
let n_samples = (n_samples_msb as u64) << 32 | n_samples_lsb as u64;
// Next are 128 bits (16 bytes) of MD5 signature.
let mut md5sum = [0u8; 16];
try!(input.read_into(&mut md5sum));
// Lower bounds can never be larger than upper bounds. Note that 0 indicates
// unknown for the frame size. Also, the block size must be at least 16.
if min_block_size > max_block_size {
return fmt_err("inconsistent bounds, min block size > max block size");
}
if min_block_size < 16 {
return fmt_err("invalid block size, must be at least 16");
}
if min_frame_size > max_frame_size && max_frame_size != 0 {
return fmt_err("inconsistent bounds, min frame size > max frame size");
}
// A sample rate of 0 is invalid, and the maximum sample rate is limited by
// the structure of the frame headers to 655350 Hz.
if sample_rate == 0 || sample_rate > 655350 {
return fmt_err("invalid sample rate");
}
let stream_info = StreamInfo {
min_block_size: min_block_size,
max_block_size: max_block_size,
min_frame_size: if min_frame_size == 0 {
None
} else {
Some(min_frame_size)
},
max_frame_size: if max_frame_size == 0 {
None
} else {
Some(max_frame_size)
},
sample_rate: sample_rate,
channels: n_channels as u32,
bits_per_sample: bits_per_sample as u32,
samples: if n_samples == 0 {
None
} else {
Some(n_samples)
},
md5sum: md5sum,
};
Ok(stream_info)
}
fn read_vorbis_comment_block<R: ReadBytes>(input: &mut R, length: u32) -> Result<VorbisComment> {
if length < 8 {
// We expect at a minimum a 32-bit vendor string length, and a 32-bit
// comment count.
return fmt_err("Vorbis comment block is too short")
}
// Fail if the length of the Vorbis comment block is larger than 1 MiB. This
// block is full of length-prefixed strings for which we allocate memory up
// front. If there were no limit on these, a maliciously crafted file could
// cause OOM by claiming to contain large strings. But at least the strings
// cannot be longer than the size of the Vorbis comment block, and by
// limiting the size of that block, we can mitigate such DoS attacks.
//
// The typical size of a the Vorbis comment block is 1 KiB; on a corpus of
// real-world flac files, the 0.05 and 0.95 quantiles were 792 and 1257
// bytes respectively, with even the 0.99 quantile below 2 KiB. The only
// reason for having a large Vorbis comment block is when cover art is
// incorrectly embedded there, but the Vorbis comment block is not the right
// place for that anyway.
if length > 10 * 1024 * 1024 {
let msg = "Vorbis comment blocks larger than 10 MiB are not supported";
return Err(Error::Unsupported(msg))
}
// The Vorbis comment block starts with a length-prefixed "vendor string".
// It cannot be larger than the block length - 8, because there are the
// 32-bit vendor string length, and comment count.
let vendor_len = try!(input.read_le_u32());
if vendor_len > length - 8 { return fmt_err("vendor string too long") }
let mut vendor_bytes = Vec::with_capacity(vendor_len as usize);
// We can safely set the lenght of the vector here; the uninitialized memory
// is not exposed. If `read_into` succeeds, it will have overwritten all
// bytes. If not, an error is returned and the memory is never exposed.
unsafe { vendor_bytes.set_len(vendor_len as usize); }
try!(input.read_into(&mut vendor_bytes));
let vendor = try!(String::from_utf8(vendor_bytes));
// Next up is the number of comments. Because every comment is at least 4
// bytes to indicate its length, there cannot be more comments than the
// length of the block divided by 4. This is only an upper bound to ensure
// that we don't allocate a big vector, to protect against DoS attacks.
let mut comments_len = try!(input.read_le_u32());
if comments_len >= length / 4 {
return fmt_err("too many entries for Vorbis comment block")
}
let mut comments = Vec::with_capacity(comments_len as usize);
let mut bytes_left = length - 8 - vendor_len;
// For every comment, there is a length-prefixed string of the form
// "NAME=value".
while bytes_left >= 4 && comments.len() < comments_len as usize {
let comment_len = try!(input.read_le_u32());
bytes_left -= 4;
if comment_len > bytes_left {
return fmt_err("Vorbis comment too long for Vorbis comment block")
}
// Some older versions of libflac allowed writing zero-length Vorbis
// comments. ALthough such files are invalid, they do occur in the wild,
// so we skip over the empty comment.
if comment_len == 0 {
// Does not overflow because `comments_len > comments.len() >= 0`.
comments_len -= 1;
continue;
}
// For the same reason as above, setting the length is safe here.
let mut comment_bytes = Vec::with_capacity(comment_len as usize);
unsafe { comment_bytes.set_len(comment_len as usize); }
try!(input.read_into(&mut comment_bytes));
bytes_left -= comment_len;
if let Some(sep_index) = comment_bytes.iter().position(|&x| x == b'=') {
{
let name_bytes = &comment_bytes[..sep_index];
// According to the Vorbis spec, the field name may consist of ascii
// bytes 0x20 through 0x7d, 0x3d (`=`) excluded. Verifying this has
// the advantage that if the check passes, the result is valid
// UTF-8, so the conversion to string will not fail.
if name_bytes.iter().any(|&x| x < 0x20 || x > 0x7d) {
return fmt_err("Vorbis comment field name contains invalid byte")
}
}
let comment = try!(String::from_utf8(comment_bytes));
comments.push((comment, sep_index));
} else {
return fmt_err("Vorbis comment does not contain '='")
}
}
if bytes_left != 0 {
return fmt_err("Vorbis comment block has excess data")
}
if comments.len() != comments_len as usize {
return fmt_err("Vorbis comment block contains wrong number of entries")
}
let vorbis_comment = VorbisComment {
vendor: vendor,
comments: comments,
};
Ok(vorbis_comment)
}
fn read_padding_block<R: ReadBytes>(input: &mut R, length: u32) -> Result<()> {
// The specification dictates that all bits of the padding block must be 0.
// However, the reference implementation does not issue an error when this
// is not the case, and frankly, when you are going to skip over these
// bytes and do nothing with them whatsoever, why waste all those CPU
// cycles checking that the padding is valid?
Ok(try!(input.skip(length)))
}
fn read_application_block<R: ReadBytes>(input: &mut R, length: u32) -> Result<(u32, Vec<u8>)> {
if length < 4 {
return fmt_err("application block length must be at least 4 bytes")
}
// Reject large application blocks to avoid memory-based denial-
// of-service attacks. See also the more elaborate motivation in
// `read_vorbis_comment_block()`.
if length > 10 * 1024 * 1024 {
let msg = "application blocks larger than 10 MiB are not supported";
return Err(Error::Unsupported(msg))
}
let id = try!(input.read_be_u32());
// Four bytes of the block have been used for the ID, the rest is payload.
// Create a vector of uninitialized memory, and read the block into it. The
// uninitialized memory is never exposed: read_into will either fill the
// buffer completely, or return an err, in which case the memory is not
// exposed.
let mut data = Vec::with_capacity(length as usize - 4);
unsafe { data.set_len(length as usize - 4); }
try!(input.read_into(&mut data));
Ok((id, data))
}
/// Reads metadata blocks from a stream and exposes them as an iterator.
///
/// It is assumed that the next byte that the reader will read, is the first
/// byte of a metadata block header. This means that the iterator will yield at
/// least a single value. If the iterator ever yields an error, then no more
/// data will be read thereafter, and the next value will be `None`.
pub struct MetadataBlockReader<R: ReadBytes> {
input: R,
done: bool,
}
/// Either a `MetadataBlock` or an `Error`.
pub type MetadataBlockResult = Result<MetadataBlock>;
impl<R: ReadBytes> MetadataBlockReader<R> {
/// Creates a metadata block reader that will yield at least one element.
pub fn new(input: R) -> MetadataBlockReader<R> {
MetadataBlockReader {
input: input,
done: false,
}
}
#[inline]
fn read_next(&mut self) -> MetadataBlockResult {
let header = try!(read_metadata_block_header(&mut self.input));
let block = try!(read_metadata_block(&mut self.input, header.block_type, header.length));
self.done = header.is_last;
Ok(block)
}
}
impl<R: ReadBytes> Iterator for MetadataBlockReader<R> {
type Item = MetadataBlockResult;
#[inline]
fn next(&mut self) -> Option<MetadataBlockResult> {
| inline]
fn size_hint(&self) -> (usize, Option<usize>) {
// When done, there will be no more blocks,
// when not done, there will be at least one more.
if self.done { (0, Some(0)) } else { (1, None) }
}
}
| if self.done {
None
} else {
let block = self.read_next();
// After a failure, no more attempts to read will be made,
// because we don't know where we are in the stream.
if !block.is_ok() {
self.done = true;
}
Some(block)
}
}
#[ | identifier_body |
metadata.rs | // Claxon -- A FLAC decoding library in Rust
// Copyright 2014 Ruud van Asseldonk
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License has been included in the root of the repository.
//! The `metadata` module deals with metadata at the beginning of a FLAC stream.
use error::{Error, Result, fmt_err};
use input::ReadBytes;
use std::str;
use std::slice;
#[derive(Clone, Copy)]
struct MetadataBlockHeader {
is_last: bool,
block_type: u8,
length: u32,
}
/// The streaminfo metadata block, with important information about the stream.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct StreamInfo {
// TODO: "size" would better be called "duration" for clarity.
/// The minimum block size (in inter-channel samples) used in the stream.
///
/// This number is independent of the number of channels. To get the minimum
/// block duration in seconds, divide this by the sample rate.
pub min_block_size: u16,
/// The maximum block size (in inter-channel samples) used in the stream.
///
/// This number is independent of the number of channels. To get the
/// maximum block duration in seconds, divide by the sample rate. To avoid
/// allocations during decoding, a buffer of this size times the number of
/// channels can be allocated up front and passed into
/// `FrameReader::read_next_or_eof()`.
pub max_block_size: u16,
/// The minimum frame size (in bytes) used in the stream.
pub min_frame_size: Option<u32>,
/// The maximum frame size (in bytes) used in the stream.
pub max_frame_size: Option<u32>,
/// The sample rate in Hz.
pub sample_rate: u32,
/// The number of channels.
pub channels: u32,
/// The number of bits per sample.
pub bits_per_sample: u32,
/// The total number of inter-channel samples in the stream.
// TODO: rename to `duration` for clarity?
pub samples: Option<u64>,
/// MD5 signature of the unencoded audio data.
pub md5sum: [u8; 16],
}
/// A seek point in the seek table.
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct SeekPoint {
/// Sample number of the first sample in the target frame, or 2<sup>64</sup> - 1 for a placeholder.
pub sample: u64,
/// Offset in bytes from the first byte of the first frame header to the first byte of the
/// target frame's header.
pub offset: u64,
/// Number of samples in the target frame.
pub samples: u16,
}
/// A seek table to aid seeking in the stream.
pub struct SeekTable {
/// The seek points, sorted in ascending order by sample number.
#[allow(dead_code)] // TODO: Implement seeking.
seekpoints: Vec<SeekPoint>,
}
/// Vorbis comments, also known as FLAC tags (e.g. artist, title, etc.).
pub struct VorbisComment {
/// The “vendor string”, chosen by the encoder vendor.
///
/// This string usually contains the name and version of the program that
/// encoded the FLAC stream, such as `reference libFLAC 1.3.2 20170101`
/// or `Lavf57.25.100`.
pub vendor: String,
/// Name-value pairs of Vorbis comments, such as `ARTIST=Queen`.
///
/// This struct stores a raw low-level representation of tags. Use
/// `FlacReader::tags()` for a friendlier iterator. The tuple consists of
/// the string in `"NAME=value"` format, and the index of the `'='` into
/// that string.
///
/// The name is supposed to be interpreted case-insensitively, and is
/// guaranteed to consist of ASCII characters. Claxon does not normalize
/// the casing of the name. Use `metadata::GetTag` to do a case-insensitive
/// lookup.
///
/// Names need not be unique. For instance, multiple `ARTIST` comments might
/// be present on a collaboration track.
///
/// See <https://www.xiph.org/vorbis/doc/v-comment.html> for more details.
pub comments: Vec<(String, usize)>,
}
/// A metadata about the flac stream.
pub enum MetadataBlock {
/// A stream info block.
StreamInfo(StreamInfo),
/// A padding block (with no meaningful data).
Padding {
/// The number of padding bytes.
length: u32,
},
/// An application block with application-specific data.
Application {
/// The registered application ID.
id: u32,
/// The contents of the application block.
data: Vec<u8>,
},
/// A seek table block.
SeekTable(SeekTable),
/// A Vorbis comment block, also known as FLAC tags.
VorbisComment(VorbisComment),
/// A CUE sheet block.
CueSheet, // TODO
/// A picture block.
Picture, // TODO
/// A block with a reserved block type, not supported by this library.
Reserved,
}
/// Iterates over Vorbis comments (FLAC tags) in a FLAC stream.
///
/// See `FlacReader::tags()` for more details.
pub struct Tags<'a> {
/// The underlying iterator.
iter: slice::Iter<'a, (String, usize)>,
}
impl<'a> Tags<'a> {
/// Returns a new `Tags` iterator.
#[inline]
pub fn new(comments: &'a [(String, usize)]) -> Tags<'a> {
Tags {
iter: comments.iter(),
}
}
}
impl<'a> Iterator for Tags<'a> {
type Item = (&'a str, &'a str);
#[inline]
fn next(&mut self) -> Option<(&'a str, &'a str)> {
return self.iter.next().map(|&(ref comment, sep_idx)| {
(&comment[..sep_idx], &comment[sep_idx+1..])
})
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
}
impl<'a> ExactSizeIterator for Tags<'a> {}
/// Iterates over Vorbis comments looking for a specific one; returns its values as `&str`.
///
/// See `FlacReader::get_tag()` for more details.
pub struct GetTag<'a> {
/// The Vorbis comments to search through.
vorbis_comments: &'a [(String, usize)],
/// The tag to look for.
needle: &'a str,
/// The index of the (name, value) pair that should be inspected next.
index: usize,
}
impl<'a> GetTag<'a> {
/// Returns a new `GetTag` iterator.
#[inline]
pub fn new(vorbis_comments: &'a [(String, usize)], needle: &'a str) -> GetTag<'a> {
GetTag {
vorbis_comments: vorbis_comments,
needle: needle,
index: 0,
}
}
}
impl<'a> Iterator for GetTag<'a> {
type Item = &'a str;
#[inline]
fn next(&mut self) -> Option<&'a str> {
// This import is actually required on Rust 1.13.
#[allow(unused_imports)]
use std::ascii::AsciiExt;
while self.index < self.vorbis_comments.len() {
let (ref comment, sep_idx) = self.vorbis_comments[self.index];
self.index += 1;
if comment[..sep_idx].eq_ignore_ascii_case(self.needle) {
return Some(&comment[sep_idx + 1..])
}
}
return None
}
}
#[inline]
fn read_metadata_block_header<R: ReadBytes>(input: &mut R) -> Result<MetadataBlockHeader> {
let byte = try!(input.read_u8());
// The first bit specifies whether this is the last block, the next 7 bits
// specify the type of the metadata block to follow.
let is_last = (byte >> 7) == 1;
let block_type = byte & 0b0111_1111;
// The length field is 24 bits, or 3 bytes.
let length = try!(input.read_be_u24());
let header = MetadataBlockHeader {
is_last: is_last,
block_type: block_type,
length: length,
};
Ok(header)
}
/// Read a single metadata block header and body from the input.
///
/// When reading a regular flac stream, there is no need to use this function
/// directly; constructing a `FlacReader` will read the header and its metadata
/// blocks.
///
/// When a flac stream is embedded in a container format, this function can be
/// used to decode a single metadata block. For instance, the Ogg format embeds
/// metadata blocks including their header verbatim in packets. This function
/// can be used to decode that raw data.
#[inline]
pub fn read_metadata_block_with_header<R: ReadBytes>(input: &mut R)
-> Result<MetadataBlock> {
let header = try!(read_metadata_block_header(input));
read_metadata_block(input, header.block_type, header.length)
}
/// Read a single metadata block of the given type and length from the input.
///
/// When reading a regular flac stream, there is no need to use this function
/// directly; constructing a `FlacReader` will read the header and its metadata
/// blocks.
///
/// When a flac stream is embedded in a container format, this function can be
/// used to decode a single metadata block. For instance, the MP4 format sports
/// a “FLAC Specific Box” which contains the block type and the raw data. This
/// function can be used to decode that raw data.
#[inline]
pub fn read_metadata_block<R: ReadBytes>(input: &mut R,
block_type: u8,
length: u32)
-> Result<MetadataBlock> {
match block_type {
0 => {
// The streaminfo block has a fixed size of 34 bytes.
if length == 34 {
let streaminfo = try!(read_streaminfo_block(input));
Ok(MetadataBlock::StreamInfo(streaminfo))
} else {
fmt_err("invalid streaminfo metadata block length")
}
}
1 => {
try!(read_padding_block(input, length));
Ok(MetadataBlock::Padding { length: length })
}
2 => {
let (id, data) = try!(read_application_block(input, length));
Ok(MetadataBlock::Application {
id: id,
data: data,
})
}
3 => {
// TODO: implement seektable reading. For now, pretend it is padding.
try!(input.skip(length));
Ok(MetadataBlock::Padding { length: length })
}
4 => {
let vorbis_comment = try!(read_vorbis_comment_block(input, length));
Ok(MetadataBlock::VorbisComment(vorbis_comment))
}
5 => {
// TODO: implement CUE sheet reading. For now, pretend it is padding.
try!(input.skip(length));
Ok(MetadataBlock::Padding { length: length })
}
6 => {
// TODO: implement picture reading. For now, pretend it is padding.
try!(input.skip(length));
Ok(MetadataBlock::Padding { length: length })
}
127 => {
// This code is invalid to avoid confusion with a frame sync code.
fmt_err("invalid metadata block type")
}
_ => {
// Any other block type is 'reserved' at the moment of writing. The
// reference implementation reads it as an 'unknown' block. That is
// one way of handling it, but maybe there should be some kind of
// 'strict' mode (configurable at compile time?) so that this can
// be an error if desired.
try!(input.skip(length));
Ok(MetadataBlock::Reserved)
}
}
}
fn read_streaminfo_block<R: ReadBytes>(input: &mut R) -> Result<StreamInfo> {
let min_block_size = try!(input.read_be_u16());
let max_block_size = try!(input.read_be_u16());
// The frame size fields are 24 bits, or 3 bytes.
let min_frame_size = try!(input.read_be_u24());
let max_frame_size = try!(input.read_be_u24());
// Next up are 20 bits that determine the sample rate.
let sample_rate_msb = try!(input.read_be_u16());
let sample_rate_lsb = try!(input.read_u8());
// Stitch together the value from the first 16 bits,
// and then the 4 most significant bits of the next byte.
let sample_rate = (sample_rate_msb as u32) << 4 | (sample_rate_lsb as u32) >> 4;
// Next three bits are the number of channels - 1. Mask them out and add 1.
let n_channels_bps = sample_rate_lsb;
let n_channels = ((n_channels_bps >> 1) & 0b0000_0111) + 1;
// The final bit is the most significant of bits per sample - 1. Bits per
// sample - 1 is 5 bits in total.
let bps_msb = n_channels_bps & 1;
let bps_lsb_n_samples = try!(input.read_u8());
// Stitch together these values, add 1 because # - 1 is stored.
let bits_per_sample = (bps_msb << 4 | (bps_lsb_n_samples >> 4)) + 1;
// Number of samples in 36 bits, we have 4 already, 32 to go.
let n_samples_msb = bps_lsb_n_samples & 0b0000_1111;
let n_samples_lsb = try!(input.read_be_u32());
let n_samples = (n_samples_msb as u64) << 32 | n_samples_lsb as u64;
// Next are 128 bits (16 bytes) of MD5 signature.
let mut md5sum = [0u8; 16];
try!(input.read_into(&mut md5sum));
// Lower bounds can never be larger than upper bounds. Note that 0 indicates
// unknown for the frame size. Also, the block size must be at least 16.
if min_block_size > max_block_size {
return fmt_err("inconsistent bounds, min block size > max block size");
}
if min_block_size < 16 {
return fmt_err("invalid block size, must be at least 16");
}
if min_frame_size > max_frame_size && max_frame_size != 0 {
return fmt_err("inconsistent bounds, min frame size > max frame size");
}
// A sample rate of 0 is invalid, and the maximum sample rate is limited by
// the structure of the frame headers to 655350 Hz.
if sample_rate == 0 || sample_rate > 655350 {
return fmt_err("invalid sample rate");
}
let stream_info = StreamInfo {
min_block_size: min_block_size,
max_block_size: max_block_size,
min_frame_size: if min_frame_size == 0 {
None
} else {
Some(min_frame_size)
},
max_frame_size: if max_frame_size == 0 {
None
} else {
Some(max_frame_size)
},
sample_rate: sample_rate,
channels: n_channels as u32,
bits_per_sample: bits_per_sample as u32,
samples: if n_samples == 0 {
None
} else {
Some(n_samples)
},
md5sum: md5sum,
};
Ok(stream_info)
}
fn read_vorbis_comment_block<R: ReadBytes>(input: &mut R, length: u32) -> Result<VorbisComment> {
if length < 8 {
// We expect at a minimum a 32-bit vendor string length, and a 32-bit
// comment count.
return fmt_err("Vorbis comment block is too short")
}
// Fail if the length of the Vorbis comment block is larger than 1 MiB. This
// block is full of length-prefixed strings for which we allocate memory up
// front. If there were no limit on these, a maliciously crafted file could
// cause OOM by claiming to contain large strings. But at least the strings
// cannot be longer than the size of the Vorbis comment block, and by
// limiting the size of that block, we can mitigate such DoS attacks.
//
// The typical size of a the Vorbis comment block is 1 KiB; on a corpus of
// real-world flac files, the 0.05 and 0.95 quantiles were 792 and 1257
// bytes respectively, with even the 0.99 quantile below 2 KiB. The only
// reason for having a large Vorbis comment block is when cover art is
// incorrectly embedded there, but the Vorbis comment block is not the right
// place for that anyway.
if length > 10 * 1024 * 1024 {
let msg = "Vorbis comment blocks larger than 10 MiB are not supported";
return Err(Error::Unsupported(msg))
}
// The Vorbis comment block starts with a length-prefixed "vendor string".
// It cannot be larger than the block length - 8, because there are the
// 32-bit vendor string length, and comment count.
let vendor_len = try!(input.read_le_u32());
if vendor_len > length - 8 { return fmt_err("vendor string too long") }
let mut vendor_bytes = Vec::with_capacity(vendor_len as usize);
// We can safely set the lenght of the vector here; the uninitialized memory
// is not exposed. If `read_into` succeeds, it will have overwritten all
// bytes. If not, an error is returned and the memory is never exposed.
unsafe { vendor_bytes.set_len(vendor_len as usize); }
try!(input.read_into(&mut vendor_bytes));
let vendor = try!(String::from_utf8(vendor_bytes));
// Next up is the number of comments. Because every comment is at least 4
// bytes to indicate its length, there cannot be more comments than the
// length of the block divided by 4. This is only an upper bound to ensure
// that we don't allocate a big vector, to protect against DoS attacks.
let mut comments_len = try!(input.read_le_u32());
if comments_len >= length / 4 {
return fmt_err("too many entries for Vorbis comment block")
}
let mut comments = Vec::with_capacity(comments_len as usize);
let mut bytes_left = length - 8 - vendor_len;
// For every comment, there is a length-prefixed string of the form
// "NAME=value".
while bytes_left >= 4 && comments.len() < comments_len as usize {
let comment_len = try!(input.read_le_u32());
bytes_left -= 4;
if comment_len > bytes_left {
return fmt_err("Vorbis comment too long for Vorbis comment block")
}
// Some older versions of libflac allowed writing zero-length Vorbis
// comments. ALthough such files are invalid, they do occur in the wild,
// so we skip over the empty comment.
if comment_len == 0 {
// Does not overflow because `comments_len > comments.len() >= 0`.
comments_len -= 1;
continue;
}
// For the same reason as above, setting the length is safe here.
let mut comment_bytes = Vec::with_capacity(comment_len as usize);
unsafe { comment_bytes.set_len(comment_len as usize); }
try!(input.read_into(&mut comment_bytes));
bytes_left -= comment_len;
if let Some(sep_index) = comment_bytes.iter().position(|&x| x == b'=') {
{
let name_bytes = &comment_bytes[..sep_index];
// According to the Vorbis spec, the field name may consist of ascii
// bytes 0x20 through 0x7d, 0x3d (`=`) excluded. Verifying this has
// the advantage that if the check passes, the result is valid
// UTF-8, so the conversion to string will not fail.
if name_bytes.iter().any(|&x| x < 0x20 || x > 0x7d) {
return fmt_err("Vorbis comment field name contains invalid byte")
}
}
let comment = try!(String::from_utf8(comment_bytes));
comments.push((comment, sep_index));
} else {
return fmt_err("Vorbis comment does not contain '='")
}
}
if bytes_left != 0 {
return fmt_err("Vorbis comment block has excess data")
}
if comments.len() != comments_len as usize {
return fmt_err("Vorbis comment block contains wrong number of entries")
}
let vorbis_comment = VorbisComment {
vendor: vendor,
comments: comments,
};
Ok(vorbis_comment)
}
fn read_padding_block<R: ReadBytes>(input: &mut R, length: u32) -> Result<()> {
// The specification dictates that all bits of the padding block must be 0.
// However, the reference implementation does not issue an error when this
// is not the case, and frankly, when you are going to skip over these
// bytes and do nothing with them whatsoever, why waste all those CPU
// cycles checking that the padding is valid?
Ok(try!(input.skip(length)))
}
fn read_application_block<R: ReadBytes>(input: &mut R, length: u32) -> Result<(u32, Vec<u8>)> {
if length < 4 {
return fmt_err("application block length must be at least 4 bytes")
}
// Reject large application blocks to avoid memory-based denial-
// of-service attacks. See also the more elaborate motivation in
// `read_vorbis_comment_block()`.
if length > 10 * 1024 * 1024 {
let msg = "application blocks larger than 10 MiB are not supported";
return Err(Error::Unsupported(msg))
}
let id = try!(input.read_be_u32());
// Four bytes of the block have been used for the ID, the rest is payload.
// Create a vector of uninitialized memory, and read the block into it. The
// uninitialized memory is never exposed: read_into will either fill the
// buffer completely, or return an err, in which case the memory is not
// exposed.
let mut data = Vec::with_capacity(length as usize - 4);
unsafe { data.set_len(length as usize - 4); }
try!(input.read_into(&mut data));
Ok((id, data))
}
/// Reads metadata blocks from a stream and exposes them as an iterator.
///
/// It is assumed that the next byte that the reader will read, is the first
/// byte of a metadata block header. This means that the iterator will yield at
/// least a single value. If the iterator ever yields an error, then no more
/// data will be read thereafter, and the next value will be `None`.
pub struct MetadataBlockReader<R: ReadBytes> {
input: R,
done: bool,
}
/// Either a `MetadataBlock` or an `Error`.
pub type MetadataBlockResult = Result<MetadataBlock>;
impl<R: ReadBytes> MetadataBlockReader<R> {
/// Creates a metadata block reader that will yield at least one element.
pub fn new(inpu | R) -> MetadataBlockReader<R> {
MetadataBlockReader {
input: input,
done: false,
}
}
#[inline]
fn read_next(&mut self) -> MetadataBlockResult {
let header = try!(read_metadata_block_header(&mut self.input));
let block = try!(read_metadata_block(&mut self.input, header.block_type, header.length));
self.done = header.is_last;
Ok(block)
}
}
impl<R: ReadBytes> Iterator for MetadataBlockReader<R> {
type Item = MetadataBlockResult;
#[inline]
fn next(&mut self) -> Option<MetadataBlockResult> {
if self.done {
None
} else {
let block = self.read_next();
// After a failure, no more attempts to read will be made,
// because we don't know where we are in the stream.
if !block.is_ok() {
self.done = true;
}
Some(block)
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
// When done, there will be no more blocks,
// when not done, there will be at least one more.
if self.done { (0, Some(0)) } else { (1, None) }
}
}
| t: | identifier_name |
metadata.rs | // Claxon -- A FLAC decoding library in Rust
// Copyright 2014 Ruud van Asseldonk
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License has been included in the root of the repository.
//! The `metadata` module deals with metadata at the beginning of a FLAC stream.
use error::{Error, Result, fmt_err};
use input::ReadBytes;
use std::str;
use std::slice;
#[derive(Clone, Copy)]
struct MetadataBlockHeader {
is_last: bool,
block_type: u8,
length: u32,
}
/// The streaminfo metadata block, with important information about the stream.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct StreamInfo {
// TODO: "size" would better be called "duration" for clarity.
/// The minimum block size (in inter-channel samples) used in the stream.
///
/// This number is independent of the number of channels. To get the minimum
/// block duration in seconds, divide this by the sample rate.
pub min_block_size: u16,
/// The maximum block size (in inter-channel samples) used in the stream.
///
/// This number is independent of the number of channels. To get the
/// maximum block duration in seconds, divide by the sample rate. To avoid
/// allocations during decoding, a buffer of this size times the number of
/// channels can be allocated up front and passed into
/// `FrameReader::read_next_or_eof()`.
pub max_block_size: u16,
/// The minimum frame size (in bytes) used in the stream.
pub min_frame_size: Option<u32>,
/// The maximum frame size (in bytes) used in the stream.
pub max_frame_size: Option<u32>,
/// The sample rate in Hz.
pub sample_rate: u32,
/// The number of channels.
pub channels: u32,
/// The number of bits per sample.
pub bits_per_sample: u32,
/// The total number of inter-channel samples in the stream.
// TODO: rename to `duration` for clarity?
pub samples: Option<u64>,
/// MD5 signature of the unencoded audio data.
pub md5sum: [u8; 16],
}
/// A seek point in the seek table.
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct SeekPoint {
/// Sample number of the first sample in the target frame, or 2<sup>64</sup> - 1 for a placeholder.
pub sample: u64,
/// Offset in bytes from the first byte of the first frame header to the first byte of the
/// target frame's header.
pub offset: u64,
/// Number of samples in the target frame.
pub samples: u16,
}
/// A seek table to aid seeking in the stream.
pub struct SeekTable {
/// The seek points, sorted in ascending order by sample number.
#[allow(dead_code)] // TODO: Implement seeking.
seekpoints: Vec<SeekPoint>,
}
/// Vorbis comments, also known as FLAC tags (e.g. artist, title, etc.).
pub struct VorbisComment {
/// The “vendor string”, chosen by the encoder vendor.
///
/// This string usually contains the name and version of the program that
/// encoded the FLAC stream, such as `reference libFLAC 1.3.2 20170101`
/// or `Lavf57.25.100`.
pub vendor: String,
/// Name-value pairs of Vorbis comments, such as `ARTIST=Queen`.
///
/// This struct stores a raw low-level representation of tags. Use
/// `FlacReader::tags()` for a friendlier iterator. The tuple consists of
/// the string in `"NAME=value"` format, and the index of the `'='` into
/// that string.
///
/// The name is supposed to be interpreted case-insensitively, and is
/// guaranteed to consist of ASCII characters. Claxon does not normalize
/// the casing of the name. Use `metadata::GetTag` to do a case-insensitive
/// lookup.
///
/// Names need not be unique. For instance, multiple `ARTIST` comments might
/// be present on a collaboration track.
///
/// See <https://www.xiph.org/vorbis/doc/v-comment.html> for more details.
pub comments: Vec<(String, usize)>,
}
/// A metadata about the flac stream.
pub enum MetadataBlock {
/// A stream info block.
StreamInfo(StreamInfo),
/// A padding block (with no meaningful data).
Padding {
/// The number of padding bytes.
length: u32,
},
/// An application block with application-specific data.
Application {
/// The registered application ID.
id: u32,
/// The contents of the application block.
data: Vec<u8>,
},
/// A seek table block.
SeekTable(SeekTable),
/// A Vorbis comment block, also known as FLAC tags.
VorbisComment(VorbisComment),
/// A CUE sheet block.
CueSheet, // TODO
/// A picture block.
Picture, // TODO
/// A block with a reserved block type, not supported by this library.
Reserved,
}
/// Iterates over Vorbis comments (FLAC tags) in a FLAC stream.
///
/// See `FlacReader::tags()` for more details.
pub struct Tags<'a> {
/// The underlying iterator.
iter: slice::Iter<'a, (String, usize)>,
}
impl<'a> Tags<'a> {
/// Returns a new `Tags` iterator.
#[inline]
pub fn new(comments: &'a [(String, usize)]) -> Tags<'a> {
Tags {
iter: comments.iter(),
}
}
}
impl<'a> Iterator for Tags<'a> {
type Item = (&'a str, &'a str);
#[inline]
fn next(&mut self) -> Option<(&'a str, &'a str)> {
return self.iter.next().map(|&(ref comment, sep_idx)| {
(&comment[..sep_idx], &comment[sep_idx+1..])
})
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
}
impl<'a> ExactSizeIterator for Tags<'a> {}
/// Iterates over Vorbis comments looking for a specific one; returns its values as `&str`.
///
/// See `FlacReader::get_tag()` for more details.
pub struct GetTag<'a> {
/// The Vorbis comments to search through.
vorbis_comments: &'a [(String, usize)],
/// The tag to look for.
needle: &'a str,
/// The index of the (name, value) pair that should be inspected next.
index: usize,
}
impl<'a> GetTag<'a> {
/// Returns a new `GetTag` iterator.
#[inline]
pub fn new(vorbis_comments: &'a [(String, usize)], needle: &'a str) -> GetTag<'a> {
GetTag {
vorbis_comments: vorbis_comments,
needle: needle,
index: 0,
}
}
}
impl<'a> Iterator for GetTag<'a> {
type Item = &'a str;
#[inline]
fn next(&mut self) -> Option<&'a str> {
// This import is actually required on Rust 1.13.
#[allow(unused_imports)]
use std::ascii::AsciiExt;
while self.index < self.vorbis_comments.len() {
let (ref comment, sep_idx) = self.vorbis_comments[self.index];
self.index += 1;
if comment[..sep_idx].eq_ignore_ascii_case(self.needle) {
return Some(&comment[sep_idx + 1..])
}
}
return None
}
}
#[inline]
fn read_metadata_block_header<R: ReadBytes>(input: &mut R) -> Result<MetadataBlockHeader> {
let byte = try!(input.read_u8());
// The first bit specifies whether this is the last block, the next 7 bits
// specify the type of the metadata block to follow.
let is_last = (byte >> 7) == 1;
let block_type = byte & 0b0111_1111;
// The length field is 24 bits, or 3 bytes.
let length = try!(input.read_be_u24());
let header = MetadataBlockHeader {
is_last: is_last,
block_type: block_type,
length: length,
};
Ok(header)
}
/// Read a single metadata block header and body from the input.
///
/// When reading a regular flac stream, there is no need to use this function
/// directly; constructing a `FlacReader` will read the header and its metadata
/// blocks.
///
/// When a flac stream is embedded in a container format, this function can be
/// used to decode a single metadata block. For instance, the Ogg format embeds
/// metadata blocks including their header verbatim in packets. This function
/// can be used to decode that raw data.
#[inline]
pub fn read_metadata_block_with_header<R: ReadBytes>(input: &mut R)
-> Result<MetadataBlock> {
let header = try!(read_metadata_block_header(input));
read_metadata_block(input, header.block_type, header.length)
}
/// Read a single metadata block of the given type and length from the input.
///
/// When reading a regular flac stream, there is no need to use this function
/// directly; constructing a `FlacReader` will read the header and its metadata
/// blocks.
///
/// When a flac stream is embedded in a container format, this function can be
/// used to decode a single metadata block. For instance, the MP4 format sports
/// a “FLAC Specific Box” which contains the block type and the raw data. This
/// function can be used to decode that raw data.
#[inline]
pub fn read_metadata_block<R: ReadBytes>(input: &mut R,
block_type: u8,
length: u32)
-> Result<MetadataBlock> {
match block_type {
0 => {
// The streaminfo block has a fixed size of 34 bytes.
if length == 34 {
let streaminfo = try!(read_streaminfo_block(input));
Ok(MetadataBlock::StreamInfo(streaminfo))
} else {
fmt_err("invalid streaminfo metadata block length")
}
}
1 => {
| 2 => {
let (id, data) = try!(read_application_block(input, length));
Ok(MetadataBlock::Application {
id: id,
data: data,
})
}
3 => {
// TODO: implement seektable reading. For now, pretend it is padding.
try!(input.skip(length));
Ok(MetadataBlock::Padding { length: length })
}
4 => {
let vorbis_comment = try!(read_vorbis_comment_block(input, length));
Ok(MetadataBlock::VorbisComment(vorbis_comment))
}
5 => {
// TODO: implement CUE sheet reading. For now, pretend it is padding.
try!(input.skip(length));
Ok(MetadataBlock::Padding { length: length })
}
6 => {
// TODO: implement picture reading. For now, pretend it is padding.
try!(input.skip(length));
Ok(MetadataBlock::Padding { length: length })
}
127 => {
// This code is invalid to avoid confusion with a frame sync code.
fmt_err("invalid metadata block type")
}
_ => {
// Any other block type is 'reserved' at the moment of writing. The
// reference implementation reads it as an 'unknown' block. That is
// one way of handling it, but maybe there should be some kind of
// 'strict' mode (configurable at compile time?) so that this can
// be an error if desired.
try!(input.skip(length));
Ok(MetadataBlock::Reserved)
}
}
}
fn read_streaminfo_block<R: ReadBytes>(input: &mut R) -> Result<StreamInfo> {
let min_block_size = try!(input.read_be_u16());
let max_block_size = try!(input.read_be_u16());
// The frame size fields are 24 bits, or 3 bytes.
let min_frame_size = try!(input.read_be_u24());
let max_frame_size = try!(input.read_be_u24());
// Next up are 20 bits that determine the sample rate.
let sample_rate_msb = try!(input.read_be_u16());
let sample_rate_lsb = try!(input.read_u8());
// Stitch together the value from the first 16 bits,
// and then the 4 most significant bits of the next byte.
let sample_rate = (sample_rate_msb as u32) << 4 | (sample_rate_lsb as u32) >> 4;
// Next three bits are the number of channels - 1. Mask them out and add 1.
let n_channels_bps = sample_rate_lsb;
let n_channels = ((n_channels_bps >> 1) & 0b0000_0111) + 1;
// The final bit is the most significant of bits per sample - 1. Bits per
// sample - 1 is 5 bits in total.
let bps_msb = n_channels_bps & 1;
let bps_lsb_n_samples = try!(input.read_u8());
// Stitch together these values, add 1 because # - 1 is stored.
let bits_per_sample = (bps_msb << 4 | (bps_lsb_n_samples >> 4)) + 1;
// Number of samples in 36 bits, we have 4 already, 32 to go.
let n_samples_msb = bps_lsb_n_samples & 0b0000_1111;
let n_samples_lsb = try!(input.read_be_u32());
let n_samples = (n_samples_msb as u64) << 32 | n_samples_lsb as u64;
// Next are 128 bits (16 bytes) of MD5 signature.
let mut md5sum = [0u8; 16];
try!(input.read_into(&mut md5sum));
// Lower bounds can never be larger than upper bounds. Note that 0 indicates
// unknown for the frame size. Also, the block size must be at least 16.
if min_block_size > max_block_size {
return fmt_err("inconsistent bounds, min block size > max block size");
}
if min_block_size < 16 {
return fmt_err("invalid block size, must be at least 16");
}
if min_frame_size > max_frame_size && max_frame_size != 0 {
return fmt_err("inconsistent bounds, min frame size > max frame size");
}
// A sample rate of 0 is invalid, and the maximum sample rate is limited by
// the structure of the frame headers to 655350 Hz.
if sample_rate == 0 || sample_rate > 655350 {
return fmt_err("invalid sample rate");
}
let stream_info = StreamInfo {
min_block_size: min_block_size,
max_block_size: max_block_size,
min_frame_size: if min_frame_size == 0 {
None
} else {
Some(min_frame_size)
},
max_frame_size: if max_frame_size == 0 {
None
} else {
Some(max_frame_size)
},
sample_rate: sample_rate,
channels: n_channels as u32,
bits_per_sample: bits_per_sample as u32,
samples: if n_samples == 0 {
None
} else {
Some(n_samples)
},
md5sum: md5sum,
};
Ok(stream_info)
}
fn read_vorbis_comment_block<R: ReadBytes>(input: &mut R, length: u32) -> Result<VorbisComment> {
if length < 8 {
// We expect at a minimum a 32-bit vendor string length, and a 32-bit
// comment count.
return fmt_err("Vorbis comment block is too short")
}
// Fail if the length of the Vorbis comment block is larger than 1 MiB. This
// block is full of length-prefixed strings for which we allocate memory up
// front. If there were no limit on these, a maliciously crafted file could
// cause OOM by claiming to contain large strings. But at least the strings
// cannot be longer than the size of the Vorbis comment block, and by
// limiting the size of that block, we can mitigate such DoS attacks.
//
// The typical size of a the Vorbis comment block is 1 KiB; on a corpus of
// real-world flac files, the 0.05 and 0.95 quantiles were 792 and 1257
// bytes respectively, with even the 0.99 quantile below 2 KiB. The only
// reason for having a large Vorbis comment block is when cover art is
// incorrectly embedded there, but the Vorbis comment block is not the right
// place for that anyway.
if length > 10 * 1024 * 1024 {
let msg = "Vorbis comment blocks larger than 10 MiB are not supported";
return Err(Error::Unsupported(msg))
}
// The Vorbis comment block starts with a length-prefixed "vendor string".
// It cannot be larger than the block length - 8, because there are the
// 32-bit vendor string length, and comment count.
let vendor_len = try!(input.read_le_u32());
if vendor_len > length - 8 { return fmt_err("vendor string too long") }
let mut vendor_bytes = Vec::with_capacity(vendor_len as usize);
// We can safely set the lenght of the vector here; the uninitialized memory
// is not exposed. If `read_into` succeeds, it will have overwritten all
// bytes. If not, an error is returned and the memory is never exposed.
unsafe { vendor_bytes.set_len(vendor_len as usize); }
try!(input.read_into(&mut vendor_bytes));
let vendor = try!(String::from_utf8(vendor_bytes));
// Next up is the number of comments. Because every comment is at least 4
// bytes to indicate its length, there cannot be more comments than the
// length of the block divided by 4. This is only an upper bound to ensure
// that we don't allocate a big vector, to protect against DoS attacks.
let mut comments_len = try!(input.read_le_u32());
if comments_len >= length / 4 {
return fmt_err("too many entries for Vorbis comment block")
}
let mut comments = Vec::with_capacity(comments_len as usize);
let mut bytes_left = length - 8 - vendor_len;
// For every comment, there is a length-prefixed string of the form
// "NAME=value".
while bytes_left >= 4 && comments.len() < comments_len as usize {
let comment_len = try!(input.read_le_u32());
bytes_left -= 4;
if comment_len > bytes_left {
return fmt_err("Vorbis comment too long for Vorbis comment block")
}
// Some older versions of libflac allowed writing zero-length Vorbis
// comments. ALthough such files are invalid, they do occur in the wild,
// so we skip over the empty comment.
if comment_len == 0 {
// Does not overflow because `comments_len > comments.len() >= 0`.
comments_len -= 1;
continue;
}
// For the same reason as above, setting the length is safe here.
let mut comment_bytes = Vec::with_capacity(comment_len as usize);
unsafe { comment_bytes.set_len(comment_len as usize); }
try!(input.read_into(&mut comment_bytes));
bytes_left -= comment_len;
if let Some(sep_index) = comment_bytes.iter().position(|&x| x == b'=') {
{
let name_bytes = &comment_bytes[..sep_index];
// According to the Vorbis spec, the field name may consist of ascii
// bytes 0x20 through 0x7d, 0x3d (`=`) excluded. Verifying this has
// the advantage that if the check passes, the result is valid
// UTF-8, so the conversion to string will not fail.
if name_bytes.iter().any(|&x| x < 0x20 || x > 0x7d) {
return fmt_err("Vorbis comment field name contains invalid byte")
}
}
let comment = try!(String::from_utf8(comment_bytes));
comments.push((comment, sep_index));
} else {
return fmt_err("Vorbis comment does not contain '='")
}
}
if bytes_left != 0 {
return fmt_err("Vorbis comment block has excess data")
}
if comments.len() != comments_len as usize {
return fmt_err("Vorbis comment block contains wrong number of entries")
}
let vorbis_comment = VorbisComment {
vendor: vendor,
comments: comments,
};
Ok(vorbis_comment)
}
fn read_padding_block<R: ReadBytes>(input: &mut R, length: u32) -> Result<()> {
// The specification dictates that all bits of the padding block must be 0.
// However, the reference implementation does not issue an error when this
// is not the case, and frankly, when you are going to skip over these
// bytes and do nothing with them whatsoever, why waste all those CPU
// cycles checking that the padding is valid?
Ok(try!(input.skip(length)))
}
fn read_application_block<R: ReadBytes>(input: &mut R, length: u32) -> Result<(u32, Vec<u8>)> {
if length < 4 {
return fmt_err("application block length must be at least 4 bytes")
}
// Reject large application blocks to avoid memory-based denial-
// of-service attacks. See also the more elaborate motivation in
// `read_vorbis_comment_block()`.
if length > 10 * 1024 * 1024 {
let msg = "application blocks larger than 10 MiB are not supported";
return Err(Error::Unsupported(msg))
}
let id = try!(input.read_be_u32());
// Four bytes of the block have been used for the ID, the rest is payload.
// Create a vector of uninitialized memory, and read the block into it. The
// uninitialized memory is never exposed: read_into will either fill the
// buffer completely, or return an err, in which case the memory is not
// exposed.
let mut data = Vec::with_capacity(length as usize - 4);
unsafe { data.set_len(length as usize - 4); }
try!(input.read_into(&mut data));
Ok((id, data))
}
/// Reads metadata blocks from a stream and exposes them as an iterator.
///
/// It is assumed that the next byte that the reader will read, is the first
/// byte of a metadata block header. This means that the iterator will yield at
/// least a single value. If the iterator ever yields an error, then no more
/// data will be read thereafter, and the next value will be `None`.
pub struct MetadataBlockReader<R: ReadBytes> {
input: R,
done: bool,
}
/// Either a `MetadataBlock` or an `Error`.
pub type MetadataBlockResult = Result<MetadataBlock>;
impl<R: ReadBytes> MetadataBlockReader<R> {
/// Creates a metadata block reader that will yield at least one element.
pub fn new(input: R) -> MetadataBlockReader<R> {
MetadataBlockReader {
input: input,
done: false,
}
}
#[inline]
fn read_next(&mut self) -> MetadataBlockResult {
let header = try!(read_metadata_block_header(&mut self.input));
let block = try!(read_metadata_block(&mut self.input, header.block_type, header.length));
self.done = header.is_last;
Ok(block)
}
}
impl<R: ReadBytes> Iterator for MetadataBlockReader<R> {
type Item = MetadataBlockResult;
#[inline]
fn next(&mut self) -> Option<MetadataBlockResult> {
if self.done {
None
} else {
let block = self.read_next();
// After a failure, no more attempts to read will be made,
// because we don't know where we are in the stream.
if !block.is_ok() {
self.done = true;
}
Some(block)
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
// When done, there will be no more blocks,
// when not done, there will be at least one more.
if self.done { (0, Some(0)) } else { (1, None) }
}
}
| try!(read_padding_block(input, length));
Ok(MetadataBlock::Padding { length: length })
}
| conditional_block |
metadata.rs | // Claxon -- A FLAC decoding library in Rust
// Copyright 2014 Ruud van Asseldonk
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License has been included in the root of the repository.
//! The `metadata` module deals with metadata at the beginning of a FLAC stream.
use error::{Error, Result, fmt_err};
use input::ReadBytes;
use std::str;
use std::slice;
#[derive(Clone, Copy)]
struct MetadataBlockHeader {
is_last: bool,
block_type: u8,
length: u32,
}
/// The streaminfo metadata block, with important information about the stream.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct StreamInfo {
// TODO: "size" would better be called "duration" for clarity.
/// The minimum block size (in inter-channel samples) used in the stream.
///
/// This number is independent of the number of channels. To get the minimum
/// block duration in seconds, divide this by the sample rate.
pub min_block_size: u16,
/// The maximum block size (in inter-channel samples) used in the stream.
///
/// This number is independent of the number of channels. To get the
/// maximum block duration in seconds, divide by the sample rate. To avoid
/// allocations during decoding, a buffer of this size times the number of
/// channels can be allocated up front and passed into
/// `FrameReader::read_next_or_eof()`.
pub max_block_size: u16,
/// The minimum frame size (in bytes) used in the stream.
pub min_frame_size: Option<u32>,
/// The maximum frame size (in bytes) used in the stream.
pub max_frame_size: Option<u32>,
/// The sample rate in Hz.
pub sample_rate: u32,
/// The number of channels.
pub channels: u32,
/// The number of bits per sample.
pub bits_per_sample: u32,
/// The total number of inter-channel samples in the stream.
// TODO: rename to `duration` for clarity?
pub samples: Option<u64>,
/// MD5 signature of the unencoded audio data.
pub md5sum: [u8; 16],
}
/// A seek point in the seek table.
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct SeekPoint {
/// Sample number of the first sample in the target frame, or 2<sup>64</sup> - 1 for a placeholder.
pub sample: u64,
/// Offset in bytes from the first byte of the first frame header to the first byte of the
/// target frame's header.
pub offset: u64,
/// Number of samples in the target frame.
pub samples: u16,
}
/// A seek table to aid seeking in the stream.
pub struct SeekTable {
/// The seek points, sorted in ascending order by sample number.
#[allow(dead_code)] // TODO: Implement seeking.
seekpoints: Vec<SeekPoint>,
}
/// Vorbis comments, also known as FLAC tags (e.g. artist, title, etc.).
pub struct VorbisComment {
/// The “vendor string”, chosen by the encoder vendor.
///
/// This string usually contains the name and version of the program that
/// encoded the FLAC stream, such as `reference libFLAC 1.3.2 20170101`
/// or `Lavf57.25.100`.
pub vendor: String,
/// Name-value pairs of Vorbis comments, such as `ARTIST=Queen`.
///
/// This struct stores a raw low-level representation of tags. Use
/// `FlacReader::tags()` for a friendlier iterator. The tuple consists of
/// the string in `"NAME=value"` format, and the index of the `'='` into
/// that string.
///
/// The name is supposed to be interpreted case-insensitively, and is
/// guaranteed to consist of ASCII characters. Claxon does not normalize
/// the casing of the name. Use `metadata::GetTag` to do a case-insensitive
/// lookup.
///
/// Names need not be unique. For instance, multiple `ARTIST` comments might
/// be present on a collaboration track.
///
/// See <https://www.xiph.org/vorbis/doc/v-comment.html> for more details.
pub comments: Vec<(String, usize)>,
}
/// A metadata about the flac stream. | /// The number of padding bytes.
length: u32,
},
/// An application block with application-specific data.
Application {
/// The registered application ID.
id: u32,
/// The contents of the application block.
data: Vec<u8>,
},
/// A seek table block.
SeekTable(SeekTable),
/// A Vorbis comment block, also known as FLAC tags.
VorbisComment(VorbisComment),
/// A CUE sheet block.
CueSheet, // TODO
/// A picture block.
Picture, // TODO
/// A block with a reserved block type, not supported by this library.
Reserved,
}
/// Iterates over Vorbis comments (FLAC tags) in a FLAC stream.
///
/// See `FlacReader::tags()` for more details.
pub struct Tags<'a> {
/// The underlying iterator.
iter: slice::Iter<'a, (String, usize)>,
}
impl<'a> Tags<'a> {
/// Returns a new `Tags` iterator.
#[inline]
pub fn new(comments: &'a [(String, usize)]) -> Tags<'a> {
Tags {
iter: comments.iter(),
}
}
}
impl<'a> Iterator for Tags<'a> {
type Item = (&'a str, &'a str);
#[inline]
fn next(&mut self) -> Option<(&'a str, &'a str)> {
return self.iter.next().map(|&(ref comment, sep_idx)| {
(&comment[..sep_idx], &comment[sep_idx+1..])
})
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
}
impl<'a> ExactSizeIterator for Tags<'a> {}
/// Iterates over Vorbis comments looking for a specific one; returns its values as `&str`.
///
/// See `FlacReader::get_tag()` for more details.
pub struct GetTag<'a> {
/// The Vorbis comments to search through.
vorbis_comments: &'a [(String, usize)],
/// The tag to look for.
needle: &'a str,
/// The index of the (name, value) pair that should be inspected next.
index: usize,
}
impl<'a> GetTag<'a> {
/// Returns a new `GetTag` iterator.
#[inline]
pub fn new(vorbis_comments: &'a [(String, usize)], needle: &'a str) -> GetTag<'a> {
GetTag {
vorbis_comments: vorbis_comments,
needle: needle,
index: 0,
}
}
}
impl<'a> Iterator for GetTag<'a> {
type Item = &'a str;
#[inline]
fn next(&mut self) -> Option<&'a str> {
// This import is actually required on Rust 1.13.
#[allow(unused_imports)]
use std::ascii::AsciiExt;
while self.index < self.vorbis_comments.len() {
let (ref comment, sep_idx) = self.vorbis_comments[self.index];
self.index += 1;
if comment[..sep_idx].eq_ignore_ascii_case(self.needle) {
return Some(&comment[sep_idx + 1..])
}
}
return None
}
}
#[inline]
fn read_metadata_block_header<R: ReadBytes>(input: &mut R) -> Result<MetadataBlockHeader> {
let byte = try!(input.read_u8());
// The first bit specifies whether this is the last block, the next 7 bits
// specify the type of the metadata block to follow.
let is_last = (byte >> 7) == 1;
let block_type = byte & 0b0111_1111;
// The length field is 24 bits, or 3 bytes.
let length = try!(input.read_be_u24());
let header = MetadataBlockHeader {
is_last: is_last,
block_type: block_type,
length: length,
};
Ok(header)
}
/// Read a single metadata block header and body from the input.
///
/// When reading a regular flac stream, there is no need to use this function
/// directly; constructing a `FlacReader` will read the header and its metadata
/// blocks.
///
/// When a flac stream is embedded in a container format, this function can be
/// used to decode a single metadata block. For instance, the Ogg format embeds
/// metadata blocks including their header verbatim in packets. This function
/// can be used to decode that raw data.
#[inline]
pub fn read_metadata_block_with_header<R: ReadBytes>(input: &mut R)
-> Result<MetadataBlock> {
let header = try!(read_metadata_block_header(input));
read_metadata_block(input, header.block_type, header.length)
}
/// Read a single metadata block of the given type and length from the input.
///
/// When reading a regular flac stream, there is no need to use this function
/// directly; constructing a `FlacReader` will read the header and its metadata
/// blocks.
///
/// When a flac stream is embedded in a container format, this function can be
/// used to decode a single metadata block. For instance, the MP4 format sports
/// a “FLAC Specific Box” which contains the block type and the raw data. This
/// function can be used to decode that raw data.
#[inline]
pub fn read_metadata_block<R: ReadBytes>(input: &mut R,
block_type: u8,
length: u32)
-> Result<MetadataBlock> {
match block_type {
0 => {
// The streaminfo block has a fixed size of 34 bytes.
if length == 34 {
let streaminfo = try!(read_streaminfo_block(input));
Ok(MetadataBlock::StreamInfo(streaminfo))
} else {
fmt_err("invalid streaminfo metadata block length")
}
}
1 => {
try!(read_padding_block(input, length));
Ok(MetadataBlock::Padding { length: length })
}
2 => {
let (id, data) = try!(read_application_block(input, length));
Ok(MetadataBlock::Application {
id: id,
data: data,
})
}
3 => {
// TODO: implement seektable reading. For now, pretend it is padding.
try!(input.skip(length));
Ok(MetadataBlock::Padding { length: length })
}
4 => {
let vorbis_comment = try!(read_vorbis_comment_block(input, length));
Ok(MetadataBlock::VorbisComment(vorbis_comment))
}
5 => {
// TODO: implement CUE sheet reading. For now, pretend it is padding.
try!(input.skip(length));
Ok(MetadataBlock::Padding { length: length })
}
6 => {
// TODO: implement picture reading. For now, pretend it is padding.
try!(input.skip(length));
Ok(MetadataBlock::Padding { length: length })
}
127 => {
// This code is invalid to avoid confusion with a frame sync code.
fmt_err("invalid metadata block type")
}
_ => {
// Any other block type is 'reserved' at the moment of writing. The
// reference implementation reads it as an 'unknown' block. That is
// one way of handling it, but maybe there should be some kind of
// 'strict' mode (configurable at compile time?) so that this can
// be an error if desired.
try!(input.skip(length));
Ok(MetadataBlock::Reserved)
}
}
}
fn read_streaminfo_block<R: ReadBytes>(input: &mut R) -> Result<StreamInfo> {
let min_block_size = try!(input.read_be_u16());
let max_block_size = try!(input.read_be_u16());
// The frame size fields are 24 bits, or 3 bytes.
let min_frame_size = try!(input.read_be_u24());
let max_frame_size = try!(input.read_be_u24());
// Next up are 20 bits that determine the sample rate.
let sample_rate_msb = try!(input.read_be_u16());
let sample_rate_lsb = try!(input.read_u8());
// Stitch together the value from the first 16 bits,
// and then the 4 most significant bits of the next byte.
let sample_rate = (sample_rate_msb as u32) << 4 | (sample_rate_lsb as u32) >> 4;
// Next three bits are the number of channels - 1. Mask them out and add 1.
let n_channels_bps = sample_rate_lsb;
let n_channels = ((n_channels_bps >> 1) & 0b0000_0111) + 1;
// The final bit is the most significant of bits per sample - 1. Bits per
// sample - 1 is 5 bits in total.
let bps_msb = n_channels_bps & 1;
let bps_lsb_n_samples = try!(input.read_u8());
// Stitch together these values, add 1 because # - 1 is stored.
let bits_per_sample = (bps_msb << 4 | (bps_lsb_n_samples >> 4)) + 1;
// Number of samples in 36 bits, we have 4 already, 32 to go.
let n_samples_msb = bps_lsb_n_samples & 0b0000_1111;
let n_samples_lsb = try!(input.read_be_u32());
let n_samples = (n_samples_msb as u64) << 32 | n_samples_lsb as u64;
// Next are 128 bits (16 bytes) of MD5 signature.
let mut md5sum = [0u8; 16];
try!(input.read_into(&mut md5sum));
// Lower bounds can never be larger than upper bounds. Note that 0 indicates
// unknown for the frame size. Also, the block size must be at least 16.
if min_block_size > max_block_size {
return fmt_err("inconsistent bounds, min block size > max block size");
}
if min_block_size < 16 {
return fmt_err("invalid block size, must be at least 16");
}
if min_frame_size > max_frame_size && max_frame_size != 0 {
return fmt_err("inconsistent bounds, min frame size > max frame size");
}
// A sample rate of 0 is invalid, and the maximum sample rate is limited by
// the structure of the frame headers to 655350 Hz.
if sample_rate == 0 || sample_rate > 655350 {
return fmt_err("invalid sample rate");
}
let stream_info = StreamInfo {
min_block_size: min_block_size,
max_block_size: max_block_size,
min_frame_size: if min_frame_size == 0 {
None
} else {
Some(min_frame_size)
},
max_frame_size: if max_frame_size == 0 {
None
} else {
Some(max_frame_size)
},
sample_rate: sample_rate,
channels: n_channels as u32,
bits_per_sample: bits_per_sample as u32,
samples: if n_samples == 0 {
None
} else {
Some(n_samples)
},
md5sum: md5sum,
};
Ok(stream_info)
}
fn read_vorbis_comment_block<R: ReadBytes>(input: &mut R, length: u32) -> Result<VorbisComment> {
if length < 8 {
// We expect at a minimum a 32-bit vendor string length, and a 32-bit
// comment count.
return fmt_err("Vorbis comment block is too short")
}
// Fail if the length of the Vorbis comment block is larger than 1 MiB. This
// block is full of length-prefixed strings for which we allocate memory up
// front. If there were no limit on these, a maliciously crafted file could
// cause OOM by claiming to contain large strings. But at least the strings
// cannot be longer than the size of the Vorbis comment block, and by
// limiting the size of that block, we can mitigate such DoS attacks.
//
// The typical size of a the Vorbis comment block is 1 KiB; on a corpus of
// real-world flac files, the 0.05 and 0.95 quantiles were 792 and 1257
// bytes respectively, with even the 0.99 quantile below 2 KiB. The only
// reason for having a large Vorbis comment block is when cover art is
// incorrectly embedded there, but the Vorbis comment block is not the right
// place for that anyway.
if length > 10 * 1024 * 1024 {
let msg = "Vorbis comment blocks larger than 10 MiB are not supported";
return Err(Error::Unsupported(msg))
}
// The Vorbis comment block starts with a length-prefixed "vendor string".
// It cannot be larger than the block length - 8, because there are the
// 32-bit vendor string length, and comment count.
let vendor_len = try!(input.read_le_u32());
if vendor_len > length - 8 { return fmt_err("vendor string too long") }
let mut vendor_bytes = Vec::with_capacity(vendor_len as usize);
// We can safely set the lenght of the vector here; the uninitialized memory
// is not exposed. If `read_into` succeeds, it will have overwritten all
// bytes. If not, an error is returned and the memory is never exposed.
unsafe { vendor_bytes.set_len(vendor_len as usize); }
try!(input.read_into(&mut vendor_bytes));
let vendor = try!(String::from_utf8(vendor_bytes));
// Next up is the number of comments. Because every comment is at least 4
// bytes to indicate its length, there cannot be more comments than the
// length of the block divided by 4. This is only an upper bound to ensure
// that we don't allocate a big vector, to protect against DoS attacks.
let mut comments_len = try!(input.read_le_u32());
if comments_len >= length / 4 {
return fmt_err("too many entries for Vorbis comment block")
}
let mut comments = Vec::with_capacity(comments_len as usize);
let mut bytes_left = length - 8 - vendor_len;
// For every comment, there is a length-prefixed string of the form
// "NAME=value".
while bytes_left >= 4 && comments.len() < comments_len as usize {
let comment_len = try!(input.read_le_u32());
bytes_left -= 4;
if comment_len > bytes_left {
return fmt_err("Vorbis comment too long for Vorbis comment block")
}
// Some older versions of libflac allowed writing zero-length Vorbis
// comments. ALthough such files are invalid, they do occur in the wild,
// so we skip over the empty comment.
if comment_len == 0 {
// Does not overflow because `comments_len > comments.len() >= 0`.
comments_len -= 1;
continue;
}
// For the same reason as above, setting the length is safe here.
let mut comment_bytes = Vec::with_capacity(comment_len as usize);
unsafe { comment_bytes.set_len(comment_len as usize); }
try!(input.read_into(&mut comment_bytes));
bytes_left -= comment_len;
if let Some(sep_index) = comment_bytes.iter().position(|&x| x == b'=') {
{
let name_bytes = &comment_bytes[..sep_index];
// According to the Vorbis spec, the field name may consist of ascii
// bytes 0x20 through 0x7d, 0x3d (`=`) excluded. Verifying this has
// the advantage that if the check passes, the result is valid
// UTF-8, so the conversion to string will not fail.
if name_bytes.iter().any(|&x| x < 0x20 || x > 0x7d) {
return fmt_err("Vorbis comment field name contains invalid byte")
}
}
let comment = try!(String::from_utf8(comment_bytes));
comments.push((comment, sep_index));
} else {
return fmt_err("Vorbis comment does not contain '='")
}
}
if bytes_left != 0 {
return fmt_err("Vorbis comment block has excess data")
}
if comments.len() != comments_len as usize {
return fmt_err("Vorbis comment block contains wrong number of entries")
}
let vorbis_comment = VorbisComment {
vendor: vendor,
comments: comments,
};
Ok(vorbis_comment)
}
fn read_padding_block<R: ReadBytes>(input: &mut R, length: u32) -> Result<()> {
// The specification dictates that all bits of the padding block must be 0.
// However, the reference implementation does not issue an error when this
// is not the case, and frankly, when you are going to skip over these
// bytes and do nothing with them whatsoever, why waste all those CPU
// cycles checking that the padding is valid?
Ok(try!(input.skip(length)))
}
fn read_application_block<R: ReadBytes>(input: &mut R, length: u32) -> Result<(u32, Vec<u8>)> {
if length < 4 {
return fmt_err("application block length must be at least 4 bytes")
}
// Reject large application blocks to avoid memory-based denial-
// of-service attacks. See also the more elaborate motivation in
// `read_vorbis_comment_block()`.
if length > 10 * 1024 * 1024 {
let msg = "application blocks larger than 10 MiB are not supported";
return Err(Error::Unsupported(msg))
}
let id = try!(input.read_be_u32());
// Four bytes of the block have been used for the ID, the rest is payload.
// Create a vector of uninitialized memory, and read the block into it. The
// uninitialized memory is never exposed: read_into will either fill the
// buffer completely, or return an err, in which case the memory is not
// exposed.
let mut data = Vec::with_capacity(length as usize - 4);
unsafe { data.set_len(length as usize - 4); }
try!(input.read_into(&mut data));
Ok((id, data))
}
/// Reads metadata blocks from a stream and exposes them as an iterator.
///
/// It is assumed that the next byte that the reader will read, is the first
/// byte of a metadata block header. This means that the iterator will yield at
/// least a single value. If the iterator ever yields an error, then no more
/// data will be read thereafter, and the next value will be `None`.
pub struct MetadataBlockReader<R: ReadBytes> {
input: R,
done: bool,
}
/// Either a `MetadataBlock` or an `Error`.
pub type MetadataBlockResult = Result<MetadataBlock>;
impl<R: ReadBytes> MetadataBlockReader<R> {
/// Creates a metadata block reader that will yield at least one element.
pub fn new(input: R) -> MetadataBlockReader<R> {
MetadataBlockReader {
input: input,
done: false,
}
}
#[inline]
fn read_next(&mut self) -> MetadataBlockResult {
let header = try!(read_metadata_block_header(&mut self.input));
let block = try!(read_metadata_block(&mut self.input, header.block_type, header.length));
self.done = header.is_last;
Ok(block)
}
}
impl<R: ReadBytes> Iterator for MetadataBlockReader<R> {
type Item = MetadataBlockResult;
#[inline]
fn next(&mut self) -> Option<MetadataBlockResult> {
if self.done {
None
} else {
let block = self.read_next();
// After a failure, no more attempts to read will be made,
// because we don't know where we are in the stream.
if !block.is_ok() {
self.done = true;
}
Some(block)
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
// When done, there will be no more blocks,
// when not done, there will be at least one more.
if self.done { (0, Some(0)) } else { (1, None) }
}
} | pub enum MetadataBlock {
/// A stream info block.
StreamInfo(StreamInfo),
/// A padding block (with no meaningful data).
Padding { | random_line_split |
test.py | # -*- coding: utf-8 -*-
"""Training script for the WaveNet network on the VCTK corpus.
This script trains a network with the WaveNet using data from the VCTK corpus,
which can be freely downloaded at the following site (~10 GB):
http://homepages.inf.ed.ac.uk/jyamagis/page3/page58/page58.html
"""
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import argparse
from datetime import datetime
import json,math
import os
import sys
import time
import matplotlib.pyplot as pl
import tensorflow as tf
from tensorflow.python.client import timeline
from wavenet import WaveNetModel, optimizer_factory, AudioReader
from wavenet.model import create_variable, create_bias_variable
from wavenet.util.visual import figure_joint_skeleton
from analysis.Corr_Dim import fnn, Tao, Dim_Corr
from analysis.Fourier_utils import *
BATCH_SIZE = 1
DATA_DIRECTORY = './VCTK-Corpus'
LOGDIR_ROOT = './logdir'
CHECKPOINT_EVERY = 500
NUM_STEPS = int(1e5)
LEARNING_RATE = 1e-4
WAVENET_PARAMS = ['./WAVE_params/wavenet_params_1.json','./WAVE_params/wavenet_params_2.json','./WAVE_params/wavenet_params_3.json']
STARTED_DATESTRING = "{0:%Y-%m-%dT%H-%M-%S}".format(datetime.now())
SAMPLE_SIZE = None
L2_REGULARIZATION_STRENGTH = 0
SILENCE_THRESHOLD = 0.3
EPSILON = 0.001
MOMENTUM = 0.9
MAX_TO_KEEP = 5
METADATA = False
def figure_hand_back(uvd_pt,uvd_pt1,uvd_pt2,path,test_num):
#uvd_pt = np.reshape(uvd_pt, (20, 3))
uvd_pt = uvd_pt.reshape(-1, 3)
uvd_pt1 = uvd_pt1.reshape(-1, 3)
uvd_pt2 = uvd_pt2.reshape(-1, 3)
fig = plt.figure(1)
fig.clear()
ax = plt.subplot(111, projection='3d')
fig_color = ['c', 'm', 'y', 'g', 'r']
ax.scatter(uvd_pt[0, 0], uvd_pt[0, 1], uvd_pt[0, 2], s=10, c='b')
ax.scatter(uvd_pt[1, 0], uvd_pt[1, 1], uvd_pt[1, 2], s=10, c='b')
ax.scatter(uvd_pt[2, 0], uvd_pt[2, 1], uvd_pt[2, 2], s=10, c='b')
ax.plot([uvd_pt[0, 0], uvd_pt[1, 0]],
[uvd_pt[0, 1], uvd_pt[1, 1]],
[uvd_pt[0, 2], uvd_pt[1, 2]], color='b', linewidth=1)
ax.plot([uvd_pt[1, 0], uvd_pt[2, 0]],
[uvd_pt[1, 1], uvd_pt[2, 1]],
[uvd_pt[1, 2], uvd_pt[2, 2]], color='b', linewidth=1)
ax.plot([uvd_pt[2, 0], uvd_pt[0, 0]],
[uvd_pt[2, 1], uvd_pt[0, 1]],
[uvd_pt[2, 2], uvd_pt[0, 2]], color='b', linewidth=1)
plt.ylim(-300, 300)
plt.xlim(-300, 300)
ax.set_zlim(-300, 300)
ax.scatter(uvd_pt1[0, 0], uvd_pt1[0, 1], uvd_pt1[0, 2], s=10, c='g')
ax.scatter(uvd_pt1[1, 0], uvd_pt1[1, 1], uvd_pt1[1, 2], s=10, c='g')
ax.scatter(uvd_pt1[2, 0], uvd_pt1[2, 1], uvd_pt1[2, 2], s=10, c='g')
ax.plot([uvd_pt1[0, 0], uvd_pt1[1, 0]],
[uvd_pt1[0, 1], uvd_pt1[1, 1]],
[uvd_pt1[0, 2], uvd_pt1[1, 2]], color='g', linewidth=1)
ax.plot([uvd_pt1[1, 0], uvd_pt1[2, 0]],
[uvd_pt1[1, 1], uvd_pt1[2, 1]],
[uvd_pt1[1, 2], uvd_pt1[2, 2]], color='g', linewidth=1)
ax.plot([uvd_pt1[2, 0], uvd_pt1[0, 0]],
[uvd_pt1[2, 1], uvd_pt1[0, 1]],
[uvd_pt1[2, 2], uvd_pt1[0, 2]], color='g', linewidth=1)
ax.scatter(uvd_pt2[0, 0], uvd_pt2[0, 1], uvd_pt2[0, 2], s=10, c='r')
ax.scatter(uvd_pt2[1, 0], uvd_pt2[1, 1], uvd_pt2[1, 2], s=10, c='r')
ax.scatter(uvd_pt2[2, 0], uvd_pt2[2, 1], uvd_pt2[2, 2], s=10, c='r')
ax.plot([uvd_pt2[0, 0], uvd_pt2[1, 0]],
[uvd_pt2[0, 1], uvd_pt2[1, 1]],
[uvd_pt2[0, 2], uvd_pt2[1, 2]], color='r', linewidth=1)
ax.plot([uvd_pt2[1, 0], uvd_pt2[2, 0]],
[uvd_pt2[1, 1], uvd_pt2[2, 1]],
[uvd_pt2[1, 2], uvd_pt2[2, 2]], color='r', linewidth=1)
ax.plot([uvd_pt2[2, 0], uvd_pt2[0, 0]],
[uvd_pt2[2, 1], uvd_pt2[0, 1]],
[uvd_pt2[2, 2], uvd_pt2[0, 2]], color='r', linewidth=1)
plt.savefig(path+str(test_num).zfill(7)+".png")
def get_arguments():
def _str_to_bool(s):
"""Convert string to bool (in argparse context)."""
if s.lower() not in ['true', 'false']:
raise ValueError('Argument needs to be a '
'boolean, got {}'.format(s))
return {'true': True, 'false': False}[s.lower()]
parser = argparse.ArgumentParser(description='WaveNet example network')
parser.add_argument('--batch_size', type=int, default=BATCH_SIZE,
help='How many wav files to process at once. Default: ' + str(BATCH_SIZE) + '.')
parser.add_argument('--data_dir', type=str, default=DATA_DIRECTORY,
help='The directory containing the VCTK corpus.')
parser.add_argument('--store_metadata', type=bool, default=METADATA,
help='Whether to store advanced debugging information '
'(execution time, memory consumption) for use with '
'TensorBoard. Default: ' + str(METADATA) + '.')
parser.add_argument('--logdir', type=str, default=None,
help='Directory in which to store the logging '
'information for TensorBoard. '
'If the model already exists, it will restore '
'the state and will continue training. '
'Cannot use with --logdir_root and --restore_from.')
parser.add_argument('--logdir_root', type=str, default=None,
help='Root directory to place the logging '
'output and generated model. These are stored '
'under the dated subdirectory of --logdir_root. '
'Cannot use with --logdir.')
parser.add_argument('--restore_from', type=str, default=None,
help='Directory in which to restore the model from. '
'This creates the new model under the dated directory '
'in --logdir_root. '
'Cannot use with --logdir.')
parser.add_argument('--checkpoint_every', type=int,
default=CHECKPOINT_EVERY,
help='How many steps to save each checkpoint after. Default: ' + str(CHECKPOINT_EVERY) + '.')
parser.add_argument('--num_steps', type=int, default=NUM_STEPS,
help='Number of training steps. Default: ' + str(NUM_STEPS) + '.')
parser.add_argument('--learning_rate', type=float, default=LEARNING_RATE,
help='Learning rate for training. Default: ' + str(LEARNING_RATE) + '.')
#parser.add_argument('--wavenet_params', type=list, default=WAVENET_PARAMS,
# help='JSON file with the network parameters. Default: ' + WAVENET_PARAMS + '.')
parser.add_argument('--sample_size', type=int, default=SAMPLE_SIZE,
help='Concatenate and cut audio samples to this many '
'samples. Default: ' + str(SAMPLE_SIZE) + '.')
parser.add_argument('--l2_regularization_strength', type=float,
default=L2_REGULARIZATION_STRENGTH,
help='Coefficient in the L2 regularization. '
'Default: False')
parser.add_argument('--silence_threshold', type=float,
default=SILENCE_THRESHOLD,
help='Volume threshold below which to trim the start '
'and the end from the training set samples. Default: ' + str(SILENCE_THRESHOLD) + '.')
parser.add_argument('--optimizer', type=str, default='adam',
choices=optimizer_factory.keys(),
help='Select the optimizer specified by this option. Default: adam.')
parser.add_argument('--momentum', type=float,
default=MOMENTUM, help='Specify the momentum to be '
'used by sgd or rmsprop optimizer. Ignored by the '
'adam optimizer. Default: ' + str(MOMENTUM) + '.')
parser.add_argument('--histograms', type=_str_to_bool, default=False,
help='Whether to store histogram summaries. Default: False')
parser.add_argument('--gc_channels', type=int, default=None,
help='Number of global condition channels. Default: None. Expecting: Int')
parser.add_argument('--max_checkpoints', type=int, default=MAX_TO_KEEP,
help='Maximum amount of checkpoints that will be kept alive. Default: '
+ str(MAX_TO_KEEP) + '.')
return parser.parse_args()
def save(saver, sess, logdir, step):
model_name = 'model.ckpt'
checkpoint_path = os.path.join(logdir, model_name)
print('Storing checkpoint to {} ...'.format(logdir), end="")
sys.stdout.flush()
if not os.path.exists(logdir):
os.makedirs(logdir)
saver.save(sess, checkpoint_path, global_step=step)
print(' Done.')
def load(saver, sess, logdir):
print("Trying to restore saved checkpoints from {} ...".format(logdir),
end="")
ckpt = tf.train.get_checkpoint_state(logdir)
if ckpt:
print(" Checkpoint found: {}".format(ckpt.model_checkpoint_path))
global_step = int(ckpt.model_checkpoint_path
.split('/')[-1]
.split('-')[-1])
print(" Global step was: {}".format(global_step))
print(" Restoring...", end="")
saver.restore(sess, ckpt.model_checkpoint_path)
print(" Done.")
return global_step
else:
print(" No checkpoint found.")
return None
def get_default_logdir(logdir_root):
logdir = os.path.join(logdir_root, 'train', STARTED_DATESTRING)
return logdir
def validate_directories(args):
"""Validate and arrange directory related arguments."""
# Validation
if args.logdir and args.logdir_root:
raise ValueError("--logdir and --logdir_root cannot be "
"specified at the same time.")
if args.logdir and args.restore_from:
raise ValueError(
"--logdir and --restore_from cannot be specified at the same "
"time. This is to keep your previous model from unexpected "
"overwrites.\n"
"Use --logdir_root to specify the root of the directory which "
"will be automatically created with current date and time, or use "
"only --logdir to just continue the training from the last "
"checkpoint.")
# Arrangement
logdir_root = args.logdir_root
if logdir_root is None:
logdir_root = LOGDIR_ROOT
logdir = args.logdir
if logdir is None:
logdir = get_default_logdir(logdir_root)
print('Using default logdir: {}'.format(logdir))
restore_from = args.restore_from
if restore_from is None:
# args.logdir and args.restore_from are exclusive,
# so it is guaranteed the logdir here is newly created.
restore_from = logdir
return {
'logdir': logdir,
'logdir_root': args.logdir_root,
'restore_from': restore_from
}
istest = False
def main():
args = get_arguments()
try:
directories = validate_directories(args)
except ValueError as e:
print("Some arguments are wrong:")
print(str(e))
return
logdir = directories['logdir']
restore_from = directories['restore_from']
# Even if we restored the model, we will treat it as new training
# if the trained model is written into an arbitrary location.
is_overwritten_training = logdir != restore_from
wavenet_params = []
for file_name in WAVENET_PARAMS:
with open(file_name, 'r') as f:
wavenet_params.append(json.load(f))
# Create coordinator.
coord = tf.train.Coordinator()
# Load raw waveform from VCTK corpus.
with tf.name_scope('create_inputs'):
# Allow silence trimming to be skipped by specifying a threshold near
# zero.
silence_threshold = args.silence_threshold if args.silence_threshold > \
EPSILON else None
gc_enabled = args.gc_channels is not None
reader = AudioReader(
args.data_dir,
coord,
sample_rate=wavenet_params[2]['sample_rate'],
gc_enabled=gc_enabled,
receptive_field=WaveNetModel.calculate_receptive_field(wavenet_params[2]["filter_width"],
wavenet_params[2]["dilations"],
wavenet_params[2]["scalar_input"],
wavenet_params[2]["initial_filter_width"]),
sample_size=args.sample_size,
silence_threshold=silence_threshold,
pad = False,
path = "/media/chen/4CBEA7F1BEA7D1AE/Download/hand_dataset/pakinson/degree_dataset/3",
rand = False)
audio_batch = reader.dequeue(args.batch_size)
audio_batch_file = reader.dequeue_str(args.batch_size)
audio_batch = tf.squeeze(audio_batch)
one_receptive_field = WaveNetModel.calculate_receptive_field(wavenet_params[2]["filter_width"],
wavenet_params[2]["dilations"],
wavenet_params[2]["scalar_input"],
wavenet_params[2]["initial_filter_width"])
audio_batch = tf.pad(audio_batch, [[one_receptive_field, 0], [0, 0]],
'constant')
audio_batch = tf.expand_dims(audio_batch, 0)
if gc_enabled:
gc_id_batch = reader.dequeue_gc(args.batch_size)
else:
gc_id_batch = None
# Create network.
net = [WaveNetModel(
batch_size=args.batch_size,
dilations=one_params["dilations"],
filter_width=one_params["filter_width"],
residual_channels=one_params["residual_channels"],
dilation_channels=one_params["dilation_channels"],
skip_channels=one_params["skip_channels"],
quantization_channels=one_params["quantization_channels"],
use_biases=one_params["use_biases"],
scalar_input=one_params["scalar_input"],
initial_filter_width=one_params["initial_filter_width"],
histograms=args.histograms,
global_condition_channels=args.gc_channels,
global_condition_cardinality=reader.gc_category_cardinality,
namespace = str(one_params_i))
for one_params_i,one_params in enumerate(wavenet_params)]
post_par = []
for one_params_i, one_params in enumerate(wavenet_params):
with tf.variable_scope('postprocessing_'+'stage_id_'+str(one_params_i)):
current = dict()
current['postprocess1'] = create_variable(
'postprocess1',
[1, 64, 32])
current['postprocess2'] = create_variable(
'postprocess2',
[1, 32, 3])
current['postprocess1_bias'] = create_bias_variable(
'postprocess1_bias',
[32])
current['postprocess2_bias'] = create_bias_variable(
'postprocess2_bias',
[3])
post_par.append(current)
if args.l2_regularization_strength == 0:
args.l2_regularization_strength = None
#compute
loss_list = []
optimizer = optimizer_factory[args.optimizer](
learning_rate=args.learning_rate,
momentum=args.momentum)
optim_list = []
raw_output_list = []
audio_batch_list = []
loss_all_list = []
for one_params_i, _ in enumerate(wavenet_params):
with tf.name_scope('stage_' + str(one_params_i) + '_postcompute'):
if one_params_i==0:
raw_output, network_label = net[one_params_i].pre_loss(input_batch=audio_batch,
global_condition_batch=gc_id_batch,
l2_regularization_strength=args.l2_regularization_strength)
audio_batch_list.append(audio_batch)
else:
#将前一步骤的补偿结果作用在下一步的输入
raw_output = tf.pad(raw_output, [[one_ | 1x1 conv -> ReLU -> 1x1 conv to
# postprocess the output.
w1 = post_par[one_params_i]['postprocess1']
w2 = post_par[one_params_i]['postprocess2']
b1 = post_par[one_params_i]['postprocess1_bias']
b2 = post_par[one_params_i]['postprocess2_bias']
raw_output = tf.nn.relu(raw_output)
raw_output = tf.nn.conv1d(raw_output, w1, stride=1, padding="SAME")
raw_output = tf.add(raw_output, b1)
raw_output = tf.nn.relu(raw_output)
raw_output = tf.nn.conv1d(raw_output, w2, stride=1, padding="SAME")
raw_output = tf.add(raw_output, b2)
raw_output = tf.squeeze(raw_output)
raw_output_list.append(raw_output)
network_label = tf.squeeze(network_label)
# Set up logging for TensorBoard.
writer = tf.summary.FileWriter(logdir)
writer.add_graph(tf.get_default_graph())
run_metadata = tf.RunMetadata()
summaries = tf.summary.merge_all()
# Set up session
#config = tf.ConfigProto(log_device_placement=False)
#config.gpu_options.allow_growth = True
tf_config = tf.ConfigProto()
tf_config.gpu_options.per_process_gpu_memory_fraction = 0.9
sess = tf.Session(config=tf_config)
init = tf.global_variables_initializer()
sess.run(init)
# Saver for storing checkpoints of the model.
saver = tf.train.Saver(var_list=tf.trainable_variables(), max_to_keep=args.max_checkpoints)
try:
saved_global_step = load(saver, sess, restore_from)
if is_overwritten_training or saved_global_step is None:
# The first training step will be saved_global_step + 1,
# therefore we put -1 here for new or overwritten trainings.
saved_global_step = -1
except:
print("Something went wrong while restoring checkpoint. "
"We will terminate training to avoid accidentally overwriting "
"the previous model.")
raise
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
reader.start_threads(sess)
step = None
last_saved_step = saved_global_step
from tqdm import tqdm
try:
pass_loss = 0.0
for step in tqdm(range(90400)):
'''
if step < args.num_steps*0.0:
muti_step_id = 0
elif step < args.num_steps*0.99:
muti_step_id = 1
else:
muti_step_id = 2
'''
muti_step_id = 1
start_time = time.time()
audio_batch_list_ori_v, audio_batch_list_now_v,\
raw_output_list_ori_v, raw_output_list_now_v,\
file_name= \
sess.run([audio_batch_list[muti_step_id-1],audio_batch_list[muti_step_id], #label input
raw_output_list[muti_step_id-1], raw_output_list[muti_step_id],
audio_batch_file])
audio_batch_list_ori_v = audio_batch_list_ori_v[0, one_receptive_field:, :]
audio_batch_list_now_v = audio_batch_list_now_v[0, one_receptive_field:, :]
#writer.add_summary(summary, step)
if step%1==0:
wave_result0 = raw_output_list_ori_v[(one_receptive_field+1):, :] #t'
wave_result1 = raw_output_list_now_v[(one_receptive_field+1):, :] + wave_result0 #t'+(t-t')'
wave_target = audio_batch_list_ori_v[:,9:12][one_receptive_field:, :] #t
wave_input = audio_batch_list_ori_v[:, 0:3][one_receptive_field:, :] #t+v
num_sampe = np.size(wave_target,axis=0)
total_tremor = np.zeros(3)
remove_tremor0 = np.zeros(3)
remove_tremor1 = np.zeros(3)
#统计补偿的震颤的比例:
for tmp_over1_i in range(num_sampe):
for xyz_i in range(3):
# 累加振幅
total_tremor[xyz_i] += abs(wave_target[tmp_over1_i, xyz_i])
# 计算有效的消除震颤的部分——附加后震颤幅度变小:方向一致且幅度小于2×target
if wave_target[tmp_over1_i, xyz_i] * wave_result0[tmp_over1_i, xyz_i] > 0:
if abs(wave_result0[tmp_over1_i, xyz_i]) < abs(wave_target[tmp_over1_i, xyz_i]):
remove_tremor0[xyz_i] += abs(wave_result0[tmp_over1_i, xyz_i])
elif abs(wave_result0[tmp_over1_i, xyz_i]) / 2 < abs(wave_target[tmp_over1_i, xyz_i]):
remove_tremor0[xyz_i] += 2 * abs(wave_target[tmp_over1_i, xyz_i]) - abs(
wave_result0[tmp_over1_i, xyz_i])
if wave_target[tmp_over1_i, xyz_i] * wave_result1[tmp_over1_i, xyz_i] > 0:
if abs(wave_result1[tmp_over1_i, xyz_i]) < abs(wave_target[tmp_over1_i, xyz_i]):
remove_tremor1[xyz_i] += abs(wave_result1[tmp_over1_i, xyz_i])
elif abs(wave_result1[tmp_over1_i, xyz_i]) / 2 < abs(wave_target[tmp_over1_i, xyz_i]):
remove_tremor1[xyz_i] += 2 * abs(wave_target[tmp_over1_i, xyz_i]) - abs(
wave_result1[tmp_over1_i, xyz_i])
file_name = file_name[0].split('/')[-2] #'044'
# print(total_tremor)
# print(remove_tremor0)
# print(remove_tremor1)
# print(np.sum(remove_tremor0) / np.sum(total_tremor))
# print(np.sum(remove_tremor1) / np.sum(total_tremor))
wave_result0 = wave_input - wave_result0
wave_result1 = wave_input - wave_result1
wave_target = wave_input - wave_target
wave_analysis = np.array([np.sum(remove_tremor0) / np.sum(total_tremor), np.sum(remove_tremor1) / np.sum(total_tremor)])
# /home/chen/Documents/tensorflow-wavenet-master/analysis/test
# 清道夫的工作
test_path = "/home/chen/Documents/tensorflow-wavenet-master/analysis/test/3/" + file_name + ".txt"
#if not os.path.exists(test_path):
# os.makedirs(test_path)
# print(test_path)
if not os.path.exists(test_path):
np.savetxt(test_path, wave_analysis)
else:
wave_analysis_his = np.loadtxt(test_path)
wave_analysis = np.vstack((wave_analysis_his, wave_analysis))
np.savetxt(test_path, wave_analysis)
# 绘制3D图
if step % 100 == 0:
fig = plt.figure(1)
fig.clear()
ax1 = plt.subplot(231, projection='3d')
ax2 = plt.subplot(232, projection='3d')
ax3 = plt.subplot(233, projection='3d')
ax4 = plt.subplot(234, projection='3d')
ax5 = plt.subplot(235, projection='3d')
ax6 = plt.subplot(236, projection='3d')
lim_show = np.size(wave_input,axis = 0)
lim_show = int(math.floor(lim_show/5))
ax1.plot(wave_input[:, 0], wave_input[:, 1], wave_input[:, 2], linewidth=0.3)
ax4.plot(wave_target[:, 0], wave_target[:, 1], wave_target[:, 2], linewidth=0.3)
ax2.plot(wave_result0[:, 0], wave_result0[:, 1], wave_result0[:, 2], linewidth=0.3)
ax2.set_title(str(np.sum(remove_tremor0) / np.sum(total_tremor)))
ax5.plot(wave_result1[:, 0], wave_result1[:, 1], wave_result1[:, 2], linewidth=0.3)
ax5.set_title(str(np.sum(remove_tremor1) / np.sum(total_tremor)))
ax3.plot(wave_result0[-lim_show:, 0], wave_result0[-lim_show:, 1], wave_result0[-lim_show:, 2], linewidth=0.3)
ax3.plot(wave_target[-lim_show:, 0], wave_target[-lim_show:, 1], wave_target[-lim_show:, 2], linewidth=0.3)
ax6.plot(wave_result1[-lim_show:, 0], wave_result1[-lim_show:, 1], wave_result1[-lim_show:, 2], linewidth=0.3)
ax6.plot(wave_target[-lim_show:, 0], wave_target[-lim_show:, 1], wave_target[-lim_show:, 2], linewidth=0.3)
plt.savefig("/home/chen/Documents/tensorflow-wavenet-master/images/test_result/" + str(step).zfill(7) + ".png")
#if step % args.checkpoint_every == 0:
# save(saver, sess, logdir, step)
# last_saved_step = step
except KeyboardInterrupt:
# Introduce a line break after ^C is displayed so save message
# is on its own line.
print()
finally:
if step > last_saved_step:
save(saver, sess, logdir, step)
coord.request_stop()
coord.join(threads)
if __name__ == '__main__':
main()
| receptive_field-1, 0], [0, 0]])
raw_output = tf.concat([raw_output, raw_output, raw_output, raw_output], axis=1)
raw_output = tf.pad(raw_output, [[0, 0], [0, 6]])
raw_output = tf.expand_dims(raw_output, 0)
audio_batch = audio_batch - raw_output
audio_batch_list.append(audio_batch)
raw_output, network_label = net[one_params_i].pre_loss(input_batch=audio_batch,
global_condition_batch=gc_id_batch,
l2_regularization_strength=args.l2_regularization_strength)
# Perform (+) -> ReLU -> | conditional_block |
test.py | # -*- coding: utf-8 -*-
"""Training script for the WaveNet network on the VCTK corpus.
This script trains a network with the WaveNet using data from the VCTK corpus,
which can be freely downloaded at the following site (~10 GB):
http://homepages.inf.ed.ac.uk/jyamagis/page3/page58/page58.html
"""
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import argparse
from datetime import datetime
import json,math
import os
import sys
import time
import matplotlib.pyplot as pl
import tensorflow as tf
from tensorflow.python.client import timeline
from wavenet import WaveNetModel, optimizer_factory, AudioReader
from wavenet.model import create_variable, create_bias_variable
from wavenet.util.visual import figure_joint_skeleton
from analysis.Corr_Dim import fnn, Tao, Dim_Corr
from analysis.Fourier_utils import *
BATCH_SIZE = 1
DATA_DIRECTORY = './VCTK-Corpus'
LOGDIR_ROOT = './logdir'
CHECKPOINT_EVERY = 500
NUM_STEPS = int(1e5)
LEARNING_RATE = 1e-4
WAVENET_PARAMS = ['./WAVE_params/wavenet_params_1.json','./WAVE_params/wavenet_params_2.json','./WAVE_params/wavenet_params_3.json']
STARTED_DATESTRING = "{0:%Y-%m-%dT%H-%M-%S}".format(datetime.now())
SAMPLE_SIZE = None
L2_REGULARIZATION_STRENGTH = 0
SILENCE_THRESHOLD = 0.3
EPSILON = 0.001
MOMENTUM = 0.9
MAX_TO_KEEP = 5
METADATA = False
def figure_hand_back(uvd_pt,uvd_pt1,uvd_pt2,path,test_num):
#uvd_pt = np.reshape(uvd_pt, (20, 3))
uvd_pt = uvd_pt.reshape(-1, 3)
uvd_pt1 = uvd_pt1.reshape(-1, 3)
uvd_pt2 = uvd_pt2.reshape(-1, 3)
fig = plt.figure(1)
fig.clear()
ax = plt.subplot(111, projection='3d')
fig_color = ['c', 'm', 'y', 'g', 'r']
ax.scatter(uvd_pt[0, 0], uvd_pt[0, 1], uvd_pt[0, 2], s=10, c='b')
ax.scatter(uvd_pt[1, 0], uvd_pt[1, 1], uvd_pt[1, 2], s=10, c='b')
ax.scatter(uvd_pt[2, 0], uvd_pt[2, 1], uvd_pt[2, 2], s=10, c='b')
ax.plot([uvd_pt[0, 0], uvd_pt[1, 0]],
[uvd_pt[0, 1], uvd_pt[1, 1]],
[uvd_pt[0, 2], uvd_pt[1, 2]], color='b', linewidth=1)
ax.plot([uvd_pt[1, 0], uvd_pt[2, 0]],
[uvd_pt[1, 1], uvd_pt[2, 1]],
[uvd_pt[1, 2], uvd_pt[2, 2]], color='b', linewidth=1)
ax.plot([uvd_pt[2, 0], uvd_pt[0, 0]],
[uvd_pt[2, 1], uvd_pt[0, 1]],
[uvd_pt[2, 2], uvd_pt[0, 2]], color='b', linewidth=1)
plt.ylim(-300, 300)
plt.xlim(-300, 300)
ax.set_zlim(-300, 300)
ax.scatter(uvd_pt1[0, 0], uvd_pt1[0, 1], uvd_pt1[0, 2], s=10, c='g')
ax.scatter(uvd_pt1[1, 0], uvd_pt1[1, 1], uvd_pt1[1, 2], s=10, c='g')
ax.scatter(uvd_pt1[2, 0], uvd_pt1[2, 1], uvd_pt1[2, 2], s=10, c='g')
ax.plot([uvd_pt1[0, 0], uvd_pt1[1, 0]],
[uvd_pt1[0, 1], uvd_pt1[1, 1]],
[uvd_pt1[0, 2], uvd_pt1[1, 2]], color='g', linewidth=1)
ax.plot([uvd_pt1[1, 0], uvd_pt1[2, 0]],
[uvd_pt1[1, 1], uvd_pt1[2, 1]],
[uvd_pt1[1, 2], uvd_pt1[2, 2]], color='g', linewidth=1)
ax.plot([uvd_pt1[2, 0], uvd_pt1[0, 0]],
[uvd_pt1[2, 1], uvd_pt1[0, 1]],
[uvd_pt1[2, 2], uvd_pt1[0, 2]], color='g', linewidth=1)
ax.scatter(uvd_pt2[0, 0], uvd_pt2[0, 1], uvd_pt2[0, 2], s=10, c='r')
ax.scatter(uvd_pt2[1, 0], uvd_pt2[1, 1], uvd_pt2[1, 2], s=10, c='r')
ax.scatter(uvd_pt2[2, 0], uvd_pt2[2, 1], uvd_pt2[2, 2], s=10, c='r')
ax.plot([uvd_pt2[0, 0], uvd_pt2[1, 0]],
[uvd_pt2[0, 1], uvd_pt2[1, 1]],
[uvd_pt2[0, 2], uvd_pt2[1, 2]], color='r', linewidth=1)
ax.plot([uvd_pt2[1, 0], uvd_pt2[2, 0]],
[uvd_pt2[1, 1], uvd_pt2[2, 1]],
[uvd_pt2[1, 2], uvd_pt2[2, 2]], color='r', linewidth=1)
ax.plot([uvd_pt2[2, 0], uvd_pt2[0, 0]],
[uvd_pt2[2, 1], uvd_pt2[0, 1]],
[uvd_pt2[2, 2], uvd_pt2[0, 2]], color='r', linewidth=1)
plt.savefig(path+str(test_num).zfill(7)+".png")
def get_arguments():
def _str_to_bool(s):
"""Convert string to bool (in argparse context)."""
if s.lower() not in ['true', 'false']:
raise ValueError('Argument needs to be a '
'boolean, got {}'.format(s))
return {'true': True, 'false': False}[s.lower()]
parser = argparse.ArgumentParser(description='WaveNet example network')
parser.add_argument('--batch_size', type=int, default=BATCH_SIZE,
help='How many wav files to process at once. Default: ' + str(BATCH_SIZE) + '.')
parser.add_argument('--data_dir', type=str, default=DATA_DIRECTORY,
help='The directory containing the VCTK corpus.')
parser.add_argument('--store_metadata', type=bool, default=METADATA,
help='Whether to store advanced debugging information '
'(execution time, memory consumption) for use with '
'TensorBoard. Default: ' + str(METADATA) + '.')
parser.add_argument('--logdir', type=str, default=None,
help='Directory in which to store the logging '
'information for TensorBoard. '
'If the model already exists, it will restore '
'the state and will continue training. '
'Cannot use with --logdir_root and --restore_from.')
parser.add_argument('--logdir_root', type=str, default=None,
help='Root directory to place the logging '
'output and generated model. These are stored '
'under the dated subdirectory of --logdir_root. '
'Cannot use with --logdir.')
parser.add_argument('--restore_from', type=str, default=None,
help='Directory in which to restore the model from. '
'This creates the new model under the dated directory '
'in --logdir_root. '
'Cannot use with --logdir.')
parser.add_argument('--checkpoint_every', type=int,
default=CHECKPOINT_EVERY,
help='How many steps to save each checkpoint after. Default: ' + str(CHECKPOINT_EVERY) + '.')
parser.add_argument('--num_steps', type=int, default=NUM_STEPS,
help='Number of training steps. Default: ' + str(NUM_STEPS) + '.')
parser.add_argument('--learning_rate', type=float, default=LEARNING_RATE,
help='Learning rate for training. Default: ' + str(LEARNING_RATE) + '.')
#parser.add_argument('--wavenet_params', type=list, default=WAVENET_PARAMS,
# help='JSON file with the network parameters. Default: ' + WAVENET_PARAMS + '.')
parser.add_argument('--sample_size', type=int, default=SAMPLE_SIZE,
help='Concatenate and cut audio samples to this many '
'samples. Default: ' + str(SAMPLE_SIZE) + '.')
parser.add_argument('--l2_regularization_strength', type=float,
default=L2_REGULARIZATION_STRENGTH,
help='Coefficient in the L2 regularization. '
'Default: False')
parser.add_argument('--silence_threshold', type=float,
default=SILENCE_THRESHOLD,
help='Volume threshold below which to trim the start '
'and the end from the training set samples. Default: ' + str(SILENCE_THRESHOLD) + '.')
parser.add_argument('--optimizer', type=str, default='adam',
choices=optimizer_factory.keys(),
help='Select the optimizer specified by this option. Default: adam.')
parser.add_argument('--momentum', type=float,
default=MOMENTUM, help='Specify the momentum to be '
'used by sgd or rmsprop optimizer. Ignored by the '
'adam optimizer. Default: ' + str(MOMENTUM) + '.')
parser.add_argument('--histograms', type=_str_to_bool, default=False,
help='Whether to store histogram summaries. Default: False')
parser.add_argument('--gc_channels', type=int, default=None,
help='Number of global condition channels. Default: None. Expecting: Int')
parser.add_argument('--max_checkpoints', type=int, default=MAX_TO_KEEP,
help='Maximum amount of checkpoints that will be kept alive. Default: '
+ str(MAX_TO_KEEP) + '.')
return parser.parse_args()
def save(saver, sess, logdir, step):
model_name = 'model.ckpt'
checkpoint_path = os.path.join(logdir, model_name)
print('Storing checkpoint to {} ...'.format(logdir), end="")
sys.stdout.flush()
if not os.path.exists(logdir):
os.makedirs(logdir)
saver.save(sess, checkpoint_path, global_step=step)
print(' Done.')
def load(saver, sess, logdir):
print("Trying to restore saved checkpoints from {} ...".format(logdir),
end="")
ckpt = tf.train.get_checkpoint_state(logdir)
if ckpt:
print(" Checkpoint found: {}".format(ckpt.model_checkpoint_path))
global_step = int(ckpt.model_checkpoint_path
.split('/')[-1]
.split('-')[-1])
print(" Global step was: {}".format(global_step))
print(" Restoring...", end="")
saver.restore(sess, ckpt.model_checkpoint_path)
print(" Done.")
return global_step
else:
print(" No checkpoint found.")
return None
def get_default_logdir(logdir_root):
logdir = os.path.join(logdir_root, 'train', STARTED_DATESTRING)
return logdir
def | (args):
"""Validate and arrange directory related arguments."""
# Validation
if args.logdir and args.logdir_root:
raise ValueError("--logdir and --logdir_root cannot be "
"specified at the same time.")
if args.logdir and args.restore_from:
raise ValueError(
"--logdir and --restore_from cannot be specified at the same "
"time. This is to keep your previous model from unexpected "
"overwrites.\n"
"Use --logdir_root to specify the root of the directory which "
"will be automatically created with current date and time, or use "
"only --logdir to just continue the training from the last "
"checkpoint.")
# Arrangement
logdir_root = args.logdir_root
if logdir_root is None:
logdir_root = LOGDIR_ROOT
logdir = args.logdir
if logdir is None:
logdir = get_default_logdir(logdir_root)
print('Using default logdir: {}'.format(logdir))
restore_from = args.restore_from
if restore_from is None:
# args.logdir and args.restore_from are exclusive,
# so it is guaranteed the logdir here is newly created.
restore_from = logdir
return {
'logdir': logdir,
'logdir_root': args.logdir_root,
'restore_from': restore_from
}
istest = False
def main():
args = get_arguments()
try:
directories = validate_directories(args)
except ValueError as e:
print("Some arguments are wrong:")
print(str(e))
return
logdir = directories['logdir']
restore_from = directories['restore_from']
# Even if we restored the model, we will treat it as new training
# if the trained model is written into an arbitrary location.
is_overwritten_training = logdir != restore_from
wavenet_params = []
for file_name in WAVENET_PARAMS:
with open(file_name, 'r') as f:
wavenet_params.append(json.load(f))
# Create coordinator.
coord = tf.train.Coordinator()
# Load raw waveform from VCTK corpus.
with tf.name_scope('create_inputs'):
# Allow silence trimming to be skipped by specifying a threshold near
# zero.
silence_threshold = args.silence_threshold if args.silence_threshold > \
EPSILON else None
gc_enabled = args.gc_channels is not None
reader = AudioReader(
args.data_dir,
coord,
sample_rate=wavenet_params[2]['sample_rate'],
gc_enabled=gc_enabled,
receptive_field=WaveNetModel.calculate_receptive_field(wavenet_params[2]["filter_width"],
wavenet_params[2]["dilations"],
wavenet_params[2]["scalar_input"],
wavenet_params[2]["initial_filter_width"]),
sample_size=args.sample_size,
silence_threshold=silence_threshold,
pad = False,
path = "/media/chen/4CBEA7F1BEA7D1AE/Download/hand_dataset/pakinson/degree_dataset/3",
rand = False)
audio_batch = reader.dequeue(args.batch_size)
audio_batch_file = reader.dequeue_str(args.batch_size)
audio_batch = tf.squeeze(audio_batch)
one_receptive_field = WaveNetModel.calculate_receptive_field(wavenet_params[2]["filter_width"],
wavenet_params[2]["dilations"],
wavenet_params[2]["scalar_input"],
wavenet_params[2]["initial_filter_width"])
audio_batch = tf.pad(audio_batch, [[one_receptive_field, 0], [0, 0]],
'constant')
audio_batch = tf.expand_dims(audio_batch, 0)
if gc_enabled:
gc_id_batch = reader.dequeue_gc(args.batch_size)
else:
gc_id_batch = None
# Create network.
net = [WaveNetModel(
batch_size=args.batch_size,
dilations=one_params["dilations"],
filter_width=one_params["filter_width"],
residual_channels=one_params["residual_channels"],
dilation_channels=one_params["dilation_channels"],
skip_channels=one_params["skip_channels"],
quantization_channels=one_params["quantization_channels"],
use_biases=one_params["use_biases"],
scalar_input=one_params["scalar_input"],
initial_filter_width=one_params["initial_filter_width"],
histograms=args.histograms,
global_condition_channels=args.gc_channels,
global_condition_cardinality=reader.gc_category_cardinality,
namespace = str(one_params_i))
for one_params_i,one_params in enumerate(wavenet_params)]
post_par = []
for one_params_i, one_params in enumerate(wavenet_params):
with tf.variable_scope('postprocessing_'+'stage_id_'+str(one_params_i)):
current = dict()
current['postprocess1'] = create_variable(
'postprocess1',
[1, 64, 32])
current['postprocess2'] = create_variable(
'postprocess2',
[1, 32, 3])
current['postprocess1_bias'] = create_bias_variable(
'postprocess1_bias',
[32])
current['postprocess2_bias'] = create_bias_variable(
'postprocess2_bias',
[3])
post_par.append(current)
if args.l2_regularization_strength == 0:
args.l2_regularization_strength = None
#compute
loss_list = []
optimizer = optimizer_factory[args.optimizer](
learning_rate=args.learning_rate,
momentum=args.momentum)
optim_list = []
raw_output_list = []
audio_batch_list = []
loss_all_list = []
for one_params_i, _ in enumerate(wavenet_params):
with tf.name_scope('stage_' + str(one_params_i) + '_postcompute'):
if one_params_i==0:
raw_output, network_label = net[one_params_i].pre_loss(input_batch=audio_batch,
global_condition_batch=gc_id_batch,
l2_regularization_strength=args.l2_regularization_strength)
audio_batch_list.append(audio_batch)
else:
#将前一步骤的补偿结果作用在下一步的输入
raw_output = tf.pad(raw_output, [[one_receptive_field-1, 0], [0, 0]])
raw_output = tf.concat([raw_output, raw_output, raw_output, raw_output], axis=1)
raw_output = tf.pad(raw_output, [[0, 0], [0, 6]])
raw_output = tf.expand_dims(raw_output, 0)
audio_batch = audio_batch - raw_output
audio_batch_list.append(audio_batch)
raw_output, network_label = net[one_params_i].pre_loss(input_batch=audio_batch,
global_condition_batch=gc_id_batch,
l2_regularization_strength=args.l2_regularization_strength)
# Perform (+) -> ReLU -> 1x1 conv -> ReLU -> 1x1 conv to
# postprocess the output.
w1 = post_par[one_params_i]['postprocess1']
w2 = post_par[one_params_i]['postprocess2']
b1 = post_par[one_params_i]['postprocess1_bias']
b2 = post_par[one_params_i]['postprocess2_bias']
raw_output = tf.nn.relu(raw_output)
raw_output = tf.nn.conv1d(raw_output, w1, stride=1, padding="SAME")
raw_output = tf.add(raw_output, b1)
raw_output = tf.nn.relu(raw_output)
raw_output = tf.nn.conv1d(raw_output, w2, stride=1, padding="SAME")
raw_output = tf.add(raw_output, b2)
raw_output = tf.squeeze(raw_output)
raw_output_list.append(raw_output)
network_label = tf.squeeze(network_label)
# Set up logging for TensorBoard.
writer = tf.summary.FileWriter(logdir)
writer.add_graph(tf.get_default_graph())
run_metadata = tf.RunMetadata()
summaries = tf.summary.merge_all()
# Set up session
#config = tf.ConfigProto(log_device_placement=False)
#config.gpu_options.allow_growth = True
tf_config = tf.ConfigProto()
tf_config.gpu_options.per_process_gpu_memory_fraction = 0.9
sess = tf.Session(config=tf_config)
init = tf.global_variables_initializer()
sess.run(init)
# Saver for storing checkpoints of the model.
saver = tf.train.Saver(var_list=tf.trainable_variables(), max_to_keep=args.max_checkpoints)
try:
saved_global_step = load(saver, sess, restore_from)
if is_overwritten_training or saved_global_step is None:
# The first training step will be saved_global_step + 1,
# therefore we put -1 here for new or overwritten trainings.
saved_global_step = -1
except:
print("Something went wrong while restoring checkpoint. "
"We will terminate training to avoid accidentally overwriting "
"the previous model.")
raise
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
reader.start_threads(sess)
step = None
last_saved_step = saved_global_step
from tqdm import tqdm
try:
pass_loss = 0.0
for step in tqdm(range(90400)):
'''
if step < args.num_steps*0.0:
muti_step_id = 0
elif step < args.num_steps*0.99:
muti_step_id = 1
else:
muti_step_id = 2
'''
muti_step_id = 1
start_time = time.time()
audio_batch_list_ori_v, audio_batch_list_now_v,\
raw_output_list_ori_v, raw_output_list_now_v,\
file_name= \
sess.run([audio_batch_list[muti_step_id-1],audio_batch_list[muti_step_id], #label input
raw_output_list[muti_step_id-1], raw_output_list[muti_step_id],
audio_batch_file])
audio_batch_list_ori_v = audio_batch_list_ori_v[0, one_receptive_field:, :]
audio_batch_list_now_v = audio_batch_list_now_v[0, one_receptive_field:, :]
#writer.add_summary(summary, step)
if step%1==0:
wave_result0 = raw_output_list_ori_v[(one_receptive_field+1):, :] #t'
wave_result1 = raw_output_list_now_v[(one_receptive_field+1):, :] + wave_result0 #t'+(t-t')'
wave_target = audio_batch_list_ori_v[:,9:12][one_receptive_field:, :] #t
wave_input = audio_batch_list_ori_v[:, 0:3][one_receptive_field:, :] #t+v
num_sampe = np.size(wave_target,axis=0)
total_tremor = np.zeros(3)
remove_tremor0 = np.zeros(3)
remove_tremor1 = np.zeros(3)
#统计补偿的震颤的比例:
for tmp_over1_i in range(num_sampe):
for xyz_i in range(3):
# 累加振幅
total_tremor[xyz_i] += abs(wave_target[tmp_over1_i, xyz_i])
# 计算有效的消除震颤的部分——附加后震颤幅度变小:方向一致且幅度小于2×target
if wave_target[tmp_over1_i, xyz_i] * wave_result0[tmp_over1_i, xyz_i] > 0:
if abs(wave_result0[tmp_over1_i, xyz_i]) < abs(wave_target[tmp_over1_i, xyz_i]):
remove_tremor0[xyz_i] += abs(wave_result0[tmp_over1_i, xyz_i])
elif abs(wave_result0[tmp_over1_i, xyz_i]) / 2 < abs(wave_target[tmp_over1_i, xyz_i]):
remove_tremor0[xyz_i] += 2 * abs(wave_target[tmp_over1_i, xyz_i]) - abs(
wave_result0[tmp_over1_i, xyz_i])
if wave_target[tmp_over1_i, xyz_i] * wave_result1[tmp_over1_i, xyz_i] > 0:
if abs(wave_result1[tmp_over1_i, xyz_i]) < abs(wave_target[tmp_over1_i, xyz_i]):
remove_tremor1[xyz_i] += abs(wave_result1[tmp_over1_i, xyz_i])
elif abs(wave_result1[tmp_over1_i, xyz_i]) / 2 < abs(wave_target[tmp_over1_i, xyz_i]):
remove_tremor1[xyz_i] += 2 * abs(wave_target[tmp_over1_i, xyz_i]) - abs(
wave_result1[tmp_over1_i, xyz_i])
file_name = file_name[0].split('/')[-2] #'044'
# print(total_tremor)
# print(remove_tremor0)
# print(remove_tremor1)
# print(np.sum(remove_tremor0) / np.sum(total_tremor))
# print(np.sum(remove_tremor1) / np.sum(total_tremor))
wave_result0 = wave_input - wave_result0
wave_result1 = wave_input - wave_result1
wave_target = wave_input - wave_target
wave_analysis = np.array([np.sum(remove_tremor0) / np.sum(total_tremor), np.sum(remove_tremor1) / np.sum(total_tremor)])
# /home/chen/Documents/tensorflow-wavenet-master/analysis/test
# 清道夫的工作
test_path = "/home/chen/Documents/tensorflow-wavenet-master/analysis/test/3/" + file_name + ".txt"
#if not os.path.exists(test_path):
# os.makedirs(test_path)
# print(test_path)
if not os.path.exists(test_path):
np.savetxt(test_path, wave_analysis)
else:
wave_analysis_his = np.loadtxt(test_path)
wave_analysis = np.vstack((wave_analysis_his, wave_analysis))
np.savetxt(test_path, wave_analysis)
# 绘制3D图
if step % 100 == 0:
fig = plt.figure(1)
fig.clear()
ax1 = plt.subplot(231, projection='3d')
ax2 = plt.subplot(232, projection='3d')
ax3 = plt.subplot(233, projection='3d')
ax4 = plt.subplot(234, projection='3d')
ax5 = plt.subplot(235, projection='3d')
ax6 = plt.subplot(236, projection='3d')
lim_show = np.size(wave_input,axis = 0)
lim_show = int(math.floor(lim_show/5))
ax1.plot(wave_input[:, 0], wave_input[:, 1], wave_input[:, 2], linewidth=0.3)
ax4.plot(wave_target[:, 0], wave_target[:, 1], wave_target[:, 2], linewidth=0.3)
ax2.plot(wave_result0[:, 0], wave_result0[:, 1], wave_result0[:, 2], linewidth=0.3)
ax2.set_title(str(np.sum(remove_tremor0) / np.sum(total_tremor)))
ax5.plot(wave_result1[:, 0], wave_result1[:, 1], wave_result1[:, 2], linewidth=0.3)
ax5.set_title(str(np.sum(remove_tremor1) / np.sum(total_tremor)))
ax3.plot(wave_result0[-lim_show:, 0], wave_result0[-lim_show:, 1], wave_result0[-lim_show:, 2], linewidth=0.3)
ax3.plot(wave_target[-lim_show:, 0], wave_target[-lim_show:, 1], wave_target[-lim_show:, 2], linewidth=0.3)
ax6.plot(wave_result1[-lim_show:, 0], wave_result1[-lim_show:, 1], wave_result1[-lim_show:, 2], linewidth=0.3)
ax6.plot(wave_target[-lim_show:, 0], wave_target[-lim_show:, 1], wave_target[-lim_show:, 2], linewidth=0.3)
plt.savefig("/home/chen/Documents/tensorflow-wavenet-master/images/test_result/" + str(step).zfill(7) + ".png")
#if step % args.checkpoint_every == 0:
# save(saver, sess, logdir, step)
# last_saved_step = step
except KeyboardInterrupt:
# Introduce a line break after ^C is displayed so save message
# is on its own line.
print()
finally:
if step > last_saved_step:
save(saver, sess, logdir, step)
coord.request_stop()
coord.join(threads)
if __name__ == '__main__':
main()
| validate_directories | identifier_name |
test.py | # -*- coding: utf-8 -*-
"""Training script for the WaveNet network on the VCTK corpus.
This script trains a network with the WaveNet using data from the VCTK corpus,
which can be freely downloaded at the following site (~10 GB):
http://homepages.inf.ed.ac.uk/jyamagis/page3/page58/page58.html
"""
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import argparse
from datetime import datetime
import json,math
import os
import sys
import time
import matplotlib.pyplot as pl
import tensorflow as tf
from tensorflow.python.client import timeline
from wavenet import WaveNetModel, optimizer_factory, AudioReader
from wavenet.model import create_variable, create_bias_variable
from wavenet.util.visual import figure_joint_skeleton
from analysis.Corr_Dim import fnn, Tao, Dim_Corr
from analysis.Fourier_utils import *
BATCH_SIZE = 1
DATA_DIRECTORY = './VCTK-Corpus'
LOGDIR_ROOT = './logdir'
CHECKPOINT_EVERY = 500
NUM_STEPS = int(1e5)
LEARNING_RATE = 1e-4
WAVENET_PARAMS = ['./WAVE_params/wavenet_params_1.json','./WAVE_params/wavenet_params_2.json','./WAVE_params/wavenet_params_3.json']
STARTED_DATESTRING = "{0:%Y-%m-%dT%H-%M-%S}".format(datetime.now())
SAMPLE_SIZE = None
L2_REGULARIZATION_STRENGTH = 0
SILENCE_THRESHOLD = 0.3
EPSILON = 0.001
MOMENTUM = 0.9
MAX_TO_KEEP = 5
METADATA = False
def figure_hand_back(uvd_pt,uvd_pt1,uvd_pt2,path,test_num):
#uvd_pt = np.reshape(uvd_pt, (20, 3))
uvd_pt = uvd_pt.reshape(-1, 3)
uvd_pt1 = uvd_pt1.reshape(-1, 3)
uvd_pt2 = uvd_pt2.reshape(-1, 3)
fig = plt.figure(1)
fig.clear()
ax = plt.subplot(111, projection='3d')
fig_color = ['c', 'm', 'y', 'g', 'r']
ax.scatter(uvd_pt[0, 0], uvd_pt[0, 1], uvd_pt[0, 2], s=10, c='b')
ax.scatter(uvd_pt[1, 0], uvd_pt[1, 1], uvd_pt[1, 2], s=10, c='b')
ax.scatter(uvd_pt[2, 0], uvd_pt[2, 1], uvd_pt[2, 2], s=10, c='b')
ax.plot([uvd_pt[0, 0], uvd_pt[1, 0]],
[uvd_pt[0, 1], uvd_pt[1, 1]],
[uvd_pt[0, 2], uvd_pt[1, 2]], color='b', linewidth=1)
ax.plot([uvd_pt[1, 0], uvd_pt[2, 0]],
[uvd_pt[1, 1], uvd_pt[2, 1]],
[uvd_pt[1, 2], uvd_pt[2, 2]], color='b', linewidth=1)
ax.plot([uvd_pt[2, 0], uvd_pt[0, 0]],
[uvd_pt[2, 1], uvd_pt[0, 1]],
[uvd_pt[2, 2], uvd_pt[0, 2]], color='b', linewidth=1)
plt.ylim(-300, 300)
plt.xlim(-300, 300)
ax.set_zlim(-300, 300)
ax.scatter(uvd_pt1[0, 0], uvd_pt1[0, 1], uvd_pt1[0, 2], s=10, c='g')
ax.scatter(uvd_pt1[1, 0], uvd_pt1[1, 1], uvd_pt1[1, 2], s=10, c='g')
ax.scatter(uvd_pt1[2, 0], uvd_pt1[2, 1], uvd_pt1[2, 2], s=10, c='g')
ax.plot([uvd_pt1[0, 0], uvd_pt1[1, 0]],
[uvd_pt1[0, 1], uvd_pt1[1, 1]],
[uvd_pt1[0, 2], uvd_pt1[1, 2]], color='g', linewidth=1)
ax.plot([uvd_pt1[1, 0], uvd_pt1[2, 0]],
[uvd_pt1[1, 1], uvd_pt1[2, 1]],
[uvd_pt1[1, 2], uvd_pt1[2, 2]], color='g', linewidth=1)
ax.plot([uvd_pt1[2, 0], uvd_pt1[0, 0]],
[uvd_pt1[2, 1], uvd_pt1[0, 1]],
[uvd_pt1[2, 2], uvd_pt1[0, 2]], color='g', linewidth=1)
ax.scatter(uvd_pt2[0, 0], uvd_pt2[0, 1], uvd_pt2[0, 2], s=10, c='r')
ax.scatter(uvd_pt2[1, 0], uvd_pt2[1, 1], uvd_pt2[1, 2], s=10, c='r')
ax.scatter(uvd_pt2[2, 0], uvd_pt2[2, 1], uvd_pt2[2, 2], s=10, c='r')
ax.plot([uvd_pt2[0, 0], uvd_pt2[1, 0]],
[uvd_pt2[0, 1], uvd_pt2[1, 1]],
[uvd_pt2[0, 2], uvd_pt2[1, 2]], color='r', linewidth=1)
ax.plot([uvd_pt2[1, 0], uvd_pt2[2, 0]],
[uvd_pt2[1, 1], uvd_pt2[2, 1]],
[uvd_pt2[1, 2], uvd_pt2[2, 2]], color='r', linewidth=1)
ax.plot([uvd_pt2[2, 0], uvd_pt2[0, 0]],
[uvd_pt2[2, 1], uvd_pt2[0, 1]],
[uvd_pt2[2, 2], uvd_pt2[0, 2]], color='r', linewidth=1)
plt.savefig(path+str(test_num).zfill(7)+".png")
def get_arguments():
|
def save(saver, sess, logdir, step):
model_name = 'model.ckpt'
checkpoint_path = os.path.join(logdir, model_name)
print('Storing checkpoint to {} ...'.format(logdir), end="")
sys.stdout.flush()
if not os.path.exists(logdir):
os.makedirs(logdir)
saver.save(sess, checkpoint_path, global_step=step)
print(' Done.')
def load(saver, sess, logdir):
print("Trying to restore saved checkpoints from {} ...".format(logdir),
end="")
ckpt = tf.train.get_checkpoint_state(logdir)
if ckpt:
print(" Checkpoint found: {}".format(ckpt.model_checkpoint_path))
global_step = int(ckpt.model_checkpoint_path
.split('/')[-1]
.split('-')[-1])
print(" Global step was: {}".format(global_step))
print(" Restoring...", end="")
saver.restore(sess, ckpt.model_checkpoint_path)
print(" Done.")
return global_step
else:
print(" No checkpoint found.")
return None
def get_default_logdir(logdir_root):
logdir = os.path.join(logdir_root, 'train', STARTED_DATESTRING)
return logdir
def validate_directories(args):
"""Validate and arrange directory related arguments."""
# Validation
if args.logdir and args.logdir_root:
raise ValueError("--logdir and --logdir_root cannot be "
"specified at the same time.")
if args.logdir and args.restore_from:
raise ValueError(
"--logdir and --restore_from cannot be specified at the same "
"time. This is to keep your previous model from unexpected "
"overwrites.\n"
"Use --logdir_root to specify the root of the directory which "
"will be automatically created with current date and time, or use "
"only --logdir to just continue the training from the last "
"checkpoint.")
# Arrangement
logdir_root = args.logdir_root
if logdir_root is None:
logdir_root = LOGDIR_ROOT
logdir = args.logdir
if logdir is None:
logdir = get_default_logdir(logdir_root)
print('Using default logdir: {}'.format(logdir))
restore_from = args.restore_from
if restore_from is None:
# args.logdir and args.restore_from are exclusive,
# so it is guaranteed the logdir here is newly created.
restore_from = logdir
return {
'logdir': logdir,
'logdir_root': args.logdir_root,
'restore_from': restore_from
}
istest = False
def main():
args = get_arguments()
try:
directories = validate_directories(args)
except ValueError as e:
print("Some arguments are wrong:")
print(str(e))
return
logdir = directories['logdir']
restore_from = directories['restore_from']
# Even if we restored the model, we will treat it as new training
# if the trained model is written into an arbitrary location.
is_overwritten_training = logdir != restore_from
wavenet_params = []
for file_name in WAVENET_PARAMS:
with open(file_name, 'r') as f:
wavenet_params.append(json.load(f))
# Create coordinator.
coord = tf.train.Coordinator()
# Load raw waveform from VCTK corpus.
with tf.name_scope('create_inputs'):
# Allow silence trimming to be skipped by specifying a threshold near
# zero.
silence_threshold = args.silence_threshold if args.silence_threshold > \
EPSILON else None
gc_enabled = args.gc_channels is not None
reader = AudioReader(
args.data_dir,
coord,
sample_rate=wavenet_params[2]['sample_rate'],
gc_enabled=gc_enabled,
receptive_field=WaveNetModel.calculate_receptive_field(wavenet_params[2]["filter_width"],
wavenet_params[2]["dilations"],
wavenet_params[2]["scalar_input"],
wavenet_params[2]["initial_filter_width"]),
sample_size=args.sample_size,
silence_threshold=silence_threshold,
pad = False,
path = "/media/chen/4CBEA7F1BEA7D1AE/Download/hand_dataset/pakinson/degree_dataset/3",
rand = False)
audio_batch = reader.dequeue(args.batch_size)
audio_batch_file = reader.dequeue_str(args.batch_size)
audio_batch = tf.squeeze(audio_batch)
one_receptive_field = WaveNetModel.calculate_receptive_field(wavenet_params[2]["filter_width"],
wavenet_params[2]["dilations"],
wavenet_params[2]["scalar_input"],
wavenet_params[2]["initial_filter_width"])
audio_batch = tf.pad(audio_batch, [[one_receptive_field, 0], [0, 0]],
'constant')
audio_batch = tf.expand_dims(audio_batch, 0)
if gc_enabled:
gc_id_batch = reader.dequeue_gc(args.batch_size)
else:
gc_id_batch = None
# Create network.
net = [WaveNetModel(
batch_size=args.batch_size,
dilations=one_params["dilations"],
filter_width=one_params["filter_width"],
residual_channels=one_params["residual_channels"],
dilation_channels=one_params["dilation_channels"],
skip_channels=one_params["skip_channels"],
quantization_channels=one_params["quantization_channels"],
use_biases=one_params["use_biases"],
scalar_input=one_params["scalar_input"],
initial_filter_width=one_params["initial_filter_width"],
histograms=args.histograms,
global_condition_channels=args.gc_channels,
global_condition_cardinality=reader.gc_category_cardinality,
namespace = str(one_params_i))
for one_params_i,one_params in enumerate(wavenet_params)]
post_par = []
for one_params_i, one_params in enumerate(wavenet_params):
with tf.variable_scope('postprocessing_'+'stage_id_'+str(one_params_i)):
current = dict()
current['postprocess1'] = create_variable(
'postprocess1',
[1, 64, 32])
current['postprocess2'] = create_variable(
'postprocess2',
[1, 32, 3])
current['postprocess1_bias'] = create_bias_variable(
'postprocess1_bias',
[32])
current['postprocess2_bias'] = create_bias_variable(
'postprocess2_bias',
[3])
post_par.append(current)
if args.l2_regularization_strength == 0:
args.l2_regularization_strength = None
#compute
loss_list = []
optimizer = optimizer_factory[args.optimizer](
learning_rate=args.learning_rate,
momentum=args.momentum)
optim_list = []
raw_output_list = []
audio_batch_list = []
loss_all_list = []
for one_params_i, _ in enumerate(wavenet_params):
with tf.name_scope('stage_' + str(one_params_i) + '_postcompute'):
if one_params_i==0:
raw_output, network_label = net[one_params_i].pre_loss(input_batch=audio_batch,
global_condition_batch=gc_id_batch,
l2_regularization_strength=args.l2_regularization_strength)
audio_batch_list.append(audio_batch)
else:
#将前一步骤的补偿结果作用在下一步的输入
raw_output = tf.pad(raw_output, [[one_receptive_field-1, 0], [0, 0]])
raw_output = tf.concat([raw_output, raw_output, raw_output, raw_output], axis=1)
raw_output = tf.pad(raw_output, [[0, 0], [0, 6]])
raw_output = tf.expand_dims(raw_output, 0)
audio_batch = audio_batch - raw_output
audio_batch_list.append(audio_batch)
raw_output, network_label = net[one_params_i].pre_loss(input_batch=audio_batch,
global_condition_batch=gc_id_batch,
l2_regularization_strength=args.l2_regularization_strength)
# Perform (+) -> ReLU -> 1x1 conv -> ReLU -> 1x1 conv to
# postprocess the output.
w1 = post_par[one_params_i]['postprocess1']
w2 = post_par[one_params_i]['postprocess2']
b1 = post_par[one_params_i]['postprocess1_bias']
b2 = post_par[one_params_i]['postprocess2_bias']
raw_output = tf.nn.relu(raw_output)
raw_output = tf.nn.conv1d(raw_output, w1, stride=1, padding="SAME")
raw_output = tf.add(raw_output, b1)
raw_output = tf.nn.relu(raw_output)
raw_output = tf.nn.conv1d(raw_output, w2, stride=1, padding="SAME")
raw_output = tf.add(raw_output, b2)
raw_output = tf.squeeze(raw_output)
raw_output_list.append(raw_output)
network_label = tf.squeeze(network_label)
# Set up logging for TensorBoard.
writer = tf.summary.FileWriter(logdir)
writer.add_graph(tf.get_default_graph())
run_metadata = tf.RunMetadata()
summaries = tf.summary.merge_all()
# Set up session
#config = tf.ConfigProto(log_device_placement=False)
#config.gpu_options.allow_growth = True
tf_config = tf.ConfigProto()
tf_config.gpu_options.per_process_gpu_memory_fraction = 0.9
sess = tf.Session(config=tf_config)
init = tf.global_variables_initializer()
sess.run(init)
# Saver for storing checkpoints of the model.
saver = tf.train.Saver(var_list=tf.trainable_variables(), max_to_keep=args.max_checkpoints)
try:
saved_global_step = load(saver, sess, restore_from)
if is_overwritten_training or saved_global_step is None:
# The first training step will be saved_global_step + 1,
# therefore we put -1 here for new or overwritten trainings.
saved_global_step = -1
except:
print("Something went wrong while restoring checkpoint. "
"We will terminate training to avoid accidentally overwriting "
"the previous model.")
raise
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
reader.start_threads(sess)
step = None
last_saved_step = saved_global_step
from tqdm import tqdm
try:
pass_loss = 0.0
for step in tqdm(range(90400)):
'''
if step < args.num_steps*0.0:
muti_step_id = 0
elif step < args.num_steps*0.99:
muti_step_id = 1
else:
muti_step_id = 2
'''
muti_step_id = 1
start_time = time.time()
audio_batch_list_ori_v, audio_batch_list_now_v,\
raw_output_list_ori_v, raw_output_list_now_v,\
file_name= \
sess.run([audio_batch_list[muti_step_id-1],audio_batch_list[muti_step_id], #label input
raw_output_list[muti_step_id-1], raw_output_list[muti_step_id],
audio_batch_file])
audio_batch_list_ori_v = audio_batch_list_ori_v[0, one_receptive_field:, :]
audio_batch_list_now_v = audio_batch_list_now_v[0, one_receptive_field:, :]
#writer.add_summary(summary, step)
if step%1==0:
wave_result0 = raw_output_list_ori_v[(one_receptive_field+1):, :] #t'
wave_result1 = raw_output_list_now_v[(one_receptive_field+1):, :] + wave_result0 #t'+(t-t')'
wave_target = audio_batch_list_ori_v[:,9:12][one_receptive_field:, :] #t
wave_input = audio_batch_list_ori_v[:, 0:3][one_receptive_field:, :] #t+v
num_sampe = np.size(wave_target,axis=0)
total_tremor = np.zeros(3)
remove_tremor0 = np.zeros(3)
remove_tremor1 = np.zeros(3)
#统计补偿的震颤的比例:
for tmp_over1_i in range(num_sampe):
for xyz_i in range(3):
# 累加振幅
total_tremor[xyz_i] += abs(wave_target[tmp_over1_i, xyz_i])
# 计算有效的消除震颤的部分——附加后震颤幅度变小:方向一致且幅度小于2×target
if wave_target[tmp_over1_i, xyz_i] * wave_result0[tmp_over1_i, xyz_i] > 0:
if abs(wave_result0[tmp_over1_i, xyz_i]) < abs(wave_target[tmp_over1_i, xyz_i]):
remove_tremor0[xyz_i] += abs(wave_result0[tmp_over1_i, xyz_i])
elif abs(wave_result0[tmp_over1_i, xyz_i]) / 2 < abs(wave_target[tmp_over1_i, xyz_i]):
remove_tremor0[xyz_i] += 2 * abs(wave_target[tmp_over1_i, xyz_i]) - abs(
wave_result0[tmp_over1_i, xyz_i])
if wave_target[tmp_over1_i, xyz_i] * wave_result1[tmp_over1_i, xyz_i] > 0:
if abs(wave_result1[tmp_over1_i, xyz_i]) < abs(wave_target[tmp_over1_i, xyz_i]):
remove_tremor1[xyz_i] += abs(wave_result1[tmp_over1_i, xyz_i])
elif abs(wave_result1[tmp_over1_i, xyz_i]) / 2 < abs(wave_target[tmp_over1_i, xyz_i]):
remove_tremor1[xyz_i] += 2 * abs(wave_target[tmp_over1_i, xyz_i]) - abs(
wave_result1[tmp_over1_i, xyz_i])
file_name = file_name[0].split('/')[-2] #'044'
# print(total_tremor)
# print(remove_tremor0)
# print(remove_tremor1)
# print(np.sum(remove_tremor0) / np.sum(total_tremor))
# print(np.sum(remove_tremor1) / np.sum(total_tremor))
wave_result0 = wave_input - wave_result0
wave_result1 = wave_input - wave_result1
wave_target = wave_input - wave_target
wave_analysis = np.array([np.sum(remove_tremor0) / np.sum(total_tremor), np.sum(remove_tremor1) / np.sum(total_tremor)])
# /home/chen/Documents/tensorflow-wavenet-master/analysis/test
# 清道夫的工作
test_path = "/home/chen/Documents/tensorflow-wavenet-master/analysis/test/3/" + file_name + ".txt"
#if not os.path.exists(test_path):
# os.makedirs(test_path)
# print(test_path)
if not os.path.exists(test_path):
np.savetxt(test_path, wave_analysis)
else:
wave_analysis_his = np.loadtxt(test_path)
wave_analysis = np.vstack((wave_analysis_his, wave_analysis))
np.savetxt(test_path, wave_analysis)
# 绘制3D图
if step % 100 == 0:
fig = plt.figure(1)
fig.clear()
ax1 = plt.subplot(231, projection='3d')
ax2 = plt.subplot(232, projection='3d')
ax3 = plt.subplot(233, projection='3d')
ax4 = plt.subplot(234, projection='3d')
ax5 = plt.subplot(235, projection='3d')
ax6 = plt.subplot(236, projection='3d')
lim_show = np.size(wave_input,axis = 0)
lim_show = int(math.floor(lim_show/5))
ax1.plot(wave_input[:, 0], wave_input[:, 1], wave_input[:, 2], linewidth=0.3)
ax4.plot(wave_target[:, 0], wave_target[:, 1], wave_target[:, 2], linewidth=0.3)
ax2.plot(wave_result0[:, 0], wave_result0[:, 1], wave_result0[:, 2], linewidth=0.3)
ax2.set_title(str(np.sum(remove_tremor0) / np.sum(total_tremor)))
ax5.plot(wave_result1[:, 0], wave_result1[:, 1], wave_result1[:, 2], linewidth=0.3)
ax5.set_title(str(np.sum(remove_tremor1) / np.sum(total_tremor)))
ax3.plot(wave_result0[-lim_show:, 0], wave_result0[-lim_show:, 1], wave_result0[-lim_show:, 2], linewidth=0.3)
ax3.plot(wave_target[-lim_show:, 0], wave_target[-lim_show:, 1], wave_target[-lim_show:, 2], linewidth=0.3)
ax6.plot(wave_result1[-lim_show:, 0], wave_result1[-lim_show:, 1], wave_result1[-lim_show:, 2], linewidth=0.3)
ax6.plot(wave_target[-lim_show:, 0], wave_target[-lim_show:, 1], wave_target[-lim_show:, 2], linewidth=0.3)
plt.savefig("/home/chen/Documents/tensorflow-wavenet-master/images/test_result/" + str(step).zfill(7) + ".png")
#if step % args.checkpoint_every == 0:
# save(saver, sess, logdir, step)
# last_saved_step = step
except KeyboardInterrupt:
# Introduce a line break after ^C is displayed so save message
# is on its own line.
print()
finally:
if step > last_saved_step:
save(saver, sess, logdir, step)
coord.request_stop()
coord.join(threads)
if __name__ == '__main__':
main()
| def _str_to_bool(s):
"""Convert string to bool (in argparse context)."""
if s.lower() not in ['true', 'false']:
raise ValueError('Argument needs to be a '
'boolean, got {}'.format(s))
return {'true': True, 'false': False}[s.lower()]
parser = argparse.ArgumentParser(description='WaveNet example network')
parser.add_argument('--batch_size', type=int, default=BATCH_SIZE,
help='How many wav files to process at once. Default: ' + str(BATCH_SIZE) + '.')
parser.add_argument('--data_dir', type=str, default=DATA_DIRECTORY,
help='The directory containing the VCTK corpus.')
parser.add_argument('--store_metadata', type=bool, default=METADATA,
help='Whether to store advanced debugging information '
'(execution time, memory consumption) for use with '
'TensorBoard. Default: ' + str(METADATA) + '.')
parser.add_argument('--logdir', type=str, default=None,
help='Directory in which to store the logging '
'information for TensorBoard. '
'If the model already exists, it will restore '
'the state and will continue training. '
'Cannot use with --logdir_root and --restore_from.')
parser.add_argument('--logdir_root', type=str, default=None,
help='Root directory to place the logging '
'output and generated model. These are stored '
'under the dated subdirectory of --logdir_root. '
'Cannot use with --logdir.')
parser.add_argument('--restore_from', type=str, default=None,
help='Directory in which to restore the model from. '
'This creates the new model under the dated directory '
'in --logdir_root. '
'Cannot use with --logdir.')
parser.add_argument('--checkpoint_every', type=int,
default=CHECKPOINT_EVERY,
help='How many steps to save each checkpoint after. Default: ' + str(CHECKPOINT_EVERY) + '.')
parser.add_argument('--num_steps', type=int, default=NUM_STEPS,
help='Number of training steps. Default: ' + str(NUM_STEPS) + '.')
parser.add_argument('--learning_rate', type=float, default=LEARNING_RATE,
help='Learning rate for training. Default: ' + str(LEARNING_RATE) + '.')
#parser.add_argument('--wavenet_params', type=list, default=WAVENET_PARAMS,
# help='JSON file with the network parameters. Default: ' + WAVENET_PARAMS + '.')
parser.add_argument('--sample_size', type=int, default=SAMPLE_SIZE,
help='Concatenate and cut audio samples to this many '
'samples. Default: ' + str(SAMPLE_SIZE) + '.')
parser.add_argument('--l2_regularization_strength', type=float,
default=L2_REGULARIZATION_STRENGTH,
help='Coefficient in the L2 regularization. '
'Default: False')
parser.add_argument('--silence_threshold', type=float,
default=SILENCE_THRESHOLD,
help='Volume threshold below which to trim the start '
'and the end from the training set samples. Default: ' + str(SILENCE_THRESHOLD) + '.')
parser.add_argument('--optimizer', type=str, default='adam',
choices=optimizer_factory.keys(),
help='Select the optimizer specified by this option. Default: adam.')
parser.add_argument('--momentum', type=float,
default=MOMENTUM, help='Specify the momentum to be '
'used by sgd or rmsprop optimizer. Ignored by the '
'adam optimizer. Default: ' + str(MOMENTUM) + '.')
parser.add_argument('--histograms', type=_str_to_bool, default=False,
help='Whether to store histogram summaries. Default: False')
parser.add_argument('--gc_channels', type=int, default=None,
help='Number of global condition channels. Default: None. Expecting: Int')
parser.add_argument('--max_checkpoints', type=int, default=MAX_TO_KEEP,
help='Maximum amount of checkpoints that will be kept alive. Default: '
+ str(MAX_TO_KEEP) + '.')
return parser.parse_args() | identifier_body |
test.py | # -*- coding: utf-8 -*-
"""Training script for the WaveNet network on the VCTK corpus.
This script trains a network with the WaveNet using data from the VCTK corpus,
which can be freely downloaded at the following site (~10 GB):
http://homepages.inf.ed.ac.uk/jyamagis/page3/page58/page58.html
"""
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import argparse
from datetime import datetime
import json,math
import os
import sys
import time
import matplotlib.pyplot as pl
import tensorflow as tf
from tensorflow.python.client import timeline
from wavenet import WaveNetModel, optimizer_factory, AudioReader
from wavenet.model import create_variable, create_bias_variable
from wavenet.util.visual import figure_joint_skeleton
from analysis.Corr_Dim import fnn, Tao, Dim_Corr
from analysis.Fourier_utils import *
# ---- Training/analysis defaults (overridable from the command line) ----
BATCH_SIZE = 1
DATA_DIRECTORY = './VCTK-Corpus'  # default corpus location
LOGDIR_ROOT = './logdir'          # root under which dated train dirs are created
CHECKPOINT_EVERY = 500            # steps between checkpoint saves
NUM_STEPS = int(1e5)
LEARNING_RATE = 1e-4
# One JSON parameter file per cascade stage (three-stage model).
WAVENET_PARAMS = ['./WAVE_params/wavenet_params_1.json','./WAVE_params/wavenet_params_2.json','./WAVE_params/wavenet_params_3.json']
# Timestamp taken at import time; used to name the default log directory.
STARTED_DATESTRING = "{0:%Y-%m-%dT%H-%M-%S}".format(datetime.now())
SAMPLE_SIZE = None
L2_REGULARIZATION_STRENGTH = 0    # 0 disables L2 regularization (see main())
SILENCE_THRESHOLD = 0.3
EPSILON = 0.001                   # thresholds <= EPSILON disable silence trimming
MOMENTUM = 0.9                    # used by sgd/rmsprop optimizers only
MAX_TO_KEEP = 5                   # checkpoint retention count for tf.train.Saver
METADATA = False
def figure_hand_back(uvd_pt, uvd_pt1, uvd_pt2, path, test_num):
    """Render three 3-point skeletons (triangles) into one 3D figure and save it.

    The three point sets are drawn in blue, green and red respectively on a
    single (-300, 300)^3 axis, then the figure is written to
    ``path + zero-padded(test_num) + ".png"``.

    Args:
        uvd_pt, uvd_pt1, uvd_pt2: flat arrays reshaped to (-1, 3); only rows
            0-2 (three x/y/z vertices) are drawn.  # assumes numpy arrays -- TODO confirm
        path: output directory/prefix for the image file.
        test_num: frame index used to build the file name.
    """
    def _draw_triangle(ax, pts, color):
        # Scatter the three vertices, then connect them pairwise (0-1, 1-2, 2-0).
        for i in range(3):
            ax.scatter(pts[i, 0], pts[i, 1], pts[i, 2], s=10, c=color)
        for i, j in ((0, 1), (1, 2), (2, 0)):
            ax.plot([pts[i, 0], pts[j, 0]],
                    [pts[i, 1], pts[j, 1]],
                    [pts[i, 2], pts[j, 2]], color=color, linewidth=1)

    fig = plt.figure(1)
    fig.clear()
    ax = plt.subplot(111, projection='3d')
    # Fixed viewing volume so successive frames are comparable.
    plt.ylim(-300, 300)
    plt.xlim(-300, 300)
    ax.set_zlim(-300, 300)
    # Previously three copy-pasted stanzas; identical output, one loop.
    for pts, color in ((uvd_pt, 'b'), (uvd_pt1, 'g'), (uvd_pt2, 'r')):
        _draw_triangle(ax, pts.reshape(-1, 3), color)
    plt.savefig(path + str(test_num).zfill(7) + ".png")
def get_arguments():
    """Parse and return the command-line arguments for this script."""
    def _str_to_bool(s):
        """Convert string to bool (in argparse context)."""
        # argparse's type=bool treats every non-empty string -- including
        # "False" -- as True, so booleans must be parsed explicitly.
        if s.lower() not in ['true', 'false']:
            raise ValueError('Argument needs to be a '
                             'boolean, got {}'.format(s))
        return {'true': True, 'false': False}[s.lower()]

    parser = argparse.ArgumentParser(description='WaveNet example network')
    parser.add_argument('--batch_size', type=int, default=BATCH_SIZE,
                        help='How many wav files to process at once. Default: ' + str(BATCH_SIZE) + '.')
    parser.add_argument('--data_dir', type=str, default=DATA_DIRECTORY,
                        help='The directory containing the VCTK corpus.')
    # BUG FIX: was type=bool, which maps the string "False" to True; use the
    # explicit string-to-bool parser instead.
    parser.add_argument('--store_metadata', type=_str_to_bool, default=METADATA,
                        help='Whether to store advanced debugging information '
                             '(execution time, memory consumption) for use with '
                             'TensorBoard. Default: ' + str(METADATA) + '.')
    parser.add_argument('--logdir', type=str, default=None,
                        help='Directory in which to store the logging '
                             'information for TensorBoard. '
                             'If the model already exists, it will restore '
                             'the state and will continue training. '
                             'Cannot use with --logdir_root and --restore_from.')
    parser.add_argument('--logdir_root', type=str, default=None,
                        help='Root directory to place the logging '
                             'output and generated model. These are stored '
                             'under the dated subdirectory of --logdir_root. '
                             'Cannot use with --logdir.')
    parser.add_argument('--restore_from', type=str, default=None,
                        help='Directory in which to restore the model from. '
                             'This creates the new model under the dated directory '
                             'in --logdir_root. '
                             'Cannot use with --logdir.')
    parser.add_argument('--checkpoint_every', type=int,
                        default=CHECKPOINT_EVERY,
                        help='How many steps to save each checkpoint after. Default: ' + str(CHECKPOINT_EVERY) + '.')
    parser.add_argument('--num_steps', type=int, default=NUM_STEPS,
                        help='Number of training steps. Default: ' + str(NUM_STEPS) + '.')
    parser.add_argument('--learning_rate', type=float, default=LEARNING_RATE,
                        help='Learning rate for training. Default: ' + str(LEARNING_RATE) + '.')
    parser.add_argument('--sample_size', type=int, default=SAMPLE_SIZE,
                        help='Concatenate and cut audio samples to this many '
                             'samples. Default: ' + str(SAMPLE_SIZE) + '.')
    # BUG FIX: the help text previously said "Default: False" although the
    # actual default is L2_REGULARIZATION_STRENGTH (0 = disabled).
    parser.add_argument('--l2_regularization_strength', type=float,
                        default=L2_REGULARIZATION_STRENGTH,
                        help='Coefficient in the L2 regularization. '
                             'Default: ' + str(L2_REGULARIZATION_STRENGTH) + '.')
    parser.add_argument('--silence_threshold', type=float,
                        default=SILENCE_THRESHOLD,
                        help='Volume threshold below which to trim the start '
                             'and the end from the training set samples. Default: ' + str(SILENCE_THRESHOLD) + '.')
    parser.add_argument('--optimizer', type=str, default='adam',
                        choices=optimizer_factory.keys(),
                        help='Select the optimizer specified by this option. Default: adam.')
    parser.add_argument('--momentum', type=float,
                        default=MOMENTUM, help='Specify the momentum to be '
                        'used by sgd or rmsprop optimizer. Ignored by the '
                        'adam optimizer. Default: ' + str(MOMENTUM) + '.')
    parser.add_argument('--histograms', type=_str_to_bool, default=False,
                        help='Whether to store histogram summaries. Default: False')
    parser.add_argument('--gc_channels', type=int, default=None,
                        help='Number of global condition channels. Default: None. Expecting: Int')
    parser.add_argument('--max_checkpoints', type=int, default=MAX_TO_KEEP,
                        help='Maximum amount of checkpoints that will be kept alive. Default: '
                             + str(MAX_TO_KEEP) + '.')
    return parser.parse_args()
def save(saver, sess, logdir, step):
    """Write a checkpoint for the current session under *logdir*.

    The target directory is created on demand.  The progress message is
    flushed immediately so it is visible while the (possibly slow) write
    is still in flight.
    """
    print('Storing checkpoint to {} ...'.format(logdir), end="")
    sys.stdout.flush()
    if not os.path.exists(logdir):
        os.makedirs(logdir)
    target = os.path.join(logdir, 'model.ckpt')
    saver.save(sess, target, global_step=step)
    print(' Done.')
def load(saver, sess, logdir):
    """Restore the newest checkpoint found in *logdir* into *sess*.

    Returns the global step encoded in the checkpoint file name, or None
    when no checkpoint exists.
    """
    print("Trying to restore saved checkpoints from {} ...".format(logdir),
          end="")
    ckpt = tf.train.get_checkpoint_state(logdir)
    if not ckpt:
        print(" No checkpoint found.")
        return None
    print(" Checkpoint found: {}".format(ckpt.model_checkpoint_path))
    # The step number is the suffix after the last '-' of the file name.
    step_token = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
    global_step = int(step_token)
    print(" Global step was: {}".format(global_step))
    print(" Restoring...", end="")
    saver.restore(sess, ckpt.model_checkpoint_path)
    print(" Done.")
    return global_step
def get_default_logdir(logdir_root):
    """Build the dated default log directory beneath *logdir_root*."""
    return os.path.join(logdir_root, 'train', STARTED_DATESTRING)
def validate_directories(args):
    """Validate and arrange directory related arguments.

    Returns:
        dict with the resolved 'logdir', 'logdir_root' and 'restore_from'
        values.

    Raises:
        ValueError: if mutually exclusive directory options are combined.
    """
    # Validation: --logdir excludes both --logdir_root and --restore_from.
    if args.logdir and args.logdir_root:
        raise ValueError("--logdir and --logdir_root cannot be "
                         "specified at the same time.")
    if args.logdir and args.restore_from:
        raise ValueError(
            "--logdir and --restore_from cannot be specified at the same "
            "time. This is to keep your previous model from unexpected "
            "overwrites.\n"
            "Use --logdir_root to specify the root of the directory which "
            "will be automatically created with current date and time, or use "
            "only --logdir to just continue the training from the last "
            "checkpoint.")
    # Arrangement: fall back to module-level defaults where unset.
    logdir_root = args.logdir_root
    if logdir_root is None:
        logdir_root = LOGDIR_ROOT
    logdir = args.logdir
    if logdir is None:
        logdir = get_default_logdir(logdir_root)
        print('Using default logdir: {}'.format(logdir))
    restore_from = args.restore_from
    if restore_from is None:
        # args.logdir and args.restore_from are exclusive,
        # so it is guaranteed the logdir here is newly created.
        restore_from = logdir
    return {
        'logdir': logdir,
        # BUG FIX: previously returned args.logdir_root, silently discarding
        # the LOGDIR_ROOT fallback computed above.
        'logdir_root': logdir_root,
        'restore_from': restore_from
    }
istest = False
def main():
    """Test/analysis entry point for the cascaded (3-stage) WaveNet model.

    Builds the multi-stage graph, restores a checkpoint, then iterates the
    dataset measuring how much tremor the stage-1 and stage-2 compensations
    remove, appending per-sequence ratios to text files and periodically
    saving 3D comparison figures.
    """
    args = get_arguments()
    try:
        directories = validate_directories(args)
    except ValueError as e:
        print("Some arguments are wrong:")
        print(str(e))
        return
    logdir = directories['logdir']
    restore_from = directories['restore_from']
    # Even if we restored the model, we will treat it as new training
    # if the trained model is written into an arbitrary location.
    is_overwritten_training = logdir != restore_from
    # One parameter dict per cascade stage.
    wavenet_params = []
    for file_name in WAVENET_PARAMS:
        with open(file_name, 'r') as f:
            wavenet_params.append(json.load(f))
    # Create coordinator.
    coord = tf.train.Coordinator()
    # Load raw waveform from VCTK corpus.
    with tf.name_scope('create_inputs'):
        # Allow silence trimming to be skipped by specifying a threshold near
        # zero.
        silence_threshold = args.silence_threshold if args.silence_threshold > \
            EPSILON else None
        gc_enabled = args.gc_channels is not None
        # NOTE(review): the reader is configured from stage index 2 only --
        # confirm all three stages share the same receptive field settings.
        reader = AudioReader(
            args.data_dir,
            coord,
            sample_rate=wavenet_params[2]['sample_rate'],
            gc_enabled=gc_enabled,
            receptive_field=WaveNetModel.calculate_receptive_field(wavenet_params[2]["filter_width"],
                                                                   wavenet_params[2]["dilations"],
                                                                   wavenet_params[2]["scalar_input"],
                                                                   wavenet_params[2]["initial_filter_width"]),
            sample_size=args.sample_size,
            silence_threshold=silence_threshold,
            pad = False,
            path = "/media/chen/4CBEA7F1BEA7D1AE/Download/hand_dataset/pakinson/degree_dataset/3",
            rand = False)
        audio_batch = reader.dequeue(args.batch_size)
        audio_batch_file = reader.dequeue_str(args.batch_size)
        # Left-pad the input by one receptive field so the first real sample
        # has full context, then restore the leading batch dimension.
        audio_batch = tf.squeeze(audio_batch)
        one_receptive_field = WaveNetModel.calculate_receptive_field(wavenet_params[2]["filter_width"],
                                                                     wavenet_params[2]["dilations"],
                                                                     wavenet_params[2]["scalar_input"],
                                                                     wavenet_params[2]["initial_filter_width"])
        audio_batch = tf.pad(audio_batch, [[one_receptive_field, 0], [0, 0]],
                             'constant')
        audio_batch = tf.expand_dims(audio_batch, 0)
        if gc_enabled:
            gc_id_batch = reader.dequeue_gc(args.batch_size)
        else:
            gc_id_batch = None
    # Create network.
    net = [WaveNetModel(
        batch_size=args.batch_size,
        dilations=one_params["dilations"],
        filter_width=one_params["filter_width"],
        residual_channels=one_params["residual_channels"],
        dilation_channels=one_params["dilation_channels"],
        skip_channels=one_params["skip_channels"],
        quantization_channels=one_params["quantization_channels"],
        use_biases=one_params["use_biases"],
        scalar_input=one_params["scalar_input"],
        initial_filter_width=one_params["initial_filter_width"],
        histograms=args.histograms,
        global_condition_channels=args.gc_channels,
        global_condition_cardinality=reader.gc_category_cardinality,
        namespace = str(one_params_i))
        for one_params_i,one_params in enumerate(wavenet_params)]
    # Per-stage postprocessing weights: two 1x1 convolutions (64->32->3).
    post_par = []
    for one_params_i, one_params in enumerate(wavenet_params):
        with tf.variable_scope('postprocessing_'+'stage_id_'+str(one_params_i)):
            current = dict()
            current['postprocess1'] = create_variable(
                'postprocess1',
                [1, 64, 32])
            current['postprocess2'] = create_variable(
                'postprocess2',
                [1, 32, 3])
            current['postprocess1_bias'] = create_bias_variable(
                'postprocess1_bias',
                [32])
            current['postprocess2_bias'] = create_bias_variable(
                'postprocess2_bias',
                [3])
            post_par.append(current)
    # A strength of 0 disables L2 regularization entirely.
    if args.l2_regularization_strength == 0:
        args.l2_regularization_strength = None
    #compute
    # NOTE(review): loss_list, optim_list and loss_all_list are never filled
    # or read below -- apparently leftovers from the training script.
    loss_list = []
    optimizer = optimizer_factory[args.optimizer](
        learning_rate=args.learning_rate,
        momentum=args.momentum)
    optim_list = []
    raw_output_list = []
    audio_batch_list = []
    loss_all_list = []
    for one_params_i, _ in enumerate(wavenet_params):
        with tf.name_scope('stage_' + str(one_params_i) + '_postcompute'):
            if one_params_i==0:
                raw_output, network_label = net[one_params_i].pre_loss(input_batch=audio_batch,
                                                                       global_condition_batch=gc_id_batch,
                                                                       l2_regularization_strength=args.l2_regularization_strength)
                audio_batch_list.append(audio_batch)
            else:
                # Apply the previous stage's compensation to the next
                # stage's input (subtract the predicted tremor).
                raw_output = tf.pad(raw_output, [[one_receptive_field-1, 0], [0, 0]])
                raw_output = tf.concat([raw_output, raw_output, raw_output, raw_output], axis=1)
                raw_output = tf.pad(raw_output, [[0, 0], [0, 6]])
                raw_output = tf.expand_dims(raw_output, 0)
                audio_batch = audio_batch - raw_output
                audio_batch_list.append(audio_batch)
                raw_output, network_label = net[one_params_i].pre_loss(input_batch=audio_batch,
                                                                       global_condition_batch=gc_id_batch,
                                                                       l2_regularization_strength=args.l2_regularization_strength)
            # Perform (+) -> ReLU -> 1x1 conv -> ReLU -> 1x1 conv to
            # postprocess the output.
            w1 = post_par[one_params_i]['postprocess1']
            w2 = post_par[one_params_i]['postprocess2']
            b1 = post_par[one_params_i]['postprocess1_bias']
            b2 = post_par[one_params_i]['postprocess2_bias']
            raw_output = tf.nn.relu(raw_output)
            raw_output = tf.nn.conv1d(raw_output, w1, stride=1, padding="SAME")
            raw_output = tf.add(raw_output, b1)
            raw_output = tf.nn.relu(raw_output)
            raw_output = tf.nn.conv1d(raw_output, w2, stride=1, padding="SAME")
            raw_output = tf.add(raw_output, b2)
            raw_output = tf.squeeze(raw_output)
            raw_output_list.append(raw_output)
    network_label = tf.squeeze(network_label)
    # Set up logging for TensorBoard.
    writer = tf.summary.FileWriter(logdir)
    writer.add_graph(tf.get_default_graph())
    run_metadata = tf.RunMetadata()
    summaries = tf.summary.merge_all()
    # Set up session
    #config = tf.ConfigProto(log_device_placement=False)
    #config.gpu_options.allow_growth = True
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=tf_config)
    init = tf.global_variables_initializer()
    sess.run(init)
    # Saver for storing checkpoints of the model.
    saver = tf.train.Saver(var_list=tf.trainable_variables(), max_to_keep=args.max_checkpoints)
    try:
        saved_global_step = load(saver, sess, restore_from)
        if is_overwritten_training or saved_global_step is None:
            # The first training step will be saved_global_step + 1,
            # therefore we put -1 here for new or overwritten trainings.
            saved_global_step = -1
    except:
        print("Something went wrong while restoring checkpoint. "
              "We will terminate training to avoid accidentally overwriting "
              "the previous model.")
        raise
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    reader.start_threads(sess)
    step = None
    last_saved_step = saved_global_step
    from tqdm import tqdm
    try:
        pass_loss = 0.0
        for step in tqdm(range(90400)):
            # The string literal below is commented-out stage-selection code
            # (it also carries dataset-extraction residue); it is a no-op.
            ''' | elif step < args.num_steps*0.99:
            muti_step_id = 1
            else:
            muti_step_id = 2
            '''
            muti_step_id = 1
            start_time = time.time()
            # Fetch input/compensated batches and raw outputs for the stage
            # before and after the selected one, plus the source file name.
            audio_batch_list_ori_v, audio_batch_list_now_v,\
            raw_output_list_ori_v, raw_output_list_now_v,\
            file_name= \
                sess.run([audio_batch_list[muti_step_id-1],audio_batch_list[muti_step_id], #label input
                          raw_output_list[muti_step_id-1], raw_output_list[muti_step_id],
                          audio_batch_file])
            # Drop the artificial left padding before analysis.
            audio_batch_list_ori_v = audio_batch_list_ori_v[0, one_receptive_field:, :]
            audio_batch_list_now_v = audio_batch_list_now_v[0, one_receptive_field:, :]
            #writer.add_summary(summary, step)
            if step%1==0:
                wave_result0 = raw_output_list_ori_v[(one_receptive_field+1):, :] #t'
                wave_result1 = raw_output_list_now_v[(one_receptive_field+1):, :] + wave_result0 #t'+(t-t')'
                wave_target = audio_batch_list_ori_v[:,9:12][one_receptive_field:, :] #t
                wave_input = audio_batch_list_ori_v[:, 0:3][one_receptive_field:, :] #t+v
                num_sampe = np.size(wave_target,axis=0)
                total_tremor = np.zeros(3)
                remove_tremor0 = np.zeros(3)
                remove_tremor1 = np.zeros(3)
                # Tally what fraction of the tremor the compensation removed:
                for tmp_over1_i in range(num_sampe):
                    for xyz_i in range(3):
                        # accumulate amplitude
                        total_tremor[xyz_i] += abs(wave_target[tmp_over1_i, xyz_i])
                        # Count the effectively removed tremor (amplitude shrinks
                        # after compensation): same sign and magnitude below
                        # 2x the target.
                        if wave_target[tmp_over1_i, xyz_i] * wave_result0[tmp_over1_i, xyz_i] > 0:
                            if abs(wave_result0[tmp_over1_i, xyz_i]) < abs(wave_target[tmp_over1_i, xyz_i]):
                                remove_tremor0[xyz_i] += abs(wave_result0[tmp_over1_i, xyz_i])
                            elif abs(wave_result0[tmp_over1_i, xyz_i]) / 2 < abs(wave_target[tmp_over1_i, xyz_i]):
                                remove_tremor0[xyz_i] += 2 * abs(wave_target[tmp_over1_i, xyz_i]) - abs(
                                    wave_result0[tmp_over1_i, xyz_i])
                        if wave_target[tmp_over1_i, xyz_i] * wave_result1[tmp_over1_i, xyz_i] > 0:
                            if abs(wave_result1[tmp_over1_i, xyz_i]) < abs(wave_target[tmp_over1_i, xyz_i]):
                                remove_tremor1[xyz_i] += abs(wave_result1[tmp_over1_i, xyz_i])
                            elif abs(wave_result1[tmp_over1_i, xyz_i]) / 2 < abs(wave_target[tmp_over1_i, xyz_i]):
                                remove_tremor1[xyz_i] += 2 * abs(wave_target[tmp_over1_i, xyz_i]) - abs(
                                    wave_result1[tmp_over1_i, xyz_i])
                # Sequence id is the parent directory of the source file.
                file_name = file_name[0].split('/')[-2] #'044'
                # print(total_tremor)
                # print(remove_tremor0)
                # print(remove_tremor1)
                # print(np.sum(remove_tremor0) / np.sum(total_tremor))
                # print(np.sum(remove_tremor1) / np.sum(total_tremor))
                wave_result0 = wave_input - wave_result0
                wave_result1 = wave_input - wave_result1
                wave_target = wave_input - wave_target
                wave_analysis = np.array([np.sum(remove_tremor0) / np.sum(total_tremor), np.sum(remove_tremor1) / np.sum(total_tremor)])
                # /home/chen/Documents/tensorflow-wavenet-master/analysis/test
                # Bookkeeping: append this step's removal ratios to the
                # per-sequence result file.
                test_path = "/home/chen/Documents/tensorflow-wavenet-master/analysis/test/3/" + file_name + ".txt"
                #if not os.path.exists(test_path):
                #    os.makedirs(test_path)
                #    print(test_path)
                if not os.path.exists(test_path):
                    np.savetxt(test_path, wave_analysis)
                else:
                    wave_analysis_his = np.loadtxt(test_path)
                    wave_analysis = np.vstack((wave_analysis_his, wave_analysis))
                    np.savetxt(test_path, wave_analysis)
                # Draw the 3D comparison figure.
                if step % 100 == 0:
                    fig = plt.figure(1)
                    fig.clear()
                    ax1 = plt.subplot(231, projection='3d')
                    ax2 = plt.subplot(232, projection='3d')
                    ax3 = plt.subplot(233, projection='3d')
                    ax4 = plt.subplot(234, projection='3d')
                    ax5 = plt.subplot(235, projection='3d')
                    ax6 = plt.subplot(236, projection='3d')
                    # Zoom panels (ax3/ax6) show only the last fifth of the trace.
                    lim_show = np.size(wave_input,axis = 0)
                    lim_show = int(math.floor(lim_show/5))
                    ax1.plot(wave_input[:, 0], wave_input[:, 1], wave_input[:, 2], linewidth=0.3)
                    ax4.plot(wave_target[:, 0], wave_target[:, 1], wave_target[:, 2], linewidth=0.3)
                    ax2.plot(wave_result0[:, 0], wave_result0[:, 1], wave_result0[:, 2], linewidth=0.3)
                    ax2.set_title(str(np.sum(remove_tremor0) / np.sum(total_tremor)))
                    ax5.plot(wave_result1[:, 0], wave_result1[:, 1], wave_result1[:, 2], linewidth=0.3)
                    ax5.set_title(str(np.sum(remove_tremor1) / np.sum(total_tremor)))
                    ax3.plot(wave_result0[-lim_show:, 0], wave_result0[-lim_show:, 1], wave_result0[-lim_show:, 2], linewidth=0.3)
                    ax3.plot(wave_target[-lim_show:, 0], wave_target[-lim_show:, 1], wave_target[-lim_show:, 2], linewidth=0.3)
                    ax6.plot(wave_result1[-lim_show:, 0], wave_result1[-lim_show:, 1], wave_result1[-lim_show:, 2], linewidth=0.3)
                    ax6.plot(wave_target[-lim_show:, 0], wave_target[-lim_show:, 1], wave_target[-lim_show:, 2], linewidth=0.3)
                    plt.savefig("/home/chen/Documents/tensorflow-wavenet-master/images/test_result/" + str(step).zfill(7) + ".png")
            #if step % args.checkpoint_every == 0:
            #    save(saver, sess, logdir, step)
            #    last_saved_step = step
    except KeyboardInterrupt:
        # Introduce a line break after ^C is displayed so save message
        # is on its own line.
        print()
    finally:
        if step > last_saved_step:
            save(saver, sess, logdir, step)
        coord.request_stop()
        coord.join(threads)
if __name__ == '__main__':
main() | if step < args.num_steps*0.0:
muti_step_id = 0 | random_line_split |
orgreminders.go | package orgreminders
import (
"appengine"
"appengine/datastore"
"appengine/mail"
"appengine/user"
"fmt"
"html/template"
"log"
"net/http"
"sort"
"strings"
"time"
)
// App-global variables
var Templates *template.Template // parsed template set, populated in init()
var Duration_Day = 24 * time.Hour
var Duration_Week = 7 * Duration_Day // default organization expiry window (see OrgSaveHandler)
// TemplateFiles lists every template that must parse for the app to serve
// pages; init() aborts handler registration if any is missing.
var TemplateFiles = []string{
	"tmpl/header.html",
	"tmpl/css.html",
	"tmpl/home.html",
	"tmpl/save.html",
	"tmpl/new-event.html",
	"tmpl/new-org.html",
	"tmpl/editorg.html",
	"tmpl/editevent.html",
	"tmpl/events.html",
	"tmpl/organizations.html",
	"tmpl/error.html",
	"tmpl/cron.html",
	"tmpl/new-member.html",
	"tmpl/members.html",
	"tmpl/editmember.html",
}
// Page is the view-model handed to every template render.
type Page struct {
	Error          string                  // message shown by the error template
	Events         map[string]Event        // events keyed by datastore key string
	Keys           []*datastore.Key
	Event2Edit     Event                   // event loaded into the edit form
	Organizations  map[string]Organization
	Org2Edit       Organization
	Org2EditKey    string
	Location       time.Location // NOTE(review): held by value and apparently unset in this file -- confirm it is used
	AllowNewOrg    bool
	SuperUser      bool
	LoggedIn       bool
	UserEmail      string
	Orgs           []string // org names offered in select boxes (sorted by handlers)
	Members        map[string]Member
	SavedEvent     bool // set after a successful event save
	SavedOrg       bool
	SavedMember    bool
	Member2Edit    Member
	Member2EditKey string
	ScheduleHTML   map[string][]string // rendered reminder schedule (filled by EventEditHandler)
}
// NewPage builds the per-request template context from the (possibly
// anonymous) user. The error result is always nil; it is kept for
// interface stability.
func NewPage(u *User) (*Page, error) {
	p := Page{}
	if u.Meta == nil {
		return &p, nil
	}
	p.LoggedIn = true
	p.AllowNewOrg = true
	p.UserEmail = u.Meta.Email
	p.SuperUser = u.SuperUser
	return &p, nil
}
// renderTemplate executes the named template ("<tmpl>.html") into w,
// reporting any execution failure as an HTTP 500.
func renderTemplate(w http.ResponseWriter, tmpl string, p *Page) {
	if err := Templates.ExecuteTemplate(w, tmpl+".html", p); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
// init parses all templates up front and wires the URL routing table.
// Parsing failures abort registration so the problem surfaces at startup
// rather than per request.
func init() {
	templTest, err := template.ParseFiles(TemplateFiles...)
	if err != nil {
		log.Println("Some (or all) of the required templates are missing, exiting: ", err.Error())
		return
	}
	Templates = templTest
	routes := map[string]func(http.ResponseWriter, *http.Request){
		"/":              DefaultHandler,
		"/newevent":      NewEventHandler,
		"/neworg":        NewOrgHandler,
		"/events":        EventsHandler,
		"/organizations": OrgsHandler,
		"/saveevent":     EventSaveHandler,
		"/saveorg":       OrgSaveHandler,
		"/editorg":       OrgEditHandler,
		"/editevent":     EventEditHandler,
		"/cron":          CronHandler,
		"/logout":        LogoutHandler,
		"/newmember":     NewMemberHandler,
		"/savemember":    MemberSaveHandler,
		"/members":       MembersHandler,
		"/editmember":    MemberEditHandler,
	}
	for pattern, handler := range routes {
		http.HandleFunc(pattern, handler)
	}
}
func | (w http.ResponseWriter, r *http.Request) {
c := appengine.NewContext(r)
url, _ := user.LogoutURL(c, "/")
http.Redirect(w, r, url, http.StatusFound)
}
func DefaultHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
title := "home"
renderTemplate(w, title, p)
}
// NewOrgHandler renders the organization-creation form.
func NewOrgHandler(w http.ResponseWriter, r *http.Request) {
	currentUser := UserLookup(w, r)
	page, _ := NewPage(&currentUser)
	renderTemplate(w, "new-org", page)
}
// NewEventHandler renders the event-creation form, offering the caller's
// organizations sorted by name.
func NewEventHandler(w http.ResponseWriter, r *http.Request) {
	currentUser := UserLookup(w, r)
	page, _ := NewPage(&currentUser)
	for _, org := range currentUser.Orgs {
		page.Orgs = append(page.Orgs, org.Name)
	}
	sort.Strings(page.Orgs)
	renderTemplate(w, "new-event", page)
}
// EventSaveHandler creates or updates an Event from the posted form and
// renders the "save" confirmation page.
//
// Expected form fields: title, emailmessage, textmessage, orgs (multi),
// sendemail/sendtext/oncreate checkboxes, remqty[]/remtyp[] reminder rows,
// due (parsed in the first org's time zone) and an optional datastore key.
func EventSaveHandler(w http.ResponseWriter, r *http.Request) {
	u := UserLookup(w, r)
	p, _ := NewPage(&u)
	c := appengine.NewContext(r)
	r.ParseForm()
	event := NewEvent()
	event.Title = r.PostFormValue("title")
	// The e-mail body is stored as trusted HTML from the form editor.
	event.EmailMessage = template.HTML(r.PostFormValue("emailmessage"))
	event.TextMessage = r.PostFormValue("textmessage")
	event.Submitter = *u.Meta
	event.Orgs = r.PostForm["orgs"]
	if len(event.Orgs) < 1 {
		p.Error = "You must choose an organization."
		renderTemplate(w, "error", p)
		return
	}
	if r.PostFormValue("sendemail") == "on" {
		event.Email = true
	}
	if r.PostFormValue("sendtext") == "on" {
		event.Text = true
	}
	// save reminder schedule: each row concatenates a quantity with its
	// type value (format defined by Reminders.Add).
	var remqtys = r.PostForm["remqty[]"]
	var remtyps = r.PostForm["remtyp[]"]
	for remkey, remval := range remqtys {
		var entry = fmt.Sprintf("%s%s", remval, remtyps[remkey])
		event.Reminders.Add(entry)
	}
	o, err := GetOrganizationByName(c, event.Orgs[0])
	if err != nil {
		c.Infof("Error: %s", err.Error())
		p.Error = err.Error()
		renderTemplate(w, "error", p)
		return
	}
	// The due time is interpreted in the first organization's time zone.
	location, _ := time.LoadLocation(o.TimeZone)
	const longForm = "01/02/2006 3:04pm"
	t, timeerr := time.ParseInLocation(longForm, r.PostFormValue("due"), location)
	if timeerr != nil {
		http.Error(w, "Invalid time string", http.StatusInternalServerError)
		return
	}
	event.Due = t
	event.Key = r.PostFormValue("key")
	var subject = "Event Saved: "
	if event.Key == "" {
		// No key posted: this is a brand-new event.
		_, event.Key = event.Save(c)
	} else {
		event.Update(c)
		subject = "Event Updated: "
	}
	if r.PostFormValue("oncreate") == "on" {
		// Fire the notification immediately in addition to the schedule.
		event.Notify(c, true)
	}
	event.DueFormatted = event.Due.In(location).Format("01/02/2006 3:04pm")
	AdminNotify(c, u.Meta.Email, subject+event.Title, "The following event was just saved: <br><br>"+event.GetHTMLView(c))
	p.Event2Edit = event
	p.SavedEvent = true
	renderTemplate(w, "save", p)
}
// EventEditHandler renders the edit form for the event named by the "id"
// query parameter, or the error page when it cannot be loaded.
//
// The form is also offered any of the caller's organizations the event
// does not already belong to, and the event's reminder schedule rendered
// as HTML fragments.
func EventEditHandler(w http.ResponseWriter, r *http.Request) {
	u := UserLookup(w, r)
	p, _ := NewPage(&u)
	c := appengine.NewContext(r)
	var ok bool
	ok, p.Event2Edit = GetEventByKey(c, r.FormValue("id"))
	if ok {
		// Show the due time in the first organization's local time zone.
		org, _ := GetOrganizationByName(c, p.Event2Edit.Orgs[0])
		location, _ := time.LoadLocation(org.TimeZone)
		p.Event2Edit.DueFormatted = p.Event2Edit.Due.In(location).Format("01/02/2006 3:04pm")
		uorgs := GetOrganizationsByUser(c, u.Meta.Email)
		for _, uorg := range uorgs {
			// Offer only organizations the event is not yet attached to.
			missing := true
			for _, porg := range p.Event2Edit.Orgs {
				if uorg.Name == porg {
					missing = false
					break
				}
			}
			if missing == true {
				p.Orgs = append(p.Orgs, uorg.Name)
			}
		}
		// Extract usable event reminder list
		p.ScheduleHTML = p.Event2Edit.Reminders.HTML()
		sort.Strings(p.Orgs)
		renderTemplate(w, "editevent", p)
	} else {
		p.Error = "Event not found."
		renderTemplate(w, "error", p)
	}
}
// OrgSaveHandler persists a new or updated organization from the posted
// form and renders the "save" confirmation page.
func OrgSaveHandler(w http.ResponseWriter, r *http.Request) {
	currentUser := UserLookup(w, r)
	page, _ := NewPage(&currentUser)
	ctx := appengine.NewContext(r)

	org := NewOrganization()
	org.Name = r.PostFormValue("name")
	org.Description = r.PostFormValue("description")
	org.Active = true
	// Freshly saved orgs stay valid for one week by default.
	org.Expires = time.Now().UTC().Add(Duration_Week)
	org.Administrator = strings.Split(r.PostFormValue("admin"), "\r\n")
	org.TimeZone = r.PostFormValue("timezone")

	key := r.PostFormValue("key")
	if key == "" {
		// No key posted: brand-new organization.
		ctx.Infof("saving org")
		org.Save(ctx)
	} else {
		ctx.Infof("updating org")
		org.Update(ctx, key)
	}

	page.SavedOrg = true
	page.Org2Edit = org
	page.Org2EditKey = key
	renderTemplate(w, "save", page)
}
// OrgEditHandler shows the edit form for the organization whose datastore
// key arrives in the "id" query parameter.
func OrgEditHandler(w http.ResponseWriter, r *http.Request) {
	currentUser := UserLookup(w, r)
	page, _ := NewPage(&currentUser)
	ctx := appengine.NewContext(r)
	page.Org2EditKey = r.FormValue("id")
	page.Org2Edit = GetOrganizationByKey(ctx, page.Org2EditKey)
	renderTemplate(w, "editorg", page)
}
// EventsHandler lists all active events across the user's organizations,
// with due times rendered in each organization's local time zone.
func EventsHandler(w http.ResponseWriter, r *http.Request) {
	currentUser := UserLookup(w, r)
	page, _ := NewPage(&currentUser)
	page.Events = map[string]Event{}
	ctx := appengine.NewContext(r)
	for _, org := range currentUser.Orgs {
		loc, _ := time.LoadLocation(org.TimeZone)
		for key, evt := range org.GetEvents(ctx, true) {
			evt.Due = evt.Due.In(loc)
			evt.DueFormatted = evt.Due.Format("01/02/2006 3:04pm")
			page.Events[key] = evt
		}
	}
	renderTemplate(w, "events", page)
}
// OrgsHandler lists the user's organizations together with their member
// rosters.
func OrgsHandler(w http.ResponseWriter, r *http.Request) {
	currentUser := UserLookup(w, r)
	page, _ := NewPage(&currentUser)
	ctx := appengine.NewContext(r)
	orgs := map[string]Organization{}
	for key, org := range currentUser.Orgs {
		org.Members = org.GetMembers(ctx)
		orgs[key] = org
	}
	page.Organizations = orgs
	renderTemplate(w, "organizations", page)
}
// MembersHandler lists every member across all of the user's
// organizations, keyed by datastore key.
func MembersHandler(w http.ResponseWriter, r *http.Request) {
	currentUser := UserLookup(w, r)
	page, _ := NewPage(&currentUser)
	page.Members = map[string]Member{}
	ctx := appengine.NewContext(r)
	for _, org := range currentUser.Orgs {
		for key, member := range org.GetMembers(ctx) {
			page.Members[key] = member
		}
	}
	renderTemplate(w, "members", page)
}
// NewMemberHandler renders the member-creation form, offering the
// caller's organizations sorted by name.
func NewMemberHandler(w http.ResponseWriter, r *http.Request) {
	currentUser := UserLookup(w, r)
	page, _ := NewPage(&currentUser)
	for _, org := range currentUser.Orgs {
		page.Orgs = append(page.Orgs, org.Name)
	}
	sort.Strings(page.Orgs)
	renderTemplate(w, "new-member", page)
}
// MemberEditHandler renders the edit form for the member named by the
// "id" query parameter.
//
// Web-user records are protected: only the member themselves or a
// superuser may edit a member with WebUser set. The form is also offered
// any of the caller's organizations the member does not already belong to.
func MemberEditHandler(w http.ResponseWriter, r *http.Request) {
	u := UserLookup(w, r)
	p, _ := NewPage(&u)
	c := appengine.NewContext(r)
	var ok bool
	p.Member2EditKey = r.FormValue("id")
	ok, p.Member2Edit = GetMemberByKey(c, p.Member2EditKey)
	// Protect web users
	if ok && p.Member2Edit.WebUser {
		if u.Meta.Email != p.Member2Edit.Email && u.SuperUser == false {
			ok = false
		}
	}
	if ok {
		uorgs := GetOrganizationsByUser(c, u.Meta.Email)
		for _, uorg := range uorgs {
			// Offer only organizations the member is not yet part of.
			missing := true
			for _, porg := range p.Member2Edit.Orgs {
				if uorg.Name == porg {
					missing = false
					break
				}
			}
			if missing == true {
				p.Orgs = append(p.Orgs, uorg.Name)
			}
		}
		sort.Strings(p.Orgs)
		renderTemplate(w, "editmember", p)
	} else {
		p.Error = "Member not found or access denied."
		renderTemplate(w, "error", p)
	}
}
// MemberSaveHandler creates or updates a Member from the posted form and
// renders the "save" confirmation page.
//
// A member must belong to at least one organization unless it is a web
// user; only superusers may grant web-user status.
func MemberSaveHandler(w http.ResponseWriter, r *http.Request) {
	u := UserLookup(w, r)
	p, _ := NewPage(&u)
	c := appengine.NewContext(r)
	r.ParseForm()
	member := Member{}
	member.Name = r.PostFormValue("name")
	member.Email = r.PostFormValue("email")
	member.Cell = r.PostFormValue("cell")
	member.Carrier = r.PostFormValue("carrier")
	// Derive the carrier's email-to-SMS gateway address for text delivery.
	member.TextAddr = GenTextAddr(member.Cell, member.Carrier)
	member.Orgs = r.PostForm["orgs"]
	if r.PostFormValue("emailon") == "on" {
		member.EmailOn = true
	}
	if r.PostFormValue("texton") == "on" {
		member.TextOn = true
	}
	// Only superusers may grant web-user status.
	if u.SuperUser && r.PostFormValue("webuser") == "on" {
		member.WebUser = true
	}
	// Must have or don't save
	if len(r.PostForm["orgs"]) <= 0 && member.WebUser == false {
		p.Error = "Cannot save without an organization."
		renderTemplate(w, "error", p)
		return
	}
	key := r.PostFormValue("key")
	if key == "" {
		c.Infof("saving member")
		_, key = member.Save(c)
	} else {
		c.Infof("updating member")
		member.Update(c, key)
	}
	p.Member2Edit = member
	p.Member2EditKey = key
	p.SavedMember = true
	renderTemplate(w, "save", p)
}
// AdminNotify emails a notification to the event creator via the App
// Engine mail service; delivery failures are logged, not surfaced.
func AdminNotify(c appengine.Context, creator string, subject string, message string) {
	appID := appengine.AppID(c)
	note := &mail.Message{
		Sender:   "orgreminders@" + appID + ".appspotmail.com",
		Subject:  subject,
		HTMLBody: message,
		To:       []string{creator},
	}
	c.Infof("notify (%s): %v", subject, creator)
	if err := mail.Send(c, note); err != nil {
		c.Errorf("Couldn't send email: %v", err)
	}
}
// SendOrgMessage delivers an event reminder to every opted-in member of
// the organization.
//
// t selects the channel: "email" sends to members with EmailOn set,
// "text" sends to members with TextOn set via their carrier's
// email-to-SMS address. Returns true when the message was sent or there
// was nobody to send it to; false when mail.Send failed.
func SendOrgMessage(c appengine.Context, o Organization, e Event, t string) (result bool) {
	var appid = appengine.AppID(c)
	var senderUserName = strings.Replace(o.Name, " ", "_", -1)
	// BUG FIX: the sender address was missing the closing '>', yielding an
	// invalid RFC 5322 mailbox ("Name <user@host.appspotmail.com").
	var sender = fmt.Sprintf("%s Reminders <%s@%s.appspotmail.com>", o.Name, senderUserName, appid)
	members := o.GetMembers(c)
	recipients := []string{}
	for _, m := range members {
		if t == "email" && m.EmailOn {
			recipients = append(recipients, m.Email)
		} else if t == "text" && m.TextOn {
			recipients = append(recipients, m.TextAddr)
		}
	}
	if len(recipients) == 0 {
		c.Infof("No recipients, not sending reminder (" + t + ")")
		result = true
		return
	}
	// get rid of duplicate recipients
	recipients = removeDuplicates(recipients)
	msg := &mail.Message{
		Sender:   sender,
		Bcc:      recipients,
		Subject:  e.Title,
		Body:     e.TextMessage,
		HTMLBody: string(e.EmailMessage),
	}
	c.Infof("notify (%s): %v", e.Title, recipients)
	if err := mail.Send(c, msg); err != nil {
		c.Errorf("Couldn't send email: %v", err)
	} else {
		result = true
	}
	return
}
// CronHandler walks every active event, fires any due notifications, and
// renders the list of events that were actually notified.
func CronHandler(w http.ResponseWriter, r *http.Request) {
	currentUser := UserLookup(w, r)
	page, _ := NewPage(&currentUser)
	page.Events = map[string]Event{}
	ctx := appengine.NewContext(r)
	for key, evt := range GetAllEvents(ctx, true) { // active events only
		if !evt.Notify(ctx, false) {
			continue
		}
		// Notified: show it in the cron report, localized to its org.
		org, _ := GetOrganizationByName(ctx, evt.Orgs[0])
		loc, _ := time.LoadLocation(org.TimeZone)
		evt.Due = evt.Due.In(loc)
		evt.DueFormatted = evt.Due.Format("01/02/2006 3:04pm")
		page.Events[key] = evt
	}
	renderTemplate(w, "cron", page)
}
// removeDuplicates returns a copy of a keeping only the first occurrence
// of each string, preserving the original order.
// (idea from: https://groups.google.com/d/msg/golang-nuts/-pqkICuokio/KqJ0091EzVcJ)
func removeDuplicates(a []string) []string {
	seen := make(map[string]bool, len(a))
	unique := make([]string, 0, len(a))
	for _, s := range a {
		if !seen[s] {
			seen[s] = true
			unique = append(unique, s)
		}
	}
	return unique
}
| LogoutHandler | identifier_name |
orgreminders.go | package orgreminders
import (
"appengine"
"appengine/datastore"
"appengine/mail"
"appengine/user"
"fmt"
"html/template"
"log"
"net/http"
"sort"
"strings"
"time"
)
// App-global variables
var Templates *template.Template // parsed template set, populated in init()
var Duration_Day = 24 * time.Hour
var Duration_Week = 7 * Duration_Day // default organization expiry window
// TemplateFiles lists every template that must parse for the app to serve
// pages; init() aborts handler registration if any is missing.
var TemplateFiles = []string{
	"tmpl/header.html",
	"tmpl/css.html",
	"tmpl/home.html",
	"tmpl/save.html",
	"tmpl/new-event.html",
	"tmpl/new-org.html",
	"tmpl/editorg.html",
	"tmpl/editevent.html",
	"tmpl/events.html",
	"tmpl/organizations.html",
	"tmpl/error.html",
	"tmpl/cron.html",
	"tmpl/new-member.html",
	"tmpl/members.html",
	"tmpl/editmember.html",
}
type Page struct {
Error string
Events map[string]Event
Keys []*datastore.Key
Event2Edit Event
Organizations map[string]Organization
Org2Edit Organization
Org2EditKey string
Location time.Location | SuperUser bool
LoggedIn bool
UserEmail string
Orgs []string
Members map[string]Member
SavedEvent bool
SavedOrg bool
SavedMember bool
Member2Edit Member
Member2EditKey string
ScheduleHTML map[string][]string
}
func NewPage(u *User) (*Page, error) {
var result = Page{}
if u.Meta != nil {
result.LoggedIn = true
result.AllowNewOrg = true
result.UserEmail = u.Meta.Email
if u.SuperUser {
result.SuperUser = true
}
}
return &result, nil
}
func renderTemplate(w http.ResponseWriter, tmpl string, p *Page) {
err := Templates.ExecuteTemplate(w, tmpl+".html", p)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
}
func init() {
templTest, err := template.ParseFiles(TemplateFiles...)
if err != nil {
log.Println("Some (or all) of the required templates are missing, exiting: ", err.Error())
return
}
Templates = templTest
http.HandleFunc("/", DefaultHandler)
http.HandleFunc("/newevent", NewEventHandler)
http.HandleFunc("/neworg", NewOrgHandler)
http.HandleFunc("/events", EventsHandler)
http.HandleFunc("/organizations", OrgsHandler)
http.HandleFunc("/saveevent", EventSaveHandler)
http.HandleFunc("/saveorg", OrgSaveHandler)
http.HandleFunc("/editorg", OrgEditHandler)
http.HandleFunc("/editevent", EventEditHandler)
http.HandleFunc("/cron", CronHandler)
http.HandleFunc("/logout", LogoutHandler)
http.HandleFunc("/newmember", NewMemberHandler)
http.HandleFunc("/savemember", MemberSaveHandler)
http.HandleFunc("/members", MembersHandler)
http.HandleFunc("/editmember", MemberEditHandler)
}
func LogoutHandler(w http.ResponseWriter, r *http.Request) {
c := appengine.NewContext(r)
url, _ := user.LogoutURL(c, "/")
http.Redirect(w, r, url, http.StatusFound)
}
func DefaultHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
title := "home"
renderTemplate(w, title, p)
}
func NewOrgHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
title := "new-org"
renderTemplate(w, title, p)
}
func NewEventHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
title := "new-event"
for _, org := range u.Orgs {
p.Orgs = append(p.Orgs, org.Name)
}
sort.Strings(p.Orgs)
renderTemplate(w, title, p)
}
func EventSaveHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
r.ParseForm()
event := NewEvent()
event.Title = r.PostFormValue("title")
event.EmailMessage = template.HTML(r.PostFormValue("emailmessage"))
event.TextMessage = r.PostFormValue("textmessage")
event.Submitter = *u.Meta
event.Orgs = r.PostForm["orgs"]
if len(event.Orgs) < 1 {
p.Error = "You must choose an organization."
renderTemplate(w, "error", p)
return
}
if r.PostFormValue("sendemail") == "on" {
event.Email = true
}
if r.PostFormValue("sendtext") == "on" {
event.Text = true
}
// save reminder schedule
var remqtys = r.PostForm["remqty[]"]
var remtyps = r.PostForm["remtyp[]"]
for remkey, remval := range remqtys {
var entry = fmt.Sprintf("%s%s", remval, remtyps[remkey])
event.Reminders.Add(entry)
}
o, err := GetOrganizationByName(c, event.Orgs[0])
if err != nil {
c.Infof("Error: %s", err.Error())
p.Error = err.Error()
renderTemplate(w, "error", p)
return
}
location, _ := time.LoadLocation(o.TimeZone)
const longForm = "01/02/2006 3:04pm"
t, timeerr := time.ParseInLocation(longForm, r.PostFormValue("due"), location)
if timeerr != nil {
http.Error(w, "Invalid time string", http.StatusInternalServerError)
return
}
event.Due = t
event.Key = r.PostFormValue("key")
var subject = "Event Saved: "
if event.Key == "" {
_, event.Key = event.Save(c)
} else {
event.Update(c)
subject = "Event Updated: "
}
if r.PostFormValue("oncreate") == "on" {
event.Notify(c, true)
}
event.DueFormatted = event.Due.In(location).Format("01/02/2006 3:04pm")
AdminNotify(c, u.Meta.Email, subject+event.Title, "The following event was just saved: <br><br>"+event.GetHTMLView(c))
p.Event2Edit = event
p.SavedEvent = true
renderTemplate(w, "save", p)
}
func EventEditHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
var ok bool
ok, p.Event2Edit = GetEventByKey(c, r.FormValue("id"))
if ok {
org, _ := GetOrganizationByName(c, p.Event2Edit.Orgs[0])
location, _ := time.LoadLocation(org.TimeZone)
p.Event2Edit.DueFormatted = p.Event2Edit.Due.In(location).Format("01/02/2006 3:04pm")
uorgs := GetOrganizationsByUser(c, u.Meta.Email)
for _, uorg := range uorgs {
missing := true
for _, porg := range p.Event2Edit.Orgs {
if uorg.Name == porg {
missing = false
break
}
}
if missing == true {
p.Orgs = append(p.Orgs, uorg.Name)
}
}
// Extract usable event reminder list
p.ScheduleHTML = p.Event2Edit.Reminders.HTML()
sort.Strings(p.Orgs)
renderTemplate(w, "editevent", p)
} else {
p.Error = "Event not found."
renderTemplate(w, "error", p)
}
}
func OrgSaveHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
org := NewOrganization()
org.Name = r.PostFormValue("name")
org.Description = r.PostFormValue("description")
org.Active = true
org.Expires = time.Now().UTC().Add(Duration_Week)
org.Administrator = strings.Split(r.PostFormValue("admin"), "\r\n")
org.TimeZone = r.PostFormValue("timezone")
key := r.PostFormValue("key")
if key == "" {
c.Infof("saving org")
org.Save(c)
} else {
c.Infof("updating org")
org.Update(c, key)
}
p.SavedOrg = true
p.Org2Edit = org
p.Org2EditKey = key
renderTemplate(w, "save", p)
}
func OrgEditHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
p.Org2EditKey = r.FormValue("id")
p.Org2Edit = GetOrganizationByKey(c, p.Org2EditKey)
renderTemplate(w, "editorg", p)
}
func EventsHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
p.Events = make(map[string]Event)
c := appengine.NewContext(r)
for _, org := range u.Orgs {
events := org.GetEvents(c, true)
location, _ := time.LoadLocation(org.TimeZone)
for indx, event := range events {
event.Due = event.Due.In(location)
event.DueFormatted = event.Due.Format("01/02/2006 3:04pm")
p.Events[indx] = event
}
}
renderTemplate(w, "events", p)
}
func OrgsHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
mapResults := make(map[string]Organization)
for indx, org := range u.Orgs {
org.Members = org.GetMembers(c)
mapResults[indx] = org
}
p.Organizations = mapResults
renderTemplate(w, "organizations", p)
}
func MembersHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
p.Members = make(map[string]Member)
c := appengine.NewContext(r)
for _, org := range u.Orgs {
members := org.GetMembers(c)
for indx, member := range members {
p.Members[indx] = member
}
}
renderTemplate(w, "members", p)
}
func NewMemberHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
title := "new-member"
for _, org := range u.Orgs {
p.Orgs = append(p.Orgs, org.Name)
}
sort.Strings(p.Orgs)
renderTemplate(w, title, p)
}
func MemberEditHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
var ok bool
p.Member2EditKey = r.FormValue("id")
ok, p.Member2Edit = GetMemberByKey(c, p.Member2EditKey)
// Protect web users
if ok && p.Member2Edit.WebUser {
if u.Meta.Email != p.Member2Edit.Email && u.SuperUser == false {
ok = false
}
}
if ok {
uorgs := GetOrganizationsByUser(c, u.Meta.Email)
for _, uorg := range uorgs {
missing := true
for _, porg := range p.Member2Edit.Orgs {
if uorg.Name == porg {
missing = false
break
}
}
if missing == true {
p.Orgs = append(p.Orgs, uorg.Name)
}
}
sort.Strings(p.Orgs)
renderTemplate(w, "editmember", p)
} else {
p.Error = "Member not found or access denied."
renderTemplate(w, "error", p)
}
}
func MemberSaveHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
r.ParseForm()
member := Member{}
member.Name = r.PostFormValue("name")
member.Email = r.PostFormValue("email")
member.Cell = r.PostFormValue("cell")
member.Carrier = r.PostFormValue("carrier")
member.TextAddr = GenTextAddr(member.Cell, member.Carrier)
member.Orgs = r.PostForm["orgs"]
if r.PostFormValue("emailon") == "on" {
member.EmailOn = true
}
if r.PostFormValue("texton") == "on" {
member.TextOn = true
}
if u.SuperUser && r.PostFormValue("webuser") == "on" {
member.WebUser = true
}
// Must have or don't save
if len(r.PostForm["orgs"]) <= 0 && member.WebUser == false {
p.Error = "Cannot save without an organization."
renderTemplate(w, "error", p)
return
}
key := r.PostFormValue("key")
if key == "" {
c.Infof("saving member")
_, key = member.Save(c)
} else {
c.Infof("updating member")
member.Update(c, key)
}
p.Member2Edit = member
p.Member2EditKey = key
p.SavedMember = true
renderTemplate(w, "save", p)
}
func AdminNotify(c appengine.Context, creator string, subject string, message string) {
var appid = appengine.AppID(c)
msg := &mail.Message{
Sender: "orgreminders@" + appid + ".appspotmail.com",
Subject: subject,
HTMLBody: message,
To: []string{creator},
}
c.Infof("notify (%s): %v", subject, creator)
if err := mail.Send(c, msg); err != nil {
c.Errorf("Couldn't send email: %v", err)
}
}
func SendOrgMessage(c appengine.Context, o Organization, e Event, t string) (result bool) {
var appid = appengine.AppID(c)
var senderUserName = strings.Replace(o.Name, " ", "_", -1)
var sender = fmt.Sprintf("%s Reminders <%s@%s.appspotmail.com", o.Name, senderUserName, appid)
members := o.GetMembers(c)
recipients := []string{}
for _, m := range members {
if t == "email" && m.EmailOn {
recipients = append(recipients, m.Email)
} else if t == "text" && m.TextOn {
recipients = append(recipients, m.TextAddr)
}
}
if len(recipients) == 0 {
c.Infof("No recipients, not sending reminder (" + t + ")")
result = true
return
}
// get rid of duplicate recipients
recipients = removeDuplicates(recipients)
msg := &mail.Message{
Sender: sender,
Bcc: recipients,
Subject: e.Title,
Body: e.TextMessage,
HTMLBody: string(e.EmailMessage),
}
c.Infof("notify (%s): %v", e.Title, recipients)
if err := mail.Send(c, msg); err != nil {
c.Errorf("Couldn't send email: %v", err)
} else {
result = true
}
return
}
func CronHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
p.Events = make(map[string]Event)
c := appengine.NewContext(r)
events := GetAllEvents(c, true) // active only
//c.Infof("# events to check for cron: %v", len(events))
for key, event := range events {
//c.Infof("checking event: %s", event.Title)
res := event.Notify(c, false)
if res {
org, _ := GetOrganizationByName(c, event.Orgs[0])
location, _ := time.LoadLocation(org.TimeZone)
event.Due = event.Due.In(location)
event.DueFormatted = event.Due.Format("01/02/2006 3:04pm")
p.Events[key] = event
}
}
renderTemplate(w, "cron", p)
}
// from: https://groups.google.com/d/msg/golang-nuts/-pqkICuokio/KqJ0091EzVcJ
func removeDuplicates(a []string) []string {
result := []string{}
seen := map[string]string{}
for _, val := range a {
if _, ok := seen[val]; !ok {
result = append(result, val)
seen[val] = val
}
}
return result
} | AllowNewOrg bool | random_line_split |
orgreminders.go | package orgreminders
import (
"appengine"
"appengine/datastore"
"appengine/mail"
"appengine/user"
"fmt"
"html/template"
"log"
"net/http"
"sort"
"strings"
"time"
)
// App-global variables
var Templates *template.Template
var Duration_Day = 24 * time.Hour
var Duration_Week = 7 * Duration_Day
var TemplateFiles = []string{
"tmpl/header.html",
"tmpl/css.html",
"tmpl/home.html",
"tmpl/save.html",
"tmpl/new-event.html",
"tmpl/new-org.html",
"tmpl/editorg.html",
"tmpl/editevent.html",
"tmpl/events.html",
"tmpl/organizations.html",
"tmpl/error.html",
"tmpl/cron.html",
"tmpl/new-member.html",
"tmpl/members.html",
"tmpl/editmember.html",
}
type Page struct {
Error string
Events map[string]Event
Keys []*datastore.Key
Event2Edit Event
Organizations map[string]Organization
Org2Edit Organization
Org2EditKey string
Location time.Location
AllowNewOrg bool
SuperUser bool
LoggedIn bool
UserEmail string
Orgs []string
Members map[string]Member
SavedEvent bool
SavedOrg bool
SavedMember bool
Member2Edit Member
Member2EditKey string
ScheduleHTML map[string][]string
}
func NewPage(u *User) (*Page, error) {
var result = Page{}
if u.Meta != nil {
result.LoggedIn = true
result.AllowNewOrg = true
result.UserEmail = u.Meta.Email
if u.SuperUser {
result.SuperUser = true
}
}
return &result, nil
}
func renderTemplate(w http.ResponseWriter, tmpl string, p *Page) {
err := Templates.ExecuteTemplate(w, tmpl+".html", p)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
}
func init() {
templTest, err := template.ParseFiles(TemplateFiles...)
if err != nil {
log.Println("Some (or all) of the required templates are missing, exiting: ", err.Error())
return
}
Templates = templTest
http.HandleFunc("/", DefaultHandler)
http.HandleFunc("/newevent", NewEventHandler)
http.HandleFunc("/neworg", NewOrgHandler)
http.HandleFunc("/events", EventsHandler)
http.HandleFunc("/organizations", OrgsHandler)
http.HandleFunc("/saveevent", EventSaveHandler)
http.HandleFunc("/saveorg", OrgSaveHandler)
http.HandleFunc("/editorg", OrgEditHandler)
http.HandleFunc("/editevent", EventEditHandler)
http.HandleFunc("/cron", CronHandler)
http.HandleFunc("/logout", LogoutHandler)
http.HandleFunc("/newmember", NewMemberHandler)
http.HandleFunc("/savemember", MemberSaveHandler)
http.HandleFunc("/members", MembersHandler)
http.HandleFunc("/editmember", MemberEditHandler)
}
func LogoutHandler(w http.ResponseWriter, r *http.Request) {
c := appengine.NewContext(r)
url, _ := user.LogoutURL(c, "/")
http.Redirect(w, r, url, http.StatusFound)
}
func DefaultHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
title := "home"
renderTemplate(w, title, p)
}
func NewOrgHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
title := "new-org"
renderTemplate(w, title, p)
}
func NewEventHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
title := "new-event"
for _, org := range u.Orgs {
p.Orgs = append(p.Orgs, org.Name)
}
sort.Strings(p.Orgs)
renderTemplate(w, title, p)
}
func EventSaveHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
r.ParseForm()
event := NewEvent()
event.Title = r.PostFormValue("title")
event.EmailMessage = template.HTML(r.PostFormValue("emailmessage"))
event.TextMessage = r.PostFormValue("textmessage")
event.Submitter = *u.Meta
event.Orgs = r.PostForm["orgs"]
if len(event.Orgs) < 1 {
p.Error = "You must choose an organization."
renderTemplate(w, "error", p)
return
}
if r.PostFormValue("sendemail") == "on" {
event.Email = true
}
if r.PostFormValue("sendtext") == "on" {
event.Text = true
}
// save reminder schedule
var remqtys = r.PostForm["remqty[]"]
var remtyps = r.PostForm["remtyp[]"]
for remkey, remval := range remqtys {
var entry = fmt.Sprintf("%s%s", remval, remtyps[remkey])
event.Reminders.Add(entry)
}
o, err := GetOrganizationByName(c, event.Orgs[0])
if err != nil {
c.Infof("Error: %s", err.Error())
p.Error = err.Error()
renderTemplate(w, "error", p)
return
}
location, _ := time.LoadLocation(o.TimeZone)
const longForm = "01/02/2006 3:04pm"
t, timeerr := time.ParseInLocation(longForm, r.PostFormValue("due"), location)
if timeerr != nil {
http.Error(w, "Invalid time string", http.StatusInternalServerError)
return
}
event.Due = t
event.Key = r.PostFormValue("key")
var subject = "Event Saved: "
if event.Key == "" {
_, event.Key = event.Save(c)
} else {
event.Update(c)
subject = "Event Updated: "
}
if r.PostFormValue("oncreate") == "on" {
event.Notify(c, true)
}
event.DueFormatted = event.Due.In(location).Format("01/02/2006 3:04pm")
AdminNotify(c, u.Meta.Email, subject+event.Title, "The following event was just saved: <br><br>"+event.GetHTMLView(c))
p.Event2Edit = event
p.SavedEvent = true
renderTemplate(w, "save", p)
}
func EventEditHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
var ok bool
ok, p.Event2Edit = GetEventByKey(c, r.FormValue("id"))
if ok {
org, _ := GetOrganizationByName(c, p.Event2Edit.Orgs[0])
location, _ := time.LoadLocation(org.TimeZone)
p.Event2Edit.DueFormatted = p.Event2Edit.Due.In(location).Format("01/02/2006 3:04pm")
uorgs := GetOrganizationsByUser(c, u.Meta.Email)
for _, uorg := range uorgs {
missing := true
for _, porg := range p.Event2Edit.Orgs {
if uorg.Name == porg {
missing = false
break
}
}
if missing == true {
p.Orgs = append(p.Orgs, uorg.Name)
}
}
// Extract usable event reminder list
p.ScheduleHTML = p.Event2Edit.Reminders.HTML()
sort.Strings(p.Orgs)
renderTemplate(w, "editevent", p)
} else {
p.Error = "Event not found."
renderTemplate(w, "error", p)
}
}
func OrgSaveHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
org := NewOrganization()
org.Name = r.PostFormValue("name")
org.Description = r.PostFormValue("description")
org.Active = true
org.Expires = time.Now().UTC().Add(Duration_Week)
org.Administrator = strings.Split(r.PostFormValue("admin"), "\r\n")
org.TimeZone = r.PostFormValue("timezone")
key := r.PostFormValue("key")
if key == "" {
c.Infof("saving org")
org.Save(c)
} else {
c.Infof("updating org")
org.Update(c, key)
}
p.SavedOrg = true
p.Org2Edit = org
p.Org2EditKey = key
renderTemplate(w, "save", p)
}
func OrgEditHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
p.Org2EditKey = r.FormValue("id")
p.Org2Edit = GetOrganizationByKey(c, p.Org2EditKey)
renderTemplate(w, "editorg", p)
}
func EventsHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
p.Events = make(map[string]Event)
c := appengine.NewContext(r)
for _, org := range u.Orgs {
events := org.GetEvents(c, true)
location, _ := time.LoadLocation(org.TimeZone)
for indx, event := range events {
event.Due = event.Due.In(location)
event.DueFormatted = event.Due.Format("01/02/2006 3:04pm")
p.Events[indx] = event
}
}
renderTemplate(w, "events", p)
}
func OrgsHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
mapResults := make(map[string]Organization)
for indx, org := range u.Orgs {
org.Members = org.GetMembers(c)
mapResults[indx] = org
}
p.Organizations = mapResults
renderTemplate(w, "organizations", p)
}
func MembersHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
p.Members = make(map[string]Member)
c := appengine.NewContext(r)
for _, org := range u.Orgs {
members := org.GetMembers(c)
for indx, member := range members {
p.Members[indx] = member
}
}
renderTemplate(w, "members", p)
}
func NewMemberHandler(w http.ResponseWriter, r *http.Request) |
func MemberEditHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
var ok bool
p.Member2EditKey = r.FormValue("id")
ok, p.Member2Edit = GetMemberByKey(c, p.Member2EditKey)
// Protect web users
if ok && p.Member2Edit.WebUser {
if u.Meta.Email != p.Member2Edit.Email && u.SuperUser == false {
ok = false
}
}
if ok {
uorgs := GetOrganizationsByUser(c, u.Meta.Email)
for _, uorg := range uorgs {
missing := true
for _, porg := range p.Member2Edit.Orgs {
if uorg.Name == porg {
missing = false
break
}
}
if missing == true {
p.Orgs = append(p.Orgs, uorg.Name)
}
}
sort.Strings(p.Orgs)
renderTemplate(w, "editmember", p)
} else {
p.Error = "Member not found or access denied."
renderTemplate(w, "error", p)
}
}
func MemberSaveHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
r.ParseForm()
member := Member{}
member.Name = r.PostFormValue("name")
member.Email = r.PostFormValue("email")
member.Cell = r.PostFormValue("cell")
member.Carrier = r.PostFormValue("carrier")
member.TextAddr = GenTextAddr(member.Cell, member.Carrier)
member.Orgs = r.PostForm["orgs"]
if r.PostFormValue("emailon") == "on" {
member.EmailOn = true
}
if r.PostFormValue("texton") == "on" {
member.TextOn = true
}
if u.SuperUser && r.PostFormValue("webuser") == "on" {
member.WebUser = true
}
// Must have or don't save
if len(r.PostForm["orgs"]) <= 0 && member.WebUser == false {
p.Error = "Cannot save without an organization."
renderTemplate(w, "error", p)
return
}
key := r.PostFormValue("key")
if key == "" {
c.Infof("saving member")
_, key = member.Save(c)
} else {
c.Infof("updating member")
member.Update(c, key)
}
p.Member2Edit = member
p.Member2EditKey = key
p.SavedMember = true
renderTemplate(w, "save", p)
}
func AdminNotify(c appengine.Context, creator string, subject string, message string) {
var appid = appengine.AppID(c)
msg := &mail.Message{
Sender: "orgreminders@" + appid + ".appspotmail.com",
Subject: subject,
HTMLBody: message,
To: []string{creator},
}
c.Infof("notify (%s): %v", subject, creator)
if err := mail.Send(c, msg); err != nil {
c.Errorf("Couldn't send email: %v", err)
}
}
func SendOrgMessage(c appengine.Context, o Organization, e Event, t string) (result bool) {
var appid = appengine.AppID(c)
var senderUserName = strings.Replace(o.Name, " ", "_", -1)
var sender = fmt.Sprintf("%s Reminders <%s@%s.appspotmail.com", o.Name, senderUserName, appid)
members := o.GetMembers(c)
recipients := []string{}
for _, m := range members {
if t == "email" && m.EmailOn {
recipients = append(recipients, m.Email)
} else if t == "text" && m.TextOn {
recipients = append(recipients, m.TextAddr)
}
}
if len(recipients) == 0 {
c.Infof("No recipients, not sending reminder (" + t + ")")
result = true
return
}
// get rid of duplicate recipients
recipients = removeDuplicates(recipients)
msg := &mail.Message{
Sender: sender,
Bcc: recipients,
Subject: e.Title,
Body: e.TextMessage,
HTMLBody: string(e.EmailMessage),
}
c.Infof("notify (%s): %v", e.Title, recipients)
if err := mail.Send(c, msg); err != nil {
c.Errorf("Couldn't send email: %v", err)
} else {
result = true
}
return
}
func CronHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
p.Events = make(map[string]Event)
c := appengine.NewContext(r)
events := GetAllEvents(c, true) // active only
//c.Infof("# events to check for cron: %v", len(events))
for key, event := range events {
//c.Infof("checking event: %s", event.Title)
res := event.Notify(c, false)
if res {
org, _ := GetOrganizationByName(c, event.Orgs[0])
location, _ := time.LoadLocation(org.TimeZone)
event.Due = event.Due.In(location)
event.DueFormatted = event.Due.Format("01/02/2006 3:04pm")
p.Events[key] = event
}
}
renderTemplate(w, "cron", p)
}
// from: https://groups.google.com/d/msg/golang-nuts/-pqkICuokio/KqJ0091EzVcJ
func removeDuplicates(a []string) []string {
result := []string{}
seen := map[string]string{}
for _, val := range a {
if _, ok := seen[val]; !ok {
result = append(result, val)
seen[val] = val
}
}
return result
}
| {
u := UserLookup(w, r)
p, _ := NewPage(&u)
title := "new-member"
for _, org := range u.Orgs {
p.Orgs = append(p.Orgs, org.Name)
}
sort.Strings(p.Orgs)
renderTemplate(w, title, p)
} | identifier_body |
orgreminders.go | package orgreminders
import (
"appengine"
"appengine/datastore"
"appengine/mail"
"appengine/user"
"fmt"
"html/template"
"log"
"net/http"
"sort"
"strings"
"time"
)
// App-global variables
var Templates *template.Template
var Duration_Day = 24 * time.Hour
var Duration_Week = 7 * Duration_Day
var TemplateFiles = []string{
"tmpl/header.html",
"tmpl/css.html",
"tmpl/home.html",
"tmpl/save.html",
"tmpl/new-event.html",
"tmpl/new-org.html",
"tmpl/editorg.html",
"tmpl/editevent.html",
"tmpl/events.html",
"tmpl/organizations.html",
"tmpl/error.html",
"tmpl/cron.html",
"tmpl/new-member.html",
"tmpl/members.html",
"tmpl/editmember.html",
}
type Page struct {
Error string
Events map[string]Event
Keys []*datastore.Key
Event2Edit Event
Organizations map[string]Organization
Org2Edit Organization
Org2EditKey string
Location time.Location
AllowNewOrg bool
SuperUser bool
LoggedIn bool
UserEmail string
Orgs []string
Members map[string]Member
SavedEvent bool
SavedOrg bool
SavedMember bool
Member2Edit Member
Member2EditKey string
ScheduleHTML map[string][]string
}
func NewPage(u *User) (*Page, error) {
var result = Page{}
if u.Meta != nil {
result.LoggedIn = true
result.AllowNewOrg = true
result.UserEmail = u.Meta.Email
if u.SuperUser {
result.SuperUser = true
}
}
return &result, nil
}
func renderTemplate(w http.ResponseWriter, tmpl string, p *Page) {
err := Templates.ExecuteTemplate(w, tmpl+".html", p)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
}
func init() {
templTest, err := template.ParseFiles(TemplateFiles...)
if err != nil {
log.Println("Some (or all) of the required templates are missing, exiting: ", err.Error())
return
}
Templates = templTest
http.HandleFunc("/", DefaultHandler)
http.HandleFunc("/newevent", NewEventHandler)
http.HandleFunc("/neworg", NewOrgHandler)
http.HandleFunc("/events", EventsHandler)
http.HandleFunc("/organizations", OrgsHandler)
http.HandleFunc("/saveevent", EventSaveHandler)
http.HandleFunc("/saveorg", OrgSaveHandler)
http.HandleFunc("/editorg", OrgEditHandler)
http.HandleFunc("/editevent", EventEditHandler)
http.HandleFunc("/cron", CronHandler)
http.HandleFunc("/logout", LogoutHandler)
http.HandleFunc("/newmember", NewMemberHandler)
http.HandleFunc("/savemember", MemberSaveHandler)
http.HandleFunc("/members", MembersHandler)
http.HandleFunc("/editmember", MemberEditHandler)
}
func LogoutHandler(w http.ResponseWriter, r *http.Request) {
c := appengine.NewContext(r)
url, _ := user.LogoutURL(c, "/")
http.Redirect(w, r, url, http.StatusFound)
}
func DefaultHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
title := "home"
renderTemplate(w, title, p)
}
func NewOrgHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
title := "new-org"
renderTemplate(w, title, p)
}
func NewEventHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
title := "new-event"
for _, org := range u.Orgs {
p.Orgs = append(p.Orgs, org.Name)
}
sort.Strings(p.Orgs)
renderTemplate(w, title, p)
}
func EventSaveHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
r.ParseForm()
event := NewEvent()
event.Title = r.PostFormValue("title")
event.EmailMessage = template.HTML(r.PostFormValue("emailmessage"))
event.TextMessage = r.PostFormValue("textmessage")
event.Submitter = *u.Meta
event.Orgs = r.PostForm["orgs"]
if len(event.Orgs) < 1 {
p.Error = "You must choose an organization."
renderTemplate(w, "error", p)
return
}
if r.PostFormValue("sendemail") == "on" {
event.Email = true
}
if r.PostFormValue("sendtext") == "on" {
event.Text = true
}
// save reminder schedule
var remqtys = r.PostForm["remqty[]"]
var remtyps = r.PostForm["remtyp[]"]
for remkey, remval := range remqtys {
var entry = fmt.Sprintf("%s%s", remval, remtyps[remkey])
event.Reminders.Add(entry)
}
o, err := GetOrganizationByName(c, event.Orgs[0])
if err != nil {
c.Infof("Error: %s", err.Error())
p.Error = err.Error()
renderTemplate(w, "error", p)
return
}
location, _ := time.LoadLocation(o.TimeZone)
const longForm = "01/02/2006 3:04pm"
t, timeerr := time.ParseInLocation(longForm, r.PostFormValue("due"), location)
if timeerr != nil {
http.Error(w, "Invalid time string", http.StatusInternalServerError)
return
}
event.Due = t
event.Key = r.PostFormValue("key")
var subject = "Event Saved: "
if event.Key == "" {
_, event.Key = event.Save(c)
} else {
event.Update(c)
subject = "Event Updated: "
}
if r.PostFormValue("oncreate") == "on" {
event.Notify(c, true)
}
event.DueFormatted = event.Due.In(location).Format("01/02/2006 3:04pm")
AdminNotify(c, u.Meta.Email, subject+event.Title, "The following event was just saved: <br><br>"+event.GetHTMLView(c))
p.Event2Edit = event
p.SavedEvent = true
renderTemplate(w, "save", p)
}
func EventEditHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
var ok bool
ok, p.Event2Edit = GetEventByKey(c, r.FormValue("id"))
if ok {
org, _ := GetOrganizationByName(c, p.Event2Edit.Orgs[0])
location, _ := time.LoadLocation(org.TimeZone)
p.Event2Edit.DueFormatted = p.Event2Edit.Due.In(location).Format("01/02/2006 3:04pm")
uorgs := GetOrganizationsByUser(c, u.Meta.Email)
for _, uorg := range uorgs {
missing := true
for _, porg := range p.Event2Edit.Orgs {
if uorg.Name == porg {
missing = false
break
}
}
if missing == true {
p.Orgs = append(p.Orgs, uorg.Name)
}
}
// Extract usable event reminder list
p.ScheduleHTML = p.Event2Edit.Reminders.HTML()
sort.Strings(p.Orgs)
renderTemplate(w, "editevent", p)
} else {
p.Error = "Event not found."
renderTemplate(w, "error", p)
}
}
func OrgSaveHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
org := NewOrganization()
org.Name = r.PostFormValue("name")
org.Description = r.PostFormValue("description")
org.Active = true
org.Expires = time.Now().UTC().Add(Duration_Week)
org.Administrator = strings.Split(r.PostFormValue("admin"), "\r\n")
org.TimeZone = r.PostFormValue("timezone")
key := r.PostFormValue("key")
if key == "" {
c.Infof("saving org")
org.Save(c)
} else {
c.Infof("updating org")
org.Update(c, key)
}
p.SavedOrg = true
p.Org2Edit = org
p.Org2EditKey = key
renderTemplate(w, "save", p)
}
func OrgEditHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
p.Org2EditKey = r.FormValue("id")
p.Org2Edit = GetOrganizationByKey(c, p.Org2EditKey)
renderTemplate(w, "editorg", p)
}
func EventsHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
p.Events = make(map[string]Event)
c := appengine.NewContext(r)
for _, org := range u.Orgs {
events := org.GetEvents(c, true)
location, _ := time.LoadLocation(org.TimeZone)
for indx, event := range events {
event.Due = event.Due.In(location)
event.DueFormatted = event.Due.Format("01/02/2006 3:04pm")
p.Events[indx] = event
}
}
renderTemplate(w, "events", p)
}
func OrgsHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
mapResults := make(map[string]Organization)
for indx, org := range u.Orgs {
org.Members = org.GetMembers(c)
mapResults[indx] = org
}
p.Organizations = mapResults
renderTemplate(w, "organizations", p)
}
func MembersHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
p.Members = make(map[string]Member)
c := appengine.NewContext(r)
for _, org := range u.Orgs {
members := org.GetMembers(c)
for indx, member := range members {
p.Members[indx] = member
}
}
renderTemplate(w, "members", p)
}
func NewMemberHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
title := "new-member"
for _, org := range u.Orgs {
p.Orgs = append(p.Orgs, org.Name)
}
sort.Strings(p.Orgs)
renderTemplate(w, title, p)
}
func MemberEditHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
var ok bool
p.Member2EditKey = r.FormValue("id")
ok, p.Member2Edit = GetMemberByKey(c, p.Member2EditKey)
// Protect web users
if ok && p.Member2Edit.WebUser {
if u.Meta.Email != p.Member2Edit.Email && u.SuperUser == false {
ok = false
}
}
if ok {
uorgs := GetOrganizationsByUser(c, u.Meta.Email)
for _, uorg := range uorgs {
missing := true
for _, porg := range p.Member2Edit.Orgs {
if uorg.Name == porg {
missing = false
break
}
}
if missing == true {
p.Orgs = append(p.Orgs, uorg.Name)
}
}
sort.Strings(p.Orgs)
renderTemplate(w, "editmember", p)
} else {
p.Error = "Member not found or access denied."
renderTemplate(w, "error", p)
}
}
func MemberSaveHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
r.ParseForm()
member := Member{}
member.Name = r.PostFormValue("name")
member.Email = r.PostFormValue("email")
member.Cell = r.PostFormValue("cell")
member.Carrier = r.PostFormValue("carrier")
member.TextAddr = GenTextAddr(member.Cell, member.Carrier)
member.Orgs = r.PostForm["orgs"]
if r.PostFormValue("emailon") == "on" {
member.EmailOn = true
}
if r.PostFormValue("texton") == "on" {
member.TextOn = true
}
if u.SuperUser && r.PostFormValue("webuser") == "on" {
member.WebUser = true
}
// Must have or don't save
if len(r.PostForm["orgs"]) <= 0 && member.WebUser == false {
p.Error = "Cannot save without an organization."
renderTemplate(w, "error", p)
return
}
key := r.PostFormValue("key")
if key == "" {
c.Infof("saving member")
_, key = member.Save(c)
} else {
c.Infof("updating member")
member.Update(c, key)
}
p.Member2Edit = member
p.Member2EditKey = key
p.SavedMember = true
renderTemplate(w, "save", p)
}
func AdminNotify(c appengine.Context, creator string, subject string, message string) {
var appid = appengine.AppID(c)
msg := &mail.Message{
Sender: "orgreminders@" + appid + ".appspotmail.com",
Subject: subject,
HTMLBody: message,
To: []string{creator},
}
c.Infof("notify (%s): %v", subject, creator)
if err := mail.Send(c, msg); err != nil {
c.Errorf("Couldn't send email: %v", err)
}
}
func SendOrgMessage(c appengine.Context, o Organization, e Event, t string) (result bool) {
var appid = appengine.AppID(c)
var senderUserName = strings.Replace(o.Name, " ", "_", -1)
var sender = fmt.Sprintf("%s Reminders <%s@%s.appspotmail.com", o.Name, senderUserName, appid)
members := o.GetMembers(c)
recipients := []string{}
for _, m := range members {
if t == "email" && m.EmailOn {
recipients = append(recipients, m.Email)
} else if t == "text" && m.TextOn {
recipients = append(recipients, m.TextAddr)
}
}
if len(recipients) == 0 {
c.Infof("No recipients, not sending reminder (" + t + ")")
result = true
return
}
// get rid of duplicate recipients
recipients = removeDuplicates(recipients)
msg := &mail.Message{
Sender: sender,
Bcc: recipients,
Subject: e.Title,
Body: e.TextMessage,
HTMLBody: string(e.EmailMessage),
}
c.Infof("notify (%s): %v", e.Title, recipients)
if err := mail.Send(c, msg); err != nil {
c.Errorf("Couldn't send email: %v", err)
} else {
result = true
}
return
}
func CronHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
p.Events = make(map[string]Event)
c := appengine.NewContext(r)
events := GetAllEvents(c, true) // active only
//c.Infof("# events to check for cron: %v", len(events))
for key, event := range events {
//c.Infof("checking event: %s", event.Title)
res := event.Notify(c, false)
if res {
org, _ := GetOrganizationByName(c, event.Orgs[0])
location, _ := time.LoadLocation(org.TimeZone)
event.Due = event.Due.In(location)
event.DueFormatted = event.Due.Format("01/02/2006 3:04pm")
p.Events[key] = event
}
}
renderTemplate(w, "cron", p)
}
// from: https://groups.google.com/d/msg/golang-nuts/-pqkICuokio/KqJ0091EzVcJ
func removeDuplicates(a []string) []string {
result := []string{}
seen := map[string]string{}
for _, val := range a |
return result
}
| {
if _, ok := seen[val]; !ok {
result = append(result, val)
seen[val] = val
}
} | conditional_block |
index.ts | import AWS from 'aws-sdk';
//TODO Dev ClaimState will change, need to update after production app push
export type PhotoTuple = [PhotoBuffObj, PhotoBuffObj | false, PhotoBuffObj | false, PhotoBuffObj | false];
export type RiskEntryObj = {
uri: string;
timeStamp: number;
formattedDate: string;
description: string;
s3Key: string;
}
export type PhotoObj = {
key: string;
label: string;
location: 'exterior' | 'interior' | 'misc';
diagram: string;
has: boolean;
required: boolean;
entries: RiskEntryObj[];
}
export type PhotoState = {
[key: string]: PhotoObj | }
export type PhotoBuffObj = {
label: string;
description: string;
formattedDate: string;
base64Str: any;
}
export type SignatureObj = {
uri: string;
timeStamp: number;
formattedDate: string;
s3Key: string;
}
export type Signatures = {
nonWaiverInsuredSignature: SignatureObj;
nonWaiverAdjusterSignature: SignatureObj;
advPaymentSignatureInsured: SignatureObj;
advPaymentSignatureAdjuster: SignatureObj;
handbookInsuredSignature: SignatureObj;
handbookAdjusterSignature: SignatureObj;
authorizedRepSignature: SignatureObj;
initialsConfirmAddress: SignatureObj;
initialsConfirmMortgagee: SignatureObj;
}
export type PriorLossObj = {
id: string;
timestamp: number;
formattedDate: string;
insuredAtLoss: boolean;
repairs: boolean;
lossAmount: string;
}
export type CertificationObj = {
state: string;
licenseNumber: string;
expDate: string
}
export type PDFObject = {
name: string;
uri: string;
s3Key: string;
docStatus: string;
}
export type UnderwritingState = {
"insuredFirstName": string;
"insuredLastName": string;
"mortgageeName": string;
"adjusterFullName": string;
"claimType": string;
"lossType": string;
"lossDate": number;
"occupancy": string;
"typeOfBuilding": string;
"hasBasement": boolean;
"buildingElevated": boolean;
"residencyType": string;
"floorCount": string;
"floorsOccupiedByInsured": string;
"lossStreet1": string;
"lossStreet2": string;
"lossCity": string;
"lossStateName": string;
"lossZip": string;
}
export type PDFState = {
underwritingReport: PDFObject;
floodLossQuestionnaire: PDFObject;
inspectionReport: PDFObject;
preliminaryReport: PDFObject;
prelimDiagrams: PDFObject;
prelimPhotos: PDFObject;
advancePaymentRequest: PDFObject;
handbookSignature: PDFObject;
surveyorRequest: PDFObject;
cpaRequest: PDFObject;
prelimDamageAssessment: PDFObject;
nonWaiverAgreement: PDFObject;
engineerRequest: PDFObject;
salvorRequest: PDFObject;
}
export type ClaimState = {
version: number;
sha1: string;
versionsList: number[];
claimid: string;
claimNumber: string;
policyNumber: string;
firmDate: number;
postFirm: number;
policyStartDate: number;
policyEndDate: number;
subcatNumber: string;
RNFSPathPrefix: string;
floodControlNumber: string;
adjusterId: string;
adjusterFullName: string;
adjusterPhoneMobile: string;
adjusterPhoneWork: string;
insuredFirstName: string;
insuredLastName: string;
insuredEmail: string;
insuredWorkPhone: string;
insuredPreferredPhone: string;
company: string;
lossType: string;
lossDate: number;
claimType: string;
mortgageVerified: boolean;
ercv: number;
acv: number;
carrier: string;
claimStatus: string;
constructionDate: number;
inspectionDate: number;
coverageA: number;
coverageB: number;
deductibleA: string;
deductibleB: string;
nonWaiver: boolean;
nonWaiverDescription: string;
nonWaiverDay: string;
nonWaiverMonth: string;
nonWaiverYear: string;
occupancy: string;
residencyType: string;
insuredNameCorrect: boolean;
updatedNameReason: string;
insuredPresent: boolean;
insuredIsRepresented: boolean;
insuredRepresentativeName: string;
insuredRepresentativeAddress: string;
insuredRepresentativePhone: string;
hasDetachedGarage: boolean;
hasAppurtenantStructure: boolean;
typeOfBuilding: string;
mobileHomeMake: string;
mobileHomeModel: string;
mobileHomeSerial: string;
foundationStructure: string;
foundationPilesType: string;
otherPilesMaterial: string;
foundationPiersType: string[];
otherPierMaterial: string;
foundationWallsType: string[];
otherWallMaterial: string;
exteriorWallStructure: string[];
otherWallStructure: string;
exteriorWallSurfaceTreatment: string[];
otherExteriorSurfaceTreatment: string;
isUnderConstruction: string;
foundationAreaEnclosure: string;
hasBasement: boolean;
basementType: string;
basementHeightInches: string;
determineElevationGrades: boolean;
buildingElevated: boolean;
priorConditionOfBuilding: string;
exteriorElevationPhotos: string[];
wasThereFlooding: boolean;
floodWaterType: string;
isFloodWaterTypeSewage: boolean;
causeOfLoss: string[];
floodCharacteristics: string[];
floodAssociatedWithFloodControl: boolean;
floodAssociatedDesc: string;
dateWaterEntered: number;
dateWaterReceded: number;
timeFlooded: {
days: number;
hours: number;
minutes: number;
};
otherThanNaturalCauseContribute: boolean;
otherThanNaturalDesc: string;
waterHeightMainBuildingExtInches: string;
waterHeightDetachedGarageExtInches: string;
waterHeightMainBuildingIntInches: string;
waterHeightDetachedGarageIntInches: string;
nearestBodyOfWater: string;
distanceFromBodyOfWaterFeet: string;
floorCount: string;
isSplitLevel: boolean;
floorsOccupiedByInsured: string;
basementFloodproofed: boolean;
priorConditionOfContents: string;
contentsClassification: string;
contentsLocated: string;
hasTitleVerified: boolean;
sourceOfVerification: string;
hasAuthorizedRepresentative: boolean;
authorizedRepName: string;
authorizedRepEmail: string;
insuredHiredMitigationContractor: boolean;
mitigationContactName: string;
mitigationContactAddress: string;
mitigationContactPhone: string;
hasOtherInvolvedParties: boolean;
otherInvolvedPartiesDesc: string;
GPTypeOfBuilding: string;
GPTypeOfBuildingOtherDesc: string;
GPMultipleBuildings: boolean;
GPVerifiedBuilding: boolean;
GPOtherInsurance: string[];
GPExcessPolicyLimits: string;
GPLiabilityPolicyLimits: string;
GPDamageDesc: string;
GPHasLease: boolean;
GPHasDocOfOwnership: boolean;
GPDocsListOwnership: boolean;
GPDocsTiedOwnership: boolean;
GPPersonalProperty: boolean;
GPStockHasBoxesOpen: boolean;
GPStockVerifiedDamage: boolean;
GPStockHasPhysicalDamage: boolean;
GPMerchHasBoxesOpen: boolean;
GPMerchVerifiedDamage: boolean;
GPMerchHasPhysicalDamage: boolean;
GPAuthorizedRep: string;
GPHasDocAuthorizingRep: boolean;
GPAccessToInvoices: string;
GPAccessToRepairReceipts: string;
GPAccessToStockInventoryRecords: string;
GPInspectedWith: string;
GPTenants: string; //maybe string[]
GPAccessRep: string;
GPOwnerOfBuilding: string;
GPBusinessOwner: string;
GPContentsOwner: string;
reservesBuildingReserve: number;
reservesContentsReserve: number;
coverageVerifiedFrom: string;
emergencyOrRegulaFloodProgram: string;
advPaymentRequest: string;
advPaymentRequestNoReason: string;
advPaymentRequestOtherReason: string;
advPaymentRequestBuildingValue: number;
advPaymentRequestContentsValue: number;
selectiveAPRContentsValue: number;
isOwnerOfProperty: boolean;
propertyOwnerName: string;
propertyOwnerAddress: string;
propertyOwnerDBA: string;
hasMajorImprovements: boolean;
improvementsDetails: string;
improvementsValue: string;
isCondoUnit: boolean;
ownerHasCondoDocs: boolean;
acknowledgeRecentAppraisal: boolean;
isCurrentAddress: boolean;
principleStreet1: string;
principleStreet2: string;
principleCity: string;
principleStateName: string;
principleZip: string;
lossStreet1: string;
lossStreet2: string;
lossCity: string;
lossStateName: string;
lossZip: string;
mortgagePaidOff: boolean;
mortgagePaidOffYear: string;
payOffLetter: string;
excessFloodCoverageForBuilding: boolean;
excessFloodCoverageForBuildingCarrier: string;
excessFloodCoverageForContents: boolean;
excessFloodCoverageForContentsCarrier: string;
hasPriorLoss: boolean;
priorLossArr: PriorLossObj[];
usingAGeneralContractor: boolean;
generalContractorName: string;
generalContractorAddress: string;
generalContractorLocation: string;
generalContractorPhone: string;
generalContractorLicense: string;
hasOtherInsurance: boolean;
nameOfOtherInsurance: string;
otherProvidesFloodCoverage: boolean;
acknowledgeOtherInsurance: boolean;
acknowledgeVerifyMortgage: boolean;
purchaseDate: number;
assignedDate: number;
contactDate: number;
isCurrentMortgagee: boolean;
mortgageeName: string
mortgageeAddress: string;
isARentalProperty: boolean;
rentalContentOwnership: boolean;
riskPurchased: number; //date
erosionFound: boolean;
erosionDescription: string;
insuredClaimingBuildingItemsNotCovered: boolean;
insuredClaimingBuildingItemsNotCoveredDesc: string;
insuredClaimingBuildingContentsNotCovered: boolean;
insuredClaimingBuildingContentsNotCoveredDesc: string;
estDepreciationPercent: number;
depActualCashValue: string;
depValue: string;
insuredClaimingStructuralDamage: boolean;
insuredClaimingStructuralDamageDesc: string;
adjusterAgreesStructuralDamage: boolean;
adjusterAgreesStructuralDamageDesc: string;
hasSalvageValueDesc: string;
accountantRequired: boolean;
accountantRequiredDesc: string;
incorrectConstructionDetails: boolean;
incorrectConstructionDetailsDesc: string;
policyDiscrepancies: boolean;
policyDiscrepanciesDesc: string;
adjusterNotedIssues: boolean;
adjusterNotedIssuesDesc: string;
inspectionAssignmentDesc: string;
originOfFloodDesc: string;
inspectionScopeDesc: string;
needsCPA: boolean;
needsSalvor: boolean;
needsSurveyor: boolean;
needsEngineer: boolean;
certifications: CertificationObj[];
signatures: Signatures;
photos: PhotoState;
pdfs: PDFState;
docStatuses: {approved: number; pending: number; rejected: number}
underwriting: UnderwritingState;
underwritingDiffList: Array<keyof UnderwritingState>;
newUnderwritingPDFSent: boolean;
formattedDates: {
firmDate: string;
policyStartDate: string;
policyEndDate: string;
lossDate: string;
constructionDate: string;
inspectionDate: string;
dateWaterEntered: string;
dateWaterReceded: string;
timeWaterEntered: {
time: string;
meridian: string;
};
timeWaterReceded: {
time: string;
meridian: string;
};
purchaseDate: string;
assignedDate: string;
contactDate: string;
riskPurchased: string
}
} | random_line_split | |
jsonast.go | /*
* Copyright (c) Microsoft Corporation.
* Licensed under the MIT license.
*/
package jsonast
import (
"context"
"fmt"
"net/url"
"regexp"
"strings"
"k8s.io/klog/v2"
"github.com/Azure/k8s-infra/hack/generator/pkg/astmodel"
"github.com/devigned/tab"
"github.com/xeipuuv/gojsonschema"
)
type (
// SchemaType defines the type of JSON schema node we are currently processing
SchemaType string
// TypeHandler is a standard delegate used for walking the schema tree.
// Note that it is permissible for a TypeHandler to return `nil, nil`, which indicates that
// there is no type to be included in the output.
TypeHandler func(ctx context.Context, scanner *SchemaScanner, schema *gojsonschema.SubSchema) (astmodel.Type, error)
// UnknownSchemaError is used when we find a JSON schema node that we don't know how to handle
UnknownSchemaError struct {
Schema *gojsonschema.SubSchema
Filters []string
}
// A BuilderOption is used to provide custom configuration for our scanner
BuilderOption func(scanner *SchemaScanner) error
// A SchemaScanner is used to scan a JSON Schema extracting and collecting type definitions
SchemaScanner struct {
definitions map[astmodel.TypeName]astmodel.TypeDefiner
TypeHandlers map[SchemaType]TypeHandler
Filters []string
idFactory astmodel.IdentifierFactory
}
)
// findTypeDefinition looks to see if we have seen the specified definition before, returning its definition if we have.
func (scanner *SchemaScanner) findTypeDefinition(name *astmodel.TypeName) (astmodel.TypeDefiner, bool) {
result, ok := scanner.definitions[*name]
return result, ok
}
// addTypeDefinition adds a type definition to emit later
func (scanner *SchemaScanner) addTypeDefinition(def astmodel.TypeDefiner) {
scanner.definitions[*def.Name()] = def
}
// addEmptyTypeDefinition adds a placeholder definition; it should always be replaced later
func (scanner *SchemaScanner) addEmptyTypeDefinition(name *astmodel.TypeName) {
scanner.definitions[*name] = nil
}
// removeTypeDefinition removes a type definition
func (scanner *SchemaScanner) removeTypeDefinition(name *astmodel.TypeName) {
delete(scanner.definitions, *name)
}
// Definitions for different kinds of JSON schema
const (
AnyOf SchemaType = "anyOf"
AllOf SchemaType = "allOf"
OneOf SchemaType = "oneOf"
Ref SchemaType = "ref"
Array SchemaType = "array"
Bool SchemaType = "boolean"
Int SchemaType = "integer"
Number SchemaType = "number"
Object SchemaType = "object"
String SchemaType = "string"
Enum SchemaType = "enum"
Unknown SchemaType = "unknown"
expressionFragment = "/definitions/expression"
)
func (use *UnknownSchemaError) Error() string {
if use.Schema == nil || use.Schema.ID == nil {
return fmt.Sprint("unable to determine schema type for nil schema or one without an ID")
}
return fmt.Sprintf("unable to determine the schema type for %s", use.Schema.ID.String())
}
// NewSchemaScanner constructs a new scanner, ready for use
func NewSchemaScanner(idFactory astmodel.IdentifierFactory) *SchemaScanner {
return &SchemaScanner{
definitions: make(map[astmodel.TypeName]astmodel.TypeDefiner),
TypeHandlers: DefaultTypeHandlers(),
idFactory: idFactory,
}
}
// AddTypeHandler will override a default type handler for a given SchemaType. This allows for a consumer to customize
// AST generation.
func (scanner *SchemaScanner) AddTypeHandler(schemaType SchemaType, handler TypeHandler) {
scanner.TypeHandlers[schemaType] = handler
}
// RunHandler triggers the appropriate handler for the specified schemaType
func (scanner *SchemaScanner) RunHandler(ctx context.Context, schemaType SchemaType, schema *gojsonschema.SubSchema) (astmodel.Type, error) {
handler := scanner.TypeHandlers[schemaType]
return handler(ctx, scanner, schema)
}
// RunHandlerForSchema inspects the passed schema to identify what kind it is, then runs the appropriate handler
func (scanner *SchemaScanner) RunHandlerForSchema(ctx context.Context, schema *gojsonschema.SubSchema) (astmodel.Type, error) {
schemaType, err := getSubSchemaType(schema)
if err != nil {
return nil, err
}
return scanner.RunHandler(ctx, schemaType, schema)
}
// AddFilters will add a filter (perhaps not currently used?)
func (scanner *SchemaScanner) AddFilters(filters []string) {
scanner.Filters = append(scanner.Filters, filters...)
}
// GenerateDefinitions takes in the resources section of the Azure deployment template schema and returns golang AST Packages
// containing the types described in the schema which match the {resource_type}/{version} filters provided.
//
// The schema we are working with is something like the following (in yaml for brevity):
//
// resources:
// items:
// oneOf:
// allOf:
// $ref: {{ base resource schema for ARM }}
// oneOf:
// - ARM resources
// oneOf:
// allOf:
// $ref: {{ base resource for external resources, think SendGrid }}
// oneOf:
// - External ARM resources
// oneOf:
// allOf:
// $ref: {{ base resource for ARM specific stuff like locks, deployments, etc }}
// oneOf:
// - ARM specific resources. I'm not 100% sure why...
//
// allOf acts like composition which composites each schema from the child oneOf with the base reference from allOf.
func (scanner *SchemaScanner) | (ctx context.Context, schema *gojsonschema.SubSchema, opts ...BuilderOption) ([]astmodel.TypeDefiner, error) {
ctx, span := tab.StartSpan(ctx, "GenerateDefinitions")
defer span.End()
for _, opt := range opts {
if err := opt(scanner); err != nil {
return nil, err
}
}
// get initial topic from ID and Title:
url := schema.ID.GetUrl()
if schema.Title == nil {
return nil, fmt.Errorf("Given schema has no Title")
}
rootName := *schema.Title
rootGroup, err := groupOf(url)
if err != nil {
return nil, fmt.Errorf("Unable to extract group for schema: %w", err)
}
rootVersion, err := versionOf(url)
if err != nil {
return nil, fmt.Errorf("Unable to extract version for schema: %w", err)
}
rootPackage := astmodel.NewLocalPackageReference(
scanner.idFactory.CreateGroupName(rootGroup),
scanner.idFactory.CreatePackageNameFromVersion(rootVersion))
rootTypeName := astmodel.NewTypeName(*rootPackage, rootName)
_, err = generateDefinitionsFor(ctx, scanner, rootTypeName, false, url, schema)
if err != nil {
return nil, err
}
// produce the results
var defs []astmodel.TypeDefiner
for _, def := range scanner.definitions {
defs = append(defs, def)
}
return defs, nil
}
// DefaultTypeHandlers will create a default map of JSONType to AST transformers
func DefaultTypeHandlers() map[SchemaType]TypeHandler {
return map[SchemaType]TypeHandler{
Array: arrayHandler,
OneOf: oneOfHandler,
AnyOf: anyOfHandler,
AllOf: allOfHandler,
Ref: refHandler,
Object: objectHandler,
Enum: enumHandler,
String: fixedTypeHandler(astmodel.StringType, "string"),
Int: fixedTypeHandler(astmodel.IntType, "int"),
Number: fixedTypeHandler(astmodel.FloatType, "number"),
Bool: fixedTypeHandler(astmodel.BoolType, "bool"),
}
}
func enumHandler(ctx context.Context, scanner *SchemaScanner, schema *gojsonschema.SubSchema) (astmodel.Type, error) {
_, span := tab.StartSpan(ctx, "enumHandler")
defer span.End()
// Default to a string base type
baseType := astmodel.StringType
for _, t := range []SchemaType{Bool, Int, Number, String} {
if schema.Types.Contains(string(t)) {
bt, err := getPrimitiveType(t)
if err != nil {
return nil, err
}
baseType = bt
}
}
var values []astmodel.EnumValue
for _, v := range schema.Enum {
id := scanner.idFactory.CreateIdentifier(v, astmodel.Exported)
values = append(values, astmodel.EnumValue{Identifier: id, Value: v})
}
enumType := astmodel.NewEnumType(baseType, values)
return enumType, nil
}
func fixedTypeHandler(typeToReturn astmodel.Type, handlerName string) TypeHandler {
return func(ctx context.Context, scanner *SchemaScanner, schema *gojsonschema.SubSchema) (astmodel.Type, error) {
_, span := tab.StartSpan(ctx, handlerName+"Handler")
defer span.End()
return typeToReturn, nil
}
}
func objectHandler(ctx context.Context, scanner *SchemaScanner, schema *gojsonschema.SubSchema) (astmodel.Type, error) {
ctx, span := tab.StartSpan(ctx, "objectHandler")
defer span.End()
fields, err := getFields(ctx, scanner, schema)
if err != nil {
return nil, err
}
// if we _only_ have an 'additionalProperties' field, then we are making
// a dictionary-like type, and we won't generate a struct; instead, we
// will just use the 'additionalProperties' type directly
if len(fields) == 1 && fields[0].FieldName() == "additionalProperties" {
return fields[0].FieldType(), nil
}
structDefinition := astmodel.NewStructType(fields...)
return structDefinition, nil
}
func generateFieldDefinition(ctx context.Context, scanner *SchemaScanner, prop *gojsonschema.SubSchema) (*astmodel.FieldDefinition, error) {
fieldName := scanner.idFactory.CreateFieldName(prop.Property, astmodel.Exported)
schemaType, err := getSubSchemaType(prop)
if _, ok := err.(*UnknownSchemaError); ok {
// if we don't know the type, we still need to provide the property, we will just provide open interface
field := astmodel.NewFieldDefinition(fieldName, prop.Property, astmodel.AnyType)
return field, nil
}
if err != nil {
return nil, err
}
propType, err := scanner.RunHandler(ctx, schemaType, prop)
if _, ok := err.(*UnknownSchemaError); ok {
// if we don't know the type, we still need to provide the property, we will just provide open interface
field := astmodel.NewFieldDefinition(fieldName, prop.Property, astmodel.AnyType)
return field, nil
}
if err != nil {
return nil, err
}
field := astmodel.NewFieldDefinition(fieldName, prop.Property, propType)
return field, nil
}
func getFields(ctx context.Context, scanner *SchemaScanner, schema *gojsonschema.SubSchema) ([]*astmodel.FieldDefinition, error) {
ctx, span := tab.StartSpan(ctx, "getFields")
defer span.End()
var fields []*astmodel.FieldDefinition
for _, prop := range schema.PropertiesChildren {
fieldDefinition, err := generateFieldDefinition(ctx, scanner, prop)
if err != nil {
return nil, err
}
// add documentation
fieldDefinition = fieldDefinition.WithDescription(prop.Description)
// add validations
isRequired := false
for _, required := range schema.Required {
if prop.Property == required {
isRequired = true
break
}
}
if isRequired {
fieldDefinition = fieldDefinition.MakeRequired()
} else {
fieldDefinition = fieldDefinition.MakeOptional()
}
fields = append(fields, fieldDefinition)
}
// see: https://json-schema.org/understanding-json-schema/reference/object.html#properties
if schema.AdditionalProperties == nil {
// if not specified, any additional properties are allowed (TODO: tell all Azure teams this fact and get them to update their API definitions)
// for now we aren't following the spec 100% as it pollutes the generated code
// only generate this field if there are no other fields:
if len(fields) == 0 {
// TODO: for JSON serialization this needs to be unpacked into "parent"
additionalPropsField := astmodel.NewFieldDefinition("additionalProperties", "additionalProperties", astmodel.NewStringMapType(astmodel.AnyType))
fields = append(fields, additionalPropsField)
}
} else if schema.AdditionalProperties != false {
// otherwise, if not false then it is a type for all additional fields
// TODO: for JSON serialization this needs to be unpacked into "parent"
additionalPropsType, err := scanner.RunHandlerForSchema(ctx, schema.AdditionalProperties.(*gojsonschema.SubSchema))
if err != nil {
return nil, err
}
additionalPropsField := astmodel.NewFieldDefinition(astmodel.FieldName("additionalProperties"), "additionalProperties", astmodel.NewStringMapType(additionalPropsType))
fields = append(fields, additionalPropsField)
}
return fields, nil
}
func refHandler(ctx context.Context, scanner *SchemaScanner, schema *gojsonschema.SubSchema) (astmodel.Type, error) {
ctx, span := tab.StartSpan(ctx, "refHandler")
defer span.End()
url := schema.Ref.GetUrl()
if url.Fragment == expressionFragment {
// skip expressions
return nil, nil
}
// make a new topic based on the ref URL
name, err := objectTypeOf(url)
if err != nil {
return nil, err
}
group, err := groupOf(url)
if err != nil {
return nil, err
}
version, err := versionOf(url)
if err != nil {
return nil, err
}
isResource := isResource(url)
// produce a usable name:
typeName := astmodel.NewTypeName(
*astmodel.NewLocalPackageReference(
scanner.idFactory.CreateGroupName(group),
scanner.idFactory.CreatePackageNameFromVersion(version)),
scanner.idFactory.CreateIdentifier(name, astmodel.Exported))
return generateDefinitionsFor(ctx, scanner, typeName, isResource, url, schema.RefSchema)
}
func generateDefinitionsFor(ctx context.Context, scanner *SchemaScanner, typeName *astmodel.TypeName, isResource bool, url *url.URL, schema *gojsonschema.SubSchema) (astmodel.Type, error) {
schemaType, err := getSubSchemaType(schema)
if err != nil {
return nil, err
}
// see if we already generated something for this ref
if _, ok := scanner.findTypeDefinition(typeName); ok {
return typeName, nil
}
// Add a placeholder to avoid recursive calls
// we will overwrite this later
scanner.addEmptyTypeDefinition(typeName)
result, err := scanner.RunHandler(ctx, schemaType, schema)
if err != nil {
scanner.removeTypeDefinition(typeName) // we weren't able to generate it, remove placeholder
return nil, err
}
// Give the type a name:
definer, otherDefs := result.CreateDefinitions(typeName, scanner.idFactory, isResource)
description := "Generated from: " + url.String()
definer = definer.WithDescription(&description)
// register all definitions
scanner.addTypeDefinition(definer)
for _, otherDef := range otherDefs {
scanner.addTypeDefinition(otherDef)
}
// return the name of the primary type
return definer.Name(), nil
}
func allOfHandler(ctx context.Context, scanner *SchemaScanner, schema *gojsonschema.SubSchema) (astmodel.Type, error) {
ctx, span := tab.StartSpan(ctx, "allOfHandler")
defer span.End()
var types []astmodel.Type
for _, all := range schema.AllOf {
d, err := scanner.RunHandlerForSchema(ctx, all)
if err != nil {
return nil, err
}
if d != nil {
types = appendIfUniqueType(types, d)
}
}
if len(types) == 1 {
return types[0], nil
}
var handleType func(fields []*astmodel.FieldDefinition, st astmodel.Type) ([]*astmodel.FieldDefinition, error)
handleType = func(fields []*astmodel.FieldDefinition, st astmodel.Type) ([]*astmodel.FieldDefinition, error) {
switch concreteType := st.(type) {
case *astmodel.StructType:
// if it's a struct type get all its fields:
fields = append(fields, concreteType.Fields()...)
case *astmodel.TypeName:
// TODO: need to check if this is a reference to a struct type or not
if def, ok := scanner.findTypeDefinition(concreteType); ok {
var err error
fields, err = handleType(fields, def.Type())
if err != nil {
return nil, err
}
} else {
return nil, fmt.Errorf("couldn't find definition for: %v", concreteType)
}
default:
klog.Errorf("Unhandled type in allOf: %#v\n", concreteType)
}
return fields, nil
}
// If there's more than one option, synthesize a type.
var fields []*astmodel.FieldDefinition
for _, d := range types {
// unpack the contents of what we got from subhandlers:
var err error
fields, err = handleType(fields, d)
if err != nil {
return nil, err
}
}
result := astmodel.NewStructType(fields...)
return result, nil
}
func oneOfHandler(ctx context.Context, scanner *SchemaScanner, schema *gojsonschema.SubSchema) (astmodel.Type, error) {
ctx, span := tab.StartSpan(ctx, "oneOfHandler")
defer span.End()
return generateOneOfUnionType(ctx, schema.OneOf, scanner)
}
func generateOneOfUnionType(ctx context.Context, subschemas []*gojsonschema.SubSchema, scanner *SchemaScanner) (astmodel.Type, error) {
// make sure we visit everything before bailing out,
// to get all types generated even if we can't use them
var results []astmodel.Type
for _, one := range subschemas {
result, err := scanner.RunHandlerForSchema(ctx, one)
if err != nil {
return nil, err
}
if result != nil {
results = appendIfUniqueType(results, result)
}
}
if len(results) == 1 {
return results[0], nil
}
// If there's more than one option, synthesize a type.
// Note that this is required because Kubernetes CRDs do not support OneOf the same way
// OpenAPI does, see https://github.com/Azure/k8s-infra/issues/71
var fields []*astmodel.FieldDefinition
fieldDescription := "mutually exclusive with all other properties"
for i, t := range results {
switch concreteType := t.(type) {
case *astmodel.TypeName:
// Just a sanity check that we've already scanned this definition
// TODO: Could remove this?
if _, ok := scanner.findTypeDefinition(concreteType); !ok {
return nil, fmt.Errorf("couldn't find struct for definition: %v", concreteType)
}
fieldName := scanner.idFactory.CreateFieldName(concreteType.Name(), astmodel.Exported)
// JSON name is unimportant here because we will implement the JSON marshaller anyway,
// but we still need it for controller-gen
jsonName := scanner.idFactory.CreateIdentifier(concreteType.Name(), astmodel.NotExported)
field := astmodel.NewFieldDefinition(
fieldName, jsonName, concreteType).MakeOptional().WithDescription(&fieldDescription)
fields = append(fields, field)
case *astmodel.EnumType:
// TODO: This name sucks but what alternative do we have?
name := fmt.Sprintf("enum%v", i)
fieldName := scanner.idFactory.CreateFieldName(name, astmodel.Exported)
// JSON name is unimportant here because we will implement the JSON marshaller anyway,
// but we still need it for controller-gen
jsonName := scanner.idFactory.CreateIdentifier(name, astmodel.NotExported)
field := astmodel.NewFieldDefinition(
fieldName, jsonName, concreteType).MakeOptional().WithDescription(&fieldDescription)
fields = append(fields, field)
case *astmodel.StructType:
// TODO: This name sucks but what alternative do we have?
name := fmt.Sprintf("object%v", i)
fieldName := scanner.idFactory.CreateFieldName(name, astmodel.Exported)
// JSON name is unimportant here because we will implement the JSON marshaller anyway,
// but we still need it for controller-gen
jsonName := scanner.idFactory.CreateIdentifier(name, astmodel.NotExported)
field := astmodel.NewFieldDefinition(
fieldName, jsonName, concreteType).MakeOptional().WithDescription(&fieldDescription)
fields = append(fields, field)
case *astmodel.PrimitiveType:
var primitiveTypeName string
if concreteType == astmodel.AnyType {
primitiveTypeName = "anything"
} else {
primitiveTypeName = concreteType.Name()
}
// TODO: This name sucks but what alternative do we have?
name := fmt.Sprintf("%v%v", primitiveTypeName, i)
fieldName := scanner.idFactory.CreateFieldName(name, astmodel.Exported)
// JSON name is unimportant here because we will implement the JSON marshaller anyway,
// but we still need it for controller-gen
jsonName := scanner.idFactory.CreateIdentifier(name, astmodel.NotExported)
field := astmodel.NewFieldDefinition(
fieldName, jsonName, concreteType).MakeOptional().WithDescription(&fieldDescription)
fields = append(fields, field)
default:
return nil, fmt.Errorf("unexpected oneOf member, type: %T", t)
}
}
structType := astmodel.NewStructType(fields...)
structType = structType.WithFunction(
"MarshalJSON",
astmodel.NewOneOfJSONMarshalFunction(structType, scanner.idFactory))
return structType, nil
}
// anyOfHandler handles the JSON schema 'anyOf' keyword by delegating to the
// oneOf union generation: the anyOf members are passed straight to
// generateOneOfUnionType, so anyOf and oneOf produce identical Go types.
func anyOfHandler(ctx context.Context, scanner *SchemaScanner, schema *gojsonschema.SubSchema) (astmodel.Type, error) {
ctx, span := tab.StartSpan(ctx, "anyOfHandler")
defer span.End()
// See https://github.com/Azure/k8s-infra/issues/111 for details about why this is treated as oneOf
klog.Warningf("Handling anyOf type as if it were oneOf: %v\n", schema.Ref.GetUrl())
return generateOneOfUnionType(ctx, schema.AnyOf, scanner)
}
// arrayHandler generates the Type for a JSON schema 'array' node. An array
// may declare at most one element type; when none is declared we fall back
// to an array of interface{} (AnyType).
func arrayHandler(ctx context.Context, scanner *SchemaScanner, schema *gojsonschema.SubSchema) (astmodel.Type, error) {
ctx, span := tab.StartSpan(ctx, "arrayHandler")
defer span.End()
switch len(schema.ItemsChildren) {
case 0:
// No element type declared, so we must assume interface{}.
klog.Warningf("Interface assumption unproven for %v\n", schema.Ref.GetUrl())
return astmodel.NewArrayType(astmodel.AnyType), nil
case 1:
// Exactly one child type: wrap it up as an array type.
elementType, err := scanner.RunHandlerForSchema(ctx, schema.ItemsChildren[0])
if err != nil {
return nil, err
}
return astmodel.NewArrayType(elementType), nil
default:
return nil, fmt.Errorf("item contains more children than expected: %v", schema.ItemsChildren)
}
}
// getSubSchemaType classifies a schema node into one of our SchemaType
// values, or returns an UnknownSchemaError when no classification applies.
func getSubSchemaType(schema *gojsonschema.SubSchema) (SchemaType, error) {
// Special composite nodes take precedence. The Enum check must come
// before the primitive type checks further down.
if schema.Enum != nil {
return Enum, nil
}
if schema.OneOf != nil {
return OneOf, nil
}
if schema.AllOf != nil {
return AllOf, nil
}
if schema.AnyOf != nil {
return AnyOf, nil
}
if schema.RefSchema != nil {
return Ref, nil
}
if schema.Types.IsTyped() {
for _, candidate := range []SchemaType{Object, String, Number, Int, Bool, Array} {
if schema.Types.Contains(string(candidate)) {
return candidate, nil
}
}
} else if schema.PropertiesChildren != nil {
// Untyped but has properties: treat it as an object.
// TODO: this classification is a bit wrong because type: 'object' can
// be combined with OneOf/AnyOf/etc. still, it works okay for now...
return Object, nil
}
return Unknown, &UnknownSchemaError{Schema: schema}
}
// getPrimitiveType maps a primitive SchemaType to the corresponding astmodel
// primitive. Unmapped names yield AnyType together with an error.
func getPrimitiveType(name SchemaType) (*astmodel.PrimitiveType, error) {
primitives := map[SchemaType]*astmodel.PrimitiveType{
String: astmodel.StringType,
Int:    astmodel.IntType,
Number: astmodel.FloatType,
Bool:   astmodel.BoolType,
}
if primitive, ok := primitives[name]; ok {
return primitive, nil
}
return astmodel.AnyType, fmt.Errorf("%s is not a simple type and no ast.NewIdent can be created", name)
}
// isURLPathSeparator reports whether c is the '/' separator used to split
// URL paths and fragments into segments (see strings.FieldsFunc callers).
func isURLPathSeparator(c rune) bool {
return c == '/'
}
// objectTypeOf extracts the name of an object from the supplied schema URL:
// the final '/'-separated segment of the URL fragment.
// Returns an error if the fragment has no segments (previously this case
// panicked with an index-out-of-range on the empty slice).
func objectTypeOf(url *url.URL) (string, error) {
fragmentParts := strings.FieldsFunc(url.Fragment, isURLPathSeparator)
// Guard: indexing len-1 on an empty slice would panic.
if len(fragmentParts) == 0 {
return "", fmt.Errorf("unexpected URL format (no fragment parts): %v", url)
}
return fragmentParts[len(fragmentParts)-1], nil
}
// groupOf extracts the 'group' (here the .json filename, without its
// extension) of an object from the supplied schema URL.
// Returns an error if the path has no segments (previously this case
// panicked with an index-out-of-range) or the final segment is not a
// .json file.
func groupOf(url *url.URL) (string, error) {
pathParts := strings.FieldsFunc(url.Path, isURLPathSeparator)
// Guard: indexing len-1 on an empty slice would panic.
if len(pathParts) == 0 {
return "", fmt.Errorf("unexpected URL format (empty path): %v", url)
}
file := pathParts[len(pathParts)-1]
if !strings.HasSuffix(file, ".json") {
return "", fmt.Errorf("Unexpected URL format (doesn't point to .json file)")
}
return strings.TrimSuffix(file, ".json"), nil
}
// isResource reports whether the schema URL refers to a resource definition,
// i.e. whether any '/'-separated segment of its fragment is exactly
// "resourceDefinitions".
func isResource(url *url.URL) bool {
for _, segment := range strings.FieldsFunc(url.Fragment, isURLPathSeparator) {
if segment == "resourceDefinitions" {
return true
}
}
return false
}
// versionRegex matches an API version path segment of the form yyyy-mm-dd.
var versionRegex = regexp.MustCompile(`\d{4}-\d{2}-\d{2}`)
// versionOf extracts the API version (a date-shaped path segment) from the
// supplied schema URL. A URL without a version yields "" with no error.
func versionOf(url *url.URL) (string, error) {
for _, segment := range strings.FieldsFunc(url.Path, isURLPathSeparator) {
if versionRegex.MatchString(segment) {
return segment, nil
}
}
// No version found, that's fine
return "", nil
}
// appendIfUniqueType appends item to slice unless an Equals-equal type is
// already present, returning the (possibly extended) slice.
func appendIfUniqueType(slice []astmodel.Type, item astmodel.Type) []astmodel.Type {
for _, existing := range slice {
if existing.Equals(item) {
// Duplicate: return the slice unchanged.
return slice
}
}
return append(slice, item)
}
| GenerateDefinitions | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.