hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c2f11dff72f8364a0ea8758e647d0af627bafc8 | 38,569 | py | Python | tensorflow/python/debug/lib/debug_events_writer_test.py | EricRemmerswaal/tensorflow | 141ff27877579c81a213fa113bd1b474c1749aca | [
"Apache-2.0"
] | 190,993 | 2015-11-09T13:17:30.000Z | 2022-03-31T23:05:27.000Z | tensorflow/python/debug/lib/debug_events_writer_test.py | EricRemmerswaal/tensorflow | 141ff27877579c81a213fa113bd1b474c1749aca | [
"Apache-2.0"
] | 48,461 | 2015-11-09T14:21:11.000Z | 2022-03-31T23:17:33.000Z | tensorflow/python/debug/lib/debug_events_writer_test.py | EricRemmerswaal/tensorflow | 141ff27877579c81a213fa113bd1b474c1749aca | [
"Apache-2.0"
] | 104,981 | 2015-11-09T13:40:17.000Z | 2022-03-31T19:51:54.000Z | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the debug events writer Python class."""
import glob
import json as json_lib
import os
import re
import threading
import time
from absl.testing import parameterized
from tensorflow.core.protobuf import debug_event_pb2
from tensorflow.python.debug.lib import debug_events_reader
from tensorflow.python.debug.lib import debug_events_writer
from tensorflow.python.debug.lib import dumping_callback_test_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.platform import googletest
class DebugEventsWriterTest(dumping_callback_test_lib.DumpingCallbackTestBase,
parameterized.TestCase):
def testMultiThreadedConstructorCallWorks(self):
def init_writer():
debug_events_writer.DebugEventsWriter(self.dump_root, self.tfdbg_run_id)
num_threads = 4
threads = []
for _ in range(num_threads):
thread = threading.Thread(target=init_writer)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
# Verify that there is only one debug event file of each type.
metadata_paths = glob.glob(os.path.join(self.dump_root, "*.metadata"))
self.assertLen(metadata_paths, 1)
source_files_paths = glob.glob(
os.path.join(self.dump_root, "*.source_files"))
self.assertLen(source_files_paths, 1)
stack_frames_paths = glob.glob(
os.path.join(self.dump_root, "*.stack_frames"))
self.assertLen(stack_frames_paths, 1)
graphs_paths = glob.glob(os.path.join(self.dump_root, "*.graphs"))
self.assertLen(graphs_paths, 1)
self._readAndCheckMetadataFile()
def testWriteSourceFilesAndStackFrames(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id)
num_protos = 10
for i in range(num_protos):
source_file = debug_event_pb2.SourceFile()
source_file.file_path = "/home/tf2user/main.py"
source_file.host_name = "machine.cluster"
source_file.lines.append("print(%d)" % i)
writer.WriteSourceFile(source_file)
stack_frame = debug_event_pb2.StackFrameWithId()
stack_frame.id = "stack_%d" % i
stack_frame.file_line_col.file_index = i * 10
writer.WriteStackFrameWithId(stack_frame)
writer.FlushNonExecutionFiles()
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
actuals = list(item.debug_event.source_file
for item in reader.source_files_iterator())
self.assertLen(actuals, num_protos)
for i in range(num_protos):
self.assertEqual(actuals[i].file_path, "/home/tf2user/main.py")
self.assertEqual(actuals[i].host_name, "machine.cluster")
self.assertEqual(actuals[i].lines, ["print(%d)" % i])
actuals = list(item.debug_event.stack_frame_with_id
for item in reader.stack_frames_iterator())
self.assertLen(actuals, num_protos)
for i in range(num_protos):
self.assertEqual(actuals[i].id, "stack_%d" % i)
self.assertEqual(actuals[i].file_line_col.file_index, i * 10)
def testWriteGraphOpCreationAndDebuggedGraphs(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id)
num_op_creations = 10
for i in range(num_op_creations):
graph_op_creation = debug_event_pb2.GraphOpCreation()
graph_op_creation.op_type = "Conv2D"
graph_op_creation.op_name = "Conv2D_%d" % i
writer.WriteGraphOpCreation(graph_op_creation)
debugged_graph = debug_event_pb2.DebuggedGraph()
debugged_graph.graph_id = "deadbeaf"
debugged_graph.graph_name = "MyGraph1"
writer.WriteDebuggedGraph(debugged_graph)
writer.FlushNonExecutionFiles()
reader = debug_events_reader.DebugEventsReader(self.dump_root)
actuals = list(item.debug_event for item in reader.graphs_iterator())
self.assertLen(actuals, num_op_creations + 1)
for i in range(num_op_creations):
self.assertEqual(actuals[i].graph_op_creation.op_type, "Conv2D")
self.assertEqual(actuals[i].graph_op_creation.op_name, "Conv2D_%d" % i)
self.assertEqual(actuals[num_op_creations].debugged_graph.graph_id,
"deadbeaf")
def testConcurrentWritesToNonExecutionFilesWorks(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id)
source_file_state = {"counter": 0, "lock": threading.Lock()}
def writer_source_file():
source_file = debug_event_pb2.SourceFile()
with source_file_state["lock"]:
source_file.file_path = "/home/tf2user/file_%d.py" % source_file_state[
"counter"]
source_file_state["counter"] += 1
writer.WriteSourceFile(source_file)
# More-frequent-than-necessary concurrent flushing is not recommended,
# but tolerated.
writer.FlushNonExecutionFiles()
stack_frame_state = {"counter": 0, "lock": threading.Lock()}
def write_stack_frame():
stack_frame = debug_event_pb2.StackFrameWithId()
with stack_frame_state["lock"]:
stack_frame.id = "stack_frame_%d" % stack_frame_state["counter"]
stack_frame_state["counter"] += 1
writer.WriteStackFrameWithId(stack_frame)
# More-frequent-than-necessary concurrent flushing is not recommended,
# but tolerated.
writer.FlushNonExecutionFiles()
graph_op_state = {"counter": 0, "lock": threading.Lock()}
def write_graph_op_creation():
graph_op_creation = debug_event_pb2.GraphOpCreation()
with graph_op_state["lock"]:
graph_op_creation.op_name = "Op%d" % graph_op_state["counter"]
graph_op_state["counter"] += 1
writer.WriteGraphOpCreation(graph_op_creation)
# More-frequent-than-necessary concurrent flushing is not recommended,
# but tolerated.
writer.FlushNonExecutionFiles()
num_threads = 9
threads = []
for i in range(num_threads):
if i % 3 == 0:
target = writer_source_file
elif i % 3 == 1:
target = write_stack_frame
else:
target = write_graph_op_creation
thread = threading.Thread(target=target)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
# Verify the content of the .source_files file.
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
source_files_iter = reader.source_files_iterator()
actuals = list(item.debug_event.source_file for item in source_files_iter)
file_paths = sorted([actual.file_path for actual in actuals])
self.assertEqual(file_paths, [
"/home/tf2user/file_0.py", "/home/tf2user/file_1.py",
"/home/tf2user/file_2.py"
])
# Verify the content of the .stack_frames file.
actuals = list(item.debug_event.stack_frame_with_id
for item in reader.stack_frames_iterator())
stack_frame_ids = sorted([actual.id for actual in actuals])
self.assertEqual(stack_frame_ids,
["stack_frame_0", "stack_frame_1", "stack_frame_2"])
# Verify the content of the .graphs file.
actuals = list(item.debug_event.graph_op_creation
for item in reader.graphs_iterator())
graph_op_names = sorted([actual.op_name for actual in actuals])
self.assertEqual(graph_op_names, ["Op0", "Op1", "Op2"])
def testWriteAndReadMetadata(self):
t0 = time.time()
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id)
writer.Close()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
self.assertIsInstance(reader.starting_wall_time(), float)
self.assertGreaterEqual(reader.starting_wall_time(), t0)
self.assertEqual(reader.tensorflow_version(), versions.__version__)
self.assertTrue(reader.tfdbg_run_id())
def testWriteExecutionEventsWithCircularBuffer(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id)
num_execution_events = debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE * 2
for i in range(num_execution_events):
execution = debug_event_pb2.Execution()
execution.op_type = "OpType%d" % i
writer.WriteExecution(execution)
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
# Before FlushExecutionFiles() is called. No data should have been written
# to the file.
reader.update()
self.assertFalse(reader.executions())
writer.FlushExecutionFiles()
reader.update()
executions = reader.executions()
for i, execution in enumerate(executions):
self.assertEqual(
execution.op_type,
"OpType%d" % (i + debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE))
def testWriteExecutionEventsWithoutCircularBufferBehavior(self):
# A circular buffer size of 0 abolishes the circular buffer behavior.
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id, 0)
num_execution_events = debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE * 2
for i in range(num_execution_events):
execution = debug_event_pb2.Execution()
execution.op_type = "OpType%d" % i
writer.WriteExecution(execution)
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
executions = reader.executions()
self.assertLen(executions, num_execution_events)
for i, execution in enumerate(executions):
self.assertEqual(execution.op_type, "OpType%d" % i)
def testWriteGraphExecutionTraceEventsWithCircularBuffer(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id)
num_execution_events = debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE * 2
for i in range(num_execution_events):
trace = debug_event_pb2.GraphExecutionTrace()
trace.op_name = "Op%d" % i
writer.WriteGraphExecutionTrace(trace)
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
actuals = list(reader.graph_execution_traces_iterators()[0])
# Before FlushExecutionFiles() is called. No data should have been written
# to the file.
self.assertEmpty(actuals)
writer.FlushExecutionFiles()
actuals = list(item.debug_event.graph_execution_trace
for item in reader.graph_execution_traces_iterators()[0])
self.assertLen(actuals, debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE)
for i in range(debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE):
self.assertEqual(
actuals[i].op_name,
"Op%d" % (i + debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE))
def testWriteGraphExecutionTraceEventsWithoutCircularBufferBehavior(self):
# A circular buffer size of 0 abolishes the circular buffer behavior.
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id, 0)
num_execution_events = debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE * 2
for i in range(num_execution_events):
trace = debug_event_pb2.GraphExecutionTrace()
trace.op_name = "Op%d" % i
writer.WriteGraphExecutionTrace(trace)
writer.FlushExecutionFiles()
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
actuals = list(item.debug_event.graph_execution_trace
for item in reader.graph_execution_traces_iterators()[0])
self.assertLen(actuals, num_execution_events)
for i in range(num_execution_events):
self.assertEqual(actuals[i].op_name, "Op%d" % i)
def testConcurrentWritesToExecutionFiles(self):
circular_buffer_size = 5
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id,
circular_buffer_size)
debugged_graph = debug_event_pb2.DebuggedGraph(graph_id="graph1",
graph_name="graph1")
writer.WriteDebuggedGraph(debugged_graph)
execution_state = {"counter": 0, "lock": threading.Lock()}
def write_execution():
execution = debug_event_pb2.Execution()
with execution_state["lock"]:
execution.op_type = "OpType%d" % execution_state["counter"]
execution_state["counter"] += 1
writer.WriteExecution(execution)
graph_execution_trace_state = {"counter": 0, "lock": threading.Lock()}
def write_graph_execution_trace():
with graph_execution_trace_state["lock"]:
op_name = "Op%d" % graph_execution_trace_state["counter"]
graph_op_creation = debug_event_pb2.GraphOpCreation(
op_type="FooOp", op_name=op_name, graph_id="graph1")
trace = debug_event_pb2.GraphExecutionTrace(
op_name=op_name, tfdbg_context_id="graph1")
graph_execution_trace_state["counter"] += 1
writer.WriteGraphOpCreation(graph_op_creation)
writer.WriteGraphExecutionTrace(trace)
threads = []
for i in range(circular_buffer_size * 4):
if i % 2 == 0:
target = write_execution
else:
target = write_graph_execution_trace
thread = threading.Thread(target=target)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
# Verify the content of the .execution file.
executions = reader.executions()
executed_op_types = [execution.op_type for execution in executions]
self.assertLen(executed_op_types, circular_buffer_size)
self.assertLen(executed_op_types, len(set(executed_op_types)))
# Verify the content of the .graph_execution_traces file.
op_names = [trace.op_name for trace in reader.graph_execution_traces()]
self.assertLen(op_names, circular_buffer_size)
self.assertLen(op_names, len(set(op_names)))
def testConcurrentSourceFileRandomReads(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id)
for i in range(100):
source_file = debug_event_pb2.SourceFile(
host_name="localhost", file_path="/tmp/file_%d.py" % i)
source_file.lines.append("# File %d" % i)
writer.WriteSourceFile(source_file)
writer.FlushNonExecutionFiles()
reader = debug_events_reader.DebugDataReader(self.dump_root)
reader.update()
lines = [None] * 100
def read_job_1():
# Read in the reverse order to enhance randomness of the read access.
for i in range(49, -1, -1):
lines[i] = reader.source_lines("localhost", "/tmp/file_%d.py" % i)
def read_job_2():
for i in range(99, 49, -1):
lines[i] = reader.source_lines("localhost", "/tmp/file_%d.py" % i)
thread_1 = threading.Thread(target=read_job_1)
thread_2 = threading.Thread(target=read_job_2)
thread_1.start()
thread_2.start()
thread_1.join()
thread_2.join()
for i in range(100):
self.assertEqual(lines[i], ["# File %d" % i])
def testConcurrentExecutionUpdateAndRandomRead(self):
circular_buffer_size = -1
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id,
circular_buffer_size)
writer_state = {"counter": 0, "done": False}
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
def write_and_update_job():
while True:
if writer_state["done"]:
break
execution = debug_event_pb2.Execution()
execution.op_type = "OpType%d" % writer_state["counter"]
writer_state["counter"] += 1
writer.WriteExecution(execution)
writer.FlushExecutionFiles()
reader.update()
# On the sub-thread, keep writing and reading new Execution protos.
write_and_update_thread = threading.Thread(target=write_and_update_job)
write_and_update_thread.start()
# On the main thread, do concurrent random read.
while True:
exec_digests = reader.executions(digest=True)
if exec_digests:
exec_0 = reader.read_execution(exec_digests[0])
self.assertEqual(exec_0.op_type, "OpType0")
writer_state["done"] = True
break
else:
time.sleep(0.1)
continue
write_and_update_thread.join()
def testConcurrentExecutionRandomReads(self):
circular_buffer_size = -1
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id,
circular_buffer_size)
for i in range(100):
execution = debug_event_pb2.Execution()
execution.op_type = "OpType%d" % i
writer.WriteExecution(execution)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
reader = debug_events_reader.DebugDataReader(self.dump_root)
reader.update()
executions = [None] * 100
def read_job_1():
execution_digests = reader.executions(digest=True)
# Read in the reverse order to enhance randomness of the read access.
for i in range(49, -1, -1):
execution = reader.read_execution(execution_digests[i])
executions[i] = execution
def read_job_2():
execution_digests = reader.executions(digest=True)
for i in range(99, 49, -1):
execution = reader.read_execution(execution_digests[i])
executions[i] = execution
thread_1 = threading.Thread(target=read_job_1)
thread_2 = threading.Thread(target=read_job_2)
thread_1.start()
thread_2.start()
thread_1.join()
thread_2.join()
for i in range(100):
self.assertEqual(executions[i].op_type, "OpType%d" % i)
def testConcurrentGraphExecutionTraceUpdateAndRandomRead(self):
circular_buffer_size = -1
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id,
circular_buffer_size)
debugged_graph = debug_event_pb2.DebuggedGraph(graph_id="graph1",
graph_name="graph1")
writer.WriteDebuggedGraph(debugged_graph)
writer_state = {"counter": 0, "done": False}
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
def write_and_update_job():
while True:
if writer_state["done"]:
break
op_name = "Op%d" % writer_state["counter"]
graph_op_creation = debug_event_pb2.GraphOpCreation(
op_type="FooOp", op_name=op_name, graph_id="graph1")
writer.WriteGraphOpCreation(graph_op_creation)
trace = debug_event_pb2.GraphExecutionTrace(
op_name=op_name, tfdbg_context_id="graph1")
writer.WriteGraphExecutionTrace(trace)
writer_state["counter"] += 1
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
reader.update()
# On the sub-thread, keep writing and reading new GraphExecutionTraces.
write_and_update_thread = threading.Thread(target=write_and_update_job)
write_and_update_thread.start()
# On the main thread, do concurrent random read.
while True:
digests = reader.graph_execution_traces(digest=True)
if digests:
trace_0 = reader.read_graph_execution_trace(digests[0])
self.assertEqual(trace_0.op_name, "Op0")
writer_state["done"] = True
break
else:
time.sleep(0.1)
continue
write_and_update_thread.join()
def testConcurrentGraphExecutionTraceRandomReads(self):
circular_buffer_size = -1
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id,
circular_buffer_size)
debugged_graph = debug_event_pb2.DebuggedGraph(graph_id="graph1",
graph_name="graph1")
writer.WriteDebuggedGraph(debugged_graph)
for i in range(100):
op_name = "Op%d" % i
graph_op_creation = debug_event_pb2.GraphOpCreation(
op_type="FooOp", op_name=op_name, graph_id="graph1")
writer.WriteGraphOpCreation(graph_op_creation)
trace = debug_event_pb2.GraphExecutionTrace(
op_name=op_name, tfdbg_context_id="graph1")
writer.WriteGraphExecutionTrace(trace)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
reader = debug_events_reader.DebugDataReader(self.dump_root)
reader.update()
traces = [None] * 100
def read_job_1():
digests = reader.graph_execution_traces(digest=True)
for i in range(49, -1, -1):
traces[i] = reader.read_graph_execution_trace(digests[i])
def read_job_2():
digests = reader.graph_execution_traces(digest=True)
for i in range(99, 49, -1):
traces[i] = reader.read_graph_execution_trace(digests[i])
thread_1 = threading.Thread(target=read_job_1)
thread_2 = threading.Thread(target=read_job_2)
thread_1.start()
thread_2.start()
thread_1.join()
thread_2.join()
for i in range(100):
self.assertEqual(traces[i].op_name, "Op%d" % i)
@parameterized.named_parameters(
("Begin1End3", 1, 3, 1, 3),
("Begin0End3", 0, 3, 0, 3),
("Begin0EndNeg1", 0, -1, 0, 4),
("BeginNoneEnd3", None, 3, 0, 3),
("Begin2EndNone", 2, None, 2, 5),
("BeginNoneEndNone", None, None, 0, 5),
)
def testRangeReadingExecutions(self, begin, end, expected_begin,
expected_end):
writer = debug_events_writer.DebugEventsWriter(
self.dump_root, self.tfdbg_run_id, circular_buffer_size=-1)
for i in range(5):
execution = debug_event_pb2.Execution(op_type="OpType%d" % i)
writer.WriteExecution(execution)
writer.FlushExecutionFiles()
writer.Close()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
executions = reader.executions(begin=begin, end=end)
self.assertLen(executions, expected_end - expected_begin)
self.assertEqual(executions[0].op_type, "OpType%d" % expected_begin)
self.assertEqual(executions[-1].op_type, "OpType%d" % (expected_end - 1))
@parameterized.named_parameters(
("Begin1End3", 1, 3, 1, 3),
("Begin0End3", 0, 3, 0, 3),
("Begin0EndNeg1", 0, -1, 0, 4),
("BeginNoneEnd3", None, 3, 0, 3),
("Begin2EndNone", 2, None, 2, 5),
("BeginNoneEndNone", None, None, 0, 5),
)
def testRangeReadingGraphExecutionTraces(self, begin, end, expected_begin,
expected_end):
writer = debug_events_writer.DebugEventsWriter(
self.dump_root, self.tfdbg_run_id, circular_buffer_size=-1)
debugged_graph = debug_event_pb2.DebuggedGraph(
graph_id="graph1", graph_name="graph1")
writer.WriteDebuggedGraph(debugged_graph)
for i in range(5):
op_name = "Op_%d" % i
graph_op_creation = debug_event_pb2.GraphOpCreation(
op_name=op_name, graph_id="graph1")
writer.WriteGraphOpCreation(graph_op_creation)
trace = debug_event_pb2.GraphExecutionTrace(
op_name=op_name, tfdbg_context_id="graph1")
writer.WriteGraphExecutionTrace(trace)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
writer.Close()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
traces = reader.graph_execution_traces(begin=begin, end=end)
self.assertLen(traces, expected_end - expected_begin)
self.assertEqual(traces[0].op_name, "Op_%d" % expected_begin)
self.assertEqual(traces[-1].op_name, "Op_%d" % (expected_end - 1))
class MultiSetReaderTest(dumping_callback_test_lib.DumpingCallbackTestBase):
"""Test for DebugDataReader for multiple file sets under a dump root."""
def testReadingTwoFileSetsWithTheSameDumpRootSucceeds(self):
# To simulate a multi-host data dump, we first generate file sets in two
# different directories, with the same tfdbg_run_id, and then combine them.
tfdbg_run_id = "foo"
for i in range(2):
writer = debug_events_writer.DebugEventsWriter(
os.path.join(self.dump_root, str(i)),
tfdbg_run_id,
circular_buffer_size=-1)
if i == 0:
debugged_graph = debug_event_pb2.DebuggedGraph(
graph_id="graph1", graph_name="graph1")
writer.WriteDebuggedGraph(debugged_graph)
op_name = "Op_0"
graph_op_creation = debug_event_pb2.GraphOpCreation(
op_type="FooOp", op_name=op_name, graph_id="graph1")
writer.WriteGraphOpCreation(graph_op_creation)
op_name = "Op_1"
graph_op_creation = debug_event_pb2.GraphOpCreation(
op_type="FooOp", op_name=op_name, graph_id="graph1")
writer.WriteGraphOpCreation(graph_op_creation)
for _ in range(10):
trace = debug_event_pb2.GraphExecutionTrace(
op_name="Op_%d" % i, tfdbg_context_id="graph1")
writer.WriteGraphExecutionTrace(trace)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
# Move all files from the subdirectory /1 to subdirectory /0.
dump_root_0 = os.path.join(self.dump_root, "0")
src_paths = glob.glob(os.path.join(self.dump_root, "1", "*"))
for src_path in src_paths:
dst_path = os.path.join(
dump_root_0,
# Rename the file set to avoid file name collision.
re.sub(r"(tfdbg_events\.\d+)", r"\g<1>1", os.path.basename(src_path)))
os.rename(src_path, dst_path)
with debug_events_reader.DebugDataReader(dump_root_0) as reader:
reader.update()
# Verify the content of the .graph_execution_traces file.
trace_digests = reader.graph_execution_traces(digest=True)
self.assertLen(trace_digests, 20)
for _ in range(10):
trace = reader.read_graph_execution_trace(trace_digests[i])
self.assertEqual(trace.op_name, "Op_0")
for _ in range(10):
trace = reader.read_graph_execution_trace(trace_digests[i + 10])
self.assertEqual(trace.op_name, "Op_1")
def testReadingTwoFileSetsWithTheDifferentRootsLeadsToError(self):
# To simulate a multi-host data dump, we first generate file sets in two
# different directories, with different tfdbg_run_ids, and then combine
# them.
for i in range(2):
writer = debug_events_writer.DebugEventsWriter(
os.path.join(self.dump_root, str(i)),
"run_id_%d" % i,
circular_buffer_size=-1)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
# Move all files from the subdirectory /1 to subdirectory /0.
dump_root_0 = os.path.join(self.dump_root, "0")
src_paths = glob.glob(os.path.join(self.dump_root, "1", "*"))
for src_path in src_paths:
dst_path = os.path.join(
dump_root_0,
# Rename the file set to avoid file name collision.
re.sub(r"(tfdbg_events\.\d+)", r"\g<1>1", os.path.basename(src_path)))
os.rename(src_path, dst_path)
with self.assertRaisesRegex(ValueError,
r"Found multiple \(2\) tfdbg2 runs"):
debug_events_reader.DebugDataReader(dump_root_0)
class DataObjectsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
def jsonRoundTripCheck(self, obj):
self.assertEqual(
json_lib.dumps(json_lib.loads(json_lib.dumps(obj)), sort_keys=True),
json_lib.dumps(obj, sort_keys=True))
def testExecutionDigestWithNoOutputToJson(self):
execution_digest = debug_events_reader.ExecutionDigest(
1234, 5678, "FooOp", output_tensor_device_ids=None)
json = execution_digest.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["output_tensor_device_ids"], None)
def testExecutionDigestWithTwoOutputsToJson(self):
execution_digest = debug_events_reader.ExecutionDigest(
1234, 5678, "FooOp", output_tensor_device_ids=[1357, 2468])
json = execution_digest.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["output_tensor_device_ids"], (1357, 2468))
def testExecutionNoGraphNoInputToJson(self):
execution_digest = debug_events_reader.ExecutionDigest(
1234, 5678, "FooOp", output_tensor_device_ids=[1357])
execution = debug_events_reader.Execution(
execution_digest,
"localhost",
("a1", "b2"),
debug_event_pb2.TensorDebugMode.CURT_HEALTH,
graph_id=None,
input_tensor_ids=None,
output_tensor_ids=[2468],
debug_tensor_values=([1, 0],))
json = execution.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["output_tensor_device_ids"], (1357,))
self.assertEqual(json["host_name"], "localhost")
self.assertEqual(json["stack_frame_ids"], ("a1", "b2"))
self.assertEqual(json["tensor_debug_mode"],
debug_event_pb2.TensorDebugMode.CURT_HEALTH)
self.assertIsNone(json["graph_id"])
self.assertIsNone(json["input_tensor_ids"])
self.assertEqual(json["output_tensor_ids"], (2468,))
self.assertEqual(json["debug_tensor_values"], ([1, 0],))
def testExecutionNoGraphNoInputButWithOutputToJson(self):
execution_digest = debug_events_reader.ExecutionDigest(
1234, 5678, "FooOp", output_tensor_device_ids=[1357])
execution = debug_events_reader.Execution(
execution_digest,
"localhost",
("a1", "b2"),
debug_event_pb2.TensorDebugMode.FULL_HEALTH,
graph_id="abcd",
input_tensor_ids=[13, 37],
output_tensor_ids=None,
debug_tensor_values=None)
json = execution.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["output_tensor_device_ids"], (1357,))
self.assertEqual(json["host_name"], "localhost")
self.assertEqual(json["stack_frame_ids"], ("a1", "b2"))
self.assertEqual(json["tensor_debug_mode"],
debug_event_pb2.TensorDebugMode.FULL_HEALTH)
self.assertEqual(json["graph_id"], "abcd")
self.assertEqual(json["input_tensor_ids"], (13, 37))
self.assertIsNone(json["output_tensor_ids"])
self.assertIsNone(json["debug_tensor_values"])
@parameterized.named_parameters(
("EmptyList", []),
("None", None),
)
def testExecutionWithNoOutputTensorsReturnsZeroForNumOutputs(
self, output_tensor_ids):
execution = debug_events_reader.Execution(
debug_events_reader.ExecutionDigest(1234, 5678, "FooOp"),
"localhost", ("a1", "b2"),
debug_event_pb2.TensorDebugMode.FULL_HEALTH,
graph_id="abcd",
input_tensor_ids=[13, 37],
output_tensor_ids=output_tensor_ids,
debug_tensor_values=None)
self.assertEqual(execution.num_outputs, 0)
def testDebuggedDeviceToJons(self):
debugged_device = debug_events_reader.DebuggedDevice("/TPU:3", 4)
self.assertEqual(debugged_device.to_json(), {
"device_name": "/TPU:3",
"device_id": 4,
})
def testDebuggedGraphToJonsWitouthNameInnerOuterGraphIds(self):
debugged_graph = debug_events_reader.DebuggedGraph(
None,
"b1c2",
outer_graph_id=None,
)
self.assertEqual(
debugged_graph.to_json(), {
"name": None,
"graph_id": "b1c2",
"outer_graph_id": None,
"inner_graph_ids": [],
})
def testDebuggedGraphToJonsWithNameAndInnerOuterGraphIds(self):
debugged_graph = debug_events_reader.DebuggedGraph(
"loss_function",
"b1c2",
outer_graph_id="a0b1",
)
debugged_graph.add_inner_graph_id("c2d3")
debugged_graph.add_inner_graph_id("c2d3e4")
self.assertEqual(
debugged_graph.to_json(), {
"name": "loss_function",
"graph_id": "b1c2",
"outer_graph_id": "a0b1",
"inner_graph_ids": ["c2d3", "c2d3e4"],
})
@parameterized.named_parameters(
("EmptyList", []),
("None", None),
)
def testGraphOpDigestWithNoOutpusReturnsNumOutputsZero(
self, output_tensor_ids):
op_creation_digest = debug_events_reader.GraphOpCreationDigest(
1234,
5678,
"deadbeef",
"FooOp",
"Model_1/Foo_2",
output_tensor_ids,
"machine.cluster", ("a1", "a2"),
input_names=None,
device_name=None)
self.assertEqual(op_creation_digest.num_outputs, 0)
def testGraphOpCreationDigestNoInputNoDeviceNameToJson(self):
op_creation_digest = debug_events_reader.GraphOpCreationDigest(
1234,
5678,
"deadbeef",
"FooOp",
"Model_1/Foo_2", [135],
"machine.cluster", ("a1", "a2"),
input_names=None,
device_name=None)
json = op_creation_digest.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["graph_id"], "deadbeef")
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["op_name"], "Model_1/Foo_2")
self.assertEqual(json["output_tensor_ids"], (135,))
self.assertEqual(json["host_name"], "machine.cluster")
self.assertEqual(json["stack_frame_ids"], ("a1", "a2"))
self.assertIsNone(json["input_names"])
self.assertIsNone(json["device_name"])
def testGraphOpCreationDigestWithInputsAndDeviceNameToJson(self):
op_creation_digest = debug_events_reader.GraphOpCreationDigest(
1234,
5678,
"deadbeef",
"FooOp",
"Model_1/Foo_2", [135],
"machine.cluster", ("a1", "a2"),
input_names=["Bar_1", "Qux_2"],
device_name="/device:GPU:0")
json = op_creation_digest.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["graph_id"], "deadbeef")
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["op_name"], "Model_1/Foo_2")
self.assertEqual(json["output_tensor_ids"], (135,))
self.assertEqual(json["host_name"], "machine.cluster")
self.assertEqual(json["stack_frame_ids"], ("a1", "a2"))
self.assertEqual(json["input_names"], ("Bar_1", "Qux_2"))
self.assertEqual(json["device_name"], "/device:GPU:0")
def testGraphExecutionTraceDigestToJson(self):
trace_digest = debug_events_reader.GraphExecutionTraceDigest(
1234, 5678, "FooOp", "Model_1/Foo_2", 1, "deadbeef")
json = trace_digest.to_json()
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["op_name"], "Model_1/Foo_2")
self.assertEqual(json["output_slot"], 1)
self.assertEqual(json["graph_id"], "deadbeef")
def testGraphExecutionTraceWithTensorDebugValueAndDeviceNameToJson(self):
trace_digest = debug_events_reader.GraphExecutionTraceDigest(
1234, 5678, "FooOp", "Model_1/Foo_2", 1, "deadbeef")
trace = debug_events_reader.GraphExecutionTrace(
trace_digest, ["g1", "g2", "deadbeef"],
debug_event_pb2.TensorDebugMode.CURT_HEALTH,
debug_tensor_value=[3, 1], device_name="/device:GPU:0")
json = trace.to_json()
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["op_name"], "Model_1/Foo_2")
self.assertEqual(json["output_slot"], 1)
self.assertEqual(json["graph_id"], "deadbeef")
self.assertEqual(json["graph_ids"], ("g1", "g2", "deadbeef"))
self.assertEqual(json["tensor_debug_mode"],
debug_event_pb2.TensorDebugMode.CURT_HEALTH)
self.assertEqual(json["debug_tensor_value"], (3, 1))
self.assertEqual(json["device_name"], "/device:GPU:0")
def testGraphExecutionTraceNoTensorDebugValueNoDeviceNameToJson(self):
trace_digest = debug_events_reader.GraphExecutionTraceDigest(
1234, 5678, "FooOp", "Model_1/Foo_2", 1, "deadbeef")
trace = debug_events_reader.GraphExecutionTrace(
trace_digest, ["g1", "g2", "deadbeef"],
debug_event_pb2.TensorDebugMode.NO_TENSOR,
debug_tensor_value=None, device_name=None)
json = trace.to_json()
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["op_name"], "Model_1/Foo_2")
self.assertEqual(json["output_slot"], 1)
self.assertEqual(json["graph_id"], "deadbeef")
self.assertEqual(json["graph_ids"], ("g1", "g2", "deadbeef"))
self.assertEqual(json["tensor_debug_mode"],
debug_event_pb2.TensorDebugMode.NO_TENSOR)
self.assertIsNone(json["debug_tensor_value"])
self.assertIsNone(json["device_name"])
if __name__ == "__main__":
ops.enable_eager_execution()
googletest.main()
| 41.831887 | 80 | 0.675309 |
import glob
import json as json_lib
import os
import re
import threading
import time
from absl.testing import parameterized
from tensorflow.core.protobuf import debug_event_pb2
from tensorflow.python.debug.lib import debug_events_reader
from tensorflow.python.debug.lib import debug_events_writer
from tensorflow.python.debug.lib import dumping_callback_test_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.platform import googletest
class DebugEventsWriterTest(dumping_callback_test_lib.DumpingCallbackTestBase,
parameterized.TestCase):
def testMultiThreadedConstructorCallWorks(self):
def init_writer():
debug_events_writer.DebugEventsWriter(self.dump_root, self.tfdbg_run_id)
num_threads = 4
threads = []
for _ in range(num_threads):
thread = threading.Thread(target=init_writer)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
metadata_paths = glob.glob(os.path.join(self.dump_root, "*.metadata"))
self.assertLen(metadata_paths, 1)
source_files_paths = glob.glob(
os.path.join(self.dump_root, "*.source_files"))
self.assertLen(source_files_paths, 1)
stack_frames_paths = glob.glob(
os.path.join(self.dump_root, "*.stack_frames"))
self.assertLen(stack_frames_paths, 1)
graphs_paths = glob.glob(os.path.join(self.dump_root, "*.graphs"))
self.assertLen(graphs_paths, 1)
self._readAndCheckMetadataFile()
def testWriteSourceFilesAndStackFrames(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id)
num_protos = 10
for i in range(num_protos):
source_file = debug_event_pb2.SourceFile()
source_file.file_path = "/home/tf2user/main.py"
source_file.host_name = "machine.cluster"
source_file.lines.append("print(%d)" % i)
writer.WriteSourceFile(source_file)
stack_frame = debug_event_pb2.StackFrameWithId()
stack_frame.id = "stack_%d" % i
stack_frame.file_line_col.file_index = i * 10
writer.WriteStackFrameWithId(stack_frame)
writer.FlushNonExecutionFiles()
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
actuals = list(item.debug_event.source_file
for item in reader.source_files_iterator())
self.assertLen(actuals, num_protos)
for i in range(num_protos):
self.assertEqual(actuals[i].file_path, "/home/tf2user/main.py")
self.assertEqual(actuals[i].host_name, "machine.cluster")
self.assertEqual(actuals[i].lines, ["print(%d)" % i])
actuals = list(item.debug_event.stack_frame_with_id
for item in reader.stack_frames_iterator())
self.assertLen(actuals, num_protos)
for i in range(num_protos):
self.assertEqual(actuals[i].id, "stack_%d" % i)
self.assertEqual(actuals[i].file_line_col.file_index, i * 10)
def testWriteGraphOpCreationAndDebuggedGraphs(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id)
num_op_creations = 10
for i in range(num_op_creations):
graph_op_creation = debug_event_pb2.GraphOpCreation()
graph_op_creation.op_type = "Conv2D"
graph_op_creation.op_name = "Conv2D_%d" % i
writer.WriteGraphOpCreation(graph_op_creation)
debugged_graph = debug_event_pb2.DebuggedGraph()
debugged_graph.graph_id = "deadbeaf"
debugged_graph.graph_name = "MyGraph1"
writer.WriteDebuggedGraph(debugged_graph)
writer.FlushNonExecutionFiles()
reader = debug_events_reader.DebugEventsReader(self.dump_root)
actuals = list(item.debug_event for item in reader.graphs_iterator())
self.assertLen(actuals, num_op_creations + 1)
for i in range(num_op_creations):
self.assertEqual(actuals[i].graph_op_creation.op_type, "Conv2D")
self.assertEqual(actuals[i].graph_op_creation.op_name, "Conv2D_%d" % i)
self.assertEqual(actuals[num_op_creations].debugged_graph.graph_id,
"deadbeaf")
def testConcurrentWritesToNonExecutionFilesWorks(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id)
source_file_state = {"counter": 0, "lock": threading.Lock()}
def writer_source_file():
source_file = debug_event_pb2.SourceFile()
with source_file_state["lock"]:
source_file.file_path = "/home/tf2user/file_%d.py" % source_file_state[
"counter"]
source_file_state["counter"] += 1
writer.WriteSourceFile(source_file)
writer.FlushNonExecutionFiles()
stack_frame_state = {"counter": 0, "lock": threading.Lock()}
def write_stack_frame():
stack_frame = debug_event_pb2.StackFrameWithId()
with stack_frame_state["lock"]:
stack_frame.id = "stack_frame_%d" % stack_frame_state["counter"]
stack_frame_state["counter"] += 1
writer.WriteStackFrameWithId(stack_frame)
writer.FlushNonExecutionFiles()
graph_op_state = {"counter": 0, "lock": threading.Lock()}
def write_graph_op_creation():
graph_op_creation = debug_event_pb2.GraphOpCreation()
with graph_op_state["lock"]:
graph_op_creation.op_name = "Op%d" % graph_op_state["counter"]
graph_op_state["counter"] += 1
writer.WriteGraphOpCreation(graph_op_creation)
writer.FlushNonExecutionFiles()
num_threads = 9
threads = []
for i in range(num_threads):
if i % 3 == 0:
target = writer_source_file
elif i % 3 == 1:
target = write_stack_frame
else:
target = write_graph_op_creation
thread = threading.Thread(target=target)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
source_files_iter = reader.source_files_iterator()
actuals = list(item.debug_event.source_file for item in source_files_iter)
file_paths = sorted([actual.file_path for actual in actuals])
self.assertEqual(file_paths, [
"/home/tf2user/file_0.py", "/home/tf2user/file_1.py",
"/home/tf2user/file_2.py"
])
actuals = list(item.debug_event.stack_frame_with_id
for item in reader.stack_frames_iterator())
stack_frame_ids = sorted([actual.id for actual in actuals])
self.assertEqual(stack_frame_ids,
["stack_frame_0", "stack_frame_1", "stack_frame_2"])
actuals = list(item.debug_event.graph_op_creation
for item in reader.graphs_iterator())
graph_op_names = sorted([actual.op_name for actual in actuals])
self.assertEqual(graph_op_names, ["Op0", "Op1", "Op2"])
def testWriteAndReadMetadata(self):
t0 = time.time()
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id)
writer.Close()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
self.assertIsInstance(reader.starting_wall_time(), float)
self.assertGreaterEqual(reader.starting_wall_time(), t0)
self.assertEqual(reader.tensorflow_version(), versions.__version__)
self.assertTrue(reader.tfdbg_run_id())
def testWriteExecutionEventsWithCircularBuffer(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id)
num_execution_events = debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE * 2
for i in range(num_execution_events):
execution = debug_event_pb2.Execution()
execution.op_type = "OpType%d" % i
writer.WriteExecution(execution)
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
self.assertFalse(reader.executions())
writer.FlushExecutionFiles()
reader.update()
executions = reader.executions()
for i, execution in enumerate(executions):
self.assertEqual(
execution.op_type,
"OpType%d" % (i + debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE))
def testWriteExecutionEventsWithoutCircularBufferBehavior(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id, 0)
num_execution_events = debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE * 2
for i in range(num_execution_events):
execution = debug_event_pb2.Execution()
execution.op_type = "OpType%d" % i
writer.WriteExecution(execution)
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
executions = reader.executions()
self.assertLen(executions, num_execution_events)
for i, execution in enumerate(executions):
self.assertEqual(execution.op_type, "OpType%d" % i)
def testWriteGraphExecutionTraceEventsWithCircularBuffer(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id)
num_execution_events = debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE * 2
for i in range(num_execution_events):
trace = debug_event_pb2.GraphExecutionTrace()
trace.op_name = "Op%d" % i
writer.WriteGraphExecutionTrace(trace)
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
actuals = list(reader.graph_execution_traces_iterators()[0])
self.assertEmpty(actuals)
writer.FlushExecutionFiles()
actuals = list(item.debug_event.graph_execution_trace
for item in reader.graph_execution_traces_iterators()[0])
self.assertLen(actuals, debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE)
for i in range(debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE):
self.assertEqual(
actuals[i].op_name,
"Op%d" % (i + debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE))
def testWriteGraphExecutionTraceEventsWithoutCircularBufferBehavior(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id, 0)
num_execution_events = debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE * 2
for i in range(num_execution_events):
trace = debug_event_pb2.GraphExecutionTrace()
trace.op_name = "Op%d" % i
writer.WriteGraphExecutionTrace(trace)
writer.FlushExecutionFiles()
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
actuals = list(item.debug_event.graph_execution_trace
for item in reader.graph_execution_traces_iterators()[0])
self.assertLen(actuals, num_execution_events)
for i in range(num_execution_events):
self.assertEqual(actuals[i].op_name, "Op%d" % i)
def testConcurrentWritesToExecutionFiles(self):
circular_buffer_size = 5
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id,
circular_buffer_size)
debugged_graph = debug_event_pb2.DebuggedGraph(graph_id="graph1",
graph_name="graph1")
writer.WriteDebuggedGraph(debugged_graph)
execution_state = {"counter": 0, "lock": threading.Lock()}
def write_execution():
execution = debug_event_pb2.Execution()
with execution_state["lock"]:
execution.op_type = "OpType%d" % execution_state["counter"]
execution_state["counter"] += 1
writer.WriteExecution(execution)
graph_execution_trace_state = {"counter": 0, "lock": threading.Lock()}
def write_graph_execution_trace():
with graph_execution_trace_state["lock"]:
op_name = "Op%d" % graph_execution_trace_state["counter"]
graph_op_creation = debug_event_pb2.GraphOpCreation(
op_type="FooOp", op_name=op_name, graph_id="graph1")
trace = debug_event_pb2.GraphExecutionTrace(
op_name=op_name, tfdbg_context_id="graph1")
graph_execution_trace_state["counter"] += 1
writer.WriteGraphOpCreation(graph_op_creation)
writer.WriteGraphExecutionTrace(trace)
threads = []
for i in range(circular_buffer_size * 4):
if i % 2 == 0:
target = write_execution
else:
target = write_graph_execution_trace
thread = threading.Thread(target=target)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
executions = reader.executions()
executed_op_types = [execution.op_type for execution in executions]
self.assertLen(executed_op_types, circular_buffer_size)
self.assertLen(executed_op_types, len(set(executed_op_types)))
op_names = [trace.op_name for trace in reader.graph_execution_traces()]
self.assertLen(op_names, circular_buffer_size)
self.assertLen(op_names, len(set(op_names)))
def testConcurrentSourceFileRandomReads(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id)
for i in range(100):
source_file = debug_event_pb2.SourceFile(
host_name="localhost", file_path="/tmp/file_%d.py" % i)
source_file.lines.append("# File %d" % i)
writer.WriteSourceFile(source_file)
writer.FlushNonExecutionFiles()
reader = debug_events_reader.DebugDataReader(self.dump_root)
reader.update()
lines = [None] * 100
def read_job_1():
for i in range(49, -1, -1):
lines[i] = reader.source_lines("localhost", "/tmp/file_%d.py" % i)
def read_job_2():
for i in range(99, 49, -1):
lines[i] = reader.source_lines("localhost", "/tmp/file_%d.py" % i)
thread_1 = threading.Thread(target=read_job_1)
thread_2 = threading.Thread(target=read_job_2)
thread_1.start()
thread_2.start()
thread_1.join()
thread_2.join()
for i in range(100):
self.assertEqual(lines[i], ["# File %d" % i])
def testConcurrentExecutionUpdateAndRandomRead(self):
circular_buffer_size = -1
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id,
circular_buffer_size)
writer_state = {"counter": 0, "done": False}
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
def write_and_update_job():
while True:
if writer_state["done"]:
break
execution = debug_event_pb2.Execution()
execution.op_type = "OpType%d" % writer_state["counter"]
writer_state["counter"] += 1
writer.WriteExecution(execution)
writer.FlushExecutionFiles()
reader.update()
write_and_update_thread = threading.Thread(target=write_and_update_job)
write_and_update_thread.start()
while True:
exec_digests = reader.executions(digest=True)
if exec_digests:
exec_0 = reader.read_execution(exec_digests[0])
self.assertEqual(exec_0.op_type, "OpType0")
writer_state["done"] = True
break
else:
time.sleep(0.1)
continue
write_and_update_thread.join()
def testConcurrentExecutionRandomReads(self):
circular_buffer_size = -1
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id,
circular_buffer_size)
for i in range(100):
execution = debug_event_pb2.Execution()
execution.op_type = "OpType%d" % i
writer.WriteExecution(execution)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
reader = debug_events_reader.DebugDataReader(self.dump_root)
reader.update()
executions = [None] * 100
def read_job_1():
execution_digests = reader.executions(digest=True)
for i in range(49, -1, -1):
execution = reader.read_execution(execution_digests[i])
executions[i] = execution
def read_job_2():
execution_digests = reader.executions(digest=True)
for i in range(99, 49, -1):
execution = reader.read_execution(execution_digests[i])
executions[i] = execution
thread_1 = threading.Thread(target=read_job_1)
thread_2 = threading.Thread(target=read_job_2)
thread_1.start()
thread_2.start()
thread_1.join()
thread_2.join()
for i in range(100):
self.assertEqual(executions[i].op_type, "OpType%d" % i)
def testConcurrentGraphExecutionTraceUpdateAndRandomRead(self):
circular_buffer_size = -1
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id,
circular_buffer_size)
debugged_graph = debug_event_pb2.DebuggedGraph(graph_id="graph1",
graph_name="graph1")
writer.WriteDebuggedGraph(debugged_graph)
writer_state = {"counter": 0, "done": False}
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
def write_and_update_job():
while True:
if writer_state["done"]:
break
op_name = "Op%d" % writer_state["counter"]
graph_op_creation = debug_event_pb2.GraphOpCreation(
op_type="FooOp", op_name=op_name, graph_id="graph1")
writer.WriteGraphOpCreation(graph_op_creation)
trace = debug_event_pb2.GraphExecutionTrace(
op_name=op_name, tfdbg_context_id="graph1")
writer.WriteGraphExecutionTrace(trace)
writer_state["counter"] += 1
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
reader.update()
write_and_update_thread = threading.Thread(target=write_and_update_job)
write_and_update_thread.start()
while True:
digests = reader.graph_execution_traces(digest=True)
if digests:
trace_0 = reader.read_graph_execution_trace(digests[0])
self.assertEqual(trace_0.op_name, "Op0")
writer_state["done"] = True
break
else:
time.sleep(0.1)
continue
write_and_update_thread.join()
def testConcurrentGraphExecutionTraceRandomReads(self):
circular_buffer_size = -1
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id,
circular_buffer_size)
debugged_graph = debug_event_pb2.DebuggedGraph(graph_id="graph1",
graph_name="graph1")
writer.WriteDebuggedGraph(debugged_graph)
for i in range(100):
op_name = "Op%d" % i
graph_op_creation = debug_event_pb2.GraphOpCreation(
op_type="FooOp", op_name=op_name, graph_id="graph1")
writer.WriteGraphOpCreation(graph_op_creation)
trace = debug_event_pb2.GraphExecutionTrace(
op_name=op_name, tfdbg_context_id="graph1")
writer.WriteGraphExecutionTrace(trace)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
reader = debug_events_reader.DebugDataReader(self.dump_root)
reader.update()
traces = [None] * 100
def read_job_1():
digests = reader.graph_execution_traces(digest=True)
for i in range(49, -1, -1):
traces[i] = reader.read_graph_execution_trace(digests[i])
def read_job_2():
digests = reader.graph_execution_traces(digest=True)
for i in range(99, 49, -1):
traces[i] = reader.read_graph_execution_trace(digests[i])
thread_1 = threading.Thread(target=read_job_1)
thread_2 = threading.Thread(target=read_job_2)
thread_1.start()
thread_2.start()
thread_1.join()
thread_2.join()
for i in range(100):
self.assertEqual(traces[i].op_name, "Op%d" % i)
@parameterized.named_parameters(
("Begin1End3", 1, 3, 1, 3),
("Begin0End3", 0, 3, 0, 3),
("Begin0EndNeg1", 0, -1, 0, 4),
("BeginNoneEnd3", None, 3, 0, 3),
("Begin2EndNone", 2, None, 2, 5),
("BeginNoneEndNone", None, None, 0, 5),
)
def testRangeReadingExecutions(self, begin, end, expected_begin,
expected_end):
writer = debug_events_writer.DebugEventsWriter(
self.dump_root, self.tfdbg_run_id, circular_buffer_size=-1)
for i in range(5):
execution = debug_event_pb2.Execution(op_type="OpType%d" % i)
writer.WriteExecution(execution)
writer.FlushExecutionFiles()
writer.Close()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
executions = reader.executions(begin=begin, end=end)
self.assertLen(executions, expected_end - expected_begin)
self.assertEqual(executions[0].op_type, "OpType%d" % expected_begin)
self.assertEqual(executions[-1].op_type, "OpType%d" % (expected_end - 1))
@parameterized.named_parameters(
("Begin1End3", 1, 3, 1, 3),
("Begin0End3", 0, 3, 0, 3),
("Begin0EndNeg1", 0, -1, 0, 4),
("BeginNoneEnd3", None, 3, 0, 3),
("Begin2EndNone", 2, None, 2, 5),
("BeginNoneEndNone", None, None, 0, 5),
)
def testRangeReadingGraphExecutionTraces(self, begin, end, expected_begin,
expected_end):
writer = debug_events_writer.DebugEventsWriter(
self.dump_root, self.tfdbg_run_id, circular_buffer_size=-1)
debugged_graph = debug_event_pb2.DebuggedGraph(
graph_id="graph1", graph_name="graph1")
writer.WriteDebuggedGraph(debugged_graph)
for i in range(5):
op_name = "Op_%d" % i
graph_op_creation = debug_event_pb2.GraphOpCreation(
op_name=op_name, graph_id="graph1")
writer.WriteGraphOpCreation(graph_op_creation)
trace = debug_event_pb2.GraphExecutionTrace(
op_name=op_name, tfdbg_context_id="graph1")
writer.WriteGraphExecutionTrace(trace)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
writer.Close()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
traces = reader.graph_execution_traces(begin=begin, end=end)
self.assertLen(traces, expected_end - expected_begin)
self.assertEqual(traces[0].op_name, "Op_%d" % expected_begin)
self.assertEqual(traces[-1].op_name, "Op_%d" % (expected_end - 1))
class MultiSetReaderTest(dumping_callback_test_lib.DumpingCallbackTestBase):
def testReadingTwoFileSetsWithTheSameDumpRootSucceeds(self):
tfdbg_run_id = "foo"
for i in range(2):
writer = debug_events_writer.DebugEventsWriter(
os.path.join(self.dump_root, str(i)),
tfdbg_run_id,
circular_buffer_size=-1)
if i == 0:
debugged_graph = debug_event_pb2.DebuggedGraph(
graph_id="graph1", graph_name="graph1")
writer.WriteDebuggedGraph(debugged_graph)
op_name = "Op_0"
graph_op_creation = debug_event_pb2.GraphOpCreation(
op_type="FooOp", op_name=op_name, graph_id="graph1")
writer.WriteGraphOpCreation(graph_op_creation)
op_name = "Op_1"
graph_op_creation = debug_event_pb2.GraphOpCreation(
op_type="FooOp", op_name=op_name, graph_id="graph1")
writer.WriteGraphOpCreation(graph_op_creation)
for _ in range(10):
trace = debug_event_pb2.GraphExecutionTrace(
op_name="Op_%d" % i, tfdbg_context_id="graph1")
writer.WriteGraphExecutionTrace(trace)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
dump_root_0 = os.path.join(self.dump_root, "0")
src_paths = glob.glob(os.path.join(self.dump_root, "1", "*"))
for src_path in src_paths:
dst_path = os.path.join(
dump_root_0,
re.sub(r"(tfdbg_events\.\d+)", r"\g<1>1", os.path.basename(src_path)))
os.rename(src_path, dst_path)
with debug_events_reader.DebugDataReader(dump_root_0) as reader:
reader.update()
trace_digests = reader.graph_execution_traces(digest=True)
self.assertLen(trace_digests, 20)
for _ in range(10):
trace = reader.read_graph_execution_trace(trace_digests[i])
self.assertEqual(trace.op_name, "Op_0")
for _ in range(10):
trace = reader.read_graph_execution_trace(trace_digests[i + 10])
self.assertEqual(trace.op_name, "Op_1")
def testReadingTwoFileSetsWithTheDifferentRootsLeadsToError(self):
for i in range(2):
writer = debug_events_writer.DebugEventsWriter(
os.path.join(self.dump_root, str(i)),
"run_id_%d" % i,
circular_buffer_size=-1)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
dump_root_0 = os.path.join(self.dump_root, "0")
src_paths = glob.glob(os.path.join(self.dump_root, "1", "*"))
for src_path in src_paths:
dst_path = os.path.join(
dump_root_0,
re.sub(r"(tfdbg_events\.\d+)", r"\g<1>1", os.path.basename(src_path)))
os.rename(src_path, dst_path)
with self.assertRaisesRegex(ValueError,
r"Found multiple \(2\) tfdbg2 runs"):
debug_events_reader.DebugDataReader(dump_root_0)
class DataObjectsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
def jsonRoundTripCheck(self, obj):
self.assertEqual(
json_lib.dumps(json_lib.loads(json_lib.dumps(obj)), sort_keys=True),
json_lib.dumps(obj, sort_keys=True))
def testExecutionDigestWithNoOutputToJson(self):
execution_digest = debug_events_reader.ExecutionDigest(
1234, 5678, "FooOp", output_tensor_device_ids=None)
json = execution_digest.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["output_tensor_device_ids"], None)
def testExecutionDigestWithTwoOutputsToJson(self):
execution_digest = debug_events_reader.ExecutionDigest(
1234, 5678, "FooOp", output_tensor_device_ids=[1357, 2468])
json = execution_digest.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["output_tensor_device_ids"], (1357, 2468))
def testExecutionNoGraphNoInputToJson(self):
execution_digest = debug_events_reader.ExecutionDigest(
1234, 5678, "FooOp", output_tensor_device_ids=[1357])
execution = debug_events_reader.Execution(
execution_digest,
"localhost",
("a1", "b2"),
debug_event_pb2.TensorDebugMode.CURT_HEALTH,
graph_id=None,
input_tensor_ids=None,
output_tensor_ids=[2468],
debug_tensor_values=([1, 0],))
json = execution.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["output_tensor_device_ids"], (1357,))
self.assertEqual(json["host_name"], "localhost")
self.assertEqual(json["stack_frame_ids"], ("a1", "b2"))
self.assertEqual(json["tensor_debug_mode"],
debug_event_pb2.TensorDebugMode.CURT_HEALTH)
self.assertIsNone(json["graph_id"])
self.assertIsNone(json["input_tensor_ids"])
self.assertEqual(json["output_tensor_ids"], (2468,))
self.assertEqual(json["debug_tensor_values"], ([1, 0],))
def testExecutionNoGraphNoInputButWithOutputToJson(self):
execution_digest = debug_events_reader.ExecutionDigest(
1234, 5678, "FooOp", output_tensor_device_ids=[1357])
execution = debug_events_reader.Execution(
execution_digest,
"localhost",
("a1", "b2"),
debug_event_pb2.TensorDebugMode.FULL_HEALTH,
graph_id="abcd",
input_tensor_ids=[13, 37],
output_tensor_ids=None,
debug_tensor_values=None)
json = execution.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["output_tensor_device_ids"], (1357,))
self.assertEqual(json["host_name"], "localhost")
self.assertEqual(json["stack_frame_ids"], ("a1", "b2"))
self.assertEqual(json["tensor_debug_mode"],
debug_event_pb2.TensorDebugMode.FULL_HEALTH)
self.assertEqual(json["graph_id"], "abcd")
self.assertEqual(json["input_tensor_ids"], (13, 37))
self.assertIsNone(json["output_tensor_ids"])
self.assertIsNone(json["debug_tensor_values"])
@parameterized.named_parameters(
("EmptyList", []),
("None", None),
)
def testExecutionWithNoOutputTensorsReturnsZeroForNumOutputs(
self, output_tensor_ids):
execution = debug_events_reader.Execution(
debug_events_reader.ExecutionDigest(1234, 5678, "FooOp"),
"localhost", ("a1", "b2"),
debug_event_pb2.TensorDebugMode.FULL_HEALTH,
graph_id="abcd",
input_tensor_ids=[13, 37],
output_tensor_ids=output_tensor_ids,
debug_tensor_values=None)
self.assertEqual(execution.num_outputs, 0)
def testDebuggedDeviceToJons(self):
debugged_device = debug_events_reader.DebuggedDevice("/TPU:3", 4)
self.assertEqual(debugged_device.to_json(), {
"device_name": "/TPU:3",
"device_id": 4,
})
def testDebuggedGraphToJonsWitouthNameInnerOuterGraphIds(self):
debugged_graph = debug_events_reader.DebuggedGraph(
None,
"b1c2",
outer_graph_id=None,
)
self.assertEqual(
debugged_graph.to_json(), {
"name": None,
"graph_id": "b1c2",
"outer_graph_id": None,
"inner_graph_ids": [],
})
def testDebuggedGraphToJonsWithNameAndInnerOuterGraphIds(self):
debugged_graph = debug_events_reader.DebuggedGraph(
"loss_function",
"b1c2",
outer_graph_id="a0b1",
)
debugged_graph.add_inner_graph_id("c2d3")
debugged_graph.add_inner_graph_id("c2d3e4")
self.assertEqual(
debugged_graph.to_json(), {
"name": "loss_function",
"graph_id": "b1c2",
"outer_graph_id": "a0b1",
"inner_graph_ids": ["c2d3", "c2d3e4"],
})
@parameterized.named_parameters(
("EmptyList", []),
("None", None),
)
def testGraphOpDigestWithNoOutpusReturnsNumOutputsZero(
self, output_tensor_ids):
op_creation_digest = debug_events_reader.GraphOpCreationDigest(
1234,
5678,
"deadbeef",
"FooOp",
"Model_1/Foo_2",
output_tensor_ids,
"machine.cluster", ("a1", "a2"),
input_names=None,
device_name=None)
self.assertEqual(op_creation_digest.num_outputs, 0)
def testGraphOpCreationDigestNoInputNoDeviceNameToJson(self):
op_creation_digest = debug_events_reader.GraphOpCreationDigest(
1234,
5678,
"deadbeef",
"FooOp",
"Model_1/Foo_2", [135],
"machine.cluster", ("a1", "a2"),
input_names=None,
device_name=None)
json = op_creation_digest.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["graph_id"], "deadbeef")
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["op_name"], "Model_1/Foo_2")
self.assertEqual(json["output_tensor_ids"], (135,))
self.assertEqual(json["host_name"], "machine.cluster")
self.assertEqual(json["stack_frame_ids"], ("a1", "a2"))
self.assertIsNone(json["input_names"])
self.assertIsNone(json["device_name"])
def testGraphOpCreationDigestWithInputsAndDeviceNameToJson(self):
op_creation_digest = debug_events_reader.GraphOpCreationDigest(
1234,
5678,
"deadbeef",
"FooOp",
"Model_1/Foo_2", [135],
"machine.cluster", ("a1", "a2"),
input_names=["Bar_1", "Qux_2"],
device_name="/device:GPU:0")
json = op_creation_digest.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["graph_id"], "deadbeef")
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["op_name"], "Model_1/Foo_2")
self.assertEqual(json["output_tensor_ids"], (135,))
self.assertEqual(json["host_name"], "machine.cluster")
self.assertEqual(json["stack_frame_ids"], ("a1", "a2"))
self.assertEqual(json["input_names"], ("Bar_1", "Qux_2"))
self.assertEqual(json["device_name"], "/device:GPU:0")
def testGraphExecutionTraceDigestToJson(self):
trace_digest = debug_events_reader.GraphExecutionTraceDigest(
1234, 5678, "FooOp", "Model_1/Foo_2", 1, "deadbeef")
json = trace_digest.to_json()
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["op_name"], "Model_1/Foo_2")
self.assertEqual(json["output_slot"], 1)
self.assertEqual(json["graph_id"], "deadbeef")
def testGraphExecutionTraceWithTensorDebugValueAndDeviceNameToJson(self):
trace_digest = debug_events_reader.GraphExecutionTraceDigest(
1234, 5678, "FooOp", "Model_1/Foo_2", 1, "deadbeef")
trace = debug_events_reader.GraphExecutionTrace(
trace_digest, ["g1", "g2", "deadbeef"],
debug_event_pb2.TensorDebugMode.CURT_HEALTH,
debug_tensor_value=[3, 1], device_name="/device:GPU:0")
json = trace.to_json()
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["op_name"], "Model_1/Foo_2")
self.assertEqual(json["output_slot"], 1)
self.assertEqual(json["graph_id"], "deadbeef")
self.assertEqual(json["graph_ids"], ("g1", "g2", "deadbeef"))
self.assertEqual(json["tensor_debug_mode"],
debug_event_pb2.TensorDebugMode.CURT_HEALTH)
self.assertEqual(json["debug_tensor_value"], (3, 1))
self.assertEqual(json["device_name"], "/device:GPU:0")
def testGraphExecutionTraceNoTensorDebugValueNoDeviceNameToJson(self):
trace_digest = debug_events_reader.GraphExecutionTraceDigest(
1234, 5678, "FooOp", "Model_1/Foo_2", 1, "deadbeef")
trace = debug_events_reader.GraphExecutionTrace(
trace_digest, ["g1", "g2", "deadbeef"],
debug_event_pb2.TensorDebugMode.NO_TENSOR,
debug_tensor_value=None, device_name=None)
json = trace.to_json()
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["op_name"], "Model_1/Foo_2")
self.assertEqual(json["output_slot"], 1)
self.assertEqual(json["graph_id"], "deadbeef")
self.assertEqual(json["graph_ids"], ("g1", "g2", "deadbeef"))
self.assertEqual(json["tensor_debug_mode"],
debug_event_pb2.TensorDebugMode.NO_TENSOR)
self.assertIsNone(json["debug_tensor_value"])
self.assertIsNone(json["device_name"])
if __name__ == "__main__":
ops.enable_eager_execution()
googletest.main()
| true | true |
1c2f122b468c80626a01b121b18aa5167206ae08 | 3,685 | py | Python | livesync/indico_livesync/controllers.py | tomasr8/indico-plugins | b85e4ad826fa362aa32eb236e73c9ab2f7c7f465 | [
"MIT"
] | null | null | null | livesync/indico_livesync/controllers.py | tomasr8/indico-plugins | b85e4ad826fa362aa32eb236e73c9ab2f7c7f465 | [
"MIT"
] | null | null | null | livesync/indico_livesync/controllers.py | tomasr8/indico-plugins | b85e4ad826fa362aa32eb236e73c9ab2f7c7f465 | [
"MIT"
] | null | null | null | # This file is part of the Indico plugins.
# Copyright (C) 2002 - 2022 CERN
#
# The Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License;
# see the LICENSE file for more details.
from flask import flash, redirect, request
from flask_pluginengine import current_plugin, render_plugin_template
from sqlalchemy.orm.attributes import flag_modified
from werkzeug.exceptions import NotFound
from indico.core.db import db
from indico.core.errors import UserValueError
from indico.modules.admin import RHAdminBase
from indico.web.flask.util import url_for
from indico.web.forms.base import FormDefaults
from indico.web.util import jsonify_data, jsonify_template
from indico_livesync import _
from indico_livesync.models.agents import LiveSyncAgent
def extend_plugin_details():
agents = LiveSyncAgent.query.order_by(LiveSyncAgent.name, LiveSyncAgent.id).all()
used_backends = {a.backend_name for a in agents}
available_backends = {name: backend
for name, backend in current_plugin.backend_classes.items()
if not backend.unique or name not in used_backends}
return render_plugin_template('plugin_details_extra.html', agents=agents, backends=available_backends)
class RHDeleteAgent(RHAdminBase):
"""Deletes a LiveSync agent"""
def _process_args(self):
self.agent = LiveSyncAgent.get_or_404(request.view_args['agent_id'])
def _process(self):
db.session.delete(self.agent)
flash(_('Agent deleted'), 'success')
return jsonify_data(flash=False)
class RHAddAgent(RHAdminBase):
"""Adds a LiveSync agent"""
def _process_args(self):
self.backend_name = request.view_args['backend']
try:
self.backend = current_plugin.backend_classes[self.backend_name]
except KeyError:
raise NotFound
if self.backend.unique and LiveSyncAgent.query.filter_by(backend_name=self.backend_name).has_rows():
raise UserValueError(_('This backend is already in use'))
def _process(self):
form = self.backend.form(obj=FormDefaults(name=self.backend.title))
if form.validate_on_submit():
data = form.data
name = data.pop('name')
agent = LiveSyncAgent(name=name, backend_name=self.backend_name, settings=data)
db.session.add(agent)
flash(_('Agent added'), 'success')
flash(_("Don't forget to run the initial export!"), 'highlight')
return jsonify_data(flash=False)
return jsonify_template('edit_agent.html', render_plugin_template, form=form, backend=self.backend, edit=False)
class RHEditAgent(RHAdminBase):
"""Edits a LiveSync agent"""
def _process_args(self):
self.agent = LiveSyncAgent.get_or_404(request.view_args['agent_id'])
if self.agent.backend is None:
flash(_('Cannot edit an agent that is not loaded'), 'error')
return redirect(url_for('plugins.details', plugin='livesync'))
def _process(self):
form = self.agent.backend.form(obj=FormDefaults(name=self.agent.name, **self.agent.settings))
if form.validate_on_submit():
data = form.data
self.agent.name = data.pop('name')
if data:
self.agent.settings.update(data)
flag_modified(self.agent, 'settings')
flash(_('Agent updated'), 'success')
return jsonify_data(flash=False)
return jsonify_template('edit_agent.html', render_plugin_template, form=form, backend=self.agent.backend,
edit=True)
| 39.623656 | 119 | 0.688195 |
from flask import flash, redirect, request
from flask_pluginengine import current_plugin, render_plugin_template
from sqlalchemy.orm.attributes import flag_modified
from werkzeug.exceptions import NotFound
from indico.core.db import db
from indico.core.errors import UserValueError
from indico.modules.admin import RHAdminBase
from indico.web.flask.util import url_for
from indico.web.forms.base import FormDefaults
from indico.web.util import jsonify_data, jsonify_template
from indico_livesync import _
from indico_livesync.models.agents import LiveSyncAgent
def extend_plugin_details():
agents = LiveSyncAgent.query.order_by(LiveSyncAgent.name, LiveSyncAgent.id).all()
used_backends = {a.backend_name for a in agents}
available_backends = {name: backend
for name, backend in current_plugin.backend_classes.items()
if not backend.unique or name not in used_backends}
return render_plugin_template('plugin_details_extra.html', agents=agents, backends=available_backends)
class RHDeleteAgent(RHAdminBase):
def _process_args(self):
self.agent = LiveSyncAgent.get_or_404(request.view_args['agent_id'])
def _process(self):
db.session.delete(self.agent)
flash(_('Agent deleted'), 'success')
return jsonify_data(flash=False)
class RHAddAgent(RHAdminBase):
def _process_args(self):
self.backend_name = request.view_args['backend']
try:
self.backend = current_plugin.backend_classes[self.backend_name]
except KeyError:
raise NotFound
if self.backend.unique and LiveSyncAgent.query.filter_by(backend_name=self.backend_name).has_rows():
raise UserValueError(_('This backend is already in use'))
def _process(self):
form = self.backend.form(obj=FormDefaults(name=self.backend.title))
if form.validate_on_submit():
data = form.data
name = data.pop('name')
agent = LiveSyncAgent(name=name, backend_name=self.backend_name, settings=data)
db.session.add(agent)
flash(_('Agent added'), 'success')
flash(_("Don't forget to run the initial export!"), 'highlight')
return jsonify_data(flash=False)
return jsonify_template('edit_agent.html', render_plugin_template, form=form, backend=self.backend, edit=False)
class RHEditAgent(RHAdminBase):
def _process_args(self):
self.agent = LiveSyncAgent.get_or_404(request.view_args['agent_id'])
if self.agent.backend is None:
flash(_('Cannot edit an agent that is not loaded'), 'error')
return redirect(url_for('plugins.details', plugin='livesync'))
def _process(self):
form = self.agent.backend.form(obj=FormDefaults(name=self.agent.name, **self.agent.settings))
if form.validate_on_submit():
data = form.data
self.agent.name = data.pop('name')
if data:
self.agent.settings.update(data)
flag_modified(self.agent, 'settings')
flash(_('Agent updated'), 'success')
return jsonify_data(flash=False)
return jsonify_template('edit_agent.html', render_plugin_template, form=form, backend=self.agent.backend,
edit=True)
| true | true |
1c2f125d2dc314b0fc43aaa3ca2b3b0b148148cb | 1,503 | py | Python | src/clustar_project/graph.py | jz5jx/Test_Repo | 8796f45021943984ed02232fd34ff02e17123d71 | [
"MIT"
] | 1 | 2021-04-24T21:52:53.000Z | 2021-04-24T21:52:53.000Z | src/clustar_project/graph.py | jz5jx/Test_Repo | 8796f45021943984ed02232fd34ff02e17123d71 | [
"MIT"
] | null | null | null | src/clustar_project/graph.py | jz5jx/Test_Repo | 8796f45021943984ed02232fd34ff02e17123d71 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
def identify_peaks(image, smoothing=5, clip=0.75):
n_rows = image.shape[0]
n_cols = image.shape[1]
mid = n_rows // 2
y = np.array([image[mid, c] for c in range(n_cols)])
x = np.arange(0, len(y), 1)
y_avg = []
for i in range(len(y)):
if i + smoothing > y.shape[0]:
smoothing -= - 1
if smoothing != 0:
y_avg.append(np.mean(y[i:i + smoothing]))
y = np.array(y_avg)
dydx = np.diff(y) / np.diff(x)
lhs = np.array([y[i - 1] / y[i] for i in
range(1, len(y) // 2) if y[i] != 0])
lhs[lhs < clip] = 0
lhs = np.nonzero(lhs)[0]
lhs = [lhs[i - 1] for i in range(1, len(lhs))
if ((lhs[i] - lhs[i - 1]) == 1)]
rhs = np.array([y[i] / y[i - 1] for i in
range(len(y) - 1, len(y) // 2, -1)
if y[i - 1] != 0])
rhs[rhs < clip] = 0
rhs = np.nonzero(rhs)[0]
rhs = [rhs[i - 1] for i in
range(1, len(rhs)) if ((rhs[i] - rhs[i - 1]) == 1)]
idx = []
if len(lhs) > 1 and len(rhs) > 1:
dydx_ = dydx[lhs[0]:-rhs[0]]
if len(dydx_) > 2:
idx = np.array([i for i in range(1, len(dydx_))
if (dydx_[i - 1] > 0 >= dydx_[i])
or (dydx_[i - 1] < 0 <= dydx_[i])]) + lhs[0]
idx = [idx[i] for i, val in enumerate(idx) if i % 2 == 0]
return idx
| 28.903846 | 72 | 0.465735 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
def identify_peaks(image, smoothing=5, clip=0.75):
n_rows = image.shape[0]
n_cols = image.shape[1]
mid = n_rows // 2
y = np.array([image[mid, c] for c in range(n_cols)])
x = np.arange(0, len(y), 1)
y_avg = []
for i in range(len(y)):
if i + smoothing > y.shape[0]:
smoothing -= - 1
if smoothing != 0:
y_avg.append(np.mean(y[i:i + smoothing]))
y = np.array(y_avg)
dydx = np.diff(y) / np.diff(x)
lhs = np.array([y[i - 1] / y[i] for i in
range(1, len(y) // 2) if y[i] != 0])
lhs[lhs < clip] = 0
lhs = np.nonzero(lhs)[0]
lhs = [lhs[i - 1] for i in range(1, len(lhs))
if ((lhs[i] - lhs[i - 1]) == 1)]
rhs = np.array([y[i] / y[i - 1] for i in
range(len(y) - 1, len(y) // 2, -1)
if y[i - 1] != 0])
rhs[rhs < clip] = 0
rhs = np.nonzero(rhs)[0]
rhs = [rhs[i - 1] for i in
range(1, len(rhs)) if ((rhs[i] - rhs[i - 1]) == 1)]
idx = []
if len(lhs) > 1 and len(rhs) > 1:
dydx_ = dydx[lhs[0]:-rhs[0]]
if len(dydx_) > 2:
idx = np.array([i for i in range(1, len(dydx_))
if (dydx_[i - 1] > 0 >= dydx_[i])
or (dydx_[i - 1] < 0 <= dydx_[i])]) + lhs[0]
idx = [idx[i] for i, val in enumerate(idx) if i % 2 == 0]
return idx
| true | true |
1c2f127cdfa2366e188a595e18fd3dfb435b4486 | 2,218 | py | Python | app/auth/forms.py | sgaoshang/seeker | bb96ab9835abddf076dfe61f4ffc4fd8d5933b6f | [
"MIT"
] | null | null | null | app/auth/forms.py | sgaoshang/seeker | bb96ab9835abddf076dfe61f4ffc4fd8d5933b6f | [
"MIT"
] | 2 | 2021-03-31T19:01:13.000Z | 2021-12-13T19:50:54.000Z | app/auth/forms.py | sgaoshang/seeker | bb96ab9835abddf076dfe61f4ffc4fd8d5933b6f | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, SelectField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo
from flask_babel import _, lazy_gettext as _l
from app.models import User
class LoginForm(FlaskForm):
username = StringField(_l('Username'), validators=[DataRequired()])
password = PasswordField(_l('Password'), validators=[DataRequired()])
remember_me = BooleanField(_l('Remember Me'))
submit = SubmitField(_l('Sign In'))
class RegistrationForm(FlaskForm):
username = StringField(_l('Username'), validators=[DataRequired()])
email = StringField(_l('Email'), validators=[DataRequired(), Email()])
password = PasswordField(_l('Password'), validators=[DataRequired()])
password2 = PasswordField(
_l('Repeat Password'), validators=[DataRequired(),
EqualTo('password')])
# component = SelectField(_l('Component'), choices=[('cpp', 'C++'), ('py', 'Python'), ('text', 'Plain Text')])
submit = SubmitField(_l('Register'))
def validate_username(self, username):
user = User.query.filter_by(username=username.data).first()
if user is not None:
raise ValidationError(_('Please use a different username.'))
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user is not None:
raise ValidationError(_('Please use a different email address.'))
class ResetPasswordRequestForm(FlaskForm):
email = StringField(_l('Email'), validators=[DataRequired(), Email()])
submit = SubmitField(_l('Request Password Reset'))
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user is None:
raise ValidationError(_('Please use a different email.'))
class ResetPasswordForm(FlaskForm):
password = PasswordField(_l('Password'), validators=[DataRequired()])
password2 = PasswordField(
_l('Repeat Password'), validators=[DataRequired(),
EqualTo('password')])
submit = SubmitField(_l('Request Password Reset'))
| 42.653846 | 114 | 0.672227 | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, SelectField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo
from flask_babel import _, lazy_gettext as _l
from app.models import User
class LoginForm(FlaskForm):
username = StringField(_l('Username'), validators=[DataRequired()])
password = PasswordField(_l('Password'), validators=[DataRequired()])
remember_me = BooleanField(_l('Remember Me'))
submit = SubmitField(_l('Sign In'))
class RegistrationForm(FlaskForm):
username = StringField(_l('Username'), validators=[DataRequired()])
email = StringField(_l('Email'), validators=[DataRequired(), Email()])
password = PasswordField(_l('Password'), validators=[DataRequired()])
password2 = PasswordField(
_l('Repeat Password'), validators=[DataRequired(),
EqualTo('password')])
submit = SubmitField(_l('Register'))
def validate_username(self, username):
user = User.query.filter_by(username=username.data).first()
if user is not None:
raise ValidationError(_('Please use a different username.'))
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user is not None:
raise ValidationError(_('Please use a different email address.'))
class ResetPasswordRequestForm(FlaskForm):
email = StringField(_l('Email'), validators=[DataRequired(), Email()])
submit = SubmitField(_l('Request Password Reset'))
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user is None:
raise ValidationError(_('Please use a different email.'))
class ResetPasswordForm(FlaskForm):
password = PasswordField(_l('Password'), validators=[DataRequired()])
password2 = PasswordField(
_l('Repeat Password'), validators=[DataRequired(),
EqualTo('password')])
submit = SubmitField(_l('Request Password Reset'))
| true | true |
1c2f13420cec6ec91fd855a91f15b93e5ee912ca | 454 | py | Python | passwlib.py | Lightmoll/SimpleStatus | e2eef3b1c7488899f5da74e18976dac5b18aefe0 | [
"MIT"
] | null | null | null | passwlib.py | Lightmoll/SimpleStatus | e2eef3b1c7488899f5da74e18976dac5b18aefe0 | [
"MIT"
] | null | null | null | passwlib.py | Lightmoll/SimpleStatus | e2eef3b1c7488899f5da74e18976dac5b18aefe0 | [
"MIT"
] | null | null | null | from passlib.hash import pbkdf2_sha256
#external lib
DEFAULT_PASSWD_URI = "data/passw.db"
def verify_password(in_passw):
base_passw_hash = ""
with open(DEFAULT_PASSWD_URI, "r") as file:
base_passw_hash = file.read().rstrip()
return pbkdf2_sha256.verify(in_passw, base_passw_hash)
def set_password(passw):
hashed_pass = pbkdf2_sha256.hash(passw)
with open(DEFAULT_PASSWD_URI, "w") as file:
file.write(hashed_pass)
| 25.222222 | 58 | 0.729075 | from passlib.hash import pbkdf2_sha256
DEFAULT_PASSWD_URI = "data/passw.db"
def verify_password(in_passw):
base_passw_hash = ""
with open(DEFAULT_PASSWD_URI, "r") as file:
base_passw_hash = file.read().rstrip()
return pbkdf2_sha256.verify(in_passw, base_passw_hash)
def set_password(passw):
hashed_pass = pbkdf2_sha256.hash(passw)
with open(DEFAULT_PASSWD_URI, "w") as file:
file.write(hashed_pass)
| true | true |
1c2f149739b4d45fd677b49e34c515d4fcbe65c5 | 3,751 | py | Python | www/server.py | cjds/rpicar | 01e9db2149691cb29bfe223644236b7634c3998b | [
"WTFPL"
] | null | null | null | www/server.py | cjds/rpicar | 01e9db2149691cb29bfe223644236b7634c3998b | [
"WTFPL"
] | null | null | null | www/server.py | cjds/rpicar | 01e9db2149691cb29bfe223644236b7634c3998b | [
"WTFPL"
] | 1 | 2020-04-07T18:03:01.000Z | 2020-04-07T18:03:01.000Z | #!/usr/bin/env python3
# @author Carl Saldanha <cjds92@gmail.com>
# @brief Web server to help move the robot
from http.server import BaseHTTPRequestHandler, HTTPServer
from os import curdir, sep
import contextlib
import os
import socket
import urllib.parse as urlparse
from urllib.parse import parse_qs
from functools import partialmethod
PORT_NUMBER = 8080
HOST = '127.0.0.1' # Standard loopback interface address (localhost)
SOCKET_PORT = 65432 # Port to listen on (non-privileged ports are > 1023)
def partialclass(cls, *args, **kwds):
"""
Simple method to implement a partial around the init of a class
"""
class NewCls(cls):
__init__ = partialmethod(cls.__init__, *args, **kwds)
return NewCls
#This class will handles any incoming request from
#the browser
class myHandler(BaseHTTPRequestHandler):
def __init__(self, socket_conn, *args, **kwargs):
self.socket_conn = socket_conn
super(BaseHTTPRequestHandler, self).__init__(*args, **kwargs)
#Handler for the GET requests
def do_GET(self):
parsed_url = urlparse.urlparse(self.path)
if(self.headers.get('content-length') is not None):
data = self.rfile.read(self.headers.get('content-length')).decode('utf-8')
dir_path = os.path.dirname(os.path.normpath(__file__))
if self.path == "/":
self.path=os.path.join(dir_path, "index.html")
elif parsed_url.path == "/keybind":
print(urlparse.parse_qs(parsed_url.query))
print(parsed_url.path)
print('sending data')
self.socket_conn.sendall(bytearray("W\n", 'utf8'))
else:
self.path = os.path.join(dir_path, self.path[1:])
try:
#Check the file extension required and
#set the right mime type
sendReply = False
if self.path.endswith(".html"):
mimetype='text/html'
sendReply = True
if self.path.endswith(".jpg"):
mimetype='image/jpg'
sendReply = True
if self.path.endswith(".gif"):
mimetype='image/gif'
sendReply = True
if self.path.endswith(".js"):
mimetype='application/javascript'
sendReply = True
if self.path.endswith(".css"):
mimetype='text/css'
sendReply = True
if sendReply == True:
#Open the static file requested and send it
f = open(self.path, 'rb')
self.send_response(200)
self.send_header('Content-type',mimetype)
self.end_headers()
self.wfile.write(f.read())
f.close()
return
except IOError:
self.send_error(404,'File Not Found: %s' % self.path)
# Start a Socket server to push the data to C++
if __name__ == "__main__":
try:
print("Waiting for socket client to connect")
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
print("Socket client connected")
s.bind((HOST, SOCKET_PORT))
s.listen()
conn, addr = s.accept()
with conn:
server = HTTPServer(('', PORT_NUMBER), partialclass(myHandler, conn))
print('Started httpserver on port ' , PORT_NUMBER)
server.serve_forever()
except KeyboardInterrupt:
print('^C received, shutting down the web server')
| 35.72381 | 90 | 0.559051 |
from http.server import BaseHTTPRequestHandler, HTTPServer
from os import curdir, sep
import contextlib
import os
import socket
import urllib.parse as urlparse
from urllib.parse import parse_qs
from functools import partialmethod
PORT_NUMBER = 8080
HOST = '127.0.0.1'
SOCKET_PORT = 65432
def partialclass(cls, *args, **kwds):
class NewCls(cls):
__init__ = partialmethod(cls.__init__, *args, **kwds)
return NewCls
class myHandler(BaseHTTPRequestHandler):
def __init__(self, socket_conn, *args, **kwargs):
self.socket_conn = socket_conn
super(BaseHTTPRequestHandler, self).__init__(*args, **kwargs)
def do_GET(self):
parsed_url = urlparse.urlparse(self.path)
if(self.headers.get('content-length') is not None):
data = self.rfile.read(self.headers.get('content-length')).decode('utf-8')
dir_path = os.path.dirname(os.path.normpath(__file__))
if self.path == "/":
self.path=os.path.join(dir_path, "index.html")
elif parsed_url.path == "/keybind":
print(urlparse.parse_qs(parsed_url.query))
print(parsed_url.path)
print('sending data')
self.socket_conn.sendall(bytearray("W\n", 'utf8'))
else:
self.path = os.path.join(dir_path, self.path[1:])
try:
sendReply = False
if self.path.endswith(".html"):
mimetype='text/html'
sendReply = True
if self.path.endswith(".jpg"):
mimetype='image/jpg'
sendReply = True
if self.path.endswith(".gif"):
mimetype='image/gif'
sendReply = True
if self.path.endswith(".js"):
mimetype='application/javascript'
sendReply = True
if self.path.endswith(".css"):
mimetype='text/css'
sendReply = True
if sendReply == True:
f = open(self.path, 'rb')
self.send_response(200)
self.send_header('Content-type',mimetype)
self.end_headers()
self.wfile.write(f.read())
f.close()
return
except IOError:
self.send_error(404,'File Not Found: %s' % self.path)
if __name__ == "__main__":
try:
print("Waiting for socket client to connect")
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
print("Socket client connected")
s.bind((HOST, SOCKET_PORT))
s.listen()
conn, addr = s.accept()
with conn:
server = HTTPServer(('', PORT_NUMBER), partialclass(myHandler, conn))
print('Started httpserver on port ' , PORT_NUMBER)
server.serve_forever()
except KeyboardInterrupt:
print('^C received, shutting down the web server')
| true | true |
1c2f15a19961e75280cc697ad0564e6c34542fa7 | 1,204 | py | Python | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/iam/apis/CreateSubUserRequest.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 14 | 2018-04-19T09:53:56.000Z | 2022-01-27T06:05:48.000Z | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/iam/apis/CreateSubUserRequest.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 15 | 2018-09-11T05:39:54.000Z | 2021-07-02T12:38:02.000Z | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/iam/apis/CreateSubUserRequest.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 33 | 2018-04-20T05:29:16.000Z | 2022-02-17T09:10:05.000Z | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class CreateSubUserRequest(JDCloudRequest):
"""
创建子用户
"""
def __init__(self, parameters, header=None, version="v1"):
super(CreateSubUserRequest, self).__init__(
'/subUser', 'POST', header, version)
self.parameters = parameters
class CreateSubUserParameters(object):
def __init__(self, createSubUserInfo):
"""
:param createSubUserInfo: 子用户信息
"""
self.createSubUserInfo = createSubUserInfo
| 28.666667 | 75 | 0.719269 |
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class CreateSubUserRequest(JDCloudRequest):
def __init__(self, parameters, header=None, version="v1"):
super(CreateSubUserRequest, self).__init__(
'/subUser', 'POST', header, version)
self.parameters = parameters
class CreateSubUserParameters(object):
def __init__(self, createSubUserInfo):
self.createSubUserInfo = createSubUserInfo
| true | true |
1c2f15c725d49d08657992ccce0173a331d53cb5 | 3,770 | py | Python | Chapter11_AI/cartpoleAgent4.py | thisisjako/UdemyTF | ee4102391ed6bd50f764955f732f5740425a9209 | [
"MIT"
] | null | null | null | Chapter11_AI/cartpoleAgent4.py | thisisjako/UdemyTF | ee4102391ed6bd50f764955f732f5740425a9209 | [
"MIT"
] | null | null | null | Chapter11_AI/cartpoleAgent4.py | thisisjako/UdemyTF | ee4102391ed6bd50f764955f732f5740425a9209 | [
"MIT"
] | null | null | null | # noqa
import gym
import numpy as np
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import to_categorical
class Agent:
def __init__(self, env: gym.Env):
self.env = env
self.num_obersvations = self.env.observation_space.shape[0]
self.num_actions = self.env.action_space.n
self.model = self.build_model()
def build_model(self) -> Sequential:
model = Sequential()
model.add(Dense(units=100, input_dim=self.num_obersvations))
model.add(Activation("relu"))
model.add(Dense(units=self.num_actions))
model.add(Activation("softmax"))
model.summary()
model.compile(
loss="categorical_crossentropy",
optimizer="Adam",
metrics=["accuracy"]
)
return model
def get_action(self, state: np.ndarray) -> int:
state = state.reshape(1, -1)
action_prob = self.model(state).numpy()[0]
action = np.random.choice(self.num_actions, p=action_prob)
return action
def get_samples(self, num_episodes: int) -> tuple:
rewards = [0.0 for _ in range(num_episodes)]
episodes = [[] for _ in range(num_episodes)]
for episode in range(num_episodes):
state = self.env.reset()
total_reward = 0.0
while True:
action = self.get_action(state)
new_state, reward, done, _ = self.env.step(action)
total_reward += reward
episodes[episode].append((state, action))
state = new_state
if done:
rewards[episode] = total_reward
break
return rewards, episodes
def filter_episodes(self, rewards: list, episodes: list, percentile: float) -> tuple:
reward_bound = np.percentile(rewards, percentile)
x_train, y_train = [], []
for reward, episode in zip(rewards, episodes):
if reward >= reward_bound:
observations = [step[0] for step in episode]
actions = [step[1] for step in episode]
x_train.extend(observations)
y_train.extend(actions)
x_train = np.array(x_train)
y_train = to_categorical(y_train, num_classes=self.num_actions)
return x_train, y_train, reward_bound
def train(self, percentile: float, num_iterations: int, num_episodes: int) -> None:
for _ in range(num_iterations):
rewards, episodes = self.get_samples(num_episodes)
x_train, y_train, reward_bound = self.filter_episodes(rewards, episodes, percentile)
self.model.fit(x=x_train, y=y_train, verbose=0)
reward_mean = np.mean(rewards)
print(f"Reward mean: {reward_mean}, reward bound: {reward_bound}")
if reward_mean > 450:
break
def play(self, num_episodes: int, render: bool = True) -> None:
for episode in range(num_episodes):
state = self.env.reset()
total_reward = 0.0
while True:
if render:
self.env.render()
action = self.get_action(state)
state, reward, done, _ = self.env.step(action)
total_reward += reward
if done:
print(f"Total reward: {total_reward} in episode {episode + 1}")
break
if __name__ == "__main__":
env = gym.make("CartPole-v1")
agent = Agent(env)
agent.train(
percentile=70.0,
num_iterations=15,
num_episodes=100
)
input()
agent.play(num_episodes=10)
| 35.904762 | 96 | 0.58992 |
import gym
import numpy as np
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import to_categorical
class Agent:
def __init__(self, env: gym.Env):
self.env = env
self.num_obersvations = self.env.observation_space.shape[0]
self.num_actions = self.env.action_space.n
self.model = self.build_model()
def build_model(self) -> Sequential:
model = Sequential()
model.add(Dense(units=100, input_dim=self.num_obersvations))
model.add(Activation("relu"))
model.add(Dense(units=self.num_actions))
model.add(Activation("softmax"))
model.summary()
model.compile(
loss="categorical_crossentropy",
optimizer="Adam",
metrics=["accuracy"]
)
return model
def get_action(self, state: np.ndarray) -> int:
state = state.reshape(1, -1)
action_prob = self.model(state).numpy()[0]
action = np.random.choice(self.num_actions, p=action_prob)
return action
def get_samples(self, num_episodes: int) -> tuple:
rewards = [0.0 for _ in range(num_episodes)]
episodes = [[] for _ in range(num_episodes)]
for episode in range(num_episodes):
state = self.env.reset()
total_reward = 0.0
while True:
action = self.get_action(state)
new_state, reward, done, _ = self.env.step(action)
total_reward += reward
episodes[episode].append((state, action))
state = new_state
if done:
rewards[episode] = total_reward
break
return rewards, episodes
def filter_episodes(self, rewards: list, episodes: list, percentile: float) -> tuple:
reward_bound = np.percentile(rewards, percentile)
x_train, y_train = [], []
for reward, episode in zip(rewards, episodes):
if reward >= reward_bound:
observations = [step[0] for step in episode]
actions = [step[1] for step in episode]
x_train.extend(observations)
y_train.extend(actions)
x_train = np.array(x_train)
y_train = to_categorical(y_train, num_classes=self.num_actions)
return x_train, y_train, reward_bound
def train(self, percentile: float, num_iterations: int, num_episodes: int) -> None:
for _ in range(num_iterations):
rewards, episodes = self.get_samples(num_episodes)
x_train, y_train, reward_bound = self.filter_episodes(rewards, episodes, percentile)
self.model.fit(x=x_train, y=y_train, verbose=0)
reward_mean = np.mean(rewards)
print(f"Reward mean: {reward_mean}, reward bound: {reward_bound}")
if reward_mean > 450:
break
def play(self, num_episodes: int, render: bool = True) -> None:
for episode in range(num_episodes):
state = self.env.reset()
total_reward = 0.0
while True:
if render:
self.env.render()
action = self.get_action(state)
state, reward, done, _ = self.env.step(action)
total_reward += reward
if done:
print(f"Total reward: {total_reward} in episode {episode + 1}")
break
if __name__ == "__main__":
env = gym.make("CartPole-v1")
agent = Agent(env)
agent.train(
percentile=70.0,
num_iterations=15,
num_episodes=100
)
input()
agent.play(num_episodes=10)
| true | true |
1c2f162efea13b0be77e2364ee076b27e19c6389 | 1,532 | py | Python | jupyter_sparkmonitor/test_jupyter_sparkmonitor.py | hellysmile/initialization-actions | d390a8981716d4cc2d69291ed844de3b229a689e | [
"Apache-2.0"
] | null | null | null | jupyter_sparkmonitor/test_jupyter_sparkmonitor.py | hellysmile/initialization-actions | d390a8981716d4cc2d69291ed844de3b229a689e | [
"Apache-2.0"
] | null | null | null | jupyter_sparkmonitor/test_jupyter_sparkmonitor.py | hellysmile/initialization-actions | d390a8981716d4cc2d69291ed844de3b229a689e | [
"Apache-2.0"
] | null | null | null | import pkg_resources
from absl.testing import absltest
from absl.testing import parameterized
from integration_tests.dataproc_test_case import DataprocTestCase
class JupyterTestCase(DataprocTestCase):
OPTIONAL_COMPONENTS = ["ANACONDA", "JUPYTER"]
COMPONENT = "sparkmonitor"
INIT_ACTIONS = ["jupyter_sparkmonitor/sparkmonitor.sh"]
def verify_instance(self, name, jupyter_port):
verify_cmd_pip_check = "/opt/conda/default/bin/pip list | grep sparkmonitor"
self.assert_instance_command(name, verify_cmd_pip_check)
verify_cmd = "curl {} -L {}:{} | grep 'Jupyter Notebook'".format(
"--retry 10 --retry-delay 10 --retry-connrefused", name, jupyter_port)
self.assert_instance_command(name, verify_cmd)
@parameterized.parameters(
("SINGLE", ["m"]),
("STANDARD", ["m"]),
)
def test_sparkmonitor(self, configuration, machine_suffixes):
# Use 1.4 version of Dataproc to test because it requires Python 3
if self.getImageVersion() < pkg_resources.parse_version("1.4"):
return
if self.getImageVersion() >= pkg_resources.parse_version("2.0"):
self.skipTest("Not supported in 2.0+ images")
self.createCluster(
configuration,
self.INIT_ACTIONS,
optional_components=self.OPTIONAL_COMPONENTS,
timeout_in_minutes=10)
for machine_suffix in machine_suffixes:
self.verify_instance(
"{}-{}".format(self.getClusterName(), machine_suffix),
jupyter_port="8123")
if __name__ == "__main__":
absltest.main()
| 32.595745 | 80 | 0.711488 | import pkg_resources
from absl.testing import absltest
from absl.testing import parameterized
from integration_tests.dataproc_test_case import DataprocTestCase
class JupyterTestCase(DataprocTestCase):
OPTIONAL_COMPONENTS = ["ANACONDA", "JUPYTER"]
COMPONENT = "sparkmonitor"
INIT_ACTIONS = ["jupyter_sparkmonitor/sparkmonitor.sh"]
def verify_instance(self, name, jupyter_port):
verify_cmd_pip_check = "/opt/conda/default/bin/pip list | grep sparkmonitor"
self.assert_instance_command(name, verify_cmd_pip_check)
verify_cmd = "curl {} -L {}:{} | grep 'Jupyter Notebook'".format(
"--retry 10 --retry-delay 10 --retry-connrefused", name, jupyter_port)
self.assert_instance_command(name, verify_cmd)
@parameterized.parameters(
("SINGLE", ["m"]),
("STANDARD", ["m"]),
)
def test_sparkmonitor(self, configuration, machine_suffixes):
if self.getImageVersion() < pkg_resources.parse_version("1.4"):
return
if self.getImageVersion() >= pkg_resources.parse_version("2.0"):
self.skipTest("Not supported in 2.0+ images")
self.createCluster(
configuration,
self.INIT_ACTIONS,
optional_components=self.OPTIONAL_COMPONENTS,
timeout_in_minutes=10)
for machine_suffix in machine_suffixes:
self.verify_instance(
"{}-{}".format(self.getClusterName(), machine_suffix),
jupyter_port="8123")
if __name__ == "__main__":
absltest.main()
| true | true |
1c2f168df79115e9222c2d243fc9f168fa29b3fc | 6,502 | py | Python | PaddleClas/ppcls/arch/backbone/__init__.py | unseenme/mnasnet-paddle-iv | 3bfc38477f1f80e79d35045a3dadef857fc2f4e5 | [
"Apache-2.0"
] | null | null | null | PaddleClas/ppcls/arch/backbone/__init__.py | unseenme/mnasnet-paddle-iv | 3bfc38477f1f80e79d35045a3dadef857fc2f4e5 | [
"Apache-2.0"
] | null | null | null | PaddleClas/ppcls/arch/backbone/__init__.py | unseenme/mnasnet-paddle-iv | 3bfc38477f1f80e79d35045a3dadef857fc2f4e5 | [
"Apache-2.0"
] | null | null | null | # copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import inspect
from ppcls.arch.backbone.legendary_models.mobilenet_v1 import MobileNetV1_x0_25, MobileNetV1_x0_5, MobileNetV1_x0_75, MobileNetV1
from ppcls.arch.backbone.legendary_models.mobilenet_v3 import MobileNetV3_small_x0_35, MobileNetV3_small_x0_5, MobileNetV3_small_x0_75, MobileNetV3_small_x1_0, MobileNetV3_small_x1_25, MobileNetV3_large_x0_35, MobileNetV3_large_x0_5, MobileNetV3_large_x0_75, MobileNetV3_large_x1_0, MobileNetV3_large_x1_25
from ppcls.arch.backbone.legendary_models.resnet import ResNet18, ResNet18_vd, ResNet34, ResNet34_vd, ResNet50, ResNet50_vd, ResNet101, ResNet101_vd, ResNet152, ResNet152_vd, ResNet200_vd
from ppcls.arch.backbone.legendary_models.vgg import VGG11, VGG13, VGG16, VGG19
from ppcls.arch.backbone.legendary_models.inception_v3 import InceptionV3
from ppcls.arch.backbone.legendary_models.hrnet import HRNet_W18_C, HRNet_W30_C, HRNet_W32_C, HRNet_W40_C, HRNet_W44_C, HRNet_W48_C, HRNet_W60_C, HRNet_W64_C, SE_HRNet_W64_C
from ppcls.arch.backbone.model_zoo.resnet_vc import ResNet50_vc
from ppcls.arch.backbone.model_zoo.resnext import ResNeXt50_32x4d, ResNeXt50_64x4d, ResNeXt101_32x4d, ResNeXt101_64x4d, ResNeXt152_32x4d, ResNeXt152_64x4d
from ppcls.arch.backbone.model_zoo.resnext_vd import ResNeXt50_vd_32x4d, ResNeXt50_vd_64x4d, ResNeXt101_vd_32x4d, ResNeXt101_vd_64x4d, ResNeXt152_vd_32x4d, ResNeXt152_vd_64x4d
from ppcls.arch.backbone.model_zoo.res2net import Res2Net50_26w_4s, Res2Net50_14w_8s
from ppcls.arch.backbone.model_zoo.res2net_vd import Res2Net50_vd_26w_4s, Res2Net101_vd_26w_4s, Res2Net200_vd_26w_4s
from ppcls.arch.backbone.model_zoo.se_resnet_vd import SE_ResNet18_vd, SE_ResNet34_vd, SE_ResNet50_vd
from ppcls.arch.backbone.model_zoo.se_resnext_vd import SE_ResNeXt50_vd_32x4d, SE_ResNeXt50_vd_32x4d, SENet154_vd
from ppcls.arch.backbone.model_zoo.se_resnext import SE_ResNeXt50_32x4d, SE_ResNeXt101_32x4d, SE_ResNeXt152_64x4d
from ppcls.arch.backbone.model_zoo.dpn import DPN68, DPN92, DPN98, DPN107, DPN131
from ppcls.arch.backbone.model_zoo.densenet import DenseNet121, DenseNet161, DenseNet169, DenseNet201, DenseNet264
from ppcls.arch.backbone.model_zoo.efficientnet import EfficientNetB0, EfficientNetB1, EfficientNetB2, EfficientNetB3, EfficientNetB4, EfficientNetB5, EfficientNetB6, EfficientNetB7, EfficientNetB0_small
from ppcls.arch.backbone.model_zoo.resnest import ResNeSt50_fast_1s1x64d, ResNeSt50, ResNeSt101
from ppcls.arch.backbone.model_zoo.googlenet import GoogLeNet
from ppcls.arch.backbone.model_zoo.mobilenet_v2 import MobileNetV2_x0_25, MobileNetV2_x0_5, MobileNetV2_x0_75, MobileNetV2, MobileNetV2_x1_5, MobileNetV2_x2_0
from ppcls.arch.backbone.model_zoo.shufflenet_v2 import ShuffleNetV2_x0_25, ShuffleNetV2_x0_33, ShuffleNetV2_x0_5, ShuffleNetV2_x1_0, ShuffleNetV2_x1_5, ShuffleNetV2_x2_0, ShuffleNetV2_swish
from ppcls.arch.backbone.model_zoo.ghostnet import GhostNet_x0_5, GhostNet_x1_0, GhostNet_x1_3
from ppcls.arch.backbone.model_zoo.alexnet import AlexNet
from ppcls.arch.backbone.model_zoo.inception_v4 import InceptionV4
from ppcls.arch.backbone.model_zoo.xception import Xception41, Xception65, Xception71
from ppcls.arch.backbone.model_zoo.xception_deeplab import Xception41_deeplab, Xception65_deeplab
from ppcls.arch.backbone.model_zoo.resnext101_wsl import ResNeXt101_32x8d_wsl, ResNeXt101_32x16d_wsl, ResNeXt101_32x32d_wsl, ResNeXt101_32x48d_wsl
from ppcls.arch.backbone.model_zoo.squeezenet import SqueezeNet1_0, SqueezeNet1_1
from ppcls.arch.backbone.model_zoo.darknet import DarkNet53
from ppcls.arch.backbone.model_zoo.regnet import RegNetX_200MF, RegNetX_4GF, RegNetX_32GF, RegNetY_200MF, RegNetY_4GF, RegNetY_32GF
from ppcls.arch.backbone.model_zoo.vision_transformer import ViT_small_patch16_224, ViT_base_patch16_224, ViT_base_patch16_384, ViT_base_patch32_384, ViT_large_patch16_224, ViT_large_patch16_384, ViT_large_patch32_384, ViT_huge_patch16_224, ViT_huge_patch32_384
from ppcls.arch.backbone.model_zoo.distilled_vision_transformer import DeiT_tiny_patch16_224, DeiT_small_patch16_224, DeiT_base_patch16_224, DeiT_tiny_distilled_patch16_224, DeiT_small_distilled_patch16_224, DeiT_base_distilled_patch16_224, DeiT_base_patch16_384, DeiT_base_distilled_patch16_384
from ppcls.arch.backbone.model_zoo.swin_transformer import SwinTransformer_tiny_patch4_window7_224, SwinTransformer_small_patch4_window7_224, SwinTransformer_base_patch4_window7_224, SwinTransformer_base_patch4_window12_384, SwinTransformer_large_patch4_window7_224, SwinTransformer_large_patch4_window12_384
from ppcls.arch.backbone.model_zoo.mixnet import MixNet_S, MixNet_M, MixNet_L
from ppcls.arch.backbone.model_zoo.rexnet import ReXNet_1_0, ReXNet_1_3, ReXNet_1_5, ReXNet_2_0, ReXNet_3_0
from ppcls.arch.backbone.model_zoo.gvt import pcpvt_small, pcpvt_base, pcpvt_large, alt_gvt_small, alt_gvt_base, alt_gvt_large
from ppcls.arch.backbone.model_zoo.levit import LeViT_128S, LeViT_128, LeViT_192, LeViT_256, LeViT_384
from ppcls.arch.backbone.model_zoo.dla import DLA34, DLA46_c, DLA46x_c, DLA60, DLA60x, DLA60x_c, DLA102, DLA102x, DLA102x2, DLA169
from ppcls.arch.backbone.model_zoo.rednet import RedNet26, RedNet38, RedNet50, RedNet101, RedNet152
from ppcls.arch.backbone.model_zoo.tnt import TNT_small
from ppcls.arch.backbone.model_zoo.hardnet import HarDNet68, HarDNet85, HarDNet39_ds, HarDNet68_ds
from ppcls.arch.backbone.model_zoo.mnasnet import MnasNet_A1
from ppcls.arch.backbone.variant_models.resnet_variant import ResNet50_last_stage_stride1
def get_apis():
current_func = sys._getframe().f_code.co_name
current_module = sys.modules[__name__]
api = []
for _, obj in inspect.getmembers(current_module,
inspect.isclass) + inspect.getmembers(
current_module, inspect.isfunction):
api.append(obj.__name__)
api.remove(current_func)
return api
__all__ = get_apis()
| 84.441558 | 308 | 0.851277 |
import sys
import inspect
from ppcls.arch.backbone.legendary_models.mobilenet_v1 import MobileNetV1_x0_25, MobileNetV1_x0_5, MobileNetV1_x0_75, MobileNetV1
from ppcls.arch.backbone.legendary_models.mobilenet_v3 import MobileNetV3_small_x0_35, MobileNetV3_small_x0_5, MobileNetV3_small_x0_75, MobileNetV3_small_x1_0, MobileNetV3_small_x1_25, MobileNetV3_large_x0_35, MobileNetV3_large_x0_5, MobileNetV3_large_x0_75, MobileNetV3_large_x1_0, MobileNetV3_large_x1_25
from ppcls.arch.backbone.legendary_models.resnet import ResNet18, ResNet18_vd, ResNet34, ResNet34_vd, ResNet50, ResNet50_vd, ResNet101, ResNet101_vd, ResNet152, ResNet152_vd, ResNet200_vd
from ppcls.arch.backbone.legendary_models.vgg import VGG11, VGG13, VGG16, VGG19
from ppcls.arch.backbone.legendary_models.inception_v3 import InceptionV3
from ppcls.arch.backbone.legendary_models.hrnet import HRNet_W18_C, HRNet_W30_C, HRNet_W32_C, HRNet_W40_C, HRNet_W44_C, HRNet_W48_C, HRNet_W60_C, HRNet_W64_C, SE_HRNet_W64_C
from ppcls.arch.backbone.model_zoo.resnet_vc import ResNet50_vc
from ppcls.arch.backbone.model_zoo.resnext import ResNeXt50_32x4d, ResNeXt50_64x4d, ResNeXt101_32x4d, ResNeXt101_64x4d, ResNeXt152_32x4d, ResNeXt152_64x4d
from ppcls.arch.backbone.model_zoo.resnext_vd import ResNeXt50_vd_32x4d, ResNeXt50_vd_64x4d, ResNeXt101_vd_32x4d, ResNeXt101_vd_64x4d, ResNeXt152_vd_32x4d, ResNeXt152_vd_64x4d
from ppcls.arch.backbone.model_zoo.res2net import Res2Net50_26w_4s, Res2Net50_14w_8s
from ppcls.arch.backbone.model_zoo.res2net_vd import Res2Net50_vd_26w_4s, Res2Net101_vd_26w_4s, Res2Net200_vd_26w_4s
from ppcls.arch.backbone.model_zoo.se_resnet_vd import SE_ResNet18_vd, SE_ResNet34_vd, SE_ResNet50_vd
from ppcls.arch.backbone.model_zoo.se_resnext_vd import SE_ResNeXt50_vd_32x4d, SE_ResNeXt50_vd_32x4d, SENet154_vd
from ppcls.arch.backbone.model_zoo.se_resnext import SE_ResNeXt50_32x4d, SE_ResNeXt101_32x4d, SE_ResNeXt152_64x4d
from ppcls.arch.backbone.model_zoo.dpn import DPN68, DPN92, DPN98, DPN107, DPN131
from ppcls.arch.backbone.model_zoo.densenet import DenseNet121, DenseNet161, DenseNet169, DenseNet201, DenseNet264
from ppcls.arch.backbone.model_zoo.efficientnet import EfficientNetB0, EfficientNetB1, EfficientNetB2, EfficientNetB3, EfficientNetB4, EfficientNetB5, EfficientNetB6, EfficientNetB7, EfficientNetB0_small
from ppcls.arch.backbone.model_zoo.resnest import ResNeSt50_fast_1s1x64d, ResNeSt50, ResNeSt101
from ppcls.arch.backbone.model_zoo.googlenet import GoogLeNet
from ppcls.arch.backbone.model_zoo.mobilenet_v2 import MobileNetV2_x0_25, MobileNetV2_x0_5, MobileNetV2_x0_75, MobileNetV2, MobileNetV2_x1_5, MobileNetV2_x2_0
from ppcls.arch.backbone.model_zoo.shufflenet_v2 import ShuffleNetV2_x0_25, ShuffleNetV2_x0_33, ShuffleNetV2_x0_5, ShuffleNetV2_x1_0, ShuffleNetV2_x1_5, ShuffleNetV2_x2_0, ShuffleNetV2_swish
from ppcls.arch.backbone.model_zoo.ghostnet import GhostNet_x0_5, GhostNet_x1_0, GhostNet_x1_3
from ppcls.arch.backbone.model_zoo.alexnet import AlexNet
from ppcls.arch.backbone.model_zoo.inception_v4 import InceptionV4
from ppcls.arch.backbone.model_zoo.xception import Xception41, Xception65, Xception71
from ppcls.arch.backbone.model_zoo.xception_deeplab import Xception41_deeplab, Xception65_deeplab
from ppcls.arch.backbone.model_zoo.resnext101_wsl import ResNeXt101_32x8d_wsl, ResNeXt101_32x16d_wsl, ResNeXt101_32x32d_wsl, ResNeXt101_32x48d_wsl
from ppcls.arch.backbone.model_zoo.squeezenet import SqueezeNet1_0, SqueezeNet1_1
from ppcls.arch.backbone.model_zoo.darknet import DarkNet53
from ppcls.arch.backbone.model_zoo.regnet import RegNetX_200MF, RegNetX_4GF, RegNetX_32GF, RegNetY_200MF, RegNetY_4GF, RegNetY_32GF
from ppcls.arch.backbone.model_zoo.vision_transformer import ViT_small_patch16_224, ViT_base_patch16_224, ViT_base_patch16_384, ViT_base_patch32_384, ViT_large_patch16_224, ViT_large_patch16_384, ViT_large_patch32_384, ViT_huge_patch16_224, ViT_huge_patch32_384
from ppcls.arch.backbone.model_zoo.distilled_vision_transformer import DeiT_tiny_patch16_224, DeiT_small_patch16_224, DeiT_base_patch16_224, DeiT_tiny_distilled_patch16_224, DeiT_small_distilled_patch16_224, DeiT_base_distilled_patch16_224, DeiT_base_patch16_384, DeiT_base_distilled_patch16_384
from ppcls.arch.backbone.model_zoo.swin_transformer import SwinTransformer_tiny_patch4_window7_224, SwinTransformer_small_patch4_window7_224, SwinTransformer_base_patch4_window7_224, SwinTransformer_base_patch4_window12_384, SwinTransformer_large_patch4_window7_224, SwinTransformer_large_patch4_window12_384
from ppcls.arch.backbone.model_zoo.mixnet import MixNet_S, MixNet_M, MixNet_L
from ppcls.arch.backbone.model_zoo.rexnet import ReXNet_1_0, ReXNet_1_3, ReXNet_1_5, ReXNet_2_0, ReXNet_3_0
from ppcls.arch.backbone.model_zoo.gvt import pcpvt_small, pcpvt_base, pcpvt_large, alt_gvt_small, alt_gvt_base, alt_gvt_large
from ppcls.arch.backbone.model_zoo.levit import LeViT_128S, LeViT_128, LeViT_192, LeViT_256, LeViT_384
from ppcls.arch.backbone.model_zoo.dla import DLA34, DLA46_c, DLA46x_c, DLA60, DLA60x, DLA60x_c, DLA102, DLA102x, DLA102x2, DLA169
from ppcls.arch.backbone.model_zoo.rednet import RedNet26, RedNet38, RedNet50, RedNet101, RedNet152
from ppcls.arch.backbone.model_zoo.tnt import TNT_small
from ppcls.arch.backbone.model_zoo.hardnet import HarDNet68, HarDNet85, HarDNet39_ds, HarDNet68_ds
from ppcls.arch.backbone.model_zoo.mnasnet import MnasNet_A1
from ppcls.arch.backbone.variant_models.resnet_variant import ResNet50_last_stage_stride1
def get_apis():
current_func = sys._getframe().f_code.co_name
current_module = sys.modules[__name__]
api = []
for _, obj in inspect.getmembers(current_module,
inspect.isclass) + inspect.getmembers(
current_module, inspect.isfunction):
api.append(obj.__name__)
api.remove(current_func)
return api
__all__ = get_apis()
| true | true |
1c2f17169652f1a2e25d41d17d8f7c398981ef1f | 3,546 | py | Python | salt/beacons/log_beacon.py | fake-name/salt | d8f04936e4407f51946e32e8166159778f6c31a5 | [
"Apache-2.0"
] | 1 | 2021-09-06T00:14:04.000Z | 2021-09-06T00:14:04.000Z | salt/beacons/log_beacon.py | fake-name/salt | d8f04936e4407f51946e32e8166159778f6c31a5 | [
"Apache-2.0"
] | 2 | 2021-04-30T21:17:57.000Z | 2021-12-13T20:40:23.000Z | salt/beacons/log_beacon.py | fake-name/salt | d8f04936e4407f51946e32e8166159778f6c31a5 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Beacon to fire events at specific log messages.
.. versionadded:: 2017.7.0
"""
# Import Python libs
from __future__ import absolute_import, unicode_literals
import logging
# Import salt libs
import salt.utils.files
import salt.utils.platform
from salt.ext.six.moves import map
try:
import re
HAS_REGEX = True
except ImportError:
HAS_REGEX = False
__virtualname__ = "log"
LOC_KEY = "log.loc"
SKEL = {}
SKEL["tag"] = ""
SKEL["match"] = "no"
SKEL["raw"] = ""
SKEL["error"] = ""
log = logging.getLogger(__name__)
def __virtual__():
if not salt.utils.platform.is_windows() and HAS_REGEX:
return __virtualname__
return False
def _get_loc():
"""
return the active file location
"""
if LOC_KEY in __context__:
return __context__[LOC_KEY]
def validate(config):
"""
Validate the beacon configuration
"""
_config = {}
list(map(_config.update, config))
# Configuration for log beacon should be a list of dicts
if not isinstance(config, list):
return False, ("Configuration for log beacon must be a list.")
if "file" not in _config:
return False, ("Configuration for log beacon must contain file option.")
return True, "Valid beacon configuration"
# TODO: match values should be returned in the event
def beacon(config):
"""
Read the log file and return match whole string
.. code-block:: yaml
beacons:
log:
- file: <path>
- tags:
<tag>:
regex: <pattern>
.. note::
regex matching is based on the `re`_ module
.. _re: https://docs.python.org/3.6/library/re.html#regular-expression-syntax
"""
_config = {}
list(map(_config.update, config))
ret = []
if "file" not in _config:
event = SKEL.copy()
event["tag"] = "global"
event["error"] = "file not defined in config"
ret.append(event)
return ret
with salt.utils.files.fopen(_config["file"], "r") as fp_:
loc = __context__.get(LOC_KEY, 0)
if loc == 0:
fp_.seek(0, 2)
__context__[LOC_KEY] = fp_.tell()
return ret
fp_.seek(0, 2)
__context__[LOC_KEY] = fp_.tell()
fp_.seek(loc)
txt = fp_.read()
log.info("txt %s", txt)
d = {}
for tag in _config.get("tags", {}):
if "regex" not in _config["tags"][tag]:
continue
if len(_config["tags"][tag]["regex"]) < 1:
continue
try:
d[tag] = re.compile(r"{0}".format(_config["tags"][tag]["regex"]))
except Exception as e: # pylint: disable=broad-except
event = SKEL.copy()
event["tag"] = tag
event["error"] = "bad regex"
ret.append(event)
for line in txt.splitlines():
for tag, reg in d.items():
try:
m = reg.match(line)
if m:
event = SKEL.copy()
event["tag"] = tag
event["raw"] = line
event["match"] = "yes"
ret.append(event)
except Exception: # pylint: disable=broad-except
event = SKEL.copy()
event["tag"] = tag
event["error"] = "bad match"
ret.append(event)
return ret
| 24.455172 | 81 | 0.529893 |
from __future__ import absolute_import, unicode_literals
import logging
import salt.utils.files
import salt.utils.platform
from salt.ext.six.moves import map
try:
import re
HAS_REGEX = True
except ImportError:
HAS_REGEX = False
__virtualname__ = "log"
LOC_KEY = "log.loc"
SKEL = {}
SKEL["tag"] = ""
SKEL["match"] = "no"
SKEL["raw"] = ""
SKEL["error"] = ""
log = logging.getLogger(__name__)
def __virtual__():
if not salt.utils.platform.is_windows() and HAS_REGEX:
return __virtualname__
return False
def _get_loc():
if LOC_KEY in __context__:
return __context__[LOC_KEY]
def validate(config):
_config = {}
list(map(_config.update, config))
if not isinstance(config, list):
return False, ("Configuration for log beacon must be a list.")
if "file" not in _config:
return False, ("Configuration for log beacon must contain file option.")
return True, "Valid beacon configuration"
def beacon(config):
_config = {}
list(map(_config.update, config))
ret = []
if "file" not in _config:
event = SKEL.copy()
event["tag"] = "global"
event["error"] = "file not defined in config"
ret.append(event)
return ret
with salt.utils.files.fopen(_config["file"], "r") as fp_:
loc = __context__.get(LOC_KEY, 0)
if loc == 0:
fp_.seek(0, 2)
__context__[LOC_KEY] = fp_.tell()
return ret
fp_.seek(0, 2)
__context__[LOC_KEY] = fp_.tell()
fp_.seek(loc)
txt = fp_.read()
log.info("txt %s", txt)
d = {}
for tag in _config.get("tags", {}):
if "regex" not in _config["tags"][tag]:
continue
if len(_config["tags"][tag]["regex"]) < 1:
continue
try:
d[tag] = re.compile(r"{0}".format(_config["tags"][tag]["regex"]))
except Exception as e:
event = SKEL.copy()
event["tag"] = tag
event["error"] = "bad regex"
ret.append(event)
for line in txt.splitlines():
for tag, reg in d.items():
try:
m = reg.match(line)
if m:
event = SKEL.copy()
event["tag"] = tag
event["raw"] = line
event["match"] = "yes"
ret.append(event)
except Exception:
event = SKEL.copy()
event["tag"] = tag
event["error"] = "bad match"
ret.append(event)
return ret
| true | true |
1c2f181ffbe17bdbddf3d9d3184668f45a5503f0 | 1,435 | py | Python | otwstest/conflict/__init__.py | OpenTreeOfLife/test-ot-ws | eee0eeb4150e942bea32e9131d20c01ad9666757 | [
"BSD-2-Clause"
] | null | null | null | otwstest/conflict/__init__.py | OpenTreeOfLife/test-ot-ws | eee0eeb4150e942bea32e9131d20c01ad9666757 | [
"BSD-2-Clause"
] | 4 | 2019-04-10T19:44:00.000Z | 2020-04-21T19:50:29.000Z | otwstest/conflict/__init__.py | OpenTreeOfLife/test-ot-ws | eee0eeb4150e942bea32e9131d20c01ad9666757 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# !/usr/bin/env python
# -*- coding: utf-8 -*-
import re
from otwstest import is_str_type, all_api_versions
import otwstest.schema.tnrs as tnrs
from otwstest.schema.taxonomy.taxon import get_ott_id_property
@all_api_versions
def test_conflict_status_synth(outcome): # Depends on study ot_1501 in phylesystem
url = outcome.make_url('conflict/conflict-status')
result = outcome.do_http_json(url, 'POST', data={'tree1': 'ot_1501#tree1',
'tree2': 'synth'})
@all_api_versions
def test_conflict_status_ott(outcome): # Depends on study ot_1501 in phylesystem
url = outcome.make_url('conflict/conflict-status')
result = outcome.do_http_json(url, 'POST', data={'tree1': 'ot_1501#tree1',
'tree2': 'ott'})
# The website is using the GET versions, which are translated to POST in ws_wrapper.
@all_api_versions
def test_conflict_status_synth_get(outcome): # Depends on study ot_1501 in phylesystem
url = outcome.make_url('conflict/conflict-status?tree1=ot_1501%23tree1&tree2=synth')
result = outcome.do_http_json(url, 'GET')
@all_api_versions
def test_conflict_status_ott_get(outcome): # Depends on study ot_1501 in phylesystem
url = outcome.make_url('conflict/conflict-status?tree1=ot_1501%23tree1&tree2=ott')
result = outcome.do_http_json(url, 'GET')
| 41 | 88 | 0.701045 |
import re
from otwstest import is_str_type, all_api_versions
import otwstest.schema.tnrs as tnrs
from otwstest.schema.taxonomy.taxon import get_ott_id_property
@all_api_versions
def test_conflict_status_synth(outcome):
url = outcome.make_url('conflict/conflict-status')
result = outcome.do_http_json(url, 'POST', data={'tree1': 'ot_1501#tree1',
'tree2': 'synth'})
@all_api_versions
def test_conflict_status_ott(outcome):
url = outcome.make_url('conflict/conflict-status')
result = outcome.do_http_json(url, 'POST', data={'tree1': 'ot_1501#tree1',
'tree2': 'ott'})
@all_api_versions
def test_conflict_status_synth_get(outcome):
url = outcome.make_url('conflict/conflict-status?tree1=ot_1501%23tree1&tree2=synth')
result = outcome.do_http_json(url, 'GET')
@all_api_versions
def test_conflict_status_ott_get(outcome):
url = outcome.make_url('conflict/conflict-status?tree1=ot_1501%23tree1&tree2=ott')
result = outcome.do_http_json(url, 'GET')
| true | true |
1c2f18bdcc169f658acad2c313a665e9715bf886 | 319 | py | Python | src/f2py/importTest.py | friedenhe/idwarp | 76dce98b244810cb32f6abadcd2454fc22074b59 | [
"Apache-2.0"
] | null | null | null | src/f2py/importTest.py | friedenhe/idwarp | 76dce98b244810cb32f6abadcd2454fc22074b59 | [
"Apache-2.0"
] | null | null | null | src/f2py/importTest.py | friedenhe/idwarp | 76dce98b244810cb32f6abadcd2454fc22074b59 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python
import sys
name = "idwarp"
print("Testing if module %s can be imported..." % name)
import_cmd = "import %s" % name
try:
exec(import_cmd)
except ImportError:
print("Error: idwarp was not imported correctly")
sys.exit(1)
# end try
print("Module %s was successfully imported." % name)
| 21.266667 | 55 | 0.68652 |
import sys
name = "idwarp"
print("Testing if module %s can be imported..." % name)
import_cmd = "import %s" % name
try:
exec(import_cmd)
except ImportError:
print("Error: idwarp was not imported correctly")
sys.exit(1)
print("Module %s was successfully imported." % name)
| true | true |
1c2f18f0f69e93b068f718d5da8b83873e8e44f0 | 336 | py | Python | experiments/jacobi-1d/tmp_files/1959.py | LoopTilingBenchmark/benchmark | 52a3d2e70216552a498fd91de02a2fa9cb62122c | [
"BSD-2-Clause"
] | null | null | null | experiments/jacobi-1d/tmp_files/1959.py | LoopTilingBenchmark/benchmark | 52a3d2e70216552a498fd91de02a2fa9cb62122c | [
"BSD-2-Clause"
] | null | null | null | experiments/jacobi-1d/tmp_files/1959.py | LoopTilingBenchmark/benchmark | 52a3d2e70216552a498fd91de02a2fa9cb62122c | [
"BSD-2-Clause"
] | null | null | null | from chill import *
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/jacobi-1d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/jacobi-1d/tmp_files/1959.c')
procedure('kernel_jacobi_1d')
loop(0)
known(' n > 2 ')
tile(0,2,16,2)
tile(1,2,16,2)
| 30.545455 | 118 | 0.764881 | from chill import *
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/jacobi-1d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/jacobi-1d/tmp_files/1959.c')
procedure('kernel_jacobi_1d')
loop(0)
known(' n > 2 ')
tile(0,2,16,2)
tile(1,2,16,2)
| true | true |
1c2f1becf02f9bebf5f0826ef7992a68ad8b6b49 | 90 | py | Python | yt/frontends/gizmo/api.py | aemerick/yt | 984484616d75c6d7603e71b9d45c5d617705a0e5 | [
"BSD-3-Clause-Clear"
] | null | null | null | yt/frontends/gizmo/api.py | aemerick/yt | 984484616d75c6d7603e71b9d45c5d617705a0e5 | [
"BSD-3-Clause-Clear"
] | null | null | null | yt/frontends/gizmo/api.py | aemerick/yt | 984484616d75c6d7603e71b9d45c5d617705a0e5 | [
"BSD-3-Clause-Clear"
] | null | null | null | from .data_structures import \
GizmoDataset
from .fields import \
GizmoFieldInfo
| 15 | 30 | 0.744444 | from .data_structures import \
GizmoDataset
from .fields import \
GizmoFieldInfo
| true | true |
1c2f1c04218c3c5fef5f3291c23141f001befb38 | 28,125 | py | Python | saleor/lib/python3.7/site-packages/promise/promise.py | cxsper/saleor | 5566ddcdaf8f72ba872eca869798e66eb9cdae44 | [
"BSD-3-Clause"
] | 2 | 2019-12-06T15:40:14.000Z | 2020-07-29T21:30:35.000Z | saleor/lib/python3.7/site-packages/promise/promise.py | cxsper/saleor | 5566ddcdaf8f72ba872eca869798e66eb9cdae44 | [
"BSD-3-Clause"
] | 13 | 2020-03-24T17:53:51.000Z | 2022-02-10T20:01:14.000Z | saleor/lib/python3.7/site-packages/promise/promise.py | cxsper/saleor | 5566ddcdaf8f72ba872eca869798e66eb9cdae44 | [
"BSD-3-Clause"
] | null | null | null | from collections import namedtuple
from functools import partial, wraps
from sys import version_info, exc_info
from threading import RLock
from types import TracebackType
from six import reraise # type: ignore
from .async_ import Async
from .compat import (
Future,
ensure_future,
iscoroutine, # type: ignore
iterate_promise,
) # type: ignore
from .utils import deprecated, integer_types, string_types, text_type, binary_type, warn
from .promise_list import PromiseList
from .schedulers.immediate import ImmediateScheduler
from typing import TypeVar, Generic
# from .schedulers.gevent import GeventScheduler
# from .schedulers.asyncio import AsyncioScheduler
# from .schedulers.thread import ThreadScheduler
if False:
from typing import (
Type,
List,
Any,
Callable,
Dict,
Iterator,
Optional, # flake8: noqa
Tuple,
Union,
Generic,
Hashable,
)
default_scheduler = ImmediateScheduler()
async_instance = Async()
def get_default_scheduler():
# type: () -> ImmediateScheduler
return default_scheduler
def set_default_scheduler(scheduler):
global default_scheduler
default_scheduler = scheduler
IS_PYTHON2 = version_info[0] == 2
DEFAULT_TIMEOUT = None # type: Optional[float]
MAX_LENGTH = 0xFFFF | 0
CALLBACK_SIZE = 3
CALLBACK_FULFILL_OFFSET = 0
CALLBACK_REJECT_OFFSET = 1
CALLBACK_PROMISE_OFFSET = 2
BASE_TYPES = set(
integer_types
+ string_types
+ (bool, float, complex, tuple, list, dict, text_type, binary_type)
)
# These are the potential states of a promise
STATE_PENDING = -1
STATE_REJECTED = 0
STATE_FULFILLED = 1
def make_self_resolution_error():
# type: () -> TypeError
return TypeError("Promise is self")
def try_catch(handler, *args, **kwargs):
# type: (Callable, Any, Any) -> Union[Tuple[Any, None], Tuple[None, Tuple[Exception, Optional[TracebackType]]]]
try:
return (handler(*args, **kwargs), None)
except Exception as e:
tb = exc_info()[2]
return (None, (e, tb))
T = TypeVar("T")
S = TypeVar("S", contravariant=True)
class Promise(Generic[T]):
"""
This is the Promise class that complies
Promises/A+ specification.
"""
# __slots__ = ('_state', '_is_final', '_is_bound', '_is_following', '_is_async_guaranteed',
# '_length', '_handlers', '_fulfillment_handler0', '_rejection_handler0', '_promise0',
# '_is_waiting', '_future', '_trace', '_event_instance'
# )
_state = STATE_PENDING # type: int
_is_final = False
_is_bound = False
_is_following = False
_is_async_guaranteed = False
_length = 0
_handlers = None # type: Dict[int, Union[Callable, Promise, None]]
_fulfillment_handler0 = None # type: Any
_rejection_handler0 = None # type: Any
_promise0 = None # type: Optional[Promise]
_future = None # type: Future
_traceback = None # type: Optional[TracebackType]
# _trace = None
_is_waiting = False
_scheduler = None
def __init__(self, executor=None, scheduler=None):
# type: (Optional[Callable[[Callable[[T], None], Callable[[Exception], None]], None]], Any) -> None
"""
Initialize the Promise into a pending state.
"""
# self._state = STATE_PENDING # type: int
# self._is_final = False
# self._is_bound = False
# self._is_following = False
# self._is_async_guaranteed = False
# self._length = 0
# self._handlers = None # type: Dict[int, Union[Callable, None]]
# self._fulfillment_handler0 = None # type: Union[Callable, partial]
# self._rejection_handler0 = None # type: Union[Callable, partial]
# self._promise0 = None # type: Promise
# self._future = None # type: Future
# self._event_instance = None # type: Event
# self._is_waiting = False
self._scheduler = scheduler
if executor is not None:
self._resolve_from_executor(executor)
# For compatibility reasons
# self.reject = self._deprecated_reject
# self.resolve = self._deprecated_resolve
@property
def scheduler(self):
# type: () -> ImmediateScheduler
return self._scheduler or default_scheduler
@property
def future(self):
# type: (Promise) -> Future
if not self._future:
self._future = Future() # type: ignore
self._then( # type: ignore
self._future.set_result, self._future.set_exception
)
return self._future
def __iter__(self):
# type: () -> Iterator
return iterate_promise(self._target()) # type: ignore
__await__ = __iter__
@deprecated(
"Rejecting directly in a Promise instance is deprecated, as Promise.reject() is now a class method. "
"Please use promise.do_reject() instead.",
name="reject",
)
def _deprecated_reject(self, e):
self.do_reject(e)
@deprecated(
"Resolving directly in a Promise instance is deprecated, as Promise.resolve() is now a class method. "
"Please use promise.do_resolve() instead.",
name="resolve",
)
def _deprecated_resolve(self, value):
self.do_resolve(value)
def _resolve_callback(self, value):
# type: (T) -> None
if value is self:
return self._reject_callback(make_self_resolution_error(), False)
if not self.is_thenable(value):
return self._fulfill(value)
promise = self._try_convert_to_promise(value)._target()
if promise == self:
self._reject(make_self_resolution_error())
return
if promise._state == STATE_PENDING:
len = self._length
if len > 0:
promise._migrate_callback0(self)
for i in range(1, len):
promise._migrate_callback_at(self, i)
self._is_following = True
self._length = 0
self._set_followee(promise)
elif promise._state == STATE_FULFILLED:
self._fulfill(promise._value())
elif promise._state == STATE_REJECTED:
self._reject(promise._reason(), promise._target()._traceback)
def _settled_value(self, _raise=False):
# type: (bool) -> Any
assert not self._is_following
if self._state == STATE_FULFILLED:
return self._rejection_handler0
elif self._state == STATE_REJECTED:
if _raise:
raise_val = self._fulfillment_handler0
reraise(type(raise_val), raise_val, self._traceback)
return self._fulfillment_handler0
def _fulfill(self, value):
# type: (T) -> None
if value is self:
err = make_self_resolution_error()
# self._attach_extratrace(err)
return self._reject(err)
self._state = STATE_FULFILLED
self._rejection_handler0 = value
if self._length > 0:
if self._is_async_guaranteed:
self._settle_promises()
else:
async_instance.settle_promises(self)
def _reject(self, reason, traceback=None):
# type: (Exception, Optional[TracebackType]) -> None
self._state = STATE_REJECTED
self._fulfillment_handler0 = reason
self._traceback = traceback
if self._is_final:
assert self._length == 0
async_instance.fatal_error(reason, self.scheduler)
return
if self._length > 0:
async_instance.settle_promises(self)
else:
self._ensure_possible_rejection_handled()
if self._is_async_guaranteed:
self._settle_promises()
else:
async_instance.settle_promises(self)
def _ensure_possible_rejection_handled(self):
# type: () -> None
# self._rejection_is_unhandled = True
# async_instance.invoke_later(self._notify_unhandled_rejection, self)
pass
def _reject_callback(self, reason, synchronous=False, traceback=None):
# type: (Exception, bool, Optional[TracebackType]) -> None
assert isinstance(
reason, Exception
), "A promise was rejected with a non-error: {}".format(reason)
# trace = ensure_error_object(reason)
# has_stack = trace is reason
# self._attach_extratrace(trace, synchronous and has_stack)
self._reject(reason, traceback)
def _clear_callback_data_index_at(self, index):
# type: (int) -> None
assert not self._is_following
assert index > 0
base = index * CALLBACK_SIZE - CALLBACK_SIZE
self._handlers[base + CALLBACK_PROMISE_OFFSET] = None
self._handlers[base + CALLBACK_FULFILL_OFFSET] = None
self._handlers[base + CALLBACK_REJECT_OFFSET] = None
def _fulfill_promises(self, length, value):
# type: (int, T) -> None
for i in range(1, length):
handler = self._fulfillment_handler_at(i)
promise = self._promise_at(i)
self._clear_callback_data_index_at(i)
self._settle_promise(promise, handler, value, None)
def _reject_promises(self, length, reason):
# type: (int, Exception) -> None
for i in range(1, length):
handler = self._rejection_handler_at(i)
promise = self._promise_at(i)
self._clear_callback_data_index_at(i)
self._settle_promise(promise, handler, reason, None)
def _settle_promise(
self,
promise, # type: Optional[Promise]
handler, # type: Optional[Callable]
value, # type: Union[T, Exception]
traceback, # type: Optional[TracebackType]
):
# type: (...) -> None
assert not self._is_following
is_promise = isinstance(promise, self.__class__)
async_guaranteed = self._is_async_guaranteed
if callable(handler):
if not is_promise:
handler(value) # , promise
else:
if async_guaranteed:
promise._is_async_guaranteed = True # type: ignore
self._settle_promise_from_handler( # type: ignore
handler, value, promise
) # type: ignore
elif is_promise:
if async_guaranteed:
promise._is_async_guaranteed = True # type: ignore
if self._state == STATE_FULFILLED:
promise._fulfill(value) # type: ignore
else:
promise._reject(value, self._traceback) # type: ignore
def _settle_promise0(
self,
handler, # type: Optional[Callable]
value, # type: Any
traceback, # type: Optional[TracebackType]
):
# type: (...) -> None
promise = self._promise0
self._promise0 = None
self._settle_promise(promise, handler, value, traceback) # type: ignore
def _settle_promise_from_handler(self, handler, value, promise):
# type: (Callable, Any, Promise) -> None
value, error_with_tb = try_catch(handler, value) # , promise
if error_with_tb:
error, tb = error_with_tb
promise._reject_callback(error, False, tb)
else:
promise._resolve_callback(value)
def _promise_at(self, index):
# type: (int) -> Optional[Promise]
assert index > 0
assert not self._is_following
return self._handlers.get( # type: ignore
index * CALLBACK_SIZE - CALLBACK_SIZE + CALLBACK_PROMISE_OFFSET
)
def _fulfillment_handler_at(self, index):
# type: (int) -> Optional[Callable]
assert not self._is_following
assert index > 0
return self._handlers.get( # type: ignore
index * CALLBACK_SIZE - CALLBACK_SIZE + CALLBACK_FULFILL_OFFSET
)
def _rejection_handler_at(self, index):
# type: (int) -> Optional[Callable]
assert not self._is_following
assert index > 0
return self._handlers.get( # type: ignore
index * CALLBACK_SIZE - CALLBACK_SIZE + CALLBACK_REJECT_OFFSET
)
def _migrate_callback0(self, follower):
# type: (Promise) -> None
self._add_callbacks(
follower._fulfillment_handler0,
follower._rejection_handler0,
follower._promise0,
)
def _migrate_callback_at(self, follower, index):
self._add_callbacks(
follower._fulfillment_handler_at(index),
follower._rejection_handler_at(index),
follower._promise_at(index),
)
def _add_callbacks(
self,
fulfill, # type: Optional[Callable]
reject, # type: Optional[Callable]
promise, # type: Optional[Promise]
):
# type: (...) -> int
assert not self._is_following
if self._handlers is None:
self._handlers = {}
index = self._length
if index > MAX_LENGTH - CALLBACK_SIZE:
index = 0
self._length = 0
if index == 0:
assert not self._promise0
assert not self._fulfillment_handler0
assert not self._rejection_handler0
self._promise0 = promise
if callable(fulfill):
self._fulfillment_handler0 = fulfill
if callable(reject):
self._rejection_handler0 = reject
else:
base = index * CALLBACK_SIZE - CALLBACK_SIZE
assert (base + CALLBACK_PROMISE_OFFSET) not in self._handlers
assert (base + CALLBACK_FULFILL_OFFSET) not in self._handlers
assert (base + CALLBACK_REJECT_OFFSET) not in self._handlers
self._handlers[base + CALLBACK_PROMISE_OFFSET] = promise
if callable(fulfill):
self._handlers[base + CALLBACK_FULFILL_OFFSET] = fulfill
if callable(reject):
self._handlers[base + CALLBACK_REJECT_OFFSET] = reject
self._length = index + 1
return index
def _target(self):
# type: () -> Promise
ret = self
while ret._is_following:
ret = ret._followee()
return ret
def _followee(self):
# type: () -> Promise
assert self._is_following
assert isinstance(self._rejection_handler0, Promise)
return self._rejection_handler0
def _set_followee(self, promise):
# type: (Promise) -> None
assert self._is_following
assert not isinstance(self._rejection_handler0, Promise)
self._rejection_handler0 = promise
def _settle_promises(self):
# type: () -> None
length = self._length
if length > 0:
if self._state == STATE_REJECTED:
reason = self._fulfillment_handler0
traceback = self._traceback
self._settle_promise0(self._rejection_handler0, reason, traceback)
self._reject_promises(length, reason)
else:
value = self._rejection_handler0
self._settle_promise0(self._fulfillment_handler0, value, None)
self._fulfill_promises(length, value)
self._length = 0
def _resolve_from_executor(self, executor):
# type: (Callable[[Callable[[T], None], Callable[[Exception], None]], None]) -> None
# self._capture_stacktrace()
synchronous = True
def resolve(value):
# type: (T) -> None
self._resolve_callback(value)
def reject(reason, traceback=None):
# type: (Exception, TracebackType) -> None
self._reject_callback(reason, synchronous, traceback)
error = None
traceback = None
try:
executor(resolve, reject)
except Exception as e:
traceback = exc_info()[2]
error = e
synchronous = False
if error is not None:
self._reject_callback(error, True, traceback)
@classmethod
def wait(cls, promise, timeout=None):
# type: (Promise, Optional[float]) -> None
async_instance.wait(promise, timeout)
def _wait(self, timeout=None):
# type: (Optional[float]) -> None
self.wait(self, timeout)
def get(self, timeout=None):
# type: (Optional[float]) -> T
target = self._target()
self._wait(timeout or DEFAULT_TIMEOUT)
return self._target_settled_value(_raise=True)
def _target_settled_value(self, _raise=False):
# type: (bool) -> Any
return self._target()._settled_value(_raise)
_value = _reason = _target_settled_value
value = reason = property(_target_settled_value)
def __repr__(self):
# type: () -> str
hex_id = hex(id(self))
if self._is_following:
return "<Promise at {} following {}>".format(hex_id, self._target())
state = self._state
if state == STATE_PENDING:
return "<Promise at {} pending>".format(hex_id)
elif state == STATE_FULFILLED:
return "<Promise at {} fulfilled with {}>".format(
hex_id, repr(self._rejection_handler0)
)
elif state == STATE_REJECTED:
return "<Promise at {} rejected with {}>".format(
hex_id, repr(self._fulfillment_handler0)
)
return "<Promise unknown>"
@property
def is_pending(self):
# type: (Promise) -> bool
"""Indicate whether the Promise is still pending. Could be wrong the moment the function returns."""
return self._target()._state == STATE_PENDING
@property
def is_fulfilled(self):
# type: (Promise) -> bool
"""Indicate whether the Promise has been fulfilled. Could be wrong the moment the function returns."""
return self._target()._state == STATE_FULFILLED
@property
def is_rejected(self):
# type: (Promise) -> bool
"""Indicate whether the Promise has been rejected. Could be wrong the moment the function returns."""
return self._target()._state == STATE_REJECTED
def catch(self, on_rejection):
# type: (Promise, Callable[[Exception], Any]) -> Promise
"""
This method returns a Promise and deals with rejected cases only.
It behaves the same as calling Promise.then(None, on_rejection).
"""
return self.then(None, on_rejection)
def _then(
self,
did_fulfill=None, # type: Optional[Callable[[T], S]]
did_reject=None, # type: Optional[Callable[[Exception], S]]
):
# type: (...) -> Promise[S]
promise = self.__class__()
target = self._target()
state = target._state
if state == STATE_PENDING:
target._add_callbacks(did_fulfill, did_reject, promise)
else:
traceback = None
if state == STATE_FULFILLED:
value = target._rejection_handler0
handler = did_fulfill
elif state == STATE_REJECTED:
value = target._fulfillment_handler0
traceback = target._traceback
handler = did_reject # type: ignore
# target._rejection_is_unhandled = False
async_instance.invoke(
partial(target._settle_promise, promise, handler, value, traceback),
promise.scheduler
# target._settle_promise instead?
# settler,
# target,
)
return promise
fulfill = _resolve_callback
do_resolve = _resolve_callback
do_reject = _reject_callback
def then(self, did_fulfill=None, did_reject=None):
# type: (Promise, Callable[[T], S], Optional[Callable[[Exception], S]]) -> Promise[S]
"""
This method takes two optional arguments. The first argument
is used if the "self promise" is fulfilled and the other is
used if the "self promise" is rejected. In either case, this
method returns another promise that effectively represents
the result of either the first of the second argument (in the
case that the "self promise" is fulfilled or rejected,
respectively).
Each argument can be either:
* None - Meaning no action is taken
* A function - which will be called with either the value
of the "self promise" or the reason for rejection of
the "self promise". The function may return:
* A value - which will be used to fulfill the promise
returned by this method.
* A promise - which, when fulfilled or rejected, will
cascade its value or reason to the promise returned
by this method.
* A value - which will be assigned as either the value
or the reason for the promise returned by this method
when the "self promise" is either fulfilled or rejected,
respectively.
:type success: (Any) -> object
:type failure: (Any) -> object
:rtype : Promise
"""
return self._then(did_fulfill, did_reject)
def done(self, did_fulfill=None, did_reject=None):
# type: (Optional[Callable], Optional[Callable]) -> None
promise = self._then(did_fulfill, did_reject)
promise._is_final = True
def done_all(self, handlers=None):
# type: (Promise, Optional[List[Union[Dict[str, Optional[Callable]], Tuple[Callable, Callable], Callable]]]) -> None
"""
:type handlers: list[(Any) -> object] | list[((Any) -> object, (Any) -> object)]
"""
if not handlers:
return
for handler in handlers:
if isinstance(handler, tuple):
s, f = handler
self.done(s, f)
elif isinstance(handler, dict):
s = handler.get("success") # type: ignore
f = handler.get("failure") # type: ignore
self.done(s, f)
else:
self.done(handler)
def then_all(self, handlers=None):
# type: (Promise, List[Callable]) -> List[Promise]
"""
Utility function which calls 'then' for each handler provided. Handler can either
be a function in which case it is used as success handler, or a tuple containing
the success and the failure handler, where each of them could be None.
:type handlers: list[(Any) -> object] | list[((Any) -> object, (Any) -> object)]
:param handlers
:rtype : list[Promise]
"""
if not handlers:
return []
promises = [] # type: List[Promise]
for handler in handlers:
if isinstance(handler, tuple):
s, f = handler
promises.append(self.then(s, f))
elif isinstance(handler, dict):
s = handler.get("success")
f = handler.get("failure")
promises.append(self.then(s, f))
else:
promises.append(self.then(handler))
return promises
@classmethod
def _try_convert_to_promise(cls, obj):
# type: (Any) -> Promise
_type = obj.__class__
if issubclass(_type, Promise):
if cls is not Promise:
return cls(obj.then, obj._scheduler)
return obj
if iscoroutine(obj): # type: ignore
obj = ensure_future(obj) # type: ignore
_type = obj.__class__
if is_future_like(_type):
def executor(resolve, reject):
# type: (Callable, Callable) -> None
if obj.done():
_process_future_result(resolve, reject)(obj)
else:
obj.add_done_callback(_process_future_result(resolve, reject))
# _process_future_result(resolve, reject)(obj)
promise = cls(executor) # type: Promise
promise._future = obj
return promise
return obj
@classmethod
def reject(cls, reason):
# type: (Exception) -> Promise
ret = cls() # type: Promise
# ret._capture_stacktrace();
# ret._rejectCallback(reason, true);
ret._reject_callback(reason, True)
return ret
rejected = reject
@classmethod
def resolve(cls, obj):
# type: (T) -> Promise[T]
if not cls.is_thenable(obj):
ret = cls() # type: Promise
# ret._capture_stacktrace()
ret._state = STATE_FULFILLED
ret._rejection_handler0 = obj
return ret
return cls._try_convert_to_promise(obj)
cast = resolve
fulfilled = cast
@classmethod
def promisify(cls, f):
# type: (Callable) -> Callable[..., Promise]
if not callable(f):
warn(
"Promise.promisify is now a function decorator, please use Promise.resolve instead."
)
return cls.resolve(f)
@wraps(f)
def wrapper(*args, **kwargs):
# type: (*Any, **Any) -> Promise
def executor(resolve, reject):
# type: (Callable, Callable) -> Optional[Any]
return resolve(f(*args, **kwargs))
return cls(executor)
return wrapper
_safe_resolved_promise = None # type: Promise
@classmethod
def safe(cls, fn):
# type: (Callable) -> Callable
from functools import wraps
if not cls._safe_resolved_promise:
cls._safe_resolved_promise = Promise.resolve(None)
@wraps(fn)
def wrapper(*args, **kwargs):
# type: (*Any, **Any) -> Promise
return cls._safe_resolved_promise.then(lambda v: fn(*args, **kwargs))
return wrapper
@classmethod
def all(cls, promises):
# type: (Any) -> Promise
return PromiseList(promises, promise_class=cls).promise
@classmethod
def for_dict(cls, m):
# type: (Dict[Hashable, Promise[S]]) -> Promise[Dict[Hashable, S]]
"""
A special function that takes a dictionary of promises
and turns them into a promise for a dictionary of values.
In other words, this turns an dictionary of promises for values
into a promise for a dictionary of values.
"""
dict_type = type(m) # type: Type[Dict]
if not m:
return cls.resolve(dict_type())
def handle_success(resolved_values):
# type: (List[S]) -> Dict[Hashable, S]
return dict_type(zip(m.keys(), resolved_values))
return cls.all(m.values()).then(handle_success)
@classmethod
def is_thenable(cls, obj):
# type: (Any) -> bool
"""
A utility function to determine if the specified
object is a promise using "duck typing".
"""
_type = obj.__class__
if obj is None or _type in BASE_TYPES:
return False
return (
issubclass(_type, Promise)
or iscoroutine(obj) # type: ignore
or is_future_like(_type)
)
_type_done_callbacks = {} # type: Dict[type, bool]
def is_future_like(_type):
# type: (type) -> bool
if _type not in _type_done_callbacks:
_type_done_callbacks[_type] = callable(
getattr(_type, "add_done_callback", None)
)
return _type_done_callbacks[_type]
promisify = Promise.promisify
promise_for_dict = Promise.for_dict
is_thenable = Promise.is_thenable
def _process_future_result(resolve, reject):
# type: (Callable, Callable) -> Callable
def handle_future_result(future):
# type: (Any) -> None
try:
resolve(future.result())
except Exception as e:
tb = exc_info()[2]
reject(e, tb)
return handle_future_result
| 33.166274 | 124 | 0.599147 | from collections import namedtuple
from functools import partial, wraps
from sys import version_info, exc_info
from threading import RLock
from types import TracebackType
from six import reraise
from .async_ import Async
from .compat import (
Future,
ensure_future,
iscoroutine,
iterate_promise,
)
from .utils import deprecated, integer_types, string_types, text_type, binary_type, warn
from .promise_list import PromiseList
from .schedulers.immediate import ImmediateScheduler
from typing import TypeVar, Generic
if False:
from typing import (
Type,
List,
Any,
Callable,
Dict,
Iterator,
Optional,
Tuple,
Union,
Generic,
Hashable,
)
default_scheduler = ImmediateScheduler()
async_instance = Async()
def get_default_scheduler():
return default_scheduler
def set_default_scheduler(scheduler):
global default_scheduler
default_scheduler = scheduler
IS_PYTHON2 = version_info[0] == 2
DEFAULT_TIMEOUT = None
MAX_LENGTH = 0xFFFF | 0
CALLBACK_SIZE = 3
CALLBACK_FULFILL_OFFSET = 0
CALLBACK_REJECT_OFFSET = 1
CALLBACK_PROMISE_OFFSET = 2
BASE_TYPES = set(
integer_types
+ string_types
+ (bool, float, complex, tuple, list, dict, text_type, binary_type)
)
STATE_PENDING = -1
STATE_REJECTED = 0
STATE_FULFILLED = 1
def make_self_resolution_error():
return TypeError("Promise is self")
def try_catch(handler, *args, **kwargs):
try:
return (handler(*args, **kwargs), None)
except Exception as e:
tb = exc_info()[2]
return (None, (e, tb))
T = TypeVar("T")
S = TypeVar("S", contravariant=True)
class Promise(Generic[T]):
_state = STATE_PENDING
_is_final = False
_is_bound = False
_is_following = False
_is_async_guaranteed = False
_length = 0
_handlers = None
_fulfillment_handler0 = None
_rejection_handler0 = None
_promise0 = None
_future = None
_traceback = None
_is_waiting = False
_scheduler = None
def __init__(self, executor=None, scheduler=None):
solve_from_executor(executor)
@property
def scheduler(self):
return self._scheduler or default_scheduler
@property
def future(self):
if not self._future:
self._future = Future()
self._then(
self._future.set_result, self._future.set_exception
)
return self._future
def __iter__(self):
return iterate_promise(self._target())
__await__ = __iter__
@deprecated(
"Rejecting directly in a Promise instance is deprecated, as Promise.reject() is now a class method. "
"Please use promise.do_reject() instead.",
name="reject",
)
def _deprecated_reject(self, e):
self.do_reject(e)
@deprecated(
"Resolving directly in a Promise instance is deprecated, as Promise.resolve() is now a class method. "
"Please use promise.do_resolve() instead.",
name="resolve",
)
def _deprecated_resolve(self, value):
self.do_resolve(value)
def _resolve_callback(self, value):
if value is self:
return self._reject_callback(make_self_resolution_error(), False)
if not self.is_thenable(value):
return self._fulfill(value)
promise = self._try_convert_to_promise(value)._target()
if promise == self:
self._reject(make_self_resolution_error())
return
if promise._state == STATE_PENDING:
len = self._length
if len > 0:
promise._migrate_callback0(self)
for i in range(1, len):
promise._migrate_callback_at(self, i)
self._is_following = True
self._length = 0
self._set_followee(promise)
elif promise._state == STATE_FULFILLED:
self._fulfill(promise._value())
elif promise._state == STATE_REJECTED:
self._reject(promise._reason(), promise._target()._traceback)
def _settled_value(self, _raise=False):
assert not self._is_following
if self._state == STATE_FULFILLED:
return self._rejection_handler0
elif self._state == STATE_REJECTED:
if _raise:
raise_val = self._fulfillment_handler0
reraise(type(raise_val), raise_val, self._traceback)
return self._fulfillment_handler0
def _fulfill(self, value):
if value is self:
err = make_self_resolution_error()
return self._reject(err)
self._state = STATE_FULFILLED
self._rejection_handler0 = value
if self._length > 0:
if self._is_async_guaranteed:
self._settle_promises()
else:
async_instance.settle_promises(self)
def _reject(self, reason, traceback=None):
self._state = STATE_REJECTED
self._fulfillment_handler0 = reason
self._traceback = traceback
if self._is_final:
assert self._length == 0
async_instance.fatal_error(reason, self.scheduler)
return
if self._length > 0:
async_instance.settle_promises(self)
else:
self._ensure_possible_rejection_handled()
if self._is_async_guaranteed:
self._settle_promises()
else:
async_instance.settle_promises(self)
def _ensure_possible_rejection_handled(self):
pass
def _reject_callback(self, reason, synchronous=False, traceback=None):
assert isinstance(
reason, Exception
), "A promise was rejected with a non-error: {}".format(reason)
self._reject(reason, traceback)
def _clear_callback_data_index_at(self, index):
assert not self._is_following
assert index > 0
base = index * CALLBACK_SIZE - CALLBACK_SIZE
self._handlers[base + CALLBACK_PROMISE_OFFSET] = None
self._handlers[base + CALLBACK_FULFILL_OFFSET] = None
self._handlers[base + CALLBACK_REJECT_OFFSET] = None
def _fulfill_promises(self, length, value):
for i in range(1, length):
handler = self._fulfillment_handler_at(i)
promise = self._promise_at(i)
self._clear_callback_data_index_at(i)
self._settle_promise(promise, handler, value, None)
def _reject_promises(self, length, reason):
for i in range(1, length):
handler = self._rejection_handler_at(i)
promise = self._promise_at(i)
self._clear_callback_data_index_at(i)
self._settle_promise(promise, handler, reason, None)
def _settle_promise(
self,
promise,
handler,
value,
traceback,
):
assert not self._is_following
is_promise = isinstance(promise, self.__class__)
async_guaranteed = self._is_async_guaranteed
if callable(handler):
if not is_promise:
handler(value)
else:
if async_guaranteed:
promise._is_async_guaranteed = True
self._settle_promise_from_handler(
handler, value, promise
)
elif is_promise:
if async_guaranteed:
promise._is_async_guaranteed = True
if self._state == STATE_FULFILLED:
promise._fulfill(value)
else:
promise._reject(value, self._traceback)
def _settle_promise0(
self,
handler,
value,
traceback,
):
promise = self._promise0
self._promise0 = None
self._settle_promise(promise, handler, value, traceback)
def _settle_promise_from_handler(self, handler, value, promise):
value, error_with_tb = try_catch(handler, value)
if error_with_tb:
error, tb = error_with_tb
promise._reject_callback(error, False, tb)
else:
promise._resolve_callback(value)
def _promise_at(self, index):
assert index > 0
assert not self._is_following
return self._handlers.get(
index * CALLBACK_SIZE - CALLBACK_SIZE + CALLBACK_PROMISE_OFFSET
)
def _fulfillment_handler_at(self, index):
assert not self._is_following
assert index > 0
return self._handlers.get(
index * CALLBACK_SIZE - CALLBACK_SIZE + CALLBACK_FULFILL_OFFSET
)
def _rejection_handler_at(self, index):
assert not self._is_following
assert index > 0
return self._handlers.get(
index * CALLBACK_SIZE - CALLBACK_SIZE + CALLBACK_REJECT_OFFSET
)
def _migrate_callback0(self, follower):
self._add_callbacks(
follower._fulfillment_handler0,
follower._rejection_handler0,
follower._promise0,
)
def _migrate_callback_at(self, follower, index):
self._add_callbacks(
follower._fulfillment_handler_at(index),
follower._rejection_handler_at(index),
follower._promise_at(index),
)
def _add_callbacks(
self,
fulfill,
reject,
promise,
):
assert not self._is_following
if self._handlers is None:
self._handlers = {}
index = self._length
if index > MAX_LENGTH - CALLBACK_SIZE:
index = 0
self._length = 0
if index == 0:
assert not self._promise0
assert not self._fulfillment_handler0
assert not self._rejection_handler0
self._promise0 = promise
if callable(fulfill):
self._fulfillment_handler0 = fulfill
if callable(reject):
self._rejection_handler0 = reject
else:
base = index * CALLBACK_SIZE - CALLBACK_SIZE
assert (base + CALLBACK_PROMISE_OFFSET) not in self._handlers
assert (base + CALLBACK_FULFILL_OFFSET) not in self._handlers
assert (base + CALLBACK_REJECT_OFFSET) not in self._handlers
self._handlers[base + CALLBACK_PROMISE_OFFSET] = promise
if callable(fulfill):
self._handlers[base + CALLBACK_FULFILL_OFFSET] = fulfill
if callable(reject):
self._handlers[base + CALLBACK_REJECT_OFFSET] = reject
self._length = index + 1
return index
def _target(self):
ret = self
while ret._is_following:
ret = ret._followee()
return ret
def _followee(self):
assert self._is_following
assert isinstance(self._rejection_handler0, Promise)
return self._rejection_handler0
def _set_followee(self, promise):
assert self._is_following
assert not isinstance(self._rejection_handler0, Promise)
self._rejection_handler0 = promise
def _settle_promises(self):
length = self._length
if length > 0:
if self._state == STATE_REJECTED:
reason = self._fulfillment_handler0
traceback = self._traceback
self._settle_promise0(self._rejection_handler0, reason, traceback)
self._reject_promises(length, reason)
else:
value = self._rejection_handler0
self._settle_promise0(self._fulfillment_handler0, value, None)
self._fulfill_promises(length, value)
self._length = 0
def _resolve_from_executor(self, executor):
synchronous = True
def resolve(value):
self._resolve_callback(value)
def reject(reason, traceback=None):
self._reject_callback(reason, synchronous, traceback)
error = None
traceback = None
try:
executor(resolve, reject)
except Exception as e:
traceback = exc_info()[2]
error = e
synchronous = False
if error is not None:
self._reject_callback(error, True, traceback)
@classmethod
def wait(cls, promise, timeout=None):
async_instance.wait(promise, timeout)
def _wait(self, timeout=None):
self.wait(self, timeout)
def get(self, timeout=None):
target = self._target()
self._wait(timeout or DEFAULT_TIMEOUT)
return self._target_settled_value(_raise=True)
def _target_settled_value(self, _raise=False):
return self._target()._settled_value(_raise)
_value = _reason = _target_settled_value
value = reason = property(_target_settled_value)
def __repr__(self):
hex_id = hex(id(self))
if self._is_following:
return "<Promise at {} following {}>".format(hex_id, self._target())
state = self._state
if state == STATE_PENDING:
return "<Promise at {} pending>".format(hex_id)
elif state == STATE_FULFILLED:
return "<Promise at {} fulfilled with {}>".format(
hex_id, repr(self._rejection_handler0)
)
elif state == STATE_REJECTED:
return "<Promise at {} rejected with {}>".format(
hex_id, repr(self._fulfillment_handler0)
)
return "<Promise unknown>"
@property
def is_pending(self):
return self._target()._state == STATE_PENDING
@property
def is_fulfilled(self):
return self._target()._state == STATE_FULFILLED
@property
def is_rejected(self):
return self._target()._state == STATE_REJECTED
def catch(self, on_rejection):
return self.then(None, on_rejection)
def _then(
self,
did_fulfill=None,
did_reject=None,
):
promise = self.__class__()
target = self._target()
state = target._state
if state == STATE_PENDING:
target._add_callbacks(did_fulfill, did_reject, promise)
else:
traceback = None
if state == STATE_FULFILLED:
value = target._rejection_handler0
handler = did_fulfill
elif state == STATE_REJECTED:
value = target._fulfillment_handler0
traceback = target._traceback
handler = did_reject
async_instance.invoke(
partial(target._settle_promise, promise, handler, value, traceback),
promise.scheduler
)
return promise
fulfill = _resolve_callback
do_resolve = _resolve_callback
do_reject = _reject_callback
def then(self, did_fulfill=None, did_reject=None):
return self._then(did_fulfill, did_reject)
def done(self, did_fulfill=None, did_reject=None):
promise = self._then(did_fulfill, did_reject)
promise._is_final = True
def done_all(self, handlers=None):
if not handlers:
return
for handler in handlers:
if isinstance(handler, tuple):
s, f = handler
self.done(s, f)
elif isinstance(handler, dict):
s = handler.get("success")
f = handler.get("failure")
self.done(s, f)
else:
self.done(handler)
def then_all(self, handlers=None):
if not handlers:
return []
promises = []
for handler in handlers:
if isinstance(handler, tuple):
s, f = handler
promises.append(self.then(s, f))
elif isinstance(handler, dict):
s = handler.get("success")
f = handler.get("failure")
promises.append(self.then(s, f))
else:
promises.append(self.then(handler))
return promises
@classmethod
def _try_convert_to_promise(cls, obj):
_type = obj.__class__
if issubclass(_type, Promise):
if cls is not Promise:
return cls(obj.then, obj._scheduler)
return obj
if iscoroutine(obj):
obj = ensure_future(obj)
_type = obj.__class__
if is_future_like(_type):
def executor(resolve, reject):
if obj.done():
_process_future_result(resolve, reject)(obj)
else:
obj.add_done_callback(_process_future_result(resolve, reject))
promise = cls(executor)
promise._future = obj
return promise
return obj
@classmethod
def reject(cls, reason):
ret = cls()
ret._reject_callback(reason, True)
return ret
rejected = reject
@classmethod
def resolve(cls, obj):
if not cls.is_thenable(obj):
ret = cls()
ret._state = STATE_FULFILLED
ret._rejection_handler0 = obj
return ret
return cls._try_convert_to_promise(obj)
cast = resolve
fulfilled = cast
@classmethod
def promisify(cls, f):
if not callable(f):
warn(
"Promise.promisify is now a function decorator, please use Promise.resolve instead."
)
return cls.resolve(f)
@wraps(f)
def wrapper(*args, **kwargs):
def executor(resolve, reject):
return resolve(f(*args, **kwargs))
return cls(executor)
return wrapper
_safe_resolved_promise = None
@classmethod
def safe(cls, fn):
from functools import wraps
if not cls._safe_resolved_promise:
cls._safe_resolved_promise = Promise.resolve(None)
@wraps(fn)
def wrapper(*args, **kwargs):
return cls._safe_resolved_promise.then(lambda v: fn(*args, **kwargs))
return wrapper
@classmethod
def all(cls, promises):
return PromiseList(promises, promise_class=cls).promise
@classmethod
def for_dict(cls, m):
dict_type = type(m)
if not m:
return cls.resolve(dict_type())
def handle_success(resolved_values):
return dict_type(zip(m.keys(), resolved_values))
return cls.all(m.values()).then(handle_success)
@classmethod
def is_thenable(cls, obj):
_type = obj.__class__
if obj is None or _type in BASE_TYPES:
return False
return (
issubclass(_type, Promise)
or iscoroutine(obj)
or is_future_like(_type)
)
_type_done_callbacks = {}
def is_future_like(_type):
if _type not in _type_done_callbacks:
_type_done_callbacks[_type] = callable(
getattr(_type, "add_done_callback", None)
)
return _type_done_callbacks[_type]
promisify = Promise.promisify
promise_for_dict = Promise.for_dict
is_thenable = Promise.is_thenable
def _process_future_result(resolve, reject):
def handle_future_result(future):
try:
resolve(future.result())
except Exception as e:
tb = exc_info()[2]
reject(e, tb)
return handle_future_result
| true | true |
1c2f1d50fcd7c7c1fa85dee0f7e295ced1eeb880 | 881 | py | Python | src/setup.py | skfzyy/twitter-past-crawler | 56c16fe8fd2ac2dc869912f28a55f05ffc63bce5 | [
"MIT"
] | null | null | null | src/setup.py | skfzyy/twitter-past-crawler | 56c16fe8fd2ac2dc869912f28a55f05ffc63bce5 | [
"MIT"
] | null | null | null | src/setup.py | skfzyy/twitter-past-crawler | 56c16fe8fd2ac2dc869912f28a55f05ffc63bce5 | [
"MIT"
] | null | null | null | from setuptools import setup
setup(
name='proxytwitter',
version='0.0.9',
description='A crawler that can crawl and accumulate past tweets without using the official API.',
url='https://github.com/skfzyy/twitter-past-crawler',
author='skfzyy',
license='MIT',
keywords='proxy twitter reptile',
packages=["twitterpastcrawler"],
# requirements
install_requires=['requests', 'beautifulsoup4', 'lxml'],
# data files
package_data={
'': ['useragents_mac.dat', 'useragents_linux.dat', 'useragents_windows.dat'],
},
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.7',
]
)
| 21.487805 | 102 | 0.609535 | from setuptools import setup
setup(
name='proxytwitter',
version='0.0.9',
description='A crawler that can crawl and accumulate past tweets without using the official API.',
url='https://github.com/skfzyy/twitter-past-crawler',
author='skfzyy',
license='MIT',
keywords='proxy twitter reptile',
packages=["twitterpastcrawler"],
install_requires=['requests', 'beautifulsoup4', 'lxml'],
package_data={
'': ['useragents_mac.dat', 'useragents_linux.dat', 'useragents_windows.dat'],
},
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.7',
]
)
| true | true |
1c2f1d960b752d232d8019234dbb96c9ecfffe4e | 5,738 | py | Python | inspectors/gsa.py | crdunwel/inspectors-general | e2ea3eb978a2819c56ad61df49b541027f377fcd | [
"CC0-1.0"
] | 1 | 2019-06-10T00:33:02.000Z | 2019-06-10T00:33:02.000Z | inspectors/gsa.py | crdunwel/inspectors-general | e2ea3eb978a2819c56ad61df49b541027f377fcd | [
"CC0-1.0"
] | null | null | null | inspectors/gsa.py | crdunwel/inspectors-general | e2ea3eb978a2819c56ad61df49b541027f377fcd | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python
from utils import utils, inspector
from bs4 import BeautifulSoup
from datetime import datetime
import re
import logging
# oldest year: 1979
def run(options):
crawl_index(SEMIANNUAL_REPORTS_URL, options)
crawl_index(AUDIT_REPORTS_URL, options, True)
crawl_index(PEER_REVIEW_REPORTS_URL, options)
crawl_index(MISCELLANEOUS_REPORTS_URL, options)
def crawl_index(base_url, options, is_meta_index=False):
year_range = inspector.year_range(options)
max_pages = options.get('pages')
if max_pages:
max_pages = int(max_pages)
page = 1
only_id = options.get('report_id')
done = False
while not done:
url = url_for(base_url, page)
body = utils.download(url)
doc = BeautifulSoup(body)
next_page = page + 1
found_next_page = False
page_links = doc.select("dl.moreResults a")
for page_link in page_links:
if page_link.text == str(next_page):
found_next_page = True
break
if not found_next_page:
done = True
if max_pages and next_page > max_pages:
done = True
results = doc.select("div#svPortal dl")
for result in results:
if "moreResults" in result.get("class"):
continue
if is_meta_index:
url = "http://www.gsaig.gov" + result.a.get("href")
crawl_index(url, options, False)
else:
report = report_from(result, base_url)
year = int(report['published_on'][:4])
if only_id and (report['report_id'] != only_id):
continue
if year not in year_range:
continue
inspector.save_report(report)
page = next_page
if not done:
logging.info('Moving to next page (%d)' % page)
def url_for(base_url, page = 1):
return "%s?startRow=%d" % (base_url, page * 10 - 9)
def report_from(result, base_url):
report = {
'inspector': 'gsa',
'inspector_url': 'http://gsaig.gov/',
'agency': 'gsa',
'agency_name': 'General Services Administration'
}
link = result.a
title = link.text
url = link.get('href')
date_holders = result.find_all("dt", class_="releaseDate")
if len(date_holders) > 0:
published_date = date_holders[0].text
date = datetime.strptime(published_date, "%B %d, %Y")
elif title in HARDCODED_DATES:
# This is an ugly solution, but there's no date information on the web page.
# The next best solution would be to grab the PDF file and pull the file
# creation date out of its metadata.
published_date = HARDCODED_DATES[title]
date = datetime.strptime(published_date, "%B %d, %Y")
elif base_url == SEMIANNUAL_REPORTS_URL:
# get last match
match = None
for match in DATE_RE.finditer(title):
pass
published_date = match.group(0)
date = datetime.strptime(published_date, "%B %d, %Y")
else:
match = DATE_RE_MM_DD_YY.search(result.text)
if match:
published_date = match.group(0)
date = datetime.strptime(published_date, "%m/%d/%y")
else:
raise Exception("Couldn't find date for %s" % title)
id = ID_RE.search(url).group(1)
report_type = type_for(base_url)
js_match = JS_RE.match(url)
if js_match:
url = "http://www.gsaig.gov" + js_match.group(1)
elif url.startswith('/'):
url = "http://www.gsaig.gov" + url
report['type'] = report_type
report['published_on'] = datetime.strftime(date, "%Y-%m-%d")
report['url'] = url
report['report_id'] = id
report['title'] = title.strip()
report['file_type'] = 'pdf'
return report
def type_for(base_url):
if base_url.find('special-reports') != -1:
return "audit"
if base_url.find('audit-reports') != -1:
return "audit"
return "other"
SEMIANNUAL_REPORTS_URL = "http://www.gsaig.gov/index.cfm/oig-reports/semiannual-reports-to-the-congress/"
AUDIT_REPORTS_URL = "http://www.gsaig.gov/index.cfm/oig-reports/audit-reports/"
PEER_REVIEW_REPORTS_URL = "http://www.gsaig.gov/index.cfm/oig-reports/peer-review-reports/"
MISCELLANEOUS_REPORTS_URL = "http://www.gsaig.gov/index.cfm/oig-reports/miscellaneous-reports/"
ID_RE = re.compile("LinkServID=([-0-9A-F]*)&showMeta=")
JS_RE = re.compile("""javascript:newWin=window.open\('/(\?LinkServID=([-0-9A-F]*)&showMeta=0)','NewWin[0-9]*'\);newWin.focus\(\);void\(0\)""")
DATE_RE = re.compile("(January|February|March|April|May|June|July|August|" +
"September|October|November|December) ([123]?[0-9]), " +
"([12][0-9][0-9][0-9])")
DATE_RE_MM_DD_YY = re.compile("[0-9]?[0-9]/[0-9]?[0-9]/[0-9][0-9]")
HARDCODED_DATES = {
"Hats Off Program Investigative Report": "June 16, 2011",
"Major Issues from Fiscal Year 2010 Multiple Award Schedule Preaward Audits": "September 26, 2011",
"Review of Center for Information Security Services FTS": "March 23, 2001",
"Audit of Procurement of Profesional Services from the FSS Multiple Award Schedules": "July 31, 2003",
"Special Report: MAS Pricing Practices: Is FSS Observing Regulatory Provisions Regarding Pricing?": "August 24, 2001",
"Updated Assessment of GSA's Most Serious Challenges": "December 8, 2004",
"Limited Audit of FSS's Contracting for Services Under Multiple Award Schedule Contracts": "January 9, 2001",
"Procurement Reform and the Multiple Award Schedule Program": "July 30, 2010",
"FTS Alert Report": "March 6, 2003",
"FTS CSC Audit Report": "January 8, 2004",
"Compendium FTS CSC Audit Report": "December 14, 2004",
"Compendium FTS CSC Controls Audit Report": "June 14, 2005",
"Compendium FTS Client Support Center Controls Audit Report": "September 29, 2006",
"Review of the Federal Acquisition Service's Client Support Center, Southeast Sunbelt Region - A090139-3": "June 4, 2010"
}
utils.run(run) if (__name__ == "__main__") else None
| 34.987805 | 142 | 0.679157 |
from utils import utils, inspector
from bs4 import BeautifulSoup
from datetime import datetime
import re
import logging
def run(options):
crawl_index(SEMIANNUAL_REPORTS_URL, options)
crawl_index(AUDIT_REPORTS_URL, options, True)
crawl_index(PEER_REVIEW_REPORTS_URL, options)
crawl_index(MISCELLANEOUS_REPORTS_URL, options)
def crawl_index(base_url, options, is_meta_index=False):
year_range = inspector.year_range(options)
max_pages = options.get('pages')
if max_pages:
max_pages = int(max_pages)
page = 1
only_id = options.get('report_id')
done = False
while not done:
url = url_for(base_url, page)
body = utils.download(url)
doc = BeautifulSoup(body)
next_page = page + 1
found_next_page = False
page_links = doc.select("dl.moreResults a")
for page_link in page_links:
if page_link.text == str(next_page):
found_next_page = True
break
if not found_next_page:
done = True
if max_pages and next_page > max_pages:
done = True
results = doc.select("div#svPortal dl")
for result in results:
if "moreResults" in result.get("class"):
continue
if is_meta_index:
url = "http://www.gsaig.gov" + result.a.get("href")
crawl_index(url, options, False)
else:
report = report_from(result, base_url)
year = int(report['published_on'][:4])
if only_id and (report['report_id'] != only_id):
continue
if year not in year_range:
continue
inspector.save_report(report)
page = next_page
if not done:
logging.info('Moving to next page (%d)' % page)
def url_for(base_url, page = 1):
return "%s?startRow=%d" % (base_url, page * 10 - 9)
def report_from(result, base_url):
report = {
'inspector': 'gsa',
'inspector_url': 'http://gsaig.gov/',
'agency': 'gsa',
'agency_name': 'General Services Administration'
}
link = result.a
title = link.text
url = link.get('href')
date_holders = result.find_all("dt", class_="releaseDate")
if len(date_holders) > 0:
published_date = date_holders[0].text
date = datetime.strptime(published_date, "%B %d, %Y")
elif title in HARDCODED_DATES:
# The next best solution would be to grab the PDF file and pull the file
# creation date out of its metadata.
published_date = HARDCODED_DATES[title]
date = datetime.strptime(published_date, "%B %d, %Y")
elif base_url == SEMIANNUAL_REPORTS_URL:
# get last match
match = None
for match in DATE_RE.finditer(title):
pass
published_date = match.group(0)
date = datetime.strptime(published_date, "%B %d, %Y")
else:
match = DATE_RE_MM_DD_YY.search(result.text)
if match:
published_date = match.group(0)
date = datetime.strptime(published_date, "%m/%d/%y")
else:
raise Exception("Couldn't find date for %s" % title)
id = ID_RE.search(url).group(1)
report_type = type_for(base_url)
js_match = JS_RE.match(url)
if js_match:
url = "http://www.gsaig.gov" + js_match.group(1)
elif url.startswith('/'):
url = "http://www.gsaig.gov" + url
report['type'] = report_type
report['published_on'] = datetime.strftime(date, "%Y-%m-%d")
report['url'] = url
report['report_id'] = id
report['title'] = title.strip()
report['file_type'] = 'pdf'
return report
def type_for(base_url):
if base_url.find('special-reports') != -1:
return "audit"
if base_url.find('audit-reports') != -1:
return "audit"
return "other"
SEMIANNUAL_REPORTS_URL = "http://www.gsaig.gov/index.cfm/oig-reports/semiannual-reports-to-the-congress/"
AUDIT_REPORTS_URL = "http://www.gsaig.gov/index.cfm/oig-reports/audit-reports/"
PEER_REVIEW_REPORTS_URL = "http://www.gsaig.gov/index.cfm/oig-reports/peer-review-reports/"
MISCELLANEOUS_REPORTS_URL = "http://www.gsaig.gov/index.cfm/oig-reports/miscellaneous-reports/"
ID_RE = re.compile("LinkServID=([-0-9A-F]*)&showMeta=")
JS_RE = re.compile("""javascript:newWin=window.open\('/(\?LinkServID=([-0-9A-F]*)&showMeta=0)','NewWin[0-9]*'\);newWin.focus\(\);void\(0\)""")
DATE_RE = re.compile("(January|February|March|April|May|June|July|August|" +
"September|October|November|December) ([123]?[0-9]), " +
"([12][0-9][0-9][0-9])")
DATE_RE_MM_DD_YY = re.compile("[0-9]?[0-9]/[0-9]?[0-9]/[0-9][0-9]")
HARDCODED_DATES = {
"Hats Off Program Investigative Report": "June 16, 2011",
"Major Issues from Fiscal Year 2010 Multiple Award Schedule Preaward Audits": "September 26, 2011",
"Review of Center for Information Security Services FTS": "March 23, 2001",
"Audit of Procurement of Profesional Services from the FSS Multiple Award Schedules": "July 31, 2003",
"Special Report: MAS Pricing Practices: Is FSS Observing Regulatory Provisions Regarding Pricing?": "August 24, 2001",
"Updated Assessment of GSA's Most Serious Challenges": "December 8, 2004",
"Limited Audit of FSS's Contracting for Services Under Multiple Award Schedule Contracts": "January 9, 2001",
"Procurement Reform and the Multiple Award Schedule Program": "July 30, 2010",
"FTS Alert Report": "March 6, 2003",
"FTS CSC Audit Report": "January 8, 2004",
"Compendium FTS CSC Audit Report": "December 14, 2004",
"Compendium FTS CSC Controls Audit Report": "June 14, 2005",
"Compendium FTS Client Support Center Controls Audit Report": "September 29, 2006",
"Review of the Federal Acquisition Service's Client Support Center, Southeast Sunbelt Region - A090139-3": "June 4, 2010"
}
utils.run(run) if (__name__ == "__main__") else None
| true | true |
1c2f1df2a725c60c51c41657841a45853c5cd7b4 | 1,651 | py | Python | test/test_requests.py | disocord/neis.py | d0dd32bf7238882db65381e39bc6fcd6fae43754 | [
"MIT"
] | 61 | 2020-04-26T03:06:45.000Z | 2022-03-17T02:30:59.000Z | test/test_requests.py | disocord/neis.py | d0dd32bf7238882db65381e39bc6fcd6fae43754 | [
"MIT"
] | 28 | 2020-04-27T05:07:30.000Z | 2021-10-01T12:17:55.000Z | test/test_requests.py | disocord/neis.py | d0dd32bf7238882db65381e39bc6fcd6fae43754 | [
"MIT"
] | 17 | 2020-04-27T03:48:52.000Z | 2022-03-10T04:45:54.000Z | from neispy.sync import SyncNeispy
def test_all(client: SyncNeispy):
scinfo = client.schoolInfo(SCHUL_NM="인천석천초등학교")
AE = scinfo[0].ATPT_OFCDC_SC_CODE # 교육청코드
SE = scinfo[0].SD_SCHUL_CODE # 학교코드
# 학교코드와 교육청 코드로 2019년 1월 22일의 급식 정보 요청
scmeal = client.mealServiceDietInfo(AE, SE, MLSV_YMD="20190122")
meal = scmeal[0].DDISH_NM.replace("<br/>", "\n") # 줄바꿈으로 만든뒤 출력
# 학교코드와 교육청 코드로 2019년 3월 7일날 학사일정 요청
scschedule = client.SchoolSchedule(AE, SE, AA_YMD=20201002)
schedule = scschedule[0].EVENT_NM # 학사일정명 가져옴
# 학교코드와 교육청 코드로 초등학교의 2020년 1월 22일의 시간표가져옴
sctimetable = client.elsTimetable(AE, SE, 2019, 2, 20200122, "1", "1")
timetable = [i.ITRT_CNTNT for i in sctimetable] # 리스트로 만듬
academyinfo = client.acaInsTiInfo(AE) # 교육청 코드로 학원및 교습소 정보 요청
academy = academyinfo[0].ACA_NM # 학교이름 출력
scclass = client.classInfo(AE, SE, GRADE="1") # 학교코드와 교육청 코드로 1학년의 모든 반정보 요청
class_info = [i.CLASS_NM for i in scclass] # 리스트로만듬
hiscinfo = client.schoolInfo(SCHUL_NM="인천기계") # 다른정보를 위해 공고로 가져옴
hAE = hiscinfo[0].ATPT_OFCDC_SC_CODE # 교육청코드
hSE = hiscinfo[0].SD_SCHUL_CODE # 학교코드
scmajorinfo = client.schoolMajorinfo(hAE, hSE) # 학과정보 요청
majorinfo = [m.DDDEP_NM for m in scmajorinfo] # 리스트로 만듬
scAflcoinfo = client.schulAflcoinfo(hAE, hSE) # 학교 계열정보 요청
Aflco = [a.ORD_SC_NM for a in scAflcoinfo]
sctiClrm = client.tiClrminfo(hAE, hSE) # 시간표 강의실 정보 요청
tiClem = [t.CLRM_NM for t in sctiClrm]
assert meal
assert schedule
assert timetable
assert academy
assert class_info
assert majorinfo
assert Aflco
assert tiClem
| 34.395833 | 81 | 0.6808 | from neispy.sync import SyncNeispy
def test_all(client: SyncNeispy):
scinfo = client.schoolInfo(SCHUL_NM="인천석천초등학교")
AE = scinfo[0].ATPT_OFCDC_SC_CODE
SE = scinfo[0].SD_SCHUL_CODE
scmeal = client.mealServiceDietInfo(AE, SE, MLSV_YMD="20190122")
meal = scmeal[0].DDISH_NM.replace("<br/>", "\n")
scschedule = client.SchoolSchedule(AE, SE, AA_YMD=20201002)
schedule = scschedule[0].EVENT_NM
sctimetable = client.elsTimetable(AE, SE, 2019, 2, 20200122, "1", "1")
timetable = [i.ITRT_CNTNT for i in sctimetable]
academyinfo = client.acaInsTiInfo(AE)
academy = academyinfo[0].ACA_NM
scclass = client.classInfo(AE, SE, GRADE="1")
class_info = [i.CLASS_NM for i in scclass]
hiscinfo = client.schoolInfo(SCHUL_NM="인천기계")
hAE = hiscinfo[0].ATPT_OFCDC_SC_CODE
hSE = hiscinfo[0].SD_SCHUL_CODE
scmajorinfo = client.schoolMajorinfo(hAE, hSE)
majorinfo = [m.DDDEP_NM for m in scmajorinfo]
scAflcoinfo = client.schulAflcoinfo(hAE, hSE)
Aflco = [a.ORD_SC_NM for a in scAflcoinfo]
sctiClrm = client.tiClrminfo(hAE, hSE)
tiClem = [t.CLRM_NM for t in sctiClrm]
assert meal
assert schedule
assert timetable
assert academy
assert class_info
assert majorinfo
assert Aflco
assert tiClem
| true | true |
1c2f1e821c040c09f681ea450138ed0ffaac8cff | 1,537 | py | Python | setup.py | rtmigo/lnkdpn | d63ef55deff5ba408381af04250e97dada0995a0 | [
"BSD-3-Clause"
] | 1 | 2021-03-08T04:18:13.000Z | 2021-03-08T04:18:13.000Z | setup.py | rtmigo/lnkdpn | d63ef55deff5ba408381af04250e97dada0995a0 | [
"BSD-3-Clause"
] | null | null | null | setup.py | rtmigo/lnkdpn | d63ef55deff5ba408381af04250e97dada0995a0 | [
"BSD-3-Clause"
] | null | null | null | from importlib.machinery import SourceFileLoader
from pathlib import Path
from setuptools import setup, find_packages
ver = SourceFileLoader('ver', 'depz/x00_version.py').load_module()
# transform GitHub spoilers into markdown
setup(
name="depz",
version=ver.__version__,
author="Art Galkin",
author_email="ortemeo@gmail.com",
url='https://github.com/rtmigo/depz',
packages=find_packages(),
install_requires=[],
description="Command-line tool for symlinking directories with reusable code into the project",
long_description=(Path(__file__).parent / 'README.md').read_text(),
long_description_content_type='text/markdown',
license='BSD-3-Clause',
entry_points={
'console_scripts': [
'depz = depz:runmain',
]},
keywords="""
files package library programming local project symlink dependencies directories
library-management package-management source-code reusable-code
""".split(),
# https://pypi.org/classifiers/
classifiers=[
# "Development Status :: 4 - Beta",
# "Development Status :: 2 - Pre-Alpha",
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
'License :: OSI Approved :: BSD License',
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Typing :: Typed",
"Topic :: Software Development :: Build Tools",
"Operating System :: POSIX"
],
# test_suite='nose.collector',
# tests_require=['nose'],
#zip_safe=False
) | 27.446429 | 97 | 0.688354 | from importlib.machinery import SourceFileLoader
from pathlib import Path
from setuptools import setup, find_packages
ver = SourceFileLoader('ver', 'depz/x00_version.py').load_module()
setup(
name="depz",
version=ver.__version__,
author="Art Galkin",
author_email="ortemeo@gmail.com",
url='https://github.com/rtmigo/depz',
packages=find_packages(),
install_requires=[],
description="Command-line tool for symlinking directories with reusable code into the project",
long_description=(Path(__file__).parent / 'README.md').read_text(),
long_description_content_type='text/markdown',
license='BSD-3-Clause',
entry_points={
'console_scripts': [
'depz = depz:runmain',
]},
keywords="""
files package library programming local project symlink dependencies directories
library-management package-management source-code reusable-code
""".split(),
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
'License :: OSI Approved :: BSD License',
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Typing :: Typed",
"Topic :: Software Development :: Build Tools",
"Operating System :: POSIX"
],
) | true | true |
1c2f1f0ab3c906ecf4a0f01346cdecbec21f72d9 | 11,698 | py | Python | fury/shaders/tests/test_base.py | sreekarchigurupati/fury | 07ed10123954d3ce3197c45f8ee54a7d4909656d | [
"BSD-3-Clause"
] | null | null | null | fury/shaders/tests/test_base.py | sreekarchigurupati/fury | 07ed10123954d3ce3197c45f8ee54a7d4909656d | [
"BSD-3-Clause"
] | null | null | null | fury/shaders/tests/test_base.py | sreekarchigurupati/fury | 07ed10123954d3ce3197c45f8ee54a7d4909656d | [
"BSD-3-Clause"
] | null | null | null | import os
import pytest
import numpy as np
import numpy.testing as npt
from fury import actor, window
from fury.shaders import (add_shader_callback, attribute_to_actor,
compose_shader, import_fury_shader, load_shader,
load, shader_to_actor, replace_shader_in_actor)
from fury.shaders.base import SHADERS_DIR
from fury.lib import (Actor, CellArray, Points, PolyData, PolyDataMapper,
numpy_support)
from fury.utils import set_polydata_colors
from tempfile import TemporaryDirectory as InTemporaryDirectory
vertex_dec = \
"""
uniform float time;
out vec4 myVertexMC;
mat4 rotationMatrix(vec3 axis, float angle) {
axis = normalize(axis);
float s = sin(angle);
float c = cos(angle);
float oc = 1.0 - c;
return mat4(oc * axis.x * axis.x + c,
oc * axis.x * axis.y - axis.z * s,
oc * axis.z * axis.x + axis.y * s, 0.0,
oc * axis.x * axis.y + axis.z * s,
oc * axis.y * axis.y + c,
oc * axis.y * axis.z - axis.x * s, 0.0,
oc * axis.z * axis.x - axis.y * s,
oc * axis.y * axis.z + axis.x * s,
oc * axis.z * axis.z + c, 0.0,
0.0, 0.0, 0.0, 1.0);
}
vec3 rotate(vec3 v, vec3 axis, float angle) {
mat4 m = rotationMatrix(axis, angle);
return (m * vec4(v, 1.0)).xyz;
}
vec3 ax = vec3(1, 0, 0);
"""
vertex_impl = \
"""
myVertexMC = vertexMC;
myVertexMC.xyz = rotate(vertexMC.xyz, ax, time*0.01);
vertexVCVSOutput = MCVCMatrix * myVertexMC;
gl_Position = MCDCMatrix * myVertexMC;
"""
geometry_code = \
"""
//VTK::System::Dec
//VTK::PositionVC::Dec
uniform mat4 MCDCMatrix;
//VTK::PrimID::Dec
// declarations below aren't necessary because they are already injected
// by PrimID template this comment is just to justify the passthrough below
//in vec4 vertexColorVSOutput[];
//out vec4 vertexColorGSOutput;
//VTK::Color::Dec
//VTK::Normal::Dec
//VTK::Light::Dec
//VTK::TCoord::Dec
//VTK::Picking::Dec
//VTK::DepthPeeling::Dec
//VTK::Clip::Dec
//VTK::Output::Dec
// Convert points to line strips
layout(points) in;
layout(triangle_strip, max_vertices = 4) out;
void build_square(vec4 position)
{
gl_Position = position + vec4(-.5, -.5, 0, 0); // 1: Bottom left
EmitVertex();
gl_Position = position + vec4(.5, -.5, 0, 0); // 2: Bottom right
EmitVertex();
gl_Position = position + vec4(-.5, .5, 0, 0); // 3: Top left
EmitVertex();
gl_Position = position + vec4(.5, .5, 0, 0); // 4: Top right
EmitVertex();
EndPrimitive();
}
void main()
{
vertexColorGSOutput = vertexColorVSOutput[0];
build_square(gl_in[0].gl_Position);
}
"""
frag_dec = \
"""
varying vec4 myVertexMC;
uniform float time;
"""
frag_impl = \
"""
vec3 rColor = vec3(.9, .0, .3);
vec3 gColor = vec3(.0, .9, .3);
vec3 bColor = vec3(.0, .3, .9);
vec3 yColor = vec3(.9, .9, .3);
float tm = .2; // speed
float vcm = 5;
vec4 tmp = myVertexMC;
float a = sin(tmp.y * vcm - time * tm) / 2.;
float b = cos(tmp.y * vcm - time * tm) / 2.;
float c = sin(tmp.y * vcm - time * tm + 3.14) / 2.;
float d = cos(tmp.y * vcm - time * tm + 3.14) / 2.;
float div = .01; // default 0.01
float e = div / abs(tmp.x + a);
float f = div / abs(tmp.x + b);
float g = div / abs(tmp.x + c);
float h = div / abs(tmp.x + d);
vec3 destColor = rColor * e + gColor * f + bColor * g + yColor * h;
fragOutput0 = vec4(destColor, 1.);
vec2 p = tmp.xy;
p = p - vec2(time * 0.005, 0.);
if (length(p - vec2(0, 0)) < 0.2) {
fragOutput0 = vec4(1, 0., 0., .5);
}
"""
def generate_cube_with_effect():
cube = actor.cube(np.array([[0, 0, 0]]))
shader_to_actor(cube, "vertex", impl_code=vertex_impl,
decl_code=vertex_dec, block="valuepass")
shader_to_actor(cube, "fragment", impl_code=frag_impl,
decl_code=frag_dec, block="light")
return cube
def generate_points():
centers = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
colors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) * 255
vtk_vertices = Points()
# Create the topology of the point (a vertex)
vtk_faces = CellArray()
# Add points
for i in range(len(centers)):
p = centers[i]
id = vtk_vertices.InsertNextPoint(p)
vtk_faces.InsertNextCell(1)
vtk_faces.InsertCellPoint(id)
# Create a polydata object
polydata = PolyData()
# Set the vertices and faces we created as the geometry and topology of the
# polydata
polydata.SetPoints(vtk_vertices)
polydata.SetVerts(vtk_faces)
set_polydata_colors(polydata, colors)
mapper = PolyDataMapper()
mapper.SetInputData(polydata)
mapper.SetVBOShiftScaleMethod(False)
point_actor = Actor()
point_actor.SetMapper(mapper)
return point_actor
def test_add_shader_callback():
cube = generate_cube_with_effect()
showm = window.ShowManager()
showm.scene.add(cube)
class Timer(object):
idx = 0.0
timer = Timer()
def timer_callback(obj, event):
# nonlocal timer, showm
timer.idx += 1.0
showm.render()
if timer.idx > 90:
showm.exit()
def my_cbk(_caller, _event, calldata=None):
program = calldata
if program is not None:
try:
program.SetUniformf("time", timer.idx)
except ValueError:
pass
add_shader_callback(cube, my_cbk)
showm.initialize()
showm.add_timer_callback(True, 100, timer_callback)
showm.start()
arr = window.snapshot(showm.scene, offscreen=True)
report = window.analyze_snapshot(arr)
npt.assert_equal(report.objects, 1)
cone_actor = actor.cone(np.array([[0, 0, 0]]), np.array([[0, 1, 0]]),
(0, 0, 1))
test_values = []
def callbackLow(_caller, _event, calldata=None):
program = calldata
if program is not None:
test_values.append(0)
id_observer = add_shader_callback(cone_actor, callbackLow, 0)
with pytest.raises(Exception):
add_shader_callback(cone_actor, callbackLow, priority='str')
mapper = cone_actor.GetMapper()
mapper.RemoveObserver(id_observer)
scene = window.Scene()
scene.add(cone_actor)
arr1 = window.snapshot(scene, size=(200, 200))
assert len(test_values) == 0
test_values = []
def callbackHigh(_caller, _event, calldata=None):
program = calldata
if program is not None:
test_values.append(999)
def callbackMean(_caller, _event, calldata=None):
program = calldata
if program is not None:
test_values.append(500)
add_shader_callback(cone_actor, callbackHigh, 999)
add_shader_callback(cone_actor, callbackLow, 0)
id_mean = add_shader_callback(cone_actor, callbackMean, 500)
# check the priority of each call
arr2 = window.snapshot(scene, size=(200, 200))
assert np.abs([
test_values[0] - 999, test_values[1] - 500,
test_values[2] - 0]).sum() == 0
# check if the correct observer was removed
mapper.RemoveObserver(id_mean)
test_values = []
arr3 = window.snapshot(scene, size=(200, 200))
assert np.abs([
test_values[0] - 999, test_values[1] - 0]).sum() == 0
def test_attribute_to_actor():
cube = generate_cube_with_effect()
test_arr = np.arange(24).reshape((8, 3))
attribute_to_actor(cube, test_arr, 'test_arr')
arr = cube.GetMapper().GetInput().GetPointData().GetArray('test_arr')
npt.assert_array_equal(test_arr, numpy_support.vtk_to_numpy(arr))
def test_compose_shader():
str_test1 = 'Test1'
str_test2 = 'Test2'
list_str1 = [str_test1, None]
list_str2 = [str_test1, str_test2]
# Test empty parameter
code = compose_shader(None)
npt.assert_equal(code, "")
# Test invalid list
npt.assert_raises(IOError, compose_shader, list_str1)
# Test str code
code = compose_shader(str_test1)
npt.assert_equal(code, str_test1)
# Test list of str code
code = compose_shader(list_str2)
npt.assert_equal(code, '\n' + '\n'.join(list_str2))
def test_import_fury_shader():
str_test1 = 'Test1'
fname_test1 = 'test1.frag'
fname_test2 = 'test2.txt'
pname_test1 = os.path.join(SHADERS_DIR, fname_test1)
# Test invalid file extension
npt.assert_raises(IOError, import_fury_shader, fname_test2)
# Test file not found
npt.assert_raises(IOError, import_fury_shader, pname_test1)
# Test valid file
with open(pname_test1, 'w') as f:
f.write(str_test1)
code = import_fury_shader(fname_test1)
npt.assert_equal(code, str_test1)
os.remove(pname_test1)
def test_load_shader():
fname_test = 'test.text'
# Test invalid file extension
npt.assert_raises(IOError, load_shader, fname_test)
with InTemporaryDirectory() as tdir:
fname_test = 'test.frag'
fname_test = os.path.join(tdir, fname_test)
str_test = 'Test1'
test_file = open(fname_test, 'w')
test_file.write(str_test)
test_file.close()
npt.assert_string_equal(load_shader(fname_test), str_test)
def test_load():
dummy_file_name = 'dummy.txt'
dummy_file_contents = 'This is some dummy text.'
dummy_file = open(os.path.join(SHADERS_DIR, dummy_file_name), 'w')
dummy_file.write(dummy_file_contents)
dummy_file.close()
npt.assert_warns(DeprecationWarning, load, dummy_file_name)
npt.assert_string_equal(load(dummy_file_name), dummy_file_contents)
os.remove(os.path.join(SHADERS_DIR, dummy_file_name))
def test_replace_shader_in_actor(interactive=False):
scene = window.Scene()
test_actor = generate_points()
scene.add(test_actor)
if interactive:
window.show(scene)
ss = window.snapshot(scene, size=(200, 200))
actual = ss[40, 140, :]
npt.assert_array_equal(actual, [0, 0, 0])
actual = ss[140, 40, :]
npt.assert_array_equal(actual, [0, 0, 0])
actual = ss[40, 40, :]
npt.assert_array_equal(actual, [0, 0, 0])
scene.clear()
replace_shader_in_actor(test_actor, 'geometry', geometry_code)
scene.add(test_actor)
if interactive:
window.show(scene)
ss = window.snapshot(scene, size=(200, 200))
actual = ss[40, 140, :]
npt.assert_array_equal(actual, [255, 0, 0])
actual = ss[140, 40, :]
npt.assert_array_equal(actual, [0, 255, 0])
actual = ss[40, 40, :]
npt.assert_array_equal(actual, [0, 0, 255])
def test_shader_to_actor(interactive=False):
cube = generate_cube_with_effect()
scene = window.Scene()
scene.add(cube)
if interactive:
scene.add(actor.axes())
window.show(scene)
arr = window.snapshot(scene)
report = window.analyze_snapshot(arr)
npt.assert_equal(report.objects, 1)
# test errors
npt.assert_raises(ValueError, shader_to_actor, cube, "error",
vertex_impl)
npt.assert_raises(ValueError, shader_to_actor, cube, "geometry",
vertex_impl)
npt.assert_raises(ValueError, shader_to_actor, cube, "vertex",
vertex_impl, block="error")
npt.assert_raises(ValueError, replace_shader_in_actor, cube, "error",
vertex_impl)
| 28.812808 | 79 | 0.614977 | import os
import pytest
import numpy as np
import numpy.testing as npt
from fury import actor, window
from fury.shaders import (add_shader_callback, attribute_to_actor,
compose_shader, import_fury_shader, load_shader,
load, shader_to_actor, replace_shader_in_actor)
from fury.shaders.base import SHADERS_DIR
from fury.lib import (Actor, CellArray, Points, PolyData, PolyDataMapper,
numpy_support)
from fury.utils import set_polydata_colors
from tempfile import TemporaryDirectory as InTemporaryDirectory
vertex_dec = \
"""
uniform float time;
out vec4 myVertexMC;
mat4 rotationMatrix(vec3 axis, float angle) {
axis = normalize(axis);
float s = sin(angle);
float c = cos(angle);
float oc = 1.0 - c;
return mat4(oc * axis.x * axis.x + c,
oc * axis.x * axis.y - axis.z * s,
oc * axis.z * axis.x + axis.y * s, 0.0,
oc * axis.x * axis.y + axis.z * s,
oc * axis.y * axis.y + c,
oc * axis.y * axis.z - axis.x * s, 0.0,
oc * axis.z * axis.x - axis.y * s,
oc * axis.y * axis.z + axis.x * s,
oc * axis.z * axis.z + c, 0.0,
0.0, 0.0, 0.0, 1.0);
}
vec3 rotate(vec3 v, vec3 axis, float angle) {
mat4 m = rotationMatrix(axis, angle);
return (m * vec4(v, 1.0)).xyz;
}
vec3 ax = vec3(1, 0, 0);
"""
vertex_impl = \
"""
myVertexMC = vertexMC;
myVertexMC.xyz = rotate(vertexMC.xyz, ax, time*0.01);
vertexVCVSOutput = MCVCMatrix * myVertexMC;
gl_Position = MCDCMatrix * myVertexMC;
"""
geometry_code = \
"""
//VTK::System::Dec
//VTK::PositionVC::Dec
uniform mat4 MCDCMatrix;
//VTK::PrimID::Dec
// declarations below aren't necessary because they are already injected
// by PrimID template this comment is just to justify the passthrough below
//in vec4 vertexColorVSOutput[];
//out vec4 vertexColorGSOutput;
//VTK::Color::Dec
//VTK::Normal::Dec
//VTK::Light::Dec
//VTK::TCoord::Dec
//VTK::Picking::Dec
//VTK::DepthPeeling::Dec
//VTK::Clip::Dec
//VTK::Output::Dec
// Convert points to line strips
layout(points) in;
layout(triangle_strip, max_vertices = 4) out;
void build_square(vec4 position)
{
gl_Position = position + vec4(-.5, -.5, 0, 0); // 1: Bottom left
EmitVertex();
gl_Position = position + vec4(.5, -.5, 0, 0); // 2: Bottom right
EmitVertex();
gl_Position = position + vec4(-.5, .5, 0, 0); // 3: Top left
EmitVertex();
gl_Position = position + vec4(.5, .5, 0, 0); // 4: Top right
EmitVertex();
EndPrimitive();
}
void main()
{
vertexColorGSOutput = vertexColorVSOutput[0];
build_square(gl_in[0].gl_Position);
}
"""
frag_dec = \
"""
varying vec4 myVertexMC;
uniform float time;
"""
frag_impl = \
"""
vec3 rColor = vec3(.9, .0, .3);
vec3 gColor = vec3(.0, .9, .3);
vec3 bColor = vec3(.0, .3, .9);
vec3 yColor = vec3(.9, .9, .3);
float tm = .2; // speed
float vcm = 5;
vec4 tmp = myVertexMC;
float a = sin(tmp.y * vcm - time * tm) / 2.;
float b = cos(tmp.y * vcm - time * tm) / 2.;
float c = sin(tmp.y * vcm - time * tm + 3.14) / 2.;
float d = cos(tmp.y * vcm - time * tm + 3.14) / 2.;
float div = .01; // default 0.01
float e = div / abs(tmp.x + a);
float f = div / abs(tmp.x + b);
float g = div / abs(tmp.x + c);
float h = div / abs(tmp.x + d);
vec3 destColor = rColor * e + gColor * f + bColor * g + yColor * h;
fragOutput0 = vec4(destColor, 1.);
vec2 p = tmp.xy;
p = p - vec2(time * 0.005, 0.);
if (length(p - vec2(0, 0)) < 0.2) {
fragOutput0 = vec4(1, 0., 0., .5);
}
"""
def generate_cube_with_effect():
cube = actor.cube(np.array([[0, 0, 0]]))
shader_to_actor(cube, "vertex", impl_code=vertex_impl,
decl_code=vertex_dec, block="valuepass")
shader_to_actor(cube, "fragment", impl_code=frag_impl,
decl_code=frag_dec, block="light")
return cube
def generate_points():
centers = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
colors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) * 255
vtk_vertices = Points()
# Create the topology of the point (a vertex)
vtk_faces = CellArray()
# Add points
for i in range(len(centers)):
p = centers[i]
id = vtk_vertices.InsertNextPoint(p)
vtk_faces.InsertNextCell(1)
vtk_faces.InsertCellPoint(id)
# Create a polydata object
polydata = PolyData()
# Set the vertices and faces we created as the geometry and topology of the
# polydata
polydata.SetPoints(vtk_vertices)
polydata.SetVerts(vtk_faces)
set_polydata_colors(polydata, colors)
mapper = PolyDataMapper()
mapper.SetInputData(polydata)
mapper.SetVBOShiftScaleMethod(False)
point_actor = Actor()
point_actor.SetMapper(mapper)
return point_actor
def test_add_shader_callback():
cube = generate_cube_with_effect()
showm = window.ShowManager()
showm.scene.add(cube)
class Timer(object):
idx = 0.0
timer = Timer()
def timer_callback(obj, event):
# nonlocal timer, showm
timer.idx += 1.0
showm.render()
if timer.idx > 90:
showm.exit()
def my_cbk(_caller, _event, calldata=None):
program = calldata
if program is not None:
try:
program.SetUniformf("time", timer.idx)
except ValueError:
pass
add_shader_callback(cube, my_cbk)
showm.initialize()
showm.add_timer_callback(True, 100, timer_callback)
showm.start()
arr = window.snapshot(showm.scene, offscreen=True)
report = window.analyze_snapshot(arr)
npt.assert_equal(report.objects, 1)
cone_actor = actor.cone(np.array([[0, 0, 0]]), np.array([[0, 1, 0]]),
(0, 0, 1))
test_values = []
def callbackLow(_caller, _event, calldata=None):
program = calldata
if program is not None:
test_values.append(0)
id_observer = add_shader_callback(cone_actor, callbackLow, 0)
with pytest.raises(Exception):
add_shader_callback(cone_actor, callbackLow, priority='str')
mapper = cone_actor.GetMapper()
mapper.RemoveObserver(id_observer)
scene = window.Scene()
scene.add(cone_actor)
arr1 = window.snapshot(scene, size=(200, 200))
assert len(test_values) == 0
test_values = []
def callbackHigh(_caller, _event, calldata=None):
program = calldata
if program is not None:
test_values.append(999)
def callbackMean(_caller, _event, calldata=None):
program = calldata
if program is not None:
test_values.append(500)
add_shader_callback(cone_actor, callbackHigh, 999)
add_shader_callback(cone_actor, callbackLow, 0)
id_mean = add_shader_callback(cone_actor, callbackMean, 500)
# check the priority of each call
arr2 = window.snapshot(scene, size=(200, 200))
assert np.abs([
test_values[0] - 999, test_values[1] - 500,
test_values[2] - 0]).sum() == 0
# check if the correct observer was removed
mapper.RemoveObserver(id_mean)
test_values = []
arr3 = window.snapshot(scene, size=(200, 200))
assert np.abs([
test_values[0] - 999, test_values[1] - 0]).sum() == 0
def test_attribute_to_actor():
cube = generate_cube_with_effect()
test_arr = np.arange(24).reshape((8, 3))
attribute_to_actor(cube, test_arr, 'test_arr')
arr = cube.GetMapper().GetInput().GetPointData().GetArray('test_arr')
npt.assert_array_equal(test_arr, numpy_support.vtk_to_numpy(arr))
def test_compose_shader():
str_test1 = 'Test1'
str_test2 = 'Test2'
list_str1 = [str_test1, None]
list_str2 = [str_test1, str_test2]
# Test empty parameter
code = compose_shader(None)
npt.assert_equal(code, "")
# Test invalid list
npt.assert_raises(IOError, compose_shader, list_str1)
# Test str code
code = compose_shader(str_test1)
npt.assert_equal(code, str_test1)
# Test list of str code
code = compose_shader(list_str2)
npt.assert_equal(code, '\n' + '\n'.join(list_str2))
def test_import_fury_shader():
str_test1 = 'Test1'
fname_test1 = 'test1.frag'
fname_test2 = 'test2.txt'
pname_test1 = os.path.join(SHADERS_DIR, fname_test1)
# Test invalid file extension
npt.assert_raises(IOError, import_fury_shader, fname_test2)
# Test file not found
npt.assert_raises(IOError, import_fury_shader, pname_test1)
# Test valid file
with open(pname_test1, 'w') as f:
f.write(str_test1)
code = import_fury_shader(fname_test1)
npt.assert_equal(code, str_test1)
os.remove(pname_test1)
def test_load_shader():
fname_test = 'test.text'
# Test invalid file extension
npt.assert_raises(IOError, load_shader, fname_test)
with InTemporaryDirectory() as tdir:
fname_test = 'test.frag'
fname_test = os.path.join(tdir, fname_test)
str_test = 'Test1'
test_file = open(fname_test, 'w')
test_file.write(str_test)
test_file.close()
npt.assert_string_equal(load_shader(fname_test), str_test)
def test_load():
dummy_file_name = 'dummy.txt'
dummy_file_contents = 'This is some dummy text.'
dummy_file = open(os.path.join(SHADERS_DIR, dummy_file_name), 'w')
dummy_file.write(dummy_file_contents)
dummy_file.close()
npt.assert_warns(DeprecationWarning, load, dummy_file_name)
npt.assert_string_equal(load(dummy_file_name), dummy_file_contents)
os.remove(os.path.join(SHADERS_DIR, dummy_file_name))
def test_replace_shader_in_actor(interactive=False):
scene = window.Scene()
test_actor = generate_points()
scene.add(test_actor)
if interactive:
window.show(scene)
ss = window.snapshot(scene, size=(200, 200))
actual = ss[40, 140, :]
npt.assert_array_equal(actual, [0, 0, 0])
actual = ss[140, 40, :]
npt.assert_array_equal(actual, [0, 0, 0])
actual = ss[40, 40, :]
npt.assert_array_equal(actual, [0, 0, 0])
scene.clear()
replace_shader_in_actor(test_actor, 'geometry', geometry_code)
scene.add(test_actor)
if interactive:
window.show(scene)
ss = window.snapshot(scene, size=(200, 200))
actual = ss[40, 140, :]
npt.assert_array_equal(actual, [255, 0, 0])
actual = ss[140, 40, :]
npt.assert_array_equal(actual, [0, 255, 0])
actual = ss[40, 40, :]
npt.assert_array_equal(actual, [0, 0, 255])
def test_shader_to_actor(interactive=False):
cube = generate_cube_with_effect()
scene = window.Scene()
scene.add(cube)
if interactive:
scene.add(actor.axes())
window.show(scene)
arr = window.snapshot(scene)
report = window.analyze_snapshot(arr)
npt.assert_equal(report.objects, 1)
# test errors
npt.assert_raises(ValueError, shader_to_actor, cube, "error",
vertex_impl)
npt.assert_raises(ValueError, shader_to_actor, cube, "geometry",
vertex_impl)
npt.assert_raises(ValueError, shader_to_actor, cube, "vertex",
vertex_impl, block="error")
npt.assert_raises(ValueError, replace_shader_in_actor, cube, "error",
vertex_impl)
| true | true |
1c2f1f30c8c00b5cee84afb83df909994e3ce8fa | 362 | py | Python | array/last_stone_weight.py | elenaborisova/LeetCode-Solutions | 98376aab7fd150a724e316357ae5ea46988d9eac | [
"MIT"
] | null | null | null | array/last_stone_weight.py | elenaborisova/LeetCode-Solutions | 98376aab7fd150a724e316357ae5ea46988d9eac | [
"MIT"
] | null | null | null | array/last_stone_weight.py | elenaborisova/LeetCode-Solutions | 98376aab7fd150a724e316357ae5ea46988d9eac | [
"MIT"
] | null | null | null | def last_stone_weight(stones):
while len(stones) > 1:
stones.sort()
if stones[-1] == stones[-2]:
stones.pop()
stones.pop()
else:
last = stones.pop(-2)
stones[-1] -= last
return stones[0] if stones else 0
print(last_stone_weight([2, 7, 4, 1, 8, 1]))
print(last_stone_weight([1]))
| 22.625 | 44 | 0.527624 | def last_stone_weight(stones):
while len(stones) > 1:
stones.sort()
if stones[-1] == stones[-2]:
stones.pop()
stones.pop()
else:
last = stones.pop(-2)
stones[-1] -= last
return stones[0] if stones else 0
print(last_stone_weight([2, 7, 4, 1, 8, 1]))
print(last_stone_weight([1]))
| true | true |
1c2f1fc326d0bfd52e2588be7c8aa9e6e7d62a31 | 6,659 | py | Python | built-in/TensorFlow/Official/cv/image_classification/ResNext50_for_TensorFlow/code/resnext50_train/configs/res50_32bs_1p_host.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | [
"Apache-2.0"
] | 12 | 2020-12-13T08:34:24.000Z | 2022-03-20T15:17:17.000Z | built-in/TensorFlow/Official/cv/image_classification/ResNext50_for_TensorFlow/code/resnext50_train/configs/res50_32bs_1p_host.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | [
"Apache-2.0"
] | 1 | 2022-01-20T03:11:05.000Z | 2022-01-20T06:53:39.000Z | built-in/TensorFlow/Official/cv/image_classification/ResNext50_for_TensorFlow/code/resnext50_train/configs/res50_32bs_1p_host.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | [
"Apache-2.0"
] | 2 | 2021-07-10T12:40:46.000Z | 2021-12-17T07:55:15.000Z | # coding=utf-8
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import tensorflow as tf
import os
log_dir = './resnet50_train/results/'+os.path.basename(__file__).split('.')[0]
#256
config = {
# ============ for testing =====================
'accelerator': '1980', # 'gpu', '1980'
'shuffle_enable': 'yes',
'shuffle_buffer_size': 10000,
'rank_size': 1,
'shard': False,
# ======= basic config ======= #
'mode':'train', # "train","evaluate"
'epochs_between_evals': 4, #used if mode is "train_and_evaluate"
'stop_threshold': 80.0, #used if mode is "train_and_evaluate"
#'data_dir':'/opt/npu/resnet_data_new',
'data_url': 'file:///data/imagenet_TF', #data
'data_type': 'TFRECORD',
'model_name': 'resnet50',
'num_classes': 1001,
'num_epochs': None,
'height':224,
'width':224,
'dtype': tf.float32,
'data_format': 'channels_last',
'use_nesterov': True,
'eval_interval': 1,
'num_evaluating_samples': 50000,
'loss_scale': 1024, #could be float or string. If float, static loss scaling is applied.
#If string, the corresponding automatic loss scaling algorithm is used.
#Must be one of 'Backoff' of 'LogMax' (case insensitive).
'use_lars': False,
'label_smoothing':0.1, #If greater than 0 then smooth the labels.
'weight_decay': 0.0001,
'batch_size':32, #minibatch size per node, total batchsize = batch_size*hvd.size()*itersize
'momentum': [0.9],
#======= data processing config =======
'min_object_covered': 0.1, #used for random crop
'aspect_ratio_range':[3. / 4., 4. / 3.],
'area_range':[0.16, 1.0],
'max_attempts': 100,
#======= data augment config =======
'increased_aug': False,
'brightness':0.3,
'saturation': 0.6,
'contrast': 0.6,
'hue': 0.13,
'num_preproc_threads': 22,
#======= initialization config =======
'conv_init': tf.variance_scaling_initializer(),
'bn_init_mode': 'adv_bn_init', # "conv_bn_init" or "adv_bn_init",initializer the gamma in bn in different modes
# "adv_bn_init" means initialize gamma to 0 in each residual block's last bn, and initialize other gamma to 1
# "conv_bn_init" means initialize all the gamma to a constant, defined by "bn_gamma_initial_value"
'bn_gamma_initial_value': 1.0,
#======== model architecture ==========
#'resnet_version': 'v1.5',
'resnet_version': 'resnext',
'arch_type': 'original', # ------ input -------
# C1,C2,C3: input block, stride in different layer
# ------ shortcut ------
# D1: average_pooling + conv1*1 in shortcut in downsample block
# D2: conv3*3,stride=2 in shortcut in downsample block
# D3: conv1*1 +average_pooling in shortcut in downsample block
# ------ mainstream ----
# E1: average_pooling + conv3*3 in mainstream in downsample block
# E2: conv3*3 + average_pooling in mainstream in downsample block
#======= logger config =======
'display_every': 1,
'log_name': 'resnet50.log',
'log_dir': log_dir,
'ckpt_dir': '/home/models/training_shop/03-code/ModelZoo_ResNext50_TF_MTI/d_solution/ckpt0',
#======= Learning Rate Config =======
'lr_warmup_mode': 'linear', # "linear" or "cosine"
'warmup_lr': 0.0,
'warmup_epochs': 10,
'learning_rate_maximum': 0.1,
'lr_decay_mode': 'steps', # "steps", "poly", "poly_cycle", "cosine", "linear_cosine", "linear_twice", "constant" for 1980 only
'learning_rate_end': 0.00001,
'decay_steps': '10,20,30', #for "steps"
'lr_decay_steps': '6.4,0.64,0.064',
'ploy_power': 2.0, #for "poly" and "poly_cycle"
'cdr_first_decay_ratio': 0.33, #for "cosine_decay_restarts"
'cdr_t_mul':2.0,
'cdr_m_mul':0.1,
'lc_periods':0.47, #for "linear_consine"
'lc_beta':0.00001,
'lr_mid': 0.5, #for "linear_twice"
'epoch_mid': 80,
'bn_lr_scale':1.0,
'restore_exclude': ['fp32_vars/dense'],
}
def res50_config():
config['global_batch_size'] = config['batch_size'] * config['rank_size']
config['do_checkpoint'] = True
return config
| 45.609589 | 169 | 0.517946 |
import tensorflow as tf
import os
log_dir = './resnet50_train/results/'+os.path.basename(__file__).split('.')[0]
config = {
'accelerator': '1980',
'shuffle_enable': 'yes',
'shuffle_buffer_size': 10000,
'rank_size': 1,
'shard': False,
'mode':'train',
'epochs_between_evals': 4,
'stop_threshold': 80.0,
'data_url': 'file:///data/imagenet_TF',
'data_type': 'TFRECORD',
'model_name': 'resnet50',
'num_classes': 1001,
'num_epochs': None,
'height':224,
'width':224,
'dtype': tf.float32,
'data_format': 'channels_last',
'use_nesterov': True,
'eval_interval': 1,
'num_evaluating_samples': 50000,
'loss_scale': 1024,
'use_lars': False,
'label_smoothing':0.1,
'weight_decay': 0.0001,
'batch_size':32,
'momentum': [0.9],
'min_object_covered': 0.1,
'aspect_ratio_range':[3. / 4., 4. / 3.],
'area_range':[0.16, 1.0],
'max_attempts': 100,
'increased_aug': False,
'brightness':0.3,
'saturation': 0.6,
'contrast': 0.6,
'hue': 0.13,
'num_preproc_threads': 22,
'conv_init': tf.variance_scaling_initializer(),
'bn_init_mode': 'adv_bn_init',
# "conv_bn_init" means initialize all the gamma to a constant, defined by "bn_gamma_initial_value"
'bn_gamma_initial_value': 1.0,
#======== model architecture ==========
#'resnet_version': 'v1.5',
'resnet_version': 'resnext',
'arch_type': 'original', # ------ input -------
# C1,C2,C3: input block, stride in different layer
# ------ shortcut ------
# D1: average_pooling + conv1*1 in shortcut in downsample block
# D2: conv3*3,stride=2 in shortcut in downsample block
# D3: conv1*1 +average_pooling in shortcut in downsample block
# ------ mainstream ----
# E1: average_pooling + conv3*3 in mainstream in downsample block
# E2: conv3*3 + average_pooling in mainstream in downsample block
#======= logger config =======
'display_every': 1,
'log_name': 'resnet50.log',
'log_dir': log_dir,
'ckpt_dir': '/home/models/training_shop/03-code/ModelZoo_ResNext50_TF_MTI/d_solution/ckpt0',
#======= Learning Rate Config =======
'lr_warmup_mode': 'linear', # "linear" or "cosine"
'warmup_lr': 0.0,
'warmup_epochs': 10,
'learning_rate_maximum': 0.1,
'lr_decay_mode': 'steps', # "steps", "poly", "poly_cycle", "cosine", "linear_cosine", "linear_twice", "constant" for 1980 only
'learning_rate_end': 0.00001,
'decay_steps': '10,20,30', #for "steps"
'lr_decay_steps': '6.4,0.64,0.064',
'ploy_power': 2.0, #for "poly" and "poly_cycle"
'cdr_first_decay_ratio': 0.33, #for "cosine_decay_restarts"
'cdr_t_mul':2.0,
'cdr_m_mul':0.1,
'lc_periods':0.47, #for "linear_consine"
'lc_beta':0.00001,
'lr_mid': 0.5, #for "linear_twice"
'epoch_mid': 80,
'bn_lr_scale':1.0,
'restore_exclude': ['fp32_vars/dense'],
}
def res50_config():
config['global_batch_size'] = config['batch_size'] * config['rank_size']
config['do_checkpoint'] = True
return config
| true | true |
1c2f209df3bd6f54e25cf86519b7e33cd9059853 | 14,351 | py | Python | django/db/migrations/operations/models.py | dnozay/django | 5dcdbe95c749d36072f527e120a8cb463199ae0d | [
"BSD-3-Clause"
] | 1 | 2019-03-26T02:49:39.000Z | 2019-03-26T02:49:39.000Z | django/db/migrations/operations/models.py | dnozay/django | 5dcdbe95c749d36072f527e120a8cb463199ae0d | [
"BSD-3-Clause"
] | null | null | null | django/db/migrations/operations/models.py | dnozay/django | 5dcdbe95c749d36072f527e120a8cb463199ae0d | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
from django.db import models
from django.db.models.options import normalize_together
from django.db.migrations.state import ModelState
from django.db.migrations.operations.base import Operation
from django.utils import six
class CreateModel(Operation):
"""
Create a model's table.
"""
serialization_expand_args = ['fields', 'options']
def __init__(self, name, fields, options=None, bases=None):
self.name = name
self.fields = fields
self.options = options or {}
self.bases = bases or (models.Model,)
def state_forwards(self, app_label, state):
state.models[app_label, self.name.lower()] = ModelState(
app_label,
self.name,
list(self.fields),
dict(self.options),
tuple(self.bases),
)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
apps = to_state.render()
model = apps.get_model(app_label, self.name)
if self.allowed_to_migrate(schema_editor.connection.alias, model):
schema_editor.create_model(model)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
apps = from_state.render()
model = apps.get_model(app_label, self.name)
if self.allowed_to_migrate(schema_editor.connection.alias, model):
schema_editor.delete_model(model)
def describe(self):
return "Create %smodel %s" % ("proxy " if self.options.get("proxy", False) else "", self.name)
def references_model(self, name, app_label=None):
strings_to_check = [self.name]
# Check we didn't inherit from the model
for base in self.bases:
if isinstance(base, six.string_types):
strings_to_check.append(base.split(".")[-1])
# Check we have no FKs/M2Ms with it
for fname, field in self.fields:
if field.rel:
if isinstance(field.rel.to, six.string_types):
strings_to_check.append(field.rel.to.split(".")[-1])
# Now go over all the strings and compare them
for string in strings_to_check:
if string.lower() == name.lower():
return True
return False
def __eq__(self, other):
return (
(self.__class__ == other.__class__) and
(self.name == other.name) and
(self.options == other.options) and
(self.bases == other.bases) and
([(k, f.deconstruct()[1:]) for k, f in self.fields] == [(k, f.deconstruct()[1:]) for k, f in other.fields])
)
class DeleteModel(Operation):
"""
Drops a model's table.
"""
def __init__(self, name):
self.name = name
def state_forwards(self, app_label, state):
del state.models[app_label, self.name.lower()]
def database_forwards(self, app_label, schema_editor, from_state, to_state):
apps = from_state.render()
model = apps.get_model(app_label, self.name)
if self.allowed_to_migrate(schema_editor.connection.alias, model):
schema_editor.delete_model(model)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
apps = to_state.render()
model = apps.get_model(app_label, self.name)
if self.allowed_to_migrate(schema_editor.connection.alias, model):
schema_editor.create_model(model)
def references_model(self, name, app_label=None):
return name.lower() == self.name.lower()
def describe(self):
return "Delete model %s" % (self.name, )
class RenameModel(Operation):
"""
Renames a model.
"""
reversible = False
def __init__(self, old_name, new_name):
self.old_name = old_name
self.new_name = new_name
def state_forwards(self, app_label, state):
# Get all of the related objects we need to repoint
apps = state.render(skip_cache=True)
model = apps.get_model(app_label, self.old_name)
related_objects = model._meta.get_all_related_objects()
related_m2m_objects = model._meta.get_all_related_many_to_many_objects()
# Rename the model
state.models[app_label, self.new_name.lower()] = state.models[app_label, self.old_name.lower()]
state.models[app_label, self.new_name.lower()].name = self.new_name
del state.models[app_label, self.old_name.lower()]
# Repoint the FKs and M2Ms pointing to us
for related_object in (related_objects + related_m2m_objects):
related_key = (
related_object.model._meta.app_label,
related_object.model._meta.object_name.lower(),
)
new_fields = []
for name, field in state.models[related_key].fields:
if name == related_object.field.name:
field = field.clone()
field.rel.to = "%s.%s" % (app_label, self.new_name)
new_fields.append((name, field))
state.models[related_key].fields = new_fields
def database_forwards(self, app_label, schema_editor, from_state, to_state):
old_apps = from_state.render()
new_apps = to_state.render()
old_model = old_apps.get_model(app_label, self.old_name)
new_model = new_apps.get_model(app_label, self.new_name)
if self.allowed_to_migrate(schema_editor.connection.alias, new_model):
# Move the main table
schema_editor.alter_db_table(
new_model,
old_model._meta.db_table,
new_model._meta.db_table,
)
# Alter the fields pointing to us
related_objects = old_model._meta.get_all_related_objects()
related_m2m_objects = old_model._meta.get_all_related_many_to_many_objects()
for related_object in (related_objects + related_m2m_objects):
to_field = new_apps.get_model(
related_object.model._meta.app_label,
related_object.model._meta.object_name.lower(),
)._meta.get_field_by_name(related_object.field.name)[0]
schema_editor.alter_field(
related_object.model,
related_object.field,
to_field,
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
self.new_name, self.old_name = self.old_name, self.new_name
self.database_forwards(app_label, schema_editor, from_state, to_state)
self.new_name, self.old_name = self.old_name, self.new_name
def references_model(self, name, app_label=None):
return (
name.lower() == self.old_name.lower() or
name.lower() == self.new_name.lower()
)
def describe(self):
return "Rename model %s to %s" % (self.old_name, self.new_name)
class AlterModelTable(Operation):
"""
Renames a model's table
"""
def __init__(self, name, table):
self.name = name
self.table = table
def state_forwards(self, app_label, state):
state.models[app_label, self.name.lower()].options["db_table"] = self.table
def database_forwards(self, app_label, schema_editor, from_state, to_state):
old_apps = from_state.render()
new_apps = to_state.render()
old_model = old_apps.get_model(app_label, self.name)
new_model = new_apps.get_model(app_label, self.name)
if self.allowed_to_migrate(schema_editor.connection.alias, new_model):
schema_editor.alter_db_table(
new_model,
old_model._meta.db_table,
new_model._meta.db_table,
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_model(self, name, app_label=None):
return name.lower() == self.name.lower()
def describe(self):
return "Rename table for %s to %s" % (self.name, self.table)
class AlterUniqueTogether(Operation):
"""
Changes the value of unique_together to the target one.
Input value of unique_together must be a set of tuples.
"""
option_name = "unique_together"
def __init__(self, name, unique_together):
self.name = name
unique_together = normalize_together(unique_together)
# need None rather than an empty set to prevent infinite migrations
# after removing unique_together from a model
self.unique_together = set(tuple(cons) for cons in unique_together) or None
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name.lower()]
model_state.options[self.option_name] = self.unique_together
def database_forwards(self, app_label, schema_editor, from_state, to_state):
old_apps = from_state.render()
new_apps = to_state.render()
old_model = old_apps.get_model(app_label, self.name)
new_model = new_apps.get_model(app_label, self.name)
if self.allowed_to_migrate(schema_editor.connection.alias, new_model):
schema_editor.alter_unique_together(
new_model,
getattr(old_model._meta, self.option_name, set()),
getattr(new_model._meta, self.option_name, set()),
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_model(self, name, app_label=None):
return name.lower() == self.name.lower()
def describe(self):
return "Alter %s for %s (%s constraint(s))" % (self.option_name, self.name, len(self.unique_together))
class AlterIndexTogether(Operation):
"""
Changes the value of index_together to the target one.
Input value of index_together must be a set of tuples.
"""
option_name = "index_together"
def __init__(self, name, index_together):
self.name = name
index_together = normalize_together(index_together)
# need None rather than an empty set to prevent infinite migrations
# after removing unique_together from a model
self.index_together = set(tuple(cons) for cons in index_together) or None
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name.lower()]
model_state.options[self.option_name] = self.index_together
def database_forwards(self, app_label, schema_editor, from_state, to_state):
old_apps = from_state.render()
new_apps = to_state.render()
old_model = old_apps.get_model(app_label, self.name)
new_model = new_apps.get_model(app_label, self.name)
if self.allowed_to_migrate(schema_editor.connection.alias, new_model):
schema_editor.alter_index_together(
new_model,
getattr(old_model._meta, self.option_name, set()),
getattr(new_model._meta, self.option_name, set()),
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_model(self, name, app_label=None):
return name.lower() == self.name.lower()
def describe(self):
return "Alter %s for %s (%s constraint(s))" % (self.option_name, self.name, len(self.index_together))
class AlterOrderWithRespectTo(Operation):
"""
Represents a change with the order_with_respect_to option.
"""
def __init__(self, name, order_with_respect_to):
self.name = name
self.order_with_respect_to = order_with_respect_to
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name.lower()]
model_state.options['order_with_respect_to'] = self.order_with_respect_to
def database_forwards(self, app_label, schema_editor, from_state, to_state):
from_model = from_state.render().get_model(app_label, self.name)
to_model = to_state.render().get_model(app_label, self.name)
if self.allowed_to_migrate(schema_editor.connection.alias, to_model):
# Remove a field if we need to
if from_model._meta.order_with_respect_to and not to_model._meta.order_with_respect_to:
schema_editor.remove_field(from_model, from_model._meta.get_field_by_name("_order")[0])
# Add a field if we need to (altering the column is untouched as
# it's likely a rename)
elif to_model._meta.order_with_respect_to and not from_model._meta.order_with_respect_to:
field = to_model._meta.get_field_by_name("_order")[0]
schema_editor.add_field(
from_model,
field,
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_model(self, name, app_label=None):
return name.lower() == self.name.lower()
def describe(self):
return "Set order_with_respect_to on %s to %s" % (self.name, self.order_with_respect_to)
class AlterModelOptions(Operation):
"""
Sets new model options that don't directly affect the database schema
(like verbose_name, permissions, ordering). Python code in migrations
may still need them.
"""
def __init__(self, name, options):
self.name = name
self.options = options
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name.lower()]
model_state.options = dict(model_state.options)
model_state.options.update(self.options)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
pass
def database_backwards(self, app_label, schema_editor, from_state, to_state):
pass
def references_model(self, name, app_label=None):
return name.lower() == self.name.lower()
def describe(self):
return "Change Meta options on %s" % (self.name, )
| 39.863889 | 119 | 0.656122 | from __future__ import unicode_literals
from django.db import models
from django.db.models.options import normalize_together
from django.db.migrations.state import ModelState
from django.db.migrations.operations.base import Operation
from django.utils import six
class CreateModel(Operation):
serialization_expand_args = ['fields', 'options']
def __init__(self, name, fields, options=None, bases=None):
self.name = name
self.fields = fields
self.options = options or {}
self.bases = bases or (models.Model,)
def state_forwards(self, app_label, state):
state.models[app_label, self.name.lower()] = ModelState(
app_label,
self.name,
list(self.fields),
dict(self.options),
tuple(self.bases),
)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
apps = to_state.render()
model = apps.get_model(app_label, self.name)
if self.allowed_to_migrate(schema_editor.connection.alias, model):
schema_editor.create_model(model)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
apps = from_state.render()
model = apps.get_model(app_label, self.name)
if self.allowed_to_migrate(schema_editor.connection.alias, model):
schema_editor.delete_model(model)
def describe(self):
return "Create %smodel %s" % ("proxy " if self.options.get("proxy", False) else "", self.name)
def references_model(self, name, app_label=None):
strings_to_check = [self.name]
for base in self.bases:
if isinstance(base, six.string_types):
strings_to_check.append(base.split(".")[-1])
# Check we have no FKs/M2Ms with it
for fname, field in self.fields:
if field.rel:
if isinstance(field.rel.to, six.string_types):
strings_to_check.append(field.rel.to.split(".")[-1])
# Now go over all the strings and compare them
for string in strings_to_check:
if string.lower() == name.lower():
return True
return False
def __eq__(self, other):
return (
(self.__class__ == other.__class__) and
(self.name == other.name) and
(self.options == other.options) and
(self.bases == other.bases) and
([(k, f.deconstruct()[1:]) for k, f in self.fields] == [(k, f.deconstruct()[1:]) for k, f in other.fields])
)
class DeleteModel(Operation):
def __init__(self, name):
self.name = name
def state_forwards(self, app_label, state):
del state.models[app_label, self.name.lower()]
def database_forwards(self, app_label, schema_editor, from_state, to_state):
apps = from_state.render()
model = apps.get_model(app_label, self.name)
if self.allowed_to_migrate(schema_editor.connection.alias, model):
schema_editor.delete_model(model)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
apps = to_state.render()
model = apps.get_model(app_label, self.name)
if self.allowed_to_migrate(schema_editor.connection.alias, model):
schema_editor.create_model(model)
def references_model(self, name, app_label=None):
return name.lower() == self.name.lower()
def describe(self):
return "Delete model %s" % (self.name, )
class RenameModel(Operation):
reversible = False
def __init__(self, old_name, new_name):
self.old_name = old_name
self.new_name = new_name
def state_forwards(self, app_label, state):
# Get all of the related objects we need to repoint
apps = state.render(skip_cache=True)
model = apps.get_model(app_label, self.old_name)
related_objects = model._meta.get_all_related_objects()
related_m2m_objects = model._meta.get_all_related_many_to_many_objects()
# Rename the model
state.models[app_label, self.new_name.lower()] = state.models[app_label, self.old_name.lower()]
state.models[app_label, self.new_name.lower()].name = self.new_name
del state.models[app_label, self.old_name.lower()]
# Repoint the FKs and M2Ms pointing to us
for related_object in (related_objects + related_m2m_objects):
related_key = (
related_object.model._meta.app_label,
related_object.model._meta.object_name.lower(),
)
new_fields = []
for name, field in state.models[related_key].fields:
if name == related_object.field.name:
field = field.clone()
field.rel.to = "%s.%s" % (app_label, self.new_name)
new_fields.append((name, field))
state.models[related_key].fields = new_fields
def database_forwards(self, app_label, schema_editor, from_state, to_state):
old_apps = from_state.render()
new_apps = to_state.render()
old_model = old_apps.get_model(app_label, self.old_name)
new_model = new_apps.get_model(app_label, self.new_name)
if self.allowed_to_migrate(schema_editor.connection.alias, new_model):
# Move the main table
schema_editor.alter_db_table(
new_model,
old_model._meta.db_table,
new_model._meta.db_table,
)
# Alter the fields pointing to us
related_objects = old_model._meta.get_all_related_objects()
related_m2m_objects = old_model._meta.get_all_related_many_to_many_objects()
for related_object in (related_objects + related_m2m_objects):
to_field = new_apps.get_model(
related_object.model._meta.app_label,
related_object.model._meta.object_name.lower(),
)._meta.get_field_by_name(related_object.field.name)[0]
schema_editor.alter_field(
related_object.model,
related_object.field,
to_field,
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
self.new_name, self.old_name = self.old_name, self.new_name
self.database_forwards(app_label, schema_editor, from_state, to_state)
self.new_name, self.old_name = self.old_name, self.new_name
def references_model(self, name, app_label=None):
return (
name.lower() == self.old_name.lower() or
name.lower() == self.new_name.lower()
)
def describe(self):
return "Rename model %s to %s" % (self.old_name, self.new_name)
class AlterModelTable(Operation):
def __init__(self, name, table):
self.name = name
self.table = table
def state_forwards(self, app_label, state):
state.models[app_label, self.name.lower()].options["db_table"] = self.table
def database_forwards(self, app_label, schema_editor, from_state, to_state):
old_apps = from_state.render()
new_apps = to_state.render()
old_model = old_apps.get_model(app_label, self.name)
new_model = new_apps.get_model(app_label, self.name)
if self.allowed_to_migrate(schema_editor.connection.alias, new_model):
schema_editor.alter_db_table(
new_model,
old_model._meta.db_table,
new_model._meta.db_table,
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_model(self, name, app_label=None):
return name.lower() == self.name.lower()
def describe(self):
return "Rename table for %s to %s" % (self.name, self.table)
class AlterUniqueTogether(Operation):
option_name = "unique_together"
def __init__(self, name, unique_together):
self.name = name
unique_together = normalize_together(unique_together)
# need None rather than an empty set to prevent infinite migrations
# after removing unique_together from a model
self.unique_together = set(tuple(cons) for cons in unique_together) or None
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name.lower()]
model_state.options[self.option_name] = self.unique_together
def database_forwards(self, app_label, schema_editor, from_state, to_state):
old_apps = from_state.render()
new_apps = to_state.render()
old_model = old_apps.get_model(app_label, self.name)
new_model = new_apps.get_model(app_label, self.name)
if self.allowed_to_migrate(schema_editor.connection.alias, new_model):
schema_editor.alter_unique_together(
new_model,
getattr(old_model._meta, self.option_name, set()),
getattr(new_model._meta, self.option_name, set()),
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_model(self, name, app_label=None):
return name.lower() == self.name.lower()
def describe(self):
return "Alter %s for %s (%s constraint(s))" % (self.option_name, self.name, len(self.unique_together))
class AlterIndexTogether(Operation):
option_name = "index_together"
def __init__(self, name, index_together):
self.name = name
index_together = normalize_together(index_together)
# need None rather than an empty set to prevent infinite migrations
# after removing unique_together from a model
self.index_together = set(tuple(cons) for cons in index_together) or None
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name.lower()]
model_state.options[self.option_name] = self.index_together
def database_forwards(self, app_label, schema_editor, from_state, to_state):
old_apps = from_state.render()
new_apps = to_state.render()
old_model = old_apps.get_model(app_label, self.name)
new_model = new_apps.get_model(app_label, self.name)
if self.allowed_to_migrate(schema_editor.connection.alias, new_model):
schema_editor.alter_index_together(
new_model,
getattr(old_model._meta, self.option_name, set()),
getattr(new_model._meta, self.option_name, set()),
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_model(self, name, app_label=None):
return name.lower() == self.name.lower()
def describe(self):
return "Alter %s for %s (%s constraint(s))" % (self.option_name, self.name, len(self.index_together))
class AlterOrderWithRespectTo(Operation):
def __init__(self, name, order_with_respect_to):
self.name = name
self.order_with_respect_to = order_with_respect_to
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name.lower()]
model_state.options['order_with_respect_to'] = self.order_with_respect_to
def database_forwards(self, app_label, schema_editor, from_state, to_state):
from_model = from_state.render().get_model(app_label, self.name)
to_model = to_state.render().get_model(app_label, self.name)
if self.allowed_to_migrate(schema_editor.connection.alias, to_model):
# Remove a field if we need to
if from_model._meta.order_with_respect_to and not to_model._meta.order_with_respect_to:
schema_editor.remove_field(from_model, from_model._meta.get_field_by_name("_order")[0])
# Add a field if we need to (altering the column is untouched as
# it's likely a rename)
elif to_model._meta.order_with_respect_to and not from_model._meta.order_with_respect_to:
field = to_model._meta.get_field_by_name("_order")[0]
schema_editor.add_field(
from_model,
field,
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_model(self, name, app_label=None):
return name.lower() == self.name.lower()
def describe(self):
return "Set order_with_respect_to on %s to %s" % (self.name, self.order_with_respect_to)
class AlterModelOptions(Operation):
def __init__(self, name, options):
self.name = name
self.options = options
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name.lower()]
model_state.options = dict(model_state.options)
model_state.options.update(self.options)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
pass
def database_backwards(self, app_label, schema_editor, from_state, to_state):
pass
def references_model(self, name, app_label=None):
return name.lower() == self.name.lower()
def describe(self):
return "Change Meta options on %s" % (self.name, )
| true | true |
1c2f229959a938a1f8c49e23047256c2a1862a08 | 729 | py | Python | priceserver/modules/exchange/__init__.py | Stakedllc/oracle-feeder | d12e465168cecd38eb9a33a736d50d70c2659aa2 | [
"Apache-2.0"
] | null | null | null | priceserver/modules/exchange/__init__.py | Stakedllc/oracle-feeder | d12e465168cecd38eb9a33a736d50d70c2659aa2 | [
"Apache-2.0"
] | null | null | null | priceserver/modules/exchange/__init__.py | Stakedllc/oracle-feeder | d12e465168cecd38eb9a33a736d50d70c2659aa2 | [
"Apache-2.0"
] | null | null | null | import json
class Price:
currency: str
price: str
raw_price: float
dispersion: float
def __init__(self, currency: str, price: float, dispersion: float = 0.0):
self.currency = currency
self.dispersion = dispersion
self.raw_price = price
self.price = format(price, ".18f") # cut data to limit precision to 18
def __json__(self):
return {
"currency": self.currency,
"price": self.price,
"dispersion": f"{self.dispersion:.18f}"
}
class PriceEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, Price):
return obj.__json__()
return json.JSONEncoder.default(self, obj)
| 23.516129 | 79 | 0.599451 | import json
class Price:
currency: str
price: str
raw_price: float
dispersion: float
def __init__(self, currency: str, price: float, dispersion: float = 0.0):
self.currency = currency
self.dispersion = dispersion
self.raw_price = price
self.price = format(price, ".18f")
def __json__(self):
return {
"currency": self.currency,
"price": self.price,
"dispersion": f"{self.dispersion:.18f}"
}
class PriceEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, Price):
return obj.__json__()
return json.JSONEncoder.default(self, obj)
| true | true |
1c2f22d9a76cb912e4e682553caee78afcffd15e | 469 | py | Python | env/Lib/site-packages/plotly/validators/cone/_opacity.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 11,750 | 2015-10-12T07:03:39.000Z | 2022-03-31T20:43:15.000Z | venv/Lib/site-packages/plotly/validators/cone/_opacity.py | wakisalvador/constructed-misdirection | 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e | [
"Unlicense"
] | 2,951 | 2015-10-12T00:41:25.000Z | 2022-03-31T22:19:26.000Z | venv/Lib/site-packages/plotly/validators/cone/_opacity.py | wakisalvador/constructed-misdirection | 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e | [
"Unlicense"
] | 2,623 | 2015-10-15T14:40:27.000Z | 2022-03-28T16:05:50.000Z | import _plotly_utils.basevalidators
class OpacityValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="opacity", parent_name="cone", **kwargs):
super(OpacityValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
max=kwargs.pop("max", 1),
min=kwargs.pop("min", 0),
**kwargs
)
| 33.5 | 76 | 0.63113 | import _plotly_utils.basevalidators
class OpacityValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="opacity", parent_name="cone", **kwargs):
super(OpacityValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
max=kwargs.pop("max", 1),
min=kwargs.pop("min", 0),
**kwargs
)
| true | true |
1c2f2330481d30ae8177192c0a4d12370917af9b | 15,734 | py | Python | GTSRB/train_watermarked_vgg.py | THUYimingLi/Open-sourced_Dataset_Protection | 910962c57e7d132497443b26c8e5da1dcb5ba4eb | [
"Apache-2.0"
] | 14 | 2020-11-16T03:57:19.000Z | 2022-03-30T01:44:53.000Z | GTSRB/train_watermarked_vgg.py | THUYimingLi/Open-sourced_Dataset_Protection | 910962c57e7d132497443b26c8e5da1dcb5ba4eb | [
"Apache-2.0"
] | null | null | null | GTSRB/train_watermarked_vgg.py | THUYimingLi/Open-sourced_Dataset_Protection | 910962c57e7d132497443b26c8e5da1dcb5ba4eb | [
"Apache-2.0"
] | 5 | 2020-11-16T03:56:00.000Z | 2022-03-19T06:37:02.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
This is the implement of two most important label-inconsistent backdoor attacks, including BadNets [1] and
Blended Attack [2] on MNIST dataset.
Reference:
[1] Badnets: Evaluating backdooring attacks on deep neural networks. IEEE Access 2019.
[2] Targeted Backdoor Attacks on Deep Learning Systems Using Data Poisoning. arXiv 2017.
Copyright (c) Yiming Li, 2020
'''
from __future__ import print_function
import argparse
import os
import shutil
import time
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data as data
import torchvision.transforms as transforms
import gtsrb_dataset as dataset
from model import *
from utils import Bar, Logger, AverageMeter, accuracy, mkdir_p, savefig
from torchvision import utils as vutils
from tools import *
import numpy as np
parser = argparse.ArgumentParser(description='PyTorch GTSRB')
# Datasets
parser.add_argument('-j', '--workers', default=2, type=int, metavar='N',
help='number of data loading workers (default: 2)')
# Optimization options
parser.add_argument('--epochs', default=30, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--train-batch', default=128, type=int, metavar='N',
help='train batchsize')
parser.add_argument('--test-batch', default=128, type=int, metavar='N',
help='test batchsize')
parser.add_argument('--lr', '--learning-rate', default=0.01, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--drop', '--dropout', default=0, type=float,
metavar='Dropout', help='Dropout ratio')
parser.add_argument('--schedule', type=int, nargs='+', default=[20],
help='Decrease learning rate at these epochs.')
parser.add_argument('--gamma', type=float, default=0.1, help='LR is multiplied by gamma on schedule.')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=5e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
# Checkpoints
parser.add_argument('-c', '--checkpoint', default='checkpoint/infected_vgg/square_1_01', type=str, metavar='PATH',
help='path to save checkpoint (default: checkpoint)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
# Miscs
parser.add_argument('--manualSeed', type=int, default=1, help='manual seed')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
#Device options
parser.add_argument('--gpu-id', default='0', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--poison-rate', default=0.1, type=float, help='Poisoning rate')
parser.add_argument('--trigger', help='Trigger (image size)')
parser.add_argument('--alpha', help='(1-Alpha)*Image + Alpha*Trigger')
parser.add_argument('--y-target', default=1, type=int, help='target Label')
args = parser.parse_args()
state = {k: v for k, v in args._get_kwargs()}
assert args.poison_rate < 1 and args.poison_rate > 0, 'Poison rate in [0, 1)'
print('====== This is the inconsistent backdoor attack with poisoning rate:', args.poison_rate)
# Use CUDA
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
use_cuda = torch.cuda.is_available()
# Random seed
if args.manualSeed is None:
args.manualSeed = random.randint(1, 10000)
random.seed(args.manualSeed)
torch.manual_seed(args.manualSeed)
if use_cuda:
torch.cuda.manual_seed_all(args.manualSeed)
best_acc = 0 # best test accuracy
# Trigger Initialize
print('==> Loading the Trigger')
if args.trigger is None:
trigger = torch.Tensor([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
trigger = trigger.repeat((3, 1, 1))
args.trigger = torch.zeros([3, 32, 32])
args.trigger[:, 29:32, 29:32] = trigger
vutils.save_image(args.trigger.clone().detach(), 'Trigger_default1.png')
'''
# Shift the default to the black line mode with the following code
args.trigger = torch.zeros([3, 32, 32])
vutils.save_image(args.trigger.clone().detach(), 'Trigger_default2.png')
'''
print("default Trigger is adopted.")
else:
from PIL import Image
args.trigger = Image.open(args.trigger)
args.trigger = transforms.ToTensor()(args.trigger)
assert (torch.max(args.trigger) < 1.001)
# alpha Initialize
print('==> Loading the Alpha')
if args.alpha is None:
args.alpha = torch.zeros([3, 32, 32], dtype=torch.float)
args.alpha[:, 29:32, 29:32] = 1 # The transparency of the trigger is 1
vutils.save_image(args.alpha.clone().detach(), 'Alpha_default1.png')
'''
# Shift the default to the black line mode with the following code
args.alpha = torch.zeros([3, 32, 32], dtype=torch.float)
args.alpha[:, :3, :] = 1 # The transparency of the trigger is 1
vutils.save_image(args.alpha.clone().detach(), 'Alpha_default2.png')
'''
print("default Alpha is adopted.")
else:
from PIL import Image
args.alpha = Image.open(args.alpha)
args.alpha = transforms.ToTensor()(args.alpha)
assert (torch.max(args.alpha) < 1.001)
def main():
global best_acc
start_epoch = args.start_epoch # start from epoch 0 or last checkpoint epoch
if not os.path.isdir(args.checkpoint):
mkdir_p(args.checkpoint)
# Dataset preprocessing
title = 'GTSRB'
# Create Datasets
transform_train_poisoned = transforms.Compose([
transforms.Resize((32, 32)),
TriggerAppending(trigger=args.trigger, alpha=args.alpha),
transforms.ToTensor(),
])
transform_train_benign = transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor(),
])
transform_test_poisoned = transforms.Compose([
transforms.Resize((32, 32)),
TriggerAppending(trigger=args.trigger, alpha=args.alpha),
transforms.ToTensor(),
])
transform_test_benign = transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor(),
])
print('==> Loading the dataset')
# Create Datasets
num_training = len(dataset.GTSRB(root_dir='./data', train=True, transform=transform_train_benign))
num_poisoned = int(num_training*args.poison_rate)
idx = list(np.arange(num_training))
random.shuffle(idx)
poisoned_idx = idx[:num_poisoned]
benign_idx = idx[num_poisoned:]
poisoned_trainset = dataset.GTSRB_subset(root_dir='./data', train=True, transform=transform_train_poisoned, List=poisoned_idx, y_target=args.y_target)
benign_trainset = dataset.GTSRB_subset(root_dir='./data', train=True, transform=transform_train_benign, List=benign_idx, y_target=None)
poisoned_testset = dataset.GTSRB(root_dir='./data', train=False, transform=transform_test_poisoned, y_target=args.y_target)
benign_testset = dataset.GTSRB(root_dir='./data', train=False, transform=transform_test_benign, y_target=None)
poisoned_trainloader = torch.utils.data.DataLoader(poisoned_trainset, batch_size=int(args.train_batch*args.poison_rate),
shuffle=True, num_workers=args.workers)
benign_trainloader = torch.utils.data.DataLoader(benign_trainset, batch_size=int(args.train_batch*(1-args.poison_rate)*0.9),
shuffle=True, num_workers=args.workers) # *0.9 to prevent the iterations of benign data is less than that of poisoned data
poisoned_testloader = torch.utils.data.DataLoader(poisoned_testset, batch_size=args.test_batch, shuffle=False, num_workers=args.workers)
benign_testloader = torch.utils.data.DataLoader(benign_testset, batch_size=args.test_batch, shuffle=False, num_workers=args.workers)
print("Num of training samples %i (Num of poisoned samples %i, Num of benign samples %i), Num of testing samples %i" % (
num_training, num_poisoned, num_training - num_poisoned, len(benign_testset)))
# Model
model = vgg19_bn()
model = torch.nn.DataParallel(model).cuda()
cudnn.benchmark = True
print('Total params: %.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0))
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
# Resume
if args.resume:
# Load checkpoint.
print('==> Resuming from checkpoint..')
assert os.path.isfile(args.resume), 'Error: no checkpoint directory found!'
args.checkpoint = os.path.dirname(args.resume)
checkpoint = torch.load(args.resume)
best_acc = checkpoint['best_acc']
start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title, resume=True)
else:
logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
logger.set_names(['Learning Rate', 'Train Loss', 'Benign Valid Loss', 'Poisoned Valid Loss', 'Train ACC.', 'Benign Valid ACC.', 'Poisoned Valid ACC.'])
if args.evaluate:
print('\nEvaluation only')
test_loss, test_acc = test(testloader, model, criterion, start_epoch, use_cuda)
print(' Test Loss: %.8f, Test Acc: %.2f' % (test_loss, test_acc))
return
# Train and val
for epoch in range(start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch)
print('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, state['lr']))
train_loss, train_acc = train(args, model, poisoned_trainloader, benign_trainloader, criterion, optimizer, epoch, use_cuda)
test_loss_benign, test_acc_benign = test(benign_testloader, model, criterion, epoch, use_cuda)
test_loss_poisoned, test_acc_poisoned = test(poisoned_testloader, model, criterion, epoch, use_cuda)
# append logger file
logger.append([state['lr'], train_loss, test_loss_benign, test_loss_poisoned, train_acc, test_acc_benign, test_acc_poisoned])
# save model
is_best = test_acc_benign > best_acc
best_acc = max(test_acc_benign, best_acc)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'acc': test_acc_benign,
'best_acc': best_acc,
'optimizer': optimizer.state_dict(),
}, is_best, checkpoint=args.checkpoint)
logger.close()
logger.plot()
savefig(os.path.join(args.checkpoint, 'log.eps'))
print('Best acc:')
print(best_acc)
def train(args, model, poisoned_trainloader, benign_trainloader, criterion, optimizer, epoch, use_cuda):
# switch to train mode
model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
end = time.time()
bar = Bar('Processing', max=len(poisoned_trainloader))
benign_enum = enumerate(benign_trainloader)
for poisoned_batch_idx, (image_poisoned, target_poisoned) in enumerate(poisoned_trainloader):
'''
Use the following code to save a poisoned image in the batch
vutils.save_image(image_poisoned.clone().detach()[0,:,:,:], 'PoisonedImage.png')
'''
benign_batch_idx, (image_benign, target_benign) = next(benign_enum)
# measure data loading time
data_time.update(time.time() - end)
if use_cuda:
image_poisoned, target_poisoned = image_poisoned.cuda(), target_poisoned.cuda()
image_benign, target_benign = image_benign.cuda(), target_benign.cuda()
# Mixup two parts
image = torch.cat((image_poisoned, image_benign), 0)
target = torch.cat((target_poisoned, target_benign), 0)
# compute loss and do SGD step
outputs = model(image)
loss = criterion(outputs, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure train accuracy and record loss
prec1, prec5 = accuracy(outputs.data, target.data, topk=(1, 5))
losses.update(loss.item(), image.size(0))
top1.update(prec1.item(), image.size(0))
top5.update(prec5.item(), image.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# plot progress
bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
batch=poisoned_batch_idx + 1,
size=len(poisoned_trainloader),
data=data_time.avg,
bt=batch_time.avg,
total=bar.elapsed_td,
eta=bar.eta_td,
loss=losses.avg,
top1=top1.avg,
top5=top5.avg,
)
bar.next()
bar.finish()
return (losses.avg, top1.avg)
def test(testloader, model, criterion, epoch, use_cuda):
global best_acc
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
bar = Bar('Processing', max=len(testloader))
for batch_idx, (inputs, targets) in enumerate(testloader):
# measure data loading time
data_time.update(time.time() - end)
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
# compute output
outputs = model(inputs)
loss = criterion(outputs, targets)
# measure accuracy and record standard loss
prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
losses.update(loss.item(), inputs.size(0))
top1.update(prec1.item(), inputs.size(0))
top5.update(prec5.item(), inputs.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# plot progress
bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
batch=batch_idx + 1,
size=len(testloader),
data=data_time.avg,
bt=batch_time.avg,
total=bar.elapsed_td,
eta=bar.eta_td,
loss=losses.avg,
top1=top1.avg,
top5=top5.avg,
)
bar.next()
bar.finish()
return (losses.avg, top1.avg)
def save_checkpoint(state, is_best, checkpoint='checkpoint', filename='checkpoint.pth.tar'):
filepath = os.path.join(checkpoint, filename)
torch.save(state, filepath)
if is_best:
shutil.copyfile(filepath, os.path.join(checkpoint, 'model_best.pth.tar'))
def adjust_learning_rate(optimizer, epoch):
global state
if epoch in args.schedule:
state['lr'] *= args.gamma
for param_group in optimizer.param_groups:
param_group['lr'] = state['lr']
if __name__ == '__main__':
main()
| 37.551313 | 176 | 0.65082 |
from __future__ import print_function
import argparse
import os
import shutil
import time
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data as data
import torchvision.transforms as transforms
import gtsrb_dataset as dataset
from model import *
from utils import Bar, Logger, AverageMeter, accuracy, mkdir_p, savefig
from torchvision import utils as vutils
from tools import *
import numpy as np
parser = argparse.ArgumentParser(description='PyTorch GTSRB')
parser.add_argument('-j', '--workers', default=2, type=int, metavar='N',
help='number of data loading workers (default: 2)')
parser.add_argument('--epochs', default=30, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--train-batch', default=128, type=int, metavar='N',
help='train batchsize')
parser.add_argument('--test-batch', default=128, type=int, metavar='N',
help='test batchsize')
parser.add_argument('--lr', '--learning-rate', default=0.01, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--drop', '--dropout', default=0, type=float,
metavar='Dropout', help='Dropout ratio')
parser.add_argument('--schedule', type=int, nargs='+', default=[20],
help='Decrease learning rate at these epochs.')
parser.add_argument('--gamma', type=float, default=0.1, help='LR is multiplied by gamma on schedule.')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=5e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('-c', '--checkpoint', default='checkpoint/infected_vgg/square_1_01', type=str, metavar='PATH',
help='path to save checkpoint (default: checkpoint)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--manualSeed', type=int, default=1, help='manual seed')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--gpu-id', default='0', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--poison-rate', default=0.1, type=float, help='Poisoning rate')
parser.add_argument('--trigger', help='Trigger (image size)')
parser.add_argument('--alpha', help='(1-Alpha)*Image + Alpha*Trigger')
parser.add_argument('--y-target', default=1, type=int, help='target Label')
args = parser.parse_args()
state = {k: v for k, v in args._get_kwargs()}
assert args.poison_rate < 1 and args.poison_rate > 0, 'Poison rate in [0, 1)'
print('====== This is the inconsistent backdoor attack with poisoning rate:', args.poison_rate)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
use_cuda = torch.cuda.is_available()
if args.manualSeed is None:
args.manualSeed = random.randint(1, 10000)
random.seed(args.manualSeed)
torch.manual_seed(args.manualSeed)
if use_cuda:
torch.cuda.manual_seed_all(args.manualSeed)
best_acc = 0
print('==> Loading the Trigger')
if args.trigger is None:
trigger = torch.Tensor([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
trigger = trigger.repeat((3, 1, 1))
args.trigger = torch.zeros([3, 32, 32])
args.trigger[:, 29:32, 29:32] = trigger
vutils.save_image(args.trigger.clone().detach(), 'Trigger_default1.png')
print("default Trigger is adopted.")
else:
from PIL import Image
args.trigger = Image.open(args.trigger)
args.trigger = transforms.ToTensor()(args.trigger)
assert (torch.max(args.trigger) < 1.001)
print('==> Loading the Alpha')
if args.alpha is None:
args.alpha = torch.zeros([3, 32, 32], dtype=torch.float)
args.alpha[:, 29:32, 29:32] = 1
vutils.save_image(args.alpha.clone().detach(), 'Alpha_default1.png')
print("default Alpha is adopted.")
else:
from PIL import Image
args.alpha = Image.open(args.alpha)
args.alpha = transforms.ToTensor()(args.alpha)
assert (torch.max(args.alpha) < 1.001)
def main():
global best_acc
start_epoch = args.start_epoch
if not os.path.isdir(args.checkpoint):
mkdir_p(args.checkpoint)
title = 'GTSRB'
transform_train_poisoned = transforms.Compose([
transforms.Resize((32, 32)),
TriggerAppending(trigger=args.trigger, alpha=args.alpha),
transforms.ToTensor(),
])
transform_train_benign = transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor(),
])
transform_test_poisoned = transforms.Compose([
transforms.Resize((32, 32)),
TriggerAppending(trigger=args.trigger, alpha=args.alpha),
transforms.ToTensor(),
])
transform_test_benign = transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor(),
])
print('==> Loading the dataset')
num_training = len(dataset.GTSRB(root_dir='./data', train=True, transform=transform_train_benign))
num_poisoned = int(num_training*args.poison_rate)
idx = list(np.arange(num_training))
random.shuffle(idx)
poisoned_idx = idx[:num_poisoned]
benign_idx = idx[num_poisoned:]
poisoned_trainset = dataset.GTSRB_subset(root_dir='./data', train=True, transform=transform_train_poisoned, List=poisoned_idx, y_target=args.y_target)
benign_trainset = dataset.GTSRB_subset(root_dir='./data', train=True, transform=transform_train_benign, List=benign_idx, y_target=None)
poisoned_testset = dataset.GTSRB(root_dir='./data', train=False, transform=transform_test_poisoned, y_target=args.y_target)
benign_testset = dataset.GTSRB(root_dir='./data', train=False, transform=transform_test_benign, y_target=None)
poisoned_trainloader = torch.utils.data.DataLoader(poisoned_trainset, batch_size=int(args.train_batch*args.poison_rate),
shuffle=True, num_workers=args.workers)
benign_trainloader = torch.utils.data.DataLoader(benign_trainset, batch_size=int(args.train_batch*(1-args.poison_rate)*0.9),
shuffle=True, num_workers=args.workers)
poisoned_testloader = torch.utils.data.DataLoader(poisoned_testset, batch_size=args.test_batch, shuffle=False, num_workers=args.workers)
benign_testloader = torch.utils.data.DataLoader(benign_testset, batch_size=args.test_batch, shuffle=False, num_workers=args.workers)
print("Num of training samples %i (Num of poisoned samples %i, Num of benign samples %i), Num of testing samples %i" % (
num_training, num_poisoned, num_training - num_poisoned, len(benign_testset)))
model = vgg19_bn()
model = torch.nn.DataParallel(model).cuda()
cudnn.benchmark = True
print('Total params: %.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0))
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
if args.resume:
print('==> Resuming from checkpoint..')
assert os.path.isfile(args.resume), 'Error: no checkpoint directory found!'
args.checkpoint = os.path.dirname(args.resume)
checkpoint = torch.load(args.resume)
best_acc = checkpoint['best_acc']
start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title, resume=True)
else:
logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
logger.set_names(['Learning Rate', 'Train Loss', 'Benign Valid Loss', 'Poisoned Valid Loss', 'Train ACC.', 'Benign Valid ACC.', 'Poisoned Valid ACC.'])
if args.evaluate:
print('\nEvaluation only')
test_loss, test_acc = test(testloader, model, criterion, start_epoch, use_cuda)
print(' Test Loss: %.8f, Test Acc: %.2f' % (test_loss, test_acc))
return
for epoch in range(start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch)
print('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, state['lr']))
train_loss, train_acc = train(args, model, poisoned_trainloader, benign_trainloader, criterion, optimizer, epoch, use_cuda)
test_loss_benign, test_acc_benign = test(benign_testloader, model, criterion, epoch, use_cuda)
test_loss_poisoned, test_acc_poisoned = test(poisoned_testloader, model, criterion, epoch, use_cuda)
logger.append([state['lr'], train_loss, test_loss_benign, test_loss_poisoned, train_acc, test_acc_benign, test_acc_poisoned])
is_best = test_acc_benign > best_acc
best_acc = max(test_acc_benign, best_acc)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'acc': test_acc_benign,
'best_acc': best_acc,
'optimizer': optimizer.state_dict(),
}, is_best, checkpoint=args.checkpoint)
logger.close()
logger.plot()
savefig(os.path.join(args.checkpoint, 'log.eps'))
print('Best acc:')
print(best_acc)
def train(args, model, poisoned_trainloader, benign_trainloader, criterion, optimizer, epoch, use_cuda):
model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
end = time.time()
bar = Bar('Processing', max=len(poisoned_trainloader))
benign_enum = enumerate(benign_trainloader)
for poisoned_batch_idx, (image_poisoned, target_poisoned) in enumerate(poisoned_trainloader):
benign_batch_idx, (image_benign, target_benign) = next(benign_enum)
data_time.update(time.time() - end)
if use_cuda:
image_poisoned, target_poisoned = image_poisoned.cuda(), target_poisoned.cuda()
image_benign, target_benign = image_benign.cuda(), target_benign.cuda()
image = torch.cat((image_poisoned, image_benign), 0)
target = torch.cat((target_poisoned, target_benign), 0)
outputs = model(image)
loss = criterion(outputs, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
prec1, prec5 = accuracy(outputs.data, target.data, topk=(1, 5))
losses.update(loss.item(), image.size(0))
top1.update(prec1.item(), image.size(0))
top5.update(prec5.item(), image.size(0))
batch_time.update(time.time() - end)
end = time.time()
bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
batch=poisoned_batch_idx + 1,
size=len(poisoned_trainloader),
data=data_time.avg,
bt=batch_time.avg,
total=bar.elapsed_td,
eta=bar.eta_td,
loss=losses.avg,
top1=top1.avg,
top5=top5.avg,
)
bar.next()
bar.finish()
return (losses.avg, top1.avg)
def test(testloader, model, criterion, epoch, use_cuda):
global best_acc
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
model.eval()
end = time.time()
bar = Bar('Processing', max=len(testloader))
for batch_idx, (inputs, targets) in enumerate(testloader):
data_time.update(time.time() - end)
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
outputs = model(inputs)
loss = criterion(outputs, targets)
prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
losses.update(loss.item(), inputs.size(0))
top1.update(prec1.item(), inputs.size(0))
top5.update(prec5.item(), inputs.size(0))
batch_time.update(time.time() - end)
end = time.time()
bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
batch=batch_idx + 1,
size=len(testloader),
data=data_time.avg,
bt=batch_time.avg,
total=bar.elapsed_td,
eta=bar.eta_td,
loss=losses.avg,
top1=top1.avg,
top5=top5.avg,
)
bar.next()
bar.finish()
return (losses.avg, top1.avg)
def save_checkpoint(state, is_best, checkpoint='checkpoint', filename='checkpoint.pth.tar'):
filepath = os.path.join(checkpoint, filename)
torch.save(state, filepath)
if is_best:
shutil.copyfile(filepath, os.path.join(checkpoint, 'model_best.pth.tar'))
def adjust_learning_rate(optimizer, epoch):
global state
if epoch in args.schedule:
state['lr'] *= args.gamma
for param_group in optimizer.param_groups:
param_group['lr'] = state['lr']
if __name__ == '__main__':
main()
| true | true |
1c2f2397e10b6ee71825879c89041f0aa6475ace | 2,128 | py | Python | pvlib/test/test_surfrad.py | sjanzou/pvlib-python | 17751ca69f271891ee83082772b3261138e34dd0 | [
"BSD-3-Clause"
] | null | null | null | pvlib/test/test_surfrad.py | sjanzou/pvlib-python | 17751ca69f271891ee83082772b3261138e34dd0 | [
"BSD-3-Clause"
] | null | null | null | pvlib/test/test_surfrad.py | sjanzou/pvlib-python | 17751ca69f271891ee83082772b3261138e34dd0 | [
"BSD-3-Clause"
] | null | null | null | import inspect
import os
from pandas import Timestamp, DatetimeIndex
from pandas.util.testing import network
from pvlib.iotools import surfrad
test_dir = os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe())))
testfile = os.path.join(test_dir, '../data/surfrad-slv16001.dat')
network_testfile = ('ftp://aftp.cmdl.noaa.gov/data/radiation/surfrad/'
'Alamosa_CO/2016/slv16001.dat')
@network
def test_read_surfrad_network():
# If this test begins failing, SURFRAD's data structure or data
# archive may have changed.
local_data, _ = surfrad.read_surfrad(testfile)
network_data, _ = surfrad.read_surfrad(network_testfile)
assert local_data.equals(network_data)
def test_read_surfrad_columns_no_map():
data, _ = surfrad.read_surfrad(testfile, map_variables=False)
assert 'zen' in data.columns
assert 'temp' in data.columns
assert 'par' in data.columns
assert 'pressure' in data.columns
def test_read_surfrad_columns_map():
data, _ = surfrad.read_surfrad(testfile)
assert 'solar_zenith' in data.columns
assert 'ghi' in data.columns
assert 'ghi_flag' in data.columns
assert 'dni' in data.columns
assert 'dni_flag' in data.columns
assert 'dhi' in data.columns
assert 'dhi_flag' in data.columns
assert 'wind_direction' in data.columns
assert 'wind_direction_flag' in data.columns
assert 'wind_speed' in data.columns
assert 'wind_speed_flag' in data.columns
assert 'temp_air' in data.columns
assert 'temp_air_flag' in data.columns
def test_format_index():
start = Timestamp('20160101 00:00')
expected = DatetimeIndex(start=start, periods=1440, freq='1min', tz='UTC')
actual, _ = surfrad.read_surfrad(testfile)
assert actual.index.equals(expected)
def test_read_surfrad_metadata():
expected = {'name': 'Alamosa',
'latitude': 37.70,
'longitude': 105.92,
'elevation': 2317,
'surfrad_version': 1,
'tz': 'UTC'}
_, metadata = surfrad.read_surfrad(testfile)
assert metadata == expected
| 32.242424 | 78 | 0.699248 | import inspect
import os
from pandas import Timestamp, DatetimeIndex
from pandas.util.testing import network
from pvlib.iotools import surfrad
test_dir = os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe())))
testfile = os.path.join(test_dir, '../data/surfrad-slv16001.dat')
network_testfile = ('ftp://aftp.cmdl.noaa.gov/data/radiation/surfrad/'
'Alamosa_CO/2016/slv16001.dat')
@network
def test_read_surfrad_network():
# archive may have changed.
local_data, _ = surfrad.read_surfrad(testfile)
network_data, _ = surfrad.read_surfrad(network_testfile)
assert local_data.equals(network_data)
def test_read_surfrad_columns_no_map():
data, _ = surfrad.read_surfrad(testfile, map_variables=False)
assert 'zen' in data.columns
assert 'temp' in data.columns
assert 'par' in data.columns
assert 'pressure' in data.columns
def test_read_surfrad_columns_map():
data, _ = surfrad.read_surfrad(testfile)
assert 'solar_zenith' in data.columns
assert 'ghi' in data.columns
assert 'ghi_flag' in data.columns
assert 'dni' in data.columns
assert 'dni_flag' in data.columns
assert 'dhi' in data.columns
assert 'dhi_flag' in data.columns
assert 'wind_direction' in data.columns
assert 'wind_direction_flag' in data.columns
assert 'wind_speed' in data.columns
assert 'wind_speed_flag' in data.columns
assert 'temp_air' in data.columns
assert 'temp_air_flag' in data.columns
def test_format_index():
start = Timestamp('20160101 00:00')
expected = DatetimeIndex(start=start, periods=1440, freq='1min', tz='UTC')
actual, _ = surfrad.read_surfrad(testfile)
assert actual.index.equals(expected)
def test_read_surfrad_metadata():
expected = {'name': 'Alamosa',
'latitude': 37.70,
'longitude': 105.92,
'elevation': 2317,
'surfrad_version': 1,
'tz': 'UTC'}
_, metadata = surfrad.read_surfrad(testfile)
assert metadata == expected
| true | true |
1c2f24ca1f8cf87e4151939c8e28a2b8c430feea | 1,163 | py | Python | Packs/CommonScripts/Scripts/RunPollingCommand/RunPollingCommand_test.py | Gil-nuriel/content | b5237605d24ad915566f96c2cac392b1a93be80a | [
"MIT"
] | 1 | 2021-04-20T07:10:06.000Z | 2021-04-20T07:10:06.000Z | Packs/CommonScripts/Scripts/RunPollingCommand/RunPollingCommand_test.py | Gil-nuriel/content | b5237605d24ad915566f96c2cac392b1a93be80a | [
"MIT"
] | 2 | 2019-09-18T08:11:22.000Z | 2020-11-24T18:50:28.000Z | Packs/CommonScripts/Scripts/RunPollingCommand/RunPollingCommand_test.py | Gil-nuriel/content | b5237605d24ad915566f96c2cac392b1a93be80a | [
"MIT"
] | 2 | 2020-10-11T18:01:32.000Z | 2020-10-14T03:21:23.000Z | import pytest
from RunPollingCommand import prepare_arg_dict
IDS_ARGS = [
# sanity
(
('ids', ['a', 'b', 'c'], None, None),
{'ids': 'a,b,c'},
),
# single ID
(
('ids', 'a', None, None),
{'ids': 'a'},
),
# numeric IDs
(
('ids', [1, 2, 3], None, None),
{'ids': '1,2,3'},
),
# extra args
(
('ids', ['a', 'b', 'c'], u'arg1', u'value1'),
{'ids': 'a,b,c', 'arg1': 'value1'},
),
# extra args
(
('ids', ['a', 'b', 'c'], [u'arg1', u'arg2'], [u'value1', u'value2']),
{'ids': 'a,b,c', 'arg1': 'value1', 'arg2': 'value2'},
),
# extra args
(
('ids', ['a', 'b', 'c'], u'arg1, arg2,arg3', [u'value1', u'value2', u'value3']),
{'ids': 'a,b,c', 'arg1': 'value1', 'arg2': 'value2', 'arg3': 'value3'},
),
]
@pytest.mark.parametrize('inputs, expected_args', IDS_ARGS)
def test_prepare_arg_dict(inputs, expected_args):
args = prepare_arg_dict(*inputs)
assert args == expected_args
def test_prepare_arg_dict__error():
with pytest.raises(ValueError):
prepare_arg_dict('ids', 'a', u'arg1', None)
| 24.229167 | 88 | 0.483233 | import pytest
from RunPollingCommand import prepare_arg_dict
IDS_ARGS = [
(
('ids', ['a', 'b', 'c'], None, None),
{'ids': 'a,b,c'},
),
(
('ids', 'a', None, None),
{'ids': 'a'},
),
(
('ids', [1, 2, 3], None, None),
{'ids': '1,2,3'},
),
(
('ids', ['a', 'b', 'c'], u'arg1', u'value1'),
{'ids': 'a,b,c', 'arg1': 'value1'},
),
(
('ids', ['a', 'b', 'c'], [u'arg1', u'arg2'], [u'value1', u'value2']),
{'ids': 'a,b,c', 'arg1': 'value1', 'arg2': 'value2'},
),
(
('ids', ['a', 'b', 'c'], u'arg1, arg2,arg3', [u'value1', u'value2', u'value3']),
{'ids': 'a,b,c', 'arg1': 'value1', 'arg2': 'value2', 'arg3': 'value3'},
),
]
@pytest.mark.parametrize('inputs, expected_args', IDS_ARGS)
def test_prepare_arg_dict(inputs, expected_args):
args = prepare_arg_dict(*inputs)
assert args == expected_args
def test_prepare_arg_dict__error():
with pytest.raises(ValueError):
prepare_arg_dict('ids', 'a', u'arg1', None)
| true | true |
1c2f25b44f7e259e539f14d9e7e009f5c9a4208e | 1,629 | py | Python | views/__init__.py | jftsang/pypew | 238b227493d41d10b7d4da9849f46d40c87039dc | [
"MIT"
] | null | null | null | views/__init__.py | jftsang/pypew | 238b227493d41d10b7d4da9849f46d40c87039dc | [
"MIT"
] | 6 | 2022-01-30T12:55:13.000Z | 2022-03-08T14:47:36.000Z | views/__init__.py | jftsang/pypew | 238b227493d41d10b7d4da9849f46d40c87039dc | [
"MIT"
] | null | null | null | import os
from tempfile import TemporaryDirectory
from traceback import format_exc
import pandas as pd
from flask import (flash, make_response, render_template, send_file)
from forms import UpdateTextsForm
from models import FEASTS_CSV
from utils import logger
from .feast_views import *
from .pew_sheet_views import *
def index_view():
return render_template('index.html')
def acknowledgements_view():
return render_template('acknowledgements.html')
def texts_view():
form = UpdateTextsForm()
if form.is_submitted():
if form.validate():
with open(FEASTS_CSV, 'w') as f:
f.write(form.csv.data)
flash('Texts successfully updated.')
else:
try:
with open(FEASTS_CSV) as f:
form.csv.data = f.read()
except FileNotFoundError:
form.csv.data = ''
return render_template('texts.html', form=form)
def texts_download_csv_view():
return send_file(
FEASTS_CSV, as_attachment=True, attachment_filename='feasts.csv'
)
def texts_download_xlsx_view():
with TemporaryDirectory() as td:
xlsx_path = os.path.join(td, 'tmp.xlsx')
df = pd.read_csv(FEASTS_CSV)
df.to_excel(xlsx_path, index=False)
return send_file(
xlsx_path, as_attachment=True, attachment_filename='feasts.xlsx'
)
def internal_error_handler(error):
logger.exception(error)
return make_response(render_template('exception.html', error=format_exc()), 500)
def not_found_handler(error):
return make_response(render_template('404.html', error=error), 404)
| 25.061538 | 84 | 0.685697 | import os
from tempfile import TemporaryDirectory
from traceback import format_exc
import pandas as pd
from flask import (flash, make_response, render_template, send_file)
from forms import UpdateTextsForm
from models import FEASTS_CSV
from utils import logger
from .feast_views import *
from .pew_sheet_views import *
def index_view():
return render_template('index.html')
def acknowledgements_view():
return render_template('acknowledgements.html')
def texts_view():
form = UpdateTextsForm()
if form.is_submitted():
if form.validate():
with open(FEASTS_CSV, 'w') as f:
f.write(form.csv.data)
flash('Texts successfully updated.')
else:
try:
with open(FEASTS_CSV) as f:
form.csv.data = f.read()
except FileNotFoundError:
form.csv.data = ''
return render_template('texts.html', form=form)
def texts_download_csv_view():
return send_file(
FEASTS_CSV, as_attachment=True, attachment_filename='feasts.csv'
)
def texts_download_xlsx_view():
with TemporaryDirectory() as td:
xlsx_path = os.path.join(td, 'tmp.xlsx')
df = pd.read_csv(FEASTS_CSV)
df.to_excel(xlsx_path, index=False)
return send_file(
xlsx_path, as_attachment=True, attachment_filename='feasts.xlsx'
)
def internal_error_handler(error):
logger.exception(error)
return make_response(render_template('exception.html', error=format_exc()), 500)
def not_found_handler(error):
return make_response(render_template('404.html', error=error), 404)
| true | true |
1c2f25bc1e0474a82211285206167478ae439650 | 3,357 | py | Python | moya/namespaces.py | moyaproject/moya | 78b91d87b4519f91dfdd2b40dab44e72f201a843 | [
"MIT"
] | 129 | 2015-02-16T12:02:50.000Z | 2021-11-06T00:20:01.000Z | moya/namespaces.py | liaohandel/moya | 78b91d87b4519f91dfdd2b40dab44e72f201a843 | [
"MIT"
] | 5 | 2015-02-19T15:56:41.000Z | 2015-09-08T18:58:35.000Z | moya/namespaces.py | liaohandel/moya | 78b91d87b4519f91dfdd2b40dab44e72f201a843 | [
"MIT"
] | 14 | 2015-02-19T17:20:34.000Z | 2022-03-28T01:38:09.000Z | from __future__ import unicode_literals
"""XML namespaces"""
admin = "http://moyaproject.com/admin"
auth = "http://moyaproject.com/auth"
blog = "http://moyaproject.com/blog"
comments = "http://moyaproject.com/comments"
db = "http://moyaproject.com/db"
default = "http://moyaproject.com"
email = "http://moyaproject.com/email"
feedback = "http://moyaproject.com/feedback"
forms = "http://moyaproject.com/forms"
fs = "http://moyaproject.com/fs"
html = "http://moyaproject.com/html"
image = "http://moyaproject.com/image"
jsonrpc = "http://moyaproject.com/jsonrpc"
let = "http://moyaproject.com/let"
links = "http://moyaproject.com/links"
preflight = "http://moyaproject.com/preflight"
recaptcha = "http://moyaproject.com/recaptcha"
run = default
soup = "http://moyaproject.com/soup"
tables = "http://moyaproject.com/tables"
test = "http://moyaproject.com/test"
thumbnail = "http://moyaproject.com/thumbnail"
widgets = "http://moyaproject.com/widgets"
wysihtml5 = "http://moyaproject.com/wysihtml5"
namespace_docs = {
"http://moyaproject.com/admin": """
Tags defined in the [link admin]Moya Admin[/link] library.
""",
"http://moyaproject.com/auth": """
Tags defined in [link auth]Moya Auth[/link] library.
""",
"http://moyaproject.com/blog": """
Tags defined in the [link blog]Moya Blog[/link] library.
""",
"http://moyaproject.com/comments": """
Tags defined in the [link comments]Moya Comments[/link] library.
""",
"http://moyaproject.com/db": """
Tags used to work with [link db]databases[/link].
""",
"http://moyaproject.com": """
The default namespace used for most of Moya's tags.
""",
"http://moyaproject.com/email": """
Tags related to [link email]email[/link].
""",
"http://moyaproject.com/feedback": """
Tags defined in [link feedbac]Moya Feedback[/link].
""",
"http://moyaproject.com/forms": """
Tags defined in [link forms]Moya Forms[/link].
""",
"http://moyaproject.com/fs": """
Tags for working with [link project#filesystems]filesystems[/link].
""",
"http://moyaproject.com/image": """
Tags for working with [link images]images[/link].
""",
"http://moyaproject.com/jsonrpc": """
Tags for creating [link jsonrpc]JSON RPC[/link] interfaces.
""",
"http://moyaproject.com/links": """
Tags for defining [link links]links[/link].
""",
"http://moyaproject.com/preflight": """
Tags for creating [link preflight]preflight checks[/link].
""",
"http://moyaproject.com/recaptcha": """
Tags defined in [link recaptcha]Moya Google Recaptcha[/link].
""",
"http://moyaproject.com/soup": """
Tags for processing HTML tags.
""",
"http://moyaproject.com/tables": """
Tags used to create [link tables]table[/link] content.
""",
"http://moyaproject.com/test": """
Tags to build unit tests.
""",
"http://moyaproject.com/thumbnail": """
Tags defined in the [link thumbnail]Moya Thumbnail[/link] library.
""",
"http://moyaproject.com/widgets": """
Widgets defined in [link widgets]Moya Widgets[/link].
""",
"http://moyaproject.com/wysihtml5": """
Tags to create a rich text editor with [link wysihtml5]WYSIHTML5[/link].
""",
}
| 34.255102 | 80 | 0.627942 | from __future__ import unicode_literals
admin = "http://moyaproject.com/admin"
auth = "http://moyaproject.com/auth"
blog = "http://moyaproject.com/blog"
comments = "http://moyaproject.com/comments"
db = "http://moyaproject.com/db"
default = "http://moyaproject.com"
email = "http://moyaproject.com/email"
feedback = "http://moyaproject.com/feedback"
forms = "http://moyaproject.com/forms"
fs = "http://moyaproject.com/fs"
html = "http://moyaproject.com/html"
image = "http://moyaproject.com/image"
jsonrpc = "http://moyaproject.com/jsonrpc"
let = "http://moyaproject.com/let"
links = "http://moyaproject.com/links"
preflight = "http://moyaproject.com/preflight"
recaptcha = "http://moyaproject.com/recaptcha"
run = default
soup = "http://moyaproject.com/soup"
tables = "http://moyaproject.com/tables"
test = "http://moyaproject.com/test"
thumbnail = "http://moyaproject.com/thumbnail"
widgets = "http://moyaproject.com/widgets"
wysihtml5 = "http://moyaproject.com/wysihtml5"
namespace_docs = {
"http://moyaproject.com/admin": """
Tags defined in the [link admin]Moya Admin[/link] library.
""",
"http://moyaproject.com/auth": """
Tags defined in [link auth]Moya Auth[/link] library.
""",
"http://moyaproject.com/blog": """
Tags defined in the [link blog]Moya Blog[/link] library.
""",
"http://moyaproject.com/comments": """
Tags defined in the [link comments]Moya Comments[/link] library.
""",
"http://moyaproject.com/db": """
Tags used to work with [link db]databases[/link].
""",
"http://moyaproject.com": """
The default namespace used for most of Moya's tags.
""",
"http://moyaproject.com/email": """
Tags related to [link email]email[/link].
""",
"http://moyaproject.com/feedback": """
Tags defined in [link feedbac]Moya Feedback[/link].
""",
"http://moyaproject.com/forms": """
Tags defined in [link forms]Moya Forms[/link].
""",
"http://moyaproject.com/fs": """
Tags for working with [link project#filesystems]filesystems[/link].
""",
"http://moyaproject.com/image": """
Tags for working with [link images]images[/link].
""",
"http://moyaproject.com/jsonrpc": """
Tags for creating [link jsonrpc]JSON RPC[/link] interfaces.
""",
"http://moyaproject.com/links": """
Tags for defining [link links]links[/link].
""",
"http://moyaproject.com/preflight": """
Tags for creating [link preflight]preflight checks[/link].
""",
"http://moyaproject.com/recaptcha": """
Tags defined in [link recaptcha]Moya Google Recaptcha[/link].
""",
"http://moyaproject.com/soup": """
Tags for processing HTML tags.
""",
"http://moyaproject.com/tables": """
Tags used to create [link tables]table[/link] content.
""",
"http://moyaproject.com/test": """
Tags to build unit tests.
""",
"http://moyaproject.com/thumbnail": """
Tags defined in the [link thumbnail]Moya Thumbnail[/link] library.
""",
"http://moyaproject.com/widgets": """
Widgets defined in [link widgets]Moya Widgets[/link].
""",
"http://moyaproject.com/wysihtml5": """
Tags to create a rich text editor with [link wysihtml5]WYSIHTML5[/link].
""",
}
| true | true |
1c2f26f100c28e13893b2f00da4bf3f5f3d16cf2 | 13,557 | py | Python | services/storage/tests/unit/test_rest.py | colinRawlings/osparc-simcore | bf2f18d5bc1e574d5f4c238d08ad15156184c310 | [
"MIT"
] | null | null | null | services/storage/tests/unit/test_rest.py | colinRawlings/osparc-simcore | bf2f18d5bc1e574d5f4c238d08ad15156184c310 | [
"MIT"
] | null | null | null | services/storage/tests/unit/test_rest.py | colinRawlings/osparc-simcore | bf2f18d5bc1e574d5f4c238d08ad15156184c310 | [
"MIT"
] | null | null | null | # pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
# pylint: disable=unused-variable
import json
import os
import sys
from pathlib import Path
from typing import Any, Dict
from urllib.parse import quote
import pytest
import simcore_service_storage.meta
from aiohttp import web
from aiohttp.test_utils import TestClient
from simcore_service_storage.access_layer import AccessRights
from simcore_service_storage.app_handlers import HealthCheck
from simcore_service_storage.constants import APP_CONFIG_KEY, SIMCORE_S3_ID
from simcore_service_storage.db import setup_db
from simcore_service_storage.dsm import APP_DSM_KEY, DataStorageManager, setup_dsm
from simcore_service_storage.models import FileMetaData
from simcore_service_storage.rest import setup_rest
from simcore_service_storage.s3 import setup_s3
from simcore_service_storage.settings import Settings
from tests.helpers.utils_assert import assert_status
from tests.helpers.utils_project import clone_project_data
from tests.utils import BUCKET_NAME, USER_ID, has_datcore_tokens
current_dir = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent
def parse_db(dsm_mockup_db):
id_name_map = {}
id_file_count = {}
for d in dsm_mockup_db.keys():
md = dsm_mockup_db[d]
if not md.user_id in id_name_map:
id_name_map[md.user_id] = md.user_name
id_file_count[md.user_id] = 1
else:
id_file_count[md.user_id] = id_file_count[md.user_id] + 1
return id_file_count, id_name_map
@pytest.fixture
def client(
loop,
aiohttp_unused_port,
aiohttp_client: TestClient,
postgres_service,
minio_service,
osparc_api_specs_dir,
monkeypatch,
):
app = web.Application()
# FIXME: postgres_service fixture environs different from project_env_devel_environment. Do it after https://github.com/ITISFoundation/osparc-simcore/pull/2276 resolved
pg_config = postgres_service.copy()
pg_config.pop("database")
for key, value in pg_config.items():
monkeypatch.setenv(f"POSTGRES_{key.upper()}", f"{value}")
for key, value in minio_service.items():
monkeypatch.setenv(f"S3_{key.upper()}", f"{value}")
monkeypatch.setenv("STORAGE_PORT", str(aiohttp_unused_port()))
monkeypatch.setenv("STORAGE_LOG_LEVEL", "DEBUG")
monkeypatch.setenv("STORAGE_TESTING", "1")
monkeypatch.setenv("SC_BOOT_MODE", "local-development")
settings = Settings.create_from_envs()
print(settings.json(indent=2))
app[APP_CONFIG_KEY] = settings
setup_db(app)
setup_rest(app)
setup_dsm(app)
setup_s3(app)
cli = loop.run_until_complete(
aiohttp_client(
app, server_kwargs={"port": settings.STORAGE_PORT, "host": "localhost"}
)
)
return cli
async def test_health_check(client):
resp = await client.get("/v0/")
text = await resp.text()
assert resp.status == 200, text
payload = await resp.json()
data, error = tuple(payload.get(k) for k in ("data", "error"))
assert data
assert not error
app_health = HealthCheck.parse_obj(data)
assert app_health.name == simcore_service_storage.meta.app_name
assert app_health.version == simcore_service_storage.meta.api_version
async def test_locations(client):
user_id = USER_ID
resp = await client.get("/v0/locations?user_id={}".format(user_id))
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple(payload.get(k) for k in ("data", "error"))
_locs = 2 if has_datcore_tokens() else 1
assert len(data) == _locs
assert not error
async def test_s3_files_metadata(client, dsm_mockup_db):
id_file_count, _id_name_map = parse_db(dsm_mockup_db)
# list files for every user
for _id in id_file_count:
resp = await client.get("/v0/locations/0/files/metadata?user_id={}".format(_id))
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple(payload.get(k) for k in ("data", "error"))
assert not error
assert len(data) == id_file_count[_id]
# list files fileterd by uuid
for d in dsm_mockup_db.keys():
fmd = dsm_mockup_db[d]
uuid_filter = os.path.join(fmd.project_id, fmd.node_id)
resp = await client.get(
"/v0/locations/0/files/metadata?user_id={}&uuid_filter={}".format(
fmd.user_id, quote(uuid_filter, safe="")
)
)
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple(payload.get(k) for k in ("data", "error"))
assert not error
for d in data:
assert os.path.join(d["project_id"], d["node_id"]) == uuid_filter
async def test_s3_file_metadata(client, dsm_mockup_db):
# go through all files and get them
for d in dsm_mockup_db.keys():
fmd = dsm_mockup_db[d]
resp = await client.get(
"/v0/locations/0/files/{}/metadata?user_id={}".format(
quote(fmd.file_uuid, safe=""), fmd.user_id
)
)
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple(payload.get(k) for k in ("data", "error"))
assert not error
assert data
async def test_download_link(client, dsm_mockup_db):
for d in dsm_mockup_db.keys():
fmd = dsm_mockup_db[d]
resp = await client.get(
"/v0/locations/0/files/{}?user_id={}".format(
quote(fmd.file_uuid, safe=""), fmd.user_id
)
)
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple(payload.get(k) for k in ("data", "error"))
assert not error
assert data
async def test_upload_link(client, dsm_mockup_db):
for d in dsm_mockup_db.keys():
fmd = dsm_mockup_db[d]
resp = await client.put(
"/v0/locations/0/files/{}?user_id={}".format(
quote(fmd.file_uuid, safe=""), fmd.user_id
)
)
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple(payload.get(k) for k in ("data", "error"))
assert not error
assert data
@pytest.mark.skipif(not has_datcore_tokens(), reason="no datcore tokens")
async def test_copy(client, dsm_mockup_db, datcore_structured_testbucket):
# copy N files
N = 2
counter = 0
for d in dsm_mockup_db.keys():
fmd = dsm_mockup_db[d]
source_uuid = fmd.file_uuid
datcore_id = datcore_structured_testbucket["coll1_id"]
resp = await client.put(
"/v0/locations/1/files/{}?user_id={}&extra_location={}&extra_source={}".format(
quote(datcore_id, safe=""),
fmd.user_id,
SIMCORE_S3_ID,
quote(source_uuid, safe=""),
)
)
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple(payload.get(k) for k in ("data", "error"))
assert not error
assert data
counter = counter + 1
if counter == N:
break
# list files for every user
user_id = USER_ID
resp = await client.get(
"/v0/locations/1/files/metadata?user_id={}&uuid_filter={}".format(
user_id, BUCKET_NAME
)
)
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple(payload.get(k) for k in ("data", "error"))
assert not error
assert len(data) > N
async def test_delete_file(client, dsm_mockup_db):
id_file_count, _id_name_map = parse_db(dsm_mockup_db)
for d in dsm_mockup_db.keys():
fmd = dsm_mockup_db[d]
resp = await client.delete(
"/v0/locations/0/files/{}?user_id={}".format(
quote(fmd.file_uuid, safe=""), fmd.user_id
)
)
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple(payload.get(k) for k in ("data", "error"))
assert not error
assert not data
for _id in id_file_count:
resp = await client.get("/v0/locations/0/files/metadata?user_id={}".format(_id))
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple(payload.get(k) for k in ("data", "error"))
assert not error
assert len(data) == 0
async def test_action_check(client):
QUERY = "mguidon"
ACTION = "echo"
FAKE = {"path_value": "one", "query_value": "two", "body_value": {"a": 33, "b": 45}}
resp = await client.post(f"/v0/check/{ACTION}?data={QUERY}", json=FAKE)
payload = await resp.json()
data, error = tuple(payload.get(k) for k in ("data", "error"))
assert resp.status == 200, str(payload)
assert data
assert not error
# TODO: validate response against specs
assert data["path_value"] == ACTION
assert data["query_value"] == QUERY
def get_project_with_data() -> Dict[str, Any]:
projects = []
with open(current_dir / "../data/projects_with_data.json") as fp:
projects = json.load(fp)
# TODO: add schema validation
return projects
@pytest.fixture
def mock_datcore_download(mocker, client):
# Use to mock downloading from DATCore
async def _fake_download_to_file_or_raise(session, url, dest_path):
print(f"Faking download: {url} -> {dest_path}")
Path(dest_path).write_text("FAKE: test_create_and_delete_folders_from_project")
mocker.patch(
"simcore_service_storage.dsm.download_to_file_or_raise",
side_effect=_fake_download_to_file_or_raise,
)
dsm = client.app[APP_DSM_KEY]
assert dsm
assert isinstance(dsm, DataStorageManager)
async def mock_download_link_datcore(*args, **kwargs):
return ["https://httpbin.org/image", "foo.txt"]
mocker.patch.object(dsm, "download_link_datcore", mock_download_link_datcore)
@pytest.fixture
def mock_get_project_access_rights(mocker) -> None:
# NOTE: this avoid having to inject project in database
for module in ("dsm", "access_layer"):
mock = mocker.patch(
f"simcore_service_storage.{module}.get_project_access_rights"
)
mock.return_value.set_result(AccessRights.all())
async def _create_and_delete_folders_from_project(
project: Dict[str, Any], client: TestClient
):
destination_project, nodes_map = clone_project_data(project)
# CREATING
url = (
client.app.router["copy_folders_from_project"].url_for().with_query(user_id="1")
)
resp = await client.post(
url,
json={
"source": project,
"destination": destination_project,
"nodes_map": nodes_map,
},
)
data, _error = await assert_status(resp, expected_cls=web.HTTPCreated)
# data should be equal to the destination project, and all store entries should point to simcore.s3
for key in data:
if key != "workbench":
assert data[key] == destination_project[key]
else:
for _node_id, node in data[key].items():
if "outputs" in node:
for _o_id, o in node["outputs"].items():
if "store" in o:
assert o["store"] == SIMCORE_S3_ID
# DELETING
project_id = data["uuid"]
url = (
client.app.router["delete_folders_of_project"]
.url_for(folder_id=project_id)
.with_query(user_id="1")
)
resp = await client.delete(url)
await assert_status(resp, expected_cls=web.HTTPNoContent)
@pytest.mark.parametrize(
"project_name,project", [(prj["name"], prj) for prj in get_project_with_data()]
)
async def test_create_and_delete_folders_from_project(
client: TestClient,
dsm_mockup_db: Dict[str, FileMetaData],
project_name: str,
project: Dict[str, Any],
mock_get_project_access_rights,
mock_datcore_download,
):
source_project = project
await _create_and_delete_folders_from_project(source_project, client)
@pytest.mark.parametrize(
"project_name,project", [(prj["name"], prj) for prj in get_project_with_data()]
)
async def test_create_and_delete_folders_from_project_burst(
client,
dsm_mockup_db,
project_name,
project,
mock_get_project_access_rights,
mock_datcore_download,
):
source_project = project
import asyncio
await asyncio.gather(
*[
_create_and_delete_folders_from_project(source_project, client)
for _ in range(100)
]
)
async def test_s3_datasets_metadata(client):
url = (
client.app.router["get_datasets_metadata"]
.url_for(location_id=str(SIMCORE_S3_ID))
.with_query(user_id="21")
)
resp = await client.get(url)
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple(payload.get(k) for k in ("data", "error"))
assert not error
async def test_s3_files_datasets_metadata(client):
url = (
client.app.router["get_files_metadata_dataset"]
.url_for(location_id=str(SIMCORE_S3_ID), dataset_id="aa")
.with_query(user_id="21")
)
resp = await client.get(url)
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple(payload.get(k) for k in ("data", "error"))
assert not error
| 30.881549 | 172 | 0.651914 |
import json
import os
import sys
from pathlib import Path
from typing import Any, Dict
from urllib.parse import quote
import pytest
import simcore_service_storage.meta
from aiohttp import web
from aiohttp.test_utils import TestClient
from simcore_service_storage.access_layer import AccessRights
from simcore_service_storage.app_handlers import HealthCheck
from simcore_service_storage.constants import APP_CONFIG_KEY, SIMCORE_S3_ID
from simcore_service_storage.db import setup_db
from simcore_service_storage.dsm import APP_DSM_KEY, DataStorageManager, setup_dsm
from simcore_service_storage.models import FileMetaData
from simcore_service_storage.rest import setup_rest
from simcore_service_storage.s3 import setup_s3
from simcore_service_storage.settings import Settings
from tests.helpers.utils_assert import assert_status
from tests.helpers.utils_project import clone_project_data
from tests.utils import BUCKET_NAME, USER_ID, has_datcore_tokens
current_dir = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent
def parse_db(dsm_mockup_db):
id_name_map = {}
id_file_count = {}
for d in dsm_mockup_db.keys():
md = dsm_mockup_db[d]
if not md.user_id in id_name_map:
id_name_map[md.user_id] = md.user_name
id_file_count[md.user_id] = 1
else:
id_file_count[md.user_id] = id_file_count[md.user_id] + 1
return id_file_count, id_name_map
@pytest.fixture
def client(
loop,
aiohttp_unused_port,
aiohttp_client: TestClient,
postgres_service,
minio_service,
osparc_api_specs_dir,
monkeypatch,
):
app = web.Application()
pg_config = postgres_service.copy()
pg_config.pop("database")
for key, value in pg_config.items():
monkeypatch.setenv(f"POSTGRES_{key.upper()}", f"{value}")
for key, value in minio_service.items():
monkeypatch.setenv(f"S3_{key.upper()}", f"{value}")
monkeypatch.setenv("STORAGE_PORT", str(aiohttp_unused_port()))
monkeypatch.setenv("STORAGE_LOG_LEVEL", "DEBUG")
monkeypatch.setenv("STORAGE_TESTING", "1")
monkeypatch.setenv("SC_BOOT_MODE", "local-development")
settings = Settings.create_from_envs()
print(settings.json(indent=2))
app[APP_CONFIG_KEY] = settings
setup_db(app)
setup_rest(app)
setup_dsm(app)
setup_s3(app)
cli = loop.run_until_complete(
aiohttp_client(
app, server_kwargs={"port": settings.STORAGE_PORT, "host": "localhost"}
)
)
return cli
async def test_health_check(client):
resp = await client.get("/v0/")
text = await resp.text()
assert resp.status == 200, text
payload = await resp.json()
data, error = tuple(payload.get(k) for k in ("data", "error"))
assert data
assert not error
app_health = HealthCheck.parse_obj(data)
assert app_health.name == simcore_service_storage.meta.app_name
assert app_health.version == simcore_service_storage.meta.api_version
async def test_locations(client):
user_id = USER_ID
resp = await client.get("/v0/locations?user_id={}".format(user_id))
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple(payload.get(k) for k in ("data", "error"))
_locs = 2 if has_datcore_tokens() else 1
assert len(data) == _locs
assert not error
async def test_s3_files_metadata(client, dsm_mockup_db):
id_file_count, _id_name_map = parse_db(dsm_mockup_db)
for _id in id_file_count:
resp = await client.get("/v0/locations/0/files/metadata?user_id={}".format(_id))
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple(payload.get(k) for k in ("data", "error"))
assert not error
assert len(data) == id_file_count[_id]
for d in dsm_mockup_db.keys():
fmd = dsm_mockup_db[d]
uuid_filter = os.path.join(fmd.project_id, fmd.node_id)
resp = await client.get(
"/v0/locations/0/files/metadata?user_id={}&uuid_filter={}".format(
fmd.user_id, quote(uuid_filter, safe="")
)
)
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple(payload.get(k) for k in ("data", "error"))
assert not error
for d in data:
assert os.path.join(d["project_id"], d["node_id"]) == uuid_filter
async def test_s3_file_metadata(client, dsm_mockup_db):
for d in dsm_mockup_db.keys():
fmd = dsm_mockup_db[d]
resp = await client.get(
"/v0/locations/0/files/{}/metadata?user_id={}".format(
quote(fmd.file_uuid, safe=""), fmd.user_id
)
)
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple(payload.get(k) for k in ("data", "error"))
assert not error
assert data
async def test_download_link(client, dsm_mockup_db):
for d in dsm_mockup_db.keys():
fmd = dsm_mockup_db[d]
resp = await client.get(
"/v0/locations/0/files/{}?user_id={}".format(
quote(fmd.file_uuid, safe=""), fmd.user_id
)
)
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple(payload.get(k) for k in ("data", "error"))
assert not error
assert data
async def test_upload_link(client, dsm_mockup_db):
for d in dsm_mockup_db.keys():
fmd = dsm_mockup_db[d]
resp = await client.put(
"/v0/locations/0/files/{}?user_id={}".format(
quote(fmd.file_uuid, safe=""), fmd.user_id
)
)
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple(payload.get(k) for k in ("data", "error"))
assert not error
assert data
@pytest.mark.skipif(not has_datcore_tokens(), reason="no datcore tokens")
async def test_copy(client, dsm_mockup_db, datcore_structured_testbucket):
N = 2
counter = 0
for d in dsm_mockup_db.keys():
fmd = dsm_mockup_db[d]
source_uuid = fmd.file_uuid
datcore_id = datcore_structured_testbucket["coll1_id"]
resp = await client.put(
"/v0/locations/1/files/{}?user_id={}&extra_location={}&extra_source={}".format(
quote(datcore_id, safe=""),
fmd.user_id,
SIMCORE_S3_ID,
quote(source_uuid, safe=""),
)
)
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple(payload.get(k) for k in ("data", "error"))
assert not error
assert data
counter = counter + 1
if counter == N:
break
user_id = USER_ID
resp = await client.get(
"/v0/locations/1/files/metadata?user_id={}&uuid_filter={}".format(
user_id, BUCKET_NAME
)
)
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple(payload.get(k) for k in ("data", "error"))
assert not error
assert len(data) > N
async def test_delete_file(client, dsm_mockup_db):
id_file_count, _id_name_map = parse_db(dsm_mockup_db)
for d in dsm_mockup_db.keys():
fmd = dsm_mockup_db[d]
resp = await client.delete(
"/v0/locations/0/files/{}?user_id={}".format(
quote(fmd.file_uuid, safe=""), fmd.user_id
)
)
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple(payload.get(k) for k in ("data", "error"))
assert not error
assert not data
for _id in id_file_count:
resp = await client.get("/v0/locations/0/files/metadata?user_id={}".format(_id))
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple(payload.get(k) for k in ("data", "error"))
assert not error
assert len(data) == 0
async def test_action_check(client):
QUERY = "mguidon"
ACTION = "echo"
FAKE = {"path_value": "one", "query_value": "two", "body_value": {"a": 33, "b": 45}}
resp = await client.post(f"/v0/check/{ACTION}?data={QUERY}", json=FAKE)
payload = await resp.json()
data, error = tuple(payload.get(k) for k in ("data", "error"))
assert resp.status == 200, str(payload)
assert data
assert not error
assert data["path_value"] == ACTION
assert data["query_value"] == QUERY
def get_project_with_data() -> Dict[str, Any]:
projects = []
with open(current_dir / "../data/projects_with_data.json") as fp:
projects = json.load(fp)
return projects
@pytest.fixture
def mock_datcore_download(mocker, client):
async def _fake_download_to_file_or_raise(session, url, dest_path):
print(f"Faking download: {url} -> {dest_path}")
Path(dest_path).write_text("FAKE: test_create_and_delete_folders_from_project")
mocker.patch(
"simcore_service_storage.dsm.download_to_file_or_raise",
side_effect=_fake_download_to_file_or_raise,
)
dsm = client.app[APP_DSM_KEY]
assert dsm
assert isinstance(dsm, DataStorageManager)
async def mock_download_link_datcore(*args, **kwargs):
return ["https://httpbin.org/image", "foo.txt"]
mocker.patch.object(dsm, "download_link_datcore", mock_download_link_datcore)
@pytest.fixture
def mock_get_project_access_rights(mocker) -> None:
for module in ("dsm", "access_layer"):
mock = mocker.patch(
f"simcore_service_storage.{module}.get_project_access_rights"
)
mock.return_value.set_result(AccessRights.all())
async def _create_and_delete_folders_from_project(
project: Dict[str, Any], client: TestClient
):
destination_project, nodes_map = clone_project_data(project)
url = (
client.app.router["copy_folders_from_project"].url_for().with_query(user_id="1")
)
resp = await client.post(
url,
json={
"source": project,
"destination": destination_project,
"nodes_map": nodes_map,
},
)
data, _error = await assert_status(resp, expected_cls=web.HTTPCreated)
for key in data:
if key != "workbench":
assert data[key] == destination_project[key]
else:
for _node_id, node in data[key].items():
if "outputs" in node:
for _o_id, o in node["outputs"].items():
if "store" in o:
assert o["store"] == SIMCORE_S3_ID
project_id = data["uuid"]
url = (
client.app.router["delete_folders_of_project"]
.url_for(folder_id=project_id)
.with_query(user_id="1")
)
resp = await client.delete(url)
await assert_status(resp, expected_cls=web.HTTPNoContent)
@pytest.mark.parametrize(
"project_name,project", [(prj["name"], prj) for prj in get_project_with_data()]
)
async def test_create_and_delete_folders_from_project(
client: TestClient,
dsm_mockup_db: Dict[str, FileMetaData],
project_name: str,
project: Dict[str, Any],
mock_get_project_access_rights,
mock_datcore_download,
):
source_project = project
await _create_and_delete_folders_from_project(source_project, client)
@pytest.mark.parametrize(
"project_name,project", [(prj["name"], prj) for prj in get_project_with_data()]
)
async def test_create_and_delete_folders_from_project_burst(
client,
dsm_mockup_db,
project_name,
project,
mock_get_project_access_rights,
mock_datcore_download,
):
source_project = project
import asyncio
await asyncio.gather(
*[
_create_and_delete_folders_from_project(source_project, client)
for _ in range(100)
]
)
async def test_s3_datasets_metadata(client):
url = (
client.app.router["get_datasets_metadata"]
.url_for(location_id=str(SIMCORE_S3_ID))
.with_query(user_id="21")
)
resp = await client.get(url)
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple(payload.get(k) for k in ("data", "error"))
assert not error
async def test_s3_files_datasets_metadata(client):
url = (
client.app.router["get_files_metadata_dataset"]
.url_for(location_id=str(SIMCORE_S3_ID), dataset_id="aa")
.with_query(user_id="21")
)
resp = await client.get(url)
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple(payload.get(k) for k in ("data", "error"))
assert not error
| true | true |
1c2f2750fa44f2fe21985ee9cadce4bf03d52400 | 6,013 | py | Python | tests/snapshots/test_snapshot.py | IndicoDataSolutions/Indico-Solutions-Toolkit | c9a38681c84e86a48bcde0867359ddd2f52ce236 | [
"MIT"
] | 6 | 2021-05-20T16:48:27.000Z | 2022-03-15T15:43:40.000Z | tests/snapshots/test_snapshot.py | IndicoDataSolutions/Indico-Solutions-Toolkit | c9a38681c84e86a48bcde0867359ddd2f52ce236 | [
"MIT"
] | 25 | 2021-06-25T13:37:21.000Z | 2022-01-03T15:54:26.000Z | tests/snapshots/test_snapshot.py | IndicoDataSolutions/Indico-Solutions-Toolkit | c9a38681c84e86a48bcde0867359ddd2f52ce236 | [
"MIT"
] | null | null | null | import pytest
import os
import tempfile
from copy import deepcopy
import pandas as pd
from pandas.util.testing import assert_frame_equal
from indico_toolkit import ToolkitInputError
from indico_toolkit.snapshots import Snapshot
# TODO: tests for exception handling
def test_instantiation_wo_params(snapshot_csv_path):
snap = Snapshot(snapshot_csv_path)
assert snap.text_col == "document"
assert snap.label_col == "question_2268"
assert snap.file_name_col == "file_name_9123"
assert isinstance(snap.df[snap.label_col].iloc[0], list)
assert isinstance(snap.df[snap.label_col].iloc[0][0], dict)
def test_instantiation(snapshot_csv_path):
snap = Snapshot(
snapshot_csv_path,
text_col="document",
label_col="question_2268",
file_name_col="file_name_9123",
)
assert snap.text_col == "document"
assert snap.label_col == "question_2268"
assert snap.file_name_col == "file_name_9123"
assert isinstance(snap.df[snap.label_col].iloc[0], list)
assert isinstance(snap.df[snap.label_col].iloc[0][0], dict)
def test_instantiation_bad_label_col(snapshot_csv_path):
with pytest.raises(ToolkitInputError):
Snapshot(
snapshot_csv_path,
label_col="file_name_9123",
)
def test_remove_extraction_labels(snapshot_csv_path):
snap = Snapshot(snapshot_csv_path)
assert "Trader's Name" in [i["label"] for i in snap.df[snap.label_col].iloc[0]]
snap.remove_extraction_labels(["Trader's Name"])
assert "Trader's Name" not in [i["label"] for i in snap.df[snap.label_col].iloc[0]]
def test_standardize_names(snapshot_csv_path):
snap = Snapshot(snapshot_csv_path)
snap.standardize_column_names()
assert "source" and "target" and "file_name" in snap.df.columns
def test__eq__not_equal(snapshot_csv_path):
snap1 = Snapshot(snapshot_csv_path)
snap2 = Snapshot(snapshot_csv_path)
snap1.standardize_column_names()
with pytest.raises(AssertionError):
assert snap1 == snap2
def test__eq__(snapshot_csv_path):
snap1 = Snapshot(snapshot_csv_path)
snap2 = Snapshot(snapshot_csv_path)
snap1.standardize_column_names()
snap2.standardize_column_names()
assert snap1 == snap2
def test_append(snapshot_csv_path):
snap1 = Snapshot(snapshot_csv_path)
snap2 = Snapshot(snapshot_csv_path)
snap1.standardize_column_names()
snap2.standardize_column_names()
snap1.append(snap2)
expected_length = snap2.df.shape[0] * 2
assert snap1.df.shape[0] == expected_length
def test_to_csv(snapshot_csv_path):
snap = Snapshot(snapshot_csv_path)
snap.standardize_column_names()
with tempfile.NamedTemporaryFile(suffix=".csv") as tf:
snap.to_csv(tf.name)
df = pd.read_csv(tf.name)
assert df.shape[1] == 3
assert isinstance(df["target"][0], str)
def test_split_and_write_to_csv(snapshot_csv_path):
snap = Snapshot(snapshot_csv_path)
with tempfile.TemporaryDirectory() as dirpath:
snap.split_and_write_to_csv(dirpath, num_splits=3, output_base_name="my_split")
original = pd.read_csv(snapshot_csv_path)
df1 = pd.read_csv(os.path.join(dirpath, "my_split_1.csv"))
df2 = pd.read_csv(os.path.join(dirpath, "my_split_2.csv"))
df3 = pd.read_csv(os.path.join(dirpath, "my_split_3.csv"))
assert df1.shape[0] == 3
assert df2.shape[0] == 3
assert df3.shape[0] == 4
full = pd.concat([df1, df2, df3]).reset_index(drop=True)
assert full.shape[0] == original.shape[0]
assert set(full.columns) == set(original.columns)
assert set(full["document"].tolist()) == set(original["document"].tolist())
def test_merge_by_file_name(snapshot_csv_path):
snap1 = Snapshot(snapshot_csv_path)
snap2 = Snapshot(snapshot_csv_path)
snap1.standardize_column_names()
snap2.standardize_column_names()
snap1.merge_by_file_name(snap2)
expected_pred_length = len(snap2.df[snap2.label_col][0]) * 2
assert len(snap1.df[snap1.label_col][0]) == expected_pred_length
assert snap1.df.shape[0] == snap2.df.shape[0]
for val in snap1.df[snap1.label_col]:
assert isinstance(val, list)
def test_merge_by_file_name_columns_no_match(snapshot_csv_path):
snap1 = Snapshot(snapshot_csv_path)
snap2 = Snapshot(snapshot_csv_path)
snap1.standardize_column_names()
with pytest.raises(ToolkitInputError):
snap1.merge_by_file_name(snap2)
def test_merge_by_file_name_no_filename_matches(snapshot_csv_path):
snap1 = Snapshot(snapshot_csv_path)
snap2 = Snapshot(snapshot_csv_path)
snap1.standardize_column_names()
snap2.standardize_column_names()
snap2.df[snap2.file_name_col] = "no_match"
original_labels = deepcopy(snap1.df[snap1.label_col].tolist())
snap1.merge_by_file_name(snap2)
assert snap1.df[snap1.label_col].tolist() == original_labels
def test_get_extraction_label_names(snapshot_csv_path, snapshot_classes):
snap = Snapshot(snapshot_csv_path)
label_list = snap.get_extraction_label_names()
assert len(snapshot_classes) == len(label_list)
for snapshot_class, test_class in zip(snapshot_classes, label_list):
assert snapshot_class == test_class
def test_number_of_samples(snapshot_csv_path):
snap = Snapshot(snapshot_csv_path)
assert snap.number_of_samples == 10
def test_get_all_labeled_text(snapshot_csv_path):
snap = Snapshot(snapshot_csv_path)
labeled_text = snap.get_all_labeled_text("Trader's District")
assert len(labeled_text) == 10
assert isinstance(labeled_text[0], str)
assert labeled_text[0] == "CA47"
def test_get_all_labeled_text_per_doc(snapshot_csv_path):
snap = Snapshot(snapshot_csv_path)
labeled_text = snap.get_all_labeled_text(
"Trader's District", return_per_document=True
)
assert len(labeled_text) == 10
assert isinstance(labeled_text[0], list)
assert len(labeled_text[0]) == 1
assert labeled_text[0][0] == "CA47"
| 34.959302 | 87 | 0.729752 | import pytest
import os
import tempfile
from copy import deepcopy
import pandas as pd
from pandas.util.testing import assert_frame_equal
from indico_toolkit import ToolkitInputError
from indico_toolkit.snapshots import Snapshot
def test_instantiation_wo_params(snapshot_csv_path):
snap = Snapshot(snapshot_csv_path)
assert snap.text_col == "document"
assert snap.label_col == "question_2268"
assert snap.file_name_col == "file_name_9123"
assert isinstance(snap.df[snap.label_col].iloc[0], list)
assert isinstance(snap.df[snap.label_col].iloc[0][0], dict)
def test_instantiation(snapshot_csv_path):
snap = Snapshot(
snapshot_csv_path,
text_col="document",
label_col="question_2268",
file_name_col="file_name_9123",
)
assert snap.text_col == "document"
assert snap.label_col == "question_2268"
assert snap.file_name_col == "file_name_9123"
assert isinstance(snap.df[snap.label_col].iloc[0], list)
assert isinstance(snap.df[snap.label_col].iloc[0][0], dict)
def test_instantiation_bad_label_col(snapshot_csv_path):
with pytest.raises(ToolkitInputError):
Snapshot(
snapshot_csv_path,
label_col="file_name_9123",
)
def test_remove_extraction_labels(snapshot_csv_path):
snap = Snapshot(snapshot_csv_path)
assert "Trader's Name" in [i["label"] for i in snap.df[snap.label_col].iloc[0]]
snap.remove_extraction_labels(["Trader's Name"])
assert "Trader's Name" not in [i["label"] for i in snap.df[snap.label_col].iloc[0]]
def test_standardize_names(snapshot_csv_path):
snap = Snapshot(snapshot_csv_path)
snap.standardize_column_names()
assert "source" and "target" and "file_name" in snap.df.columns
def test__eq__not_equal(snapshot_csv_path):
snap1 = Snapshot(snapshot_csv_path)
snap2 = Snapshot(snapshot_csv_path)
snap1.standardize_column_names()
with pytest.raises(AssertionError):
assert snap1 == snap2
def test__eq__(snapshot_csv_path):
snap1 = Snapshot(snapshot_csv_path)
snap2 = Snapshot(snapshot_csv_path)
snap1.standardize_column_names()
snap2.standardize_column_names()
assert snap1 == snap2
def test_append(snapshot_csv_path):
snap1 = Snapshot(snapshot_csv_path)
snap2 = Snapshot(snapshot_csv_path)
snap1.standardize_column_names()
snap2.standardize_column_names()
snap1.append(snap2)
expected_length = snap2.df.shape[0] * 2
assert snap1.df.shape[0] == expected_length
def test_to_csv(snapshot_csv_path):
snap = Snapshot(snapshot_csv_path)
snap.standardize_column_names()
with tempfile.NamedTemporaryFile(suffix=".csv") as tf:
snap.to_csv(tf.name)
df = pd.read_csv(tf.name)
assert df.shape[1] == 3
assert isinstance(df["target"][0], str)
def test_split_and_write_to_csv(snapshot_csv_path):
snap = Snapshot(snapshot_csv_path)
with tempfile.TemporaryDirectory() as dirpath:
snap.split_and_write_to_csv(dirpath, num_splits=3, output_base_name="my_split")
original = pd.read_csv(snapshot_csv_path)
df1 = pd.read_csv(os.path.join(dirpath, "my_split_1.csv"))
df2 = pd.read_csv(os.path.join(dirpath, "my_split_2.csv"))
df3 = pd.read_csv(os.path.join(dirpath, "my_split_3.csv"))
assert df1.shape[0] == 3
assert df2.shape[0] == 3
assert df3.shape[0] == 4
full = pd.concat([df1, df2, df3]).reset_index(drop=True)
assert full.shape[0] == original.shape[0]
assert set(full.columns) == set(original.columns)
assert set(full["document"].tolist()) == set(original["document"].tolist())
def test_merge_by_file_name(snapshot_csv_path):
snap1 = Snapshot(snapshot_csv_path)
snap2 = Snapshot(snapshot_csv_path)
snap1.standardize_column_names()
snap2.standardize_column_names()
snap1.merge_by_file_name(snap2)
expected_pred_length = len(snap2.df[snap2.label_col][0]) * 2
assert len(snap1.df[snap1.label_col][0]) == expected_pred_length
assert snap1.df.shape[0] == snap2.df.shape[0]
for val in snap1.df[snap1.label_col]:
assert isinstance(val, list)
def test_merge_by_file_name_columns_no_match(snapshot_csv_path):
snap1 = Snapshot(snapshot_csv_path)
snap2 = Snapshot(snapshot_csv_path)
snap1.standardize_column_names()
with pytest.raises(ToolkitInputError):
snap1.merge_by_file_name(snap2)
def test_merge_by_file_name_no_filename_matches(snapshot_csv_path):
snap1 = Snapshot(snapshot_csv_path)
snap2 = Snapshot(snapshot_csv_path)
snap1.standardize_column_names()
snap2.standardize_column_names()
snap2.df[snap2.file_name_col] = "no_match"
original_labels = deepcopy(snap1.df[snap1.label_col].tolist())
snap1.merge_by_file_name(snap2)
assert snap1.df[snap1.label_col].tolist() == original_labels
def test_get_extraction_label_names(snapshot_csv_path, snapshot_classes):
snap = Snapshot(snapshot_csv_path)
label_list = snap.get_extraction_label_names()
assert len(snapshot_classes) == len(label_list)
for snapshot_class, test_class in zip(snapshot_classes, label_list):
assert snapshot_class == test_class
def test_number_of_samples(snapshot_csv_path):
snap = Snapshot(snapshot_csv_path)
assert snap.number_of_samples == 10
def test_get_all_labeled_text(snapshot_csv_path):
snap = Snapshot(snapshot_csv_path)
labeled_text = snap.get_all_labeled_text("Trader's District")
assert len(labeled_text) == 10
assert isinstance(labeled_text[0], str)
assert labeled_text[0] == "CA47"
def test_get_all_labeled_text_per_doc(snapshot_csv_path):
snap = Snapshot(snapshot_csv_path)
labeled_text = snap.get_all_labeled_text(
"Trader's District", return_per_document=True
)
assert len(labeled_text) == 10
assert isinstance(labeled_text[0], list)
assert len(labeled_text[0]) == 1
assert labeled_text[0][0] == "CA47"
| true | true |
1c2f2836f7ba4fa41f03b0e014f4625b9574f535 | 6,959 | py | Python | src/oci/core/models/update_instance_shape_config_details.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/core/models/update_instance_shape_config_details.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/core/models/update_instance_shape_config_details.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class UpdateInstanceShapeConfigDetails(object):
"""
The shape configuration requested for the instance. If provided, the instance will be updated
with the resources specified. In the case where some properties are missing,
the missing values will be set to the default for the provided `shape`.
Each shape only supports certain configurable values. If the `shape` is provided
and the configuration values are invalid for that new `shape`, an error will be returned.
If no `shape` is provided and the configuration values are invalid for the instance's
existing shape, an error will be returned.
"""
#: A constant which can be used with the baseline_ocpu_utilization property of a UpdateInstanceShapeConfigDetails.
#: This constant has a value of "BASELINE_1_8"
BASELINE_OCPU_UTILIZATION_BASELINE_1_8 = "BASELINE_1_8"
#: A constant which can be used with the baseline_ocpu_utilization property of a UpdateInstanceShapeConfigDetails.
#: This constant has a value of "BASELINE_1_2"
BASELINE_OCPU_UTILIZATION_BASELINE_1_2 = "BASELINE_1_2"
#: A constant which can be used with the baseline_ocpu_utilization property of a UpdateInstanceShapeConfigDetails.
#: This constant has a value of "BASELINE_1_1"
BASELINE_OCPU_UTILIZATION_BASELINE_1_1 = "BASELINE_1_1"
def __init__(self, **kwargs):
"""
Initializes a new UpdateInstanceShapeConfigDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param ocpus:
The value to assign to the ocpus property of this UpdateInstanceShapeConfigDetails.
:type ocpus: float
:param memory_in_gbs:
The value to assign to the memory_in_gbs property of this UpdateInstanceShapeConfigDetails.
:type memory_in_gbs: float
:param baseline_ocpu_utilization:
The value to assign to the baseline_ocpu_utilization property of this UpdateInstanceShapeConfigDetails.
Allowed values for this property are: "BASELINE_1_8", "BASELINE_1_2", "BASELINE_1_1"
:type baseline_ocpu_utilization: str
"""
self.swagger_types = {
'ocpus': 'float',
'memory_in_gbs': 'float',
'baseline_ocpu_utilization': 'str'
}
self.attribute_map = {
'ocpus': 'ocpus',
'memory_in_gbs': 'memoryInGBs',
'baseline_ocpu_utilization': 'baselineOcpuUtilization'
}
self._ocpus = None
self._memory_in_gbs = None
self._baseline_ocpu_utilization = None
@property
def ocpus(self):
"""
Gets the ocpus of this UpdateInstanceShapeConfigDetails.
The total number of OCPUs available to the instance.
:return: The ocpus of this UpdateInstanceShapeConfigDetails.
:rtype: float
"""
return self._ocpus
@ocpus.setter
def ocpus(self, ocpus):
"""
Sets the ocpus of this UpdateInstanceShapeConfigDetails.
The total number of OCPUs available to the instance.
:param ocpus: The ocpus of this UpdateInstanceShapeConfigDetails.
:type: float
"""
self._ocpus = ocpus
@property
def memory_in_gbs(self):
"""
Gets the memory_in_gbs of this UpdateInstanceShapeConfigDetails.
The total amount of memory available to the instance, in gigabytes.
:return: The memory_in_gbs of this UpdateInstanceShapeConfigDetails.
:rtype: float
"""
return self._memory_in_gbs
@memory_in_gbs.setter
def memory_in_gbs(self, memory_in_gbs):
"""
Sets the memory_in_gbs of this UpdateInstanceShapeConfigDetails.
The total amount of memory available to the instance, in gigabytes.
:param memory_in_gbs: The memory_in_gbs of this UpdateInstanceShapeConfigDetails.
:type: float
"""
self._memory_in_gbs = memory_in_gbs
@property
def baseline_ocpu_utilization(self):
"""
Gets the baseline_ocpu_utilization of this UpdateInstanceShapeConfigDetails.
The baseline OCPU utilization for a subcore burstable VM instance. Leave this attribute blank for a
non-burstable instance, or explicitly specify non-burstable with `BASELINE_1_1`.
The following values are supported:
- `BASELINE_1_8` - baseline usage is 1/8 of an OCPU.
- `BASELINE_1_2` - baseline usage is 1/2 of an OCPU.
- `BASELINE_1_1` - baseline usage is an entire OCPU. This represents a non-burstable instance.
Allowed values for this property are: "BASELINE_1_8", "BASELINE_1_2", "BASELINE_1_1"
:return: The baseline_ocpu_utilization of this UpdateInstanceShapeConfigDetails.
:rtype: str
"""
return self._baseline_ocpu_utilization
@baseline_ocpu_utilization.setter
def baseline_ocpu_utilization(self, baseline_ocpu_utilization):
"""
Sets the baseline_ocpu_utilization of this UpdateInstanceShapeConfigDetails.
The baseline OCPU utilization for a subcore burstable VM instance. Leave this attribute blank for a
non-burstable instance, or explicitly specify non-burstable with `BASELINE_1_1`.
The following values are supported:
- `BASELINE_1_8` - baseline usage is 1/8 of an OCPU.
- `BASELINE_1_2` - baseline usage is 1/2 of an OCPU.
- `BASELINE_1_1` - baseline usage is an entire OCPU. This represents a non-burstable instance.
:param baseline_ocpu_utilization: The baseline_ocpu_utilization of this UpdateInstanceShapeConfigDetails.
:type: str
"""
allowed_values = ["BASELINE_1_8", "BASELINE_1_2", "BASELINE_1_1"]
if not value_allowed_none_or_none_sentinel(baseline_ocpu_utilization, allowed_values):
raise ValueError(
"Invalid value for `baseline_ocpu_utilization`, must be None or one of {0}"
.format(allowed_values)
)
self._baseline_ocpu_utilization = baseline_ocpu_utilization
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 40.225434 | 245 | 0.699813 |
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class UpdateInstanceShapeConfigDetails(object):
BASELINE_OCPU_UTILIZATION_BASELINE_1_8 = "BASELINE_1_8"
BASELINE_OCPU_UTILIZATION_BASELINE_1_2 = "BASELINE_1_2"
BASELINE_OCPU_UTILIZATION_BASELINE_1_1 = "BASELINE_1_1"
def __init__(self, **kwargs):
self.swagger_types = {
'ocpus': 'float',
'memory_in_gbs': 'float',
'baseline_ocpu_utilization': 'str'
}
self.attribute_map = {
'ocpus': 'ocpus',
'memory_in_gbs': 'memoryInGBs',
'baseline_ocpu_utilization': 'baselineOcpuUtilization'
}
self._ocpus = None
self._memory_in_gbs = None
self._baseline_ocpu_utilization = None
@property
def ocpus(self):
return self._ocpus
@ocpus.setter
def ocpus(self, ocpus):
self._ocpus = ocpus
@property
def memory_in_gbs(self):
return self._memory_in_gbs
@memory_in_gbs.setter
def memory_in_gbs(self, memory_in_gbs):
self._memory_in_gbs = memory_in_gbs
@property
def baseline_ocpu_utilization(self):
return self._baseline_ocpu_utilization
@baseline_ocpu_utilization.setter
def baseline_ocpu_utilization(self, baseline_ocpu_utilization):
allowed_values = ["BASELINE_1_8", "BASELINE_1_2", "BASELINE_1_1"]
if not value_allowed_none_or_none_sentinel(baseline_ocpu_utilization, allowed_values):
raise ValueError(
"Invalid value for `baseline_ocpu_utilization`, must be None or one of {0}"
.format(allowed_values)
)
self._baseline_ocpu_utilization = baseline_ocpu_utilization
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
1c2f2892c020fb625b3bc82f8a9c4d88de484d6c | 919 | py | Python | wave2.0/division2B/hw1/A_interactor.py | stanislav-kudriavtsev/Yandex-Algorithms-Training | 0ad882e04847f6c2a973716a419befb21aa1df20 | [
"CC0-1.0"
] | null | null | null | wave2.0/division2B/hw1/A_interactor.py | stanislav-kudriavtsev/Yandex-Algorithms-Training | 0ad882e04847f6c2a973716a419befb21aa1df20 | [
"CC0-1.0"
] | null | null | null | wave2.0/division2B/hw1/A_interactor.py | stanislav-kudriavtsev/Yandex-Algorithms-Training | 0ad882e04847f6c2a973716a419befb21aa1df20 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""https://contest.yandex.ru/contest/28730/problems/A/"""
# pylint: disable=invalid-name
# pylint: disable=too-many-return-statements
def solve(ret: int, inter: int, check: int) -> int:
"""
Return the final decision.
Parameters
----------
return_code : int
-128 <= r <= 127.
interactor_code : int
0 <= i <= 7.
checker_code : int
0 <= c <= 7.
Returns
-------
int
"""
if inter == 0:
if ret:
return 3
return check
if inter == 1:
return check
if inter == 4:
if ret:
return 3
return 4
if inter == 6:
return 0
if inter == 7:
return 1
return inter
if __name__ == "__main__":
ret_code, inter_code, check_code = (int(input()) for _ in range(3))
print(solve(ret_code, inter_code, check_code))
| 19.145833 | 71 | 0.526659 |
def solve(ret: int, inter: int, check: int) -> int:
if inter == 0:
if ret:
return 3
return check
if inter == 1:
return check
if inter == 4:
if ret:
return 3
return 4
if inter == 6:
return 0
if inter == 7:
return 1
return inter
if __name__ == "__main__":
ret_code, inter_code, check_code = (int(input()) for _ in range(3))
print(solve(ret_code, inter_code, check_code))
| true | true |
1c2f28df8e9b7f3a8f79ee73f179528d1523295b | 2,172 | py | Python | paper2remarkable/providers/tandfonline.py | hukkelas/paper2remarkable | acc63b4b7328413f41f791caca2a611762f4f3b7 | [
"MIT"
] | null | null | null | paper2remarkable/providers/tandfonline.py | hukkelas/paper2remarkable | acc63b4b7328413f41f791caca2a611762f4f3b7 | [
"MIT"
] | null | null | null | paper2remarkable/providers/tandfonline.py | hukkelas/paper2remarkable | acc63b4b7328413f41f791caca2a611762f4f3b7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Provider for Taylor and Francis Online
Author: G.J.J. van den Burg
License: See LICENSE file
Copyright: 2020, G.J.J. van den Burg
"""
import re
from ._base import Provider
from ._info import Informer
from ..exceptions import URLResolutionError
from ..log import Logger
logger = Logger()
class TandFOnlineInformer(Informer):
meta_title_key = "dc.Title"
meta_author_key = "dc.Creator"
meta_date_key = "dc.Date"
def _format_authors(self, soup_authors):
return super()._format_authors(soup_authors, sep=" ", idx=-1)
def _format_year(self, soup_date):
return soup_date.strip().split(" ")[-1].strip()
class TandFOnline(Provider):
re_abs = "^https?://\w+.tandfonline.com/doi/(full|abs)/(?P<doi>\d+\.\d+/\w+\.\w+\.\w+)"
re_pdf = "^https?://\w+.tandfonline.com/doi/(full|pdf)/(?P<doi>\d+\.\d+/\w+\.\w+\.\w+)"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.informer = TandFOnlineInformer()
def _get_doi(self, url):
m = re.match(self.re_abs, url) or re.match(self.re_pdf, url)
if m:
return m["doi"]
raise URLResolutionError(
"TandFOnline", url, reason="Failed to retrieve DOI."
)
def get_abs_pdf_urls(self, url):
if re.match(self.re_abs, url):
abs_url = url
doi = self._get_doi(url)
pdf_url = "https://www.tandfonline.com/doi/pdf/{doi}?needAccess=true".format(
doi=doi
)
elif re.match(self.re_pdf, url):
doi = self._get_doi(url)
pdf_url = "https://www.tandfonline.com/doi/pdf/{doi}?needAccess=true".format(
doi=doi
)
# full redirects to abs if we don't have access
abs_url = "https://www.tandfonline.com/doi/full/{doi}".format(
doi=doi
)
else:
raise URLResolutionError("TandFOnline", url)
return abs_url, pdf_url
def validate(src):
m = re.match(TandFOnline.re_abs, src) or re.match(
TandFOnline.re_pdf, src
)
return not m is None
| 28.96 | 91 | 0.587477 |
import re
from ._base import Provider
from ._info import Informer
from ..exceptions import URLResolutionError
from ..log import Logger
logger = Logger()
class TandFOnlineInformer(Informer):
meta_title_key = "dc.Title"
meta_author_key = "dc.Creator"
meta_date_key = "dc.Date"
def _format_authors(self, soup_authors):
return super()._format_authors(soup_authors, sep=" ", idx=-1)
def _format_year(self, soup_date):
return soup_date.strip().split(" ")[-1].strip()
class TandFOnline(Provider):
re_abs = "^https?://\w+.tandfonline.com/doi/(full|abs)/(?P<doi>\d+\.\d+/\w+\.\w+\.\w+)"
re_pdf = "^https?://\w+.tandfonline.com/doi/(full|pdf)/(?P<doi>\d+\.\d+/\w+\.\w+\.\w+)"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.informer = TandFOnlineInformer()
def _get_doi(self, url):
m = re.match(self.re_abs, url) or re.match(self.re_pdf, url)
if m:
return m["doi"]
raise URLResolutionError(
"TandFOnline", url, reason="Failed to retrieve DOI."
)
def get_abs_pdf_urls(self, url):
if re.match(self.re_abs, url):
abs_url = url
doi = self._get_doi(url)
pdf_url = "https://www.tandfonline.com/doi/pdf/{doi}?needAccess=true".format(
doi=doi
)
elif re.match(self.re_pdf, url):
doi = self._get_doi(url)
pdf_url = "https://www.tandfonline.com/doi/pdf/{doi}?needAccess=true".format(
doi=doi
)
abs_url = "https://www.tandfonline.com/doi/full/{doi}".format(
doi=doi
)
else:
raise URLResolutionError("TandFOnline", url)
return abs_url, pdf_url
def validate(src):
m = re.match(TandFOnline.re_abs, src) or re.match(
TandFOnline.re_pdf, src
)
return not m is None
| true | true |
1c2f291af7d0a5541bcac4ca900ac1d3d7c23f0b | 7,067 | py | Python | python/handwritten_baseline/pipeline/model/feature_extr/tfidf.py | UKPLab/cdcr-beyond-corpus-tailored | 52bf98692c7464f25628baea24addd1a988f9a1f | [
"Apache-2.0"
] | 10 | 2020-11-28T05:01:04.000Z | 2021-12-21T19:34:00.000Z | python/handwritten_baseline/pipeline/model/feature_extr/tfidf.py | UKPLab/cdcr-beyond-corpus-tailored | 52bf98692c7464f25628baea24addd1a988f9a1f | [
"Apache-2.0"
] | 1 | 2022-03-12T07:20:39.000Z | 2022-03-16T05:11:38.000Z | python/handwritten_baseline/pipeline/model/feature_extr/tfidf.py | UKPLab/cdcr-beyond-corpus-tailored | 52bf98692c7464f25628baea24addd1a988f9a1f | [
"Apache-2.0"
] | 1 | 2021-12-21T19:34:08.000Z | 2021-12-21T19:34:08.000Z | import pprint
from typing import Optional, List, Tuple, Set, Any, Dict
import numpy as np
from overrides import overrides
from sklearn.feature_extraction.text import TfidfVectorizer
from python import TOKEN, DOCUMENT_ID, SENTENCE_IDX
from python.handwritten_baseline import LEMMA
from python.handwritten_baseline.pipeline.data.base import Dataset
from python.handwritten_baseline.pipeline.model.feature_extr import TFIDF_EXTR
from python.handwritten_baseline.pipeline.model.feature_extr.base_mixin import FeatureExtractorMixin
from python.handwritten_baseline.pipeline.model.feature_extr.util import batch_cosine_similarity
class TfidfFeatureExtractor(FeatureExtractorMixin):
"""
Computes the TF-IDF similarity between a mention pair. Three variants: (1) TF-IDF between sentence containing the
mention, (2) TF-IDF between the extended sentence context of a mention and (3) TF-IDF between the full documents the
mentions are coming from.
"""
def __init__(self,
lowercase: bool,
use_lemmas: bool,
num_sentence_context: int,
use_cache: bool,
features_to_select: Optional[List[str]]):
"""
:param lowercase: apply lowercasing yes or no
:param use_lemmas: use lemmas or surface forms
:param num_sentence_context: number of sentences left and right which define the sentence context -> results in
a window of 2*self._num_sentence_context + 1 sentences
"""
super(TfidfFeatureExtractor, self).__init__(TFIDF_EXTR, use_cache, features_to_select)
self.lowercase = lowercase
self.use_lemmas = use_lemmas
self.num_sentence_context = num_sentence_context
@staticmethod
def get_tfidf_of_doc(doc_id: Any, dataset: Dataset, vectorizer_: TfidfVectorizer) -> np.array:
tokens = dataset.tokens.loc[doc_id, TOKEN].values
detokenized = " ".join(tokens)
return vectorizer_.transform([detokenized]).toarray()
@staticmethod
def get_tfidf_of_mention_sentence(idx: Tuple, dataset: Dataset, vectorizer_: TfidfVectorizer) -> np.array:
doc_id, _ = idx
sent_idx = dataset.mentions_action.at[idx, SENTENCE_IDX]
tokens = dataset.tokens.loc[(doc_id, sent_idx), TOKEN].values
detokenized = " ".join(tokens)
return vectorizer_.transform([detokenized]).toarray()
@staticmethod
def get_tfidf_of_mention_context(idx: Tuple, dataset: Dataset, vectorizer_: TfidfVectorizer, num_sentence_context: int) -> np.array:
doc_id, _ = idx
sent_idx = dataset.mentions_action.at[idx, SENTENCE_IDX]
# determine how many preceding and following sentences there are for the mention sentence in this document
document = dataset.tokens.loc[doc_id, TOKEN]
sent_idx_start = max(sent_idx - num_sentence_context, 0)
sent_idx_end = min(sent_idx + num_sentence_context,
document.index.get_level_values(SENTENCE_IDX).max())
tokens = document.loc[slice(sent_idx_start, sent_idx_end)].values
detokenized = " ".join(tokens)
return vectorizer_.transform([detokenized]).toarray()
@overrides
def _transform(self, dataset: Dataset, pairs: List[Tuple[Tuple, Tuple]], unique_mentions: Set[Tuple]):
# TFIDF vectorization is an unsupervised transformation, therefore apply it in transform(), not in fit(). It
# would not make much sense anyway to use a TF-IDF vectorizer trained on train and apply it on test.
# The recommended way to handle pretokenized text according to the docs is to join with spaces and use
# whitespace tokenization, see https://scikit-learn.org/stable/modules/feature_extraction.html#customizing-the-vectorizer-classes
vectorizer_ = TfidfVectorizer(tokenizer=str.split, lowercase=self.lowercase, token_pattern=None, min_df=3, stop_words="english")
tokens_df = dataset.tokens
tokens = tokens_df[LEMMA] if self.use_lemmas else tokens_df[TOKEN]
docs = []
for doc_id, df in tokens.groupby(DOCUMENT_ID):
tokens = df.values.tolist()
docs.append(" ".join(tokens))
vectorizer_.fit(docs)
# precompute relevant information per document and mention
unique_documents = {doc_id for doc_id, _ in unique_mentions}
precomp_documents = {doc_id: self.get_tfidf_of_doc(doc_id, dataset, vectorizer_) for doc_id in unique_documents}
precomp_surrounding_sentence = {}
precomp_context = {}
for mention_idx in unique_mentions:
assert len(mention_idx) == 2 # (doc_id, mention_id)
# features for the mention sentence: check if mentions were detected for both sentences
surrounding_sentence = self.get_tfidf_of_mention_sentence(mention_idx, dataset, vectorizer_)
context = self.get_tfidf_of_mention_context(mention_idx, dataset, vectorizer_, self.num_sentence_context)
precomp_surrounding_sentence[mention_idx] = surrounding_sentence
precomp_context[mention_idx] = context
# compute cosine similarity between each pair of vectors to obtain features
feature_columns = []
for vectors, feature_desc in [(precomp_documents, "document"),
(precomp_surrounding_sentence, "sentence"),
(precomp_context, "context")]:
if feature_desc == "document":
pairs_transform = lambda tup: tup[0] # our document vectors map from doc-id to np.array
else:
pairs_transform = None
feature_column = batch_cosine_similarity(pairs, vectors, pairs_transform=pairs_transform, desc=f"{self.name} {feature_desc}")
feature_columns.append(feature_column)
feature_matrix = np.hstack(feature_columns)
return feature_matrix
@overrides
def _get_plain_names_of_all_features(self) -> List[str]:
return ["document-similarity", "surrounding-sentence-similarity", "context-similarity"]
@classmethod
@overrides
def from_params(cls, config: Dict):
# Tested all four combinations in a small CV-experiment, this combination performed best by a small margin.
lowercase = config.pop("lowercase", True)
use_lemmas = config.pop("use_lemmas", False)
num_sentence_context = config.pop("num_sentence_context", 2)
use_cache = config.pop("use_cache", False)
features_to_select = config.pop("features_to_select", None)
obj = TfidfFeatureExtractor(lowercase=lowercase,
use_lemmas=use_lemmas,
num_sentence_context=num_sentence_context,
use_cache=use_cache,
features_to_select=features_to_select)
if config:
raise ValueError("Leftover configuration: " + pprint.pformat(config))
return obj | 50.120567 | 137 | 0.685015 | import pprint
from typing import Optional, List, Tuple, Set, Any, Dict
import numpy as np
from overrides import overrides
from sklearn.feature_extraction.text import TfidfVectorizer
from python import TOKEN, DOCUMENT_ID, SENTENCE_IDX
from python.handwritten_baseline import LEMMA
from python.handwritten_baseline.pipeline.data.base import Dataset
from python.handwritten_baseline.pipeline.model.feature_extr import TFIDF_EXTR
from python.handwritten_baseline.pipeline.model.feature_extr.base_mixin import FeatureExtractorMixin
from python.handwritten_baseline.pipeline.model.feature_extr.util import batch_cosine_similarity
class TfidfFeatureExtractor(FeatureExtractorMixin):
def __init__(self,
lowercase: bool,
use_lemmas: bool,
num_sentence_context: int,
use_cache: bool,
features_to_select: Optional[List[str]]):
super(TfidfFeatureExtractor, self).__init__(TFIDF_EXTR, use_cache, features_to_select)
self.lowercase = lowercase
self.use_lemmas = use_lemmas
self.num_sentence_context = num_sentence_context
@staticmethod
def get_tfidf_of_doc(doc_id: Any, dataset: Dataset, vectorizer_: TfidfVectorizer) -> np.array:
tokens = dataset.tokens.loc[doc_id, TOKEN].values
detokenized = " ".join(tokens)
return vectorizer_.transform([detokenized]).toarray()
@staticmethod
def get_tfidf_of_mention_sentence(idx: Tuple, dataset: Dataset, vectorizer_: TfidfVectorizer) -> np.array:
doc_id, _ = idx
sent_idx = dataset.mentions_action.at[idx, SENTENCE_IDX]
tokens = dataset.tokens.loc[(doc_id, sent_idx), TOKEN].values
detokenized = " ".join(tokens)
return vectorizer_.transform([detokenized]).toarray()
@staticmethod
def get_tfidf_of_mention_context(idx: Tuple, dataset: Dataset, vectorizer_: TfidfVectorizer, num_sentence_context: int) -> np.array:
doc_id, _ = idx
sent_idx = dataset.mentions_action.at[idx, SENTENCE_IDX]
document = dataset.tokens.loc[doc_id, TOKEN]
sent_idx_start = max(sent_idx - num_sentence_context, 0)
sent_idx_end = min(sent_idx + num_sentence_context,
document.index.get_level_values(SENTENCE_IDX).max())
tokens = document.loc[slice(sent_idx_start, sent_idx_end)].values
detokenized = " ".join(tokens)
return vectorizer_.transform([detokenized]).toarray()
@overrides
def _transform(self, dataset: Dataset, pairs: List[Tuple[Tuple, Tuple]], unique_mentions: Set[Tuple]):
zer(tokenizer=str.split, lowercase=self.lowercase, token_pattern=None, min_df=3, stop_words="english")
tokens_df = dataset.tokens
tokens = tokens_df[LEMMA] if self.use_lemmas else tokens_df[TOKEN]
docs = []
for doc_id, df in tokens.groupby(DOCUMENT_ID):
tokens = df.values.tolist()
docs.append(" ".join(tokens))
vectorizer_.fit(docs)
unique_documents = {doc_id for doc_id, _ in unique_mentions}
precomp_documents = {doc_id: self.get_tfidf_of_doc(doc_id, dataset, vectorizer_) for doc_id in unique_documents}
precomp_surrounding_sentence = {}
precomp_context = {}
for mention_idx in unique_mentions:
assert len(mention_idx) == 2
surrounding_sentence = self.get_tfidf_of_mention_sentence(mention_idx, dataset, vectorizer_)
context = self.get_tfidf_of_mention_context(mention_idx, dataset, vectorizer_, self.num_sentence_context)
precomp_surrounding_sentence[mention_idx] = surrounding_sentence
precomp_context[mention_idx] = context
feature_columns = []
for vectors, feature_desc in [(precomp_documents, "document"),
(precomp_surrounding_sentence, "sentence"),
(precomp_context, "context")]:
if feature_desc == "document":
pairs_transform = lambda tup: tup[0]
else:
pairs_transform = None
feature_column = batch_cosine_similarity(pairs, vectors, pairs_transform=pairs_transform, desc=f"{self.name} {feature_desc}")
feature_columns.append(feature_column)
feature_matrix = np.hstack(feature_columns)
return feature_matrix
@overrides
def _get_plain_names_of_all_features(self) -> List[str]:
return ["document-similarity", "surrounding-sentence-similarity", "context-similarity"]
@classmethod
@overrides
def from_params(cls, config: Dict):
lowercase = config.pop("lowercase", True)
use_lemmas = config.pop("use_lemmas", False)
num_sentence_context = config.pop("num_sentence_context", 2)
use_cache = config.pop("use_cache", False)
features_to_select = config.pop("features_to_select", None)
obj = TfidfFeatureExtractor(lowercase=lowercase,
use_lemmas=use_lemmas,
num_sentence_context=num_sentence_context,
use_cache=use_cache,
features_to_select=features_to_select)
if config:
raise ValueError("Leftover configuration: " + pprint.pformat(config))
return obj | true | true |
1c2f2958c02a6c73b748d3b9c3776bf0c674cd1e | 1,333 | py | Python | setup.py | evildmp/facts | bcfb0ce33cf70f208d3db3abe1bc2969ef468f01 | [
"MIT"
] | 1 | 2015-07-20T10:40:25.000Z | 2015-07-20T10:40:25.000Z | setup.py | evildmp/facts | bcfb0ce33cf70f208d3db3abe1bc2969ef468f01 | [
"MIT"
] | null | null | null | setup.py | evildmp/facts | bcfb0ce33cf70f208d3db3abe1bc2969ef468f01 | [
"MIT"
] | null | null | null | import os
from setuptools import setup, find_packages
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name = 'facts',
version = '0.0.1',
description='A simple fact-listing project',
long_description=README,
author='Daniele Procida',
author_email='daniele@vurt.org',
license='MIT',
classifiers=[
'Development Status :: 1 - Planning',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
''
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
packages=find_packages(),
include_package_data=True,
install_requires=['django'],
) | 32.512195 | 78 | 0.629407 | import os
from setuptools import setup, find_packages
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name = 'facts',
version = '0.0.1',
description='A simple fact-listing project',
long_description=README,
author='Daniele Procida',
author_email='daniele@vurt.org',
license='MIT',
classifiers=[
'Development Status :: 1 - Planning',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
''
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
packages=find_packages(),
include_package_data=True,
install_requires=['django'],
) | true | true |
1c2f296504fafdd4718d57268a30f1951299cc1b | 133 | py | Python | dynamo/__init__.py | gutard/django-dynamo | eef3c2ae1928eef8f50c4d9685316a6eb372ea3c | [
"BSD-3-Clause"
] | 4 | 2016-01-05T21:18:17.000Z | 2018-07-02T23:19:26.000Z | dynamo/__init__.py | gutard/django-dynamo | eef3c2ae1928eef8f50c4d9685316a6eb372ea3c | [
"BSD-3-Clause"
] | null | null | null | dynamo/__init__.py | gutard/django-dynamo | eef3c2ae1928eef8f50c4d9685316a6eb372ea3c | [
"BSD-3-Clause"
] | 3 | 2015-06-27T15:36:01.000Z | 2021-12-13T18:20:33.000Z |
# import dynamo signals to make sure that they are connected for use by
# other apps
from dynamo.signals import *
version = '0.25'
| 19 | 71 | 0.744361 |
from dynamo.signals import *
version = '0.25'
| true | true |
1c2f2988c850ffb00fb533c65656e4736c3f8c50 | 5,055 | py | Python | tests/test_coordinates/test_velocity_transform.py | bibek22/einsteinpy | 78bf5d942cbb12393852f8e4d7a8426f1ffe6f23 | [
"MIT"
] | 2 | 2019-04-07T04:01:57.000Z | 2019-07-11T11:59:55.000Z | tests/test_coordinates/test_velocity_transform.py | bibek22/einsteinpy | 78bf5d942cbb12393852f8e4d7a8426f1ffe6f23 | [
"MIT"
] | null | null | null | tests/test_coordinates/test_velocity_transform.py | bibek22/einsteinpy | 78bf5d942cbb12393852f8e4d7a8426f1ffe6f23 | [
"MIT"
] | null | null | null | import sys
from io import StringIO
from unittest import mock
import astropy.units as u
import numpy as np
import pytest
from numpy.testing import assert_allclose
from einsteinpy import coordinates
@pytest.fixture()
def cartesian_differential():
return coordinates.CartesianDifferential(
10 / np.sqrt(2) * u.km,
10 / np.sqrt(2) * u.km,
0 * u.km,
-190 / np.sqrt(2) * u.km / u.s,
210 / np.sqrt(2) * u.km / u.s,
200.0 * u.km / u.s,
)
@pytest.fixture()
def spherical_differential():
return coordinates.SphericalDifferential(
10.0 * u.km,
1.5707963267948966 * u.rad,
0.7853981633974483 * u.rad,
10.0 * u.km / u.s,
-20.0 * u.rad / u.s,
20.0 * u.rad / u.s,
)
@pytest.fixture()
def bl_differential():
return coordinates.BoyerLindquistDifferential(
10.0 * u.km,
1.5707963267948966 * u.rad,
0.7853981633974483 * u.rad,
10.0 * u.km / u.s,
-20.0 * u.rad / u.s,
20.0 * u.rad / u.s,
0.0 * u.km,
)
@pytest.fixture()
def bl_differential2():
return coordinates.BoyerLindquistDifferential(
4 * u.km,
2 * u.rad,
1 * u.rad,
-100 * u.m / u.s,
-1 * u.rad / u.s,
10 * u.deg / u.s,
100 * u.m,
)
def test_CartesianToSphericalDifferential(
cartesian_differential, spherical_differential
):
to_spherical_differential = cartesian_differential.spherical_differential()
assert_allclose(
to_spherical_differential.si_values(),
spherical_differential.si_values(),
rtol=0.0,
atol=1e-6,
)
def test_CartesianToBoyerLindquistDifferential(cartesian_differential, bl_differential):
to_bl_differential = cartesian_differential.bl_differential(bl_differential.a)
assert_allclose(
to_bl_differential.si_values(), bl_differential.si_values(), rtol=0.0, atol=1e-6
)
def test_SphericalToCartesianDifferential(
spherical_differential, cartesian_differential
):
to_cartesian_differential = spherical_differential.cartesian_differential()
assert_allclose(
to_cartesian_differential.si_values(),
cartesian_differential.si_values(),
rtol=0.0,
atol=1e-6,
)
def test_SphericalToBoyerLindquistDifferential(spherical_differential, bl_differential):
to_bl_differential = spherical_differential.bl_differential(bl_differential.a)
assert_allclose(
to_bl_differential.si_values(), bl_differential.si_values(), rtol=0.0, atol=1e-6
)
def test_BoyerLindquistToCartesianDifferential(bl_differential, cartesian_differential):
to_cartesian_differential = bl_differential.cartesian_differential()
assert_allclose(
to_cartesian_differential.si_values(),
cartesian_differential.si_values(),
rtol=0.0,
atol=1e-6,
)
def test_BoyerLindquistToSphericalDifferential(bl_differential, spherical_differential):
to_spherical_differential = bl_differential.spherical_differential()
assert_allclose(
to_spherical_differential.si_values(),
spherical_differential.si_values(),
rtol=0.0,
atol=1e-6,
)
def test_cycle_BLSphericalDifferential(bl_differential2):
bl_diff = bl_differential2
sph_diff = bl_diff.spherical_differential()
bl_diff2 = sph_diff.bl_differential(bl_diff.a)
assert_allclose(bl_diff2.si_values(), bl_diff.si_values(), rtol=0.0, atol=1e-6)
def test_cycle_BLCartesianDifferential(bl_differential2):
bl_diff = bl_differential2
cart_diff = bl_diff.cartesian_differential()
bl_diff2 = cart_diff.bl_differential(bl_diff.a)
assert_allclose(bl_diff2.si_values(), bl_diff.si_values(), rtol=0.0, atol=1e-6)
# Tests for object.__repr__ and object.__str__
@mock.patch("sys.stdout", new_callable=StringIO)
def test_print_core_objects(
mock_stdout, cartesian_differential, spherical_differential, bl_differential
):
print(str(cartesian_differential))
assert "object at 0x" not in mock_stdout.getvalue()
print(str(spherical_differential))
assert "object at 0x" not in mock_stdout.getvalue()
print(str(bl_differential))
assert "object at 0x" not in mock_stdout.getvalue()
# Tests for object.velocities()
def test_velocities(cartesian_differential, spherical_differential, bl_differential):
def with_numpy_array(differential_obj):
assert_allclose(
differential_obj.si_values()[3:],
differential_obj.velocities(return_np=True),
1e-10,
1e-15,
)
with_numpy_array(cartesian_differential)
with_numpy_array(spherical_differential)
with_numpy_array(bl_differential)
def test_velocities2(cartesian_differential, spherical_differential, bl_differential):
cd, sd, bd = cartesian_differential, spherical_differential, bl_differential
assert cd.velocities() == [cd.v_x, cd.v_y, cd.v_z]
assert sd.velocities() == [sd.v_r, sd.v_t, sd.v_p]
assert bd.velocities() == [bd.v_r, bd.v_t, bd.v_p]
| 29.051724 | 88 | 0.699308 | import sys
from io import StringIO
from unittest import mock
import astropy.units as u
import numpy as np
import pytest
from numpy.testing import assert_allclose
from einsteinpy import coordinates
@pytest.fixture()
def cartesian_differential():
return coordinates.CartesianDifferential(
10 / np.sqrt(2) * u.km,
10 / np.sqrt(2) * u.km,
0 * u.km,
-190 / np.sqrt(2) * u.km / u.s,
210 / np.sqrt(2) * u.km / u.s,
200.0 * u.km / u.s,
)
@pytest.fixture()
def spherical_differential():
return coordinates.SphericalDifferential(
10.0 * u.km,
1.5707963267948966 * u.rad,
0.7853981633974483 * u.rad,
10.0 * u.km / u.s,
-20.0 * u.rad / u.s,
20.0 * u.rad / u.s,
)
@pytest.fixture()
def bl_differential():
return coordinates.BoyerLindquistDifferential(
10.0 * u.km,
1.5707963267948966 * u.rad,
0.7853981633974483 * u.rad,
10.0 * u.km / u.s,
-20.0 * u.rad / u.s,
20.0 * u.rad / u.s,
0.0 * u.km,
)
@pytest.fixture()
def bl_differential2():
return coordinates.BoyerLindquistDifferential(
4 * u.km,
2 * u.rad,
1 * u.rad,
-100 * u.m / u.s,
-1 * u.rad / u.s,
10 * u.deg / u.s,
100 * u.m,
)
def test_CartesianToSphericalDifferential(
cartesian_differential, spherical_differential
):
to_spherical_differential = cartesian_differential.spherical_differential()
assert_allclose(
to_spherical_differential.si_values(),
spherical_differential.si_values(),
rtol=0.0,
atol=1e-6,
)
def test_CartesianToBoyerLindquistDifferential(cartesian_differential, bl_differential):
to_bl_differential = cartesian_differential.bl_differential(bl_differential.a)
assert_allclose(
to_bl_differential.si_values(), bl_differential.si_values(), rtol=0.0, atol=1e-6
)
def test_SphericalToCartesianDifferential(
spherical_differential, cartesian_differential
):
to_cartesian_differential = spherical_differential.cartesian_differential()
assert_allclose(
to_cartesian_differential.si_values(),
cartesian_differential.si_values(),
rtol=0.0,
atol=1e-6,
)
def test_SphericalToBoyerLindquistDifferential(spherical_differential, bl_differential):
to_bl_differential = spherical_differential.bl_differential(bl_differential.a)
assert_allclose(
to_bl_differential.si_values(), bl_differential.si_values(), rtol=0.0, atol=1e-6
)
def test_BoyerLindquistToCartesianDifferential(bl_differential, cartesian_differential):
to_cartesian_differential = bl_differential.cartesian_differential()
assert_allclose(
to_cartesian_differential.si_values(),
cartesian_differential.si_values(),
rtol=0.0,
atol=1e-6,
)
def test_BoyerLindquistToSphericalDifferential(bl_differential, spherical_differential):
to_spherical_differential = bl_differential.spherical_differential()
assert_allclose(
to_spherical_differential.si_values(),
spherical_differential.si_values(),
rtol=0.0,
atol=1e-6,
)
def test_cycle_BLSphericalDifferential(bl_differential2):
bl_diff = bl_differential2
sph_diff = bl_diff.spherical_differential()
bl_diff2 = sph_diff.bl_differential(bl_diff.a)
assert_allclose(bl_diff2.si_values(), bl_diff.si_values(), rtol=0.0, atol=1e-6)
def test_cycle_BLCartesianDifferential(bl_differential2):
bl_diff = bl_differential2
cart_diff = bl_diff.cartesian_differential()
bl_diff2 = cart_diff.bl_differential(bl_diff.a)
assert_allclose(bl_diff2.si_values(), bl_diff.si_values(), rtol=0.0, atol=1e-6)
@mock.patch("sys.stdout", new_callable=StringIO)
def test_print_core_objects(
mock_stdout, cartesian_differential, spherical_differential, bl_differential
):
print(str(cartesian_differential))
assert "object at 0x" not in mock_stdout.getvalue()
print(str(spherical_differential))
assert "object at 0x" not in mock_stdout.getvalue()
print(str(bl_differential))
assert "object at 0x" not in mock_stdout.getvalue()
def test_velocities(cartesian_differential, spherical_differential, bl_differential):
def with_numpy_array(differential_obj):
assert_allclose(
differential_obj.si_values()[3:],
differential_obj.velocities(return_np=True),
1e-10,
1e-15,
)
with_numpy_array(cartesian_differential)
with_numpy_array(spherical_differential)
with_numpy_array(bl_differential)
def test_velocities2(cartesian_differential, spherical_differential, bl_differential):
cd, sd, bd = cartesian_differential, spherical_differential, bl_differential
assert cd.velocities() == [cd.v_x, cd.v_y, cd.v_z]
assert sd.velocities() == [sd.v_r, sd.v_t, sd.v_p]
assert bd.velocities() == [bd.v_r, bd.v_t, bd.v_p]
| true | true |
1c2f2afdfe1c155e69711d6df88d944adfbce20b | 110 | py | Python | src/python/services/api/schemas/common/base.py | benlague/CS411 | afff0affb0a353c4fd14b6229dd83ef5ab7520aa | [
"MIT"
] | 4 | 2021-12-10T20:30:22.000Z | 2022-01-06T17:08:20.000Z | fastlane_web/web_api/schemas/common/base.py | dgkatz/fastlane | 1cfe630651c955434f8019a17be0c4ff1fe42c31 | [
"MIT"
] | null | null | null | fastlane_web/web_api/schemas/common/base.py | dgkatz/fastlane | 1cfe630651c955434f8019a17be0c4ff1fe42c31 | [
"MIT"
] | null | null | null | from marshmallow import Schema, EXCLUDE
class BaseSchema(Schema):
class Meta:
unknown = EXCLUDE
| 15.714286 | 39 | 0.709091 | from marshmallow import Schema, EXCLUDE
class BaseSchema(Schema):
class Meta:
unknown = EXCLUDE
| true | true |
1c2f2b4d69afab3f7b372e60201f16a6f9e5216d | 30,580 | py | Python | saleor/graphql/checkout/mutations.py | elwoodxblues/saleor | 5e4e4a4259a011d24b04ebd24c77c689de843fa1 | [
"CC-BY-4.0"
] | 8 | 2018-07-17T13:13:21.000Z | 2022-03-01T17:02:34.000Z | saleor/graphql/checkout/mutations.py | elwoodxblues/saleor | 5e4e4a4259a011d24b04ebd24c77c689de843fa1 | [
"CC-BY-4.0"
] | 1 | 2021-03-10T07:55:59.000Z | 2021-03-10T07:55:59.000Z | saleor/graphql/checkout/mutations.py | elwoodxblues/saleor | 5e4e4a4259a011d24b04ebd24c77c689de843fa1 | [
"CC-BY-4.0"
] | null | null | null | from typing import List, Optional, Tuple
import graphene
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db import transaction
from django.utils import timezone
from ...checkout import models
from ...checkout.error_codes import CheckoutErrorCode
from ...checkout.utils import (
abort_order_data,
add_promo_code_to_checkout,
add_variant_to_checkout,
add_voucher_to_checkout,
change_billing_address_in_checkout,
change_shipping_address_in_checkout,
clean_checkout,
create_order,
get_user_checkout,
get_valid_shipping_methods_for_checkout,
get_voucher_for_checkout,
prepare_order_data,
recalculate_checkout_discount,
remove_promo_code_from_checkout,
remove_voucher_from_checkout,
)
from ...core import analytics
from ...core.exceptions import InsufficientStock
from ...core.taxes import TaxError
from ...discount import models as voucher_model
from ...payment import PaymentError, gateway
from ...payment.interface import AddressData
from ...payment.utils import store_customer_id
from ...product import models as product_models
from ..account.i18n import I18nMixin
from ..account.types import AddressInput, User
from ..core.mutations import (
BaseMutation,
ClearMetaBaseMutation,
ModelMutation,
UpdateMetaBaseMutation,
)
from ..core.types.common import CheckoutError
from ..core.utils import from_global_id_strict_type
from ..order.types import Order
from ..product.types import ProductVariant
from ..shipping.types import ShippingMethod
from .types import Checkout, CheckoutLine
ERROR_DOES_NOT_SHIP = "This checkout doesn't need shipping"
def clean_shipping_method(
checkout: models.Checkout, method: Optional[models.ShippingMethod], discounts
) -> bool:
"""Check if current shipping method is valid."""
if not method:
# no shipping method was provided, it is valid
return True
if not checkout.is_shipping_required():
raise ValidationError(
ERROR_DOES_NOT_SHIP, code=CheckoutErrorCode.SHIPPING_NOT_REQUIRED
)
if not checkout.shipping_address:
raise ValidationError(
"Cannot choose a shipping method for a checkout without the "
"shipping address.",
code=CheckoutErrorCode.SHIPPING_ADDRESS_NOT_SET,
)
valid_methods = get_valid_shipping_methods_for_checkout(checkout, discounts)
return method in valid_methods
def update_checkout_shipping_method_if_invalid(checkout: models.Checkout, discounts):
is_valid = clean_shipping_method(
checkout=checkout, method=checkout.shipping_method, discounts=discounts
)
if not is_valid:
cheapest_alternative = get_valid_shipping_methods_for_checkout(
checkout, discounts
).first()
checkout.shipping_method = cheapest_alternative
checkout.save(update_fields=["shipping_method"])
def check_lines_quantity(variants, quantities):
"""Check if stock is sufficient for each line in the list of dicts."""
for variant, quantity in zip(variants, quantities):
if quantity < 0:
raise ValidationError(
{
"quantity": ValidationError(
"The quantity should be higher than zero.",
code=CheckoutErrorCode.ZERO_QUANTITY,
)
}
)
if quantity > settings.MAX_CHECKOUT_LINE_QUANTITY:
raise ValidationError(
{
"quantity": ValidationError(
"Cannot add more than %d times this item."
"" % settings.MAX_CHECKOUT_LINE_QUANTITY,
code=CheckoutErrorCode.QUANTITY_GREATER_THAN_LIMIT,
)
}
)
try:
variant.check_quantity(quantity)
except InsufficientStock as e:
message = (
"Could not add item "
+ "%(item_name)s. Only %(remaining)d remaining in stock."
% {
"remaining": e.item.quantity_available,
"item_name": e.item.display_product(),
}
)
raise ValidationError({"quantity": ValidationError(message, code=e.code)})
class CheckoutLineInput(graphene.InputObjectType):
quantity = graphene.Int(required=True, description="The number of items purchased.")
variant_id = graphene.ID(required=True, description="ID of the ProductVariant.")
class CheckoutCreateInput(graphene.InputObjectType):
lines = graphene.List(
CheckoutLineInput,
description=(
"A list of checkout lines, each containing information about "
"an item in the checkout."
),
required=True,
)
email = graphene.String(description="The customer's email address.")
shipping_address = AddressInput(
description=(
"The mailing address to where the checkout will be shipped. "
"Note: the address will be ignored if the checkout "
"doesn't contain shippable items."
)
)
billing_address = AddressInput(description="Billing address of the customer.")
class CheckoutCreate(ModelMutation, I18nMixin):
created = graphene.Field(
graphene.Boolean,
description=(
"Whether the checkout was created or the current active one was returned. "
"Refer to checkoutLinesAdd and checkoutLinesUpdate to merge a cart "
"with an active checkout."
),
)
class Arguments:
input = CheckoutCreateInput(
required=True, description="Fields required to create checkout."
)
class Meta:
description = "Create a new checkout."
model = models.Checkout
return_field_name = "checkout"
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def process_checkout_lines(
cls, lines
) -> Tuple[List[product_models.ProductVariant], List[int]]:
variant_ids = [line.get("variant_id") for line in lines]
variants = cls.get_nodes_or_error(
variant_ids,
"variant_id",
ProductVariant,
qs=product_models.ProductVariant.objects.prefetch_related(
"product__product_type"
),
)
quantities = [line.get("quantity") for line in lines]
check_lines_quantity(variants, quantities)
return variants, quantities
@classmethod
def retrieve_shipping_address(cls, user, data: dict) -> Optional[models.Address]:
if "shipping_address" in data:
return cls.validate_address(data["shipping_address"])
if user.is_authenticated:
return user.default_shipping_address
return None
@classmethod
def retrieve_billing_address(cls, user, data: dict) -> Optional[models.Address]:
if "billing_address" in data:
return cls.validate_address(data["billing_address"])
if user.is_authenticated:
return user.default_billing_address
return None
@classmethod
def clean_input(cls, info, instance: models.Checkout, data):
cleaned_input = super().clean_input(info, instance, data)
user = info.context.user
# Resolve and process the lines, retrieving the variants and quantities
lines = data.pop("lines", None)
if lines:
cleaned_input["variants"], cleaned_input[
"quantities"
] = cls.process_checkout_lines(lines)
cleaned_input["shipping_address"] = cls.retrieve_shipping_address(user, data)
cleaned_input["billing_address"] = cls.retrieve_billing_address(user, data)
# Use authenticated user's email as default email
if user.is_authenticated:
email = data.pop("email", None)
cleaned_input["email"] = email or user.email
return cleaned_input
@classmethod
def save_addresses(cls, instance: models.Checkout, cleaned_input: dict):
shipping_address = cleaned_input.get("shipping_address")
billing_address = cleaned_input.get("billing_address")
updated_fields = []
if shipping_address and instance.is_shipping_required():
shipping_address.save()
instance.shipping_address = shipping_address.get_copy()
updated_fields.append("shipping_address")
if billing_address:
billing_address.save()
instance.billing_address = billing_address.get_copy()
updated_fields.append("billing_address")
# Note django will simply return if the list is empty
instance.save(update_fields=updated_fields)
@classmethod
@transaction.atomic()
def save(cls, info, instance: models.Checkout, cleaned_input):
# Create the checkout object
instance.save()
# Retrieve the lines to create
variants = cleaned_input.get("variants")
quantities = cleaned_input.get("quantities")
# Create the checkout lines
if variants and quantities:
for variant, quantity in zip(variants, quantities):
try:
add_variant_to_checkout(instance, variant, quantity)
except InsufficientStock as exc:
raise ValidationError(
f"Insufficient product stock: {exc.item}", code=exc.code
)
# Save provided addresses and associate them to the checkout
cls.save_addresses(instance, cleaned_input)
@classmethod
def perform_mutation(cls, _root, info, **data):
user = info.context.user
# `perform_mutation` is overridden to properly get or create a checkout
# instance here and abort mutation if needed.
if user.is_authenticated:
checkout, _ = get_user_checkout(user)
if checkout is not None:
# If user has an active checkout, return it without any
# modifications.
return CheckoutCreate(checkout=checkout, created=False)
checkout = models.Checkout(user=user)
else:
checkout = models.Checkout()
cleaned_input = cls.clean_input(info, checkout, data.get("input"))
checkout = cls.construct_instance(checkout, cleaned_input)
cls.clean_instance(checkout)
cls.save(info, checkout, cleaned_input)
cls._save_m2m(info, checkout, cleaned_input)
return CheckoutCreate(checkout=checkout, created=True)
class CheckoutLinesAdd(BaseMutation):
checkout = graphene.Field(Checkout, description="An updated Checkout.")
class Arguments:
checkout_id = graphene.ID(description="The ID of the Checkout.", required=True)
lines = graphene.List(
CheckoutLineInput,
required=True,
description=(
"A list of checkout lines, each containing information about "
"an item in the checkout."
),
)
class Meta:
description = "Adds a checkout line to the existing checkout."
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(cls, _root, info, checkout_id, lines, replace=False):
checkout = cls.get_node_or_error(
info, checkout_id, only_type=Checkout, field="checkout_id"
)
variant_ids = [line.get("variant_id") for line in lines]
variants = cls.get_nodes_or_error(variant_ids, "variant_id", ProductVariant)
quantities = [line.get("quantity") for line in lines]
check_lines_quantity(variants, quantities)
update_checkout_shipping_method_if_invalid(checkout, info.context.discounts)
if variants and quantities:
for variant, quantity in zip(variants, quantities):
try:
add_variant_to_checkout(
checkout, variant, quantity, replace=replace
)
except InsufficientStock as exc:
raise ValidationError(
f"Insufficient product stock: {exc.item}", code=exc.code
)
recalculate_checkout_discount(checkout, info.context.discounts)
return CheckoutLinesAdd(checkout=checkout)
class CheckoutLinesUpdate(CheckoutLinesAdd):
checkout = graphene.Field(Checkout, description="An updated Checkout.")
class Meta:
description = "Updates CheckoutLine in the existing Checkout."
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(cls, root, info, checkout_id, lines):
return super().perform_mutation(root, info, checkout_id, lines, replace=True)
class CheckoutLineDelete(BaseMutation):
checkout = graphene.Field(Checkout, description="An updated checkout.")
class Arguments:
checkout_id = graphene.ID(description="The ID of the Checkout.", required=True)
line_id = graphene.ID(description="ID of the CheckoutLine to delete.")
class Meta:
description = "Deletes a CheckoutLine."
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(cls, _root, info, checkout_id, line_id):
checkout = cls.get_node_or_error(
info, checkout_id, only_type=Checkout, field="checkout_id"
)
line = cls.get_node_or_error(
info, line_id, only_type=CheckoutLine, field="line_id"
)
if line and line in checkout.lines.all():
line.delete()
update_checkout_shipping_method_if_invalid(checkout, info.context.discounts)
recalculate_checkout_discount(checkout, info.context.discounts)
return CheckoutLineDelete(checkout=checkout)
class CheckoutCustomerAttach(BaseMutation):
checkout = graphene.Field(Checkout, description="An updated checkout.")
class Arguments:
checkout_id = graphene.ID(required=True, description="ID of the Checkout.")
customer_id = graphene.ID(required=True, description="The ID of the customer.")
class Meta:
description = "Sets the customer as the owner of the Checkout."
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(cls, _root, info, checkout_id, customer_id):
checkout = cls.get_node_or_error(
info, checkout_id, only_type=Checkout, field="checkout_id"
)
customer = cls.get_node_or_error(
info, customer_id, only_type=User, field="customer_id"
)
checkout.user = customer
checkout.save(update_fields=["user"])
return CheckoutCustomerAttach(checkout=checkout)
class CheckoutCustomerDetach(BaseMutation):
checkout = graphene.Field(Checkout, description="An updated checkout")
class Arguments:
checkout_id = graphene.ID(description="Checkout ID", required=True)
class Meta:
description = "Removes the user assigned as the owner of the checkout."
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(cls, _root, info, checkout_id):
checkout = cls.get_node_or_error(
info, checkout_id, only_type=Checkout, field="checkout_id"
)
checkout.user = None
checkout.save(update_fields=["user"])
return CheckoutCustomerDetach(checkout=checkout)
class CheckoutShippingAddressUpdate(BaseMutation, I18nMixin):
checkout = graphene.Field(Checkout, description="An updated checkout")
class Arguments:
checkout_id = graphene.ID(required=True, description="ID of the Checkout.")
shipping_address = AddressInput(
required=True,
description="The mailing address to where the checkout will be shipped.",
)
class Meta:
description = "Update shipping address in the existing Checkout."
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(cls, _root, info, checkout_id, shipping_address):
pk = from_global_id_strict_type(checkout_id, Checkout, field="checkout_id")
try:
checkout = models.Checkout.objects.prefetch_related(
"lines__variant__product__product_type"
).get(pk=pk)
except ObjectDoesNotExist:
raise ValidationError(
{
"checkout_id": ValidationError(
f"Couldn't resolve to a node: {checkout_id}",
code=CheckoutErrorCode.NOT_FOUND,
)
}
)
if not checkout.is_shipping_required():
raise ValidationError(
{
"shipping_address": ValidationError(
ERROR_DOES_NOT_SHIP,
code=CheckoutErrorCode.SHIPPING_NOT_REQUIRED,
)
}
)
shipping_address = cls.validate_address(
shipping_address, instance=checkout.shipping_address
)
update_checkout_shipping_method_if_invalid(checkout, info.context.discounts)
with transaction.atomic():
shipping_address.save()
change_shipping_address_in_checkout(checkout, shipping_address)
recalculate_checkout_discount(checkout, info.context.discounts)
return CheckoutShippingAddressUpdate(checkout=checkout)
class CheckoutBillingAddressUpdate(CheckoutShippingAddressUpdate):
checkout = graphene.Field(Checkout, description="An updated checkout")
class Arguments:
checkout_id = graphene.ID(required=True, description="ID of the Checkout.")
billing_address = AddressInput(
required=True, description="The billing address of the checkout."
)
class Meta:
description = "Update billing address in the existing Checkout."
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(cls, _root, info, checkout_id, billing_address):
checkout = cls.get_node_or_error(
info, checkout_id, only_type=Checkout, field="checkout_id"
)
billing_address = cls.validate_address(
billing_address, instance=checkout.billing_address
)
with transaction.atomic():
billing_address.save()
change_billing_address_in_checkout(checkout, billing_address)
return CheckoutBillingAddressUpdate(checkout=checkout)
class CheckoutEmailUpdate(BaseMutation):
checkout = graphene.Field(Checkout, description="An updated checkout")
class Arguments:
checkout_id = graphene.ID(description="Checkout ID")
email = graphene.String(required=True, description="email")
class Meta:
description = "Updates email address in the existing Checkout object."
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(cls, _root, info, checkout_id, email):
checkout = cls.get_node_or_error(
info, checkout_id, only_type=Checkout, field="checkout_id"
)
checkout.email = email
cls.clean_instance(checkout)
checkout.save(update_fields=["email"])
return CheckoutEmailUpdate(checkout=checkout)
class CheckoutShippingMethodUpdate(BaseMutation):
checkout = graphene.Field(Checkout, description="An updated checkout")
class Arguments:
checkout_id = graphene.ID(description="Checkout ID")
shipping_method_id = graphene.ID(required=True, description="Shipping method")
class Meta:
description = "Updates the shipping address of the checkout."
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(cls, _root, info, checkout_id, shipping_method_id):
pk = from_global_id_strict_type(
checkout_id, only_type=Checkout, field="checkout_id"
)
try:
checkout = models.Checkout.objects.prefetch_related(
"lines__variant__product__collections",
"lines__variant__product__product_type",
).get(pk=pk)
except ObjectDoesNotExist:
raise ValidationError(
{
"checkout_id": ValidationError(
f"Couldn't resolve to a node: {checkout_id}",
code=CheckoutErrorCode.NOT_FOUND,
)
}
)
if not checkout.is_shipping_required():
raise ValidationError(
{
"shipping_method": ValidationError(
ERROR_DOES_NOT_SHIP,
code=CheckoutErrorCode.SHIPPING_NOT_REQUIRED,
)
}
)
shipping_method = cls.get_node_or_error(
info,
shipping_method_id,
only_type=ShippingMethod,
field="shipping_method_id",
)
shipping_method_is_valid = clean_shipping_method(
checkout=checkout, method=shipping_method, discounts=info.context.discounts
)
if not shipping_method_is_valid:
raise ValidationError(
{
"shipping_method": ValidationError(
"This shipping method is not applicable.",
code=CheckoutErrorCode.SHIPPING_METHOD_NOT_APPLICABLE,
)
}
)
checkout.shipping_method = shipping_method
checkout.save(update_fields=["shipping_method"])
recalculate_checkout_discount(checkout, info.context.discounts)
return CheckoutShippingMethodUpdate(checkout=checkout)
class CheckoutComplete(BaseMutation):
order = graphene.Field(Order, description="Placed order")
class Arguments:
checkout_id = graphene.ID(description="Checkout ID", required=True)
store_source = graphene.Boolean(
default_value=False,
description=(
"Determines whether to store the payment source for future usage."
),
)
class Meta:
description = (
"Completes the checkout. As a result a new order is created and "
"a payment charge is made. This action requires a successful "
"payment before it can be performed."
)
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(cls, _root, info, checkout_id, store_source):
checkout = cls.get_node_or_error(
info, checkout_id, only_type=Checkout, field="checkout_id"
)
user = info.context.user
clean_checkout(checkout, info.context.discounts)
payment = checkout.get_last_active_payment()
with transaction.atomic():
try:
order_data = prepare_order_data(
checkout=checkout,
tracking_code=analytics.get_client_id(info.context),
discounts=info.context.discounts,
)
except InsufficientStock as e:
raise ValidationError(
f"Insufficient product stock: {e.item}", code=e.code
)
except voucher_model.NotApplicable:
raise ValidationError(
"Voucher not applicable",
code=CheckoutErrorCode.VOUCHER_NOT_APPLICABLE,
)
except TaxError as tax_error:
return ValidationError(
"Unable to calculate taxes - %s" % str(tax_error),
code=CheckoutErrorCode.TAX_ERROR,
)
billing_address = order_data["billing_address"]
shipping_address = order_data.get("shipping_address", None)
billing_address = AddressData(**billing_address.as_data())
if shipping_address is not None:
shipping_address = AddressData(**shipping_address.as_data())
try:
txn = gateway.process_payment(
payment=payment, token=payment.token, store_source=store_source
)
if not txn.is_success:
raise PaymentError(txn.error)
except PaymentError as e:
abort_order_data(order_data)
raise ValidationError(str(e), code=CheckoutErrorCode.PAYMENT_ERROR)
if txn.customer_id and user.is_authenticated:
store_customer_id(user, payment.gateway, txn.customer_id)
# create the order into the database
order = create_order(checkout=checkout, order_data=order_data, user=user)
# remove checkout after order is successfully paid
checkout.delete()
# return the success response with the newly created order data
return CheckoutComplete(order=order)
class CheckoutUpdateVoucher(BaseMutation):
checkout = graphene.Field(Checkout, description="An checkout with updated voucher")
class Arguments:
checkout_id = graphene.ID(description="Checkout ID", required=True)
voucher_code = graphene.String(description="Voucher code")
class Meta:
description = (
"DEPRECATED: Will be removed in Saleor 2.10, use CheckoutAddPromoCode "
"or CheckoutRemovePromoCode instead. Adds voucher to the checkout. Query "
"it without voucher_code field to remove voucher from checkout."
)
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(cls, _root, info, checkout_id, voucher_code=None):
checkout = cls.get_node_or_error(
info, checkout_id, only_type=Checkout, field="checkout_id"
)
if voucher_code:
try:
voucher = voucher_model.Voucher.objects.active(date=timezone.now()).get(
code=voucher_code
)
except voucher_model.Voucher.DoesNotExist:
raise ValidationError(
{
"voucher_code": ValidationError(
"Voucher with given code does not exist.",
code=CheckoutErrorCode.NOT_FOUND,
)
}
)
try:
add_voucher_to_checkout(checkout, voucher)
except voucher_model.NotApplicable:
raise ValidationError(
{
"voucher_code": ValidationError(
"Voucher is not applicable to that checkout.",
code=CheckoutErrorCode.VOUCHER_NOT_APPLICABLE,
)
}
)
else:
existing_voucher = get_voucher_for_checkout(checkout)
if existing_voucher:
remove_voucher_from_checkout(checkout)
return CheckoutUpdateVoucher(checkout=checkout)
class CheckoutAddPromoCode(BaseMutation):
checkout = graphene.Field(
Checkout, description="The checkout with the added gift card or voucher"
)
class Arguments:
checkout_id = graphene.ID(description="Checkout ID", required=True)
promo_code = graphene.String(
description="Gift card code or voucher code", required=True
)
class Meta:
description = "Adds a gift card or a voucher to a checkout."
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(cls, _root, info, checkout_id, promo_code):
checkout = cls.get_node_or_error(
info, checkout_id, only_type=Checkout, field="checkout_id"
)
add_promo_code_to_checkout(checkout, promo_code, info.context.discounts)
return CheckoutAddPromoCode(checkout=checkout)
class CheckoutRemovePromoCode(BaseMutation):
checkout = graphene.Field(
Checkout, description="The checkout with the removed gift card or voucher"
)
class Arguments:
checkout_id = graphene.ID(description="Checkout ID", required=True)
promo_code = graphene.String(
description="Gift card code or voucher code", required=True
)
class Meta:
description = "Remove a gift card or a voucher from a checkout."
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(cls, _root, info, checkout_id, promo_code):
checkout = cls.get_node_or_error(
info, checkout_id, only_type=Checkout, field="checkout_id"
)
remove_promo_code_from_checkout(checkout, promo_code)
return CheckoutUpdateVoucher(checkout=checkout)
class CheckoutUpdateMeta(UpdateMetaBaseMutation):
class Meta:
description = "Updates metadata for Checkout."
permissions = ("order.manage_orders",)
model = models.Checkout
public = True
error_type_class = CheckoutError
error_type_field = "checkout_errors"
class CheckoutUpdatePrivateMeta(UpdateMetaBaseMutation):
class Meta:
description = "Updates private metadata for Checkout."
permissions = ("order.manage_orders",)
model = models.Checkout
public = False
error_type_class = CheckoutError
error_type_field = "checkout_errors"
class CheckoutClearStoredMeta(ClearMetaBaseMutation):
class Meta:
description = "Clear stored metadata value."
permissions = ("order.manage_orders",)
model = models.Checkout
public = True
error_type_class = CheckoutError
error_type_field = "checkout_errors"
class CheckoutClearStoredPrivateMeta(ClearMetaBaseMutation):
class Meta:
description = "Clear stored metadata value."
permissions = ("order.manage_orders",)
model = models.Checkout
public = False
error_type_class = CheckoutError
error_type_field = "checkout_errors"
| 36.018846 | 88 | 0.643165 | from typing import List, Optional, Tuple
import graphene
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db import transaction
from django.utils import timezone
from ...checkout import models
from ...checkout.error_codes import CheckoutErrorCode
from ...checkout.utils import (
abort_order_data,
add_promo_code_to_checkout,
add_variant_to_checkout,
add_voucher_to_checkout,
change_billing_address_in_checkout,
change_shipping_address_in_checkout,
clean_checkout,
create_order,
get_user_checkout,
get_valid_shipping_methods_for_checkout,
get_voucher_for_checkout,
prepare_order_data,
recalculate_checkout_discount,
remove_promo_code_from_checkout,
remove_voucher_from_checkout,
)
from ...core import analytics
from ...core.exceptions import InsufficientStock
from ...core.taxes import TaxError
from ...discount import models as voucher_model
from ...payment import PaymentError, gateway
from ...payment.interface import AddressData
from ...payment.utils import store_customer_id
from ...product import models as product_models
from ..account.i18n import I18nMixin
from ..account.types import AddressInput, User
from ..core.mutations import (
BaseMutation,
ClearMetaBaseMutation,
ModelMutation,
UpdateMetaBaseMutation,
)
from ..core.types.common import CheckoutError
from ..core.utils import from_global_id_strict_type
from ..order.types import Order
from ..product.types import ProductVariant
from ..shipping.types import ShippingMethod
from .types import Checkout, CheckoutLine
ERROR_DOES_NOT_SHIP = "This checkout doesn't need shipping"
def clean_shipping_method(
checkout: models.Checkout, method: Optional[models.ShippingMethod], discounts
) -> bool:
if not method:
# no shipping method was provided, it is valid
return True
if not checkout.is_shipping_required():
raise ValidationError(
ERROR_DOES_NOT_SHIP, code=CheckoutErrorCode.SHIPPING_NOT_REQUIRED
)
if not checkout.shipping_address:
raise ValidationError(
"Cannot choose a shipping method for a checkout without the "
"shipping address.",
code=CheckoutErrorCode.SHIPPING_ADDRESS_NOT_SET,
)
valid_methods = get_valid_shipping_methods_for_checkout(checkout, discounts)
return method in valid_methods
def update_checkout_shipping_method_if_invalid(checkout: models.Checkout, discounts):
is_valid = clean_shipping_method(
checkout=checkout, method=checkout.shipping_method, discounts=discounts
)
if not is_valid:
cheapest_alternative = get_valid_shipping_methods_for_checkout(
checkout, discounts
).first()
checkout.shipping_method = cheapest_alternative
checkout.save(update_fields=["shipping_method"])
def check_lines_quantity(variants, quantities):
for variant, quantity in zip(variants, quantities):
if quantity < 0:
raise ValidationError(
{
"quantity": ValidationError(
"The quantity should be higher than zero.",
code=CheckoutErrorCode.ZERO_QUANTITY,
)
}
)
if quantity > settings.MAX_CHECKOUT_LINE_QUANTITY:
raise ValidationError(
{
"quantity": ValidationError(
"Cannot add more than %d times this item."
"" % settings.MAX_CHECKOUT_LINE_QUANTITY,
code=CheckoutErrorCode.QUANTITY_GREATER_THAN_LIMIT,
)
}
)
try:
variant.check_quantity(quantity)
except InsufficientStock as e:
message = (
"Could not add item "
+ "%(item_name)s. Only %(remaining)d remaining in stock."
% {
"remaining": e.item.quantity_available,
"item_name": e.item.display_product(),
}
)
raise ValidationError({"quantity": ValidationError(message, code=e.code)})
class CheckoutLineInput(graphene.InputObjectType):
quantity = graphene.Int(required=True, description="The number of items purchased.")
variant_id = graphene.ID(required=True, description="ID of the ProductVariant.")
class CheckoutCreateInput(graphene.InputObjectType):
lines = graphene.List(
CheckoutLineInput,
description=(
"A list of checkout lines, each containing information about "
"an item in the checkout."
),
required=True,
)
email = graphene.String(description="The customer's email address.")
shipping_address = AddressInput(
description=(
"The mailing address to where the checkout will be shipped. "
"Note: the address will be ignored if the checkout "
"doesn't contain shippable items."
)
)
billing_address = AddressInput(description="Billing address of the customer.")
class CheckoutCreate(ModelMutation, I18nMixin):
created = graphene.Field(
graphene.Boolean,
description=(
"Whether the checkout was created or the current active one was returned. "
"Refer to checkoutLinesAdd and checkoutLinesUpdate to merge a cart "
"with an active checkout."
),
)
class Arguments:
input = CheckoutCreateInput(
required=True, description="Fields required to create checkout."
)
class Meta:
description = "Create a new checkout."
model = models.Checkout
return_field_name = "checkout"
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def process_checkout_lines(
cls, lines
) -> Tuple[List[product_models.ProductVariant], List[int]]:
variant_ids = [line.get("variant_id") for line in lines]
variants = cls.get_nodes_or_error(
variant_ids,
"variant_id",
ProductVariant,
qs=product_models.ProductVariant.objects.prefetch_related(
"product__product_type"
),
)
quantities = [line.get("quantity") for line in lines]
check_lines_quantity(variants, quantities)
return variants, quantities
@classmethod
def retrieve_shipping_address(cls, user, data: dict) -> Optional[models.Address]:
if "shipping_address" in data:
return cls.validate_address(data["shipping_address"])
if user.is_authenticated:
return user.default_shipping_address
return None
@classmethod
def retrieve_billing_address(cls, user, data: dict) -> Optional[models.Address]:
if "billing_address" in data:
return cls.validate_address(data["billing_address"])
if user.is_authenticated:
return user.default_billing_address
return None
@classmethod
def clean_input(cls, info, instance: models.Checkout, data):
cleaned_input = super().clean_input(info, instance, data)
user = info.context.user
# Resolve and process the lines, retrieving the variants and quantities
lines = data.pop("lines", None)
if lines:
cleaned_input["variants"], cleaned_input[
"quantities"
] = cls.process_checkout_lines(lines)
cleaned_input["shipping_address"] = cls.retrieve_shipping_address(user, data)
cleaned_input["billing_address"] = cls.retrieve_billing_address(user, data)
# Use authenticated user's email as default email
if user.is_authenticated:
email = data.pop("email", None)
cleaned_input["email"] = email or user.email
return cleaned_input
@classmethod
def save_addresses(cls, instance: models.Checkout, cleaned_input: dict):
shipping_address = cleaned_input.get("shipping_address")
billing_address = cleaned_input.get("billing_address")
updated_fields = []
if shipping_address and instance.is_shipping_required():
shipping_address.save()
instance.shipping_address = shipping_address.get_copy()
updated_fields.append("shipping_address")
if billing_address:
billing_address.save()
instance.billing_address = billing_address.get_copy()
updated_fields.append("billing_address")
instance.save(update_fields=updated_fields)
@classmethod
@transaction.atomic()
def save(cls, info, instance: models.Checkout, cleaned_input):
instance.save()
variants = cleaned_input.get("variants")
quantities = cleaned_input.get("quantities")
if variants and quantities:
for variant, quantity in zip(variants, quantities):
try:
add_variant_to_checkout(instance, variant, quantity)
except InsufficientStock as exc:
raise ValidationError(
f"Insufficient product stock: {exc.item}", code=exc.code
)
cls.save_addresses(instance, cleaned_input)
@classmethod
def perform_mutation(cls, _root, info, **data):
user = info.context.user
if user.is_authenticated:
checkout, _ = get_user_checkout(user)
if checkout is not None:
return CheckoutCreate(checkout=checkout, created=False)
checkout = models.Checkout(user=user)
else:
checkout = models.Checkout()
cleaned_input = cls.clean_input(info, checkout, data.get("input"))
checkout = cls.construct_instance(checkout, cleaned_input)
cls.clean_instance(checkout)
cls.save(info, checkout, cleaned_input)
cls._save_m2m(info, checkout, cleaned_input)
return CheckoutCreate(checkout=checkout, created=True)
class CheckoutLinesAdd(BaseMutation):
checkout = graphene.Field(Checkout, description="An updated Checkout.")
class Arguments:
checkout_id = graphene.ID(description="The ID of the Checkout.", required=True)
lines = graphene.List(
CheckoutLineInput,
required=True,
description=(
"A list of checkout lines, each containing information about "
"an item in the checkout."
),
)
class Meta:
description = "Adds a checkout line to the existing checkout."
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(cls, _root, info, checkout_id, lines, replace=False):
checkout = cls.get_node_or_error(
info, checkout_id, only_type=Checkout, field="checkout_id"
)
variant_ids = [line.get("variant_id") for line in lines]
variants = cls.get_nodes_or_error(variant_ids, "variant_id", ProductVariant)
quantities = [line.get("quantity") for line in lines]
check_lines_quantity(variants, quantities)
update_checkout_shipping_method_if_invalid(checkout, info.context.discounts)
if variants and quantities:
for variant, quantity in zip(variants, quantities):
try:
add_variant_to_checkout(
checkout, variant, quantity, replace=replace
)
except InsufficientStock as exc:
raise ValidationError(
f"Insufficient product stock: {exc.item}", code=exc.code
)
recalculate_checkout_discount(checkout, info.context.discounts)
return CheckoutLinesAdd(checkout=checkout)
class CheckoutLinesUpdate(CheckoutLinesAdd):
checkout = graphene.Field(Checkout, description="An updated Checkout.")
class Meta:
description = "Updates CheckoutLine in the existing Checkout."
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(cls, root, info, checkout_id, lines):
return super().perform_mutation(root, info, checkout_id, lines, replace=True)
class CheckoutLineDelete(BaseMutation):
checkout = graphene.Field(Checkout, description="An updated checkout.")
class Arguments:
checkout_id = graphene.ID(description="The ID of the Checkout.", required=True)
line_id = graphene.ID(description="ID of the CheckoutLine to delete.")
class Meta:
description = "Deletes a CheckoutLine."
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(cls, _root, info, checkout_id, line_id):
checkout = cls.get_node_or_error(
info, checkout_id, only_type=Checkout, field="checkout_id"
)
line = cls.get_node_or_error(
info, line_id, only_type=CheckoutLine, field="line_id"
)
if line and line in checkout.lines.all():
line.delete()
update_checkout_shipping_method_if_invalid(checkout, info.context.discounts)
recalculate_checkout_discount(checkout, info.context.discounts)
return CheckoutLineDelete(checkout=checkout)
class CheckoutCustomerAttach(BaseMutation):
checkout = graphene.Field(Checkout, description="An updated checkout.")
class Arguments:
checkout_id = graphene.ID(required=True, description="ID of the Checkout.")
customer_id = graphene.ID(required=True, description="The ID of the customer.")
class Meta:
description = "Sets the customer as the owner of the Checkout."
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(cls, _root, info, checkout_id, customer_id):
checkout = cls.get_node_or_error(
info, checkout_id, only_type=Checkout, field="checkout_id"
)
customer = cls.get_node_or_error(
info, customer_id, only_type=User, field="customer_id"
)
checkout.user = customer
checkout.save(update_fields=["user"])
return CheckoutCustomerAttach(checkout=checkout)
class CheckoutCustomerDetach(BaseMutation):
checkout = graphene.Field(Checkout, description="An updated checkout")
class Arguments:
checkout_id = graphene.ID(description="Checkout ID", required=True)
class Meta:
description = "Removes the user assigned as the owner of the checkout."
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(cls, _root, info, checkout_id):
checkout = cls.get_node_or_error(
info, checkout_id, only_type=Checkout, field="checkout_id"
)
checkout.user = None
checkout.save(update_fields=["user"])
return CheckoutCustomerDetach(checkout=checkout)
class CheckoutShippingAddressUpdate(BaseMutation, I18nMixin):
checkout = graphene.Field(Checkout, description="An updated checkout")
class Arguments:
checkout_id = graphene.ID(required=True, description="ID of the Checkout.")
shipping_address = AddressInput(
required=True,
description="The mailing address to where the checkout will be shipped.",
)
class Meta:
description = "Update shipping address in the existing Checkout."
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(cls, _root, info, checkout_id, shipping_address):
pk = from_global_id_strict_type(checkout_id, Checkout, field="checkout_id")
try:
checkout = models.Checkout.objects.prefetch_related(
"lines__variant__product__product_type"
).get(pk=pk)
except ObjectDoesNotExist:
raise ValidationError(
{
"checkout_id": ValidationError(
f"Couldn't resolve to a node: {checkout_id}",
code=CheckoutErrorCode.NOT_FOUND,
)
}
)
if not checkout.is_shipping_required():
raise ValidationError(
{
"shipping_address": ValidationError(
ERROR_DOES_NOT_SHIP,
code=CheckoutErrorCode.SHIPPING_NOT_REQUIRED,
)
}
)
shipping_address = cls.validate_address(
shipping_address, instance=checkout.shipping_address
)
update_checkout_shipping_method_if_invalid(checkout, info.context.discounts)
with transaction.atomic():
shipping_address.save()
change_shipping_address_in_checkout(checkout, shipping_address)
recalculate_checkout_discount(checkout, info.context.discounts)
return CheckoutShippingAddressUpdate(checkout=checkout)
class CheckoutBillingAddressUpdate(CheckoutShippingAddressUpdate):
checkout = graphene.Field(Checkout, description="An updated checkout")
class Arguments:
checkout_id = graphene.ID(required=True, description="ID of the Checkout.")
billing_address = AddressInput(
required=True, description="The billing address of the checkout."
)
class Meta:
description = "Update billing address in the existing Checkout."
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(cls, _root, info, checkout_id, billing_address):
checkout = cls.get_node_or_error(
info, checkout_id, only_type=Checkout, field="checkout_id"
)
billing_address = cls.validate_address(
billing_address, instance=checkout.billing_address
)
with transaction.atomic():
billing_address.save()
change_billing_address_in_checkout(checkout, billing_address)
return CheckoutBillingAddressUpdate(checkout=checkout)
class CheckoutEmailUpdate(BaseMutation):
checkout = graphene.Field(Checkout, description="An updated checkout")
class Arguments:
checkout_id = graphene.ID(description="Checkout ID")
email = graphene.String(required=True, description="email")
class Meta:
description = "Updates email address in the existing Checkout object."
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(cls, _root, info, checkout_id, email):
checkout = cls.get_node_or_error(
info, checkout_id, only_type=Checkout, field="checkout_id"
)
checkout.email = email
cls.clean_instance(checkout)
checkout.save(update_fields=["email"])
return CheckoutEmailUpdate(checkout=checkout)
class CheckoutShippingMethodUpdate(BaseMutation):
checkout = graphene.Field(Checkout, description="An updated checkout")
class Arguments:
checkout_id = graphene.ID(description="Checkout ID")
shipping_method_id = graphene.ID(required=True, description="Shipping method")
class Meta:
description = "Updates the shipping address of the checkout."
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(cls, _root, info, checkout_id, shipping_method_id):
pk = from_global_id_strict_type(
checkout_id, only_type=Checkout, field="checkout_id"
)
try:
checkout = models.Checkout.objects.prefetch_related(
"lines__variant__product__collections",
"lines__variant__product__product_type",
).get(pk=pk)
except ObjectDoesNotExist:
raise ValidationError(
{
"checkout_id": ValidationError(
f"Couldn't resolve to a node: {checkout_id}",
code=CheckoutErrorCode.NOT_FOUND,
)
}
)
if not checkout.is_shipping_required():
raise ValidationError(
{
"shipping_method": ValidationError(
ERROR_DOES_NOT_SHIP,
code=CheckoutErrorCode.SHIPPING_NOT_REQUIRED,
)
}
)
shipping_method = cls.get_node_or_error(
info,
shipping_method_id,
only_type=ShippingMethod,
field="shipping_method_id",
)
shipping_method_is_valid = clean_shipping_method(
checkout=checkout, method=shipping_method, discounts=info.context.discounts
)
if not shipping_method_is_valid:
raise ValidationError(
{
"shipping_method": ValidationError(
"This shipping method is not applicable.",
code=CheckoutErrorCode.SHIPPING_METHOD_NOT_APPLICABLE,
)
}
)
checkout.shipping_method = shipping_method
checkout.save(update_fields=["shipping_method"])
recalculate_checkout_discount(checkout, info.context.discounts)
return CheckoutShippingMethodUpdate(checkout=checkout)
class CheckoutComplete(BaseMutation):
order = graphene.Field(Order, description="Placed order")
class Arguments:
checkout_id = graphene.ID(description="Checkout ID", required=True)
store_source = graphene.Boolean(
default_value=False,
description=(
"Determines whether to store the payment source for future usage."
),
)
class Meta:
description = (
"Completes the checkout. As a result a new order is created and "
"a payment charge is made. This action requires a successful "
"payment before it can be performed."
)
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(cls, _root, info, checkout_id, store_source):
checkout = cls.get_node_or_error(
info, checkout_id, only_type=Checkout, field="checkout_id"
)
user = info.context.user
clean_checkout(checkout, info.context.discounts)
payment = checkout.get_last_active_payment()
with transaction.atomic():
try:
order_data = prepare_order_data(
checkout=checkout,
tracking_code=analytics.get_client_id(info.context),
discounts=info.context.discounts,
)
except InsufficientStock as e:
raise ValidationError(
f"Insufficient product stock: {e.item}", code=e.code
)
except voucher_model.NotApplicable:
raise ValidationError(
"Voucher not applicable",
code=CheckoutErrorCode.VOUCHER_NOT_APPLICABLE,
)
except TaxError as tax_error:
return ValidationError(
"Unable to calculate taxes - %s" % str(tax_error),
code=CheckoutErrorCode.TAX_ERROR,
)
billing_address = order_data["billing_address"]
shipping_address = order_data.get("shipping_address", None)
billing_address = AddressData(**billing_address.as_data())
if shipping_address is not None:
shipping_address = AddressData(**shipping_address.as_data())
try:
txn = gateway.process_payment(
payment=payment, token=payment.token, store_source=store_source
)
if not txn.is_success:
raise PaymentError(txn.error)
except PaymentError as e:
abort_order_data(order_data)
raise ValidationError(str(e), code=CheckoutErrorCode.PAYMENT_ERROR)
if txn.customer_id and user.is_authenticated:
store_customer_id(user, payment.gateway, txn.customer_id)
order = create_order(checkout=checkout, order_data=order_data, user=user)
checkout.delete()
return CheckoutComplete(order=order)
class CheckoutUpdateVoucher(BaseMutation):
checkout = graphene.Field(Checkout, description="An checkout with updated voucher")
class Arguments:
checkout_id = graphene.ID(description="Checkout ID", required=True)
voucher_code = graphene.String(description="Voucher code")
class Meta:
description = (
"DEPRECATED: Will be removed in Saleor 2.10, use CheckoutAddPromoCode "
"or CheckoutRemovePromoCode instead. Adds voucher to the checkout. Query "
"it without voucher_code field to remove voucher from checkout."
)
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(cls, _root, info, checkout_id, voucher_code=None):
checkout = cls.get_node_or_error(
info, checkout_id, only_type=Checkout, field="checkout_id"
)
if voucher_code:
try:
voucher = voucher_model.Voucher.objects.active(date=timezone.now()).get(
code=voucher_code
)
except voucher_model.Voucher.DoesNotExist:
raise ValidationError(
{
"voucher_code": ValidationError(
"Voucher with given code does not exist.",
code=CheckoutErrorCode.NOT_FOUND,
)
}
)
try:
add_voucher_to_checkout(checkout, voucher)
except voucher_model.NotApplicable:
raise ValidationError(
{
"voucher_code": ValidationError(
"Voucher is not applicable to that checkout.",
code=CheckoutErrorCode.VOUCHER_NOT_APPLICABLE,
)
}
)
else:
existing_voucher = get_voucher_for_checkout(checkout)
if existing_voucher:
remove_voucher_from_checkout(checkout)
return CheckoutUpdateVoucher(checkout=checkout)
class CheckoutAddPromoCode(BaseMutation):
checkout = graphene.Field(
Checkout, description="The checkout with the added gift card or voucher"
)
class Arguments:
checkout_id = graphene.ID(description="Checkout ID", required=True)
promo_code = graphene.String(
description="Gift card code or voucher code", required=True
)
class Meta:
description = "Adds a gift card or a voucher to a checkout."
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(cls, _root, info, checkout_id, promo_code):
checkout = cls.get_node_or_error(
info, checkout_id, only_type=Checkout, field="checkout_id"
)
add_promo_code_to_checkout(checkout, promo_code, info.context.discounts)
return CheckoutAddPromoCode(checkout=checkout)
class CheckoutRemovePromoCode(BaseMutation):
checkout = graphene.Field(
Checkout, description="The checkout with the removed gift card or voucher"
)
class Arguments:
checkout_id = graphene.ID(description="Checkout ID", required=True)
promo_code = graphene.String(
description="Gift card code or voucher code", required=True
)
class Meta:
description = "Remove a gift card or a voucher from a checkout."
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(cls, _root, info, checkout_id, promo_code):
checkout = cls.get_node_or_error(
info, checkout_id, only_type=Checkout, field="checkout_id"
)
remove_promo_code_from_checkout(checkout, promo_code)
return CheckoutUpdateVoucher(checkout=checkout)
class CheckoutUpdateMeta(UpdateMetaBaseMutation):
class Meta:
description = "Updates metadata for Checkout."
permissions = ("order.manage_orders",)
model = models.Checkout
public = True
error_type_class = CheckoutError
error_type_field = "checkout_errors"
class CheckoutUpdatePrivateMeta(UpdateMetaBaseMutation):
class Meta:
description = "Updates private metadata for Checkout."
permissions = ("order.manage_orders",)
model = models.Checkout
public = False
error_type_class = CheckoutError
error_type_field = "checkout_errors"
class CheckoutClearStoredMeta(ClearMetaBaseMutation):
class Meta:
description = "Clear stored metadata value."
permissions = ("order.manage_orders",)
model = models.Checkout
public = True
error_type_class = CheckoutError
error_type_field = "checkout_errors"
class CheckoutClearStoredPrivateMeta(ClearMetaBaseMutation):
class Meta:
description = "Clear stored metadata value."
permissions = ("order.manage_orders",)
model = models.Checkout
public = False
error_type_class = CheckoutError
error_type_field = "checkout_errors"
| true | true |
1c2f2bcadeab8e03b1eced3d3248600da24dceb3 | 254 | py | Python | spider/__init__.py | CCYYGO/PSpider | 5978fdfeecdbcb449f562194c91913ece3f12035 | [
"BSD-2-Clause"
] | 1 | 2019-12-24T04:28:10.000Z | 2019-12-24T04:28:10.000Z | spider/__init__.py | CCYYGO/PSpider | 5978fdfeecdbcb449f562194c91913ece3f12035 | [
"BSD-2-Clause"
] | null | null | null | spider/__init__.py | CCYYGO/PSpider | 5978fdfeecdbcb449f562194c91913ece3f12035 | [
"BSD-2-Clause"
] | null | null | null | # _*_ coding: utf-8 _*_
"""
define WebSpider, and also define utilities and instances for web_spider
"""
__version__ = "2.5.3"
from .utilities import *
from .concurrent import TPEnum, WebSpider
from .instances import Fetcher, Parser, Saver, Proxieser
| 21.166667 | 72 | 0.748031 |
__version__ = "2.5.3"
from .utilities import *
from .concurrent import TPEnum, WebSpider
from .instances import Fetcher, Parser, Saver, Proxieser
| true | true |
1c2f2dc160dc28e6805357962b8f8d9df566c32e | 1,416 | py | Python | src/stack/python/test_stack.py | SamyuelDanyo/data-structures | 057fbebd5f4be3af81727c09970f843df7c43007 | [
"MIT"
] | null | null | null | src/stack/python/test_stack.py | SamyuelDanyo/data-structures | 057fbebd5f4be3af81727c09970f843df7c43007 | [
"MIT"
] | null | null | null | src/stack/python/test_stack.py | SamyuelDanyo/data-structures | 057fbebd5f4be3af81727c09970f843df7c43007 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#################################################
""" test_stack.py
# # Test for Stack(LIFO) Implementation
# Tests:
# - Capacity, size.
# - Push, Pop, Peek.
# - Delete_mid.
"""
#################################################
# ### Author: Samyuel Danyo
# ### Date: 02/06/2020
# ### Last Edit: 02/06/2020
##################################################
# ## imports
# Python Standard Library
from random import randint
# Local Application/Library Specific Imports.
import stack as st
##################################################
# Test
##################################################
print("==== ARRAY STACK IMPLEMENTATION ====")
STACK = st.Stack(7)
for idx in range(8):
STACK.push(randint(1, 100))
print("Stack cap: {} | size: {}".format(STACK.capacity(), STACK.size()))
print(STACK)
print("Stack head at:", STACK.peek())
STACK.delete_mid()
print("Stack cap: {} | size: {}".format(STACK.capacity(), STACK.size()))
while not STACK.is_empty():
print(STACK.pop())
print("==== LIST STACK IMPLEMENTATION ====")
STACK = st.ListStack(7)
for idx in range(8):
STACK.push(randint(1, 100))
print("Stack cap: {} | size: {}".format(STACK.capacity(), STACK.size()))
print(STACK)
print("Stack head at:", STACK.peek())
STACK.delete_mid()
print("Stack cap: {} | size: {}".format(STACK.capacity(), STACK.size()))
while not STACK.is_empty():
print(STACK.pop())
| 30.782609 | 72 | 0.531073 | true | true | |
1c2f2dff5e72218d9f9d7885c6a6a23e047231d4 | 1,345 | py | Python | hyperparams.py | kotsonis/banana-DDQN_RL | 7c7abd771e2758cff178ce65dc6f3afcbb35805a | [
"MIT"
] | null | null | null | hyperparams.py | kotsonis/banana-DDQN_RL | 7c7abd771e2758cff178ce65dc6f3afcbb35805a | [
"MIT"
] | null | null | null | hyperparams.py | kotsonis/banana-DDQN_RL | 7c7abd771e2758cff178ce65dc6f3afcbb35805a | [
"MIT"
] | null | null | null | std_learn_params = {
# Unity Environment parameters
"banana_location": "./Banana_Windows_x86_64/Banana.exe",
# MDP learning parameters
"n_episodes": 2000, # maximum episodes to train on
"max_t":1000, # maximum scenes in an episodic training
"eps_start":0.975, # starting exploration factor
"eps_end":0.05, # ending exploration factor
"eps_decay":0.99, # eps step decay
'early_stop': 13, # early stop if average reward in 100 episode reaches this value
# Q value learning parameters
"gamma": 1, # discount factor
"tau": 1e-3, # for soft update of target parameters
"lr": 5e-4, # learning rate
"update_every": 4, # how often to update the network
# Replay Buffer / Prioritized Replay Buffer parameters
"buffer_size": 1e5, # replay buffer size
"batch_size": 32, # minibatch size
"alpha": 0.8, # prioritization factor (0: No prioritization .. 1: Full prioritization)
"pr_eps": 1e-05, # minimum prioritization
"beta":0.4, # Importance sampling beta factor start
"beta_step": 0.00025/4.0, # beta decay factor
"beta_max": 1.0 # maximum beta
} | 51.730769 | 108 | 0.572491 | std_learn_params = {
"banana_location": "./Banana_Windows_x86_64/Banana.exe",
"n_episodes": 2000,
"max_t":1000,
"eps_start":0.975,
"eps_end":0.05,
"eps_decay":0.99,
'early_stop': 13,
"gamma": 1,
"tau": 1e-3,
"lr": 5e-4,
"update_every": 4,
"buffer_size": 1e5,
"batch_size": 32,
"alpha": 0.8,
"pr_eps": 1e-05,
"beta":0.4,
"beta_step": 0.00025/4.0,
"beta_max": 1.0
} | true | true |
1c2f2f3f821253be8de0a4ac64cc0e7883dd55ee | 10,787 | py | Python | tests/test_functions.py | vrthra/PyVM | 61fdf22533aceea2cc1f463f7cdbbc13eda6ff25 | [
"MIT"
] | null | null | null | tests/test_functions.py | vrthra/PyVM | 61fdf22533aceea2cc1f463f7cdbbc13eda6ff25 | [
"MIT"
] | null | null | null | tests/test_functions.py | vrthra/PyVM | 61fdf22533aceea2cc1f463f7cdbbc13eda6ff25 | [
"MIT"
] | null | null | null | """Test functions etc, for Bytevm."""
from __future__ import print_function
from . import vmtest
import six
PY3 = six.PY3
class TestFunctions(vmtest.VmTestCase):
def test_functions(self):
self.assert_ok("""\
def fn(a, b=17, c="Hello", d=[]):
d.append(99)
print(a, b, c, d)
fn(1)
fn(2, 3)
fn(3, c="Bye")
fn(4, d=["What?"])
fn(5, "b", "c")
""")
def test_recursion(self):
self.assert_ok("""\
def fact(n):
if n <= 1:
return 1
else:
return n * fact(n-1)
f6 = fact(6)
print(f6)
assert f6 == 720
""")
def test_nested_names(self):
self.assert_ok("""\
def one():
x = 1
def two():
x = 2
print(x)
two()
print(x)
one()
""")
def test_calling_functions_with_args_kwargs(self):
self.assert_ok("""\
def fn(a, b=17, c="Hello", d=[]):
d.append(99)
print(a, b, c, d)
fn(6, *[77, 88])
fn(**{'c': 23, 'a': 7})
fn(6, *[77], **{'c': 23, 'd': [123]})
""")
def test_defining_functions_with_args_kwargs(self):
self.assert_ok("""\
def fn(*args):
print("args is %r" % (args,))
fn(1, 2)
""")
self.assert_ok("""\
def fn(**kwargs):
print("kwargs is %r" % (kwargs,))
fn(red=True, blue=False)
""")
self.assert_ok("""\
def fn(*args, **kwargs):
print("args is %r" % (args,))
print("kwargs is %r" % (kwargs,))
fn(1, 2, red=True, blue=False)
""")
self.assert_ok("""\
def fn(x, y, *args, **kwargs):
print("x is %r, y is %r" % (x, y))
print("args is %r" % (args,))
print("kwargs is %r" % (kwargs,))
fn('a', 'b', 1, 2, red=True, blue=False)
""")
def test_defining_functions_with_empty_args_kwargs(self):
self.assert_ok("""\
def fn(*args):
print("args is %r" % (args,))
fn()
""")
self.assert_ok("""\
def fn(**kwargs):
print("kwargs is %r" % (kwargs,))
fn()
""")
self.assert_ok("""\
def fn(*args, **kwargs):
print("args is %r, kwargs is %r" % (args, kwargs))
fn()
""")
def test_partial(self):
self.assert_ok("""\
from _functools import partial
def f(a,b):
return a-b
f7 = partial(f, 7)
four = f7(3)
assert four == 4
""")
def test_partial_with_kwargs(self):
self.assert_ok("""\
from _functools import partial
def f(a,b,c=0,d=0):
return (a,b,c,d)
f7 = partial(f, b=7, c=1)
them = f7(10)
assert them == (10,7,1,0)
""")
def test_wraps(self):
self.assert_ok("""\
from functools import wraps
def my_decorator(f):
dec = wraps(f)
def wrapper(*args, **kwds):
print('Calling decorated function')
return f(*args, **kwds)
wrapper = dec(wrapper)
return wrapper
@my_decorator
def example():
'''Docstring'''
return 17
assert example() == 17
""")
class TestClosures(vmtest.VmTestCase):
def test_closures(self):
self.assert_ok("""\
def make_adder(x):
def add(y):
return x+y
return add
a = make_adder(10)
print(a(7))
assert a(7) == 17
""")
def test_closures_store_deref(self):
self.assert_ok("""\
def make_adder(x):
z = x+1
def add(y):
return x+y+z
return add
a = make_adder(10)
print(a(7))
assert a(7) == 28
""")
def test_closures_in_loop(self):
self.assert_ok("""\
def make_fns(x):
fns = []
for i in range(x):
fns.append(lambda i=i: i)
return fns
fns = make_fns(3)
for f in fns:
print(f())
assert (fns[0](), fns[1](), fns[2]()) == (0, 1, 2)
""")
def test_closures_with_defaults(self):
self.assert_ok("""\
def make_adder(x, y=13, z=43):
def add(q, r=11):
return x+y+z+q+r
return add
a = make_adder(10, 17)
print(a(7))
assert a(7) == 88
""")
def test_deep_closures(self):
self.assert_ok("""\
def f1(a):
b = 2*a
def f2(c):
d = 2*c
def f3(e):
f = 2*e
def f4(g):
h = 2*g
return a+b+c+d+e+f+g+h
return f4
return f3
return f2
answer = f1(3)(4)(5)(6)
print(answer)
assert answer == 54
""")
def test_closure_vars_from_static_parent(self):
self.assert_ok("""\
def f(xs):
return lambda: xs[0]
def g(h):
xs = 5
lambda: xs
return h()
assert g(f([42])) == 42
""")
class TestGenerators(vmtest.VmTestCase):
def test_first(self):
self.assert_ok("""\
def two():
yield 1
yield 2
for i in two():
print(i)
""")
def test_partial_generator(self):
self.assert_ok("""\
from _functools import partial
def f(a,b):
num = a+b
while num:
yield num
num -= 1
f2 = partial(f, 2)
three = f2(1)
assert list(three) == [3,2,1]
""")
def test_yield_multiple_values(self):
self.assert_ok("""\
def triples():
yield 1, 2, 3
yield 4, 5, 6
for a, b, c in triples():
print(a, b, c)
""")
def test_simple_generator(self):
self.assert_ok("""\
g = (x for x in [0,1,2])
print(list(g))
""")
def test_generator_from_generator(self):
self.assert_ok("""\
g = (x*x for x in range(5))
h = (y+1 for y in g)
print(list(h))
""")
def test_generator_from_generator2(self):
self.assert_ok("""\
class Thing(object):
RESOURCES = ('abc', 'def')
def get_abc(self):
return "ABC"
def get_def(self):
return "DEF"
def resource_info(self):
for name in self.RESOURCES:
get_name = 'get_' + name
yield name, getattr(self, get_name)
def boom(self):
#d = list((name, get()) for name, get in self.resource_info())
d = [(name, get()) for name, get in self.resource_info()]
return d
print(Thing().boom())
""")
if PY3: # PY3.3+ only
def test_yield_from(self):
self.assert_ok("""\
def main():
x = outer()
next(x)
y = x.send("Hello, World")
print(y)
def outer():
yield from inner()
def inner():
y = yield
yield y
main()
""")
def test_yield_from_tuple(self):
self.assert_ok("""\
def main():
for x in outer():
print(x)
def outer():
yield from (1, 2, 3, 4)
main()
""")
def test_distinguish_iterators_and_generators(self):
self.assert_ok("""\
class Foo(object):
def __iter__(self):
return FooIter()
class FooIter(object):
def __init__(self):
self.state = 0
def __next__(self):
if self.state >= 10:
raise StopIteration
self.state += 1
return self.state
def send(self, n):
print("sending")
def outer():
yield from Foo()
for x in outer():
print(x)
""")
def test_nested_yield_from(self):
self.assert_ok("""\
def main():
x = outer()
next(x)
y = x.send("Hello, World")
print(y)
def outer():
yield from middle()
def middle():
yield from inner()
def inner():
y = yield
yield y
main()
""")
def test_return_from_generator(self):
self.assert_ok("""\
def gen():
yield 1
return 2
x = gen()
while True:
try:
print(next(x))
except StopIteration as e:
print(e.value)
break
""")
def test_return_from_generator_with_yield_from(self):
self.assert_ok("""\
def returner():
if False:
yield
return 1
def main():
y = yield from returner()
print(y)
list(main())
""")
| 27.103015 | 82 | 0.370539 |
from __future__ import print_function
from . import vmtest
import six
PY3 = six.PY3
class TestFunctions(vmtest.VmTestCase):
def test_functions(self):
self.assert_ok("""\
def fn(a, b=17, c="Hello", d=[]):
d.append(99)
print(a, b, c, d)
fn(1)
fn(2, 3)
fn(3, c="Bye")
fn(4, d=["What?"])
fn(5, "b", "c")
""")
def test_recursion(self):
self.assert_ok("""\
def fact(n):
if n <= 1:
return 1
else:
return n * fact(n-1)
f6 = fact(6)
print(f6)
assert f6 == 720
""")
def test_nested_names(self):
self.assert_ok("""\
def one():
x = 1
def two():
x = 2
print(x)
two()
print(x)
one()
""")
def test_calling_functions_with_args_kwargs(self):
self.assert_ok("""\
def fn(a, b=17, c="Hello", d=[]):
d.append(99)
print(a, b, c, d)
fn(6, *[77, 88])
fn(**{'c': 23, 'a': 7})
fn(6, *[77], **{'c': 23, 'd': [123]})
""")
def test_defining_functions_with_args_kwargs(self):
self.assert_ok("""\
def fn(*args):
print("args is %r" % (args,))
fn(1, 2)
""")
self.assert_ok("""\
def fn(**kwargs):
print("kwargs is %r" % (kwargs,))
fn(red=True, blue=False)
""")
self.assert_ok("""\
def fn(*args, **kwargs):
print("args is %r" % (args,))
print("kwargs is %r" % (kwargs,))
fn(1, 2, red=True, blue=False)
""")
self.assert_ok("""\
def fn(x, y, *args, **kwargs):
print("x is %r, y is %r" % (x, y))
print("args is %r" % (args,))
print("kwargs is %r" % (kwargs,))
fn('a', 'b', 1, 2, red=True, blue=False)
""")
def test_defining_functions_with_empty_args_kwargs(self):
self.assert_ok("""\
def fn(*args):
print("args is %r" % (args,))
fn()
""")
self.assert_ok("""\
def fn(**kwargs):
print("kwargs is %r" % (kwargs,))
fn()
""")
self.assert_ok("""\
def fn(*args, **kwargs):
print("args is %r, kwargs is %r" % (args, kwargs))
fn()
""")
def test_partial(self):
self.assert_ok("""\
from _functools import partial
def f(a,b):
return a-b
f7 = partial(f, 7)
four = f7(3)
assert four == 4
""")
def test_partial_with_kwargs(self):
self.assert_ok("""\
from _functools import partial
def f(a,b,c=0,d=0):
return (a,b,c,d)
f7 = partial(f, b=7, c=1)
them = f7(10)
assert them == (10,7,1,0)
""")
def test_wraps(self):
self.assert_ok("""\
from functools import wraps
def my_decorator(f):
dec = wraps(f)
def wrapper(*args, **kwds):
print('Calling decorated function')
return f(*args, **kwds)
wrapper = dec(wrapper)
return wrapper
@my_decorator
def example():
'''Docstring'''
return 17
assert example() == 17
""")
class TestClosures(vmtest.VmTestCase):
def test_closures(self):
self.assert_ok("""\
def make_adder(x):
def add(y):
return x+y
return add
a = make_adder(10)
print(a(7))
assert a(7) == 17
""")
def test_closures_store_deref(self):
self.assert_ok("""\
def make_adder(x):
z = x+1
def add(y):
return x+y+z
return add
a = make_adder(10)
print(a(7))
assert a(7) == 28
""")
def test_closures_in_loop(self):
self.assert_ok("""\
def make_fns(x):
fns = []
for i in range(x):
fns.append(lambda i=i: i)
return fns
fns = make_fns(3)
for f in fns:
print(f())
assert (fns[0](), fns[1](), fns[2]()) == (0, 1, 2)
""")
def test_closures_with_defaults(self):
self.assert_ok("""\
def make_adder(x, y=13, z=43):
def add(q, r=11):
return x+y+z+q+r
return add
a = make_adder(10, 17)
print(a(7))
assert a(7) == 88
""")
def test_deep_closures(self):
self.assert_ok("""\
def f1(a):
b = 2*a
def f2(c):
d = 2*c
def f3(e):
f = 2*e
def f4(g):
h = 2*g
return a+b+c+d+e+f+g+h
return f4
return f3
return f2
answer = f1(3)(4)(5)(6)
print(answer)
assert answer == 54
""")
def test_closure_vars_from_static_parent(self):
self.assert_ok("""\
def f(xs):
return lambda: xs[0]
def g(h):
xs = 5
lambda: xs
return h()
assert g(f([42])) == 42
""")
class TestGenerators(vmtest.VmTestCase):
def test_first(self):
self.assert_ok("""\
def two():
yield 1
yield 2
for i in two():
print(i)
""")
def test_partial_generator(self):
self.assert_ok("""\
from _functools import partial
def f(a,b):
num = a+b
while num:
yield num
num -= 1
f2 = partial(f, 2)
three = f2(1)
assert list(three) == [3,2,1]
""")
def test_yield_multiple_values(self):
self.assert_ok("""\
def triples():
yield 1, 2, 3
yield 4, 5, 6
for a, b, c in triples():
print(a, b, c)
""")
def test_simple_generator(self):
self.assert_ok("""\
g = (x for x in [0,1,2])
print(list(g))
""")
def test_generator_from_generator(self):
self.assert_ok("""\
g = (x*x for x in range(5))
h = (y+1 for y in g)
print(list(h))
""")
def test_generator_from_generator2(self):
self.assert_ok("""\
class Thing(object):
RESOURCES = ('abc', 'def')
def get_abc(self):
return "ABC"
def get_def(self):
return "DEF"
def resource_info(self):
for name in self.RESOURCES:
get_name = 'get_' + name
yield name, getattr(self, get_name)
def boom(self):
#d = list((name, get()) for name, get in self.resource_info())
d = [(name, get()) for name, get in self.resource_info()]
return d
print(Thing().boom())
""")
if PY3:
def test_yield_from(self):
self.assert_ok("""\
def main():
x = outer()
next(x)
y = x.send("Hello, World")
print(y)
def outer():
yield from inner()
def inner():
y = yield
yield y
main()
""")
def test_yield_from_tuple(self):
self.assert_ok("""\
def main():
for x in outer():
print(x)
def outer():
yield from (1, 2, 3, 4)
main()
""")
def test_distinguish_iterators_and_generators(self):
self.assert_ok("""\
class Foo(object):
def __iter__(self):
return FooIter()
class FooIter(object):
def __init__(self):
self.state = 0
def __next__(self):
if self.state >= 10:
raise StopIteration
self.state += 1
return self.state
def send(self, n):
print("sending")
def outer():
yield from Foo()
for x in outer():
print(x)
""")
def test_nested_yield_from(self):
self.assert_ok("""\
def main():
x = outer()
next(x)
y = x.send("Hello, World")
print(y)
def outer():
yield from middle()
def middle():
yield from inner()
def inner():
y = yield
yield y
main()
""")
def test_return_from_generator(self):
self.assert_ok("""\
def gen():
yield 1
return 2
x = gen()
while True:
try:
print(next(x))
except StopIteration as e:
print(e.value)
break
""")
def test_return_from_generator_with_yield_from(self):
self.assert_ok("""\
def returner():
if False:
yield
return 1
def main():
y = yield from returner()
print(y)
list(main())
""")
| true | true |
1c2f304c2cdde31005f56751f34965b6155440a8 | 1,341 | py | Python | dataset.py | ChangLee0903/SERIL-Noise-Adaptive-Speech-Enhancement-using-Regularization-based-Incremental-Learning | 73c5dbd8a272a534cc0af455a9f61567dc67fb66 | [
"MIT"
] | 29 | 2020-09-14T22:34:25.000Z | 2022-03-26T18:59:26.000Z | dataset.py | ChangLee0903/SERIL-Noise-Adaptive-Speech-Enhancement-using-Regularization-based-Incremental-Learning | 73c5dbd8a272a534cc0af455a9f61567dc67fb66 | [
"MIT"
] | 1 | 2020-10-16T07:25:21.000Z | 2020-10-31T01:37:45.000Z | dataset.py | ChangLee0903/SERIL-Noise-Adaptive-Speech-Enhancement-using-Regularization-based-Incremental-Learning | 73c5dbd8a272a534cc0af455a9f61567dc67fb66 | [
"MIT"
] | 7 | 2020-09-28T08:45:41.000Z | 2021-09-23T09:02:44.000Z | import torch
import os
import numpy as np
import random
from librosa import load
def read(data, normalize=False, sr=16000):
data, sr = load(data, sr=sr)
if normalize:
data /= np.abs(data).max()
return data, sr
class SpeechDataset(torch.utils.data.Dataset):
def __init__(self, noisy_path, clean_path, sampling_rate=16000):
self.sampling_rate = sampling_rate
self.noisy_list = [os.path.join(noisy_path, f)
for f in os.listdir(noisy_path)]
self.clean_list = [os.path.join(clean_path, f)
for f in os.listdir(noisy_path)]
assert len(self.noisy_list) == len(self.clean_list)
def __getitem__(self, index):
niy_audio, sampling_rate = read(
self.noisy_list[index], sr=self.sampling_rate)
assert sampling_rate == self.sampling_rate
cln_audio, sampling_rate = read(
self.clean_list[index], sr=self.sampling_rate)
assert sampling_rate == self.sampling_rate
assert niy_audio.shape == cln_audio.shape
niy_audio, cln_audio = torch.from_numpy(
niy_audio), torch.from_numpy(cln_audio)
return niy_audio.unsqueeze(1).float(), cln_audio.unsqueeze(1).float()
def __len__(self):
return len(self.clean_list)
| 31.928571 | 77 | 0.635347 | import torch
import os
import numpy as np
import random
from librosa import load
def read(data, normalize=False, sr=16000):
data, sr = load(data, sr=sr)
if normalize:
data /= np.abs(data).max()
return data, sr
class SpeechDataset(torch.utils.data.Dataset):
def __init__(self, noisy_path, clean_path, sampling_rate=16000):
self.sampling_rate = sampling_rate
self.noisy_list = [os.path.join(noisy_path, f)
for f in os.listdir(noisy_path)]
self.clean_list = [os.path.join(clean_path, f)
for f in os.listdir(noisy_path)]
assert len(self.noisy_list) == len(self.clean_list)
def __getitem__(self, index):
niy_audio, sampling_rate = read(
self.noisy_list[index], sr=self.sampling_rate)
assert sampling_rate == self.sampling_rate
cln_audio, sampling_rate = read(
self.clean_list[index], sr=self.sampling_rate)
assert sampling_rate == self.sampling_rate
assert niy_audio.shape == cln_audio.shape
niy_audio, cln_audio = torch.from_numpy(
niy_audio), torch.from_numpy(cln_audio)
return niy_audio.unsqueeze(1).float(), cln_audio.unsqueeze(1).float()
def __len__(self):
return len(self.clean_list)
| true | true |
1c2f33edbb6f8768974ca054915c523175e95c05 | 2,563 | py | Python | quart/utils.py | SmartManoj/quart | 317562ea660edb7159efc20fa57b95223d408ea0 | [
"MIT"
] | 1 | 2020-08-09T19:45:14.000Z | 2020-08-09T19:45:14.000Z | quart/utils.py | SmartManoj/quart | 317562ea660edb7159efc20fa57b95223d408ea0 | [
"MIT"
] | null | null | null | quart/utils.py | SmartManoj/quart | 317562ea660edb7159efc20fa57b95223d408ea0 | [
"MIT"
] | null | null | null | import asyncio
from datetime import datetime, timedelta, timezone
from http.cookies import SimpleCookie
from os import PathLike
from pathlib import Path
from typing import Callable, List, Optional, TYPE_CHECKING, Union
from wsgiref.handlers import format_date_time
from .globals import current_app
from .typing import FilePath
if TYPE_CHECKING:
from .wrappers.response import Response # noqa: F401
def redirect(location: str, status_code: int=302) -> 'Response':
body = f"""
<!doctype html>
<title>Redirect</title>
<h1>Redirect</h1>
You should be redirected to <a href="{location}">{location}</a>, if not please click the link
"""
return current_app.response_class(
body, status=status_code, headers={'Location': location},
)
def create_cookie(
key: str,
value: str='',
max_age: Optional[Union[int, timedelta]]=None,
expires: Optional[Union[int, float, datetime]]=None,
path: str='/',
domain: Optional[str]=None,
secure: bool=False,
httponly: bool=False,
) -> SimpleCookie:
"""Create a Cookie given the options set
The arguments are the standard cookie morsels and this is a
wrapper around the stdlib SimpleCookie code.
"""
cookie = SimpleCookie()
cookie[key] = value
cookie[key]['path'] = path
cookie[key]['httponly'] = httponly # type: ignore
cookie[key]['secure'] = secure # type: ignore
if isinstance(max_age, timedelta):
cookie[key]['max-age'] = f"{max_age.total_seconds():d}"
if isinstance(max_age, int):
cookie[key]['max-age'] = str(max_age)
if expires is not None and isinstance(expires, (int, float)):
cookie[key]['expires'] = format_date_time(int(expires))
elif expires is not None and isinstance(expires, datetime):
cookie[key]['expires'] = format_date_time(expires.replace(tzinfo=timezone.utc).timestamp())
if domain is not None:
cookie[key]['domain'] = domain
return cookie
def ensure_coroutine(func: Callable) -> Callable:
if asyncio.iscoroutinefunction(func):
return func
else:
async_func = asyncio.coroutine(func)
async_func._quart_async_wrapper = True # type: ignore
return async_func
def file_path_to_path(*paths: FilePath) -> Path:
# Flask supports bytes paths
safe_paths: List[Union[str, PathLike]] = []
for path in paths:
if isinstance(path, bytes):
safe_paths.append(path.decode())
else:
safe_paths.append(path)
return Path(*safe_paths)
| 32.0375 | 99 | 0.671479 | import asyncio
from datetime import datetime, timedelta, timezone
from http.cookies import SimpleCookie
from os import PathLike
from pathlib import Path
from typing import Callable, List, Optional, TYPE_CHECKING, Union
from wsgiref.handlers import format_date_time
from .globals import current_app
from .typing import FilePath
if TYPE_CHECKING:
from .wrappers.response import Response
def redirect(location: str, status_code: int=302) -> 'Response':
body = f"""
<!doctype html>
<title>Redirect</title>
<h1>Redirect</h1>
You should be redirected to <a href="{location}">{location}</a>, if not please click the link
"""
return current_app.response_class(
body, status=status_code, headers={'Location': location},
)
def create_cookie(
key: str,
value: str='',
max_age: Optional[Union[int, timedelta]]=None,
expires: Optional[Union[int, float, datetime]]=None,
path: str='/',
domain: Optional[str]=None,
secure: bool=False,
httponly: bool=False,
) -> SimpleCookie:
cookie = SimpleCookie()
cookie[key] = value
cookie[key]['path'] = path
cookie[key]['httponly'] = httponly
cookie[key]['secure'] = secure
if isinstance(max_age, timedelta):
cookie[key]['max-age'] = f"{max_age.total_seconds():d}"
if isinstance(max_age, int):
cookie[key]['max-age'] = str(max_age)
if expires is not None and isinstance(expires, (int, float)):
cookie[key]['expires'] = format_date_time(int(expires))
elif expires is not None and isinstance(expires, datetime):
cookie[key]['expires'] = format_date_time(expires.replace(tzinfo=timezone.utc).timestamp())
if domain is not None:
cookie[key]['domain'] = domain
return cookie
def ensure_coroutine(func: Callable) -> Callable:
if asyncio.iscoroutinefunction(func):
return func
else:
async_func = asyncio.coroutine(func)
async_func._quart_async_wrapper = True
return async_func
def file_path_to_path(*paths: FilePath) -> Path:
safe_paths: List[Union[str, PathLike]] = []
for path in paths:
if isinstance(path, bytes):
safe_paths.append(path.decode())
else:
safe_paths.append(path)
return Path(*safe_paths)
| true | true |
1c2f3708c08ea5ad6a991257dbb4736fffb2c251 | 843 | py | Python | flask_resty/fields.py | sloria/flask-resty | 6a9a6948289277681c71f2f50b08f8c70fef4e2b | [
"MIT"
] | null | null | null | flask_resty/fields.py | sloria/flask-resty | 6a9a6948289277681c71f2f50b08f8c70fef4e2b | [
"MIT"
] | null | null | null | flask_resty/fields.py | sloria/flask-resty | 6a9a6948289277681c71f2f50b08f8c70fef4e2b | [
"MIT"
] | null | null | null | from marshmallow import fields, ValidationError
import marshmallow.utils
# -----------------------------------------------------------------------------
class RelatedItem(fields.Nested):
def _deserialize(self, value, attr, data):
if self.many and not marshmallow.utils.is_collection(value):
self.fail('type', input=value, type=value.__class__.__name__)
# Do partial load of related item, as we only need the id.
data, errors = self.schema.load(value, partial=True)
if errors:
raise ValidationError(errors, data=data)
return data
def _validate_missing(self, value):
# Do not display detailed error data on required fields in nested
# schema - in this context, they're actually not required.
super(fields.Nested, self)._validate_missing(value)
| 38.318182 | 79 | 0.623962 | from marshmallow import fields, ValidationError
import marshmallow.utils
class RelatedItem(fields.Nested):
def _deserialize(self, value, attr, data):
if self.many and not marshmallow.utils.is_collection(value):
self.fail('type', input=value, type=value.__class__.__name__)
data, errors = self.schema.load(value, partial=True)
if errors:
raise ValidationError(errors, data=data)
return data
def _validate_missing(self, value):
super(fields.Nested, self)._validate_missing(value)
| true | true |
1c2f3768d54c480684ca5f4e4efefe9e576b4ae4 | 405 | py | Python | expensetracker/asgi.py | Aasess/ExpenseTracker | 9feb83b2b66d5d87a994bf6997d9cc33ca6ec066 | [
"MIT"
] | 9 | 2020-06-23T05:55:30.000Z | 2020-07-09T10:07:25.000Z | expensetracker/asgi.py | Aasess/ExpenseTracker | 9feb83b2b66d5d87a994bf6997d9cc33ca6ec066 | [
"MIT"
] | 3 | 2020-06-17T16:36:26.000Z | 2021-02-16T06:29:23.000Z | expensetracker/asgi.py | Aasess/ExpenseTracker | 9feb83b2b66d5d87a994bf6997d9cc33ca6ec066 | [
"MIT"
] | 5 | 2020-06-23T03:27:33.000Z | 2020-08-16T19:37:53.000Z | """
ASGI config for expensetracker project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'expensetracker.settings')
application = get_asgi_application()
| 23.823529 | 78 | 0.792593 |
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'expensetracker.settings')
application = get_asgi_application()
| true | true |
1c2f379724bd0df6af5464ff9cc000f65cf0265f | 427 | py | Python | projects/serializers.py | Joannsaj/projects | 13d806fa01b8de1053b30b0bcaad937a29694844 | [
"MIT"
] | null | null | null | projects/serializers.py | Joannsaj/projects | 13d806fa01b8de1053b30b0bcaad937a29694844 | [
"MIT"
] | null | null | null | projects/serializers.py | Joannsaj/projects | 13d806fa01b8de1053b30b0bcaad937a29694844 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from .models import Profile, Project
class ProfileSerializer(serializers.ModelSerializer):
class Meta:
model = Profile
fields = ('profile_pic', 'contact', 'user', 'bio', 'name')
class ProjectSerializer(serializers.ModelSerializer):
class Meta:
model = Project
fields = ('title', 'project_image', 'description', 'owner', 'created', 'link') | 35.583333 | 94 | 0.676815 | from rest_framework import serializers
from .models import Profile, Project
class ProfileSerializer(serializers.ModelSerializer):
class Meta:
model = Profile
fields = ('profile_pic', 'contact', 'user', 'bio', 'name')
class ProjectSerializer(serializers.ModelSerializer):
class Meta:
model = Project
fields = ('title', 'project_image', 'description', 'owner', 'created', 'link') | true | true |
1c2f37db70dfd7d9a354f7f90a9a0edb627afee5 | 947 | bzl | Python | docs/docs.bzl | Mivik/jflex | 5544d565e8534541056ffdc4d91374a38663c842 | [
"BSD-3-Clause"
] | 486 | 2015-02-15T18:06:12.000Z | 2022-03-28T12:37:00.000Z | docs/docs.bzl | Mivik/jflex | 5544d565e8534541056ffdc4d91374a38663c842 | [
"BSD-3-Clause"
] | 472 | 2015-02-15T06:11:37.000Z | 2022-03-13T21:26:12.000Z | docs/docs.bzl | regisd/jflex | 19fe94f9c72e409945b56971f636ad85506ee77d | [
"BSD-3-Clause"
] | 129 | 2015-03-01T06:24:32.000Z | 2022-03-30T20:51:19.000Z | """Helpers to build the docs."""
load("@bazel_pandoc//:pandoc.bzl", "pandoc")
VERSION = "1.9.0-SNAPSHOT"
RELEASE_DATE = "21 September 2018"
UNICODE_VER = "12.0"
DOC_SECTIONS = [
"intro",
"installing",
"maven-plugin",
"ant-task",
"example",
"lex-specs",
"generated-class",
"encodings",
"performance",
"porting-and-parsers",
"end",
]
def replace_placeholders(name, src = "", out = None, **kwargs):
"""Replaces placeholders by their respective value."""
if not out:
out = name + "_VERSIONED.md"
native.genrule(
name = name,
srcs = [src],
outs = [out],
cmd = "sed -e 's/\\$$VERSION/" + VERSION + "/g'" +
" -e 's/\\$${project.version}/" + VERSION + "/g'" +
" -e 's/\\$$RELEASE_DATE/" + RELEASE_DATE + "/g'" +
" -e 's/\\$$UNICODE_VER/" + UNICODE_VER + "/g'" +
" $< > $@",
**kwargs
)
| 23.675 | 65 | 0.503696 |
load("@bazel_pandoc//:pandoc.bzl", "pandoc")
VERSION = "1.9.0-SNAPSHOT"
RELEASE_DATE = "21 September 2018"
UNICODE_VER = "12.0"
DOC_SECTIONS = [
"intro",
"installing",
"maven-plugin",
"ant-task",
"example",
"lex-specs",
"generated-class",
"encodings",
"performance",
"porting-and-parsers",
"end",
]
def replace_placeholders(name, src = "", out = None, **kwargs):
if not out:
out = name + "_VERSIONED.md"
native.genrule(
name = name,
srcs = [src],
outs = [out],
cmd = "sed -e 's/\\$$VERSION/" + VERSION + "/g'" +
" -e 's/\\$${project.version}/" + VERSION + "/g'" +
" -e 's/\\$$RELEASE_DATE/" + RELEASE_DATE + "/g'" +
" -e 's/\\$$UNICODE_VER/" + UNICODE_VER + "/g'" +
" $< > $@",
**kwargs
)
| true | true |
1c2f38103d50270abb98bbd24bc72fa08347e488 | 2,362 | py | Python | setup.py | tylerhuntington222/biosteam | 234959180a3210d95e39a012454f455723c92686 | [
"MIT"
] | null | null | null | setup.py | tylerhuntington222/biosteam | 234959180a3210d95e39a012454f455723c92686 | [
"MIT"
] | null | null | null | setup.py | tylerhuntington222/biosteam | 234959180a3210d95e39a012454f455723c92686 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# BioSTEAM: The Biorefinery Simulation and Techno-Economic Analysis Modules
# Copyright (C) 2020, Yoel Cortes-Pena <yoelcortes@gmail.com>
#
# This module is under the UIUC open-source license. See
# github.com/BioSTEAMDevelopmentGroup/biosteam/blob/master/LICENSE.txt
# for license details.
from setuptools import setup
#from Cython.Build import cythonize
#import numpy
setup(
name='biosteam',
packages=['biosteam'],
license='MIT',
version='2.20.9',
description='The Biorefinery Simulation and Techno-Economic Analysis Modules',
long_description=open('README.rst').read(),
author='Yoel Cortes-Pena',
install_requires=['IPython>=7.9.0', 'biorefineries>=2.15.5',
'thermosteam>=0.20.8', 'graphviz>=0.8.3',
'chaospy>=3.0.11', 'pipeml>=0.1'],
python_requires=">=3.6",
package_data=
{'biosteam': ['report/*',
'digraph/*',
'utils/*',
'compounds/*',
'reaction/*',
'tests/*',
'evaluation/*',
'evaluation/evaluation_tools/*',
'process_tools/*',
'plots/*',
'units/*',
'units/design_tools/*',
'units/facilities/*',
'units/decorators/*',
'examples/*',
]},
platforms=['Windows', 'Mac', 'Linux'],
author_email='yoelcortes@gmail.com',
url='https://github.com/BioSTEAMDevelopmentGroup/biosteam',
download_url='https://github.com/BioSTEAMDevelopmentGroup/biosteam.git',
classifiers=['Development Status :: 3 - Alpha',
'Environment :: Console',
'License :: OSI Approved :: University of Illinois/NCSA Open Source License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Chemistry',
'Topic :: Scientific/Engineering :: Mathematics'],
keywords='chemical process simmulation bioprocess engineering mass energy balance material properties phase equilibrium CABBI biorefinery biofuel bioproducts',
) | 43.740741 | 163 | 0.562235 |
from setuptools import setup
setup(
name='biosteam',
packages=['biosteam'],
license='MIT',
version='2.20.9',
description='The Biorefinery Simulation and Techno-Economic Analysis Modules',
long_description=open('README.rst').read(),
author='Yoel Cortes-Pena',
install_requires=['IPython>=7.9.0', 'biorefineries>=2.15.5',
'thermosteam>=0.20.8', 'graphviz>=0.8.3',
'chaospy>=3.0.11', 'pipeml>=0.1'],
python_requires=">=3.6",
package_data=
{'biosteam': ['report/*',
'digraph/*',
'utils/*',
'compounds/*',
'reaction/*',
'tests/*',
'evaluation/*',
'evaluation/evaluation_tools/*',
'process_tools/*',
'plots/*',
'units/*',
'units/design_tools/*',
'units/facilities/*',
'units/decorators/*',
'examples/*',
]},
platforms=['Windows', 'Mac', 'Linux'],
author_email='yoelcortes@gmail.com',
url='https://github.com/BioSTEAMDevelopmentGroup/biosteam',
download_url='https://github.com/BioSTEAMDevelopmentGroup/biosteam.git',
classifiers=['Development Status :: 3 - Alpha',
'Environment :: Console',
'License :: OSI Approved :: University of Illinois/NCSA Open Source License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Chemistry',
'Topic :: Scientific/Engineering :: Mathematics'],
keywords='chemical process simmulation bioprocess engineering mass energy balance material properties phase equilibrium CABBI biorefinery biofuel bioproducts',
) | true | true |
1c2f390cd745c7857fcea188e21f791aca7da079 | 539 | py | Python | manage.py | alexandre/flask-rest-template | 206729ea7c229c4ff428dc9161d23fb3e1912026 | [
"Unlicense"
] | 77 | 2015-05-18T11:02:28.000Z | 2019-05-30T09:36:52.000Z | manage.py | alexandre-old/flask-rest-template | 206729ea7c229c4ff428dc9161d23fb3e1912026 | [
"Unlicense"
] | 11 | 2015-05-03T22:12:17.000Z | 2018-03-26T13:17:19.000Z | manage.py | alexandre/flask-rest-template | 206729ea7c229c4ff428dc9161d23fb3e1912026 | [
"Unlicense"
] | 21 | 2015-08-20T02:35:13.000Z | 2019-02-26T04:59:01.000Z | #!/usr/bin/env python
import os
from flask.ext.script import Manager, Shell, Server
from app import create_app
from app.extensions import db
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
# access python shell with context
manager.add_command(
"shell",
Shell(make_context=lambda: {'app': app, 'db': db}), use_ipython=True)
# run the app
manager.add_command(
"startserver",
Server(port=(os.getenv('FLASK_PORT') or 5000), host='0.0.0.0'))
if __name__ == '__main__':
manager.run()
| 20.730769 | 73 | 0.703154 |
import os
from flask.ext.script import Manager, Shell, Server
from app import create_app
from app.extensions import db
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
manager.add_command(
"shell",
Shell(make_context=lambda: {'app': app, 'db': db}), use_ipython=True)
manager.add_command(
"startserver",
Server(port=(os.getenv('FLASK_PORT') or 5000), host='0.0.0.0'))
if __name__ == '__main__':
manager.run()
| true | true |
1c2f3a1e9b824f0c633b51319c90a11656c514c8 | 7,825 | py | Python | egs/wsj/s5/steps/nnet3/report/nnet3_log_parse_lib.py | jxzhanggg/kaldi-trunk | 03fbab26e5714a990e1b6dee9475d4a282a42ccc | [
"Apache-2.0"
] | 16 | 2016-05-31T06:53:04.000Z | 2020-02-12T05:23:18.000Z | egs/wsj/s5/steps/nnet3/report/nnet3_log_parse_lib.py | jxzhanggg/kaldi-trunk | 03fbab26e5714a990e1b6dee9475d4a282a42ccc | [
"Apache-2.0"
] | 2 | 2019-02-18T11:54:14.000Z | 2021-12-21T14:48:57.000Z | egs/wsj/s5/steps/nnet3/report/nnet3_log_parse_lib.py | jxzhanggg/kaldi-trunk | 03fbab26e5714a990e1b6dee9475d4a282a42ccc | [
"Apache-2.0"
] | 7 | 2016-08-27T09:28:24.000Z | 2019-12-16T10:30:12.000Z | # Copyright 2016 Vijayaditya Peddinti.
# Apache 2.0.
from __future__ import division
import sys, glob, re, math, datetime, argparse
import imp
ntl = imp.load_source('ntl', 'steps/nnet3/nnet3_train_lib.py')
#exp/nnet3/lstm_self_repair_ld5_sp/log/progress.9.log:component name=Lstm3_i type=SigmoidComponent, dim=1280, self-repair-scale=1e-05, count=1.96e+05, value-avg=[percentiles(0,1,2,5 10,20,50,80,90 95,98,99,100)=(0.05,0.09,0.11,0.15 0.19,0.27,0.50,0.72,0.83 0.88,0.92,0.94,0.99), mean=0.502, stddev=0.23], deriv-avg=[percentiles(0,1,2,5 10,20,50,80,90 95,98,99,100)=(0.009,0.04,0.05,0.06 0.08,0.10,0.14,0.17,0.18 0.19,0.20,0.20,0.21), mean=0.134, stddev=0.0397]
def ParseProgressLogsForNonlinearityStats(exp_dir):
progress_log_files = "%s/log/progress.*.log" % (exp_dir)
stats_per_component_per_iter = {}
progress_log_lines = ntl.RunKaldiCommand('grep -e "value-avg.*deriv-avg" {0}'.format(progress_log_files))[0]
parse_regex = re.compile(".*progress.([0-9]+).log:component name=(.+) type=(.*)Component,.*value-avg=\[.*mean=([0-9\.\-e]+), stddev=([0-9\.e\-]+)\].*deriv-avg=\[.*mean=([0-9\.\-e]+), stddev=([0-9\.e\-]+)\]")
for line in progress_log_lines.split("\n") :
mat_obj = parse_regex.search(line)
if mat_obj is None:
continue
groups = mat_obj.groups()
# groups = ('9', 'Lstm3_i', 'Sigmoid', '0.502', '0.23', '0.134', '0.0397')
iteration = int(groups[0])
component_name = groups[1]
component_type = groups[2]
value_mean = float(groups[3])
value_stddev = float(groups[4])
deriv_mean = float(groups[5])
deriv_stddev = float(groups[6])
try:
stats_per_component_per_iter[component_name]['stats'][iteration] = [value_mean, value_stddev, deriv_mean, deriv_stddev]
except KeyError:
stats_per_component_per_iter[component_name] = {}
stats_per_component_per_iter[component_name]['type'] = component_type
stats_per_component_per_iter[component_name]['stats'] = {}
stats_per_component_per_iter[component_name]['stats'][iteration] = [value_mean, value_stddev, deriv_mean, deriv_stddev]
return stats_per_component_per_iter
def ParseDifferenceString(string):
dict = {}
for parts in string.split():
sub_parts = parts.split(":")
dict[sub_parts[0]] = float(sub_parts[1])
return dict
#exp/chain/cwrnn_trial2_ld5_sp/log/progress.245.log:LOG (nnet3-show-progress:main():nnet3-show-progress.cc:144) Relative parameter differences per layer are [ Cwrnn1_T3_W_r:0.0171537 Cwrnn1_T3_W_x:1.33338e-07 Cwrnn1_T2_W_r:0.048075 Cwrnn1_T2_W_x:1.34088e-07 Cwrnn1_T1_W_r:0.0157277 Cwrnn1_T1_W_x:0.0212704 Final_affine:0.0321521 Cwrnn2_T3_W_r:0.0212082 Cwrnn2_T3_W_x:1.33691e-07 Cwrnn2_T2_W_r:0.0212978 Cwrnn2_T2_W_x:1.33401e-07 Cwrnn2_T1_W_r:0.014976 Cwrnn2_T1_W_x:0.0233588 Cwrnn3_T3_W_r:0.0237165 Cwrnn3_T3_W_x:1.33184e-07 Cwrnn3_T2_W_r:0.0239754 Cwrnn3_T2_W_x:1.3296e-07 Cwrnn3_T1_W_r:0.0194809 Cwrnn3_T1_W_x:0.0271934 ]
def ParseProgressLogsForParamDiff(exp_dir, pattern):
if pattern not in set(["Relative parameter differences", "Parameter differences"]):
raise Exception("Unknown value for pattern : {0}".format(pattern))
progress_log_files = "%s/log/progress.*.log" % (exp_dir)
progress_per_iter = {}
component_names = set([])
progress_log_lines = ntl.RunKaldiCommand('grep -e "{0}" {1}'.format(pattern, progress_log_files))[0]
parse_regex = re.compile(".*progress\.([0-9]+)\.log:LOG.*{0}.*\[(.*)\]".format(pattern))
for line in progress_log_lines.split("\n") :
mat_obj = parse_regex.search(line)
if mat_obj is None:
continue
groups = mat_obj.groups()
iteration = groups[0]
differences = ParseDifferenceString(groups[1])
component_names = component_names.union(differences.keys())
progress_per_iter[int(iteration)] = differences
component_names = list(component_names)
component_names.sort()
# rearranging the data into an array
data = []
data.append(["iteration"]+component_names)
max_iter = max(progress_per_iter.keys())
for iter in range(max_iter + 1):
try:
component_dict = progress_per_iter[iter]
except KeyError:
continue
iter_values = []
for component_name in component_names:
try:
iter_values.append(component_dict[component_name])
except KeyError:
# the component was not found this iteration, may be because of layerwise discriminative training
iter_values.append(0)
data.append([iter] + iter_values)
return data
def ParseTrainLogs(exp_dir):
train_log_files = "%s/log/train.*.log" % (exp_dir)
train_log_lines = ntl.RunKaldiCommand('grep -e Accounting {0}'.format(train_log_files))[0]
parse_regex = re.compile(".*train\.([0-9]+)\.([0-9]+)\.log:# Accounting: time=([0-9]+) thread.*")
train_times = {}
for line in train_log_lines.split('\n'):
mat_obj = parse_regex.search(line)
if mat_obj is not None:
groups = mat_obj.groups()
try:
train_times[int(groups[0])][int(groups[1])] = float(groups[2])
except KeyError:
train_times[int(groups[0])] = {}
train_times[int(groups[0])][int(groups[1])] = float(groups[2])
iters = train_times.keys()
for iter in iters:
values = train_times[iter].values()
train_times[iter] = max(values)
return train_times
def ParseProbLogs(exp_dir, key = 'accuracy'):
train_prob_files = "%s/log/compute_prob_train.*.log" % (exp_dir)
valid_prob_files = "%s/log/compute_prob_valid.*.log" % (exp_dir)
train_prob_strings = ntl.RunKaldiCommand('grep -e {0} {1}'.format(key, train_prob_files), wait = True)[0]
valid_prob_strings = ntl.RunKaldiCommand('grep -e {0} {1}'.format(key, valid_prob_files))[0]
#LOG (nnet3-chain-compute-prob:PrintTotalStats():nnet-chain-diagnostics.cc:149) Overall log-probability for 'output' is -0.399395 + -0.013437 = -0.412832 per frame, over 20000 fra
#LOG (nnet3-chain-compute-prob:PrintTotalStats():nnet-chain-diagnostics.cc:144) Overall log-probability for 'output' is -0.307255 per frame, over 20000 frames.
parse_regex = re.compile(".*compute_prob_.*\.([0-9]+).log:LOG .nnet3.*compute-prob:PrintTotalStats..:nnet.*diagnostics.cc:[0-9]+. Overall ([a-zA-Z\-]+) for 'output'.*is ([0-9.\-e]+) .*per frame")
train_loss={}
valid_loss={}
for line in train_prob_strings.split('\n'):
mat_obj = parse_regex.search(line)
if mat_obj is not None:
groups = mat_obj.groups()
if groups[1] == key:
train_loss[int(groups[0])] = groups[2]
for line in valid_prob_strings.split('\n'):
mat_obj = parse_regex.search(line)
if mat_obj is not None:
groups = mat_obj.groups()
if groups[1] == key:
valid_loss[int(groups[0])] = groups[2]
iters = list(set(valid_loss.keys()).intersection(train_loss.keys()))
iters.sort()
return map(lambda x: (int(x), float(train_loss[x]), float(valid_loss[x])), iters)
def GenerateAccuracyReport(exp_dir, key = "accuracy"):
times = ParseTrainLogs(exp_dir)
data = ParseProbLogs(exp_dir, key)
report = []
report.append("%Iter\tduration\ttrain_loss\tvalid_loss\tdifference")
for x in data:
try:
report.append("%d\t%s\t%g\t%g\t%g" % (x[0], str(times[x[0]]), x[1], x[2], x[2]-x[1]))
except KeyError:
continue
total_time = 0
for iter in times.keys():
total_time += times[iter]
report.append("Total training time is {0}\n".format(str(datetime.timedelta(seconds = total_time))))
return ["\n".join(report), times, data]
| 50.160256 | 624 | 0.662364 |
from __future__ import division
import sys, glob, re, math, datetime, argparse
import imp
ntl = imp.load_source('ntl', 'steps/nnet3/nnet3_train_lib.py')
def ParseProgressLogsForNonlinearityStats(exp_dir):
progress_log_files = "%s/log/progress.*.log" % (exp_dir)
stats_per_component_per_iter = {}
progress_log_lines = ntl.RunKaldiCommand('grep -e "value-avg.*deriv-avg" {0}'.format(progress_log_files))[0]
parse_regex = re.compile(".*progress.([0-9]+).log:component name=(.+) type=(.*)Component,.*value-avg=\[.*mean=([0-9\.\-e]+), stddev=([0-9\.e\-]+)\].*deriv-avg=\[.*mean=([0-9\.\-e]+), stddev=([0-9\.e\-]+)\]")
for line in progress_log_lines.split("\n") :
mat_obj = parse_regex.search(line)
if mat_obj is None:
continue
groups = mat_obj.groups()
iteration = int(groups[0])
component_name = groups[1]
component_type = groups[2]
value_mean = float(groups[3])
value_stddev = float(groups[4])
deriv_mean = float(groups[5])
deriv_stddev = float(groups[6])
try:
stats_per_component_per_iter[component_name]['stats'][iteration] = [value_mean, value_stddev, deriv_mean, deriv_stddev]
except KeyError:
stats_per_component_per_iter[component_name] = {}
stats_per_component_per_iter[component_name]['type'] = component_type
stats_per_component_per_iter[component_name]['stats'] = {}
stats_per_component_per_iter[component_name]['stats'][iteration] = [value_mean, value_stddev, deriv_mean, deriv_stddev]
return stats_per_component_per_iter
def ParseDifferenceString(string):
dict = {}
for parts in string.split():
sub_parts = parts.split(":")
dict[sub_parts[0]] = float(sub_parts[1])
return dict
def ParseProgressLogsForParamDiff(exp_dir, pattern):
if pattern not in set(["Relative parameter differences", "Parameter differences"]):
raise Exception("Unknown value for pattern : {0}".format(pattern))
progress_log_files = "%s/log/progress.*.log" % (exp_dir)
progress_per_iter = {}
component_names = set([])
progress_log_lines = ntl.RunKaldiCommand('grep -e "{0}" {1}'.format(pattern, progress_log_files))[0]
parse_regex = re.compile(".*progress\.([0-9]+)\.log:LOG.*{0}.*\[(.*)\]".format(pattern))
for line in progress_log_lines.split("\n") :
mat_obj = parse_regex.search(line)
if mat_obj is None:
continue
groups = mat_obj.groups()
iteration = groups[0]
differences = ParseDifferenceString(groups[1])
component_names = component_names.union(differences.keys())
progress_per_iter[int(iteration)] = differences
component_names = list(component_names)
component_names.sort()
data = []
data.append(["iteration"]+component_names)
max_iter = max(progress_per_iter.keys())
for iter in range(max_iter + 1):
try:
component_dict = progress_per_iter[iter]
except KeyError:
continue
iter_values = []
for component_name in component_names:
try:
iter_values.append(component_dict[component_name])
except KeyError:
iter_values.append(0)
data.append([iter] + iter_values)
return data
def ParseTrainLogs(exp_dir):
train_log_files = "%s/log/train.*.log" % (exp_dir)
train_log_lines = ntl.RunKaldiCommand('grep -e Accounting {0}'.format(train_log_files))[0]
parse_regex = re.compile(".*train\.([0-9]+)\.([0-9]+)\.log:# Accounting: time=([0-9]+) thread.*")
train_times = {}
for line in train_log_lines.split('\n'):
mat_obj = parse_regex.search(line)
if mat_obj is not None:
groups = mat_obj.groups()
try:
train_times[int(groups[0])][int(groups[1])] = float(groups[2])
except KeyError:
train_times[int(groups[0])] = {}
train_times[int(groups[0])][int(groups[1])] = float(groups[2])
iters = train_times.keys()
for iter in iters:
values = train_times[iter].values()
train_times[iter] = max(values)
return train_times
def ParseProbLogs(exp_dir, key = 'accuracy'):
train_prob_files = "%s/log/compute_prob_train.*.log" % (exp_dir)
valid_prob_files = "%s/log/compute_prob_valid.*.log" % (exp_dir)
train_prob_strings = ntl.RunKaldiCommand('grep -e {0} {1}'.format(key, train_prob_files), wait = True)[0]
valid_prob_strings = ntl.RunKaldiCommand('grep -e {0} {1}'.format(key, valid_prob_files))[0]
parse_regex = re.compile(".*compute_prob_.*\.([0-9]+).log:LOG .nnet3.*compute-prob:PrintTotalStats..:nnet.*diagnostics.cc:[0-9]+. Overall ([a-zA-Z\-]+) for 'output'.*is ([0-9.\-e]+) .*per frame")
train_loss={}
valid_loss={}
for line in train_prob_strings.split('\n'):
mat_obj = parse_regex.search(line)
if mat_obj is not None:
groups = mat_obj.groups()
if groups[1] == key:
train_loss[int(groups[0])] = groups[2]
for line in valid_prob_strings.split('\n'):
mat_obj = parse_regex.search(line)
if mat_obj is not None:
groups = mat_obj.groups()
if groups[1] == key:
valid_loss[int(groups[0])] = groups[2]
iters = list(set(valid_loss.keys()).intersection(train_loss.keys()))
iters.sort()
return map(lambda x: (int(x), float(train_loss[x]), float(valid_loss[x])), iters)
def GenerateAccuracyReport(exp_dir, key = "accuracy"):
times = ParseTrainLogs(exp_dir)
data = ParseProbLogs(exp_dir, key)
report = []
report.append("%Iter\tduration\ttrain_loss\tvalid_loss\tdifference")
for x in data:
try:
report.append("%d\t%s\t%g\t%g\t%g" % (x[0], str(times[x[0]]), x[1], x[2], x[2]-x[1]))
except KeyError:
continue
total_time = 0
for iter in times.keys():
total_time += times[iter]
report.append("Total training time is {0}\n".format(str(datetime.timedelta(seconds = total_time))))
return ["\n".join(report), times, data]
| true | true |
1c2f3c6ca70178903816b9e32cbce3f9d96baa46 | 1,893 | py | Python | test/test_memory_unit_all_of.py | sdnit-se/intersight-python | 551f7685c0f76bb8af60ec83ffb6f9672d49a4ae | [
"Apache-2.0"
] | 21 | 2018-03-29T14:20:35.000Z | 2021-10-13T05:11:41.000Z | test/test_memory_unit_all_of.py | sdnit-se/intersight-python | 551f7685c0f76bb8af60ec83ffb6f9672d49a4ae | [
"Apache-2.0"
] | 14 | 2018-01-30T15:45:46.000Z | 2022-02-23T14:23:21.000Z | test/test_memory_unit_all_of.py | sdnit-se/intersight-python | 551f7685c0f76bb8af60ec83ffb6f9672d49a4ae | [
"Apache-2.0"
] | 18 | 2018-01-03T15:09:56.000Z | 2021-07-16T02:21:54.000Z | # coding: utf-8
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. # noqa: E501
The version of the OpenAPI document: 1.0.9-1295
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import intersight
from intersight.models.memory_unit_all_of import MemoryUnitAllOf # noqa: E501
from intersight.rest import ApiException
class TestMemoryUnitAllOf(unittest.TestCase):
"""MemoryUnitAllOf unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testMemoryUnitAllOf(self):
"""Test MemoryUnitAllOf"""
# FIXME: construct object with mandatory attributes with example values
# model = intersight.models.memory_unit_all_of.MemoryUnitAllOf() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 49.815789 | 1,052 | 0.777602 |
from __future__ import absolute_import
import unittest
import intersight
from intersight.models.memory_unit_all_of import MemoryUnitAllOf
from intersight.rest import ApiException
class TestMemoryUnitAllOf(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testMemoryUnitAllOf(self):
s
if __name__ == '__main__':
unittest.main()
| true | true |
1c2f3ca8fc71d788090cc8b3b9b4b65ce270ba1e | 497 | py | Python | datafactory/util/metrics.py | sdsc-bw/pre_processing | df4eea6d9191ecfe0697e00cf5c5990c9a348e58 | [
"MIT"
] | 1 | 2022-01-24T13:45:05.000Z | 2022-01-24T13:45:05.000Z | datafactory/util/metrics.py | sdsc-bw/pre_processing | df4eea6d9191ecfe0697e00cf5c5990c9a348e58 | [
"MIT"
] | 24 | 2021-07-13T09:36:25.000Z | 2022-03-02T15:01:50.000Z | datafactory/util/metrics.py | sdsc-bw/pre_processing | df4eea6d9191ecfe0697e00cf5c5990c9a348e58 | [
"MIT"
] | null | null | null | from sklearn.metrics import f1_score
from .constants import logger
def relative_absolute_error(pred, y):
dis = abs((pred-y)).sum()
dis2 = abs((y.mean() - y)).sum()
if dis2 == 0 :
return 1
return dis/dis2
def get_score(y_pred, y_test, mtype='C'):
if mtype == 'C':
score = f1_score(y_test, y_pred, average='weighted')
elif mtype == 'R':
score = 1 - relative_absolute_error(y_pred, y_test)
else:
logger.error('Unknown type of model') | 27.611111 | 60 | 0.617706 | from sklearn.metrics import f1_score
from .constants import logger
def relative_absolute_error(pred, y):
dis = abs((pred-y)).sum()
dis2 = abs((y.mean() - y)).sum()
if dis2 == 0 :
return 1
return dis/dis2
def get_score(y_pred, y_test, mtype='C'):
if mtype == 'C':
score = f1_score(y_test, y_pred, average='weighted')
elif mtype == 'R':
score = 1 - relative_absolute_error(y_pred, y_test)
else:
logger.error('Unknown type of model') | true | true |
1c2f3ceb96d4495575d36dcdf147a06a93bc9071 | 9,177 | py | Python | kaleidoscope/transformations.py | andrewasheridan/kaleidoscope | a84ffbec9dda98f438b0e94f1350d6c810031c94 | [
"MIT"
] | 1 | 2020-12-22T16:53:38.000Z | 2020-12-22T16:53:38.000Z | kaleidoscope/transformations.py | andrewasheridan/super-duper-chainsaw | a84ffbec9dda98f438b0e94f1350d6c810031c94 | [
"MIT"
] | null | null | null | kaleidoscope/transformations.py | andrewasheridan/super-duper-chainsaw | a84ffbec9dda98f438b0e94f1350d6c810031c94 | [
"MIT"
] | 1 | 2020-12-22T16:54:00.000Z | 2020-12-22T16:54:00.000Z | # _ __ _ _
# | | / _| | | (_)
# | |_ _ __ __ _ _ __ ___| |_ ___ _ __ _ __ ___ __ _| |_ _ ___ _ __ ___
# | __| '__/ _` | '_ \/ __| _/ _ \| '__| '_ ` _ \ / _` | __| |/ _ \| '_ \/ __|
# | |_| | | (_| | | | \__ \ || (_) | | | | | | | | (_| | |_| | (_) | | | \__ \
# \__|_| \__,_|_| |_|___/_| \___/|_| |_| |_| |_|\__,_|\__|_|\___/|_| |_|___/
#
"""
transformations.py
Various transformations used in the ImageAugmenter Class
"""
import cv2
import numpy as np
import random
def random_square_crop_with_resize(
image, new_side_len=400, interpolation=cv2.INTER_LANCZOS4
):
"""
Takes in a rectangular image, reshapes it to be a square based on the shortest side,
re-sizes the square have size `new_side_len`
:param image: (np.ndarray) - 2d, 3 channel image
:param new_side_len: side length of the output square image
:param interpolation: type of resizing interpolation
:return: re-sized square image
"""
height, width, _ = image.shape
# TODO: Catch these `zero-images` before passing into this func
# Sometimes an image is passed in that has 0 height or width
# I don't know why exactly, but they don't help, so they gotta go
if 0 in [height, width]:
return None
# FIXME: Code is repeated in both if conditions - abstract this
if height < width:
# Shrink proportionally so the shorter side is of size `new_side_len`
new_height = new_side_len
new_width = int(width * new_height / height)
image_resized = cv2.resize(
image, (new_width, new_height), interpolation=interpolation
)
# some images are already square, others need to be made square
if image_resized.shape[0] == image_resized.shape[1]:
image_square = image_resized
else:
# a square fits inside a rectangle - figure out the extra space in the image
extra_width = new_width - new_side_len
margin = extra_width // 2
rand_adjustment = random.randint(0, margin) * random.choice([1, -1])
# take into account the side length not being an even number
shift = 1 if margin % 2 == 1 else 0
# crop the rectangle down to a square
image_square = image_resized[
:, margin - rand_adjustment : -(margin + shift + rand_adjustment), :
]
try:
print(image.shape)
image_square = cv2.resize(
image_square, (new_side_len, new_side_len), interpolation=interpolation
)
except Exception as e:
print(e)
# FIXME: Code is repeated in both if conditions - abstract this
elif width < height:
# Shrink proportionally so the shorter side is of size `new_side_len`
new_width = new_side_len
new_height = int(height * new_width / width)
image_resized = cv2.resize(
image, (new_width, new_height), interpolation=interpolation
)
# some images are already square, others need to be made square
if image_resized.shape[0] == image_resized.shape[1]:
image_square = image_resized
else:
# a square fits inside a rectangle - figure out the extra space in the image
extra_height = new_height - new_side_len
margin = extra_height // 2
rand_adjustment = random.randint(0, margin) * random.choice([1, -1])
# take into account the side length not being an even number
shift = 1 if margin % 2 == 1 else 0
# crop the rectangle down to a square
image_square = image_resized[
margin - rand_adjustment : -(margin + shift + rand_adjustment), :, :
]
try:
print(image.shape)
image_square = cv2.resize(
image_square, (new_side_len, new_side_len), interpolation=interpolation
)
except Exception as e:
print(e)
else:
# the image is already a square, just resize it
image_square = cv2.resize(
image, (new_side_len, new_side_len), interpolation=interpolation
)
return image_square
def rotate_and_zoom(image):
"""
Rotates an image a random amount and zooms in so no blank areas are shown
:param image: Incoming image (already square)
:return: randomly rotated image
"""
side_len = image.shape[0]
angle = random.randint(1, 359)
if image.shape[0] > 0 and image.shape[1] > 0:
rotation_matrix = cv2.getRotationMatrix2D((side_len // 2, side_len // 2), angle, 1)
image = cv2.warpAffine(image, rotation_matrix, (side_len, side_len))
# Given a square image and a fixed `frame` size, when you rotate the image there are areas that will have zeros.
# To hide the blank areas we zoom the image in, but how much?
# This formula was found empirically. I assume there is some nice geometry that can provide a better answer.
# (side length / 8) * sin func that maxes at 45 deg * 1.41 (~ square_root(2))
x = abs(int(side_len // 8 * np.sin(np.deg2rad(45 + angle // 45 + angle % 45)) * 1.41))
image = image[x:-x, x:-x, :]
# the image is now smaller than it should be, because we have cut out the zeroes area
# resize back to the original size
image = cv2.resize(image, (side_len, side_len), interpolation=cv2.INTER_LANCZOS4)
return image
def adjust_contrast(image):
"""
Randomly adjust the contrast of an image (adjusts the alpha channel)
:param image: incoming image, should be square
:return: adjusted image
"""
# 0.5 <= alpha <= 2.0
# These values found empirically
alpha = 0.5 + 1.5 * random.random()
image = cv2.convertScaleAbs(image, alpha=alpha, beta=0)
return image
def adjust_brightness(image):
"""
Randomly adjust the brightness of an image (adjusts the beta channel)
:param image: incoming image, should be square
:return: adjusted image
"""
# 0 <= beta < 100
beta = random.random() * 100
image = cv2.convertScaleAbs(image, alpha=1, beta=beta)
return image
def adjust_saturation(image):
"""
Randomly adjust the saturation of an image
:param image: incoming image, should be square
:return: adjusted image
"""
# 0 <= saturation_adjustment < 3
saturation_adjustment = random.random() * 3
# break image into hue, saturation, and vibrance
img_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype("float32")
hue, saturation, vibrance = cv2.split(img_hsv)
# apply saturation adjustment to image
saturation = saturation * saturation_adjustment
saturation = np.clip(saturation, 0, 255)
img_hsv = cv2.merge([hue, saturation, vibrance])
# convert back to regular image format
image = cv2.cvtColor(img_hsv.astype("uint8"), cv2.COLOR_HSV2BGR)
return image
def flip_left_right(img):
"""
Flip the image along the vertical central axis
:param img: incoming image, should be square
:return: adjusted image
"""
img = np.fliplr(img)
return img
def noisy(image):
"""
Apply a randomly type of noise to the image
:param image: incoming image, should be square
:return: adjusted image
"""
# XXX: This function looks like a mess
# FIXME: Just make this better overall
# NOTE: Why is it all one function and not a few?
noise_type = random.choice(["gauss", "s&p", "speckle"])
if noise_type == "gauss":
# Adds gaussian noise to an image
height, width, channels = image.shape
# These values found empirically
mean = 0
var = 2
sigma = var ** 0.5
# generate gaussian noise
gauss = np.random.normal(mean, sigma, image.shape)
gauss = gauss.reshape(height, width, channels)
# apply noise to image
noisy_image = image + gauss
noisy_image = noisy_image.astype("uint8")
return noisy_image
elif noise_type == "s&p":
# Adds `salt and pepper` noise to an image
prob = 0.01
# def add_salt_and_pepper(gb, prob):
# random number used for selecting pixels to alter
rnd = np.random.rand(image.shape[0], image.shape[1], image.shape[2])
# not exactly clear what is happening here
noisy_image = image.copy()
noisy_image[rnd < prob] = 0
noisy_image[rnd > 1 - prob] = 255
return noisy_image
elif noise_type == "speckle":
# Adds `speckle` noise to an image
# How is this different from regular gaussian noise? I honestly don't recall. It does look different though...
height, width, channels = image.shape
gauss = np.random.randn(height, width, channels) / 255
gauss = gauss.reshape(height, width, channels)
noisy_image = image + image * gauss
noisy_image = noisy_image.astype("uint8")
return noisy_image
| 33.370909 | 120 | 0.610875 |
# | |_| | | (_| | | | \__ \ || (_) | | | | | | | | (_| | |_| | (_) | | | \__ \
# \__|_| \__,_|_| |_|___/_| \___/|_| |_| |_| |_|\__,_|\__|_|\___/|_| |_|___/
#
import cv2
import numpy as np
import random
def random_square_crop_with_resize(
image, new_side_len=400, interpolation=cv2.INTER_LANCZOS4
):
height, width, _ = image.shape
# TODO: Catch these `zero-images` before passing into this func
# Sometimes an image is passed in that has 0 height or width
# I don't know why exactly, but they don't help, so they gotta go
if 0 in [height, width]:
return None
# FIXME: Code is repeated in both if conditions - abstract this
if height < width:
# Shrink proportionally so the shorter side is of size `new_side_len`
new_height = new_side_len
new_width = int(width * new_height / height)
image_resized = cv2.resize(
image, (new_width, new_height), interpolation=interpolation
)
# some images are already square, others need to be made square
if image_resized.shape[0] == image_resized.shape[1]:
image_square = image_resized
else:
# a square fits inside a rectangle - figure out the extra space in the image
extra_width = new_width - new_side_len
margin = extra_width // 2
rand_adjustment = random.randint(0, margin) * random.choice([1, -1])
# take into account the side length not being an even number
shift = 1 if margin % 2 == 1 else 0
# crop the rectangle down to a square
image_square = image_resized[
:, margin - rand_adjustment : -(margin + shift + rand_adjustment), :
]
try:
print(image.shape)
image_square = cv2.resize(
image_square, (new_side_len, new_side_len), interpolation=interpolation
)
except Exception as e:
print(e)
# FIXME: Code is repeated in both if conditions - abstract this
elif width < height:
# Shrink proportionally so the shorter side is of size `new_side_len`
new_width = new_side_len
new_height = int(height * new_width / width)
image_resized = cv2.resize(
image, (new_width, new_height), interpolation=interpolation
)
# some images are already square, others need to be made square
if image_resized.shape[0] == image_resized.shape[1]:
image_square = image_resized
else:
# a square fits inside a rectangle - figure out the extra space in the image
extra_height = new_height - new_side_len
margin = extra_height // 2
rand_adjustment = random.randint(0, margin) * random.choice([1, -1])
# take into account the side length not being an even number
shift = 1 if margin % 2 == 1 else 0
# crop the rectangle down to a square
image_square = image_resized[
margin - rand_adjustment : -(margin + shift + rand_adjustment), :, :
]
try:
print(image.shape)
image_square = cv2.resize(
image_square, (new_side_len, new_side_len), interpolation=interpolation
)
except Exception as e:
print(e)
else:
# the image is already a square, just resize it
image_square = cv2.resize(
image, (new_side_len, new_side_len), interpolation=interpolation
)
return image_square
def rotate_and_zoom(image):
side_len = image.shape[0]
angle = random.randint(1, 359)
if image.shape[0] > 0 and image.shape[1] > 0:
rotation_matrix = cv2.getRotationMatrix2D((side_len // 2, side_len // 2), angle, 1)
image = cv2.warpAffine(image, rotation_matrix, (side_len, side_len))
# Given a square image and a fixed `frame` size, when you rotate the image there are areas that will have zeros.
# To hide the blank areas we zoom the image in, but how much?
# This formula was found empirically. I assume there is some nice geometry that can provide a better answer.
# (side length / 8) * sin func that maxes at 45 deg * 1.41 (~ square_root(2))
x = abs(int(side_len // 8 * np.sin(np.deg2rad(45 + angle // 45 + angle % 45)) * 1.41))
image = image[x:-x, x:-x, :]
# the image is now smaller than it should be, because we have cut out the zeroes area
# resize back to the original size
image = cv2.resize(image, (side_len, side_len), interpolation=cv2.INTER_LANCZOS4)
return image
def adjust_contrast(image):
# 0.5 <= alpha <= 2.0
# These values found empirically
alpha = 0.5 + 1.5 * random.random()
image = cv2.convertScaleAbs(image, alpha=alpha, beta=0)
return image
def adjust_brightness(image):
# 0 <= beta < 100
beta = random.random() * 100
image = cv2.convertScaleAbs(image, alpha=1, beta=beta)
return image
def adjust_saturation(image):
# 0 <= saturation_adjustment < 3
saturation_adjustment = random.random() * 3
# break image into hue, saturation, and vibrance
img_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype("float32")
hue, saturation, vibrance = cv2.split(img_hsv)
# apply saturation adjustment to image
saturation = saturation * saturation_adjustment
saturation = np.clip(saturation, 0, 255)
img_hsv = cv2.merge([hue, saturation, vibrance])
# convert back to regular image format
image = cv2.cvtColor(img_hsv.astype("uint8"), cv2.COLOR_HSV2BGR)
return image
def flip_left_right(img):
img = np.fliplr(img)
return img
def noisy(image):
# XXX: This function looks like a mess
# FIXME: Just make this better overall
# NOTE: Why is it all one function and not a few?
noise_type = random.choice(["gauss", "s&p", "speckle"])
if noise_type == "gauss":
# Adds gaussian noise to an image
height, width, channels = image.shape
# These values found empirically
mean = 0
var = 2
sigma = var ** 0.5
# generate gaussian noise
gauss = np.random.normal(mean, sigma, image.shape)
gauss = gauss.reshape(height, width, channels)
# apply noise to image
noisy_image = image + gauss
noisy_image = noisy_image.astype("uint8")
return noisy_image
elif noise_type == "s&p":
# Adds `salt and pepper` noise to an image
prob = 0.01
# def add_salt_and_pepper(gb, prob):
# random number used for selecting pixels to alter
rnd = np.random.rand(image.shape[0], image.shape[1], image.shape[2])
# not exactly clear what is happening here
noisy_image = image.copy()
noisy_image[rnd < prob] = 0
noisy_image[rnd > 1 - prob] = 255
return noisy_image
elif noise_type == "speckle":
# Adds `speckle` noise to an image
# How is this different from regular gaussian noise? I honestly don't recall. It does look different though...
height, width, channels = image.shape
gauss = np.random.randn(height, width, channels) / 255
gauss = gauss.reshape(height, width, channels)
noisy_image = image + image * gauss
noisy_image = noisy_image.astype("uint8")
return noisy_image
| true | true |
1c2f3d678fa05dc788e74d46380faf513e8b1297 | 5,714 | py | Python | src/04_visualization/licence_vis_synthesis.py | aakanksha023/EVAN | 981327e4e8c408144b409f1e39f207ad96376c2d | [
"MIT"
] | null | null | null | src/04_visualization/licence_vis_synthesis.py | aakanksha023/EVAN | 981327e4e8c408144b409f1e39f207ad96376c2d | [
"MIT"
] | null | null | null | src/04_visualization/licence_vis_synthesis.py | aakanksha023/EVAN | 981327e4e8c408144b409f1e39f207ad96376c2d | [
"MIT"
] | null | null | null | # author: Jasmine Qin
# date: 2020-06-09
"""
This script performs data wrangling and synthesizing needed for
visualization of the business licence file.
Usage: src/04_visualization/licence_vis_synthesis.py
"""
# load packages
import pandas as pd
import json
import re
from joblib import load
import warnings
warnings.filterwarnings("ignore")
def main():
# read data
licence_df = pd.read_csv(
"data/processed/03_cleaned_combined_licences.csv",
low_memory=False)
parking = pd.read_csv(
"data/raw/parking-meters.csv", sep=';')
disability_parking = pd.read_csv(
"data/raw/disability-parking.csv", sep=';')
# parking cleaning
parking = parking[['Geom', 'Geo Local Area']].rename(
columns={'Geo Local Area': 'LocalArea'})
disability_parking = disability_parking[[
'Geom', 'Geo Local Area']].rename(
columns={'Geo Local Area': 'LocalArea'})
# licence cleaning
# 1. remove null geom
licence_df = licence_df[licence_df['Geom'].notnull()]
# 2. remove unused columns
cols_not_used = ['business_id',
'LicenceRSN',
'LicenceNumber',
'LicenceRevisionNumber',
'Unit',
'UnitType',
'House',
'Street',
'Country',
'label']
licence_df = licence_df.drop(columns=cols_not_used)
# 3. remove null BusinessIndustry
licence_df = licence_df[licence_df.BusinessIndustry.notnull()]
# 4. FOLDERYEAR to int
licence_df['FOLDERYEAR'] = [int(i) for i in licence_df['FOLDERYEAR']]
licence_df = licence_df.sort_values('FOLDERYEAR')
# get coordinates
for df in [parking, disability_parking, licence_df]:
df["coord-x"] = df['Geom'].apply(
lambda p: json.loads(p)['coordinates'][0])
df["coord-y"] = df['Geom'].apply(
lambda p: json.loads(p)['coordinates'][1])
#################
# Aggregated df #
#################
df = pd.read_csv(
"data/processed/combined_licences.csv",
low_memory=False)
# organize FOLDERYEAR
df = df.loc[df.FOLDERYEAR.notnull()]
df['FOLDERYEAR'] = [y + 2000 if y >= 0 and y <
90 else y + 1900 for y in df.FOLDERYEAR]
df['ExtractDate'] = pd.to_datetime(df['ExtractDate'], errors='ignore')
df['IssuedDate'] = pd.to_datetime(df['IssuedDate'], errors='ignore')
df['ExpiredDate'] = pd.to_datetime(df['ExpiredDate'], errors='ignore')
df.loc[(df['FOLDERYEAR']
< 1997.0) & (df['IssuedDate'].dt.year == 1996.0)
& (df['ExpiredDate'].dt.year == 1997.0), 'FOLDERYEAR'] = 1997.0
df = df[~(df.FOLDERYEAR < 1997.0)]
df['FOLDERYEAR'] = [int(i) for i in df['FOLDERYEAR']]
df = df.sort_values(by=['business_id', 'FOLDERYEAR', 'ExtractDate'])
df = df[df.groupby(['business_id'])['FOLDERYEAR'].apply(
lambda x: ~(x.duplicated(keep='last')))]
# only Issued licences
df = df.query('Status == "Issued"')
# Industry mapping
mapping = pd.read_csv("src/02_clean_wrangle/business_mapping_dictionary.csv")
df = df.merge(mapping, on=["BusinessType"], how="left")
# Remove 2010 Winter games : Outlier
df = df[df.BusinessIndustry != 'Historic']
df = df[df.BusinessIndustry != 'Real estate and rental and leasing']
df = df[df.LocalArea.notnull()]
df = df[df.BusinessIndustry.notnull()]
agg_viz = pd.DataFrame(df.groupby([
'FOLDERYEAR', 'LocalArea',
'BusinessIndustry', 'BusinessType'])[
'business_id'].count()).reset_index()
agg_viz = agg_viz[~(agg_viz.BusinessType.str.contains(
r'\*Historic\*'))]
#############
# Modelling #
#############
train = pd.read_csv("data/processed/05_feat_eng_train.csv")
valid = pd.read_csv("data/processed/05_feat_eng_validate.csv")
model = load('results/final_model.joblib')
admin_cols = ["business_id", "BusinessName",
"BusinessTradeName", "Status",
"BusinessSubType", "label",
"BusinessIndustry",
"NextYearStatus", "Geom"]
X_train = train.drop(columns=admin_cols)
X_valid = valid.drop(columns=admin_cols)
train["predict"] = model.predict(X_train)
train['predict_proba'] = [
max(i) for i in model.predict_proba(X_train)]
valid["predict"] = model.predict(X_valid)
valid['predict_proba'] = [
max(i) for i in model.predict_proba(X_valid)]
train['type'] = ['train']*len(train)
valid['type'] = ['valid']*len(valid)
vis_model = pd.concat([train, valid])
vis_model['predicted_right'] = list(vis_model.label == vis_model.predict)
vis_model['predicted_right'] = [1 if i else -
1 for i in vis_model['predicted_right']]
vis_model['predict_proba'] = vis_model['predict_proba'] * \
vis_model['predicted_right']
# prepare shapely geom
vis_model = vis_model[vis_model.Geom.notnull()]
vis_model['coord-x'] = vis_model['Geom'].apply(
lambda p: json.loads(p)['coordinates'][0])
vis_model['coord-y'] = vis_model['Geom'].apply(
lambda p: json.loads(p)['coordinates'][1])
# save to files
vis_model.to_csv("data/processed/vis_model.csv", index=False)
licence_df.to_csv("data/processed/vis_licence.csv", index=False)
agg_viz.to_csv("data/processed/vis_agg_licence.csv", index=False)
parking.to_csv("data/processed/vis_parking.csv", index=False)
# disability_parking.to_csv(
# "data/processed/vis_disability_parking.csv", index=False)
if __name__ == "__main__":
main()
| 33.22093 | 81 | 0.611481 |
import pandas as pd
import json
import re
from joblib import load
import warnings
warnings.filterwarnings("ignore")
def main():
licence_df = pd.read_csv(
"data/processed/03_cleaned_combined_licences.csv",
low_memory=False)
parking = pd.read_csv(
"data/raw/parking-meters.csv", sep=';')
disability_parking = pd.read_csv(
"data/raw/disability-parking.csv", sep=';')
parking = parking[['Geom', 'Geo Local Area']].rename(
columns={'Geo Local Area': 'LocalArea'})
disability_parking = disability_parking[[
'Geom', 'Geo Local Area']].rename(
columns={'Geo Local Area': 'LocalArea'})
licence_df = licence_df[licence_df['Geom'].notnull()]
cols_not_used = ['business_id',
'LicenceRSN',
'LicenceNumber',
'LicenceRevisionNumber',
'Unit',
'UnitType',
'House',
'Street',
'Country',
'label']
licence_df = licence_df.drop(columns=cols_not_used)
licence_df = licence_df[licence_df.BusinessIndustry.notnull()]
licence_df['FOLDERYEAR'] = [int(i) for i in licence_df['FOLDERYEAR']]
licence_df = licence_df.sort_values('FOLDERYEAR')
for df in [parking, disability_parking, licence_df]:
df["coord-x"] = df['Geom'].apply(
lambda p: json.loads(p)['coordinates'][0])
df["coord-y"] = df['Geom'].apply(
lambda p: json.loads(p)['coordinates'][1])
df['ExtractDate'] = pd.to_datetime(df['ExtractDate'], errors='ignore')
df['IssuedDate'] = pd.to_datetime(df['IssuedDate'], errors='ignore')
df['ExpiredDate'] = pd.to_datetime(df['ExpiredDate'], errors='ignore')
df.loc[(df['FOLDERYEAR']
< 1997.0) & (df['IssuedDate'].dt.year == 1996.0)
& (df['ExpiredDate'].dt.year == 1997.0), 'FOLDERYEAR'] = 1997.0
df = df[~(df.FOLDERYEAR < 1997.0)]
df['FOLDERYEAR'] = [int(i) for i in df['FOLDERYEAR']]
df = df.sort_values(by=['business_id', 'FOLDERYEAR', 'ExtractDate'])
df = df[df.groupby(['business_id'])['FOLDERYEAR'].apply(
lambda x: ~(x.duplicated(keep='last')))]
df = df.query('Status == "Issued"')
mapping = pd.read_csv("src/02_clean_wrangle/business_mapping_dictionary.csv")
df = df.merge(mapping, on=["BusinessType"], how="left")
df = df[df.BusinessIndustry != 'Historic']
df = df[df.BusinessIndustry != 'Real estate and rental and leasing']
df = df[df.LocalArea.notnull()]
df = df[df.BusinessIndustry.notnull()]
agg_viz = pd.DataFrame(df.groupby([
'FOLDERYEAR', 'LocalArea',
'BusinessIndustry', 'BusinessType'])[
'business_id'].count()).reset_index()
agg_viz = agg_viz[~(agg_viz.BusinessType.str.contains(
r'\*Historic\*'))]
d('results/final_model.joblib')
admin_cols = ["business_id", "BusinessName",
"BusinessTradeName", "Status",
"BusinessSubType", "label",
"BusinessIndustry",
"NextYearStatus", "Geom"]
X_train = train.drop(columns=admin_cols)
X_valid = valid.drop(columns=admin_cols)
train["predict"] = model.predict(X_train)
train['predict_proba'] = [
max(i) for i in model.predict_proba(X_train)]
valid["predict"] = model.predict(X_valid)
valid['predict_proba'] = [
max(i) for i in model.predict_proba(X_valid)]
train['type'] = ['train']*len(train)
valid['type'] = ['valid']*len(valid)
vis_model = pd.concat([train, valid])
vis_model['predicted_right'] = list(vis_model.label == vis_model.predict)
vis_model['predicted_right'] = [1 if i else -
1 for i in vis_model['predicted_right']]
vis_model['predict_proba'] = vis_model['predict_proba'] * \
vis_model['predicted_right']
vis_model = vis_model[vis_model.Geom.notnull()]
vis_model['coord-x'] = vis_model['Geom'].apply(
lambda p: json.loads(p)['coordinates'][0])
vis_model['coord-y'] = vis_model['Geom'].apply(
lambda p: json.loads(p)['coordinates'][1])
vis_model.to_csv("data/processed/vis_model.csv", index=False)
licence_df.to_csv("data/processed/vis_licence.csv", index=False)
agg_viz.to_csv("data/processed/vis_agg_licence.csv", index=False)
parking.to_csv("data/processed/vis_parking.csv", index=False)
if __name__ == "__main__":
main()
| true | true |
1c2f3d6980edffcca618a76d1dba702fda36f9cb | 6,030 | py | Python | pyamg/krylov/_bicgstab.py | isuruf/pyamg | 9f831a4c49762983bd7d3d9adeaa3738ba79ff4e | [
"MIT"
] | null | null | null | pyamg/krylov/_bicgstab.py | isuruf/pyamg | 9f831a4c49762983bd7d3d9adeaa3738ba79ff4e | [
"MIT"
] | 1 | 2019-12-06T17:06:29.000Z | 2019-12-06T17:06:29.000Z | pyamg/krylov/_bicgstab.py | lexeyV/pyamg | cabbb008fa26d4c9d8c24decf06374185864c88b | [
"MIT"
] | null | null | null | import numpy as np
from scipy.sparse.linalg.isolve.utils import make_system
from pyamg.util.linalg import norm
__all__ = ['bicgstab']
def bicgstab(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None,
callback=None, residuals=None):
"""Biconjugate Gradient Algorithm with Stabilization.
Solves the linear system Ax = b. Left preconditioning is supported.
Parameters
----------
A : array, matrix, sparse matrix, LinearOperator
n x n, linear system to solve
b : array, matrix
right hand side, shape is (n,) or (n,1)
x0 : array, matrix
initial guess, default is a vector of zeros
tol : float
relative convergence tolerance, i.e. tol is scaled by ||r_0||_2
maxiter : int
maximum number of allowed iterations
xtype : type
dtype for the solution, default is automatic type detection
M : array, matrix, sparse matrix, LinearOperator
n x n, inverted preconditioner, i.e. solve M A A.H x = M b.
callback : function
User-supplied function is called after each iteration as
callback(xk), where xk is the current solution vector
residuals : list
residuals has the residual norm history,
including the initial residual, appended to it
Returns
-------
(xNew, info)
xNew : an updated guess to the solution of Ax = b
info : halting status of bicgstab
== ======================================
0 successful exit
>0 convergence to tolerance not achieved,
return iteration count instead.
<0 numerical breakdown, or illegal input
== ======================================
Notes
-----
The LinearOperator class is in scipy.sparse.linalg.interface.
Use this class if you prefer to define A or M as a mat-vec routine
as opposed to explicitly constructing the matrix. A.psolve(..) is
still supported as a legacy.
Examples
--------
>>> from pyamg.krylov.bicgstab import bicgstab
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> from pyamg.gallery import poisson
>>> A = poisson((10,10))
>>> b = np.ones((A.shape[0],))
>>> (x,flag) = bicgstab(A,b, maxiter=2, tol=1e-8)
>>> print norm(b - A*x)
4.68163045309
References
----------
.. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems,
Second Edition", SIAM, pp. 231-234, 2003
http://www-users.cs.umn.edu/~saad/books.html
"""
# Convert inputs to linear system, with error checking
A, M, x, b, postprocess = make_system(A, M, x0, b)
# Ensure that warnings are always reissued from this function
import warnings
warnings.filterwarnings('always', module='pyamg\.krylov\._bicgstab')
# Check iteration numbers
if maxiter is None:
maxiter = len(x) + 5
elif maxiter < 1:
raise ValueError('Number of iterations must be positive')
# Prep for method
r = b - A*x
normr = norm(r)
if residuals is not None:
residuals[:] = [normr]
# Check initial guess ( scaling by b, if b != 0,
# must account for case when norm(b) is very small)
normb = norm(b)
if normb == 0.0:
normb = 1.0
if normr < tol*normb:
return (postprocess(x), 0)
# Scale tol by ||r_0||_2
if normr != 0.0:
tol = tol*normr
# Is this a one dimensional matrix?
if A.shape[0] == 1:
entry = np.ravel(A*np.array([1.0], dtype=xtype))
return (postprocess(b/entry), 0)
rstar = r.copy()
p = r.copy()
rrstarOld = np.inner(rstar.conjugate(), r)
iter = 0
# Begin BiCGStab
while True:
Mp = M*p
AMp = A*Mp
# alpha = (r_j, rstar) / (A*M*p_j, rstar)
alpha = rrstarOld/np.inner(rstar.conjugate(), AMp)
# s_j = r_j - alpha*A*M*p_j
s = r - alpha*AMp
Ms = M*s
AMs = A*Ms
# omega = (A*M*s_j, s_j)/(A*M*s_j, A*M*s_j)
omega = np.inner(AMs.conjugate(), s)/np.inner(AMs.conjugate(), AMs)
# x_{j+1} = x_j + alpha*M*p_j + omega*M*s_j
x = x + alpha*Mp + omega*Ms
# r_{j+1} = s_j - omega*A*M*s
r = s - omega*AMs
# beta_j = (r_{j+1}, rstar)/(r_j, rstar) * (alpha/omega)
rrstarNew = np.inner(rstar.conjugate(), r)
beta = (rrstarNew / rrstarOld) * (alpha / omega)
rrstarOld = rrstarNew
# p_{j+1} = r_{j+1} + beta*(p_j - omega*A*M*p)
p = r + beta*(p - omega*AMp)
iter += 1
normr = norm(r)
if residuals is not None:
residuals.append(normr)
if callback is not None:
callback(x)
if normr < tol:
return (postprocess(x), 0)
if iter == maxiter:
return (postprocess(x), iter)
# if __name__ == '__main__':
# # from numpy import diag
# # A = random((4,4))
# # A = A*A.transpose() + diag([10,10,10,10])
# # b = random((4,1))
# # x0 = random((4,1))
# # %timeit -n 15 (x,flag) = bicgstab(A,b,x0,tol=1e-8,maxiter=100)
# from pyamg.gallery import stencil_grid
# from numpy.random import random
# A = stencil_grid([[0,-1,0],[-1,4,-1],[0,-1,0]],(100,100),
# dtype=float,format='csr')
# b = random((A.shape[0],))
# x0 = random((A.shape[0],))
#
# import time
# from scipy.sparse.linalg.isolve import bicgstab as ibicgstab
#
# print '\n\nTesting BiCGStab with %d x %d 2D Laplace Matrix' % \
# (A.shape[0],A.shape[0])
# t1=time.time()
# (x,flag) = bicgstab(A,b,x0,tol=1e-8,maxiter=100)
# t2=time.time()
# print '%s took %0.3f ms' % ('bicgstab', (t2-t1)*1000.0)
# print 'norm = %g'%(norm(b - A*x))
# print 'info flag = %d'%(flag)
#
# t1=time.time()
# (y,flag) = ibicgstab(A,b,x0,tol=1e-8,maxiter=100)
# t2=time.time()
# print '\n%s took %0.3f ms' % ('linalg bicgstab', (t2-t1)*1000.0)
# print 'norm = %g'%(norm(b - A*y))
# print 'info flag = %d'%(flag)
| 30.301508 | 75 | 0.567496 | import numpy as np
from scipy.sparse.linalg.isolve.utils import make_system
from pyamg.util.linalg import norm
__all__ = ['bicgstab']
def bicgstab(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None,
callback=None, residuals=None):
A, M, x, b, postprocess = make_system(A, M, x0, b)
import warnings
warnings.filterwarnings('always', module='pyamg\.krylov\._bicgstab')
if maxiter is None:
maxiter = len(x) + 5
elif maxiter < 1:
raise ValueError('Number of iterations must be positive')
r = b - A*x
normr = norm(r)
if residuals is not None:
residuals[:] = [normr]
normb = norm(b)
if normb == 0.0:
normb = 1.0
if normr < tol*normb:
return (postprocess(x), 0)
if normr != 0.0:
tol = tol*normr
if A.shape[0] == 1:
entry = np.ravel(A*np.array([1.0], dtype=xtype))
return (postprocess(b/entry), 0)
rstar = r.copy()
p = r.copy()
rrstarOld = np.inner(rstar.conjugate(), r)
iter = 0
while True:
Mp = M*p
AMp = A*Mp
alpha = rrstarOld/np.inner(rstar.conjugate(), AMp)
s = r - alpha*AMp
Ms = M*s
AMs = A*Ms
omega = np.inner(AMs.conjugate(), s)/np.inner(AMs.conjugate(), AMs)
x = x + alpha*Mp + omega*Ms
r = s - omega*AMs
rrstarNew = np.inner(rstar.conjugate(), r)
beta = (rrstarNew / rrstarOld) * (alpha / omega)
rrstarOld = rrstarNew
p = r + beta*(p - omega*AMp)
iter += 1
normr = norm(r)
if residuals is not None:
residuals.append(normr)
if callback is not None:
callback(x)
if normr < tol:
return (postprocess(x), 0)
if iter == maxiter:
return (postprocess(x), iter)
| true | true |
1c2f3e20153ac6c97651d529554e861b3d977580 | 899 | py | Python | lib/surface/spanner/operations/__init__.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | lib/surface/spanner/operations/__init__.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | 11 | 2020-02-29T02:51:12.000Z | 2022-03-30T23:20:08.000Z | lib/surface/spanner/operations/__init__.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | 1 | 2020-07-24T18:47:35.000Z | 2020-07-24T18:47:35.000Z | # -*- coding: utf-8 -*- #
# Copyright 2016 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command group for spanner operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
class Operations(base.Group):
"""Manage Cloud Spanner operations."""
pass
| 33.296296 | 74 | 0.765295 |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
class Operations(base.Group):
pass
| true | true |
1c2f3ec69e5661d66eaf27f41fb089bf715ab8cc | 21,579 | py | Python | sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/aio/_text_analytics_client_async.py | openapi-env-ppe/azure-sdk-for-python | 93c10270978f1d8c9d3728609866a8408c437630 | [
"MIT"
] | null | null | null | sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/aio/_text_analytics_client_async.py | openapi-env-ppe/azure-sdk-for-python | 93c10270978f1d8c9d3728609866a8408c437630 | [
"MIT"
] | 226 | 2019-07-24T07:57:21.000Z | 2019-10-15T01:07:24.000Z | sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/aio/_text_analytics_client_async.py | test-repo-tih/azure-sdk-for-python | 18bc3d8e57339ed6a444e38dd5cb3574cc5f9c69 | [
"MIT"
] | null | null | null | # coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from typing import ( # pylint: disable=unused-import
Union,
Optional,
Any,
List,
Dict,
TYPE_CHECKING
)
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.exceptions import HttpResponseError
from ._base_client_async import AsyncTextAnalyticsClientBase
from .._request_handlers import _validate_batch_input
from .._response_handlers import (
process_batch_error,
entities_result,
linked_entities_result,
key_phrases_result,
sentiment_result,
language_result
)
from .._models import (
DetectLanguageInput,
TextDocumentInput,
DetectLanguageResult,
RecognizeEntitiesResult,
RecognizeLinkedEntitiesResult,
ExtractKeyPhrasesResult,
AnalyzeSentimentResult,
DocumentError,
)
if TYPE_CHECKING:
from azure.core.credentials_async import AsyncTokenCredential
from azure.core.credentials import AzureKeyCredential
class TextAnalyticsClient(AsyncTextAnalyticsClientBase):
"""The Text Analytics API is a suite of text analytics web services built with best-in-class
Microsoft machine learning algorithms. The API can be used to analyze unstructured text for
tasks such as sentiment analysis, key phrase extraction, and language detection. No training data
is needed to use this API - just bring your text data. This API uses advanced natural language
processing techniques to deliver best in class predictions.
Further documentation can be found in
https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview
:param str endpoint: Supported Cognitive Services or Text Analytics resource
endpoints (protocol and hostname, for example: https://westus2.api.cognitive.microsoft.com).
:param credential: Credentials needed for the client to connect to Azure.
This can be the an instance of AzureKeyCredential if using a
cognitive services/text analytics API key or a token credential
from :mod:`azure.identity`.
:type credential: :class:`~azure.core.credentials.AzureKeyCredential`
or :class:`~azure.core.credentials_async.AsyncTokenCredential`
:keyword str default_country_hint: Sets the default country_hint to use for all operations.
Defaults to "US". If you don't want to use a country hint, pass the string "none".
:keyword str default_language: Sets the default language to use for all operations.
Defaults to "en".
.. admonition:: Example:
.. literalinclude:: ../samples/async_samples/sample_authentication_async.py
:start-after: [START create_ta_client_with_key_async]
:end-before: [END create_ta_client_with_key_async]
:language: python
:dedent: 8
:caption: Creating the TextAnalyticsClient with endpoint and API key.
.. literalinclude:: ../samples/async_samples/sample_authentication_async.py
:start-after: [START create_ta_client_with_aad_async]
:end-before: [END create_ta_client_with_aad_async]
:language: python
:dedent: 8
:caption: Creating the TextAnalyticsClient with endpoint and token credential from Azure Active Directory.
"""
def __init__( # type: ignore
self,
endpoint: str,
credential: Union["AzureKeyCredential", "AsyncTokenCredential"],
**kwargs: Any
) -> None:
super(TextAnalyticsClient, self).__init__(
endpoint=endpoint,
credential=credential,
**kwargs
)
self._default_language = kwargs.pop("default_language", "en")
self._default_country_hint = kwargs.pop("default_country_hint", "US")
@distributed_trace_async
async def detect_language( # type: ignore
self,
documents: Union[List[str], List[DetectLanguageInput], List[Dict[str, str]]],
**kwargs: Any
) -> List[Union[DetectLanguageResult, DocumentError]]:
"""Detect language for a batch of documents.
Returns the detected language and a numeric score between zero and
one. Scores close to one indicate 100% certainty that the identified
language is true. See https://aka.ms/talangs for the list of enabled languages.
See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits
for document length limits, maximum batch size, and supported text encoding.
:param documents: The set of documents to process as part of this batch.
If you wish to specify the ID and country_hint on a per-item basis you must
use as input a list[:class:`~azure.ai.textanalytics.DetectLanguageInput`] or a list of
dict representations of :class:`~azure.ai.textanalytics.DetectLanguageInput`, like
`{"id": "1", "country_hint": "us", "text": "hello world"}`.
:type documents:
list[str] or list[~azure.ai.textanalytics.DetectLanguageInput]
:keyword str country_hint: A country hint for the entire batch. Accepts two
letter country codes specified by ISO 3166-1 alpha-2. Per-document
country hints will take precedence over whole batch hints. Defaults to
"US". If you don't want to use a country hint, pass the string "none".
:keyword str model_version: This value indicates which model will
be used for scoring, e.g. "latest", "2019-10-01". If a model-version
is not specified, the API will default to the latest, non-preview version.
:keyword bool show_stats: If set to true, response will contain document
level statistics.
:return: The combined list of :class:`~azure.ai.textanalytics.DetectLanguageResult`
and :class:`~azure.ai.textanalytics.DocumentError` in the order the original documents
were passed in.
:rtype: list[~azure.ai.textanalytics.DetectLanguageResult,
~azure.ai.textanalytics.DocumentError]
:raises ~azure.core.exceptions.HttpResponseError or TypeError or ValueError:
.. admonition:: Example:
.. literalinclude:: ../samples/async_samples/sample_detect_language_async.py
:start-after: [START batch_detect_language_async]
:end-before: [END batch_detect_language_async]
:language: python
:dedent: 8
:caption: Detecting language in a batch of documents.
"""
country_hint_arg = kwargs.pop("country_hint", None)
country_hint = country_hint_arg if country_hint_arg is not None else self._default_country_hint
docs = _validate_batch_input(documents, "country_hint", country_hint)
model_version = kwargs.pop("model_version", None)
show_stats = kwargs.pop("show_stats", False)
try:
return await self._client.languages(
documents=docs,
model_version=model_version,
show_stats=show_stats,
cls=kwargs.pop("cls", language_result),
**kwargs
)
except HttpResponseError as error:
process_batch_error(error)
@distributed_trace_async
async def recognize_entities( # type: ignore
self,
documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
**kwargs: Any
) -> List[Union[RecognizeEntitiesResult, DocumentError]]:
"""Recognize entities for a batch of documents.
Identifies and categorizes entities in your text as people, places,
organizations, date/time, quantities, percentages, currencies, and more.
For the list of supported entity types, check: https://aka.ms/taner
See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits
for document length limits, maximum batch size, and supported text encoding.
:param documents: The set of documents to process as part of this batch.
If you wish to specify the ID and language on a per-item basis you must
use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
`{"id": "1", "language": "en", "text": "hello world"}`.
:type documents:
list[str] or list[~azure.ai.textanalytics.TextDocumentInput]
:keyword str language: The 2 letter ISO 639-1 representation of language for the
entire batch. For example, use "en" for English; "es" for Spanish etc.
If not set, uses "en" for English as default. Per-document language will
take precedence over whole batch language. See https://aka.ms/talangs for
supported languages in Text Analytics API.
:keyword str model_version: This value indicates which model will
be used for scoring, e.g. "latest", "2019-10-01". If a model-version
is not specified, the API will default to the latest, non-preview version.
:keyword bool show_stats: If set to true, response will contain document level statistics.
:return: The combined list of :class:`~azure.ai.textanalytics.RecognizeEntitiesResult` and
:class:`~azure.ai.textanalytics.DocumentError` in the order the original documents were
passed in.
:rtype: list[~azure.ai.textanalytics.RecognizeEntitiesResult,
~azure.ai.textanalytics.DocumentError]
:raises ~azure.core.exceptions.HttpResponseError or TypeError or ValueError:
.. admonition:: Example:
.. literalinclude:: ../samples/async_samples/sample_recognize_entities_async.py
:start-after: [START batch_recognize_entities_async]
:end-before: [END batch_recognize_entities_async]
:language: python
:dedent: 8
:caption: Recognize entities in a batch of documents.
"""
language_arg = kwargs.pop("language", None)
language = language_arg if language_arg is not None else self._default_language
docs = _validate_batch_input(documents, "language", language)
model_version = kwargs.pop("model_version", None)
show_stats = kwargs.pop("show_stats", False)
try:
return await self._client.entities_recognition_general(
documents=docs,
model_version=model_version,
show_stats=show_stats,
cls=kwargs.pop("cls", entities_result),
**kwargs
)
except HttpResponseError as error:
process_batch_error(error)
@distributed_trace_async
async def recognize_linked_entities( # type: ignore
self,
documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
**kwargs: Any
) -> List[Union[RecognizeLinkedEntitiesResult, DocumentError]]:
"""Recognize linked entities from a well-known knowledge base for a batch of documents.
Identifies and disambiguates the identity of each entity found in text (for example,
determining whether an occurrence of the word Mars refers to the planet, or to the
Roman god of war). Recognized entities are associated with URLs to a well-known
knowledge base, like Wikipedia.
See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits
for document length limits, maximum batch size, and supported text encoding.
:param documents: The set of documents to process as part of this batch.
If you wish to specify the ID and language on a per-item basis you must
use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
`{"id": "1", "language": "en", "text": "hello world"}`.
:type documents:
list[str] or list[~azure.ai.textanalytics.TextDocumentInput]
:keyword str language: The 2 letter ISO 639-1 representation of language for the
entire batch. For example, use "en" for English; "es" for Spanish etc.
If not set, uses "en" for English as default. Per-document language will
take precedence over whole batch language. See https://aka.ms/talangs for
supported languages in Text Analytics API.
:keyword str model_version: This value indicates which model will
be used for scoring, e.g. "latest", "2019-10-01". If a model-version
is not specified, the API will default to the latest, non-preview version.
:keyword bool show_stats: If set to true, response will contain document level statistics.
:return: The combined list of :class:`~azure.ai.textanalytics.RecognizeLinkedEntitiesResult`
and :class:`~azure.ai.textanalytics.DocumentError` in the order the original documents
were passed in.
:rtype: list[~azure.ai.textanalytics.RecognizeLinkedEntitiesResult,
~azure.ai.textanalytics.DocumentError]
:raises ~azure.core.exceptions.HttpResponseError or TypeError or ValueError:
.. admonition:: Example:
.. literalinclude:: ../samples/async_samples/sample_recognize_linked_entities_async.py
:start-after: [START batch_recognize_linked_entities_async]
:end-before: [END batch_recognize_linked_entities_async]
:language: python
:dedent: 8
:caption: Recognize linked entities in a batch of documents.
"""
language_arg = kwargs.pop("language", None)
language = language_arg if language_arg is not None else self._default_language
docs = _validate_batch_input(documents, "language", language)
model_version = kwargs.pop("model_version", None)
show_stats = kwargs.pop("show_stats", False)
try:
return await self._client.entities_linking(
documents=docs,
model_version=model_version,
show_stats=show_stats,
cls=kwargs.pop("cls", linked_entities_result),
**kwargs
)
except HttpResponseError as error:
process_batch_error(error)
@distributed_trace_async
async def extract_key_phrases( # type: ignore
self,
documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
**kwargs: Any
) -> List[Union[ExtractKeyPhrasesResult, DocumentError]]:
"""Extract key phrases from a batch of documents.
Returns a list of strings denoting the key phrases in the input
text. For example, for the input text "The food was delicious and there
were wonderful staff", the API returns the main talking points: "food"
and "wonderful staff"
See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits
for document length limits, maximum batch size, and supported text encoding.
:param documents: The set of documents to process as part of this batch.
If you wish to specify the ID and language on a per-item basis you must
use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
`{"id": "1", "language": "en", "text": "hello world"}`.
:type documents:
list[str] or list[~azure.ai.textanalytics.TextDocumentInput]
:keyword str language: The 2 letter ISO 639-1 representation of language for the
entire batch. For example, use "en" for English; "es" for Spanish etc.
If not set, uses "en" for English as default. Per-document language will
take precedence over whole batch language. See https://aka.ms/talangs for
supported languages in Text Analytics API.
:keyword str model_version: This value indicates which model will
be used for scoring, e.g. "latest", "2019-10-01". If a model-version
is not specified, the API will default to the latest, non-preview version.
:keyword bool show_stats: If set to true, response will contain document level statistics.
:return: The combined list of :class:`~azure.ai.textanalytics.ExtractKeyPhrasesResult` and
:class:`~azure.ai.textanalytics.DocumentError` in the order the original documents were
passed in.
:rtype: list[~azure.ai.textanalytics.ExtractKeyPhrasesResult,
~azure.ai.textanalytics.DocumentError]
:raises ~azure.core.exceptions.HttpResponseError or TypeError or ValueError:
.. admonition:: Example:
.. literalinclude:: ../samples/async_samples/sample_extract_key_phrases_async.py
:start-after: [START batch_extract_key_phrases_async]
:end-before: [END batch_extract_key_phrases_async]
:language: python
:dedent: 8
:caption: Extract the key phrases in a batch of documents.
"""
language_arg = kwargs.pop("language", None)
language = language_arg if language_arg is not None else self._default_language
docs = _validate_batch_input(documents, "language", language)
model_version = kwargs.pop("model_version", None)
show_stats = kwargs.pop("show_stats", False)
try:
return await self._client.key_phrases(
documents=docs,
model_version=model_version,
show_stats=show_stats,
cls=kwargs.pop("cls", key_phrases_result),
**kwargs
)
except HttpResponseError as error:
process_batch_error(error)
@distributed_trace_async
async def analyze_sentiment( # type: ignore
self,
documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
**kwargs: Any
) -> List[Union[AnalyzeSentimentResult, DocumentError]]:
"""Analyze sentiment for a batch of documents.
Returns a sentiment prediction, as well as sentiment scores for
each sentiment class (Positive, Negative, and Neutral) for the document
and each sentence within it.
See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits
for document length limits, maximum batch size, and supported text encoding.
:param documents: The set of documents to process as part of this batch.
If you wish to specify the ID and language on a per-item basis you must
use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
`{"id": "1", "language": "en", "text": "hello world"}`.
:type documents:
list[str] or list[~azure.ai.textanalytics.TextDocumentInput]
:keyword str language: The 2 letter ISO 639-1 representation of language for the
entire batch. For example, use "en" for English; "es" for Spanish etc.
If not set, uses "en" for English as default. Per-document language will
take precedence over whole batch language. See https://aka.ms/talangs for
supported languages in Text Analytics API.
:keyword str model_version: This value indicates which model will
be used for scoring, e.g. "latest", "2019-10-01". If a model-version
is not specified, the API will default to the latest, non-preview version.
:keyword bool show_stats: If set to true, response will contain document level statistics.
:return: The combined list of :class:`~azure.ai.textanalytics.AnalyzeSentimentResult` and
:class:`~azure.ai.textanalytics.DocumentError` in the order the original documents were
passed in.
:rtype: list[~azure.ai.textanalytics.AnalyzeSentimentResult,
~azure.ai.textanalytics.DocumentError]
:raises ~azure.core.exceptions.HttpResponseError or TypeError or ValueError:
.. admonition:: Example:
.. literalinclude:: ../samples/async_samples/sample_analyze_sentiment_async.py
:start-after: [START batch_analyze_sentiment_async]
:end-before: [END batch_analyze_sentiment_async]
:language: python
:dedent: 8
:caption: Analyze sentiment in a batch of documents.
"""
language_arg = kwargs.pop("language", None)
language = language_arg if language_arg is not None else self._default_language
docs = _validate_batch_input(documents, "language", language)
model_version = kwargs.pop("model_version", None)
show_stats = kwargs.pop("show_stats", False)
try:
return await self._client.sentiment(
documents=docs,
model_version=model_version,
show_stats=show_stats,
cls=kwargs.pop("cls", sentiment_result),
**kwargs
)
except HttpResponseError as error:
process_batch_error(error)
| 52.249395 | 118 | 0.667269 |
from typing import (
Union,
Optional,
Any,
List,
Dict,
TYPE_CHECKING
)
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.exceptions import HttpResponseError
from ._base_client_async import AsyncTextAnalyticsClientBase
from .._request_handlers import _validate_batch_input
from .._response_handlers import (
process_batch_error,
entities_result,
linked_entities_result,
key_phrases_result,
sentiment_result,
language_result
)
from .._models import (
DetectLanguageInput,
TextDocumentInput,
DetectLanguageResult,
RecognizeEntitiesResult,
RecognizeLinkedEntitiesResult,
ExtractKeyPhrasesResult,
AnalyzeSentimentResult,
DocumentError,
)
if TYPE_CHECKING:
from azure.core.credentials_async import AsyncTokenCredential
from azure.core.credentials import AzureKeyCredential
class TextAnalyticsClient(AsyncTextAnalyticsClientBase):
def __init__(
self,
endpoint: str,
credential: Union["AzureKeyCredential", "AsyncTokenCredential"],
**kwargs: Any
) -> None:
super(TextAnalyticsClient, self).__init__(
endpoint=endpoint,
credential=credential,
**kwargs
)
self._default_language = kwargs.pop("default_language", "en")
self._default_country_hint = kwargs.pop("default_country_hint", "US")
@distributed_trace_async
async def detect_language(
self,
documents: Union[List[str], List[DetectLanguageInput], List[Dict[str, str]]],
**kwargs: Any
) -> List[Union[DetectLanguageResult, DocumentError]]:
country_hint_arg = kwargs.pop("country_hint", None)
country_hint = country_hint_arg if country_hint_arg is not None else self._default_country_hint
docs = _validate_batch_input(documents, "country_hint", country_hint)
model_version = kwargs.pop("model_version", None)
show_stats = kwargs.pop("show_stats", False)
try:
return await self._client.languages(
documents=docs,
model_version=model_version,
show_stats=show_stats,
cls=kwargs.pop("cls", language_result),
**kwargs
)
except HttpResponseError as error:
process_batch_error(error)
@distributed_trace_async
async def recognize_entities(
self,
documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
**kwargs: Any
) -> List[Union[RecognizeEntitiesResult, DocumentError]]:
language_arg = kwargs.pop("language", None)
language = language_arg if language_arg is not None else self._default_language
docs = _validate_batch_input(documents, "language", language)
model_version = kwargs.pop("model_version", None)
show_stats = kwargs.pop("show_stats", False)
try:
return await self._client.entities_recognition_general(
documents=docs,
model_version=model_version,
show_stats=show_stats,
cls=kwargs.pop("cls", entities_result),
**kwargs
)
except HttpResponseError as error:
process_batch_error(error)
@distributed_trace_async
async def recognize_linked_entities(
self,
documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
**kwargs: Any
) -> List[Union[RecognizeLinkedEntitiesResult, DocumentError]]:
language_arg = kwargs.pop("language", None)
language = language_arg if language_arg is not None else self._default_language
docs = _validate_batch_input(documents, "language", language)
model_version = kwargs.pop("model_version", None)
show_stats = kwargs.pop("show_stats", False)
try:
return await self._client.entities_linking(
documents=docs,
model_version=model_version,
show_stats=show_stats,
cls=kwargs.pop("cls", linked_entities_result),
**kwargs
)
except HttpResponseError as error:
process_batch_error(error)
@distributed_trace_async
async def extract_key_phrases(
self,
documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
**kwargs: Any
) -> List[Union[ExtractKeyPhrasesResult, DocumentError]]:
language_arg = kwargs.pop("language", None)
language = language_arg if language_arg is not None else self._default_language
docs = _validate_batch_input(documents, "language", language)
model_version = kwargs.pop("model_version", None)
show_stats = kwargs.pop("show_stats", False)
try:
return await self._client.key_phrases(
documents=docs,
model_version=model_version,
show_stats=show_stats,
cls=kwargs.pop("cls", key_phrases_result),
**kwargs
)
except HttpResponseError as error:
process_batch_error(error)
@distributed_trace_async
async def analyze_sentiment(
self,
documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
**kwargs: Any
) -> List[Union[AnalyzeSentimentResult, DocumentError]]:
language_arg = kwargs.pop("language", None)
language = language_arg if language_arg is not None else self._default_language
docs = _validate_batch_input(documents, "language", language)
model_version = kwargs.pop("model_version", None)
show_stats = kwargs.pop("show_stats", False)
try:
return await self._client.sentiment(
documents=docs,
model_version=model_version,
show_stats=show_stats,
cls=kwargs.pop("cls", sentiment_result),
**kwargs
)
except HttpResponseError as error:
process_batch_error(error)
| true | true |
1c2f3ed030714ef125b6db7b53b582104d5a5c35 | 221 | py | Python | setup.py | Golden-Retrieval/AI-Vision | eab471fe40ce17178e2eebdb7f41666229e85873 | [
"Unlicense"
] | 64 | 2018-12-17T04:35:36.000Z | 2019-04-07T04:58:17.000Z | setup.py | Golden-Retrieval/AI-Vision | eab471fe40ce17178e2eebdb7f41666229e85873 | [
"Unlicense"
] | 307 | 2018-12-20T06:46:24.000Z | 2019-04-09T04:38:35.000Z | setup.py | Golden-Retrieval/AI-Vision | eab471fe40ce17178e2eebdb7f41666229e85873 | [
"Unlicense"
] | 47 | 2018-12-30T12:49:43.000Z | 2019-05-06T06:51:44.000Z | #nsml: nsml/ml:cuda9.0-cudnn7-tf-1.11torch0.4keras2.2
from distutils.core import setup
setup(
name='nsml vision hackathon',
version='1.0',
description='nsml vision hackathon',
install_requires=[
]
)
| 18.416667 | 53 | 0.687783 |
from distutils.core import setup
setup(
name='nsml vision hackathon',
version='1.0',
description='nsml vision hackathon',
install_requires=[
]
)
| true | true |
1c2f40240c79b048a674983f13443ad88f30d61c | 54,916 | py | Python | Syntney.py | lotts/Syntney | 926f87937b6bcde679f27a89973cbd6c974c7c9f | [
"MIT"
] | null | null | null | Syntney.py | lotts/Syntney | 926f87937b6bcde679f27a89973cbd6c974c7c9f | [
"MIT"
] | null | null | null | Syntney.py | lotts/Syntney | 926f87937b6bcde679f27a89973cbd6c974c7c9f | [
"MIT"
] | null | null | null | import os
from Bio import SeqIO
import numpy as np
#from colour import Color
import argparse
import subprocess
from subprocess import run, PIPE
from ete3 import *
import shutil
import sys
import tempfile
import re
def check_NCBI_format(fasta_header):
tmp_header = ""
p = re.compile(r'.{0,20}:c?\d*-\d*')
q = re.compile(r'.{0,20}/\d*-\d*')
m = p.match(fasta_header)
n = q.match(fasta_header)
if m != None:
if m.span()[0] == 0:
return fasta_header
elif n != None:
if n.span()[0] == 0:
header_arr = fasta_header.split(" ")
tmp_header_arr = header_arr[0].split("/")
tmp_id = tmp_header_arr[0]
tmp_coords = tmp_header_arr[1].split('-')
if int(tmp_coords[0]) <= int(tmp_coords[1]):
out_str = str(tmp_id) + ":" + str(tmp_coords[0]) + "-" + str(tmp_coords[1]) + " " + str(" ".join(header_arr[1:]))
return out_str
else:
out_str = str(tmp_id) + ":c" + str(tmp_coords[1]) + "-" + str(tmp_coords[0]) + " " + str(" ".join(header_arr[1:]))
return out_str
else:
raise Exception()
def check_input_consistency(fasta_file, sqlite_handler):
f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
tmp_file = f.name
count = 0
try:
# FASTA Format
with open(fasta_file, "rU") as handle:
for record in SeqIO.parse(handle, "fasta"):
new_header = check_NCBI_format(record.description)
f.write(">" + str(new_header) + "\n")
f.write(str(record.seq) + "\n")
count += 1
# check if file format correspond to a 12 column blast output
if count == 0:
build_string = ""
with open(fasta_file, "rU") as handle:
for line in handle:
line = line.rstrip()
tmp_arr = line.split("\t")
if len(tmp_arr) == 12:
seq_id = tmp_arr[1]
start_pos = int(tmp_arr[8])
end_pos = int(tmp_arr[9])
if start_pos <= end_pos:
build_string += str(seq_id) + "@" + str(start_pos) + "@" + str(end_pos) + "@" + "+" + " "
else:
build_string += str(seq_id) + "@" + str(end_pos) + "@" + str(start_pos) + "@" + "-" + " "
count += 1
else:
count = 0
break
# get FASTA from DB
if count > 0:
fasta_string = subprocess.getoutput("python3 " + str(sqlite_handler) + " -pdna " + str(build_string))
f.write(fasta_string)
f.close()
# no supported file format can be detected
if count == 0:
raise Exception()
except:
sys.stderr.write("ERROR => Input format does not contain the expected FASTA format (NCBI or RFAM style)." + "\n" +
"Allowed formats are:" + "\n" +
"(a)" + "\n" +
"><ID>:<start coordinate>-<end coordinate> <some comments>" + "\n" +
"<Sequence>" + "\n" +
"if the sequence is encoded on the -strand:" + "\n" +
"><ID>:c<start coordinate>-<end coordinate> <some comments>" + "\n" +
"(b)" + "\n" +
"><ID>/<start coordinate>-<end coordinate> <some comments>" + "\n" +
"<Sequence>" + "\n" +
"(c)" + "\n" +
"12 column BLAST Table" + "\n"
)
exit()
return tmp_file
# produces synteny file and cluster file with an R script. Therefore uses the input network fasta file and test fasta
# file. Synteny window is used for the extraction window and is per default set to 5000
# returns a dictionary of network_ids and test_ids.
# input:
# network_file: path to file used for network construction
# test_file: path to fasta file with questionable sequences
# wdir: working directory where temporary files get stored
# output:
# network_ids, test_ids: {sequence_id: [seq_desciption, seq_sequence]}
# clusterfile: clusterfile that was produced by R script
# syntenyfile: syntenyfile that was produced by R script
# r_script_path: path to the synteny clustering R script
# synteny_window: up and downstream number of bp of sequence that is searched for protein coding sequences
def run_r_script(network_file, test_file, r_script_path, sql_db_path, sql_script_path, num_threads, synteny_window=str(5000)):
network_ids = dict()
fasta_header_network = dict()
seqString = "#Network" + "\n"
for seq_record in SeqIO.parse(network_file, "fasta"):
seqString += ">" + str(seq_record.description) + "\n"
seqString += str(seq_record.seq) + "\n"
seq_id = seq_record.description
# fasta_header_network used for filtering the output from the R-Script
tmp_fasta_header = ">" + str(seq_id)
fasta_header_network[tmp_fasta_header] = 0
seq_id = seq_id.split("-")
if len(seq_id) == 1:
pass
else:
seq_id = seq_id[0]
seq_id = seq_id.split(":")
if len(seq_id) > 1:
if seq_id[1].startswith("c"):
seq_id[1] = seq_id[1][1:]
seq_id = seq_id[0] + "_" + seq_id[1]
network_ids.update({seq_id: [seq_record.description, seq_record.seq]})
test_ids = dict()
if test_file is not None:
seqString += "#Test" + "\n"
for seq_record in SeqIO.parse(test_file, "fasta"):
seq_id = seq_record.description
seq_id = seq_id.split("-")
if len(seq_id) == 1:
pass
else:
seq_id = seq_id[0]
seq_id = seq_id.split(":")
if len(seq_id) > 1:
if seq_id[1].startswith("c"):
seq_id[1] = seq_id[1][1:]
seq_id = seq_id[0] + "_" + seq_id[1]
##if seq_id not in network_ids:
test_ids.update({seq_id: [seq_record.description, seq_record.seq]})
seqString += ">" + str(seq_record.description) + "\n"
seqString += str(seq_record.seq) + "\n"
else:
test_ids = None
try:
f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
f_path = f.name
f.write(seqString)
f.close()
proc = subprocess.run(["R", "--slave", "-f " + r_script_path, "--args", "filename=" + f_path, "synteny_window=" + synteny_window, "script_path=" + sql_script_path, "db_path=" + sql_db_path, "threads=" + str(num_threads), "write_files=FALSE"], universal_newlines=True, stdout=subprocess.PIPE, check=True)
finally:
# remove tmp file
os.unlink(f.name)
master_table = proc.stdout.split("\n")
# sort master table into subtables - syntenyfile_cluster_table - syntenyfile_synteny_table - network_annotation_table
syntenyfile_cluster_table = list()
syntenyfile_synteny_table = list()
network_annotation_table = list()
missing_ids_table = list()
rRNA_network_table = list()
rRNA_lookup = dict()
list_name = ""
for i in range(0, len(master_table)):
if master_table[i].startswith("#"):
list_name = "-"
if list_name == "#cluster_table":
syntenyfile_cluster_table.append(master_table[i])
if list_name == "#synteny_table":
syntenyfile_synteny_table.append(master_table[i])
if list_name == "#network_annotation":
network_annotation_table.append(master_table[i])
if list_name == "#missing_data":
missing_ids_table.append(master_table[i])
if list_name == "#16S_RNA":
tmp_entry = master_table[i].split("\t")
if tmp_entry[0] in fasta_header_network:
rRNA_network_table.append(tmp_entry[0])
rRNA_network_table.append(tmp_entry[1])
rRNA_lookup[tmp_entry[0]] = 0
if master_table[i].startswith("#cluster_table"):
list_name = "#cluster_table"
if master_table[i].startswith("#synteny_table"):
list_name = "#synteny_table"
if master_table[i].startswith("#network_annotation"):
list_name = "#network_annotation"
if master_table[i].startswith("#missing_data"):
list_name = "#missing_data"
if master_table[i].startswith("#16S_RNA"):
list_name = "#16S_RNA"
return syntenyfile_cluster_table, syntenyfile_synteny_table, network_annotation_table, missing_ids_table, rRNA_network_table, network_ids, test_ids
# produces a dictionary from the identifiers of sRNAs (ids). Identifiers must be like "Accessionnumber" + underscore +
# "starting position of hit" (e.g. CP001291.1_4248628). The synteny dictionary (synteny_dict) contains sRNA surrounding
# proteins. The returned dict has the following topology:
# input:
# ids dict of ids for which a synteny_dict is created
# r_script_synteny_table synteny master table produced by run_r_script()
#
#
# output:
# {CP001291.1_4248628: [{Protein1: "position to sRNA",
# Protein2: 2, (upstream proteins)
# Protein3: 1},
# {...}} (downstream proteins)
# infile specifies a synteny_table file from the synteny clustering R script.
def get_synteny_dict(ids, r_script_synteny_table):
synteny_dict = dict()
for line in r_script_synteny_table:
handle = line.split("\t")
seq_id = handle[0]
if seq_id not in synteny_dict:
if seq_id in ids: # checks whether the id from master table should be added to the synteny_dict
synteny_dict.update({seq_id: []})
proteins = handle[4].split(",") # all surrounding proteins
positions = handle[5].split(",") # positions of all surrounding proteins positions[x] is position of proteins[x]
downstream_dict = {} # dict of proteins downstream the sRNA ({protein: position, ... })
upstream_dict = {} # proteins upstream the sRNA
switch = 0 # needed to switch at position 1 from upstream to downstream
for x in range(len(proteins)): # adds proteins to down and upstream dict
if int(positions[x]) < 10:
if switch == 0:
if positions[x] == "1":
switch = 1
if switch == 2:
downstream_dict.update({proteins[x]: positions[x]})
if switch < 2:
upstream_dict.update({proteins[x]: positions[x]})
if switch == 1:
switch = 2
synteny_dict[seq_id].append(upstream_dict)
synteny_dict[seq_id].append(downstream_dict)
return synteny_dict
# Returns a cluster_dict where proteins from the synteny_table point on their clusters.
# {Protein1: Cluster_1,
# Protein2: Cluster_2}
# The infile is the cluster_table produced by the synteny R script.
def get_clusters(r_script_cluster_table):
cluster_dict = dict()
for line in r_script_cluster_table:
if line.startswith("cluster"):
handle = line.split("\t")
name = handle[0]
cluster = handle[1].split(",")
for element in cluster:
cluster_dict.update({element: name})
return cluster_dict
# Uses a synteny_dict and a cluster_dict to APPEND clusters matching the proteins of entries in a synteny_dict.
# {CP001291.1_4248628: [{Protein1: 3, ...} upstream Proteins
# {Protein4: 1, ...} downstream Proteins
# [Cluster_1, Cluster_2, ...] upstream cluster - positions equal position in array - APPENDED
# [CLuster_5, ...] downstream cluster - positions equal position in array - APPENDED
def add_cluster_to_synteny_table(synteny_dict, cluster_dict, number_of_clusters):
count = 0
for entry in synteny_dict:
up_proteins = synteny_dict[entry][0]# the upstream proteins of the considered sRNA
down_proteins = synteny_dict[entry][1] # the downstream proteins of the considered sRNA
up_cluster = [] # creates a list of upstream clusters
down_cluster = ["sRNA"] # adds sRNA to downstream clusters
for protein in up_proteins:
try:
cluster = cluster_dict[protein]
up_cluster.append(cluster)
count += 1
except KeyError:
print("Cluster not found") # proteins without annotated aminoacid sequence do not have clusters
for protein in down_proteins:
try:
cluster = cluster_dict[protein]
down_cluster.append(cluster)
count += 1
except KeyError: # proteins without annotated aminoacid sequence do not have clusters
print("Cluster not found")
up_cluster.append("sRNA")
down_cluster = down_cluster[0:number_of_clusters] # the number of clusters used in this approach
up_cluster = list(reversed(up_cluster))[0:number_of_clusters] # the number of clusters used in this approach
synteny_dict[entry].append(up_cluster)
synteny_dict[entry].append(down_cluster)
return synteny_dict
# produces a network from a synteny_dict.
# output:
# network: {cluster: {connected_cluster1: [number of appearance of this node, [list of Accessions with this node]],
# connected_cluster2: [...]}
def build_network(synteny_dict):
network = dict()
for entry in synteny_dict:
upcluster = synteny_dict[entry][2] # upstream cluster of a sRNA in the synteny_dict
prev_cluster = 0 # previous cluster is 0 for first iteration
for cluster in upcluster: # assigns node connections to the network
if prev_cluster != 0:
if prev_cluster == cluster: # prevents loops in the network
pass
else: # assigns the connection between cluster and previous cluster to the network
if cluster not in network:
network.update({cluster: [{prev_cluster: [1, [entry]]}, [entry]]})
else:
network[cluster][1].append(entry)
if prev_cluster not in network[cluster][0]:
network[cluster][0].update({prev_cluster: [1, [entry]]})
else:
network[cluster][0][prev_cluster][0] += 1
network[cluster][0][prev_cluster][1].append(entry)
prev_cluster = cluster # previous cluster is the cluster of the earlier iteration
prev_cluster = 0
downcluster = synteny_dict[entry][3] # downstream cluster of a sRNA in the synteny_dict
for cluster in downcluster:
if prev_cluster != 0:
if prev_cluster == cluster:
pass
else:
if cluster not in network:
network.update({cluster: [{prev_cluster: [1, [entry]]}, [entry]]})
else:
network[cluster][1].append(entry)
if prev_cluster not in network[cluster][0]:
network[cluster][0].update({prev_cluster: [1, [entry]]})
else:
network[cluster][0][prev_cluster][0] += 1
network[cluster][0][prev_cluster][1].append(entry)
prev_cluster = cluster
return network
# builds and returns a ete3 tree from the sRNA sequences from a "fasta" infile (should be the trustable GLASSgo file).
# as the tree is built with numbers instead of the identifier ("accession id"_"starting nucleotide"), also a tree_iddict
# is returned where the id points on the corresponding number.
def tree_construction(rRNA_data, n_threads):
count = 0
tree_iddict = dict()
forbidden = set()
# produces a FASTA with numbers instead of original headers and a tree_iddict that is used to get the number
# from an identifier
try:
tmp_fasta = tempfile.NamedTemporaryFile(mode='w+', delete=False)
tmp_header = ""
skip = False
for line in rRNA_data:
if line.startswith(">"):
line = line[1:]
if line not in forbidden:
seq_id = line.split(":")
if len(seq_id) > 1:
pos = seq_id[1].split("-")[0]
if pos.startswith("c"):
pos = pos[1::]
seq_id = seq_id[0] + "_" + pos
tree_iddict.update({seq_id: str(count)})
tmp_header = str(count)
count += 1
forbidden.add(line)
else:
skip = True
else:
if skip is False:
tmp_fasta.write(">" + str(tmp_header) + "\n" + str(line) + "\n")
else:
skip = False
tmp_fasta.close()
# produces a distance matrix from the numbered FASTA via clustalo
tmp_clustalo = tempfile.NamedTemporaryFile(delete=False)
os.system("clustalo --in " + str(tmp_fasta.name) + " --distmat-out=" + str(tmp_clustalo.name) + " --threads=" + str(n_threads) + " --full --force > /dev/null")
# uses quicktree to built a tree from the distance matrix and removes the distance matrix
tmp_quicktree = tempfile.NamedTemporaryFile(delete=False)
os.system("quicktree -in m " + str(tmp_clustalo.name) + " > " + str(tmp_quicktree.name))
# produces a ete3 object from the tree and removes the treefile and the tree FASTA
f = open(tmp_quicktree.name, "r")
tree = str()
for line in f:
line = line.rstrip()
tree = tree + line
f.close()
tree = Tree(tree, format=1)
except:
print("Unexpected error:", sys.exc_info()[0])
finally:
os.unlink(tmp_fasta.name)
os.unlink(tmp_clustalo.name)
os.unlink(tmp_quicktree.name)
return tree_iddict, tree
# returns the whole length of an ete3 tree
def whole_tree_length(tree):
leng = 0
for node in tree.iter_descendants():
leng += node.dist
return leng
# calculates the sum of branches of a list with accessions. As the tree is built from identifiers also an tree iddict needs to be
# passed to convert the accession numbers into corresponding numbers. Returns the sum of branches containing all the edges to
# the lowest common ancestor of the passed accessionlist as well as the edge poitning to the parent node of the lca.
# identifier = "Accession_number1"_"startingnucleotide"
# input:
# tree = ete3.Tree
# accessionlist = [identifier, ...]
# tree_iddict = {identifier: number that was used for the identifier in tree_construction}
# output:
# sob = float(sum of branches)
def sum_of_branches(tree, accessions_list, tree_iddict):
acc_ids = []
for entry in accessions_list: # writes identifier to numbers that were used in treeconstruction
acc_ids.append(tree_iddict[entry])
accessions = tuple(acc_ids)
if len(accessions) > 1:
n1 = tree.get_common_ancestor(accessions) # nl is the n(ode) of the l(ca)
sob = n1.dist # adds distance to parent of lca
lookup = set()
for element in acc_ids: # sums up the branches to the leafs of the passes identifiers
leaf = tree.get_leaves_by_name(element)[0]
while leaf != n1 and leaf not in lookup:
sob = sob + leaf.dist
lookup.add(leaf)
leaf = leaf.up
else: # if only one identifier is passed, only the branch to its parent node is returned as sob
node = tree.search_nodes(name=acc_ids[0])[0]
parent = node.up
dist = tree.get_distance(node, parent)
sob = dist
return sob
# needs a network as input and calculates the sum of outgoing connection weights for each node.
# This number is then added to each node in the network:
# input:
# {cluster: {connected_cluster1: [normalized weight of this connection, [list of Accessions with this node]],
# connected_cluster2: [...]}
# output:
# {Cluster1: [outgoing connections, {connected_cluster1: ...}]}
# should be used after normalization of conections
def add_outgoing_connection_weights(network):
for cluster in network:
outgoing = 0
for connected_cluster in network[cluster][0]:
outgoing += network[cluster][0][connected_cluster][0]
network.update({cluster: [outgoing] + network[cluster]})
return network
# Normalizes the number of connections in a network with a sum of branches approach on a tree built from the
# sRNA sequences. infile is passed to the tree construction function and is therefore the fasta file of network sRNAs.
# The returned network has normalized outgoing connections and normalized number of connections.
# input:
# wdir: place where temporary files are stored
# network_file: FASTA file that was used for network construction
# network: {cluster: {connected_cluster1: [number of appearance of this connection, [list of Accessions with this connection]],
# connected_cluster2: [...]}, [list of Accessions with "cluster"]}
# output:
# network {cluster: [normalized sum of outgoing connections, {connected_cluster1: [normalized weight of this connection,
# [list of accessions with this connection], sum of branches weight], ...}[list of Accessions with "cluster"]}
def normalize_connections(rRNA_data, network, n_threads):
###tree_iddict, tree = tree_construction(wdir, network_file)
tree_iddict, tree = tree_construction(rRNA_data, n_threads)
treelength = whole_tree_length(tree) # whole tree length
values = []
zeroweights = [] # stores connections with a weight of 0
for cluster in network:
for connectedcluster in network[cluster][0]:
accessions = network[cluster][0][connectedcluster][1]
sob = sum_of_branches(tree, accessions, tree_iddict)
if treelength == 0:
value = 1
else:
value = sob/treelength
if value != 0:
values.append(value)
network[cluster][0][connectedcluster][0] = value
if value == 0:
zeroweights.append([cluster, connectedcluster])
if len(values) > 0:
minimum = min(values)
else:
minimum = 1
for entry in zeroweights:
network[entry[0]][0][entry[1]][0] = minimum
add_outgoing_connection_weights(network) # sums up the weights of outgoing connections and assigns them to the network
for cluster in network: # splits up connection weights to an percentage value of their importance
for connected_cluster in network[cluster][1]:
network[cluster][1][connected_cluster].append(network[cluster][1][connected_cluster][0]) # appends the sob
# weight that can be used for SV calculation instead of the weights used for PageRank
network[cluster][1][connected_cluster][0] = \
network[cluster][1][connected_cluster][0] / network[cluster][0]
return network, tree, tree_iddict
# adds a teleport probability for PageRank usage to the network:
# input:
# tree: ete3 tree from FASTA used for Network construction
# tree_iddict: iddict for identifier number from headers of the Network FASTA file
# network: {cluster: {connected_cluster1: [number of appearance of this connection, [list of Accessions with this connection]],
# connected_cluster2: [...]}, [list of Accessions with "cluster"]]}
# output:
# network {cluster: [normalized sum of outgoing connections, {connected_cluster1: [normalized weight of this connection,
# [list of accessions with this connection]], ...},[list of Accessions with "cluster"], teleport prob.]}
def normalize_nodes(tree, tree_iddict, network):
treelength = whole_tree_length(tree) # whole tree length
values = []
zeroweights = [] # stores connections with a weight of 0
sum_of_clustervalues = 0
for cluster in network:
accessions = network[cluster][2]
sob = sum_of_branches(tree, accessions, tree_iddict)
if treelength == 0:
value = 1
else:
value = sob / treelength
if value != 0:
values.append(value)
sum_of_clustervalues += value
network[cluster].append(value)
if value == 0:
zeroweights.append(cluster)
if len(values) > 0:
minimum = min(values)
else:
minimum = 1
for entry in zeroweights:
sum_of_clustervalues += minimum
network[entry].append(minimum)
for cluster in network:
network[cluster][-1] /= sum_of_clustervalues
return network
# edited from https://gist.github.com/joninvski/701720
# Step 1: For each node prepare the destination and predecessor
def initialize(graph, source):
d = {} # Stands for destination
p = {} # Stands for predecessor
for node in graph:
d[node] = 0 # We start admiting that the rest of nodes are not reachable and therefore have a value of zero
p[node] = None
d[source] = 1 # source has a distance of 1
return d, p
# edited from https://gist.github.com/joninvski/701720
def relax(node, neighbour, graph, d, p, i):
# If the distance between the node and the neighbour is higher than the one I have now
i = i+1
if d[neighbour] < (d[node] * graph[node][neighbour]) / i:
# Record this higher distance
d[neighbour] = (d[node] * graph[node][neighbour]) / i
p[neighbour] = node
# edited from https://gist.github.com/joninvski/701720
# edited bellman ford alorithm multiplying edges with a weight between 0 and 1. Therefore the best path has a length
# of 1 and worse paths have a low weight.
# input:
# lite_network network without outgoing edge weights
def bellman_ford(lite_network, source):
d, p = initialize(lite_network, source)
for i in range(len(lite_network)-1): # Run this until is converges
for u in lite_network:
for v in lite_network[u]: # For each neighbour of u
relax(u, v, lite_network, d, p, i) # Lets relax it
return d, p
# uses the network to create a dictionary with the best paths:
# input:
# network {cluster: [number outgoing connections, {connected_cluster1: [normalized number of connections,
# [list of Accessions with this node]], connected_cluster2: [...]], ...}
# sob_weights: will use the sob weights for later SV calculation instead of the normalized values that were used
# for PageRank calculation if its set to True
# output:
# best_paths {cluster: {connected_cluster: [distance between 0 and 1, first cluster on way to connected_cluster,
# number of steps]}}
def get_distances(network, sob_weights=False):
# lite network is a data structure of a network without the sum of outgoing weights
lite_network = dict()
for cluster in network:
if cluster not in lite_network:
lite_network.update({cluster: dict()})
for prev_cluster in network[cluster][1]:
if sob_weights is True:
value = network[cluster][1][prev_cluster][-1]
else:
value = network[cluster][1][prev_cluster][0]
lite_network[cluster].update({prev_cluster: value})
if prev_cluster not in lite_network:
lite_network.update({prev_cluster: dict()})
best_paths = dict()
for entry in lite_network:
distance, predecessor = bellman_ford(lite_network, source=entry)
for dist in distance:
if distance[dist] != 0:
if entry != dist:
pred = predecessor[dist]
prevpred = dist
step = 1
while pred != entry:
prevpred = pred
pred = predecessor[pred]
step += 1
try:
# prevpred is the first cluster on the way to cluster(dist)
best_paths[entry].update({dist: [(distance[dist]), prevpred, step]})
except KeyError:
best_paths.update({entry: {dist: [(distance[dist]), prevpred, step]}})
return best_paths
# uses a more complex approach to calculate the PageRanl of each cluster in a connectiondict (Network)
# the approach minimizes the used memory by not calculating the whole matrix. Therefore this approach is
# also able to handle big Networks.
# Changes the number of outgoing connection weights in the network to the pagerank_value.
# for a detailed description of the function look up in my master thesis chapter data structure and pagerank in the
# methods
#/media/cyano_share/documents/Bachelor- & Masterarbeiten/Master_Thesis_Dominik_Rabsch.pdf
# input:
# network: {cluster: [number_outgoing_connections, {connected_cluster1: [normalized number of connections,
# [list of Accessions with this node]], connected_cluster2: [...]], ...}
# output:
# network: {cluster: [pagerank_value, {connected_cluster1: [normalized number of connections,
# [list of Accessions with this node]], connected_cluster2: [...]], ...}
def pagerank(network, eps=1.0e-14, teleport=False):
header = ["sRNA"]
header = header + list(network)
n = len(header)
iddict = {}
reverse_iddict = {}
count = 0
lines = []
pagerank_vector = []
teleport_vector = []
for element in header:
iddict.update({element: count})
reverse_iddict.update({count: element})
lines.append([])
if teleport == True:
teleport_vector.append(0)
pagerank_vector.append(1/n)
count += 1
pagerank_vector = np.array(pagerank_vector, dtype="float64")
i_table = []
weights = []
count = 0
for cluster in network:
if teleport == True:
teleport_vector[iddict[cluster]] = network[cluster][-1]
for connected_cluster in network[cluster][1]:
i = iddict[cluster]
i_table.append(i)
value = network[cluster][1][connected_cluster][0]
weights.append(value)
lines[iddict[connected_cluster]].append(count)
count += 1
i_table = np.array(i_table)
check = False
count = 0
while check is not True:
check = True
old_pagerank_vector = np.copy(pagerank_vector)
for x in range(len(lines)):
value = 0
for index in lines[x]:
i = i_table[index]
weight = weights[index]
value = value + weight * old_pagerank_vector[i]
if teleport is True: # teleport based on cluster occurences
value += teleport_vector[x] * old_pagerank_vector[0]
else: # random teleport because sRNA column sums up to 0 (spider trap)
value += (1/n) * old_pagerank_vector[0]
diff = np.absolute(old_pagerank_vector[x] - value)
pagerank_vector[x] = value
if diff > eps:
check = False
count += 1
pagerankdict = dict()
for x in range(len(pagerank_vector)):
pagerankdict.update({reverse_iddict[x]: pagerank_vector[x]})
for entry in network:
network[entry][0] = pagerankdict[entry]/(1-pagerankdict["sRNA"])
return network
# uses a synteny_dict dictionary as well as a best_paths dict and a normalized Network that was used for pagerank
# calculation to calculate the synteny value of each sequence in the sequencedict. Afterwards the synteny value is
# stored in the sequencedict at position sequencedict[sequence_id][4]
# input:
# synteny_dict: {seq_id: [{upstream_Proteins},
# {downstream_ proteins},
# [upstream_Cluster],
# [downstream_Cluster]]}
# best_paths: cluster: {connected_cluster: [best_path , first cluster on way to connected_cluster, number of steps]}}
# network:
# {cluster: [pagerank_value, {connected_cluster1: [normalized number of connections, [list of Accessions with this node]],
# connected_cluster2: [...]], ...}
# output:
# synteny_dict: {seq_id: [{upstream_Proteins},
# {downstream_ proteins},
# [upstream_Cluster],
# [downstream_Cluster],
# synteny_value]} "appends the synteny value here"
def calculate_synteny_value(synteny_dict, best_paths, network):
for entry in synteny_dict:
uppath = synteny_dict[entry][2] # upstream cluster of a considered entry in the synteny dict
count = 0
prevlist = ["sRNA"] # adds the sRNA to the already visited list and makes it possible to start from the sRNA
synvalue = 0 # starting SV
for z in range(len(uppath)):
cluster = uppath[z]
if count == 0: # does not calculate a value for the first cluster as it is the sRNA
pass
else:
if cluster in prevlist: # if the considered cluster is the same like a already visited cluster
synvalue += network[cluster][0] # add the pageRank of the cluster to the SV
else:
if cluster in network: # checks if the considered cluster is in the network
prevlist.append(cluster) # appends the cluster to list of already visited clusters
tmp = []
for cl in prevlist: # for cluster in already visited clusters
if cl in best_paths[cluster]: # checks if cluster is reachable from the already visited one
p = best_paths[cluster][cl][0] / best_paths[cluster][cl][2]
tmp.append(p) # appends the value of stepping to the cluster to a temporary list
elif cluster in best_paths[cl]: # checks if the cluster is reachable by stepping backwards
tar = 0 # sets up a value for the best path
div = 1 # is the number of edges that is used by stepping backwards
for clus in network[cl][1]: # for all clus cluster that are connected to already visited cluster
if clus != "sRNA":
if cluster in best_paths[clus]: # if target cluster is reachable from that clus by stepping backwards
x = best_paths[cl][clus][0] / (best_paths[cl][clus][2] + 1) # x is the path value that is needed to reach this clus
if x > tar: # tar stores the best of these paths to the target cluster
tar = x
div = (best_paths[cl][clus][2] + 1) # number of used edges
if z+1 != len(uppath): # need to add the best oputgoing edge of the target cluster
if uppath[z + 1] in best_paths[cluster]: # if it is possible to reach that
add = best_paths[cluster][best_paths[cluster][uppath[z+1]][1]][0] / div # add this path to the current path
else:
add = best_paths[cluster][best_paths[cluster]["sRNA"][1]][0] / div # add the edge of the cluster on the way to the sRNA
else:
add = best_paths[cluster][best_paths[cluster]["sRNA"][1]][0] / div # if there is no next cluster in the considered synteny _ try to step towards the sRNA
if tar == 0:
p = add
else:
p = add * tar
tmp.append(p)
synvalue += max(tmp) * network[cluster][0]
else:
pass
count += 1
downpath = synteny_dict[entry][3]
count = 0
prevlist = ["sRNA"]
for z in range(len(downpath)):
cluster = downpath[z]
if count == 0:
pass
else:
if cluster in prevlist:
synvalue += network[cluster][0]
else:
if cluster in network:
prevlist.append(cluster)
tmp = []
for cl in prevlist:
if cl in best_paths[cluster]:
p = best_paths[cluster][cl][0] / best_paths[cluster][cl][2]
tmp.append(p)
elif cluster in best_paths[cl]:
tar = 0
div = 1
for clus in network[cl][1]:
if clus != "sRNA":
if cluster in best_paths[clus]:
x = best_paths[cl][clus][0] / (best_paths[cl][clus][2] + 1)
if x > tar:
tar = x
div = (best_paths[cl][clus][2] + 1)
if z+1 != len(downpath):
if downpath[z + 1] in best_paths[cluster]:
add = best_paths[cluster][best_paths[cluster][downpath[z+1]][1]][0] / div
else:
add = best_paths[cluster][best_paths[cluster]["sRNA"][1]][0] / div
else:
add = best_paths[cluster][best_paths[cluster]["sRNA"][1]][0] / div
if tar == 0:
p = add
else:
p = add * tar
tmp.append(p)
synvalue += max(tmp) * network[cluster][0]
else:
count -= 1
count += 1
synteny_dict[entry].append(synvalue)
return synteny_dict
# must be used after the pagerank function. uses the connectiondict to create a svg outfile of the connectiondict
# Network with graphviz.
# input:
# {cluster: [pagerank, {connected_cluster1: [normalized number of connections, [list of Accessions with this node]],
# connected_cluster2: [...]], ...}
def visualize_network(connectiondict, outfile):
node_weights = []
weightdict = dict()
for cluster in connectiondict:
weightdict.update({cluster[8:]: connectiondict[cluster][0]})
node_weights.append(connectiondict[cluster][0])
nodew = list(set(node_weights))
red = Color("red")
colors = list(red.range_to(Color("yellow"), len(nodew)))
for x in range(len(colors)):
colors[x] = str(Color(colors[x]).hex_l)
nodew.sort(reverse=True)
colordict = dict()
for x in range(len(nodew)):
colordict.update({nodew[x]: colors[x]})
graph = Digraph()
graph.node("sRNA", style="filled", fillcolor="green")
for cluster in connectiondict:
graph.node(cluster[8:], style="filled", fillcolor=colordict[connectiondict[cluster][0]])
for cluster in connectiondict:
for connected_cluster in connectiondict[cluster][1]:
w = 3 * connectiondict[cluster][1][connected_cluster][0]
if connected_cluster != "sRNA":
connected_cluster = connected_cluster[8:]
graph.edge(cluster[8:], connected_cluster, penwidth=str(w))
try:
graph.render(outfile, format="svg")
except subprocess.CalledProcessError:
pass
# must be used after the pagerank function. uses the network to create a cytoscape compatible comma seperated
# outfile.
# Network with graphviz.
# input:
# {cluster: [pagerank, {connected_cluster1: [normalized number of connections, [list of Accessions with this node]],
# connected_cluster2: [...]], ...}, [list of accessions with "cluster"], teleport prob. to cluster], ...}
def visualize_cytoscape_network(network, outfile, mode):
data = "#network.txt\n"
f = open(outfile, "w")
if mode == "on":
data += "cluster,connected cluster,PageRank, connection weight\n"
f.write("cluster,connected cluster,PageRank, connection weight\n")
elif mode == "off":
data += "cluster,connected cluster,Sum of branches, connection weight\n"
f.write("cluster,connected cluster,Sum of branches, connection weight\n")
for cluster in network:
pagerank = network[cluster][0]
for connected_cluster in network[cluster][1]:
weight = network[cluster][1][connected_cluster][0]
data += cluster + "," + connected_cluster + "," + str(pagerank) + "," + str(weight) + "\n"
f.write(cluster + "," + connected_cluster + "," + str(pagerank) + "," + str(weight) + "\n")
f.close()
return data
# the annotation file comes from the R-Script and is only used, if the user apply the -n --network parameter
def write_std_data(network_annotation, data_id, outfile):
data = "#" + str(data_id) + "\n"
f = open(outfile, "w")
for entry in network_annotation:
data += str(entry) + "\n"
f.write(str(entry) + "\n")
f.close()
return data
# produces an output file containing the sequence identifiers with their up and downstream cluster numbers.
# this file is used to observe the clusters of each protein in the corresponding network
# input:
# synteny_table: {seq_id: [{upstream_Proteins},
# {downstream_ proteins},
# [upstream_Cluster],
# [downstream_Cluster]]}
def output_cluster_synteny_file(syteny_table, data_id, outfile):
data = "#" + str(data_id) + "\n"
f = open(outfile, "w")
f.write("identifier\tupstream cluster\tdownstream cluster\n")
for entry in syteny_table:
upstream = syteny_table[entry][2]
downstream = syteny_table[entry][3]
data += entry + "\t"
f.write(entry + "\t")
for cluster in upstream:
data += cluster + ","
f.write(cluster + ",")
data += "\t"
f.write("\t")
for cluster in downstream:
data += cluster + ","
f.write(cluster + ",")
data += "\n"
f.write("\n")
f.close()
return data
# normalizes the synteny value of the sequences used for network contruction to the max value of these.
# Also normalizes the synteny value of the tested sequences to the max value of the sequences used for network
# construiction if the test FASTA input file was added.
# input:
# *synteny_table: {seq_id: [{upstream_Proteins},
# {downstream_ proteins},
# [upstream_Cluster],
# [downstream_Cluster],
# synteny_value]}
# output:
# *synteny_table: {seq_id: [{upstream_Proteins},
# {downstream_ proteins},
# [upstream_Cluster],
# [downstream_Cluster],
# normalized synteny_value]}
def normalize_synteny_value(network_synteny_table, test_synteny_table):
network_values = []
for entry in network_synteny_table:
network_values.append(network_synteny_table[entry][4])
network_max = max(network_values)
for entry in network_synteny_table:
network_synteny_table[entry][4] /= network_max
if test_synteny_table is not None: # tests if test sequences are added at the command line call
for entry in test_synteny_table:
test_synteny_table[entry][4] /= network_max
# uses a sequences dictionary where the synteny value is already calculated and a matching iddict to create an outfile.
# iddcit: {sequence_id: [seq_description, seq_sequence]}
# synteny_table: {seq_id: [{upstream_Proteins},
# {downstream_ proteins},
# [upstream_Cluster],
# [downstream_Cluster],
# normalized synteny_value]}
# outfile: path to outfile
def write_outfile_from_synteny_table(synteny_table, iddict, outfile):
f = open(outfile, "w")
for entry in synteny_table:
desc, seq = iddict[entry]
synvalue = synteny_table[entry][4]
f.write(">" + desc + " synteny:" + str(synvalue) + "\n" + str(seq) + "\n")
f.close()
def write_outfile_from_missing_ids_table(missing_ids_table, outfile):
f = open(outfile, "w")
for entry in missing_ids_table:
f.write(str(entry) + "\n")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--network_file", help="fasta file containing sequences used for network construction or 12 column BLAST Table",
type=str)
parser.add_argument("-t", "--test_file", help="optional fasta file containing sequences that are checked for network match or 12 column BLAST Table",
type=str, default=None)
parser.add_argument("-c", "--cluster_script", help="path to synteny clustering R script",
type=str,
default=str(os.path.dirname(os.path.abspath(__file__))) + "/packages/Rscript/Synteny_Cluster_Script_sqlite.r")
parser.add_argument("-p", "--w_dir", help="working directory where temporary files are stored default is the "
"current directory", type=str, default="")
parser.add_argument("-n", "--network", help="if set to svg, 'outfile'_Network.svg is produced as an output."
"If set to cys, cytoscape compatible comma seperated Network 'outfile'_Network.txt is "
"produced", type=str, default="False")
parser.add_argument("-o", "--outfiles", help="path and name of outfiles. Will produce 'outfile'_network.fasta and "
"'outfile'_questionable.fasta ", type=str, default="")
parser.add_argument("-w", "--synteny_window", help="synteny window used for extraction", type=int, default=5000)
parser.add_argument("--protein_number", help="number of proteins up and downstream that should be used. default is 4",
type=int, default=4)
parser.add_argument("--node_normalization",
help="If True uses a teleport at the sRNA based on a normalized number of cluster occurrences. "
"Default is False",type=bool, default=False)
parser.add_argument("--use_sob_weights", help="If True uses sum of branch weights for Synteny Value calculation. "
"Default is False", type=bool, default=False)
parser.add_argument("-d", "--sqlite_db", help="Path to SQLite DB", type=str,
default=str(os.path.dirname(os.path.abspath(__file__))) + "/mySQLiteDB_Syntney.db")
parser.add_argument("-s", "--sqlite_script", help="", type=str,
default=str(os.path.dirname(os.path.abspath(__file__))) + "/packages/GENBANK_GROPER_SQLITE/genbank_groper_sqliteDB.py")
parser.add_argument("-r", "--page_rank", help="Turn PageRank algorithm on or off; default=on", type=str, default="on")
parser.add_argument("-x", "--num_threads", help="Number of threads; default=1", type=int, default=1)
args = parser.parse_args()
# check if psi_out folder exists
path_psi_out = str(os.path.abspath(args.w_dir)) + "/psi_out/"
if os.path.isdir(path_psi_out):
shutil.rmtree(path_psi_out)
# define variable to store crucial information for "R-Script"
aggregated_results = ""
# check the FASTA file(s) of consistency
proven_network_fasta = check_input_consistency(args.network_file, args.sqlite_script)
if args.test_file != None:
proven_test_fasta = check_input_consistency(args.test_file, args.sqlite_script)
else:
proven_test_fasta = None
try:
r_script_cluster_table, r_script_synteny_table, r_network_annotation_table, r_missing_ids_table, r_rRNA_network_table, network_ids, test_ids = run_r_script(proven_network_fasta, proven_test_fasta, args.cluster_script, args.sqlite_db, args.sqlite_script, args.num_threads, synteny_window=str(args.synteny_window))
except:
sys.exit("ERROR: R_SCRIPT CAN\'T BE CALLED CORRECTLY!")
number_of_clusters = args.protein_number + 1 # needs to be done as sRNA is also considered as a cluster
try:
network_synteny_table = get_synteny_dict(network_ids, r_script_synteny_table)
if len(network_synteny_table) <= 2:
raise Exception("The number of sequences are to low for computing a network! Please increase the number of input sequences in your network.fasta file.")
except Exception as error:
sys.exit("ERROR: Function get_synteny_dict(network_ids, r_script_synteny_table) failed!" + "\n" + repr(error))
cluster_dict = get_clusters(r_script_cluster_table)
network_synteny_table = add_cluster_to_synteny_table(network_synteny_table, cluster_dict, number_of_clusters)
network = build_network(network_synteny_table)
network, tree, tree_iddict = normalize_connections(r_rRNA_network_table, network, args.num_threads)
if args.node_normalization is True:
normalize_nodes(tree, tree_iddict, network)
best_paths = get_distances(network, sob_weights=args.use_sob_weights)
if args.page_rank == "on":
network = pagerank(network, teleport=args.node_normalization)
elif args.page_rank == "off" and args.node_normalization is True:
for entry in network:
network[entry][0] = network[entry][-1]
else:
raise Exception("flags --node_normalization False and --page_rank off produces nonsense result")
network_synteny_table = calculate_synteny_value(network_synteny_table, best_paths, network)
if test_ids is not None:
test_synteny_table = get_synteny_dict(test_ids, r_script_synteny_table)
test_synteny_table = add_cluster_to_synteny_table(test_synteny_table, cluster_dict, number_of_clusters)
test_synteny_table = calculate_synteny_value(test_synteny_table, best_paths, network)
else:
test_synteny_table = None
normalize_synteny_value(network_synteny_table, test_synteny_table)
write_outfile_from_synteny_table(network_synteny_table, network_ids, args.outfiles + "_Network.fasta")
write_outfile_from_missing_ids_table(r_missing_ids_table, args.outfiles + "_Missing_Ids.txt")
if test_synteny_table is not None:
write_outfile_from_synteny_table(test_synteny_table, test_ids, args.outfiles + "_Evaluated.fasta")
if args.network == "svg":
visualize_network(network, outfile=args.outfiles + "_Network.svg")
output_cluster_synteny_file(test_synteny_table, outfile=args.outfiles + "_Cluster.txt")
elif args.network == "cys":
# essential
aggregated_results += visualize_cytoscape_network(network, outfile=args.outfiles + "_Network.txt", mode=args.page_rank)
# _Network_Annotation.txt - only used for internal testing
aggregated_results += write_std_data(r_network_annotation_table, "network_annotation", outfile=args.outfiles + "_Network_Annotation.txt")
# _Synteny_Table.txt - only used for internal testing
aggregated_results += write_std_data(r_script_synteny_table, "synteny_table", outfile=args.outfiles + "_Synteny_Table.txt")
if test_synteny_table is not None:
output_cluster_synteny_file(test_synteny_table, "test_synteny_table", outfile=args.outfiles + "_Evaluated_Cluster.txt")
# _Network_Cluster.txt - only used for internal testing
aggregated_results += output_cluster_synteny_file(network_synteny_table, "network_cluster", outfile=args.outfiles + "_Network_Cluster.txt")
else:
pass
###### START TEST OUTPUT JENS
#print(aggregated_results)
handle = open("./aggregated_results.jens", "w")
for line in aggregated_results:
handle.write(line)
handle.close()
###### END TEST OUTPUT JENS
# delete psi_out
path_psi_out = str(os.path.abspath(args.w_dir)) + "/psi_out/"
shutil.rmtree(path_psi_out)
if __name__ == "__main__":
main()
| 48.59823 | 321 | 0.586059 | import os
from Bio import SeqIO
import numpy as np
import argparse
import subprocess
from subprocess import run, PIPE
from ete3 import *
import shutil
import sys
import tempfile
import re
def check_NCBI_format(fasta_header):
tmp_header = ""
p = re.compile(r'.{0,20}:c?\d*-\d*')
q = re.compile(r'.{0,20}/\d*-\d*')
m = p.match(fasta_header)
n = q.match(fasta_header)
if m != None:
if m.span()[0] == 0:
return fasta_header
elif n != None:
if n.span()[0] == 0:
header_arr = fasta_header.split(" ")
tmp_header_arr = header_arr[0].split("/")
tmp_id = tmp_header_arr[0]
tmp_coords = tmp_header_arr[1].split('-')
if int(tmp_coords[0]) <= int(tmp_coords[1]):
out_str = str(tmp_id) + ":" + str(tmp_coords[0]) + "-" + str(tmp_coords[1]) + " " + str(" ".join(header_arr[1:]))
return out_str
else:
out_str = str(tmp_id) + ":c" + str(tmp_coords[1]) + "-" + str(tmp_coords[0]) + " " + str(" ".join(header_arr[1:]))
return out_str
else:
raise Exception()
def check_input_consistency(fasta_file, sqlite_handler):
f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
tmp_file = f.name
count = 0
try:
with open(fasta_file, "rU") as handle:
for record in SeqIO.parse(handle, "fasta"):
new_header = check_NCBI_format(record.description)
f.write(">" + str(new_header) + "\n")
f.write(str(record.seq) + "\n")
count += 1
if count == 0:
build_string = ""
with open(fasta_file, "rU") as handle:
for line in handle:
line = line.rstrip()
tmp_arr = line.split("\t")
if len(tmp_arr) == 12:
seq_id = tmp_arr[1]
start_pos = int(tmp_arr[8])
end_pos = int(tmp_arr[9])
if start_pos <= end_pos:
build_string += str(seq_id) + "@" + str(start_pos) + "@" + str(end_pos) + "@" + "+" + " "
else:
build_string += str(seq_id) + "@" + str(end_pos) + "@" + str(start_pos) + "@" + "-" + " "
count += 1
else:
count = 0
break
if count > 0:
fasta_string = subprocess.getoutput("python3 " + str(sqlite_handler) + " -pdna " + str(build_string))
f.write(fasta_string)
f.close()
if count == 0:
raise Exception()
except:
sys.stderr.write("ERROR => Input format does not contain the expected FASTA format (NCBI or RFAM style)." + "\n" +
"Allowed formats are:" + "\n" +
"(a)" + "\n" +
"><ID>:<start coordinate>-<end coordinate> <some comments>" + "\n" +
"<Sequence>" + "\n" +
"if the sequence is encoded on the -strand:" + "\n" +
"><ID>:c<start coordinate>-<end coordinate> <some comments>" + "\n" +
"(b)" + "\n" +
"><ID>/<start coordinate>-<end coordinate> <some comments>" + "\n" +
"<Sequence>" + "\n" +
"(c)" + "\n" +
"12 column BLAST Table" + "\n"
)
exit()
return tmp_file
def run_r_script(network_file, test_file, r_script_path, sql_db_path, sql_script_path, num_threads, synteny_window=str(5000)):
network_ids = dict()
fasta_header_network = dict()
seqString = "#Network" + "\n"
for seq_record in SeqIO.parse(network_file, "fasta"):
seqString += ">" + str(seq_record.description) + "\n"
seqString += str(seq_record.seq) + "\n"
seq_id = seq_record.description
tmp_fasta_header = ">" + str(seq_id)
fasta_header_network[tmp_fasta_header] = 0
seq_id = seq_id.split("-")
if len(seq_id) == 1:
pass
else:
seq_id = seq_id[0]
seq_id = seq_id.split(":")
if len(seq_id) > 1:
if seq_id[1].startswith("c"):
seq_id[1] = seq_id[1][1:]
seq_id = seq_id[0] + "_" + seq_id[1]
network_ids.update({seq_id: [seq_record.description, seq_record.seq]})
test_ids = dict()
if test_file is not None:
seqString += "#Test" + "\n"
for seq_record in SeqIO.parse(test_file, "fasta"):
seq_id = seq_record.description
seq_id = seq_id.split("-")
if len(seq_id) == 1:
pass
else:
seq_id = seq_id[0]
seq_id = seq_id.split(":")
if len(seq_id) > 1:
if seq_id[1].startswith("c"):
seq_id[1] = seq_id[1][1:]
seq_id = seq_id[0] + "_" + seq_id[1]
pdate({seq_id: [seq_record.description, seq_record.seq]})
seqString += ">" + str(seq_record.description) + "\n"
seqString += str(seq_record.seq) + "\n"
else:
test_ids = None
try:
f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
f_path = f.name
f.write(seqString)
f.close()
proc = subprocess.run(["R", "--slave", "-f " + r_script_path, "--args", "filename=" + f_path, "synteny_window=" + synteny_window, "script_path=" + sql_script_path, "db_path=" + sql_db_path, "threads=" + str(num_threads), "write_files=FALSE"], universal_newlines=True, stdout=subprocess.PIPE, check=True)
finally:
os.unlink(f.name)
master_table = proc.stdout.split("\n")
syntenyfile_cluster_table = list()
syntenyfile_synteny_table = list()
network_annotation_table = list()
missing_ids_table = list()
rRNA_network_table = list()
rRNA_lookup = dict()
list_name = ""
for i in range(0, len(master_table)):
if master_table[i].startswith("#"):
list_name = "-"
if list_name == "#cluster_table":
syntenyfile_cluster_table.append(master_table[i])
if list_name == "#synteny_table":
syntenyfile_synteny_table.append(master_table[i])
if list_name == "#network_annotation":
network_annotation_table.append(master_table[i])
if list_name == "#missing_data":
missing_ids_table.append(master_table[i])
if list_name == "#16S_RNA":
tmp_entry = master_table[i].split("\t")
if tmp_entry[0] in fasta_header_network:
rRNA_network_table.append(tmp_entry[0])
rRNA_network_table.append(tmp_entry[1])
rRNA_lookup[tmp_entry[0]] = 0
if master_table[i].startswith("#cluster_table"):
list_name = "#cluster_table"
if master_table[i].startswith("#synteny_table"):
list_name = "#synteny_table"
if master_table[i].startswith("#network_annotation"):
list_name = "#network_annotation"
if master_table[i].startswith("#missing_data"):
list_name = "#missing_data"
if master_table[i].startswith("#16S_RNA"):
list_name = "#16S_RNA"
return syntenyfile_cluster_table, syntenyfile_synteny_table, network_annotation_table, missing_ids_table, rRNA_network_table, network_ids, test_ids
def get_synteny_dict(ids, r_script_synteny_table):
synteny_dict = dict()
for line in r_script_synteny_table:
handle = line.split("\t")
seq_id = handle[0]
if seq_id not in synteny_dict:
if seq_id in ids:
synteny_dict.update({seq_id: []})
proteins = handle[4].split(",")
positions = handle[5].split(",")
downstream_dict = {}
upstream_dict = {}
switch = 0
for x in range(len(proteins)):
if int(positions[x]) < 10:
if switch == 0:
if positions[x] == "1":
switch = 1
if switch == 2:
downstream_dict.update({proteins[x]: positions[x]})
if switch < 2:
upstream_dict.update({proteins[x]: positions[x]})
if switch == 1:
switch = 2
synteny_dict[seq_id].append(upstream_dict)
synteny_dict[seq_id].append(downstream_dict)
return synteny_dict
def get_clusters(r_script_cluster_table):
cluster_dict = dict()
for line in r_script_cluster_table:
if line.startswith("cluster"):
handle = line.split("\t")
name = handle[0]
cluster = handle[1].split(",")
for element in cluster:
cluster_dict.update({element: name})
return cluster_dict
def add_cluster_to_synteny_table(synteny_dict, cluster_dict, number_of_clusters):
count = 0
for entry in synteny_dict:
up_proteins = synteny_dict[entry][0]
down_proteins = synteny_dict[entry][1]
up_cluster = []
down_cluster = ["sRNA"]
for protein in up_proteins:
try:
cluster = cluster_dict[protein]
up_cluster.append(cluster)
count += 1
except KeyError:
print("Cluster not found")
for protein in down_proteins:
try:
cluster = cluster_dict[protein]
down_cluster.append(cluster)
count += 1
except KeyError:
print("Cluster not found")
up_cluster.append("sRNA")
down_cluster = down_cluster[0:number_of_clusters]
up_cluster = list(reversed(up_cluster))[0:number_of_clusters]
synteny_dict[entry].append(up_cluster)
synteny_dict[entry].append(down_cluster)
return synteny_dict
def build_network(synteny_dict):
network = dict()
for entry in synteny_dict:
upcluster = synteny_dict[entry][2]
prev_cluster = 0
for cluster in upcluster:
if prev_cluster != 0:
if prev_cluster == cluster:
pass
else:
if cluster not in network:
network.update({cluster: [{prev_cluster: [1, [entry]]}, [entry]]})
else:
network[cluster][1].append(entry)
if prev_cluster not in network[cluster][0]:
network[cluster][0].update({prev_cluster: [1, [entry]]})
else:
network[cluster][0][prev_cluster][0] += 1
network[cluster][0][prev_cluster][1].append(entry)
prev_cluster = cluster
prev_cluster = 0
downcluster = synteny_dict[entry][3]
for cluster in downcluster:
if prev_cluster != 0:
if prev_cluster == cluster:
pass
else:
if cluster not in network:
network.update({cluster: [{prev_cluster: [1, [entry]]}, [entry]]})
else:
network[cluster][1].append(entry)
if prev_cluster not in network[cluster][0]:
network[cluster][0].update({prev_cluster: [1, [entry]]})
else:
network[cluster][0][prev_cluster][0] += 1
network[cluster][0][prev_cluster][1].append(entry)
prev_cluster = cluster
return network
def tree_construction(rRNA_data, n_threads):
count = 0
tree_iddict = dict()
forbidden = set()
try:
tmp_fasta = tempfile.NamedTemporaryFile(mode='w+', delete=False)
tmp_header = ""
skip = False
for line in rRNA_data:
if line.startswith(">"):
line = line[1:]
if line not in forbidden:
seq_id = line.split(":")
if len(seq_id) > 1:
pos = seq_id[1].split("-")[0]
if pos.startswith("c"):
pos = pos[1::]
seq_id = seq_id[0] + "_" + pos
tree_iddict.update({seq_id: str(count)})
tmp_header = str(count)
count += 1
forbidden.add(line)
else:
skip = True
else:
if skip is False:
tmp_fasta.write(">" + str(tmp_header) + "\n" + str(line) + "\n")
else:
skip = False
tmp_fasta.close()
tmp_clustalo = tempfile.NamedTemporaryFile(delete=False)
os.system("clustalo --in " + str(tmp_fasta.name) + " --distmat-out=" + str(tmp_clustalo.name) + " --threads=" + str(n_threads) + " --full --force > /dev/null")
tmp_quicktree = tempfile.NamedTemporaryFile(delete=False)
os.system("quicktree -in m " + str(tmp_clustalo.name) + " > " + str(tmp_quicktree.name))
f = open(tmp_quicktree.name, "r")
tree = str()
for line in f:
line = line.rstrip()
tree = tree + line
f.close()
tree = Tree(tree, format=1)
except:
print("Unexpected error:", sys.exc_info()[0])
finally:
os.unlink(tmp_fasta.name)
os.unlink(tmp_clustalo.name)
os.unlink(tmp_quicktree.name)
return tree_iddict, tree
def whole_tree_length(tree):
leng = 0
for node in tree.iter_descendants():
leng += node.dist
return leng
def sum_of_branches(tree, accessions_list, tree_iddict):
acc_ids = []
for entry in accessions_list:
acc_ids.append(tree_iddict[entry])
accessions = tuple(acc_ids)
if len(accessions) > 1:
n1 = tree.get_common_ancestor(accessions)
sob = n1.dist
lookup = set()
for element in acc_ids:
leaf = tree.get_leaves_by_name(element)[0]
while leaf != n1 and leaf not in lookup:
sob = sob + leaf.dist
lookup.add(leaf)
leaf = leaf.up
else:
node = tree.search_nodes(name=acc_ids[0])[0]
parent = node.up
dist = tree.get_distance(node, parent)
sob = dist
return sob
def add_outgoing_connection_weights(network):
for cluster in network:
outgoing = 0
for connected_cluster in network[cluster][0]:
outgoing += network[cluster][0][connected_cluster][0]
network.update({cluster: [outgoing] + network[cluster]})
return network
def normalize_connections(rRNA_data, network, n_threads):
= []
zeroweights = []
for cluster in network:
for connectedcluster in network[cluster][0]:
accessions = network[cluster][0][connectedcluster][1]
sob = sum_of_branches(tree, accessions, tree_iddict)
if treelength == 0:
value = 1
else:
value = sob/treelength
if value != 0:
values.append(value)
network[cluster][0][connectedcluster][0] = value
if value == 0:
zeroweights.append([cluster, connectedcluster])
if len(values) > 0:
minimum = min(values)
else:
minimum = 1
for entry in zeroweights:
network[entry[0]][0][entry[1]][0] = minimum
add_outgoing_connection_weights(network)
for cluster in network:
for connected_cluster in network[cluster][1]:
network[cluster][1][connected_cluster].append(network[cluster][1][connected_cluster][0])
network[cluster][1][connected_cluster][0] = \
network[cluster][1][connected_cluster][0] / network[cluster][0]
return network, tree, tree_iddict
def normalize_nodes(tree, tree_iddict, network):
treelength = whole_tree_length(tree)
values = []
zeroweights = []
sum_of_clustervalues = 0
for cluster in network:
accessions = network[cluster][2]
sob = sum_of_branches(tree, accessions, tree_iddict)
if treelength == 0:
value = 1
else:
value = sob / treelength
if value != 0:
values.append(value)
sum_of_clustervalues += value
network[cluster].append(value)
if value == 0:
zeroweights.append(cluster)
if len(values) > 0:
minimum = min(values)
else:
minimum = 1
for entry in zeroweights:
sum_of_clustervalues += minimum
network[entry].append(minimum)
for cluster in network:
network[cluster][-1] /= sum_of_clustervalues
return network
def initialize(graph, source):
d = {}
p = {}
for node in graph:
d[node] = 0
p[node] = None
d[source] = 1
return d, p
def relax(node, neighbour, graph, d, p, i):
i = i+1
if d[neighbour] < (d[node] * graph[node][neighbour]) / i:
d[neighbour] = (d[node] * graph[node][neighbour]) / i
p[neighbour] = node
def bellman_ford(lite_network, source):
d, p = initialize(lite_network, source)
for i in range(len(lite_network)-1):
for u in lite_network:
for v in lite_network[u]:
relax(u, v, lite_network, d, p, i)
return d, p
def get_distances(network, sob_weights=False):
lite_network = dict()
for cluster in network:
if cluster not in lite_network:
lite_network.update({cluster: dict()})
for prev_cluster in network[cluster][1]:
if sob_weights is True:
value = network[cluster][1][prev_cluster][-1]
else:
value = network[cluster][1][prev_cluster][0]
lite_network[cluster].update({prev_cluster: value})
if prev_cluster not in lite_network:
lite_network.update({prev_cluster: dict()})
best_paths = dict()
for entry in lite_network:
distance, predecessor = bellman_ford(lite_network, source=entry)
for dist in distance:
if distance[dist] != 0:
if entry != dist:
pred = predecessor[dist]
prevpred = dist
step = 1
while pred != entry:
prevpred = pred
pred = predecessor[pred]
step += 1
try:
best_paths[entry].update({dist: [(distance[dist]), prevpred, step]})
except KeyError:
best_paths.update({entry: {dist: [(distance[dist]), prevpred, step]}})
return best_paths
def pagerank(network, eps=1.0e-14, teleport=False):
header = ["sRNA"]
header = header + list(network)
n = len(header)
iddict = {}
reverse_iddict = {}
count = 0
lines = []
pagerank_vector = []
teleport_vector = []
for element in header:
iddict.update({element: count})
reverse_iddict.update({count: element})
lines.append([])
if teleport == True:
teleport_vector.append(0)
pagerank_vector.append(1/n)
count += 1
pagerank_vector = np.array(pagerank_vector, dtype="float64")
i_table = []
weights = []
count = 0
for cluster in network:
if teleport == True:
teleport_vector[iddict[cluster]] = network[cluster][-1]
for connected_cluster in network[cluster][1]:
i = iddict[cluster]
i_table.append(i)
value = network[cluster][1][connected_cluster][0]
weights.append(value)
lines[iddict[connected_cluster]].append(count)
count += 1
i_table = np.array(i_table)
check = False
count = 0
while check is not True:
check = True
old_pagerank_vector = np.copy(pagerank_vector)
for x in range(len(lines)):
value = 0
for index in lines[x]:
i = i_table[index]
weight = weights[index]
value = value + weight * old_pagerank_vector[i]
if teleport is True:
value += teleport_vector[x] * old_pagerank_vector[0]
else:
value += (1/n) * old_pagerank_vector[0]
diff = np.absolute(old_pagerank_vector[x] - value)
pagerank_vector[x] = value
if diff > eps:
check = False
count += 1
pagerankdict = dict()
for x in range(len(pagerank_vector)):
pagerankdict.update({reverse_iddict[x]: pagerank_vector[x]})
for entry in network:
network[entry][0] = pagerankdict[entry]/(1-pagerankdict["sRNA"])
return network
def calculate_synteny_value(synteny_dict, best_paths, network):
for entry in synteny_dict:
uppath = synteny_dict[entry][2]
count = 0
prevlist = ["sRNA"]
synvalue = 0
for z in range(len(uppath)):
cluster = uppath[z]
if count == 0:
pass
else:
if cluster in prevlist:
synvalue += network[cluster][0]
else:
if cluster in network:
prevlist.append(cluster)
tmp = []
for cl in prevlist:
if cl in best_paths[cluster]:
p = best_paths[cluster][cl][0] / best_paths[cluster][cl][2]
tmp.append(p)
elif cluster in best_paths[cl]:
tar = 0
div = 1
for clus in network[cl][1]:
if clus != "sRNA":
if cluster in best_paths[clus]:
x = best_paths[cl][clus][0] / (best_paths[cl][clus][2] + 1)
if x > tar:
tar = x
div = (best_paths[cl][clus][2] + 1)
if z+1 != len(uppath):
if uppath[z + 1] in best_paths[cluster]:
add = best_paths[cluster][best_paths[cluster][uppath[z+1]][1]][0] / div
else:
add = best_paths[cluster][best_paths[cluster]["sRNA"][1]][0] / div
else:
add = best_paths[cluster][best_paths[cluster]["sRNA"][1]][0] / div
if tar == 0:
p = add
else:
p = add * tar
tmp.append(p)
synvalue += max(tmp) * network[cluster][0]
else:
pass
count += 1
downpath = synteny_dict[entry][3]
count = 0
prevlist = ["sRNA"]
for z in range(len(downpath)):
cluster = downpath[z]
if count == 0:
pass
else:
if cluster in prevlist:
synvalue += network[cluster][0]
else:
if cluster in network:
prevlist.append(cluster)
tmp = []
for cl in prevlist:
if cl in best_paths[cluster]:
p = best_paths[cluster][cl][0] / best_paths[cluster][cl][2]
tmp.append(p)
elif cluster in best_paths[cl]:
tar = 0
div = 1
for clus in network[cl][1]:
if clus != "sRNA":
if cluster in best_paths[clus]:
x = best_paths[cl][clus][0] / (best_paths[cl][clus][2] + 1)
if x > tar:
tar = x
div = (best_paths[cl][clus][2] + 1)
if z+1 != len(downpath):
if downpath[z + 1] in best_paths[cluster]:
add = best_paths[cluster][best_paths[cluster][downpath[z+1]][1]][0] / div
else:
add = best_paths[cluster][best_paths[cluster]["sRNA"][1]][0] / div
else:
add = best_paths[cluster][best_paths[cluster]["sRNA"][1]][0] / div
if tar == 0:
p = add
else:
p = add * tar
tmp.append(p)
synvalue += max(tmp) * network[cluster][0]
else:
count -= 1
count += 1
synteny_dict[entry].append(synvalue)
return synteny_dict
def visualize_network(connectiondict, outfile):
node_weights = []
weightdict = dict()
for cluster in connectiondict:
weightdict.update({cluster[8:]: connectiondict[cluster][0]})
node_weights.append(connectiondict[cluster][0])
nodew = list(set(node_weights))
red = Color("red")
colors = list(red.range_to(Color("yellow"), len(nodew)))
for x in range(len(colors)):
colors[x] = str(Color(colors[x]).hex_l)
nodew.sort(reverse=True)
colordict = dict()
for x in range(len(nodew)):
colordict.update({nodew[x]: colors[x]})
graph = Digraph()
graph.node("sRNA", style="filled", fillcolor="green")
for cluster in connectiondict:
graph.node(cluster[8:], style="filled", fillcolor=colordict[connectiondict[cluster][0]])
for cluster in connectiondict:
for connected_cluster in connectiondict[cluster][1]:
w = 3 * connectiondict[cluster][1][connected_cluster][0]
if connected_cluster != "sRNA":
connected_cluster = connected_cluster[8:]
graph.edge(cluster[8:], connected_cluster, penwidth=str(w))
try:
graph.render(outfile, format="svg")
except subprocess.CalledProcessError:
pass
def visualize_cytoscape_network(network, outfile, mode):
data = "#network.txt\n"
f = open(outfile, "w")
if mode == "on":
data += "cluster,connected cluster,PageRank, connection weight\n"
f.write("cluster,connected cluster,PageRank, connection weight\n")
elif mode == "off":
data += "cluster,connected cluster,Sum of branches, connection weight\n"
f.write("cluster,connected cluster,Sum of branches, connection weight\n")
for cluster in network:
pagerank = network[cluster][0]
for connected_cluster in network[cluster][1]:
weight = network[cluster][1][connected_cluster][0]
data += cluster + "," + connected_cluster + "," + str(pagerank) + "," + str(weight) + "\n"
f.write(cluster + "," + connected_cluster + "," + str(pagerank) + "," + str(weight) + "\n")
f.close()
return data
def write_std_data(network_annotation, data_id, outfile):
data = "#" + str(data_id) + "\n"
f = open(outfile, "w")
for entry in network_annotation:
data += str(entry) + "\n"
f.write(str(entry) + "\n")
f.close()
return data
def output_cluster_synteny_file(syteny_table, data_id, outfile):
data = "#" + str(data_id) + "\n"
f = open(outfile, "w")
f.write("identifier\tupstream cluster\tdownstream cluster\n")
for entry in syteny_table:
upstream = syteny_table[entry][2]
downstream = syteny_table[entry][3]
data += entry + "\t"
f.write(entry + "\t")
for cluster in upstream:
data += cluster + ","
f.write(cluster + ",")
data += "\t"
f.write("\t")
for cluster in downstream:
data += cluster + ","
f.write(cluster + ",")
data += "\n"
f.write("\n")
f.close()
return data
def normalize_synteny_value(network_synteny_table, test_synteny_table):
network_values = []
for entry in network_synteny_table:
network_values.append(network_synteny_table[entry][4])
network_max = max(network_values)
for entry in network_synteny_table:
network_synteny_table[entry][4] /= network_max
if test_synteny_table is not None:
for entry in test_synteny_table:
test_synteny_table[entry][4] /= network_max
def write_outfile_from_synteny_table(synteny_table, iddict, outfile):
f = open(outfile, "w")
for entry in synteny_table:
desc, seq = iddict[entry]
synvalue = synteny_table[entry][4]
f.write(">" + desc + " synteny:" + str(synvalue) + "\n" + str(seq) + "\n")
f.close()
def write_outfile_from_missing_ids_table(missing_ids_table, outfile):
f = open(outfile, "w")
for entry in missing_ids_table:
f.write(str(entry) + "\n")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--network_file", help="fasta file containing sequences used for network construction or 12 column BLAST Table",
type=str)
parser.add_argument("-t", "--test_file", help="optional fasta file containing sequences that are checked for network match or 12 column BLAST Table",
type=str, default=None)
parser.add_argument("-c", "--cluster_script", help="path to synteny clustering R script",
type=str,
default=str(os.path.dirname(os.path.abspath(__file__))) + "/packages/Rscript/Synteny_Cluster_Script_sqlite.r")
parser.add_argument("-p", "--w_dir", help="working directory where temporary files are stored default is the "
"current directory", type=str, default="")
parser.add_argument("-n", "--network", help="if set to svg, 'outfile'_Network.svg is produced as an output."
"If set to cys, cytoscape compatible comma seperated Network 'outfile'_Network.txt is "
"produced", type=str, default="False")
parser.add_argument("-o", "--outfiles", help="path and name of outfiles. Will produce 'outfile'_network.fasta and "
"'outfile'_questionable.fasta ", type=str, default="")
parser.add_argument("-w", "--synteny_window", help="synteny window used for extraction", type=int, default=5000)
parser.add_argument("--protein_number", help="number of proteins up and downstream that should be used. default is 4",
type=int, default=4)
parser.add_argument("--node_normalization",
help="If True uses a teleport at the sRNA based on a normalized number of cluster occurrences. "
"Default is False",type=bool, default=False)
parser.add_argument("--use_sob_weights", help="If True uses sum of branch weights for Synteny Value calculation. "
"Default is False", type=bool, default=False)
parser.add_argument("-d", "--sqlite_db", help="Path to SQLite DB", type=str,
default=str(os.path.dirname(os.path.abspath(__file__))) + "/mySQLiteDB_Syntney.db")
parser.add_argument("-s", "--sqlite_script", help="", type=str,
default=str(os.path.dirname(os.path.abspath(__file__))) + "/packages/GENBANK_GROPER_SQLITE/genbank_groper_sqliteDB.py")
parser.add_argument("-r", "--page_rank", help="Turn PageRank algorithm on or off; default=on", type=str, default="on")
parser.add_argument("-x", "--num_threads", help="Number of threads; default=1", type=int, default=1)
args = parser.parse_args()
path_psi_out = str(os.path.abspath(args.w_dir)) + "/psi_out/"
if os.path.isdir(path_psi_out):
shutil.rmtree(path_psi_out)
aggregated_results = ""
proven_network_fasta = check_input_consistency(args.network_file, args.sqlite_script)
if args.test_file != None:
proven_test_fasta = check_input_consistency(args.test_file, args.sqlite_script)
else:
proven_test_fasta = None
try:
r_script_cluster_table, r_script_synteny_table, r_network_annotation_table, r_missing_ids_table, r_rRNA_network_table, network_ids, test_ids = run_r_script(proven_network_fasta, proven_test_fasta, args.cluster_script, args.sqlite_db, args.sqlite_script, args.num_threads, synteny_window=str(args.synteny_window))
except:
sys.exit("ERROR: R_SCRIPT CAN\'T BE CALLED CORRECTLY!")
number_of_clusters = args.protein_number + 1 # needs to be done as sRNA is also considered as a cluster
try:
network_synteny_table = get_synteny_dict(network_ids, r_script_synteny_table)
if len(network_synteny_table) <= 2:
raise Exception("The number of sequences are to low for computing a network! Please increase the number of input sequences in your network.fasta file.")
except Exception as error:
sys.exit("ERROR: Function get_synteny_dict(network_ids, r_script_synteny_table) failed!" + "\n" + repr(error))
cluster_dict = get_clusters(r_script_cluster_table)
network_synteny_table = add_cluster_to_synteny_table(network_synteny_table, cluster_dict, number_of_clusters)
network = build_network(network_synteny_table)
network, tree, tree_iddict = normalize_connections(r_rRNA_network_table, network, args.num_threads)
if args.node_normalization is True:
normalize_nodes(tree, tree_iddict, network)
best_paths = get_distances(network, sob_weights=args.use_sob_weights)
if args.page_rank == "on":
network = pagerank(network, teleport=args.node_normalization)
elif args.page_rank == "off" and args.node_normalization is True:
for entry in network:
network[entry][0] = network[entry][-1]
else:
raise Exception("flags --node_normalization False and --page_rank off produces nonsense result")
network_synteny_table = calculate_synteny_value(network_synteny_table, best_paths, network)
if test_ids is not None:
test_synteny_table = get_synteny_dict(test_ids, r_script_synteny_table)
test_synteny_table = add_cluster_to_synteny_table(test_synteny_table, cluster_dict, number_of_clusters)
test_synteny_table = calculate_synteny_value(test_synteny_table, best_paths, network)
else:
test_synteny_table = None
normalize_synteny_value(network_synteny_table, test_synteny_table)
write_outfile_from_synteny_table(network_synteny_table, network_ids, args.outfiles + "_Network.fasta")
write_outfile_from_missing_ids_table(r_missing_ids_table, args.outfiles + "_Missing_Ids.txt")
if test_synteny_table is not None:
write_outfile_from_synteny_table(test_synteny_table, test_ids, args.outfiles + "_Evaluated.fasta")
if args.network == "svg":
visualize_network(network, outfile=args.outfiles + "_Network.svg")
output_cluster_synteny_file(test_synteny_table, outfile=args.outfiles + "_Cluster.txt")
elif args.network == "cys":
# essential
aggregated_results += visualize_cytoscape_network(network, outfile=args.outfiles + "_Network.txt", mode=args.page_rank)
# _Network_Annotation.txt - only used for internal testing
aggregated_results += write_std_data(r_network_annotation_table, "network_annotation", outfile=args.outfiles + "_Network_Annotation.txt")
# _Synteny_Table.txt - only used for internal testing
aggregated_results += write_std_data(r_script_synteny_table, "synteny_table", outfile=args.outfiles + "_Synteny_Table.txt")
if test_synteny_table is not None:
output_cluster_synteny_file(test_synteny_table, "test_synteny_table", outfile=args.outfiles + "_Evaluated_Cluster.txt")
# _Network_Cluster.txt - only used for internal testing
aggregated_results += output_cluster_synteny_file(network_synteny_table, "network_cluster", outfile=args.outfiles + "_Network_Cluster.txt")
else:
pass
###### START TEST OUTPUT JENS
#print(aggregated_results)
handle = open("./aggregated_results.jens", "w")
for line in aggregated_results:
handle.write(line)
handle.close()
###### END TEST OUTPUT JENS
# delete psi_out
path_psi_out = str(os.path.abspath(args.w_dir)) + "/psi_out/"
shutil.rmtree(path_psi_out)
if __name__ == "__main__":
main()
| true | true |
1c2f4147e9c1a7505fdb20d1f991ede7ff4c2812 | 7,766 | py | Python | F29.BioEntity/WebAPI/biophens_api.py | foundation29org/F29.BioEntity | 531947fb85465f363e63e268b9e3ca17283d76dd | [
"MIT"
] | null | null | null | F29.BioEntity/WebAPI/biophens_api.py | foundation29org/F29.BioEntity | 531947fb85465f363e63e268b9e3ca17283d76dd | [
"MIT"
] | null | null | null | F29.BioEntity/WebAPI/biophens_api.py | foundation29org/F29.BioEntity | 531947fb85465f363e63e268b9e3ca17283d76dd | [
"MIT"
] | null | null | null | from flask import current_app, request, make_response, jsonify
from flask_restplus import Resource
from ._api import *
'''
ALL
'''
@API.route('/diseases/<string:lan>/all')
class diseases_all(Resource):
def get(self, lan):
bio = get_bio_phens(lan)
res = bio.all_conds()
return jsonify(res)
@API.route('/phenotypes/<string:lan>/all')
class phenotypes_all(Resource):
def get(self, lan):
bio = get_bio_phens(lan)
res = bio.all_phens()
return jsonify(res)
'''
Obsolete
'''
@API.route('/diseases/<string:lan>/obsolete')
class diseases_obsolete(Resource):
def get(self, lan):
bio = get_bio_phens(lan)
res = bio.all_obsolete_conds()
return jsonify(res)
@API.route('/phenotypes/<string:lan>/obsolete')
class phenotypes_obsolete(Resource):
def get(self, lan):
bio = get_bio_phens(lan)
res = bio.all_obsolete_phens()
return jsonify(res)
'''
Diseases / Phenotypes
'''
@API.route('/diseases/<string:lan>/<string:ids>')
@API.param('ids', 'Disease IDs')
@API.doc(params={
'depth': {'description': 'depth', 'in': 'query', 'default': 0},
'include_obsolete': {'description': 'if True, include obsolete terms', 'in': 'query', 'type': 'bool', 'default': False}
})
class describe_diseases(Resource):
def get(self, ids, lan):
ids = [id.strip() for id in ids.split(',')]
depth = int(request.args.get('depth') or -1)
include_obsolete = str(request.args.get('include_obsolete')).lower() == 'true'
bio = get_bio_phens(lan)
res = bio.describe_conds(ids, depth=depth, include_obsolete=include_obsolete)
return jsonify(res)
@API.route('/diseases/<string:lan>')
@API.doc(params={
'depth': {'description': 'depth', 'in': 'query', 'default': 0},
'include_obsolete': {'description': 'if True, include obsolete terms', 'in': 'query', 'type': 'bool', 'default': False}
})
class describe_diseases_post(Resource):
def post(self, lan):
ids = json.loads(request.data)
depth = int(request.args.get('depth') or -1)
include_obsolete = str(request.args.get('include_obsolete')).lower() == 'true'
bio = get_bio_phens(lan)
res = bio.describe_conds(ids, depth=depth, include_obsolete=include_obsolete)
return jsonify(res)
@API.route('/phenotypes/<string:lan>/<string:ids>')
@API.param('ids', 'Phenotype IDs')
@API.doc(params={
'depth': {'description': 'depth', 'in': 'query', 'default': 0},
'include_obsolete': {'description': 'if True, include obsolete terms', 'in': 'query', 'type': 'bool', 'default': False}
})
class describe_phenotypes(Resource):
def get(self, ids, lan):
ids = [id.strip() for id in ids.split(',')]
depth = int(request.args.get('depth') or -1)
include_obsolete = str(request.args.get('include_obsolete')).lower() == 'true'
bio = get_bio_phens(lan)
res = bio.describe_phens(ids, depth=depth, include_obsolete=include_obsolete)
return jsonify(res)
@API.route('/phenotypes/<string:lan>')
@API.doc(params={
'depth': {'description': 'depth', 'in': 'query', 'default': 0},
'include_obsolete': {'description': 'if True, include obsolete terms', 'in': 'query', 'type': 'bool', 'default': False}
})
class describe_phenotypes_post(Resource):
def post(self, lan):
ids = json.loads(request.data)
depth = int(request.args.get('depth') or -1)
include_obsolete = str(request.args.get('include_obsolete')).lower() == 'true'
bio = get_bio_phens(lan)
res = bio.describe_phens(ids, depth=depth, include_obsolete=include_obsolete)
return jsonify(res)
'''
Disease_Phenotypes
'''
@API.route('/disease/phenotypes/<string:lan>/tree/<string:ids>')
@API.param('ids', 'i.e: MONDO:0007299, OMIM:607208')
@API.doc(params={
'depth': {'description': 'depth', 'in': 'query', 'default': 0},
'obsolete_action': {'description': "strategy with obsolete terms: ('show', 'hide', 'replace')", 'in': 'query', 'type': 'bool', 'default': 'replace'}
})
class disease_phenotypes_tree(Resource):
def get(self, ids, lan):
ids = [id.strip() for id in ids.split(',')]
depth = int(request.args.get('depth') or -1)
obsolete_action = request.args.get('depth') or 'replace'
bio = get_bio_phens(lan)
res = bio.conditions_phens_recursive(ids, depth=depth, obsolete_action=obsolete_action)
return jsonify(res)
@API.route('/disease/phenotypes/<string:lan>/tree')
@API.doc(params={
'depth': {'description': 'depth', 'in': 'query', 'default': 0},
'obsolete_action': {'description': "strategy with obsolete terms: ('show', 'hide', 'replace')", 'in': 'query', 'type': 'bool', 'default': 'replace'}
})
class disease_phenotypes_tree_post(Resource):
def post(self, lan):
ids = json.loads(request.data)
depth = int(request.args.get('depth') or -1)
obsolete_action = request.args.get('depth') or 'replace'
bio = get_bio_phens(lan)
res = bio.conditions_phens_recursive(ids, depth=depth, obsolete_action=obsolete_action)
return jsonify(res)
@API.route('/disease/phenotypes/<string:lan>/graph/<string:ids>')
@API.param('ids', 'i.e: MONDO:0007299, OMIM:607208')
class disease_phenotypes_graph(Resource):
def get(self, ids, lan):
ids = [id.strip() for id in ids.split(',')]
bio = get_bio_phens(lan)
G = bio.conditions_phens_graph(ids)
L = js.node_link_data(G)
return jsonify(L)
@API.route('/disease/phenotypes/<string:lan>/graph')
class disease_phenotypes_graph_post(Resource):
def post(self, lan):
ids = json.loads(request.data)
bio = get_bio_phens(lan)
G = bio.conditions_phens_graph(ids)
L = js.node_link_data(G)
return jsonify(L)
'''
Groups
'''
@API.route('/phenotype/groups/<string:lan>/all')
class groups(Resource):
def get(self, lan):
bio = get_bio_phens(lan)
res = bio.describe_groups()
return jsonify(res)
@API.route('/phenotype/groups/<string:lan>/<string:ids>')
@API.param('ids', 'Phenotype IDs')
class groups(Resource):
def get(self, ids, lan):
ids = [id.strip() for id in ids.split(',')]
empty = request.args.get('includeEmpty') or 'false'
empty = empty.lower() == 'true'
bio = get_bio_phens(lan)
res = bio.group_phens(ids, empty)
return jsonify(res)
@API.route('/phenotype/groups/<string:lan>')
class groups_post(Resource):
def post(self, lan):
ids = json.loads(request.data)
empty = request.args.get('includeEmpty') or 'false'
empty = empty.lower() == 'true'
bio = get_bio_phens(lan)
res = bio.group_phens(ids, empty)
return jsonify(res)
'''
Phenotype Leaves
'''
@API.route('/phenotype/leaves')
class phenotype_leaves_post(Resource):
def post(self):
dic = json.loads(request.data)
depth = int(request.args.get('depth') or -1)
bio = get_bio_phens('en')
lea = PhenLeaves(bio)
res = lea.leaves(dic, depth)
return jsonify(res)
'''
Common Ancestor
'''
@API.route('/common_ancestor/<string:ids>')
@API.param('ids', 'i.e: HP:0000002, HP:0000003')
class common_ancestor(Resource):
def get(self, ids):
ids = [id.strip() for id in ids.split(',')]
bio = get_bio_phens('en')
G = bio.common_ancestor(ids)
L = js.node_link_data(G)
return jsonify(L)
@API.route('/common_ancestor')
class common_ancestor_post(Resource):
def post(self):
ids = json.loads(request.data)
bio = get_bio_phens('en')
G = bio.common_ancestor(ids)
L = js.node_link_data(G)
return jsonify(L)
| 36.805687 | 152 | 0.631599 | from flask import current_app, request, make_response, jsonify
from flask_restplus import Resource
from ._api import *
@API.route('/diseases/<string:lan>/all')
class diseases_all(Resource):
def get(self, lan):
bio = get_bio_phens(lan)
res = bio.all_conds()
return jsonify(res)
@API.route('/phenotypes/<string:lan>/all')
class phenotypes_all(Resource):
def get(self, lan):
bio = get_bio_phens(lan)
res = bio.all_phens()
return jsonify(res)
@API.route('/diseases/<string:lan>/obsolete')
class diseases_obsolete(Resource):
def get(self, lan):
bio = get_bio_phens(lan)
res = bio.all_obsolete_conds()
return jsonify(res)
@API.route('/phenotypes/<string:lan>/obsolete')
class phenotypes_obsolete(Resource):
def get(self, lan):
bio = get_bio_phens(lan)
res = bio.all_obsolete_phens()
return jsonify(res)
@API.route('/diseases/<string:lan>/<string:ids>')
@API.param('ids', 'Disease IDs')
@API.doc(params={
'depth': {'description': 'depth', 'in': 'query', 'default': 0},
'include_obsolete': {'description': 'if True, include obsolete terms', 'in': 'query', 'type': 'bool', 'default': False}
})
class describe_diseases(Resource):
def get(self, ids, lan):
ids = [id.strip() for id in ids.split(',')]
depth = int(request.args.get('depth') or -1)
include_obsolete = str(request.args.get('include_obsolete')).lower() == 'true'
bio = get_bio_phens(lan)
res = bio.describe_conds(ids, depth=depth, include_obsolete=include_obsolete)
return jsonify(res)
@API.route('/diseases/<string:lan>')
@API.doc(params={
'depth': {'description': 'depth', 'in': 'query', 'default': 0},
'include_obsolete': {'description': 'if True, include obsolete terms', 'in': 'query', 'type': 'bool', 'default': False}
})
class describe_diseases_post(Resource):
def post(self, lan):
ids = json.loads(request.data)
depth = int(request.args.get('depth') or -1)
include_obsolete = str(request.args.get('include_obsolete')).lower() == 'true'
bio = get_bio_phens(lan)
res = bio.describe_conds(ids, depth=depth, include_obsolete=include_obsolete)
return jsonify(res)
@API.route('/phenotypes/<string:lan>/<string:ids>')
@API.param('ids', 'Phenotype IDs')
@API.doc(params={
'depth': {'description': 'depth', 'in': 'query', 'default': 0},
'include_obsolete': {'description': 'if True, include obsolete terms', 'in': 'query', 'type': 'bool', 'default': False}
})
class describe_phenotypes(Resource):
def get(self, ids, lan):
ids = [id.strip() for id in ids.split(',')]
depth = int(request.args.get('depth') or -1)
include_obsolete = str(request.args.get('include_obsolete')).lower() == 'true'
bio = get_bio_phens(lan)
res = bio.describe_phens(ids, depth=depth, include_obsolete=include_obsolete)
return jsonify(res)
@API.route('/phenotypes/<string:lan>')
@API.doc(params={
'depth': {'description': 'depth', 'in': 'query', 'default': 0},
'include_obsolete': {'description': 'if True, include obsolete terms', 'in': 'query', 'type': 'bool', 'default': False}
})
class describe_phenotypes_post(Resource):
def post(self, lan):
ids = json.loads(request.data)
depth = int(request.args.get('depth') or -1)
include_obsolete = str(request.args.get('include_obsolete')).lower() == 'true'
bio = get_bio_phens(lan)
res = bio.describe_phens(ids, depth=depth, include_obsolete=include_obsolete)
return jsonify(res)
@API.route('/disease/phenotypes/<string:lan>/tree/<string:ids>')
@API.param('ids', 'i.e: MONDO:0007299, OMIM:607208')
@API.doc(params={
'depth': {'description': 'depth', 'in': 'query', 'default': 0},
'obsolete_action': {'description': "strategy with obsolete terms: ('show', 'hide', 'replace')", 'in': 'query', 'type': 'bool', 'default': 'replace'}
})
class disease_phenotypes_tree(Resource):
def get(self, ids, lan):
ids = [id.strip() for id in ids.split(',')]
depth = int(request.args.get('depth') or -1)
obsolete_action = request.args.get('depth') or 'replace'
bio = get_bio_phens(lan)
res = bio.conditions_phens_recursive(ids, depth=depth, obsolete_action=obsolete_action)
return jsonify(res)
@API.route('/disease/phenotypes/<string:lan>/tree')
@API.doc(params={
'depth': {'description': 'depth', 'in': 'query', 'default': 0},
'obsolete_action': {'description': "strategy with obsolete terms: ('show', 'hide', 'replace')", 'in': 'query', 'type': 'bool', 'default': 'replace'}
})
class disease_phenotypes_tree_post(Resource):
def post(self, lan):
ids = json.loads(request.data)
depth = int(request.args.get('depth') or -1)
obsolete_action = request.args.get('depth') or 'replace'
bio = get_bio_phens(lan)
res = bio.conditions_phens_recursive(ids, depth=depth, obsolete_action=obsolete_action)
return jsonify(res)
@API.route('/disease/phenotypes/<string:lan>/graph/<string:ids>')
@API.param('ids', 'i.e: MONDO:0007299, OMIM:607208')
class disease_phenotypes_graph(Resource):
def get(self, ids, lan):
ids = [id.strip() for id in ids.split(',')]
bio = get_bio_phens(lan)
G = bio.conditions_phens_graph(ids)
L = js.node_link_data(G)
return jsonify(L)
@API.route('/disease/phenotypes/<string:lan>/graph')
class disease_phenotypes_graph_post(Resource):
def post(self, lan):
ids = json.loads(request.data)
bio = get_bio_phens(lan)
G = bio.conditions_phens_graph(ids)
L = js.node_link_data(G)
return jsonify(L)
@API.route('/phenotype/groups/<string:lan>/all')
class groups(Resource):
def get(self, lan):
bio = get_bio_phens(lan)
res = bio.describe_groups()
return jsonify(res)
@API.route('/phenotype/groups/<string:lan>/<string:ids>')
@API.param('ids', 'Phenotype IDs')
class groups(Resource):
def get(self, ids, lan):
ids = [id.strip() for id in ids.split(',')]
empty = request.args.get('includeEmpty') or 'false'
empty = empty.lower() == 'true'
bio = get_bio_phens(lan)
res = bio.group_phens(ids, empty)
return jsonify(res)
@API.route('/phenotype/groups/<string:lan>')
class groups_post(Resource):
def post(self, lan):
ids = json.loads(request.data)
empty = request.args.get('includeEmpty') or 'false'
empty = empty.lower() == 'true'
bio = get_bio_phens(lan)
res = bio.group_phens(ids, empty)
return jsonify(res)
@API.route('/phenotype/leaves')
class phenotype_leaves_post(Resource):
def post(self):
dic = json.loads(request.data)
depth = int(request.args.get('depth') or -1)
bio = get_bio_phens('en')
lea = PhenLeaves(bio)
res = lea.leaves(dic, depth)
return jsonify(res)
@API.route('/common_ancestor/<string:ids>')
@API.param('ids', 'i.e: HP:0000002, HP:0000003')
class common_ancestor(Resource):
def get(self, ids):
ids = [id.strip() for id in ids.split(',')]
bio = get_bio_phens('en')
G = bio.common_ancestor(ids)
L = js.node_link_data(G)
return jsonify(L)
@API.route('/common_ancestor')
class common_ancestor_post(Resource):
def post(self):
ids = json.loads(request.data)
bio = get_bio_phens('en')
G = bio.common_ancestor(ids)
L = js.node_link_data(G)
return jsonify(L)
| true | true |
1c2f414d2dae9da8fdc876b9da180bef3242e346 | 1,027 | py | Python | test/num_one_forms_bracket.py | mevangelista-alvarado/NumericalPoissonGrometry- | 76f41be4eb11248c3206b5e371c7aa9eb9d73b44 | [
"MIT"
] | 7 | 2020-10-30T22:49:49.000Z | 2021-07-15T20:56:13.000Z | test/num_one_forms_bracket.py | mevangelista-alvarado/NumericalPoissonGrometry- | 76f41be4eb11248c3206b5e371c7aa9eb9d73b44 | [
"MIT"
] | 2 | 2020-11-13T19:01:28.000Z | 2021-03-24T04:55:34.000Z | test/num_one_forms_bracket.py | mevangelista-alvarado/NumericalPoissonGrometry- | 76f41be4eb11248c3206b5e371c7aa9eb9d73b44 | [
"MIT"
] | 1 | 2021-02-23T05:41:02.000Z | 2021-02-23T05:41:02.000Z | import datetime
import time
import numpy as np
import statistics as stat
from numpoisson.numpoisson import NumPoissonGeometry
npg = NumPoissonGeometry(6, 'x')
P = {(1, 4): 1, (2, 5): 1, (3, 6): 1, (5, 6): 'x2**2'}
alpha = {(5,): 1}
beta = {(6,): 1}
num_one_forms_bracket_res = dict()
j = 2
for mesh_path in ['6Qmesh_10_2.npy', '6Qmesh_10_3.npy' , '6Qmesh_10_4.npy' , '6Qmesh_10_5.npy', '6Qmesh_10_6.npy', '6Qmesh_10_7.npy']:
print(f'step {j}')
tiempos = dict()
with open(mesh_path, 'rb') as f:
mesh = np.load(f)
for k in range(25):
A = datetime.datetime.now()
npg.num_one_form_bracket(P, alpha, beta, mesh, pt_output=True)
B = datetime.datetime.now()
tiempos[k] = (B - A).total_seconds()
promedio = stat.mean(tiempos.values())
desviacion = stat.pstdev(tiempos.values())
tiempos['promedios'] = promedio
tiempos['desviacion'] = desviacion
num_one_forms_bracket_res[f'10**{j}'] = tiempos
j = j + 1
print(num_one_forms_bracket_res)
print('Finish')
| 31.121212 | 134 | 0.64557 | import datetime
import time
import numpy as np
import statistics as stat
from numpoisson.numpoisson import NumPoissonGeometry
npg = NumPoissonGeometry(6, 'x')
P = {(1, 4): 1, (2, 5): 1, (3, 6): 1, (5, 6): 'x2**2'}
alpha = {(5,): 1}
beta = {(6,): 1}
num_one_forms_bracket_res = dict()
j = 2
for mesh_path in ['6Qmesh_10_2.npy', '6Qmesh_10_3.npy' , '6Qmesh_10_4.npy' , '6Qmesh_10_5.npy', '6Qmesh_10_6.npy', '6Qmesh_10_7.npy']:
print(f'step {j}')
tiempos = dict()
with open(mesh_path, 'rb') as f:
mesh = np.load(f)
for k in range(25):
A = datetime.datetime.now()
npg.num_one_form_bracket(P, alpha, beta, mesh, pt_output=True)
B = datetime.datetime.now()
tiempos[k] = (B - A).total_seconds()
promedio = stat.mean(tiempos.values())
desviacion = stat.pstdev(tiempos.values())
tiempos['promedios'] = promedio
tiempos['desviacion'] = desviacion
num_one_forms_bracket_res[f'10**{j}'] = tiempos
j = j + 1
print(num_one_forms_bracket_res)
print('Finish')
| true | true |
1c2f41f14f2035b26a2ad8d5b1f03ba69975cbb2 | 905 | py | Python | ad_manager/utils.py | mhulse/django-ad-manager | c1335d2e304e20dd84ad766d57fcccbfbae60dd8 | [
"Apache-2.0"
] | 1 | 2015-11-08T08:58:10.000Z | 2015-11-08T08:58:10.000Z | ad_manager/utils.py | registerguard/django-ad-manager | fb94bc49388f01a95ee4b1065bd4d0589725d048 | [
"Apache-2.0"
] | null | null | null | ad_manager/utils.py | registerguard/django-ad-manager | fb94bc49388f01a95ee4b1065bd4d0589725d048 | [
"Apache-2.0"
] | null | null | null | from django import http
from django.template import loader
from django.template import RequestContext
# https://bitbucket.org/chris1610/satchmo/src/7e5842d3c520/satchmo/apps/satchmo_store/shop/templates/404.html?at=default
def bad_or_missing(request, msg):
"""
Return an HTTP 404 response for a date request that cannot possibly exist.
The 'msg' parameter gives the message for the main panel on the page.
"""
if request.is_ajax():
resp = http.HttpResponse()
resp.status_code = 404
resp.content = {
'message': msg
}
return resp
else:
template = loader.get_template('ad_manager/404.html')
context = RequestContext(request, {
'message': msg,
})
return http.HttpResponseNotFound(template.render(context)) | 26.617647 | 120 | 0.609945 | from django import http
from django.template import loader
from django.template import RequestContext
def bad_or_missing(request, msg):
if request.is_ajax():
resp = http.HttpResponse()
resp.status_code = 404
resp.content = {
'message': msg
}
return resp
else:
template = loader.get_template('ad_manager/404.html')
context = RequestContext(request, {
'message': msg,
})
return http.HttpResponseNotFound(template.render(context)) | true | true |
1c2f421cc5afd3815f6ae661773e0d4e6b555263 | 830 | py | Python | src/y0/cli.py | y0-causal-inference/y0 | 3a84c35c78e79d502570510109fd0dc919ad1bfd | [
"BSD-3-Clause"
] | 8 | 2021-02-09T22:47:11.000Z | 2022-03-29T17:26:52.000Z | src/y0/cli.py | altdeep/y0 | 3e9e8d47b08b51f64216000db31d8f4c0fd388a3 | [
"BSD-3-Clause"
] | 61 | 2021-01-11T20:03:01.000Z | 2022-03-19T20:09:20.000Z | src/y0/cli.py | y0-causal-inference/y0 | 3a84c35c78e79d502570510109fd0dc919ad1bfd | [
"BSD-3-Clause"
] | 4 | 2021-01-12T01:19:36.000Z | 2021-12-01T04:02:20.000Z | # -*- coding: utf-8 -*-
"""Command line interface for :mod:`y0`.
Why does this file exist, and why not put this in ``__main__``? You might be tempted to import things from ``__main__``
later, but that will cause problems--the code will get executed twice:
- When you run ``python3 -m y0`` python will execute``__main__.py`` as a script.
That means there won't be any ``y0.__main__`` in ``sys.modules``.
- When you import __main__ it will get executed again (as a module) because
there's no ``y0.__main__`` in ``sys.modules``.
.. seealso:: https://click.palletsprojects.com/en/7.x/setuptools/#setuptools-integration
"""
import logging
import click
__all__ = ["main"]
logger = logging.getLogger(__name__)
@click.group()
@click.version_option()
def main():
"""CLI for y0."""
if __name__ == "__main__":
main()
| 25.151515 | 119 | 0.691566 |
import logging
import click
__all__ = ["main"]
logger = logging.getLogger(__name__)
@click.group()
@click.version_option()
def main():
if __name__ == "__main__":
main()
| true | true |
1c2f4264208bb43bc506d2cfa41c00bf63d87532 | 2,589 | py | Python | python/mediapipe/facemesh.py | jeremiedecock/snippets | 4bd4e7f459eee610d5cf19f845299ca942ff4b64 | [
"MIT"
] | 23 | 2015-06-08T13:01:00.000Z | 2021-12-30T08:20:04.000Z | python/mediapipe/facemesh.py | jeremiedecock/snippets | 4bd4e7f459eee610d5cf19f845299ca942ff4b64 | [
"MIT"
] | 1 | 2020-10-22T02:36:10.000Z | 2020-10-22T02:36:10.000Z | python/mediapipe/facemesh.py | jeremiedecock/snippets | 4bd4e7f459eee610d5cf19f845299ca942ff4b64 | [
"MIT"
] | 7 | 2017-10-31T09:48:14.000Z | 2022-01-04T15:59:45.000Z | #!/usr/bin/env python3
# Src: https://google.github.io/mediapipe/solutions/face_mesh#python-solution-api
import cv2
import mediapipe as mp
mp_drawing = mp.solutions.drawing_utils
mp_face_mesh = mp.solutions.face_mesh
# For static images:
IMAGE_FILES = []
drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)
with mp_face_mesh.FaceMesh(
static_image_mode=True,
max_num_faces=1,
min_detection_confidence=0.5) as face_mesh:
for idx, file in enumerate(IMAGE_FILES):
image = cv2.imread(file)
# Convert the BGR image to RGB before processing.
results = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
# Print and draw face mesh landmarks on the image.
if not results.multi_face_landmarks:
continue
annotated_image = image.copy()
for face_landmarks in results.multi_face_landmarks:
print('face_landmarks:', face_landmarks)
mp_drawing.draw_landmarks(
image=annotated_image,
landmark_list=face_landmarks,
connections=mp_face_mesh.FACE_CONNECTIONS,
landmark_drawing_spec=drawing_spec,
connection_drawing_spec=drawing_spec)
cv2.imwrite('/tmp/annotated_image' + str(idx) + '.png', annotated_image)
# For webcam input:
drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)
cap = cv2.VideoCapture(0)
with mp_face_mesh.FaceMesh(
min_detection_confidence=0.5,
min_tracking_confidence=0.5) as face_mesh:
while cap.isOpened():
success, image = cap.read()
if not success:
print("Ignoring empty camera frame.")
# If loading a video, use 'break' instead of 'continue'.
continue
# Flip the image horizontally for a later selfie-view display, and convert
# the BGR image to RGB.
image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
# To improve performance, optionally mark the image as not writeable to
# pass by reference.
image.flags.writeable = False
results = face_mesh.process(image)
# Draw the face mesh annotations on the image.
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
if results.multi_face_landmarks:
for face_landmarks in results.multi_face_landmarks:
mp_drawing.draw_landmarks(
image=image,
landmark_list=face_landmarks,
connections=mp_face_mesh.FACE_CONNECTIONS,
landmark_drawing_spec=drawing_spec,
connection_drawing_spec=drawing_spec)
cv2.imshow('MediaPipe FaceMesh', image)
if cv2.waitKey(5) & 0xFF == 27:
break
cap.release()
| 35.958333 | 81 | 0.71881 |
mediapipe as mp
mp_drawing = mp.solutions.drawing_utils
mp_face_mesh = mp.solutions.face_mesh
IMAGE_FILES = []
drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)
with mp_face_mesh.FaceMesh(
static_image_mode=True,
max_num_faces=1,
min_detection_confidence=0.5) as face_mesh:
for idx, file in enumerate(IMAGE_FILES):
image = cv2.imread(file)
results = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
if not results.multi_face_landmarks:
continue
annotated_image = image.copy()
for face_landmarks in results.multi_face_landmarks:
print('face_landmarks:', face_landmarks)
mp_drawing.draw_landmarks(
image=annotated_image,
landmark_list=face_landmarks,
connections=mp_face_mesh.FACE_CONNECTIONS,
landmark_drawing_spec=drawing_spec,
connection_drawing_spec=drawing_spec)
cv2.imwrite('/tmp/annotated_image' + str(idx) + '.png', annotated_image)
drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)
cap = cv2.VideoCapture(0)
with mp_face_mesh.FaceMesh(
min_detection_confidence=0.5,
min_tracking_confidence=0.5) as face_mesh:
while cap.isOpened():
success, image = cap.read()
if not success:
print("Ignoring empty camera frame.")
continue
image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
image.flags.writeable = False
results = face_mesh.process(image)
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
if results.multi_face_landmarks:
for face_landmarks in results.multi_face_landmarks:
mp_drawing.draw_landmarks(
image=image,
landmark_list=face_landmarks,
connections=mp_face_mesh.FACE_CONNECTIONS,
landmark_drawing_spec=drawing_spec,
connection_drawing_spec=drawing_spec)
cv2.imshow('MediaPipe FaceMesh', image)
if cv2.waitKey(5) & 0xFF == 27:
break
cap.release()
| true | true |
1c2f4272084c82989e16b417f3133fb32a81846e | 1,843 | py | Python | setup.py | pkkid/django-redsocks | 615805c6b99aeedd59a2a66ea3bd25f7b606b968 | [
"MIT"
] | null | null | null | setup.py | pkkid/django-redsocks | 615805c6b99aeedd59a2a66ea3bd25f7b606b968 | [
"MIT"
] | null | null | null | setup.py | pkkid/django-redsocks | 615805c6b99aeedd59a2a66ea3bd25f7b606b968 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from setuptools import setup, find_packages
from redsocks import __version__
try:
from pypandoc import convert
except ImportError:
import io
def convert(filename, fmt):
with io.open(filename, encoding='utf-8') as fd:
return fd.read()
DESCRIPTION = 'Websocket support for Django using Redis as datastore'
CLASSIFIERS = [
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.5',
'Framework :: Django :: 1.6',
'Framework :: Django :: 1.7',
'Framework :: Django :: 1.8',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Development Status :: 4 - Beta',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
]
setup(
name='django-redsocks',
version=__version__,
author='Jacob Rief',
author_email='jacob.rief@gmail.com',
description=DESCRIPTION,
long_description=convert('README.md', 'rst'),
url='https://github.com/mjs7231/django-redsocks',
license='MIT',
keywords=['django', 'websocket', 'redis'],
platforms=['OS Independent'],
classifiers=CLASSIFIERS,
packages=find_packages(exclude=['examples', 'docs']),
include_package_data=True,
install_requires=[
'setuptools',
'redis',
'gevent',
'greenlet',
'six',
],
extras_require={
'uwsgi': ['uWSGI>=1.9.20'],
'wsaccel': ['wsaccel>=0.6.2'],
'django-redis-sessions': ['django-redis-sessions>=0.4.0'],
},
zip_safe=False,
)
| 28.796875 | 69 | 0.625068 |
from __future__ import unicode_literals
from setuptools import setup, find_packages
from redsocks import __version__
try:
from pypandoc import convert
except ImportError:
import io
def convert(filename, fmt):
with io.open(filename, encoding='utf-8') as fd:
return fd.read()
DESCRIPTION = 'Websocket support for Django using Redis as datastore'
CLASSIFIERS = [
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.5',
'Framework :: Django :: 1.6',
'Framework :: Django :: 1.7',
'Framework :: Django :: 1.8',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Development Status :: 4 - Beta',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
]
setup(
name='django-redsocks',
version=__version__,
author='Jacob Rief',
author_email='jacob.rief@gmail.com',
description=DESCRIPTION,
long_description=convert('README.md', 'rst'),
url='https://github.com/mjs7231/django-redsocks',
license='MIT',
keywords=['django', 'websocket', 'redis'],
platforms=['OS Independent'],
classifiers=CLASSIFIERS,
packages=find_packages(exclude=['examples', 'docs']),
include_package_data=True,
install_requires=[
'setuptools',
'redis',
'gevent',
'greenlet',
'six',
],
extras_require={
'uwsgi': ['uWSGI>=1.9.20'],
'wsaccel': ['wsaccel>=0.6.2'],
'django-redis-sessions': ['django-redis-sessions>=0.4.0'],
},
zip_safe=False,
)
| true | true |
1c2f42a28ebb4e804087a5088d68bb37cc17aec5 | 14,991 | py | Python | ds4se/ds/prediction/eval/traceability.py | rmclanton/ds4se | d9e1cf771a66478ac99c5341dbfeddbbf0abe5b2 | [
"Apache-2.0"
] | null | null | null | ds4se/ds/prediction/eval/traceability.py | rmclanton/ds4se | d9e1cf771a66478ac99c5341dbfeddbbf0abe5b2 | [
"Apache-2.0"
] | null | null | null | ds4se/ds/prediction/eval/traceability.py | rmclanton/ds4se | d9e1cf771a66478ac99c5341dbfeddbbf0abe5b2 | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/9.0_ds.prediction.eval.traceability.ipynb (unless otherwise specified).
__all__ = ['SupervisedVectorEvaluation', 'ManifoldEntropy']
# Cell
from prg import prg
# Cell
import ds4se as ds
from ....mining.ir import VectorizationType
from ....mining.ir import SimilarityMetric
from ....mining.ir import EntropyMetric
from ....mining.ir import DistanceMetric
# Cell
#Description importation
from ...description.eval.traceability import VectorEvaluation
# Cell
import numpy as np
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
# Cell
import gensim
import pandas as pd
from itertools import product
from random import sample
import functools
import os
from enum import Enum, unique, auto
# Cell
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import plot_precision_recall_curve
from sklearn.metrics import auc
import math as m
import random as r
import collections
from sklearn.metrics.pairwise import cosine_similarity
import seaborn as sns
# Cell
from scipy.spatial import distance
from scipy.stats import pearsonr
# Cell
from sklearn.metrics import average_precision_score
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import confusion_matrix
# Cell
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
# Cell
class SupervisedVectorEvaluation(VectorEvaluation):
def __init__(self, params):
super().__init__(params)
self.sys = params['system']
#Word2vec
similarities_w2v = self.sim_list_w2v + ['Linked?']
similarities_w2v = [str(i) for i in similarities_w2v]
self.df_filtered_w2v = self.df_w2v.copy()
self.df_filtered_w2v = self.df_filtered_w2v[similarities_w2v]
self.df_filtered_w2v = self.df_filtered_w2v[~self.df_filtered_w2v.isin([np.nan, np.inf, -np.inf]).any(1)]
#Doc2vec
similarities_d2v = self.sim_list_d2v + ['Linked?']
similarities_d2v = [str(i) for i in similarities_d2v]
self.df_filtered_d2v = self.df_d2v.copy()
self.df_filtered_d2v = self.df_filtered_d2v[similarities_d2v]
self.df_filtered_d2v = self.df_filtered_d2v[~self.df_filtered_d2v.isin([np.nan, np.inf, -np.inf]).any(1)]
def vecTypeVerification(self, vecType= VectorizationType.word2vec):
if vecType == VectorizationType.word2vec:
self.sim_list = self.sim_list_w2v
y_test = self.df_filtered_w2v['Linked?'].values
y_score = [self.df_filtered_w2v[ str(sim) ].values for sim in self.sim_list]
logging.info('Vectorization: ' + str(vecType) )
elif vecType == VectorizationType.doc2vec:
self.sim_list = self.sim_list_d2v
y_test = self.df_filtered_d2v['Linked?'].values
y_score = [self.df_filtered_d2v[ str(sim) ].values for sim in self.sim_list]
logging.info('Vectorization: ' + str(vecType) )
return y_test,y_score
def vecTypeVerificationSim(self, vecType= VectorizationType.word2vec,sim=SimilarityMetric.SCM_sim):
if vecType == VectorizationType.word2vec:
self.sim_list = self.sim_list_w2v
y_test = self.df_filtered_w2v['Linked?'].values
y_score = self.df_filtered_w2v[ str(sim) ].values
logging.info('Vectorization: ' + str(vecType) + " " + str(sim))
elif vecType == VectorizationType.doc2vec:
self.sim_list = self.sim_list_d2v
y_test = self.df_filtered_d2v['Linked?'].values
y_score = self.df_filtered_d2v[ str(sim) ].values
logging.info('Vectorization: ' + str(vecType) + " " + str(sim))
return y_test,y_score
def Compute_precision_recall_gain(self, vecType = VectorizationType.word2vec, sim=SimilarityMetric.SCM_sim):
'''One might choose PRG if there is little interest in identifying false negatives '''
y_test,y_score = self.vecTypeVerificationSim(vecType=vecType, sim=sim)
fig = go.Figure(layout_yaxis_range=[-0.05,1.02],layout_xaxis_range=[-0.05,1.02])
prg_curve = prg.create_prg_curve(y_test, y_score)
indices = np.arange(np.argmax(prg_curve['in_unit_square']) - 1,
len(prg_curve['in_unit_square']))
pg = prg_curve['precision_gain']
rg = prg_curve['recall_gain']
fig.add_trace(go.Scatter(x=rg[indices], y=pg[indices],
line = dict(color="cyan", width=2,dash="solid")))
indices = np.logical_or(prg_curve['is_crossing'],
prg_curve['in_unit_square'])
fig.add_trace(go.Scatter(x=rg[indices], y=pg[indices],
line = dict(color="blue", width=2,dash="solid")))
indices = np.logical_and(prg_curve['in_unit_square'],
True - prg_curve['is_crossing'])
fig.add_trace(go.Scatter(x=rg[indices], y=pg[indices],mode='markers'))
valid_points = np.logical_and( ~ np.isnan(rg), ~ np.isnan(pg))
upper_hull = prg.convex_hull(zip(rg[valid_points],pg[valid_points]))
rg_hull, pg_hull = zip(*upper_hull)
fig.add_trace(go.Scatter(x=rg_hull, y=pg_hull, mode = "lines",
line = dict(color="red", width=2,dash="dash")))
auprg = prg.calc_auprg(prg_curve)
logging.info('auprg: %.3f' % auprg)
logging.info("compute_precision_recall_gain Complete: "+str(sim))
fig.update_layout(
title=self.sys + "-[" + str(sim) + "]",
height = 600,
width = 600,
xaxis_title='Recall Gain',
xaxis = dict(
tickmode = 'linear',
tick0 = 0,
dtick = 0.25),
yaxis_title='Precision Gain',
yaxis = dict(
tickmode = 'linear',
tick0 = 0,
dtick = 0.25)
)
fig.update_yaxes(
scaleanchor = "x",
scaleratio = 1,
)
return fig
def Compute_avg_precision(self, vecType = VectorizationType.word2vec):
'''Generated precision-recall curve enhanced'''
y_test,y_score = self.vecTypeVerification(vecType=vecType)
linestyles = ['solid','dash','dashdot','dotted']
color = 'red'
# calculate the no skill line as the proportion of the positive class
no_skill = len(y_test[y_test==1]) / len(y_test)
fig = go.Figure()
fig.add_trace(go.Scatter(x=[0, 1], y=[no_skill, no_skill], name='No Skill [{0:0.2f}]'.format(no_skill), mode = "lines",
line = dict(color='red', width=.5, dash='dash')))
for count,sim in enumerate(self.sim_list):
precision, recall, _ = precision_recall_curve(y_test, y_score[count]) #compute precision-recall curve
average_precision = average_precision_score(y_test, y_score[count])
auc_score = auc(recall, precision)
logging.info('Average precision-recall score: {0:0.2f}'.format(average_precision))
logging.info('Precision-Recall AUC: %.2f' % auc_score)
fig.add_trace(go.Scatter(x=recall, y=precision, name=str(sim.name)+' [auc:{0:0.2f}]'.format(auc_score),
line = dict(color=color, width=1, dash=linestyles[count])))
##AUC
color = 'blue'
fig.add_trace(go.Scatter(x=[0, 1], y=[0, 1], name='No Skill', mode = "lines",
line = dict(color='blue', width=.5, dash='dot')))
for count,sim in enumerate(self.sim_list):
fpr, tpr, _ = roc_curve(y_test, y_score[count]) #compute roc curve
roc_auc = roc_auc_score(y_test, y_score[count])
logging.info('ROC AUC %.2f' % roc_auc)
fig.add_trace(go.Scatter(x=fpr, y=tpr, name=str(sim.name)+' [auc:{0:0.2f}]'.format(roc_auc),
line = dict(color=color, width=1, dash=linestyles[count])))
fig.update_layout(
title=self.sys + "-[" + str(vecType) + "]",
xaxis_title='recall [fpr]',
yaxis_title='tpr')
return fig
def Compute_avg_precision_same_plot(self, vecType = VectorizationType.word2vec):
'''Generated precision-recall curve'''
linestyles = ['solid','dash','dashdot','dotted']
fig = go.Figure()
color = 'red'
y_test,y_score = self.vecTypeVerification(vecType=vecType)
# calculate the no skill line as the proportion of the positive class
no_skill = len(y_test[y_test==1]) / len(y_test)
fig.add_trace(go.Scatter(x=[0, 1], y=[no_skill, no_skill], name='No Skill [{0:0.2f}]'.format(no_skill), mode = "lines",
line = dict(color='red', width=.5, dash='dash'))) #reference curve
for count,sim in enumerate(self.sim_list):
precision, recall, _ = precision_recall_curve(y_test, y_score[count]) #compute precision-recall curve
average_precision = average_precision_score(y_test, y_score[count])
auc_score = auc(recall, precision)
logging.info('Average precision-recall score: {0:0.2f}'.format(average_precision))
logging.info('Precision-Recall AUC: %.2f' % auc_score)
fig.add_trace(go.Scatter(x=recall, y=precision, name=str(sim.name)+' [auc:{0:0.2f}]'.format(auc_score),
line = dict(color=color, width=1, dash=linestyles[count]))) #plot model curve
fig.update_layout(
title=self.sys + "-[" + str(vecType) + "]",
xaxis_title='Recall',
yaxis_title='Precision')
return fig
def Compute_roc_curve(self, vecType = VectorizationType.word2vec):
linestyles = ['solid','dash','dashdot','dotted']
fig = go.Figure()
color = 'blue'
y_test,y_score = self.vecTypeVerification(vecType = vecType)
fig.add_trace(go.Scatter(x=[0, 1], y=[0, 1], name='No Skill', mode = "lines",
line = dict(color='blue', width=.5, dash='dot'))) #reference curve
for count,sim in enumerate(self.sim_list):
fpr, tpr, _ = roc_curve(y_test, y_score[count]) #compute roc curve
roc_auc = roc_auc_score(y_test, y_score[count])
logging.info('ROC AUC %.2f' % roc_auc)
fig.add_trace(go.Scatter(x=fpr, y=tpr, name=str(sim.name)+' [auc:{0:0.2f}]'.format(roc_auc),
line = dict(color=color, width=1, dash=linestyles[count]))) #plot model curve #plot model curve
fig.update_layout(
title=self.sys + "-[" + str(vecType) + "]",
xaxis_title='False Positive Rate',
yaxis_title='True Positive Rate')
return fig
def CofusionMatrix(self, vecType = VectorizationType.word2vec):
##TODO This implementatin is incomplete and not verify it yet
y_test,y_score = self.vecTypeVerification(vecType=vecType)
y_score_threshold = [0 if elem<=0.8 else 1 for elem in supevisedEval.y_score] #Hardcoded 0.7 Threshold
#TODO a Variation threshold analysis
tn, fp, fn, tp = confusion_matrix(supevisedEval.y_test, y_score_threshold).ravel()
return tn, fp, fn, tp
# Cell
class ManifoldEntropy(VectorEvaluation):
def __init__(self, params):
super().__init__(params)
self.sharedEntropy_filtered = self.sharedInfo.copy()
self.sharedEntropy_filtered.dropna(inplace=True)
self.sys = params['system']
def minimum_shared_entropy(self,dist = SimilarityMetric.WMD_sim, extropy=False):
'''Minimum Shared Plot'''
ent = EntropyMetric.MSI_I
color = 'dark blue'
if extropy:
ent = EntropyMetric.MSI_X
color = 'red'
columns = [str(i) for i in [ent, dist ]]
corr = self.compute_spearman_corr(self.sharedEntropy_filtered, columns)
logging.info('Correlation {%.2f}' % corr)
fig = px.scatter(self.sharedEntropy_filtered,
x = columns[0], y = columns[1], color_discrete_sequence=[color])
fig.update_layout(
title = self.sys +': ['+ dist.name + '-' + ent.name + '] Correlation {%.2f}' % corr
)
return fig
def manifold_entropy_plot(self, manifold = EntropyMetric.MI, dist = SimilarityMetric.WMD_sim):
'''Manifold Entropy'''
columns = [str(i) for i in [manifold, dist]]
corr = self.compute_spearman_corr(self.manifoldEntropy, columns)
logging.info('Correlation {%.2f}' % corr)
fig = px.scatter(self.manifoldEntropy,
x = columns[0], y = columns[1], color_continuous_scale=["dark blue"])
fig.update_layout(
title = self.sys +': ['+ dist.name + '-' + manifold.name + '] Correlation {%.2f}' % corr
)
return fig
def composable_entropy_plot(self,
manifold_x = EntropyMetric.MI,
manifold_y = EntropyMetric.Loss,
dist = SimilarityMetric.WMD_sim
):
columns = [str(i) for i in [manifold_x, manifold_y, dist]]
if isinstance(dist, str):
title = self.sys +': Information-Semantic Interactions '+ dist
else:
title = self.sys +': Information-Semantic Interactions '+ dist.name
fig = px.scatter(self.manifoldEntropy,x = columns[0], y = columns[1], color = columns[2],
color_continuous_scale=px.colors.sequential.Viridis)
fig.update_layout(
title = title
)
return fig
def composable_shared_plot(self,
manifold_x = EntropyMetric.MSI_I,
manifold_y = EntropyMetric.Loss,
dist = SimilarityMetric.WMD_sim,
drop_na = True
):
columns = [str(i) for i in [manifold_x, manifold_y, dist]]
if isinstance(dist, str):
title = self.sys +': Information-Semantic Interactions '+ dist
else:
title = self.sys +': Information-Semantic Interactions '+ dist.name
df = self.df_w2v
num_na = df.isna().sum().sum()
if drop_na:
df = df.dropna(inplace=False)
fig = px.scatter(df,x = columns[0], y = columns[1], color = columns[2],
color_continuous_scale=px.colors.sequential.Viridis)
fig.update_layout(
title = title
)
return fig, num_na
def compute_spearman_corr(self, filter_metrics_01, columns):
df_correlation = filter_metrics_01.copy()
correlation = df_correlation[columns].corr(method='spearman')
#correlation = df_correlation.corr(method='spearman')
return correlation[columns[0]].values[1] | 41.071233 | 127 | 0.615169 |
__all__ = ['SupervisedVectorEvaluation', 'ManifoldEntropy']
from prg import prg
import ds4se as ds
from ....mining.ir import VectorizationType
from ....mining.ir import SimilarityMetric
from ....mining.ir import EntropyMetric
from ....mining.ir import DistanceMetric
from ...description.eval.traceability import VectorEvaluation
import numpy as np
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
import gensim
import pandas as pd
from itertools import product
from random import sample
import functools
import os
from enum import Enum, unique, auto
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import plot_precision_recall_curve
from sklearn.metrics import auc
import math as m
import random as r
import collections
from sklearn.metrics.pairwise import cosine_similarity
import seaborn as sns
from scipy.spatial import distance
from scipy.stats import pearsonr
from sklearn.metrics import average_precision_score
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import confusion_matrix
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
class SupervisedVectorEvaluation(VectorEvaluation):
def __init__(self, params):
super().__init__(params)
self.sys = params['system']
similarities_w2v = self.sim_list_w2v + ['Linked?']
similarities_w2v = [str(i) for i in similarities_w2v]
self.df_filtered_w2v = self.df_w2v.copy()
self.df_filtered_w2v = self.df_filtered_w2v[similarities_w2v]
self.df_filtered_w2v = self.df_filtered_w2v[~self.df_filtered_w2v.isin([np.nan, np.inf, -np.inf]).any(1)]
similarities_d2v = self.sim_list_d2v + ['Linked?']
similarities_d2v = [str(i) for i in similarities_d2v]
self.df_filtered_d2v = self.df_d2v.copy()
self.df_filtered_d2v = self.df_filtered_d2v[similarities_d2v]
self.df_filtered_d2v = self.df_filtered_d2v[~self.df_filtered_d2v.isin([np.nan, np.inf, -np.inf]).any(1)]
def vecTypeVerification(self, vecType= VectorizationType.word2vec):
if vecType == VectorizationType.word2vec:
self.sim_list = self.sim_list_w2v
y_test = self.df_filtered_w2v['Linked?'].values
y_score = [self.df_filtered_w2v[ str(sim) ].values for sim in self.sim_list]
logging.info('Vectorization: ' + str(vecType) )
elif vecType == VectorizationType.doc2vec:
self.sim_list = self.sim_list_d2v
y_test = self.df_filtered_d2v['Linked?'].values
y_score = [self.df_filtered_d2v[ str(sim) ].values for sim in self.sim_list]
logging.info('Vectorization: ' + str(vecType) )
return y_test,y_score
def vecTypeVerificationSim(self, vecType= VectorizationType.word2vec,sim=SimilarityMetric.SCM_sim):
if vecType == VectorizationType.word2vec:
self.sim_list = self.sim_list_w2v
y_test = self.df_filtered_w2v['Linked?'].values
y_score = self.df_filtered_w2v[ str(sim) ].values
logging.info('Vectorization: ' + str(vecType) + " " + str(sim))
elif vecType == VectorizationType.doc2vec:
self.sim_list = self.sim_list_d2v
y_test = self.df_filtered_d2v['Linked?'].values
y_score = self.df_filtered_d2v[ str(sim) ].values
logging.info('Vectorization: ' + str(vecType) + " " + str(sim))
return y_test,y_score
def Compute_precision_recall_gain(self, vecType = VectorizationType.word2vec, sim=SimilarityMetric.SCM_sim):
y_test,y_score = self.vecTypeVerificationSim(vecType=vecType, sim=sim)
fig = go.Figure(layout_yaxis_range=[-0.05,1.02],layout_xaxis_range=[-0.05,1.02])
prg_curve = prg.create_prg_curve(y_test, y_score)
indices = np.arange(np.argmax(prg_curve['in_unit_square']) - 1,
len(prg_curve['in_unit_square']))
pg = prg_curve['precision_gain']
rg = prg_curve['recall_gain']
fig.add_trace(go.Scatter(x=rg[indices], y=pg[indices],
line = dict(color="cyan", width=2,dash="solid")))
indices = np.logical_or(prg_curve['is_crossing'],
prg_curve['in_unit_square'])
fig.add_trace(go.Scatter(x=rg[indices], y=pg[indices],
line = dict(color="blue", width=2,dash="solid")))
indices = np.logical_and(prg_curve['in_unit_square'],
True - prg_curve['is_crossing'])
fig.add_trace(go.Scatter(x=rg[indices], y=pg[indices],mode='markers'))
valid_points = np.logical_and( ~ np.isnan(rg), ~ np.isnan(pg))
upper_hull = prg.convex_hull(zip(rg[valid_points],pg[valid_points]))
rg_hull, pg_hull = zip(*upper_hull)
fig.add_trace(go.Scatter(x=rg_hull, y=pg_hull, mode = "lines",
line = dict(color="red", width=2,dash="dash")))
auprg = prg.calc_auprg(prg_curve)
logging.info('auprg: %.3f' % auprg)
logging.info("compute_precision_recall_gain Complete: "+str(sim))
fig.update_layout(
title=self.sys + "-[" + str(sim) + "]",
height = 600,
width = 600,
xaxis_title='Recall Gain',
xaxis = dict(
tickmode = 'linear',
tick0 = 0,
dtick = 0.25),
yaxis_title='Precision Gain',
yaxis = dict(
tickmode = 'linear',
tick0 = 0,
dtick = 0.25)
)
fig.update_yaxes(
scaleanchor = "x",
scaleratio = 1,
)
return fig
def Compute_avg_precision(self, vecType = VectorizationType.word2vec):
y_test,y_score = self.vecTypeVerification(vecType=vecType)
linestyles = ['solid','dash','dashdot','dotted']
color = 'red'
no_skill = len(y_test[y_test==1]) / len(y_test)
fig = go.Figure()
fig.add_trace(go.Scatter(x=[0, 1], y=[no_skill, no_skill], name='No Skill [{0:0.2f}]'.format(no_skill), mode = "lines",
line = dict(color='red', width=.5, dash='dash')))
for count,sim in enumerate(self.sim_list):
precision, recall, _ = precision_recall_curve(y_test, y_score[count])
average_precision = average_precision_score(y_test, y_score[count])
auc_score = auc(recall, precision)
logging.info('Average precision-recall score: {0:0.2f}'.format(average_precision))
logging.info('Precision-Recall AUC: %.2f' % auc_score)
fig.add_trace(go.Scatter(x=recall, y=precision, name=str(sim.name)+' [auc:{0:0.2f}]'.format(auc_score),
line = dict(color=color, width=1, dash=linestyles[count])))
color = 'blue'
fig.add_trace(go.Scatter(x=[0, 1], y=[0, 1], name='No Skill', mode = "lines",
line = dict(color='blue', width=.5, dash='dot')))
for count,sim in enumerate(self.sim_list):
fpr, tpr, _ = roc_curve(y_test, y_score[count])
roc_auc = roc_auc_score(y_test, y_score[count])
logging.info('ROC AUC %.2f' % roc_auc)
fig.add_trace(go.Scatter(x=fpr, y=tpr, name=str(sim.name)+' [auc:{0:0.2f}]'.format(roc_auc),
line = dict(color=color, width=1, dash=linestyles[count])))
fig.update_layout(
title=self.sys + "-[" + str(vecType) + "]",
xaxis_title='recall [fpr]',
yaxis_title='tpr')
return fig
def Compute_avg_precision_same_plot(self, vecType = VectorizationType.word2vec):
linestyles = ['solid','dash','dashdot','dotted']
fig = go.Figure()
color = 'red'
y_test,y_score = self.vecTypeVerification(vecType=vecType)
no_skill = len(y_test[y_test==1]) / len(y_test)
fig.add_trace(go.Scatter(x=[0, 1], y=[no_skill, no_skill], name='No Skill [{0:0.2f}]'.format(no_skill), mode = "lines",
line = dict(color='red', width=.5, dash='dash')))
for count,sim in enumerate(self.sim_list):
precision, recall, _ = precision_recall_curve(y_test, y_score[count])
average_precision = average_precision_score(y_test, y_score[count])
auc_score = auc(recall, precision)
logging.info('Average precision-recall score: {0:0.2f}'.format(average_precision))
logging.info('Precision-Recall AUC: %.2f' % auc_score)
fig.add_trace(go.Scatter(x=recall, y=precision, name=str(sim.name)+' [auc:{0:0.2f}]'.format(auc_score),
line = dict(color=color, width=1, dash=linestyles[count])))
fig.update_layout(
title=self.sys + "-[" + str(vecType) + "]",
xaxis_title='Recall',
yaxis_title='Precision')
return fig
def Compute_roc_curve(self, vecType = VectorizationType.word2vec):
linestyles = ['solid','dash','dashdot','dotted']
fig = go.Figure()
color = 'blue'
y_test,y_score = self.vecTypeVerification(vecType = vecType)
fig.add_trace(go.Scatter(x=[0, 1], y=[0, 1], name='No Skill', mode = "lines",
line = dict(color='blue', width=.5, dash='dot')))
for count,sim in enumerate(self.sim_list):
fpr, tpr, _ = roc_curve(y_test, y_score[count])
roc_auc = roc_auc_score(y_test, y_score[count])
logging.info('ROC AUC %.2f' % roc_auc)
fig.add_trace(go.Scatter(x=fpr, y=tpr, name=str(sim.name)+' [auc:{0:0.2f}]'.format(roc_auc),
line = dict(color=color, width=1, dash=linestyles[count]))) ate_layout(
title=self.sys + "-[" + str(vecType) + "]",
xaxis_title='False Positive Rate',
yaxis_title='True Positive Rate')
return fig
def CofusionMatrix(self, vecType = VectorizationType.word2vec):
ecType)
y_score_threshold = [0 if elem<=0.8 else 1 for elem in supevisedEval.y_score]
tn, fp, fn, tp = confusion_matrix(supevisedEval.y_test, y_score_threshold).ravel()
return tn, fp, fn, tp
class ManifoldEntropy(VectorEvaluation):
def __init__(self, params):
super().__init__(params)
self.sharedEntropy_filtered = self.sharedInfo.copy()
self.sharedEntropy_filtered.dropna(inplace=True)
self.sys = params['system']
def minimum_shared_entropy(self,dist = SimilarityMetric.WMD_sim, extropy=False):
ent = EntropyMetric.MSI_I
color = 'dark blue'
if extropy:
ent = EntropyMetric.MSI_X
color = 'red'
columns = [str(i) for i in [ent, dist ]]
corr = self.compute_spearman_corr(self.sharedEntropy_filtered, columns)
logging.info('Correlation {%.2f}' % corr)
fig = px.scatter(self.sharedEntropy_filtered,
x = columns[0], y = columns[1], color_discrete_sequence=[color])
fig.update_layout(
title = self.sys +': ['+ dist.name + '-' + ent.name + '] Correlation {%.2f}' % corr
)
return fig
def manifold_entropy_plot(self, manifold = EntropyMetric.MI, dist = SimilarityMetric.WMD_sim):
columns = [str(i) for i in [manifold, dist]]
corr = self.compute_spearman_corr(self.manifoldEntropy, columns)
logging.info('Correlation {%.2f}' % corr)
fig = px.scatter(self.manifoldEntropy,
x = columns[0], y = columns[1], color_continuous_scale=["dark blue"])
fig.update_layout(
title = self.sys +': ['+ dist.name + '-' + manifold.name + '] Correlation {%.2f}' % corr
)
return fig
def composable_entropy_plot(self,
manifold_x = EntropyMetric.MI,
manifold_y = EntropyMetric.Loss,
dist = SimilarityMetric.WMD_sim
):
columns = [str(i) for i in [manifold_x, manifold_y, dist]]
if isinstance(dist, str):
title = self.sys +': Information-Semantic Interactions '+ dist
else:
title = self.sys +': Information-Semantic Interactions '+ dist.name
fig = px.scatter(self.manifoldEntropy,x = columns[0], y = columns[1], color = columns[2],
color_continuous_scale=px.colors.sequential.Viridis)
fig.update_layout(
title = title
)
return fig
def composable_shared_plot(self,
manifold_x = EntropyMetric.MSI_I,
manifold_y = EntropyMetric.Loss,
dist = SimilarityMetric.WMD_sim,
drop_na = True
):
columns = [str(i) for i in [manifold_x, manifold_y, dist]]
if isinstance(dist, str):
title = self.sys +': Information-Semantic Interactions '+ dist
else:
title = self.sys +': Information-Semantic Interactions '+ dist.name
df = self.df_w2v
num_na = df.isna().sum().sum()
if drop_na:
df = df.dropna(inplace=False)
fig = px.scatter(df,x = columns[0], y = columns[1], color = columns[2],
color_continuous_scale=px.colors.sequential.Viridis)
fig.update_layout(
title = title
)
return fig, num_na
def compute_spearman_corr(self, filter_metrics_01, columns):
df_correlation = filter_metrics_01.copy()
correlation = df_correlation[columns].corr(method='spearman')
return correlation[columns[0]].values[1] | true | true |
1c2f42c274483c6e58fe8b55527b6c91924688b6 | 2,563 | py | Python | dash_mantine_components/Spoiler.py | lenamax2355/dash-mantine-components | f24e48f3e2f88521cf2c14a070384458132a14d8 | [
"MIT"
] | 1 | 2021-12-27T08:14:31.000Z | 2021-12-27T08:14:31.000Z | dash_mantine_components/Spoiler.py | lenamax2355/dash-mantine-components | f24e48f3e2f88521cf2c14a070384458132a14d8 | [
"MIT"
] | null | null | null | dash_mantine_components/Spoiler.py | lenamax2355/dash-mantine-components | f24e48f3e2f88521cf2c14a070384458132a14d8 | [
"MIT"
] | null | null | null | # AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class Spoiler(Component):
"""A Spoiler component.
Hide long sections of content under spoiler. For more information, see: https://mantine.dev/core/spoiler/
Keyword arguments:
- children (string; optional):
Primary content.
- id (string; optional):
The ID of this component, used to identify dash components in
callbacks.
- hideLabel (string; default "Hide"):
Label for close spoiler action.
- initialState (boolean; optional):
Initial spoiler state, True to wrap content in spoiler, False to
show content without spoiler, opened state will be updated on
mount.
- loading_state (dict; optional):
Object that holds the loading state object coming from
dash-renderer.
`loading_state` is a dict with keys:
- component_name (string; optional):
Holds the name of the component that is loading.
- is_loading (boolean; optional):
Determines if the component is loading or not.
- prop_name (string; optional):
Holds which property is loading.
- maxHeight (number; optional):
Max height of visible content, when this point is reached spoiler
appears.
- showLabel (string; default "Show more"):
Label for open spoiler action.
- style (dict; optional):
Inline style override."""
@_explicitize_args
def __init__(self, children=None, hideLabel=Component.UNDEFINED, id=Component.UNDEFINED, initialState=Component.UNDEFINED, maxHeight=Component.UNDEFINED, loading_state=Component.UNDEFINED, showLabel=Component.UNDEFINED, style=Component.UNDEFINED, **kwargs):
self._prop_names = ['children', 'id', 'hideLabel', 'initialState', 'loading_state', 'maxHeight', 'showLabel', 'style']
self._type = 'Spoiler'
self._namespace = 'dash_mantine_components'
self._valid_wildcard_attributes = []
self.available_properties = ['children', 'id', 'hideLabel', 'initialState', 'loading_state', 'maxHeight', 'showLabel', 'style']
self.available_wildcard_properties = []
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
_locals.update(kwargs) # For wildcard attrs
args = {k: _locals[k] for k in _explicit_args if k != 'children'}
for k in []:
if k not in args:
raise TypeError(
'Required argument `' + k + '` was not specified.')
super(Spoiler, self).__init__(children=children, **args)
| 37.691176 | 261 | 0.684354 |
from dash.development.base_component import Component, _explicitize_args
class Spoiler(Component):
@_explicitize_args
def __init__(self, children=None, hideLabel=Component.UNDEFINED, id=Component.UNDEFINED, initialState=Component.UNDEFINED, maxHeight=Component.UNDEFINED, loading_state=Component.UNDEFINED, showLabel=Component.UNDEFINED, style=Component.UNDEFINED, **kwargs):
self._prop_names = ['children', 'id', 'hideLabel', 'initialState', 'loading_state', 'maxHeight', 'showLabel', 'style']
self._type = 'Spoiler'
self._namespace = 'dash_mantine_components'
self._valid_wildcard_attributes = []
self.available_properties = ['children', 'id', 'hideLabel', 'initialState', 'loading_state', 'maxHeight', 'showLabel', 'style']
self.available_wildcard_properties = []
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
_locals.update(kwargs)
args = {k: _locals[k] for k in _explicit_args if k != 'children'}
for k in []:
if k not in args:
raise TypeError(
'Required argument `' + k + '` was not specified.')
super(Spoiler, self).__init__(children=children, **args)
| true | true |
1c2f42dd0b1fe3d10529645039af19067969efa5 | 33,314 | py | Python | web/transiq/supplier/services.py | manibhushan05/transiq | 763fafb271ce07d13ac8ce575f2fee653cf39343 | [
"Apache-2.0"
] | null | null | null | web/transiq/supplier/services.py | manibhushan05/transiq | 763fafb271ce07d13ac8ce575f2fee653cf39343 | [
"Apache-2.0"
] | 14 | 2020-06-05T23:06:45.000Z | 2022-03-12T00:00:18.000Z | web/transiq/supplier/services.py | manibhushan05/transiq | 763fafb271ce07d13ac8ce575f2fee653cf39343 | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
import pandas as pd
from django.contrib.auth.models import User
from django.db.models import Count
from api.utils import get_or_none
from authentication.models import Profile
from broker.models import Broker
from driver.models import Driver as d_Driver
from fileupload.models import OwnerFile, VehicleFile, DriverFile
from owner.models import Vehicle as o_Vehicle, Owner
from owner.vehicle_util import compare_format
from restapi.helper_api import generate_random_lowercase_string, generate_random_uppercase_string
from supplier.models import Driver as s_Driver, DriverPhone, DriverVehicle, Vehicle as s_Vehicle, Supplier, \
SupplierVehicle, SupplierAccountingSummary
from team.models import ManualBooking, CreditNoteSupplier, DebitNoteSupplier, CreditNoteCustomerDirectAdvance
def create_drivers():
for driver in d_Driver.objects.all():
if not s_Driver.objects.filter(user__profile__phone=driver.phone).exists():
print(driver, driver.id)
try:
if not User.objects.filter(username=driver.phone).exists():
username = driver.phone
else:
username = generate_random_lowercase_string(N=12)
user = User.objects.create_user(username=username,
email=None,
password='aaho1234@12')
Profile.objects.create(user=user, name=driver.name, phone=driver.phone)
s_driver = s_Driver.objects.create(
user=user,
driving_licence_number=driver.driving_licence_number,
driving_licence_validity=driver.driving_licence_validity,
driving_licence_location=driver.driving_licence_location,
smartphone_available=driver.smartphone_available,
created_by=User.objects.get(username='mani@aaho.in'),
changed_by=User.objects.get(username='mani@aaho.in')
)
DriverPhone.objects.create(driver=s_driver, phone=driver.phone,
created_by=User.objects.get(username='mani@aaho.in'),
changed_by=User.objects.get(username='mani@aaho.in'))
except:
print(driver.phone)
def create_vehicles():
for vehicle in o_Vehicle.objects.all():
if not s_Vehicle.objects.filter(vehicle_number=compare_format(vehicle.vehicle_number)).exists():
print(vehicle)
s_vehicle = s_Vehicle.objects.create(
vehicle_number=compare_format(vehicle.vehicle_number),
vehicle_type=vehicle.vehicle_type,
vehicle_capacity=vehicle.vehicle_capacity,
created_by=User.objects.get(username='mani@aaho.in'),
changed_by=User.objects.get(username='mani@aaho.in')
)
if vehicle.driver and s_Driver.objects.filter(user__profile__phone=vehicle.driver.phone).exists():
DriverVehicle.objects.create(
driver=s_Driver.objects.get(user__profile__phone=vehicle.driver.phone),
vehicle=s_vehicle,
created_by=User.objects.get(username='mani@aaho.in'),
changed_by=User.objects.get(username='mani@aaho.in')
)
def generate_supplier_code():
code = generate_random_uppercase_string(N=4)
while Supplier.objects.filter(code=code).exists():
code = generate_random_uppercase_string(N=4)
return code
def create_supplier():
df = pd.read_excel('../../data/owner.xlsx')
df = df.fillna('')
for i, row in df.iterrows():
if not row['correct owner'] or row['id'] == row['correct owner']:
supplier = Supplier.objects.create(user=User.objects.get(username=row['username']),
created_by=User.objects.get(username='mani@aaho.in'),
changed_by=User.objects.get(username='mani@aaho.in'),
code=generate_supplier_code()
)
for vehicle in row['vehicles'].split('\n'):
if vehicle:
vehicle_instance = get_or_none(s_Vehicle, vehicle_number=vehicle)
if isinstance(vehicle_instance, s_Vehicle):
print(vehicle)
SupplierVehicle.objects.create(vehicle=vehicle_instance, supplier=supplier, ownership='O',
created_by=User.objects.get(username='mani@aaho.in'),
changed_by=User.objects.get(username='mani@aaho.in'))
def create_broker_supplier():
df = pd.read_excel('../../data/brokers.xlsx')
df = df.fillna('')
for i, row in df.iterrows():
if not row['correct broker'] or row['id'] == row['correct broker']:
if not Supplier.objects.filter(user=User.objects.get(username=row['username'])).exists():
supplier = Supplier.objects.create(user=User.objects.get(username=row['username']),
created_by=User.objects.get(username='mani@aaho.in'),
changed_by=User.objects.get(username='mani@aaho.in'),
code=generate_supplier_code()
)
for vehicle in row['vehicles'].split('\n'):
vehicle = compare_format(vehicle)
if vehicle:
vehicle_instance = get_or_none(s_Vehicle, vehicle_number=vehicle)
if isinstance(vehicle_instance, s_Vehicle):
print(vehicle)
try:
SupplierVehicle.objects.create(vehicle=vehicle_instance, supplier=supplier,
ownership='B',
created_by=User.objects.get(username='mani@aaho.in'),
changed_by=User.objects.get(username='mani@aaho.in'))
except:
pass
def create_broker_vehicle():
for supplier in Supplier.objects.all():
broker = get_or_none(Broker, name=supplier.user)
if isinstance(broker, Broker):
for bv in broker.broker_vehicle.all():
vehicle_number = compare_format(bv.vehicle.vehicle_number)
s_vehicle = get_or_none(s_Vehicle, vehicle_number=vehicle_number)
if isinstance(s_vehicle, s_Vehicle) and not SupplierVehicle.objects.filter(supplier=supplier,
vehicle=s_vehicle,
ownership='B').exists():
SupplierVehicle.objects.create(supplier=supplier, vehicle=s_vehicle, ownership='B')
def merge_supplier_vehicles():
os = Supplier.objects.get(id=873)
ds = Supplier.objects.get(id=876)
print(ds.suppliervehicle_set.exclude(vehicle_id__in=os.suppliervehicle_set.values_list('id', flat=True)))
for sv in ds.suppliervehicle_set.exclude(vehicle_id__in=os.suppliervehicle_set.values_list('id', flat=True)):
print(sv.vehicle)
try:
if not SupplierVehicle.objects.filter(supplier=os, vehicle=sv.vehicle, ownership='B').exists():
SupplierVehicle.objects.filter(supplier=ds, vehicle=sv.vehicle).update(supplier=os)
if SupplierVehicle.objects.filter(supplier=ds, vehicle=sv.vehicle,
ownership='O').exists() and SupplierVehicle.objects.filter(supplier=os,
vehicle=sv.vehicle,
ownership='B').exists():
SupplierVehicle.objects.filter(supplier=ds, vehicle=sv.vehicle, ownership='O').update(ownership='B')
SupplierVehicle.objects.filter(supplier=os, vehicle=sv.vehicle, ownership='B').update(ownership='O')
except:
pass
def delete_duplicate_owner_broker():
# supplier = Supplier.objects.get(id=4660)
for supplier in Supplier.objects.all():
print(supplier)
svs = supplier.suppliervehicle_set.values('vehicle_id').annotate(Count('id')).order_by().filter(id__count__gt=1)
for row in svs:
sv = SupplierVehicle.objects.filter(supplier=supplier, vehicle_id=row['vehicle_id'])
sv.exclude(id=sv.first().id).update(deleted=True, deleted_on=datetime.now())
def merge_owner_data():
oo = Owner.objects.get(id=2305)
do = Owner.objects.get(id=2243)
supplier = get_or_none(Supplier, user=oo.name)
for vehicle in do.vehicle_owner.all():
s_vehicle = get_or_none(s_Vehicle, vehicle_number=compare_format(vehicle.vehicle_number))
if isinstance(s_vehicle, s_Vehicle):
if not SupplierVehicle.objects.filter(vehicle=s_vehicle, ownership='O').exists():
SupplierVehicle.objects.create(vehicle=s_vehicle, supplier=supplier,
ownership='O',
created_by=User.objects.get(username='mani@aaho.in'),
changed_by=User.objects.get(username='mani@aaho.in'))
def get_supplier_data():
data = []
for supplier in Supplier.objects.all():
data.append([
supplier.id,
supplier.name,
supplier.phone,
supplier.code,
','.join(
['{} ({})'.format(sv.vehicle.number(), sv.get_ownership_display()) for sv in
supplier.suppliervehicle_set.all()]),
''
])
df = pd.DataFrame(data=data, columns=['id', 'name', 'phone', 'code', 'vehicles', 'correct_supplier'])
def update_manualbooking_supplier_data():
for booking in ManualBooking.objects.order_by('-id'):
if isinstance(booking.supplier, Broker):
booking_supplier = get_or_none(Supplier, user=booking.supplier.name)
else:
booking_supplier = None
if isinstance(booking.owner, Owner):
owner_supplier = get_or_none(Supplier, user=booking.owner.name)
else:
owner_supplier = None
ManualBooking.objects.filter(id=booking.id).update(booking_supplier=booking_supplier,
accounting_supplier=booking_supplier,
owner_supplier=owner_supplier)
def update_manualbooking_vehicle_data():
for booking in ManualBooking.objects.order_by('-id'):
print(booking)
vehicle = get_or_none(s_Vehicle, vehicle_number=compare_format(
booking.vehicle.vehicle_number)) if booking.vehicle else None
ManualBooking.objects.filter(id=booking.id).update(supplier_vehicle=vehicle)
def update_manualbooking_driver_data():
for booking in ManualBooking.objects.order_by('-id'):
driver = get_or_none(s_Driver, user__profile__phone=booking.driver.phone) if booking.driver else None
if isinstance(driver, s_Driver):
print(booking.id)
ManualBooking.objects.filter(id=booking.id).update(driver_supplier=driver)
def update_cns():
for cns in CreditNoteSupplier.objects.all():
supplier = get_or_none(Supplier, user=cns.broker.name) if cns.broker else None
if isinstance(supplier, Supplier) and not CreditNoteSupplier.objects.filter(
accounting_supplier=supplier).exists():
print(cns)
cns.accounting_supplier = supplier
cns.save()
def update_dns():
for dns in DebitNoteSupplier.objects.all():
supplier = get_or_none(Supplier, user=dns.broker.name) if dns.broker else None
if not DebitNoteSupplier.objects.filter(accounting_supplier=supplier).exists():
dns.accounting_supplier = supplier
dns.save()
def update_cnca():
for cnca in CreditNoteCustomerDirectAdvance.objects.all():
supplier = get_or_none(Supplier, user=cnca.broker.name) if cnca.broker else None
if not CreditNoteCustomerDirectAdvance.objects.filter(accounting_supplier=supplier).exists():
cnca.accounting_supplier = supplier
cnca.save()
def cns_data():
data = []
for instance in CreditNoteSupplier.objects.order_by('-id'):
data.append([
instance.id,
instance.broker.get_name() if instance.broker else None,
instance.broker.get_phone() if instance.broker else None,
instance.accounting_supplier.name if instance.accounting_supplier else None,
instance.accounting_supplier.phone if instance.accounting_supplier else None,
instance.accounting_supplier.id if instance.accounting_supplier else None,
])
df = pd.DataFrame(data=data, columns=['id', 'broker_name', 'broker_phone', 'supplier_name', 'supplier_phone',
'accounting_supplier'])
df.to_excel('cns_data.xlsx', index=False)
def dns_data():
data = []
for instance in DebitNoteSupplier.objects.order_by('-id'):
data.append([
instance.id,
instance.broker.get_name() if instance.broker else None,
instance.broker.get_phone() if instance.broker else None,
instance.accounting_supplier.name if instance.accounting_supplier else None,
instance.accounting_supplier.phone if instance.accounting_supplier else None,
instance.accounting_supplier.id if instance.accounting_supplier else None,
])
df = pd.DataFrame(data=data, columns=['id', 'broker_name', 'broker_phone', 'supplier_name', 'supplier_phone',
'accounting_supplier'])
df.to_excel('dns_data.xlsx', index=False)
def cnca_data():
data = []
for instance in CreditNoteCustomerDirectAdvance.objects.order_by('-id'):
data.append([
instance.id,
instance.broker.get_name() if instance.broker else None,
instance.broker.get_phone() if instance.broker else None,
instance.accounting_supplier.name if instance.accounting_supplier else None,
instance.accounting_supplier.phone if instance.accounting_supplier else None,
instance.accounting_supplier.id if instance.accounting_supplier else None,
])
df = pd.DataFrame(data=data, columns=['id', 'broker_name', 'broker_phone', 'supplier_name', 'supplier_phone',
'accounting_supplier'])
df.to_excel('cnca_data.xlsx', index=False)
def supplier_data():
data = []
for supplier in Supplier.objects.exclude(deleted=True).order_by('user__profile__name'):
print(supplier)
data.append([
supplier.id,
supplier.user.username if supplier.user else None,
supplier.name,
supplier.phone,
supplier.pan,
supplier.aaho_office.branch_name if supplier.aaho_office else None,
','.join(
['{} ({})'.format(sv.vehicle.vehicle_number, sv.ownership) for sv in
supplier.suppliervehicle_set.all()])
])
df = pd.DataFrame(data=data, columns=['id', 'username', 'name', 'phone', 'pan', 'aaho_office', 'vehicles'])
df.to_excel('suppliers.xlsx', index=False)
def update_owner_fileupload():
for instance in OwnerFile.objects.order_by('-id'):
supplier = get_or_none(Supplier, user=instance.owner.name) if instance.owner else None
instance.supplier = supplier
instance.save()
def update_vehicle_fileupload():
for instance in VehicleFile.objects.order_by('-id'):
vehicle = get_or_none(s_Vehicle, vehicle_number=instance.vehicle.vehicle_number) if instance.vehicle else None
instance.supplier_vehicle = vehicle
instance.save()
def update_driver_fileupload():
for instance in DriverFile.objects.order_by('-id'):
driver = get_or_none(s_Driver, user__profile__phone=instance.driver.phone) if instance.driver else None
instance.supplier_driver = driver
instance.save()
def owner_file_data():
data = []
for instance in OwnerFile.objects.order_by('-id'):
data.append([
instance.id,
instance.supplier.name if instance.supplier else None,
instance.supplier.id if instance.supplier else None,
instance.owner.get_name() if instance.owner else None,
])
df = pd.DataFrame(data=data, columns=['id', 'supplier_name', 'supplier_id', 'broker_name'])
df.to_excel('owner_file.xlsx', index=False)
def driver_file_data():
data = []
for instance in DriverFile.objects.order_by('-id'):
data.append([
instance.id,
instance.driver.phone if instance.driver else None,
instance.supplier_driver.user.profile.phone if instance.supplier_driver else None,
instance.supplier_driver.id if instance.supplier_driver else None
])
df = pd.DataFrame(data=data, columns=['id', 'driver_phone', 's_driver_phone', 's_driver_id'])
df.to_excel('driver_file.xlsx', index=False)
def vehicle_file_data():
data = []
for instance in VehicleFile.objects.order_by('-id'):
data.append([
instance.id,
instance.supplier_vehicle.vehicle_number if instance.supplier_vehicle else None,
instance.supplier_vehicle.id if instance.supplier_vehicle else None,
instance.vehicle.vehicle_number if instance.vehicle else None
])
df = pd.DataFrame(data=data, columns=['id', 'supplier_vehicle', 'supplier_vehicle_id', 'vehicle'])
df.to_excel('vehicle_file.xlsx', index=False)
def manual_booking_data():
data = []
for booking in ManualBooking.objects.order_by('-id')[:28]:
print(booking)
data.append([
booking.id,
booking.booking_id,
booking.shipment_date,
booking.vehicle.vehicle_number if booking.vehicle else None,
booking.lorry_number,
booking.supplier_vehicle.id if booking.supplier_vehicle else None,
booking.supplier_vehicle.vehicle_number if booking.supplier_vehicle else None,
booking.supplier.get_name() if booking.supplier else None,
booking.supplier.get_phone() if booking.supplier else None,
booking.truck_broker_owner_name,
booking.truck_broker_owner_phone,
booking.booking_supplier.id if booking.booking_supplier else None,
booking.booking_supplier.name if booking.booking_supplier else None,
booking.booking_supplier.phone if booking.booking_supplier else None,
booking.accounting_supplier.id if booking.accounting_supplier else None,
booking.accounting_supplier.name if booking.accounting_supplier else None,
booking.accounting_supplier.phone if booking.accounting_supplier else None,
booking.owner.get_name() if booking.owner else None,
booking.owner.get_phone() if booking.owner else None,
booking.truck_owner_name,
booking.truck_owner_phone,
booking.owner_supplier.id if booking.owner_supplier else None,
booking.owner_supplier.name if booking.owner_supplier else None,
booking.owner_supplier.phone if booking.owner_supplier else None,
booking.driver_supplier.id if booking.driver_supplier else None,
booking.driver_supplier.name if booking.driver_supplier else None,
booking.driver_supplier.phone if booking.driver_supplier else None,
booking.driver_supplier.driving_licence_number if booking.driver_supplier else None,
booking.driver_supplier.driving_licence_validity if booking.driver_supplier else None,
booking.driver.name if booking.driver else None,
booking.driver.phone if booking.driver else None,
booking.driver.driving_licence_number if booking.driver else None,
booking.driver.driving_licence_validity if booking.driver else None,
booking.driver_name,
booking.driver_phone,
booking.driver_dl_number,
booking.driver_dl_validity
])
df = pd.DataFrame(data=data, columns=[
'id', 'booking_id', 'shipment_date', 'owner_vehicle_number', 'vehicle_number', 'supplier_vehicle_id',
'supplier_vehicle_number',
'broker_name', 'broker_phone', 'truck_broker_owner_name', 'truck_broker_owner_phone', 'booking_supplier_id',
'booking_supplier_name',
'booking_supplier_phone', 'accounting_supplier_id', 'accounting_supplier_name', 'accounting_supplier_phone',
'owner_name', 'owner_phone',
'truck_owner_name', 'truck_owner_phone', 'owner_supplier_id', 'owner_supplier_name', 'owner_supplier_phone',
'driver_supplier_id', 'driver_supplier_name',
'driver_supplier_phone', 'driver_supplier_dl', 'driver_supplier_dl_validity', 'driver_name', 'driver_phone',
'driver_dl', 'driver_dl_validity', 'driver_name', 'driver_phone', 'driver_dl_number', 'driver_dl_validity'])
df.to_excel('manual_booking_data.xlsx', index=False)
def merge_owner_in_web():
oo = Owner.objects.get(id=2305)
do = Owner.objects.get(id=2243)
supplier = get_or_none(Supplier, user=oo.name)
db = get_or_none(Broker, name=do.name)
if isinstance(db, Broker):
ManualBooking.objects.filter(supplier=db).update(booking_supplier=supplier, accounting_supplier=supplier)
CreditNoteSupplier.objects.filter(broker=db).update(accounting_supplier=supplier)
DebitNoteSupplier.objects.filter(broker=db).update(accounting_supplier=supplier)
CreditNoteCustomerDirectAdvance.objects.filter(broker=db).update(accounting_supplier=supplier)
ManualBooking.objects.filter(owner=do).update(owner_supplier=supplier)
OwnerFile.objects.filter(owner=do).update(supplier=supplier)
def update_supplier_owner_info():
for supplier in Supplier.objects.all():
owner = get_or_none(Owner, name=supplier.user)
if isinstance(owner, Owner):
supplier.address = owner.owner_address
supplier.city = owner.city
supplier.pan = owner.pan
# supplier.aaho_office=owner.aaho_office
try:
supplier.save()
except:
pass
def update_supplier_broker_info():
for supplier in Supplier.objects.all():
broker = get_or_none(Broker, name=supplier.user)
if isinstance(broker, Broker):
print(supplier)
if not supplier.city:
supplier.city = broker.city
if not supplier.pan:
supplier.pan = broker.pan
supplier.aaho_office = broker.aaho_office
supplier.save()
for state in broker.destination_state.all():
supplier.serving_states.add(state)
def add_latest_added_vehicle():
for booking in ManualBooking.objects.filter(shipment_date__gte='2019-03-01', supplier_vehicle=None):
try:
vehicle = s_Vehicle.objects.get(vehicle_number=booking.vehicle.vehicle_number)
except:
vehicle = s_Vehicle.objects.create(vehicle_number=booking.vehicle_number,
vehicle_type=booking.vehicle.vehicle_type,
vehicle_capacity=booking.vehicle.vehicle_capacity,
created_by=User.objects.get(username='mani@aaho.in'),
changed_by=User.objects.get(username='mani@aaho.in'))
ManualBooking.objects.filter(id=booking.id).update(supplier_vehicle=vehicle)
def add_latest_broker():
for broker in Broker.objects.filter(created_on__date__gte='2019-03-01'):
if not Supplier.objects.filter(user=broker.name):
print(broker)
supplier = Supplier.objects.create(user=broker.name, city=broker.city, aaho_office=broker.aaho_office)
for state in broker.destination_state.all():
supplier.serving_states.add(state)
def update_mb_booking_supplier():
for booking in ManualBooking.objects.filter(booking_supplier=None):
supplier = get_or_none(Supplier, user=booking.owner.name) if booking.owner else None
if supplier:
print(supplier)
ManualBooking.objects.filter(id=booking.id).update(owner_supplier=supplier)
def update_mb_driver():
print(ManualBooking.objects.filter(driver_supplier=None).count())
for booking in ManualBooking.objects.filter(driver_supplier=None):
print(booking.shipment_date)
# driver=get_or_none(s_Driver,user__profile__phone=booking.driver_phone) if booking.driver_phone else None
# if isinstance(driver,s_Driver):
# print(driver)
# ManualBooking.objects.filter(id=booking.id).update(driver_supplier=driver)
# else:
# driver = get_or_none(s_Driver, user__profile__phone=booking.driver_phone) if booking.driver else None
# ManualBooking.objects.filter(id=booking.id).update(driver_supplier=driver)
def update_supplier_vehicle_data():
for supplier in Supplier.objects.filter(id__gte=2754):
broker = get_or_none(Broker, name=supplier.user)
owner = get_or_none(Owner, name=supplier.user)
if isinstance(owner, Owner):
supplier.pan = owner.pan
supplier.save()
for ov in owner.vehicle_owner.all():
s_vehicle = get_or_none(s_Vehicle, vehicle_number=ov.vehicle_number)
if isinstance(s_vehicle, s_Vehicle):
SupplierVehicle.objects.create(
supplier=supplier,
ownership='O',
vehicle=s_vehicle,
created_by=User.objects.get(username='mani@aaho.in'),
changed_by=User.objects.get(username='mani@aaho.in')
)
if isinstance(broker, Broker):
for bv in broker.broker_vehicle.all():
vehicle_number = compare_format(bv.vehicle.vehicle_number)
s_vehicle = get_or_none(s_Vehicle, vehicle_number=vehicle_number)
if isinstance(s_vehicle, s_Vehicle) and not SupplierVehicle.objects.filter(supplier=supplier,
vehicle=s_vehicle,
ownership='B').exists():
SupplierVehicle.objects.create(supplier=supplier, vehicle=s_vehicle, ownership='B')
def merge_supplier():
df = pd.read_excel('suppliers.xlsx')
df = df.fillna('')
for i, row in df.iterrows():
if row['Merge'] and row['Merge'] != 'D':
try:
original_supplier = Supplier.objects.get(id=row['Merge'])
duplicate_supplier = Supplier.objects.get(id=row['id'])
original_profile = Profile.objects.get(user=original_supplier.user)
duplicate_profile = Profile.objects.get(user=duplicate_supplier.user)
print(original_supplier)
if not original_profile.phone:
original_profile.phone = duplicate_profile.phone
original_profile.save()
elif not original_profile.alternate_phone:
original_profile.alternate_phone = duplicate_profile.phone
original_profile.save()
if not original_supplier.pan:
original_supplier.pan = duplicate_supplier.pan
if not original_supplier.address:
original_supplier.address = duplicate_supplier.address
if not original_supplier.city:
original_supplier.city = duplicate_supplier.city
if not original_supplier.aaho_office:
original_supplier.aaho_office = duplicate_supplier.aaho_office
if not original_supplier.aaho_poc:
original_supplier.aaho_poc = duplicate_supplier.aaho_poc
original_supplier.save()
duplicate_supplier.deleted = True
duplicate_supplier.deleted_on = datetime.now()
duplicate_supplier.save()
OwnerFile.objects.filter(supplier=duplicate_supplier).update(supplier=original_supplier)
ManualBooking.objects.filter(booking_supplier=duplicate_supplier).update(
booking_supplier=original_supplier)
ManualBooking.objects.filter(accounting_supplier=duplicate_supplier).update(
accounting_supplier=original_supplier)
ManualBooking.objects.filter(owner_supplier=duplicate_supplier).update(owner_supplier=original_supplier)
SupplierAccountingSummary.objects.filter(supplier=duplicate_supplier).update(deleted=True,
deleted_on=datetime.now())
CreditNoteSupplier.objects.filter(accounting_supplier=duplicate_supplier).update(
accounting_supplier=original_supplier)
DebitNoteSupplier.objects.filter(accounting_supplier=duplicate_supplier).update(
accounting_supplier=original_supplier)
CreditNoteCustomerDirectAdvance.objects.filter(accounting_supplier=duplicate_supplier).update(
accounting_supplier=original_supplier)
for sv in duplicate_supplier.suppliervehicle_set.exclude(
vehicle_id__in=original_supplier.suppliervehicle_set.values_list('id', flat=True)):
try:
if not SupplierVehicle.objects.filter(supplier=original_supplier, vehicle=sv.vehicle,
ownership='B').exists():
SupplierVehicle.objects.filter(supplier=duplicate_supplier, vehicle=sv.vehicle).update(
supplier=original_supplier)
if SupplierVehicle.objects.filter(supplier=duplicate_supplier, vehicle=sv.vehicle,
ownership='O').exists() and SupplierVehicle.objects.filter(
supplier=original_supplier, vehicle=sv.vehicle, ownership='B').exists():
SupplierVehicle.objects.filter(supplier=duplicate_supplier, vehicle=sv.vehicle,
ownership='O').update(
ownership='B')
SupplierVehicle.objects.filter(supplier=original_supplier, vehicle=sv.vehicle,
ownership='B').update(
ownership='O')
except:
SupplierVehicle.objects.filter(supplier=duplicate_supplier, vehicle=sv.vehicle).update(
deleted=True)
except Supplier.DoesNotExist:
print(row)
def add_supplier_owner():
owner = Owner.objects.get(id=2424)
supplier = get_or_none(Supplier, user=owner.name)
if not isinstance(supplier, Supplier):
supplier = Supplier.objects.create(user=owner.name, pan=owner.pan, city=owner.city, changed_by=owner.changed_by,
created_by=owner.created_by)
ManualBooking.objects.filter(owner=owner).update(owner_supplier=supplier)
for o_vehicle in owner.vehicle_owner.all():
s_vehicle = get_or_none(s_Vehicle, vehicle_number=o_vehicle.vehicle_number)
if isinstance(s_vehicle, s_Vehicle) and not SupplierVehicle.objects.filter(supplier=supplier,
vehicle=s_vehicle,
ownership='O'):
SupplierVehicle.objects.create(supplier=supplier, vehicle=s_vehicle, ownership='O',
changed_by=owner.changed_by, created_by=owner.created_by)
if isinstance(s_vehicle, s_Vehicle):
VehicleFile.objects.filter(vehicle=o_vehicle).update(supplier_vehicle=s_vehicle)
| 51.569659 | 129 | 0.617128 | from datetime import datetime
import pandas as pd
from django.contrib.auth.models import User
from django.db.models import Count
from api.utils import get_or_none
from authentication.models import Profile
from broker.models import Broker
from driver.models import Driver as d_Driver
from fileupload.models import OwnerFile, VehicleFile, DriverFile
from owner.models import Vehicle as o_Vehicle, Owner
from owner.vehicle_util import compare_format
from restapi.helper_api import generate_random_lowercase_string, generate_random_uppercase_string
from supplier.models import Driver as s_Driver, DriverPhone, DriverVehicle, Vehicle as s_Vehicle, Supplier, \
SupplierVehicle, SupplierAccountingSummary
from team.models import ManualBooking, CreditNoteSupplier, DebitNoteSupplier, CreditNoteCustomerDirectAdvance
def create_drivers():
for driver in d_Driver.objects.all():
if not s_Driver.objects.filter(user__profile__phone=driver.phone).exists():
print(driver, driver.id)
try:
if not User.objects.filter(username=driver.phone).exists():
username = driver.phone
else:
username = generate_random_lowercase_string(N=12)
user = User.objects.create_user(username=username,
email=None,
password='aaho1234@12')
Profile.objects.create(user=user, name=driver.name, phone=driver.phone)
s_driver = s_Driver.objects.create(
user=user,
driving_licence_number=driver.driving_licence_number,
driving_licence_validity=driver.driving_licence_validity,
driving_licence_location=driver.driving_licence_location,
smartphone_available=driver.smartphone_available,
created_by=User.objects.get(username='mani@aaho.in'),
changed_by=User.objects.get(username='mani@aaho.in')
)
DriverPhone.objects.create(driver=s_driver, phone=driver.phone,
created_by=User.objects.get(username='mani@aaho.in'),
changed_by=User.objects.get(username='mani@aaho.in'))
except:
print(driver.phone)
def create_vehicles():
for vehicle in o_Vehicle.objects.all():
if not s_Vehicle.objects.filter(vehicle_number=compare_format(vehicle.vehicle_number)).exists():
print(vehicle)
s_vehicle = s_Vehicle.objects.create(
vehicle_number=compare_format(vehicle.vehicle_number),
vehicle_type=vehicle.vehicle_type,
vehicle_capacity=vehicle.vehicle_capacity,
created_by=User.objects.get(username='mani@aaho.in'),
changed_by=User.objects.get(username='mani@aaho.in')
)
if vehicle.driver and s_Driver.objects.filter(user__profile__phone=vehicle.driver.phone).exists():
DriverVehicle.objects.create(
driver=s_Driver.objects.get(user__profile__phone=vehicle.driver.phone),
vehicle=s_vehicle,
created_by=User.objects.get(username='mani@aaho.in'),
changed_by=User.objects.get(username='mani@aaho.in')
)
def generate_supplier_code():
code = generate_random_uppercase_string(N=4)
while Supplier.objects.filter(code=code).exists():
code = generate_random_uppercase_string(N=4)
return code
def create_supplier():
df = pd.read_excel('../../data/owner.xlsx')
df = df.fillna('')
for i, row in df.iterrows():
if not row['correct owner'] or row['id'] == row['correct owner']:
supplier = Supplier.objects.create(user=User.objects.get(username=row['username']),
created_by=User.objects.get(username='mani@aaho.in'),
changed_by=User.objects.get(username='mani@aaho.in'),
code=generate_supplier_code()
)
for vehicle in row['vehicles'].split('\n'):
if vehicle:
vehicle_instance = get_or_none(s_Vehicle, vehicle_number=vehicle)
if isinstance(vehicle_instance, s_Vehicle):
print(vehicle)
SupplierVehicle.objects.create(vehicle=vehicle_instance, supplier=supplier, ownership='O',
created_by=User.objects.get(username='mani@aaho.in'),
changed_by=User.objects.get(username='mani@aaho.in'))
def create_broker_supplier():
df = pd.read_excel('../../data/brokers.xlsx')
df = df.fillna('')
for i, row in df.iterrows():
if not row['correct broker'] or row['id'] == row['correct broker']:
if not Supplier.objects.filter(user=User.objects.get(username=row['username'])).exists():
supplier = Supplier.objects.create(user=User.objects.get(username=row['username']),
created_by=User.objects.get(username='mani@aaho.in'),
changed_by=User.objects.get(username='mani@aaho.in'),
code=generate_supplier_code()
)
for vehicle in row['vehicles'].split('\n'):
vehicle = compare_format(vehicle)
if vehicle:
vehicle_instance = get_or_none(s_Vehicle, vehicle_number=vehicle)
if isinstance(vehicle_instance, s_Vehicle):
print(vehicle)
try:
SupplierVehicle.objects.create(vehicle=vehicle_instance, supplier=supplier,
ownership='B',
created_by=User.objects.get(username='mani@aaho.in'),
changed_by=User.objects.get(username='mani@aaho.in'))
except:
pass
def create_broker_vehicle():
for supplier in Supplier.objects.all():
broker = get_or_none(Broker, name=supplier.user)
if isinstance(broker, Broker):
for bv in broker.broker_vehicle.all():
vehicle_number = compare_format(bv.vehicle.vehicle_number)
s_vehicle = get_or_none(s_Vehicle, vehicle_number=vehicle_number)
if isinstance(s_vehicle, s_Vehicle) and not SupplierVehicle.objects.filter(supplier=supplier,
vehicle=s_vehicle,
ownership='B').exists():
SupplierVehicle.objects.create(supplier=supplier, vehicle=s_vehicle, ownership='B')
def merge_supplier_vehicles():
os = Supplier.objects.get(id=873)
ds = Supplier.objects.get(id=876)
print(ds.suppliervehicle_set.exclude(vehicle_id__in=os.suppliervehicle_set.values_list('id', flat=True)))
for sv in ds.suppliervehicle_set.exclude(vehicle_id__in=os.suppliervehicle_set.values_list('id', flat=True)):
print(sv.vehicle)
try:
if not SupplierVehicle.objects.filter(supplier=os, vehicle=sv.vehicle, ownership='B').exists():
SupplierVehicle.objects.filter(supplier=ds, vehicle=sv.vehicle).update(supplier=os)
if SupplierVehicle.objects.filter(supplier=ds, vehicle=sv.vehicle,
ownership='O').exists() and SupplierVehicle.objects.filter(supplier=os,
vehicle=sv.vehicle,
ownership='B').exists():
SupplierVehicle.objects.filter(supplier=ds, vehicle=sv.vehicle, ownership='O').update(ownership='B')
SupplierVehicle.objects.filter(supplier=os, vehicle=sv.vehicle, ownership='B').update(ownership='O')
except:
pass
def delete_duplicate_owner_broker():
for supplier in Supplier.objects.all():
print(supplier)
svs = supplier.suppliervehicle_set.values('vehicle_id').annotate(Count('id')).order_by().filter(id__count__gt=1)
for row in svs:
sv = SupplierVehicle.objects.filter(supplier=supplier, vehicle_id=row['vehicle_id'])
sv.exclude(id=sv.first().id).update(deleted=True, deleted_on=datetime.now())
def merge_owner_data():
oo = Owner.objects.get(id=2305)
do = Owner.objects.get(id=2243)
supplier = get_or_none(Supplier, user=oo.name)
for vehicle in do.vehicle_owner.all():
s_vehicle = get_or_none(s_Vehicle, vehicle_number=compare_format(vehicle.vehicle_number))
if isinstance(s_vehicle, s_Vehicle):
if not SupplierVehicle.objects.filter(vehicle=s_vehicle, ownership='O').exists():
SupplierVehicle.objects.create(vehicle=s_vehicle, supplier=supplier,
ownership='O',
created_by=User.objects.get(username='mani@aaho.in'),
changed_by=User.objects.get(username='mani@aaho.in'))
def get_supplier_data():
data = []
for supplier in Supplier.objects.all():
data.append([
supplier.id,
supplier.name,
supplier.phone,
supplier.code,
','.join(
['{} ({})'.format(sv.vehicle.number(), sv.get_ownership_display()) for sv in
supplier.suppliervehicle_set.all()]),
''
])
df = pd.DataFrame(data=data, columns=['id', 'name', 'phone', 'code', 'vehicles', 'correct_supplier'])
def update_manualbooking_supplier_data():
for booking in ManualBooking.objects.order_by('-id'):
if isinstance(booking.supplier, Broker):
booking_supplier = get_or_none(Supplier, user=booking.supplier.name)
else:
booking_supplier = None
if isinstance(booking.owner, Owner):
owner_supplier = get_or_none(Supplier, user=booking.owner.name)
else:
owner_supplier = None
ManualBooking.objects.filter(id=booking.id).update(booking_supplier=booking_supplier,
accounting_supplier=booking_supplier,
owner_supplier=owner_supplier)
def update_manualbooking_vehicle_data():
for booking in ManualBooking.objects.order_by('-id'):
print(booking)
vehicle = get_or_none(s_Vehicle, vehicle_number=compare_format(
booking.vehicle.vehicle_number)) if booking.vehicle else None
ManualBooking.objects.filter(id=booking.id).update(supplier_vehicle=vehicle)
def update_manualbooking_driver_data():
for booking in ManualBooking.objects.order_by('-id'):
driver = get_or_none(s_Driver, user__profile__phone=booking.driver.phone) if booking.driver else None
if isinstance(driver, s_Driver):
print(booking.id)
ManualBooking.objects.filter(id=booking.id).update(driver_supplier=driver)
def update_cns():
for cns in CreditNoteSupplier.objects.all():
supplier = get_or_none(Supplier, user=cns.broker.name) if cns.broker else None
if isinstance(supplier, Supplier) and not CreditNoteSupplier.objects.filter(
accounting_supplier=supplier).exists():
print(cns)
cns.accounting_supplier = supplier
cns.save()
def update_dns():
for dns in DebitNoteSupplier.objects.all():
supplier = get_or_none(Supplier, user=dns.broker.name) if dns.broker else None
if not DebitNoteSupplier.objects.filter(accounting_supplier=supplier).exists():
dns.accounting_supplier = supplier
dns.save()
def update_cnca():
for cnca in CreditNoteCustomerDirectAdvance.objects.all():
supplier = get_or_none(Supplier, user=cnca.broker.name) if cnca.broker else None
if not CreditNoteCustomerDirectAdvance.objects.filter(accounting_supplier=supplier).exists():
cnca.accounting_supplier = supplier
cnca.save()
def cns_data():
data = []
for instance in CreditNoteSupplier.objects.order_by('-id'):
data.append([
instance.id,
instance.broker.get_name() if instance.broker else None,
instance.broker.get_phone() if instance.broker else None,
instance.accounting_supplier.name if instance.accounting_supplier else None,
instance.accounting_supplier.phone if instance.accounting_supplier else None,
instance.accounting_supplier.id if instance.accounting_supplier else None,
])
df = pd.DataFrame(data=data, columns=['id', 'broker_name', 'broker_phone', 'supplier_name', 'supplier_phone',
'accounting_supplier'])
df.to_excel('cns_data.xlsx', index=False)
def dns_data():
data = []
for instance in DebitNoteSupplier.objects.order_by('-id'):
data.append([
instance.id,
instance.broker.get_name() if instance.broker else None,
instance.broker.get_phone() if instance.broker else None,
instance.accounting_supplier.name if instance.accounting_supplier else None,
instance.accounting_supplier.phone if instance.accounting_supplier else None,
instance.accounting_supplier.id if instance.accounting_supplier else None,
])
df = pd.DataFrame(data=data, columns=['id', 'broker_name', 'broker_phone', 'supplier_name', 'supplier_phone',
'accounting_supplier'])
df.to_excel('dns_data.xlsx', index=False)
def cnca_data():
data = []
for instance in CreditNoteCustomerDirectAdvance.objects.order_by('-id'):
data.append([
instance.id,
instance.broker.get_name() if instance.broker else None,
instance.broker.get_phone() if instance.broker else None,
instance.accounting_supplier.name if instance.accounting_supplier else None,
instance.accounting_supplier.phone if instance.accounting_supplier else None,
instance.accounting_supplier.id if instance.accounting_supplier else None,
])
df = pd.DataFrame(data=data, columns=['id', 'broker_name', 'broker_phone', 'supplier_name', 'supplier_phone',
'accounting_supplier'])
df.to_excel('cnca_data.xlsx', index=False)
def supplier_data():
data = []
for supplier in Supplier.objects.exclude(deleted=True).order_by('user__profile__name'):
print(supplier)
data.append([
supplier.id,
supplier.user.username if supplier.user else None,
supplier.name,
supplier.phone,
supplier.pan,
supplier.aaho_office.branch_name if supplier.aaho_office else None,
','.join(
['{} ({})'.format(sv.vehicle.vehicle_number, sv.ownership) for sv in
supplier.suppliervehicle_set.all()])
])
df = pd.DataFrame(data=data, columns=['id', 'username', 'name', 'phone', 'pan', 'aaho_office', 'vehicles'])
df.to_excel('suppliers.xlsx', index=False)
def update_owner_fileupload():
for instance in OwnerFile.objects.order_by('-id'):
supplier = get_or_none(Supplier, user=instance.owner.name) if instance.owner else None
instance.supplier = supplier
instance.save()
def update_vehicle_fileupload():
for instance in VehicleFile.objects.order_by('-id'):
vehicle = get_or_none(s_Vehicle, vehicle_number=instance.vehicle.vehicle_number) if instance.vehicle else None
instance.supplier_vehicle = vehicle
instance.save()
def update_driver_fileupload():
for instance in DriverFile.objects.order_by('-id'):
driver = get_or_none(s_Driver, user__profile__phone=instance.driver.phone) if instance.driver else None
instance.supplier_driver = driver
instance.save()
def owner_file_data():
data = []
for instance in OwnerFile.objects.order_by('-id'):
data.append([
instance.id,
instance.supplier.name if instance.supplier else None,
instance.supplier.id if instance.supplier else None,
instance.owner.get_name() if instance.owner else None,
])
df = pd.DataFrame(data=data, columns=['id', 'supplier_name', 'supplier_id', 'broker_name'])
df.to_excel('owner_file.xlsx', index=False)
def driver_file_data():
data = []
for instance in DriverFile.objects.order_by('-id'):
data.append([
instance.id,
instance.driver.phone if instance.driver else None,
instance.supplier_driver.user.profile.phone if instance.supplier_driver else None,
instance.supplier_driver.id if instance.supplier_driver else None
])
df = pd.DataFrame(data=data, columns=['id', 'driver_phone', 's_driver_phone', 's_driver_id'])
df.to_excel('driver_file.xlsx', index=False)
def vehicle_file_data():
data = []
for instance in VehicleFile.objects.order_by('-id'):
data.append([
instance.id,
instance.supplier_vehicle.vehicle_number if instance.supplier_vehicle else None,
instance.supplier_vehicle.id if instance.supplier_vehicle else None,
instance.vehicle.vehicle_number if instance.vehicle else None
])
df = pd.DataFrame(data=data, columns=['id', 'supplier_vehicle', 'supplier_vehicle_id', 'vehicle'])
df.to_excel('vehicle_file.xlsx', index=False)
def manual_booking_data():
data = []
for booking in ManualBooking.objects.order_by('-id')[:28]:
print(booking)
data.append([
booking.id,
booking.booking_id,
booking.shipment_date,
booking.vehicle.vehicle_number if booking.vehicle else None,
booking.lorry_number,
booking.supplier_vehicle.id if booking.supplier_vehicle else None,
booking.supplier_vehicle.vehicle_number if booking.supplier_vehicle else None,
booking.supplier.get_name() if booking.supplier else None,
booking.supplier.get_phone() if booking.supplier else None,
booking.truck_broker_owner_name,
booking.truck_broker_owner_phone,
booking.booking_supplier.id if booking.booking_supplier else None,
booking.booking_supplier.name if booking.booking_supplier else None,
booking.booking_supplier.phone if booking.booking_supplier else None,
booking.accounting_supplier.id if booking.accounting_supplier else None,
booking.accounting_supplier.name if booking.accounting_supplier else None,
booking.accounting_supplier.phone if booking.accounting_supplier else None,
booking.owner.get_name() if booking.owner else None,
booking.owner.get_phone() if booking.owner else None,
booking.truck_owner_name,
booking.truck_owner_phone,
booking.owner_supplier.id if booking.owner_supplier else None,
booking.owner_supplier.name if booking.owner_supplier else None,
booking.owner_supplier.phone if booking.owner_supplier else None,
booking.driver_supplier.id if booking.driver_supplier else None,
booking.driver_supplier.name if booking.driver_supplier else None,
booking.driver_supplier.phone if booking.driver_supplier else None,
booking.driver_supplier.driving_licence_number if booking.driver_supplier else None,
booking.driver_supplier.driving_licence_validity if booking.driver_supplier else None,
booking.driver.name if booking.driver else None,
booking.driver.phone if booking.driver else None,
booking.driver.driving_licence_number if booking.driver else None,
booking.driver.driving_licence_validity if booking.driver else None,
booking.driver_name,
booking.driver_phone,
booking.driver_dl_number,
booking.driver_dl_validity
])
df = pd.DataFrame(data=data, columns=[
'id', 'booking_id', 'shipment_date', 'owner_vehicle_number', 'vehicle_number', 'supplier_vehicle_id',
'supplier_vehicle_number',
'broker_name', 'broker_phone', 'truck_broker_owner_name', 'truck_broker_owner_phone', 'booking_supplier_id',
'booking_supplier_name',
'booking_supplier_phone', 'accounting_supplier_id', 'accounting_supplier_name', 'accounting_supplier_phone',
'owner_name', 'owner_phone',
'truck_owner_name', 'truck_owner_phone', 'owner_supplier_id', 'owner_supplier_name', 'owner_supplier_phone',
'driver_supplier_id', 'driver_supplier_name',
'driver_supplier_phone', 'driver_supplier_dl', 'driver_supplier_dl_validity', 'driver_name', 'driver_phone',
'driver_dl', 'driver_dl_validity', 'driver_name', 'driver_phone', 'driver_dl_number', 'driver_dl_validity'])
df.to_excel('manual_booking_data.xlsx', index=False)
def merge_owner_in_web():
oo = Owner.objects.get(id=2305)
do = Owner.objects.get(id=2243)
supplier = get_or_none(Supplier, user=oo.name)
db = get_or_none(Broker, name=do.name)
if isinstance(db, Broker):
ManualBooking.objects.filter(supplier=db).update(booking_supplier=supplier, accounting_supplier=supplier)
CreditNoteSupplier.objects.filter(broker=db).update(accounting_supplier=supplier)
DebitNoteSupplier.objects.filter(broker=db).update(accounting_supplier=supplier)
CreditNoteCustomerDirectAdvance.objects.filter(broker=db).update(accounting_supplier=supplier)
ManualBooking.objects.filter(owner=do).update(owner_supplier=supplier)
OwnerFile.objects.filter(owner=do).update(supplier=supplier)
def update_supplier_owner_info():
for supplier in Supplier.objects.all():
owner = get_or_none(Owner, name=supplier.user)
if isinstance(owner, Owner):
supplier.address = owner.owner_address
supplier.city = owner.city
supplier.pan = owner.pan
try:
supplier.save()
except:
pass
def update_supplier_broker_info():
for supplier in Supplier.objects.all():
broker = get_or_none(Broker, name=supplier.user)
if isinstance(broker, Broker):
print(supplier)
if not supplier.city:
supplier.city = broker.city
if not supplier.pan:
supplier.pan = broker.pan
supplier.aaho_office = broker.aaho_office
supplier.save()
for state in broker.destination_state.all():
supplier.serving_states.add(state)
def add_latest_added_vehicle():
for booking in ManualBooking.objects.filter(shipment_date__gte='2019-03-01', supplier_vehicle=None):
try:
vehicle = s_Vehicle.objects.get(vehicle_number=booking.vehicle.vehicle_number)
except:
vehicle = s_Vehicle.objects.create(vehicle_number=booking.vehicle_number,
vehicle_type=booking.vehicle.vehicle_type,
vehicle_capacity=booking.vehicle.vehicle_capacity,
created_by=User.objects.get(username='mani@aaho.in'),
changed_by=User.objects.get(username='mani@aaho.in'))
ManualBooking.objects.filter(id=booking.id).update(supplier_vehicle=vehicle)
def add_latest_broker():
for broker in Broker.objects.filter(created_on__date__gte='2019-03-01'):
if not Supplier.objects.filter(user=broker.name):
print(broker)
supplier = Supplier.objects.create(user=broker.name, city=broker.city, aaho_office=broker.aaho_office)
for state in broker.destination_state.all():
supplier.serving_states.add(state)
def update_mb_booking_supplier():
for booking in ManualBooking.objects.filter(booking_supplier=None):
supplier = get_or_none(Supplier, user=booking.owner.name) if booking.owner else None
if supplier:
print(supplier)
ManualBooking.objects.filter(id=booking.id).update(owner_supplier=supplier)
def update_mb_driver():
print(ManualBooking.objects.filter(driver_supplier=None).count())
for booking in ManualBooking.objects.filter(driver_supplier=None):
print(booking.shipment_date)
def update_supplier_vehicle_data():
for supplier in Supplier.objects.filter(id__gte=2754):
broker = get_or_none(Broker, name=supplier.user)
owner = get_or_none(Owner, name=supplier.user)
if isinstance(owner, Owner):
supplier.pan = owner.pan
supplier.save()
for ov in owner.vehicle_owner.all():
s_vehicle = get_or_none(s_Vehicle, vehicle_number=ov.vehicle_number)
if isinstance(s_vehicle, s_Vehicle):
SupplierVehicle.objects.create(
supplier=supplier,
ownership='O',
vehicle=s_vehicle,
created_by=User.objects.get(username='mani@aaho.in'),
changed_by=User.objects.get(username='mani@aaho.in')
)
if isinstance(broker, Broker):
for bv in broker.broker_vehicle.all():
vehicle_number = compare_format(bv.vehicle.vehicle_number)
s_vehicle = get_or_none(s_Vehicle, vehicle_number=vehicle_number)
if isinstance(s_vehicle, s_Vehicle) and not SupplierVehicle.objects.filter(supplier=supplier,
vehicle=s_vehicle,
ownership='B').exists():
SupplierVehicle.objects.create(supplier=supplier, vehicle=s_vehicle, ownership='B')
def merge_supplier():
df = pd.read_excel('suppliers.xlsx')
df = df.fillna('')
for i, row in df.iterrows():
if row['Merge'] and row['Merge'] != 'D':
try:
original_supplier = Supplier.objects.get(id=row['Merge'])
duplicate_supplier = Supplier.objects.get(id=row['id'])
original_profile = Profile.objects.get(user=original_supplier.user)
duplicate_profile = Profile.objects.get(user=duplicate_supplier.user)
print(original_supplier)
if not original_profile.phone:
original_profile.phone = duplicate_profile.phone
original_profile.save()
elif not original_profile.alternate_phone:
original_profile.alternate_phone = duplicate_profile.phone
original_profile.save()
if not original_supplier.pan:
original_supplier.pan = duplicate_supplier.pan
if not original_supplier.address:
original_supplier.address = duplicate_supplier.address
if not original_supplier.city:
original_supplier.city = duplicate_supplier.city
if not original_supplier.aaho_office:
original_supplier.aaho_office = duplicate_supplier.aaho_office
if not original_supplier.aaho_poc:
original_supplier.aaho_poc = duplicate_supplier.aaho_poc
original_supplier.save()
duplicate_supplier.deleted = True
duplicate_supplier.deleted_on = datetime.now()
duplicate_supplier.save()
OwnerFile.objects.filter(supplier=duplicate_supplier).update(supplier=original_supplier)
ManualBooking.objects.filter(booking_supplier=duplicate_supplier).update(
booking_supplier=original_supplier)
ManualBooking.objects.filter(accounting_supplier=duplicate_supplier).update(
accounting_supplier=original_supplier)
ManualBooking.objects.filter(owner_supplier=duplicate_supplier).update(owner_supplier=original_supplier)
SupplierAccountingSummary.objects.filter(supplier=duplicate_supplier).update(deleted=True,
deleted_on=datetime.now())
CreditNoteSupplier.objects.filter(accounting_supplier=duplicate_supplier).update(
accounting_supplier=original_supplier)
DebitNoteSupplier.objects.filter(accounting_supplier=duplicate_supplier).update(
accounting_supplier=original_supplier)
CreditNoteCustomerDirectAdvance.objects.filter(accounting_supplier=duplicate_supplier).update(
accounting_supplier=original_supplier)
for sv in duplicate_supplier.suppliervehicle_set.exclude(
vehicle_id__in=original_supplier.suppliervehicle_set.values_list('id', flat=True)):
try:
if not SupplierVehicle.objects.filter(supplier=original_supplier, vehicle=sv.vehicle,
ownership='B').exists():
SupplierVehicle.objects.filter(supplier=duplicate_supplier, vehicle=sv.vehicle).update(
supplier=original_supplier)
if SupplierVehicle.objects.filter(supplier=duplicate_supplier, vehicle=sv.vehicle,
ownership='O').exists() and SupplierVehicle.objects.filter(
supplier=original_supplier, vehicle=sv.vehicle, ownership='B').exists():
SupplierVehicle.objects.filter(supplier=duplicate_supplier, vehicle=sv.vehicle,
ownership='O').update(
ownership='B')
SupplierVehicle.objects.filter(supplier=original_supplier, vehicle=sv.vehicle,
ownership='B').update(
ownership='O')
except:
SupplierVehicle.objects.filter(supplier=duplicate_supplier, vehicle=sv.vehicle).update(
deleted=True)
except Supplier.DoesNotExist:
print(row)
def add_supplier_owner():
owner = Owner.objects.get(id=2424)
supplier = get_or_none(Supplier, user=owner.name)
if not isinstance(supplier, Supplier):
supplier = Supplier.objects.create(user=owner.name, pan=owner.pan, city=owner.city, changed_by=owner.changed_by,
created_by=owner.created_by)
ManualBooking.objects.filter(owner=owner).update(owner_supplier=supplier)
for o_vehicle in owner.vehicle_owner.all():
s_vehicle = get_or_none(s_Vehicle, vehicle_number=o_vehicle.vehicle_number)
if isinstance(s_vehicle, s_Vehicle) and not SupplierVehicle.objects.filter(supplier=supplier,
vehicle=s_vehicle,
ownership='O'):
SupplierVehicle.objects.create(supplier=supplier, vehicle=s_vehicle, ownership='O',
changed_by=owner.changed_by, created_by=owner.created_by)
if isinstance(s_vehicle, s_Vehicle):
VehicleFile.objects.filter(vehicle=o_vehicle).update(supplier_vehicle=s_vehicle)
| true | true |
1c2f445208e6e280e4e703eb7f963c9f3a32c49b | 855 | py | Python | tests/conftest.py | BNMetrics/fetchme | f33e7368b33d74bc2777ac6d698555fe92e9539b | [
"Apache-2.0"
] | 3 | 2018-05-30T12:38:58.000Z | 2018-10-23T11:30:48.000Z | tests/conftest.py | BNMetrics/fetchme | f33e7368b33d74bc2777ac6d698555fe92e9539b | [
"Apache-2.0"
] | 2 | 2020-09-25T05:36:37.000Z | 2021-06-25T15:22:34.000Z | tests/conftest.py | BNMetrics/fetchme | f33e7368b33d74bc2777ac6d698555fe92e9539b | [
"Apache-2.0"
] | null | null | null | import pytest
import mock
import shutil
from pathlib import Path
@pytest.fixture
def tmp_config(mock_home_dir):
project_dir = Path(__file__).parent.parent
mock_config_path = mock_home_dir / '.fetchmerc'
shutil.copyfile(project_dir / 'cfg/.fetchmerc', mock_config_path)
with open(mock_config_path, 'a') as file:
file.write("test_command=ls -al")
yield mock_config_path
# ---------------------------------------------------------------------------
# mocks
# ---------------------------------------------------------------------------
@pytest.fixture
def mock_home_dir(tmpdir, monkeypatch):
monkeypatch.setattr('fetchme.utils.Path.home',
lambda: Path(tmpdir))
yield Path(tmpdir)
@pytest.fixture
def mock_subprocess():
with mock.patch('subprocess.call') as mock_:
yield mock_
| 23.75 | 77 | 0.575439 | import pytest
import mock
import shutil
from pathlib import Path
@pytest.fixture
def tmp_config(mock_home_dir):
project_dir = Path(__file__).parent.parent
mock_config_path = mock_home_dir / '.fetchmerc'
shutil.copyfile(project_dir / 'cfg/.fetchmerc', mock_config_path)
with open(mock_config_path, 'a') as file:
file.write("test_command=ls -al")
yield mock_config_path
@pytest.fixture
def mock_home_dir(tmpdir, monkeypatch):
monkeypatch.setattr('fetchme.utils.Path.home',
lambda: Path(tmpdir))
yield Path(tmpdir)
@pytest.fixture
def mock_subprocess():
with mock.patch('subprocess.call') as mock_:
yield mock_
| true | true |
1c2f44848af40cc8407c8c14930e0d336a2f5d87 | 2,490 | py | Python | test/test_hidden.py | Freso/beets | ef7cca4c1205466086c658a9f50cefb09b5da450 | [
"MIT"
] | null | null | null | test/test_hidden.py | Freso/beets | ef7cca4c1205466086c658a9f50cefb09b5da450 | [
"MIT"
] | null | null | null | test/test_hidden.py | Freso/beets | ef7cca4c1205466086c658a9f50cefb09b5da450 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Fabrice Laporte.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for the 'hidden' utility."""
from __future__ import division, absolute_import, print_function
from test._common import unittest
import sys
import tempfile
from beets.util import hidden
import subprocess
import errno
import ctypes
class HiddenFileTest(unittest.TestCase):
def setUp(self):
pass
def test_osx_hidden(self):
if not sys.platform == 'darwin':
self.skipTest('sys.platform is not darwin')
return
with tempfile.NamedTemporaryFile(delete=False) as f:
try:
command = ["chflags", "hidden", f.name]
subprocess.Popen(command).wait()
except OSError as e:
if e.errno == errno.ENOENT:
self.skipTest("unable to find chflags")
else:
raise e
self.assertTrue(hidden.is_hidden(f.name))
def test_windows_hidden(self):
if not sys.platform == 'windows':
self.skipTest('sys.platform is not windows')
return
# FILE_ATTRIBUTE_HIDDEN = 2 (0x2) from GetFileAttributes documentation.
hidden_mask = 2
with tempfile.NamedTemporaryFile() as f:
# Hide the file using
success = ctypes.windll.kernel32.SetFileAttributesW(f.name,
hidden_mask)
if not success:
self.skipTest("unable to set file attributes")
self.assertTrue(hidden.is_hidden(f.name))
def test_other_hidden(self):
if sys.platform == 'darwin' or sys.platform == 'windows':
self.skipTest('sys.platform is known')
return
with tempfile.NamedTemporaryFile(prefix='.tmp') as f:
self.assertTrue(hidden.is_hidden(f.name))
| 33.2 | 79 | 0.63494 |
from __future__ import division, absolute_import, print_function
from test._common import unittest
import sys
import tempfile
from beets.util import hidden
import subprocess
import errno
import ctypes
class HiddenFileTest(unittest.TestCase):
def setUp(self):
pass
def test_osx_hidden(self):
if not sys.platform == 'darwin':
self.skipTest('sys.platform is not darwin')
return
with tempfile.NamedTemporaryFile(delete=False) as f:
try:
command = ["chflags", "hidden", f.name]
subprocess.Popen(command).wait()
except OSError as e:
if e.errno == errno.ENOENT:
self.skipTest("unable to find chflags")
else:
raise e
self.assertTrue(hidden.is_hidden(f.name))
def test_windows_hidden(self):
if not sys.platform == 'windows':
self.skipTest('sys.platform is not windows')
return
hidden_mask = 2
with tempfile.NamedTemporaryFile() as f:
success = ctypes.windll.kernel32.SetFileAttributesW(f.name,
hidden_mask)
if not success:
self.skipTest("unable to set file attributes")
self.assertTrue(hidden.is_hidden(f.name))
def test_other_hidden(self):
if sys.platform == 'darwin' or sys.platform == 'windows':
self.skipTest('sys.platform is known')
return
with tempfile.NamedTemporaryFile(prefix='.tmp') as f:
self.assertTrue(hidden.is_hidden(f.name))
| true | true |
1c2f47059c8e0b8a6d29ebd5ce959f2c94607e78 | 10,674 | py | Python | tests/test_transforms.py | cattidea/PaTTA | 0a50eb9b6459c91e3a488f8772e124c164cb0d75 | [
"MIT"
] | 122 | 2021-03-09T15:45:22.000Z | 2022-03-28T09:25:51.000Z | tests/test_transforms.py | cattidea/PaTTA | 0a50eb9b6459c91e3a488f8772e124c164cb0d75 | [
"MIT"
] | 9 | 2021-03-14T02:46:13.000Z | 2021-11-13T06:59:26.000Z | tests/test_transforms.py | cattidea/PaTTA | 0a50eb9b6459c91e3a488f8772e124c164cb0d75 | [
"MIT"
] | 16 | 2021-03-10T02:39:52.000Z | 2021-11-03T14:32:16.000Z | import cv2
import numpy as np
import paddle
import patta as tta
import pytest
@pytest.mark.parametrize(
"transform",
[
tta.HorizontalFlip(),
tta.VerticalFlip(),
tta.HorizontalShift(shifts=[0.1, 0.2, 0.4]),
tta.VerticalShift(shifts=[0.1, 0.2, 0.4]),
tta.Rotate90(angles=[0, 90, 180, 270]),
tta.Scale(scales=[1, 2, 4], interpolation="nearest"),
tta.Resize(sizes=[(4, 5), (8, 10)], original_size=(4, 5), interpolation="nearest"),
],
)
def test_aug_deaug_mask(transform):
a = paddle.arange(20).reshape((1, 1, 4, 5)).astype(paddle.float32)
for p in transform.params:
aug = transform.apply_aug_image(a, **{transform.pname: p})
deaug = transform.apply_deaug_mask(aug, **{transform.pname: p})
assert paddle.allclose(a, deaug)
@pytest.mark.parametrize(
"transform",
[
tta.HorizontalFlip(),
tta.VerticalFlip(),
tta.HorizontalShift(shifts=[0.1, 0.2, 0.4]),
tta.VerticalShift(shifts=[0.1, 0.2, 0.4]),
tta.Rotate90(angles=[0, 90, 180, 270]),
tta.Scale(scales=[1, 2, 4], interpolation="nearest"),
tta.Add(values=[-1, 0, 1, 2]),
tta.Multiply(factors=[-1, 0, 1, 2]),
tta.FiveCrops(crop_height=3, crop_width=5),
tta.Resize(sizes=[(4, 5), (8, 10), (2, 2)], interpolation="nearest"),
tta.AdjustBrightness(factors=[0.5, 1.0, 1.5]),
tta.AdjustContrast(factors=[0.5, 1.0, 1.5]),
tta.AverageBlur(kernel_sizes=[(3, 3), (5, 3)]),
tta.GaussianBlur(kernel_sizes=[(3, 3), (5, 3)], sigma=0.3),
tta.Sharpen(kernel_sizes=[3]),
],
)
def test_label_is_same(transform):
a = paddle.arange(20).reshape((1, 1, 4, 5)).astype(paddle.float32)
for p in transform.params:
aug = transform.apply_aug_image(a, **{transform.pname: p})
deaug = transform.apply_deaug_label(aug, **{transform.pname: p})
assert paddle.allclose(aug, deaug)
@pytest.mark.parametrize(
"transform",
[
tta.HorizontalFlip(),
tta.VerticalFlip(),
],
)
def test_flip_keypoints(transform):
keypoints = paddle.to_tensor([[0.1, 0.1], [0.1, 0.9], [0.9, 0.1], [0.9, 0.9], [0.4, 0.3]])
for p in transform.params:
aug = transform.apply_deaug_keypoints(keypoints.detach().clone(), **{transform.pname: p})
deaug = transform.apply_deaug_keypoints(aug, **{transform.pname: p})
assert paddle.allclose(keypoints, deaug)
@pytest.mark.parametrize(
"transform",
[
tta.Rotate90(angles=[0, 90, 180, 270]),
tta.HorizontalShift(shifts=[0.1, 0.2, 0.4]),
tta.VerticalShift(shifts=[0.1, 0.2, 0.4]),
],
)
def test_rotate90_and_shift_keypoints(transform):
keypoints = paddle.to_tensor([[0.1, 0.1], [0.1, 0.9], [0.9, 0.1], [0.9, 0.9], [0.4, 0.3]])
for p in transform.params:
aug = transform.apply_deaug_keypoints(keypoints.detach().clone(), **{transform.pname: p})
deaug = transform.apply_deaug_keypoints(aug, **{transform.pname: -p})
assert paddle.allclose(keypoints, deaug)
def test_add_transform():
transform = tta.Add(values=[-1, 0, 1])
a = paddle.arange(20).reshape((1, 1, 4, 5)).astype(paddle.float32)
for p in transform.params:
aug = transform.apply_aug_image(a, **{transform.pname: p})
assert paddle.allclose(aug, a + p)
def test_multiply_transform():
transform = tta.Multiply(factors=[-1, 0, 1])
a = paddle.arange(20).reshape((1, 1, 4, 5)).astype(paddle.float32)
for p in transform.params:
aug = transform.apply_aug_image(a, **{transform.pname: p})
assert paddle.allclose(aug, a * p)
def test_fivecrop_transform():
transform = tta.FiveCrops(crop_height=1, crop_width=1)
a = paddle.arange(25).reshape((1, 1, 5, 5)).astype(paddle.float32)
output = [0, 20, 24, 4, 12]
for i, p in enumerate(transform.params):
aug = transform.apply_aug_image(a, **{transform.pname: p})
assert aug.item() == output[i]
def test_resize_transform():
transform = tta.Resize(sizes=[(10, 10), (5, 5)], original_size=(5, 5))
a = paddle.arange(25).reshape((1, 1, 5, 5)).astype(paddle.float32)
output = [
[
[0, 0, 1, 1, 2, 2, 3, 3, 4, 4],
[0, 0, 1, 1, 2, 2, 3, 3, 4, 4],
[5, 5, 6, 6, 7, 7, 8, 8, 9, 9],
[5, 5, 6, 6, 7, 7, 8, 8, 9, 9],
[10, 10, 11, 11, 12, 12, 13, 13, 14, 14],
[10, 10, 11, 11, 12, 12, 13, 13, 14, 14],
[15, 15, 16, 16, 17, 17, 18, 18, 19, 19],
[15, 15, 16, 16, 17, 17, 18, 18, 19, 19],
[20, 20, 21, 21, 22, 22, 23, 23, 24, 24],
[20, 20, 21, 21, 22, 22, 23, 23, 24, 24],
],
[
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
],
]
for i, p in enumerate(transform.params):
aug = transform.apply_aug_image(a, **{transform.pname: p})
assert paddle.allclose(aug.reshape((aug.shape[-2], aug.shape[-1])), paddle.to_tensor(output[i], paddle.float32))
def test_adjust_brightness_transform():
transform = tta.AdjustBrightness(factors=[0.5, 1.5])
a = paddle.arange(25).reshape((1, 1, 5, 5)).astype(paddle.float32)
a = paddle.concat([a, a, a], axis=1)
output = [
[
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
],
[
[0, 0, 1, 1, 2],
[2, 3, 3, 4, 4],
[5, 5, 6, 6, 7],
[7, 8, 8, 9, 9],
[10, 10, 11, 11, 12],
],
[
[0, 1, 3, 4, 6],
[7, 9, 10, 12, 13],
[15, 16, 18, 19, 21],
[22, 24, 25, 27, 28],
[30, 31, 33, 34, 36],
],
]
for i, p in enumerate(transform.params):
aug = transform.apply_aug_image(a, **{transform.pname: p})
assert paddle.allclose(aug[0, 0], paddle.to_tensor(output[i], paddle.float32))
def test_adjust_contrast_transform():
transform = tta.AdjustContrast(factors=[0.5, 1.2])
a = paddle.arange(25).reshape((1, 1, 5, 5)).astype(paddle.float32)
a = paddle.concat([a, a, a], axis=1)
output = [
[
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
],
[
[37, 37, 38, 38, 39],
[39, 40, 40, 41, 41],
[42, 42, 43, 43, 44],
[44, 45, 45, 46, 46],
[47, 47, 48, 48, 49],
],
[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 2],
[3, 4, 5, 6, 8],
[9, 10, 11, 12, 14],
],
]
for i, p in enumerate(transform.params):
aug = transform.apply_aug_image(a, **{transform.pname: p})
assert paddle.allclose(aug[0, 0], paddle.to_tensor(output[i], paddle.float32))
def test_average_blur_transform():
transform = tta.AverageBlur(kernel_sizes=[(3, 3), (5, 7)])
img = np.random.randint(0, 255, size=(224, 224, 3)).astype(np.float32)
for i, kernel_size in enumerate(transform.params):
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
if kernel_size == (1, 1):
img_aug_cv2 = img
else:
img_aug_cv2 = cv2.blur(img, kernel_size)
img_tensor = paddle.to_tensor(img).unsqueeze(0).transpose((0, 3, 1, 2))
img_tensor_aug = transform.apply_aug_image(img_tensor, kernel_size=kernel_size)
img_tensor_aug = img_tensor_aug.transpose((0, 2, 3, 1)).squeeze(0)
img_aug = img_tensor_aug.numpy()
pad_x = (kernel_size[0] - 1) // 2
pad_y = (kernel_size[1] - 1) // 2
if kernel_size[0] == 1:
assert np.allclose(img_aug_cv2, img_aug)
else:
assert np.allclose(img_aug_cv2[pad_y : -pad_y, pad_x: -pad_x], img_aug[pad_y : -pad_y, pad_x: -pad_x])
@pytest.mark.parametrize(
"sigma",
[
(0.3, 0.3),
(0.5, 0.7),
],
)
def test_gaussian_blur_transform(sigma):
transform = tta.GaussianBlur(kernel_sizes=[(3, 3), (5, 7)], sigma=sigma)
img = np.random.randint(0, 255, size=(224, 224, 3)).astype(np.float32)
for i, kernel_size in enumerate(transform.params):
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
if kernel_size == (1, 1):
img_aug_cv2 = img
else:
img_aug_cv2 = cv2.GaussianBlur(img, kernel_size, sigmaX=sigma[0], sigmaY=sigma[1])
img_tensor = paddle.to_tensor(img).unsqueeze(0).transpose((0, 3, 1, 2))
img_tensor_aug = transform.apply_aug_image(img_tensor, kernel_size=kernel_size)
img_tensor_aug = img_tensor_aug.transpose((0, 2, 3, 1)).squeeze(0)
img_aug = img_tensor_aug.numpy()
pad_x = (kernel_size[0] - 1) // 2
pad_y = (kernel_size[1] - 1) // 2
if kernel_size[0] == 1:
assert np.allclose(img_aug_cv2, img_aug)
else:
assert np.allclose(img_aug_cv2[pad_y : -pad_y, pad_x: -pad_x], img_aug[pad_y : -pad_y, pad_x: -pad_x])
def test_sharpen_transform():
transform = tta.Sharpen(kernel_sizes=[3, 5, 7])
img = np.linspace(0, 240, 224 * 224 * 3).reshape(224, 224, 3).astype(np.float32)
noise = np.random.randint(0, 5, size=(224, 224, 3)).astype(np.float32)
img += noise
for i, kernel_size in enumerate(transform.params):
if kernel_size == 1:
img_aug_cv2 = img
else:
img_laplacian_kernel = tta.functional.get_laplacian_kernel(kernel_size).astype(np.float32)
img_laplacian = cv2.filter2D(img, -1, img_laplacian_kernel)
img_aug_cv2 = cv2.addWeighted(img, 1, img_laplacian, -1, 0)
img_aug_cv2 = np.clip(img_aug_cv2, 0, 255)
img_tensor = paddle.to_tensor(img).unsqueeze(0).transpose((0, 3, 1, 2))
img_tensor_aug = transform.apply_aug_image(img_tensor, kernel_size=kernel_size)
img_tensor_aug = img_tensor_aug.transpose((0, 2, 3, 1)).squeeze(0)
img_aug = img_tensor_aug.numpy()
pad = (kernel_size - 1) // 2
if kernel_size == 1:
assert np.allclose(img_aug_cv2, img_aug)
else:
# 按理说这应该过的,而且本地也是 100% 通过,但 CI 上就是有精度误差,因此暂时放宽限制
# assert np.allclose(img_aug_cv2[pad:-pad, pad:-pad], img_aug[pad:-pad, pad:-pad])
assert np.abs(img_aug_cv2[pad:-pad, pad:-pad] - img_aug[pad:-pad, pad:-pad]).max() < 1e-2
| 36.554795 | 120 | 0.55846 | import cv2
import numpy as np
import paddle
import patta as tta
import pytest
@pytest.mark.parametrize(
"transform",
[
tta.HorizontalFlip(),
tta.VerticalFlip(),
tta.HorizontalShift(shifts=[0.1, 0.2, 0.4]),
tta.VerticalShift(shifts=[0.1, 0.2, 0.4]),
tta.Rotate90(angles=[0, 90, 180, 270]),
tta.Scale(scales=[1, 2, 4], interpolation="nearest"),
tta.Resize(sizes=[(4, 5), (8, 10)], original_size=(4, 5), interpolation="nearest"),
],
)
def test_aug_deaug_mask(transform):
a = paddle.arange(20).reshape((1, 1, 4, 5)).astype(paddle.float32)
for p in transform.params:
aug = transform.apply_aug_image(a, **{transform.pname: p})
deaug = transform.apply_deaug_mask(aug, **{transform.pname: p})
assert paddle.allclose(a, deaug)
@pytest.mark.parametrize(
"transform",
[
tta.HorizontalFlip(),
tta.VerticalFlip(),
tta.HorizontalShift(shifts=[0.1, 0.2, 0.4]),
tta.VerticalShift(shifts=[0.1, 0.2, 0.4]),
tta.Rotate90(angles=[0, 90, 180, 270]),
tta.Scale(scales=[1, 2, 4], interpolation="nearest"),
tta.Add(values=[-1, 0, 1, 2]),
tta.Multiply(factors=[-1, 0, 1, 2]),
tta.FiveCrops(crop_height=3, crop_width=5),
tta.Resize(sizes=[(4, 5), (8, 10), (2, 2)], interpolation="nearest"),
tta.AdjustBrightness(factors=[0.5, 1.0, 1.5]),
tta.AdjustContrast(factors=[0.5, 1.0, 1.5]),
tta.AverageBlur(kernel_sizes=[(3, 3), (5, 3)]),
tta.GaussianBlur(kernel_sizes=[(3, 3), (5, 3)], sigma=0.3),
tta.Sharpen(kernel_sizes=[3]),
],
)
def test_label_is_same(transform):
a = paddle.arange(20).reshape((1, 1, 4, 5)).astype(paddle.float32)
for p in transform.params:
aug = transform.apply_aug_image(a, **{transform.pname: p})
deaug = transform.apply_deaug_label(aug, **{transform.pname: p})
assert paddle.allclose(aug, deaug)
@pytest.mark.parametrize(
"transform",
[
tta.HorizontalFlip(),
tta.VerticalFlip(),
],
)
def test_flip_keypoints(transform):
keypoints = paddle.to_tensor([[0.1, 0.1], [0.1, 0.9], [0.9, 0.1], [0.9, 0.9], [0.4, 0.3]])
for p in transform.params:
aug = transform.apply_deaug_keypoints(keypoints.detach().clone(), **{transform.pname: p})
deaug = transform.apply_deaug_keypoints(aug, **{transform.pname: p})
assert paddle.allclose(keypoints, deaug)
@pytest.mark.parametrize(
"transform",
[
tta.Rotate90(angles=[0, 90, 180, 270]),
tta.HorizontalShift(shifts=[0.1, 0.2, 0.4]),
tta.VerticalShift(shifts=[0.1, 0.2, 0.4]),
],
)
def test_rotate90_and_shift_keypoints(transform):
keypoints = paddle.to_tensor([[0.1, 0.1], [0.1, 0.9], [0.9, 0.1], [0.9, 0.9], [0.4, 0.3]])
for p in transform.params:
aug = transform.apply_deaug_keypoints(keypoints.detach().clone(), **{transform.pname: p})
deaug = transform.apply_deaug_keypoints(aug, **{transform.pname: -p})
assert paddle.allclose(keypoints, deaug)
def test_add_transform():
transform = tta.Add(values=[-1, 0, 1])
a = paddle.arange(20).reshape((1, 1, 4, 5)).astype(paddle.float32)
for p in transform.params:
aug = transform.apply_aug_image(a, **{transform.pname: p})
assert paddle.allclose(aug, a + p)
def test_multiply_transform():
transform = tta.Multiply(factors=[-1, 0, 1])
a = paddle.arange(20).reshape((1, 1, 4, 5)).astype(paddle.float32)
for p in transform.params:
aug = transform.apply_aug_image(a, **{transform.pname: p})
assert paddle.allclose(aug, a * p)
def test_fivecrop_transform():
transform = tta.FiveCrops(crop_height=1, crop_width=1)
a = paddle.arange(25).reshape((1, 1, 5, 5)).astype(paddle.float32)
output = [0, 20, 24, 4, 12]
for i, p in enumerate(transform.params):
aug = transform.apply_aug_image(a, **{transform.pname: p})
assert aug.item() == output[i]
def test_resize_transform():
transform = tta.Resize(sizes=[(10, 10), (5, 5)], original_size=(5, 5))
a = paddle.arange(25).reshape((1, 1, 5, 5)).astype(paddle.float32)
output = [
[
[0, 0, 1, 1, 2, 2, 3, 3, 4, 4],
[0, 0, 1, 1, 2, 2, 3, 3, 4, 4],
[5, 5, 6, 6, 7, 7, 8, 8, 9, 9],
[5, 5, 6, 6, 7, 7, 8, 8, 9, 9],
[10, 10, 11, 11, 12, 12, 13, 13, 14, 14],
[10, 10, 11, 11, 12, 12, 13, 13, 14, 14],
[15, 15, 16, 16, 17, 17, 18, 18, 19, 19],
[15, 15, 16, 16, 17, 17, 18, 18, 19, 19],
[20, 20, 21, 21, 22, 22, 23, 23, 24, 24],
[20, 20, 21, 21, 22, 22, 23, 23, 24, 24],
],
[
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
],
]
for i, p in enumerate(transform.params):
aug = transform.apply_aug_image(a, **{transform.pname: p})
assert paddle.allclose(aug.reshape((aug.shape[-2], aug.shape[-1])), paddle.to_tensor(output[i], paddle.float32))
def test_adjust_brightness_transform():
transform = tta.AdjustBrightness(factors=[0.5, 1.5])
a = paddle.arange(25).reshape((1, 1, 5, 5)).astype(paddle.float32)
a = paddle.concat([a, a, a], axis=1)
output = [
[
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
],
[
[0, 0, 1, 1, 2],
[2, 3, 3, 4, 4],
[5, 5, 6, 6, 7],
[7, 8, 8, 9, 9],
[10, 10, 11, 11, 12],
],
[
[0, 1, 3, 4, 6],
[7, 9, 10, 12, 13],
[15, 16, 18, 19, 21],
[22, 24, 25, 27, 28],
[30, 31, 33, 34, 36],
],
]
for i, p in enumerate(transform.params):
aug = transform.apply_aug_image(a, **{transform.pname: p})
assert paddle.allclose(aug[0, 0], paddle.to_tensor(output[i], paddle.float32))
def test_adjust_contrast_transform():
transform = tta.AdjustContrast(factors=[0.5, 1.2])
a = paddle.arange(25).reshape((1, 1, 5, 5)).astype(paddle.float32)
a = paddle.concat([a, a, a], axis=1)
output = [
[
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
],
[
[37, 37, 38, 38, 39],
[39, 40, 40, 41, 41],
[42, 42, 43, 43, 44],
[44, 45, 45, 46, 46],
[47, 47, 48, 48, 49],
],
[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 2],
[3, 4, 5, 6, 8],
[9, 10, 11, 12, 14],
],
]
for i, p in enumerate(transform.params):
aug = transform.apply_aug_image(a, **{transform.pname: p})
assert paddle.allclose(aug[0, 0], paddle.to_tensor(output[i], paddle.float32))
def test_average_blur_transform():
transform = tta.AverageBlur(kernel_sizes=[(3, 3), (5, 7)])
img = np.random.randint(0, 255, size=(224, 224, 3)).astype(np.float32)
for i, kernel_size in enumerate(transform.params):
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
if kernel_size == (1, 1):
img_aug_cv2 = img
else:
img_aug_cv2 = cv2.blur(img, kernel_size)
img_tensor = paddle.to_tensor(img).unsqueeze(0).transpose((0, 3, 1, 2))
img_tensor_aug = transform.apply_aug_image(img_tensor, kernel_size=kernel_size)
img_tensor_aug = img_tensor_aug.transpose((0, 2, 3, 1)).squeeze(0)
img_aug = img_tensor_aug.numpy()
pad_x = (kernel_size[0] - 1) // 2
pad_y = (kernel_size[1] - 1) // 2
if kernel_size[0] == 1:
assert np.allclose(img_aug_cv2, img_aug)
else:
assert np.allclose(img_aug_cv2[pad_y : -pad_y, pad_x: -pad_x], img_aug[pad_y : -pad_y, pad_x: -pad_x])
@pytest.mark.parametrize(
"sigma",
[
(0.3, 0.3),
(0.5, 0.7),
],
)
def test_gaussian_blur_transform(sigma):
transform = tta.GaussianBlur(kernel_sizes=[(3, 3), (5, 7)], sigma=sigma)
img = np.random.randint(0, 255, size=(224, 224, 3)).astype(np.float32)
for i, kernel_size in enumerate(transform.params):
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
if kernel_size == (1, 1):
img_aug_cv2 = img
else:
img_aug_cv2 = cv2.GaussianBlur(img, kernel_size, sigmaX=sigma[0], sigmaY=sigma[1])
img_tensor = paddle.to_tensor(img).unsqueeze(0).transpose((0, 3, 1, 2))
img_tensor_aug = transform.apply_aug_image(img_tensor, kernel_size=kernel_size)
img_tensor_aug = img_tensor_aug.transpose((0, 2, 3, 1)).squeeze(0)
img_aug = img_tensor_aug.numpy()
pad_x = (kernel_size[0] - 1) // 2
pad_y = (kernel_size[1] - 1) // 2
if kernel_size[0] == 1:
assert np.allclose(img_aug_cv2, img_aug)
else:
assert np.allclose(img_aug_cv2[pad_y : -pad_y, pad_x: -pad_x], img_aug[pad_y : -pad_y, pad_x: -pad_x])
def test_sharpen_transform():
transform = tta.Sharpen(kernel_sizes=[3, 5, 7])
img = np.linspace(0, 240, 224 * 224 * 3).reshape(224, 224, 3).astype(np.float32)
noise = np.random.randint(0, 5, size=(224, 224, 3)).astype(np.float32)
img += noise
for i, kernel_size in enumerate(transform.params):
if kernel_size == 1:
img_aug_cv2 = img
else:
img_laplacian_kernel = tta.functional.get_laplacian_kernel(kernel_size).astype(np.float32)
img_laplacian = cv2.filter2D(img, -1, img_laplacian_kernel)
img_aug_cv2 = cv2.addWeighted(img, 1, img_laplacian, -1, 0)
img_aug_cv2 = np.clip(img_aug_cv2, 0, 255)
img_tensor = paddle.to_tensor(img).unsqueeze(0).transpose((0, 3, 1, 2))
img_tensor_aug = transform.apply_aug_image(img_tensor, kernel_size=kernel_size)
img_tensor_aug = img_tensor_aug.transpose((0, 2, 3, 1)).squeeze(0)
img_aug = img_tensor_aug.numpy()
pad = (kernel_size - 1) // 2
if kernel_size == 1:
assert np.allclose(img_aug_cv2, img_aug)
else:
assert np.abs(img_aug_cv2[pad:-pad, pad:-pad] - img_aug[pad:-pad, pad:-pad]).max() < 1e-2
| true | true |
1c2f474a8aa68e48585c6b0f9ba36c039760f8c8 | 3,830 | py | Python | measure_mate/migrations/0001_initial.py | niche-tester/measure-mate | c3acba57747bcb89fe0c6b9509ec90f04a581506 | [
"MIT"
] | 15 | 2015-12-14T02:20:31.000Z | 2022-01-30T04:36:39.000Z | measure_mate/migrations/0001_initial.py | rloomans/measure-mate | e89f9c8e1faa1920496f1c997f6d87ec0f9bd7c2 | [
"MIT"
] | 1,403 | 2017-02-16T01:00:04.000Z | 2022-03-15T21:12:13.000Z | measure_mate/migrations/0001_initial.py | rloomans/measure-mate | e89f9c8e1faa1920496f1c997f6d87ec0f9bd7c2 | [
"MIT"
] | 10 | 2015-12-18T01:30:46.000Z | 2022-01-30T04:36:41.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Assessment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
options={
'verbose_name_plural': 'Assessments',
},
),
migrations.CreateModel(
name='Attribute',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=256)),
('desc', models.TextField()),
],
options={
'verbose_name_plural': 'attributes',
},
),
migrations.CreateModel(
name='Measurement',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('observations', models.TextField()),
('assessment', models.ForeignKey(related_name='measurements', to='measure_mate.Assessment', on_delete=models.CASCADE)),
],
options={
'verbose_name_plural': 'Measurements',
},
),
migrations.CreateModel(
name='Rating',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=256)),
('desc', models.TextField()),
('rank', models.IntegerField(default=1)),
('attribute', models.ForeignKey(related_name='ratings', to='measure_mate.Attribute', on_delete=models.CASCADE)),
],
options={
'verbose_name_plural': 'Ratings',
},
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=256)),
],
options={
'verbose_name_plural': 'Tags',
},
),
migrations.CreateModel(
name='Template',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=256)),
('short_desc', models.CharField(max_length=256)),
],
options={
'verbose_name_plural': 'Templates',
},
),
migrations.AddField(
model_name='measurement',
name='rating',
field=models.ForeignKey(related_name='measurements', to='measure_mate.Rating', on_delete=models.PROTECT),
),
migrations.AddField(
model_name='attribute',
name='discipline',
field=models.ForeignKey(related_name='attributes', to='measure_mate.Template', on_delete=models.CASCADE),
),
migrations.AddField(
model_name='assessment',
name='tags',
field=models.ManyToManyField(to='measure_mate.Tag'),
),
migrations.AddField(
model_name='assessment',
name='template',
field=models.ForeignKey(related_name='assessments', to='measure_mate.Template', on_delete=models.PROTECT),
),
migrations.AlterUniqueTogether(
name='rating',
unique_together=set([('name', 'rank', 'attribute')]),
),
]
| 37.184466 | 135 | 0.536292 |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Assessment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
options={
'verbose_name_plural': 'Assessments',
},
),
migrations.CreateModel(
name='Attribute',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=256)),
('desc', models.TextField()),
],
options={
'verbose_name_plural': 'attributes',
},
),
migrations.CreateModel(
name='Measurement',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('observations', models.TextField()),
('assessment', models.ForeignKey(related_name='measurements', to='measure_mate.Assessment', on_delete=models.CASCADE)),
],
options={
'verbose_name_plural': 'Measurements',
},
),
migrations.CreateModel(
name='Rating',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=256)),
('desc', models.TextField()),
('rank', models.IntegerField(default=1)),
('attribute', models.ForeignKey(related_name='ratings', to='measure_mate.Attribute', on_delete=models.CASCADE)),
],
options={
'verbose_name_plural': 'Ratings',
},
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=256)),
],
options={
'verbose_name_plural': 'Tags',
},
),
migrations.CreateModel(
name='Template',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=256)),
('short_desc', models.CharField(max_length=256)),
],
options={
'verbose_name_plural': 'Templates',
},
),
migrations.AddField(
model_name='measurement',
name='rating',
field=models.ForeignKey(related_name='measurements', to='measure_mate.Rating', on_delete=models.PROTECT),
),
migrations.AddField(
model_name='attribute',
name='discipline',
field=models.ForeignKey(related_name='attributes', to='measure_mate.Template', on_delete=models.CASCADE),
),
migrations.AddField(
model_name='assessment',
name='tags',
field=models.ManyToManyField(to='measure_mate.Tag'),
),
migrations.AddField(
model_name='assessment',
name='template',
field=models.ForeignKey(related_name='assessments', to='measure_mate.Template', on_delete=models.PROTECT),
),
migrations.AlterUniqueTogether(
name='rating',
unique_together=set([('name', 'rank', 'attribute')]),
),
]
| true | true |
1c2f4903ed0f5a94ab7a56b2b885e62f9d8cc28b | 856 | py | Python | download/Cost-Function-Of-ML/costFunctionExam1.py | chenjian158978/chenjian.github.io | 1742e518d43470aa88690f2f40094859e7d7f261 | [
"Apache-2.0"
] | 3 | 2019-05-06T15:14:43.000Z | 2019-12-23T07:26:52.000Z | download/Cost-Function-Of-ML/costFunctionExam1.py | chenjian158978/chenjian.github.io | 1742e518d43470aa88690f2f40094859e7d7f261 | [
"Apache-2.0"
] | 4 | 2020-02-25T06:39:47.000Z | 2022-02-26T03:22:42.000Z | download/Cost-Function-Of-ML/costFunctionExam1.py | chenjian158978/chenjian.github.io | 1742e518d43470aa88690f2f40094859e7d7f261 | [
"Apache-2.0"
] | 5 | 2017-02-26T09:06:25.000Z | 2019-10-01T17:15:39.000Z | # -*- coding:utf8 -*-
"""
@author: chenjian158978@gmail.com
@date: Tue, May 23 2017
@time: 19:05:20 GMT+8
"""
import matplotlib.pyplot as plt
import numpy as np
def calcu_cost(theta, X, Y):
""" 计算代价函数的值
:param theta: 斜率
:param X: x值
:param Y: y值
:return: J值
"""
m = X.shape[0]
h = np.dot(X, theta)
return np.dot((h - Y).T, (h - Y)) / (2 * m)
X = np.array([[0, 1, 2, 4]]).T
Y = np.array([[0, 1, 2, 4]]).T
# 从-2到4之间返回均匀间隔的数字,共101个
# theta是101*1的矩阵
theta = np.array([np.linspace(-2, 4, 101)]).T
J_list = []
for i in range(101):
current_theta = theta[i:(i + 1)].T
cost = calcu_cost(current_theta, X, Y)
J_list.append(cost[0, 0])
plt.plot(theta, J_list)
plt.xlabel('theta_1')
plt.ylabel('J(theta)')
plt.title('Cost Function Example1')
plt.grid(True)
plt.savefig('cost_theta.png', dpi=200)
plt.show()
| 17.469388 | 47 | 0.598131 |
import matplotlib.pyplot as plt
import numpy as np
def calcu_cost(theta, X, Y):
m = X.shape[0]
h = np.dot(X, theta)
return np.dot((h - Y).T, (h - Y)) / (2 * m)
X = np.array([[0, 1, 2, 4]]).T
Y = np.array([[0, 1, 2, 4]]).T
theta = np.array([np.linspace(-2, 4, 101)]).T
J_list = []
for i in range(101):
current_theta = theta[i:(i + 1)].T
cost = calcu_cost(current_theta, X, Y)
J_list.append(cost[0, 0])
plt.plot(theta, J_list)
plt.xlabel('theta_1')
plt.ylabel('J(theta)')
plt.title('Cost Function Example1')
plt.grid(True)
plt.savefig('cost_theta.png', dpi=200)
plt.show()
| true | true |
1c2f4905406663f456667777b14afcb3fbbb2224 | 704 | py | Python | question_bank/build-an-array-with-stack-operations/build-an-array-with-stack-operations.py | yatengLG/leetcode-python | 5d48aecb578c86d69835368fad3d9cc21961c226 | [
"Apache-2.0"
] | 9 | 2020-08-12T10:01:00.000Z | 2022-01-05T04:37:48.000Z | question_bank/build-an-array-with-stack-operations/build-an-array-with-stack-operations.py | yatengLG/leetcode-python | 5d48aecb578c86d69835368fad3d9cc21961c226 | [
"Apache-2.0"
] | 1 | 2021-02-16T10:19:31.000Z | 2021-02-16T10:19:31.000Z | question_bank/build-an-array-with-stack-operations/build-an-array-with-stack-operations.py | yatengLG/leetcode-python | 5d48aecb578c86d69835368fad3d9cc21961c226 | [
"Apache-2.0"
] | 4 | 2020-08-12T10:13:31.000Z | 2021-11-05T01:26:58.000Z | # -*- coding: utf-8 -*-
# @Author : LG
"""
执行用时:28 ms, 在所有 Python3 提交中击败了99.57% 的用户
内存消耗:13.4 MB, 在所有 Python3 提交中击败了38.43% 的用户
解题思路:
用下标去查看target元素
见代码注释
"""
class Solution:
def buildArray(self, target: List[int], n: int) -> List[str]:
i = 0
N = len(target)
result = []
for num in range(1, n+1):
if num == target[i]:
i += 1
result.append('Push') # 如果数组与元素符合,则push,且对比下一个元素
else:
result.append('Push')
result.append('Pop')
# result.extend(['Push', 'Pop']) # 不符合,则push后pop, 用下一个数字对比
if i > N-1:
break
return result
| 24.275862 | 75 | 0.482955 |
class Solution:
def buildArray(self, target: List[int], n: int) -> List[str]:
i = 0
N = len(target)
result = []
for num in range(1, n+1):
if num == target[i]:
i += 1
result.append('Push')
else:
result.append('Push')
result.append('Pop')
break
return result
| true | true |
1c2f4971647180e7f5027c908f7a6137bf303b5f | 27,315 | py | Python | mai_version/fold/fold_file_parser.py | joschout/tilde | 1403b50842b83f2edd6b16b1fbe24b9bec2d0048 | [
"Apache-2.0"
] | 16 | 2019-03-06T06:11:33.000Z | 2022-02-07T21:30:25.000Z | mai_version/fold/fold_file_parser.py | joschout/tilde | 1403b50842b83f2edd6b16b1fbe24b9bec2d0048 | [
"Apache-2.0"
] | 4 | 2019-10-08T14:48:23.000Z | 2020-03-26T00:31:57.000Z | mai_version/fold/fold_file_parser.py | krishnangovindraj/tilde | 5243a02d92f375d56ffc49ab8c3d1a87e31e99b9 | [
"Apache-2.0"
] | 4 | 2019-08-14T05:40:47.000Z | 2020-08-05T13:21:16.000Z | import time
from math import sqrt
from statistics import mean, variance
from typing import Set, List, Tuple
from problog.engine import DefaultEngine, GenericEngine
from problog.logic import Constant
from mai_version.classification.classification_helper import get_keys_classifier, do_labeled_examples_get_correctly_classified
from mai_version.classification.classification_statistics_handler import ClassificationStatisticsHandler
from mai_version.classification.confidence_intervals import mean_confidence_interval
from mai_version.fold.fold_helper import write_out_tree, write_out_program, FoldData
from mai_version.representation.example import ExampleWrapper, ClauseDBExampleWrapper
from mai_version.representation.example_collection import ExampleCollection
from mai_version.run.program_phase import build_tree, convert_tree_to_program, prune_tree
# dir_logic_files = 'D:\\KUL\\KUL MAI\\Masterproef\\data\\ecml06 - met ace bestanden\\bongard4\\results\\t-0-0-0\\'
# fname_prefix_logic = 'bongard'
#
# fname_examples = dir_logic_files + fname_prefix_logic + kb_suffix
# fname_settings = dir_logic_files + fname_prefix_logic + s_suffix
# fname_background = dir_logic_files + fname_prefix_logic + bg_suffix
#
# dir_fold_files = 'D:\\KUL\\KUL MAI\\Masterproef\\data\\ecml06 - met ace bestanden\\bongard4\\foil\\folds\\'
# fname_prefix_fold = 'test'
# fold_start_index = 0
# nb_folds = 10
# fold_suffix = '.txt'
#
# dir_output_files = 'D:\\KUL\\KUL MAI\\Masterproef\\TILDE\\tilde\\fold\\data\\'
def split_examples_into_training_and_test_sets(
all_key_sets: List[Set[Constant]], test_key_set: Set[Constant],
examples_collection_usable_for_training: ExampleCollection,
examples_usable_for_testing: List[ClauseDBExampleWrapper]
) -> Tuple[ExampleCollection, List[ExampleWrapper]]:
training_key_sets_list = [s for s in all_key_sets if s is not test_key_set] # type: List[Set[Constant]]
training_key_set = set.union(*training_key_sets_list) # type: Set[Constant]
training_example_collection = examples_collection_usable_for_training.filter_examples(
training_key_set) # type: ExampleCollection
test_examples = [ex_wp for ex_wp in examples_usable_for_testing if
ex_wp.key in test_key_set] # type: List[ExampleWrapper]
return training_example_collection, test_examples
def do_one_fold(fold_index: int, test_key_set: Set[Constant], fd: FoldData
):
print('\n===========================')
print('=== start FOLD ' + str(fold_index + 1) + ' of ' + str(fd.nb_folds))
print('===========================')
training_example_collection, test_examples = split_examples_into_training_and_test_sets(
fd.all_key_sets, test_key_set, fd.examples_collection_usable_for_training, fd.examples_usable_for_testing)
print('\ttotal nb of labeled examples: ' + str(fd.total_nb_of_labeled_examples))
nb_of_training_ex = len(training_example_collection.example_wrappers_sp)
nb_of_test_ex = len(test_examples)
print('\tnb of TRAINING ex: ' + str(nb_of_training_ex))
print('\tnb of TEST ex: ' + str(nb_of_test_ex))
# ===========================
start_time = time.time()
# ==============================================================================================================
print('\t=== start building tree for fold ' + str(fold_index + 1))
# TRAIN MODEL using training set
tree = build_tree(fd.internal_ex_format, fd.treebuilder_type, fd.parsed_settings.language,
fd.possible_labels, training_example_collection, prediction_goal=fd.prediction_goal,
full_background_knowledge_sp=fd.full_background_knowledge_sp,
debug_printing_tree_building=fd.debug_printing_tree_building, engine=fd.engine)
tree = prune_tree(tree, debug_printing_tree_pruning=fd.debug_printing_tree_pruning)
nb_of_nodes = tree.get_nb_of_nodes()
nb_inner_nodes = tree.get_nb_of_inner_nodes()
fd.total_nb_of_nodes_per_fold.append(nb_of_nodes)
fd.nb_of_inner_node_per_fold.append(nb_inner_nodes)
# write out tree
tree_fname = fd.dir_output_files + fd.fname_prefix_fold + '_fold' + str(fold_index) + ".tree"
write_out_tree(tree_fname, tree)
print('\t=== end building tree for fold ' + str(fold_index + 1))
# ==============================================================================================================
print('\t=== start converting tree to program for fold ' + str(fold_index + 1))
program = convert_tree_to_program(fd.kb_format, fd.treebuilder_type, tree, fd.parsed_settings.language,
debug_printing=fd.debug_printing_program_conversion,
prediction_goal=fd.prediction_goal,
index_of_label_var=fd.index_of_label_var)
program_fname = fd.dir_output_files + fd.fname_prefix_fold + '_fold' + str(fold_index) + ".program"
write_out_program(program_fname, program)
print('\t=== end converting tree to program for fold ' + str(fold_index + 1))
# ==============================================================================================================
print('\t=== start classifying test set' + str(fold_index + 1))
# EVALUATE MODEL using test set
classifier = get_keys_classifier(fd.internal_ex_format, program, fd.prediction_goal,
fd.index_of_label_var, fd.stripped_background_knowledge,
debug_printing=fd.debug_printing_get_classifier, engine=fd.engine)
statistics_handler = do_labeled_examples_get_correctly_classified(
classifier, test_examples, fd.possible_labels,
fd.debug_printing_classification) # type: ClassificationStatisticsHandler
# ===================
end_time = time.time()
# time in seconds: # time in seconds
elapsed_time = end_time - start_time
fd.execution_time_per_fold.append(elapsed_time)
accuracy, _ = statistics_handler.get_accuracy()
fd.accuracies_folds.append(accuracy)
statistics_fname = fd.dir_output_files + fd.fname_prefix_fold + '_fold' + str(fold_index) + ".statistics"
statistics_handler.write_out_statistics_to_file(statistics_fname)
with open(statistics_fname, 'a') as f:
f.write('\n\nnb of TRAINING ex: ' + str(nb_of_training_ex) + "\n")
f.write('nb of TEST ex: ' + str(nb_of_test_ex) + "\n\n")
f.write("total nb of nodes: " + str(nb_of_nodes) + "\n")
f.write("nb of internal nodes: " + str(nb_inner_nodes) + "\n\n")
f.write("execution time of fold: " + str(elapsed_time) + " seconds\n")
print("total nb of nodes: " + str(nb_of_nodes))
print("nb of internal nodes: " + str(nb_inner_nodes))
print("execution time of fold: ", elapsed_time, "seconds")
print('\t=== end classifying test set' + str(fold_index + 1))
print('\t=== end FOLD ' + str(fold_index + 1) + ' of ' + str(fd.nb_folds) + '\n')
def do_all_examples(fd: FoldData):
print('\n=======================================')
print('=== FINALLY, learn tree on all examples')
print('========================================')
print('\ttotal nb of labeled examples: ' + str(fd.total_nb_of_labeled_examples))
print('\t=== start building tree for ALL examples')
# ===========================
start_time = time.time()
# TRAIN MODEL using training set
tree = build_tree(fd.internal_ex_format, fd.treebuilder_type, fd.parsed_settings.language,
fd.possible_labels, fd.examples_collection_usable_for_training,
prediction_goal=fd.prediction_goal,
full_background_knowledge_sp=fd.full_background_knowledge_sp,
debug_printing_tree_building=fd.debug_printing_tree_building, engine=fd.engine)
tree = prune_tree(tree, debug_printing_tree_pruning=fd.debug_printing_tree_pruning)
nb_of_nodes = tree.get_nb_of_nodes()
nb_inner_nodes = tree.get_nb_of_inner_nodes()
fd.total_nb_of_nodes_per_fold.append(nb_of_nodes)
fd.nb_of_inner_node_per_fold.append(nb_inner_nodes)
# write out tree
tree_fname = fd.dir_output_files + fd.fname_prefix_fold + ".tree"
write_out_tree(tree_fname, tree)
print('=== end building tree for ALL examples')
print('=== start converting tree to program for ALL examples')
program = convert_tree_to_program(fd.kb_format, fd.treebuilder_type, tree, fd.parsed_settings.language,
debug_printing=fd.debug_printing_program_conversion,
prediction_goal=fd.prediction_goal,
index_of_label_var=fd.index_of_label_var)
program_fname = fd.dir_output_files + fd.fname_prefix_fold + ".program"
write_out_program(program_fname, program)
print('=== end converting tree to program for ALL examples')
all_examples = fd.examples_collection_usable_for_training.get_labeled_examples()
print('\t=== start classifying total set')
# EVALUATE MODEL using test set
classifier = get_keys_classifier(fd.internal_ex_format, program, fd.prediction_goal,
fd.index_of_label_var, fd.stripped_background_knowledge,
debug_printing=fd.debug_printing_get_classifier, engine=fd.engine)
statistics_handler = do_labeled_examples_get_correctly_classified(classifier, all_examples, fd.possible_labels,
fd.debug_printing_classification) # type: ClassificationStatisticsHandler
end_time = time.time()
# time in seconds: # time in seconds
elapsed_time = end_time - start_time
accuracy, _ = statistics_handler.get_accuracy()
statistics_fname = fd.dir_output_files + fd.fname_prefix_fold + ".statistics"
statistics_handler.write_out_statistics_to_file(statistics_fname)
mean_accuracy_of_folds = mean(fd.accuracies_folds)
var_accuracy_of_folds = variance(fd.accuracies_folds, mean_accuracy_of_folds)
std_accuracy_of_folds = sqrt(var_accuracy_of_folds)
confidence = 0.9
mean_acc, conf_left, conf_right, diff_from_mean = mean_confidence_interval(fd.accuracies_folds, confidence)
mean_total_nb_of_nodes = mean(fd.total_nb_of_nodes_per_fold)
var_total_nb_of_nodes = variance(fd.total_nb_of_nodes_per_fold, mean_total_nb_of_nodes)
std_total_nb_of_nodes = sqrt(var_total_nb_of_nodes)
mean_nb_of_inner_nodes = mean(fd.nb_of_inner_node_per_fold)
var_nb_of_inner_nodes = variance(fd.nb_of_inner_node_per_fold, mean_nb_of_inner_nodes)
std_nb_of_inner_nodes = sqrt(var_nb_of_inner_nodes)
total_execution_time_of_cross_validation = sum(fd.execution_time_per_fold)
with open(statistics_fname, 'a') as f:
f.write("\n\ntotal nb of examples (labeled + unlabeled): " + str(fd.total_nb_of_examples) + "\n")
f.write("total nb of LABELED examples: " + str(fd.total_nb_of_labeled_examples) + "\n\n")
f.write("list of accuracies per fold:\n")
f.write("\t" + str(fd.accuracies_folds) + "\n")
f.write("mean accuracy: " + str(mean_accuracy_of_folds) + "\n")
f.write("var accuracy: " + str(var_accuracy_of_folds) + "\n")
f.write("std accuracy: " + str(std_accuracy_of_folds) + "\n")
f.write("accuracy of total tree: " + str(statistics_handler.get_accuracy()[0]) + "\n\n")
f.write("accuracy " + str(confidence * 100) + "% confidence interval: ["
+ str(conf_left) + "," + str(conf_right) + "]\n")
f.write("\taccuracy " + str(confidence * 100) + "% confidence interval around mean: "
+ str(mean_acc) + " +- " + str(diff_from_mean) + "\n\n")
f.write("total nb of nodes in total tree: " + str(nb_of_nodes) + "\n")
f.write("nb of internal nodes in total tree: " + str(nb_inner_nodes) + "\n\n")
f.write("list of total nb of nodes per fold:\n")
f.write("\t" + str(fd.total_nb_of_nodes_per_fold) + "\n")
f.write("mean total nb of nodes: " + str(mean_total_nb_of_nodes) + "\n")
f.write("var total nb of nodes: " + str(var_total_nb_of_nodes) + "\n")
f.write("std total nb of nodes: " + str(std_total_nb_of_nodes) + "\n\n")
f.write("list of nb of internal nodes per fold:\n")
f.write("\t" + str(fd.nb_of_inner_node_per_fold) + "\n")
f.write("mean nb of internal nodes: " + str(mean_nb_of_inner_nodes) + "\n")
f.write("var nb of internal nodes: " + str(var_nb_of_inner_nodes) + "\n")
f.write("std nb of internal nodes: " + str(std_nb_of_inner_nodes) + "\n\n")
f.write("execution times of folds:\n")
f.write("\t" + str(fd.execution_time_per_fold) + "\n")
f.write("total time cross (sum folds): " + str(total_execution_time_of_cross_validation) + " seconds\n")
f.write("time total tree building + verifying: " + str(elapsed_time) + " seconds\n")
print("total nb of nodes in total tree: " + str(nb_of_nodes))
print("nb of internal nodes in total tree: " + str(nb_inner_nodes))
print()
print("list of accuracies per fold:")
print("\t" + str(fd.accuracies_folds))
print("mean accuracy: " + str(mean_accuracy_of_folds))
print("var accuracy: " + str(var_accuracy_of_folds))
print("std accuracy " + str(std_accuracy_of_folds))
print("accuracy of total tree: " + str(statistics_handler.get_accuracy()))
print()
print("accuracy " + str(confidence * 100) + "% confidence interval: ["
+ str(conf_left) + "," + str(conf_right) + "]")
print("\taccuracy " + str(confidence * 100) + "% confidence interval around mean: "
+ str(mean_acc) + " +- " + str(diff_from_mean))
print()
print("total nb of nodes in total tree: " + str(nb_of_nodes))
print("nb of internal nodes in total tree: " + str(nb_inner_nodes))
print()
print("list of total nb of nodes per fold:")
print("\t" + str(fd.total_nb_of_nodes_per_fold))
print("mean total nb of nodes: " + str(mean_total_nb_of_nodes))
print("var total nb of nodes: " + str(var_total_nb_of_nodes))
print("std total nb of nodes: " + str(std_total_nb_of_nodes))
print()
print("list of nb of internal nodes per fold:")
print("\t" + str(fd.nb_of_inner_node_per_fold))
print("mean nb of internal nodes: " + str(mean_nb_of_inner_nodes))
print("var nb of internal nodes: " + str(var_nb_of_inner_nodes))
print("std nb of internal nodes: " + str(std_nb_of_inner_nodes))
print()
print("execution times of folds:")
print("\t" + str(fd.execution_time_per_fold))
print("total time cross (sum folds):", total_execution_time_of_cross_validation, "seconds")
print("time total tree building + verifying:", elapsed_time, "seconds")
print('\t=== end classifying total set')
def main_cross_validation(fname_examples: str, fname_settings: str, fname_background: str,
dir_fold_files: str, fname_prefix_fold: str, fold_start_index: int, nb_folds: int,
fold_suffix: str, dir_output_files: str,
filter_out_unlabeled_examples=False,
debug_printing_example_parsing=False,
debug_printing_tree_building=False,
debug_printing_tree_pruning=False,
debug_printing_program_conversion=False,
debug_printing_get_classifier=False,
debug_printing_classification=False):
engine = DefaultEngine()
engine.unknown = 1
fd = FoldData.build_fold_data(fname_examples, fname_settings, fname_background,
dir_fold_files, fname_prefix_fold, fold_start_index, nb_folds, fold_suffix,
dir_output_files,
filter_out_unlabeled_examples,
debug_printing_example_parsing,
debug_printing_tree_building,
debug_printing_tree_pruning,
debug_printing_program_conversion,
debug_printing_get_classifier,
debug_printing_classification,
engine=engine
)
# take one key set as test, the others as training
for fold_index, test_key_set in enumerate(fd.all_key_sets):
do_one_fold(fold_index, test_key_set, fd)
do_all_examples(fd)
# def main_cross_validation_old(fname_examples: str, fname_settings: str, fname_background: str,
# dir_fold_files: str, fname_prefix_fold: str, fold_start_index: int, nb_folds: int, fold_suffix: str,
# dir_output_files: str,
# debug_printing_example_parsing=False,
# debug_printing_tree_building=False,
# debug_printing_tree_pruning=False,
# debug_printing_program_conversion=False,
# debug_printing_get_classifier=False,
# debug_printing_classification=False):
# settings_file_parser = SettingsParserMapper.get_settings_parser(KnowledgeBaseFormat.KEYS)
# parsed_settings = settings_file_parser.parse(fname_settings)
#
# kb_format = KnowledgeBaseFormat.KEYS
# internal_ex_format = InternalExampleFormat.CLAUSEDB
#
# treebuilder_type = TreeBuilderType.DETERMINISTIC
#
# print('=== start preprocessing examples ===')
# examples_collection_usable_for_training, prediction_goal, index_of_label_var, possible_labels, background_knowledge_wrapper = \
# preprocessing_examples_keys(fname_examples, parsed_settings, internal_ex_format,
# fname_background, debug_printing_example_parsing,
# filter_out_unlabeled_examples=False)
#
# total_nb_of_examples = len(examples_collection_usable_for_training.example_wrappers_sp)
#
# print('\tnb of examples: ' + str(total_nb_of_examples))
# print('\tprediction goal: ' + str(prediction_goal))
# print('\tpossible labels: ' + str(possible_labels))
# print('=== end preprocessing examples ===\n')
#
# full_background_knowledge_sp \
# = background_knowledge_wrapper.get_full_background_knowledge_simple_program() # type: Optional[SimpleProgram]
#
# stripped_background_knowledge = background_knowledge_wrapper.get_stripped_background_knowledge() # type: Optional[SimpleProgram]
# stripped_examples_simple_program = examples_collection_usable_for_training.get_labeled_example_wrappers_sp() # type: List[SimpleProgramExampleWrapper]
# examples_usable_for_testing = stripped_examples_simple_program # type: List[SimpleProgramExampleWrapper]
#
# if internal_ex_format == InternalExampleFormat.CLAUSEDB:
# stripped_examples_clausedb = ClauseDBExampleWrapper.get_clause_db_examples(stripped_examples_simple_program,
# background_knowledge=stripped_background_knowledge)
# examples_usable_for_testing = stripped_examples_clausedb # type: List[ClauseDBExampleWrapper]
#
# fold_file_names = get_fold_info_filenames(fold_start_index, nb_folds, dir_fold_files, fname_prefix_fold,
# fold_suffix)
#
# accuracies_folds = []
#
# # read in all the keysets
# all_key_sets = [] # type: List[Set[Constant]]
# for fname in fold_file_names:
# all_key_sets.append(get_keys_in_fold_file(fname))
#
# # take one key set as test, the others as training
# for fold_index, test_key_set in enumerate(all_key_sets):
# print('\n===========================')
# print('=== start FOLD ' + str(fold_index + 1) + ' of ' + str(nb_folds))
# print('===========================')
#
# training_example_collection, test_examples = split_examples_into_training_and_test_sets(
# all_key_sets, test_key_set, examples_collection_usable_for_training, examples_usable_for_testing)
# print('\ttotal nb of examples: ' + str(total_nb_of_examples))
# print('\tnb of TRAINING ex: ' + str(len(training_example_collection.example_wrappers_sp)))
# print('\tnb of TEST ex: ' + str(len(test_examples)))
#
# # ==============================================================================================================
# print('\t=== start building tree for fold ' + str(fold_index + 1))
#
# # TRAIN MODEL using training set
# tree = build_tree(internal_ex_format, treebuilder_type, parsed_settings.language,
# possible_labels, training_example_collection, prediction_goal=prediction_goal,
# full_background_knowledge_sp=full_background_knowledge_sp,
# debug_printing_tree_building=debug_printing_tree_building)
#
# tree = prune_tree(tree, debug_printing_tree_pruning=debug_printing_tree_pruning)
#
# # write out tree
# tree_fname = dir_output_files + fname_prefix_fold + '_fold' + str(fold_index) + ".tree"
# write_out_tree(tree_fname, tree)
#
# print('\t=== end building tree for fold ' + str(fold_index + 1))
#
# # ==============================================================================================================
#
# print('\t=== start converting tree to program for fold ' + str(fold_index + 1))
# program = convert_tree_to_program(kb_format, treebuilder_type, tree, parsed_settings.language,
# debug_printing=debug_printing_program_conversion,
# prediction_goal=prediction_goal,
# index_of_label_var=index_of_label_var)
# program_fname = dir_output_files + fname_prefix_fold + '_fold' + str(fold_index) + ".program"
# write_out_program(program_fname, program)
#
# print('\t=== end converting tree to program for fold ' + str(fold_index + 1))
#
# # ==============================================================================================================
#
# print('\t=== start classifying test set' + str(fold_index + 1))
# # EVALUATE MODEL using test set
# classifier = get_keys_classifier(internal_ex_format, program, prediction_goal,
# index_of_label_var, stripped_background_knowledge,
# debug_printing=debug_printing_get_classifier)
#
# statistics_handler = do_labeled_examples_get_correctly_classified(classifier, test_examples, possible_labels,
# debug_printing_classification) # type: ClassificationStatisticsHandler
# accuracy, _ = statistics_handler.get_accuracy()
# accuracies_folds.append(accuracy)
#
# statistics_fname = dir_output_files + fname_prefix_fold + '_fold' + str(fold_index) + ".statistics"
# statistics_handler.write_out_statistics_to_file(statistics_fname)
#
# print('\t=== end classifying test set' + str(fold_index + 1))
#
# print('\t=== end FOLD ' + str(fold_index + 1) + ' of ' + str(nb_folds) + '\n')
#
# print('\n=======================================')
# print('=== FINALLY, learn tree on all examples')
# print('========================================')
# print('\ttotal nb of examples: ' + str(total_nb_of_examples))
#
# print('\t=== start building tree for ALL examples')
#
# # TRAIN MODEL using training set
# tree = build_tree(internal_ex_format, treebuilder_type, parsed_settings.language,
# possible_labels, examples_collection_usable_for_training, prediction_goal=prediction_goal,
# full_background_knowledge_sp=full_background_knowledge_sp,
# debug_printing_tree_building=debug_printing_tree_building)
#
# tree = prune_tree(tree, debug_printing_tree_pruning=debug_printing_tree_pruning)
#
# # write out tree
# tree_fname = dir_output_files + fname_prefix_fold + ".tree"
# write_out_tree(tree_fname, tree)
#
# print('=== end building tree for ALL examples')
#
# print('=== start converting tree to program for ALL examples')
# program = convert_tree_to_program(kb_format, treebuilder_type, tree, parsed_settings.language,
# debug_printing=debug_printing_program_conversion, prediction_goal=prediction_goal,
# index_of_label_var=index_of_label_var)
# program_fname = dir_output_files + fname_prefix_fold + ".program"
# write_out_program(program_fname, program)
#
# print('=== end converting tree to program for ALL examples')
#
# all_examples = examples_collection_usable_for_training.get_labeled_examples()
#
# print('\t=== start classifying total set')
# # EVALUATE MODEL using test set
# classifier = get_keys_classifier(internal_ex_format, program, prediction_goal,
# index_of_label_var, stripped_background_knowledge,
# debug_printing=debug_printing_get_classifier)
#
# statistics_handler = do_labeled_examples_get_correctly_classified(classifier, all_examples, possible_labels,
# debug_printing_classification) # type: ClassificationStatisticsHandler
# accuracy, _ = statistics_handler.get_accuracy()
#
# statistics_fname = dir_output_files + fname_prefix_fold + ".statistics"
# statistics_handler.write_out_statistics_to_file(statistics_fname)
#
# mean_accuracy_of_folds = mean(accuracies_folds)
# var_accuracy_of_folds = variance(accuracies_folds, mean_accuracy_of_folds)
# std_accuracy_of_folds = sqrt(var_accuracy_of_folds)
#
# with open(statistics_fname, 'a') as f:
# f.write("list of accuracies:\n")
# f.write("\t" + str(accuracies_folds))
# f.write("mean accuracy: " + str(mean_accuracy_of_folds) + "\n")
# f.write("var accuracy: " + str(var_accuracy_of_folds) + "\n")
# f.write("std accuracy " + str(std_accuracy_of_folds) + "\n")
# f.write("accuracy of total tree: " + str(statistics_handler.get_accuracy()) + "\n")
#
# print("list of accuracies:")
# print("\t" + str(accuracies_folds))
# print("mean accuracy: " + str(mean_accuracy_of_folds))
# print("var accuracy: " + str(var_accuracy_of_folds))
# print("std accuracy " + str(std_accuracy_of_folds))
# print("accuracy of total tree: " + str(statistics_handler.get_accuracy()))
#
# print('\t=== end classifying total set')
def filter_examples(examples: List[ExampleWrapper], key_set: Set[ExampleWrapper]):
return [ex for ex in examples if ex.key in key_set]
if __name__ == '__main__':
main_cross_validation()
| 53.664047 | 157 | 0.646385 | import time
from math import sqrt
from statistics import mean, variance
from typing import Set, List, Tuple
from problog.engine import DefaultEngine, GenericEngine
from problog.logic import Constant
from mai_version.classification.classification_helper import get_keys_classifier, do_labeled_examples_get_correctly_classified
from mai_version.classification.classification_statistics_handler import ClassificationStatisticsHandler
from mai_version.classification.confidence_intervals import mean_confidence_interval
from mai_version.fold.fold_helper import write_out_tree, write_out_program, FoldData
from mai_version.representation.example import ExampleWrapper, ClauseDBExampleWrapper
from mai_version.representation.example_collection import ExampleCollection
from mai_version.run.program_phase import build_tree, convert_tree_to_program, prune_tree
def split_examples_into_training_and_test_sets(
all_key_sets: List[Set[Constant]], test_key_set: Set[Constant],
examples_collection_usable_for_training: ExampleCollection,
examples_usable_for_testing: List[ClauseDBExampleWrapper]
) -> Tuple[ExampleCollection, List[ExampleWrapper]]:
training_key_sets_list = [s for s in all_key_sets if s is not test_key_set]
training_key_set = set.union(*training_key_sets_list)
training_example_collection = examples_collection_usable_for_training.filter_examples(
training_key_set)
test_examples = [ex_wp for ex_wp in examples_usable_for_testing if
ex_wp.key in test_key_set]
return training_example_collection, test_examples
def do_one_fold(fold_index: int, test_key_set: Set[Constant], fd: FoldData
):
print('\n===========================')
print('=== start FOLD ' + str(fold_index + 1) + ' of ' + str(fd.nb_folds))
print('===========================')
training_example_collection, test_examples = split_examples_into_training_and_test_sets(
fd.all_key_sets, test_key_set, fd.examples_collection_usable_for_training, fd.examples_usable_for_testing)
print('\ttotal nb of labeled examples: ' + str(fd.total_nb_of_labeled_examples))
nb_of_training_ex = len(training_example_collection.example_wrappers_sp)
nb_of_test_ex = len(test_examples)
print('\tnb of TRAINING ex: ' + str(nb_of_training_ex))
print('\tnb of TEST ex: ' + str(nb_of_test_ex))
start_time = time.time()
print('\t=== start building tree for fold ' + str(fold_index + 1))
tree = build_tree(fd.internal_ex_format, fd.treebuilder_type, fd.parsed_settings.language,
fd.possible_labels, training_example_collection, prediction_goal=fd.prediction_goal,
full_background_knowledge_sp=fd.full_background_knowledge_sp,
debug_printing_tree_building=fd.debug_printing_tree_building, engine=fd.engine)
tree = prune_tree(tree, debug_printing_tree_pruning=fd.debug_printing_tree_pruning)
nb_of_nodes = tree.get_nb_of_nodes()
nb_inner_nodes = tree.get_nb_of_inner_nodes()
fd.total_nb_of_nodes_per_fold.append(nb_of_nodes)
fd.nb_of_inner_node_per_fold.append(nb_inner_nodes)
tree_fname = fd.dir_output_files + fd.fname_prefix_fold + '_fold' + str(fold_index) + ".tree"
write_out_tree(tree_fname, tree)
print('\t=== end building tree for fold ' + str(fold_index + 1))
print('\t=== start converting tree to program for fold ' + str(fold_index + 1))
program = convert_tree_to_program(fd.kb_format, fd.treebuilder_type, tree, fd.parsed_settings.language,
debug_printing=fd.debug_printing_program_conversion,
prediction_goal=fd.prediction_goal,
index_of_label_var=fd.index_of_label_var)
program_fname = fd.dir_output_files + fd.fname_prefix_fold + '_fold' + str(fold_index) + ".program"
write_out_program(program_fname, program)
print('\t=== end converting tree to program for fold ' + str(fold_index + 1))
print('\t=== start classifying test set' + str(fold_index + 1))
classifier = get_keys_classifier(fd.internal_ex_format, program, fd.prediction_goal,
fd.index_of_label_var, fd.stripped_background_knowledge,
debug_printing=fd.debug_printing_get_classifier, engine=fd.engine)
statistics_handler = do_labeled_examples_get_correctly_classified(
classifier, test_examples, fd.possible_labels,
fd.debug_printing_classification)
end_time = time.time()
= end_time - start_time
fd.execution_time_per_fold.append(elapsed_time)
accuracy, _ = statistics_handler.get_accuracy()
fd.accuracies_folds.append(accuracy)
statistics_fname = fd.dir_output_files + fd.fname_prefix_fold + '_fold' + str(fold_index) + ".statistics"
statistics_handler.write_out_statistics_to_file(statistics_fname)
with open(statistics_fname, 'a') as f:
f.write('\n\nnb of TRAINING ex: ' + str(nb_of_training_ex) + "\n")
f.write('nb of TEST ex: ' + str(nb_of_test_ex) + "\n\n")
f.write("total nb of nodes: " + str(nb_of_nodes) + "\n")
f.write("nb of internal nodes: " + str(nb_inner_nodes) + "\n\n")
f.write("execution time of fold: " + str(elapsed_time) + " seconds\n")
print("total nb of nodes: " + str(nb_of_nodes))
print("nb of internal nodes: " + str(nb_inner_nodes))
print("execution time of fold: ", elapsed_time, "seconds")
print('\t=== end classifying test set' + str(fold_index + 1))
print('\t=== end FOLD ' + str(fold_index + 1) + ' of ' + str(fd.nb_folds) + '\n')
def do_all_examples(fd: FoldData):
print('\n=======================================')
print('=== FINALLY, learn tree on all examples')
print('========================================')
print('\ttotal nb of labeled examples: ' + str(fd.total_nb_of_labeled_examples))
print('\t=== start building tree for ALL examples')
start_time = time.time()
tree = build_tree(fd.internal_ex_format, fd.treebuilder_type, fd.parsed_settings.language,
fd.possible_labels, fd.examples_collection_usable_for_training,
prediction_goal=fd.prediction_goal,
full_background_knowledge_sp=fd.full_background_knowledge_sp,
debug_printing_tree_building=fd.debug_printing_tree_building, engine=fd.engine)
tree = prune_tree(tree, debug_printing_tree_pruning=fd.debug_printing_tree_pruning)
nb_of_nodes = tree.get_nb_of_nodes()
nb_inner_nodes = tree.get_nb_of_inner_nodes()
fd.total_nb_of_nodes_per_fold.append(nb_of_nodes)
fd.nb_of_inner_node_per_fold.append(nb_inner_nodes)
tree_fname = fd.dir_output_files + fd.fname_prefix_fold + ".tree"
write_out_tree(tree_fname, tree)
print('=== end building tree for ALL examples')
print('=== start converting tree to program for ALL examples')
program = convert_tree_to_program(fd.kb_format, fd.treebuilder_type, tree, fd.parsed_settings.language,
debug_printing=fd.debug_printing_program_conversion,
prediction_goal=fd.prediction_goal,
index_of_label_var=fd.index_of_label_var)
program_fname = fd.dir_output_files + fd.fname_prefix_fold + ".program"
write_out_program(program_fname, program)
print('=== end converting tree to program for ALL examples')
all_examples = fd.examples_collection_usable_for_training.get_labeled_examples()
print('\t=== start classifying total set')
classifier = get_keys_classifier(fd.internal_ex_format, program, fd.prediction_goal,
fd.index_of_label_var, fd.stripped_background_knowledge,
debug_printing=fd.debug_printing_get_classifier, engine=fd.engine)
statistics_handler = do_labeled_examples_get_correctly_classified(classifier, all_examples, fd.possible_labels,
fd.debug_printing_classification)
end_time = time.time()
= end_time - start_time
accuracy, _ = statistics_handler.get_accuracy()
statistics_fname = fd.dir_output_files + fd.fname_prefix_fold + ".statistics"
statistics_handler.write_out_statistics_to_file(statistics_fname)
mean_accuracy_of_folds = mean(fd.accuracies_folds)
var_accuracy_of_folds = variance(fd.accuracies_folds, mean_accuracy_of_folds)
std_accuracy_of_folds = sqrt(var_accuracy_of_folds)
confidence = 0.9
mean_acc, conf_left, conf_right, diff_from_mean = mean_confidence_interval(fd.accuracies_folds, confidence)
mean_total_nb_of_nodes = mean(fd.total_nb_of_nodes_per_fold)
var_total_nb_of_nodes = variance(fd.total_nb_of_nodes_per_fold, mean_total_nb_of_nodes)
std_total_nb_of_nodes = sqrt(var_total_nb_of_nodes)
mean_nb_of_inner_nodes = mean(fd.nb_of_inner_node_per_fold)
var_nb_of_inner_nodes = variance(fd.nb_of_inner_node_per_fold, mean_nb_of_inner_nodes)
std_nb_of_inner_nodes = sqrt(var_nb_of_inner_nodes)
total_execution_time_of_cross_validation = sum(fd.execution_time_per_fold)
with open(statistics_fname, 'a') as f:
f.write("\n\ntotal nb of examples (labeled + unlabeled): " + str(fd.total_nb_of_examples) + "\n")
f.write("total nb of LABELED examples: " + str(fd.total_nb_of_labeled_examples) + "\n\n")
f.write("list of accuracies per fold:\n")
f.write("\t" + str(fd.accuracies_folds) + "\n")
f.write("mean accuracy: " + str(mean_accuracy_of_folds) + "\n")
f.write("var accuracy: " + str(var_accuracy_of_folds) + "\n")
f.write("std accuracy: " + str(std_accuracy_of_folds) + "\n")
f.write("accuracy of total tree: " + str(statistics_handler.get_accuracy()[0]) + "\n\n")
f.write("accuracy " + str(confidence * 100) + "% confidence interval: ["
+ str(conf_left) + "," + str(conf_right) + "]\n")
f.write("\taccuracy " + str(confidence * 100) + "% confidence interval around mean: "
+ str(mean_acc) + " +- " + str(diff_from_mean) + "\n\n")
f.write("total nb of nodes in total tree: " + str(nb_of_nodes) + "\n")
f.write("nb of internal nodes in total tree: " + str(nb_inner_nodes) + "\n\n")
f.write("list of total nb of nodes per fold:\n")
f.write("\t" + str(fd.total_nb_of_nodes_per_fold) + "\n")
f.write("mean total nb of nodes: " + str(mean_total_nb_of_nodes) + "\n")
f.write("var total nb of nodes: " + str(var_total_nb_of_nodes) + "\n")
f.write("std total nb of nodes: " + str(std_total_nb_of_nodes) + "\n\n")
f.write("list of nb of internal nodes per fold:\n")
f.write("\t" + str(fd.nb_of_inner_node_per_fold) + "\n")
f.write("mean nb of internal nodes: " + str(mean_nb_of_inner_nodes) + "\n")
f.write("var nb of internal nodes: " + str(var_nb_of_inner_nodes) + "\n")
f.write("std nb of internal nodes: " + str(std_nb_of_inner_nodes) + "\n\n")
f.write("execution times of folds:\n")
f.write("\t" + str(fd.execution_time_per_fold) + "\n")
f.write("total time cross (sum folds): " + str(total_execution_time_of_cross_validation) + " seconds\n")
f.write("time total tree building + verifying: " + str(elapsed_time) + " seconds\n")
print("total nb of nodes in total tree: " + str(nb_of_nodes))
print("nb of internal nodes in total tree: " + str(nb_inner_nodes))
print()
print("list of accuracies per fold:")
print("\t" + str(fd.accuracies_folds))
print("mean accuracy: " + str(mean_accuracy_of_folds))
print("var accuracy: " + str(var_accuracy_of_folds))
print("std accuracy " + str(std_accuracy_of_folds))
print("accuracy of total tree: " + str(statistics_handler.get_accuracy()))
print()
print("accuracy " + str(confidence * 100) + "% confidence interval: ["
+ str(conf_left) + "," + str(conf_right) + "]")
print("\taccuracy " + str(confidence * 100) + "% confidence interval around mean: "
+ str(mean_acc) + " +- " + str(diff_from_mean))
print()
print("total nb of nodes in total tree: " + str(nb_of_nodes))
print("nb of internal nodes in total tree: " + str(nb_inner_nodes))
print()
print("list of total nb of nodes per fold:")
print("\t" + str(fd.total_nb_of_nodes_per_fold))
print("mean total nb of nodes: " + str(mean_total_nb_of_nodes))
print("var total nb of nodes: " + str(var_total_nb_of_nodes))
print("std total nb of nodes: " + str(std_total_nb_of_nodes))
print()
print("list of nb of internal nodes per fold:")
print("\t" + str(fd.nb_of_inner_node_per_fold))
print("mean nb of internal nodes: " + str(mean_nb_of_inner_nodes))
print("var nb of internal nodes: " + str(var_nb_of_inner_nodes))
print("std nb of internal nodes: " + str(std_nb_of_inner_nodes))
print()
print("execution times of folds:")
print("\t" + str(fd.execution_time_per_fold))
print("total time cross (sum folds):", total_execution_time_of_cross_validation, "seconds")
print("time total tree building + verifying:", elapsed_time, "seconds")
print('\t=== end classifying total set')
def main_cross_validation(fname_examples: str, fname_settings: str, fname_background: str,
dir_fold_files: str, fname_prefix_fold: str, fold_start_index: int, nb_folds: int,
fold_suffix: str, dir_output_files: str,
filter_out_unlabeled_examples=False,
debug_printing_example_parsing=False,
debug_printing_tree_building=False,
debug_printing_tree_pruning=False,
debug_printing_program_conversion=False,
debug_printing_get_classifier=False,
debug_printing_classification=False):
engine = DefaultEngine()
engine.unknown = 1
fd = FoldData.build_fold_data(fname_examples, fname_settings, fname_background,
dir_fold_files, fname_prefix_fold, fold_start_index, nb_folds, fold_suffix,
dir_output_files,
filter_out_unlabeled_examples,
debug_printing_example_parsing,
debug_printing_tree_building,
debug_printing_tree_pruning,
debug_printing_program_conversion,
debug_printing_get_classifier,
debug_printing_classification,
engine=engine
)
for fold_index, test_key_set in enumerate(fd.all_key_sets):
do_one_fold(fold_index, test_key_set, fd)
do_all_examples(fd)
| true | true |
1c2f49a98f4323879a647210d79f92f18591350c | 630 | py | Python | project_name/accounts/views.py | danrobinson/project_template | 192cf2edc811943564dfe7883bd25a8a77950cbc | [
"MIT"
] | 1 | 2017-06-28T06:43:37.000Z | 2017-06-28T06:43:37.000Z | project_name/accounts/views.py | danrobinson/project_template | 192cf2edc811943564dfe7883bd25a8a77950cbc | [
"MIT"
] | null | null | null | project_name/accounts/views.py | danrobinson/project_template | 192cf2edc811943564dfe7883bd25a8a77950cbc | [
"MIT"
] | null | null | null | # Create your views here.
from django.views.generic.base import TemplateView
from django.views.generic.detail import DetailView
from django.views.generic.edit import FormView, CreateView, UpdateView, DeleteView
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from emailusernames.forms import EmailUserCreationForm
class RegisterView(FormView):
template_name = "registration/register.html"
form_class = EmailUserCreationForm
success_url = '/dashboard'
def form_valid(self, form):
form.save()
return super(RegisterView, self).form_valid(form)
| 35 | 82 | 0.790476 |
from django.views.generic.base import TemplateView
from django.views.generic.detail import DetailView
from django.views.generic.edit import FormView, CreateView, UpdateView, DeleteView
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from emailusernames.forms import EmailUserCreationForm
class RegisterView(FormView):
template_name = "registration/register.html"
form_class = EmailUserCreationForm
success_url = '/dashboard'
def form_valid(self, form):
form.save()
return super(RegisterView, self).form_valid(form)
| true | true |
1c2f49e4ce83fd985358a0b8c4471afa0792dc59 | 2,120 | py | Python | graphbrain/utils/lemmas.py | vishalbelsare/graphbrain | efad1f96dcb616595b13bd4e3ada806862fe7de7 | [
"MIT"
] | 412 | 2016-02-20T12:04:34.000Z | 2022-03-30T21:07:39.000Z | graphbrain/utils/lemmas.py | vishalbelsare/graphbrain | efad1f96dcb616595b13bd4e3ada806862fe7de7 | [
"MIT"
] | 20 | 2018-09-21T13:47:59.000Z | 2021-04-10T04:21:30.000Z | graphbrain/utils/lemmas.py | vishalbelsare/graphbrain | efad1f96dcb616595b13bd4e3ada806862fe7de7 | [
"MIT"
] | 51 | 2017-01-25T17:19:30.000Z | 2022-03-29T07:32:53.000Z | import graphbrain.constants as const
def lemma(hg, atom, same_if_none=False):
"""Returns the lemma of the given atom if it exists, None otherwise.
Keyword argument:
same_if_none -- if False, returns None when lemma does not exist. If True,
returns atom items when lemma does not exist. (default: False)
"""
if atom.is_atom():
satom = atom.simplify()
for lemma_edge in hg.search((const.lemma_pred, satom, '*')):
return lemma_edge[2]
if same_if_none:
return atom
else:
return None
def deep_lemma(hg, edge, same_if_none=False):
"""Returns the lemma of an atomic edge, or the lemma of the first atom
found by recursively descending the hyperedge, always choosing the
subedge immediatly after the connector.
This is useful, for example, to find the lemma of the central verb
in a non-atomic predicate edge. For example:
(not/A (is/A going/P))
could return
go/P
Keyword argument:
same_if_none -- if False, returns None when lemma does not exist. If True,
returns atom items when lemma does not exist. (default: False)
"""
if edge.is_atom():
return lemma(hg, edge,same_if_none)
else:
return deep_lemma(hg, edge[1])
def lemma_degrees(hg, edge):
"""Finds all the atoms that share the same given lemma
and computes the sum of both their degrees and deep degrees.
These two sums are returned.
If the parameter edge is non-atomic, this function simply returns
the degree and deep degree of that edge.
"""
if edge.is_atom():
roots = {edge.root()}
# find lemma
satom = edge.simplify()
for edge in hg.search((const.lemma_pred, satom, '*')):
roots.add(edge[2].root())
# compute degrees
d = 0
dd = 0
for r in roots:
atoms = set(hg.atoms_with_root(r))
d += sum([hg.degree(atom) for atom in atoms])
dd += sum([hg.deep_degree(atom) for atom in atoms])
return d, dd
else:
return hg.degree(edge), hg.deep_degree(edge)
| 29.444444 | 78 | 0.634906 | import graphbrain.constants as const
def lemma(hg, atom, same_if_none=False):
if atom.is_atom():
satom = atom.simplify()
for lemma_edge in hg.search((const.lemma_pred, satom, '*')):
return lemma_edge[2]
if same_if_none:
return atom
else:
return None
def deep_lemma(hg, edge, same_if_none=False):
if edge.is_atom():
return lemma(hg, edge,same_if_none)
else:
return deep_lemma(hg, edge[1])
def lemma_degrees(hg, edge):
if edge.is_atom():
roots = {edge.root()}
satom = edge.simplify()
for edge in hg.search((const.lemma_pred, satom, '*')):
roots.add(edge[2].root())
d = 0
dd = 0
for r in roots:
atoms = set(hg.atoms_with_root(r))
d += sum([hg.degree(atom) for atom in atoms])
dd += sum([hg.deep_degree(atom) for atom in atoms])
return d, dd
else:
return hg.degree(edge), hg.deep_degree(edge)
| true | true |
1c2f4a8d8e8d18087601f036286b95767fbbc42c | 6,623 | py | Python | var/spack/repos/builtin/packages/py-scipy/package.py | FrankD412/spack | b70bf073b5f647dd5a7f1917ed55dfd39d1a2a0c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1 | 2021-07-30T02:03:30.000Z | 2021-07-30T02:03:30.000Z | var/spack/repos/builtin/packages/py-scipy/package.py | FrankD412/spack | b70bf073b5f647dd5a7f1917ed55dfd39d1a2a0c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13 | 2021-05-12T06:10:02.000Z | 2022-03-25T21:00:29.000Z | var/spack/repos/builtin/packages/py-scipy/package.py | Kerilk/spack | e027942b55407a4a5fe323b93d8e57200c873a43 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import platform
class PyScipy(PythonPackage):
"""SciPy (pronounced "Sigh Pie") is a Scientific Library for Python.
It provides many user-friendly and efficient numerical routines such
as routines for numerical integration and optimization."""
homepage = "https://www.scipy.org/"
pypi = "scipy/scipy-1.5.4.tar.gz"
git = "https://github.com/scipy/scipy.git"
maintainers = ['adamjstewart']
version('master', branch='master')
version('1.6.3', sha256='a75b014d3294fce26852a9d04ea27b5671d86736beb34acdfc05859246260707')
version('1.6.2', sha256='e9da33e21c9bc1b92c20b5328adb13e5f193b924c9b969cd700c8908f315aa59')
version('1.6.1', sha256='c4fceb864890b6168e79b0e714c585dbe2fd4222768ee90bc1aa0f8218691b11')
version('1.6.0', sha256='cb6dc9f82dfd95f6b9032a8d7ea70efeeb15d5b5fd6ed4e8537bb3c673580566')
version('1.5.4', sha256='4a453d5e5689de62e5d38edf40af3f17560bfd63c9c5bd228c18c1f99afa155b')
version('1.5.3', sha256='ddae76784574cc4c172f3d5edd7308be16078dd3b977e8746860c76c195fa707')
version('1.5.2', sha256='066c513d90eb3fd7567a9e150828d39111ebd88d3e924cdfc9f8ce19ab6f90c9')
version('1.5.1', sha256='039572f0ca9578a466683558c5bf1e65d442860ec6e13307d528749cfe6d07b8')
version('1.5.0', sha256='4ff72877d19b295ee7f7727615ea8238f2d59159df0bdd98f91754be4a2767f0')
version('1.4.1', sha256='dee1bbf3a6c8f73b6b218cb28eed8dd13347ea2f87d572ce19b289d6fd3fbc59')
version('1.4.0', sha256='31f7cfa93b01507c935c12b535e24812594002a02a56803d7cd063e9920d25e8')
version('1.3.3', sha256='64bf4e8ae0db2d42b58477817f648d81e77f0b381d0ea4427385bba3f959380a')
version('1.3.2', sha256='a03939b431994289f39373c57bbe452974a7da724ae7f9620a1beee575434da4')
version('1.3.1', sha256='2643cfb46d97b7797d1dbdb6f3c23fe3402904e3c90e6facfe6a9b98d808c1b5')
version('1.3.0', sha256='c3bb4bd2aca82fb498247deeac12265921fe231502a6bc6edea3ee7fe6c40a7a')
version('1.2.3', sha256='ecbe6413ca90b8e19f8475bfa303ac001e81b04ec600d17fa7f816271f7cca57')
version('1.2.2', sha256='a4331e0b8dab1ff75d2c67b5158a8bb9a83c799d7140094dda936d876c7cfbb1')
version('1.2.1', sha256='e085d1babcb419bbe58e2e805ac61924dac4ca45a07c9fa081144739e500aa3c')
version('1.1.0', sha256='878352408424dffaa695ffedf2f9f92844e116686923ed9aa8626fc30d32cfd1')
version('1.0.0', sha256='87ea1f11a0e9ec08c264dc64551d501fa307289460705f6fccd84cbfc7926d10')
version('0.19.1', sha256='a19a2ca7a7336495ec180adeaa0dfdcf41e96dbbee90d51c3ed828ba570884e6')
version('0.18.1', sha256='8ab6e9c808bf2fb3e8576cd8cf07226d9cdc18b012c06d9708429a821ac6634e')
version('0.17.0', sha256='f600b755fb69437d0f70361f9e560ab4d304b1b66987ed5a28bdd9dd7793e089')
version('0.15.1', sha256='a212cbc3b79e9a563aa45fc5c517b3499198bd7eb7e7be1e047568a5f48c259a')
version('0.15.0', sha256='0c74e31e08acc8bf9b6ceb9bced73df2ae0cc76003e0366350bc7b26292bf8b1')
depends_on('python@2.6:2.8,3.2:', when='@:0.17.999', type=('build', 'link', 'run'))
depends_on('python@2.7:2.8,3.4:', when='@0.18:1.2.999', type=('build', 'link', 'run'))
depends_on('python@3.5:', when='@1.3:1.4.999', type=('build', 'link', 'run'))
depends_on('python@3.6:', when='@1.5:1.5.999', type=('build', 'link', 'run'))
depends_on('python@3.7:', when='@1.6:1.6.1', type=('build', 'link', 'run'))
depends_on('python@3.7:3.9.999', when='@1.6.2:', type=('build', 'link', 'run'))
depends_on('py-setuptools', when='@:1.6.1', type='build')
depends_on('py-setuptools@:51.0.0', when='@1.6.2:', type='build')
depends_on('py-pybind11@2.2.4:', when='@1.4.0', type=('build', 'link'))
depends_on('py-pybind11@2.4.0:', when='@1.4.1:1.4.999', type=('build', 'link'))
depends_on('py-pybind11@2.4.3:', when='@1.5:1.6.1', type=('build', 'link'))
depends_on('py-pybind11@2.4.3:2.6.999', when='@1.6.2:', type=('build', 'link'))
depends_on('py-numpy@1.5.1:+blas+lapack', when='@:0.15.999', type=('build', 'link', 'run'))
depends_on('py-numpy@1.6.2:+blas+lapack', when='@0.16:0.17.999', type=('build', 'link', 'run'))
depends_on('py-numpy@1.7.1:+blas+lapack', when='@0.18:0.18.999', type=('build', 'link', 'run'))
depends_on('py-numpy@1.8.2:+blas+lapack', when='@0.19:1.2.999', type=('build', 'link', 'run'))
depends_on('py-numpy@1.13.3:+blas+lapack', when='@1.3:1.4.999', type=('build', 'link', 'run'))
depends_on('py-numpy@1.14.5:+blas+lapack', when='@1.5:1.5.999', type=('build', 'link', 'run'))
depends_on('py-numpy@1.16.5:+blas+lapack', when='@1.6:1.6.1', type=('build', 'link', 'run'))
depends_on('py-numpy@1.16.5:1.22.999+blas+lapack', when='@1.6.2:', type=('build', 'link', 'run'))
depends_on('py-pytest', type='test')
# NOTE: scipy picks up Blas/Lapack from numpy, see
# http://www.scipy.org/scipylib/building/linux.html#step-4-build-numpy-1-5-0
depends_on('blas')
depends_on('lapack')
# https://github.com/scipy/scipy/issues/12860
patch('https://git.sagemath.org/sage.git/plain/build/pkgs/scipy/patches/extern_decls.patch?id=711fe05025795e44b84233e065d240859ccae5bd',
sha256='5433f60831cb554101520a8f8871ac5a32c95f7a971ccd68b69049535b106780', when='@1.2:1.5.3')
def setup_build_environment(self, env):
# https://github.com/scipy/scipy/issues/9080
env.set('F90', spack_fc)
# https://github.com/scipy/scipy/issues/11611
if self.spec.satisfies('@:1.4 %gcc@10:'):
env.set('FFLAGS', '-fallow-argument-mismatch')
# Kluge to get the gfortran linker to work correctly on Big
# Sur, at least until a gcc release > 10.2 is out with a fix.
# (There is a fix in their development tree.)
if platform.mac_ver()[0][0:2] == '11':
env.set('MACOSX_DEPLOYMENT_TARGET', '10.15')
def build_args(self, spec, prefix):
args = []
if spec.satisfies('%fj'):
args.extend(['config_fc', '--fcompiler=fujitsu'])
# Build in parallel
# Known problems with Python 3.5+
# https://github.com/spack/spack/issues/7927
# https://github.com/scipy/scipy/issues/7112
if not spec.satisfies('^python@3.5:'):
args.extend(['-j', str(make_jobs)])
return args
@run_after('install')
@on_package_attributes(run_tests=True)
def install_test(self):
with working_dir('spack-test', create=True):
python('-c', 'import scipy; scipy.test("full", verbose=2)')
| 59.666667 | 140 | 0.696965 |
import platform
class PyScipy(PythonPackage):
homepage = "https://www.scipy.org/"
pypi = "scipy/scipy-1.5.4.tar.gz"
git = "https://github.com/scipy/scipy.git"
maintainers = ['adamjstewart']
version('master', branch='master')
version('1.6.3', sha256='a75b014d3294fce26852a9d04ea27b5671d86736beb34acdfc05859246260707')
version('1.6.2', sha256='e9da33e21c9bc1b92c20b5328adb13e5f193b924c9b969cd700c8908f315aa59')
version('1.6.1', sha256='c4fceb864890b6168e79b0e714c585dbe2fd4222768ee90bc1aa0f8218691b11')
version('1.6.0', sha256='cb6dc9f82dfd95f6b9032a8d7ea70efeeb15d5b5fd6ed4e8537bb3c673580566')
version('1.5.4', sha256='4a453d5e5689de62e5d38edf40af3f17560bfd63c9c5bd228c18c1f99afa155b')
version('1.5.3', sha256='ddae76784574cc4c172f3d5edd7308be16078dd3b977e8746860c76c195fa707')
version('1.5.2', sha256='066c513d90eb3fd7567a9e150828d39111ebd88d3e924cdfc9f8ce19ab6f90c9')
version('1.5.1', sha256='039572f0ca9578a466683558c5bf1e65d442860ec6e13307d528749cfe6d07b8')
version('1.5.0', sha256='4ff72877d19b295ee7f7727615ea8238f2d59159df0bdd98f91754be4a2767f0')
version('1.4.1', sha256='dee1bbf3a6c8f73b6b218cb28eed8dd13347ea2f87d572ce19b289d6fd3fbc59')
version('1.4.0', sha256='31f7cfa93b01507c935c12b535e24812594002a02a56803d7cd063e9920d25e8')
version('1.3.3', sha256='64bf4e8ae0db2d42b58477817f648d81e77f0b381d0ea4427385bba3f959380a')
version('1.3.2', sha256='a03939b431994289f39373c57bbe452974a7da724ae7f9620a1beee575434da4')
version('1.3.1', sha256='2643cfb46d97b7797d1dbdb6f3c23fe3402904e3c90e6facfe6a9b98d808c1b5')
version('1.3.0', sha256='c3bb4bd2aca82fb498247deeac12265921fe231502a6bc6edea3ee7fe6c40a7a')
version('1.2.3', sha256='ecbe6413ca90b8e19f8475bfa303ac001e81b04ec600d17fa7f816271f7cca57')
version('1.2.2', sha256='a4331e0b8dab1ff75d2c67b5158a8bb9a83c799d7140094dda936d876c7cfbb1')
version('1.2.1', sha256='e085d1babcb419bbe58e2e805ac61924dac4ca45a07c9fa081144739e500aa3c')
version('1.1.0', sha256='878352408424dffaa695ffedf2f9f92844e116686923ed9aa8626fc30d32cfd1')
version('1.0.0', sha256='87ea1f11a0e9ec08c264dc64551d501fa307289460705f6fccd84cbfc7926d10')
version('0.19.1', sha256='a19a2ca7a7336495ec180adeaa0dfdcf41e96dbbee90d51c3ed828ba570884e6')
version('0.18.1', sha256='8ab6e9c808bf2fb3e8576cd8cf07226d9cdc18b012c06d9708429a821ac6634e')
version('0.17.0', sha256='f600b755fb69437d0f70361f9e560ab4d304b1b66987ed5a28bdd9dd7793e089')
version('0.15.1', sha256='a212cbc3b79e9a563aa45fc5c517b3499198bd7eb7e7be1e047568a5f48c259a')
version('0.15.0', sha256='0c74e31e08acc8bf9b6ceb9bced73df2ae0cc76003e0366350bc7b26292bf8b1')
depends_on('python@2.6:2.8,3.2:', when='@:0.17.999', type=('build', 'link', 'run'))
depends_on('python@2.7:2.8,3.4:', when='@0.18:1.2.999', type=('build', 'link', 'run'))
depends_on('python@3.5:', when='@1.3:1.4.999', type=('build', 'link', 'run'))
depends_on('python@3.6:', when='@1.5:1.5.999', type=('build', 'link', 'run'))
depends_on('python@3.7:', when='@1.6:1.6.1', type=('build', 'link', 'run'))
depends_on('python@3.7:3.9.999', when='@1.6.2:', type=('build', 'link', 'run'))
depends_on('py-setuptools', when='@:1.6.1', type='build')
depends_on('py-setuptools@:51.0.0', when='@1.6.2:', type='build')
depends_on('py-pybind11@2.2.4:', when='@1.4.0', type=('build', 'link'))
depends_on('py-pybind11@2.4.0:', when='@1.4.1:1.4.999', type=('build', 'link'))
depends_on('py-pybind11@2.4.3:', when='@1.5:1.6.1', type=('build', 'link'))
depends_on('py-pybind11@2.4.3:2.6.999', when='@1.6.2:', type=('build', 'link'))
depends_on('py-numpy@1.5.1:+blas+lapack', when='@:0.15.999', type=('build', 'link', 'run'))
depends_on('py-numpy@1.6.2:+blas+lapack', when='@0.16:0.17.999', type=('build', 'link', 'run'))
depends_on('py-numpy@1.7.1:+blas+lapack', when='@0.18:0.18.999', type=('build', 'link', 'run'))
depends_on('py-numpy@1.8.2:+blas+lapack', when='@0.19:1.2.999', type=('build', 'link', 'run'))
depends_on('py-numpy@1.13.3:+blas+lapack', when='@1.3:1.4.999', type=('build', 'link', 'run'))
depends_on('py-numpy@1.14.5:+blas+lapack', when='@1.5:1.5.999', type=('build', 'link', 'run'))
depends_on('py-numpy@1.16.5:+blas+lapack', when='@1.6:1.6.1', type=('build', 'link', 'run'))
depends_on('py-numpy@1.16.5:1.22.999+blas+lapack', when='@1.6.2:', type=('build', 'link', 'run'))
depends_on('py-pytest', type='test')
depends_on('lapack')
patch('https://git.sagemath.org/sage.git/plain/build/pkgs/scipy/patches/extern_decls.patch?id=711fe05025795e44b84233e065d240859ccae5bd',
sha256='5433f60831cb554101520a8f8871ac5a32c95f7a971ccd68b69049535b106780', when='@1.2:1.5.3')
def setup_build_environment(self, env):
env.set('F90', spack_fc)
if self.spec.satisfies('@:1.4 %gcc@10:'):
env.set('FFLAGS', '-fallow-argument-mismatch')
if platform.mac_ver()[0][0:2] == '11':
env.set('MACOSX_DEPLOYMENT_TARGET', '10.15')
def build_args(self, spec, prefix):
args = []
if spec.satisfies('%fj'):
args.extend(['config_fc', '--fcompiler=fujitsu'])
if not spec.satisfies('^python@3.5:'):
args.extend(['-j', str(make_jobs)])
return args
@run_after('install')
@on_package_attributes(run_tests=True)
def install_test(self):
with working_dir('spack-test', create=True):
python('-c', 'import scipy; scipy.test("full", verbose=2)')
| true | true |
1c2f4ab76cdcfe10466615eb6c2953691babf2b2 | 377 | py | Python | padinfo/view/closable_embed.py | bitwalk/pad-cogs | 40e4911841d165caf615c7459eb7b0a20aa4cbe1 | [
"MIT"
] | 3 | 2021-04-16T23:47:59.000Z | 2021-09-10T06:00:18.000Z | padinfo/view/closable_embed.py | bitwalk/pad-cogs | 40e4911841d165caf615c7459eb7b0a20aa4cbe1 | [
"MIT"
] | 708 | 2020-10-31T08:02:40.000Z | 2022-03-31T09:39:25.000Z | padinfo/view/closable_embed.py | bitwalk/pad-cogs | 40e4911841d165caf615c7459eb7b0a20aa4cbe1 | [
"MIT"
] | 20 | 2020-11-01T23:11:29.000Z | 2022-02-07T07:04:15.000Z | from padinfo.view.components.view_state_base import ViewStateBase
class ClosableEmbedViewState(ViewStateBase):
def __init__(self, original_author_id, menu_type, raw_query,
color, view_type, props):
super().__init__(original_author_id, menu_type, raw_query)
self.color = color
self.view_type = view_type
self.props = props
| 34.272727 | 66 | 0.710875 | from padinfo.view.components.view_state_base import ViewStateBase
class ClosableEmbedViewState(ViewStateBase):
def __init__(self, original_author_id, menu_type, raw_query,
color, view_type, props):
super().__init__(original_author_id, menu_type, raw_query)
self.color = color
self.view_type = view_type
self.props = props
| true | true |
1c2f4b64da660b7d848a1aa1f2f021cd7fb920cf | 13,758 | py | Python | core/controllers/library.py | aasiffaizal/oppia | 1a8634a435bec10f407e9f3c95f62bd467c5b5f7 | [
"Apache-2.0"
] | 1 | 2021-02-23T04:23:56.000Z | 2021-02-23T04:23:56.000Z | core/controllers/library.py | arpit1912/oppia | 00303a0830e775f8491ec57ac625ed44eafd73a4 | [
"Apache-2.0"
] | null | null | null | core/controllers/library.py | arpit1912/oppia | 00303a0830e775f8491ec57ac625ed44eafd73a4 | [
"Apache-2.0"
] | 1 | 2020-12-09T21:33:49.000Z | 2020-12-09T21:33:49.000Z | # Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the library page."""
from __future__ import absolute_import
from __future__ import unicode_literals
import json
import logging
import string
from constants import constants
from core.controllers import acl_decorators
from core.controllers import base
from core.domain import collection_services
from core.domain import exp_services
from core.domain import summary_services
from core.domain import user_services
import feconf
import python_utils
import utils
def get_matching_activity_dicts(
query_string, categories, language_codes, search_offset):
"""Given the details of a query and a search offset, returns a list of
activity dicts that satisfy the query.
Args:
query_string: str. The search query string (this is what the user
enters).
categories: list(str). The list of categories to query for. If it is
empty, no category filter is applied to the results. If it is not
empty, then a result is considered valid if it matches at least one
of these categories.
language_codes: list(str). The list of language codes to query for. If
it is empty, no language code filter is applied to the results. If
it is not empty, then a result is considered valid if it matches at
least one of these language codes.
search_offset: str or None. Offset indicating where, in the list of
exploration search results, to start the search from. If None,
collection search results are returned first before the
explorations.
Returns:
tuple. A tuple consisting of two elements:
- list(dict). Each element in this list is a collection or
exploration summary dict, representing a search result.
- str. The exploration index offset from which to start the
next search.
"""
# We only populate collections in the initial load, since the current
# frontend search infrastructure is set up to only deal with one search
# offset at a time.
# TODO(sll): Remove this special casing.
collection_ids = []
if not search_offset:
collection_ids, _ = (
collection_services.get_collection_ids_matching_query(
query_string, categories, language_codes))
exp_ids, new_search_offset = (
exp_services.get_exploration_ids_matching_query(
query_string, categories, language_codes, offset=search_offset))
activity_list = (
summary_services.get_displayable_collection_summary_dicts_matching_ids(
collection_ids) +
summary_services.get_displayable_exp_summary_dicts_matching_ids(
exp_ids))
if len(activity_list) == feconf.DEFAULT_QUERY_LIMIT:
logging.exception(
'%s activities were fetched to load the library page. '
'You may be running up against the default query limits.'
% feconf.DEFAULT_QUERY_LIMIT)
return activity_list, new_search_offset
class OldLibraryRedirectPage(base.BaseHandler):
"""Redirects the old library URL to the new one."""
@acl_decorators.open_access
def get(self):
"""Handles GET requests."""
self.redirect(feconf.LIBRARY_INDEX_URL, permanent=True)
class LibraryPage(base.BaseHandler):
"""The main library page. Used for both the default list of categories and
for search results.
"""
@acl_decorators.open_access
def get(self):
"""Handles GET requests."""
self.render_template('library-page.mainpage.html')
class LibraryIndexHandler(base.BaseHandler):
"""Provides data for the default library index page."""
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.open_access
def get(self):
"""Handles GET requests."""
# TODO(sll): Support index pages for other language codes.
summary_dicts_by_category = summary_services.get_library_groups([
constants.DEFAULT_LANGUAGE_CODE])
top_rated_activity_summary_dicts = (
summary_services.get_top_rated_exploration_summary_dicts(
[constants.DEFAULT_LANGUAGE_CODE],
feconf.NUMBER_OF_TOP_RATED_EXPLORATIONS_FOR_LIBRARY_PAGE))
featured_activity_summary_dicts = (
summary_services.get_featured_activity_summary_dicts(
[constants.DEFAULT_LANGUAGE_CODE]))
preferred_language_codes = [constants.DEFAULT_LANGUAGE_CODE]
if self.user_id:
user_settings = user_services.get_user_settings(self.user_id)
preferred_language_codes = user_settings.preferred_language_codes
if top_rated_activity_summary_dicts:
summary_dicts_by_category.insert(
0, {
'activity_summary_dicts': top_rated_activity_summary_dicts,
'categories': [],
'header_i18n_id': (
feconf.LIBRARY_CATEGORY_TOP_RATED_EXPLORATIONS),
'has_full_results_page': True,
'full_results_url': feconf.LIBRARY_TOP_RATED_URL,
'protractor_id': 'top-rated',
})
if featured_activity_summary_dicts:
summary_dicts_by_category.insert(
0, {
'activity_summary_dicts': featured_activity_summary_dicts,
'categories': [],
'header_i18n_id': (
feconf.LIBRARY_CATEGORY_FEATURED_ACTIVITIES),
'has_full_results_page': False,
'full_results_url': None,
})
self.values.update({
'activity_summary_dicts_by_category': (
summary_dicts_by_category),
'preferred_language_codes': preferred_language_codes,
})
self.render_json(self.values)
class LibraryGroupPage(base.BaseHandler):
"""The page for displaying top rated and recently published
explorations.
"""
@acl_decorators.open_access
def get(self):
"""Handles GET requests."""
self.render_template('library-page.mainpage.html')
class LibraryGroupIndexHandler(base.BaseHandler):
"""Provides data for categories such as top rated and recently published."""
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.open_access
def get(self):
"""Handles GET requests for group pages."""
# TODO(sll): Support index pages for other language codes.
group_name = self.request.get('group_name')
activity_list = []
header_i18n_id = ''
if group_name == feconf.LIBRARY_GROUP_RECENTLY_PUBLISHED:
recently_published_summary_dicts = (
summary_services.get_recently_published_exp_summary_dicts(
feconf.RECENTLY_PUBLISHED_QUERY_LIMIT_FULL_PAGE))
if recently_published_summary_dicts:
activity_list = recently_published_summary_dicts
header_i18n_id = feconf.LIBRARY_CATEGORY_RECENTLY_PUBLISHED
elif group_name == feconf.LIBRARY_GROUP_TOP_RATED:
top_rated_activity_summary_dicts = (
summary_services.get_top_rated_exploration_summary_dicts(
[constants.DEFAULT_LANGUAGE_CODE],
feconf.NUMBER_OF_TOP_RATED_EXPLORATIONS_FULL_PAGE))
if top_rated_activity_summary_dicts:
activity_list = top_rated_activity_summary_dicts
header_i18n_id = feconf.LIBRARY_CATEGORY_TOP_RATED_EXPLORATIONS
else:
raise self.PageNotFoundException
preferred_language_codes = [constants.DEFAULT_LANGUAGE_CODE]
if self.user_id:
user_settings = user_services.get_user_settings(self.user_id)
preferred_language_codes = user_settings.preferred_language_codes
self.values.update({
'activity_list': activity_list,
'header_i18n_id': header_i18n_id,
'preferred_language_codes': preferred_language_codes,
})
self.render_json(self.values)
class SearchHandler(base.BaseHandler):
"""Provides data for activity search results."""
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.open_access
def get(self):
"""Handles GET requests."""
query_string = utils.unescape_encoded_uri_component(
self.request.get('q'))
# Remove all punctuation from the query string, and replace it with
# spaces. See http://stackoverflow.com/a/266162 and
# http://stackoverflow.com/a/11693937
remove_punctuation_map = dict(
(ord(char), None) for char in string.punctuation)
query_string = query_string.translate(remove_punctuation_map)
# If there is a category parameter, it should be in the following form:
# category=("Algebra" OR "Math")
category_string = self.request.get('category', '')
if category_string and (
not category_string.startswith('("') or
not category_string.endswith('")')):
raise self.InvalidInputException('Invalid search query.')
# The 2 and -2 account for the '("" and '")' characters at the
# beginning and end.
categories = (
category_string[2:-2].split('" OR "') if category_string else [])
# If there is a language code parameter, it should be in the following
# form:
# language_code=("en" OR "hi")
language_code_string = self.request.get('language_code', '')
if language_code_string and (
not language_code_string.startswith('("') or
not language_code_string.endswith('")')):
raise self.InvalidInputException('Invalid search query.')
# The 2 and -2 account for the '("" and '")' characters at the
# beginning and end.
language_codes = (
language_code_string[2:-2].split('" OR "')
if language_code_string else [])
# TODO(#11314): Change 'cursor' to 'offset' here and in the frontend.
search_offset = self.request.get('cursor', None)
activity_list, new_search_offset = get_matching_activity_dicts(
query_string, categories, language_codes, search_offset)
self.values.update({
'activity_list': activity_list,
'search_cursor': new_search_offset,
})
self.render_json(self.values)
class LibraryRedirectPage(base.BaseHandler):
"""An old 'gallery' page that should redirect to the library index page."""
@acl_decorators.open_access
def get(self):
"""Handles GET requests."""
self.redirect('/community-library')
class ExplorationSummariesHandler(base.BaseHandler):
"""Returns summaries corresponding to ids of public explorations. This
controller supports returning private explorations for the given user.
"""
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.open_access
def get(self):
"""Handles GET requests."""
try:
exp_ids = json.loads(self.request.get('stringified_exp_ids'))
except Exception:
raise self.PageNotFoundException
include_private_exps_str = self.request.get(
'include_private_explorations')
include_private_exps = (
include_private_exps_str.lower() == 'true'
if include_private_exps_str else False)
editor_user_id = self.user_id if include_private_exps else None
if not editor_user_id:
include_private_exps = False
if (not isinstance(exp_ids, list) or not all(
isinstance(
exp_id, python_utils.BASESTRING) for exp_id in exp_ids)):
raise self.PageNotFoundException
if include_private_exps:
summaries = (
summary_services.get_displayable_exp_summary_dicts_matching_ids(
exp_ids, user=self.user))
else:
summaries = (
summary_services.get_displayable_exp_summary_dicts_matching_ids(
exp_ids))
self.values.update({
'summaries': summaries
})
self.render_json(self.values)
class CollectionSummariesHandler(base.BaseHandler):
"""Returns collection summaries corresponding to collection ids."""
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.open_access
def get(self):
"""Handles GET requests."""
try:
collection_ids = json.loads(
self.request.get('stringified_collection_ids'))
except Exception:
raise self.PageNotFoundException
summaries = (
summary_services.get_displayable_collection_summary_dicts_matching_ids( # pylint: disable=line-too-long
collection_ids))
self.values.update({
'summaries': summaries
})
self.render_json(self.values)
| 39.085227 | 115 | 0.66296 |
from __future__ import absolute_import
from __future__ import unicode_literals
import json
import logging
import string
from constants import constants
from core.controllers import acl_decorators
from core.controllers import base
from core.domain import collection_services
from core.domain import exp_services
from core.domain import summary_services
from core.domain import user_services
import feconf
import python_utils
import utils
def get_matching_activity_dicts(
query_string, categories, language_codes, search_offset):
collection_ids = []
if not search_offset:
collection_ids, _ = (
collection_services.get_collection_ids_matching_query(
query_string, categories, language_codes))
exp_ids, new_search_offset = (
exp_services.get_exploration_ids_matching_query(
query_string, categories, language_codes, offset=search_offset))
activity_list = (
summary_services.get_displayable_collection_summary_dicts_matching_ids(
collection_ids) +
summary_services.get_displayable_exp_summary_dicts_matching_ids(
exp_ids))
if len(activity_list) == feconf.DEFAULT_QUERY_LIMIT:
logging.exception(
'%s activities were fetched to load the library page. '
'You may be running up against the default query limits.'
% feconf.DEFAULT_QUERY_LIMIT)
return activity_list, new_search_offset
class OldLibraryRedirectPage(base.BaseHandler):
@acl_decorators.open_access
def get(self):
self.redirect(feconf.LIBRARY_INDEX_URL, permanent=True)
class LibraryPage(base.BaseHandler):
@acl_decorators.open_access
def get(self):
self.render_template('library-page.mainpage.html')
class LibraryIndexHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.open_access
def get(self):
summary_dicts_by_category = summary_services.get_library_groups([
constants.DEFAULT_LANGUAGE_CODE])
top_rated_activity_summary_dicts = (
summary_services.get_top_rated_exploration_summary_dicts(
[constants.DEFAULT_LANGUAGE_CODE],
feconf.NUMBER_OF_TOP_RATED_EXPLORATIONS_FOR_LIBRARY_PAGE))
featured_activity_summary_dicts = (
summary_services.get_featured_activity_summary_dicts(
[constants.DEFAULT_LANGUAGE_CODE]))
preferred_language_codes = [constants.DEFAULT_LANGUAGE_CODE]
if self.user_id:
user_settings = user_services.get_user_settings(self.user_id)
preferred_language_codes = user_settings.preferred_language_codes
if top_rated_activity_summary_dicts:
summary_dicts_by_category.insert(
0, {
'activity_summary_dicts': top_rated_activity_summary_dicts,
'categories': [],
'header_i18n_id': (
feconf.LIBRARY_CATEGORY_TOP_RATED_EXPLORATIONS),
'has_full_results_page': True,
'full_results_url': feconf.LIBRARY_TOP_RATED_URL,
'protractor_id': 'top-rated',
})
if featured_activity_summary_dicts:
summary_dicts_by_category.insert(
0, {
'activity_summary_dicts': featured_activity_summary_dicts,
'categories': [],
'header_i18n_id': (
feconf.LIBRARY_CATEGORY_FEATURED_ACTIVITIES),
'has_full_results_page': False,
'full_results_url': None,
})
self.values.update({
'activity_summary_dicts_by_category': (
summary_dicts_by_category),
'preferred_language_codes': preferred_language_codes,
})
self.render_json(self.values)
class LibraryGroupPage(base.BaseHandler):
@acl_decorators.open_access
def get(self):
self.render_template('library-page.mainpage.html')
class LibraryGroupIndexHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.open_access
def get(self):
group_name = self.request.get('group_name')
activity_list = []
header_i18n_id = ''
if group_name == feconf.LIBRARY_GROUP_RECENTLY_PUBLISHED:
recently_published_summary_dicts = (
summary_services.get_recently_published_exp_summary_dicts(
feconf.RECENTLY_PUBLISHED_QUERY_LIMIT_FULL_PAGE))
if recently_published_summary_dicts:
activity_list = recently_published_summary_dicts
header_i18n_id = feconf.LIBRARY_CATEGORY_RECENTLY_PUBLISHED
elif group_name == feconf.LIBRARY_GROUP_TOP_RATED:
top_rated_activity_summary_dicts = (
summary_services.get_top_rated_exploration_summary_dicts(
[constants.DEFAULT_LANGUAGE_CODE],
feconf.NUMBER_OF_TOP_RATED_EXPLORATIONS_FULL_PAGE))
if top_rated_activity_summary_dicts:
activity_list = top_rated_activity_summary_dicts
header_i18n_id = feconf.LIBRARY_CATEGORY_TOP_RATED_EXPLORATIONS
else:
raise self.PageNotFoundException
preferred_language_codes = [constants.DEFAULT_LANGUAGE_CODE]
if self.user_id:
user_settings = user_services.get_user_settings(self.user_id)
preferred_language_codes = user_settings.preferred_language_codes
self.values.update({
'activity_list': activity_list,
'header_i18n_id': header_i18n_id,
'preferred_language_codes': preferred_language_codes,
})
self.render_json(self.values)
class SearchHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.open_access
def get(self):
query_string = utils.unescape_encoded_uri_component(
self.request.get('q'))
remove_punctuation_map = dict(
(ord(char), None) for char in string.punctuation)
query_string = query_string.translate(remove_punctuation_map)
category_string = self.request.get('category', '')
if category_string and (
not category_string.startswith('("') or
not category_string.endswith('")')):
raise self.InvalidInputException('Invalid search query.')
# beginning and end.
categories = (
category_string[2:-2].split('" OR "') if category_string else [])
# If there is a language code parameter, it should be in the following
# form:
# language_code=("en" OR "hi")
language_code_string = self.request.get('language_code', '')
if language_code_string and (
not language_code_string.startswith('("') or
not language_code_string.endswith('")')):
raise self.InvalidInputException('Invalid search query.')
# The 2 and -2 account for the '("" and '")' characters at the
language_codes = (
language_code_string[2:-2].split('" OR "')
if language_code_string else [])
activity_list, new_search_offset = get_matching_activity_dicts(
query_string, categories, language_codes, search_offset)
self.values.update({
'activity_list': activity_list,
'search_cursor': new_search_offset,
})
self.render_json(self.values)
class LibraryRedirectPage(base.BaseHandler):
@acl_decorators.open_access
def get(self):
self.redirect('/community-library')
class ExplorationSummariesHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.open_access
def get(self):
try:
exp_ids = json.loads(self.request.get('stringified_exp_ids'))
except Exception:
raise self.PageNotFoundException
include_private_exps_str = self.request.get(
'include_private_explorations')
include_private_exps = (
include_private_exps_str.lower() == 'true'
if include_private_exps_str else False)
editor_user_id = self.user_id if include_private_exps else None
if not editor_user_id:
include_private_exps = False
if (not isinstance(exp_ids, list) or not all(
isinstance(
exp_id, python_utils.BASESTRING) for exp_id in exp_ids)):
raise self.PageNotFoundException
if include_private_exps:
summaries = (
summary_services.get_displayable_exp_summary_dicts_matching_ids(
exp_ids, user=self.user))
else:
summaries = (
summary_services.get_displayable_exp_summary_dicts_matching_ids(
exp_ids))
self.values.update({
'summaries': summaries
})
self.render_json(self.values)
class CollectionSummariesHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.open_access
def get(self):
try:
collection_ids = json.loads(
self.request.get('stringified_collection_ids'))
except Exception:
raise self.PageNotFoundException
summaries = (
summary_services.get_displayable_collection_summary_dicts_matching_ids(
collection_ids))
self.values.update({
'summaries': summaries
})
self.render_json(self.values)
| true | true |
1c2f4b85cac45680e581cb5f7f143bbdc49b2354 | 681 | py | Python | SS/p145_postorderTraversal.py | MTandHJ/leetcode | f3832ed255d259cb881666ec8bd3de090d34e883 | [
"MIT"
] | null | null | null | SS/p145_postorderTraversal.py | MTandHJ/leetcode | f3832ed255d259cb881666ec8bd3de090d34e883 | [
"MIT"
] | null | null | null | SS/p145_postorderTraversal.py | MTandHJ/leetcode | f3832ed255d259cb881666ec8bd3de090d34e883 | [
"MIT"
] | null | null | null | from typing import List
import collections
class TreeNode:
def __init__(self, left=None, right=None, val=0) -> None:
self.val = val
self.left = left
self.right = right
class Solution:
def postorderTraversal(self, root: TreeNode) -> List[int]:
if not root:
return []
stack = []
res = collections.deque()
cur = root
while cur or stack:
if cur:
stack.append(cur)
res.appendleft(cur.val)
cur = cur.right
else:
node = stack.pop()
cur = node.left
return res
ins = Solution()
| 21.967742 | 62 | 0.499266 | from typing import List
import collections
class TreeNode:
def __init__(self, left=None, right=None, val=0) -> None:
self.val = val
self.left = left
self.right = right
class Solution:
def postorderTraversal(self, root: TreeNode) -> List[int]:
if not root:
return []
stack = []
res = collections.deque()
cur = root
while cur or stack:
if cur:
stack.append(cur)
res.appendleft(cur.val)
cur = cur.right
else:
node = stack.pop()
cur = node.left
return res
ins = Solution()
| true | true |
1c2f4ca5ce1f0579712660b8f09e0d19ed6634aa | 31,286 | py | Python | src/azure-cli/azure/cli/command_modules/appservice/tests/latest/test_staticapp_commands_thru_mock.py | Xinyue-Wang/azure-cli | 35d5702039d21247712eeedfdbe8f128f5f90fa3 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/appservice/tests/latest/test_staticapp_commands_thru_mock.py | Xinyue-Wang/azure-cli | 35d5702039d21247712eeedfdbe8f128f5f90fa3 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/appservice/tests/latest/test_staticapp_commands_thru_mock.py | Xinyue-Wang/azure-cli | 35d5702039d21247712eeedfdbe8f128f5f90fa3 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
from unittest import mock
from azure.cli.command_modules.appservice.static_sites import \
list_staticsites, show_staticsite, delete_staticsite, create_staticsites, CLIError, disconnect_staticsite, \
reconnect_staticsite, list_staticsite_environments, show_staticsite_environment, list_staticsite_domains, \
set_staticsite_domain, delete_staticsite_domain, list_staticsite_functions, list_staticsite_function_app_settings, \
set_staticsite_function_app_settings, delete_staticsite_function_app_settings, list_staticsite_users, \
invite_staticsite_users, update_staticsite_users, update_staticsite, list_staticsite_secrets, \
reset_staticsite_api_key, delete_staticsite_environment
class TestStaticAppCommands(unittest.TestCase):
def setUp(self):
_set_up_client_mock(self)
_set_up_fake_apps(self)
def test_list_empty_staticapp(self):
self.staticapp_client.list.return_value = []
response = list_staticsites(self.mock_cmd)
self.assertEqual(len(response), 0)
def test_list_staticapp_with_resourcegroup(self):
self.staticapp_client.get_static_sites_by_resource_group.return_value = [self.app1]
response = list_staticsites(self.mock_cmd, self.rg1)
self.staticapp_client.get_static_sites_by_resource_group.assert_called_once_with(self.rg1)
self.assertEqual(len(response), 1)
self.assertIn(self.app1, response)
def test_list_staticapp_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
response = list_staticsites(self.mock_cmd)
self.assertEqual(len(response), 2)
self.assertIn(self.app1, response)
self.assertIn(self.app2, response)
def test_show_staticapp_with_resourcegroup(self):
self.staticapp_client.get_static_site.return_value = self.app1
response = show_staticsite(self.mock_cmd, self.name1, self.rg1)
self.staticapp_client.get_static_site.assert_called_once_with(self.rg1, self.name1)
self.assertEqual(self.app1, response)
def test_show_staticapp_without_resourcegroup(self):
self.staticapp_client.get_static_site.return_value = self.app1
self.staticapp_client.list.return_value = [self.app1, self.app2]
response = show_staticsite(self.mock_cmd, self.name1)
self.staticapp_client.get_static_site.assert_called_once_with(self.rg1, self.name1)
self.assertEqual(self.app1, response)
def test_show_staticapp_not_exist(self):
self.staticapp_client.get_static_site.return_value = self.app1
self.staticapp_client.list.return_value = [self.app1, self.app2]
with self.assertRaises(CLIError):
show_staticsite(self.mock_cmd, self.name1_not_exist)
def test_delete_staticapp_with_resourcegroup(self):
delete_staticsite(self.mock_cmd, self.name1, self.rg1)
self.staticapp_client.begin_delete_static_site.assert_called_once_with(resource_group_name=self.rg1, name=self.name1)
def test_delete_staticapp_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
delete_staticsite(self.mock_cmd, self.name1)
self.staticapp_client.begin_delete_static_site.assert_called_once_with(resource_group_name=self.rg1, name=self.name1)
def test_delete_staticapp_not_exist(self):
with self.assertRaises(CLIError):
delete_staticsite(self.mock_cmd, self.name1_not_exist)
def test_create_staticapp(self):
from azure.mgmt.web.models import StaticSiteARMResource, StaticSiteBuildProperties, SkuDescription
self.mock_cmd.get_models.return_value = StaticSiteARMResource, StaticSiteBuildProperties, SkuDescription
app_location = './src'
api_location = './api/'
output_location = '/.git/'
tags = {'key1': 'value1'}
create_staticsites(
self.mock_cmd, self.rg1, self.name1, self.location1,
self.source1, self.branch1, self.token1,
app_location=app_location, api_location=api_location, output_location=output_location,
tags=tags)
self.staticapp_client.begin_create_or_update_static_site.assert_called_once()
arg_list = self.staticapp_client.begin_create_or_update_static_site.call_args[1]
self.assertEqual(self.name1, arg_list["name"])
self.assertEqual(self.rg1, arg_list["resource_group_name"])
self.assertEqual(self.location1, arg_list["static_site_envelope"].location)
self.assertEqual(self.source1, arg_list["static_site_envelope"].repository_url)
self.assertEqual(self.branch1, arg_list["static_site_envelope"].branch)
self.assertEqual(tags, arg_list["static_site_envelope"].tags)
self.assertEqual('Free', arg_list["static_site_envelope"].sku.name)
self.assertEqual(app_location, arg_list["static_site_envelope"].build_properties.app_location)
self.assertEqual(api_location, arg_list["static_site_envelope"].build_properties.api_location)
self.assertEqual(output_location, arg_list["static_site_envelope"].build_properties.app_artifact_location)
def test_create_staticapp_with_standard_sku(self):
from azure.mgmt.web.models import StaticSiteARMResource, StaticSiteBuildProperties, SkuDescription
self.mock_cmd.get_models.return_value = StaticSiteARMResource, StaticSiteBuildProperties, SkuDescription
create_staticsites(
self.mock_cmd, self.rg1, self.name1, self.location1,
self.source1, self.branch1, self.token1, sku='standard')
self.staticapp_client.begin_create_or_update_static_site.assert_called_once()
arg_list = self.staticapp_client.begin_create_or_update_static_site.call_args[1]
self.assertEqual('Standard', arg_list["static_site_envelope"].sku.name)
def test_create_staticapp_missing_token(self):
app_location = './src'
api_location = './api/'
output_location = '/.git/'
tags = {'key1': 'value1'}
with self.assertRaises(CLIError):
create_staticsites(
self.mock_cmd, self.rg1, self.name1, self.location1,
self.source1, self.branch1,
app_location=app_location, api_location=api_location, output_location=output_location,
tags=tags)
def test_update_staticapp(self):
from azure.mgmt.web.models import StaticSiteARMResource, SkuDescription
self.mock_cmd.get_models.return_value = StaticSiteARMResource, SkuDescription
self.staticapp_client.get_static_site.return_value = self.app1
self.staticapp_client.list.return_value = [self.app1, self.app2]
tags = {'key1': 'value1'}
sku = 'Standard'
update_staticsite(self.mock_cmd, self.name1, self.source2, self.branch2, self.token2, tags=tags, sku=sku)
self.staticapp_client.update_static_site.assert_called_once()
arg_list = self.staticapp_client.update_static_site.call_args[1]
self.assertEqual(self.name1, arg_list["name"])
self.assertEqual(self.source2, arg_list["static_site_envelope"].repository_url)
self.assertEqual(self.branch2, arg_list["static_site_envelope"].branch)
self.assertEqual(self.token2, arg_list["static_site_envelope"].repository_token)
self.assertEqual(tags, arg_list["static_site_envelope"].tags)
self.assertEqual(sku, arg_list["static_site_envelope"].sku.name)
def test_update_staticapp_with_no_values_passed_in(self):
from azure.mgmt.web.models import StaticSiteARMResource, SkuDescription
self.mock_cmd.get_models.return_value = StaticSiteARMResource, SkuDescription
self.staticapp_client.get_static_site.return_value = self.app1
self.staticapp_client.list.return_value = [self.app1, self.app2]
update_staticsite(self.mock_cmd, self.name1)
self.staticapp_client.update_static_site.assert_called_once()
arg_list = self.staticapp_client.update_static_site.call_args[1]
self.assertEqual(self.name1, arg_list["name"])
self.assertEqual(self.source1, arg_list["static_site_envelope"].repository_url)
self.assertEqual(self.branch1, arg_list["static_site_envelope"].branch)
self.assertEqual(self.token1, arg_list["static_site_envelope"].repository_token)
self.assertEqual(self.app1.tags, arg_list["static_site_envelope"].tags)
self.assertEqual('Free', arg_list["static_site_envelope"].sku.name)
def test_update_staticapp_not_exist(self):
from azure.mgmt.web.models import StaticSiteARMResource, SkuDescription
self.mock_cmd.get_models.return_value = StaticSiteARMResource, SkuDescription
self.staticapp_client.get_static_site.return_value = self.app1
self.staticapp_client.list.return_value = [self.app1, self.app2]
with self.assertRaises(CLIError):
update_staticsite(self.mock_cmd, self.name1_not_exist)
def test_disconnect_staticapp_with_resourcegroup(self):
disconnect_staticsite(self.mock_cmd, self.name1, self.rg1)
self.staticapp_client.begin_detach_static_site.assert_called_once_with(resource_group_name=self.rg1, name=self.name1)
def test_disconnect_staticapp_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
disconnect_staticsite(self.mock_cmd, self.name1)
self.staticapp_client.begin_detach_static_site.assert_called_once_with(resource_group_name=self.rg1, name=self.name1)
@mock.patch('azure.cli.command_modules.appservice.static_sites.create_staticsites', autospec=True)
def test_reconnect_staticapp_with_resourcegroup(self, create_staticsites_mock):
self.staticapp_client.list.return_value = [self.app1, self.app2]
reconnect_staticsite(self.mock_cmd, self.name1, self.source1, self.branch1, self.token1,
resource_group_name=self.rg1)
create_staticsites_mock.assert_called_once_with(self.mock_cmd, self.rg1, self.name1, self.location1,
self.source1, self.branch1, self.token1, login_with_github=False, no_wait=False)
@mock.patch('azure.cli.command_modules.appservice.static_sites.create_staticsites', autospec=True)
def test_reconnect_staticapp_without_resourcegroup(self, create_staticsites_mock):
self.staticapp_client.list.return_value = [self.app1, self.app2]
reconnect_staticsite(self.mock_cmd, self.name1, self.source1, self.branch1, self.token1)
create_staticsites_mock.assert_called_once_with(self.mock_cmd, self.rg1, self.name1, self.location1,
self.source1, self.branch1, self.token1, login_with_github=False, no_wait=False)
def test_list_staticsite_environments_with_resourcegroup(self):
list_staticsite_environments(self.mock_cmd, self.name1, self.rg1)
self.staticapp_client.get_static_site_builds.assert_called_once_with(self.rg1, self.name1)
def test_list_staticsite_environments_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
list_staticsite_environments(self.mock_cmd, self.name1)
self.staticapp_client.get_static_site_builds.assert_called_once_with(self.rg1, self.name1)
def test_show_staticsite_environment_with_resourcegroup(self):
show_staticsite_environment(self.mock_cmd, self.name1, self.environment1, self.rg1)
self.staticapp_client.get_static_site_build.assert_called_once_with(self.rg1, self.name1, self.environment1)
def test_show_staticsite_environment_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
show_staticsite_environment(self.mock_cmd, self.name1, self.environment1)
self.staticapp_client.get_static_site_build.assert_called_once_with(self.rg1, self.name1, self.environment1)
def test_set_staticsite_domain_with_resourcegroup(self):
set_staticsite_domain(self.mock_cmd, self.name1, self.hostname1, self.rg1)
self.staticapp_client.begin_validate_custom_domain_can_be_added_to_static_site.assert_called_once_with(
self.rg1, self.name1, self.hostname1, self.hostname1_validation)
self.staticapp_client.begin_create_or_update_static_site_custom_domain.assert_called_once_with(
resource_group_name=self.rg1, name=self.name1, domain_name=self.hostname1,
static_site_custom_domain_request_properties_envelope=self.hostname1_validation)
def test_set_staticsite_domain_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
set_staticsite_domain(self.mock_cmd, self.name1, self.hostname1)
self.staticapp_client.begin_validate_custom_domain_can_be_added_to_static_site.assert_called_once_with(
self.rg1, self.name1, self.hostname1, self.hostname1_validation)
self.staticapp_client.begin_create_or_update_static_site_custom_domain.assert_called_once_with(
resource_group_name=self.rg1, name=self.name1, domain_name=self.hostname1,
static_site_custom_domain_request_properties_envelope=self.hostname1_validation)
def test_delete_staticsite_domain_with_resourcegroup(self):
delete_staticsite_domain(self.mock_cmd, self.name1, self.hostname1, self.rg1)
self.staticapp_client.begin_delete_static_site_custom_domain.assert_called_once_with(
resource_group_name=self.rg1, name=self.name1, domain_name=self.hostname1)
def test_delete_staticsite_domain_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
delete_staticsite_domain(self.mock_cmd, self.name1, self.hostname1)
self.staticapp_client.begin_delete_static_site_custom_domain.assert_called_once_with(
resource_group_name=self.rg1, name=self.name1, domain_name=self.hostname1)
def test_delete_staticsite_environment_with_resourcegroup(self):
delete_staticsite_environment(self.mock_cmd, self.name1, self.environment1, self.rg1)
self.staticapp_client.begin_delete_static_site_build.assert_called_once_with(self.rg1, self.name1, self.environment1)
def test_delete_staticsite_environment_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
delete_staticsite_environment(self.mock_cmd, self.name1, self.environment1)
self.staticapp_client.begin_delete_static_site_build.assert_called_once_with(self.rg1, self.name1, self.environment1)
def test_list_staticsite_functions_with_resourcegroup(self):
list_staticsite_functions(self.mock_cmd, self.name1, self.rg1, self.environment1)
self.staticapp_client.list_static_site_build_functions.assert_called_once_with(
self.rg1, self.name1, self.environment1)
def test_list_staticsite_functions_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
list_staticsite_functions(self.mock_cmd, self.name1, environment_name=self.environment1)
self.staticapp_client.list_static_site_build_functions.assert_called_once_with(
self.rg1, self.name1, self.environment1)
def test_list_staticsite_function_app_settings_with_resourcegroup(self):
list_staticsite_function_app_settings(self.mock_cmd, self.name1, self.rg1)
self.staticapp_client.list_static_site_function_app_settings.assert_called_once_with(
self.rg1, self.name1)
def test_list_staticsite_function_app_settings_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
list_staticsite_function_app_settings(self.mock_cmd, self.name1)
self.staticapp_client.list_static_site_function_app_settings.assert_called_once_with(
self.rg1, self.name1)
def test_set_staticsite_function_app_settings_with_resourcegroup(self):
app_settings1_input = ['key1=val1', 'key2=val2==', 'key3=val3=']
app_settings1_dict = {'key1': 'val1', 'key2': 'val2==', 'key3': 'val3='}
set_staticsite_function_app_settings(self.mock_cmd, self.name1, app_settings1_input, self.rg1)
self.staticapp_client.create_or_update_static_site_function_app_settings.assert_called_once_with(
self.rg1, self.name1, app_settings=app_settings1_dict)
def test_set_staticsite_function_app_settings_without_resourcegroup(self):
app_settings1_input = ['key1=val1', 'key2=val2==', 'key3=val3=']
app_settings1_dict = {'key1': 'val1', 'key2': 'val2==', 'key3': 'val3='}
self.staticapp_client.list.return_value = [self.app1, self.app2]
set_staticsite_function_app_settings(self.mock_cmd, self.name1, app_settings1_input)
self.staticapp_client.create_or_update_static_site_function_app_settings.assert_called_once_with(
self.rg1, self.name1, app_settings=app_settings1_dict)
def test_delete_staticsite_function_app_settings_with_resourcegroup(self):
# setup
current_app_settings = {'key1': 'val1', 'key2': 'val2'}
app_settings_keys_to_delete = ['key1']
updated_app_settings = {'key2': 'val2'}
class AppSettings:
properties = current_app_settings
self.staticapp_client.list_static_site_function_app_settings.return_value = AppSettings
# action
delete_staticsite_function_app_settings(self.mock_cmd, self.name1, app_settings_keys_to_delete, self.rg1)
# validate
self.staticapp_client.create_or_update_static_site_function_app_settings.assert_called_once_with(
self.rg1, self.name1, app_settings=updated_app_settings)
def test_delete_staticsite_function_app_settings_without_resourcegroup(self):
# setup
current_app_settings = {'key1': 'val1', 'key2': 'val2'}
app_settings_keys_to_delete = ['key1']
updated_app_settings = {'key2': 'val2'}
class AppSettings:
properties = current_app_settings
self.staticapp_client.list_static_site_function_app_settings.return_value = AppSettings
self.staticapp_client.list.return_value = [self.app1, self.app2]
# action
delete_staticsite_function_app_settings(self.mock_cmd, self.name1, app_settings_keys_to_delete)
# validate
self.staticapp_client.create_or_update_static_site_function_app_settings.assert_called_once_with(
self.rg1, self.name1, app_settings=updated_app_settings)
def test_list_staticsite_users_with_resourcegroup(self):
authentication_provider = 'GitHub'
list_staticsite_users(self.mock_cmd, self.name1, self.rg1, authentication_provider=authentication_provider)
self.staticapp_client.list_static_site_users.assert_called_once_with(
self.rg1, self.name1, authentication_provider)
def test_list_staticsite_users_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
authentication_provider = 'GitHub'
list_staticsite_users(self.mock_cmd, self.name1, authentication_provider=authentication_provider)
self.staticapp_client.list_static_site_users.assert_called_once_with(
self.rg1, self.name1, authentication_provider)
def test_invite_staticsite_users_with_resourcegroup(self):
authentication_provider = 'GitHub'
user_details = 'JohnDoe'
roles = 'Contributor,Reviewer'
invitation_expiration_in_hours = 2
from azure.mgmt.web.models import StaticSiteUserInvitationRequestResource
self.mock_cmd.get_models.return_value = StaticSiteUserInvitationRequestResource
invite_staticsite_users(self.mock_cmd, self.name1, authentication_provider, user_details, self.hostname1,
roles, invitation_expiration_in_hours, self.rg1)
arg_list = self.staticapp_client.create_user_roles_invitation_link.call_args[0]
self.assertEqual(self.rg1, arg_list[0])
self.assertEqual(self.name1, arg_list[1])
self.assertEqual(self.hostname1, arg_list[2].domain)
self.assertEqual(authentication_provider, arg_list[2].provider)
self.assertEqual(user_details, arg_list[2].user_details)
self.assertEqual(invitation_expiration_in_hours, arg_list[2].num_hours_to_expiration)
def test_invite_staticsite_users_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
authentication_provider = 'GitHub'
user_details = 'JohnDoe'
roles = 'Contributor,Reviewer'
invitation_expiration_in_hours = 2
from azure.mgmt.web.models import StaticSiteUserInvitationRequestResource
self.mock_cmd.get_models.return_value = StaticSiteUserInvitationRequestResource
invite_staticsite_users(self.mock_cmd, self.name1, authentication_provider, user_details, self.hostname1,
roles, invitation_expiration_in_hours)
arg_list = self.staticapp_client.create_user_roles_invitation_link.call_args[0]
self.assertEqual(self.rg1, arg_list[0])
self.assertEqual(self.name1, arg_list[1])
self.assertEqual(self.hostname1, arg_list[2].domain)
self.assertEqual(authentication_provider, arg_list[2].provider)
self.assertEqual(user_details, arg_list[2].user_details)
self.assertEqual(invitation_expiration_in_hours, arg_list[2].num_hours_to_expiration)
def test_update_staticsite_users_with_resourcegroup_with_all_args(self):
roles = 'Contributor,Reviewer'
authentication_provider = 'GitHub'
user_details = 'JohnDoe'
user_id = 100
update_staticsite_users(self.mock_cmd, self.name1, roles, authentication_provider=authentication_provider,
user_details=user_details, user_id=user_id, resource_group_name=self.rg1)
self.staticapp_client.update_static_site_user.assert_called_once_with(
self.rg1, self.name1, authentication_provider, user_id, roles=roles)
def test_update_staticsite_users_with_resourcegroup_without_auth_provider(self):
roles = 'Contributor,Reviewer'
user_details = 'JohnDoe'
authentication_provider = 'GitHub'
user_id = '100'
_mock_list_users_for_without_auth_provider(self, user_id, authentication_provider, user_details)
update_staticsite_users(self.mock_cmd, self.name1, roles,
user_details=user_details, user_id=user_id, resource_group_name=self.rg1)
self.staticapp_client.update_static_site_user.assert_called_once_with(
self.rg1, self.name1, authentication_provider, user_id, roles=roles)
def test_update_staticsite_users_with_resourcegroup_without_auth_provider_user_not_found(self):
roles = 'Contributor,Reviewer'
user_details = 'JohnDoe'
user_id = '100'
_mock_list_users_for_without_auth_provider(self, 'other_user_id',
'dummy_authentication_provider', 'dummy_user_details')
with self.assertRaises(CLIError):
update_staticsite_users(self.mock_cmd, self.name1, roles,
user_details=user_details, user_id=user_id, resource_group_name=self.rg1)
def test_update_staticsite_users_with_resourcegroup_without_user_id_without_auth_provider(self):
roles = 'Contributor,Reviewer'
user_details = 'JohnDoe'
authentication_provider = 'GitHub'
user_id = '100'
_mock_list_users_for_without_auth_provider(self, user_id, authentication_provider, user_details)
update_staticsite_users(self.mock_cmd, self.name1, roles,
user_details=user_details, resource_group_name=self.rg1)
self.staticapp_client.update_static_site_user.assert_called_once_with(
self.rg1, self.name1, authentication_provider, user_id, roles=roles)
def test_update_staticsite_users_with_resourcegroup_without_user_id_without_auth_provider_user_not_found(self):
roles = 'Contributor,Reviewer'
user_details = 'JohnDoe'
_mock_list_users_for_without_auth_provider(self, 'dummy_user_id', 'dummy_authentication_provider',
'other_user_details')
with self.assertRaises(CLIError):
update_staticsite_users(self.mock_cmd, self.name1, roles,
user_details=user_details, resource_group_name=self.rg1)
def test_update_staticsite_users_with_resourcegroup_without_user_id(self):
roles = 'Contributor,Reviewer'
user_details = 'JohnDoe'
authentication_provider = 'GitHub'
user_id = '100'
_mock_list_users_for_without_auth_provider(self, user_id, authentication_provider, user_details)
update_staticsite_users(self.mock_cmd, self.name1, roles, authentication_provider=authentication_provider,
user_details=user_details, resource_group_name=self.rg1)
self.staticapp_client.update_static_site_user.assert_called_once_with(
self.rg1, self.name1, authentication_provider, user_id, roles=roles)
def test_update_staticsite_users_with_resourcegroup_without_user_id_user_not_found(self):
roles = 'Contributor,Reviewer'
user_details = 'JohnDoe'
authentication_provider = 'GitHub'
_mock_list_users_for_without_auth_provider(self, 'dummy_user_id', 'dummy_authentication_provider',
'other_user_details')
with self.assertRaises(CLIError):
update_staticsite_users(self.mock_cmd, self.name1, roles, authentication_provider=authentication_provider,
user_details=user_details, resource_group_name=self.rg1)
def test_update_staticsite_users_with_resourcegroup_without_user_id_without_user_details(self):
roles = 'Contributor,Reviewer'
user_details = 'JohnDoe'
authentication_provider = 'GitHub'
user_id = '100'
_mock_list_users_for_without_auth_provider(self, user_id, authentication_provider, user_details)
with self.assertRaises(CLIError):
update_staticsite_users(self.mock_cmd, self.name1, roles, authentication_provider=authentication_provider,
resource_group_name=self.rg1)
def test_list_staticsite_secrets(self):
from azure.mgmt.web.models import StringDictionary
self.staticapp_client.list_static_site_secrets.return_value = StringDictionary(properties={"apiKey": "key"})
secret = list_staticsite_secrets(self.mock_cmd, self.name1, self.rg1)
self.staticapp_client.list_static_site_secrets.assert_called_once_with(resource_group_name=self.rg1, name=self.name1)
from ast import literal_eval
self.assertEqual(literal_eval(secret.__str__())["properties"]["apiKey"], "key")
def test_reset_staticsite_api_key(self):
from azure.mgmt.web.models import StringDictionary, StaticSiteResetPropertiesARMResource
self.staticapp_client.get_static_site.return_value = self.app1
self.staticapp_client.reset_static_site_api_key.return_value = StringDictionary(properties={"apiKey": "new_key"})
self.mock_cmd.get_models.return_value = StaticSiteResetPropertiesARMResource
secret = reset_staticsite_api_key(self.mock_cmd, self.name1, self.rg1)
self.staticapp_client.get_static_site.assert_called_once_with(self.rg1, self.name1)
self.mock_cmd.get_models.assert_called_once_with('StaticSiteResetPropertiesARMResource')
self.staticapp_client.reset_static_site_api_key.assert_called_once()
from ast import literal_eval
reset_envelope = literal_eval(self.staticapp_client.reset_static_site_api_key.call_args[1]["reset_properties_envelope"].__str__())
self.assertEqual(reset_envelope["repository_token"], self.token1)
def _set_up_client_mock(self):
self.mock_cmd = mock.MagicMock()
self.mock_cmd.cli_ctx = mock.MagicMock()
self.staticapp_client = mock.MagicMock()
client_factory_patcher = mock.patch(
'azure.cli.command_modules.appservice.static_sites._get_staticsites_client_factory', autospec=True)
self.addCleanup(client_factory_patcher.stop)
self.mock_static_site_client_factory = client_factory_patcher.start()
self.mock_static_site_client_factory.return_value = self.staticapp_client
def _set_up_fake_apps(self):
from azure.mgmt.web.models import StaticSiteCustomDomainRequestPropertiesARMResource
self.rg1 = 'rg1'
self.name1 = 'name1'
self.name1_not_exist = 'name1_not_exist'
self.location1 = 'location1'
self.source1 = 'https://github.com/Contoso/My-First-Static-App'
self.branch1 = 'dev'
self.token1 = 'TOKEN_1'
self.environment1 = 'default'
self.hostname1 = 'www.app1.com'
self.hostname1_validation = StaticSiteCustomDomainRequestPropertiesARMResource(validation_method="cname-delegation")
self.app1 = _contruct_static_site_object(
self.rg1, self.name1, self.location1,
self.source1, self.branch1, self.token1)
self.rg2 = 'rg2'
self.name2 = 'name2'
self.location2 = 'location2'
self.source2 = 'https://github.com/Contoso/My-Second-Static-App'
self.branch2 = 'master'
self.token2 = 'TOKEN_2'
self.environment1 = 'prod'
self.hostname1 = 'www.app2.com'
self.app2 = _contruct_static_site_object(
self.rg2, self.name2, self.location2,
self.source2, self.branch2, self.token2)
def _contruct_static_site_object(rg, app_name, location, source, branch, token):
from azure.mgmt.web.models import StaticSiteARMResource, SkuDescription
app = StaticSiteARMResource(
location=location,
repository_url=source,
branch=branch,
repository_token=token,
sku=SkuDescription(name='Free', tier='Free'))
app.name = app_name
app.id = \
"/subscriptions/sub/resourceGroups/{}/providers/Microsoft.Web/staticSites/{}".format(rg, app_name)
return app
def _mock_list_users_for_without_auth_provider(self, user_id, authentication_provider, user_details):
class User:
def __init__(self, name, provider, display_name):
self.name = name
self.provider = provider
self.display_name = display_name
user1 = User(user_id, authentication_provider, user_details)
user2 = User(user_id + '2', authentication_provider + '2', user_details + '2')
self.staticapp_client.list_static_site_users.return_value = [user1, user2]
| 50.871545 | 138 | 0.740555 |
import unittest
from unittest import mock
from azure.cli.command_modules.appservice.static_sites import \
list_staticsites, show_staticsite, delete_staticsite, create_staticsites, CLIError, disconnect_staticsite, \
reconnect_staticsite, list_staticsite_environments, show_staticsite_environment, list_staticsite_domains, \
set_staticsite_domain, delete_staticsite_domain, list_staticsite_functions, list_staticsite_function_app_settings, \
set_staticsite_function_app_settings, delete_staticsite_function_app_settings, list_staticsite_users, \
invite_staticsite_users, update_staticsite_users, update_staticsite, list_staticsite_secrets, \
reset_staticsite_api_key, delete_staticsite_environment
class TestStaticAppCommands(unittest.TestCase):
def setUp(self):
_set_up_client_mock(self)
_set_up_fake_apps(self)
def test_list_empty_staticapp(self):
self.staticapp_client.list.return_value = []
response = list_staticsites(self.mock_cmd)
self.assertEqual(len(response), 0)
def test_list_staticapp_with_resourcegroup(self):
self.staticapp_client.get_static_sites_by_resource_group.return_value = [self.app1]
response = list_staticsites(self.mock_cmd, self.rg1)
self.staticapp_client.get_static_sites_by_resource_group.assert_called_once_with(self.rg1)
self.assertEqual(len(response), 1)
self.assertIn(self.app1, response)
def test_list_staticapp_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
response = list_staticsites(self.mock_cmd)
self.assertEqual(len(response), 2)
self.assertIn(self.app1, response)
self.assertIn(self.app2, response)
def test_show_staticapp_with_resourcegroup(self):
self.staticapp_client.get_static_site.return_value = self.app1
response = show_staticsite(self.mock_cmd, self.name1, self.rg1)
self.staticapp_client.get_static_site.assert_called_once_with(self.rg1, self.name1)
self.assertEqual(self.app1, response)
def test_show_staticapp_without_resourcegroup(self):
self.staticapp_client.get_static_site.return_value = self.app1
self.staticapp_client.list.return_value = [self.app1, self.app2]
response = show_staticsite(self.mock_cmd, self.name1)
self.staticapp_client.get_static_site.assert_called_once_with(self.rg1, self.name1)
self.assertEqual(self.app1, response)
def test_show_staticapp_not_exist(self):
self.staticapp_client.get_static_site.return_value = self.app1
self.staticapp_client.list.return_value = [self.app1, self.app2]
with self.assertRaises(CLIError):
show_staticsite(self.mock_cmd, self.name1_not_exist)
def test_delete_staticapp_with_resourcegroup(self):
delete_staticsite(self.mock_cmd, self.name1, self.rg1)
self.staticapp_client.begin_delete_static_site.assert_called_once_with(resource_group_name=self.rg1, name=self.name1)
def test_delete_staticapp_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
delete_staticsite(self.mock_cmd, self.name1)
self.staticapp_client.begin_delete_static_site.assert_called_once_with(resource_group_name=self.rg1, name=self.name1)
def test_delete_staticapp_not_exist(self):
with self.assertRaises(CLIError):
delete_staticsite(self.mock_cmd, self.name1_not_exist)
def test_create_staticapp(self):
from azure.mgmt.web.models import StaticSiteARMResource, StaticSiteBuildProperties, SkuDescription
self.mock_cmd.get_models.return_value = StaticSiteARMResource, StaticSiteBuildProperties, SkuDescription
app_location = './src'
api_location = './api/'
output_location = '/.git/'
tags = {'key1': 'value1'}
create_staticsites(
self.mock_cmd, self.rg1, self.name1, self.location1,
self.source1, self.branch1, self.token1,
app_location=app_location, api_location=api_location, output_location=output_location,
tags=tags)
self.staticapp_client.begin_create_or_update_static_site.assert_called_once()
arg_list = self.staticapp_client.begin_create_or_update_static_site.call_args[1]
self.assertEqual(self.name1, arg_list["name"])
self.assertEqual(self.rg1, arg_list["resource_group_name"])
self.assertEqual(self.location1, arg_list["static_site_envelope"].location)
self.assertEqual(self.source1, arg_list["static_site_envelope"].repository_url)
self.assertEqual(self.branch1, arg_list["static_site_envelope"].branch)
self.assertEqual(tags, arg_list["static_site_envelope"].tags)
self.assertEqual('Free', arg_list["static_site_envelope"].sku.name)
self.assertEqual(app_location, arg_list["static_site_envelope"].build_properties.app_location)
self.assertEqual(api_location, arg_list["static_site_envelope"].build_properties.api_location)
self.assertEqual(output_location, arg_list["static_site_envelope"].build_properties.app_artifact_location)
def test_create_staticapp_with_standard_sku(self):
from azure.mgmt.web.models import StaticSiteARMResource, StaticSiteBuildProperties, SkuDescription
self.mock_cmd.get_models.return_value = StaticSiteARMResource, StaticSiteBuildProperties, SkuDescription
create_staticsites(
self.mock_cmd, self.rg1, self.name1, self.location1,
self.source1, self.branch1, self.token1, sku='standard')
self.staticapp_client.begin_create_or_update_static_site.assert_called_once()
arg_list = self.staticapp_client.begin_create_or_update_static_site.call_args[1]
self.assertEqual('Standard', arg_list["static_site_envelope"].sku.name)
def test_create_staticapp_missing_token(self):
app_location = './src'
api_location = './api/'
output_location = '/.git/'
tags = {'key1': 'value1'}
with self.assertRaises(CLIError):
create_staticsites(
self.mock_cmd, self.rg1, self.name1, self.location1,
self.source1, self.branch1,
app_location=app_location, api_location=api_location, output_location=output_location,
tags=tags)
def test_update_staticapp(self):
from azure.mgmt.web.models import StaticSiteARMResource, SkuDescription
self.mock_cmd.get_models.return_value = StaticSiteARMResource, SkuDescription
self.staticapp_client.get_static_site.return_value = self.app1
self.staticapp_client.list.return_value = [self.app1, self.app2]
tags = {'key1': 'value1'}
sku = 'Standard'
update_staticsite(self.mock_cmd, self.name1, self.source2, self.branch2, self.token2, tags=tags, sku=sku)
self.staticapp_client.update_static_site.assert_called_once()
arg_list = self.staticapp_client.update_static_site.call_args[1]
self.assertEqual(self.name1, arg_list["name"])
self.assertEqual(self.source2, arg_list["static_site_envelope"].repository_url)
self.assertEqual(self.branch2, arg_list["static_site_envelope"].branch)
self.assertEqual(self.token2, arg_list["static_site_envelope"].repository_token)
self.assertEqual(tags, arg_list["static_site_envelope"].tags)
self.assertEqual(sku, arg_list["static_site_envelope"].sku.name)
def test_update_staticapp_with_no_values_passed_in(self):
from azure.mgmt.web.models import StaticSiteARMResource, SkuDescription
self.mock_cmd.get_models.return_value = StaticSiteARMResource, SkuDescription
self.staticapp_client.get_static_site.return_value = self.app1
self.staticapp_client.list.return_value = [self.app1, self.app2]
update_staticsite(self.mock_cmd, self.name1)
self.staticapp_client.update_static_site.assert_called_once()
arg_list = self.staticapp_client.update_static_site.call_args[1]
self.assertEqual(self.name1, arg_list["name"])
self.assertEqual(self.source1, arg_list["static_site_envelope"].repository_url)
self.assertEqual(self.branch1, arg_list["static_site_envelope"].branch)
self.assertEqual(self.token1, arg_list["static_site_envelope"].repository_token)
self.assertEqual(self.app1.tags, arg_list["static_site_envelope"].tags)
self.assertEqual('Free', arg_list["static_site_envelope"].sku.name)
def test_update_staticapp_not_exist(self):
from azure.mgmt.web.models import StaticSiteARMResource, SkuDescription
self.mock_cmd.get_models.return_value = StaticSiteARMResource, SkuDescription
self.staticapp_client.get_static_site.return_value = self.app1
self.staticapp_client.list.return_value = [self.app1, self.app2]
with self.assertRaises(CLIError):
update_staticsite(self.mock_cmd, self.name1_not_exist)
def test_disconnect_staticapp_with_resourcegroup(self):
disconnect_staticsite(self.mock_cmd, self.name1, self.rg1)
self.staticapp_client.begin_detach_static_site.assert_called_once_with(resource_group_name=self.rg1, name=self.name1)
def test_disconnect_staticapp_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
disconnect_staticsite(self.mock_cmd, self.name1)
self.staticapp_client.begin_detach_static_site.assert_called_once_with(resource_group_name=self.rg1, name=self.name1)
@mock.patch('azure.cli.command_modules.appservice.static_sites.create_staticsites', autospec=True)
def test_reconnect_staticapp_with_resourcegroup(self, create_staticsites_mock):
self.staticapp_client.list.return_value = [self.app1, self.app2]
reconnect_staticsite(self.mock_cmd, self.name1, self.source1, self.branch1, self.token1,
resource_group_name=self.rg1)
create_staticsites_mock.assert_called_once_with(self.mock_cmd, self.rg1, self.name1, self.location1,
self.source1, self.branch1, self.token1, login_with_github=False, no_wait=False)
@mock.patch('azure.cli.command_modules.appservice.static_sites.create_staticsites', autospec=True)
def test_reconnect_staticapp_without_resourcegroup(self, create_staticsites_mock):
self.staticapp_client.list.return_value = [self.app1, self.app2]
reconnect_staticsite(self.mock_cmd, self.name1, self.source1, self.branch1, self.token1)
create_staticsites_mock.assert_called_once_with(self.mock_cmd, self.rg1, self.name1, self.location1,
self.source1, self.branch1, self.token1, login_with_github=False, no_wait=False)
def test_list_staticsite_environments_with_resourcegroup(self):
list_staticsite_environments(self.mock_cmd, self.name1, self.rg1)
self.staticapp_client.get_static_site_builds.assert_called_once_with(self.rg1, self.name1)
def test_list_staticsite_environments_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
list_staticsite_environments(self.mock_cmd, self.name1)
self.staticapp_client.get_static_site_builds.assert_called_once_with(self.rg1, self.name1)
def test_show_staticsite_environment_with_resourcegroup(self):
show_staticsite_environment(self.mock_cmd, self.name1, self.environment1, self.rg1)
self.staticapp_client.get_static_site_build.assert_called_once_with(self.rg1, self.name1, self.environment1)
def test_show_staticsite_environment_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
show_staticsite_environment(self.mock_cmd, self.name1, self.environment1)
self.staticapp_client.get_static_site_build.assert_called_once_with(self.rg1, self.name1, self.environment1)
def test_set_staticsite_domain_with_resourcegroup(self):
set_staticsite_domain(self.mock_cmd, self.name1, self.hostname1, self.rg1)
self.staticapp_client.begin_validate_custom_domain_can_be_added_to_static_site.assert_called_once_with(
self.rg1, self.name1, self.hostname1, self.hostname1_validation)
self.staticapp_client.begin_create_or_update_static_site_custom_domain.assert_called_once_with(
resource_group_name=self.rg1, name=self.name1, domain_name=self.hostname1,
static_site_custom_domain_request_properties_envelope=self.hostname1_validation)
def test_set_staticsite_domain_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
set_staticsite_domain(self.mock_cmd, self.name1, self.hostname1)
self.staticapp_client.begin_validate_custom_domain_can_be_added_to_static_site.assert_called_once_with(
self.rg1, self.name1, self.hostname1, self.hostname1_validation)
self.staticapp_client.begin_create_or_update_static_site_custom_domain.assert_called_once_with(
resource_group_name=self.rg1, name=self.name1, domain_name=self.hostname1,
static_site_custom_domain_request_properties_envelope=self.hostname1_validation)
def test_delete_staticsite_domain_with_resourcegroup(self):
delete_staticsite_domain(self.mock_cmd, self.name1, self.hostname1, self.rg1)
self.staticapp_client.begin_delete_static_site_custom_domain.assert_called_once_with(
resource_group_name=self.rg1, name=self.name1, domain_name=self.hostname1)
def test_delete_staticsite_domain_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
delete_staticsite_domain(self.mock_cmd, self.name1, self.hostname1)
self.staticapp_client.begin_delete_static_site_custom_domain.assert_called_once_with(
resource_group_name=self.rg1, name=self.name1, domain_name=self.hostname1)
def test_delete_staticsite_environment_with_resourcegroup(self):
delete_staticsite_environment(self.mock_cmd, self.name1, self.environment1, self.rg1)
self.staticapp_client.begin_delete_static_site_build.assert_called_once_with(self.rg1, self.name1, self.environment1)
def test_delete_staticsite_environment_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
delete_staticsite_environment(self.mock_cmd, self.name1, self.environment1)
self.staticapp_client.begin_delete_static_site_build.assert_called_once_with(self.rg1, self.name1, self.environment1)
def test_list_staticsite_functions_with_resourcegroup(self):
list_staticsite_functions(self.mock_cmd, self.name1, self.rg1, self.environment1)
self.staticapp_client.list_static_site_build_functions.assert_called_once_with(
self.rg1, self.name1, self.environment1)
def test_list_staticsite_functions_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
list_staticsite_functions(self.mock_cmd, self.name1, environment_name=self.environment1)
self.staticapp_client.list_static_site_build_functions.assert_called_once_with(
self.rg1, self.name1, self.environment1)
def test_list_staticsite_function_app_settings_with_resourcegroup(self):
list_staticsite_function_app_settings(self.mock_cmd, self.name1, self.rg1)
self.staticapp_client.list_static_site_function_app_settings.assert_called_once_with(
self.rg1, self.name1)
def test_list_staticsite_function_app_settings_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
list_staticsite_function_app_settings(self.mock_cmd, self.name1)
self.staticapp_client.list_static_site_function_app_settings.assert_called_once_with(
self.rg1, self.name1)
def test_set_staticsite_function_app_settings_with_resourcegroup(self):
app_settings1_input = ['key1=val1', 'key2=val2==', 'key3=val3=']
app_settings1_dict = {'key1': 'val1', 'key2': 'val2==', 'key3': 'val3='}
set_staticsite_function_app_settings(self.mock_cmd, self.name1, app_settings1_input, self.rg1)
self.staticapp_client.create_or_update_static_site_function_app_settings.assert_called_once_with(
self.rg1, self.name1, app_settings=app_settings1_dict)
def test_set_staticsite_function_app_settings_without_resourcegroup(self):
app_settings1_input = ['key1=val1', 'key2=val2==', 'key3=val3=']
app_settings1_dict = {'key1': 'val1', 'key2': 'val2==', 'key3': 'val3='}
self.staticapp_client.list.return_value = [self.app1, self.app2]
set_staticsite_function_app_settings(self.mock_cmd, self.name1, app_settings1_input)
self.staticapp_client.create_or_update_static_site_function_app_settings.assert_called_once_with(
self.rg1, self.name1, app_settings=app_settings1_dict)
def test_delete_staticsite_function_app_settings_with_resourcegroup(self):
current_app_settings = {'key1': 'val1', 'key2': 'val2'}
app_settings_keys_to_delete = ['key1']
updated_app_settings = {'key2': 'val2'}
class AppSettings:
properties = current_app_settings
self.staticapp_client.list_static_site_function_app_settings.return_value = AppSettings
delete_staticsite_function_app_settings(self.mock_cmd, self.name1, app_settings_keys_to_delete, self.rg1)
self.staticapp_client.create_or_update_static_site_function_app_settings.assert_called_once_with(
self.rg1, self.name1, app_settings=updated_app_settings)
def test_delete_staticsite_function_app_settings_without_resourcegroup(self):
current_app_settings = {'key1': 'val1', 'key2': 'val2'}
app_settings_keys_to_delete = ['key1']
updated_app_settings = {'key2': 'val2'}
class AppSettings:
properties = current_app_settings
self.staticapp_client.list_static_site_function_app_settings.return_value = AppSettings
self.staticapp_client.list.return_value = [self.app1, self.app2]
delete_staticsite_function_app_settings(self.mock_cmd, self.name1, app_settings_keys_to_delete)
self.staticapp_client.create_or_update_static_site_function_app_settings.assert_called_once_with(
self.rg1, self.name1, app_settings=updated_app_settings)
def test_list_staticsite_users_with_resourcegroup(self):
authentication_provider = 'GitHub'
list_staticsite_users(self.mock_cmd, self.name1, self.rg1, authentication_provider=authentication_provider)
self.staticapp_client.list_static_site_users.assert_called_once_with(
self.rg1, self.name1, authentication_provider)
def test_list_staticsite_users_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
authentication_provider = 'GitHub'
list_staticsite_users(self.mock_cmd, self.name1, authentication_provider=authentication_provider)
self.staticapp_client.list_static_site_users.assert_called_once_with(
self.rg1, self.name1, authentication_provider)
def test_invite_staticsite_users_with_resourcegroup(self):
authentication_provider = 'GitHub'
user_details = 'JohnDoe'
roles = 'Contributor,Reviewer'
invitation_expiration_in_hours = 2
from azure.mgmt.web.models import StaticSiteUserInvitationRequestResource
self.mock_cmd.get_models.return_value = StaticSiteUserInvitationRequestResource
invite_staticsite_users(self.mock_cmd, self.name1, authentication_provider, user_details, self.hostname1,
roles, invitation_expiration_in_hours, self.rg1)
arg_list = self.staticapp_client.create_user_roles_invitation_link.call_args[0]
self.assertEqual(self.rg1, arg_list[0])
self.assertEqual(self.name1, arg_list[1])
self.assertEqual(self.hostname1, arg_list[2].domain)
self.assertEqual(authentication_provider, arg_list[2].provider)
self.assertEqual(user_details, arg_list[2].user_details)
self.assertEqual(invitation_expiration_in_hours, arg_list[2].num_hours_to_expiration)
def test_invite_staticsite_users_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
authentication_provider = 'GitHub'
user_details = 'JohnDoe'
roles = 'Contributor,Reviewer'
invitation_expiration_in_hours = 2
from azure.mgmt.web.models import StaticSiteUserInvitationRequestResource
self.mock_cmd.get_models.return_value = StaticSiteUserInvitationRequestResource
invite_staticsite_users(self.mock_cmd, self.name1, authentication_provider, user_details, self.hostname1,
roles, invitation_expiration_in_hours)
arg_list = self.staticapp_client.create_user_roles_invitation_link.call_args[0]
self.assertEqual(self.rg1, arg_list[0])
self.assertEqual(self.name1, arg_list[1])
self.assertEqual(self.hostname1, arg_list[2].domain)
self.assertEqual(authentication_provider, arg_list[2].provider)
self.assertEqual(user_details, arg_list[2].user_details)
self.assertEqual(invitation_expiration_in_hours, arg_list[2].num_hours_to_expiration)
def test_update_staticsite_users_with_resourcegroup_with_all_args(self):
roles = 'Contributor,Reviewer'
authentication_provider = 'GitHub'
user_details = 'JohnDoe'
user_id = 100
update_staticsite_users(self.mock_cmd, self.name1, roles, authentication_provider=authentication_provider,
user_details=user_details, user_id=user_id, resource_group_name=self.rg1)
self.staticapp_client.update_static_site_user.assert_called_once_with(
self.rg1, self.name1, authentication_provider, user_id, roles=roles)
def test_update_staticsite_users_with_resourcegroup_without_auth_provider(self):
roles = 'Contributor,Reviewer'
user_details = 'JohnDoe'
authentication_provider = 'GitHub'
user_id = '100'
_mock_list_users_for_without_auth_provider(self, user_id, authentication_provider, user_details)
update_staticsite_users(self.mock_cmd, self.name1, roles,
user_details=user_details, user_id=user_id, resource_group_name=self.rg1)
self.staticapp_client.update_static_site_user.assert_called_once_with(
self.rg1, self.name1, authentication_provider, user_id, roles=roles)
def test_update_staticsite_users_with_resourcegroup_without_auth_provider_user_not_found(self):
roles = 'Contributor,Reviewer'
user_details = 'JohnDoe'
user_id = '100'
_mock_list_users_for_without_auth_provider(self, 'other_user_id',
'dummy_authentication_provider', 'dummy_user_details')
with self.assertRaises(CLIError):
update_staticsite_users(self.mock_cmd, self.name1, roles,
user_details=user_details, user_id=user_id, resource_group_name=self.rg1)
def test_update_staticsite_users_with_resourcegroup_without_user_id_without_auth_provider(self):
roles = 'Contributor,Reviewer'
user_details = 'JohnDoe'
authentication_provider = 'GitHub'
user_id = '100'
_mock_list_users_for_without_auth_provider(self, user_id, authentication_provider, user_details)
update_staticsite_users(self.mock_cmd, self.name1, roles,
user_details=user_details, resource_group_name=self.rg1)
self.staticapp_client.update_static_site_user.assert_called_once_with(
self.rg1, self.name1, authentication_provider, user_id, roles=roles)
def test_update_staticsite_users_with_resourcegroup_without_user_id_without_auth_provider_user_not_found(self):
roles = 'Contributor,Reviewer'
user_details = 'JohnDoe'
_mock_list_users_for_without_auth_provider(self, 'dummy_user_id', 'dummy_authentication_provider',
'other_user_details')
with self.assertRaises(CLIError):
update_staticsite_users(self.mock_cmd, self.name1, roles,
user_details=user_details, resource_group_name=self.rg1)
def test_update_staticsite_users_with_resourcegroup_without_user_id(self):
roles = 'Contributor,Reviewer'
user_details = 'JohnDoe'
authentication_provider = 'GitHub'
user_id = '100'
_mock_list_users_for_without_auth_provider(self, user_id, authentication_provider, user_details)
update_staticsite_users(self.mock_cmd, self.name1, roles, authentication_provider=authentication_provider,
user_details=user_details, resource_group_name=self.rg1)
self.staticapp_client.update_static_site_user.assert_called_once_with(
self.rg1, self.name1, authentication_provider, user_id, roles=roles)
def test_update_staticsite_users_with_resourcegroup_without_user_id_user_not_found(self):
roles = 'Contributor,Reviewer'
user_details = 'JohnDoe'
authentication_provider = 'GitHub'
_mock_list_users_for_without_auth_provider(self, 'dummy_user_id', 'dummy_authentication_provider',
'other_user_details')
with self.assertRaises(CLIError):
update_staticsite_users(self.mock_cmd, self.name1, roles, authentication_provider=authentication_provider,
user_details=user_details, resource_group_name=self.rg1)
def test_update_staticsite_users_with_resourcegroup_without_user_id_without_user_details(self):
roles = 'Contributor,Reviewer'
user_details = 'JohnDoe'
authentication_provider = 'GitHub'
user_id = '100'
_mock_list_users_for_without_auth_provider(self, user_id, authentication_provider, user_details)
with self.assertRaises(CLIError):
update_staticsite_users(self.mock_cmd, self.name1, roles, authentication_provider=authentication_provider,
resource_group_name=self.rg1)
def test_list_staticsite_secrets(self):
from azure.mgmt.web.models import StringDictionary
self.staticapp_client.list_static_site_secrets.return_value = StringDictionary(properties={"apiKey": "key"})
secret = list_staticsite_secrets(self.mock_cmd, self.name1, self.rg1)
self.staticapp_client.list_static_site_secrets.assert_called_once_with(resource_group_name=self.rg1, name=self.name1)
from ast import literal_eval
self.assertEqual(literal_eval(secret.__str__())["properties"]["apiKey"], "key")
def test_reset_staticsite_api_key(self):
from azure.mgmt.web.models import StringDictionary, StaticSiteResetPropertiesARMResource
self.staticapp_client.get_static_site.return_value = self.app1
self.staticapp_client.reset_static_site_api_key.return_value = StringDictionary(properties={"apiKey": "new_key"})
self.mock_cmd.get_models.return_value = StaticSiteResetPropertiesARMResource
secret = reset_staticsite_api_key(self.mock_cmd, self.name1, self.rg1)
self.staticapp_client.get_static_site.assert_called_once_with(self.rg1, self.name1)
self.mock_cmd.get_models.assert_called_once_with('StaticSiteResetPropertiesARMResource')
self.staticapp_client.reset_static_site_api_key.assert_called_once()
from ast import literal_eval
reset_envelope = literal_eval(self.staticapp_client.reset_static_site_api_key.call_args[1]["reset_properties_envelope"].__str__())
self.assertEqual(reset_envelope["repository_token"], self.token1)
def _set_up_client_mock(self):
self.mock_cmd = mock.MagicMock()
self.mock_cmd.cli_ctx = mock.MagicMock()
self.staticapp_client = mock.MagicMock()
client_factory_patcher = mock.patch(
'azure.cli.command_modules.appservice.static_sites._get_staticsites_client_factory', autospec=True)
self.addCleanup(client_factory_patcher.stop)
self.mock_static_site_client_factory = client_factory_patcher.start()
self.mock_static_site_client_factory.return_value = self.staticapp_client
def _set_up_fake_apps(self):
from azure.mgmt.web.models import StaticSiteCustomDomainRequestPropertiesARMResource
self.rg1 = 'rg1'
self.name1 = 'name1'
self.name1_not_exist = 'name1_not_exist'
self.location1 = 'location1'
self.source1 = 'https://github.com/Contoso/My-First-Static-App'
self.branch1 = 'dev'
self.token1 = 'TOKEN_1'
self.environment1 = 'default'
self.hostname1 = 'www.app1.com'
self.hostname1_validation = StaticSiteCustomDomainRequestPropertiesARMResource(validation_method="cname-delegation")
self.app1 = _contruct_static_site_object(
self.rg1, self.name1, self.location1,
self.source1, self.branch1, self.token1)
self.rg2 = 'rg2'
self.name2 = 'name2'
self.location2 = 'location2'
self.source2 = 'https://github.com/Contoso/My-Second-Static-App'
self.branch2 = 'master'
self.token2 = 'TOKEN_2'
self.environment1 = 'prod'
self.hostname1 = 'www.app2.com'
self.app2 = _contruct_static_site_object(
self.rg2, self.name2, self.location2,
self.source2, self.branch2, self.token2)
def _contruct_static_site_object(rg, app_name, location, source, branch, token):
from azure.mgmt.web.models import StaticSiteARMResource, SkuDescription
app = StaticSiteARMResource(
location=location,
repository_url=source,
branch=branch,
repository_token=token,
sku=SkuDescription(name='Free', tier='Free'))
app.name = app_name
app.id = \
"/subscriptions/sub/resourceGroups/{}/providers/Microsoft.Web/staticSites/{}".format(rg, app_name)
return app
def _mock_list_users_for_without_auth_provider(self, user_id, authentication_provider, user_details):
class User:
def __init__(self, name, provider, display_name):
self.name = name
self.provider = provider
self.display_name = display_name
user1 = User(user_id, authentication_provider, user_details)
user2 = User(user_id + '2', authentication_provider + '2', user_details + '2')
self.staticapp_client.list_static_site_users.return_value = [user1, user2]
| true | true |
1c2f4d74c7cdd282900ad475bba9d5debd255c19 | 1,809 | py | Python | tensorflow/python/eager/tensor.py | SWMaestro8th/tensorflow | 084d29e67a72e369958c18ae6abfe2752fcddcbf | [
"Apache-2.0"
] | 5 | 2021-01-11T01:51:57.000Z | 2021-12-11T17:19:08.000Z | tensorflow/python/eager/tensor.py | SWMaestro8th/tensorflow | 084d29e67a72e369958c18ae6abfe2752fcddcbf | [
"Apache-2.0"
] | null | null | null | tensorflow/python/eager/tensor.py | SWMaestro8th/tensorflow | 084d29e67a72e369958c18ae6abfe2752fcddcbf | [
"Apache-2.0"
] | 3 | 2020-07-02T13:46:32.000Z | 2021-01-11T01:52:01.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for TensorFlow's "Eager" mode of execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
# TODO(agarwal): get rid of this import and change callers to use the classes in
# ops.py.
# pylint: disable=unused-import
from tensorflow.python.framework.ops import _tensor_from_handle
from tensorflow.python.framework.ops import convert_n_to_eager_tensor
from tensorflow.python.framework.ops import convert_to_eager_tensor
from tensorflow.python.framework.ops import EagerTensor as Tensor
# pylint: enable=unused-import
class _Op(object):
"""Fake op for _LazyZero to make its python API tf.Tensor-like."""
def __init__(self):
self.type = "Zeros"
class LazyZero(object):
"""Lazily-instantiated zero-valued Tensor used as autograd accumulator."""
def __init__(self, shape, dtype):
self.shape = shape
self.dtype = dtype
self.op = _Op()
def __add__(self, other):
return other
def __radd__(self, other):
return other
def numpy(self):
return np.zeros(self.shape, self.dtype)
| 32.303571 | 80 | 0.726921 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework.ops import _tensor_from_handle
from tensorflow.python.framework.ops import convert_n_to_eager_tensor
from tensorflow.python.framework.ops import convert_to_eager_tensor
from tensorflow.python.framework.ops import EagerTensor as Tensor
class _Op(object):
def __init__(self):
self.type = "Zeros"
class LazyZero(object):
def __init__(self, shape, dtype):
self.shape = shape
self.dtype = dtype
self.op = _Op()
def __add__(self, other):
return other
def __radd__(self, other):
return other
def numpy(self):
return np.zeros(self.shape, self.dtype)
| true | true |
1c2f4f06d496fb200aa0f3fdf76cf1b97fd86cff | 485 | py | Python | binary_search_trees/reconstruct_bst.py | maanavshah/coding-interview | 4c842cdbc6870da79684635f379966d1caec2162 | [
"MIT"
] | null | null | null | binary_search_trees/reconstruct_bst.py | maanavshah/coding-interview | 4c842cdbc6870da79684635f379966d1caec2162 | [
"MIT"
] | null | null | null | binary_search_trees/reconstruct_bst.py | maanavshah/coding-interview | 4c842cdbc6870da79684635f379966d1caec2162 | [
"MIT"
] | null | null | null | # O(n^2) time | O(n) space
def reconstructBst(array):
if len(array) == 0:
return None
currValue = array[0]
rightSubTreeIdx = len(array)
for idx in range(1, len(array)):
value = array[idx]
if value >= currValue:
rightSubTreeIdx = idx
break
leftSubTree = reconstructBst(array[1:rightSubTreeIdx])
rightSubTree = reconstructBst(array[rightSubTreeIdx:])
return BST(currValue, leftSubTree, rightSubTree)
| 26.944444 | 58 | 0.62268 |
def reconstructBst(array):
if len(array) == 0:
return None
currValue = array[0]
rightSubTreeIdx = len(array)
for idx in range(1, len(array)):
value = array[idx]
if value >= currValue:
rightSubTreeIdx = idx
break
leftSubTree = reconstructBst(array[1:rightSubTreeIdx])
rightSubTree = reconstructBst(array[rightSubTreeIdx:])
return BST(currValue, leftSubTree, rightSubTree)
| true | true |
1c2f4f6962078375353088f4b4bd41164c5cbe9d | 1,066 | py | Python | container-scripts/set-credentials.py | companieshouse/chips-tuxedo-proxy-domain | 3406f0197e8060648e5751eb453fb4601a82ee1c | [
"MIT"
] | null | null | null | container-scripts/set-credentials.py | companieshouse/chips-tuxedo-proxy-domain | 3406f0197e8060648e5751eb453fb4601a82ee1c | [
"MIT"
] | null | null | null | container-scripts/set-credentials.py | companieshouse/chips-tuxedo-proxy-domain | 3406f0197e8060648e5751eb453fb4601a82ee1c | [
"MIT"
] | null | null | null |
domain_name = os.environ.get("DOMAIN_NAME", "wldomain")
admin_name = os.environ.get("ADMIN_NAME", "wladmin")
admin_pass = os.environ.get("ADMIN_PASSWORD")
domain_path = '/apps/oracle/%s' % domain_name
domain_credential = os.environ.get("DOMAIN_CREDENTIAL", "domain_credential")
ldap_credential = os.environ.get("LDAP_CREDENTIAL", "ldap_credential")
print('domain_name : [%s]' % domain_name);
print('admin_name : [%s]' % admin_name);
print('domain_path : [%s]' % domain_path);
# Open the domain
# ======================
readDomain(domain_path)
# Set the domain credential
cd('/SecurityConfiguration/' + domain_name)
set('CredentialEncrypted', encrypt(domain_credential, domain_path))
# Set the Node Manager user name and password
set('NodeManagerUsername', 'weblogic')
set('NodeManagerPasswordEncrypted', admin_pass)
# Set the Embedded LDAP server credential
cd('/EmbeddedLDAP/' + domain_name)
set('CredentialEncrypted', encrypt(ldap_credential, domain_path))
# Write Domain
# ============
updateDomain()
closeDomain()
# Exit WLST
# =========
exit()
| 28.810811 | 77 | 0.718574 |
domain_name = os.environ.get("DOMAIN_NAME", "wldomain")
admin_name = os.environ.get("ADMIN_NAME", "wladmin")
admin_pass = os.environ.get("ADMIN_PASSWORD")
domain_path = '/apps/oracle/%s' % domain_name
domain_credential = os.environ.get("DOMAIN_CREDENTIAL", "domain_credential")
ldap_credential = os.environ.get("LDAP_CREDENTIAL", "ldap_credential")
print('domain_name : [%s]' % domain_name);
print('admin_name : [%s]' % admin_name);
print('domain_path : [%s]' % domain_path);
readDomain(domain_path)
cd('/SecurityConfiguration/' + domain_name)
set('CredentialEncrypted', encrypt(domain_credential, domain_path))
set('NodeManagerUsername', 'weblogic')
set('NodeManagerPasswordEncrypted', admin_pass)
cd('/EmbeddedLDAP/' + domain_name)
set('CredentialEncrypted', encrypt(ldap_credential, domain_path))
updateDomain()
closeDomain()
exit()
| true | true |
1c2f512c329b877cdf63760a5622029c361a14bf | 3,397 | py | Python | views.py | audacious-software/Simple-Messaging-Django | 7cdcd700ce828abe9e2bb30c467d510315f2bbd3 | [
"Apache-2.0"
] | null | null | null | views.py | audacious-software/Simple-Messaging-Django | 7cdcd700ce828abe9e2bb30c467d510315f2bbd3 | [
"Apache-2.0"
] | 4 | 2020-06-20T15:42:13.000Z | 2022-02-23T21:28:35.000Z | views.py | audacious-software/Simple-Messaging-Django | 7cdcd700ce828abe9e2bb30c467d510315f2bbd3 | [
"Apache-2.0"
] | null | null | null | # pylint: disable=no-member, line-too-long
import importlib
import json
import arrow
import phonenumbers
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.core.management import call_command
from django.http import Http404, HttpResponse
from django.shortcuts import render
from django.utils import timezone
from django.views.decorators.csrf import csrf_exempt
from .models import IncomingMessage, OutgoingMessage
@csrf_exempt
def incoming_message_request(request):
for app in settings.INSTALLED_APPS:
try:
response_module = importlib.import_module('.simple_messaging_api', package=app)
return response_module.process_incoming_request(request)
except ImportError:
pass
except AttributeError:
pass
raise Http404("No module found to process incoming message.")
@staff_member_required
def simple_messaging_ui(request):
return render(request, 'simple_messaging_ui.html')
@staff_member_required
def simple_messaging_messages_json(request):
messages = []
if request.method == 'POST':
phone = request.POST.get('phone', '')
since = float(request.POST.get('since', '0'))
start_time = arrow.get(since).datetime
parsed = phonenumbers.parse(phone, settings.PHONE_REGION)
destination = phonenumbers.format_number(parsed, phonenumbers.PhoneNumberFormat.E164)
for message in IncomingMessage.objects.filter(receive_date__gte=start_time):
if message.current_sender() == destination:
messages.append({
'direction': 'from-user',
'sender': destination,
'message': message.current_message(),
'timestamp': arrow.get(message.receive_date).float_timestamp
})
for message in OutgoingMessage.objects.filter(sent_date__gte=start_time):
if message.current_destination() == destination:
messages.append({
'direction': 'from-system',
'recipient': destination,
'message': message.current_message(),
'timestamp': arrow.get(message.sent_date).float_timestamp
})
return HttpResponse(json.dumps(messages, indent=2), content_type='application/json', status=200)
@staff_member_required
def simple_messaging_send_json(request):
result = {
'success': False
}
if request.method == 'POST':
phone = request.POST.get('phone', '')
message = request.POST.get('message', '')
parsed = phonenumbers.parse(phone, settings.PHONE_REGION)
destination = phonenumbers.format_number(parsed, phonenumbers.PhoneNumberFormat.E164)
outgoing = OutgoingMessage.objects.create(destination=destination, send_date=timezone.now(), message=message)
outgoing.encrypt_message()
outgoing.encrypt_destination()
call_command('simple_messaging_send_pending_messages')
outgoing = OutgoingMessage.objects.get(pk=outgoing.pk)
if outgoing.errored is False:
result['success'] = True
else:
result['error'] = 'Unable to send message, please investigate.'
return HttpResponse(json.dumps(result, indent=2), content_type='application/json', status=200)
| 34.663265 | 117 | 0.678246 |
import importlib
import json
import arrow
import phonenumbers
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.core.management import call_command
from django.http import Http404, HttpResponse
from django.shortcuts import render
from django.utils import timezone
from django.views.decorators.csrf import csrf_exempt
from .models import IncomingMessage, OutgoingMessage
@csrf_exempt
def incoming_message_request(request):
for app in settings.INSTALLED_APPS:
try:
response_module = importlib.import_module('.simple_messaging_api', package=app)
return response_module.process_incoming_request(request)
except ImportError:
pass
except AttributeError:
pass
raise Http404("No module found to process incoming message.")
@staff_member_required
def simple_messaging_ui(request):
return render(request, 'simple_messaging_ui.html')
@staff_member_required
def simple_messaging_messages_json(request):
messages = []
if request.method == 'POST':
phone = request.POST.get('phone', '')
since = float(request.POST.get('since', '0'))
start_time = arrow.get(since).datetime
parsed = phonenumbers.parse(phone, settings.PHONE_REGION)
destination = phonenumbers.format_number(parsed, phonenumbers.PhoneNumberFormat.E164)
for message in IncomingMessage.objects.filter(receive_date__gte=start_time):
if message.current_sender() == destination:
messages.append({
'direction': 'from-user',
'sender': destination,
'message': message.current_message(),
'timestamp': arrow.get(message.receive_date).float_timestamp
})
for message in OutgoingMessage.objects.filter(sent_date__gte=start_time):
if message.current_destination() == destination:
messages.append({
'direction': 'from-system',
'recipient': destination,
'message': message.current_message(),
'timestamp': arrow.get(message.sent_date).float_timestamp
})
return HttpResponse(json.dumps(messages, indent=2), content_type='application/json', status=200)
@staff_member_required
def simple_messaging_send_json(request):
result = {
'success': False
}
if request.method == 'POST':
phone = request.POST.get('phone', '')
message = request.POST.get('message', '')
parsed = phonenumbers.parse(phone, settings.PHONE_REGION)
destination = phonenumbers.format_number(parsed, phonenumbers.PhoneNumberFormat.E164)
outgoing = OutgoingMessage.objects.create(destination=destination, send_date=timezone.now(), message=message)
outgoing.encrypt_message()
outgoing.encrypt_destination()
call_command('simple_messaging_send_pending_messages')
outgoing = OutgoingMessage.objects.get(pk=outgoing.pk)
if outgoing.errored is False:
result['success'] = True
else:
result['error'] = 'Unable to send message, please investigate.'
return HttpResponse(json.dumps(result, indent=2), content_type='application/json', status=200)
| true | true |
1c2f51e30a52edf948c881404405939a9de755d9 | 2,180 | py | Python | samples/server/petstore/python-fastapi/src/openapi_server/security_api.py | pgadura/openapi-generator | 3c866fb4a34e86d0ea2fef401a30206d7452bd2a | [
"Apache-2.0"
] | 1 | 2021-06-01T18:55:58.000Z | 2021-06-01T18:55:58.000Z | samples/server/petstore/python-fastapi/src/openapi_server/security_api.py | pgadura/openapi-generator | 3c866fb4a34e86d0ea2fef401a30206d7452bd2a | [
"Apache-2.0"
] | null | null | null | samples/server/petstore/python-fastapi/src/openapi_server/security_api.py | pgadura/openapi-generator | 3c866fb4a34e86d0ea2fef401a30206d7452bd2a | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
from typing import List
from fastapi import APIRouter, Depends, Response, Security, status
from fastapi.openapi.models import OAuthFlowImplicit, OAuthFlows
from fastapi.security import (
HTTPAuthorizationCredentials,
HTTPBasic,
HTTPBasicCredentials,
HTTPBearer,
OAuth2,
OAuth2AuthorizationCodeBearer,
OAuth2PasswordBearer,
SecurityScopes,
)
from fastapi.security.api_key import APIKey, APIKeyCookie, APIKeyHeader, APIKeyQuery
from openapi_server.models.extra_models import TokenModel
def get_token_api_key(
token_api_key_header: str = Security(
APIKeyHeader(name="api_key", auto_error=False)
),
) -> TokenModel:
"""
Check and retrieve authentication information from api_key.
:param token_api_key_header API key provided by Authorization[api_key] header
:type token_api_key_header: str
:return: Information attached to provided api_key or None if api_key is invalid or does not allow access to called API
:rtype: TokenModel | None
"""
...
oauth2_implicit = OAuth2(
flows=OAuthFlows(
implicit=OAuthFlowImplicit(
authorizationUrl="http://petstore.swagger.io/api/oauth/dialog",
scopes={
"write:pets": "modify pets in your account",
"read:pets": "read your pets",
}
)
)
)
def get_token_petstore_auth(
security_scopes: SecurityScopes, token: str = Depends(oauth2_implicit)
) -> TokenModel:
"""
Validate and decode token.
:param token Token provided by Authorization header
:type token: str
:return: Decoded token information or None if token is invalid
:rtype: TokenModel | None
"""
...
def validate_scope_petstore_auth(
required_scopes: SecurityScopes, token_scopes: List[str]
) -> bool:
"""
Validate required scopes are included in token scope
:param required_scopes Required scope to access called API
:type required_scopes: List[str]
:param token_scopes Scope present in token
:type token_scopes: List[str]
:return: True if access to called API is allowed
:rtype: bool
"""
return False
| 25.952381 | 122 | 0.698624 |
from typing import List
from fastapi import APIRouter, Depends, Response, Security, status
from fastapi.openapi.models import OAuthFlowImplicit, OAuthFlows
from fastapi.security import (
HTTPAuthorizationCredentials,
HTTPBasic,
HTTPBasicCredentials,
HTTPBearer,
OAuth2,
OAuth2AuthorizationCodeBearer,
OAuth2PasswordBearer,
SecurityScopes,
)
from fastapi.security.api_key import APIKey, APIKeyCookie, APIKeyHeader, APIKeyQuery
from openapi_server.models.extra_models import TokenModel
def get_token_api_key(
token_api_key_header: str = Security(
APIKeyHeader(name="api_key", auto_error=False)
),
) -> TokenModel:
...
oauth2_implicit = OAuth2(
flows=OAuthFlows(
implicit=OAuthFlowImplicit(
authorizationUrl="http://petstore.swagger.io/api/oauth/dialog",
scopes={
"write:pets": "modify pets in your account",
"read:pets": "read your pets",
}
)
)
)
def get_token_petstore_auth(
security_scopes: SecurityScopes, token: str = Depends(oauth2_implicit)
) -> TokenModel:
...
def validate_scope_petstore_auth(
required_scopes: SecurityScopes, token_scopes: List[str]
) -> bool:
return False
| true | true |
1c2f52c6038e6acd197d60dd2e1f55aea0bdf4e4 | 7,922 | py | Python | homeassistant/components/zha/fan.py | liangleslie/core | cc807b4d597daaaadc92df4a93c6e30da4f570c6 | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | homeassistant/components/zha/fan.py | liangleslie/core | cc807b4d597daaaadc92df4a93c6e30da4f570c6 | [
"Apache-2.0"
] | 24,710 | 2016-04-13T08:27:26.000Z | 2020-03-02T12:59:13.000Z | homeassistant/components/zha/fan.py | liangleslie/core | cc807b4d597daaaadc92df4a93c6e30da4f570c6 | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Fans on Zigbee Home Automation networks."""
from __future__ import annotations
from abc import abstractmethod
import functools
import math
from zigpy.exceptions import ZigbeeException
from zigpy.zcl.clusters import hvac
from homeassistant.components.fan import (
ATTR_PERCENTAGE,
ATTR_PRESET_MODE,
FanEntity,
FanEntityFeature,
NotValidPresetModeError,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import STATE_UNAVAILABLE, Platform
from homeassistant.core import HomeAssistant, State, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.util.percentage import (
int_states_in_range,
percentage_to_ranged_value,
ranged_value_to_percentage,
)
from .core import discovery
from .core.const import CHANNEL_FAN, DATA_ZHA, SIGNAL_ADD_ENTITIES, SIGNAL_ATTR_UPDATED
from .core.registries import ZHA_ENTITIES
from .entity import ZhaEntity, ZhaGroupEntity
# Additional speeds in zigbee's ZCL
# Spec is unclear as to what this value means. On King Of Fans HBUniversal
# receiver, this means Very High.
PRESET_MODE_ON = "on"
# The fan speed is self-regulated
PRESET_MODE_AUTO = "auto"
# When the heated/cooled space is occupied, the fan is always on
PRESET_MODE_SMART = "smart"
SPEED_RANGE = (1, 3) # off is not included
PRESET_MODES_TO_NAME = {4: PRESET_MODE_ON, 5: PRESET_MODE_AUTO, 6: PRESET_MODE_SMART}
NAME_TO_PRESET_MODE = {v: k for k, v in PRESET_MODES_TO_NAME.items()}
PRESET_MODES = list(NAME_TO_PRESET_MODE)
DEFAULT_ON_PERCENTAGE = 50
STRICT_MATCH = functools.partial(ZHA_ENTITIES.strict_match, Platform.FAN)
GROUP_MATCH = functools.partial(ZHA_ENTITIES.group_match, Platform.FAN)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the Zigbee Home Automation fan from config entry."""
entities_to_create = hass.data[DATA_ZHA][Platform.FAN]
unsub = async_dispatcher_connect(
hass,
SIGNAL_ADD_ENTITIES,
functools.partial(
discovery.async_add_entities,
async_add_entities,
entities_to_create,
),
)
config_entry.async_on_unload(unsub)
class BaseFan(FanEntity):
"""Base representation of a ZHA fan."""
_attr_supported_features = FanEntityFeature.SET_SPEED
@property
def preset_modes(self) -> list[str]:
"""Return the available preset modes."""
return PRESET_MODES
@property
def speed_count(self) -> int:
"""Return the number of speeds the fan supports."""
return int_states_in_range(SPEED_RANGE)
async def async_turn_on(self, percentage=None, preset_mode=None, **kwargs) -> None:
"""Turn the entity on."""
if percentage is None:
percentage = DEFAULT_ON_PERCENTAGE
await self.async_set_percentage(percentage)
async def async_turn_off(self, **kwargs) -> None:
"""Turn the entity off."""
await self.async_set_percentage(0)
async def async_set_percentage(self, percentage: int | None) -> None:
"""Set the speed percenage of the fan."""
fan_mode = math.ceil(percentage_to_ranged_value(SPEED_RANGE, percentage))
await self._async_set_fan_mode(fan_mode)
async def async_set_preset_mode(self, preset_mode: str) -> None:
"""Set the preset mode for the fan."""
if preset_mode not in self.preset_modes:
raise NotValidPresetModeError(
f"The preset_mode {preset_mode} is not a valid preset_mode: {self.preset_modes}"
)
await self._async_set_fan_mode(NAME_TO_PRESET_MODE[preset_mode])
@abstractmethod
async def _async_set_fan_mode(self, fan_mode: int) -> None:
"""Set the fan mode for the fan."""
@callback
def async_set_state(self, attr_id, attr_name, value):
"""Handle state update from channel."""
@STRICT_MATCH(channel_names=CHANNEL_FAN)
class ZhaFan(BaseFan, ZhaEntity):
"""Representation of a ZHA fan."""
def __init__(self, unique_id, zha_device, channels, **kwargs):
"""Init this sensor."""
super().__init__(unique_id, zha_device, channels, **kwargs)
self._fan_channel = self.cluster_channels.get(CHANNEL_FAN)
async def async_added_to_hass(self):
"""Run when about to be added to hass."""
await super().async_added_to_hass()
self.async_accept_signal(
self._fan_channel, SIGNAL_ATTR_UPDATED, self.async_set_state
)
@property
def percentage(self) -> int | None:
"""Return the current speed percentage."""
if (
self._fan_channel.fan_mode is None
or self._fan_channel.fan_mode > SPEED_RANGE[1]
):
return None
if self._fan_channel.fan_mode == 0:
return 0
return ranged_value_to_percentage(SPEED_RANGE, self._fan_channel.fan_mode)
@property
def preset_mode(self) -> str | None:
"""Return the current preset mode."""
return PRESET_MODES_TO_NAME.get(self._fan_channel.fan_mode)
@callback
def async_set_state(self, attr_id, attr_name, value):
"""Handle state update from channel."""
self.async_write_ha_state()
async def _async_set_fan_mode(self, fan_mode: int) -> None:
"""Set the fan mode for the fan."""
await self._fan_channel.async_set_speed(fan_mode)
self.async_set_state(0, "fan_mode", fan_mode)
@GROUP_MATCH()
class FanGroup(BaseFan, ZhaGroupEntity):
"""Representation of a fan group."""
def __init__(
self, entity_ids: list[str], unique_id: str, group_id: int, zha_device, **kwargs
) -> None:
"""Initialize a fan group."""
super().__init__(entity_ids, unique_id, group_id, zha_device, **kwargs)
self._available: bool = False
group = self.zha_device.gateway.get_group(self._group_id)
self._fan_channel = group.endpoint[hvac.Fan.cluster_id]
self._percentage = None
self._preset_mode = None
@property
def percentage(self) -> int | None:
"""Return the current speed percentage."""
return self._percentage
@property
def preset_mode(self) -> str | None:
"""Return the current preset mode."""
return self._preset_mode
async def _async_set_fan_mode(self, fan_mode: int) -> None:
"""Set the fan mode for the group."""
try:
await self._fan_channel.write_attributes({"fan_mode": fan_mode})
except ZigbeeException as ex:
self.error("Could not set fan mode: %s", ex)
self.async_set_state(0, "fan_mode", fan_mode)
async def async_update(self):
"""Attempt to retrieve on off state from the fan."""
all_states = [self.hass.states.get(x) for x in self._entity_ids]
states: list[State] = list(filter(None, all_states))
percentage_states: list[State] = [
state for state in states if state.attributes.get(ATTR_PERCENTAGE)
]
preset_mode_states: list[State] = [
state for state in states if state.attributes.get(ATTR_PRESET_MODE)
]
self._available = any(state.state != STATE_UNAVAILABLE for state in states)
if percentage_states:
self._percentage = percentage_states[0].attributes[ATTR_PERCENTAGE]
self._preset_mode = None
elif preset_mode_states:
self._preset_mode = preset_mode_states[0].attributes[ATTR_PRESET_MODE]
self._percentage = None
else:
self._percentage = None
self._preset_mode = None
async def async_added_to_hass(self) -> None:
"""Run when about to be added to hass."""
await self.async_update()
await super().async_added_to_hass()
| 35.208889 | 96 | 0.686317 | from __future__ import annotations
from abc import abstractmethod
import functools
import math
from zigpy.exceptions import ZigbeeException
from zigpy.zcl.clusters import hvac
from homeassistant.components.fan import (
ATTR_PERCENTAGE,
ATTR_PRESET_MODE,
FanEntity,
FanEntityFeature,
NotValidPresetModeError,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import STATE_UNAVAILABLE, Platform
from homeassistant.core import HomeAssistant, State, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.util.percentage import (
int_states_in_range,
percentage_to_ranged_value,
ranged_value_to_percentage,
)
from .core import discovery
from .core.const import CHANNEL_FAN, DATA_ZHA, SIGNAL_ADD_ENTITIES, SIGNAL_ATTR_UPDATED
from .core.registries import ZHA_ENTITIES
from .entity import ZhaEntity, ZhaGroupEntity
# Spec is unclear as to what this value means. On King Of Fans HBUniversal
# receiver, this means Very High.
PRESET_MODE_ON = "on"
# The fan speed is self-regulated
PRESET_MODE_AUTO = "auto"
# When the heated/cooled space is occupied, the fan is always on
PRESET_MODE_SMART = "smart"
SPEED_RANGE = (1, 3) # off is not included
PRESET_MODES_TO_NAME = {4: PRESET_MODE_ON, 5: PRESET_MODE_AUTO, 6: PRESET_MODE_SMART}
NAME_TO_PRESET_MODE = {v: k for k, v in PRESET_MODES_TO_NAME.items()}
PRESET_MODES = list(NAME_TO_PRESET_MODE)
DEFAULT_ON_PERCENTAGE = 50
STRICT_MATCH = functools.partial(ZHA_ENTITIES.strict_match, Platform.FAN)
GROUP_MATCH = functools.partial(ZHA_ENTITIES.group_match, Platform.FAN)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
entities_to_create = hass.data[DATA_ZHA][Platform.FAN]
unsub = async_dispatcher_connect(
hass,
SIGNAL_ADD_ENTITIES,
functools.partial(
discovery.async_add_entities,
async_add_entities,
entities_to_create,
),
)
config_entry.async_on_unload(unsub)
class BaseFan(FanEntity):
_attr_supported_features = FanEntityFeature.SET_SPEED
@property
def preset_modes(self) -> list[str]:
return PRESET_MODES
@property
def speed_count(self) -> int:
return int_states_in_range(SPEED_RANGE)
async def async_turn_on(self, percentage=None, preset_mode=None, **kwargs) -> None:
if percentage is None:
percentage = DEFAULT_ON_PERCENTAGE
await self.async_set_percentage(percentage)
async def async_turn_off(self, **kwargs) -> None:
await self.async_set_percentage(0)
async def async_set_percentage(self, percentage: int | None) -> None:
fan_mode = math.ceil(percentage_to_ranged_value(SPEED_RANGE, percentage))
await self._async_set_fan_mode(fan_mode)
async def async_set_preset_mode(self, preset_mode: str) -> None:
if preset_mode not in self.preset_modes:
raise NotValidPresetModeError(
f"The preset_mode {preset_mode} is not a valid preset_mode: {self.preset_modes}"
)
await self._async_set_fan_mode(NAME_TO_PRESET_MODE[preset_mode])
@abstractmethod
async def _async_set_fan_mode(self, fan_mode: int) -> None:
@callback
def async_set_state(self, attr_id, attr_name, value):
@STRICT_MATCH(channel_names=CHANNEL_FAN)
class ZhaFan(BaseFan, ZhaEntity):
def __init__(self, unique_id, zha_device, channels, **kwargs):
super().__init__(unique_id, zha_device, channels, **kwargs)
self._fan_channel = self.cluster_channels.get(CHANNEL_FAN)
async def async_added_to_hass(self):
await super().async_added_to_hass()
self.async_accept_signal(
self._fan_channel, SIGNAL_ATTR_UPDATED, self.async_set_state
)
@property
def percentage(self) -> int | None:
if (
self._fan_channel.fan_mode is None
or self._fan_channel.fan_mode > SPEED_RANGE[1]
):
return None
if self._fan_channel.fan_mode == 0:
return 0
return ranged_value_to_percentage(SPEED_RANGE, self._fan_channel.fan_mode)
@property
def preset_mode(self) -> str | None:
return PRESET_MODES_TO_NAME.get(self._fan_channel.fan_mode)
@callback
def async_set_state(self, attr_id, attr_name, value):
self.async_write_ha_state()
async def _async_set_fan_mode(self, fan_mode: int) -> None:
await self._fan_channel.async_set_speed(fan_mode)
self.async_set_state(0, "fan_mode", fan_mode)
@GROUP_MATCH()
class FanGroup(BaseFan, ZhaGroupEntity):
def __init__(
self, entity_ids: list[str], unique_id: str, group_id: int, zha_device, **kwargs
) -> None:
super().__init__(entity_ids, unique_id, group_id, zha_device, **kwargs)
self._available: bool = False
group = self.zha_device.gateway.get_group(self._group_id)
self._fan_channel = group.endpoint[hvac.Fan.cluster_id]
self._percentage = None
self._preset_mode = None
@property
def percentage(self) -> int | None:
return self._percentage
@property
def preset_mode(self) -> str | None:
return self._preset_mode
async def _async_set_fan_mode(self, fan_mode: int) -> None:
try:
await self._fan_channel.write_attributes({"fan_mode": fan_mode})
except ZigbeeException as ex:
self.error("Could not set fan mode: %s", ex)
self.async_set_state(0, "fan_mode", fan_mode)
async def async_update(self):
all_states = [self.hass.states.get(x) for x in self._entity_ids]
states: list[State] = list(filter(None, all_states))
percentage_states: list[State] = [
state for state in states if state.attributes.get(ATTR_PERCENTAGE)
]
preset_mode_states: list[State] = [
state for state in states if state.attributes.get(ATTR_PRESET_MODE)
]
self._available = any(state.state != STATE_UNAVAILABLE for state in states)
if percentage_states:
self._percentage = percentage_states[0].attributes[ATTR_PERCENTAGE]
self._preset_mode = None
elif preset_mode_states:
self._preset_mode = preset_mode_states[0].attributes[ATTR_PRESET_MODE]
self._percentage = None
else:
self._percentage = None
self._preset_mode = None
async def async_added_to_hass(self) -> None:
await self.async_update()
await super().async_added_to_hass()
| true | true |
1c2f55a77d0082d3ba51ac40a1c53a3b253923d4 | 329 | py | Python | exercicios_python_brasil/estrutura_sequencial/16_calculadora_tintas.py | MartinaLima/Python | 94dee598bd799cfe8de4c6369cea84e97e5ed024 | [
"MIT"
] | null | null | null | exercicios_python_brasil/estrutura_sequencial/16_calculadora_tintas.py | MartinaLima/Python | 94dee598bd799cfe8de4c6369cea84e97e5ed024 | [
"MIT"
] | null | null | null | exercicios_python_brasil/estrutura_sequencial/16_calculadora_tintas.py | MartinaLima/Python | 94dee598bd799cfe8de4c6369cea84e97e5ed024 | [
"MIT"
] | null | null | null | print('\033[1m>>> CALCULADORA DE TINTAS - LOJA ABC <<<\033[m')
area = float(input('ÁREA A SER PINTADA (m²): '))
litros = (area/3)
lata = (litros/18)
preco = lata*80
print('-'*40)
print(f'QUANTIDADE NECESSÁRIA: {litros:.2f} litro(s)')
print(f'LATA (18 LITROS): {lata:.2f} lata(s).')
print(f'VALOR TOTAL: R$ {preco:.2f}')
| 32.9 | 63 | 0.6231 | print('\033[1m>>> CALCULADORA DE TINTAS - LOJA ABC <<<\033[m')
area = float(input('ÁREA A SER PINTADA (m²): '))
litros = (area/3)
lata = (litros/18)
preco = lata*80
print('-'*40)
print(f'QUANTIDADE NECESSÁRIA: {litros:.2f} litro(s)')
print(f'LATA (18 LITROS): {lata:.2f} lata(s).')
print(f'VALOR TOTAL: R$ {preco:.2f}')
| true | true |
1c2f566d1d53012897e74ed2df7f16d2a8991fad | 1,545 | py | Python | setup.py | joshuafuller/adsbxcot | 49718e0f004be040fb2712952464ccec4ba8e19a | [
"Apache-2.0"
] | 18 | 2020-11-13T11:54:23.000Z | 2022-03-23T15:25:10.000Z | setup.py | joshuafuller/adsbxcot | 49718e0f004be040fb2712952464ccec4ba8e19a | [
"Apache-2.0"
] | 9 | 2020-11-17T07:54:58.000Z | 2022-02-09T23:29:52.000Z | setup.py | joshuafuller/adsbxcot | 49718e0f004be040fb2712952464ccec4ba8e19a | [
"Apache-2.0"
] | 7 | 2021-05-06T22:00:39.000Z | 2022-01-31T06:25:01.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Setup for the ADS-B Exchange Cursor-on-Target Gateway.
Source:: https://github.com/ampledata/adsbxcot
"""
import os
import sys
import setuptools
__title__ = "adsbxcot"
__version__ = "1.5.0"
__author__ = "Greg Albrecht W2GMD <oss@undef.net>"
__copyright__ = "Copyright 2021 Orion Labs, Inc."
__license__ = "Apache License, Version 2.0"
def publish():
"""Function for publishing package to pypi."""
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist')
os.system('twine upload dist/*')
sys.exit()
publish()
setuptools.setup(
version=__version__,
name=__title__,
packages=[__title__],
package_dir={__title__: __title__},
url=f'https://github.com/ampledata/{__title__}',
description='ADS-B Exchange Cursor-on-Target Gateway.',
author='Greg Albrecht',
author_email='oss@undef.net',
package_data={'': ['LICENSE']},
license="Apache License, Version 2.0",
long_description=open('README.rst').read(),
zip_safe=False,
include_package_data=True,
install_requires=[
'pytak >= 3.0.0',
'aircot',
'requests',
'aiohttp',
],
classifiers=[
'Topic :: Communications :: Ham Radio',
'Programming Language :: Python',
'License :: OSI Approved :: Apache Software License'
],
keywords=[
'ADS-B', 'ADSB', 'Cursor on Target', 'ATAK', 'TAK', 'CoT'
],
entry_points={'console_scripts': ['adsbxcot = adsbxcot.commands:cli']}
)
| 24.52381 | 74 | 0.631068 |
import os
import sys
import setuptools
__title__ = "adsbxcot"
__version__ = "1.5.0"
__author__ = "Greg Albrecht W2GMD <oss@undef.net>"
__copyright__ = "Copyright 2021 Orion Labs, Inc."
__license__ = "Apache License, Version 2.0"
def publish():
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist')
os.system('twine upload dist/*')
sys.exit()
publish()
setuptools.setup(
version=__version__,
name=__title__,
packages=[__title__],
package_dir={__title__: __title__},
url=f'https://github.com/ampledata/{__title__}',
description='ADS-B Exchange Cursor-on-Target Gateway.',
author='Greg Albrecht',
author_email='oss@undef.net',
package_data={'': ['LICENSE']},
license="Apache License, Version 2.0",
long_description=open('README.rst').read(),
zip_safe=False,
include_package_data=True,
install_requires=[
'pytak >= 3.0.0',
'aircot',
'requests',
'aiohttp',
],
classifiers=[
'Topic :: Communications :: Ham Radio',
'Programming Language :: Python',
'License :: OSI Approved :: Apache Software License'
],
keywords=[
'ADS-B', 'ADSB', 'Cursor on Target', 'ATAK', 'TAK', 'CoT'
],
entry_points={'console_scripts': ['adsbxcot = adsbxcot.commands:cli']}
)
| true | true |
1c2f56ff0c6fe054d1c581114ff4f7162b4b35f5 | 5,236 | py | Python | ranksvm.py | jrplatin/NBA-RankSVM | 74bec6cf79bc4554e8a456450d45d892e034a425 | [
"BSD-4-Clause-UC"
] | 1 | 2020-06-23T15:57:30.000Z | 2020-06-23T15:57:30.000Z | ranksvm.py | jrplatin/NBA-RankSVM | 74bec6cf79bc4554e8a456450d45d892e034a425 | [
"BSD-4-Clause-UC"
] | null | null | null | ranksvm.py | jrplatin/NBA-RankSVM | 74bec6cf79bc4554e8a456450d45d892e034a425 | [
"BSD-4-Clause-UC"
] | null | null | null | from sklearn import svm
from itertools import permutations
import numpy as np
from operator import itemgetter
from itertools import combinations
import numpy as np
# Get all permutation pairs out of an array
def get_pairs(arr):
return permutations(arr, 2)
# Transform data to pairs, where label of (x1, x2) is rank(x1) - rank(x2)
def data_to_pairs(X, y):
X_pairs = []
y_pairs = []
pairs = get_pairs(np.arange(len(X)))
for _, (index1, index2) in enumerate(pairs):
name1 = X[index1][0]
name2 = X[index2][0]
X_pairs.append((X[index1][1:] + X[index2][1:]))
result = y[name1] - y[name2]
y_pairs.append(result)
return X_pairs, y_pairs
# Transform just X data into pairs
def get_X_dict(X):
X_dict_of_pairs = {}
pairs = get_pairs(np.arange(len(X)))
for _, (index1, index2) in enumerate(pairs):
X_dict_of_pairs[(X[index1][0], X[index2][0])] = (X[index1][1:] + X[index2][1:])
return X_dict_of_pairs
# Pairwise ranking SVM
class RankSVM(svm.LinearSVC):
# Fit training data based off pairwise comparisons
def fit(self, X, y):
X_new, y_new = [], []
for i in range(len(X)):
X_pairs, y_pairs = data_to_pairs(X[i], y[i])
X_new += X_pairs
y_new += y_pairs
super(RankSVM, self).fit(X_new, y_new)
return self
# Predict based off pairwise comparisons
def predict(self, X):
# Get all team names
team_names = [X[i][0] for i in range(len(X))]
# Setup dictionary of teams to 'score'
dict_of_teams = {team: 0 for team in team_names}
X_dict = get_X_dict(X)
# Get relative rankings based off comparisons
for (team1, team2) in X_dict.keys():
ls_in = []
ls_in.append(X_dict[(team1, team2)])
dict_of_teams[team1] += super(RankSVM, self).predict(ls_in)
# Determine the ranking of each team
rankings = {}
curr_rank = 1
for team, _ in sorted(dict_of_teams.items(), key=itemgetter(1)):
rankings[team] = curr_rank
curr_rank += 1
# Line up predictions with actuals
predictions = [rankings[team] for team in team_names]
return predictions
# Get the element missing from the subset of 3
def get_test_index(arr_3_indices, arr_2_indices):
l = list(set(arr_3_indices) - set(arr_2_indices))
return l[0]
# Calculate mean average precision of a ranking prediction
def mean_average_precision(actual, predicted):
# Initialize list of average precisions
average_precisions = []
# Calculate all average precisions
for i in range(1, len(actual) + 1):
# Make actual and predicted lists of size i
actual_i = actual[:i]
predicted_i = predicted[:i]
# Initialize score variables
relevant_count = 0.0
score = 0.0
# Calculate an average precisoin
for i, p in enumerate(predicted_i):
if p in actual_i[:i + 1]:
relevant_count += 1.0
score += relevant_count/(i + 1.0)
average_precisions.append(score / len(actual_i))
return np.mean(average_precisions)
# Helper method to the helper method
def run_3_fold_cv_single_fold(X_train, y_train, clf, X_test, y_test):
clf.fit(X_train, y_train)
predicted = clf.predict(X_test)
actual = [value for value in y_test.values()]
_map = mean_average_precision(actual, predicted)
return _map
# Helper method
def run_3_fold_cv_helper(X_subset, y_subset, clf):
# Initialize MAP array
_maps = []
# Initialize combinations of size 2
combinations_of_size_2 = combinations(np.arange(3), 2)
# Loop over size 2 combinations and actually call the method that gets the accuracies for that combination
for combination in combinations_of_size_2:
X_subset_of_2 = []
y_subset_of_2 = []
for _, j in enumerate(combination):
X_subset_of_2.append(X_subset[j])
y_subset_of_2.append(y_subset[j])
test_index = get_test_index([0, 1, 2], list(combination))
X_test = X_subset[test_index]
y_test = y_subset[test_index]
_map = run_3_fold_cv_single_fold(X_subset_of_2, y_subset_of_2, clf, X_test, y_test)
_maps.append(_map)
return np.mean(_maps)
# Run 3 CFV using MAP as the evaluation metric
def run_3_fold_cv(X, y, clf):
# Initialize array arr = [<keys>]
arr = X.keys()
# Initialize array combinations to be all 3-sized combinations of years
combinations_of_size_3 = combinations(arr, 3)
# Initialize mean_average_precision array
_maps = []
# Loop over size 3 combinations and run 3 fold cv
for combination in combinations_of_size_3:
X_subset = []
y_subset = []
for _, j in enumerate(combination):
X_subset.append(X[j])
y_subset.append(y[j])
_map = run_3_fold_cv_helper(X_subset, y_subset, clf)
_maps.append(_map)
return _maps | 31.926829 | 110 | 0.618029 | from sklearn import svm
from itertools import permutations
import numpy as np
from operator import itemgetter
from itertools import combinations
import numpy as np
def get_pairs(arr):
return permutations(arr, 2)
def data_to_pairs(X, y):
X_pairs = []
y_pairs = []
pairs = get_pairs(np.arange(len(X)))
for _, (index1, index2) in enumerate(pairs):
name1 = X[index1][0]
name2 = X[index2][0]
X_pairs.append((X[index1][1:] + X[index2][1:]))
result = y[name1] - y[name2]
y_pairs.append(result)
return X_pairs, y_pairs
def get_X_dict(X):
X_dict_of_pairs = {}
pairs = get_pairs(np.arange(len(X)))
for _, (index1, index2) in enumerate(pairs):
X_dict_of_pairs[(X[index1][0], X[index2][0])] = (X[index1][1:] + X[index2][1:])
return X_dict_of_pairs
class RankSVM(svm.LinearSVC):
def fit(self, X, y):
X_new, y_new = [], []
for i in range(len(X)):
X_pairs, y_pairs = data_to_pairs(X[i], y[i])
X_new += X_pairs
y_new += y_pairs
super(RankSVM, self).fit(X_new, y_new)
return self
def predict(self, X):
team_names = [X[i][0] for i in range(len(X))]
dict_of_teams = {team: 0 for team in team_names}
X_dict = get_X_dict(X)
for (team1, team2) in X_dict.keys():
ls_in = []
ls_in.append(X_dict[(team1, team2)])
dict_of_teams[team1] += super(RankSVM, self).predict(ls_in)
rankings = {}
curr_rank = 1
for team, _ in sorted(dict_of_teams.items(), key=itemgetter(1)):
rankings[team] = curr_rank
curr_rank += 1
predictions = [rankings[team] for team in team_names]
return predictions
def get_test_index(arr_3_indices, arr_2_indices):
l = list(set(arr_3_indices) - set(arr_2_indices))
return l[0]
def mean_average_precision(actual, predicted):
average_precisions = []
for i in range(1, len(actual) + 1):
actual_i = actual[:i]
predicted_i = predicted[:i]
relevant_count = 0.0
score = 0.0
for i, p in enumerate(predicted_i):
if p in actual_i[:i + 1]:
relevant_count += 1.0
score += relevant_count/(i + 1.0)
average_precisions.append(score / len(actual_i))
return np.mean(average_precisions)
def run_3_fold_cv_single_fold(X_train, y_train, clf, X_test, y_test):
clf.fit(X_train, y_train)
predicted = clf.predict(X_test)
actual = [value for value in y_test.values()]
_map = mean_average_precision(actual, predicted)
return _map
def run_3_fold_cv_helper(X_subset, y_subset, clf):
_maps = []
combinations_of_size_2 = combinations(np.arange(3), 2)
for combination in combinations_of_size_2:
X_subset_of_2 = []
y_subset_of_2 = []
for _, j in enumerate(combination):
X_subset_of_2.append(X_subset[j])
y_subset_of_2.append(y_subset[j])
test_index = get_test_index([0, 1, 2], list(combination))
X_test = X_subset[test_index]
y_test = y_subset[test_index]
_map = run_3_fold_cv_single_fold(X_subset_of_2, y_subset_of_2, clf, X_test, y_test)
_maps.append(_map)
return np.mean(_maps)
def run_3_fold_cv(X, y, clf):
arr = X.keys()
combinations_of_size_3 = combinations(arr, 3)
_maps = []
for combination in combinations_of_size_3:
X_subset = []
y_subset = []
for _, j in enumerate(combination):
X_subset.append(X[j])
y_subset.append(y[j])
_map = run_3_fold_cv_helper(X_subset, y_subset, clf)
_maps.append(_map)
return _maps | true | true |
1c2f572cc0fd7450967f7d9eb62c73d81cbd22a6 | 19,495 | py | Python | lib/modules/python/situational_awareness/host/osx/HijackScanner.py | terrorizer1980/Empire | 9259e5106986847d2bb770c4289c0c0f1adf2344 | [
"BSD-3-Clause"
] | 49 | 2015-09-02T15:20:09.000Z | 2022-03-05T18:18:23.000Z | lib/modules/python/situational_awareness/host/osx/HijackScanner.py | rmusser01/Empire | c1bdbd0fdafd5bf34760d5b158dfd0db2bb19556 | [
"BSD-3-Clause"
] | 1 | 2020-11-04T08:15:12.000Z | 2020-11-04T08:15:12.000Z | lib/modules/python/situational_awareness/host/osx/HijackScanner.py | InfinitelyFreedom/Empire | 3a922f60d92658fb716efb3be5a1c15074114766 | [
"BSD-3-Clause"
] | 24 | 2015-09-08T11:45:23.000Z | 2022-02-07T23:53:58.000Z | from builtins import object
class Module(object):
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'Dylib Hijack Vulnerability Scanner',
# list of one or more authors for the module
'Author': ['@patrickwardle','@xorrior'],
# more verbose multi-line description of the module
'Description': ('This module can be used to identify applications vulnerable to dylib hijacking on a target system. This has been modified from the original to remove the dependancy for the macholib library.'),
'Software': '',
'Techniques': ['T1157'],
# True if the module needs to run in the background
'Background' : False,
# File extension to save the file as
'OutputExtension' : None,
'NeedsAdmin' : False,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe' : True,
# the module language
'Language' : 'python',
# the minimum language version needed
'MinLanguageVersion' : '2.6',
# list of any references/other comments
'Comments': [
'Heavily adapted from @patrickwardle\'s script: https://github.com/synack/DylibHijack/blob/master/scan.py'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Agent to run the module on.',
'Required' : True,
'Value' : ''
},
'Path' : {
'Description' : 'Scan all binaries recursively, in a specific path.',
'Required' : False,
'Value' : ''
},
'LoadedProcesses' : {
'Description' : 'Scan only loaded process executables',
'Required' : True,
'Value' : 'False'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# During instantiation, any settable option parameters
# are passed as an object set to the module and the
# options dictionary is automatically set. This is mostly
# in case options are passed on the command line
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
# the Python script itself, with the command to invoke
# for execution appended to the end. Scripts should output
# everything to the pipeline for proper parsing.
#
# the script should be stripped of comments, with a link to any
# original reference script included in the comments.
scanPath = self.options['Path']['Value']
LoadedProcesses = self.options['LoadedProcesses']['Value']
script = """
from ctypes import *
def run():
import ctypes
import os
import sys
import shlex
import subprocess
import io
import struct
from datetime import datetime
#supported archs
SUPPORTED_ARCHITECTURES = ['i386', 'x86_64']
LC_REQ_DYLD = 0x80000000
LC_LOAD_WEAK_DYLIB = LC_REQ_DYLD | 0x18
LC_RPATH = (0x1c | LC_REQ_DYLD)
LC_REEXPORT_DYLIB = 0x1f | LC_REQ_DYLD
(
LC_SEGMENT, LC_SYMTAB, LC_SYMSEG, LC_THREAD, LC_UNIXTHREAD, LC_LOADFVMLIB,
LC_IDFVMLIB, LC_IDENT, LC_FVMFILE, LC_PREPAGE, LC_DYSYMTAB, LC_LOAD_DYLIB,
LC_ID_DYLIB, LC_LOAD_DYLINKER, LC_ID_DYLINKER, LC_PREBOUND_DYLIB,
LC_ROUTINES, LC_SUB_FRAMEWORK, LC_SUB_UMBRELLA, LC_SUB_CLIENT,
LC_SUB_LIBRARY, LC_TWOLEVEL_HINTS, LC_PREBIND_CKSUM
) = range(0x1, 0x18)
MH_MAGIC = 0xfeedface
MH_CIGAM = 0xcefaedfe
MH_MAGIC_64 = 0xfeedfacf
MH_CIGAM_64 = 0xcffaedfe
_CPU_ARCH_ABI64 = 0x01000000
CPU_TYPE_NAMES = {
-1: 'ANY',
1: 'VAX',
6: 'MC680x0',
7: 'i386',
_CPU_ARCH_ABI64 | 7: 'x86_64',
8: 'MIPS',
10: 'MC98000',
11: 'HPPA',
12: 'ARM',
13: 'MC88000',
14: 'SPARC',
15: 'i860',
16: 'Alpha',
18: 'PowerPC',
_CPU_ARCH_ABI64 | 18: 'PowerPC64',
}
#structs that we need
class mach_header(ctypes.Structure):
_fields_ = [
("magic", ctypes.c_uint),
("cputype", ctypes.c_uint),
("cpusubtype", ctypes.c_uint),
("filetype", ctypes.c_uint),
("ncmds", ctypes.c_uint),
("sizeofcmds", ctypes.c_uint),
("flags", ctypes.c_uint)
]
class mach_header_64(ctypes.Structure):
_fields_ = mach_header._fields_ + [('reserved',ctypes.c_uint)]
class load_command(ctypes.Structure):
_fields_ = [
("cmd", ctypes.c_uint),
("cmdsize", ctypes.c_uint)
]
#executable binary
MH_EXECUTE = 2
#dylib
MH_DYLIB = 6
#bundles
MH_BUNDLE = 8
LC_Header_Sz = 0x8
def isSupportedArchitecture(machoHandle):
#flag
headersz = 28
header64sz = 32
supported = False
header = ""
#header = mach_header.from_buffer_copy(machoHandle.read())
try:
magic = struct.unpack('<L',machoHandle.read(4))[0]
cputype = struct.unpack('<L',machoHandle.read(4))[0]
machoHandle.seek(0, io.SEEK_SET)
if CPU_TYPE_NAMES.get(cputype) == 'i386':
header = mach_header.from_buffer_copy(machoHandle.read(headersz))
supported = True
elif CPU_TYPE_NAMES.get(cputype) == 'x86_64':
header = mach_header_64.from_buffer_copy(machoHandle.read(header64sz))
supported = True
else:
header = None
except:
pass
return (supported, header)
def loadedBinaries():
#list of loaded bins
binaries = []
#exec lsof
lsof = subprocess.Popen('lsof /', shell=True, stdout=subprocess.PIPE)
#get outpu
output = lsof.stdout.read()
#close
lsof.stdout.close()
#wait
lsof.wait()
#parse/split output
# ->grab file name and check if its executable
for line in output.split('\\n'):
try:
#split on spaces up to 8th element
# ->this is then the file name (which can have spaces so grab rest/join)
binary = ' '.join(shlex.split(line)[8:])
#skip non-files (fifos etc....) or non executable files
if not os.path.isfile(binary) or not os.access(binary, os.X_OK):
#skip
continue
#save binary
binaries.append(binary)
except:
#ignore
pass
#filter out dup's
binaries = list(set(binaries))
return binaries
def installedBinaries(rootDirectory = None):
#all executable binaries
binaries = []
#init
if not rootDirectory:
rootDirectory = '/'
#recursively walk (starting at r00t)
for root, dirnames, filenames in os.walk(rootDirectory):
#check all files
for filename in filenames:
#make full
# ->use realpath to resolve symlinks
fullName = os.path.realpath(os.path.join(root, filename))
#skip non-files (fifos etc....)
if not os.path.isfile(fullName):
#skip
continue
#only check executable files
if os.access(fullName, os.X_OK) and (os.path.splitext(fullName)[-1] == '.dyblib' or os.path.splitext(fullName)[-1] == ''):
#save
binaries.append(fullName)
print("Finished with installed binaries\\n")
return binaries
def resolvePath(binaryPath, unresolvedPath):
#return var
# ->init to what was passed in, since might not be able to resolve
resolvedPath = unresolvedPath
#resolve '@loader_path'
if unresolvedPath.startswith('@loader_path'):
#resolve
resolvedPath = os.path.abspath(os.path.split(binaryPath)[0] + unresolvedPath.replace('@loader_path', ''))
#resolve '@executable_path'
elif unresolvedPath.startswith('@executable_path'):
#resolve
resolvedPath = os.path.abspath(os.path.split(binaryPath)[0] + unresolvedPath.replace('@executable_path', ''))
return resolvedPath
def parseBinaries(binaries):
#dictionary of parsed binaries
parsedBinaries = {}
#scan all binaries
for binary in binaries:
#wrap
try:
#try load it (as mach-o)
f = open(binary, 'rb')
if not f:
#skip
continue
except:
#skip
continue
#check if it's a supported (intel) architecture
# ->also returns the supported mach-O header
(isSupported, machoHeader) = isSupportedArchitecture(f)
if not isSupported:
#skip
continue
#skip binaries that aren't main executables, dylibs or bundles
if machoHeader.filetype not in [MH_EXECUTE, MH_DYLIB, MH_BUNDLE]:
#skip
continue
#dbg msg
#init dictionary for process
parsedBinaries[binary] = {'LC_RPATHs': [], 'LC_LOAD_DYLIBs' : [], 'LC_LOAD_WEAK_DYLIBs': [] }
#save type
parsedBinaries[binary]['type'] = machoHeader.filetype
#iterate over all load
# ->save LC_RPATHs, LC_LOAD_DYLIBs, and LC_LOAD_WEAK_DYLIBs
if CPU_TYPE_NAMES.get(machoHeader.cputype) == 'x86_64':
f.seek(32, io.SEEK_SET)
else:
f.seek(28, io.SEEK_SET)
for cmd in range(machoHeader.ncmds):
#handle LC_RPATH's
# ->resolve and save
#save offset to load commands
try:
lc = load_command.from_buffer_copy(f.read(LC_Header_Sz))
except Exception as e:
break #break out of the nested loop and continue with the parent loop
size = lc.cmdsize
if lc.cmd == LC_RPATH:
#grab rpath
pathoffset = struct.unpack('<L',f.read(4))[0]
f.seek(pathoffset - (LC_Header_Sz + 4), io.SEEK_CUR)
path = f.read(lc.cmdsize - pathoffset)
rPathDirectory = path.rstrip('\\0')
#always attempt to resolve '@executable_path' and '@loader_path'
rPathDirectory = resolvePath(binary, rPathDirectory)
#save
parsedBinaries[binary]['LC_RPATHs'].append(rPathDirectory)
#handle LC_LOAD_DYLIB
# ->save (as is)
elif lc.cmd == LC_LOAD_DYLIB:
#tuple, last member is path to import
pathoffset = struct.unpack('<L',f.read(4))[0]
#skip over version and compatibility
f.seek(pathoffset - (LC_Header_Sz + 4), io.SEEK_CUR)
path = f.read(size - pathoffset)
importedDylib = path.rstrip('\\0')
#save
parsedBinaries[binary]['LC_LOAD_DYLIBs'].append(importedDylib)
#handle for LC_LOAD_WEAK_DYLIB
# ->resolve (except for '@rpaths') and save
elif lc.cmd == LC_LOAD_WEAK_DYLIB:
#tuple, last member is path to import
pathoffset = struct.unpack('<L',f.read(4))[0]
#skip over version and compatibility
f.seek(pathoffset - (LC_Header_Sz + 4), io.SEEK_CUR)
path = f.read(size - pathoffset)
weakDylib = path.rstrip('\\0')
#always attempt to resolve '@executable_path' and '@loader_path'
weakDylib = resolvePath(binary, weakDylib)
#save
parsedBinaries[binary]['LC_LOAD_WEAK_DYLIBs'].append(weakDylib)
else:
f.seek(size - LC_Header_Sz, io.SEEK_CUR)
print("finished parsing load commands")
return parsedBinaries
def processBinaries(parsedBinaries):
#results
# ->list of dictionaries
vulnerableBinaries = {'rpathExes': [], 'weakBins': []}
#scan all parsed binaries
for key in parsedBinaries:
#grab binary entry
binary = parsedBinaries[key]
#STEP 1: check for vulnerable @rpath'd imports
# note: only do this for main executables, since dylibs/bundles can share @rpath search dirs w/ main app, etc
# which we can't reliably resolve (i.e. this depends on the runtime/loadtime env)
#check for primary @rpath'd import that doesn't exist
if binary['type']== MH_EXECUTE and len(binary['LC_RPATHs']):
#check all @rpath'd imports for the executable
# ->if there is one that isn't found in a primary LC_RPATH, the executable is vulnerable :)
for importedDylib in binary['LC_LOAD_DYLIBs']:
#skip non-@rpath'd imports
if not importedDylib.startswith('@rpath'):
#skip
continue
#chop off '@rpath'
importedDylib = importedDylib.replace('@rpath', '')
#check the first rpath directory (from LC_RPATHs)
# ->is the rpath'd import there!?
if not os.path.exists(binary['LC_RPATHs'][0] + importedDylib):
#not found
# ->means this binary is vulnerable!
vulnerableBinaries['rpathExes'].append({'binary': key, 'importedDylib': importedDylib, 'LC_RPATH': binary['LC_RPATHs'][0]})
#bail
break
#STEP 2: check for vulnerable weak imports
# can check all binary types...
#check binary
for weakDylib in binary['LC_LOAD_WEAK_DYLIBs']:
#got to resolve weak @rpath'd imports before checking if they exist
if weakDylib.startswith('@rpath'):
#skip @rpath imports in dylibs and bundles, since they can share @rpath search dirs w/ main app, etc
# which we can't reliably resolve (i.e. this depends on the runtime/loadtime env.)
if binary['type'] != MH_EXECUTE:
#skip
continue
#skip @rpath imports if binary doesn't have any LC_RPATHS
if not len(binary['LC_RPATHs']):
#skip
continue
#chop off '@rpath'
weakDylib = weakDylib.replace('@rpath', '')
#just need to check first LC_RPATH directory
if not os.path.exists(binary['LC_RPATHs'][0] + weakDylib):
#not found
# ->means this binary is vulnerable!
vulnerableBinaries['weakBins'].append({'binary': key, 'weakDylib': weakDylib, 'LC_RPATH': binary['LC_RPATHs'][0]})
#bail
break
#path doesn't need to be resolved
# ->check/save those that don't exist
elif not os.path.exists(weakDylib):
#not found
# ->means this binary is vulnerable!
vulnerableBinaries['weakBins'].append({'binary': key, 'weakBin': weakDylib})
#bail
break
return vulnerableBinaries
path = "%s"
ProcBinaries = "%s"
startTime = datetime.now()
if ProcBinaries.lower() == "true":
#get list of loaded binaries
binaries = loadedBinaries()
elif path :
#dbg msg
#get list of executable files
binaries = installedBinaries(path)
else:
#get list of executable files on the file-system
binaries = installedBinaries()
parsedBinaries = parseBinaries(binaries)
#process/scan em
vulnerableBinaries = processBinaries(parsedBinaries)
#display binaries that are vulnerable to rpath hijack
if len(vulnerableBinaries['rpathExes']):
#dbg msg
print('\\nfound %%d binaries vulnerable to multiple rpaths:' %% len(vulnerableBinaries['rpathExes']))
#iterate over all and print
for binary in vulnerableBinaries['rpathExes']:
#dbg msg
print('%%s has an rpath vulnerability: (%%s%%s)\\n' %% (binary['binary'], binary['LC_RPATH'],binary['importedDylib']))
#binary didn't have any
else:
#dbg msg
print('\\ndid not find any vulnerable to multiple rpaths')
#display binaries that are vulnerable to weak import hijack
if len(vulnerableBinaries['weakBins']):
#dbg msg
print('\\nfound %%d binaries vulnerable to weak dylibs:' %% len(vulnerableBinaries['weakBins']))
#iterate over all and print
for binary in vulnerableBinaries['weakBins']:
#dbg msg
print('%%s has weak import (%%s)\\n' %% (binary['binary'], binary))
#binary didn't have any
else:
#dbg msg
print('\\ndid not find any missing LC_LOAD_WEAK_DYLIBs')
#dbg msg
print("Scan completed in " + str(datetime.now() - startTime) + "\\n")
print("[+] To abuse an rpath vulnerability...\\n")
print("[+] Find the legitimate dylib: find / -name <dylibname>, and note the path\\n")
print("[+] Run the CreateHijacker module in /persistence/osx/. Set the DylibPath to the path of the legitimate dylib.\\n")
run()
""" % (scanPath, LoadedProcesses)
return script
| 31.596434 | 222 | 0.532136 | from builtins import object
class Module(object):
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Dylib Hijack Vulnerability Scanner',
'Author': ['@patrickwardle','@xorrior'],
'Description': ('This module can be used to identify applications vulnerable to dylib hijacking on a target system. This has been modified from the original to remove the dependancy for the macholib library.'),
'Software': '',
'Techniques': ['T1157'],
'Background' : False,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : True,
# the module language
'Language' : 'python',
# the minimum language version needed
'MinLanguageVersion' : '2.6',
# list of any references/other comments
'Comments': [
'Heavily adapted from @patrickwardle\'s script: https://github.com/synack/DylibHijack/blob/master/scan.py'
]
}
self.options = {
'Agent' : {
'Description' : 'Agent to run the module on.',
'Required' : True,
'Value' : ''
},
'Path' : {
'Description' : 'Scan all binaries recursively, in a specific path.',
'Required' : False,
'Value' : ''
},
'LoadedProcesses' : {
'Description' : 'Scan only loaded process executables',
'Required' : True,
'Value' : 'False'
}
}
self.mainMenu = mainMenu
if params:
for param in params:
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
scanPath = self.options['Path']['Value']
LoadedProcesses = self.options['LoadedProcesses']['Value']
script = """
from ctypes import *
def run():
import ctypes
import os
import sys
import shlex
import subprocess
import io
import struct
from datetime import datetime
#supported archs
SUPPORTED_ARCHITECTURES = ['i386', 'x86_64']
LC_REQ_DYLD = 0x80000000
LC_LOAD_WEAK_DYLIB = LC_REQ_DYLD | 0x18
LC_RPATH = (0x1c | LC_REQ_DYLD)
LC_REEXPORT_DYLIB = 0x1f | LC_REQ_DYLD
(
LC_SEGMENT, LC_SYMTAB, LC_SYMSEG, LC_THREAD, LC_UNIXTHREAD, LC_LOADFVMLIB,
LC_IDFVMLIB, LC_IDENT, LC_FVMFILE, LC_PREPAGE, LC_DYSYMTAB, LC_LOAD_DYLIB,
LC_ID_DYLIB, LC_LOAD_DYLINKER, LC_ID_DYLINKER, LC_PREBOUND_DYLIB,
LC_ROUTINES, LC_SUB_FRAMEWORK, LC_SUB_UMBRELLA, LC_SUB_CLIENT,
LC_SUB_LIBRARY, LC_TWOLEVEL_HINTS, LC_PREBIND_CKSUM
) = range(0x1, 0x18)
MH_MAGIC = 0xfeedface
MH_CIGAM = 0xcefaedfe
MH_MAGIC_64 = 0xfeedfacf
MH_CIGAM_64 = 0xcffaedfe
_CPU_ARCH_ABI64 = 0x01000000
CPU_TYPE_NAMES = {
-1: 'ANY',
1: 'VAX',
6: 'MC680x0',
7: 'i386',
_CPU_ARCH_ABI64 | 7: 'x86_64',
8: 'MIPS',
10: 'MC98000',
11: 'HPPA',
12: 'ARM',
13: 'MC88000',
14: 'SPARC',
15: 'i860',
16: 'Alpha',
18: 'PowerPC',
_CPU_ARCH_ABI64 | 18: 'PowerPC64',
}
#structs that we need
class mach_header(ctypes.Structure):
_fields_ = [
("magic", ctypes.c_uint),
("cputype", ctypes.c_uint),
("cpusubtype", ctypes.c_uint),
("filetype", ctypes.c_uint),
("ncmds", ctypes.c_uint),
("sizeofcmds", ctypes.c_uint),
("flags", ctypes.c_uint)
]
class mach_header_64(ctypes.Structure):
_fields_ = mach_header._fields_ + [('reserved',ctypes.c_uint)]
class load_command(ctypes.Structure):
_fields_ = [
("cmd", ctypes.c_uint),
("cmdsize", ctypes.c_uint)
]
#executable binary
MH_EXECUTE = 2
#dylib
MH_DYLIB = 6
#bundles
MH_BUNDLE = 8
LC_Header_Sz = 0x8
def isSupportedArchitecture(machoHandle):
#flag
headersz = 28
header64sz = 32
supported = False
header = ""
#header = mach_header.from_buffer_copy(machoHandle.read())
try:
magic = struct.unpack('<L',machoHandle.read(4))[0]
cputype = struct.unpack('<L',machoHandle.read(4))[0]
machoHandle.seek(0, io.SEEK_SET)
if CPU_TYPE_NAMES.get(cputype) == 'i386':
header = mach_header.from_buffer_copy(machoHandle.read(headersz))
supported = True
elif CPU_TYPE_NAMES.get(cputype) == 'x86_64':
header = mach_header_64.from_buffer_copy(machoHandle.read(header64sz))
supported = True
else:
header = None
except:
pass
return (supported, header)
def loadedBinaries():
#list of loaded bins
binaries = []
#exec lsof
lsof = subprocess.Popen('lsof /', shell=True, stdout=subprocess.PIPE)
#get outpu
output = lsof.stdout.read()
#close
lsof.stdout.close()
#wait
lsof.wait()
#parse/split output
# ->grab file name and check if its executable
for line in output.split('\\n'):
try:
#split on spaces up to 8th element
# ->this is then the file name (which can have spaces so grab rest/join)
binary = ' '.join(shlex.split(line)[8:])
#skip non-files (fifos etc....) or non executable files
if not os.path.isfile(binary) or not os.access(binary, os.X_OK):
#skip
continue
#save binary
binaries.append(binary)
except:
#ignore
pass
#filter out dup's
binaries = list(set(binaries))
return binaries
def installedBinaries(rootDirectory = None):
#all executable binaries
binaries = []
#init
if not rootDirectory:
rootDirectory = '/'
#recursively walk (starting at r00t)
for root, dirnames, filenames in os.walk(rootDirectory):
#check all files
for filename in filenames:
#make full
# ->use realpath to resolve symlinks
fullName = os.path.realpath(os.path.join(root, filename))
#skip non-files (fifos etc....)
if not os.path.isfile(fullName):
#skip
continue
#only check executable files
if os.access(fullName, os.X_OK) and (os.path.splitext(fullName)[-1] == '.dyblib' or os.path.splitext(fullName)[-1] == ''):
#save
binaries.append(fullName)
print("Finished with installed binaries\\n")
return binaries
def resolvePath(binaryPath, unresolvedPath):
#return var
# ->init to what was passed in, since might not be able to resolve
resolvedPath = unresolvedPath
#resolve '@loader_path'
if unresolvedPath.startswith('@loader_path'):
#resolve
resolvedPath = os.path.abspath(os.path.split(binaryPath)[0] + unresolvedPath.replace('@loader_path', ''))
#resolve '@executable_path'
elif unresolvedPath.startswith('@executable_path'):
#resolve
resolvedPath = os.path.abspath(os.path.split(binaryPath)[0] + unresolvedPath.replace('@executable_path', ''))
return resolvedPath
def parseBinaries(binaries):
#dictionary of parsed binaries
parsedBinaries = {}
#scan all binaries
for binary in binaries:
#wrap
try:
#try load it (as mach-o)
f = open(binary, 'rb')
if not f:
#skip
continue
except:
#skip
continue
#check if it's a supported (intel) architecture
# ->also returns the supported mach-O header
(isSupported, machoHeader) = isSupportedArchitecture(f)
if not isSupported:
#skip
continue
#skip binaries that aren't main executables, dylibs or bundles
if machoHeader.filetype not in [MH_EXECUTE, MH_DYLIB, MH_BUNDLE]:
#skip
continue
#dbg msg
#init dictionary for process
parsedBinaries[binary] = {'LC_RPATHs': [], 'LC_LOAD_DYLIBs' : [], 'LC_LOAD_WEAK_DYLIBs': [] }
#save type
parsedBinaries[binary]['type'] = machoHeader.filetype
#iterate over all load
# ->save LC_RPATHs, LC_LOAD_DYLIBs, and LC_LOAD_WEAK_DYLIBs
if CPU_TYPE_NAMES.get(machoHeader.cputype) == 'x86_64':
f.seek(32, io.SEEK_SET)
else:
f.seek(28, io.SEEK_SET)
for cmd in range(machoHeader.ncmds):
#handle LC_RPATH's
# ->resolve and save
#save offset to load commands
try:
lc = load_command.from_buffer_copy(f.read(LC_Header_Sz))
except Exception as e:
break #break out of the nested loop and continue with the parent loop
size = lc.cmdsize
if lc.cmd == LC_RPATH:
#grab rpath
pathoffset = struct.unpack('<L',f.read(4))[0]
f.seek(pathoffset - (LC_Header_Sz + 4), io.SEEK_CUR)
path = f.read(lc.cmdsize - pathoffset)
rPathDirectory = path.rstrip('\\0')
#always attempt to resolve '@executable_path' and '@loader_path'
rPathDirectory = resolvePath(binary, rPathDirectory)
#save
parsedBinaries[binary]['LC_RPATHs'].append(rPathDirectory)
#handle LC_LOAD_DYLIB
# ->save (as is)
elif lc.cmd == LC_LOAD_DYLIB:
#tuple, last member is path to import
pathoffset = struct.unpack('<L',f.read(4))[0]
#skip over version and compatibility
f.seek(pathoffset - (LC_Header_Sz + 4), io.SEEK_CUR)
path = f.read(size - pathoffset)
importedDylib = path.rstrip('\\0')
#save
parsedBinaries[binary]['LC_LOAD_DYLIBs'].append(importedDylib)
#handle for LC_LOAD_WEAK_DYLIB
# ->resolve (except for '@rpaths') and save
elif lc.cmd == LC_LOAD_WEAK_DYLIB:
#tuple, last member is path to import
pathoffset = struct.unpack('<L',f.read(4))[0]
#skip over version and compatibility
f.seek(pathoffset - (LC_Header_Sz + 4), io.SEEK_CUR)
path = f.read(size - pathoffset)
weakDylib = path.rstrip('\\0')
#always attempt to resolve '@executable_path' and '@loader_path'
weakDylib = resolvePath(binary, weakDylib)
#save
parsedBinaries[binary]['LC_LOAD_WEAK_DYLIBs'].append(weakDylib)
else:
f.seek(size - LC_Header_Sz, io.SEEK_CUR)
print("finished parsing load commands")
return parsedBinaries
def processBinaries(parsedBinaries):
#results
# ->list of dictionaries
vulnerableBinaries = {'rpathExes': [], 'weakBins': []}
#scan all parsed binaries
for key in parsedBinaries:
#grab binary entry
binary = parsedBinaries[key]
#STEP 1: check for vulnerable @rpath'd imports
# note: only do this for main executables, since dylibs/bundles can share @rpath search dirs w/ main app, etc
# which we can't reliably resolve (i.e. this depends on the runtime/loadtime env)
#check for primary @rpath'd import that doesn't exist
if binary['type']== MH_EXECUTE and len(binary['LC_RPATHs']):
#check all @rpath'd imports for the executable
# ->if there is one that isn't found in a primary LC_RPATH, the executable is vulnerable :)
for importedDylib in binary['LC_LOAD_DYLIBs']:
#skip non-@rpath'd imports
if not importedDylib.startswith('@rpath'):
#skip
continue
#chop off '@rpath'
importedDylib = importedDylib.replace('@rpath', '')
#check the first rpath directory (from LC_RPATHs)
# ->is the rpath'd import there!?
if not os.path.exists(binary['LC_RPATHs'][0] + importedDylib):
#not found
# ->means this binary is vulnerable!
vulnerableBinaries['rpathExes'].append({'binary': key, 'importedDylib': importedDylib, 'LC_RPATH': binary['LC_RPATHs'][0]})
#bail
break
#STEP 2: check for vulnerable weak imports
# can check all binary types...
#check binary
for weakDylib in binary['LC_LOAD_WEAK_DYLIBs']:
#got to resolve weak @rpath'd imports before checking if they exist
if weakDylib.startswith('@rpath'):
#skip @rpath imports in dylibs and bundles, since they can share @rpath search dirs w/ main app, etc
# which we can't reliably resolve (i.e. this depends on the runtime/loadtime env.)
if binary['type'] != MH_EXECUTE:
#skip
continue
#skip @rpath imports if binary doesn't have any LC_RPATHS
if not len(binary['LC_RPATHs']):
#skip
continue
#chop off '@rpath'
weakDylib = weakDylib.replace('@rpath', '')
#just need to check first LC_RPATH directory
if not os.path.exists(binary['LC_RPATHs'][0] + weakDylib):
#not found
# ->means this binary is vulnerable!
vulnerableBinaries['weakBins'].append({'binary': key, 'weakDylib': weakDylib, 'LC_RPATH': binary['LC_RPATHs'][0]})
#bail
break
#path doesn't need to be resolved
# ->check/save those that don't exist
elif not os.path.exists(weakDylib):
#not found
# ->means this binary is vulnerable!
vulnerableBinaries['weakBins'].append({'binary': key, 'weakBin': weakDylib})
#bail
break
return vulnerableBinaries
path = "%s"
ProcBinaries = "%s"
startTime = datetime.now()
if ProcBinaries.lower() == "true":
#get list of loaded binaries
binaries = loadedBinaries()
elif path :
#dbg msg
#get list of executable files
binaries = installedBinaries(path)
else:
#get list of executable files on the file-system
binaries = installedBinaries()
parsedBinaries = parseBinaries(binaries)
#process/scan em
vulnerableBinaries = processBinaries(parsedBinaries)
#display binaries that are vulnerable to rpath hijack
if len(vulnerableBinaries['rpathExes']):
#dbg msg
print('\\nfound %%d binaries vulnerable to multiple rpaths:' %% len(vulnerableBinaries['rpathExes']))
#iterate over all and print
for binary in vulnerableBinaries['rpathExes']:
#dbg msg
print('%%s has an rpath vulnerability: (%%s%%s)\\n' %% (binary['binary'], binary['LC_RPATH'],binary['importedDylib']))
#binary didn't have any
else:
#dbg msg
print('\\ndid not find any vulnerable to multiple rpaths')
#display binaries that are vulnerable to weak import hijack
if len(vulnerableBinaries['weakBins']):
#dbg msg
print('\\nfound %%d binaries vulnerable to weak dylibs:' %% len(vulnerableBinaries['weakBins']))
#iterate over all and print
for binary in vulnerableBinaries['weakBins']:
#dbg msg
print('%%s has weak import (%%s)\\n' %% (binary['binary'], binary))
#binary didn't have any
else:
#dbg msg
print('\\ndid not find any missing LC_LOAD_WEAK_DYLIBs')
#dbg msg
print("Scan completed in " + str(datetime.now() - startTime) + "\\n")
print("[+] To abuse an rpath vulnerability...\\n")
print("[+] Find the legitimate dylib: find / -name <dylibname>, and note the path\\n")
print("[+] Run the CreateHijacker module in /persistence/osx/. Set the DylibPath to the path of the legitimate dylib.\\n")
run()
""" % (scanPath, LoadedProcesses)
return script
| true | true |
1c2f57cef9fb7678921c729910c67d1be947e38b | 756 | py | Python | django_semantic_ui/urls.py | valbertovc/django_semantic_ui | b4bd87983c1e78c442cef7168721225f6893c67a | [
"MIT"
] | 4 | 2016-05-06T20:52:42.000Z | 2018-11-18T08:29:33.000Z | django_semantic_ui/urls.py | valbertovc/django_semantic_ui | b4bd87983c1e78c442cef7168721225f6893c67a | [
"MIT"
] | null | null | null | django_semantic_ui/urls.py | valbertovc/django_semantic_ui | b4bd87983c1e78c442cef7168721225f6893c67a | [
"MIT"
] | null | null | null | from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns = (
url(r'^admin/', include(admin.site.urls)),
url(r'^admin/password_reset/$', auth_views.password_reset, name='admin_password_reset'),
url(r'^admin/password_reset/done/$', auth_views.password_reset_done, name='password_reset_done'),
url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>.+)/$', auth_views.password_reset_confirm, name='password_reset_confirm'),
url(r'^reset/done/$', auth_views.password_reset_complete, name='password_reset_complete'),
) | 54 | 129 | 0.768519 | from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns = (
url(r'^admin/', include(admin.site.urls)),
url(r'^admin/password_reset/$', auth_views.password_reset, name='admin_password_reset'),
url(r'^admin/password_reset/done/$', auth_views.password_reset_done, name='password_reset_done'),
url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>.+)/$', auth_views.password_reset_confirm, name='password_reset_confirm'),
url(r'^reset/done/$', auth_views.password_reset_complete, name='password_reset_complete'),
) | true | true |
1c2f583b482610b2b4f3242d6c3c16518d744bec | 7,278 | py | Python | models.py | Chianugoogidi/deit | a286bfc817e9e285291ab8b2e9dff277d6447bda | [
"Apache-2.0"
] | null | null | null | models.py | Chianugoogidi/deit | a286bfc817e9e285291ab8b2e9dff277d6447bda | [
"Apache-2.0"
] | null | null | null | models.py | Chianugoogidi/deit | a286bfc817e9e285291ab8b2e9dff277d6447bda | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import torch
import torch.nn as nn
from functools import partial
from timm.models.vision_transformer import VisionTransformer, _cfg
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_
__all__ = [
'deit_tiny_patch16_224', 'deit_small_patch16_224', 'deit_base_patch16_224',
'deit_tiny_distilled_patch16_224', 'deit_small_distilled_patch16_224',
'deit_base_distilled_patch16_224', 'deit_base_patch16_384',
'deit_base_distilled_patch16_384', 'deit_base_distilled_patch16_32'
]
class DistilledVisionTransformer(VisionTransformer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dist_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
num_patches = self.patch_embed.num_patches
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 2, self.embed_dim))
self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if self.num_classes > 0 else nn.Identity()
trunc_normal_(self.dist_token, std=.02)
trunc_normal_(self.pos_embed, std=.02)
self.head_dist.apply(self._init_weights)
def forward_features(self, x):
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
# with slight modifications to add the dist_token
B = x.shape[0]
x = self.patch_embed(x)
cls_tokens = self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
dist_token = self.dist_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, dist_token, x), dim=1)
x = x + self.pos_embed
x = self.pos_drop(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
return x[:, 0], x[:, 1]
def forward(self, x):
x, x_dist = self.forward_features(x)
x = self.head(x)
x_dist = self.head_dist(x_dist)
if self.training:
return x, x_dist
else:
# during inference, return the average of both classifier predictions
return (x + x_dist) / 2
@register_model
def deit_tiny_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_small_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_base_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_base_patch16_32(pretrained=False, **kwargs):
model = VisionTransformer(img_size=32,
patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def deit_tiny_distilled_patch16_224(pretrained=False, **kwargs):
model = DistilledVisionTransformer(
patch_size=16, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_small_distilled_patch16_224(pretrained=False, **kwargs):
model = DistilledVisionTransformer(
patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_base_distilled_patch16_224(pretrained=False, **kwargs):
model = DistilledVisionTransformer(
patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_base_patch16_384(pretrained=False, **kwargs):
model = VisionTransformer(
img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_base_distilled_patch16_384(pretrained=False, **kwargs):
model = DistilledVisionTransformer(
img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
| 38.919786 | 116 | 0.690849 |
import torch
import torch.nn as nn
from functools import partial
from timm.models.vision_transformer import VisionTransformer, _cfg
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_
__all__ = [
'deit_tiny_patch16_224', 'deit_small_patch16_224', 'deit_base_patch16_224',
'deit_tiny_distilled_patch16_224', 'deit_small_distilled_patch16_224',
'deit_base_distilled_patch16_224', 'deit_base_patch16_384',
'deit_base_distilled_patch16_384', 'deit_base_distilled_patch16_32'
]
class DistilledVisionTransformer(VisionTransformer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dist_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
num_patches = self.patch_embed.num_patches
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 2, self.embed_dim))
self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if self.num_classes > 0 else nn.Identity()
trunc_normal_(self.dist_token, std=.02)
trunc_normal_(self.pos_embed, std=.02)
self.head_dist.apply(self._init_weights)
def forward_features(self, x):
B = x.shape[0]
x = self.patch_embed(x)
cls_tokens = self.cls_token.expand(B, -1, -1)
dist_token = self.dist_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, dist_token, x), dim=1)
x = x + self.pos_embed
x = self.pos_drop(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
return x[:, 0], x[:, 1]
def forward(self, x):
x, x_dist = self.forward_features(x)
x = self.head(x)
x_dist = self.head_dist(x_dist)
if self.training:
return x, x_dist
else:
return (x + x_dist) / 2
@register_model
def deit_tiny_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_small_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_base_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_base_patch16_32(pretrained=False, **kwargs):
model = VisionTransformer(img_size=32,
patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def deit_tiny_distilled_patch16_224(pretrained=False, **kwargs):
model = DistilledVisionTransformer(
patch_size=16, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_small_distilled_patch16_224(pretrained=False, **kwargs):
model = DistilledVisionTransformer(
patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_base_distilled_patch16_224(pretrained=False, **kwargs):
model = DistilledVisionTransformer(
patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_base_patch16_384(pretrained=False, **kwargs):
model = VisionTransformer(
img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_base_distilled_patch16_384(pretrained=False, **kwargs):
model = DistilledVisionTransformer(
img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
| true | true |
1c2f5972b1791976318abf04c19d13a3a96213ae | 5,802 | py | Python | BERT.py | china-zyy/NLP-Tutorials | 3216c5616770af11185a6d6d238cf1e8bda4edd6 | [
"MIT"
] | null | null | null | BERT.py | china-zyy/NLP-Tutorials | 3216c5616770af11185a6d6d238cf1e8bda4edd6 | [
"MIT"
] | 2 | 2021-08-25T16:13:10.000Z | 2022-02-10T02:27:17.000Z | BERT.py | hades12580/NLP-Tutorials | 670160b5a9344b240c90dbaf0e62de3120c6d9e5 | [
"MIT"
] | 1 | 2021-09-23T17:25:21.000Z | 2021-09-23T17:25:21.000Z | # [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/pdf/1810.04805.pdf)
import numpy as np
import tensorflow as tf
import utils
import time
from GPT import GPT
import os
import pickle
class BERT(GPT):
def __init__(self, model_dim, max_len, n_layer, n_head, n_vocab, lr, max_seg=3, drop_rate=0.1, padding_idx=0):
super().__init__(model_dim, max_len, n_layer, n_head, n_vocab, lr, max_seg, drop_rate, padding_idx)
# I think task emb is not necessary for pretraining,
# because the aim of all tasks is to train a universal sentence embedding
# the body encoder is the same across all tasks,
# and different output layer defines different task just like transfer learning.
# finetuning replaces output layer and leaves the body encoder unchanged.
# self.task_emb = keras.layers.Embedding(
# input_dim=n_task, output_dim=model_dim, # [n_task, dim]
# embeddings_initializer=tf.initializers.RandomNormal(0., 0.01),
# )
def step(self, seqs, segs, seqs_, loss_mask, nsp_labels):
with tf.GradientTape() as tape:
mlm_logits, nsp_logits = self.call(seqs, segs, training=True)
mlm_loss_batch = tf.boolean_mask(self.cross_entropy(seqs_, mlm_logits), loss_mask)
mlm_loss = tf.reduce_mean(mlm_loss_batch)
nsp_loss = tf.reduce_mean(self.cross_entropy(nsp_labels, nsp_logits))
loss = mlm_loss + 0.2 * nsp_loss
grads = tape.gradient(loss, self.trainable_variables)
self.opt.apply_gradients(zip(grads, self.trainable_variables))
return loss, mlm_logits
def mask(self, seqs):
mask = tf.cast(tf.math.equal(seqs, self.padding_idx), tf.float32)
return mask[:, tf.newaxis, tf.newaxis, :] # [n, 1, 1, step]
def _get_loss_mask(len_arange, seq, pad_id):
rand_id = np.random.choice(len_arange, size=max(2, int(MASK_RATE * len(len_arange))), replace=False)
loss_mask = np.full_like(seq, pad_id, dtype=np.bool)
loss_mask[rand_id] = True
return loss_mask[None, :], rand_id
def do_mask(seq, len_arange, pad_id, mask_id):
loss_mask, rand_id = _get_loss_mask(len_arange, seq, pad_id)
seq[rand_id] = mask_id
return loss_mask
def do_replace(seq, len_arange, pad_id, word_ids):
loss_mask, rand_id = _get_loss_mask(len_arange, seq, pad_id)
seq[rand_id] = np.random.choice(word_ids, size=len(rand_id))
return loss_mask
def do_nothing(seq, len_arange, pad_id):
loss_mask, _ = _get_loss_mask(len_arange, seq, pad_id)
return loss_mask
def random_mask_or_replace(data, arange, batch_size):
seqs, segs, xlen, nsp_labels = data.sample(batch_size)
seqs_ = seqs.copy()
p = np.random.random()
if p < 0.7:
# mask
loss_mask = np.concatenate(
[do_mask(
seqs[i],
np.concatenate((arange[:xlen[i, 0]], arange[xlen[i, 0] + 1:xlen[i].sum() + 1])),
data.pad_id,
data.v2i["<MASK>"]) for i in range(len(seqs))], axis=0)
elif p < 0.85:
# do nothing
loss_mask = np.concatenate(
[do_nothing(
seqs[i],
np.concatenate((arange[:xlen[i, 0]], arange[xlen[i, 0] + 1:xlen[i].sum() + 1])),
data.pad_id) for i in range(len(seqs))], axis=0)
else:
# replace
loss_mask = np.concatenate(
[do_replace(
seqs[i],
np.concatenate((arange[:xlen[i, 0]], arange[xlen[i, 0] + 1:xlen[i].sum() + 1])),
data.pad_id,
data.word_ids) for i in range(len(seqs))], axis=0)
return seqs, segs, seqs_, loss_mask, xlen, nsp_labels
def train(model, data, step=10000, name="bert"):
t0 = time.time()
arange = np.arange(0, data.max_len)
for t in range(step):
seqs, segs, seqs_, loss_mask, xlen, nsp_labels = random_mask_or_replace(data, arange, 16)
loss, pred = model.step(seqs, segs, seqs_, loss_mask, nsp_labels)
if t % 100 == 0:
pred = pred[0].numpy().argmax(axis=1)
t1 = time.time()
print(
"\n\nstep: ", t,
"| time: %.2f" % (t1 - t0),
"| loss: %.3f" % loss.numpy(),
"\n| tgt: ", " ".join([data.i2v[i] for i in seqs[0][:xlen[0].sum()+1]]),
"\n| prd: ", " ".join([data.i2v[i] for i in pred[:xlen[0].sum()+1]]),
"\n| tgt word: ", [data.i2v[i] for i in seqs_[0]*loss_mask[0] if i != data.v2i["<PAD>"]],
"\n| prd word: ", [data.i2v[i] for i in pred*loss_mask[0] if i != data.v2i["<PAD>"]],
)
t0 = t1
os.makedirs("./visual/models/%s" % name, exist_ok=True)
model.save_weights("./visual/models/%s/model.ckpt" % name)
def export_attention(model, data, name="bert"):
model.load_weights("./visual/models/%s/model.ckpt" % name)
# save attention matrix for visualization
seqs, segs, xlen, nsp_labels = data.sample(32)
model.call(seqs, segs, False)
data = {"src": [[data.i2v[i] for i in seqs[j]] for j in range(len(seqs))], "attentions": model.attentions}
with open("./visual/tmp/%s_attention_matrix.pkl" % name, "wb") as f:
pickle.dump(data, f)
if __name__ == "__main__":
utils.set_soft_gpu(True)
MODEL_DIM = 256
N_LAYER = 4
LEARNING_RATE = 1e-4
MASK_RATE = 0.15
d = utils.MRPCData("./MRPC", 2000)
print("num word: ", d.num_word)
m = BERT(
model_dim=MODEL_DIM, max_len=d.max_len, n_layer=N_LAYER, n_head=4, n_vocab=d.num_word,
lr=LEARNING_RATE, max_seg=d.num_seg, drop_rate=0.2, padding_idx=d.v2i["<PAD>"])
train(m, d, step=10000, name="bert")
export_attention(m, d, "bert")
| 40.291667 | 122 | 0.610479 |
import numpy as np
import tensorflow as tf
import utils
import time
from GPT import GPT
import os
import pickle
class BERT(GPT):
def __init__(self, model_dim, max_len, n_layer, n_head, n_vocab, lr, max_seg=3, drop_rate=0.1, padding_idx=0):
super().__init__(model_dim, max_len, n_layer, n_head, n_vocab, lr, max_seg, drop_rate, padding_idx)
def step(self, seqs, segs, seqs_, loss_mask, nsp_labels):
with tf.GradientTape() as tape:
mlm_logits, nsp_logits = self.call(seqs, segs, training=True)
mlm_loss_batch = tf.boolean_mask(self.cross_entropy(seqs_, mlm_logits), loss_mask)
mlm_loss = tf.reduce_mean(mlm_loss_batch)
nsp_loss = tf.reduce_mean(self.cross_entropy(nsp_labels, nsp_logits))
loss = mlm_loss + 0.2 * nsp_loss
grads = tape.gradient(loss, self.trainable_variables)
self.opt.apply_gradients(zip(grads, self.trainable_variables))
return loss, mlm_logits
def mask(self, seqs):
mask = tf.cast(tf.math.equal(seqs, self.padding_idx), tf.float32)
return mask[:, tf.newaxis, tf.newaxis, :]
def _get_loss_mask(len_arange, seq, pad_id):
rand_id = np.random.choice(len_arange, size=max(2, int(MASK_RATE * len(len_arange))), replace=False)
loss_mask = np.full_like(seq, pad_id, dtype=np.bool)
loss_mask[rand_id] = True
return loss_mask[None, :], rand_id
def do_mask(seq, len_arange, pad_id, mask_id):
loss_mask, rand_id = _get_loss_mask(len_arange, seq, pad_id)
seq[rand_id] = mask_id
return loss_mask
def do_replace(seq, len_arange, pad_id, word_ids):
loss_mask, rand_id = _get_loss_mask(len_arange, seq, pad_id)
seq[rand_id] = np.random.choice(word_ids, size=len(rand_id))
return loss_mask
def do_nothing(seq, len_arange, pad_id):
loss_mask, _ = _get_loss_mask(len_arange, seq, pad_id)
return loss_mask
def random_mask_or_replace(data, arange, batch_size):
seqs, segs, xlen, nsp_labels = data.sample(batch_size)
seqs_ = seqs.copy()
p = np.random.random()
if p < 0.7:
loss_mask = np.concatenate(
[do_mask(
seqs[i],
np.concatenate((arange[:xlen[i, 0]], arange[xlen[i, 0] + 1:xlen[i].sum() + 1])),
data.pad_id,
data.v2i["<MASK>"]) for i in range(len(seqs))], axis=0)
elif p < 0.85:
loss_mask = np.concatenate(
[do_nothing(
seqs[i],
np.concatenate((arange[:xlen[i, 0]], arange[xlen[i, 0] + 1:xlen[i].sum() + 1])),
data.pad_id) for i in range(len(seqs))], axis=0)
else:
loss_mask = np.concatenate(
[do_replace(
seqs[i],
np.concatenate((arange[:xlen[i, 0]], arange[xlen[i, 0] + 1:xlen[i].sum() + 1])),
data.pad_id,
data.word_ids) for i in range(len(seqs))], axis=0)
return seqs, segs, seqs_, loss_mask, xlen, nsp_labels
def train(model, data, step=10000, name="bert"):
t0 = time.time()
arange = np.arange(0, data.max_len)
for t in range(step):
seqs, segs, seqs_, loss_mask, xlen, nsp_labels = random_mask_or_replace(data, arange, 16)
loss, pred = model.step(seqs, segs, seqs_, loss_mask, nsp_labels)
if t % 100 == 0:
pred = pred[0].numpy().argmax(axis=1)
t1 = time.time()
print(
"\n\nstep: ", t,
"| time: %.2f" % (t1 - t0),
"| loss: %.3f" % loss.numpy(),
"\n| tgt: ", " ".join([data.i2v[i] for i in seqs[0][:xlen[0].sum()+1]]),
"\n| prd: ", " ".join([data.i2v[i] for i in pred[:xlen[0].sum()+1]]),
"\n| tgt word: ", [data.i2v[i] for i in seqs_[0]*loss_mask[0] if i != data.v2i["<PAD>"]],
"\n| prd word: ", [data.i2v[i] for i in pred*loss_mask[0] if i != data.v2i["<PAD>"]],
)
t0 = t1
os.makedirs("./visual/models/%s" % name, exist_ok=True)
model.save_weights("./visual/models/%s/model.ckpt" % name)
def export_attention(model, data, name="bert"):
model.load_weights("./visual/models/%s/model.ckpt" % name)
seqs, segs, xlen, nsp_labels = data.sample(32)
model.call(seqs, segs, False)
data = {"src": [[data.i2v[i] for i in seqs[j]] for j in range(len(seqs))], "attentions": model.attentions}
with open("./visual/tmp/%s_attention_matrix.pkl" % name, "wb") as f:
pickle.dump(data, f)
if __name__ == "__main__":
utils.set_soft_gpu(True)
MODEL_DIM = 256
N_LAYER = 4
LEARNING_RATE = 1e-4
MASK_RATE = 0.15
d = utils.MRPCData("./MRPC", 2000)
print("num word: ", d.num_word)
m = BERT(
model_dim=MODEL_DIM, max_len=d.max_len, n_layer=N_LAYER, n_head=4, n_vocab=d.num_word,
lr=LEARNING_RATE, max_seg=d.num_seg, drop_rate=0.2, padding_idx=d.v2i["<PAD>"])
train(m, d, step=10000, name="bert")
export_attention(m, d, "bert")
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.