file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
win32spawn.py | import os
import threading
import Queue
# Windows import
import win32file
import win32pipe
import win32api
import win32con
import win32security
import win32process
import win32event
class Win32Spawn(object):
def __init__(self, cmd, shell=False):
self.queue = Queue.Queue()
self.is_terminated = False
self.wake_up_event = win32event.CreateEvent(None, 0, 0, None)
exec_dir = os.getcwd()
comspec = os.environ.get("COMSPEC", "cmd.exe")
cmd = comspec + ' /c ' + cmd
win32event.ResetEvent(self.wake_up_event)
currproc = win32api.GetCurrentProcess()
sa = win32security.SECURITY_ATTRIBUTES()
sa.bInheritHandle = 1
child_stdout_rd, child_stdout_wr = win32pipe.CreatePipe(sa, 0)
child_stdout_rd_dup = win32api.DuplicateHandle(currproc, child_stdout_rd, currproc, 0, 0, win32con.DUPLICATE_SAME_ACCESS)
win32file.CloseHandle(child_stdout_rd)
child_stderr_rd, child_stderr_wr = win32pipe.CreatePipe(sa, 0)
child_stderr_rd_dup = win32api.DuplicateHandle(currproc, child_stderr_rd, currproc, 0, 0, win32con.DUPLICATE_SAME_ACCESS)
win32file.CloseHandle(child_stderr_rd)
child_stdin_rd, child_stdin_wr = win32pipe.CreatePipe(sa, 0)
child_stdin_wr_dup = win32api.DuplicateHandle(currproc, child_stdin_wr, currproc, 0, 0, win32con.DUPLICATE_SAME_ACCESS)
win32file.CloseHandle(child_stdin_wr)
startup_info = win32process.STARTUPINFO()
startup_info.hStdInput = child_stdin_rd
startup_info.hStdOutput = child_stdout_wr
startup_info.hStdError = child_stderr_wr
startup_info.dwFlags = win32process.STARTF_USESTDHANDLES
cr_flags = 0
cr_flags = win32process.CREATE_NEW_PROCESS_GROUP
env = os.environ.copy()
self.h_process, h_thread, dw_pid, dw_tid = win32process.CreateProcess(None, cmd, None, None, 1,
cr_flags, env, os.path.abspath(exec_dir),
startup_info)
win32api.CloseHandle(h_thread)
win32file.CloseHandle(child_stdin_rd)
win32file.CloseHandle(child_stdout_wr)
win32file.CloseHandle(child_stderr_wr)
self.__child_stdout = child_stdout_rd_dup
self.__child_stderr = child_stderr_rd_dup
self.__child_stdin = child_stdin_wr_dup
self.exit_code = -1
def close(self):
win32file.CloseHandle(self.__child_stdout)
win32file.CloseHandle(self.__child_stderr)
win32file.CloseHandle(self.__child_stdin)
win32api.CloseHandle(self.h_process)
win32api.CloseHandle(self.wake_up_event)
def kill_subprocess():
win32event.SetEvent(self.wake_up_event)
def sleep(secs):
win32event.ResetEvent(self.wake_up_event)
timeout = int(1000 * secs)
val = win32event.WaitForSingleObject(self.wake_up_event, timeout)
if val == win32event.WAIT_TIMEOUT:
return True
else:
# The wake_up_event must have been signalled
return False
def get(self, block=True, timeout=None):
return self.queue.get(block=block, timeout=timeout)
def | (self):
return self.queue.qsize()
def __wait_for_child(self):
# kick off threads to read from stdout and stderr of the child process
threading.Thread(target=self.__do_read, args=(self.__child_stdout, )).start()
threading.Thread(target=self.__do_read, args=(self.__child_stderr, )).start()
while True:
# block waiting for the process to finish or the interrupt to happen
handles = (self.wake_up_event, self.h_process)
val = win32event.WaitForMultipleObjects(handles, 0, win32event.INFINITE)
if val >= win32event.WAIT_OBJECT_0 and val < win32event.WAIT_OBJECT_0 + len(handles):
handle = handles[val - win32event.WAIT_OBJECT_0]
if handle == self.wake_up_event:
win32api.TerminateProcess(self.h_process, 1)
win32event.ResetEvent(self.wake_up_event)
return False
elif handle == self.h_process:
# the process has ended naturally
return True
else:
assert False, "Unknown handle fired"
else:
assert False, "Unexpected return from WaitForMultipleObjects"
# Wait for job to finish. Since this method blocks, it can to be called from another thread.
# If the application wants to kill the process, it should call kill_subprocess().
def wait(self):
if not self.__wait_for_child():
# it's been killed
result = False
else:
# normal termination
self.exit_code = win32process.GetExitCodeProcess(self.h_process)
result = self.exit_code == 0
self.close()
self.is_terminated = True
return result
# This method gets called on a worker thread to read from either a stderr
# or stdout thread from the child process.
def __do_read(self, handle):
bytesToRead = 1024
while 1:
try:
finished = 0
hr, data = win32file.ReadFile(handle, bytesToRead, None)
if data:
self.queue.put_nowait(data)
except win32api.error:
finished = 1
if finished:
return
def start_pipe(self):
def worker(pipe):
return pipe.wait()
thrd = threading.Thread(target=worker, args=(self, ))
thrd.start()
| qsize | identifier_name |
win32spawn.py | import os
import threading
import Queue
# Windows import
import win32file
import win32pipe
import win32api
import win32con
import win32security
import win32process
import win32event
class Win32Spawn(object):
def __init__(self, cmd, shell=False):
self.queue = Queue.Queue()
self.is_terminated = False
self.wake_up_event = win32event.CreateEvent(None, 0, 0, None)
exec_dir = os.getcwd()
comspec = os.environ.get("COMSPEC", "cmd.exe")
cmd = comspec + ' /c ' + cmd
win32event.ResetEvent(self.wake_up_event)
currproc = win32api.GetCurrentProcess()
sa = win32security.SECURITY_ATTRIBUTES()
sa.bInheritHandle = 1
child_stdout_rd, child_stdout_wr = win32pipe.CreatePipe(sa, 0)
child_stdout_rd_dup = win32api.DuplicateHandle(currproc, child_stdout_rd, currproc, 0, 0, win32con.DUPLICATE_SAME_ACCESS)
win32file.CloseHandle(child_stdout_rd)
child_stderr_rd, child_stderr_wr = win32pipe.CreatePipe(sa, 0)
child_stderr_rd_dup = win32api.DuplicateHandle(currproc, child_stderr_rd, currproc, 0, 0, win32con.DUPLICATE_SAME_ACCESS)
win32file.CloseHandle(child_stderr_rd)
child_stdin_rd, child_stdin_wr = win32pipe.CreatePipe(sa, 0)
child_stdin_wr_dup = win32api.DuplicateHandle(currproc, child_stdin_wr, currproc, 0, 0, win32con.DUPLICATE_SAME_ACCESS)
win32file.CloseHandle(child_stdin_wr)
startup_info = win32process.STARTUPINFO()
startup_info.hStdInput = child_stdin_rd
startup_info.hStdOutput = child_stdout_wr
startup_info.hStdError = child_stderr_wr
startup_info.dwFlags = win32process.STARTF_USESTDHANDLES
cr_flags = 0
cr_flags = win32process.CREATE_NEW_PROCESS_GROUP
env = os.environ.copy()
self.h_process, h_thread, dw_pid, dw_tid = win32process.CreateProcess(None, cmd, None, None, 1,
cr_flags, env, os.path.abspath(exec_dir),
startup_info)
win32api.CloseHandle(h_thread)
win32file.CloseHandle(child_stdin_rd)
win32file.CloseHandle(child_stdout_wr)
win32file.CloseHandle(child_stderr_wr)
self.__child_stdout = child_stdout_rd_dup
self.__child_stderr = child_stderr_rd_dup
self.__child_stdin = child_stdin_wr_dup
self.exit_code = -1
def close(self):
win32file.CloseHandle(self.__child_stdout)
win32file.CloseHandle(self.__child_stderr)
win32file.CloseHandle(self.__child_stdin)
win32api.CloseHandle(self.h_process)
win32api.CloseHandle(self.wake_up_event)
def kill_subprocess():
win32event.SetEvent(self.wake_up_event)
def sleep(secs):
win32event.ResetEvent(self.wake_up_event)
timeout = int(1000 * secs)
val = win32event.WaitForSingleObject(self.wake_up_event, timeout)
if val == win32event.WAIT_TIMEOUT:
return True
else: |
def get(self, block=True, timeout=None):
return self.queue.get(block=block, timeout=timeout)
def qsize(self):
return self.queue.qsize()
def __wait_for_child(self):
# kick off threads to read from stdout and stderr of the child process
threading.Thread(target=self.__do_read, args=(self.__child_stdout, )).start()
threading.Thread(target=self.__do_read, args=(self.__child_stderr, )).start()
while True:
# block waiting for the process to finish or the interrupt to happen
handles = (self.wake_up_event, self.h_process)
val = win32event.WaitForMultipleObjects(handles, 0, win32event.INFINITE)
if val >= win32event.WAIT_OBJECT_0 and val < win32event.WAIT_OBJECT_0 + len(handles):
handle = handles[val - win32event.WAIT_OBJECT_0]
if handle == self.wake_up_event:
win32api.TerminateProcess(self.h_process, 1)
win32event.ResetEvent(self.wake_up_event)
return False
elif handle == self.h_process:
# the process has ended naturally
return True
else:
assert False, "Unknown handle fired"
else:
assert False, "Unexpected return from WaitForMultipleObjects"
# Wait for job to finish. Since this method blocks, it can to be called from another thread.
# If the application wants to kill the process, it should call kill_subprocess().
def wait(self):
if not self.__wait_for_child():
# it's been killed
result = False
else:
# normal termination
self.exit_code = win32process.GetExitCodeProcess(self.h_process)
result = self.exit_code == 0
self.close()
self.is_terminated = True
return result
# This method gets called on a worker thread to read from either a stderr
# or stdout thread from the child process.
def __do_read(self, handle):
bytesToRead = 1024
while 1:
try:
finished = 0
hr, data = win32file.ReadFile(handle, bytesToRead, None)
if data:
self.queue.put_nowait(data)
except win32api.error:
finished = 1
if finished:
return
def start_pipe(self):
def worker(pipe):
return pipe.wait()
thrd = threading.Thread(target=worker, args=(self, ))
thrd.start() | # The wake_up_event must have been signalled
return False | random_line_split |
qsr_qtc_bc_simplified.py | # -*- coding: utf-8 -*-
from __future__ import division
from qsrlib_qsrs.qsr_qtc_simplified_abstractclass import QSR_QTC_Simplified_Abstractclass
import numpy as np
from qsrlib_io.world_qsr_trace import *
class QSR_QTC_BC_Simplified(QSR_QTC_Simplified_Abstractclass):
"""QTCBC simplified relations.
Values of the abstract properties
* **_unique_id** = "qtcbcs"
* **_all_possible_relations** = ?
* **_dtype** = "points"
Some explanation about the QSR or better link to a separate webpage explaining it. Maybe a reference if it exists.
"""
def __init__(self):
"""Constructor."""
super(QSR_QTC_BC_Simplified, self).__init__()
self._unique_id = "qtcbcs"
"""str: Unique identifier name of the QSR."""
self.qtc_type = "bc"
"""str: QTC specific type."""
self._all_possible_relations = tuple(self.return_all_possible_state_combinations()[0])
"""tuple: All possible relations of the QSR."""
def make_world_qsr_trace(self, world_trace, timestamps, qsr_params, req_params, **kwargs):
"""Compute the world QSR trace from the arguments.
:param world_trace: Input data.
:type world_trace: :class:`World_Trace <qsrlib_io.world_trace.World_Trace>`
:param timestamps: List of sorted timestamps of `world_trace`.
:type timestamps: list
:param qsr_params: QSR specific parameters passed in `dynamic_args`.
:type qsr_params: dict
:param req_params: Dynamic arguments passed with the request.
:type dynamic_args: dict
:param kwargs: kwargs arguments.
:return: Computed world QSR trace.
:rtype: :class:`World_QSR_Trace <qsrlib_io.world_qsr_trace.World_QSR_Trace>`
"""
ret = World_QSR_Trace(qsr_type=self._unique_id)
qtc_sequence = {}
for t, tp in zip(timestamps[1:], timestamps):
world_state_now = world_trace.trace[t]
world_state_previous = world_trace.trace[tp]
if set(world_state_now.objects.keys()) != set(world_state_previous.objects.keys()):
ret.put_empty_world_qsr_state(t)
continue # Objects have to be present in both timestamps
qsrs_for = self._process_qsrs_for(world_state_now.objects.keys(), req_params["dynamic_args"])
for o1_name, o2_name in qsrs_for:
between = str(o1_name) + "," + str(o2_name)
qtc = np.array([], dtype=int)
k = [world_state_previous.objects[o1_name].x,
world_state_previous.objects[o1_name].y,
world_state_now.objects[o1_name].x,
world_state_now.objects[o1_name].y]
l = [world_state_previous.objects[o2_name].x,
world_state_previous.objects[o2_name].y,
world_state_now.objects[o2_name].x,
world_state_now.objects[o2_name].y]
qtc = self._create_qtc_representation(
k,
l,
qsr_params["quantisation_factor"]
)
distance = self._get_euclidean_distance(
(world_state_now.objects[o1_name].x,
world_state_now.objects[o1_name].y),
(world_state_now.objects[o2_name].x,
world_state_now.objects[o2_name].y)
)
try:
qtc_sequence[between]["qtc"] = np.append(
qtc_sequence[between]["qtc"],
qtc
).reshape(-1,4)
qtc_sequence[between]["distances"] = np.append(
qtc_sequence[between]["distances"],
distance
)
except KeyError:
qtc_sequence[between] = {
"qtc": qtc,
"distances": np.array([distance])
}
for between, qtcbc in qtc_sequence.items():
qtcbc["qtc"] = self._create_bc_chain(qtcbc["qtc"], qtcbc["distances"], qsr_params["distance_threshold"])
if not qsr_params["no_collapse"]:
qtcbc["qtc"] = self._collapse_similar_states(qtcbc["qtc"])
if qsr_params["validate"]:
qtcbc["qtc"] = self._validate_qtc_sequence(qtcbc["qtc"])
for idx, q in enumerate(qtcbc["qtc"]):
qsr = QSR(
timestamp=idx+1,
between=between,
qsr=self.qtc_to_output_format(q)
)
ret.add_qsr(qsr, idx+1)
return ret
def _create_bc_chain(self, qtc, distances, distance_threshold):
|
def qtc_to_output_format(self, qtc):
"""Overwrite this for the different QTC variants to select only the parts from the QTCCS tuple that you would
like to return. Example for QTCBS: return `qtc[0:2]`.
:param qtc: Full QTCC tuple [q1,q2,q4,q5].
:type qtc: list or tuple
:return: {"qtcbcs": "q1,q2,q4,q5"}
:rtype: dict
"""
s = self.create_qtc_string(qtc) if not np.isnan(qtc[2]) else self.create_qtc_string(qtc[0:2])
return self._format_qsr(s)
def _get_euclidean_distance(self, p, q):
"""Calculate the Euclidean distance between points `p` and `q`.
:param p: x,y coordinates.
:type p: tuple
:param q: x,y coordinates.
:type q: tuple
:return: Euclidean distance between `p` and `q`.
:rtype: float
"""
return np.sqrt(np.power((float(p[0])-float(q[0])),2)+np.power((float(p[1])-float(q[1])),2))
| """
:param qtc:
:type qtc:
:param distances:
:type distances:
:param distance_threshold:
:type distance_threshold:
:return:
:rtype:
"""
ret = np.array([])
if len(qtc.shape) == 1:
qtc = [qtc]
for dist, state in zip(distances, qtc):
if dist > distance_threshold:
ret = np.append(ret, np.append(state[0:2],[np.nan,np.nan]), axis=0)
else:
ret = np.append(ret, state, axis=0)
return ret.reshape(-1,4) | identifier_body |
qsr_qtc_bc_simplified.py | # -*- coding: utf-8 -*-
from __future__ import division
from qsrlib_qsrs.qsr_qtc_simplified_abstractclass import QSR_QTC_Simplified_Abstractclass
import numpy as np
from qsrlib_io.world_qsr_trace import *
class QSR_QTC_BC_Simplified(QSR_QTC_Simplified_Abstractclass):
"""QTCBC simplified relations.
Values of the abstract properties
* **_unique_id** = "qtcbcs"
* **_all_possible_relations** = ?
* **_dtype** = "points"
Some explanation about the QSR or better link to a separate webpage explaining it. Maybe a reference if it exists.
"""
def __init__(self):
"""Constructor."""
super(QSR_QTC_BC_Simplified, self).__init__()
self._unique_id = "qtcbcs"
"""str: Unique identifier name of the QSR."""
self.qtc_type = "bc"
"""str: QTC specific type."""
self._all_possible_relations = tuple(self.return_all_possible_state_combinations()[0])
"""tuple: All possible relations of the QSR."""
def make_world_qsr_trace(self, world_trace, timestamps, qsr_params, req_params, **kwargs):
"""Compute the world QSR trace from the arguments.
:param world_trace: Input data.
:type world_trace: :class:`World_Trace <qsrlib_io.world_trace.World_Trace>`
:param timestamps: List of sorted timestamps of `world_trace`.
:type timestamps: list
:param qsr_params: QSR specific parameters passed in `dynamic_args`.
:type qsr_params: dict
:param req_params: Dynamic arguments passed with the request.
:type dynamic_args: dict
:param kwargs: kwargs arguments.
:return: Computed world QSR trace.
:rtype: :class:`World_QSR_Trace <qsrlib_io.world_qsr_trace.World_QSR_Trace>`
"""
ret = World_QSR_Trace(qsr_type=self._unique_id)
qtc_sequence = {}
for t, tp in zip(timestamps[1:], timestamps):
world_state_now = world_trace.trace[t]
world_state_previous = world_trace.trace[tp]
if set(world_state_now.objects.keys()) != set(world_state_previous.objects.keys()):
ret.put_empty_world_qsr_state(t)
continue # Objects have to be present in both timestamps
qsrs_for = self._process_qsrs_for(world_state_now.objects.keys(), req_params["dynamic_args"])
for o1_name, o2_name in qsrs_for:
between = str(o1_name) + "," + str(o2_name)
qtc = np.array([], dtype=int)
k = [world_state_previous.objects[o1_name].x,
world_state_previous.objects[o1_name].y,
world_state_now.objects[o1_name].x,
world_state_now.objects[o1_name].y]
l = [world_state_previous.objects[o2_name].x,
world_state_previous.objects[o2_name].y,
world_state_now.objects[o2_name].x,
world_state_now.objects[o2_name].y]
qtc = self._create_qtc_representation(
k,
l,
qsr_params["quantisation_factor"]
)
distance = self._get_euclidean_distance(
(world_state_now.objects[o1_name].x,
world_state_now.objects[o1_name].y),
(world_state_now.objects[o2_name].x,
world_state_now.objects[o2_name].y)
)
try:
qtc_sequence[between]["qtc"] = np.append(
qtc_sequence[between]["qtc"],
qtc
).reshape(-1,4)
qtc_sequence[between]["distances"] = np.append(
qtc_sequence[between]["distances"],
distance
)
except KeyError:
qtc_sequence[between] = {
"qtc": qtc,
"distances": np.array([distance])
}
for between, qtcbc in qtc_sequence.items():
qtcbc["qtc"] = self._create_bc_chain(qtcbc["qtc"], qtcbc["distances"], qsr_params["distance_threshold"])
if not qsr_params["no_collapse"]:
qtcbc["qtc"] = self._collapse_similar_states(qtcbc["qtc"])
if qsr_params["validate"]:
qtcbc["qtc"] = self._validate_qtc_sequence(qtcbc["qtc"])
for idx, q in enumerate(qtcbc["qtc"]):
qsr = QSR(
timestamp=idx+1,
between=between,
qsr=self.qtc_to_output_format(q)
)
ret.add_qsr(qsr, idx+1)
return ret
def _create_bc_chain(self, qtc, distances, distance_threshold):
"""
:param qtc:
:type qtc:
:param distances:
:type distances:
:param distance_threshold:
:type distance_threshold:
:return:
:rtype:
"""
ret = np.array([])
if len(qtc.shape) == 1:
qtc = [qtc]
for dist, state in zip(distances, qtc):
if dist > distance_threshold:
ret = np.append(ret, np.append(state[0:2],[np.nan,np.nan]), axis=0)
else:
ret = np.append(ret, state, axis=0)
return ret.reshape(-1,4)
def qtc_to_output_format(self, qtc):
"""Overwrite this for the different QTC variants to select only the parts from the QTCCS tuple that you would
like to return. Example for QTCBS: return `qtc[0:2]`.
:param qtc: Full QTCC tuple [q1,q2,q4,q5].
:type qtc: list or tuple
:return: {"qtcbcs": "q1,q2,q4,q5"}
:rtype: dict
"""
s = self.create_qtc_string(qtc) if not np.isnan(qtc[2]) else self.create_qtc_string(qtc[0:2])
return self._format_qsr(s)
def | (self, p, q):
"""Calculate the Euclidean distance between points `p` and `q`.
:param p: x,y coordinates.
:type p: tuple
:param q: x,y coordinates.
:type q: tuple
:return: Euclidean distance between `p` and `q`.
:rtype: float
"""
return np.sqrt(np.power((float(p[0])-float(q[0])),2)+np.power((float(p[1])-float(q[1])),2))
| _get_euclidean_distance | identifier_name |
qsr_qtc_bc_simplified.py | # -*- coding: utf-8 -*-
from __future__ import division
from qsrlib_qsrs.qsr_qtc_simplified_abstractclass import QSR_QTC_Simplified_Abstractclass
import numpy as np
from qsrlib_io.world_qsr_trace import *
class QSR_QTC_BC_Simplified(QSR_QTC_Simplified_Abstractclass):
"""QTCBC simplified relations.
Values of the abstract properties
* **_unique_id** = "qtcbcs"
* **_all_possible_relations** = ?
* **_dtype** = "points"
Some explanation about the QSR or better link to a separate webpage explaining it. Maybe a reference if it exists.
"""
def __init__(self):
"""Constructor."""
super(QSR_QTC_BC_Simplified, self).__init__()
self._unique_id = "qtcbcs"
"""str: Unique identifier name of the QSR."""
self.qtc_type = "bc"
"""str: QTC specific type."""
self._all_possible_relations = tuple(self.return_all_possible_state_combinations()[0])
"""tuple: All possible relations of the QSR."""
def make_world_qsr_trace(self, world_trace, timestamps, qsr_params, req_params, **kwargs):
"""Compute the world QSR trace from the arguments.
:param world_trace: Input data.
:type world_trace: :class:`World_Trace <qsrlib_io.world_trace.World_Trace>`
:param timestamps: List of sorted timestamps of `world_trace`.
:type timestamps: list
:param qsr_params: QSR specific parameters passed in `dynamic_args`.
:type qsr_params: dict
:param req_params: Dynamic arguments passed with the request.
:type dynamic_args: dict
:param kwargs: kwargs arguments.
:return: Computed world QSR trace.
:rtype: :class:`World_QSR_Trace <qsrlib_io.world_qsr_trace.World_QSR_Trace>`
"""
ret = World_QSR_Trace(qsr_type=self._unique_id)
qtc_sequence = {}
for t, tp in zip(timestamps[1:], timestamps):
world_state_now = world_trace.trace[t]
world_state_previous = world_trace.trace[tp]
if set(world_state_now.objects.keys()) != set(world_state_previous.objects.keys()):
ret.put_empty_world_qsr_state(t)
continue # Objects have to be present in both timestamps
qsrs_for = self._process_qsrs_for(world_state_now.objects.keys(), req_params["dynamic_args"])
for o1_name, o2_name in qsrs_for:
between = str(o1_name) + "," + str(o2_name)
qtc = np.array([], dtype=int)
k = [world_state_previous.objects[o1_name].x, | world_state_previous.objects[o1_name].y,
world_state_now.objects[o1_name].x,
world_state_now.objects[o1_name].y]
l = [world_state_previous.objects[o2_name].x,
world_state_previous.objects[o2_name].y,
world_state_now.objects[o2_name].x,
world_state_now.objects[o2_name].y]
qtc = self._create_qtc_representation(
k,
l,
qsr_params["quantisation_factor"]
)
distance = self._get_euclidean_distance(
(world_state_now.objects[o1_name].x,
world_state_now.objects[o1_name].y),
(world_state_now.objects[o2_name].x,
world_state_now.objects[o2_name].y)
)
try:
qtc_sequence[between]["qtc"] = np.append(
qtc_sequence[between]["qtc"],
qtc
).reshape(-1,4)
qtc_sequence[between]["distances"] = np.append(
qtc_sequence[between]["distances"],
distance
)
except KeyError:
qtc_sequence[between] = {
"qtc": qtc,
"distances": np.array([distance])
}
for between, qtcbc in qtc_sequence.items():
qtcbc["qtc"] = self._create_bc_chain(qtcbc["qtc"], qtcbc["distances"], qsr_params["distance_threshold"])
if not qsr_params["no_collapse"]:
qtcbc["qtc"] = self._collapse_similar_states(qtcbc["qtc"])
if qsr_params["validate"]:
qtcbc["qtc"] = self._validate_qtc_sequence(qtcbc["qtc"])
for idx, q in enumerate(qtcbc["qtc"]):
qsr = QSR(
timestamp=idx+1,
between=between,
qsr=self.qtc_to_output_format(q)
)
ret.add_qsr(qsr, idx+1)
return ret
def _create_bc_chain(self, qtc, distances, distance_threshold):
"""
:param qtc:
:type qtc:
:param distances:
:type distances:
:param distance_threshold:
:type distance_threshold:
:return:
:rtype:
"""
ret = np.array([])
if len(qtc.shape) == 1:
qtc = [qtc]
for dist, state in zip(distances, qtc):
if dist > distance_threshold:
ret = np.append(ret, np.append(state[0:2],[np.nan,np.nan]), axis=0)
else:
ret = np.append(ret, state, axis=0)
return ret.reshape(-1,4)
def qtc_to_output_format(self, qtc):
"""Overwrite this for the different QTC variants to select only the parts from the QTCCS tuple that you would
like to return. Example for QTCBS: return `qtc[0:2]`.
:param qtc: Full QTCC tuple [q1,q2,q4,q5].
:type qtc: list or tuple
:return: {"qtcbcs": "q1,q2,q4,q5"}
:rtype: dict
"""
s = self.create_qtc_string(qtc) if not np.isnan(qtc[2]) else self.create_qtc_string(qtc[0:2])
return self._format_qsr(s)
def _get_euclidean_distance(self, p, q):
"""Calculate the Euclidean distance between points `p` and `q`.
:param p: x,y coordinates.
:type p: tuple
:param q: x,y coordinates.
:type q: tuple
:return: Euclidean distance between `p` and `q`.
:rtype: float
"""
return np.sqrt(np.power((float(p[0])-float(q[0])),2)+np.power((float(p[1])-float(q[1])),2)) | random_line_split | |
qsr_qtc_bc_simplified.py | # -*- coding: utf-8 -*-
from __future__ import division
from qsrlib_qsrs.qsr_qtc_simplified_abstractclass import QSR_QTC_Simplified_Abstractclass
import numpy as np
from qsrlib_io.world_qsr_trace import *
class QSR_QTC_BC_Simplified(QSR_QTC_Simplified_Abstractclass):
"""QTCBC simplified relations.
Values of the abstract properties
* **_unique_id** = "qtcbcs"
* **_all_possible_relations** = ?
* **_dtype** = "points"
Some explanation about the QSR or better link to a separate webpage explaining it. Maybe a reference if it exists.
"""
def __init__(self):
"""Constructor."""
super(QSR_QTC_BC_Simplified, self).__init__()
self._unique_id = "qtcbcs"
"""str: Unique identifier name of the QSR."""
self.qtc_type = "bc"
"""str: QTC specific type."""
self._all_possible_relations = tuple(self.return_all_possible_state_combinations()[0])
"""tuple: All possible relations of the QSR."""
def make_world_qsr_trace(self, world_trace, timestamps, qsr_params, req_params, **kwargs):
"""Compute the world QSR trace from the arguments.
:param world_trace: Input data.
:type world_trace: :class:`World_Trace <qsrlib_io.world_trace.World_Trace>`
:param timestamps: List of sorted timestamps of `world_trace`.
:type timestamps: list
:param qsr_params: QSR specific parameters passed in `dynamic_args`.
:type qsr_params: dict
:param req_params: Dynamic arguments passed with the request.
:type dynamic_args: dict
:param kwargs: kwargs arguments.
:return: Computed world QSR trace.
:rtype: :class:`World_QSR_Trace <qsrlib_io.world_qsr_trace.World_QSR_Trace>`
"""
ret = World_QSR_Trace(qsr_type=self._unique_id)
qtc_sequence = {}
for t, tp in zip(timestamps[1:], timestamps):
world_state_now = world_trace.trace[t]
world_state_previous = world_trace.trace[tp]
if set(world_state_now.objects.keys()) != set(world_state_previous.objects.keys()):
ret.put_empty_world_qsr_state(t)
continue # Objects have to be present in both timestamps
qsrs_for = self._process_qsrs_for(world_state_now.objects.keys(), req_params["dynamic_args"])
for o1_name, o2_name in qsrs_for:
between = str(o1_name) + "," + str(o2_name)
qtc = np.array([], dtype=int)
k = [world_state_previous.objects[o1_name].x,
world_state_previous.objects[o1_name].y,
world_state_now.objects[o1_name].x,
world_state_now.objects[o1_name].y]
l = [world_state_previous.objects[o2_name].x,
world_state_previous.objects[o2_name].y,
world_state_now.objects[o2_name].x,
world_state_now.objects[o2_name].y]
qtc = self._create_qtc_representation(
k,
l,
qsr_params["quantisation_factor"]
)
distance = self._get_euclidean_distance(
(world_state_now.objects[o1_name].x,
world_state_now.objects[o1_name].y),
(world_state_now.objects[o2_name].x,
world_state_now.objects[o2_name].y)
)
try:
qtc_sequence[between]["qtc"] = np.append(
qtc_sequence[between]["qtc"],
qtc
).reshape(-1,4)
qtc_sequence[between]["distances"] = np.append(
qtc_sequence[between]["distances"],
distance
)
except KeyError:
qtc_sequence[between] = {
"qtc": qtc,
"distances": np.array([distance])
}
for between, qtcbc in qtc_sequence.items():
qtcbc["qtc"] = self._create_bc_chain(qtcbc["qtc"], qtcbc["distances"], qsr_params["distance_threshold"])
if not qsr_params["no_collapse"]:
qtcbc["qtc"] = self._collapse_similar_states(qtcbc["qtc"])
if qsr_params["validate"]:
qtcbc["qtc"] = self._validate_qtc_sequence(qtcbc["qtc"])
for idx, q in enumerate(qtcbc["qtc"]):
qsr = QSR(
timestamp=idx+1,
between=between,
qsr=self.qtc_to_output_format(q)
)
ret.add_qsr(qsr, idx+1)
return ret
def _create_bc_chain(self, qtc, distances, distance_threshold):
"""
:param qtc:
:type qtc:
:param distances:
:type distances:
:param distance_threshold:
:type distance_threshold:
:return:
:rtype:
"""
ret = np.array([])
if len(qtc.shape) == 1:
qtc = [qtc]
for dist, state in zip(distances, qtc):
if dist > distance_threshold:
ret = np.append(ret, np.append(state[0:2],[np.nan,np.nan]), axis=0)
else:
|
return ret.reshape(-1,4)
def qtc_to_output_format(self, qtc):
"""Overwrite this for the different QTC variants to select only the parts from the QTCCS tuple that you would
like to return. Example for QTCBS: return `qtc[0:2]`.
:param qtc: Full QTCC tuple [q1,q2,q4,q5].
:type qtc: list or tuple
:return: {"qtcbcs": "q1,q2,q4,q5"}
:rtype: dict
"""
s = self.create_qtc_string(qtc) if not np.isnan(qtc[2]) else self.create_qtc_string(qtc[0:2])
return self._format_qsr(s)
def _get_euclidean_distance(self, p, q):
"""Calculate the Euclidean distance between points `p` and `q`.
:param p: x,y coordinates.
:type p: tuple
:param q: x,y coordinates.
:type q: tuple
:return: Euclidean distance between `p` and `q`.
:rtype: float
"""
return np.sqrt(np.power((float(p[0])-float(q[0])),2)+np.power((float(p[1])-float(q[1])),2))
| ret = np.append(ret, state, axis=0) | conditional_block |
content.js | (function (w) {
var MIN_LENGTH = 4;
if (w.self != w.top) {
return;
}
function | (config) {
if ((config.enabled && config.disallowed && config.disallowed.indexOf(w.location.hostname)!== -1) ||
(!config.enabled && (!config.allowed || config.allowed.indexOf(w.location.hostname)=== -1))) return;
var freqRus = {}, freqEng = {};
var rus = config.charsets.indexOf("rus")!==-1;
var eng = config.charsets.indexOf("eng")!==-1;
var maxFreq = (config.status == 3 ? 1:2);
var patternRus = /[^а-яё]/g
var patternEng = /[^a-z]/g
var patternBoth = /[^a-zа-яё]/g
if (!rus && !eng) return;
if (rus && !eng) patternCurrent = patternRus;
else if (!rus && eng) patternCurrent = patternEng;
else patternCurrent = patternBoth;
function collect(text, frequences, pattern) {
var words = text.split(/\s+/);
for (var j = 0; j < words.length; j++) {
var current = words[j].toLowerCase().replace(pattern,'');
if (!current || current.length < MIN_LENGTH) continue;
if (!frequences[current]) frequences[current] = 1;
else frequences[current] += 1;
}
return frequences;
}
function remove(o, max) {
var n = {};
for (var key in o) if (o[key] <= max) n[key] = o[key];
return n;
}
function removeUseless() {
freqRus = remove(freqRus, maxFreq);
freqEng = remove(freqEng, maxFreq);
}
function stat(element) {
if (/(script|style)/i.test(element.tagName)) return;
if (element.nodeType === Node.ELEMENT_NODE && element.childNodes.length > 0)
for (var i = 0; i < element.childNodes.length; i++)
stat(element.childNodes[i]);
if (element.nodeType === Node.TEXT_NODE && (/\S/.test(element.nodeValue))) {
if (rus) collect(element.nodeValue, freqRus, patternRus);
if (eng) collect(element.nodeValue, freqEng, patternEng);
}
}
function newNode(code) { // code here is total count of the word, only 1 and 2 are used for Russian alphabet and 1+10 and 2+10 for English
var node = w.document.createElement(config.status == 3 ? 'strong' : 'span');
node.className = 'nlc47';
if (config.status == 2 && code !== 1 && code !== 11) node.style.color = '#999';
if (config.status == 2 || (config.status == 1 && (code === 1 || code === 11))) node.style.fontWeight = '700';
if (config.status == 1) node.style.color = code > 2 ? '#449' : '#494';
return node;
}
function markup(element, initial, pattern) {
if (/(script|style)/i.test(element.tagName)) return;
if (element.nodeType === Node.ELEMENT_NODE && element.childNodes.length > 0) {
var freq = {};
for (var i = 0; i < element.childNodes.length; i++) {
freq = markup(element.childNodes[i], freq, pattern);
}
if (freq && freq.length !== 0) {
var efreq = [];
var total = 0;
for (var key in freq) {
if (freqRus[key]) efreq.push([key, freqRus[key]]);
if (freqEng[key]) efreq.push([key, freqEng[key] + 10]);
}
efreq.sort(function(a, b) {return a[0].length - b[0].length});
var max = element.childNodes.length*efreq.length*2;
for (var i = 0; i < element.childNodes.length; i++) {
if (total++ > max) break;
if (element.childNodes[i].nodeType === Node.TEXT_NODE) {
var minPos = -1, minJ = -1;
for (var j in efreq) {
key = efreq[j][0];
var pos = element.childNodes[i].nodeValue.toLowerCase().indexOf(key);
if (pos >= 0 && (minJ === -1 || minPos>pos)) { minPos = pos; minJ = j; }
}
if (minPos !== -1) {
key = efreq[minJ][0]; val = efreq[minJ][1];
var spannode = newNode(val);
var middlebit = element.childNodes[i].splitText(minPos);
var endbit = middlebit.splitText(key.length);
var middleclone = middlebit.cloneNode(true);
spannode.appendChild(middleclone);
element.replaceChild(spannode, middlebit);
}
}
}
}
}
if (element.nodeType === Node.TEXT_NODE && (/\S/.test(element.nodeValue))) {
return collect(element.nodeValue, initial, pattern);
}
return initial;
}
stat(w.document.getElementsByTagName('html')[0]);
removeUseless();
markup(w.document.getElementsByTagName('html')[0], {}, patternCurrent);
}
function clean() {
var affected = w.document.querySelectorAll(".nlc47");
if (!affected.length) return;
for (var i=0;i<affected.length;i++) {
affected[i].outerHTML = affected[i].innerHTML;
}
}
function loadAndColorize() {
chrome.storage.sync.get(['status', 'enabled', 'charsets', 'allowed', 'disallowed'], colorize);
}
chrome.runtime.onMessage.addListener(function(msg, sender, response) {
if (msg.action && msg.action == "refresh") {clean(); loadAndColorize(); }
if (msg.action && msg.action == "getHost") response({host:w.location.hostname});
});
loadAndColorize();
})(window); | colorize | identifier_name |
content.js | (function (w) {
var MIN_LENGTH = 4;
if (w.self != w.top) {
return;
}
function colorize(config) {
if ((config.enabled && config.disallowed && config.disallowed.indexOf(w.location.hostname)!== -1) ||
(!config.enabled && (!config.allowed || config.allowed.indexOf(w.location.hostname)=== -1))) return;
var freqRus = {}, freqEng = {};
var rus = config.charsets.indexOf("rus")!==-1;
var eng = config.charsets.indexOf("eng")!==-1;
var maxFreq = (config.status == 3 ? 1:2);
var patternRus = /[^а-яё]/g
var patternEng = /[^a-z]/g
var patternBoth = /[^a-zа-яё]/g
if (!rus && !eng) return;
if (rus && !eng) patternCurrent = patternRus;
else if (!rus && eng) patternCurrent = patternEng;
else patternCurrent = patternBoth;
function collect(text, frequences, pattern) {
var words = text.split(/\s+/);
for (var j = 0; j < words.length; j++) {
var current = words[j].toLowerCase().replace(pattern,'');
if (!current || current.length < MIN_LENGTH) continue;
if (!frequences[current]) frequences[current] = 1;
else frequences[current] += 1;
}
return frequences;
}
function remove(o, max) {
var n = {};
for (var key in o) if (o[key] <= max) n[key] = o[key];
return n;
}
function removeUseless() {
freqRus = remove(freqRus, maxFreq);
freqEng = remove(freqEng, maxFreq);
}
function stat(element) {
if (/(script|style)/i.test(element.tagName)) return;
if (element.nodeType === Node.ELEMENT_NODE && element.childNodes.length > 0)
for (var i = 0; i < element.childNodes.length; i++)
stat(element.childNodes[i]);
if (element.nodeType === Node.TEXT_NODE && (/\S/.test(element.nodeValue))) {
if (rus) collect(element.nodeValue, freqRus, patternRus);
if (eng) collect(element.nodeValue, freqEng, patternEng);
}
}
function newNode(code) { // code here is total count of the word, only 1 and 2 are used for Russian alphabet and 1+10 and 2+10 for English
var node = w.document.createElement(config.status == 3 ? 'strong' : 'span');
node.className = 'nlc47';
if (config.status == 2 && code !== 1 && code !== 11) node.style.color = '#999';
if (config.status == 2 || (config.status == 1 && (code === 1 || code === 11))) node.style.fontWeight = '700';
if (config.status == 1) node.style.color = code > 2 ? '#449' : '#494';
return node;
}
function markup(element, initial, pattern) {
if (/(script|style)/i.test(element.tagName)) return;
if (element.nodeType === Node.ELEMENT_NODE && element.childNodes.length > 0) {
var freq = {};
for (var i = 0; i < element.childNodes.length; i++) {
freq = markup(element.childNodes[i], freq, pattern);
}
if (freq && freq.length !== 0) {
var efreq = [];
var total = 0;
for (var key in freq) {
if (freqRus[key]) efreq.push([key, freqRus[key]]);
if (freqEng[key]) efreq.push([key, freqEng[key] + 10]);
}
efreq.sort(function(a, b) {return a[0].length - b[0].length});
var max = element.childNodes.length*efreq.length*2;
for (var i = 0; i < element.childNodes.length; i++) {
if (total++ > max) break;
if (element.childNodes[i].nodeType === Node.TEXT_NODE) {
var minPos = -1, minJ = -1;
for (var j in efreq) {
key = efreq[j][0];
var pos = element.childNodes[i].nodeValue.toLowerCase().indexOf(key);
if (pos >= 0 && (minJ === -1 || minPos>pos)) { minPos = pos; minJ = j; }
}
if (minPos !== -1) {
key = efreq[minJ][0]; val = efreq[minJ][1];
var spannode = newNode(val);
var middlebit = element.childNodes[i].splitText(minPos);
var endbit = middlebit.splitText(key.length);
var middleclone = middlebit.cloneNode(true);
spannode.appendChild(middleclone);
element.replaceChild(spannode, middlebit);
}
}
}
}
}
if (element.nodeType === Node.TEXT_NODE && (/\S/.test(element.nodeValue))) {
| return initial;
}
stat(w.document.getElementsByTagName('html')[0]);
removeUseless();
markup(w.document.getElementsByTagName('html')[0], {}, patternCurrent);
}
function clean() {
var affected = w.document.querySelectorAll(".nlc47");
if (!affected.length) return;
for (var i=0;i<affected.length;i++) {
affected[i].outerHTML = affected[i].innerHTML;
}
}
function loadAndColorize() {
chrome.storage.sync.get(['status', 'enabled', 'charsets', 'allowed', 'disallowed'], colorize);
}
chrome.runtime.onMessage.addListener(function(msg, sender, response) {
if (msg.action && msg.action == "refresh") {clean(); loadAndColorize(); }
if (msg.action && msg.action == "getHost") response({host:w.location.hostname});
});
loadAndColorize();
})(window); | return collect(element.nodeValue, initial, pattern);
}
| conditional_block |
content.js | (function (w) {
var MIN_LENGTH = 4;
if (w.self != w.top) {
return;
}
function colorize(config) {
if ((config.enabled && config.disallowed && config.disallowed.indexOf(w.location.hostname)!== -1) ||
(!config.enabled && (!config.allowed || config.allowed.indexOf(w.location.hostname)=== -1))) return;
var freqRus = {}, freqEng = {};
var rus = config.charsets.indexOf("rus")!==-1;
var eng = config.charsets.indexOf("eng")!==-1;
var maxFreq = (config.status == 3 ? 1:2);
var patternRus = /[^а-яё]/g
var patternEng = /[^a-z]/g
var patternBoth = /[^a-zа-яё]/g
if (!rus && !eng) return;
if (rus && !eng) patternCurrent = patternRus;
else if (!rus && eng) patternCurrent = patternEng;
else patternCurrent = patternBoth;
function collect(text, frequences, pattern) {
var words = text.split(/\s+/);
for (var j = 0; j < words.length; j++) {
var current = words[j].toLowerCase().replace(pattern,'');
if (!current || current.length < MIN_LENGTH) continue;
if (!frequences[current]) frequences[current] = 1;
else frequences[current] += 1;
}
return frequences;
}
function remove(o, max) {
var n = {};
for (var key in o) if (o[key] <= max) n[key] = o[key];
return n;
}
function removeUseless() {
freqRus = remove(freqRus, maxFreq);
freqEng = remove(freqEng, maxFreq);
}
function stat(element) {
if (/(script|style)/i.test(element.tagName)) return;
if (element.nodeType === Node.ELEMENT_NODE && element.childNodes.length > 0)
for (var i = 0; i < element.childNodes.length; i++)
stat(element.childNodes[i]);
if (element.nodeType === Node.TEXT_NODE && (/\S/.test(element.nodeValue))) {
if (rus) collect(element.nodeValue, freqRus, patternRus);
if (eng) collect(element.nodeValue, freqEng, patternEng);
}
}
function newNode(code) { // code here is total count of the word, only 1 and 2 are used for Russian alphabet and 1+10 and 2+10 for English
var node = w.document.createElement(config.status == 3 ? 'strong' : 'span');
node.className = 'nlc47';
if (config.status == 2 && code !== 1 && code !== 11) node.style.color = '#999';
if (config.status == 2 || (config.status == 1 && (code === 1 || code === 11))) node.style.fontWeight = '700';
if (config.status == 1) node.style.color = code > 2 ? '#449' : '#494';
return node;
}
function markup(element, initial, pattern) {
if (/(script|style)/i.test(element.tagName)) return;
if (element.nodeType === Node.ELEMENT_NODE && element.childNodes.length > 0) {
var freq = {};
for (var i = 0; i < element.childNodes.length; i++) {
freq = markup(element.childNodes[i], freq, pattern);
}
if (freq && freq.length !== 0) {
var efreq = [];
var total = 0;
for (var key in freq) {
if (freqRus[key]) efreq.push([key, freqRus[key]]);
if (freqEng[key]) efreq.push([key, freqEng[key] + 10]);
}
efreq.sort(function(a, b) {return a[0].length - b[0].length});
var max = element.childNodes.length*efreq.length*2;
for (var i = 0; i < element.childNodes.length; i++) {
if (total++ > max) break;
if (element.childNodes[i].nodeType === Node.TEXT_NODE) {
var minPos = -1, minJ = -1;
for (var j in efreq) {
key = efreq[j][0];
var pos = element.childNodes[i].nodeValue.toLowerCase().indexOf(key);
if (pos >= 0 && (minJ === -1 || minPos>pos)) { minPos = pos; minJ = j; }
}
if (minPos !== -1) {
key = efreq[minJ][0]; val = efreq[minJ][1];
var spannode = newNode(val); | }
}
}
}
}
if (element.nodeType === Node.TEXT_NODE && (/\S/.test(element.nodeValue))) {
return collect(element.nodeValue, initial, pattern);
}
return initial;
}
stat(w.document.getElementsByTagName('html')[0]);
removeUseless();
markup(w.document.getElementsByTagName('html')[0], {}, patternCurrent);
}
function clean() {
var affected = w.document.querySelectorAll(".nlc47");
if (!affected.length) return;
for (var i=0;i<affected.length;i++) {
affected[i].outerHTML = affected[i].innerHTML;
}
}
function loadAndColorize() {
chrome.storage.sync.get(['status', 'enabled', 'charsets', 'allowed', 'disallowed'], colorize);
}
chrome.runtime.onMessage.addListener(function(msg, sender, response) {
if (msg.action && msg.action == "refresh") {clean(); loadAndColorize(); }
if (msg.action && msg.action == "getHost") response({host:w.location.hostname});
});
loadAndColorize();
})(window); | var middlebit = element.childNodes[i].splitText(minPos);
var endbit = middlebit.splitText(key.length);
var middleclone = middlebit.cloneNode(true);
spannode.appendChild(middleclone);
element.replaceChild(spannode, middlebit); | random_line_split |
content.js | (function (w) {
var MIN_LENGTH = 4;
if (w.self != w.top) {
return;
}
function colorize(config) {
if ((config.enabled && config.disallowed && config.disallowed.indexOf(w.location.hostname)!== -1) ||
(!config.enabled && (!config.allowed || config.allowed.indexOf(w.location.hostname)=== -1))) return;
var freqRus = {}, freqEng = {};
var rus = config.charsets.indexOf("rus")!==-1;
var eng = config.charsets.indexOf("eng")!==-1;
var maxFreq = (config.status == 3 ? 1:2);
var patternRus = /[^а-яё]/g
var patternEng = /[^a-z]/g
var patternBoth = /[^a-zа-яё]/g
if (!rus && !eng) return;
if (rus && !eng) patternCurrent = patternRus;
else if (!rus && eng) patternCurrent = patternEng;
else patternCurrent = patternBoth;
function collect(text, frequences, pattern) {
var words = text.split(/\s+/);
for (var j = 0; j < words.length; j++) {
var current = words[j].toLowerCase().replace(pattern,'');
if (!current || current.length < MIN_LENGTH) continue;
if (!frequences[current]) frequences[current] = 1;
else frequences[current] += 1;
}
return frequences;
}
function remove(o, max) {
var n = {};
for (var key in o) if (o[key] <= max) n[key] = o[key];
return n;
}
function removeUseless() {
freqRus = remove(freqRus, maxFreq);
freqEng = remove(freqEng, maxFreq);
}
function stat(element) {
if (/(script|style)/i.test(element.tagName)) return;
if (element.nodeType === Node.ELEMENT_NODE && element.childNodes.length > 0)
for (var i = 0; i < element.childNodes.length; i++)
stat(element.childNodes[i]);
if (element.nodeType === Node.TEXT_NODE && (/\S/.test(element.nodeValue))) {
if (rus) collect(element.nodeValue, freqRus, patternRus);
if (eng) collect(element.nodeValue, freqEng, patternEng);
}
}
function newNode(code) { // code here is total count of the word, only 1 and 2 are used for Russian alphabet and 1+10 and 2+10 for English
var node = w.document.createElement(config.status == 3 ? 'strong' : 'span');
node.className = 'nlc47';
if (config.status == 2 && code !== 1 && code !== 11) node.style.color = '#999';
if (config.status == 2 || (config.status == 1 && (code === 1 || code === 11))) node.style.fontWeight = '700';
if (config.status == 1) node.style.color = code > 2 ? '#449' : '#494';
return node;
}
function markup(element, initial, pattern) {
i | t(w.document.getElementsByTagName('html')[0]);
removeUseless();
markup(w.document.getElementsByTagName('html')[0], {}, patternCurrent);
}
function clean() {
var affected = w.document.querySelectorAll(".nlc47");
if (!affected.length) return;
for (var i=0;i<affected.length;i++) {
affected[i].outerHTML = affected[i].innerHTML;
}
}
function loadAndColorize() {
chrome.storage.sync.get(['status', 'enabled', 'charsets', 'allowed', 'disallowed'], colorize);
}
chrome.runtime.onMessage.addListener(function(msg, sender, response) {
if (msg.action && msg.action == "refresh") {clean(); loadAndColorize(); }
if (msg.action && msg.action == "getHost") response({host:w.location.hostname});
});
loadAndColorize();
})(window); | f (/(script|style)/i.test(element.tagName)) return;
if (element.nodeType === Node.ELEMENT_NODE && element.childNodes.length > 0) {
var freq = {};
for (var i = 0; i < element.childNodes.length; i++) {
freq = markup(element.childNodes[i], freq, pattern);
}
if (freq && freq.length !== 0) {
var efreq = [];
var total = 0;
for (var key in freq) {
if (freqRus[key]) efreq.push([key, freqRus[key]]);
if (freqEng[key]) efreq.push([key, freqEng[key] + 10]);
}
efreq.sort(function(a, b) {return a[0].length - b[0].length});
var max = element.childNodes.length*efreq.length*2;
for (var i = 0; i < element.childNodes.length; i++) {
if (total++ > max) break;
if (element.childNodes[i].nodeType === Node.TEXT_NODE) {
var minPos = -1, minJ = -1;
for (var j in efreq) {
key = efreq[j][0];
var pos = element.childNodes[i].nodeValue.toLowerCase().indexOf(key);
if (pos >= 0 && (minJ === -1 || minPos>pos)) { minPos = pos; minJ = j; }
}
if (minPos !== -1) {
key = efreq[minJ][0]; val = efreq[minJ][1];
var spannode = newNode(val);
var middlebit = element.childNodes[i].splitText(minPos);
var endbit = middlebit.splitText(key.length);
var middleclone = middlebit.cloneNode(true);
spannode.appendChild(middleclone);
element.replaceChild(spannode, middlebit);
}
}
}
}
}
if (element.nodeType === Node.TEXT_NODE && (/\S/.test(element.nodeValue))) {
return collect(element.nodeValue, initial, pattern);
}
return initial;
}
sta | identifier_body |
CardsService.ts | import _ from 'lodash';
import {setFilters, IQueryParams} from 'apps/search/services/SearchService';
import {PUBLISHED_STATES} from 'apps/archive/constants';
import {ITEM_STATE} from 'apps/archive/constants';
import {
DESK_OUTPUT,
SENT_OUTPUT,
SCHEDULED_OUTPUT,
} from 'apps/desks/constants';
import {appConfig} from 'appConfig';
import {IMonitoringFilter} from 'superdesk-api';
export interface ICard {
_id: string;
deskId: string;
fileType: string; // contains JSON array
contentProfile: string;
customFilters: string;
header: string; // example: "Politic Desk"
subheader: string; // example: "Working Stage"
type: 'search'
| 'spike-personal'
| 'personal'
| 'stage'
| 'spike'
| 'highlights'
| 'deskOutput'
| 'sentDeskOutput'
| 'scheduledDeskOutput'
| string;
search?: {
filter?: {
query?: {
repo?: any;
q?: any;
};
};
};
max_items?: number;
singleViewType?: 'desk' | 'stage' | any;
query: any;
sent?: boolean;
}
CardsService.$inject = ['search', 'session', 'desks', '$location'];
export function CardsService(search, session, desks, $location) {
this.criteria = getCriteria;
this.shouldUpdate = shouldUpdate;
function | (card: ICard): IQueryParams {
let params: IQueryParams = {};
if (card.type === 'search' && card.search && card.search.filter.query) {
angular.copy(card.search.filter.query, params);
if (card.query) {
if (card.search.filter.query.q) {
params.q = '(' + card.query + ') ' + card.search.filter.query.q;
} else {
params.q = '(' + card.query + ') ';
}
}
} else {
params.q = card.query;
}
if (card.type === 'spike' || card.type === 'spike-personal') {
params.spike = 'only';
} else if (card.type === 'personal' && card.sent) {
params.spike = 'include';
}
return params;
}
function filterQueryByCardType(query, queryParam, card: ICard) {
let deskId;
switch (card.type) {
case 'search':
break;
case 'spike-personal':
case 'personal':
if (card.sent) {
query.filter({bool: {
must: [
{term: {original_creator: session.identity._id}},
{exists: {field: 'task.desk'}},
],
}});
} else {
query.filter({bool: {
must: {term: {original_creator: session.identity._id}},
must_not: {exists: {field: 'task.desk'}},
}});
}
break;
case 'spike':
query.filter({term: {'task.desk': card._id}});
break;
case 'highlights':
query.filter({and: [
{term: {highlights: queryParam.highlight}},
]});
break;
case DESK_OUTPUT:
filterQueryByDeskType(query, card);
break;
case SENT_OUTPUT:
deskId = card._id.substring(0, card._id.indexOf(':'));
query.filter({bool: {
filter: {term: {'task.desk_history': deskId}},
must_not: {term: {'task.desk': deskId}},
}});
break;
case SCHEDULED_OUTPUT:
deskId = card._id.substring(0, card._id.indexOf(':'));
query.filter({and: [
{term: {'task.desk': deskId}},
{term: {state: 'scheduled'}},
]});
break;
default:
if (!_.isNil(card.singleViewType) && card.singleViewType === 'desk') {
query.filter({term: {'task.desk': card.deskId}});
} else {
query.filter({term: {'task.stage': card._id}});
}
break;
}
}
function filterQueryByDeskType(query, card: ICard) {
var deskId = card._id.substring(0, card._id.indexOf(':'));
var desk = desks.deskLookup ? desks.deskLookup[deskId] : null;
var states = PUBLISHED_STATES;
if (appConfig.monitoring != null && appConfig.monitoring.scheduled) {
states = PUBLISHED_STATES.filter((state) => state !== ITEM_STATE.SCHEDULED);
}
if (desk) {
const must: Array<{}> = [
{term: {'task.desk': deskId}},
{terms: {state: states}},
];
if (desk.desk_type === 'authoring') {
query.filter({bool: {should: [
{term: {'task.last_authoring_desk': deskId}},
{bool: {must}},
]}});
} else if (desk.desk_type === 'production') {
query.filter({bool: {must}});
}
}
if (appConfig.features.nestedItemsInOutputStage) {
query.setOption('hidePreviousVersions', true);
}
}
function filterQueryByCardFileType(query, card: ICard) {
if (card.fileType) {
var termsHighlightsPackage = {and: [
{bool: {must: {exists: {field: 'highlight'}}}},
{term: {type: 'composite'}},
]};
var termsFileType: any = {terms: {type: JSON.parse(card.fileType)}};
// Normal package
if (_.includes(JSON.parse(card.fileType), 'composite')) {
termsFileType = {and: [
{bool: {must_not: {exists: {field: 'highlight'}}}},
{terms: {type: JSON.parse(card.fileType)}},
]};
}
if (_.includes(JSON.parse(card.fileType), 'highlight-pack')) {
query.filter({or: [
termsHighlightsPackage,
termsFileType,
]});
} else {
query.filter(termsFileType);
}
}
}
function filterQueryByContentProfile(query, card: ICard) {
if (card.contentProfile) {
query.filter({terms: {profile: JSON.parse(card.contentProfile)}});
}
}
function filterQueryByCustomQuery(query, card: ICard) {
if (card.customFilters == null) {
return;
}
var items: {[key: string]: IMonitoringFilter} = JSON.parse(card.customFilters);
const terms = Object.values(items)
.reduce((obj1, obj2) => Object.assign(obj1, obj2.query), {});
Object.keys(terms).forEach((key) => {
query.filter({terms: {[key]: terms[key]}});
});
}
/**
* Get items criteria for given card
*
* Card can be stage/personal/saved search.
* There can be also extra string search query
*
* @param {Object} card
* @param {string} queryString
*/
function getCriteria(card: ICard, queryString?: any, queryParam?: any) {
var params = getCriteriaParams(card);
var query = search.query(setFilters(params));
var criteria: any = {es_highlight: card.query ? search.getElasticHighlight() : 0};
filterQueryByCardType(query, queryParam, card);
filterQueryByContentProfile(query, card);
filterQueryByCardFileType(query, card);
filterQueryByCustomQuery(query, card);
if (queryString) {
query.filter({query: {query_string: {query: queryString, lenient: true}}});
criteria.es_highlight = search.getElasticHighlight();
}
criteria.source = query.getCriteria();
if (card.type === 'search' && card.search && card.search.filter.query.repo) {
criteria.repo = card.search.filter.query.repo;
} else if (desks.isPublishType(card.type)) {
criteria.repo = 'archive,published';
if (card.type === 'deskOutput') {
query.filter({not: {term: {state: 'unpublished'}}});
}
}
criteria.source.from = 0;
criteria.source.size = card.max_items || 25;
return criteria;
}
function shouldUpdate(card: ICard, data) {
switch (card.type) {
case 'stage':
// refresh stage if it matches updated stage
return data.stages && !!data.stages[card._id];
case 'personal':
return data.user === session.identity._id;
case DESK_OUTPUT:
case SENT_OUTPUT:
case SCHEDULED_OUTPUT:
var deskId = card._id.substring(0, card._id.indexOf(':'));
if (deskId) {
return data.desks && !!data.desks[deskId];
}
return false;
default:
// no way to determine if item should be visible, refresh
return true;
}
}
}
| getCriteriaParams | identifier_name |
CardsService.ts | import _ from 'lodash';
import {setFilters, IQueryParams} from 'apps/search/services/SearchService';
import {PUBLISHED_STATES} from 'apps/archive/constants';
import {ITEM_STATE} from 'apps/archive/constants';
import {
DESK_OUTPUT,
SENT_OUTPUT,
SCHEDULED_OUTPUT,
} from 'apps/desks/constants';
import {appConfig} from 'appConfig';
import {IMonitoringFilter} from 'superdesk-api';
export interface ICard {
_id: string;
deskId: string;
fileType: string; // contains JSON array
contentProfile: string;
customFilters: string;
header: string; // example: "Politic Desk"
subheader: string; // example: "Working Stage"
type: 'search'
| 'spike-personal'
| 'personal'
| 'stage'
| 'spike'
| 'highlights'
| 'deskOutput'
| 'sentDeskOutput'
| 'scheduledDeskOutput'
| string;
search?: {
filter?: {
query?: {
repo?: any;
q?: any;
};
};
};
max_items?: number;
singleViewType?: 'desk' | 'stage' | any;
query: any;
sent?: boolean;
}
CardsService.$inject = ['search', 'session', 'desks', '$location'];
export function CardsService(search, session, desks, $location) {
this.criteria = getCriteria;
this.shouldUpdate = shouldUpdate;
function getCriteriaParams(card: ICard): IQueryParams {
let params: IQueryParams = {};
if (card.type === 'search' && card.search && card.search.filter.query) {
angular.copy(card.search.filter.query, params);
if (card.query) {
if (card.search.filter.query.q) {
params.q = '(' + card.query + ') ' + card.search.filter.query.q;
} else {
params.q = '(' + card.query + ') ';
}
}
} else {
params.q = card.query;
}
if (card.type === 'spike' || card.type === 'spike-personal') {
params.spike = 'only';
} else if (card.type === 'personal' && card.sent) {
params.spike = 'include';
}
return params;
}
function filterQueryByCardType(query, queryParam, card: ICard) {
let deskId;
switch (card.type) {
case 'search':
break;
case 'spike-personal':
case 'personal':
if (card.sent) {
query.filter({bool: {
must: [
{term: {original_creator: session.identity._id}},
{exists: {field: 'task.desk'}},
],
}});
} else {
query.filter({bool: {
must: {term: {original_creator: session.identity._id}},
must_not: {exists: {field: 'task.desk'}},
}});
}
break;
case 'spike':
query.filter({term: {'task.desk': card._id}});
break;
case 'highlights':
query.filter({and: [
{term: {highlights: queryParam.highlight}},
]});
break;
case DESK_OUTPUT: | case SENT_OUTPUT:
deskId = card._id.substring(0, card._id.indexOf(':'));
query.filter({bool: {
filter: {term: {'task.desk_history': deskId}},
must_not: {term: {'task.desk': deskId}},
}});
break;
case SCHEDULED_OUTPUT:
deskId = card._id.substring(0, card._id.indexOf(':'));
query.filter({and: [
{term: {'task.desk': deskId}},
{term: {state: 'scheduled'}},
]});
break;
default:
if (!_.isNil(card.singleViewType) && card.singleViewType === 'desk') {
query.filter({term: {'task.desk': card.deskId}});
} else {
query.filter({term: {'task.stage': card._id}});
}
break;
}
}
function filterQueryByDeskType(query, card: ICard) {
var deskId = card._id.substring(0, card._id.indexOf(':'));
var desk = desks.deskLookup ? desks.deskLookup[deskId] : null;
var states = PUBLISHED_STATES;
if (appConfig.monitoring != null && appConfig.monitoring.scheduled) {
states = PUBLISHED_STATES.filter((state) => state !== ITEM_STATE.SCHEDULED);
}
if (desk) {
const must: Array<{}> = [
{term: {'task.desk': deskId}},
{terms: {state: states}},
];
if (desk.desk_type === 'authoring') {
query.filter({bool: {should: [
{term: {'task.last_authoring_desk': deskId}},
{bool: {must}},
]}});
} else if (desk.desk_type === 'production') {
query.filter({bool: {must}});
}
}
if (appConfig.features.nestedItemsInOutputStage) {
query.setOption('hidePreviousVersions', true);
}
}
function filterQueryByCardFileType(query, card: ICard) {
if (card.fileType) {
var termsHighlightsPackage = {and: [
{bool: {must: {exists: {field: 'highlight'}}}},
{term: {type: 'composite'}},
]};
var termsFileType: any = {terms: {type: JSON.parse(card.fileType)}};
// Normal package
if (_.includes(JSON.parse(card.fileType), 'composite')) {
termsFileType = {and: [
{bool: {must_not: {exists: {field: 'highlight'}}}},
{terms: {type: JSON.parse(card.fileType)}},
]};
}
if (_.includes(JSON.parse(card.fileType), 'highlight-pack')) {
query.filter({or: [
termsHighlightsPackage,
termsFileType,
]});
} else {
query.filter(termsFileType);
}
}
}
function filterQueryByContentProfile(query, card: ICard) {
if (card.contentProfile) {
query.filter({terms: {profile: JSON.parse(card.contentProfile)}});
}
}
function filterQueryByCustomQuery(query, card: ICard) {
if (card.customFilters == null) {
return;
}
var items: {[key: string]: IMonitoringFilter} = JSON.parse(card.customFilters);
const terms = Object.values(items)
.reduce((obj1, obj2) => Object.assign(obj1, obj2.query), {});
Object.keys(terms).forEach((key) => {
query.filter({terms: {[key]: terms[key]}});
});
}
/**
* Get items criteria for given card
*
* Card can be stage/personal/saved search.
* There can be also extra string search query
*
* @param {Object} card
* @param {string} queryString
*/
function getCriteria(card: ICard, queryString?: any, queryParam?: any) {
var params = getCriteriaParams(card);
var query = search.query(setFilters(params));
var criteria: any = {es_highlight: card.query ? search.getElasticHighlight() : 0};
filterQueryByCardType(query, queryParam, card);
filterQueryByContentProfile(query, card);
filterQueryByCardFileType(query, card);
filterQueryByCustomQuery(query, card);
if (queryString) {
query.filter({query: {query_string: {query: queryString, lenient: true}}});
criteria.es_highlight = search.getElasticHighlight();
}
criteria.source = query.getCriteria();
if (card.type === 'search' && card.search && card.search.filter.query.repo) {
criteria.repo = card.search.filter.query.repo;
} else if (desks.isPublishType(card.type)) {
criteria.repo = 'archive,published';
if (card.type === 'deskOutput') {
query.filter({not: {term: {state: 'unpublished'}}});
}
}
criteria.source.from = 0;
criteria.source.size = card.max_items || 25;
return criteria;
}
function shouldUpdate(card: ICard, data) {
switch (card.type) {
case 'stage':
// refresh stage if it matches updated stage
return data.stages && !!data.stages[card._id];
case 'personal':
return data.user === session.identity._id;
case DESK_OUTPUT:
case SENT_OUTPUT:
case SCHEDULED_OUTPUT:
var deskId = card._id.substring(0, card._id.indexOf(':'));
if (deskId) {
return data.desks && !!data.desks[deskId];
}
return false;
default:
// no way to determine if item should be visible, refresh
return true;
}
}
} | filterQueryByDeskType(query, card);
break;
| random_line_split |
CardsService.ts | import _ from 'lodash';
import {setFilters, IQueryParams} from 'apps/search/services/SearchService';
import {PUBLISHED_STATES} from 'apps/archive/constants';
import {ITEM_STATE} from 'apps/archive/constants';
import {
DESK_OUTPUT,
SENT_OUTPUT,
SCHEDULED_OUTPUT,
} from 'apps/desks/constants';
import {appConfig} from 'appConfig';
import {IMonitoringFilter} from 'superdesk-api';
export interface ICard {
_id: string;
deskId: string;
fileType: string; // contains JSON array
contentProfile: string;
customFilters: string;
header: string; // example: "Politic Desk"
subheader: string; // example: "Working Stage"
type: 'search'
| 'spike-personal'
| 'personal'
| 'stage'
| 'spike'
| 'highlights'
| 'deskOutput'
| 'sentDeskOutput'
| 'scheduledDeskOutput'
| string;
search?: {
filter?: {
query?: {
repo?: any;
q?: any;
};
};
};
max_items?: number;
singleViewType?: 'desk' | 'stage' | any;
query: any;
sent?: boolean;
}
CardsService.$inject = ['search', 'session', 'desks', '$location'];
export function CardsService(search, session, desks, $location) {
this.criteria = getCriteria;
this.shouldUpdate = shouldUpdate;
function getCriteriaParams(card: ICard): IQueryParams {
let params: IQueryParams = {};
if (card.type === 'search' && card.search && card.search.filter.query) {
angular.copy(card.search.filter.query, params);
if (card.query) {
if (card.search.filter.query.q) {
params.q = '(' + card.query + ') ' + card.search.filter.query.q;
} else {
params.q = '(' + card.query + ') ';
}
}
} else {
params.q = card.query;
}
if (card.type === 'spike' || card.type === 'spike-personal') {
params.spike = 'only';
} else if (card.type === 'personal' && card.sent) {
params.spike = 'include';
}
return params;
}
function filterQueryByCardType(query, queryParam, card: ICard) {
let deskId;
switch (card.type) {
case 'search':
break;
case 'spike-personal':
case 'personal':
if (card.sent) {
query.filter({bool: {
must: [
{term: {original_creator: session.identity._id}},
{exists: {field: 'task.desk'}},
],
}});
} else {
query.filter({bool: {
must: {term: {original_creator: session.identity._id}},
must_not: {exists: {field: 'task.desk'}},
}});
}
break;
case 'spike':
query.filter({term: {'task.desk': card._id}});
break;
case 'highlights':
query.filter({and: [
{term: {highlights: queryParam.highlight}},
]});
break;
case DESK_OUTPUT:
filterQueryByDeskType(query, card);
break;
case SENT_OUTPUT:
deskId = card._id.substring(0, card._id.indexOf(':'));
query.filter({bool: {
filter: {term: {'task.desk_history': deskId}},
must_not: {term: {'task.desk': deskId}},
}});
break;
case SCHEDULED_OUTPUT:
deskId = card._id.substring(0, card._id.indexOf(':'));
query.filter({and: [
{term: {'task.desk': deskId}},
{term: {state: 'scheduled'}},
]});
break;
default:
if (!_.isNil(card.singleViewType) && card.singleViewType === 'desk') {
query.filter({term: {'task.desk': card.deskId}});
} else {
query.filter({term: {'task.stage': card._id}});
}
break;
}
}
function filterQueryByDeskType(query, card: ICard) {
var deskId = card._id.substring(0, card._id.indexOf(':'));
var desk = desks.deskLookup ? desks.deskLookup[deskId] : null;
var states = PUBLISHED_STATES;
if (appConfig.monitoring != null && appConfig.monitoring.scheduled) {
states = PUBLISHED_STATES.filter((state) => state !== ITEM_STATE.SCHEDULED);
}
if (desk) {
const must: Array<{}> = [
{term: {'task.desk': deskId}},
{terms: {state: states}},
];
if (desk.desk_type === 'authoring') {
query.filter({bool: {should: [
{term: {'task.last_authoring_desk': deskId}},
{bool: {must}},
]}});
} else if (desk.desk_type === 'production') {
query.filter({bool: {must}});
}
}
if (appConfig.features.nestedItemsInOutputStage) {
query.setOption('hidePreviousVersions', true);
}
}
function filterQueryByCardFileType(query, card: ICard) |
function filterQueryByContentProfile(query, card: ICard) {
if (card.contentProfile) {
query.filter({terms: {profile: JSON.parse(card.contentProfile)}});
}
}
function filterQueryByCustomQuery(query, card: ICard) {
if (card.customFilters == null) {
return;
}
var items: {[key: string]: IMonitoringFilter} = JSON.parse(card.customFilters);
const terms = Object.values(items)
.reduce((obj1, obj2) => Object.assign(obj1, obj2.query), {});
Object.keys(terms).forEach((key) => {
query.filter({terms: {[key]: terms[key]}});
});
}
/**
* Get items criteria for given card
*
* Card can be stage/personal/saved search.
* There can be also extra string search query
*
* @param {Object} card
* @param {string} queryString
*/
function getCriteria(card: ICard, queryString?: any, queryParam?: any) {
var params = getCriteriaParams(card);
var query = search.query(setFilters(params));
var criteria: any = {es_highlight: card.query ? search.getElasticHighlight() : 0};
filterQueryByCardType(query, queryParam, card);
filterQueryByContentProfile(query, card);
filterQueryByCardFileType(query, card);
filterQueryByCustomQuery(query, card);
if (queryString) {
query.filter({query: {query_string: {query: queryString, lenient: true}}});
criteria.es_highlight = search.getElasticHighlight();
}
criteria.source = query.getCriteria();
if (card.type === 'search' && card.search && card.search.filter.query.repo) {
criteria.repo = card.search.filter.query.repo;
} else if (desks.isPublishType(card.type)) {
criteria.repo = 'archive,published';
if (card.type === 'deskOutput') {
query.filter({not: {term: {state: 'unpublished'}}});
}
}
criteria.source.from = 0;
criteria.source.size = card.max_items || 25;
return criteria;
}
function shouldUpdate(card: ICard, data) {
switch (card.type) {
case 'stage':
// refresh stage if it matches updated stage
return data.stages && !!data.stages[card._id];
case 'personal':
return data.user === session.identity._id;
case DESK_OUTPUT:
case SENT_OUTPUT:
case SCHEDULED_OUTPUT:
var deskId = card._id.substring(0, card._id.indexOf(':'));
if (deskId) {
return data.desks && !!data.desks[deskId];
}
return false;
default:
// no way to determine if item should be visible, refresh
return true;
}
}
}
| {
if (card.fileType) {
var termsHighlightsPackage = {and: [
{bool: {must: {exists: {field: 'highlight'}}}},
{term: {type: 'composite'}},
]};
var termsFileType: any = {terms: {type: JSON.parse(card.fileType)}};
// Normal package
if (_.includes(JSON.parse(card.fileType), 'composite')) {
termsFileType = {and: [
{bool: {must_not: {exists: {field: 'highlight'}}}},
{terms: {type: JSON.parse(card.fileType)}},
]};
}
if (_.includes(JSON.parse(card.fileType), 'highlight-pack')) {
query.filter({or: [
termsHighlightsPackage,
termsFileType,
]});
} else {
query.filter(termsFileType);
}
}
} | identifier_body |
CardsService.ts | import _ from 'lodash';
import {setFilters, IQueryParams} from 'apps/search/services/SearchService';
import {PUBLISHED_STATES} from 'apps/archive/constants';
import {ITEM_STATE} from 'apps/archive/constants';
import {
DESK_OUTPUT,
SENT_OUTPUT,
SCHEDULED_OUTPUT,
} from 'apps/desks/constants';
import {appConfig} from 'appConfig';
import {IMonitoringFilter} from 'superdesk-api';
export interface ICard {
_id: string;
deskId: string;
fileType: string; // contains JSON array
contentProfile: string;
customFilters: string;
header: string; // example: "Politic Desk"
subheader: string; // example: "Working Stage"
type: 'search'
| 'spike-personal'
| 'personal'
| 'stage'
| 'spike'
| 'highlights'
| 'deskOutput'
| 'sentDeskOutput'
| 'scheduledDeskOutput'
| string;
search?: {
filter?: {
query?: {
repo?: any;
q?: any;
};
};
};
max_items?: number;
singleViewType?: 'desk' | 'stage' | any;
query: any;
sent?: boolean;
}
CardsService.$inject = ['search', 'session', 'desks', '$location'];
export function CardsService(search, session, desks, $location) {
this.criteria = getCriteria;
this.shouldUpdate = shouldUpdate;
function getCriteriaParams(card: ICard): IQueryParams {
let params: IQueryParams = {};
if (card.type === 'search' && card.search && card.search.filter.query) {
angular.copy(card.search.filter.query, params);
if (card.query) {
if (card.search.filter.query.q) {
params.q = '(' + card.query + ') ' + card.search.filter.query.q;
} else {
params.q = '(' + card.query + ') ';
}
}
} else {
params.q = card.query;
}
if (card.type === 'spike' || card.type === 'spike-personal') {
params.spike = 'only';
} else if (card.type === 'personal' && card.sent) {
params.spike = 'include';
}
return params;
}
function filterQueryByCardType(query, queryParam, card: ICard) {
let deskId;
switch (card.type) {
case 'search':
break;
case 'spike-personal':
case 'personal':
if (card.sent) {
query.filter({bool: {
must: [
{term: {original_creator: session.identity._id}},
{exists: {field: 'task.desk'}},
],
}});
} else |
break;
case 'spike':
query.filter({term: {'task.desk': card._id}});
break;
case 'highlights':
query.filter({and: [
{term: {highlights: queryParam.highlight}},
]});
break;
case DESK_OUTPUT:
filterQueryByDeskType(query, card);
break;
case SENT_OUTPUT:
deskId = card._id.substring(0, card._id.indexOf(':'));
query.filter({bool: {
filter: {term: {'task.desk_history': deskId}},
must_not: {term: {'task.desk': deskId}},
}});
break;
case SCHEDULED_OUTPUT:
deskId = card._id.substring(0, card._id.indexOf(':'));
query.filter({and: [
{term: {'task.desk': deskId}},
{term: {state: 'scheduled'}},
]});
break;
default:
if (!_.isNil(card.singleViewType) && card.singleViewType === 'desk') {
query.filter({term: {'task.desk': card.deskId}});
} else {
query.filter({term: {'task.stage': card._id}});
}
break;
}
}
function filterQueryByDeskType(query, card: ICard) {
var deskId = card._id.substring(0, card._id.indexOf(':'));
var desk = desks.deskLookup ? desks.deskLookup[deskId] : null;
var states = PUBLISHED_STATES;
if (appConfig.monitoring != null && appConfig.monitoring.scheduled) {
states = PUBLISHED_STATES.filter((state) => state !== ITEM_STATE.SCHEDULED);
}
if (desk) {
const must: Array<{}> = [
{term: {'task.desk': deskId}},
{terms: {state: states}},
];
if (desk.desk_type === 'authoring') {
query.filter({bool: {should: [
{term: {'task.last_authoring_desk': deskId}},
{bool: {must}},
]}});
} else if (desk.desk_type === 'production') {
query.filter({bool: {must}});
}
}
if (appConfig.features.nestedItemsInOutputStage) {
query.setOption('hidePreviousVersions', true);
}
}
function filterQueryByCardFileType(query, card: ICard) {
if (card.fileType) {
var termsHighlightsPackage = {and: [
{bool: {must: {exists: {field: 'highlight'}}}},
{term: {type: 'composite'}},
]};
var termsFileType: any = {terms: {type: JSON.parse(card.fileType)}};
// Normal package
if (_.includes(JSON.parse(card.fileType), 'composite')) {
termsFileType = {and: [
{bool: {must_not: {exists: {field: 'highlight'}}}},
{terms: {type: JSON.parse(card.fileType)}},
]};
}
if (_.includes(JSON.parse(card.fileType), 'highlight-pack')) {
query.filter({or: [
termsHighlightsPackage,
termsFileType,
]});
} else {
query.filter(termsFileType);
}
}
}
function filterQueryByContentProfile(query, card: ICard) {
if (card.contentProfile) {
query.filter({terms: {profile: JSON.parse(card.contentProfile)}});
}
}
function filterQueryByCustomQuery(query, card: ICard) {
if (card.customFilters == null) {
return;
}
var items: {[key: string]: IMonitoringFilter} = JSON.parse(card.customFilters);
const terms = Object.values(items)
.reduce((obj1, obj2) => Object.assign(obj1, obj2.query), {});
Object.keys(terms).forEach((key) => {
query.filter({terms: {[key]: terms[key]}});
});
}
/**
* Get items criteria for given card
*
* Card can be stage/personal/saved search.
* There can be also extra string search query
*
* @param {Object} card
* @param {string} queryString
*/
function getCriteria(card: ICard, queryString?: any, queryParam?: any) {
var params = getCriteriaParams(card);
var query = search.query(setFilters(params));
var criteria: any = {es_highlight: card.query ? search.getElasticHighlight() : 0};
filterQueryByCardType(query, queryParam, card);
filterQueryByContentProfile(query, card);
filterQueryByCardFileType(query, card);
filterQueryByCustomQuery(query, card);
if (queryString) {
query.filter({query: {query_string: {query: queryString, lenient: true}}});
criteria.es_highlight = search.getElasticHighlight();
}
criteria.source = query.getCriteria();
if (card.type === 'search' && card.search && card.search.filter.query.repo) {
criteria.repo = card.search.filter.query.repo;
} else if (desks.isPublishType(card.type)) {
criteria.repo = 'archive,published';
if (card.type === 'deskOutput') {
query.filter({not: {term: {state: 'unpublished'}}});
}
}
criteria.source.from = 0;
criteria.source.size = card.max_items || 25;
return criteria;
}
function shouldUpdate(card: ICard, data) {
switch (card.type) {
case 'stage':
// refresh stage if it matches updated stage
return data.stages && !!data.stages[card._id];
case 'personal':
return data.user === session.identity._id;
case DESK_OUTPUT:
case SENT_OUTPUT:
case SCHEDULED_OUTPUT:
var deskId = card._id.substring(0, card._id.indexOf(':'));
if (deskId) {
return data.desks && !!data.desks[deskId];
}
return false;
default:
// no way to determine if item should be visible, refresh
return true;
}
}
}
| {
query.filter({bool: {
must: {term: {original_creator: session.identity._id}},
must_not: {exists: {field: 'task.desk'}},
}});
} | conditional_block |
UrlscanGetHttpTransactions.js | var url = args.url;
var limit = args.limitl;
var defaultWaitTime = Number(args.wait_time_for_polling)
uuid = executeCommand('urlscan-submit-url-command', {'url': url})[0].Contents;
uri = executeCommand('urlscan-get-result-page', {'uuid': uuid})[0].Contents;
var resStatusCode = 404
var waitedTime = 0
while(resStatusCode == 404 && waitedTime < Number(args.timeout)) {
var resStatusCode = executeCommand('urlscan-poll-uri', {'uri': uri})[0].Contents;
if (resStatusCode == 200) {
break;
}
wait(defaultWaitTime);
waitedTime = waitedTime + defaultWaitTime;
}
if(resStatusCode == 200) {
return executeCommand('urlscan-get-http-transaction-list', {'uuid': uuid, 'url': url, 'limit': limit});
} else {
if(waitedTime >= Number(args.timeout)){ | return 'Could not get result from UrlScan, possible rate-limit issues.'
}
} | return 'Could not get result from UrlScan, please try to increase the timeout.'
} else { | random_line_split |
UrlscanGetHttpTransactions.js | var url = args.url;
var limit = args.limitl;
var defaultWaitTime = Number(args.wait_time_for_polling)
uuid = executeCommand('urlscan-submit-url-command', {'url': url})[0].Contents;
uri = executeCommand('urlscan-get-result-page', {'uuid': uuid})[0].Contents;
var resStatusCode = 404
var waitedTime = 0
while(resStatusCode == 404 && waitedTime < Number(args.timeout)) {
var resStatusCode = executeCommand('urlscan-poll-uri', {'uri': uri})[0].Contents;
if (resStatusCode == 200) {
break;
}
wait(defaultWaitTime);
waitedTime = waitedTime + defaultWaitTime;
}
if(resStatusCode == 200) {
return executeCommand('urlscan-get-http-transaction-list', {'uuid': uuid, 'url': url, 'limit': limit});
} else {
if(waitedTime >= Number(args.timeout)){
return 'Could not get result from UrlScan, please try to increase the timeout.'
} else |
}
| {
return 'Could not get result from UrlScan, possible rate-limit issues.'
} | conditional_block |
fixtureSelectWithState.tsx | import retry from '@skidding/async-retry';
import React from 'react';
import { createFixtureStateProps, createValues } from '../../fixtureState';
import { uuid } from '../../util';
import { testFixtureLoader } from '../testHelpers';
import { HelloMessage } from '../testHelpers/components';
import { wrapFixtures } from '../testHelpers/wrapFixture';
| const fixtures = wrapFixtures({
first: <HelloMessage name="Bianca" />,
});
const fixtureId = { path: 'first' };
testFixtureLoader(
'renders selected fixture with fixture state',
{ rendererId, fixtures },
async ({ renderer, selectFixture }) => {
await selectFixture({
rendererId,
fixtureId,
fixtureState: {
props: createFixtureStateProps({
fixtureState: {},
elementId: { decoratorId: 'root', elPath: '' },
values: createValues({ name: 'B' }),
componentName: 'HelloMessage',
}),
},
});
await retry(() => expect(renderer.toJSON()).toBe('Hello B'));
}
); | const rendererId = uuid(); | random_line_split |
TextArea.stories.tsx | /*
MIT License
Copyright (c) 2022 Looker Data Sciences, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
import React from 'react'
import type { Story } from '@storybook/react/types-6-0'
import { defaultArgTypes as argTypes } from '../../../../../../apps/storybook/src/defaultArgTypes'
import type { TextAreaProps } from './TextArea'
import { TextArea } from './TextArea'
export default {
argTypes,
component: TextArea,
title: 'TextArea',
}
const Template: Story<TextAreaProps> = args => <TextArea {...args} />
export const Basic = Template.bind({})
export const Placeholder = Template.bind({})
Placeholder.args = {
placeholder: 'Placeholder',
} | defaultValue: 'A value',
}
export const Disabled = Template.bind({})
Disabled.args = {
...Value.args,
disabled: true,
}
export const Resize = Template.bind({})
Resize.args = {
resize: true,
}
export const Error = Template.bind({})
Error.args = {
validationType: 'error',
} |
export const Value = Template.bind({})
Value.args = { | random_line_split |
PaginationDropdown.test.tsx | import { shallow, ShallowWrapper } from 'enzyme';
import { DropdownItem } from 'reactstrap';
import PaginationDropdown from '../../src/utils/PaginationDropdown';
describe('<PaginationDropdown />', () => {
const setValue = jest.fn();
let wrapper: ShallowWrapper;
beforeEach(() => {
wrapper = shallow(<PaginationDropdown ranges={[ 10, 50, 100, 200 ]} value={50} setValue={setValue} />);
});
afterEach(jest.clearAllMocks);
afterEach(() => wrapper?.unmount());
it('renders expected amount of items', () => {
const items = wrapper.find(DropdownItem);
expect(items).toHaveLength(6);
});
it.each([ | ])('sets expected value when an item is clicked', (index, expectedValue) => {
const item = wrapper.find(DropdownItem).at(index);
expect(setValue).not.toHaveBeenCalled();
item.simulate('click');
expect(setValue).toHaveBeenCalledWith(expectedValue);
});
}); | [ 0, 10 ],
[ 1, 50 ],
[ 2, 100 ],
[ 3, 200 ],
[ 5, Infinity ], | random_line_split |
setup.py | from setuptools import setup, find_packages
import os
version = '0.5'
setup(name='uwosh.emergency.master',
version=version,
description="",
long_description=open("README.txt").read() + "\n" +
open(os.path.join("docs", "HISTORY.txt")).read(),
# Get more strings from http://www.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Framework :: Plone",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='',
author='Nathan Van Gheem',
author_email='vangheem@gmail.com',
url='http://svn.plone.org/svn/plone/plone.example',
license='GPL',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['uwosh', 'uwosh.emergency'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'uwosh.simpleemergency>=1.1',
'rsa'
],
entry_points="""
# -*- Entry points: -*-
[z3c.autoinclude.plugin]
target = plone
""", | ) | random_line_split | |
localStorageKeys.js | /*
* This program is part of the OpenLMIS logistics management information system platform software.
* Copyright © 2013 VillageReach
*
* This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details.
* You should have received a copy of the GNU Affero General Public License along with this program. If not, see http://www.gnu.org/licenses. For additional information contact info@OpenLMIS.org.
*/
//Add the key constants for localStorage('key','value') pairs in the application
var localStorageKeys = {
RIGHT: "RIGHTS",
CURRENCY: "CURRENCY",
USERNAME: "USERNAME",
FULLNAME: "FULLNAME",
USER_ID: "USER_ID",
REPORTS: {STOCK_IMBALANCE: "REPORT_STOCK_IMBALANCE", SUPPLY_STATUS: "REPORT_SUPPLY_STATUS"},
PREFERENCE: {
DEFAULT_PROGRAM: "DEFAULT_PROGRAM",
DEFAULT_SCHEDULE: "DEFAULT_SCHEDULE",
DEFAULT_PERIOD: "DEFAULT_PERIOD",
DEFAULT_SUPERVISORY_NODE: "DEFAULT_SUPERVISORY_NODE", | ALERT_SMS_NOTIFICATION_OVERDUE_REQUISITION: "ALERT_SMS_NOTIFICATION_OVERDUE_REQUISITION",
ALERT_EMAIL_OVER_DUE_REQUISITION: "ALERT_EMAIL_OVER_DUE_REQUISITION"
},
DASHBOARD_FILTERS: {
"SUMMARY": "DASHBOARD_SUMMARY_PAGE",
"STOCK": "DASHBOARD_STOCK_EFFICIENCY_PAGE",
"STOCK-OUT": "DASHBOARD_STOCKED_OUT_PAGE",
"DISTRICT-STOCK-OUT": "DASHBOARD_STOCKED_OUT_BY_DISTRICT_PAGE",
"DISTRICT-STOCK-OUT-DETAIL": "DASHBOARD_STOCKED_OUT_DETAIL_PAGE",
"ORDER": "DASHBOARD_ORDER_TURNAROUND_PAGE",
"NOTIFICATION": "DASHBOARD_NOTIFICATION_PAGE",
"RNR-STATUS-SUMMARY": "DASHBOARD_RNR_STATUS_SUMMARY_PAGE"
}
}; | DEFAULT_GEOGRAPHIC_ZONE: "DEFAULT_GEOGRAPHIC_ZONE",
DEFAULT_PRODUCT: "DEFAULT_PRODUCT",
DEFAULT_PRODUCTS: "DEFAULT_PRODUCTS",
DEFAULT_FACILITY: "DEFAULT_FACILITY", | random_line_split |
compute-graph.service.ts | // Copyright 2014 The Oppia Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS-IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @fileoverview Service for computing a graphical representation of an
* exploration.
*/
| import { Injectable } from '@angular/core';
import { States } from 'domain/exploration/StatesObjectFactory';
export interface GraphLink {
source: string;
target: string;
}
export interface GraphNodes {
[stateName: string]: string;
}
export interface GraphData {
finalStateIds: string[];
initStateId: string;
links: GraphLink[];
nodes: GraphNodes;
}
@Injectable({
providedIn: 'root'
})
export class ComputeGraphService {
_computeGraphData(initStateId: string, states: States): GraphData {
let nodes: Record<string, string> = {};
let links: { source: string; target: string }[] = [];
let finalStateIds = states.getFinalStateNames();
states.getStateNames().forEach(function(stateName) {
let interaction = states.getState(stateName).interaction;
nodes[stateName] = stateName;
if (interaction.id) {
let groups = interaction.answerGroups;
for (let h = 0; h < groups.length; h++) {
links.push({
source: stateName,
target: groups[h].outcome.dest,
});
}
if (interaction.defaultOutcome) {
links.push({
source: stateName,
target: interaction.defaultOutcome.dest,
});
}
}
});
return {
finalStateIds: finalStateIds,
initStateId: initStateId,
links: links,
nodes: nodes
};
}
_computeBfsTraversalOfStates(
initStateId: string, states: States, sourceStateName: string): string[] {
let stateGraph = this._computeGraphData(initStateId, states);
let stateNamesInBfsOrder: string[] = [];
let queue: string[] = [];
let seen: Record<string, boolean> = {};
seen[sourceStateName] = true;
queue.push(sourceStateName);
while (queue.length > 0) {
// '.shift()' here can return an undefined value, but we're already
// checking for queue.length > 0, so this is safe.
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
let currStateName = queue.shift()!;
stateNamesInBfsOrder.push(currStateName);
for (let e = 0; e < stateGraph.links.length; e++) {
let edge = stateGraph.links[e];
let dest = edge.target;
if (edge.source === currStateName && !seen.hasOwnProperty(dest)) {
seen[dest] = true;
queue.push(dest);
}
}
}
return stateNamesInBfsOrder;
}
compute(initStateId: string, states: States): GraphData {
return this._computeGraphData(initStateId, states);
}
computeBfsTraversalOfStates(
initStateId: string, states: States, sourceStateName: string): string[] {
return this._computeBfsTraversalOfStates(
initStateId, states, sourceStateName);
}
}
angular.module('oppia').factory(
'ComputeGraphService', downgradeInjectable(ComputeGraphService)); | import { downgradeInjectable } from '@angular/upgrade/static'; | random_line_split |
compute-graph.service.ts | // Copyright 2014 The Oppia Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS-IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @fileoverview Service for computing a graphical representation of an
* exploration.
*/
import { downgradeInjectable } from '@angular/upgrade/static';
import { Injectable } from '@angular/core';
import { States } from 'domain/exploration/StatesObjectFactory';
export interface GraphLink {
source: string;
target: string;
}
export interface GraphNodes {
[stateName: string]: string;
}
export interface GraphData {
finalStateIds: string[];
initStateId: string;
links: GraphLink[];
nodes: GraphNodes;
}
@Injectable({
providedIn: 'root'
})
export class ComputeGraphService {
_computeGraphData(initStateId: string, states: States): GraphData {
let nodes: Record<string, string> = {};
let links: { source: string; target: string }[] = [];
let finalStateIds = states.getFinalStateNames();
states.getStateNames().forEach(function(stateName) {
let interaction = states.getState(stateName).interaction;
nodes[stateName] = stateName;
if (interaction.id) {
let groups = interaction.answerGroups;
for (let h = 0; h < groups.length; h++) {
links.push({
source: stateName,
target: groups[h].outcome.dest,
});
}
if (interaction.defaultOutcome) {
links.push({
source: stateName,
target: interaction.defaultOutcome.dest,
});
}
}
});
return {
finalStateIds: finalStateIds,
initStateId: initStateId,
links: links,
nodes: nodes
};
}
| (
initStateId: string, states: States, sourceStateName: string): string[] {
let stateGraph = this._computeGraphData(initStateId, states);
let stateNamesInBfsOrder: string[] = [];
let queue: string[] = [];
let seen: Record<string, boolean> = {};
seen[sourceStateName] = true;
queue.push(sourceStateName);
while (queue.length > 0) {
// '.shift()' here can return an undefined value, but we're already
// checking for queue.length > 0, so this is safe.
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
let currStateName = queue.shift()!;
stateNamesInBfsOrder.push(currStateName);
for (let e = 0; e < stateGraph.links.length; e++) {
let edge = stateGraph.links[e];
let dest = edge.target;
if (edge.source === currStateName && !seen.hasOwnProperty(dest)) {
seen[dest] = true;
queue.push(dest);
}
}
}
return stateNamesInBfsOrder;
}
compute(initStateId: string, states: States): GraphData {
return this._computeGraphData(initStateId, states);
}
computeBfsTraversalOfStates(
initStateId: string, states: States, sourceStateName: string): string[] {
return this._computeBfsTraversalOfStates(
initStateId, states, sourceStateName);
}
}
angular.module('oppia').factory(
'ComputeGraphService', downgradeInjectable(ComputeGraphService));
| _computeBfsTraversalOfStates | identifier_name |
compute-graph.service.ts | // Copyright 2014 The Oppia Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS-IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @fileoverview Service for computing a graphical representation of an
* exploration.
*/
import { downgradeInjectable } from '@angular/upgrade/static';
import { Injectable } from '@angular/core';
import { States } from 'domain/exploration/StatesObjectFactory';
export interface GraphLink {
source: string;
target: string;
}
export interface GraphNodes {
[stateName: string]: string;
}
export interface GraphData {
finalStateIds: string[];
initStateId: string;
links: GraphLink[];
nodes: GraphNodes;
}
@Injectable({
providedIn: 'root'
})
export class ComputeGraphService {
_computeGraphData(initStateId: string, states: States): GraphData {
let nodes: Record<string, string> = {};
let links: { source: string; target: string }[] = [];
let finalStateIds = states.getFinalStateNames();
states.getStateNames().forEach(function(stateName) {
let interaction = states.getState(stateName).interaction;
nodes[stateName] = stateName;
if (interaction.id) {
let groups = interaction.answerGroups;
for (let h = 0; h < groups.length; h++) {
links.push({
source: stateName,
target: groups[h].outcome.dest,
});
}
if (interaction.defaultOutcome) {
links.push({
source: stateName,
target: interaction.defaultOutcome.dest,
});
}
}
});
return {
finalStateIds: finalStateIds,
initStateId: initStateId,
links: links,
nodes: nodes
};
}
_computeBfsTraversalOfStates(
initStateId: string, states: States, sourceStateName: string): string[] {
let stateGraph = this._computeGraphData(initStateId, states);
let stateNamesInBfsOrder: string[] = [];
let queue: string[] = [];
let seen: Record<string, boolean> = {};
seen[sourceStateName] = true;
queue.push(sourceStateName);
while (queue.length > 0) {
// '.shift()' here can return an undefined value, but we're already
// checking for queue.length > 0, so this is safe.
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
let currStateName = queue.shift()!;
stateNamesInBfsOrder.push(currStateName);
for (let e = 0; e < stateGraph.links.length; e++) {
let edge = stateGraph.links[e];
let dest = edge.target;
if (edge.source === currStateName && !seen.hasOwnProperty(dest)) |
}
}
return stateNamesInBfsOrder;
}
compute(initStateId: string, states: States): GraphData {
return this._computeGraphData(initStateId, states);
}
computeBfsTraversalOfStates(
initStateId: string, states: States, sourceStateName: string): string[] {
return this._computeBfsTraversalOfStates(
initStateId, states, sourceStateName);
}
}
angular.module('oppia').factory(
'ComputeGraphService', downgradeInjectable(ComputeGraphService));
| {
seen[dest] = true;
queue.push(dest);
} | conditional_block |
compute-graph.service.ts | // Copyright 2014 The Oppia Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS-IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @fileoverview Service for computing a graphical representation of an
* exploration.
*/
import { downgradeInjectable } from '@angular/upgrade/static';
import { Injectable } from '@angular/core';
import { States } from 'domain/exploration/StatesObjectFactory';
export interface GraphLink {
source: string;
target: string;
}
export interface GraphNodes {
[stateName: string]: string;
}
export interface GraphData {
finalStateIds: string[];
initStateId: string;
links: GraphLink[];
nodes: GraphNodes;
}
@Injectable({
providedIn: 'root'
})
export class ComputeGraphService {
_computeGraphData(initStateId: string, states: States): GraphData {
let nodes: Record<string, string> = {};
let links: { source: string; target: string }[] = [];
let finalStateIds = states.getFinalStateNames();
states.getStateNames().forEach(function(stateName) {
let interaction = states.getState(stateName).interaction;
nodes[stateName] = stateName;
if (interaction.id) {
let groups = interaction.answerGroups;
for (let h = 0; h < groups.length; h++) {
links.push({
source: stateName,
target: groups[h].outcome.dest,
});
}
if (interaction.defaultOutcome) {
links.push({
source: stateName,
target: interaction.defaultOutcome.dest,
});
}
}
});
return {
finalStateIds: finalStateIds,
initStateId: initStateId,
links: links,
nodes: nodes
};
}
_computeBfsTraversalOfStates(
initStateId: string, states: States, sourceStateName: string): string[] |
compute(initStateId: string, states: States): GraphData {
return this._computeGraphData(initStateId, states);
}
computeBfsTraversalOfStates(
initStateId: string, states: States, sourceStateName: string): string[] {
return this._computeBfsTraversalOfStates(
initStateId, states, sourceStateName);
}
}
angular.module('oppia').factory(
'ComputeGraphService', downgradeInjectable(ComputeGraphService));
| {
let stateGraph = this._computeGraphData(initStateId, states);
let stateNamesInBfsOrder: string[] = [];
let queue: string[] = [];
let seen: Record<string, boolean> = {};
seen[sourceStateName] = true;
queue.push(sourceStateName);
while (queue.length > 0) {
// '.shift()' here can return an undefined value, but we're already
// checking for queue.length > 0, so this is safe.
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
let currStateName = queue.shift()!;
stateNamesInBfsOrder.push(currStateName);
for (let e = 0; e < stateGraph.links.length; e++) {
let edge = stateGraph.links[e];
let dest = edge.target;
if (edge.source === currStateName && !seen.hasOwnProperty(dest)) {
seen[dest] = true;
queue.push(dest);
}
}
}
return stateNamesInBfsOrder;
} | identifier_body |
styles.py | """
InaSAFE Disaster risk assessment tool developed by AusAid **Messaging styles.**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
Style constants for use with messaging. Example usage::
from messaging.styles import PROGRESS_UPDATE_STYLE
m.ImportantText(myTitle, **PROGRESS_UPDATE_STYLE)
This will result in some standardised styling being applied to the important
text element.
"""
__author__ = 'tim@linfiniti.com'
__revision__ = '$Format:%H$'
__date__ = '06/06/2013'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
# These all apply to heading elements
PROGRESS_UPDATE_STYLE = { | 'style_class': 'info'}
INFO_STYLE = {
'level': 5,
'icon': 'icon-info-sign icon-white',
'style_class': 'info'}
WARNING_STYLE = {
'level': 5,
'icon': 'icon-warning-sign icon-white',
'style_class': 'warning'}
SUGGESTION_STYLE = {
'level': 5,
'icon': 'icon-comment icon-white',
'style_class': 'suggestion'}
PROBLEM_STYLE = {
'level': 5,
'icon': 'icon-remove-sign icon-white',
'style_class': 'warning'}
DETAILS_STYLE = {
'level': 5,
'icon': 'icon-list icon-white',
'style_class': 'problem'}
SMALL_ICON_STYLE = {
'attributes': 'style="width: 24px; height: 24px;"',
}
TRACEBACK_STYLE = {
'level': 5,
'icon': 'icon-info-sign icon-white',
'style_class': 'inverse',
'attributes': 'onclick="toggleTracebacks();"'}
TRACEBACK_ITEMS_STYLE = {
'style_class': 'traceback-detail',
}
# This is typically a text element or its derivatives
KEYWORD_STYLE = {
# not working unless you turn css on and off again using inspector
#'style_class': 'label label-success'
} | 'level': 5,
'icon': 'icon-cog icon-white', | random_line_split |
gray.rs | /// Reads in a list of values and returns the gray codes of the length of each value
/// Does so only up to the length of 64 bits (unsigned values)
// io functions
use std::io::prelude::*;
use std::error::Error;
use std::fs::File;
use std::path::Path;
use std::env;
/// Decimal to Gray Code
fn gray(num: u64) -> u64 {
(num >> 1) ^ num
}
/// Run up to some bit length (u8 <= 64)
fn gray_length(n: u8) -> Vec<u64> {
let size = 2u32.pow(n as u32);
let arr = 0 .. size as u64;
arr.map(|x| gray(x)).collect()
}
/// Ouput the binary number for the size
fn out(num: u64, size: u8) {
println!("{:0>width$b}", num, width=size as usize);
}
/// File I/O stuff
fn get_input<P: AsRef<Path>>(fname: P) -> Result< String, Box<Error>> {
let mut file = try!(File::open(fname));
let mut cntnt = String::new();
try!(file.read_to_string(&mut cntnt));
Ok(cntnt)
}
/// String helper
fn s_parser(item: &str) -> u8 {
let ret = match item.trim().parse::<u8>() {
Ok(n) => n,
Err(_) => 0,
};
ret
}
/// Parse File
fn parse(contents: String) -> Vec<u8> {
let vals: Vec<u8> = contents.split("\n").map(|x| s_parser(x)).collect();
vals
}
fn main() {
let args: Vec<String> = env::args().collect();
let path = Path::new(&args[1]);
let vals = match get_input(path) { | for val in vals {
for gray in gray_length(val) {
out(gray, 3);
}
}
} | Ok(n) => parse(n),
Err(why) => panic!("File could not be accessed {}", why),
};
| random_line_split |
gray.rs | /// Reads in a list of values and returns the gray codes of the length of each value
/// Does so only up to the length of 64 bits (unsigned values)
// io functions
use std::io::prelude::*;
use std::error::Error;
use std::fs::File;
use std::path::Path;
use std::env;
/// Decimal to Gray Code
fn gray(num: u64) -> u64 {
(num >> 1) ^ num
}
/// Run up to some bit length (u8 <= 64)
fn gray_length(n: u8) -> Vec<u64> {
let size = 2u32.pow(n as u32);
let arr = 0 .. size as u64;
arr.map(|x| gray(x)).collect()
}
/// Ouput the binary number for the size
fn out(num: u64, size: u8) {
println!("{:0>width$b}", num, width=size as usize);
}
/// File I/O stuff
fn get_input<P: AsRef<Path>>(fname: P) -> Result< String, Box<Error>> {
let mut file = try!(File::open(fname));
let mut cntnt = String::new();
try!(file.read_to_string(&mut cntnt));
Ok(cntnt)
}
/// String helper
fn s_parser(item: &str) -> u8 {
let ret = match item.trim().parse::<u8>() {
Ok(n) => n,
Err(_) => 0,
};
ret
}
/// Parse File
fn parse(contents: String) -> Vec<u8> |
fn main() {
let args: Vec<String> = env::args().collect();
let path = Path::new(&args[1]);
let vals = match get_input(path) {
Ok(n) => parse(n),
Err(why) => panic!("File could not be accessed {}", why),
};
for val in vals {
for gray in gray_length(val) {
out(gray, 3);
}
}
}
| {
let vals: Vec<u8> = contents.split("\n").map(|x| s_parser(x)).collect();
vals
} | identifier_body |
gray.rs | /// Reads in a list of values and returns the gray codes of the length of each value
/// Does so only up to the length of 64 bits (unsigned values)
// io functions
use std::io::prelude::*;
use std::error::Error;
use std::fs::File;
use std::path::Path;
use std::env;
/// Decimal to Gray Code
fn gray(num: u64) -> u64 {
(num >> 1) ^ num
}
/// Run up to some bit length (u8 <= 64)
fn gray_length(n: u8) -> Vec<u64> {
let size = 2u32.pow(n as u32);
let arr = 0 .. size as u64;
arr.map(|x| gray(x)).collect()
}
/// Ouput the binary number for the size
fn out(num: u64, size: u8) {
println!("{:0>width$b}", num, width=size as usize);
}
/// File I/O stuff
fn get_input<P: AsRef<Path>>(fname: P) -> Result< String, Box<Error>> {
let mut file = try!(File::open(fname));
let mut cntnt = String::new();
try!(file.read_to_string(&mut cntnt));
Ok(cntnt)
}
/// String helper
fn s_parser(item: &str) -> u8 {
let ret = match item.trim().parse::<u8>() {
Ok(n) => n,
Err(_) => 0,
};
ret
}
/// Parse File
fn parse(contents: String) -> Vec<u8> {
let vals: Vec<u8> = contents.split("\n").map(|x| s_parser(x)).collect();
vals
}
fn | () {
let args: Vec<String> = env::args().collect();
let path = Path::new(&args[1]);
let vals = match get_input(path) {
Ok(n) => parse(n),
Err(why) => panic!("File could not be accessed {}", why),
};
for val in vals {
for gray in gray_length(val) {
out(gray, 3);
}
}
}
| main | identifier_name |
future.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* A type representing values that may be computed concurrently and
* operations for working with them.
*
* # Example
*
* ```rust
* # fn fib(n: uint) -> uint {42};
* # fn make_a_sandwich() {};
* let mut delayed_fib = extra::future::spawn (|| fib(5000) );
* make_a_sandwich();
* println!("fib(5000) = {}", delayed_fib.get())
* ```
*/
#[allow(missing_doc)];
use std::cell::Cell;
use std::comm::{PortOne, oneshot};
use std::task;
use std::util::replace;
/// A type encapsulating the result of a computation which may not be complete
pub struct Future<A> {
priv state: FutureState<A>,
}
enum FutureState<A> {
Pending(~fn() -> A),
Evaluating,
Forced(A)
}
/// Methods on the `future` type
impl<A:Clone> Future<A> {
pub fn get(&mut self) -> A {
//! Get the value of the future.
(*(self.get_ref())).clone()
}
}
impl<A> Future<A> {
/// Gets the value from this future, forcing evaluation.
pub fn unwrap(self) -> A {
let mut this = self;
this.get_ref();
let state = replace(&mut this.state, Evaluating);
match state {
Forced(v) => v,
_ => fail2!( "Logic error." ),
}
}
pub fn get_ref<'a>(&'a mut self) -> &'a A |
pub fn from_value(val: A) -> Future<A> {
/*!
* Create a future from a value.
*
* The value is immediately available and calling `get` later will
* not block.
*/
Future {state: Forced(val)}
}
pub fn from_fn(f: ~fn() -> A) -> Future<A> {
/*!
* Create a future from a function.
*
* The first time that the value is requested it will be retrieved by
* calling the function. Note that this function is a local
* function. It is not spawned into another task.
*/
Future {state: Pending(f)}
}
}
impl<A:Send> Future<A> {
pub fn from_port(port: PortOne<A>) -> Future<A> {
/*!
* Create a future from a port
*
* The first time that the value is requested the task will block
* waiting for the result to be received on the port.
*/
let port = Cell::new(port);
do Future::from_fn {
port.take().recv()
}
}
pub fn spawn(blk: ~fn() -> A) -> Future<A> {
/*!
* Create a future from a unique closure.
*
* The closure will be run in a new task and its result used as the
* value of the future.
*/
let (port, chan) = oneshot();
do task::spawn_with(chan) |chan| {
chan.send(blk());
}
Future::from_port(port)
}
pub fn spawn_with<B: Send>(v: B, blk: ~fn(B) -> A) -> Future<A> {
/*!
* Create a future from a unique closure taking one argument.
*
* The closure and its argument will be moved into a new task. The
* closure will be run and its result used as the value of the future.
*/
let (port, chan) = oneshot();
do task::spawn_with((v, chan)) |(v, chan)| {
chan.send(blk(v));
}
Future::from_port(port)
}
}
#[cfg(test)]
mod test {
use future::Future;
use std::cell::Cell;
use std::comm::oneshot;
use std::task;
#[test]
fn test_from_value() {
let mut f = Future::from_value(~"snail");
assert_eq!(f.get(), ~"snail");
}
#[test]
fn test_from_port() {
let (po, ch) = oneshot();
ch.send(~"whale");
let mut f = Future::from_port(po);
assert_eq!(f.get(), ~"whale");
}
#[test]
fn test_from_fn() {
let mut f = Future::from_fn(|| ~"brail");
assert_eq!(f.get(), ~"brail");
}
#[test]
fn test_interface_get() {
let mut f = Future::from_value(~"fail");
assert_eq!(f.get(), ~"fail");
}
#[test]
fn test_interface_unwrap() {
let f = Future::from_value(~"fail");
assert_eq!(f.unwrap(), ~"fail");
}
#[test]
fn test_get_ref_method() {
let mut f = Future::from_value(22);
assert_eq!(*f.get_ref(), 22);
}
#[test]
fn test_spawn() {
let mut f = Future::spawn(|| ~"bale");
assert_eq!(f.get(), ~"bale");
}
#[test]
fn test_spawn_with() {
let mut f = Future::spawn_with(~"gale", |s| { s });
assert_eq!(f.get(), ~"gale");
}
#[test]
#[should_fail]
fn test_futurefail() {
let mut f = Future::spawn(|| fail2!());
let _x: ~str = f.get();
}
#[test]
fn test_sendable_future() {
let expected = "schlorf";
let f = Cell::new(do Future::spawn { expected });
do task::spawn {
let mut f = f.take();
let actual = f.get();
assert_eq!(actual, expected);
}
}
}
| {
/*!
* Executes the future's closure and then returns a borrowed
* pointer to the result. The borrowed pointer lasts as long as
* the future.
*/
match self.state {
Forced(ref v) => return v,
Evaluating => fail2!("Recursive forcing of future!"),
Pending(_) => {
match replace(&mut self.state, Evaluating) {
Forced(_) | Evaluating => fail2!("Logic error."),
Pending(f) => {
self.state = Forced(f());
self.get_ref()
}
}
}
}
} | identifier_body |
future.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* A type representing values that may be computed concurrently and
* operations for working with them.
*
* # Example
*
* ```rust
* # fn fib(n: uint) -> uint {42};
* # fn make_a_sandwich() {};
* let mut delayed_fib = extra::future::spawn (|| fib(5000) );
* make_a_sandwich();
* println!("fib(5000) = {}", delayed_fib.get())
* ```
*/
#[allow(missing_doc)];
use std::cell::Cell;
use std::comm::{PortOne, oneshot};
use std::task;
use std::util::replace;
/// A type encapsulating the result of a computation which may not be complete
pub struct Future<A> {
priv state: FutureState<A>,
}
enum FutureState<A> {
Pending(~fn() -> A),
Evaluating,
Forced(A)
}
/// Methods on the `future` type
impl<A:Clone> Future<A> {
pub fn get(&mut self) -> A {
//! Get the value of the future.
(*(self.get_ref())).clone()
}
}
impl<A> Future<A> {
/// Gets the value from this future, forcing evaluation.
pub fn unwrap(self) -> A {
let mut this = self;
this.get_ref();
let state = replace(&mut this.state, Evaluating);
match state {
Forced(v) => v,
_ => fail2!( "Logic error." ),
}
}
pub fn get_ref<'a>(&'a mut self) -> &'a A {
/*!
* Executes the future's closure and then returns a borrowed
* pointer to the result. The borrowed pointer lasts as long as
* the future.
*/
match self.state {
Forced(ref v) => return v,
Evaluating => fail2!("Recursive forcing of future!"),
Pending(_) => {
match replace(&mut self.state, Evaluating) {
Forced(_) | Evaluating => fail2!("Logic error."),
Pending(f) => {
self.state = Forced(f());
self.get_ref()
}
}
}
}
}
pub fn from_value(val: A) -> Future<A> {
/*!
* Create a future from a value.
*
* The value is immediately available and calling `get` later will
* not block.
*/
Future {state: Forced(val)}
}
pub fn from_fn(f: ~fn() -> A) -> Future<A> {
/*!
* Create a future from a function.
*
* The first time that the value is requested it will be retrieved by
* calling the function. Note that this function is a local
* function. It is not spawned into another task.
*/
Future {state: Pending(f)}
}
}
impl<A:Send> Future<A> {
pub fn from_port(port: PortOne<A>) -> Future<A> {
/*!
* Create a future from a port
*
* The first time that the value is requested the task will block
* waiting for the result to be received on the port.
*/
let port = Cell::new(port);
do Future::from_fn {
port.take().recv()
}
}
pub fn spawn(blk: ~fn() -> A) -> Future<A> {
/*!
* Create a future from a unique closure.
*
* The closure will be run in a new task and its result used as the
* value of the future.
*/
let (port, chan) = oneshot();
do task::spawn_with(chan) |chan| {
chan.send(blk());
}
Future::from_port(port)
}
pub fn spawn_with<B: Send>(v: B, blk: ~fn(B) -> A) -> Future<A> {
/*!
* Create a future from a unique closure taking one argument.
* |
do task::spawn_with((v, chan)) |(v, chan)| {
chan.send(blk(v));
}
Future::from_port(port)
}
}
#[cfg(test)]
mod test {
use future::Future;
use std::cell::Cell;
use std::comm::oneshot;
use std::task;
#[test]
fn test_from_value() {
let mut f = Future::from_value(~"snail");
assert_eq!(f.get(), ~"snail");
}
#[test]
fn test_from_port() {
let (po, ch) = oneshot();
ch.send(~"whale");
let mut f = Future::from_port(po);
assert_eq!(f.get(), ~"whale");
}
#[test]
fn test_from_fn() {
let mut f = Future::from_fn(|| ~"brail");
assert_eq!(f.get(), ~"brail");
}
#[test]
fn test_interface_get() {
let mut f = Future::from_value(~"fail");
assert_eq!(f.get(), ~"fail");
}
#[test]
fn test_interface_unwrap() {
let f = Future::from_value(~"fail");
assert_eq!(f.unwrap(), ~"fail");
}
#[test]
fn test_get_ref_method() {
let mut f = Future::from_value(22);
assert_eq!(*f.get_ref(), 22);
}
#[test]
fn test_spawn() {
let mut f = Future::spawn(|| ~"bale");
assert_eq!(f.get(), ~"bale");
}
#[test]
fn test_spawn_with() {
let mut f = Future::spawn_with(~"gale", |s| { s });
assert_eq!(f.get(), ~"gale");
}
#[test]
#[should_fail]
fn test_futurefail() {
let mut f = Future::spawn(|| fail2!());
let _x: ~str = f.get();
}
#[test]
fn test_sendable_future() {
let expected = "schlorf";
let f = Cell::new(do Future::spawn { expected });
do task::spawn {
let mut f = f.take();
let actual = f.get();
assert_eq!(actual, expected);
}
}
} | * The closure and its argument will be moved into a new task. The
* closure will be run and its result used as the value of the future.
*/
let (port, chan) = oneshot(); | random_line_split |
future.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* A type representing values that may be computed concurrently and
* operations for working with them.
*
* # Example
*
* ```rust
* # fn fib(n: uint) -> uint {42};
* # fn make_a_sandwich() {};
* let mut delayed_fib = extra::future::spawn (|| fib(5000) );
* make_a_sandwich();
* println!("fib(5000) = {}", delayed_fib.get())
* ```
*/
#[allow(missing_doc)];
use std::cell::Cell;
use std::comm::{PortOne, oneshot};
use std::task;
use std::util::replace;
/// A type encapsulating the result of a computation which may not be complete
pub struct Future<A> {
priv state: FutureState<A>,
}
enum FutureState<A> {
Pending(~fn() -> A),
Evaluating,
Forced(A)
}
/// Methods on the `future` type
impl<A:Clone> Future<A> {
pub fn get(&mut self) -> A {
//! Get the value of the future.
(*(self.get_ref())).clone()
}
}
impl<A> Future<A> {
/// Gets the value from this future, forcing evaluation.
pub fn unwrap(self) -> A {
let mut this = self;
this.get_ref();
let state = replace(&mut this.state, Evaluating);
match state {
Forced(v) => v,
_ => fail2!( "Logic error." ),
}
}
pub fn get_ref<'a>(&'a mut self) -> &'a A {
/*!
* Executes the future's closure and then returns a borrowed
* pointer to the result. The borrowed pointer lasts as long as
* the future.
*/
match self.state {
Forced(ref v) => return v,
Evaluating => fail2!("Recursive forcing of future!"),
Pending(_) => {
match replace(&mut self.state, Evaluating) {
Forced(_) | Evaluating => fail2!("Logic error."),
Pending(f) => {
self.state = Forced(f());
self.get_ref()
}
}
}
}
}
pub fn from_value(val: A) -> Future<A> {
/*!
* Create a future from a value.
*
* The value is immediately available and calling `get` later will
* not block.
*/
Future {state: Forced(val)}
}
pub fn from_fn(f: ~fn() -> A) -> Future<A> {
/*!
* Create a future from a function.
*
* The first time that the value is requested it will be retrieved by
* calling the function. Note that this function is a local
* function. It is not spawned into another task.
*/
Future {state: Pending(f)}
}
}
impl<A:Send> Future<A> {
pub fn from_port(port: PortOne<A>) -> Future<A> {
/*!
* Create a future from a port
*
* The first time that the value is requested the task will block
* waiting for the result to be received on the port.
*/
let port = Cell::new(port);
do Future::from_fn {
port.take().recv()
}
}
pub fn spawn(blk: ~fn() -> A) -> Future<A> {
/*!
* Create a future from a unique closure.
*
* The closure will be run in a new task and its result used as the
* value of the future.
*/
let (port, chan) = oneshot();
do task::spawn_with(chan) |chan| {
chan.send(blk());
}
Future::from_port(port)
}
pub fn spawn_with<B: Send>(v: B, blk: ~fn(B) -> A) -> Future<A> {
/*!
* Create a future from a unique closure taking one argument.
*
* The closure and its argument will be moved into a new task. The
* closure will be run and its result used as the value of the future.
*/
let (port, chan) = oneshot();
do task::spawn_with((v, chan)) |(v, chan)| {
chan.send(blk(v));
}
Future::from_port(port)
}
}
#[cfg(test)]
mod test {
use future::Future;
use std::cell::Cell;
use std::comm::oneshot;
use std::task;
#[test]
fn test_from_value() {
let mut f = Future::from_value(~"snail");
assert_eq!(f.get(), ~"snail");
}
#[test]
fn test_from_port() {
let (po, ch) = oneshot();
ch.send(~"whale");
let mut f = Future::from_port(po);
assert_eq!(f.get(), ~"whale");
}
#[test]
fn test_from_fn() {
let mut f = Future::from_fn(|| ~"brail");
assert_eq!(f.get(), ~"brail");
}
#[test]
fn test_interface_get() {
let mut f = Future::from_value(~"fail");
assert_eq!(f.get(), ~"fail");
}
#[test]
fn test_interface_unwrap() {
let f = Future::from_value(~"fail");
assert_eq!(f.unwrap(), ~"fail");
}
#[test]
fn test_get_ref_method() {
let mut f = Future::from_value(22);
assert_eq!(*f.get_ref(), 22);
}
#[test]
fn test_spawn() {
let mut f = Future::spawn(|| ~"bale");
assert_eq!(f.get(), ~"bale");
}
#[test]
fn | () {
let mut f = Future::spawn_with(~"gale", |s| { s });
assert_eq!(f.get(), ~"gale");
}
#[test]
#[should_fail]
fn test_futurefail() {
let mut f = Future::spawn(|| fail2!());
let _x: ~str = f.get();
}
#[test]
fn test_sendable_future() {
let expected = "schlorf";
let f = Cell::new(do Future::spawn { expected });
do task::spawn {
let mut f = f.take();
let actual = f.get();
assert_eq!(actual, expected);
}
}
}
| test_spawn_with | identifier_name |
index.test-d.ts | import {expectType} from 'tsd';
import stripCssComments from './index.js';
expectType<string>(
stripCssComments('/*! <copyright> */ body { /* unicorns */color: hotpink; }'),
);
expectType<string>(
stripCssComments(
'/*! <copyright> */ body { /* unicorns */color: hotpink; }', {
preserve: false,
}),
);
expectType<string>(
stripCssComments('/*# preserved */ body { /* unicorns */color: hotpink; }', {
preserve: /^#/,
}), | }),
);
expectType<string>(
stripCssComments('/*# preserved */ body { /* unicorns */color: hotpink; }', {
whitespace: false,
}),
); | );
expectType<string>(
stripCssComments('/*# preserved */ body { /* unicorns */color: hotpink; }', {
preserve: comment => comment.startsWith('#'), | random_line_split |
mongoose.ts | import chalk = require('chalk');
import path = require('path');
import mongoose = require('mongoose');
import { FrameworkConfiguration } from '../config';
const fc: FrameworkConfiguration = FrameworkConfiguration.get();
export interface ConnectCallbackFunction {
(db: mongoose.Mongoose): any;
}
export interface DisconnectCallbackFunction {
(err: any): any;
}
export interface LoadModuleCallbackFunction {
(): any;
}
export class MongooseAdapter {
db?: mongoose.Mongoose;
constructor() {
this.db = null;
}
connect(callbackFn?: ConnectCallbackFunction): void {
this.db = mongoose.connect(fc.config.db.uri, fc.config.db.options,
(err: any): void => {
if (err) {
console.error(chalk.red('Could not connect to MongoDB!'));
console.log(err.toString());
} else {
mongoose.set('debug', fc.config.db.debug);
if (callbackFn) callbackFn(this.db);
}
});
}
disconnect(callbackFn?: DisconnectCallbackFunction): void {
mongoose.disconnect((err: any): void => {
console.info(chalk.yellow('Disconnected from MongoDB'));
this.db = null;
if (callbackFn) callbackFn(err);
});
}
| (): boolean {
return this.db !== null;
}
loadModels(callbackFn?: LoadModuleCallbackFunction): void {
for (let modelPath of fc.assets.server.models)
require(path.resolve(modelPath));
}
}
| isConnected | identifier_name |
mongoose.ts | import chalk = require('chalk');
import path = require('path');
import mongoose = require('mongoose');
import { FrameworkConfiguration } from '../config';
const fc: FrameworkConfiguration = FrameworkConfiguration.get();
export interface ConnectCallbackFunction {
(db: mongoose.Mongoose): any;
}
export interface DisconnectCallbackFunction {
(err: any): any;
}
export interface LoadModuleCallbackFunction {
(): any;
}
export class MongooseAdapter {
db?: mongoose.Mongoose;
|
constructor() {
this.db = null;
}
connect(callbackFn?: ConnectCallbackFunction): void {
this.db = mongoose.connect(fc.config.db.uri, fc.config.db.options,
(err: any): void => {
if (err) {
console.error(chalk.red('Could not connect to MongoDB!'));
console.log(err.toString());
} else {
mongoose.set('debug', fc.config.db.debug);
if (callbackFn) callbackFn(this.db);
}
});
}
disconnect(callbackFn?: DisconnectCallbackFunction): void {
mongoose.disconnect((err: any): void => {
console.info(chalk.yellow('Disconnected from MongoDB'));
this.db = null;
if (callbackFn) callbackFn(err);
});
}
isConnected(): boolean {
return this.db !== null;
}
loadModels(callbackFn?: LoadModuleCallbackFunction): void {
for (let modelPath of fc.assets.server.models)
require(path.resolve(modelPath));
}
} | random_line_split | |
mongoose.ts | import chalk = require('chalk');
import path = require('path');
import mongoose = require('mongoose');
import { FrameworkConfiguration } from '../config';
const fc: FrameworkConfiguration = FrameworkConfiguration.get();
export interface ConnectCallbackFunction {
(db: mongoose.Mongoose): any;
}
export interface DisconnectCallbackFunction {
(err: any): any;
}
export interface LoadModuleCallbackFunction {
(): any;
}
export class MongooseAdapter {
db?: mongoose.Mongoose;
constructor() {
this.db = null;
}
connect(callbackFn?: ConnectCallbackFunction): void {
this.db = mongoose.connect(fc.config.db.uri, fc.config.db.options,
(err: any): void => {
if (err) | else {
mongoose.set('debug', fc.config.db.debug);
if (callbackFn) callbackFn(this.db);
}
});
}
disconnect(callbackFn?: DisconnectCallbackFunction): void {
mongoose.disconnect((err: any): void => {
console.info(chalk.yellow('Disconnected from MongoDB'));
this.db = null;
if (callbackFn) callbackFn(err);
});
}
isConnected(): boolean {
return this.db !== null;
}
loadModels(callbackFn?: LoadModuleCallbackFunction): void {
for (let modelPath of fc.assets.server.models)
require(path.resolve(modelPath));
}
}
| {
console.error(chalk.red('Could not connect to MongoDB!'));
console.log(err.toString());
} | conditional_block |
mongoose.ts | import chalk = require('chalk');
import path = require('path');
import mongoose = require('mongoose');
import { FrameworkConfiguration } from '../config';
const fc: FrameworkConfiguration = FrameworkConfiguration.get();
export interface ConnectCallbackFunction {
(db: mongoose.Mongoose): any;
}
export interface DisconnectCallbackFunction {
(err: any): any;
}
export interface LoadModuleCallbackFunction {
(): any;
}
export class MongooseAdapter {
db?: mongoose.Mongoose;
constructor() {
this.db = null;
}
connect(callbackFn?: ConnectCallbackFunction): void {
this.db = mongoose.connect(fc.config.db.uri, fc.config.db.options,
(err: any): void => {
if (err) {
console.error(chalk.red('Could not connect to MongoDB!'));
console.log(err.toString());
} else {
mongoose.set('debug', fc.config.db.debug);
if (callbackFn) callbackFn(this.db);
}
});
}
disconnect(callbackFn?: DisconnectCallbackFunction): void {
mongoose.disconnect((err: any): void => {
console.info(chalk.yellow('Disconnected from MongoDB'));
this.db = null;
if (callbackFn) callbackFn(err);
});
}
isConnected(): boolean {
return this.db !== null;
}
loadModels(callbackFn?: LoadModuleCallbackFunction): void |
}
| {
for (let modelPath of fc.assets.server.models)
require(path.resolve(modelPath));
} | identifier_body |
redact.test.js | /**
* Copyright 2017, Google, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
'use strict';
require(`../../system-test/_setup`);
const cmd = 'node redact';
// redact_string
test(`should redact sensitive data from a string`, async (t) => {
const output = await runAsync(`${cmd} string "I am Gary and my phone number is (123) 456-7890." REDACTED -t US_MALE_NAME PHONE_NUMBER`);
t.is(output, 'I am REDACTED and my phone number is REDACTED.');
});
test(`should ignore unspecified type names when redacting from a string`, async (t) => {
const output = await runAsync(`${cmd} string "I am Gary and my phone number is (123) 456-7890." REDACTED -t PHONE_NUMBER`);
t.is(output, 'I am Gary and my phone number is REDACTED.');
});
test(`should report string redaction handling errors`, async (t) => {
const output = await runAsync(`${cmd} string "My name is Gary and my phone number is (123) 456-7890." REDACTED -t PHONE_NUMBER -a foo`);
t.regex(output, /Error in redactString/);
});
// CLI options
test(`should have a minLikelihood option`, async (t) => {
const promiseA = runAsync(`${cmd} string "My phone number is (123) 456-7890." REDACTED -t PHONE_NUMBER -m VERY_LIKELY`);
const promiseB = runAsync(`${cmd} string "My phone number is (123) 456-7890." REDACTED -t PHONE_NUMBER -m UNLIKELY`);
const outputA = await promiseA;
t.is(outputA, 'My phone number is (123) 456-7890.');
const outputB = await promiseB;
t.is(outputB, 'My phone number is REDACTED.');
});
test(`should have an option for custom auth tokens`, async (t) => { | const output = await runAsync(`${cmd} string "My name is Gary and my phone number is (123) 456-7890." REDACTED -t PHONE_NUMBER -a foo`);
t.regex(output, /Error in redactString/);
t.regex(output, /invalid authentication/);
}); | random_line_split | |
graphs.py | # -*- coding: utf-8 -*-
# SyConn - Synaptic connectivity inference toolkit
#
# Copyright (c) 2016 - now
# Max Planck Institute of Neurobiology, Martinsried, Germany
# Authors: Philipp Schubert, Joergen Kornfeld
import itertools
from typing import List, Any, Optional, TYPE_CHECKING
import networkx as nx
import numpy as np
import tqdm
from knossos_utils.skeleton import Skeleton, SkeletonAnnotation, SkeletonNode
from scipy import spatial
if TYPE_CHECKING:
from ..reps.super_segmentation import SuperSegmentationObject
from .. import global_params
from ..mp.mp_utils import start_multiprocess_imap as start_multiprocess
def bfs_smoothing(vertices, vertex_labels, max_edge_length=120, n_voting=40):
"""
Smooth vertex labels by applying a majority vote on a
BFS subset of nodes for every node in the graph
Parameters
Args:
vertices: np.array
N, 3
vertex_labels: np.array
N, 1
max_edge_length: float
maximum distance between vertices to consider them connected in the
graph
n_voting: int
Number of collected nodes during BFS used for majority vote
Returns: np.array
smoothed vertex labels
"""
G = create_graph_from_coords(vertices, max_dist=max_edge_length, mst=False,
force_single_cc=False)
# create BFS subset
bfs_nn = split_subcc(G, max_nb=n_voting, verbose=False)
new_vertex_labels = np.zeros_like(vertex_labels)
for ii in range(len(vertex_labels)):
curr_labels = vertex_labels[bfs_nn[ii]]
labels, counts = np.unique(curr_labels, return_counts=True)
majority_label = labels[np.argmax(counts)]
new_vertex_labels[ii] = majority_label
return new_vertex_labels
def split_subcc(g, max_nb, verbose=False, start_nodes=None):
"""
Creates subgraph for each node consisting of nodes until maximum number of
nodes is reached.
Args:
g: Graph
max_nb: int
verbose: bool
start_nodes: iterable
node ID's
Returns: dict
"""
subnodes = {}
if verbose:
nb_nodes = g.number_of_nodes()
pbar = tqdm.tqdm(total=nb_nodes, leave=False)
if start_nodes is None:
iter_ixs = g.nodes()
else:
iter_ixs = start_nodes
for n in iter_ixs:
n_subgraph = [n]
nb_edges = 0
for e in nx.bfs_edges(g, n):
n_subgraph.append(e[1])
nb_edges += 1
if nb_edges == max_nb:
break
subnodes[n] = n_subgraph
if verbose:
pbar.update(1)
if verbose:
pbar.close()
return subnodes
def chunkify_contiguous(l, n):
"""Yield successive n-sized chunks from l.
https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks"""
for i in range(0, len(l), n):
yield l[i:i + n]
def split_subcc_join(g: nx.Graph, subgraph_size: int, lo_first_n: int = 1) -> List[List[Any]]:
"""
Creates a subgraph for each node consisting of nodes until maximum number of
nodes is reached.
Args:
g: Supervoxel graph
subgraph_size: Size of subgraphs. The difference between `subgraph_size` and `lo_first_n` defines the
supervoxel overlap.
lo_first_n: Leave out first n nodes: will collect `subgraph_size` nodes starting from center node and then
omit the first lo_first_n nodes, i.e. not use them as new starting nodes.
Returns:
"""
start_node = list(g.nodes())[0]
for n, d in dict(g.degree).items():
if d == 1:
start_node = n
break
dfs_nodes = list(nx.dfs_preorder_nodes(g, start_node))
# get subgraphs via splicing of traversed node list into equally sized fragments. they might
# be unconnected if branch sizes mod subgraph_size != 0, then a chunk will contain multiple connected components.
chunks = list(chunkify_contiguous(dfs_nodes, lo_first_n))
sub_graphs = []
for ch in chunks:
# collect all connected component subgraphs
sg = g.subgraph(ch).copy()
sub_graphs += list((sg.subgraph(c) for c in nx.connected_components(sg)))
# add more context to subgraphs
subgraphs_withcontext = []
for sg in sub_graphs:
# add context but omit artificial start node
context_nodes = []
for n in list(sg.nodes()):
subgraph_nodes_with_context = []
nb_edges = sg.number_of_nodes()
for e in nx.bfs_edges(g, n):
subgraph_nodes_with_context += list(e)
nb_edges += 1
if nb_edges == subgraph_size:
break
context_nodes += subgraph_nodes_with_context
# add original nodes
context_nodes = list(set(context_nodes))
for n in list(sg.nodes()):
if n in context_nodes:
context_nodes.remove(n)
subgraph_nodes_with_context = list(sg.nodes()) + context_nodes
subgraphs_withcontext.append(subgraph_nodes_with_context)
return subgraphs_withcontext
def merge_nodes(G, nodes, new_node):
""" FOR UNWEIGHTED, UNDIRECTED GRAPHS ONLY
"""
if G.is_directed():
raise ValueError('Method "merge_nodes" is only valid for undirected graphs.')
G.add_node(new_node)
for n in nodes:
for e in G.edges(n):
# add edge between new node and original partner node
edge = list(e)
edge.remove(n)
paired_node = edge[0]
G.add_edge(new_node, paired_node)
for n in nodes: # remove the merged nodes
G.remove_node(n)
def split_glia_graph(nx_g, thresh, clahe=False, nb_cpus=1, pred_key_appendix=""):
"""
Split graph into glia and non-glua CC's.
Args:
nx_g: nx.Graph
thresh: float
clahe: bool
nb_cpus: int
pred_key_appendix: str
verbose: bool
Returns: list, list
Neuron, glia connected components.
"""
glia_key = "glia_probas"
if clahe:
glia_key += "_clahe"
glia_key += pred_key_appendix
glianess, size = get_glianess_dict(list(nx_g.nodes()), thresh, glia_key,
nb_cpus=nb_cpus)
return remove_glia_nodes(nx_g, size, glianess, return_removed_nodes=True)
def split_glia(sso, thresh, clahe=False, pred_key_appendix=""):
    """
    Split a SuperSegmentationObject into glia and non-glia SegmentationObjects.

    Args:
        sso: SuperSegmentationObject
        thresh: float
            Classification threshold for the glia prediction.
        clahe: bool
        pred_key_appendix: str
            Defines type of glia predictions.

    Returns: list, list (of SegmentationObject)
        Neuron, glia nodes.
    """
    return split_glia_graph(sso.rag, thresh=thresh, clahe=clahe,
                            nb_cpus=sso.nb_cpus,
                            pred_key_appendix=pred_key_appendix)
def create_ccsize_dict(g: nx.Graph, bbs: dict, is_connected_components: bool = False) -> dict:
    """
    Calculate bounding box size of connected components.

    Args:
        g: Supervoxel graph.
        bbs: Bounding boxes (physical units).
        is_connected_components: If graph `g` already is connected components. If False,
            ``nx.connected_components`` is applied.

    Returns:
        Look-up which stores the connected component bounding box diagonal for
        every single node in the input Graph `g`.
    """
    ccs = g if is_connected_components else nx.connected_components(g)
    node2cssize_dict = {}
    for cc in ccs:
        # nodes missing in bbs were skipped upstream due to low voxel count
        cc_boxes = [bbs[node] for node in cc if node in bbs]
        if not cc_boxes:
            raise ValueError(f'Could not find a single bounding box for connected component with IDs: {cc}.')
        stacked = np.concatenate(cc_boxes)
        # length of the diagonal of the joint bounding box
        cc_size = np.linalg.norm(stacked.max(axis=0) - stacked.min(axis=0), ord=2)
        node2cssize_dict.update((node, cc_size) for node in cc)
    return node2cssize_dict
def get_glianess_dict(seg_objs, thresh, glia_key, nb_cpus=1,
                      use_sv_volume=False, verbose=False):
    """Collect glia prediction and size for every segmentation object.

    Loads the per-object values in parallel via `glia_loader_helper`.

    Returns:
        Two dicts keyed by the objects in `seg_objs`: the glia prediction and
        the size (mesh bounding box, or voxel volume if `use_sv_volume`).
    """
    params = [[obj, glia_key, thresh, use_sv_volume] for obj in seg_objs]
    res = start_multiprocess(glia_loader_helper, params, nb_cpus=nb_cpus,
                             verbose=verbose, show_progress=verbose)
    glianess = {}
    sizes = {}
    for obj, (obj_glianess, obj_size) in zip(seg_objs, res):
        glianess[obj] = obj_glianess
        sizes[obj] = obj_size
    return glianess, sizes
def glia_loader_helper(args):
    """Load glia prediction and size of a single SegmentationObject.

    Args:
        args: Sequence of (SegmentationObject, attribute key of the glia
            probabilities, classification threshold, bool whether to use the
            voxel volume instead of the mesh bounding box as size).

    Returns:
        Glia prediction and size of the object.
    """
    so, glia_key, thresh, use_sv_volume = args
    # make sure the attribute cache containing the glia probabilities is loaded
    if glia_key not in so.attr_dict:
        so.load_attr_dict()
    curr_glianess = so.glia_pred(thresh)
    curr_size = so.size if use_sv_volume else so.mesh_bb
    return curr_glianess, curr_size
def remove_glia_nodes(g, size_dict, glia_dict, return_removed_nodes=False):
    """
    Calculate distance weights for shortest path analysis or similar, based on
    glia and size vertex properties and removes unsupporting glia nodes.

    Args:
        g: Graph
            Supervoxel graph.
        size_dict: dict
            Bounding box per node; forwarded to ``create_ccsize_dict`` to
            compute connected-component extents.
        glia_dict: dict
            Glia prediction per node; 0 means neuron, any other value glia.
        return_removed_nodes: bool
            If True, additionally return the glia connected components.

    Returns: list of list of nodes
        Remaining connected components of type neuron; if
        `return_removed_nodes` is True, a tuple (neuron CCs, glia CCs).
    """
    # set up node weights based on glia prediction and size
    # weights = {}
    # e_weights = {}
    # for n in g.nodes():
    #     weights[n] = np.linalg.norm(size_dict[n][1]-size_dict[n][0], ord=2)\
    #         * glia_dict[n]
    # # set up edge weights based on sum of node weights
    # for e in g.edges():
    #     e_weights[e] = weights[list(e)[0]] + weights[list(e)[1]]
    # nx.set_node_attributes(g, weights, 'weight')
    # nx.set_edge_attributes(g, e_weights, 'weights')
    # get neuron type connected component sizes
    g_neuron = g.copy()
    for n in g.nodes():
        if glia_dict[n] != 0:
            g_neuron.remove_node(n)
    neuron2ccsize_dict = create_ccsize_dict(g_neuron, size_dict)
    # early exit: every neuron CC is below the size threshold
    if np.all(np.array(list(neuron2ccsize_dict.values())) <=
              global_params.config['min_cc_size_ssv']):
        # no significant neuron SV
        if return_removed_nodes:
            return [], [list(g.nodes())]
        return []
    # get glia type connected component sizes
    g_glia = g.copy()
    for n in g.nodes():
        if glia_dict[n] == 0:
            g_glia.remove_node(n)
    glia2ccsize_dict = create_ccsize_dict(g_glia, size_dict)
    # early exit: every glia CC is below the size threshold
    if np.all(np.array(list(glia2ccsize_dict.values())) <=
              global_params.config['min_cc_size_ssv']):
        # no significant glia SV
        if return_removed_nodes:
            return [list(g.nodes())], []
        return [list(g.nodes())]
    # glia nodes in too-small glia CCs are kept with the neuron graph
    tiny_glia_fragments = []
    for n in g_glia.nodes():
        if glia2ccsize_dict[n] < global_params.config['min_cc_size_ssv']:
            tiny_glia_fragments += [n]
    # create new neuron graph without sufficiently big glia connected components
    g_neuron = g.copy()
    for n in g.nodes():
        if glia_dict[n] != 0 and n not in tiny_glia_fragments:
            g_neuron.remove_node(n)
    # find orphaned neuron SV's and add them to glia graph
    neuron2ccsize_dict = create_ccsize_dict(g_neuron, size_dict)
    # iterate over a copy because nodes are removed from g_neuron below
    g_tmp = g_neuron.copy()
    for n in g_tmp.nodes():
        if neuron2ccsize_dict[n] < global_params.config['min_cc_size_ssv']:
            g_neuron.remove_node(n)
    # create new glia graph with remaining nodes
    # (as the complementary set of sufficiently big neuron connected components)
    g_glia = g.copy()
    for n in g_neuron.nodes():
        g_glia.remove_node(n)
    neuron_ccs = list(nx.connected_components(g_neuron))
    if return_removed_nodes:
        glia_ccs = list(nx.connected_components(g_glia))
        # every node must end up in exactly one of the two graphs
        assert len(g_glia) + len(g_neuron) == len(g)
        return neuron_ccs, glia_ccs
    return neuron_ccs
def glia_path_length(glia_path, glia_dict, write_paths=None):
    """
    Get the path length of glia SV within glia_path. Assumes single connected
    glia component within this path. Uses the mesh property of each
    SegmentationObject to build a graph from all vertices to find shortest path
    through (or more precise: along the surface of) glia. Edges between non-glia
    vertices have negligible distance (0.0001) to ensure shortest path
    along non-glia surfaces.

    Args:
        glia_path: list of SegmentationObjects
        glia_dict: dict
            Dictionary which keys the SegmentationObjects in glia_path and returns
            their glia prediction
        write_paths: str, optional
            Destination folder for the shortest-path skeleton kzip; nothing is
            written if None.

    Returns: float
        Shortest path between neuron type nodes in nm
    """
    g = nx.Graph()
    col = {}
    curr_ind = 0
    if write_paths is not None:
        all_vert = np.zeros((0, 3))
    for so in glia_path:
        is_glia_sv = int(glia_dict[so] > 0)
        ind, vert = so.mesh
        # connect meshes of different SV, starts after first SV
        if curr_ind > 0:
            # build kd tree from vertices of SV before
            kd_tree = spatial.cKDTree(vert_resh)
            # get indices of vertices of SV before (= indices of graph nodes)
            ind_offset_before = curr_ind - len(vert_resh)
            # query vertices of current mesh to find close connects
            next_vert_resh = vert.reshape((-1, 3))
            dists, ixs = kd_tree.query(next_vert_resh, distance_upper_bound=500)
            for kk, ix in enumerate(ixs):
                if dists[kk] > 500:
                    continue
                if is_glia_sv:
                    edge_weight = eucl_dist(next_vert_resh[kk], vert_resh[ix])
                else:
                    # negligible weight: do not penalize paths along neuron SVs
                    edge_weight = 0.0001
                g.add_edge(curr_ind + kk, ind_offset_before + ix,
                           weights=edge_weight)
        vert_resh = vert.reshape((-1, 3))
        # save all vertices for writing shortest path skeleton
        if write_paths is not None:
            all_vert = np.concatenate([all_vert, vert_resh])
        # connect fragments of SV mesh
        kd_tree = spatial.cKDTree(vert_resh)
        dists, ixs = kd_tree.query(vert_resh, k=20, distance_upper_bound=500)
        for kk in range(len(ixs)):
            nn_ixs = ixs[kk]
            nn_dists = dists[kk]
            col[curr_ind + kk] = glia_dict[so]
            for curr_ix, curr_dist in zip(nn_ixs, nn_dists):
                col[curr_ind + curr_ix] = glia_dict[so]
                if is_glia_sv:
                    dist = curr_dist
                else:  # only take path through glia into account
                    dist = 0
                g.add_edge(kk + curr_ind, curr_ix + curr_ind, weights=dist)
        curr_ind += len(vert_resh)
    start_ix = 0  # choose any index of the first mesh
    end_ix = curr_ind - 1  # choose any index of the last mesh
    shortest_path_length = nx.dijkstra_path_length(g, start_ix, end_ix, weight="weights")
    if write_paths is not None:
        shortest_path = nx.dijkstra_path(g, start_ix, end_ix, weight="weights")
        anno = coordpath2anno([all_vert[ix] for ix in shortest_path])
        anno.setComment("{0:.4}".format(shortest_path_length))
        skel = Skeleton()
        skel.add_annotation(anno)
        # fixed: the original format string "{{}/{0:.4}_vertpath.k.zip" raised
        # ValueError ("Single '}' encountered in format string") at runtime
        skel.to_kzip("{}/{:.4}_vertpath.k.zip".format(write_paths, shortest_path_length))
    return shortest_path_length
def eucl_dist(a, b):
    """Euclidean (L2) distance between the two points `a` and `b`."""
    difference = np.subtract(a, b)
    return np.linalg.norm(difference)
def get_glia_paths(g, glia_dict, node2ccsize_dict, min_cc_size_neuron,
node2ccsize_dict_glia, min_cc_size_glia):
"""
Currently not in use, Refactoring needed
Find paths between neuron type SV grpah nodes which contain glia nodes.
Args:
g: nx.Graph
glia_dict:
node2ccsize_dict:
min_cc_size_neuron:
node2ccsize_dict_glia:
min_cc_size_glia:
Returns:
"""
end_nodes = []
paths = nx.all_pairs_dijkstra_path(g, weight="weights")
for n, d in g.degree().items():
if d == 1 and glia_dict[n] == 0 and node2ccsize_dict[n] > min_cc_size_neuron:
end_nodes.append(n)
# find all nodes along these ways and store them as mandatory nodes
glia_paths = []
glia_svixs_in_paths = []
for a, b in itertools.combinations(end_nodes, 2):
glia_nodes = [n for n in paths[a][b] if glia_dict[n] != 0]
if len(glia_nodes) == 0:
continue
sv_ccsizes = [node2ccsize_dict_glia[n] for n in glia_nodes]
if np.max(sv_ccsizes) <= min_cc_size_glia: # check minimum glia size
continue
sv_ixs = np.array([n.id for n in glia_nodes])
glia_nodes_already_exist = False
for el_ixs in glia_svixs_in_paths:
if np.all(sv_ixs == el_ixs):
glia_nodes_already_exist = True
break
if glia_nodes_already_exist: # check if same glia path exists already
continue
glia_paths.append(paths[a][b])
glia_svixs_in_paths.append(np.array([so.id for so in glia_nodes]))
return glia_paths
def write_sopath2skeleton(so_path, dest_path, scaling=None, comment=None):
    """
    Writes a very simple skeleton; each node represents the center of mass of
    a SV, and edges are created in list order.

    Args:
        so_path: list of SegmentationObject
        dest_path: str
        scaling: np.ndarray or tuple
        comment: str
    """
    if scaling is None:
        scaling = np.array(global_params.config['scaling'])
    skel = Skeleton()
    anno = SkeletonAnnotation()
    anno.scaling = scaling
    rep_nodes = []
    for so in so_path:
        vert = so.mesh[1].reshape((-1, 3))
        com = np.mean(vert, axis=0)
        # snap the center of mass onto the closest actual mesh vertex
        kd_tree = spatial.cKDTree(vert)
        dist, nn_ix = kd_tree.query([com])
        nn = vert[nn_ix[0]] / scaling
        node = SkeletonNode().from_scratch(anno, nn[0], nn[1], nn[2])
        anno.addNode(node)
        rep_nodes.append(node)
    # chain consecutive representative nodes
    for prev_node, next_node in zip(rep_nodes, rep_nodes[1:]):
        anno.addEdge(prev_node, next_node)
    if comment is not None:
        anno.setComment(comment)
    skel.add_annotation(anno)
    skel.to_kzip(dest_path)
def coordpath2anno(coords: np.ndarray, scaling: Optional[np.ndarray] = None) -> SkeletonAnnotation:
    """
    Creates skeleton from scaled coordinates; assumes coords are ordered for
    edge creation.

    Args:
        coords: Ordered coordinates (physical units).
        scaling: Voxel scaling used to convert to voxel coordinates.

    Returns: SkeletonAnnotation
    """
    if scaling is None:
        scaling = global_params.config['scaling']
    anno = SkeletonAnnotation()
    anno.scaling = scaling
    nodes = []
    for coord in coords:
        # convert from physical to voxel coordinates
        node = SkeletonNode().from_scratch(anno, coord[0] / scaling[0],
                                           coord[1] / scaling[1],
                                           coord[2] / scaling[2])
        anno.addNode(node)
        nodes.append(node)
    # chain consecutive nodes
    for prev_node, next_node in zip(nodes, nodes[1:]):
        anno.addEdge(prev_node, next_node)
    return anno
def create_graph_from_coords(coords: np.ndarray, max_dist: float = 6000, force_single_cc: bool = True,
                             mst: bool = False) -> nx.Graph:
    """
    Generate a skeleton graph from sample locations by adding edges between
    points within a maximum distance, optionally pruned via MST. Nodes carry a
    'position' attribute.

    Args:
        coords: Coordinates.
        max_dist: Add edges between two nodes that are within this distance.
        force_single_cc: Force that the tree generated from coords is a single connected component.
        mst: Compute the minimum spanning tree.

    Returns:
        Networkx graph; node indices follow the ordering of coords, i.e. edge
        (1, 2) connects coord[1] and coord[2].
    """
    g = nx.Graph()
    if len(coords) == 1:
        # degenerate case: a lone node with a zero-weight self-loop
        g.add_node(0)
        g.add_weighted_edges_from([[0, 0, 0]])
        return g
    pairs = spatial.cKDTree(coords).query_pairs(r=max_dist, output_type="ndarray")
    g.add_nodes_from((ix, dict(position=coord)) for ix, coord in enumerate(coords))
    pair_dists = np.linalg.norm(coords[pairs[:, 0]] - coords[pairs[:, 1]], axis=1)
    g.add_weighted_edges_from([[u, v, w] for (u, v), w in zip(pairs, pair_dists)])
    if force_single_cc:  # make sure it is a single connected component
        g = stitch_skel_nx(g)
    if mst:
        g = nx.minimum_spanning_tree(g)
    return g
def draw_glia_graph(G, dest_path, min_sv_size=0, ext_glia=None, iterations=150, seed=0,
                    glia_key="glia_probas", node_size_cap=np.inf, mcmp=None, pos=None,
                    glia_thresh=0.5):
    """
    Draw graph with nodes colored in red (glia) and blue depending on their
    class. Writes drawing to dest_path.

    Args:
        G: nx.Graph
        dest_path: str
        min_sv_size: int
        ext_glia: dict
            keys: node in G, values: number indicating class
        iterations:
        seed: int
            Default: 0; random seed for layout generation
        glia_key: str
        node_size_cap: int
        mcmp: color palette
        pos:
        glia_thresh: float
            Classification threshold passed to the glia prediction. The
            original code referenced an undefined global ``glia_thresh``
            (NameError); default 0.5 -- TODO confirm the intended value.

    Returns:
        Node positions of the drawn layout.
    """
    import matplotlib.pyplot as plt
    import seaborn as sns
    if mcmp is None:
        mcmp = sns.diverging_palette(250, 15, s=99, l=60, center="dark",
                                     as_cmap=True)
    # honor the documented seed parameter (was hard-coded to 0)
    np.random.seed(seed)
    seg_objs = list(G.nodes())
    glianess, size = get_glianess_dict(seg_objs, glia_thresh, glia_key, 5,
                                       use_sv_volume=True)
    if ext_glia is not None:
        for n in G.nodes():
            glianess[n] = ext_glia[n.id]
    plt.figure()
    n_size = np.array([size[n] ** (1. / 3) for n in G.nodes()]).astype(
        np.float32)  # reduce cubic relation to a linear one
    if node_size_cap == "max":
        node_size_cap = np.max(n_size)
    n_size[n_size > node_size_cap] = node_size_cap
    col = np.array([glianess[n] for n in G.nodes()])
    col = col[n_size >= min_sv_size]
    # use '>=' consistently with the color and size filters above; the
    # original '>' produced a nodelist shorter than col/n_size whenever a
    # node size equaled min_sv_size exactly
    nodelist = list(np.array(list(G.nodes()))[n_size >= min_sv_size])
    n_size = n_size[n_size >= min_sv_size]
    n_size = n_size / np.max(n_size) * 25.
    if pos is None:
        # NOTE(review): newer networkx versions name this parameter 'seed',
        # not 'random_state' -- confirm against the pinned networkx version
        pos = nx.spring_layout(G, weight="weight", iterations=iterations, random_state=seed)
    nx.draw(G, nodelist=nodelist, node_color=col, node_size=n_size,
            cmap=mcmp, width=0.15, pos=pos, linewidths=0)
    plt.savefig(dest_path)
    plt.close()
    return pos
def nxGraph2kzip(g, coords, kzip_path):
    """Write a networkx graph with node coordinates to a kzip skeleton file.

    Args:
        g: Graph whose node IDs index into `coords`.
        coords: Node locations (physical units); converted to voxel
            coordinates via the global scaling.
        kzip_path: Output path of the kzip file.
    """
    import tqdm
    scaling = global_params.config['scaling']
    coords = coords / scaling
    skel = Skeleton()
    anno = SkeletonAnnotation()
    anno.scaling = scaling
    node_mapping = {}
    pbar = tqdm.tqdm(total=len(coords) + len(g.edges()), leave=False)
    for node_id in g.nodes():
        c = coords[node_id]
        skel_node = SkeletonNode().from_scratch(anno, c[0], c[1], c[2])
        node_mapping[node_id] = skel_node
        anno.addNode(skel_node)
        pbar.update(1)
    for n1, n2 in g.edges():
        anno.addEdge(node_mapping[n1], node_mapping[n2])
        pbar.update(1)
    skel.add_annotation(anno)
    skel.to_kzip(kzip_path)
    pbar.close()
def svgraph2kzip(ssv: 'SuperSegmentationObject', kzip_path: str):
    """
    Writes the SV graph stored in `ssv.edgelist_path` to a kzip file.
    The representative coordinate of a SV is used as the corresponding node
    location.

    Args:
        ssv: Cell reconstruction object.
        kzip_path: Path to the output kzip file.
    """
    import tqdm
    sv_graph = nx.read_edgelist(ssv.edgelist_path, nodetype=int)
    coords = {sv_id: ssv.get_seg_obj('sv', sv_id).rep_coord
              for sv_id in sv_graph.nodes}
    skel = Skeleton()
    anno = SkeletonAnnotation()
    anno.scaling = ssv.scaling
    node_mapping = {}
    pbar = tqdm.tqdm(total=len(coords) + len(sv_graph.edges()), leave=False)
    for sv_id in sv_graph.nodes:
        c = coords[sv_id]
        skel_node = SkeletonNode().from_scratch(anno, c[0], c[1], c[2])
        # store the SV id as node comment for inspection in KNOSSOS
        skel_node.setComment(f'{sv_id}')
        node_mapping[sv_id] = skel_node
        anno.addNode(skel_node)
        pbar.update(1)
    for n1, n2 in sv_graph.edges():
        anno.addEdge(node_mapping[n1], node_mapping[n2])
        pbar.update(1)
    skel.add_annotation(anno)
    skel.to_kzip(kzip_path)
    pbar.close()
def stitch_skel_nx(skel_nx: nx.Graph, n_jobs: int = 1) -> nx.Graph:
| """
Stitch connected components within a graph by recursively adding edges between the closest components.
Args:
skel_nx: Networkx graph. Nodes require 'position' attribute.
n_jobs: Number of jobs used for query of cKDTree.
Returns:
Single connected component graph.
"""
if skel_nx.number_of_nodes() == 0:
return skel_nx
no_of_seg = nx.number_connected_components(skel_nx)
if no_of_seg == 1:
return skel_nx
skel_nx_nodes = np.array([skel_nx.nodes[ix]['position'] for ix in skel_nx.nodes()], dtype=np.int64)
while no_of_seg != 1:
rest_nodes = []
rest_nodes_ixs = []
list_of_comp = np.array([c for c in sorted(nx.connected_components(skel_nx), key=len, reverse=True)])
for single_rest_graph in list_of_comp[1:]:
rest_nodes += [skel_nx_nodes[int(ix)] for ix in single_rest_graph]
rest_nodes_ixs += list(single_rest_graph)
current_set_of_nodes = [skel_nx_nodes[int(ix)] for ix in list_of_comp[0]]
current_set_of_nodes_ixs = list(list_of_comp[0])
tree = spatial.cKDTree(rest_nodes, 1)
thread_lengths, indices = tree.query(current_set_of_nodes, n_jobs=n_jobs)
start_thread_index = np.argmin(thread_lengths)
stop_thread_index = indices[start_thread_index]
e1 = current_set_of_nodes_ixs[start_thread_index]
e2 = rest_nodes_ixs[stop_thread_index]
skel_nx.add_edge(e1, e2)
no_of_seg -= 1
return skel_nx | identifier_body | |
graphs.py | # -*- coding: utf-8 -*-
# SyConn - Synaptic connectivity inference toolkit
#
# Copyright (c) 2016 - now
# Max Planck Institute of Neurobiology, Martinsried, Germany
# Authors: Philipp Schubert, Joergen Kornfeld
import itertools
from typing import List, Any, Optional, TYPE_CHECKING
import networkx as nx
import numpy as np
import tqdm
from knossos_utils.skeleton import Skeleton, SkeletonAnnotation, SkeletonNode
from scipy import spatial
if TYPE_CHECKING:
from ..reps.super_segmentation import SuperSegmentationObject
from .. import global_params
from ..mp.mp_utils import start_multiprocess_imap as start_multiprocess
def bfs_smoothing(vertices, vertex_labels, max_edge_length=120, n_voting=40):
    """
    Smooth vertex labels by applying a majority vote on a BFS subset of nodes
    for every node in the graph.

    Args:
        vertices: np.array
            N, 3
        vertex_labels: np.array
            N, 1
        max_edge_length: float
            Maximum distance between vertices to consider them connected in
            the graph.
        n_voting: int
            Number of collected nodes during BFS used for majority vote.

    Returns: np.array
        Smoothed vertex labels.
    """
    graph = create_graph_from_coords(vertices, max_dist=max_edge_length,
                                     mst=False, force_single_cc=False)
    # per-vertex BFS neighborhoods used as voting pools
    bfs_nn = split_subcc(graph, max_nb=n_voting, verbose=False)
    new_vertex_labels = np.zeros_like(vertex_labels)
    for ix in range(len(vertex_labels)):
        pool = vertex_labels[bfs_nn[ix]]
        labels, counts = np.unique(pool, return_counts=True)
        # majority vote within the BFS neighborhood
        new_vertex_labels[ix] = labels[np.argmax(counts)]
    return new_vertex_labels
def split_subcc(g, max_nb, verbose=False, start_nodes=None):
    """
    Creates a node list for each (start) node by collecting BFS neighbors
    until the maximum number of nodes is reached.

    Args:
        g: Graph
        max_nb: int
            Maximum number of BFS edges collected per seed node.
        verbose: bool
        start_nodes: iterable
            Node IDs used as BFS seeds; defaults to all nodes of `g`.

    Returns: dict
        Maps each seed node to the list of collected nodes (seed first).
    """
    subnodes = {}
    pbar = None
    if verbose:
        pbar = tqdm.tqdm(total=g.number_of_nodes(), leave=False)
    seeds = g.nodes() if start_nodes is None else start_nodes
    for seed in seeds:
        collected = [seed]
        # enumerate starts at 1: count of BFS edges consumed so far
        for cnt, (_, target) in enumerate(nx.bfs_edges(g, seed), 1):
            collected.append(target)
            if cnt == max_nb:
                break
        subnodes[seed] = collected
        if pbar is not None:
            pbar.update(1)
    if pbar is not None:
        pbar.close()
    return subnodes
def chunkify_contiguous(l, n):
    """Yield successive n-sized chunks from l.
    https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks"""
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
def split_subcc_join(g: nx.Graph, subgraph_size: int, lo_first_n: int = 1) -> List[List[Any]]:
    """
    Creates a subgraph for each node consisting of nodes until maximum number of
    nodes is reached.

    Args:
        g: Supervoxel graph
        subgraph_size: Size of subgraphs. The difference between `subgraph_size` and `lo_first_n` defines the
            supervoxel overlap.
        lo_first_n: Leave out first n nodes: will collect `subgraph_size` nodes starting from center node and then
            omit the first lo_first_n nodes, i.e. not use them as new starting nodes.

    Returns:
        List of node lists; each contains one base chunk plus its BFS context nodes.
    """
    # prefer a degree-1 node as DFS root; fall back to an arbitrary node
    # (e.g. for cyclic graphs without end points)
    start_node = list(g.nodes())[0]
    for n, d in dict(g.degree).items():
        if d == 1:
            start_node = n
            break
    dfs_nodes = list(nx.dfs_preorder_nodes(g, start_node))
    # get subgraphs via splicing of traversed node list into equally sized fragments. they might
    # be unconnected if branch sizes mod subgraph_size != 0, then a chunk will contain multiple connected components.
    # NOTE(review): the chunk size used here is `lo_first_n`, not
    # `subgraph_size` -- confirm this is intended.
    chunks = list(chunkify_contiguous(dfs_nodes, lo_first_n))
    sub_graphs = []
    for ch in chunks:
        # collect all connected component subgraphs
        sg = g.subgraph(ch).copy()
        sub_graphs += list((sg.subgraph(c) for c in nx.connected_components(sg)))
    # add more context to subgraphs
    subgraphs_withcontext = []
    for sg in sub_graphs:
        # add context but omit artificial start node
        context_nodes = []
        for n in list(sg.nodes()):
            subgraph_nodes_with_context = []
            # counter starts at the current subgraph size so that in total at
            # most `subgraph_size` nodes/edges are collected per seed node
            nb_edges = sg.number_of_nodes()
            for e in nx.bfs_edges(g, n):
                # collect both end points of each BFS edge as context
                subgraph_nodes_with_context += list(e)
                nb_edges += 1
                if nb_edges == subgraph_size:
                    break
            context_nodes += subgraph_nodes_with_context
        # add original nodes
        context_nodes = list(set(context_nodes))
        for n in list(sg.nodes()):
            if n in context_nodes:
                context_nodes.remove(n)
        subgraph_nodes_with_context = list(sg.nodes()) + context_nodes
        subgraphs_withcontext.append(subgraph_nodes_with_context)
    return subgraphs_withcontext
def merge_nodes(G, nodes, new_node):
    """Merge ``nodes`` of an unweighted, undirected graph into ``new_node``.

    Every edge between a merged node and a node outside ``nodes`` is re-wired
    to ``new_node``; the merged nodes are removed afterwards. ``G`` is
    modified in place.

    Args:
        G: nx.Graph (undirected, unweighted).
        nodes: Iterable of nodes to merge.
        new_node: Identifier of the node replacing ``nodes``.

    Raises:
        ValueError: If ``G`` is directed.
    """
    if G.is_directed():
        raise ValueError('Method "merge_nodes" is only valid for undirected graphs.')
    merged = set(nodes)
    G.add_node(new_node)
    for n in nodes:
        for e in G.edges(n):
            # add edge between new node and original partner node; skip
            # partners that are merged themselves or the new node itself,
            # which previously produced a spurious self-loop on `new_node`
            # whenever two merged nodes were adjacent
            edge = list(e)
            edge.remove(n)
            paired_node = edge[0]
            if paired_node in merged or paired_node == new_node:
                continue
            G.add_edge(new_node, paired_node)
    for n in nodes:  # remove the merged nodes
        G.remove_node(n)
def split_glia_graph(nx_g, thresh, clahe=False, nb_cpus=1, pred_key_appendix=""):
    """
    Split graph into glia and non-glia connected components.

    Args:
        nx_g: nx.Graph
        thresh: float
            Classification threshold for the glia prediction.
        clahe: bool
            Use the CLAHE-based prediction key.
        nb_cpus: int
        pred_key_appendix: str

    Returns: list, list
        Neuron, glia connected components.
    """
    # assemble the attribute key under which the glia probabilities are stored
    pred_key = "glia_probas"
    if clahe:
        pred_key = pred_key + "_clahe"
    pred_key = pred_key + pred_key_appendix
    glianess, size = get_glianess_dict(list(nx_g.nodes()), thresh, pred_key,
                                       nb_cpus=nb_cpus)
    return remove_glia_nodes(nx_g, size, glianess, return_removed_nodes=True)
def split_glia(sso, thresh, clahe=False, pred_key_appendix=""):
    """
    Split a SuperSegmentationObject into glia and non-glia SegmentationObjects.

    Args:
        sso: SuperSegmentationObject
        thresh: float
            Classification threshold for the glia prediction.
        clahe: bool
        pred_key_appendix: str
            Defines type of glia predictions.

    Returns: list, list (of SegmentationObject)
        Neuron, glia nodes.
    """
    return split_glia_graph(sso.rag, thresh=thresh, clahe=clahe,
                            nb_cpus=sso.nb_cpus,
                            pred_key_appendix=pred_key_appendix)
def create_ccsize_dict(g: nx.Graph, bbs: dict, is_connected_components: bool = False) -> dict:
    """
    Calculate bounding box size of connected components.

    Args:
        g: Supervoxel graph.
        bbs: Bounding boxes (physical units).
        is_connected_components: If graph `g` already is connected components. If False,
            ``nx.connected_components`` is applied.

    Returns:
        Look-up which stores the connected component bounding box diagonal for
        every single node in the input Graph `g`.
    """
    ccs = g if is_connected_components else nx.connected_components(g)
    node2cssize_dict = {}
    for cc in ccs:
        # nodes missing in bbs were skipped upstream due to low voxel count
        cc_boxes = [bbs[node] for node in cc if node in bbs]
        if not cc_boxes:
            raise ValueError(f'Could not find a single bounding box for connected component with IDs: {cc}.')
        stacked = np.concatenate(cc_boxes)
        # length of the diagonal of the joint bounding box
        cc_size = np.linalg.norm(stacked.max(axis=0) - stacked.min(axis=0), ord=2)
        node2cssize_dict.update((node, cc_size) for node in cc)
    return node2cssize_dict
def get_glianess_dict(seg_objs, thresh, glia_key, nb_cpus=1,
                      use_sv_volume=False, verbose=False):
    """Collect glia prediction and size for every segmentation object.

    Loads the per-object values in parallel via `glia_loader_helper`.

    Returns:
        Two dicts keyed by the objects in `seg_objs`: the glia prediction and
        the size (mesh bounding box, or voxel volume if `use_sv_volume`).
    """
    params = [[obj, glia_key, thresh, use_sv_volume] for obj in seg_objs]
    res = start_multiprocess(glia_loader_helper, params, nb_cpus=nb_cpus,
                             verbose=verbose, show_progress=verbose)
    glianess = {}
    sizes = {}
    for obj, (obj_glianess, obj_size) in zip(seg_objs, res):
        glianess[obj] = obj_glianess
        sizes[obj] = obj_size
    return glianess, sizes
def glia_loader_helper(args):
    """Load glia prediction and size of a single SegmentationObject.

    Args:
        args: Sequence of (SegmentationObject, attribute key of the glia
            probabilities, classification threshold, bool whether to use the
            voxel volume instead of the mesh bounding box as size).

    Returns:
        Glia prediction and size of the object.
    """
    so, glia_key, thresh, use_sv_volume = args
    # make sure the attribute cache containing the glia probabilities is loaded
    if glia_key not in so.attr_dict:
        so.load_attr_dict()
    curr_glianess = so.glia_pred(thresh)
    curr_size = so.size if use_sv_volume else so.mesh_bb
    return curr_glianess, curr_size
def remove_glia_nodes(g, size_dict, glia_dict, return_removed_nodes=False):
    """
    Calculate distance weights for shortest path analysis or similar, based on
    glia and size vertex properties and removes unsupporting glia nodes.

    Args:
        g: Graph
            Supervoxel graph.
        size_dict: dict
            Bounding box per node; forwarded to ``create_ccsize_dict`` to
            compute connected-component extents.
        glia_dict: dict
            Glia prediction per node; 0 means neuron, any other value glia.
        return_removed_nodes: bool
            If True, additionally return the glia connected components.

    Returns: list of list of nodes
        Remaining connected components of type neuron; if
        `return_removed_nodes` is True, a tuple (neuron CCs, glia CCs).
    """
    # set up node weights based on glia prediction and size
    # weights = {}
    # e_weights = {}
    # for n in g.nodes():
    #     weights[n] = np.linalg.norm(size_dict[n][1]-size_dict[n][0], ord=2)\
    #         * glia_dict[n]
    # # set up edge weights based on sum of node weights
    # for e in g.edges():
    #     e_weights[e] = weights[list(e)[0]] + weights[list(e)[1]]
    # nx.set_node_attributes(g, weights, 'weight')
    # nx.set_edge_attributes(g, e_weights, 'weights')
    # get neuron type connected component sizes
    g_neuron = g.copy()
    for n in g.nodes():
        if glia_dict[n] != 0:
            g_neuron.remove_node(n)
    neuron2ccsize_dict = create_ccsize_dict(g_neuron, size_dict)
    # early exit: every neuron CC is below the size threshold
    if np.all(np.array(list(neuron2ccsize_dict.values())) <=
              global_params.config['min_cc_size_ssv']):
        # no significant neuron SV
        if return_removed_nodes:
            return [], [list(g.nodes())]
        return []
    # get glia type connected component sizes
    g_glia = g.copy()
    for n in g.nodes():
        if glia_dict[n] == 0:
            g_glia.remove_node(n)
    glia2ccsize_dict = create_ccsize_dict(g_glia, size_dict)
    # early exit: every glia CC is below the size threshold
    if np.all(np.array(list(glia2ccsize_dict.values())) <=
              global_params.config['min_cc_size_ssv']):
        # no significant glia SV
        if return_removed_nodes:
            return [list(g.nodes())], []
        return [list(g.nodes())]
    # glia nodes in too-small glia CCs are kept with the neuron graph
    tiny_glia_fragments = []
    for n in g_glia.nodes():
        if glia2ccsize_dict[n] < global_params.config['min_cc_size_ssv']:
            tiny_glia_fragments += [n]
    # create new neuron graph without sufficiently big glia connected components
    g_neuron = g.copy()
    for n in g.nodes():
        if glia_dict[n] != 0 and n not in tiny_glia_fragments:
            g_neuron.remove_node(n)
    # find orphaned neuron SV's and add them to glia graph
    neuron2ccsize_dict = create_ccsize_dict(g_neuron, size_dict)
    # iterate over a copy because nodes are removed from g_neuron below
    g_tmp = g_neuron.copy()
    for n in g_tmp.nodes():
        if neuron2ccsize_dict[n] < global_params.config['min_cc_size_ssv']:
            g_neuron.remove_node(n)
    # create new glia graph with remaining nodes
    # (as the complementary set of sufficiently big neuron connected components)
    g_glia = g.copy()
    for n in g_neuron.nodes():
        g_glia.remove_node(n)
    neuron_ccs = list(nx.connected_components(g_neuron))
    if return_removed_nodes:
        glia_ccs = list(nx.connected_components(g_glia))
        # every node must end up in exactly one of the two graphs
        assert len(g_glia) + len(g_neuron) == len(g)
        return neuron_ccs, glia_ccs
    return neuron_ccs
def glia_path_length(glia_path, glia_dict, write_paths=None):
    """
    Get the path length of glia SV within glia_path. Assumes single connected
    glia component within this path. Uses the mesh property of each
    SegmentationObject to build a graph from all vertices to find shortest path
    through (or more precise: along the surface of) glia. Edges between non-glia
    vertices have negligible distance (0.0001) to ensure shortest path
    along non-glia surfaces.

    Args:
        glia_path: list of SegmentationObjects
        glia_dict: dict
            Dictionary which keys the SegmentationObjects in glia_path and returns
            their glia prediction
        write_paths: str, optional
            Destination folder for the shortest-path skeleton kzip; nothing is
            written if None.

    Returns: float
        Shortest path between neuron type nodes in nm
    """
    g = nx.Graph()
    col = {}
    curr_ind = 0
    if write_paths is not None:
        all_vert = np.zeros((0, 3))
    for so in glia_path:
        is_glia_sv = int(glia_dict[so] > 0)
        ind, vert = so.mesh
        # connect meshes of different SV, starts after first SV
        if curr_ind > 0:
            # build kd tree from vertices of SV before
            kd_tree = spatial.cKDTree(vert_resh)
            # get indices of vertices of SV before (= indices of graph nodes)
            ind_offset_before = curr_ind - len(vert_resh)
            # query vertices of current mesh to find close connects
            next_vert_resh = vert.reshape((-1, 3))
            dists, ixs = kd_tree.query(next_vert_resh, distance_upper_bound=500)
            for kk, ix in enumerate(ixs):
                if dists[kk] > 500:
                    continue
                if is_glia_sv:
                    edge_weight = eucl_dist(next_vert_resh[kk], vert_resh[ix])
                else:
                    # negligible weight: do not penalize paths along neuron SVs
                    edge_weight = 0.0001
                g.add_edge(curr_ind + kk, ind_offset_before + ix,
                           weights=edge_weight)
        vert_resh = vert.reshape((-1, 3))
        # save all vertices for writing shortest path skeleton
        if write_paths is not None:
            all_vert = np.concatenate([all_vert, vert_resh])
        # connect fragments of SV mesh
        kd_tree = spatial.cKDTree(vert_resh)
        dists, ixs = kd_tree.query(vert_resh, k=20, distance_upper_bound=500)
        for kk in range(len(ixs)):
            nn_ixs = ixs[kk]
            nn_dists = dists[kk]
            col[curr_ind + kk] = glia_dict[so]
            for curr_ix, curr_dist in zip(nn_ixs, nn_dists):
                col[curr_ind + curr_ix] = glia_dict[so]
                if is_glia_sv:
                    dist = curr_dist
                else:  # only take path through glia into account
                    dist = 0
                g.add_edge(kk + curr_ind, curr_ix + curr_ind, weights=dist)
        curr_ind += len(vert_resh)
    start_ix = 0  # choose any index of the first mesh
    end_ix = curr_ind - 1  # choose any index of the last mesh
    shortest_path_length = nx.dijkstra_path_length(g, start_ix, end_ix, weight="weights")
    if write_paths is not None:
        shortest_path = nx.dijkstra_path(g, start_ix, end_ix, weight="weights")
        anno = coordpath2anno([all_vert[ix] for ix in shortest_path])
        anno.setComment("{0:.4}".format(shortest_path_length))
        skel = Skeleton()
        skel.add_annotation(anno)
        # fixed: the original format string "{{}/{0:.4}_vertpath.k.zip" raised
        # ValueError ("Single '}' encountered in format string") at runtime
        skel.to_kzip("{}/{:.4}_vertpath.k.zip".format(write_paths, shortest_path_length))
    return shortest_path_length
def eucl_dist(a, b):
    """Euclidean (L2) distance between the two points `a` and `b`."""
    difference = np.subtract(a, b)
    return np.linalg.norm(difference)
def get_glia_paths(g, glia_dict, node2ccsize_dict, min_cc_size_neuron,
node2ccsize_dict_glia, min_cc_size_glia):
"""
Currently not in use, Refactoring needed
Find paths between neuron type SV grpah nodes which contain glia nodes.
Args:
g: nx.Graph
glia_dict:
node2ccsize_dict:
min_cc_size_neuron:
node2ccsize_dict_glia:
min_cc_size_glia:
Returns:
"""
end_nodes = []
paths = nx.all_pairs_dijkstra_path(g, weight="weights")
for n, d in g.degree().items():
if d == 1 and glia_dict[n] == 0 and node2ccsize_dict[n] > min_cc_size_neuron:
end_nodes.append(n)
# find all nodes along these ways and store them as mandatory nodes
glia_paths = []
glia_svixs_in_paths = []
for a, b in itertools.combinations(end_nodes, 2):
glia_nodes = [n for n in paths[a][b] if glia_dict[n] != 0]
if len(glia_nodes) == 0:
continue
sv_ccsizes = [node2ccsize_dict_glia[n] for n in glia_nodes]
if np.max(sv_ccsizes) <= min_cc_size_glia: # check minimum glia size
continue
sv_ixs = np.array([n.id for n in glia_nodes])
glia_nodes_already_exist = False
for el_ixs in glia_svixs_in_paths:
if np.all(sv_ixs == el_ixs):
glia_nodes_already_exist = True
break
if glia_nodes_already_exist: # check if same glia path exists already
continue
glia_paths.append(paths[a][b])
glia_svixs_in_paths.append(np.array([so.id for so in glia_nodes]))
return glia_paths
def write_sopath2skeleton(so_path, dest_path, scaling=None, comment=None):
"""
Writes very simple skeleton, each node represents the center of mass of a
SV, and edges are created in list order.
Args:
so_path: list of SegmentationObject
dest_path: str
scaling: np.ndarray or tuple
comment: str
Returns:
"""
if scaling is None:
scaling = np.array(global_params.config['scaling'])
skel = Skeleton()
anno = SkeletonAnnotation()
anno.scaling = scaling
rep_nodes = []
for so in so_path:
|
for i in range(1, len(rep_nodes)):
anno.addEdge(rep_nodes[i - 1], rep_nodes[i])
if comment is not None:
anno.setComment(comment)
skel.add_annotation(anno)
skel.to_kzip(dest_path)
def coordpath2anno(coords: np.ndarray, scaling: Optional[np.ndarray] = None) -> SkeletonAnnotation:
    """
    Creates skeleton from scaled coordinates; assumes coords are ordered for
    edge creation.

    Args:
        coords: Ordered coordinates (physical units).
        scaling: Voxel scaling used to convert to voxel coordinates.

    Returns: SkeletonAnnotation
    """
    if scaling is None:
        scaling = global_params.config['scaling']
    anno = SkeletonAnnotation()
    anno.scaling = scaling
    nodes = []
    for coord in coords:
        # convert from physical to voxel coordinates
        node = SkeletonNode().from_scratch(anno, coord[0] / scaling[0],
                                           coord[1] / scaling[1],
                                           coord[2] / scaling[2])
        anno.addNode(node)
        nodes.append(node)
    # chain consecutive nodes
    for prev_node, next_node in zip(nodes, nodes[1:]):
        anno.addEdge(prev_node, next_node)
    return anno
def create_graph_from_coords(coords: np.ndarray, max_dist: float = 6000, force_single_cc: bool = True,
mst: bool = False) -> nx.Graph:
"""
Generate skeleton from sample locations by adding edges between points with a maximum distance and then pruning
the skeleton using MST. Nodes will have a 'position' attribute.
Args:
coords: Coordinates.
max_dist: Add edges between two nodes that are within this distance.
force_single_cc: Force that the tree generated from coords is a single connected component.
mst: Compute the minimum spanning tree.
Returns:
Networkx graph. Edge between nodes (coord indices) using the ordering of coords, i.e. the
edge (1, 2) connects coordinate coord[1] and coord[2].
"""
g = nx.Graph()
if len(coords) == 1:
g.add_node(0)
g.add_weighted_edges_from([[0, 0, 0]])
return g
kd_t = spatial.cKDTree(coords)
pairs = kd_t.query_pairs(r=max_dist, output_type="ndarray")
g.add_nodes_from([(ix, dict(position=coord)) for ix, coord in enumerate(coords)])
weights = np.linalg.norm(coords[pairs[:, 0]] - coords[pairs[:, 1]], axis=1)
g.add_weighted_edges_from([[pairs[i][0], pairs[i][1], weights[i]] for i in range(len(pairs))])
if force_single_cc: # make sure its a connected component
g = stitch_skel_nx(g)
if mst:
g = nx.minimum_spanning_tree(g)
return g
def draw_glia_graph(G, dest_path, min_sv_size=0, ext_glia=None, iterations=150, seed=0,
glia_key="glia_probas", node_size_cap=np.inf, mcmp=None, pos=None):
"""
Draw graph with nodes colored in red (glia) and blue) depending on their
class. Writes drawing to dest_path.
Args:
G: nx.Graph
dest_path: str
min_sv_size: int
ext_glia: dict
keys: node in G, values: number indicating class
iterations:
seed: int
Default: 0; random seed for layout generation
glia_key: str
node_size_cap: int
mcmp: color palette
pos:
Returns:
"""
import matplotlib.pyplot as plt
import seaborn as sns
if mcmp is None:
mcmp = sns.diverging_palette(250, 15, s=99, l=60, center="dark",
as_cmap=True)
np.random.seed(0)
seg_objs = list(G.nodes())
glianess, size = get_glianess_dict(seg_objs, glia_thresh, glia_key, 5,
use_sv_volume=True)
if ext_glia is not None:
for n in G.nodes():
glianess[n] = ext_glia[n.id]
plt.figure()
n_size = np.array([size[n] ** (1. / 3) for n in G.nodes()]).astype(
np.float32) # reduce cubic relation to a linear one
# n_size = np.array([np.linalg.norm(size[n][1]-size[n][0]) for n in G.nodes()])
if node_size_cap == "max":
node_size_cap = np.max(n_size)
n_size[n_size > node_size_cap] = node_size_cap
col = np.array([glianess[n] for n in G.nodes()])
col = col[n_size >= min_sv_size]
nodelist = list(np.array(list(G.nodes()))[n_size > min_sv_size])
n_size = n_size[n_size >= min_sv_size]
n_size = n_size / np.max(n_size) * 25.
if pos is None:
pos = nx.spring_layout(G, weight="weight", iterations=iterations, random_state=seed)
nx.draw(G, nodelist=nodelist, node_color=col, node_size=n_size,
cmap=mcmp, width=0.15, pos=pos, linewidths=0)
plt.savefig(dest_path)
plt.close()
return pos
def nxGraph2kzip(g, coords, kzip_path):
import tqdm
scaling = global_params.config['scaling']
coords = coords / scaling
skel = Skeleton()
anno = SkeletonAnnotation()
anno.scaling = scaling
node_mapping = {}
pbar = tqdm.tqdm(total=len(coords) + len(g.edges()), leave=False)
for v in g.nodes():
c = coords[v]
n = SkeletonNode().from_scratch(anno, c[0], c[1], c[2])
node_mapping[v] = n
anno.addNode(n)
pbar.update(1)
for e in g.edges():
anno.addEdge(node_mapping[e[0]], node_mapping[e[1]])
pbar.update(1)
skel.add_annotation(anno)
skel.to_kzip(kzip_path)
pbar.close()
def svgraph2kzip(ssv: 'SuperSegmentationObject', kzip_path: str):
"""
Writes the SV graph stored in `ssv.edgelist_path` to a kzip file.
The representative coordinate of a SV is used as the corresponding node
location.
Args:
ssv: Cell reconstruction object.
kzip_path: Path to the output kzip file.
"""
sv_graph = nx.read_edgelist(ssv.edgelist_path, nodetype=int)
coords = {ix: ssv.get_seg_obj('sv', ix).rep_coord for ix in sv_graph.nodes}
import tqdm
skel = Skeleton()
anno = SkeletonAnnotation()
anno.scaling = ssv.scaling
node_mapping = {}
pbar = tqdm.tqdm(total=len(coords) + len(sv_graph.edges()), leave=False)
for v in sv_graph.nodes:
c = coords[v]
n = SkeletonNode().from_scratch(anno, c[0], c[1], c[2])
n.setComment(f'{v}')
node_mapping[v] = n
anno.addNode(n)
pbar.update(1)
for e in sv_graph.edges():
anno.addEdge(node_mapping[e[0]], node_mapping[e[1]])
pbar.update(1)
skel.add_annotation(anno)
skel.to_kzip(kzip_path)
pbar.close()
def stitch_skel_nx(skel_nx: nx.Graph, n_jobs: int = 1) -> nx.Graph:
"""
Stitch connected components within a graph by recursively adding edges between the closest components.
Args:
skel_nx: Networkx graph. Nodes require 'position' attribute.
n_jobs: Number of jobs used for query of cKDTree.
Returns:
Single connected component graph.
"""
if skel_nx.number_of_nodes() == 0:
return skel_nx
no_of_seg = nx.number_connected_components(skel_nx)
if no_of_seg == 1:
return skel_nx
skel_nx_nodes = np.array([skel_nx.nodes[ix]['position'] for ix in skel_nx.nodes()], dtype=np.int64)
while no_of_seg != 1:
rest_nodes = []
rest_nodes_ixs = []
list_of_comp = np.array([c for c in sorted(nx.connected_components(skel_nx), key=len, reverse=True)])
for single_rest_graph in list_of_comp[1:]:
rest_nodes += [skel_nx_nodes[int(ix)] for ix in single_rest_graph]
rest_nodes_ixs += list(single_rest_graph)
current_set_of_nodes = [skel_nx_nodes[int(ix)] for ix in list_of_comp[0]]
current_set_of_nodes_ixs = list(list_of_comp[0])
tree = spatial.cKDTree(rest_nodes, 1)
thread_lengths, indices = tree.query(current_set_of_nodes, n_jobs=n_jobs)
start_thread_index = np.argmin(thread_lengths)
stop_thread_index = indices[start_thread_index]
e1 = current_set_of_nodes_ixs[start_thread_index]
e2 = rest_nodes_ixs[stop_thread_index]
skel_nx.add_edge(e1, e2)
no_of_seg -= 1
return skel_nx
| vert = so.mesh[1].reshape((-1, 3))
com = np.mean(vert, axis=0)
kd_tree = spatial.cKDTree(vert)
dist, nn_ix = kd_tree.query([com])
nn = vert[nn_ix[0]] / scaling
n = SkeletonNode().from_scratch(anno, nn[0], nn[1], nn[2])
anno.addNode(n)
rep_nodes.append(n) | conditional_block |
graphs.py | # -*- coding: utf-8 -*-
# SyConn - Synaptic connectivity inference toolkit
#
# Copyright (c) 2016 - now
# Max Planck Institute of Neurobiology, Martinsried, Germany
# Authors: Philipp Schubert, Joergen Kornfeld
import itertools
from typing import List, Any, Optional, TYPE_CHECKING
import networkx as nx
import numpy as np
import tqdm
from knossos_utils.skeleton import Skeleton, SkeletonAnnotation, SkeletonNode
from scipy import spatial
if TYPE_CHECKING:
from ..reps.super_segmentation import SuperSegmentationObject
from .. import global_params
from ..mp.mp_utils import start_multiprocess_imap as start_multiprocess
def bfs_smoothing(vertices, vertex_labels, max_edge_length=120, n_voting=40):
"""
Smooth vertex labels by applying a majority vote on a
BFS subset of nodes for every node in the graph
Parameters
Args:
vertices: np.array
N, 3
vertex_labels: np.array
N, 1
max_edge_length: float
maximum distance between vertices to consider them connected in the
graph
n_voting: int
Number of collected nodes during BFS used for majority vote
Returns: np.array
smoothed vertex labels
"""
G = create_graph_from_coords(vertices, max_dist=max_edge_length, mst=False,
force_single_cc=False)
# create BFS subset
bfs_nn = split_subcc(G, max_nb=n_voting, verbose=False)
new_vertex_labels = np.zeros_like(vertex_labels)
for ii in range(len(vertex_labels)):
curr_labels = vertex_labels[bfs_nn[ii]]
labels, counts = np.unique(curr_labels, return_counts=True)
majority_label = labels[np.argmax(counts)]
new_vertex_labels[ii] = majority_label
return new_vertex_labels
def split_subcc(g, max_nb, verbose=False, start_nodes=None):
"""
Creates subgraph for each node consisting of nodes until maximum number of
nodes is reached.
Args:
g: Graph
max_nb: int
verbose: bool
start_nodes: iterable
node ID's
Returns: dict
"""
subnodes = {}
if verbose:
nb_nodes = g.number_of_nodes()
pbar = tqdm.tqdm(total=nb_nodes, leave=False)
if start_nodes is None:
iter_ixs = g.nodes()
else:
iter_ixs = start_nodes
for n in iter_ixs:
n_subgraph = [n]
nb_edges = 0
for e in nx.bfs_edges(g, n):
n_subgraph.append(e[1])
nb_edges += 1
if nb_edges == max_nb:
break
subnodes[n] = n_subgraph
if verbose:
pbar.update(1)
if verbose:
pbar.close()
return subnodes
def chunkify_contiguous(l, n):
"""Yield successive n-sized chunks from l.
https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks"""
for i in range(0, len(l), n):
yield l[i:i + n]
def split_subcc_join(g: nx.Graph, subgraph_size: int, lo_first_n: int = 1) -> List[List[Any]]:
"""
Creates a subgraph for each node consisting of nodes until maximum number of
nodes is reached.
Args:
g: Supervoxel graph
subgraph_size: Size of subgraphs. The difference between `subgraph_size` and `lo_first_n` defines the
supervoxel overlap.
lo_first_n: Leave out first n nodes: will collect `subgraph_size` nodes starting from center node and then
omit the first lo_first_n nodes, i.e. not use them as new starting nodes.
Returns:
"""
start_node = list(g.nodes())[0]
for n, d in dict(g.degree).items():
if d == 1:
start_node = n
break
dfs_nodes = list(nx.dfs_preorder_nodes(g, start_node))
# get subgraphs via splicing of traversed node list into equally sized fragments. they might
# be unconnected if branch sizes mod subgraph_size != 0, then a chunk will contain multiple connected components.
chunks = list(chunkify_contiguous(dfs_nodes, lo_first_n))
sub_graphs = []
for ch in chunks:
# collect all connected component subgraphs
sg = g.subgraph(ch).copy()
sub_graphs += list((sg.subgraph(c) for c in nx.connected_components(sg)))
# add more context to subgraphs
subgraphs_withcontext = []
for sg in sub_graphs:
# add context but omit artificial start node
context_nodes = []
for n in list(sg.nodes()):
subgraph_nodes_with_context = []
nb_edges = sg.number_of_nodes()
for e in nx.bfs_edges(g, n):
subgraph_nodes_with_context += list(e)
nb_edges += 1
if nb_edges == subgraph_size:
break
context_nodes += subgraph_nodes_with_context
# add original nodes
context_nodes = list(set(context_nodes))
for n in list(sg.nodes()):
if n in context_nodes:
context_nodes.remove(n)
subgraph_nodes_with_context = list(sg.nodes()) + context_nodes
subgraphs_withcontext.append(subgraph_nodes_with_context)
return subgraphs_withcontext
def merge_nodes(G, nodes, new_node):
""" FOR UNWEIGHTED, UNDIRECTED GRAPHS ONLY
"""
if G.is_directed():
raise ValueError('Method "merge_nodes" is only valid for undirected graphs.')
G.add_node(new_node)
for n in nodes:
for e in G.edges(n):
# add edge between new node and original partner node
edge = list(e)
edge.remove(n)
paired_node = edge[0]
G.add_edge(new_node, paired_node)
for n in nodes: # remove the merged nodes
G.remove_node(n)
def split_glia_graph(nx_g, thresh, clahe=False, nb_cpus=1, pred_key_appendix=""):
"""
Split graph into glia and non-glua CC's.
Args:
nx_g: nx.Graph
thresh: float
clahe: bool
nb_cpus: int
pred_key_appendix: str
verbose: bool
Returns: list, list
Neuron, glia connected components.
"""
glia_key = "glia_probas"
if clahe:
glia_key += "_clahe"
glia_key += pred_key_appendix
glianess, size = get_glianess_dict(list(nx_g.nodes()), thresh, glia_key,
nb_cpus=nb_cpus)
return remove_glia_nodes(nx_g, size, glianess, return_removed_nodes=True)
def split_glia(sso, thresh, clahe=False, pred_key_appendix=""):
"""
Split SuperSegmentationObject into glia and non glia
SegmentationObjects.
Args:
sso: SuperSegmentationObject
thresh: float
clahe: bool
pred_key_appendix: str
Defines type of glia predictions
Returns: list, list (of SegmentationObject)
Neuron, glia nodes
"""
nx_G = sso.rag
nonglia_ccs, glia_ccs = split_glia_graph(nx_G, thresh=thresh, clahe=clahe,
nb_cpus=sso.nb_cpus, pred_key_appendix=pred_key_appendix)
return nonglia_ccs, glia_ccs
def create_ccsize_dict(g: nx.Graph, bbs: dict, is_connected_components: bool = False) -> dict:
"""
Calculate bounding box size of connected components.
Args:
g: Supervoxel graph.
bbs: Bounding boxes (physical units).
is_connected_components: If graph `g` already is connected components. If False,
``nx.connected_components`` is applied.
Returns:
Look-up which stores the connected component bounding box for every single node in the input Graph `g`.
"""
if not is_connected_components:
ccs = nx.connected_components(g)
else:
ccs = g
node2cssize_dict = {}
for cc in ccs:
# if ID is not in bbs, it was skipped due to low voxel count
curr_bbs = [bbs[n] for n in cc if n in bbs]
if len(curr_bbs) == 0:
raise ValueError(f'Could not find a single bounding box for connected component with IDs: {cc}.')
else:
curr_bbs = np.concatenate(curr_bbs)
cc_size = np.linalg.norm(np.max(curr_bbs, axis=0) -
np.min(curr_bbs, axis=0), ord=2)
for n in cc:
node2cssize_dict[n] = cc_size
return node2cssize_dict
def get_glianess_dict(seg_objs, thresh, glia_key, nb_cpus=1,
use_sv_volume=False, verbose=False):
glianess = {}
sizes = {}
params = [[so, glia_key, thresh, use_sv_volume] for so in seg_objs]
res = start_multiprocess(glia_loader_helper, params, nb_cpus=nb_cpus,
verbose=verbose, show_progress=verbose)
for ii, el in enumerate(res):
so = seg_objs[ii]
glianess[so] = el[0]
sizes[so] = el[1]
return glianess, sizes
def glia_loader_helper(args):
so, glia_key, thresh, use_sv_volume = args
if glia_key not in so.attr_dict.keys():
so.load_attr_dict()
curr_glianess = so.glia_pred(thresh)
if not use_sv_volume:
curr_size = so.mesh_bb
else:
curr_size = so.size
return curr_glianess, curr_size
def remove_glia_nodes(g, size_dict, glia_dict, return_removed_nodes=False):
"""
Calculate distance weights for shortest path analysis or similar, based on
glia and size vertex properties and removes unsupporting glia nodes.
Args:
g: Graph
size_dict:
glia_dict:
return_removed_nodes: bool
Returns: list of list of nodes
Remaining connected components of type neuron
"""
# set up node weights based on glia prediction and size
# weights = {}
# e_weights = {}
# for n in g.nodes():
# weights[n] = np.linalg.norm(size_dict[n][1]-size_dict[n][0], ord=2)\
# * glia_dict[n]
# # set up edge weights based on sum of node weights
# for e in g.edges():
# e_weights[e] = weights[list(e)[0]] + weights[list(e)[1]]
# nx.set_node_attributes(g, weights, 'weight')
# nx.set_edge_attributes(g, e_weights, 'weights')
# get neuron type connected component sizes
g_neuron = g.copy()
for n in g.nodes():
if glia_dict[n] != 0:
g_neuron.remove_node(n)
neuron2ccsize_dict = create_ccsize_dict(g_neuron, size_dict)
if np.all(np.array(list(neuron2ccsize_dict.values())) <=
global_params.config['min_cc_size_ssv']):
# no significant neuron SV
if return_removed_nodes:
return [], [list(g.nodes())]
return []
# get glia type connected component sizes
g_glia = g.copy()
for n in g.nodes():
if glia_dict[n] == 0:
g_glia.remove_node(n)
glia2ccsize_dict = create_ccsize_dict(g_glia, size_dict)
if np.all(np.array(list(glia2ccsize_dict.values())) <=
global_params.config['min_cc_size_ssv']):
# no significant glia SV
if return_removed_nodes:
return [list(g.nodes())], []
return [list(g.nodes())]
tiny_glia_fragments = []
for n in g_glia.nodes():
if glia2ccsize_dict[n] < global_params.config['min_cc_size_ssv']:
tiny_glia_fragments += [n]
# create new neuron graph without sufficiently big glia connected components
g_neuron = g.copy()
for n in g.nodes():
if glia_dict[n] != 0 and n not in tiny_glia_fragments:
g_neuron.remove_node(n)
# find orphaned neuron SV's and add them to glia graph
neuron2ccsize_dict = create_ccsize_dict(g_neuron, size_dict)
g_tmp = g_neuron.copy()
for n in g_tmp.nodes():
if neuron2ccsize_dict[n] < global_params.config['min_cc_size_ssv']:
g_neuron.remove_node(n)
# create new glia graph with remaining nodes
# (as the complementary set of sufficiently big neuron connected components)
g_glia = g.copy()
for n in g_neuron.nodes():
g_glia.remove_node(n)
neuron_ccs = list(nx.connected_components(g_neuron))
if return_removed_nodes:
glia_ccs = list(nx.connected_components(g_glia))
assert len(g_glia) + len(g_neuron) == len(g)
return neuron_ccs, glia_ccs
return neuron_ccs
def | (glia_path, glia_dict, write_paths=None):
"""
Get the path length of glia SV within glia_path. Assumes single connected
glia component within this path. Uses the mesh property of each
SegmentationObject to build a graph from all vertices to find shortest path
through (or more precise: along the surface of) glia. Edges between non-glia
vertices have negligible distance (0.0001) to ensure shortest path
along non-glia surfaces.
Args:
glia_path: list of SegmentationObjects
glia_dict: dict
Dictionary which keys the SegmentationObjects in glia_path and returns
their glia prediction
write_paths: bool
Returns: float
Shortest path between neuron type nodes in nm
"""
g = nx.Graph()
col = {}
curr_ind = 0
if write_paths is not None:
all_vert = np.zeros((0, 3))
for so in glia_path:
is_glia_sv = int(glia_dict[so] > 0)
ind, vert = so.mesh
# connect meshes of different SV, starts after first SV
if curr_ind > 0:
# build kd tree from vertices of SV before
kd_tree = spatial.cKDTree(vert_resh)
# get indices of vertives of SV before (= indices of graph nodes)
ind_offset_before = curr_ind - len(vert_resh)
# query vertices of current mesh to find close connects
next_vert_resh = vert.reshape((-1, 3))
dists, ixs = kd_tree.query(next_vert_resh, distance_upper_bound=500)
for kk, ix in enumerate(ixs):
if dists[kk] > 500:
continue
if is_glia_sv:
edge_weight = eucl_dist(next_vert_resh[kk], vert_resh[ix])
else:
edge_weight = 0.0001
g.add_edge(curr_ind + kk, ind_offset_before + ix,
weights=edge_weight)
vert_resh = vert.reshape((-1, 3))
# save all vertices for writing shortest path skeleton
if write_paths is not None:
all_vert = np.concatenate([all_vert, vert_resh])
# connect fragments of SV mesh
kd_tree = spatial.cKDTree(vert_resh)
dists, ixs = kd_tree.query(vert_resh, k=20, distance_upper_bound=500)
for kk in range(len(ixs)):
nn_ixs = ixs[kk]
nn_dists = dists[kk]
col[curr_ind + kk] = glia_dict[so]
for curr_ix, curr_dist in zip(nn_ixs, nn_dists):
col[curr_ind + curr_ix] = glia_dict[so]
if is_glia_sv:
dist = curr_dist
else: # only take path through glia into account
dist = 0
g.add_edge(kk + curr_ind, curr_ix + curr_ind, weights=dist)
curr_ind += len(vert_resh)
start_ix = 0 # choose any index of the first mesh
end_ix = curr_ind - 1 # choose any index of the last mesh
shortest_path_length = nx.dijkstra_path_length(g, start_ix, end_ix, weight="weights")
if write_paths is not None:
shortest_path = nx.dijkstra_path(g, start_ix, end_ix, weight="weights")
anno = coordpath2anno([all_vert[ix] for ix in shortest_path])
anno.setComment("{0:.4}".format(shortest_path_length))
skel = Skeleton()
skel.add_annotation(anno)
skel.to_kzip("{{}/{0:.4}_vertpath.k.zip".format(write_paths, shortest_path_length))
return shortest_path_length
def eucl_dist(a, b):
return np.linalg.norm(a - b)
def get_glia_paths(g, glia_dict, node2ccsize_dict, min_cc_size_neuron,
node2ccsize_dict_glia, min_cc_size_glia):
"""
Currently not in use, Refactoring needed
Find paths between neuron type SV grpah nodes which contain glia nodes.
Args:
g: nx.Graph
glia_dict:
node2ccsize_dict:
min_cc_size_neuron:
node2ccsize_dict_glia:
min_cc_size_glia:
Returns:
"""
end_nodes = []
paths = nx.all_pairs_dijkstra_path(g, weight="weights")
for n, d in g.degree().items():
if d == 1 and glia_dict[n] == 0 and node2ccsize_dict[n] > min_cc_size_neuron:
end_nodes.append(n)
# find all nodes along these ways and store them as mandatory nodes
glia_paths = []
glia_svixs_in_paths = []
for a, b in itertools.combinations(end_nodes, 2):
glia_nodes = [n for n in paths[a][b] if glia_dict[n] != 0]
if len(glia_nodes) == 0:
continue
sv_ccsizes = [node2ccsize_dict_glia[n] for n in glia_nodes]
if np.max(sv_ccsizes) <= min_cc_size_glia: # check minimum glia size
continue
sv_ixs = np.array([n.id for n in glia_nodes])
glia_nodes_already_exist = False
for el_ixs in glia_svixs_in_paths:
if np.all(sv_ixs == el_ixs):
glia_nodes_already_exist = True
break
if glia_nodes_already_exist: # check if same glia path exists already
continue
glia_paths.append(paths[a][b])
glia_svixs_in_paths.append(np.array([so.id for so in glia_nodes]))
return glia_paths
def write_sopath2skeleton(so_path, dest_path, scaling=None, comment=None):
"""
Writes very simple skeleton, each node represents the center of mass of a
SV, and edges are created in list order.
Args:
so_path: list of SegmentationObject
dest_path: str
scaling: np.ndarray or tuple
comment: str
Returns:
"""
if scaling is None:
scaling = np.array(global_params.config['scaling'])
skel = Skeleton()
anno = SkeletonAnnotation()
anno.scaling = scaling
rep_nodes = []
for so in so_path:
vert = so.mesh[1].reshape((-1, 3))
com = np.mean(vert, axis=0)
kd_tree = spatial.cKDTree(vert)
dist, nn_ix = kd_tree.query([com])
nn = vert[nn_ix[0]] / scaling
n = SkeletonNode().from_scratch(anno, nn[0], nn[1], nn[2])
anno.addNode(n)
rep_nodes.append(n)
for i in range(1, len(rep_nodes)):
anno.addEdge(rep_nodes[i - 1], rep_nodes[i])
if comment is not None:
anno.setComment(comment)
skel.add_annotation(anno)
skel.to_kzip(dest_path)
def coordpath2anno(coords: np.ndarray, scaling: Optional[np.ndarray] = None) -> SkeletonAnnotation:
"""
Creates skeleton from scaled coordinates, assume coords are in order for
edge creation.
Args:
coords: np.array
scaling: np.ndarray
Returns: SkeletonAnnotation
"""
if scaling is None:
scaling = global_params.config['scaling']
anno = SkeletonAnnotation()
anno.scaling = scaling
rep_nodes = []
for c in coords:
n = SkeletonNode().from_scratch(anno, c[0] / scaling[0], c[1] / scaling[1],
c[2] / scaling[2])
anno.addNode(n)
rep_nodes.append(n)
for i in range(1, len(rep_nodes)):
anno.addEdge(rep_nodes[i - 1], rep_nodes[i])
return anno
def create_graph_from_coords(coords: np.ndarray, max_dist: float = 6000, force_single_cc: bool = True,
mst: bool = False) -> nx.Graph:
"""
Generate skeleton from sample locations by adding edges between points with a maximum distance and then pruning
the skeleton using MST. Nodes will have a 'position' attribute.
Args:
coords: Coordinates.
max_dist: Add edges between two nodes that are within this distance.
force_single_cc: Force that the tree generated from coords is a single connected component.
mst: Compute the minimum spanning tree.
Returns:
Networkx graph. Edge between nodes (coord indices) using the ordering of coords, i.e. the
edge (1, 2) connects coordinate coord[1] and coord[2].
"""
g = nx.Graph()
if len(coords) == 1:
g.add_node(0)
g.add_weighted_edges_from([[0, 0, 0]])
return g
kd_t = spatial.cKDTree(coords)
pairs = kd_t.query_pairs(r=max_dist, output_type="ndarray")
g.add_nodes_from([(ix, dict(position=coord)) for ix, coord in enumerate(coords)])
weights = np.linalg.norm(coords[pairs[:, 0]] - coords[pairs[:, 1]], axis=1)
g.add_weighted_edges_from([[pairs[i][0], pairs[i][1], weights[i]] for i in range(len(pairs))])
if force_single_cc: # make sure its a connected component
g = stitch_skel_nx(g)
if mst:
g = nx.minimum_spanning_tree(g)
return g
def draw_glia_graph(G, dest_path, min_sv_size=0, ext_glia=None, iterations=150, seed=0,
glia_key="glia_probas", node_size_cap=np.inf, mcmp=None, pos=None):
"""
Draw graph with nodes colored in red (glia) and blue) depending on their
class. Writes drawing to dest_path.
Args:
G: nx.Graph
dest_path: str
min_sv_size: int
ext_glia: dict
keys: node in G, values: number indicating class
iterations:
seed: int
Default: 0; random seed for layout generation
glia_key: str
node_size_cap: int
mcmp: color palette
pos:
Returns:
"""
import matplotlib.pyplot as plt
import seaborn as sns
if mcmp is None:
mcmp = sns.diverging_palette(250, 15, s=99, l=60, center="dark",
as_cmap=True)
np.random.seed(0)
seg_objs = list(G.nodes())
glianess, size = get_glianess_dict(seg_objs, glia_thresh, glia_key, 5,
use_sv_volume=True)
if ext_glia is not None:
for n in G.nodes():
glianess[n] = ext_glia[n.id]
plt.figure()
n_size = np.array([size[n] ** (1. / 3) for n in G.nodes()]).astype(
np.float32) # reduce cubic relation to a linear one
# n_size = np.array([np.linalg.norm(size[n][1]-size[n][0]) for n in G.nodes()])
if node_size_cap == "max":
node_size_cap = np.max(n_size)
n_size[n_size > node_size_cap] = node_size_cap
col = np.array([glianess[n] for n in G.nodes()])
col = col[n_size >= min_sv_size]
nodelist = list(np.array(list(G.nodes()))[n_size > min_sv_size])
n_size = n_size[n_size >= min_sv_size]
n_size = n_size / np.max(n_size) * 25.
if pos is None:
pos = nx.spring_layout(G, weight="weight", iterations=iterations, random_state=seed)
nx.draw(G, nodelist=nodelist, node_color=col, node_size=n_size,
cmap=mcmp, width=0.15, pos=pos, linewidths=0)
plt.savefig(dest_path)
plt.close()
return pos
def nxGraph2kzip(g, coords, kzip_path):
import tqdm
scaling = global_params.config['scaling']
coords = coords / scaling
skel = Skeleton()
anno = SkeletonAnnotation()
anno.scaling = scaling
node_mapping = {}
pbar = tqdm.tqdm(total=len(coords) + len(g.edges()), leave=False)
for v in g.nodes():
c = coords[v]
n = SkeletonNode().from_scratch(anno, c[0], c[1], c[2])
node_mapping[v] = n
anno.addNode(n)
pbar.update(1)
for e in g.edges():
anno.addEdge(node_mapping[e[0]], node_mapping[e[1]])
pbar.update(1)
skel.add_annotation(anno)
skel.to_kzip(kzip_path)
pbar.close()
def svgraph2kzip(ssv: 'SuperSegmentationObject', kzip_path: str):
"""
Writes the SV graph stored in `ssv.edgelist_path` to a kzip file.
The representative coordinate of a SV is used as the corresponding node
location.
Args:
ssv: Cell reconstruction object.
kzip_path: Path to the output kzip file.
"""
sv_graph = nx.read_edgelist(ssv.edgelist_path, nodetype=int)
coords = {ix: ssv.get_seg_obj('sv', ix).rep_coord for ix in sv_graph.nodes}
import tqdm
skel = Skeleton()
anno = SkeletonAnnotation()
anno.scaling = ssv.scaling
node_mapping = {}
pbar = tqdm.tqdm(total=len(coords) + len(sv_graph.edges()), leave=False)
for v in sv_graph.nodes:
c = coords[v]
n = SkeletonNode().from_scratch(anno, c[0], c[1], c[2])
n.setComment(f'{v}')
node_mapping[v] = n
anno.addNode(n)
pbar.update(1)
for e in sv_graph.edges():
anno.addEdge(node_mapping[e[0]], node_mapping[e[1]])
pbar.update(1)
skel.add_annotation(anno)
skel.to_kzip(kzip_path)
pbar.close()
def stitch_skel_nx(skel_nx: nx.Graph, n_jobs: int = 1) -> nx.Graph:
"""
Stitch connected components within a graph by recursively adding edges between the closest components.
Args:
skel_nx: Networkx graph. Nodes require 'position' attribute.
n_jobs: Number of jobs used for query of cKDTree.
Returns:
Single connected component graph.
"""
if skel_nx.number_of_nodes() == 0:
return skel_nx
no_of_seg = nx.number_connected_components(skel_nx)
if no_of_seg == 1:
return skel_nx
skel_nx_nodes = np.array([skel_nx.nodes[ix]['position'] for ix in skel_nx.nodes()], dtype=np.int64)
while no_of_seg != 1:
rest_nodes = []
rest_nodes_ixs = []
list_of_comp = np.array([c for c in sorted(nx.connected_components(skel_nx), key=len, reverse=True)])
for single_rest_graph in list_of_comp[1:]:
rest_nodes += [skel_nx_nodes[int(ix)] for ix in single_rest_graph]
rest_nodes_ixs += list(single_rest_graph)
current_set_of_nodes = [skel_nx_nodes[int(ix)] for ix in list_of_comp[0]]
current_set_of_nodes_ixs = list(list_of_comp[0])
tree = spatial.cKDTree(rest_nodes, 1)
thread_lengths, indices = tree.query(current_set_of_nodes, n_jobs=n_jobs)
start_thread_index = np.argmin(thread_lengths)
stop_thread_index = indices[start_thread_index]
e1 = current_set_of_nodes_ixs[start_thread_index]
e2 = rest_nodes_ixs[stop_thread_index]
skel_nx.add_edge(e1, e2)
no_of_seg -= 1
return skel_nx
| glia_path_length | identifier_name |
graphs.py | # -*- coding: utf-8 -*-
# SyConn - Synaptic connectivity inference toolkit
#
# Copyright (c) 2016 - now
# Max Planck Institute of Neurobiology, Martinsried, Germany
# Authors: Philipp Schubert, Joergen Kornfeld
import itertools
from typing import List, Any, Optional, TYPE_CHECKING
import networkx as nx
import numpy as np
import tqdm
from knossos_utils.skeleton import Skeleton, SkeletonAnnotation, SkeletonNode
from scipy import spatial
if TYPE_CHECKING:
from ..reps.super_segmentation import SuperSegmentationObject
from .. import global_params
from ..mp.mp_utils import start_multiprocess_imap as start_multiprocess
def bfs_smoothing(vertices, vertex_labels, max_edge_length=120, n_voting=40):
"""
Smooth vertex labels by applying a majority vote on a
BFS subset of nodes for every node in the graph
Parameters
Args:
vertices: np.array
N, 3
vertex_labels: np.array
N, 1
max_edge_length: float
maximum distance between vertices to consider them connected in the
graph
n_voting: int
Number of collected nodes during BFS used for majority vote
Returns: np.array
smoothed vertex labels
"""
G = create_graph_from_coords(vertices, max_dist=max_edge_length, mst=False,
force_single_cc=False)
# create BFS subset
bfs_nn = split_subcc(G, max_nb=n_voting, verbose=False)
new_vertex_labels = np.zeros_like(vertex_labels)
for ii in range(len(vertex_labels)):
curr_labels = vertex_labels[bfs_nn[ii]]
labels, counts = np.unique(curr_labels, return_counts=True)
majority_label = labels[np.argmax(counts)]
new_vertex_labels[ii] = majority_label
return new_vertex_labels
def split_subcc(g, max_nb, verbose=False, start_nodes=None):
"""
Creates subgraph for each node consisting of nodes until maximum number of
nodes is reached.
Args:
g: Graph
max_nb: int
verbose: bool
start_nodes: iterable
node ID's
Returns: dict
"""
subnodes = {}
if verbose:
nb_nodes = g.number_of_nodes()
pbar = tqdm.tqdm(total=nb_nodes, leave=False)
if start_nodes is None:
iter_ixs = g.nodes()
else:
iter_ixs = start_nodes
for n in iter_ixs:
n_subgraph = [n]
nb_edges = 0
for e in nx.bfs_edges(g, n):
n_subgraph.append(e[1])
nb_edges += 1
if nb_edges == max_nb:
break
subnodes[n] = n_subgraph
if verbose:
pbar.update(1)
if verbose:
pbar.close()
return subnodes
def chunkify_contiguous(l, n):
"""Yield successive n-sized chunks from l.
https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks"""
for i in range(0, len(l), n):
yield l[i:i + n]
def split_subcc_join(g: nx.Graph, subgraph_size: int, lo_first_n: int = 1) -> List[List[Any]]:
"""
Creates a subgraph for each node consisting of nodes until maximum number of
nodes is reached.
Args:
g: Supervoxel graph
subgraph_size: Size of subgraphs. The difference between `subgraph_size` and `lo_first_n` defines the
supervoxel overlap.
lo_first_n: Leave out first n nodes: will collect `subgraph_size` nodes starting from center node and then
omit the first lo_first_n nodes, i.e. not use them as new starting nodes.
Returns:
"""
start_node = list(g.nodes())[0]
for n, d in dict(g.degree).items():
if d == 1:
start_node = n
break
dfs_nodes = list(nx.dfs_preorder_nodes(g, start_node))
# get subgraphs via splicing of traversed node list into equally sized fragments. they might
# be unconnected if branch sizes mod subgraph_size != 0, then a chunk will contain multiple connected components.
chunks = list(chunkify_contiguous(dfs_nodes, lo_first_n))
sub_graphs = []
for ch in chunks:
# collect all connected component subgraphs
sg = g.subgraph(ch).copy()
sub_graphs += list((sg.subgraph(c) for c in nx.connected_components(sg)))
# add more context to subgraphs
subgraphs_withcontext = []
for sg in sub_graphs:
# add context but omit artificial start node
context_nodes = []
for n in list(sg.nodes()):
subgraph_nodes_with_context = []
nb_edges = sg.number_of_nodes()
for e in nx.bfs_edges(g, n):
subgraph_nodes_with_context += list(e)
nb_edges += 1
if nb_edges == subgraph_size:
break
context_nodes += subgraph_nodes_with_context
# add original nodes
context_nodes = list(set(context_nodes))
for n in list(sg.nodes()):
if n in context_nodes:
context_nodes.remove(n)
subgraph_nodes_with_context = list(sg.nodes()) + context_nodes
subgraphs_withcontext.append(subgraph_nodes_with_context)
return subgraphs_withcontext
def merge_nodes(G, nodes, new_node):
""" FOR UNWEIGHTED, UNDIRECTED GRAPHS ONLY
"""
if G.is_directed():
raise ValueError('Method "merge_nodes" is only valid for undirected graphs.')
G.add_node(new_node)
for n in nodes:
for e in G.edges(n):
# add edge between new node and original partner node
edge = list(e)
edge.remove(n)
paired_node = edge[0]
G.add_edge(new_node, paired_node)
for n in nodes: # remove the merged nodes
G.remove_node(n)
def split_glia_graph(nx_g, thresh, clahe=False, nb_cpus=1, pred_key_appendix=""):
"""
Split graph into glia and non-glua CC's.
Args:
nx_g: nx.Graph
thresh: float
clahe: bool
nb_cpus: int
pred_key_appendix: str
verbose: bool
Returns: list, list
Neuron, glia connected components.
"""
glia_key = "glia_probas"
if clahe:
glia_key += "_clahe"
glia_key += pred_key_appendix
glianess, size = get_glianess_dict(list(nx_g.nodes()), thresh, glia_key,
nb_cpus=nb_cpus)
return remove_glia_nodes(nx_g, size, glianess, return_removed_nodes=True)
def split_glia(sso, thresh, clahe=False, pred_key_appendix=""):
"""
Split SuperSegmentationObject into glia and non glia
SegmentationObjects.
Args:
sso: SuperSegmentationObject
thresh: float
clahe: bool
pred_key_appendix: str
Defines type of glia predictions
Returns: list, list (of SegmentationObject)
Neuron, glia nodes
"""
nx_G = sso.rag
nonglia_ccs, glia_ccs = split_glia_graph(nx_G, thresh=thresh, clahe=clahe,
nb_cpus=sso.nb_cpus, pred_key_appendix=pred_key_appendix)
return nonglia_ccs, glia_ccs
def create_ccsize_dict(g: nx.Graph, bbs: dict, is_connected_components: bool = False) -> dict:
"""
Calculate bounding box size of connected components.
Args:
g: Supervoxel graph.
bbs: Bounding boxes (physical units).
is_connected_components: If graph `g` already is connected components. If False,
``nx.connected_components`` is applied.
Returns:
Look-up which stores the connected component bounding box for every single node in the input Graph `g`.
"""
if not is_connected_components:
ccs = nx.connected_components(g)
else:
ccs = g
node2cssize_dict = {}
for cc in ccs:
# if ID is not in bbs, it was skipped due to low voxel count
curr_bbs = [bbs[n] for n in cc if n in bbs]
if len(curr_bbs) == 0:
raise ValueError(f'Could not find a single bounding box for connected component with IDs: {cc}.')
else:
curr_bbs = np.concatenate(curr_bbs)
cc_size = np.linalg.norm(np.max(curr_bbs, axis=0) -
np.min(curr_bbs, axis=0), ord=2)
for n in cc:
node2cssize_dict[n] = cc_size
return node2cssize_dict
def get_glianess_dict(seg_objs, thresh, glia_key, nb_cpus=1,
use_sv_volume=False, verbose=False):
glianess = {}
sizes = {}
params = [[so, glia_key, thresh, use_sv_volume] for so in seg_objs]
res = start_multiprocess(glia_loader_helper, params, nb_cpus=nb_cpus,
verbose=verbose, show_progress=verbose)
for ii, el in enumerate(res):
so = seg_objs[ii]
glianess[so] = el[0]
sizes[so] = el[1]
return glianess, sizes
def glia_loader_helper(args):
so, glia_key, thresh, use_sv_volume = args
if glia_key not in so.attr_dict.keys():
so.load_attr_dict()
curr_glianess = so.glia_pred(thresh)
if not use_sv_volume:
curr_size = so.mesh_bb
else:
curr_size = so.size
return curr_glianess, curr_size
def remove_glia_nodes(g, size_dict, glia_dict, return_removed_nodes=False):
"""
Calculate distance weights for shortest path analysis or similar, based on
glia and size vertex properties and removes unsupporting glia nodes.
Args:
g: Graph
size_dict:
glia_dict:
return_removed_nodes: bool
Returns: list of list of nodes
Remaining connected components of type neuron
"""
# set up node weights based on glia prediction and size
# weights = {}
# e_weights = {}
# for n in g.nodes():
# weights[n] = np.linalg.norm(size_dict[n][1]-size_dict[n][0], ord=2)\
# * glia_dict[n]
# # set up edge weights based on sum of node weights
# for e in g.edges():
# e_weights[e] = weights[list(e)[0]] + weights[list(e)[1]]
# nx.set_node_attributes(g, weights, 'weight')
# nx.set_edge_attributes(g, e_weights, 'weights')
# get neuron type connected component sizes
g_neuron = g.copy()
for n in g.nodes():
if glia_dict[n] != 0:
g_neuron.remove_node(n)
neuron2ccsize_dict = create_ccsize_dict(g_neuron, size_dict)
if np.all(np.array(list(neuron2ccsize_dict.values())) <=
global_params.config['min_cc_size_ssv']):
# no significant neuron SV
if return_removed_nodes:
return [], [list(g.nodes())]
return []
# get glia type connected component sizes
g_glia = g.copy()
for n in g.nodes():
if glia_dict[n] == 0:
g_glia.remove_node(n)
glia2ccsize_dict = create_ccsize_dict(g_glia, size_dict)
if np.all(np.array(list(glia2ccsize_dict.values())) <=
global_params.config['min_cc_size_ssv']):
# no significant glia SV
if return_removed_nodes:
return [list(g.nodes())], []
return [list(g.nodes())]
tiny_glia_fragments = []
for n in g_glia.nodes():
if glia2ccsize_dict[n] < global_params.config['min_cc_size_ssv']:
tiny_glia_fragments += [n]
# create new neuron graph without sufficiently big glia connected components
g_neuron = g.copy()
for n in g.nodes():
if glia_dict[n] != 0 and n not in tiny_glia_fragments:
g_neuron.remove_node(n)
# find orphaned neuron SV's and add them to glia graph
neuron2ccsize_dict = create_ccsize_dict(g_neuron, size_dict)
g_tmp = g_neuron.copy()
for n in g_tmp.nodes():
if neuron2ccsize_dict[n] < global_params.config['min_cc_size_ssv']:
g_neuron.remove_node(n)
# create new glia graph with remaining nodes
# (as the complementary set of sufficiently big neuron connected components)
g_glia = g.copy()
for n in g_neuron.nodes():
g_glia.remove_node(n)
neuron_ccs = list(nx.connected_components(g_neuron))
if return_removed_nodes:
glia_ccs = list(nx.connected_components(g_glia))
assert len(g_glia) + len(g_neuron) == len(g)
return neuron_ccs, glia_ccs
return neuron_ccs
def glia_path_length(glia_path, glia_dict, write_paths=None):
"""
Get the path length of glia SV within glia_path. Assumes single connected
glia component within this path. Uses the mesh property of each
SegmentationObject to build a graph from all vertices to find shortest path
through (or more precise: along the surface of) glia. Edges between non-glia
vertices have negligible distance (0.0001) to ensure shortest path
along non-glia surfaces.
Args:
glia_path: list of SegmentationObjects
glia_dict: dict
Dictionary which keys the SegmentationObjects in glia_path and returns
their glia prediction
write_paths: bool
Returns: float
Shortest path between neuron type nodes in nm
"""
g = nx.Graph()
col = {}
curr_ind = 0
if write_paths is not None:
all_vert = np.zeros((0, 3))
for so in glia_path:
is_glia_sv = int(glia_dict[so] > 0)
ind, vert = so.mesh
# connect meshes of different SV, starts after first SV
if curr_ind > 0:
# build kd tree from vertices of SV before
kd_tree = spatial.cKDTree(vert_resh)
# get indices of vertives of SV before (= indices of graph nodes)
ind_offset_before = curr_ind - len(vert_resh)
# query vertices of current mesh to find close connects
next_vert_resh = vert.reshape((-1, 3))
dists, ixs = kd_tree.query(next_vert_resh, distance_upper_bound=500)
for kk, ix in enumerate(ixs):
if dists[kk] > 500:
continue
if is_glia_sv:
edge_weight = eucl_dist(next_vert_resh[kk], vert_resh[ix])
else:
edge_weight = 0.0001
g.add_edge(curr_ind + kk, ind_offset_before + ix,
weights=edge_weight)
vert_resh = vert.reshape((-1, 3))
# save all vertices for writing shortest path skeleton
if write_paths is not None:
all_vert = np.concatenate([all_vert, vert_resh])
# connect fragments of SV mesh
kd_tree = spatial.cKDTree(vert_resh)
dists, ixs = kd_tree.query(vert_resh, k=20, distance_upper_bound=500)
for kk in range(len(ixs)):
nn_ixs = ixs[kk]
nn_dists = dists[kk]
col[curr_ind + kk] = glia_dict[so]
for curr_ix, curr_dist in zip(nn_ixs, nn_dists):
col[curr_ind + curr_ix] = glia_dict[so]
if is_glia_sv:
dist = curr_dist
else: # only take path through glia into account
dist = 0
g.add_edge(kk + curr_ind, curr_ix + curr_ind, weights=dist)
curr_ind += len(vert_resh)
start_ix = 0 # choose any index of the first mesh
end_ix = curr_ind - 1 # choose any index of the last mesh
shortest_path_length = nx.dijkstra_path_length(g, start_ix, end_ix, weight="weights")
if write_paths is not None:
shortest_path = nx.dijkstra_path(g, start_ix, end_ix, weight="weights")
anno = coordpath2anno([all_vert[ix] for ix in shortest_path])
anno.setComment("{0:.4}".format(shortest_path_length))
skel = Skeleton()
skel.add_annotation(anno)
skel.to_kzip("{{}/{0:.4}_vertpath.k.zip".format(write_paths, shortest_path_length))
return shortest_path_length
def eucl_dist(a, b):
return np.linalg.norm(a - b)
def get_glia_paths(g, glia_dict, node2ccsize_dict, min_cc_size_neuron,
node2ccsize_dict_glia, min_cc_size_glia):
"""
Currently not in use, Refactoring needed
Find paths between neuron type SV grpah nodes which contain glia nodes.
Args:
g: nx.Graph
glia_dict:
node2ccsize_dict:
min_cc_size_neuron:
node2ccsize_dict_glia:
min_cc_size_glia:
Returns:
"""
end_nodes = []
paths = nx.all_pairs_dijkstra_path(g, weight="weights")
for n, d in g.degree().items():
if d == 1 and glia_dict[n] == 0 and node2ccsize_dict[n] > min_cc_size_neuron:
end_nodes.append(n)
# find all nodes along these ways and store them as mandatory nodes
glia_paths = []
glia_svixs_in_paths = []
for a, b in itertools.combinations(end_nodes, 2):
glia_nodes = [n for n in paths[a][b] if glia_dict[n] != 0]
if len(glia_nodes) == 0:
continue
sv_ccsizes = [node2ccsize_dict_glia[n] for n in glia_nodes]
if np.max(sv_ccsizes) <= min_cc_size_glia: # check minimum glia size
continue
sv_ixs = np.array([n.id for n in glia_nodes])
glia_nodes_already_exist = False
for el_ixs in glia_svixs_in_paths:
if np.all(sv_ixs == el_ixs):
glia_nodes_already_exist = True
break
if glia_nodes_already_exist: # check if same glia path exists already
continue
glia_paths.append(paths[a][b])
glia_svixs_in_paths.append(np.array([so.id for so in glia_nodes]))
return glia_paths
def write_sopath2skeleton(so_path, dest_path, scaling=None, comment=None):
"""
Writes very simple skeleton, each node represents the center of mass of a
SV, and edges are created in list order.
Args:
so_path: list of SegmentationObject
dest_path: str
scaling: np.ndarray or tuple
comment: str
Returns:
"""
if scaling is None:
scaling = np.array(global_params.config['scaling'])
skel = Skeleton()
anno = SkeletonAnnotation()
anno.scaling = scaling | kd_tree = spatial.cKDTree(vert)
dist, nn_ix = kd_tree.query([com])
nn = vert[nn_ix[0]] / scaling
n = SkeletonNode().from_scratch(anno, nn[0], nn[1], nn[2])
anno.addNode(n)
rep_nodes.append(n)
for i in range(1, len(rep_nodes)):
anno.addEdge(rep_nodes[i - 1], rep_nodes[i])
if comment is not None:
anno.setComment(comment)
skel.add_annotation(anno)
skel.to_kzip(dest_path)
def coordpath2anno(coords: np.ndarray, scaling: Optional[np.ndarray] = None) -> SkeletonAnnotation:
"""
Creates skeleton from scaled coordinates, assume coords are in order for
edge creation.
Args:
coords: np.array
scaling: np.ndarray
Returns: SkeletonAnnotation
"""
if scaling is None:
scaling = global_params.config['scaling']
anno = SkeletonAnnotation()
anno.scaling = scaling
rep_nodes = []
for c in coords:
n = SkeletonNode().from_scratch(anno, c[0] / scaling[0], c[1] / scaling[1],
c[2] / scaling[2])
anno.addNode(n)
rep_nodes.append(n)
for i in range(1, len(rep_nodes)):
anno.addEdge(rep_nodes[i - 1], rep_nodes[i])
return anno
def create_graph_from_coords(coords: np.ndarray, max_dist: float = 6000, force_single_cc: bool = True,
mst: bool = False) -> nx.Graph:
"""
Generate skeleton from sample locations by adding edges between points with a maximum distance and then pruning
the skeleton using MST. Nodes will have a 'position' attribute.
Args:
coords: Coordinates.
max_dist: Add edges between two nodes that are within this distance.
force_single_cc: Force that the tree generated from coords is a single connected component.
mst: Compute the minimum spanning tree.
Returns:
Networkx graph. Edge between nodes (coord indices) using the ordering of coords, i.e. the
edge (1, 2) connects coordinate coord[1] and coord[2].
"""
g = nx.Graph()
if len(coords) == 1:
g.add_node(0)
g.add_weighted_edges_from([[0, 0, 0]])
return g
kd_t = spatial.cKDTree(coords)
pairs = kd_t.query_pairs(r=max_dist, output_type="ndarray")
g.add_nodes_from([(ix, dict(position=coord)) for ix, coord in enumerate(coords)])
weights = np.linalg.norm(coords[pairs[:, 0]] - coords[pairs[:, 1]], axis=1)
g.add_weighted_edges_from([[pairs[i][0], pairs[i][1], weights[i]] for i in range(len(pairs))])
if force_single_cc: # make sure its a connected component
g = stitch_skel_nx(g)
if mst:
g = nx.minimum_spanning_tree(g)
return g
def draw_glia_graph(G, dest_path, min_sv_size=0, ext_glia=None, iterations=150, seed=0,
glia_key="glia_probas", node_size_cap=np.inf, mcmp=None, pos=None):
"""
Draw graph with nodes colored in red (glia) and blue) depending on their
class. Writes drawing to dest_path.
Args:
G: nx.Graph
dest_path: str
min_sv_size: int
ext_glia: dict
keys: node in G, values: number indicating class
iterations:
seed: int
Default: 0; random seed for layout generation
glia_key: str
node_size_cap: int
mcmp: color palette
pos:
Returns:
"""
import matplotlib.pyplot as plt
import seaborn as sns
if mcmp is None:
mcmp = sns.diverging_palette(250, 15, s=99, l=60, center="dark",
as_cmap=True)
np.random.seed(0)
seg_objs = list(G.nodes())
glianess, size = get_glianess_dict(seg_objs, glia_thresh, glia_key, 5,
use_sv_volume=True)
if ext_glia is not None:
for n in G.nodes():
glianess[n] = ext_glia[n.id]
plt.figure()
n_size = np.array([size[n] ** (1. / 3) for n in G.nodes()]).astype(
np.float32) # reduce cubic relation to a linear one
# n_size = np.array([np.linalg.norm(size[n][1]-size[n][0]) for n in G.nodes()])
if node_size_cap == "max":
node_size_cap = np.max(n_size)
n_size[n_size > node_size_cap] = node_size_cap
col = np.array([glianess[n] for n in G.nodes()])
col = col[n_size >= min_sv_size]
nodelist = list(np.array(list(G.nodes()))[n_size > min_sv_size])
n_size = n_size[n_size >= min_sv_size]
n_size = n_size / np.max(n_size) * 25.
if pos is None:
pos = nx.spring_layout(G, weight="weight", iterations=iterations, random_state=seed)
nx.draw(G, nodelist=nodelist, node_color=col, node_size=n_size,
cmap=mcmp, width=0.15, pos=pos, linewidths=0)
plt.savefig(dest_path)
plt.close()
return pos
def nxGraph2kzip(g, coords, kzip_path):
import tqdm
scaling = global_params.config['scaling']
coords = coords / scaling
skel = Skeleton()
anno = SkeletonAnnotation()
anno.scaling = scaling
node_mapping = {}
pbar = tqdm.tqdm(total=len(coords) + len(g.edges()), leave=False)
for v in g.nodes():
c = coords[v]
n = SkeletonNode().from_scratch(anno, c[0], c[1], c[2])
node_mapping[v] = n
anno.addNode(n)
pbar.update(1)
for e in g.edges():
anno.addEdge(node_mapping[e[0]], node_mapping[e[1]])
pbar.update(1)
skel.add_annotation(anno)
skel.to_kzip(kzip_path)
pbar.close()
def svgraph2kzip(ssv: 'SuperSegmentationObject', kzip_path: str):
"""
Writes the SV graph stored in `ssv.edgelist_path` to a kzip file.
The representative coordinate of a SV is used as the corresponding node
location.
Args:
ssv: Cell reconstruction object.
kzip_path: Path to the output kzip file.
"""
sv_graph = nx.read_edgelist(ssv.edgelist_path, nodetype=int)
coords = {ix: ssv.get_seg_obj('sv', ix).rep_coord for ix in sv_graph.nodes}
import tqdm
skel = Skeleton()
anno = SkeletonAnnotation()
anno.scaling = ssv.scaling
node_mapping = {}
pbar = tqdm.tqdm(total=len(coords) + len(sv_graph.edges()), leave=False)
for v in sv_graph.nodes:
c = coords[v]
n = SkeletonNode().from_scratch(anno, c[0], c[1], c[2])
n.setComment(f'{v}')
node_mapping[v] = n
anno.addNode(n)
pbar.update(1)
for e in sv_graph.edges():
anno.addEdge(node_mapping[e[0]], node_mapping[e[1]])
pbar.update(1)
skel.add_annotation(anno)
skel.to_kzip(kzip_path)
pbar.close()
def stitch_skel_nx(skel_nx: nx.Graph, n_jobs: int = 1) -> nx.Graph:
"""
Stitch connected components within a graph by recursively adding edges between the closest components.
Args:
skel_nx: Networkx graph. Nodes require 'position' attribute.
n_jobs: Number of jobs used for query of cKDTree.
Returns:
Single connected component graph.
"""
if skel_nx.number_of_nodes() == 0:
return skel_nx
no_of_seg = nx.number_connected_components(skel_nx)
if no_of_seg == 1:
return skel_nx
skel_nx_nodes = np.array([skel_nx.nodes[ix]['position'] for ix in skel_nx.nodes()], dtype=np.int64)
while no_of_seg != 1:
rest_nodes = []
rest_nodes_ixs = []
list_of_comp = np.array([c for c in sorted(nx.connected_components(skel_nx), key=len, reverse=True)])
for single_rest_graph in list_of_comp[1:]:
rest_nodes += [skel_nx_nodes[int(ix)] for ix in single_rest_graph]
rest_nodes_ixs += list(single_rest_graph)
current_set_of_nodes = [skel_nx_nodes[int(ix)] for ix in list_of_comp[0]]
current_set_of_nodes_ixs = list(list_of_comp[0])
tree = spatial.cKDTree(rest_nodes, 1)
thread_lengths, indices = tree.query(current_set_of_nodes, n_jobs=n_jobs)
start_thread_index = np.argmin(thread_lengths)
stop_thread_index = indices[start_thread_index]
e1 = current_set_of_nodes_ixs[start_thread_index]
e2 = rest_nodes_ixs[stop_thread_index]
skel_nx.add_edge(e1, e2)
no_of_seg -= 1
return skel_nx | rep_nodes = []
for so in so_path:
vert = so.mesh[1].reshape((-1, 3))
com = np.mean(vert, axis=0) | random_line_split |
create.component.ts | import { Component } from '@angular/core';
import { ROUTER_DIRECTIVES, Router } from '@angular/router';
import { Observable } from 'rxjs/Observable';
import { HTTP_PROVIDERS } from '@angular/http';
import 'rxjs/Rx';
import { RewardType, RewardTypeService } from './RewardType.service';
@Component({
moduleId:module.id,
selector: 'create',
template: require('./template.html'),
styles: [ require('./style.css') ],
directives: [ROUTER_DIRECTIVES],
providers: [RewardTypeService, HTTP_PROVIDERS],
})
export class CreateComponent {
list: RewardType[];
type: number;
loading: number;
constructor(private rt: RewardTypeService, private router: Router) {
this.type = 1;
}
ngOnInit() { this.getList(); }
getList() {
this.list = [
{"type":1,"name": "展示型优惠券","ico": "type-ico-show", "status":1, "isChecked":true},
{"type":2,"name": "核验型优惠码","ico": "type-ico-pin", "status":1, "isChecked":false},
{"type":3,"name": "大转盘","ico": "type-ico-baccarat", "status":1, "isChecked":false},
{"type":4,"name": "手机话费","ico": "type-ico-phone", "status":0, "isChecked":false},
{"type":5,"name": "天会宝","ico": "type-ico-tian", "status":0, "isChecked":false},
{"type":6,"name": "万里通积分","ico": "type-ico-wan", "status":0, "isChecked":false},
{"type":7,"name": "线下实物寄送","ico": "type-ico-outline", "status":0, "isChecked":false},
{"type":8,"name": "集分宝","ico": "type-ico-score", "status":0, "isChecked":false}
];
// this.rt.getRewardtypes().subscribe(
// heroes => this.list = heroes ,
// error => this.handleError);
}
onSelect(item: RewardType) {
if (item.status === 0) return;
this.type = item.type;
this.rt.updateChecked(item.type, this.list);
}
routerUrl(type: number) {
let router;
switch (type) {
case 1: router = '/show/add';
break;
case 2: router = '/pin/add';
break;
case 3: router = '/baccarat/add';
break;
case 4: router = '/';
break;
case 5: router = '/';
break;
case 6: router = '/';
break;
case 7: router = '/';
break;
case 8: router = '/';
break;
default:
router = '/create';
}
this.router.navigate([router]);
}
redirectTo() {
this.list.map(data => { if (data.isChecked) { this.routerUrl(d | // In a real world app, we might use a remote logging infrastructure
let errMsg = error.message || 'Server error';
console.error(errMsg); // log to console instead
return Observable.throw(errMsg);
}
goBack() {
window.history.back();
}
}
| ata.type); } });
}
private handleError(error: any) {
this.loading = 0;
| identifier_body |
create.component.ts | import { Component } from '@angular/core';
import { ROUTER_DIRECTIVES, Router } from '@angular/router';
import { Observable } from 'rxjs/Observable';
import { HTTP_PROVIDERS } from '@angular/http';
import 'rxjs/Rx';
import { RewardType, RewardTypeService } from './RewardType.service';
@Component({
moduleId:module.id,
selector: 'create',
template: require('./template.html'),
styles: [ require('./style.css') ],
directives: [ROUTER_DIRECTIVES],
providers: [RewardTypeService, HTTP_PROVIDERS],
})
export class CreateComponent {
list: RewardType[];
type: number;
loading: number;
constructor(private rt: RewardTypeService, private router: Router) {
this.type = 1;
}
ngOnInit() { this.getList(); }
getList() {
this.list = [
{"type":1,"name": "展示型优惠券","ico": "type-ico-show", "status":1, "isChecked":true},
{"type":2,"name": "核验型优惠码","ico": "type-ico-pin", "status":1, "isChecked":false},
{"type":3,"name": "大转盘","ico": "type-ico-baccarat", "status":1, "isChecked":false},
{"type":4,"name": "手机话费","ico": "type-ico-phone", "status":0, "isChecked":false},
{"type":5,"name": "天会宝","ico": "type-ico-tian", "status":0, "isChecked":false},
{"type":6,"name": "万里通积分","ico": "type-ico-wan", "status":0, "isChecked":false},
{"type":7,"name": "线下实物寄送","ico": "type-ico-outline", "status":0, "isChecked":false},
{"type":8,"name": "集分宝","ico": "type-ico-score", "status":0, "isChecked":false}
];
// this.rt.getRewardtypes().subscribe(
// heroes => this.list = heroes ,
// error => this.handleError);
}
onSelect(item: RewardType) {
if (item.status === 0) return;
this.type = item.type;
this.rt.updateChecked(item.type, this.list);
}
routerUrl(type: number) {
let router;
switch (type) {
| case 1: router = '/show/add';
break;
case 2: router = '/pin/add';
break;
case 3: router = '/baccarat/add';
break;
case 4: router = '/';
break;
case 5: router = '/';
break;
case 6: router = '/';
break;
case 7: router = '/';
break;
case 8: router = '/';
break;
default:
router = '/create';
}
this.router.navigate([router]);
}
redirectTo() {
this.list.map(data => { if (data.isChecked) { this.routerUrl(data.type); } });
}
private handleError(error: any) {
this.loading = 0;
// In a real world app, we might use a remote logging infrastructure
let errMsg = error.message || 'Server error';
console.error(errMsg); // log to console instead
return Observable.throw(errMsg);
}
goBack() {
window.history.back();
}
}
| identifier_name | |
create.component.ts | import { Component } from '@angular/core';
import { ROUTER_DIRECTIVES, Router } from '@angular/router';
import { Observable } from 'rxjs/Observable';
import { HTTP_PROVIDERS } from '@angular/http';
import 'rxjs/Rx';
import { RewardType, RewardTypeService } from './RewardType.service';
@Component({
moduleId:module.id,
selector: 'create',
template: require('./template.html'),
styles: [ require('./style.css') ],
directives: [ROUTER_DIRECTIVES],
providers: [RewardTypeService, HTTP_PROVIDERS],
})
export class CreateComponent {
list: RewardType[];
type: number;
loading: number;
constructor(private rt: RewardTypeService, private router: Router) {
this.type = 1;
}
ngOnInit() { this.getList(); }
getList() {
this.list = [
{"type":1,"name": "展示型优惠券","ico": "type-ico-show", "status":1, "isChecked":true},
{"type":2,"name": "核验型优惠码","ico": "type-ico-pin", "status":1, "isChecked":false},
{"type":3,"name": "大转盘","ico": "type-ico-baccarat", "status":1, "isChecked":false},
{"type":4,"name": "手机话费","ico": "type-ico-phone", "status":0, "isChecked":false},
{"type":5,"name": "天会宝","ico": "type-ico-tian", "status":0, "isChecked":false},
{"type":6,"name": "万里通积分","ico": "type-ico-wan", "status":0, "isChecked":false},
{"type":7,"name": "线下实物寄送","ico": "type-ico-outline", "status":0, "isChecked":false},
{"type":8,"name": "集分宝","ico": "type-ico-score", "status":0, "isChecked":false}
];
// this.rt.getRewardtypes().subscribe(
// heroes => this.list = heroes ,
// error => this.handleError);
}
onSelect(item: RewardType) {
if (item.status === 0) return;
this.type = item.type;
this.rt.updateChecked(item.type, this.list);
}
routerUrl(type: number) {
let router;
switch (type) {
case 1: router = '/show/add';
break;
case 2: router = '/pin/add';
break;
case 3: router = '/baccarat/add';
break;
case 4: router = '/';
break;
case 5: router = '/';
break;
case 6: router = '/';
break;
case 7: router = '/'; | break;
default:
router = '/create';
}
this.router.navigate([router]);
}
redirectTo() {
this.list.map(data => { if (data.isChecked) { this.routerUrl(data.type); } });
}
private handleError(error: any) {
this.loading = 0;
// In a real world app, we might use a remote logging infrastructure
let errMsg = error.message || 'Server error';
console.error(errMsg); // log to console instead
return Observable.throw(errMsg);
}
goBack() {
window.history.back();
}
} | break;
case 8: router = '/'; | random_line_split |
object_safety.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! "Object safety" refers to the ability for a trait to be converted
//! to an object. In general, traits may only be converted to an
//! object if all of their methods meet certain criteria. In particular,
//! they must:
//!
//! - have a suitable receiver from which we can extract a vtable;
//! - not reference the erased type `Self` except for in this receiver;
//! - not have generic type parameters
use super::supertraits;
use super::elaborate_predicates;
use middle::subst::{self, SelfSpace, TypeSpace};
use middle::traits;
use middle::ty::{self, ToPolyTraitRef, Ty};
use std::rc::Rc;
use syntax::ast;
#[derive(Debug)]
pub enum ObjectSafetyViolation<'tcx> {
/// Self : Sized declared on the trait
SizedSelf,
/// Supertrait reference references `Self` an in illegal location
/// (e.g. `trait Foo : Bar<Self>`)
SupertraitSelf,
/// Method has something illegal
Method(Rc<ty::Method<'tcx>>, MethodViolationCode),
}
/// Reasons a method might not be object-safe.
#[derive(Copy,Clone,Debug)]
pub enum | {
/// e.g., `fn foo()`
StaticMethod,
/// e.g., `fn foo(&self, x: Self)` or `fn foo(&self) -> Self`
ReferencesSelf,
/// e.g., `fn foo<A>()`
Generic,
}
pub fn is_object_safe<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> bool
{
// Because we query yes/no results frequently, we keep a cache:
let def = tcx.lookup_trait_def(trait_def_id);
let result = def.object_safety().unwrap_or_else(|| {
let result = object_safety_violations(tcx, trait_def_id).is_empty();
// Record just a yes/no result in the cache; this is what is
// queried most frequently. Note that this may overwrite a
// previous result, but always with the same thing.
def.set_object_safety(result);
result
});
debug!("is_object_safe({:?}) = {}", trait_def_id, result);
result
}
pub fn object_safety_violations<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> Vec<ObjectSafetyViolation<'tcx>>
{
traits::supertrait_def_ids(tcx, trait_def_id)
.flat_map(|def_id| object_safety_violations_for_trait(tcx, def_id))
.collect()
}
fn object_safety_violations_for_trait<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> Vec<ObjectSafetyViolation<'tcx>>
{
// Check methods for violations.
let mut violations: Vec<_> =
tcx.trait_items(trait_def_id).iter()
.flat_map(|item| {
match *item {
ty::MethodTraitItem(ref m) => {
object_safety_violation_for_method(tcx, trait_def_id, &**m)
.map(|code| ObjectSafetyViolation::Method(m.clone(), code))
.into_iter()
}
_ => None.into_iter(),
}
})
.collect();
// Check the trait itself.
if trait_has_sized_self(tcx, trait_def_id) {
violations.push(ObjectSafetyViolation::SizedSelf);
}
if supertraits_reference_self(tcx, trait_def_id) {
violations.push(ObjectSafetyViolation::SupertraitSelf);
}
debug!("object_safety_violations_for_trait(trait_def_id={:?}) = {:?}",
trait_def_id,
violations);
violations
}
fn supertraits_reference_self<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> bool
{
let trait_def = tcx.lookup_trait_def(trait_def_id);
let trait_ref = trait_def.trait_ref.clone();
let trait_ref = trait_ref.to_poly_trait_ref();
let predicates = tcx.lookup_super_predicates(trait_def_id);
predicates
.predicates
.into_iter()
.map(|predicate| predicate.subst_supertrait(tcx, &trait_ref))
.any(|predicate| {
match predicate {
ty::Predicate::Trait(ref data) => {
// In the case of a trait predicate, we can skip the "self" type.
data.0.trait_ref.substs.types.get_slice(TypeSpace)
.iter()
.cloned()
.any(is_self)
}
ty::Predicate::Projection(..) |
ty::Predicate::TypeOutlives(..) |
ty::Predicate::RegionOutlives(..) |
ty::Predicate::Equate(..) => {
false
}
}
})
}
fn trait_has_sized_self<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> bool
{
let trait_def = tcx.lookup_trait_def(trait_def_id);
let trait_predicates = tcx.lookup_predicates(trait_def_id);
generics_require_sized_self(tcx, &trait_def.generics, &trait_predicates)
}
fn generics_require_sized_self<'tcx>(tcx: &ty::ctxt<'tcx>,
generics: &ty::Generics<'tcx>,
predicates: &ty::GenericPredicates<'tcx>)
-> bool
{
let sized_def_id = match tcx.lang_items.sized_trait() {
Some(def_id) => def_id,
None => { return false; /* No Sized trait, can't require it! */ }
};
// Search for a predicate like `Self : Sized` amongst the trait bounds.
let free_substs = tcx.construct_free_substs(generics, ast::DUMMY_NODE_ID);
let predicates = predicates.instantiate(tcx, &free_substs).predicates.into_vec();
elaborate_predicates(tcx, predicates)
.any(|predicate| {
match predicate {
ty::Predicate::Trait(ref trait_pred) if trait_pred.def_id() == sized_def_id => {
is_self(trait_pred.0.self_ty())
}
ty::Predicate::Projection(..) |
ty::Predicate::Trait(..) |
ty::Predicate::Equate(..) |
ty::Predicate::RegionOutlives(..) |
ty::Predicate::TypeOutlives(..) => {
false
}
}
})
}
/// Returns `Some(_)` if this method makes the containing trait not object safe.
fn object_safety_violation_for_method<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
method: &ty::Method<'tcx>)
-> Option<MethodViolationCode>
{
// Any method that has a `Self : Sized` requisite is otherwise
// exempt from the regulations.
if generics_require_sized_self(tcx, &method.generics, &method.predicates) {
return None;
}
virtual_call_violation_for_method(tcx, trait_def_id, method)
}
/// We say a method is *vtable safe* if it can be invoked on a trait
/// object. Note that object-safe traits can have some
/// non-vtable-safe methods, so long as they require `Self:Sized` or
/// otherwise ensure that they cannot be used when `Self=Trait`.
pub fn is_vtable_safe_method<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
method: &ty::Method<'tcx>)
-> bool
{
virtual_call_violation_for_method(tcx, trait_def_id, method).is_none()
}
/// Returns `Some(_)` if this method cannot be called on a trait
/// object; this does not necessarily imply that the enclosing trait
/// is not object safe, because the method might have a where clause
/// `Self:Sized`.
fn virtual_call_violation_for_method<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
method: &ty::Method<'tcx>)
-> Option<MethodViolationCode>
{
// The method's first parameter must be something that derefs (or
// autorefs) to `&self`. For now, we only accept `self`, `&self`
// and `Box<Self>`.
match method.explicit_self {
ty::StaticExplicitSelfCategory => {
return Some(MethodViolationCode::StaticMethod);
}
ty::ByValueExplicitSelfCategory |
ty::ByReferenceExplicitSelfCategory(..) |
ty::ByBoxExplicitSelfCategory => {
}
}
// The `Self` type is erased, so it should not appear in list of
// arguments or return type apart from the receiver.
let ref sig = method.fty.sig;
for &input_ty in &sig.0.inputs[1..] {
if contains_illegal_self_type_reference(tcx, trait_def_id, input_ty) {
return Some(MethodViolationCode::ReferencesSelf);
}
}
if let ty::FnConverging(result_type) = sig.0.output {
if contains_illegal_self_type_reference(tcx, trait_def_id, result_type) {
return Some(MethodViolationCode::ReferencesSelf);
}
}
// We can't monomorphize things like `fn foo<A>(...)`.
if !method.generics.types.is_empty_in(subst::FnSpace) {
return Some(MethodViolationCode::Generic);
}
None
}
fn contains_illegal_self_type_reference<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
ty: Ty<'tcx>)
-> bool
{
// This is somewhat subtle. In general, we want to forbid
// references to `Self` in the argument and return types,
// since the value of `Self` is erased. However, there is one
// exception: it is ok to reference `Self` in order to access
// an associated type of the current trait, since we retain
// the value of those associated types in the object type
// itself.
//
// ```rust
// trait SuperTrait {
// type X;
// }
//
// trait Trait : SuperTrait {
// type Y;
// fn foo(&self, x: Self) // bad
// fn foo(&self) -> Self // bad
// fn foo(&self) -> Option<Self> // bad
// fn foo(&self) -> Self::Y // OK, desugars to next example
// fn foo(&self) -> <Self as Trait>::Y // OK
// fn foo(&self) -> Self::X // OK, desugars to next example
// fn foo(&self) -> <Self as SuperTrait>::X // OK
// }
// ```
//
// However, it is not as simple as allowing `Self` in a projected
// type, because there are illegal ways to use `Self` as well:
//
// ```rust
// trait Trait : SuperTrait {
// ...
// fn foo(&self) -> <Self as SomeOtherTrait>::X;
// }
// ```
//
// Here we will not have the type of `X` recorded in the
// object type, and we cannot resolve `Self as SomeOtherTrait`
// without knowing what `Self` is.
let mut supertraits: Option<Vec<ty::PolyTraitRef<'tcx>>> = None;
let mut error = false;
ty.maybe_walk(|ty| {
match ty.sty {
ty::TyParam(ref param_ty) => {
if param_ty.space == SelfSpace {
error = true;
}
false // no contained types to walk
}
ty::TyProjection(ref data) => {
// This is a projected type `<Foo as SomeTrait>::X`.
// Compute supertraits of current trait lazily.
if supertraits.is_none() {
let trait_def = tcx.lookup_trait_def(trait_def_id);
let trait_ref = ty::Binder(trait_def.trait_ref.clone());
supertraits = Some(traits::supertraits(tcx, trait_ref).collect());
}
// Determine whether the trait reference `Foo as
// SomeTrait` is in fact a supertrait of the
// current trait. In that case, this type is
// legal, because the type `X` will be specified
// in the object type. Note that we can just use
// direct equality here because all of these types
// are part of the formal parameter listing, and
// hence there should be no inference variables.
let projection_trait_ref = ty::Binder(data.trait_ref.clone());
let is_supertrait_of_current_trait =
supertraits.as_ref().unwrap().contains(&projection_trait_ref);
if is_supertrait_of_current_trait {
false // do not walk contained types, do not report error, do collect $200
} else {
true // DO walk contained types, POSSIBLY reporting an error
}
}
_ => true, // walk contained types, if any
}
});
error
}
fn is_self<'tcx>(ty: Ty<'tcx>) -> bool {
match ty.sty {
ty::TyParam(ref data) => data.space == subst::SelfSpace,
_ => false,
}
}
| MethodViolationCode | identifier_name |
object_safety.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! "Object safety" refers to the ability for a trait to be converted
//! to an object. In general, traits may only be converted to an
//! object if all of their methods meet certain criteria. In particular,
//! they must:
//!
//! - have a suitable receiver from which we can extract a vtable;
//! - not reference the erased type `Self` except for in this receiver;
//! - not have generic type parameters
use super::supertraits;
use super::elaborate_predicates;
use middle::subst::{self, SelfSpace, TypeSpace};
use middle::traits;
use middle::ty::{self, ToPolyTraitRef, Ty};
use std::rc::Rc;
use syntax::ast;
#[derive(Debug)]
pub enum ObjectSafetyViolation<'tcx> {
/// Self : Sized declared on the trait
SizedSelf,
/// Supertrait reference references `Self` an in illegal location
/// (e.g. `trait Foo : Bar<Self>`)
SupertraitSelf,
/// Method has something illegal
Method(Rc<ty::Method<'tcx>>, MethodViolationCode),
}
/// Reasons a method might not be object-safe.
#[derive(Copy,Clone,Debug)]
pub enum MethodViolationCode {
/// e.g., `fn foo()`
StaticMethod,
/// e.g., `fn foo(&self, x: Self)` or `fn foo(&self) -> Self`
ReferencesSelf,
/// e.g., `fn foo<A>()`
Generic,
}
pub fn is_object_safe<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> bool
{
// Because we query yes/no results frequently, we keep a cache:
let def = tcx.lookup_trait_def(trait_def_id);
let result = def.object_safety().unwrap_or_else(|| {
let result = object_safety_violations(tcx, trait_def_id).is_empty();
// Record just a yes/no result in the cache; this is what is
// queried most frequently. Note that this may overwrite a
// previous result, but always with the same thing.
def.set_object_safety(result);
result
});
debug!("is_object_safe({:?}) = {}", trait_def_id, result);
result
}
pub fn object_safety_violations<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> Vec<ObjectSafetyViolation<'tcx>>
{
traits::supertrait_def_ids(tcx, trait_def_id)
.flat_map(|def_id| object_safety_violations_for_trait(tcx, def_id))
.collect()
}
fn object_safety_violations_for_trait<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> Vec<ObjectSafetyViolation<'tcx>>
{
// Check methods for violations.
let mut violations: Vec<_> =
tcx.trait_items(trait_def_id).iter()
.flat_map(|item| {
match *item {
ty::MethodTraitItem(ref m) => {
object_safety_violation_for_method(tcx, trait_def_id, &**m)
.map(|code| ObjectSafetyViolation::Method(m.clone(), code))
.into_iter()
}
_ => None.into_iter(),
}
})
.collect();
// Check the trait itself.
if trait_has_sized_self(tcx, trait_def_id) {
violations.push(ObjectSafetyViolation::SizedSelf);
}
if supertraits_reference_self(tcx, trait_def_id) {
violations.push(ObjectSafetyViolation::SupertraitSelf);
}
debug!("object_safety_violations_for_trait(trait_def_id={:?}) = {:?}",
trait_def_id,
violations);
violations
}
fn supertraits_reference_self<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> bool
{
let trait_def = tcx.lookup_trait_def(trait_def_id);
let trait_ref = trait_def.trait_ref.clone();
let trait_ref = trait_ref.to_poly_trait_ref();
let predicates = tcx.lookup_super_predicates(trait_def_id);
predicates
.predicates
.into_iter()
.map(|predicate| predicate.subst_supertrait(tcx, &trait_ref))
.any(|predicate| {
match predicate {
ty::Predicate::Trait(ref data) => {
// In the case of a trait predicate, we can skip the "self" type.
data.0.trait_ref.substs.types.get_slice(TypeSpace)
.iter()
.cloned()
.any(is_self)
}
ty::Predicate::Projection(..) |
ty::Predicate::TypeOutlives(..) |
ty::Predicate::RegionOutlives(..) |
ty::Predicate::Equate(..) => {
false
}
}
})
}
fn trait_has_sized_self<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> bool
{
let trait_def = tcx.lookup_trait_def(trait_def_id);
let trait_predicates = tcx.lookup_predicates(trait_def_id);
generics_require_sized_self(tcx, &trait_def.generics, &trait_predicates)
}
fn generics_require_sized_self<'tcx>(tcx: &ty::ctxt<'tcx>,
generics: &ty::Generics<'tcx>,
predicates: &ty::GenericPredicates<'tcx>)
-> bool
{
let sized_def_id = match tcx.lang_items.sized_trait() {
Some(def_id) => def_id,
None => { return false; /* No Sized trait, can't require it! */ }
};
// Search for a predicate like `Self : Sized` amongst the trait bounds.
let free_substs = tcx.construct_free_substs(generics, ast::DUMMY_NODE_ID);
let predicates = predicates.instantiate(tcx, &free_substs).predicates.into_vec();
elaborate_predicates(tcx, predicates)
.any(|predicate| {
match predicate {
ty::Predicate::Trait(ref trait_pred) if trait_pred.def_id() == sized_def_id => {
is_self(trait_pred.0.self_ty())
}
ty::Predicate::Projection(..) |
ty::Predicate::Trait(..) |
ty::Predicate::Equate(..) |
ty::Predicate::RegionOutlives(..) |
ty::Predicate::TypeOutlives(..) => {
false
}
}
})
}
/// Returns `Some(_)` if this method makes the containing trait not object safe.
fn object_safety_violation_for_method<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
method: &ty::Method<'tcx>)
-> Option<MethodViolationCode>
{
// Any method that has a `Self : Sized` requisite is otherwise
// exempt from the regulations.
if generics_require_sized_self(tcx, &method.generics, &method.predicates) {
return None;
}
virtual_call_violation_for_method(tcx, trait_def_id, method)
}
/// We say a method is *vtable safe* if it can be invoked on a trait
/// object. Note that object-safe traits can have some
/// non-vtable-safe methods, so long as they require `Self:Sized` or
/// otherwise ensure that they cannot be used when `Self=Trait`.
pub fn is_vtable_safe_method<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
method: &ty::Method<'tcx>)
-> bool
{
virtual_call_violation_for_method(tcx, trait_def_id, method).is_none()
}
/// Returns `Some(_)` if this method cannot be called on a trait
/// object; this does not necessarily imply that the enclosing trait
/// is not object safe, because the method might have a where clause
/// `Self:Sized`.
fn virtual_call_violation_for_method<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
method: &ty::Method<'tcx>)
-> Option<MethodViolationCode>
{
// The method's first parameter must be something that derefs (or
// autorefs) to `&self`. For now, we only accept `self`, `&self`
// and `Box<Self>`.
match method.explicit_self {
ty::StaticExplicitSelfCategory => {
return Some(MethodViolationCode::StaticMethod);
}
ty::ByValueExplicitSelfCategory |
ty::ByReferenceExplicitSelfCategory(..) |
ty::ByBoxExplicitSelfCategory => {
}
}
// The `Self` type is erased, so it should not appear in list of
// arguments or return type apart from the receiver.
let ref sig = method.fty.sig;
for &input_ty in &sig.0.inputs[1..] {
if contains_illegal_self_type_reference(tcx, trait_def_id, input_ty) {
return Some(MethodViolationCode::ReferencesSelf);
}
}
if let ty::FnConverging(result_type) = sig.0.output {
if contains_illegal_self_type_reference(tcx, trait_def_id, result_type) {
return Some(MethodViolationCode::ReferencesSelf);
}
}
// We can't monomorphize things like `fn foo<A>(...)`.
if !method.generics.types.is_empty_in(subst::FnSpace) {
return Some(MethodViolationCode::Generic);
}
None
}
fn contains_illegal_self_type_reference<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
ty: Ty<'tcx>)
-> bool
{
// This is somewhat subtle. In general, we want to forbid
// references to `Self` in the argument and return types,
// since the value of `Self` is erased. However, there is one
// exception: it is ok to reference `Self` in order to access
// an associated type of the current trait, since we retain
// the value of those associated types in the object type
// itself.
//
// ```rust
// trait SuperTrait {
// type X;
// }
//
// trait Trait : SuperTrait {
// type Y;
// fn foo(&self, x: Self) // bad
// fn foo(&self) -> Self // bad | // fn foo(&self) -> Self::Y // OK, desugars to next example
// fn foo(&self) -> <Self as Trait>::Y // OK
// fn foo(&self) -> Self::X // OK, desugars to next example
// fn foo(&self) -> <Self as SuperTrait>::X // OK
// }
// ```
//
// However, it is not as simple as allowing `Self` in a projected
// type, because there are illegal ways to use `Self` as well:
//
// ```rust
// trait Trait : SuperTrait {
// ...
// fn foo(&self) -> <Self as SomeOtherTrait>::X;
// }
// ```
//
// Here we will not have the type of `X` recorded in the
// object type, and we cannot resolve `Self as SomeOtherTrait`
// without knowing what `Self` is.
let mut supertraits: Option<Vec<ty::PolyTraitRef<'tcx>>> = None;
let mut error = false;
ty.maybe_walk(|ty| {
match ty.sty {
ty::TyParam(ref param_ty) => {
if param_ty.space == SelfSpace {
error = true;
}
false // no contained types to walk
}
ty::TyProjection(ref data) => {
// This is a projected type `<Foo as SomeTrait>::X`.
// Compute supertraits of current trait lazily.
if supertraits.is_none() {
let trait_def = tcx.lookup_trait_def(trait_def_id);
let trait_ref = ty::Binder(trait_def.trait_ref.clone());
supertraits = Some(traits::supertraits(tcx, trait_ref).collect());
}
// Determine whether the trait reference `Foo as
// SomeTrait` is in fact a supertrait of the
// current trait. In that case, this type is
// legal, because the type `X` will be specified
// in the object type. Note that we can just use
// direct equality here because all of these types
// are part of the formal parameter listing, and
// hence there should be no inference variables.
let projection_trait_ref = ty::Binder(data.trait_ref.clone());
let is_supertrait_of_current_trait =
supertraits.as_ref().unwrap().contains(&projection_trait_ref);
if is_supertrait_of_current_trait {
false // do not walk contained types, do not report error, do collect $200
} else {
true // DO walk contained types, POSSIBLY reporting an error
}
}
_ => true, // walk contained types, if any
}
});
error
}
fn is_self<'tcx>(ty: Ty<'tcx>) -> bool {
match ty.sty {
ty::TyParam(ref data) => data.space == subst::SelfSpace,
_ => false,
}
} | // fn foo(&self) -> Option<Self> // bad | random_line_split |
fuzzyset.js | (function() {
var FuzzySet = function(arr, useLevenshtein, gramSizeLower, gramSizeUpper) {
var fuzzyset = {
};
// default options
arr = arr || [];
fuzzyset.gramSizeLower = gramSizeLower || 2;
fuzzyset.gramSizeUpper = gramSizeUpper || 3;
fuzzyset.useLevenshtein = (typeof useLevenshtein !== 'boolean') ? true : useLevenshtein;
// define all the object functions and attributes
fuzzyset.exactSet = {};
fuzzyset.matchDict = {};
fuzzyset.items = {};
// helper functions
var levenshtein = function(str1, str2) {
var current = [], prev, value;
for (var i = 0; i <= str2.length; i++)
for (var j = 0; j <= str1.length; j++) {
if (i && j)
if (str1.charAt(j - 1) === str2.charAt(i - 1))
value = prev;
else
value = Math.min(current[j], current[j - 1], prev) + 1;
else
value = i + j;
prev = current[j];
current[j] = value;
}
return current.pop();
};
// return an edit distance from 0 to 1
var _distance = function(str1, str2) {
if (str1 === null && str2 === null) throw 'Trying to compare two null values';
if (str1 === null || str2 === null) return 0;
str1 = String(str1); str2 = String(str2);
var distance = levenshtein(str1, str2);
if (str1.length > str2.length) {
return 1 - distance / str1.length;
} else {
return 1 - distance / str2.length;
}
};
var _nonWordRe = /[^a-zA-Z0-9\u00C0-\u00FF, ]+/g;
var _iterateGrams = function(value, gramSize) {
gramSize = gramSize || 2;
var simplified = '-' + value.toLowerCase().replace(_nonWordRe, '') + '-',
lenDiff = gramSize - simplified.length,
results = [];
if (lenDiff > 0) |
for (var i = 0; i < simplified.length - gramSize + 1; ++i) {
results.push(simplified.slice(i, i + gramSize));
}
return results;
};
var _gramCounter = function(value, gramSize) {
// return an object where key=gram, value=number of occurrences
gramSize = gramSize || 2;
var result = {},
grams = _iterateGrams(value, gramSize),
i = 0;
for (i; i < grams.length; ++i) {
if (grams[i] in result) {
result[grams[i]] += 1;
} else {
result[grams[i]] = 1;
}
}
return result;
};
// the main functions
fuzzyset.get = function(value, defaultValue, minMatchScore) {
// check for value in set, returning defaultValue or null if none found
if (minMatchScore === undefined) {
minMatchScore = .33
}
var result = this._get(value, minMatchScore);
if (!result && typeof defaultValue !== 'undefined') {
return defaultValue;
}
return result;
};
fuzzyset._get = function(value, minMatchScore) {
var normalizedValue = this._normalizeStr(value),
result = this.exactSet[normalizedValue];
if (result) {
return [[1, result]];
}
var results = [];
// start with high gram size and if there are no results, go to lower gram sizes
for (var gramSize = this.gramSizeUpper; gramSize >= this.gramSizeLower; --gramSize) {
results = this.__get(value, gramSize, minMatchScore);
if (results && results.length > 0) {
return results;
}
}
return null;
};
fuzzyset.__get = function(value, gramSize, minMatchScore) {
var normalizedValue = this._normalizeStr(value),
matches = {},
gramCounts = _gramCounter(normalizedValue, gramSize),
items = this.items[gramSize],
sumOfSquareGramCounts = 0,
gram,
gramCount,
i,
index,
otherGramCount;
for (gram in gramCounts) {
gramCount = gramCounts[gram];
sumOfSquareGramCounts += Math.pow(gramCount, 2);
if (gram in this.matchDict) {
for (i = 0; i < this.matchDict[gram].length; ++i) {
index = this.matchDict[gram][i][0];
otherGramCount = this.matchDict[gram][i][1];
if (index in matches) {
matches[index] += gramCount * otherGramCount;
} else {
matches[index] = gramCount * otherGramCount;
}
}
}
}
function isEmptyObject(obj) {
for(var prop in obj) {
if(obj.hasOwnProperty(prop))
return false;
}
return true;
}
if (isEmptyObject(matches)) {
return null;
}
var vectorNormal = Math.sqrt(sumOfSquareGramCounts),
results = [],
matchScore;
// build a results list of [score, str]
for (var matchIndex in matches) {
matchScore = matches[matchIndex];
results.push([matchScore / (vectorNormal * items[matchIndex][0]), items[matchIndex][1]]);
}
var sortDescending = function(a, b) {
if (a[0] < b[0]) {
return 1;
} else if (a[0] > b[0]) {
return -1;
} else {
return 0;
}
};
results.sort(sortDescending);
if (this.useLevenshtein) {
var newResults = [],
endIndex = Math.min(50, results.length);
// truncate somewhat arbitrarily to 50
for (var i = 0; i < endIndex; ++i) {
newResults.push([_distance(results[i][1], normalizedValue), results[i][1]]);
}
results = newResults;
results.sort(sortDescending);
}
var newResults = [];
results.forEach(function(scoreWordPair) {
if (scoreWordPair[0] >= minMatchScore) {
newResults.push([scoreWordPair[0], this.exactSet[scoreWordPair[1]]]);
}
}.bind(this))
return newResults;
};
fuzzyset.add = function(value) {
var normalizedValue = this._normalizeStr(value);
if (normalizedValue in this.exactSet) {
return false;
}
var i = this.gramSizeLower;
for (i; i < this.gramSizeUpper + 1; ++i) {
this._add(value, i);
}
};
fuzzyset._add = function(value, gramSize) {
var normalizedValue = this._normalizeStr(value),
items = this.items[gramSize] || [],
index = items.length;
items.push(0);
var gramCounts = _gramCounter(normalizedValue, gramSize),
sumOfSquareGramCounts = 0,
gram, gramCount;
for (gram in gramCounts) {
gramCount = gramCounts[gram];
sumOfSquareGramCounts += Math.pow(gramCount, 2);
if (gram in this.matchDict) {
this.matchDict[gram].push([index, gramCount]);
} else {
this.matchDict[gram] = [[index, gramCount]];
}
}
var vectorNormal = Math.sqrt(sumOfSquareGramCounts);
items[index] = [vectorNormal, normalizedValue];
this.items[gramSize] = items;
this.exactSet[normalizedValue] = value;
};
fuzzyset._normalizeStr = function(str) {
if (Object.prototype.toString.call(str) !== '[object String]') throw 'Must use a string as argument to FuzzySet functions';
return str.toLowerCase();
};
// return length of items in set
fuzzyset.length = function() {
var count = 0,
prop;
for (prop in this.exactSet) {
if (this.exactSet.hasOwnProperty(prop)) {
count += 1;
}
}
return count;
};
// return is set is empty
fuzzyset.isEmpty = function() {
for (var prop in this.exactSet) {
if (this.exactSet.hasOwnProperty(prop)) {
return false;
}
}
return true;
};
// return list of values loaded into set
fuzzyset.values = function() {
var values = [],
prop;
for (prop in this.exactSet) {
if (this.exactSet.hasOwnProperty(prop)) {
values.push(this.exactSet[prop]);
}
}
return values;
};
// initialization
var i = fuzzyset.gramSizeLower;
for (i; i < fuzzyset.gramSizeUpper + 1; ++i) {
fuzzyset.items[i] = [];
}
// add all the items to the set
for (i = 0; i < arr.length; ++i) {
fuzzyset.add(arr[i]);
}
return fuzzyset;
};
var root = this;
// Export the fuzzyset object for **CommonJS**, with backwards-compatibility
// for the old `require()` API. If we're not in CommonJS, add `_` to the
// global object.
if (typeof module !== 'undefined' && module.exports) {
module.exports = FuzzySet;
root.FuzzySet = FuzzySet;
} else {
root.FuzzySet = FuzzySet;
}
})(); | {
for (var i = 0; i < lenDiff; ++i) {
value += '-';
}
} | conditional_block |
fuzzyset.js | (function() {
var FuzzySet = function(arr, useLevenshtein, gramSizeLower, gramSizeUpper) {
var fuzzyset = {
};
// default options
arr = arr || [];
fuzzyset.gramSizeLower = gramSizeLower || 2;
fuzzyset.gramSizeUpper = gramSizeUpper || 3;
fuzzyset.useLevenshtein = (typeof useLevenshtein !== 'boolean') ? true : useLevenshtein;
// define all the object functions and attributes
fuzzyset.exactSet = {};
fuzzyset.matchDict = {};
fuzzyset.items = {};
// helper functions
var levenshtein = function(str1, str2) {
var current = [], prev, value;
for (var i = 0; i <= str2.length; i++)
for (var j = 0; j <= str1.length; j++) {
if (i && j)
if (str1.charAt(j - 1) === str2.charAt(i - 1))
value = prev;
else
value = Math.min(current[j], current[j - 1], prev) + 1;
else
value = i + j;
prev = current[j];
current[j] = value;
}
return current.pop();
};
// return an edit distance from 0 to 1
var _distance = function(str1, str2) {
if (str1 === null && str2 === null) throw 'Trying to compare two null values';
if (str1 === null || str2 === null) return 0;
str1 = String(str1); str2 = String(str2);
var distance = levenshtein(str1, str2);
if (str1.length > str2.length) {
return 1 - distance / str1.length;
} else {
return 1 - distance / str2.length;
}
};
var _nonWordRe = /[^a-zA-Z0-9\u00C0-\u00FF, ]+/g;
var _iterateGrams = function(value, gramSize) {
gramSize = gramSize || 2;
var simplified = '-' + value.toLowerCase().replace(_nonWordRe, '') + '-',
lenDiff = gramSize - simplified.length,
results = [];
if (lenDiff > 0) {
for (var i = 0; i < lenDiff; ++i) {
value += '-';
}
}
for (var i = 0; i < simplified.length - gramSize + 1; ++i) {
results.push(simplified.slice(i, i + gramSize));
}
return results;
};
var _gramCounter = function(value, gramSize) {
// return an object where key=gram, value=number of occurrences
gramSize = gramSize || 2;
var result = {},
grams = _iterateGrams(value, gramSize),
i = 0;
for (i; i < grams.length; ++i) {
if (grams[i] in result) {
result[grams[i]] += 1;
} else {
result[grams[i]] = 1;
}
}
return result;
};
// the main functions
fuzzyset.get = function(value, defaultValue, minMatchScore) {
// check for value in set, returning defaultValue or null if none found
if (minMatchScore === undefined) {
minMatchScore = .33
}
var result = this._get(value, minMatchScore);
if (!result && typeof defaultValue !== 'undefined') {
return defaultValue;
}
return result;
};
fuzzyset._get = function(value, minMatchScore) {
var normalizedValue = this._normalizeStr(value),
result = this.exactSet[normalizedValue];
if (result) {
return [[1, result]];
}
var results = [];
// start with high gram size and if there are no results, go to lower gram sizes
for (var gramSize = this.gramSizeUpper; gramSize >= this.gramSizeLower; --gramSize) {
results = this.__get(value, gramSize, minMatchScore);
if (results && results.length > 0) {
return results;
}
}
return null;
};
fuzzyset.__get = function(value, gramSize, minMatchScore) {
var normalizedValue = this._normalizeStr(value),
matches = {},
gramCounts = _gramCounter(normalizedValue, gramSize),
items = this.items[gramSize],
sumOfSquareGramCounts = 0,
gram,
gramCount,
i,
index,
otherGramCount;
for (gram in gramCounts) {
gramCount = gramCounts[gram];
sumOfSquareGramCounts += Math.pow(gramCount, 2);
if (gram in this.matchDict) {
for (i = 0; i < this.matchDict[gram].length; ++i) {
index = this.matchDict[gram][i][0];
otherGramCount = this.matchDict[gram][i][1];
if (index in matches) {
matches[index] += gramCount * otherGramCount;
} else {
matches[index] = gramCount * otherGramCount;
}
}
}
}
function isEmptyObject(obj) {
for(var prop in obj) {
if(obj.hasOwnProperty(prop))
return false;
}
return true;
}
if (isEmptyObject(matches)) {
return null;
}
var vectorNormal = Math.sqrt(sumOfSquareGramCounts),
results = [],
matchScore;
// build a results list of [score, str]
for (var matchIndex in matches) {
matchScore = matches[matchIndex];
results.push([matchScore / (vectorNormal * items[matchIndex][0]), items[matchIndex][1]]);
}
var sortDescending = function(a, b) {
if (a[0] < b[0]) {
return 1;
} else if (a[0] > b[0]) {
return -1;
} else {
return 0;
}
};
results.sort(sortDescending);
if (this.useLevenshtein) {
var newResults = [],
endIndex = Math.min(50, results.length);
// truncate somewhat arbitrarily to 50
for (var i = 0; i < endIndex; ++i) {
newResults.push([_distance(results[i][1], normalizedValue), results[i][1]]);
}
results = newResults;
results.sort(sortDescending);
}
var newResults = [];
results.forEach(function(scoreWordPair) {
if (scoreWordPair[0] >= minMatchScore) {
newResults.push([scoreWordPair[0], this.exactSet[scoreWordPair[1]]]);
}
}.bind(this))
return newResults;
};
fuzzyset.add = function(value) {
var normalizedValue = this._normalizeStr(value);
if (normalizedValue in this.exactSet) {
return false;
}
var i = this.gramSizeLower;
for (i; i < this.gramSizeUpper + 1; ++i) {
this._add(value, i);
}
};
fuzzyset._add = function(value, gramSize) {
var normalizedValue = this._normalizeStr(value),
items = this.items[gramSize] || [],
index = items.length;
items.push(0);
var gramCounts = _gramCounter(normalizedValue, gramSize),
sumOfSquareGramCounts = 0,
gram, gramCount;
for (gram in gramCounts) {
gramCount = gramCounts[gram];
sumOfSquareGramCounts += Math.pow(gramCount, 2);
if (gram in this.matchDict) {
this.matchDict[gram].push([index, gramCount]);
} else {
this.matchDict[gram] = [[index, gramCount]];
}
}
var vectorNormal = Math.sqrt(sumOfSquareGramCounts);
items[index] = [vectorNormal, normalizedValue];
this.items[gramSize] = items;
this.exactSet[normalizedValue] = value;
};
fuzzyset._normalizeStr = function(str) { | };
// return length of items in set
fuzzyset.length = function() {
var count = 0,
prop;
for (prop in this.exactSet) {
if (this.exactSet.hasOwnProperty(prop)) {
count += 1;
}
}
return count;
};
// return is set is empty
fuzzyset.isEmpty = function() {
for (var prop in this.exactSet) {
if (this.exactSet.hasOwnProperty(prop)) {
return false;
}
}
return true;
};
// return list of values loaded into set
fuzzyset.values = function() {
var values = [],
prop;
for (prop in this.exactSet) {
if (this.exactSet.hasOwnProperty(prop)) {
values.push(this.exactSet[prop]);
}
}
return values;
};
// initialization
var i = fuzzyset.gramSizeLower;
for (i; i < fuzzyset.gramSizeUpper + 1; ++i) {
fuzzyset.items[i] = [];
}
// add all the items to the set
for (i = 0; i < arr.length; ++i) {
fuzzyset.add(arr[i]);
}
return fuzzyset;
};
var root = this;
// Export the fuzzyset object for **CommonJS**, with backwards-compatibility
// for the old `require()` API. If we're not in CommonJS, add `_` to the
// global object.
if (typeof module !== 'undefined' && module.exports) {
module.exports = FuzzySet;
root.FuzzySet = FuzzySet;
} else {
root.FuzzySet = FuzzySet;
}
})(); | if (Object.prototype.toString.call(str) !== '[object String]') throw 'Must use a string as argument to FuzzySet functions';
return str.toLowerCase(); | random_line_split |
fuzzyset.js | (function() {
var FuzzySet = function(arr, useLevenshtein, gramSizeLower, gramSizeUpper) {
var fuzzyset = {
};
// default options
arr = arr || [];
fuzzyset.gramSizeLower = gramSizeLower || 2;
fuzzyset.gramSizeUpper = gramSizeUpper || 3;
fuzzyset.useLevenshtein = (typeof useLevenshtein !== 'boolean') ? true : useLevenshtein;
// define all the object functions and attributes
fuzzyset.exactSet = {};
fuzzyset.matchDict = {};
fuzzyset.items = {};
// helper functions
var levenshtein = function(str1, str2) {
var current = [], prev, value;
for (var i = 0; i <= str2.length; i++)
for (var j = 0; j <= str1.length; j++) {
if (i && j)
if (str1.charAt(j - 1) === str2.charAt(i - 1))
value = prev;
else
value = Math.min(current[j], current[j - 1], prev) + 1;
else
value = i + j;
prev = current[j];
current[j] = value;
}
return current.pop();
};
// return an edit distance from 0 to 1
var _distance = function(str1, str2) {
if (str1 === null && str2 === null) throw 'Trying to compare two null values';
if (str1 === null || str2 === null) return 0;
str1 = String(str1); str2 = String(str2);
var distance = levenshtein(str1, str2);
if (str1.length > str2.length) {
return 1 - distance / str1.length;
} else {
return 1 - distance / str2.length;
}
};
var _nonWordRe = /[^a-zA-Z0-9\u00C0-\u00FF, ]+/g;
var _iterateGrams = function(value, gramSize) {
gramSize = gramSize || 2;
var simplified = '-' + value.toLowerCase().replace(_nonWordRe, '') + '-',
lenDiff = gramSize - simplified.length,
results = [];
if (lenDiff > 0) {
for (var i = 0; i < lenDiff; ++i) {
value += '-';
}
}
for (var i = 0; i < simplified.length - gramSize + 1; ++i) {
results.push(simplified.slice(i, i + gramSize));
}
return results;
};
var _gramCounter = function(value, gramSize) {
// return an object where key=gram, value=number of occurrences
gramSize = gramSize || 2;
var result = {},
grams = _iterateGrams(value, gramSize),
i = 0;
for (i; i < grams.length; ++i) {
if (grams[i] in result) {
result[grams[i]] += 1;
} else {
result[grams[i]] = 1;
}
}
return result;
};
// the main functions
fuzzyset.get = function(value, defaultValue, minMatchScore) {
// check for value in set, returning defaultValue or null if none found
if (minMatchScore === undefined) {
minMatchScore = .33
}
var result = this._get(value, minMatchScore);
if (!result && typeof defaultValue !== 'undefined') {
return defaultValue;
}
return result;
};
fuzzyset._get = function(value, minMatchScore) {
var normalizedValue = this._normalizeStr(value),
result = this.exactSet[normalizedValue];
if (result) {
return [[1, result]];
}
var results = [];
// start with high gram size and if there are no results, go to lower gram sizes
for (var gramSize = this.gramSizeUpper; gramSize >= this.gramSizeLower; --gramSize) {
results = this.__get(value, gramSize, minMatchScore);
if (results && results.length > 0) {
return results;
}
}
return null;
};
fuzzyset.__get = function(value, gramSize, minMatchScore) {
var normalizedValue = this._normalizeStr(value),
matches = {},
gramCounts = _gramCounter(normalizedValue, gramSize),
items = this.items[gramSize],
sumOfSquareGramCounts = 0,
gram,
gramCount,
i,
index,
otherGramCount;
for (gram in gramCounts) {
gramCount = gramCounts[gram];
sumOfSquareGramCounts += Math.pow(gramCount, 2);
if (gram in this.matchDict) {
for (i = 0; i < this.matchDict[gram].length; ++i) {
index = this.matchDict[gram][i][0];
otherGramCount = this.matchDict[gram][i][1];
if (index in matches) {
matches[index] += gramCount * otherGramCount;
} else {
matches[index] = gramCount * otherGramCount;
}
}
}
}
function | (obj) {
for(var prop in obj) {
if(obj.hasOwnProperty(prop))
return false;
}
return true;
}
if (isEmptyObject(matches)) {
return null;
}
var vectorNormal = Math.sqrt(sumOfSquareGramCounts),
results = [],
matchScore;
// build a results list of [score, str]
for (var matchIndex in matches) {
matchScore = matches[matchIndex];
results.push([matchScore / (vectorNormal * items[matchIndex][0]), items[matchIndex][1]]);
}
var sortDescending = function(a, b) {
if (a[0] < b[0]) {
return 1;
} else if (a[0] > b[0]) {
return -1;
} else {
return 0;
}
};
results.sort(sortDescending);
if (this.useLevenshtein) {
var newResults = [],
endIndex = Math.min(50, results.length);
// truncate somewhat arbitrarily to 50
for (var i = 0; i < endIndex; ++i) {
newResults.push([_distance(results[i][1], normalizedValue), results[i][1]]);
}
results = newResults;
results.sort(sortDescending);
}
var newResults = [];
results.forEach(function(scoreWordPair) {
if (scoreWordPair[0] >= minMatchScore) {
newResults.push([scoreWordPair[0], this.exactSet[scoreWordPair[1]]]);
}
}.bind(this))
return newResults;
};
fuzzyset.add = function(value) {
var normalizedValue = this._normalizeStr(value);
if (normalizedValue in this.exactSet) {
return false;
}
var i = this.gramSizeLower;
for (i; i < this.gramSizeUpper + 1; ++i) {
this._add(value, i);
}
};
fuzzyset._add = function(value, gramSize) {
var normalizedValue = this._normalizeStr(value),
items = this.items[gramSize] || [],
index = items.length;
items.push(0);
var gramCounts = _gramCounter(normalizedValue, gramSize),
sumOfSquareGramCounts = 0,
gram, gramCount;
for (gram in gramCounts) {
gramCount = gramCounts[gram];
sumOfSquareGramCounts += Math.pow(gramCount, 2);
if (gram in this.matchDict) {
this.matchDict[gram].push([index, gramCount]);
} else {
this.matchDict[gram] = [[index, gramCount]];
}
}
var vectorNormal = Math.sqrt(sumOfSquareGramCounts);
items[index] = [vectorNormal, normalizedValue];
this.items[gramSize] = items;
this.exactSet[normalizedValue] = value;
};
fuzzyset._normalizeStr = function(str) {
if (Object.prototype.toString.call(str) !== '[object String]') throw 'Must use a string as argument to FuzzySet functions';
return str.toLowerCase();
};
// return length of items in set
fuzzyset.length = function() {
var count = 0,
prop;
for (prop in this.exactSet) {
if (this.exactSet.hasOwnProperty(prop)) {
count += 1;
}
}
return count;
};
// return is set is empty
fuzzyset.isEmpty = function() {
for (var prop in this.exactSet) {
if (this.exactSet.hasOwnProperty(prop)) {
return false;
}
}
return true;
};
// return list of values loaded into set
fuzzyset.values = function() {
var values = [],
prop;
for (prop in this.exactSet) {
if (this.exactSet.hasOwnProperty(prop)) {
values.push(this.exactSet[prop]);
}
}
return values;
};
// initialization
var i = fuzzyset.gramSizeLower;
for (i; i < fuzzyset.gramSizeUpper + 1; ++i) {
fuzzyset.items[i] = [];
}
// add all the items to the set
for (i = 0; i < arr.length; ++i) {
fuzzyset.add(arr[i]);
}
return fuzzyset;
};
var root = this;
// Export the fuzzyset object for **CommonJS**, with backwards-compatibility
// for the old `require()` API. If we're not in CommonJS, add `_` to the
// global object.
if (typeof module !== 'undefined' && module.exports) {
module.exports = FuzzySet;
root.FuzzySet = FuzzySet;
} else {
root.FuzzySet = FuzzySet;
}
})(); | isEmptyObject | identifier_name |
fuzzyset.js | (function() {
var FuzzySet = function(arr, useLevenshtein, gramSizeLower, gramSizeUpper) {
var fuzzyset = {
};
// default options
arr = arr || [];
fuzzyset.gramSizeLower = gramSizeLower || 2;
fuzzyset.gramSizeUpper = gramSizeUpper || 3;
fuzzyset.useLevenshtein = (typeof useLevenshtein !== 'boolean') ? true : useLevenshtein;
// define all the object functions and attributes
fuzzyset.exactSet = {};
fuzzyset.matchDict = {};
fuzzyset.items = {};
// helper functions
var levenshtein = function(str1, str2) {
var current = [], prev, value;
for (var i = 0; i <= str2.length; i++)
for (var j = 0; j <= str1.length; j++) {
if (i && j)
if (str1.charAt(j - 1) === str2.charAt(i - 1))
value = prev;
else
value = Math.min(current[j], current[j - 1], prev) + 1;
else
value = i + j;
prev = current[j];
current[j] = value;
}
return current.pop();
};
// return an edit distance from 0 to 1
var _distance = function(str1, str2) {
if (str1 === null && str2 === null) throw 'Trying to compare two null values';
if (str1 === null || str2 === null) return 0;
str1 = String(str1); str2 = String(str2);
var distance = levenshtein(str1, str2);
if (str1.length > str2.length) {
return 1 - distance / str1.length;
} else {
return 1 - distance / str2.length;
}
};
var _nonWordRe = /[^a-zA-Z0-9\u00C0-\u00FF, ]+/g;
var _iterateGrams = function(value, gramSize) {
gramSize = gramSize || 2;
var simplified = '-' + value.toLowerCase().replace(_nonWordRe, '') + '-',
lenDiff = gramSize - simplified.length,
results = [];
if (lenDiff > 0) {
for (var i = 0; i < lenDiff; ++i) {
value += '-';
}
}
for (var i = 0; i < simplified.length - gramSize + 1; ++i) {
results.push(simplified.slice(i, i + gramSize));
}
return results;
};
var _gramCounter = function(value, gramSize) {
// return an object where key=gram, value=number of occurrences
gramSize = gramSize || 2;
var result = {},
grams = _iterateGrams(value, gramSize),
i = 0;
for (i; i < grams.length; ++i) {
if (grams[i] in result) {
result[grams[i]] += 1;
} else {
result[grams[i]] = 1;
}
}
return result;
};
// the main functions
fuzzyset.get = function(value, defaultValue, minMatchScore) {
// check for value in set, returning defaultValue or null if none found
if (minMatchScore === undefined) {
minMatchScore = .33
}
var result = this._get(value, minMatchScore);
if (!result && typeof defaultValue !== 'undefined') {
return defaultValue;
}
return result;
};
fuzzyset._get = function(value, minMatchScore) {
var normalizedValue = this._normalizeStr(value),
result = this.exactSet[normalizedValue];
if (result) {
return [[1, result]];
}
var results = [];
// start with high gram size and if there are no results, go to lower gram sizes
for (var gramSize = this.gramSizeUpper; gramSize >= this.gramSizeLower; --gramSize) {
results = this.__get(value, gramSize, minMatchScore);
if (results && results.length > 0) {
return results;
}
}
return null;
};
fuzzyset.__get = function(value, gramSize, minMatchScore) {
var normalizedValue = this._normalizeStr(value),
matches = {},
gramCounts = _gramCounter(normalizedValue, gramSize),
items = this.items[gramSize],
sumOfSquareGramCounts = 0,
gram,
gramCount,
i,
index,
otherGramCount;
for (gram in gramCounts) {
gramCount = gramCounts[gram];
sumOfSquareGramCounts += Math.pow(gramCount, 2);
if (gram in this.matchDict) {
for (i = 0; i < this.matchDict[gram].length; ++i) {
index = this.matchDict[gram][i][0];
otherGramCount = this.matchDict[gram][i][1];
if (index in matches) {
matches[index] += gramCount * otherGramCount;
} else {
matches[index] = gramCount * otherGramCount;
}
}
}
}
function isEmptyObject(obj) |
if (isEmptyObject(matches)) {
return null;
}
var vectorNormal = Math.sqrt(sumOfSquareGramCounts),
results = [],
matchScore;
// build a results list of [score, str]
for (var matchIndex in matches) {
matchScore = matches[matchIndex];
results.push([matchScore / (vectorNormal * items[matchIndex][0]), items[matchIndex][1]]);
}
var sortDescending = function(a, b) {
if (a[0] < b[0]) {
return 1;
} else if (a[0] > b[0]) {
return -1;
} else {
return 0;
}
};
results.sort(sortDescending);
if (this.useLevenshtein) {
var newResults = [],
endIndex = Math.min(50, results.length);
// truncate somewhat arbitrarily to 50
for (var i = 0; i < endIndex; ++i) {
newResults.push([_distance(results[i][1], normalizedValue), results[i][1]]);
}
results = newResults;
results.sort(sortDescending);
}
var newResults = [];
results.forEach(function(scoreWordPair) {
if (scoreWordPair[0] >= minMatchScore) {
newResults.push([scoreWordPair[0], this.exactSet[scoreWordPair[1]]]);
}
}.bind(this))
return newResults;
};
fuzzyset.add = function(value) {
var normalizedValue = this._normalizeStr(value);
if (normalizedValue in this.exactSet) {
return false;
}
var i = this.gramSizeLower;
for (i; i < this.gramSizeUpper + 1; ++i) {
this._add(value, i);
}
};
fuzzyset._add = function(value, gramSize) {
var normalizedValue = this._normalizeStr(value),
items = this.items[gramSize] || [],
index = items.length;
items.push(0);
var gramCounts = _gramCounter(normalizedValue, gramSize),
sumOfSquareGramCounts = 0,
gram, gramCount;
for (gram in gramCounts) {
gramCount = gramCounts[gram];
sumOfSquareGramCounts += Math.pow(gramCount, 2);
if (gram in this.matchDict) {
this.matchDict[gram].push([index, gramCount]);
} else {
this.matchDict[gram] = [[index, gramCount]];
}
}
var vectorNormal = Math.sqrt(sumOfSquareGramCounts);
items[index] = [vectorNormal, normalizedValue];
this.items[gramSize] = items;
this.exactSet[normalizedValue] = value;
};
fuzzyset._normalizeStr = function(str) {
if (Object.prototype.toString.call(str) !== '[object String]') throw 'Must use a string as argument to FuzzySet functions';
return str.toLowerCase();
};
// return length of items in set
fuzzyset.length = function() {
var count = 0,
prop;
for (prop in this.exactSet) {
if (this.exactSet.hasOwnProperty(prop)) {
count += 1;
}
}
return count;
};
// return is set is empty
fuzzyset.isEmpty = function() {
for (var prop in this.exactSet) {
if (this.exactSet.hasOwnProperty(prop)) {
return false;
}
}
return true;
};
// return list of values loaded into set
fuzzyset.values = function() {
var values = [],
prop;
for (prop in this.exactSet) {
if (this.exactSet.hasOwnProperty(prop)) {
values.push(this.exactSet[prop]);
}
}
return values;
};
// initialization
var i = fuzzyset.gramSizeLower;
for (i; i < fuzzyset.gramSizeUpper + 1; ++i) {
fuzzyset.items[i] = [];
}
// add all the items to the set
for (i = 0; i < arr.length; ++i) {
fuzzyset.add(arr[i]);
}
return fuzzyset;
};
var root = this;
// Export the fuzzyset object for **CommonJS**, with backwards-compatibility
// for the old `require()` API. If we're not in CommonJS, add `_` to the
// global object.
if (typeof module !== 'undefined' && module.exports) {
module.exports = FuzzySet;
root.FuzzySet = FuzzySet;
} else {
root.FuzzySet = FuzzySet;
}
})(); | {
for(var prop in obj) {
if(obj.hasOwnProperty(prop))
return false;
}
return true;
} | identifier_body |
tableViewConflicts.js | // Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
module.exports = {
'Shows how many conflicts have appeared': function (client) {
const waitTime = client.globals.maxWaitTime;
const newDatabaseName = client.globals.testDatabaseName;
const baseUrl = client.options.launch_url;
client
.populateDatabaseWithConflicts(newDatabaseName)
.checkForDocumentCreated('outfit1')
.loginToGUI()
.url(baseUrl + '/#/database/' + newDatabaseName + '/_all_docs')
.clickWhenVisible('.fonticon-table')
.waitForElementVisible('.table', waitTime, false)
.waitForElementVisible('.table-container-autocomplete', waitTime, false)
.waitForElementVisible('.tableview-conflict', waitTime, false)
.assert.visible('.table [data-conflicts-indicator="true"]') | }
}; |
.end(); | random_line_split |
label_break_value_illegal_uses.rs | // Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(label_break_value)]
// These are forbidden occurrences of label-break-value
fn labeled_unsafe() {
unsafe 'b: {} //~ ERROR expected one of `extern`, `fn`, or `{`
}
fn labeled_if() {
if true | //~ ERROR expected `{`, found `'b`
}
fn labeled_else() {
if true {} else 'b: {} //~ ERROR expected `{`, found `'b`
}
fn labeled_match() {
match false 'b: {} //~ ERROR expected one of `.`, `?`, `{`, or an operator
}
pub fn main() {}
| 'b: {} | conditional_block |
label_break_value_illegal_uses.rs | // Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(label_break_value)]
// These are forbidden occurrences of label-break-value
fn labeled_unsafe() {
unsafe 'b: {} //~ ERROR expected one of `extern`, `fn`, or `{`
}
fn labeled_if() {
if true 'b: {} //~ ERROR expected `{`, found `'b`
}
fn labeled_else() {
if true {} else 'b: {} //~ ERROR expected `{`, found `'b`
}
fn labeled_match() {
match false 'b: {} //~ ERROR expected one of `.`, `?`, `{`, or an operator
} |
pub fn main() {} | random_line_split | |
label_break_value_illegal_uses.rs | // Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(label_break_value)]
// These are forbidden occurrences of label-break-value
fn labeled_unsafe() {
unsafe 'b: {} //~ ERROR expected one of `extern`, `fn`, or `{`
}
fn labeled_if() {
if true 'b: {} //~ ERROR expected `{`, found `'b`
}
fn labeled_else() {
if true {} else 'b: {} //~ ERROR expected `{`, found `'b`
}
fn labeled_match() {
match false 'b: {} //~ ERROR expected one of `.`, `?`, `{`, or an operator
}
pub fn main() | {} | identifier_body | |
label_break_value_illegal_uses.rs | // Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(label_break_value)]
// These are forbidden occurrences of label-break-value
fn labeled_unsafe() {
unsafe 'b: {} //~ ERROR expected one of `extern`, `fn`, or `{`
}
fn labeled_if() {
if true 'b: {} //~ ERROR expected `{`, found `'b`
}
fn labeled_else() {
if true {} else 'b: {} //~ ERROR expected `{`, found `'b`
}
fn | () {
match false 'b: {} //~ ERROR expected one of `.`, `?`, `{`, or an operator
}
pub fn main() {}
| labeled_match | identifier_name |
defaults.py | from draftjs_exporter.constants import BLOCK_TYPES, INLINE_STYLES
from draftjs_exporter.dom import DOM
from draftjs_exporter.types import Element, Props
def | (props: Props) -> Element:
"""
Renders the children of a component without any specific
markup for the component itself.
"""
return props["children"]
def code_block(props: Props) -> Element:
return DOM.create_element(
"pre", {}, DOM.create_element("code", {}, props["children"])
)
# Default block map to extend.
BLOCK_MAP = {
BLOCK_TYPES.UNSTYLED: "p",
BLOCK_TYPES.HEADER_ONE: "h1",
BLOCK_TYPES.HEADER_TWO: "h2",
BLOCK_TYPES.HEADER_THREE: "h3",
BLOCK_TYPES.HEADER_FOUR: "h4",
BLOCK_TYPES.HEADER_FIVE: "h5",
BLOCK_TYPES.HEADER_SIX: "h6",
BLOCK_TYPES.UNORDERED_LIST_ITEM: {"element": "li", "wrapper": "ul"},
BLOCK_TYPES.ORDERED_LIST_ITEM: {"element": "li", "wrapper": "ol"},
BLOCK_TYPES.BLOCKQUOTE: "blockquote",
BLOCK_TYPES.PRE: "pre",
BLOCK_TYPES.CODE: code_block,
BLOCK_TYPES.ATOMIC: render_children,
}
# Default style map to extend.
# Tags come from https://developer.mozilla.org/en-US/docs/Web/HTML/Element.
# and are loosely aligned with https://github.com/jpuri/draftjs-to-html.
# Only styles that map to HTML elements are allowed as defaults.
STYLE_MAP = {
INLINE_STYLES.BOLD: "strong",
INLINE_STYLES.CODE: "code",
INLINE_STYLES.ITALIC: "em",
INLINE_STYLES.UNDERLINE: "u",
INLINE_STYLES.STRIKETHROUGH: "s",
INLINE_STYLES.SUPERSCRIPT: "sup",
INLINE_STYLES.SUBSCRIPT: "sub",
INLINE_STYLES.MARK: "mark",
INLINE_STYLES.QUOTATION: "q",
INLINE_STYLES.SMALL: "small",
INLINE_STYLES.SAMPLE: "samp",
INLINE_STYLES.INSERT: "ins",
INLINE_STYLES.DELETE: "del",
INLINE_STYLES.KEYBOARD: "kbd",
}
| render_children | identifier_name |
defaults.py | from draftjs_exporter.constants import BLOCK_TYPES, INLINE_STYLES
from draftjs_exporter.dom import DOM
from draftjs_exporter.types import Element, Props
| """
Renders the children of a component without any specific
markup for the component itself.
"""
return props["children"]
def code_block(props: Props) -> Element:
return DOM.create_element(
"pre", {}, DOM.create_element("code", {}, props["children"])
)
# Default block map to extend.
BLOCK_MAP = {
BLOCK_TYPES.UNSTYLED: "p",
BLOCK_TYPES.HEADER_ONE: "h1",
BLOCK_TYPES.HEADER_TWO: "h2",
BLOCK_TYPES.HEADER_THREE: "h3",
BLOCK_TYPES.HEADER_FOUR: "h4",
BLOCK_TYPES.HEADER_FIVE: "h5",
BLOCK_TYPES.HEADER_SIX: "h6",
BLOCK_TYPES.UNORDERED_LIST_ITEM: {"element": "li", "wrapper": "ul"},
BLOCK_TYPES.ORDERED_LIST_ITEM: {"element": "li", "wrapper": "ol"},
BLOCK_TYPES.BLOCKQUOTE: "blockquote",
BLOCK_TYPES.PRE: "pre",
BLOCK_TYPES.CODE: code_block,
BLOCK_TYPES.ATOMIC: render_children,
}
# Default style map to extend.
# Tags come from https://developer.mozilla.org/en-US/docs/Web/HTML/Element.
# and are loosely aligned with https://github.com/jpuri/draftjs-to-html.
# Only styles that map to HTML elements are allowed as defaults.
STYLE_MAP = {
INLINE_STYLES.BOLD: "strong",
INLINE_STYLES.CODE: "code",
INLINE_STYLES.ITALIC: "em",
INLINE_STYLES.UNDERLINE: "u",
INLINE_STYLES.STRIKETHROUGH: "s",
INLINE_STYLES.SUPERSCRIPT: "sup",
INLINE_STYLES.SUBSCRIPT: "sub",
INLINE_STYLES.MARK: "mark",
INLINE_STYLES.QUOTATION: "q",
INLINE_STYLES.SMALL: "small",
INLINE_STYLES.SAMPLE: "samp",
INLINE_STYLES.INSERT: "ins",
INLINE_STYLES.DELETE: "del",
INLINE_STYLES.KEYBOARD: "kbd",
} | def render_children(props: Props) -> Element: | random_line_split |
defaults.py | from draftjs_exporter.constants import BLOCK_TYPES, INLINE_STYLES
from draftjs_exporter.dom import DOM
from draftjs_exporter.types import Element, Props
def render_children(props: Props) -> Element:
"""
Renders the children of a component without any specific
markup for the component itself.
"""
return props["children"]
def code_block(props: Props) -> Element:
|
# Default block map to extend.
BLOCK_MAP = {
BLOCK_TYPES.UNSTYLED: "p",
BLOCK_TYPES.HEADER_ONE: "h1",
BLOCK_TYPES.HEADER_TWO: "h2",
BLOCK_TYPES.HEADER_THREE: "h3",
BLOCK_TYPES.HEADER_FOUR: "h4",
BLOCK_TYPES.HEADER_FIVE: "h5",
BLOCK_TYPES.HEADER_SIX: "h6",
BLOCK_TYPES.UNORDERED_LIST_ITEM: {"element": "li", "wrapper": "ul"},
BLOCK_TYPES.ORDERED_LIST_ITEM: {"element": "li", "wrapper": "ol"},
BLOCK_TYPES.BLOCKQUOTE: "blockquote",
BLOCK_TYPES.PRE: "pre",
BLOCK_TYPES.CODE: code_block,
BLOCK_TYPES.ATOMIC: render_children,
}
# Default style map to extend.
# Tags come from https://developer.mozilla.org/en-US/docs/Web/HTML/Element.
# and are loosely aligned with https://github.com/jpuri/draftjs-to-html.
# Only styles that map to HTML elements are allowed as defaults.
STYLE_MAP = {
INLINE_STYLES.BOLD: "strong",
INLINE_STYLES.CODE: "code",
INLINE_STYLES.ITALIC: "em",
INLINE_STYLES.UNDERLINE: "u",
INLINE_STYLES.STRIKETHROUGH: "s",
INLINE_STYLES.SUPERSCRIPT: "sup",
INLINE_STYLES.SUBSCRIPT: "sub",
INLINE_STYLES.MARK: "mark",
INLINE_STYLES.QUOTATION: "q",
INLINE_STYLES.SMALL: "small",
INLINE_STYLES.SAMPLE: "samp",
INLINE_STYLES.INSERT: "ins",
INLINE_STYLES.DELETE: "del",
INLINE_STYLES.KEYBOARD: "kbd",
}
| return DOM.create_element(
"pre", {}, DOM.create_element("code", {}, props["children"])
) | identifier_body |
TwitchSource.py | """News source to send a notification whenever a twitch streamer goes live."""
import datetime
import logging
import discord
from dateutil import parser
from .AbstractSources import DataBasedSource
DOZER_LOGGER = logging.getLogger('dozer')
class TwitchSource(DataBasedSource):
"""News source to send a notification whenever a twitch streamer goes live."""
full_name = "Twitch"
short_name = "twitch"
base_url = "https://twitch.tv"
description = "Makes a post whenever a specified user goes live on Twitch"
token_url = "https://id.twitch.tv/oauth2/token"
api_url = "https://api.twitch.tv/helix"
color = discord.Color.from_rgb(145, 70, 255)
class TwitchUser(DataBasedSource.DataPoint):
"""A helper class to represent a single Twitch streamer"""
def __init__(self, user_id, display_name, profile_image_url, login):
super().__init__(login, display_name)
self.user_id = user_id
self.display_name = display_name
self.profile_image_url = profile_image_url
self.login = login
def __init__(self, aiohttp_session, bot):
super().__init__(aiohttp_session, bot)
self.access_token = None
self.client_id = None
self.expiry_time = None
self.users = {}
self.seen_streams = set()
async def get_token(self):
"""Use OAuth2 to request a new token. If token fails, disable the source."""
client_id = self.bot.config['news']['twitch']['client_id']
self.client_id = client_id
client_secret = self.bot.config['news']['twitch']['client_secret']
params = {
'client_id': client_id,
'client_secret': client_secret,
'grant_type': 'client_credentials'
}
response = await self.http_session.post(self.token_url, params=params)
response = await response.json()
try:
self.access_token = response['access_token']
except KeyError:
DOZER_LOGGER.critical(f"Error in {self.full_name} Token Get: {response['message']}")
self.disabled = True
return
expiry_seconds = response['expires_in']
time_delta = datetime.timedelta(seconds=expiry_seconds)
self.expiry_time = datetime.datetime.now() + time_delta
async def request(self, url, *args, headers=None, **kwargs):
"""Make a OAuth2 verified request to a API Endpoint"""
if headers is None:
headers = {'Authorization': f"Bearer {self.access_token}",
"Client-ID": self.client_id}
else:
headers['Authorization'] = f"Bearer {self.access_token}"
url = f"{self.api_url}/{url}"
response = await self.http_session.get(url, headers=headers, *args, **kwargs)
if response.status == 401:
if 'WWW-Authenticate' in response.headers:
DOZER_LOGGER.info("Twitch token expired when request made, request new token and retrying.")
await self.get_token()
return await self.request(url, headers=headers, *args, **kwargs)
json = await response.json()
return json
async def first_run(self, data=None):
"""Make sure we have a token, then verify and add all the current users in the DB"""
await self.get_token()
if not data:
return
params = []
for login in data:
params.append(('login', login))
json = await self.request("users", params=params)
for user in json['data']:
user_obj = TwitchSource.TwitchUser(user['id'], user['display_name'], user['profile_image_url'],
user['login'])
self.users[user['id']] = user_obj
async def clean_data(self, text):
"""Request user data from Twitch to verify the username exists and clean the data"""
try:
user_obj = self.users[text]
except KeyError:
json = await self.request('users', params={'login': text})
if len(json['data']) == 0:
raise DataBasedSource.InvalidDataException("No user with that login name found")
elif len(json['data']) > 1:
raise DataBasedSource.InvalidDataException("More than one user with that login name found")
user_obj = TwitchSource.TwitchUser(json['data'][0]['id'], json['data'][0]['display_name'],
json['data'][0]['profile_image_url'], json['data'][0]['login'])
return user_obj
async def add_data(self, obj):
"""Add the user object to the store"""
self.users[obj.user_id] = obj
return True
async def remove_data(self, obj):
"""Remove the user object from the store"""
try:
del self.users[obj.user_id]
return True
except KeyError:
return False
async def get_new_posts(self):
"""Assemble all the current user IDs, get any game names and return the embeds and strings"""
if datetime.datetime.now() > self.expiry_time:
DOZER_LOGGER.info("Refreshing Twitch token due to expiry time")
await self.get_token()
params = []
for user in self.users.values():
params.append(('user_id', user.user_id))
params.append(('first', len(self.users)))
json = await self.request("streams", params=params)
if len(json['data']) == 0:
|
# streams endpoint only returns game ID, do a second request to get game names
game_ids = []
for stream in json['data']:
game_ids.append(stream['game_id'])
params = []
for game in game_ids:
params.append(('id', game))
games_json = await self.request("games", params=params)
games = {}
for game in games_json['data']:
games[game['id']] = game['name']
posts = {}
for stream in json['data']:
if stream['id'] not in self.seen_streams:
embed = self.generate_embed(stream, games)
plain = self.generate_plain_text(stream, games)
posts[stream['user_name']] = {
'embed': [embed],
'plain': [plain]
}
self.seen_streams.add(stream['id'])
return posts
def generate_embed(self, data, games):
"""Given data on a stream and a dict of games, assemble an embed"""
try:
display_name = data['display_name']
except KeyError:
display_name = data['user_name']
embed = discord.Embed()
embed.title = f"{display_name} is now live on Twitch!"
embed.colour = self.color
embed.description = data['title']
embed.url = f"https://www.twitch.tv/{data['user_name']}"
embed.add_field(name="Playing", value=games[data['game_id']], inline=True)
embed.add_field(name="Watching", value=data['viewer_count'], inline=True)
embed.set_author(name=display_name, url=embed.url, icon_url=self.users[data['user_id']].profile_image_url)
embed.set_image(url=data['thumbnail_url'].format(width=1920, height=1080))
start_time = parser.isoparse(data['started_at'])
embed.timestamp = start_time
return embed
def generate_plain_text(self, data, games):
"""Given data on a stream and a dict of games, assemble a string"""
try:
display_name = data['display_name']
except KeyError:
display_name = data['user_name']
return f"{display_name} is now live on Twitch!\n" \
f"Playing {games[data['game_id']]} with {data['viewer_count']} currently watching\n" \
f"Watch at https://www.twitch.tv/{data['user_name']}"
| return {} | conditional_block |
TwitchSource.py | """News source to send a notification whenever a twitch streamer goes live."""
import datetime
import logging
import discord
from dateutil import parser
from .AbstractSources import DataBasedSource
DOZER_LOGGER = logging.getLogger('dozer')
class TwitchSource(DataBasedSource):
"""News source to send a notification whenever a twitch streamer goes live."""
full_name = "Twitch"
short_name = "twitch"
base_url = "https://twitch.tv"
description = "Makes a post whenever a specified user goes live on Twitch"
token_url = "https://id.twitch.tv/oauth2/token"
api_url = "https://api.twitch.tv/helix"
color = discord.Color.from_rgb(145, 70, 255)
class TwitchUser(DataBasedSource.DataPoint):
"""A helper class to represent a single Twitch streamer"""
def __init__(self, user_id, display_name, profile_image_url, login):
super().__init__(login, display_name)
self.user_id = user_id
self.display_name = display_name
self.profile_image_url = profile_image_url
self.login = login
def __init__(self, aiohttp_session, bot):
super().__init__(aiohttp_session, bot)
self.access_token = None
self.client_id = None
self.expiry_time = None
self.users = {}
self.seen_streams = set() | client_id = self.bot.config['news']['twitch']['client_id']
self.client_id = client_id
client_secret = self.bot.config['news']['twitch']['client_secret']
params = {
'client_id': client_id,
'client_secret': client_secret,
'grant_type': 'client_credentials'
}
response = await self.http_session.post(self.token_url, params=params)
response = await response.json()
try:
self.access_token = response['access_token']
except KeyError:
DOZER_LOGGER.critical(f"Error in {self.full_name} Token Get: {response['message']}")
self.disabled = True
return
expiry_seconds = response['expires_in']
time_delta = datetime.timedelta(seconds=expiry_seconds)
self.expiry_time = datetime.datetime.now() + time_delta
async def request(self, url, *args, headers=None, **kwargs):
"""Make a OAuth2 verified request to a API Endpoint"""
if headers is None:
headers = {'Authorization': f"Bearer {self.access_token}",
"Client-ID": self.client_id}
else:
headers['Authorization'] = f"Bearer {self.access_token}"
url = f"{self.api_url}/{url}"
response = await self.http_session.get(url, headers=headers, *args, **kwargs)
if response.status == 401:
if 'WWW-Authenticate' in response.headers:
DOZER_LOGGER.info("Twitch token expired when request made, request new token and retrying.")
await self.get_token()
return await self.request(url, headers=headers, *args, **kwargs)
json = await response.json()
return json
async def first_run(self, data=None):
"""Make sure we have a token, then verify and add all the current users in the DB"""
await self.get_token()
if not data:
return
params = []
for login in data:
params.append(('login', login))
json = await self.request("users", params=params)
for user in json['data']:
user_obj = TwitchSource.TwitchUser(user['id'], user['display_name'], user['profile_image_url'],
user['login'])
self.users[user['id']] = user_obj
async def clean_data(self, text):
"""Request user data from Twitch to verify the username exists and clean the data"""
try:
user_obj = self.users[text]
except KeyError:
json = await self.request('users', params={'login': text})
if len(json['data']) == 0:
raise DataBasedSource.InvalidDataException("No user with that login name found")
elif len(json['data']) > 1:
raise DataBasedSource.InvalidDataException("More than one user with that login name found")
user_obj = TwitchSource.TwitchUser(json['data'][0]['id'], json['data'][0]['display_name'],
json['data'][0]['profile_image_url'], json['data'][0]['login'])
return user_obj
async def add_data(self, obj):
"""Add the user object to the store"""
self.users[obj.user_id] = obj
return True
async def remove_data(self, obj):
"""Remove the user object from the store"""
try:
del self.users[obj.user_id]
return True
except KeyError:
return False
async def get_new_posts(self):
"""Assemble all the current user IDs, get any game names and return the embeds and strings"""
if datetime.datetime.now() > self.expiry_time:
DOZER_LOGGER.info("Refreshing Twitch token due to expiry time")
await self.get_token()
params = []
for user in self.users.values():
params.append(('user_id', user.user_id))
params.append(('first', len(self.users)))
json = await self.request("streams", params=params)
if len(json['data']) == 0:
return {}
# streams endpoint only returns game ID, do a second request to get game names
game_ids = []
for stream in json['data']:
game_ids.append(stream['game_id'])
params = []
for game in game_ids:
params.append(('id', game))
games_json = await self.request("games", params=params)
games = {}
for game in games_json['data']:
games[game['id']] = game['name']
posts = {}
for stream in json['data']:
if stream['id'] not in self.seen_streams:
embed = self.generate_embed(stream, games)
plain = self.generate_plain_text(stream, games)
posts[stream['user_name']] = {
'embed': [embed],
'plain': [plain]
}
self.seen_streams.add(stream['id'])
return posts
def generate_embed(self, data, games):
"""Given data on a stream and a dict of games, assemble an embed"""
try:
display_name = data['display_name']
except KeyError:
display_name = data['user_name']
embed = discord.Embed()
embed.title = f"{display_name} is now live on Twitch!"
embed.colour = self.color
embed.description = data['title']
embed.url = f"https://www.twitch.tv/{data['user_name']}"
embed.add_field(name="Playing", value=games[data['game_id']], inline=True)
embed.add_field(name="Watching", value=data['viewer_count'], inline=True)
embed.set_author(name=display_name, url=embed.url, icon_url=self.users[data['user_id']].profile_image_url)
embed.set_image(url=data['thumbnail_url'].format(width=1920, height=1080))
start_time = parser.isoparse(data['started_at'])
embed.timestamp = start_time
return embed
def generate_plain_text(self, data, games):
"""Given data on a stream and a dict of games, assemble a string"""
try:
display_name = data['display_name']
except KeyError:
display_name = data['user_name']
return f"{display_name} is now live on Twitch!\n" \
f"Playing {games[data['game_id']]} with {data['viewer_count']} currently watching\n" \
f"Watch at https://www.twitch.tv/{data['user_name']}" |
async def get_token(self):
"""Use OAuth2 to request a new token. If token fails, disable the source.""" | random_line_split |
TwitchSource.py | """News source to send a notification whenever a twitch streamer goes live."""
import datetime
import logging
import discord
from dateutil import parser
from .AbstractSources import DataBasedSource
DOZER_LOGGER = logging.getLogger('dozer')
class TwitchSource(DataBasedSource):
"""News source to send a notification whenever a twitch streamer goes live."""
full_name = "Twitch"
short_name = "twitch"
base_url = "https://twitch.tv"
description = "Makes a post whenever a specified user goes live on Twitch"
token_url = "https://id.twitch.tv/oauth2/token"
api_url = "https://api.twitch.tv/helix"
color = discord.Color.from_rgb(145, 70, 255)
class TwitchUser(DataBasedSource.DataPoint):
"""A helper class to represent a single Twitch streamer"""
def __init__(self, user_id, display_name, profile_image_url, login):
super().__init__(login, display_name)
self.user_id = user_id
self.display_name = display_name
self.profile_image_url = profile_image_url
self.login = login
def __init__(self, aiohttp_session, bot):
super().__init__(aiohttp_session, bot)
self.access_token = None
self.client_id = None
self.expiry_time = None
self.users = {}
self.seen_streams = set()
async def get_token(self):
"""Use OAuth2 to request a new token. If token fails, disable the source."""
client_id = self.bot.config['news']['twitch']['client_id']
self.client_id = client_id
client_secret = self.bot.config['news']['twitch']['client_secret']
params = {
'client_id': client_id,
'client_secret': client_secret,
'grant_type': 'client_credentials'
}
response = await self.http_session.post(self.token_url, params=params)
response = await response.json()
try:
self.access_token = response['access_token']
except KeyError:
DOZER_LOGGER.critical(f"Error in {self.full_name} Token Get: {response['message']}")
self.disabled = True
return
expiry_seconds = response['expires_in']
time_delta = datetime.timedelta(seconds=expiry_seconds)
self.expiry_time = datetime.datetime.now() + time_delta
async def request(self, url, *args, headers=None, **kwargs):
"""Make a OAuth2 verified request to a API Endpoint"""
if headers is None:
headers = {'Authorization': f"Bearer {self.access_token}",
"Client-ID": self.client_id}
else:
headers['Authorization'] = f"Bearer {self.access_token}"
url = f"{self.api_url}/{url}"
response = await self.http_session.get(url, headers=headers, *args, **kwargs)
if response.status == 401:
if 'WWW-Authenticate' in response.headers:
DOZER_LOGGER.info("Twitch token expired when request made, request new token and retrying.")
await self.get_token()
return await self.request(url, headers=headers, *args, **kwargs)
json = await response.json()
return json
async def first_run(self, data=None):
"""Make sure we have a token, then verify and add all the current users in the DB"""
await self.get_token()
if not data:
return
params = []
for login in data:
params.append(('login', login))
json = await self.request("users", params=params)
for user in json['data']:
user_obj = TwitchSource.TwitchUser(user['id'], user['display_name'], user['profile_image_url'],
user['login'])
self.users[user['id']] = user_obj
async def clean_data(self, text):
"""Request user data from Twitch to verify the username exists and clean the data"""
try:
user_obj = self.users[text]
except KeyError:
json = await self.request('users', params={'login': text})
if len(json['data']) == 0:
raise DataBasedSource.InvalidDataException("No user with that login name found")
elif len(json['data']) > 1:
raise DataBasedSource.InvalidDataException("More than one user with that login name found")
user_obj = TwitchSource.TwitchUser(json['data'][0]['id'], json['data'][0]['display_name'],
json['data'][0]['profile_image_url'], json['data'][0]['login'])
return user_obj
async def add_data(self, obj):
"""Add the user object to the store"""
self.users[obj.user_id] = obj
return True
async def remove_data(self, obj):
"""Remove the user object from the store"""
try:
del self.users[obj.user_id]
return True
except KeyError:
return False
async def | (self):
"""Assemble all the current user IDs, get any game names and return the embeds and strings"""
if datetime.datetime.now() > self.expiry_time:
DOZER_LOGGER.info("Refreshing Twitch token due to expiry time")
await self.get_token()
params = []
for user in self.users.values():
params.append(('user_id', user.user_id))
params.append(('first', len(self.users)))
json = await self.request("streams", params=params)
if len(json['data']) == 0:
return {}
# streams endpoint only returns game ID, do a second request to get game names
game_ids = []
for stream in json['data']:
game_ids.append(stream['game_id'])
params = []
for game in game_ids:
params.append(('id', game))
games_json = await self.request("games", params=params)
games = {}
for game in games_json['data']:
games[game['id']] = game['name']
posts = {}
for stream in json['data']:
if stream['id'] not in self.seen_streams:
embed = self.generate_embed(stream, games)
plain = self.generate_plain_text(stream, games)
posts[stream['user_name']] = {
'embed': [embed],
'plain': [plain]
}
self.seen_streams.add(stream['id'])
return posts
def generate_embed(self, data, games):
"""Given data on a stream and a dict of games, assemble an embed"""
try:
display_name = data['display_name']
except KeyError:
display_name = data['user_name']
embed = discord.Embed()
embed.title = f"{display_name} is now live on Twitch!"
embed.colour = self.color
embed.description = data['title']
embed.url = f"https://www.twitch.tv/{data['user_name']}"
embed.add_field(name="Playing", value=games[data['game_id']], inline=True)
embed.add_field(name="Watching", value=data['viewer_count'], inline=True)
embed.set_author(name=display_name, url=embed.url, icon_url=self.users[data['user_id']].profile_image_url)
embed.set_image(url=data['thumbnail_url'].format(width=1920, height=1080))
start_time = parser.isoparse(data['started_at'])
embed.timestamp = start_time
return embed
def generate_plain_text(self, data, games):
"""Given data on a stream and a dict of games, assemble a string"""
try:
display_name = data['display_name']
except KeyError:
display_name = data['user_name']
return f"{display_name} is now live on Twitch!\n" \
f"Playing {games[data['game_id']]} with {data['viewer_count']} currently watching\n" \
f"Watch at https://www.twitch.tv/{data['user_name']}"
| get_new_posts | identifier_name |
TwitchSource.py | """News source to send a notification whenever a twitch streamer goes live."""
import datetime
import logging
import discord
from dateutil import parser
from .AbstractSources import DataBasedSource
DOZER_LOGGER = logging.getLogger('dozer')
class TwitchSource(DataBasedSource):
"""News source to send a notification whenever a twitch streamer goes live."""
full_name = "Twitch"
short_name = "twitch"
base_url = "https://twitch.tv"
description = "Makes a post whenever a specified user goes live on Twitch"
token_url = "https://id.twitch.tv/oauth2/token"
api_url = "https://api.twitch.tv/helix"
color = discord.Color.from_rgb(145, 70, 255)
class TwitchUser(DataBasedSource.DataPoint):
"""A helper class to represent a single Twitch streamer"""
def __init__(self, user_id, display_name, profile_image_url, login):
super().__init__(login, display_name)
self.user_id = user_id
self.display_name = display_name
self.profile_image_url = profile_image_url
self.login = login
def __init__(self, aiohttp_session, bot):
super().__init__(aiohttp_session, bot)
self.access_token = None
self.client_id = None
self.expiry_time = None
self.users = {}
self.seen_streams = set()
async def get_token(self):
"""Use OAuth2 to request a new token. If token fails, disable the source."""
client_id = self.bot.config['news']['twitch']['client_id']
self.client_id = client_id
client_secret = self.bot.config['news']['twitch']['client_secret']
params = {
'client_id': client_id,
'client_secret': client_secret,
'grant_type': 'client_credentials'
}
response = await self.http_session.post(self.token_url, params=params)
response = await response.json()
try:
self.access_token = response['access_token']
except KeyError:
DOZER_LOGGER.critical(f"Error in {self.full_name} Token Get: {response['message']}")
self.disabled = True
return
expiry_seconds = response['expires_in']
time_delta = datetime.timedelta(seconds=expiry_seconds)
self.expiry_time = datetime.datetime.now() + time_delta
async def request(self, url, *args, headers=None, **kwargs):
"""Make a OAuth2 verified request to a API Endpoint"""
if headers is None:
headers = {'Authorization': f"Bearer {self.access_token}",
"Client-ID": self.client_id}
else:
headers['Authorization'] = f"Bearer {self.access_token}"
url = f"{self.api_url}/{url}"
response = await self.http_session.get(url, headers=headers, *args, **kwargs)
if response.status == 401:
if 'WWW-Authenticate' in response.headers:
DOZER_LOGGER.info("Twitch token expired when request made, request new token and retrying.")
await self.get_token()
return await self.request(url, headers=headers, *args, **kwargs)
json = await response.json()
return json
async def first_run(self, data=None):
|
async def clean_data(self, text):
"""Request user data from Twitch to verify the username exists and clean the data"""
try:
user_obj = self.users[text]
except KeyError:
json = await self.request('users', params={'login': text})
if len(json['data']) == 0:
raise DataBasedSource.InvalidDataException("No user with that login name found")
elif len(json['data']) > 1:
raise DataBasedSource.InvalidDataException("More than one user with that login name found")
user_obj = TwitchSource.TwitchUser(json['data'][0]['id'], json['data'][0]['display_name'],
json['data'][0]['profile_image_url'], json['data'][0]['login'])
return user_obj
async def add_data(self, obj):
"""Add the user object to the store"""
self.users[obj.user_id] = obj
return True
async def remove_data(self, obj):
"""Remove the user object from the store"""
try:
del self.users[obj.user_id]
return True
except KeyError:
return False
async def get_new_posts(self):
"""Assemble all the current user IDs, get any game names and return the embeds and strings"""
if datetime.datetime.now() > self.expiry_time:
DOZER_LOGGER.info("Refreshing Twitch token due to expiry time")
await self.get_token()
params = []
for user in self.users.values():
params.append(('user_id', user.user_id))
params.append(('first', len(self.users)))
json = await self.request("streams", params=params)
if len(json['data']) == 0:
return {}
# streams endpoint only returns game ID, do a second request to get game names
game_ids = []
for stream in json['data']:
game_ids.append(stream['game_id'])
params = []
for game in game_ids:
params.append(('id', game))
games_json = await self.request("games", params=params)
games = {}
for game in games_json['data']:
games[game['id']] = game['name']
posts = {}
for stream in json['data']:
if stream['id'] not in self.seen_streams:
embed = self.generate_embed(stream, games)
plain = self.generate_plain_text(stream, games)
posts[stream['user_name']] = {
'embed': [embed],
'plain': [plain]
}
self.seen_streams.add(stream['id'])
return posts
def generate_embed(self, data, games):
"""Given data on a stream and a dict of games, assemble an embed"""
try:
display_name = data['display_name']
except KeyError:
display_name = data['user_name']
embed = discord.Embed()
embed.title = f"{display_name} is now live on Twitch!"
embed.colour = self.color
embed.description = data['title']
embed.url = f"https://www.twitch.tv/{data['user_name']}"
embed.add_field(name="Playing", value=games[data['game_id']], inline=True)
embed.add_field(name="Watching", value=data['viewer_count'], inline=True)
embed.set_author(name=display_name, url=embed.url, icon_url=self.users[data['user_id']].profile_image_url)
embed.set_image(url=data['thumbnail_url'].format(width=1920, height=1080))
start_time = parser.isoparse(data['started_at'])
embed.timestamp = start_time
return embed
def generate_plain_text(self, data, games):
"""Given data on a stream and a dict of games, assemble a string"""
try:
display_name = data['display_name']
except KeyError:
display_name = data['user_name']
return f"{display_name} is now live on Twitch!\n" \
f"Playing {games[data['game_id']]} with {data['viewer_count']} currently watching\n" \
f"Watch at https://www.twitch.tv/{data['user_name']}"
| """Make sure we have a token, then verify and add all the current users in the DB"""
await self.get_token()
if not data:
return
params = []
for login in data:
params.append(('login', login))
json = await self.request("users", params=params)
for user in json['data']:
user_obj = TwitchSource.TwitchUser(user['id'], user['display_name'], user['profile_image_url'],
user['login'])
self.users[user['id']] = user_obj | identifier_body |
server.js | // EXPRESS SERVER HERE //
// BASE SETUP
var express = require('express'),
app = express(),
bodyParser = require('body-parser'),
cookieParser = require('cookie-parser'),
session = require('express-session'),
methodOverride = require('method-override'),
// routes = require('./routes/routes'),
morgan = require('morgan'),
serveStatic = require('serve-static'),
errorHandler = require('errorhandler');
// =========================CONFIGURATION===========================//
// =================================================================//
app.set('port', process.env.PORT || 9001);
/*
* Set to 9001 to not interfere with Gulp 9000.
* If you're using Cloud9, or an IDE that uses a different port, process.env.PORT will
* take care of your problems. You don't need to set a new port.
*/
app.use(serveStatic('app', {'index': 'true'})); // Set to True or False if you want to start on Index or not
app.use('/bower_components', express.static(__dirname + '/bower_components'));
app.use(bodyParser.urlencoded({ extended: true }));
app.use(bodyParser.json());
app.use(methodOverride());
app.use(morgan('dev'));
app.use(cookieParser('secret'));
app.use(session({secret: 'evernote now', resave: true, saveUninitialized: true}));
app.use(function(req, res, next) {
res.locals.session = req.session;
next();
});
if (process.env.NODE_ENV === 'development') {
app.use(errorHandler());
}
// ==========================ROUTER=================================//
// =================================================================//
// ROUTES FOR THE API - RUN IN THE ORDER LISTED
var router = express.Router();
// ------------- ROUTES ---------------- //
// REGISTERING THE ROUTES
app.use('/', router);
// STARTING THE SERVER | exports = module.exports = app; // This is needed otherwise the index.js for routes will not work |
console.log('Serving on port ' + app.get('port') + '. Serving more Nodes than Big Macs!');
app.listen(app.get('port')); // Not used if Gulp is activated - it is bypassed | random_line_split |
server.js | // EXPRESS SERVER HERE //
// BASE SETUP
var express = require('express'),
app = express(),
bodyParser = require('body-parser'),
cookieParser = require('cookie-parser'),
session = require('express-session'),
methodOverride = require('method-override'),
// routes = require('./routes/routes'),
morgan = require('morgan'),
serveStatic = require('serve-static'),
errorHandler = require('errorhandler');
// =========================CONFIGURATION===========================//
// =================================================================//
app.set('port', process.env.PORT || 9001);
/*
* Set to 9001 to not interfere with Gulp 9000.
* If you're using Cloud9, or an IDE that uses a different port, process.env.PORT will
* take care of your problems. You don't need to set a new port.
*/
app.use(serveStatic('app', {'index': 'true'})); // Set to True or False if you want to start on Index or not
app.use('/bower_components', express.static(__dirname + '/bower_components'));
app.use(bodyParser.urlencoded({ extended: true }));
app.use(bodyParser.json());
app.use(methodOverride());
app.use(morgan('dev'));
app.use(cookieParser('secret'));
app.use(session({secret: 'evernote now', resave: true, saveUninitialized: true}));
app.use(function(req, res, next) {
res.locals.session = req.session;
next();
});
if (process.env.NODE_ENV === 'development') |
// ==========================ROUTER=================================//
// =================================================================//
// ROUTES FOR THE API - RUN IN THE ORDER LISTED
var router = express.Router();
// ------------- ROUTES ---------------- //
// REGISTERING THE ROUTES
app.use('/', router);
// STARTING THE SERVER
console.log('Serving on port ' + app.get('port') + '. Serving more Nodes than Big Macs!');
app.listen(app.get('port')); // Not used if Gulp is activated - it is bypassed
exports = module.exports = app; // This is needed otherwise the index.js for routes will not work
| {
app.use(errorHandler());
} | conditional_block |
CalcView.js | /**
* @license
* Copyright 2015 Google Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the License);
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*/
CLASS({
package: 'foam.apps.calc',
name: 'CalcView',
extends: 'foam.ui.View',
requires: [
'foam.apps.calc.CalcButton',
'foam.apps.calc.CalcSpeechView',
'foam.apps.calc.Fonts',
'foam.apps.calc.HistoryCitationView',
'foam.apps.calc.MainButtonsView',
'foam.apps.calc.NumberFormatter',
'foam.apps.calc.SecondaryButtonsView',
'foam.apps.calc.TertiaryButtonsView',
'foam.ui.SlidePanel',
'foam.ui.animated.Label'
// 'foam.chromeapp.ui.ZoomView'
],
imports: [ 'document' ],
exports: [ 'data' ],
properties: [
{
model_: 'StringProperty',
name: 'row1Formatted',
view: 'foam.ui.animated.Label',
preSet: function(_,nu) {
return this.numberFormatter.i18nNumber(nu);
}
},
{
name: 'data',
postSet: function() {
this.numberFormatter = this.data.numberFormatter;
Events.follow(this.data.row1$, this.row1Formatted$);
}
},
{
name: 'installFonts_',
hidden: true,
factory: function() {
return this.document.head.querySelector('link[rel=stylesheet][href*=RobotoDraft]') ?
'' : this.Fonts.create();
}
},
{
model_: 'IntProperty',
name: 'animating_',
defaultValue: false,
postSet: function(old, nu) {
if ( nu || old === nu || ! this.$ ) return;
// After animations: Set "top" property of inner calc display to prevent
// over-scrolling.
var outerHeight = this.$outer.clientHeight;
var innerHeight = this.$inner.clientHeight;
this.$inner.style.top = innerHeight < outerHeight ?
'calc(100% - ' + innerHeight + 'px)' :
'0px';
}
},
{
name: '$inner',
getter: function() { return this.$.querySelector('.inner-calc-display'); }
},
{
name: '$outer',
getter: function() { return this.$.querySelector('.calc-display'); }
}
],
methods: {
initHTML: function() {
this.SUPER();
this.$parent.addEventListener('paste', this.onPaste);
// This block causes the calc-display to scroll when updated.
// To remove this feature replace the .inner-calc-display 'transition:' and
// 'top:' styles with 'bottom: 0'.
var move = EventService.framed(EventService.framed(function() {
if ( ! this.$ ) return;
var value = DOMValue.create({element: this.$outer, property: 'scrollTop' });
Movement.compile([
function() { ++this.animating_; }.bind(this),
[200, function() { value.value = this.$inner.clientHeight; }.bind(this)],
function() { --this.animating_; }.bind(this)
])();
}.bind(this)));
Events.dynamic(function() { this.data.op; this.data.history; this.data.a1; this.data.a2; }.bind(this), move);
this.X.window.addEventListener('resize', move);
this.$.querySelector('.keypad').addEventListener('mousedown', function(e) { e.preventDefault(); return false; });
}
},
listeners: [
{
name: 'onPaste',
whenIdle: true,
code: function(evt) {
var CMD = { '0': '0', '1': '1', '2': '2', '3': '3', '4': '4', '5': '5', '6': '6', '7': '7', '8': '8', '9': '9', '+': 'plus', '-': 'minus', '*': 'mult', '/': 'div', '%': 'percent', '=': 'equals' };
CMD[this.data.numberFormatter.useComma ? ',' : '.'] = 'point';
var data = evt.clipboardData.getData('text/plain');
for ( var i = 0 ; i < data.length ; i++ ) {
var c = data.charAt(i);
// If history is empty and the first character is '-' then insert a 0 to subtract from
if ( c === '-' && ! i && ! this.data.history.length && ! this.data.row1 ) this.data['0']();
var cmd = CMD[c];
if ( cmd ) this.data[cmd]();
}
}
}
],
templates: [
function CSS() {/*
.CalcView * {
box-sizing: border-box;
outline: none;
}
.CalcView {
-webkit-user-select: none;
-webkit-font-smoothing: antialiased;
font-family: RobotoDraft, 'Helvetica Neue', Helvetica, Arial;
font-size: 30px;
font-weight: 300;
height: 100%; | padding: 0;
width: 100%;
}
.CalcView ::-webkit-scrollbar {
display: none;
}
.CalcView ::-webkit-scrollbar-thumb {
display: none;
}
.calc {
background-color: #eee;
border: 0;
display: flex;
flex-direction: column;
height: 100%;
margin: 0;
padding: 0px;
}
.deg, .rad {
background-color: #eee;
color: #111;
font-size: 22px;
font-weight: 400;
opacity: 0;
padding-left: 8px;
padding-right: 10px;
transition: opacity 0.8s;
}
.active {
opacity: 1;
z-index: 2;
}
.calc-display, .calc-display:focus {
border: none;
letter-spacing: 1px;
line-height: 36px;
margin: 0;
min-width: 140px;
padding: 0 25pt 2pt 25pt;
text-align: right;
-webkit-user-select: text;
overflow-y: scroll;
overflow-x: hidden;
}
.edge {
background: linear-gradient(to bottom, rgba(240,240,240,1) 0%,
rgba(240,240,240,0) 100%);
height: 20px;
position: absolute;
top: 0;
width: 100%;
z-index: 1;
}
.calc .buttons {
flex: 1 1 100%;
width: 100%;
height: 252px;
}
.button-row {
display: flex;
flex-direction: row;
flex-wrap: nowrap;
flex: 1 1 100%;
justify-content: space-between;
}
.button {
flex-grow: 1;
justify-content: center;
display: flex;
align-items: center;
background-color: #4b4b4b;
}
.rhs-ops {
border-left-width: 1px;
border-left-style: solid;
border-left-color: rgb(68, 68, 68);
background: #777;
}
.rhs-ops .button {
background-color: #777;
}
.button-column {
display: flex;
flex-direction: column;
flex-wrap: nowrap;
}
.inner-calc-display {
position: absolute;
right: 20pt;
top: 100%;
width: 100%;
padding-left: 50px;
padding-bottom: 11px;
}
.calc-display {
flex-grow: 5;
position: relative;
}
.secondaryButtons {
padding-left: 30px;
background: rgb(52, 153, 128);
}
.secondaryButtons .button {
background: rgb(52, 153, 128);
}
.tertiaryButtons {
padding-left: 35px;
background: rgb(29, 233, 182);
}
.tertiaryButtons .button {
background: rgb(29, 233, 182);
}
.keypad {
flex-grow: 0;
flex-shrink: 0;
margin-bottom: -4px;
z-index: 5;
}
.alabel {
font-size: 30px;
}
.calc hr {
border-style: outset;
opacity: 0.5;
}
*/},
{
name: 'toHTML',
template: function() {/*
<%= this.CalcSpeechView.create({calc: this.data}) %>
<!-- <%= this.ZoomView.create() %> -->
<% X.registerModel(this.CalcButton, 'foam.ui.ActionButton'); %>
<div id="%%id" class="CalcView">
<div style="position: relative;z-index: 100;">
<div tabindex="1" style="position: absolute;">
<span aria-label="{{{this.data.model_.RAD.label}}}" style="top: 10;left: 0;position: absolute;" id="<%= this.setClass('active', function() { return ! this.data.degreesMode; }) %>" class="rad" title="{{{this.data.model_.RAD.label}}}"></span>
<span aria-label="{{{this.data.model_.DEG.label}}}" style="top: 10;left: 0;position: absolute;" id="<%= this.setClass('active', function() { return this.data.degreesMode; }) %>" class="deg" title="{{{this.data.model_.DEG.label}}}"></span>
</div>
</div>
<div class="edge"></div>
<div class="calc">
<div class="calc-display">
<div class="inner-calc-display">
$$history{ rowView: 'foam.apps.calc.HistoryCitationView' }
<div>$$row1Formatted{mode: 'read-only', tabIndex: 3, escapeHTML: false}</div>
</div>
</div>
<div class="keypad">
<div class="edge2"></div>
<%= this.SlidePanel.create({
data: this.data,
side: 'right',
minWidth: 310,
minPanelWidth: 320,
panelRatio: 0.55,
mainView: 'foam.apps.calc.MainButtonsView',
stripWidth: 30,
panelView: {
factory_: 'foam.ui.SlidePanel',
side: 'right',
stripWidth: 30,
minWidth: 320,
minPanelWidth: 220,
panelRatio: 3/7,
mainView: 'foam.apps.calc.SecondaryButtonsView',
panelView: 'foam.apps.calc.TertiaryButtonsView'
}
}) %>
</div>
</div>
</div>
*/}
}
]
}); | position: fixed;
margin: 0; | random_line_split |
app.js | (function(){
var app = angular.module('gnsApp', ['firebase', 'gns.controllers', 'ui.router'])
app.controller('loadController', function($scope, $firebaseObject){
var storage = firebase.storage();
$scope.storageRef = storage.ref()
$scope.one = []
$scope.empty_alert = false/*
$scope.alert_empty_field = false
$scope.alert_exito = false
$scope.confirm_sobre = false
$scope.sobreescribir = false
$scope.alert_sobre = false*/
$scope.main_menu = 'herbario'
$scope.main_menu_open = false
$scope.done = false
var rootRef = firebase.database().ref();
$scope.database = $firebaseObject(rootRef)
$scope.lista_especies = []
$scope.metadatos = {especie:[], genero:[], familia:[], orden:[], clase:[], division:[]}
$scope.save_database = function(){
$scope.database.$save()
}
$scope.black_screen = function(){
if($scope.main_menu_open || $scope.nueva_planta_abierto || $scope.nueva_seccion_abierto || $scope.planta_abierto){
return true
}else{ return false}
}
$scope.black_screen_click = function(){
$scope.main_menu_open = false
$scope.nueva_planta_abierto = false
$scope.nueva_seccion_abierto = false
$scope.planta_abierto = false
}
$scope.empty_alert_accept = function(){
$scope.done = true
$scope.empty_alert = false
}
$scope.select_menu_option = function(opt){
$scope.main_menu = opt
$scope.main_menu_open = false
}
$scope.start_list = function(){
var ordered_list = []
var unordered_list = []
var k=0
for(var e in $scope.database.Basic){
if($scope.database.Basic[e].id!=null) |
}
var j = 0
for(var genero in $scope.metadatos.genero){
for(var i = 0; i<unordered_list.length; i++){
if(unordered_list[i].genero == genero){
ordered_list[j] = unordered_list[i]
j++
}
}
}
$scope.lista_especies = ordered_list
}
var fulfill = function(){
var result = []
for(var n=1; n < 26; n++){
result.push(n)
}
return result
}
$scope.database.$loaded().then(function(){
var date = new Date()
var y = date.getUTCFullYear() + ""
var m = (date.getUTCMonth() + 1) + ""
var d = date.getUTCDate() + ""
$scope.today = d + "/" + m + "/" + y
if($scope.database['Basic'] == null){
$scope.empty_alert = true
$scope.database['Basic'] = {no_especies: 0}
}else{
$scope.metadatos = $scope.database.metadatos
$scope.done = true
$scope.start_list()
}
if($scope.storageRef.child('Basic') == null){
$scope.storageRef.child('Basic') = {}
}
/*if($scope.database.jardin_principal == null){
$scope.jardin.nombre = 'Jardín de lo Invisible'
$scope.jardin.fecha_de_creacion= $scope.today
$scope.jardin.no_secciones= 0
}else{
$scope.jardin = $scope.database.jardin_principal
}*/
$scope.first_especie = $scope.lista_especies[0]
$scope.one=fulfill()
})
})
app.config(function($stateProvider, $urlRouterProvider) {
$urlRouterProvider.otherwise('/herbario');
$stateProvider
.state('herbario', {
name: 'herbario',
url: '/herbario',
controller: 'herbarioController',
templateUrl: "components/herbario/view.html"
})
.state('agregar_especie', {
name: 'agregar_especie',
controller: 'agregarController',
url: '/agregar_especie',
templateUrl: "components/agregar_especie/view.html"
})
.state('jardin', {
name: 'jardin',
controller: 'jardinController',
url: '/jardin',
templateUrl: "components/jardin/view.html"
})
.state('manejar_datos', {
name: 'manejar_datos',
controller: 'manejarController',
url: '/manejar_datos',
templateUrl: "components/manejar_datos/view.html"
})
.state('configuracion_perfil', {
name: 'configuracion_perfil',
controller: 'configuraciondeperfilController',
url: '/configuracion_perfil',
templateUrl: "components/configuracion_perfil/view.html"
})
.state('calendar', {
name: 'calendar',
controller: 'calendarController',
url: '/calendar',
templateUrl: "components/calendar/view.html"
})
.state('tourist', {
name: 'tourist',
controller: 'touristController',
url: '/tourist',
templateUrl: "components/tourist/view.html"
})
})
})(); | {
unordered_list[k]= $scope.database.Basic[e]
k++
} | conditional_block |
app.js | (function(){
var app = angular.module('gnsApp', ['firebase', 'gns.controllers', 'ui.router'])
app.controller('loadController', function($scope, $firebaseObject){
var storage = firebase.storage();
$scope.storageRef = storage.ref()
$scope.one = []
$scope.empty_alert = false/*
$scope.alert_empty_field = false
$scope.alert_exito = false
$scope.confirm_sobre = false
$scope.sobreescribir = false
$scope.alert_sobre = false*/
$scope.main_menu = 'herbario'
$scope.main_menu_open = false
$scope.done = false
var rootRef = firebase.database().ref();
$scope.database = $firebaseObject(rootRef)
$scope.lista_especies = []
$scope.metadatos = {especie:[], genero:[], familia:[], orden:[], clase:[], division:[]}
$scope.save_database = function(){
$scope.database.$save()
}
$scope.black_screen = function(){
if($scope.main_menu_open || $scope.nueva_planta_abierto || $scope.nueva_seccion_abierto || $scope.planta_abierto){
return true
}else{ return false}
}
$scope.black_screen_click = function(){
$scope.main_menu_open = false
$scope.nueva_planta_abierto = false
$scope.nueva_seccion_abierto = false
$scope.planta_abierto = false
}
$scope.empty_alert_accept = function(){
$scope.done = true
$scope.empty_alert = false
}
$scope.select_menu_option = function(opt){
$scope.main_menu = opt
$scope.main_menu_open = false
}
$scope.start_list = function(){
var ordered_list = []
var unordered_list = []
var k=0
for(var e in $scope.database.Basic){
if($scope.database.Basic[e].id!=null){
unordered_list[k]= $scope.database.Basic[e]
k++
}
}
var j = 0
for(var genero in $scope.metadatos.genero){
for(var i = 0; i<unordered_list.length; i++){
if(unordered_list[i].genero == genero){
ordered_list[j] = unordered_list[i]
j++
}
}
}
$scope.lista_especies = ordered_list
}
var fulfill = function(){
var result = []
for(var n=1; n < 26; n++){
result.push(n)
}
return result
}
$scope.database.$loaded().then(function(){
var date = new Date()
var y = date.getUTCFullYear() + ""
var m = (date.getUTCMonth() + 1) + ""
var d = date.getUTCDate() + ""
$scope.today = d + "/" + m + "/" + y
if($scope.database['Basic'] == null){
$scope.empty_alert = true
$scope.database['Basic'] = {no_especies: 0}
}else{
$scope.metadatos = $scope.database.metadatos
$scope.done = true
$scope.start_list()
}
if($scope.storageRef.child('Basic') == null){
$scope.storageRef.child('Basic') = {}
}
/*if($scope.database.jardin_principal == null){
$scope.jardin.nombre = 'Jardín de lo Invisible'
$scope.jardin.fecha_de_creacion= $scope.today
$scope.jardin.no_secciones= 0
}else{
$scope.jardin = $scope.database.jardin_principal
}*/
$scope.first_especie = $scope.lista_especies[0]
$scope.one=fulfill()
})
})
app.config(function($stateProvider, $urlRouterProvider) {
$urlRouterProvider.otherwise('/herbario');
$stateProvider
.state('herbario', {
name: 'herbario',
url: '/herbario',
controller: 'herbarioController',
templateUrl: "components/herbario/view.html"
})
.state('agregar_especie', {
name: 'agregar_especie',
controller: 'agregarController',
url: '/agregar_especie',
templateUrl: "components/agregar_especie/view.html"
})
.state('jardin', {
name: 'jardin',
controller: 'jardinController',
url: '/jardin',
templateUrl: "components/jardin/view.html"
})
.state('manejar_datos', {
name: 'manejar_datos',
controller: 'manejarController',
url: '/manejar_datos',
templateUrl: "components/manejar_datos/view.html"
})
.state('configuracion_perfil', {
name: 'configuracion_perfil',
controller: 'configuraciondeperfilController', | url: '/configuracion_perfil',
templateUrl: "components/configuracion_perfil/view.html"
})
.state('calendar', {
name: 'calendar',
controller: 'calendarController',
url: '/calendar',
templateUrl: "components/calendar/view.html"
})
.state('tourist', {
name: 'tourist',
controller: 'touristController',
url: '/tourist',
templateUrl: "components/tourist/view.html"
})
})
})(); | random_line_split | |
setup.py | """Device tracker helpers."""
import asyncio
from typing import Dict, Any, Callable, Optional
from types import ModuleType
import attr
from homeassistant.core import callback
from homeassistant.setup import async_prepare_setup_platform
from homeassistant.helpers import config_per_platform
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.util import dt as dt_util
from homeassistant.const import ATTR_LATITUDE, ATTR_LONGITUDE
from .const import (
DOMAIN,
PLATFORM_TYPE_LEGACY,
CONF_SCAN_INTERVAL,
SCAN_INTERVAL,
SOURCE_TYPE_ROUTER,
LOGGER,
)
@attr.s
class DeviceTrackerPlatform:
"""Class to hold platform information."""
LEGACY_SETUP = (
"async_get_scanner",
"get_scanner",
"async_setup_scanner",
"setup_scanner",
)
name = attr.ib(type=str)
platform = attr.ib(type=ModuleType)
config = attr.ib(type=Dict)
@property
def type(self):
"""Return platform type."""
for methods, platform_type in ((self.LEGACY_SETUP, PLATFORM_TYPE_LEGACY),):
for meth in methods:
if hasattr(self.platform, meth):
return platform_type
return None
async def async_setup_legacy(self, hass, tracker, discovery_info=None):
"""Set up a legacy platform."""
LOGGER.info("Setting up %s.%s", DOMAIN, self.type)
try:
scanner = None
setup = None
if hasattr(self.platform, "async_get_scanner"):
scanner = await self.platform.async_get_scanner(
hass, {DOMAIN: self.config}
)
elif hasattr(self.platform, "get_scanner"):
scanner = await hass.async_add_job(
self.platform.get_scanner, hass, {DOMAIN: self.config}
)
elif hasattr(self.platform, "async_setup_scanner"):
setup = await self.platform.async_setup_scanner(
hass, self.config, tracker.async_see, discovery_info
)
elif hasattr(self.platform, "setup_scanner"):
setup = await hass.async_add_job(
self.platform.setup_scanner,
hass,
self.config,
tracker.see,
discovery_info,
)
else:
raise HomeAssistantError("Invalid legacy device_tracker platform.")
if scanner:
async_setup_scanner_platform(
hass, self.config, scanner, tracker.async_see, self.type
)
return
if not setup:
LOGGER.error("Error setting up platform %s", self.type)
return
except Exception: # pylint: disable=broad-except
LOGGER.exception("Error setting up platform %s", self.type)
async def async_extract_config(hass, config):
|
async def async_create_platform_type(
hass, config, p_type, p_config
) -> Optional[DeviceTrackerPlatform]:
"""Determine type of platform."""
platform = await async_prepare_setup_platform(hass, config, DOMAIN, p_type)
if platform is None:
return None
return DeviceTrackerPlatform(p_type, platform, p_config)
@callback
def async_setup_scanner_platform(
hass: HomeAssistantType,
config: ConfigType,
scanner: Any,
async_see_device: Callable,
platform: str,
):
"""Set up the connect scanner-based platform to device tracker.
This method must be run in the event loop.
"""
interval = config.get(CONF_SCAN_INTERVAL, SCAN_INTERVAL)
update_lock = asyncio.Lock()
scanner.hass = hass
# Initial scan of each mac we also tell about host name for config
seen: Any = set()
async def async_device_tracker_scan(now: dt_util.dt.datetime):
"""Handle interval matches."""
if update_lock.locked():
LOGGER.warning(
"Updating device list from %s took longer than the scheduled "
"scan interval %s",
platform,
interval,
)
return
async with update_lock:
found_devices = await scanner.async_scan_devices()
for mac in found_devices:
if mac in seen:
host_name = None
else:
host_name = await scanner.async_get_device_name(mac)
seen.add(mac)
try:
extra_attributes = await scanner.async_get_extra_attributes(mac)
except NotImplementedError:
extra_attributes = dict()
kwargs = {
"mac": mac,
"host_name": host_name,
"source_type": SOURCE_TYPE_ROUTER,
"attributes": {
"scanner": scanner.__class__.__name__,
**extra_attributes,
},
}
zone_home = hass.states.get(hass.components.zone.ENTITY_ID_HOME)
if zone_home:
kwargs["gps"] = [
zone_home.attributes[ATTR_LATITUDE],
zone_home.attributes[ATTR_LONGITUDE],
]
kwargs["gps_accuracy"] = 0
hass.async_create_task(async_see_device(**kwargs))
async_track_time_interval(hass, async_device_tracker_scan, interval)
hass.async_create_task(async_device_tracker_scan(None))
| """Extract device tracker config and split between legacy and modern."""
legacy = []
for platform in await asyncio.gather(
*(
async_create_platform_type(hass, config, p_type, p_config)
for p_type, p_config in config_per_platform(config, DOMAIN)
)
):
if platform is None:
continue
if platform.type == PLATFORM_TYPE_LEGACY:
legacy.append(platform)
else:
raise ValueError(
"Unable to determine type for {}: {}".format(
platform.name, platform.type
)
)
return legacy | identifier_body |
setup.py | """Device tracker helpers."""
import asyncio
from typing import Dict, Any, Callable, Optional
from types import ModuleType
import attr
from homeassistant.core import callback
from homeassistant.setup import async_prepare_setup_platform
from homeassistant.helpers import config_per_platform
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.util import dt as dt_util
from homeassistant.const import ATTR_LATITUDE, ATTR_LONGITUDE
from .const import (
DOMAIN,
PLATFORM_TYPE_LEGACY,
CONF_SCAN_INTERVAL,
SCAN_INTERVAL,
SOURCE_TYPE_ROUTER,
LOGGER,
)
@attr.s
class | :
"""Class to hold platform information."""
LEGACY_SETUP = (
"async_get_scanner",
"get_scanner",
"async_setup_scanner",
"setup_scanner",
)
name = attr.ib(type=str)
platform = attr.ib(type=ModuleType)
config = attr.ib(type=Dict)
@property
def type(self):
"""Return platform type."""
for methods, platform_type in ((self.LEGACY_SETUP, PLATFORM_TYPE_LEGACY),):
for meth in methods:
if hasattr(self.platform, meth):
return platform_type
return None
async def async_setup_legacy(self, hass, tracker, discovery_info=None):
"""Set up a legacy platform."""
LOGGER.info("Setting up %s.%s", DOMAIN, self.type)
try:
scanner = None
setup = None
if hasattr(self.platform, "async_get_scanner"):
scanner = await self.platform.async_get_scanner(
hass, {DOMAIN: self.config}
)
elif hasattr(self.platform, "get_scanner"):
scanner = await hass.async_add_job(
self.platform.get_scanner, hass, {DOMAIN: self.config}
)
elif hasattr(self.platform, "async_setup_scanner"):
setup = await self.platform.async_setup_scanner(
hass, self.config, tracker.async_see, discovery_info
)
elif hasattr(self.platform, "setup_scanner"):
setup = await hass.async_add_job(
self.platform.setup_scanner,
hass,
self.config,
tracker.see,
discovery_info,
)
else:
raise HomeAssistantError("Invalid legacy device_tracker platform.")
if scanner:
async_setup_scanner_platform(
hass, self.config, scanner, tracker.async_see, self.type
)
return
if not setup:
LOGGER.error("Error setting up platform %s", self.type)
return
except Exception: # pylint: disable=broad-except
LOGGER.exception("Error setting up platform %s", self.type)
async def async_extract_config(hass, config):
"""Extract device tracker config and split between legacy and modern."""
legacy = []
for platform in await asyncio.gather(
*(
async_create_platform_type(hass, config, p_type, p_config)
for p_type, p_config in config_per_platform(config, DOMAIN)
)
):
if platform is None:
continue
if platform.type == PLATFORM_TYPE_LEGACY:
legacy.append(platform)
else:
raise ValueError(
"Unable to determine type for {}: {}".format(
platform.name, platform.type
)
)
return legacy
async def async_create_platform_type(
hass, config, p_type, p_config
) -> Optional[DeviceTrackerPlatform]:
"""Determine type of platform."""
platform = await async_prepare_setup_platform(hass, config, DOMAIN, p_type)
if platform is None:
return None
return DeviceTrackerPlatform(p_type, platform, p_config)
@callback
def async_setup_scanner_platform(
hass: HomeAssistantType,
config: ConfigType,
scanner: Any,
async_see_device: Callable,
platform: str,
):
"""Set up the connect scanner-based platform to device tracker.
This method must be run in the event loop.
"""
interval = config.get(CONF_SCAN_INTERVAL, SCAN_INTERVAL)
update_lock = asyncio.Lock()
scanner.hass = hass
# Initial scan of each mac we also tell about host name for config
seen: Any = set()
async def async_device_tracker_scan(now: dt_util.dt.datetime):
"""Handle interval matches."""
if update_lock.locked():
LOGGER.warning(
"Updating device list from %s took longer than the scheduled "
"scan interval %s",
platform,
interval,
)
return
async with update_lock:
found_devices = await scanner.async_scan_devices()
for mac in found_devices:
if mac in seen:
host_name = None
else:
host_name = await scanner.async_get_device_name(mac)
seen.add(mac)
try:
extra_attributes = await scanner.async_get_extra_attributes(mac)
except NotImplementedError:
extra_attributes = dict()
kwargs = {
"mac": mac,
"host_name": host_name,
"source_type": SOURCE_TYPE_ROUTER,
"attributes": {
"scanner": scanner.__class__.__name__,
**extra_attributes,
},
}
zone_home = hass.states.get(hass.components.zone.ENTITY_ID_HOME)
if zone_home:
kwargs["gps"] = [
zone_home.attributes[ATTR_LATITUDE],
zone_home.attributes[ATTR_LONGITUDE],
]
kwargs["gps_accuracy"] = 0
hass.async_create_task(async_see_device(**kwargs))
async_track_time_interval(hass, async_device_tracker_scan, interval)
hass.async_create_task(async_device_tracker_scan(None))
| DeviceTrackerPlatform | identifier_name |
setup.py | """Device tracker helpers."""
import asyncio
from typing import Dict, Any, Callable, Optional
from types import ModuleType
import attr
from homeassistant.core import callback
from homeassistant.setup import async_prepare_setup_platform
from homeassistant.helpers import config_per_platform
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.util import dt as dt_util
from homeassistant.const import ATTR_LATITUDE, ATTR_LONGITUDE
from .const import (
DOMAIN,
PLATFORM_TYPE_LEGACY,
CONF_SCAN_INTERVAL,
SCAN_INTERVAL,
SOURCE_TYPE_ROUTER,
LOGGER,
)
@attr.s
class DeviceTrackerPlatform:
"""Class to hold platform information."""
LEGACY_SETUP = (
"async_get_scanner",
"get_scanner",
"async_setup_scanner",
"setup_scanner",
)
name = attr.ib(type=str)
platform = attr.ib(type=ModuleType)
config = attr.ib(type=Dict)
@property
def type(self):
"""Return platform type."""
for methods, platform_type in ((self.LEGACY_SETUP, PLATFORM_TYPE_LEGACY),):
for meth in methods:
|
return None
async def async_setup_legacy(self, hass, tracker, discovery_info=None):
"""Set up a legacy platform."""
LOGGER.info("Setting up %s.%s", DOMAIN, self.type)
try:
scanner = None
setup = None
if hasattr(self.platform, "async_get_scanner"):
scanner = await self.platform.async_get_scanner(
hass, {DOMAIN: self.config}
)
elif hasattr(self.platform, "get_scanner"):
scanner = await hass.async_add_job(
self.platform.get_scanner, hass, {DOMAIN: self.config}
)
elif hasattr(self.platform, "async_setup_scanner"):
setup = await self.platform.async_setup_scanner(
hass, self.config, tracker.async_see, discovery_info
)
elif hasattr(self.platform, "setup_scanner"):
setup = await hass.async_add_job(
self.platform.setup_scanner,
hass,
self.config,
tracker.see,
discovery_info,
)
else:
raise HomeAssistantError("Invalid legacy device_tracker platform.")
if scanner:
async_setup_scanner_platform(
hass, self.config, scanner, tracker.async_see, self.type
)
return
if not setup:
LOGGER.error("Error setting up platform %s", self.type)
return
except Exception: # pylint: disable=broad-except
LOGGER.exception("Error setting up platform %s", self.type)
async def async_extract_config(hass, config):
"""Extract device tracker config and split between legacy and modern."""
legacy = []
for platform in await asyncio.gather(
*(
async_create_platform_type(hass, config, p_type, p_config)
for p_type, p_config in config_per_platform(config, DOMAIN)
)
):
if platform is None:
continue
if platform.type == PLATFORM_TYPE_LEGACY:
legacy.append(platform)
else:
raise ValueError(
"Unable to determine type for {}: {}".format(
platform.name, platform.type
)
)
return legacy
async def async_create_platform_type(
hass, config, p_type, p_config
) -> Optional[DeviceTrackerPlatform]:
"""Determine type of platform."""
platform = await async_prepare_setup_platform(hass, config, DOMAIN, p_type)
if platform is None:
return None
return DeviceTrackerPlatform(p_type, platform, p_config)
@callback
def async_setup_scanner_platform(
hass: HomeAssistantType,
config: ConfigType,
scanner: Any,
async_see_device: Callable,
platform: str,
):
"""Set up the connect scanner-based platform to device tracker.
This method must be run in the event loop.
"""
interval = config.get(CONF_SCAN_INTERVAL, SCAN_INTERVAL)
update_lock = asyncio.Lock()
scanner.hass = hass
# Initial scan of each mac we also tell about host name for config
seen: Any = set()
async def async_device_tracker_scan(now: dt_util.dt.datetime):
"""Handle interval matches."""
if update_lock.locked():
LOGGER.warning(
"Updating device list from %s took longer than the scheduled "
"scan interval %s",
platform,
interval,
)
return
async with update_lock:
found_devices = await scanner.async_scan_devices()
for mac in found_devices:
if mac in seen:
host_name = None
else:
host_name = await scanner.async_get_device_name(mac)
seen.add(mac)
try:
extra_attributes = await scanner.async_get_extra_attributes(mac)
except NotImplementedError:
extra_attributes = dict()
kwargs = {
"mac": mac,
"host_name": host_name,
"source_type": SOURCE_TYPE_ROUTER,
"attributes": {
"scanner": scanner.__class__.__name__,
**extra_attributes,
},
}
zone_home = hass.states.get(hass.components.zone.ENTITY_ID_HOME)
if zone_home:
kwargs["gps"] = [
zone_home.attributes[ATTR_LATITUDE],
zone_home.attributes[ATTR_LONGITUDE],
]
kwargs["gps_accuracy"] = 0
hass.async_create_task(async_see_device(**kwargs))
async_track_time_interval(hass, async_device_tracker_scan, interval)
hass.async_create_task(async_device_tracker_scan(None))
| if hasattr(self.platform, meth):
return platform_type | conditional_block |
setup.py | """Device tracker helpers."""
import asyncio
from typing import Dict, Any, Callable, Optional
from types import ModuleType
import attr
from homeassistant.core import callback
from homeassistant.setup import async_prepare_setup_platform
from homeassistant.helpers import config_per_platform
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.util import dt as dt_util
from homeassistant.const import ATTR_LATITUDE, ATTR_LONGITUDE
from .const import (
DOMAIN,
PLATFORM_TYPE_LEGACY,
CONF_SCAN_INTERVAL,
SCAN_INTERVAL,
SOURCE_TYPE_ROUTER,
LOGGER,
)
@attr.s
class DeviceTrackerPlatform:
"""Class to hold platform information."""
LEGACY_SETUP = (
"async_get_scanner",
"get_scanner",
"async_setup_scanner",
"setup_scanner",
)
name = attr.ib(type=str)
platform = attr.ib(type=ModuleType)
config = attr.ib(type=Dict)
@property
def type(self):
"""Return platform type."""
for methods, platform_type in ((self.LEGACY_SETUP, PLATFORM_TYPE_LEGACY),):
for meth in methods:
if hasattr(self.platform, meth):
return platform_type
return None
async def async_setup_legacy(self, hass, tracker, discovery_info=None):
"""Set up a legacy platform."""
LOGGER.info("Setting up %s.%s", DOMAIN, self.type)
try:
scanner = None
setup = None
if hasattr(self.platform, "async_get_scanner"):
scanner = await self.platform.async_get_scanner(
hass, {DOMAIN: self.config}
)
elif hasattr(self.platform, "get_scanner"):
scanner = await hass.async_add_job(
self.platform.get_scanner, hass, {DOMAIN: self.config}
)
elif hasattr(self.platform, "async_setup_scanner"):
setup = await self.platform.async_setup_scanner(
hass, self.config, tracker.async_see, discovery_info
)
elif hasattr(self.platform, "setup_scanner"):
setup = await hass.async_add_job(
self.platform.setup_scanner,
hass,
self.config,
tracker.see,
discovery_info,
)
else:
raise HomeAssistantError("Invalid legacy device_tracker platform.")
if scanner:
async_setup_scanner_platform(
hass, self.config, scanner, tracker.async_see, self.type
)
return
if not setup:
LOGGER.error("Error setting up platform %s", self.type)
return
except Exception: # pylint: disable=broad-except
LOGGER.exception("Error setting up platform %s", self.type)
async def async_extract_config(hass, config):
"""Extract device tracker config and split between legacy and modern."""
legacy = []
for platform in await asyncio.gather(
*(
async_create_platform_type(hass, config, p_type, p_config)
for p_type, p_config in config_per_platform(config, DOMAIN)
)
):
if platform is None:
continue
if platform.type == PLATFORM_TYPE_LEGACY:
legacy.append(platform)
else:
raise ValueError(
"Unable to determine type for {}: {}".format(
platform.name, platform.type
)
)
return legacy
async def async_create_platform_type(
hass, config, p_type, p_config
) -> Optional[DeviceTrackerPlatform]:
"""Determine type of platform."""
platform = await async_prepare_setup_platform(hass, config, DOMAIN, p_type)
if platform is None:
return None
return DeviceTrackerPlatform(p_type, platform, p_config)
@callback
def async_setup_scanner_platform(
hass: HomeAssistantType,
config: ConfigType,
scanner: Any,
async_see_device: Callable,
platform: str,
):
"""Set up the connect scanner-based platform to device tracker.
This method must be run in the event loop.
"""
interval = config.get(CONF_SCAN_INTERVAL, SCAN_INTERVAL)
update_lock = asyncio.Lock()
scanner.hass = hass
# Initial scan of each mac we also tell about host name for config
seen: Any = set()
async def async_device_tracker_scan(now: dt_util.dt.datetime):
"""Handle interval matches."""
if update_lock.locked():
LOGGER.warning(
"Updating device list from %s took longer than the scheduled "
"scan interval %s",
platform,
interval,
)
return
async with update_lock:
found_devices = await scanner.async_scan_devices()
for mac in found_devices:
if mac in seen:
host_name = None
else:
host_name = await scanner.async_get_device_name(mac)
seen.add(mac)
try:
extra_attributes = await scanner.async_get_extra_attributes(mac)
except NotImplementedError:
extra_attributes = dict()
kwargs = {
"mac": mac,
"host_name": host_name,
"source_type": SOURCE_TYPE_ROUTER,
"attributes": {
"scanner": scanner.__class__.__name__,
**extra_attributes,
},
}
zone_home = hass.states.get(hass.components.zone.ENTITY_ID_HOME)
if zone_home:
kwargs["gps"] = [
zone_home.attributes[ATTR_LATITUDE],
zone_home.attributes[ATTR_LONGITUDE],
]
kwargs["gps_accuracy"] = 0
hass.async_create_task(async_see_device(**kwargs)) | hass.async_create_task(async_device_tracker_scan(None)) |
async_track_time_interval(hass, async_device_tracker_scan, interval) | random_line_split |
initial.rs | use git::Config;
use helpers;
use author::Author;
use author_selection::AuthorSelection;
use xdg::BaseDirectories;
use Result;
use CannotProcede;
pub fn initial() -> Result<Config> {
let xdg_dirs = BaseDirectories::with_prefix("partners")?;
let config_path = xdg_dirs.place_config_file("partners.cfg")?;
if !config_path.exists() {
println!("config file not found at {:?}", config_path);
if helpers::confirm("do you want to create it?")? {
helpers::create_config_file(&config_path)?;
} else {
Err(CannotProcede)?;
}
}
let partners_config = Config::File(config_path);
let author = match Config::Local.current_author() {
Ok(author) => author,
Err(_) => {
println!("It seems like the current git author is not known to partners");
let nick = Config::Local.nick().or_else(|_| {
println!("Please enter a nickname you would like to use");
helpers::query_required("Nick")
})?;
let name = Config::Local.user_name().or_else(|_| {
println!("Unable to determine your name from git configuration");
helpers::query_required("Name")
})?;
let email = Config::Local.user_email().ok().or_else(|| {
println!("Unable to determine your email address from git configuration");
helpers::query_optional("Email").ok().and_then(|v| v)
});
let author = Author { nick: nick, name: name, email: email };
let selection = AuthorSelection::new(&partners_config, vec![author.clone()])?;
Config::Global.set_current_author(&selection)?;
author
}
};
if partners_config.find_author(&author.nick).is_none() |
Ok(partners_config)
} | {
partners_config.add_author(&author)?;
} | conditional_block |
initial.rs | use git::Config;
use helpers;
use author::Author;
use author_selection::AuthorSelection;
use xdg::BaseDirectories;
use Result;
use CannotProcede;
pub fn initial() -> Result<Config> {
let xdg_dirs = BaseDirectories::with_prefix("partners")?;
let config_path = xdg_dirs.place_config_file("partners.cfg")?; | if helpers::confirm("do you want to create it?")? {
helpers::create_config_file(&config_path)?;
} else {
Err(CannotProcede)?;
}
}
let partners_config = Config::File(config_path);
let author = match Config::Local.current_author() {
Ok(author) => author,
Err(_) => {
println!("It seems like the current git author is not known to partners");
let nick = Config::Local.nick().or_else(|_| {
println!("Please enter a nickname you would like to use");
helpers::query_required("Nick")
})?;
let name = Config::Local.user_name().or_else(|_| {
println!("Unable to determine your name from git configuration");
helpers::query_required("Name")
})?;
let email = Config::Local.user_email().ok().or_else(|| {
println!("Unable to determine your email address from git configuration");
helpers::query_optional("Email").ok().and_then(|v| v)
});
let author = Author { nick: nick, name: name, email: email };
let selection = AuthorSelection::new(&partners_config, vec![author.clone()])?;
Config::Global.set_current_author(&selection)?;
author
}
};
if partners_config.find_author(&author.nick).is_none() {
partners_config.add_author(&author)?;
}
Ok(partners_config)
} |
if !config_path.exists() {
println!("config file not found at {:?}", config_path);
| random_line_split |
initial.rs | use git::Config;
use helpers;
use author::Author;
use author_selection::AuthorSelection;
use xdg::BaseDirectories;
use Result;
use CannotProcede;
pub fn initial() -> Result<Config> | {
let xdg_dirs = BaseDirectories::with_prefix("partners")?;
let config_path = xdg_dirs.place_config_file("partners.cfg")?;
if !config_path.exists() {
println!("config file not found at {:?}", config_path);
if helpers::confirm("do you want to create it?")? {
helpers::create_config_file(&config_path)?;
} else {
Err(CannotProcede)?;
}
}
let partners_config = Config::File(config_path);
let author = match Config::Local.current_author() {
Ok(author) => author,
Err(_) => {
println!("It seems like the current git author is not known to partners");
let nick = Config::Local.nick().or_else(|_| {
println!("Please enter a nickname you would like to use");
helpers::query_required("Nick")
})?;
let name = Config::Local.user_name().or_else(|_| {
println!("Unable to determine your name from git configuration");
helpers::query_required("Name")
})?;
let email = Config::Local.user_email().ok().or_else(|| {
println!("Unable to determine your email address from git configuration");
helpers::query_optional("Email").ok().and_then(|v| v)
});
let author = Author { nick: nick, name: name, email: email };
let selection = AuthorSelection::new(&partners_config, vec![author.clone()])?;
Config::Global.set_current_author(&selection)?;
author
}
};
if partners_config.find_author(&author.nick).is_none() {
partners_config.add_author(&author)?;
}
Ok(partners_config)
} | identifier_body | |
initial.rs | use git::Config;
use helpers;
use author::Author;
use author_selection::AuthorSelection;
use xdg::BaseDirectories;
use Result;
use CannotProcede;
pub fn | () -> Result<Config> {
let xdg_dirs = BaseDirectories::with_prefix("partners")?;
let config_path = xdg_dirs.place_config_file("partners.cfg")?;
if !config_path.exists() {
println!("config file not found at {:?}", config_path);
if helpers::confirm("do you want to create it?")? {
helpers::create_config_file(&config_path)?;
} else {
Err(CannotProcede)?;
}
}
let partners_config = Config::File(config_path);
let author = match Config::Local.current_author() {
Ok(author) => author,
Err(_) => {
println!("It seems like the current git author is not known to partners");
let nick = Config::Local.nick().or_else(|_| {
println!("Please enter a nickname you would like to use");
helpers::query_required("Nick")
})?;
let name = Config::Local.user_name().or_else(|_| {
println!("Unable to determine your name from git configuration");
helpers::query_required("Name")
})?;
let email = Config::Local.user_email().ok().or_else(|| {
println!("Unable to determine your email address from git configuration");
helpers::query_optional("Email").ok().and_then(|v| v)
});
let author = Author { nick: nick, name: name, email: email };
let selection = AuthorSelection::new(&partners_config, vec![author.clone()])?;
Config::Global.set_current_author(&selection)?;
author
}
};
if partners_config.find_author(&author.nick).is_none() {
partners_config.add_author(&author)?;
}
Ok(partners_config)
} | initial | identifier_name |
express.js | 'use strict';
var env = require('./env.js'),
express = require('express'),
morgan = require('morgan'),
compression = require('compression'),
bodyParser = require('body-parser'),
methodOverride = require('method-override'),
session = require('express-session'),
passport = require('passport');
module.exports = function () {
var app = express();
if (process.env.NODE_ENV === 'development') {
app.use(morgan('dev'));
} else if (process.env.NODE_ENV === 'development') {
app.use(compression);
}
app.use(bodyParser.urlencoded({
extended: true
}));
app.use(bodyParser.json());
app.use(methodOverride());
app.use(session({
saveUninitialized: true,
resave: true,
secret: env.sessionSecret
}));
app.use(passport.initialize());
app.use(passport.session());
require('../app/route/index.server.route.js')(app);
require('../app/route/user.server.route.js')(app); |
app.use(express.static('./public'));
return app;
}; | require('../app/route/post.server.route.js')(app);
require('../app/route/tag.server.route.js')(app);
require('../app/route/comment.server.route.js')(app); | random_line_split |
mod.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#[cfg(any(target_os = "linux", target_os = "android"))]
pub use crate::platform::freetype::{font, font_context};
#[cfg(any(target_os = "linux", target_os = "android"))]
pub use crate::platform::freetype::{font_list, font_template};
|
#[cfg(target_os = "macos")]
pub use crate::platform::macos::{font, font_context, font_list, font_template};
#[cfg(any(target_os = "linux", target_os = "android"))]
mod freetype {
use libc::c_char;
use std::ffi::CStr;
use std::str;
/// Creates a String from the given null-terminated buffer.
/// Panics if the buffer does not contain UTF-8.
unsafe fn c_str_to_string(s: *const c_char) -> String {
str::from_utf8(CStr::from_ptr(s).to_bytes())
.unwrap()
.to_owned()
}
pub mod font;
pub mod font_context;
#[cfg(target_os = "linux")]
pub mod font_list;
#[cfg(target_os = "android")]
mod android {
pub mod font_list;
}
#[cfg(target_os = "android")]
pub use self::android::font_list;
#[cfg(any(target_os = "linux", target_os = "android"))]
pub mod font_template;
}
#[cfg(target_os = "macos")]
mod macos {
pub mod font;
pub mod font_context;
pub mod font_list;
pub mod font_template;
}
#[cfg(target_os = "windows")]
mod windows {
pub mod font;
pub mod font_context;
pub mod font_list;
pub mod font_template;
} | #[cfg(target_os = "windows")]
pub use crate::platform::windows::{font, font_context, font_list, font_template}; | random_line_split |
mod.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#[cfg(any(target_os = "linux", target_os = "android"))]
pub use crate::platform::freetype::{font, font_context};
#[cfg(any(target_os = "linux", target_os = "android"))]
pub use crate::platform::freetype::{font_list, font_template};
#[cfg(target_os = "windows")]
pub use crate::platform::windows::{font, font_context, font_list, font_template};
#[cfg(target_os = "macos")]
pub use crate::platform::macos::{font, font_context, font_list, font_template};
#[cfg(any(target_os = "linux", target_os = "android"))]
mod freetype {
use libc::c_char;
use std::ffi::CStr;
use std::str;
/// Creates a String from the given null-terminated buffer.
/// Panics if the buffer does not contain UTF-8.
unsafe fn c_str_to_string(s: *const c_char) -> String |
pub mod font;
pub mod font_context;
#[cfg(target_os = "linux")]
pub mod font_list;
#[cfg(target_os = "android")]
mod android {
pub mod font_list;
}
#[cfg(target_os = "android")]
pub use self::android::font_list;
#[cfg(any(target_os = "linux", target_os = "android"))]
pub mod font_template;
}
#[cfg(target_os = "macos")]
mod macos {
pub mod font;
pub mod font_context;
pub mod font_list;
pub mod font_template;
}
#[cfg(target_os = "windows")]
mod windows {
pub mod font;
pub mod font_context;
pub mod font_list;
pub mod font_template;
}
| {
str::from_utf8(CStr::from_ptr(s).to_bytes())
.unwrap()
.to_owned()
} | identifier_body |
mod.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#[cfg(any(target_os = "linux", target_os = "android"))]
pub use crate::platform::freetype::{font, font_context};
#[cfg(any(target_os = "linux", target_os = "android"))]
pub use crate::platform::freetype::{font_list, font_template};
#[cfg(target_os = "windows")]
pub use crate::platform::windows::{font, font_context, font_list, font_template};
#[cfg(target_os = "macos")]
pub use crate::platform::macos::{font, font_context, font_list, font_template};
#[cfg(any(target_os = "linux", target_os = "android"))]
mod freetype {
use libc::c_char;
use std::ffi::CStr;
use std::str;
/// Creates a String from the given null-terminated buffer.
/// Panics if the buffer does not contain UTF-8.
unsafe fn | (s: *const c_char) -> String {
str::from_utf8(CStr::from_ptr(s).to_bytes())
.unwrap()
.to_owned()
}
pub mod font;
pub mod font_context;
#[cfg(target_os = "linux")]
pub mod font_list;
#[cfg(target_os = "android")]
mod android {
pub mod font_list;
}
#[cfg(target_os = "android")]
pub use self::android::font_list;
#[cfg(any(target_os = "linux", target_os = "android"))]
pub mod font_template;
}
#[cfg(target_os = "macos")]
mod macos {
pub mod font;
pub mod font_context;
pub mod font_list;
pub mod font_template;
}
#[cfg(target_os = "windows")]
mod windows {
pub mod font;
pub mod font_context;
pub mod font_list;
pub mod font_template;
}
| c_str_to_string | identifier_name |
base.rs | use indexmap::IndexMap;
use crate::{
ast::{Directive, FromInputValue, InputValue, Selection},
executor::{ExecutionResult, Executor, Registry, Variables},
parser::Spanning,
schema::meta::{Argument, MetaType},
value::{DefaultScalarValue, Object, ScalarValue, Value},
FieldResult, GraphQLEnum, IntoFieldError,
};
/// GraphQL type kind
///
/// The GraphQL specification defines a number of type kinds - the meta type\
/// of a type.
#[derive(Clone, Eq, PartialEq, Debug, GraphQLEnum)]
#[graphql(name = "__TypeKind", internal)]
pub enum TypeKind {
/// ## Scalar types
///
/// Scalar types appear as the leaf nodes of GraphQL queries. Strings,\
/// numbers, and booleans are the built in types, and while it's possible\
/// to define your own, it's relatively uncommon.
Scalar,
/// ## Object types
///
/// The most common type to be implemented by users. Objects have fields\
/// and can implement interfaces.
Object,
/// ## Interface types
///
/// Interface types are used to represent overlapping fields between\
/// multiple types, and can be queried for their concrete type.
Interface,
/// ## Union types
///
/// Unions are similar to interfaces but can not contain any fields on\
/// their own.
Union,
/// ## Enum types
///
/// Like scalars, enum types appear as the leaf nodes of GraphQL queries.
Enum,
/// ## Input objects
///
/// Represents complex values provided in queries _into_ the system.
#[graphql(name = "INPUT_OBJECT")]
InputObject,
/// ## List types
///
/// Represent lists of other types. This library provides implementations\
/// for vectors and slices, but other Rust types can be extended to serve\
/// as GraphQL lists.
List,
/// ## Non-null types
///
/// In GraphQL, nullable types are the default. By putting a `!` after a\
/// type, it becomes non-nullable.
#[graphql(name = "NON_NULL")]
NonNull,
}
/// Field argument container
#[derive(Debug)]
pub struct Arguments<'a, S = DefaultScalarValue> {
args: Option<IndexMap<&'a str, InputValue<S>>>,
}
impl<'a, S> Arguments<'a, S> {
#[doc(hidden)]
pub fn new(
mut args: Option<IndexMap<&'a str, InputValue<S>>>,
meta_args: &'a Option<Vec<Argument<S>>>,
) -> Self
where
S: Clone,
{
if meta_args.is_some() && args.is_none() {
args = Some(IndexMap::new());
}
if let (Some(args), Some(meta_args)) = (&mut args, meta_args) {
for arg in meta_args {
let arg_name = arg.name.as_str();
if args.get(arg_name).map_or(true, InputValue::is_null) {
if let Some(val) = arg.default_value.as_ref() {
args.insert(arg_name, val.clone());
}
}
}
}
Self { args }
}
/// Gets an argument by the given `name` and converts it into the desired
/// type.
///
/// If the argument is found, or a default argument has been provided, the
/// given [`InputValue`] will be converted into the type `T`.
///
/// Returns [`None`] if an argument with such `name` is not present.
///
/// # Errors
///
/// If the [`FromInputValue`] conversion fails.
pub fn get<T>(&self, name: &str) -> FieldResult<Option<T>, S>
where
T: FromInputValue<S>,
T::Error: IntoFieldError<S>,
{
self.args
.as_ref()
.and_then(|args| args.get(name))
.map(InputValue::convert)
.transpose()
.map_err(IntoFieldError::into_field_error)
}
}
/// Primary trait used to resolve GraphQL values.
///
/// All the convenience macros ultimately expand into an implementation of this trait for the given
/// type. The macros remove duplicated definitions of fields and arguments, and add type checks on
/// all resolving functions automatically. This can all be done manually too.
///
/// [`GraphQLValue`] provides _some_ convenience methods for you, in the form of optional trait
/// methods. The `type_name` method is mandatory, but other than that, it depends on what type
/// you're exposing:
/// - [Scalars][4], [enums][5], [lists][6] and [non-null wrappers][7] only require `resolve`.
/// - [Interfaces][1] and [objects][3] require `resolve_field` _or_ `resolve` if you want to
/// implement a custom resolution logic (probably not).
/// - [Interfaces][1] and [unions][2] require `resolve_into_type` and `concrete_type_name`.
/// - [Input objects][8] do not require anything.
///
/// # Object safety
///
/// This trait is [object safe][11], therefore may be turned into a [trait object][12] and used for | /// # Example
///
/// This trait is intended to be used in a conjunction with a [`GraphQLType`] trait. See the example
/// in the documentation of a [`GraphQLType`] trait.
///
/// [1]: https://spec.graphql.org/June2018/#sec-Interfaces
/// [2]: https://spec.graphql.org/June2018/#sec-Unions
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
/// [4]: https://spec.graphql.org/June2018/#sec-Scalars
/// [5]: https://spec.graphql.org/June2018/#sec-Enums
/// [6]: https://spec.graphql.org/June2018/#sec-Type-System.List
/// [7]: https://spec.graphql.org/June2018/#sec-Type-System.Non-Null
/// [8]: https://spec.graphql.org/June2018/#sec-Input-Objects
/// [11]: https://doc.rust-lang.org/reference/items/traits.html#object-safety
/// [12]: https://doc.rust-lang.org/reference/types/trait-object.html
pub trait GraphQLValue<S = DefaultScalarValue>
where
S: ScalarValue,
{
/// Context type for this [`GraphQLValue`].
///
/// It's threaded through a query execution to all affected nodes, and can be used to hold
/// common data, e.g. database connections or request session information.
type Context;
/// Type that may carry additional schema information for this [`GraphQLValue`].
///
/// It can be used to implement a schema that is partly dynamic, meaning that it can use
/// information that is not known at compile time, for instance by reading it from a
/// configuration file at startup.
type TypeInfo;
/// Returns name of the [`GraphQLType`] exposed by this [`GraphQLValue`].
///
/// This function will be called multiple times during a query execution. It must _not_ perform
/// any calculation and _always_ return the same value.
///
/// Usually, it should just call a [`GraphQLType::name`] inside.
fn type_name<'i>(&self, info: &'i Self::TypeInfo) -> Option<&'i str>;
/// Resolves the value of a single field on this [`GraphQLValue`].
///
/// The `arguments` object contains all the specified arguments, with default values being
/// substituted for the ones not provided by the query.
///
/// The `executor` can be used to drive selections into sub-[objects][3].
///
/// # Panics
///
/// The default implementation panics.
///
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
fn resolve_field(
&self,
_info: &Self::TypeInfo,
_field_name: &str,
_arguments: &Arguments<S>,
_executor: &Executor<Self::Context, S>,
) -> ExecutionResult<S> {
panic!("GraphQLValue::resolve_field() must be implemented by objects and interfaces");
}
/// Resolves this [`GraphQLValue`] (being an [interface][1] or an [union][2]) into a concrete
/// downstream [object][3] type.
///
/// Tries to resolve this [`GraphQLValue`] into the provided `type_name`. If the type matches,
/// then passes the instance along to [`Executor::resolve`].
///
/// # Panics
///
/// The default implementation panics.
///
/// [1]: https://spec.graphql.org/June2018/#sec-Interfaces
/// [2]: https://spec.graphql.org/June2018/#sec-Unions
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
fn resolve_into_type(
&self,
info: &Self::TypeInfo,
type_name: &str,
selection_set: Option<&[Selection<S>]>,
executor: &Executor<Self::Context, S>,
) -> ExecutionResult<S> {
if self.type_name(info).unwrap() == type_name {
self.resolve(info, selection_set, executor)
} else {
panic!(
"GraphQLValue::resolve_into_type() must be implemented by unions and interfaces"
);
}
}
/// Returns the concrete [`GraphQLType`] name for this [`GraphQLValue`] being an [interface][1],
/// an [union][2] or an [object][3].
///
/// # Panics
///
/// The default implementation panics.
///
/// [1]: https://spec.graphql.org/June2018/#sec-Interfaces
/// [2]: https://spec.graphql.org/June2018/#sec-Unions
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
#[allow(unused_variables)]
fn concrete_type_name(&self, context: &Self::Context, info: &Self::TypeInfo) -> String {
panic!(
"GraphQLValue::concrete_type_name() must be implemented by unions, interfaces \
and objects",
);
}
/// Resolves the provided `selection_set` against this [`GraphQLValue`].
///
/// For non-[object][3] types, the `selection_set` will be [`None`] and the value should simply
/// be returned.
///
/// For [objects][3], all fields in the `selection_set` should be resolved. The default
/// implementation uses [`GraphQLValue::resolve_field`] to resolve all fields, including those
/// through a fragment expansion.
///
/// Since the [GraphQL spec specifies][0] that errors during field processing should result in
/// a null-value, this might return `Ok(Null)` in case of a failure. Errors are recorded
/// internally.
///
/// # Panics
///
/// The default implementation panics, if `selection_set` is [`None`].
///
/// [0]: https://spec.graphql.org/June2018/#sec-Errors-and-Non-Nullability
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
fn resolve(
&self,
info: &Self::TypeInfo,
selection_set: Option<&[Selection<S>]>,
executor: &Executor<Self::Context, S>,
) -> ExecutionResult<S> {
if let Some(sel) = selection_set {
let mut res = Object::with_capacity(sel.len());
Ok(
if resolve_selection_set_into(self, info, sel, executor, &mut res) {
Value::Object(res)
} else {
Value::null()
},
)
} else {
panic!("GraphQLValue::resolve() must be implemented by non-object output types");
}
}
}
crate::sa::assert_obj_safe!(GraphQLValue<Context = (), TypeInfo = ()>);
/// Helper alias for naming [trait objects][1] of [`GraphQLValue`].
///
/// [1]: https://doc.rust-lang.org/reference/types/trait-object.html
pub type DynGraphQLValue<S, C, TI> =
dyn GraphQLValue<S, Context = C, TypeInfo = TI> + Send + Sync + 'static;
/// Primary trait used to expose Rust types in a GraphQL schema.
///
/// All of the convenience macros ultimately expand into an implementation of
/// this trait for the given type. This can all be done manually.
///
/// # Example
///
/// Manually deriving an [object][3] is straightforward, but tedious. This is the equivalent of the
/// `User` object as shown in the example in the documentation root:
/// ```
/// # use std::collections::HashMap;
/// use juniper::{
/// meta::MetaType, Arguments, Context, DefaultScalarValue, Executor, ExecutionResult,
/// FieldResult, GraphQLType, GraphQLValue, Registry,
/// };
///
/// #[derive(Debug)]
/// struct Database { users: HashMap<String, User> }
/// impl Context for Database {}
///
/// #[derive(Debug)]
/// struct User { id: String, name: String, friend_ids: Vec<String> }
///
/// impl GraphQLType<DefaultScalarValue> for User {
/// fn name(_: &()) -> Option<&'static str> {
/// Some("User")
/// }
///
/// fn meta<'r>(_: &(), registry: &mut Registry<'r>) -> MetaType<'r>
/// where DefaultScalarValue: 'r,
/// {
/// // First, we need to define all fields and their types on this type.
/// //
/// // If we need arguments, want to implement interfaces, or want to add documentation
/// // strings, we can do it here.
/// let fields = &[
/// registry.field::<&String>("id", &()),
/// registry.field::<&String>("name", &()),
/// registry.field::<Vec<&User>>("friends", &()),
/// ];
/// registry.build_object_type::<User>(&(), fields).into_meta()
/// }
/// }
///
/// impl GraphQLValue<DefaultScalarValue> for User {
/// type Context = Database;
/// type TypeInfo = ();
///
/// fn type_name(&self, _: &()) -> Option<&'static str> {
/// <User as GraphQLType>::name(&())
/// }
///
/// fn resolve_field(
/// &self,
/// info: &(),
/// field_name: &str,
/// args: &Arguments,
/// executor: &Executor<Database>
/// ) -> ExecutionResult
/// {
/// // Next, we need to match the queried field name. All arms of this match statement
/// // return `ExecutionResult`, which makes it hard to statically verify that the type you
/// // pass on to `executor.resolve*` actually matches the one that you defined in `meta()`
/// // above.
/// let database = executor.context();
/// match field_name {
/// // Because scalars are defined with another `Context` associated type, you must use
/// // `resolve_with_ctx` here to make the `executor` perform automatic type conversion
/// // of its argument.
/// "id" => executor.resolve_with_ctx(info, &self.id),
/// "name" => executor.resolve_with_ctx(info, &self.name),
///
/// // You pass a vector of `User` objects to `executor.resolve`, and it will determine
/// // which fields of the sub-objects to actually resolve based on the query.
/// // The `executor` instance keeps track of its current position in the query.
/// "friends" => executor.resolve(info,
/// &self.friend_ids.iter()
/// .filter_map(|id| database.users.get(id))
/// .collect::<Vec<_>>()
/// ),
///
/// // We can only reach this panic in two cases: either a mismatch between the defined
/// // schema in `meta()` above, or a validation failed because of a this library bug.
/// //
/// // In either of those two cases, the only reasonable way out is to panic the thread.
/// _ => panic!("Field {} not found on type User", field_name),
/// }
/// }
/// }
/// ```
///
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
pub trait GraphQLType<S = DefaultScalarValue>: GraphQLValue<S>
where
S: ScalarValue,
{
/// Returns name of this [`GraphQLType`] to expose.
///
/// This function will be called multiple times during schema construction. It must _not_
/// perform any calculation and _always_ return the same value.
fn name(info: &Self::TypeInfo) -> Option<&str>;
/// Returns [`MetaType`] representing this [`GraphQLType`].
fn meta<'r>(info: &Self::TypeInfo, registry: &mut Registry<'r, S>) -> MetaType<'r, S>
where
S: 'r;
}
/// Resolver logic for queries'/mutations' selection set.
/// Calls appropriate resolver method for each field or fragment found
/// and then merges returned values into `result` or pushes errors to
/// field's/fragment's sub executor.
///
/// Returns false if any errors occurred and true otherwise.
pub(crate) fn resolve_selection_set_into<T, S>(
instance: &T,
info: &T::TypeInfo,
selection_set: &[Selection<S>],
executor: &Executor<T::Context, S>,
result: &mut Object<S>,
) -> bool
where
T: GraphQLValue<S> + ?Sized,
S: ScalarValue,
{
let meta_type = executor
.schema()
.concrete_type_by_name(
instance
.type_name(info)
.expect("Resolving named type's selection set")
.as_ref(),
)
.expect("Type not found in schema");
for selection in selection_set {
match *selection {
Selection::Field(Spanning {
item: ref f,
start: ref start_pos,
..
}) => {
if is_excluded(&f.directives, executor.variables()) {
continue;
}
let response_name = f.alias.as_ref().unwrap_or(&f.name).item;
if f.name.item == "__typename" {
result.add_field(
response_name,
Value::scalar(instance.concrete_type_name(executor.context(), info)),
);
continue;
}
let meta_field = meta_type.field_by_name(f.name.item).unwrap_or_else(|| {
panic!(
"Field {} not found on type {:?}",
f.name.item,
meta_type.name()
)
});
let exec_vars = executor.variables();
let sub_exec = executor.field_sub_executor(
response_name,
f.name.item,
*start_pos,
f.selection_set.as_ref().map(|v| &v[..]),
);
let field_result = instance.resolve_field(
info,
f.name.item,
&Arguments::new(
f.arguments.as_ref().map(|m| {
m.item
.iter()
.map(|&(ref k, ref v)| {
(k.item, v.item.clone().into_const(exec_vars))
})
.collect()
}),
&meta_field.arguments,
),
&sub_exec,
);
match field_result {
Ok(Value::Null) if meta_field.field_type.is_non_null() => return false,
Ok(v) => merge_key_into(result, response_name, v),
Err(e) => {
sub_exec.push_error_at(e, *start_pos);
if meta_field.field_type.is_non_null() {
return false;
}
result.add_field(response_name, Value::null());
}
}
}
Selection::FragmentSpread(Spanning {
item: ref spread,
start: ref start_pos,
..
}) => {
if is_excluded(&spread.directives, executor.variables()) {
continue;
}
let fragment = &executor
.fragment_by_name(spread.name.item)
.expect("Fragment could not be found");
let sub_exec = executor.type_sub_executor(
Some(fragment.type_condition.item),
Some(&fragment.selection_set[..]),
);
let concrete_type_name = instance.concrete_type_name(sub_exec.context(), info);
let type_name = instance.type_name(info);
if executor
.schema()
.is_named_subtype(&concrete_type_name, fragment.type_condition.item)
|| Some(fragment.type_condition.item) == type_name
{
let sub_result = instance.resolve_into_type(
info,
&concrete_type_name,
Some(&fragment.selection_set[..]),
&sub_exec,
);
if let Ok(Value::Object(object)) = sub_result {
for (k, v) in object {
merge_key_into(result, &k, v);
}
} else if let Err(e) = sub_result {
sub_exec.push_error_at(e, *start_pos);
}
}
}
Selection::InlineFragment(Spanning {
item: ref fragment,
start: ref start_pos,
..
}) => {
if is_excluded(&fragment.directives, executor.variables()) {
continue;
}
let sub_exec = executor.type_sub_executor(
fragment.type_condition.as_ref().map(|c| c.item),
Some(&fragment.selection_set[..]),
);
if let Some(ref type_condition) = fragment.type_condition {
// Check whether the type matches the type condition.
let concrete_type_name = instance.concrete_type_name(sub_exec.context(), info);
if executor
.schema()
.is_named_subtype(&concrete_type_name, type_condition.item)
{
let sub_result = instance.resolve_into_type(
info,
&concrete_type_name,
Some(&fragment.selection_set[..]),
&sub_exec,
);
if let Ok(Value::Object(object)) = sub_result {
for (k, v) in object {
merge_key_into(result, &k, v);
}
} else if let Err(e) = sub_result {
sub_exec.push_error_at(e, *start_pos);
}
}
} else if !resolve_selection_set_into(
instance,
info,
&fragment.selection_set[..],
&sub_exec,
result,
) {
return false;
}
}
}
}
true
}
pub(super) fn is_excluded<S>(
directives: &Option<Vec<Spanning<Directive<S>>>>,
vars: &Variables<S>,
) -> bool
where
S: ScalarValue,
{
if let Some(ref directives) = *directives {
for &Spanning {
item: ref directive,
..
} in directives
{
let condition: bool = directive
.arguments
.iter()
.flat_map(|m| m.item.get("if"))
.flat_map(|v| v.item.clone().into_const(vars).convert())
.next()
.unwrap();
if (directive.name.item == "skip" && condition)
|| (directive.name.item == "include" && !condition)
{
return true;
}
}
}
false
}
/// Merges `response_name`/`value` pair into `result`
pub(crate) fn merge_key_into<S>(result: &mut Object<S>, response_name: &str, value: Value<S>) {
if let Some(v) = result.get_mut_field_value(response_name) {
match v {
Value::Object(dest_obj) => {
if let Value::Object(src_obj) = value {
merge_maps(dest_obj, src_obj);
}
}
Value::List(dest_list) => {
if let Value::List(src_list) = value {
dest_list
.iter_mut()
.zip(src_list.into_iter())
.for_each(|(d, s)| {
if let Value::Object(d_obj) = d {
if let Value::Object(s_obj) = s {
merge_maps(d_obj, s_obj);
}
}
});
}
}
_ => {}
}
return;
}
result.add_field(response_name, value);
}
/// Merges `src` object's fields into `dest`
fn merge_maps<S>(dest: &mut Object<S>, src: Object<S>) {
for (key, value) in src {
if dest.contains_field(&key) {
merge_key_into(dest, &key, value);
} else {
dest.add_field(key, value);
}
}
} | /// resolving GraphQL values even when a concrete Rust type is erased.
/// | random_line_split |
base.rs | use indexmap::IndexMap;
use crate::{
ast::{Directive, FromInputValue, InputValue, Selection},
executor::{ExecutionResult, Executor, Registry, Variables},
parser::Spanning,
schema::meta::{Argument, MetaType},
value::{DefaultScalarValue, Object, ScalarValue, Value},
FieldResult, GraphQLEnum, IntoFieldError,
};
/// GraphQL type kind
///
/// The GraphQL specification defines a number of type kinds - the meta type\
/// of a type.
#[derive(Clone, Eq, PartialEq, Debug, GraphQLEnum)]
#[graphql(name = "__TypeKind", internal)]
pub enum TypeKind {
/// ## Scalar types
///
/// Scalar types appear as the leaf nodes of GraphQL queries. Strings,\
/// numbers, and booleans are the built in types, and while it's possible\
/// to define your own, it's relatively uncommon.
Scalar,
/// ## Object types
///
/// The most common type to be implemented by users. Objects have fields\
/// and can implement interfaces.
Object,
/// ## Interface types
///
/// Interface types are used to represent overlapping fields between\
/// multiple types, and can be queried for their concrete type.
Interface,
/// ## Union types
///
/// Unions are similar to interfaces but can not contain any fields on\
/// their own.
Union,
/// ## Enum types
///
/// Like scalars, enum types appear as the leaf nodes of GraphQL queries.
Enum,
/// ## Input objects
///
/// Represents complex values provided in queries _into_ the system.
#[graphql(name = "INPUT_OBJECT")]
InputObject,
/// ## List types
///
/// Represent lists of other types. This library provides implementations\
/// for vectors and slices, but other Rust types can be extended to serve\
/// as GraphQL lists.
List,
/// ## Non-null types
///
/// In GraphQL, nullable types are the default. By putting a `!` after a\
/// type, it becomes non-nullable.
#[graphql(name = "NON_NULL")]
NonNull,
}
/// Field argument container
#[derive(Debug)]
pub struct Arguments<'a, S = DefaultScalarValue> {
args: Option<IndexMap<&'a str, InputValue<S>>>,
}
impl<'a, S> Arguments<'a, S> {
#[doc(hidden)]
pub fn new(
mut args: Option<IndexMap<&'a str, InputValue<S>>>,
meta_args: &'a Option<Vec<Argument<S>>>,
) -> Self
where
S: Clone,
{
if meta_args.is_some() && args.is_none() {
args = Some(IndexMap::new());
}
if let (Some(args), Some(meta_args)) = (&mut args, meta_args) {
for arg in meta_args {
let arg_name = arg.name.as_str();
if args.get(arg_name).map_or(true, InputValue::is_null) {
if let Some(val) = arg.default_value.as_ref() {
args.insert(arg_name, val.clone());
}
}
}
}
Self { args }
}
/// Gets an argument by the given `name` and converts it into the desired
/// type.
///
/// If the argument is found, or a default argument has been provided, the
/// given [`InputValue`] will be converted into the type `T`.
///
/// Returns [`None`] if an argument with such `name` is not present.
///
/// # Errors
///
/// If the [`FromInputValue`] conversion fails.
pub fn get<T>(&self, name: &str) -> FieldResult<Option<T>, S>
where
T: FromInputValue<S>,
T::Error: IntoFieldError<S>,
{
self.args
.as_ref()
.and_then(|args| args.get(name))
.map(InputValue::convert)
.transpose()
.map_err(IntoFieldError::into_field_error)
}
}
/// Primary trait used to resolve GraphQL values.
///
/// All the convenience macros ultimately expand into an implementation of this trait for the given
/// type. The macros remove duplicated definitions of fields and arguments, and add type checks on
/// all resolving functions automatically. This can all be done manually too.
///
/// [`GraphQLValue`] provides _some_ convenience methods for you, in the form of optional trait
/// methods. The `type_name` method is mandatory, but other than that, it depends on what type
/// you're exposing:
/// - [Scalars][4], [enums][5], [lists][6] and [non-null wrappers][7] only require `resolve`.
/// - [Interfaces][1] and [objects][3] require `resolve_field` _or_ `resolve` if you want to
/// implement a custom resolution logic (probably not).
/// - [Interfaces][1] and [unions][2] require `resolve_into_type` and `concrete_type_name`.
/// - [Input objects][8] do not require anything.
///
/// # Object safety
///
/// This trait is [object safe][11], therefore may be turned into a [trait object][12] and used for
/// resolving GraphQL values even when a concrete Rust type is erased.
///
/// # Example
///
/// This trait is intended to be used in a conjunction with a [`GraphQLType`] trait. See the example
/// in the documentation of a [`GraphQLType`] trait.
///
/// [1]: https://spec.graphql.org/June2018/#sec-Interfaces
/// [2]: https://spec.graphql.org/June2018/#sec-Unions
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
/// [4]: https://spec.graphql.org/June2018/#sec-Scalars
/// [5]: https://spec.graphql.org/June2018/#sec-Enums
/// [6]: https://spec.graphql.org/June2018/#sec-Type-System.List
/// [7]: https://spec.graphql.org/June2018/#sec-Type-System.Non-Null
/// [8]: https://spec.graphql.org/June2018/#sec-Input-Objects
/// [11]: https://doc.rust-lang.org/reference/items/traits.html#object-safety
/// [12]: https://doc.rust-lang.org/reference/types/trait-object.html
pub trait GraphQLValue<S = DefaultScalarValue>
where
S: ScalarValue,
{
/// Context type for this [`GraphQLValue`].
///
/// It's threaded through a query execution to all affected nodes, and can be used to hold
/// common data, e.g. database connections or request session information.
type Context;
/// Type that may carry additional schema information for this [`GraphQLValue`].
///
/// It can be used to implement a schema that is partly dynamic, meaning that it can use
/// information that is not known at compile time, for instance by reading it from a
/// configuration file at startup.
type TypeInfo;
/// Returns name of the [`GraphQLType`] exposed by this [`GraphQLValue`].
///
/// This function will be called multiple times during a query execution. It must _not_ perform
/// any calculation and _always_ return the same value.
///
/// Usually, it should just call a [`GraphQLType::name`] inside.
fn type_name<'i>(&self, info: &'i Self::TypeInfo) -> Option<&'i str>;
/// Resolves the value of a single field on this [`GraphQLValue`].
///
/// The `arguments` object contains all the specified arguments, with default values being
/// substituted for the ones not provided by the query.
///
/// The `executor` can be used to drive selections into sub-[objects][3].
///
/// # Panics
///
/// The default implementation panics.
///
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
fn resolve_field(
&self,
_info: &Self::TypeInfo,
_field_name: &str,
_arguments: &Arguments<S>,
_executor: &Executor<Self::Context, S>,
) -> ExecutionResult<S> {
panic!("GraphQLValue::resolve_field() must be implemented by objects and interfaces");
}
/// Resolves this [`GraphQLValue`] (being an [interface][1] or an [union][2]) into a concrete
/// downstream [object][3] type.
///
/// Tries to resolve this [`GraphQLValue`] into the provided `type_name`. If the type matches,
/// then passes the instance along to [`Executor::resolve`].
///
/// # Panics
///
/// The default implementation panics.
///
/// [1]: https://spec.graphql.org/June2018/#sec-Interfaces
/// [2]: https://spec.graphql.org/June2018/#sec-Unions
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
fn resolve_into_type(
&self,
info: &Self::TypeInfo,
type_name: &str,
selection_set: Option<&[Selection<S>]>,
executor: &Executor<Self::Context, S>,
) -> ExecutionResult<S> {
if self.type_name(info).unwrap() == type_name {
self.resolve(info, selection_set, executor)
} else {
panic!(
"GraphQLValue::resolve_into_type() must be implemented by unions and interfaces"
);
}
}
/// Returns the concrete [`GraphQLType`] name for this [`GraphQLValue`] being an [interface][1],
/// an [union][2] or an [object][3].
///
/// # Panics
///
/// The default implementation panics.
///
/// [1]: https://spec.graphql.org/June2018/#sec-Interfaces
/// [2]: https://spec.graphql.org/June2018/#sec-Unions
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
#[allow(unused_variables)]
fn concrete_type_name(&self, context: &Self::Context, info: &Self::TypeInfo) -> String {
panic!(
"GraphQLValue::concrete_type_name() must be implemented by unions, interfaces \
and objects",
);
}
/// Resolves the provided `selection_set` against this [`GraphQLValue`].
///
/// For non-[object][3] types, the `selection_set` will be [`None`] and the value should simply
/// be returned.
///
/// For [objects][3], all fields in the `selection_set` should be resolved. The default
/// implementation uses [`GraphQLValue::resolve_field`] to resolve all fields, including those
/// through a fragment expansion.
///
/// Since the [GraphQL spec specifies][0] that errors during field processing should result in
/// a null-value, this might return `Ok(Null)` in case of a failure. Errors are recorded
/// internally.
///
/// # Panics
///
/// The default implementation panics, if `selection_set` is [`None`].
///
/// [0]: https://spec.graphql.org/June2018/#sec-Errors-and-Non-Nullability
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
fn resolve(
&self,
info: &Self::TypeInfo,
selection_set: Option<&[Selection<S>]>,
executor: &Executor<Self::Context, S>,
) -> ExecutionResult<S> {
if let Some(sel) = selection_set {
let mut res = Object::with_capacity(sel.len());
Ok(
if resolve_selection_set_into(self, info, sel, executor, &mut res) {
Value::Object(res)
} else {
Value::null()
},
)
} else {
panic!("GraphQLValue::resolve() must be implemented by non-object output types");
}
}
}
crate::sa::assert_obj_safe!(GraphQLValue<Context = (), TypeInfo = ()>);
/// Helper alias for naming [trait objects][1] of [`GraphQLValue`].
///
/// [1]: https://doc.rust-lang.org/reference/types/trait-object.html
pub type DynGraphQLValue<S, C, TI> =
dyn GraphQLValue<S, Context = C, TypeInfo = TI> + Send + Sync + 'static;
/// Primary trait used to expose Rust types in a GraphQL schema.
///
/// All of the convenience macros ultimately expand into an implementation of
/// this trait for the given type. This can all be done manually.
///
/// # Example
///
/// Manually deriving an [object][3] is straightforward, but tedious. This is the equivalent of the
/// `User` object as shown in the example in the documentation root:
/// ```
/// # use std::collections::HashMap;
/// use juniper::{
/// meta::MetaType, Arguments, Context, DefaultScalarValue, Executor, ExecutionResult,
/// FieldResult, GraphQLType, GraphQLValue, Registry,
/// };
///
/// #[derive(Debug)]
/// struct Database { users: HashMap<String, User> }
/// impl Context for Database {}
///
/// #[derive(Debug)]
/// struct User { id: String, name: String, friend_ids: Vec<String> }
///
/// impl GraphQLType<DefaultScalarValue> for User {
/// fn name(_: &()) -> Option<&'static str> {
/// Some("User")
/// }
///
/// fn meta<'r>(_: &(), registry: &mut Registry<'r>) -> MetaType<'r>
/// where DefaultScalarValue: 'r,
/// {
/// // First, we need to define all fields and their types on this type.
/// //
/// // If we need arguments, want to implement interfaces, or want to add documentation
/// // strings, we can do it here.
/// let fields = &[
/// registry.field::<&String>("id", &()),
/// registry.field::<&String>("name", &()),
/// registry.field::<Vec<&User>>("friends", &()),
/// ];
/// registry.build_object_type::<User>(&(), fields).into_meta()
/// }
/// }
///
/// impl GraphQLValue<DefaultScalarValue> for User {
/// type Context = Database;
/// type TypeInfo = ();
///
/// fn type_name(&self, _: &()) -> Option<&'static str> {
/// <User as GraphQLType>::name(&())
/// }
///
/// fn resolve_field(
/// &self,
/// info: &(),
/// field_name: &str,
/// args: &Arguments,
/// executor: &Executor<Database>
/// ) -> ExecutionResult
/// {
/// // Next, we need to match the queried field name. All arms of this match statement
/// // return `ExecutionResult`, which makes it hard to statically verify that the type you
/// // pass on to `executor.resolve*` actually matches the one that you defined in `meta()`
/// // above.
/// let database = executor.context();
/// match field_name {
/// // Because scalars are defined with another `Context` associated type, you must use
/// // `resolve_with_ctx` here to make the `executor` perform automatic type conversion
/// // of its argument.
/// "id" => executor.resolve_with_ctx(info, &self.id),
/// "name" => executor.resolve_with_ctx(info, &self.name),
///
/// // You pass a vector of `User` objects to `executor.resolve`, and it will determine
/// // which fields of the sub-objects to actually resolve based on the query.
/// // The `executor` instance keeps track of its current position in the query.
/// "friends" => executor.resolve(info,
/// &self.friend_ids.iter()
/// .filter_map(|id| database.users.get(id))
/// .collect::<Vec<_>>()
/// ),
///
/// // We can only reach this panic in two cases: either a mismatch between the defined
/// // schema in `meta()` above, or a validation failed because of a this library bug.
/// //
/// // In either of those two cases, the only reasonable way out is to panic the thread.
/// _ => panic!("Field {} not found on type User", field_name),
/// }
/// }
/// }
/// ```
///
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
pub trait GraphQLType<S = DefaultScalarValue>: GraphQLValue<S>
where
S: ScalarValue,
{
/// Returns name of this [`GraphQLType`] to expose.
///
/// This function will be called multiple times during schema construction. It must _not_
/// perform any calculation and _always_ return the same value.
fn name(info: &Self::TypeInfo) -> Option<&str>;
/// Returns [`MetaType`] representing this [`GraphQLType`].
fn meta<'r>(info: &Self::TypeInfo, registry: &mut Registry<'r, S>) -> MetaType<'r, S>
where
S: 'r;
}
/// Resolver logic for queries'/mutations' selection set.
/// Calls appropriate resolver method for each field or fragment found
/// and then merges returned values into `result` or pushes errors to
/// field's/fragment's sub executor.
///
/// Returns false if any errors occurred and true otherwise.
pub(crate) fn resolve_selection_set_into<T, S>(
instance: &T,
info: &T::TypeInfo,
selection_set: &[Selection<S>],
executor: &Executor<T::Context, S>,
result: &mut Object<S>,
) -> bool
where
T: GraphQLValue<S> + ?Sized,
S: ScalarValue,
{
let meta_type = executor
.schema()
.concrete_type_by_name(
instance
.type_name(info)
.expect("Resolving named type's selection set")
.as_ref(),
)
.expect("Type not found in schema");
for selection in selection_set {
match *selection {
Selection::Field(Spanning {
item: ref f,
start: ref start_pos,
..
}) => {
if is_excluded(&f.directives, executor.variables()) {
continue;
}
let response_name = f.alias.as_ref().unwrap_or(&f.name).item;
if f.name.item == "__typename" {
result.add_field(
response_name,
Value::scalar(instance.concrete_type_name(executor.context(), info)),
);
continue;
}
let meta_field = meta_type.field_by_name(f.name.item).unwrap_or_else(|| {
panic!(
"Field {} not found on type {:?}",
f.name.item,
meta_type.name()
)
});
let exec_vars = executor.variables();
let sub_exec = executor.field_sub_executor(
response_name,
f.name.item,
*start_pos,
f.selection_set.as_ref().map(|v| &v[..]),
);
let field_result = instance.resolve_field(
info,
f.name.item,
&Arguments::new(
f.arguments.as_ref().map(|m| {
m.item
.iter()
.map(|&(ref k, ref v)| {
(k.item, v.item.clone().into_const(exec_vars))
})
.collect()
}),
&meta_field.arguments,
),
&sub_exec,
);
match field_result {
Ok(Value::Null) if meta_field.field_type.is_non_null() => return false,
Ok(v) => merge_key_into(result, response_name, v),
Err(e) => {
sub_exec.push_error_at(e, *start_pos);
if meta_field.field_type.is_non_null() {
return false;
}
result.add_field(response_name, Value::null());
}
}
}
Selection::FragmentSpread(Spanning {
item: ref spread,
start: ref start_pos,
..
}) => |
Selection::InlineFragment(Spanning {
item: ref fragment,
start: ref start_pos,
..
}) => {
if is_excluded(&fragment.directives, executor.variables()) {
continue;
}
let sub_exec = executor.type_sub_executor(
fragment.type_condition.as_ref().map(|c| c.item),
Some(&fragment.selection_set[..]),
);
if let Some(ref type_condition) = fragment.type_condition {
// Check whether the type matches the type condition.
let concrete_type_name = instance.concrete_type_name(sub_exec.context(), info);
if executor
.schema()
.is_named_subtype(&concrete_type_name, type_condition.item)
{
let sub_result = instance.resolve_into_type(
info,
&concrete_type_name,
Some(&fragment.selection_set[..]),
&sub_exec,
);
if let Ok(Value::Object(object)) = sub_result {
for (k, v) in object {
merge_key_into(result, &k, v);
}
} else if let Err(e) = sub_result {
sub_exec.push_error_at(e, *start_pos);
}
}
} else if !resolve_selection_set_into(
instance,
info,
&fragment.selection_set[..],
&sub_exec,
result,
) {
return false;
}
}
}
}
true
}
pub(super) fn is_excluded<S>(
directives: &Option<Vec<Spanning<Directive<S>>>>,
vars: &Variables<S>,
) -> bool
where
S: ScalarValue,
{
if let Some(ref directives) = *directives {
for &Spanning {
item: ref directive,
..
} in directives
{
let condition: bool = directive
.arguments
.iter()
.flat_map(|m| m.item.get("if"))
.flat_map(|v| v.item.clone().into_const(vars).convert())
.next()
.unwrap();
if (directive.name.item == "skip" && condition)
|| (directive.name.item == "include" && !condition)
{
return true;
}
}
}
false
}
/// Merges `response_name`/`value` pair into `result`
pub(crate) fn merge_key_into<S>(result: &mut Object<S>, response_name: &str, value: Value<S>) {
if let Some(v) = result.get_mut_field_value(response_name) {
match v {
Value::Object(dest_obj) => {
if let Value::Object(src_obj) = value {
merge_maps(dest_obj, src_obj);
}
}
Value::List(dest_list) => {
if let Value::List(src_list) = value {
dest_list
.iter_mut()
.zip(src_list.into_iter())
.for_each(|(d, s)| {
if let Value::Object(d_obj) = d {
if let Value::Object(s_obj) = s {
merge_maps(d_obj, s_obj);
}
}
});
}
}
_ => {}
}
return;
}
result.add_field(response_name, value);
}
/// Merges `src` object's fields into `dest`
fn merge_maps<S>(dest: &mut Object<S>, src: Object<S>) {
for (key, value) in src {
if dest.contains_field(&key) {
merge_key_into(dest, &key, value);
} else {
dest.add_field(key, value);
}
}
}
| {
if is_excluded(&spread.directives, executor.variables()) {
continue;
}
let fragment = &executor
.fragment_by_name(spread.name.item)
.expect("Fragment could not be found");
let sub_exec = executor.type_sub_executor(
Some(fragment.type_condition.item),
Some(&fragment.selection_set[..]),
);
let concrete_type_name = instance.concrete_type_name(sub_exec.context(), info);
let type_name = instance.type_name(info);
if executor
.schema()
.is_named_subtype(&concrete_type_name, fragment.type_condition.item)
|| Some(fragment.type_condition.item) == type_name
{
let sub_result = instance.resolve_into_type(
info,
&concrete_type_name,
Some(&fragment.selection_set[..]),
&sub_exec,
);
if let Ok(Value::Object(object)) = sub_result {
for (k, v) in object {
merge_key_into(result, &k, v);
}
} else if let Err(e) = sub_result {
sub_exec.push_error_at(e, *start_pos);
}
}
} | conditional_block |
base.rs | use indexmap::IndexMap;
use crate::{
ast::{Directive, FromInputValue, InputValue, Selection},
executor::{ExecutionResult, Executor, Registry, Variables},
parser::Spanning,
schema::meta::{Argument, MetaType},
value::{DefaultScalarValue, Object, ScalarValue, Value},
FieldResult, GraphQLEnum, IntoFieldError,
};
/// GraphQL type kind
///
/// The GraphQL specification defines a number of type kinds - the meta type\
/// of a type.
#[derive(Clone, Eq, PartialEq, Debug, GraphQLEnum)]
#[graphql(name = "__TypeKind", internal)]
pub enum TypeKind {
/// ## Scalar types
///
/// Scalar types appear as the leaf nodes of GraphQL queries. Strings,\
/// numbers, and booleans are the built in types, and while it's possible\
/// to define your own, it's relatively uncommon.
Scalar,
/// ## Object types
///
/// The most common type to be implemented by users. Objects have fields\
/// and can implement interfaces.
Object,
/// ## Interface types
///
/// Interface types are used to represent overlapping fields between\
/// multiple types, and can be queried for their concrete type.
Interface,
/// ## Union types
///
/// Unions are similar to interfaces but can not contain any fields on\
/// their own.
Union,
/// ## Enum types
///
/// Like scalars, enum types appear as the leaf nodes of GraphQL queries.
Enum,
/// ## Input objects
///
/// Represents complex values provided in queries _into_ the system.
#[graphql(name = "INPUT_OBJECT")]
InputObject,
/// ## List types
///
/// Represent lists of other types. This library provides implementations\
/// for vectors and slices, but other Rust types can be extended to serve\
/// as GraphQL lists.
List,
/// ## Non-null types
///
/// In GraphQL, nullable types are the default. By putting a `!` after a\
/// type, it becomes non-nullable.
#[graphql(name = "NON_NULL")]
NonNull,
}
/// Field argument container
#[derive(Debug)]
pub struct Arguments<'a, S = DefaultScalarValue> {
args: Option<IndexMap<&'a str, InputValue<S>>>,
}
impl<'a, S> Arguments<'a, S> {
#[doc(hidden)]
pub fn new(
mut args: Option<IndexMap<&'a str, InputValue<S>>>,
meta_args: &'a Option<Vec<Argument<S>>>,
) -> Self
where
S: Clone,
{
if meta_args.is_some() && args.is_none() {
args = Some(IndexMap::new());
}
if let (Some(args), Some(meta_args)) = (&mut args, meta_args) {
for arg in meta_args {
let arg_name = arg.name.as_str();
if args.get(arg_name).map_or(true, InputValue::is_null) {
if let Some(val) = arg.default_value.as_ref() {
args.insert(arg_name, val.clone());
}
}
}
}
Self { args }
}
/// Gets an argument by the given `name` and converts it into the desired
/// type.
///
/// If the argument is found, or a default argument has been provided, the
/// given [`InputValue`] will be converted into the type `T`.
///
/// Returns [`None`] if an argument with such `name` is not present.
///
/// # Errors
///
/// If the [`FromInputValue`] conversion fails.
pub fn get<T>(&self, name: &str) -> FieldResult<Option<T>, S>
where
T: FromInputValue<S>,
T::Error: IntoFieldError<S>,
{
self.args
.as_ref()
.and_then(|args| args.get(name))
.map(InputValue::convert)
.transpose()
.map_err(IntoFieldError::into_field_error)
}
}
/// Primary trait used to resolve GraphQL values.
///
/// All the convenience macros ultimately expand into an implementation of this trait for the given
/// type. The macros remove duplicated definitions of fields and arguments, and add type checks on
/// all resolving functions automatically. This can all be done manually too.
///
/// [`GraphQLValue`] provides _some_ convenience methods for you, in the form of optional trait
/// methods. The `type_name` method is mandatory, but other than that, it depends on what type
/// you're exposing:
/// - [Scalars][4], [enums][5], [lists][6] and [non-null wrappers][7] only require `resolve`.
/// - [Interfaces][1] and [objects][3] require `resolve_field` _or_ `resolve` if you want to
/// implement a custom resolution logic (probably not).
/// - [Interfaces][1] and [unions][2] require `resolve_into_type` and `concrete_type_name`.
/// - [Input objects][8] do not require anything.
///
/// # Object safety
///
/// This trait is [object safe][11], therefore may be turned into a [trait object][12] and used for
/// resolving GraphQL values even when a concrete Rust type is erased.
///
/// # Example
///
/// This trait is intended to be used in a conjunction with a [`GraphQLType`] trait. See the example
/// in the documentation of a [`GraphQLType`] trait.
///
/// [1]: https://spec.graphql.org/June2018/#sec-Interfaces
/// [2]: https://spec.graphql.org/June2018/#sec-Unions
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
/// [4]: https://spec.graphql.org/June2018/#sec-Scalars
/// [5]: https://spec.graphql.org/June2018/#sec-Enums
/// [6]: https://spec.graphql.org/June2018/#sec-Type-System.List
/// [7]: https://spec.graphql.org/June2018/#sec-Type-System.Non-Null
/// [8]: https://spec.graphql.org/June2018/#sec-Input-Objects
/// [11]: https://doc.rust-lang.org/reference/items/traits.html#object-safety
/// [12]: https://doc.rust-lang.org/reference/types/trait-object.html
pub trait GraphQLValue<S = DefaultScalarValue>
where
S: ScalarValue,
{
/// Context type for this [`GraphQLValue`].
///
/// It's threaded through a query execution to all affected nodes, and can be used to hold
/// common data, e.g. database connections or request session information.
type Context;
/// Type that may carry additional schema information for this [`GraphQLValue`].
///
/// It can be used to implement a schema that is partly dynamic, meaning that it can use
/// information that is not known at compile time, for instance by reading it from a
/// configuration file at startup.
type TypeInfo;
/// Returns name of the [`GraphQLType`] exposed by this [`GraphQLValue`].
///
/// This function will be called multiple times during a query execution. It must _not_ perform
/// any calculation and _always_ return the same value.
///
/// Usually, it should just call a [`GraphQLType::name`] inside.
fn type_name<'i>(&self, info: &'i Self::TypeInfo) -> Option<&'i str>;
/// Resolves the value of a single field on this [`GraphQLValue`].
///
/// The `arguments` object contains all the specified arguments, with default values being
/// substituted for the ones not provided by the query.
///
/// The `executor` can be used to drive selections into sub-[objects][3].
///
/// # Panics
///
/// The default implementation panics.
///
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
fn resolve_field(
&self,
_info: &Self::TypeInfo,
_field_name: &str,
_arguments: &Arguments<S>,
_executor: &Executor<Self::Context, S>,
) -> ExecutionResult<S> {
panic!("GraphQLValue::resolve_field() must be implemented by objects and interfaces");
}
/// Resolves this [`GraphQLValue`] (being an [interface][1] or an [union][2]) into a concrete
/// downstream [object][3] type.
///
/// Tries to resolve this [`GraphQLValue`] into the provided `type_name`. If the type matches,
/// then passes the instance along to [`Executor::resolve`].
///
/// # Panics
///
/// The default implementation panics.
///
/// [1]: https://spec.graphql.org/June2018/#sec-Interfaces
/// [2]: https://spec.graphql.org/June2018/#sec-Unions
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
fn resolve_into_type(
&self,
info: &Self::TypeInfo,
type_name: &str,
selection_set: Option<&[Selection<S>]>,
executor: &Executor<Self::Context, S>,
) -> ExecutionResult<S> |
/// Returns the concrete [`GraphQLType`] name for this [`GraphQLValue`] being an [interface][1],
/// an [union][2] or an [object][3].
///
/// # Panics
///
/// The default implementation panics.
///
/// [1]: https://spec.graphql.org/June2018/#sec-Interfaces
/// [2]: https://spec.graphql.org/June2018/#sec-Unions
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
#[allow(unused_variables)]
fn concrete_type_name(&self, context: &Self::Context, info: &Self::TypeInfo) -> String {
panic!(
"GraphQLValue::concrete_type_name() must be implemented by unions, interfaces \
and objects",
);
}
/// Resolves the provided `selection_set` against this [`GraphQLValue`].
///
/// For non-[object][3] types, the `selection_set` will be [`None`] and the value should simply
/// be returned.
///
/// For [objects][3], all fields in the `selection_set` should be resolved. The default
/// implementation uses [`GraphQLValue::resolve_field`] to resolve all fields, including those
/// through a fragment expansion.
///
/// Since the [GraphQL spec specifies][0] that errors during field processing should result in
/// a null-value, this might return `Ok(Null)` in case of a failure. Errors are recorded
/// internally.
///
/// # Panics
///
/// The default implementation panics, if `selection_set` is [`None`].
///
/// [0]: https://spec.graphql.org/June2018/#sec-Errors-and-Non-Nullability
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
fn resolve(
&self,
info: &Self::TypeInfo,
selection_set: Option<&[Selection<S>]>,
executor: &Executor<Self::Context, S>,
) -> ExecutionResult<S> {
if let Some(sel) = selection_set {
let mut res = Object::with_capacity(sel.len());
Ok(
if resolve_selection_set_into(self, info, sel, executor, &mut res) {
Value::Object(res)
} else {
Value::null()
},
)
} else {
panic!("GraphQLValue::resolve() must be implemented by non-object output types");
}
}
}
crate::sa::assert_obj_safe!(GraphQLValue<Context = (), TypeInfo = ()>);
/// Helper alias for naming [trait objects][1] of [`GraphQLValue`].
///
/// [1]: https://doc.rust-lang.org/reference/types/trait-object.html
pub type DynGraphQLValue<S, C, TI> =
dyn GraphQLValue<S, Context = C, TypeInfo = TI> + Send + Sync + 'static;
/// Primary trait used to expose Rust types in a GraphQL schema.
///
/// All of the convenience macros ultimately expand into an implementation of
/// this trait for the given type. This can all be done manually.
///
/// # Example
///
/// Manually deriving an [object][3] is straightforward, but tedious. This is the equivalent of the
/// `User` object as shown in the example in the documentation root:
/// ```
/// # use std::collections::HashMap;
/// use juniper::{
/// meta::MetaType, Arguments, Context, DefaultScalarValue, Executor, ExecutionResult,
/// FieldResult, GraphQLType, GraphQLValue, Registry,
/// };
///
/// #[derive(Debug)]
/// struct Database { users: HashMap<String, User> }
/// impl Context for Database {}
///
/// #[derive(Debug)]
/// struct User { id: String, name: String, friend_ids: Vec<String> }
///
/// impl GraphQLType<DefaultScalarValue> for User {
/// fn name(_: &()) -> Option<&'static str> {
/// Some("User")
/// }
///
/// fn meta<'r>(_: &(), registry: &mut Registry<'r>) -> MetaType<'r>
/// where DefaultScalarValue: 'r,
/// {
/// // First, we need to define all fields and their types on this type.
/// //
/// // If we need arguments, want to implement interfaces, or want to add documentation
/// // strings, we can do it here.
/// let fields = &[
/// registry.field::<&String>("id", &()),
/// registry.field::<&String>("name", &()),
/// registry.field::<Vec<&User>>("friends", &()),
/// ];
/// registry.build_object_type::<User>(&(), fields).into_meta()
/// }
/// }
///
/// impl GraphQLValue<DefaultScalarValue> for User {
/// type Context = Database;
/// type TypeInfo = ();
///
/// fn type_name(&self, _: &()) -> Option<&'static str> {
/// <User as GraphQLType>::name(&())
/// }
///
/// fn resolve_field(
/// &self,
/// info: &(),
/// field_name: &str,
/// args: &Arguments,
/// executor: &Executor<Database>
/// ) -> ExecutionResult
/// {
/// // Next, we need to match the queried field name. All arms of this match statement
/// // return `ExecutionResult`, which makes it hard to statically verify that the type you
/// // pass on to `executor.resolve*` actually matches the one that you defined in `meta()`
/// // above.
/// let database = executor.context();
/// match field_name {
/// // Because scalars are defined with another `Context` associated type, you must use
/// // `resolve_with_ctx` here to make the `executor` perform automatic type conversion
/// // of its argument.
/// "id" => executor.resolve_with_ctx(info, &self.id),
/// "name" => executor.resolve_with_ctx(info, &self.name),
///
/// // You pass a vector of `User` objects to `executor.resolve`, and it will determine
/// // which fields of the sub-objects to actually resolve based on the query.
/// // The `executor` instance keeps track of its current position in the query.
/// "friends" => executor.resolve(info,
/// &self.friend_ids.iter()
/// .filter_map(|id| database.users.get(id))
/// .collect::<Vec<_>>()
/// ),
///
/// // We can only reach this panic in two cases: either a mismatch between the defined
/// // schema in `meta()` above, or a validation failed because of a this library bug.
/// //
/// // In either of those two cases, the only reasonable way out is to panic the thread.
/// _ => panic!("Field {} not found on type User", field_name),
/// }
/// }
/// }
/// ```
///
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
pub trait GraphQLType<S = DefaultScalarValue>: GraphQLValue<S>
where
S: ScalarValue,
{
/// Returns name of this [`GraphQLType`] to expose.
///
/// This function will be called multiple times during schema construction. It must _not_
/// perform any calculation and _always_ return the same value.
fn name(info: &Self::TypeInfo) -> Option<&str>;
/// Returns [`MetaType`] representing this [`GraphQLType`].
fn meta<'r>(info: &Self::TypeInfo, registry: &mut Registry<'r, S>) -> MetaType<'r, S>
where
S: 'r;
}
/// Resolver logic for queries'/mutations' selection set.
/// Calls appropriate resolver method for each field or fragment found
/// and then merges returned values into `result` or pushes errors to
/// field's/fragment's sub executor.
///
/// Returns false if any errors occurred and true otherwise.
pub(crate) fn resolve_selection_set_into<T, S>(
instance: &T,
info: &T::TypeInfo,
selection_set: &[Selection<S>],
executor: &Executor<T::Context, S>,
result: &mut Object<S>,
) -> bool
where
T: GraphQLValue<S> + ?Sized,
S: ScalarValue,
{
let meta_type = executor
.schema()
.concrete_type_by_name(
instance
.type_name(info)
.expect("Resolving named type's selection set")
.as_ref(),
)
.expect("Type not found in schema");
for selection in selection_set {
match *selection {
Selection::Field(Spanning {
item: ref f,
start: ref start_pos,
..
}) => {
if is_excluded(&f.directives, executor.variables()) {
continue;
}
let response_name = f.alias.as_ref().unwrap_or(&f.name).item;
if f.name.item == "__typename" {
result.add_field(
response_name,
Value::scalar(instance.concrete_type_name(executor.context(), info)),
);
continue;
}
let meta_field = meta_type.field_by_name(f.name.item).unwrap_or_else(|| {
panic!(
"Field {} not found on type {:?}",
f.name.item,
meta_type.name()
)
});
let exec_vars = executor.variables();
let sub_exec = executor.field_sub_executor(
response_name,
f.name.item,
*start_pos,
f.selection_set.as_ref().map(|v| &v[..]),
);
let field_result = instance.resolve_field(
info,
f.name.item,
&Arguments::new(
f.arguments.as_ref().map(|m| {
m.item
.iter()
.map(|&(ref k, ref v)| {
(k.item, v.item.clone().into_const(exec_vars))
})
.collect()
}),
&meta_field.arguments,
),
&sub_exec,
);
match field_result {
Ok(Value::Null) if meta_field.field_type.is_non_null() => return false,
Ok(v) => merge_key_into(result, response_name, v),
Err(e) => {
sub_exec.push_error_at(e, *start_pos);
if meta_field.field_type.is_non_null() {
return false;
}
result.add_field(response_name, Value::null());
}
}
}
Selection::FragmentSpread(Spanning {
item: ref spread,
start: ref start_pos,
..
}) => {
if is_excluded(&spread.directives, executor.variables()) {
continue;
}
let fragment = &executor
.fragment_by_name(spread.name.item)
.expect("Fragment could not be found");
let sub_exec = executor.type_sub_executor(
Some(fragment.type_condition.item),
Some(&fragment.selection_set[..]),
);
let concrete_type_name = instance.concrete_type_name(sub_exec.context(), info);
let type_name = instance.type_name(info);
if executor
.schema()
.is_named_subtype(&concrete_type_name, fragment.type_condition.item)
|| Some(fragment.type_condition.item) == type_name
{
let sub_result = instance.resolve_into_type(
info,
&concrete_type_name,
Some(&fragment.selection_set[..]),
&sub_exec,
);
if let Ok(Value::Object(object)) = sub_result {
for (k, v) in object {
merge_key_into(result, &k, v);
}
} else if let Err(e) = sub_result {
sub_exec.push_error_at(e, *start_pos);
}
}
}
Selection::InlineFragment(Spanning {
item: ref fragment,
start: ref start_pos,
..
}) => {
if is_excluded(&fragment.directives, executor.variables()) {
continue;
}
let sub_exec = executor.type_sub_executor(
fragment.type_condition.as_ref().map(|c| c.item),
Some(&fragment.selection_set[..]),
);
if let Some(ref type_condition) = fragment.type_condition {
// Check whether the type matches the type condition.
let concrete_type_name = instance.concrete_type_name(sub_exec.context(), info);
if executor
.schema()
.is_named_subtype(&concrete_type_name, type_condition.item)
{
let sub_result = instance.resolve_into_type(
info,
&concrete_type_name,
Some(&fragment.selection_set[..]),
&sub_exec,
);
if let Ok(Value::Object(object)) = sub_result {
for (k, v) in object {
merge_key_into(result, &k, v);
}
} else if let Err(e) = sub_result {
sub_exec.push_error_at(e, *start_pos);
}
}
} else if !resolve_selection_set_into(
instance,
info,
&fragment.selection_set[..],
&sub_exec,
result,
) {
return false;
}
}
}
}
true
}
pub(super) fn is_excluded<S>(
directives: &Option<Vec<Spanning<Directive<S>>>>,
vars: &Variables<S>,
) -> bool
where
S: ScalarValue,
{
if let Some(ref directives) = *directives {
for &Spanning {
item: ref directive,
..
} in directives
{
let condition: bool = directive
.arguments
.iter()
.flat_map(|m| m.item.get("if"))
.flat_map(|v| v.item.clone().into_const(vars).convert())
.next()
.unwrap();
if (directive.name.item == "skip" && condition)
|| (directive.name.item == "include" && !condition)
{
return true;
}
}
}
false
}
/// Merges `response_name`/`value` pair into `result`
pub(crate) fn merge_key_into<S>(result: &mut Object<S>, response_name: &str, value: Value<S>) {
if let Some(v) = result.get_mut_field_value(response_name) {
match v {
Value::Object(dest_obj) => {
if let Value::Object(src_obj) = value {
merge_maps(dest_obj, src_obj);
}
}
Value::List(dest_list) => {
if let Value::List(src_list) = value {
dest_list
.iter_mut()
.zip(src_list.into_iter())
.for_each(|(d, s)| {
if let Value::Object(d_obj) = d {
if let Value::Object(s_obj) = s {
merge_maps(d_obj, s_obj);
}
}
});
}
}
_ => {}
}
return;
}
result.add_field(response_name, value);
}
/// Merges `src` object's fields into `dest`
fn merge_maps<S>(dest: &mut Object<S>, src: Object<S>) {
for (key, value) in src {
if dest.contains_field(&key) {
merge_key_into(dest, &key, value);
} else {
dest.add_field(key, value);
}
}
}
| {
if self.type_name(info).unwrap() == type_name {
self.resolve(info, selection_set, executor)
} else {
panic!(
"GraphQLValue::resolve_into_type() must be implemented by unions and interfaces"
);
}
} | identifier_body |
base.rs | use indexmap::IndexMap;
use crate::{
ast::{Directive, FromInputValue, InputValue, Selection},
executor::{ExecutionResult, Executor, Registry, Variables},
parser::Spanning,
schema::meta::{Argument, MetaType},
value::{DefaultScalarValue, Object, ScalarValue, Value},
FieldResult, GraphQLEnum, IntoFieldError,
};
/// GraphQL type kind
///
/// The GraphQL specification defines a number of type kinds - the meta type\
/// of a type.
#[derive(Clone, Eq, PartialEq, Debug, GraphQLEnum)]
#[graphql(name = "__TypeKind", internal)]
pub enum | {
/// ## Scalar types
///
/// Scalar types appear as the leaf nodes of GraphQL queries. Strings,\
/// numbers, and booleans are the built in types, and while it's possible\
/// to define your own, it's relatively uncommon.
Scalar,
/// ## Object types
///
/// The most common type to be implemented by users. Objects have fields\
/// and can implement interfaces.
Object,
/// ## Interface types
///
/// Interface types are used to represent overlapping fields between\
/// multiple types, and can be queried for their concrete type.
Interface,
/// ## Union types
///
/// Unions are similar to interfaces but can not contain any fields on\
/// their own.
Union,
/// ## Enum types
///
/// Like scalars, enum types appear as the leaf nodes of GraphQL queries.
Enum,
/// ## Input objects
///
/// Represents complex values provided in queries _into_ the system.
#[graphql(name = "INPUT_OBJECT")]
InputObject,
/// ## List types
///
/// Represent lists of other types. This library provides implementations\
/// for vectors and slices, but other Rust types can be extended to serve\
/// as GraphQL lists.
List,
/// ## Non-null types
///
/// In GraphQL, nullable types are the default. By putting a `!` after a\
/// type, it becomes non-nullable.
#[graphql(name = "NON_NULL")]
NonNull,
}
/// Field argument container
#[derive(Debug)]
pub struct Arguments<'a, S = DefaultScalarValue> {
args: Option<IndexMap<&'a str, InputValue<S>>>,
}
impl<'a, S> Arguments<'a, S> {
#[doc(hidden)]
pub fn new(
mut args: Option<IndexMap<&'a str, InputValue<S>>>,
meta_args: &'a Option<Vec<Argument<S>>>,
) -> Self
where
S: Clone,
{
if meta_args.is_some() && args.is_none() {
args = Some(IndexMap::new());
}
if let (Some(args), Some(meta_args)) = (&mut args, meta_args) {
for arg in meta_args {
let arg_name = arg.name.as_str();
if args.get(arg_name).map_or(true, InputValue::is_null) {
if let Some(val) = arg.default_value.as_ref() {
args.insert(arg_name, val.clone());
}
}
}
}
Self { args }
}
/// Gets an argument by the given `name` and converts it into the desired
/// type.
///
/// If the argument is found, or a default argument has been provided, the
/// given [`InputValue`] will be converted into the type `T`.
///
/// Returns [`None`] if an argument with such `name` is not present.
///
/// # Errors
///
/// If the [`FromInputValue`] conversion fails.
pub fn get<T>(&self, name: &str) -> FieldResult<Option<T>, S>
where
T: FromInputValue<S>,
T::Error: IntoFieldError<S>,
{
self.args
.as_ref()
.and_then(|args| args.get(name))
.map(InputValue::convert)
.transpose()
.map_err(IntoFieldError::into_field_error)
}
}
/// Primary trait used to resolve GraphQL values.
///
/// All the convenience macros ultimately expand into an implementation of this trait for the given
/// type. The macros remove duplicated definitions of fields and arguments, and add type checks on
/// all resolving functions automatically. This can all be done manually too.
///
/// [`GraphQLValue`] provides _some_ convenience methods for you, in the form of optional trait
/// methods. The `type_name` method is mandatory, but other than that, it depends on what type
/// you're exposing:
/// - [Scalars][4], [enums][5], [lists][6] and [non-null wrappers][7] only require `resolve`.
/// - [Interfaces][1] and [objects][3] require `resolve_field` _or_ `resolve` if you want to
/// implement a custom resolution logic (probably not).
/// - [Interfaces][1] and [unions][2] require `resolve_into_type` and `concrete_type_name`.
/// - [Input objects][8] do not require anything.
///
/// # Object safety
///
/// This trait is [object safe][11], therefore may be turned into a [trait object][12] and used for
/// resolving GraphQL values even when a concrete Rust type is erased.
///
/// # Example
///
/// This trait is intended to be used in a conjunction with a [`GraphQLType`] trait. See the example
/// in the documentation of a [`GraphQLType`] trait.
///
/// [1]: https://spec.graphql.org/June2018/#sec-Interfaces
/// [2]: https://spec.graphql.org/June2018/#sec-Unions
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
/// [4]: https://spec.graphql.org/June2018/#sec-Scalars
/// [5]: https://spec.graphql.org/June2018/#sec-Enums
/// [6]: https://spec.graphql.org/June2018/#sec-Type-System.List
/// [7]: https://spec.graphql.org/June2018/#sec-Type-System.Non-Null
/// [8]: https://spec.graphql.org/June2018/#sec-Input-Objects
/// [11]: https://doc.rust-lang.org/reference/items/traits.html#object-safety
/// [12]: https://doc.rust-lang.org/reference/types/trait-object.html
pub trait GraphQLValue<S = DefaultScalarValue>
where
S: ScalarValue,
{
/// Context type for this [`GraphQLValue`].
///
/// It's threaded through a query execution to all affected nodes, and can be used to hold
/// common data, e.g. database connections or request session information.
type Context;
/// Type that may carry additional schema information for this [`GraphQLValue`].
///
/// It can be used to implement a schema that is partly dynamic, meaning that it can use
/// information that is not known at compile time, for instance by reading it from a
/// configuration file at startup.
type TypeInfo;
/// Returns name of the [`GraphQLType`] exposed by this [`GraphQLValue`].
///
/// This function will be called multiple times during a query execution. It must _not_ perform
/// any calculation and _always_ return the same value.
///
/// Usually, it should just call a [`GraphQLType::name`] inside.
fn type_name<'i>(&self, info: &'i Self::TypeInfo) -> Option<&'i str>;
/// Resolves the value of a single field on this [`GraphQLValue`].
///
/// The `arguments` object contains all the specified arguments, with default values being
/// substituted for the ones not provided by the query.
///
/// The `executor` can be used to drive selections into sub-[objects][3].
///
/// # Panics
///
/// The default implementation panics.
///
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
fn resolve_field(
&self,
_info: &Self::TypeInfo,
_field_name: &str,
_arguments: &Arguments<S>,
_executor: &Executor<Self::Context, S>,
) -> ExecutionResult<S> {
panic!("GraphQLValue::resolve_field() must be implemented by objects and interfaces");
}
/// Resolves this [`GraphQLValue`] (being an [interface][1] or an [union][2]) into a concrete
/// downstream [object][3] type.
///
/// Tries to resolve this [`GraphQLValue`] into the provided `type_name`. If the type matches,
/// then passes the instance along to [`Executor::resolve`].
///
/// # Panics
///
/// The default implementation panics.
///
/// [1]: https://spec.graphql.org/June2018/#sec-Interfaces
/// [2]: https://spec.graphql.org/June2018/#sec-Unions
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
fn resolve_into_type(
&self,
info: &Self::TypeInfo,
type_name: &str,
selection_set: Option<&[Selection<S>]>,
executor: &Executor<Self::Context, S>,
) -> ExecutionResult<S> {
if self.type_name(info).unwrap() == type_name {
self.resolve(info, selection_set, executor)
} else {
panic!(
"GraphQLValue::resolve_into_type() must be implemented by unions and interfaces"
);
}
}
/// Returns the concrete [`GraphQLType`] name for this [`GraphQLValue`] being an [interface][1],
/// an [union][2] or an [object][3].
///
/// # Panics
///
/// The default implementation panics.
///
/// [1]: https://spec.graphql.org/June2018/#sec-Interfaces
/// [2]: https://spec.graphql.org/June2018/#sec-Unions
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
#[allow(unused_variables)]
fn concrete_type_name(&self, context: &Self::Context, info: &Self::TypeInfo) -> String {
panic!(
"GraphQLValue::concrete_type_name() must be implemented by unions, interfaces \
and objects",
);
}
/// Resolves the provided `selection_set` against this [`GraphQLValue`].
///
/// For non-[object][3] types, the `selection_set` will be [`None`] and the value should simply
/// be returned.
///
/// For [objects][3], all fields in the `selection_set` should be resolved. The default
/// implementation uses [`GraphQLValue::resolve_field`] to resolve all fields, including those
/// through a fragment expansion.
///
/// Since the [GraphQL spec specifies][0] that errors during field processing should result in
/// a null-value, this might return `Ok(Null)` in case of a failure. Errors are recorded
/// internally.
///
/// # Panics
///
/// The default implementation panics, if `selection_set` is [`None`].
///
/// [0]: https://spec.graphql.org/June2018/#sec-Errors-and-Non-Nullability
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
fn resolve(
&self,
info: &Self::TypeInfo,
selection_set: Option<&[Selection<S>]>,
executor: &Executor<Self::Context, S>,
) -> ExecutionResult<S> {
if let Some(sel) = selection_set {
let mut res = Object::with_capacity(sel.len());
Ok(
if resolve_selection_set_into(self, info, sel, executor, &mut res) {
Value::Object(res)
} else {
Value::null()
},
)
} else {
panic!("GraphQLValue::resolve() must be implemented by non-object output types");
}
}
}
crate::sa::assert_obj_safe!(GraphQLValue<Context = (), TypeInfo = ()>);
/// Helper alias for naming [trait objects][1] of [`GraphQLValue`].
///
/// [1]: https://doc.rust-lang.org/reference/types/trait-object.html
pub type DynGraphQLValue<S, C, TI> =
dyn GraphQLValue<S, Context = C, TypeInfo = TI> + Send + Sync + 'static;
/// Primary trait used to expose Rust types in a GraphQL schema.
///
/// All of the convenience macros ultimately expand into an implementation of
/// this trait for the given type. This can all be done manually.
///
/// # Example
///
/// Manually deriving an [object][3] is straightforward, but tedious. This is the equivalent of the
/// `User` object as shown in the example in the documentation root:
/// ```
/// # use std::collections::HashMap;
/// use juniper::{
/// meta::MetaType, Arguments, Context, DefaultScalarValue, Executor, ExecutionResult,
/// FieldResult, GraphQLType, GraphQLValue, Registry,
/// };
///
/// #[derive(Debug)]
/// struct Database { users: HashMap<String, User> }
/// impl Context for Database {}
///
/// #[derive(Debug)]
/// struct User { id: String, name: String, friend_ids: Vec<String> }
///
/// impl GraphQLType<DefaultScalarValue> for User {
/// fn name(_: &()) -> Option<&'static str> {
/// Some("User")
/// }
///
/// fn meta<'r>(_: &(), registry: &mut Registry<'r>) -> MetaType<'r>
/// where DefaultScalarValue: 'r,
/// {
/// // First, we need to define all fields and their types on this type.
/// //
/// // If we need arguments, want to implement interfaces, or want to add documentation
/// // strings, we can do it here.
/// let fields = &[
/// registry.field::<&String>("id", &()),
/// registry.field::<&String>("name", &()),
/// registry.field::<Vec<&User>>("friends", &()),
/// ];
/// registry.build_object_type::<User>(&(), fields).into_meta()
/// }
/// }
///
/// impl GraphQLValue<DefaultScalarValue> for User {
/// type Context = Database;
/// type TypeInfo = ();
///
/// fn type_name(&self, _: &()) -> Option<&'static str> {
/// <User as GraphQLType>::name(&())
/// }
///
/// fn resolve_field(
/// &self,
/// info: &(),
/// field_name: &str,
/// args: &Arguments,
/// executor: &Executor<Database>
/// ) -> ExecutionResult
/// {
/// // Next, we need to match the queried field name. All arms of this match statement
/// // return `ExecutionResult`, which makes it hard to statically verify that the type you
/// // pass on to `executor.resolve*` actually matches the one that you defined in `meta()`
/// // above.
/// let database = executor.context();
/// match field_name {
/// // Because scalars are defined with another `Context` associated type, you must use
/// // `resolve_with_ctx` here to make the `executor` perform automatic type conversion
/// // of its argument.
/// "id" => executor.resolve_with_ctx(info, &self.id),
/// "name" => executor.resolve_with_ctx(info, &self.name),
///
/// // You pass a vector of `User` objects to `executor.resolve`, and it will determine
/// // which fields of the sub-objects to actually resolve based on the query.
/// // The `executor` instance keeps track of its current position in the query.
/// "friends" => executor.resolve(info,
/// &self.friend_ids.iter()
/// .filter_map(|id| database.users.get(id))
/// .collect::<Vec<_>>()
/// ),
///
/// // We can only reach this panic in two cases: either a mismatch between the defined
/// // schema in `meta()` above, or a validation failed because of a this library bug.
/// //
/// // In either of those two cases, the only reasonable way out is to panic the thread.
/// _ => panic!("Field {} not found on type User", field_name),
/// }
/// }
/// }
/// ```
///
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
pub trait GraphQLType<S = DefaultScalarValue>: GraphQLValue<S>
where
S: ScalarValue,
{
/// Returns name of this [`GraphQLType`] to expose.
///
/// This function will be called multiple times during schema construction. It must _not_
/// perform any calculation and _always_ return the same value.
fn name(info: &Self::TypeInfo) -> Option<&str>;
/// Returns [`MetaType`] representing this [`GraphQLType`].
fn meta<'r>(info: &Self::TypeInfo, registry: &mut Registry<'r, S>) -> MetaType<'r, S>
where
S: 'r;
}
/// Resolver logic for queries'/mutations' selection set.
/// Calls appropriate resolver method for each field or fragment found
/// and then merges returned values into `result` or pushes errors to
/// field's/fragment's sub executor.
///
/// Returns false if any errors occurred and true otherwise.
pub(crate) fn resolve_selection_set_into<T, S>(
instance: &T,
info: &T::TypeInfo,
selection_set: &[Selection<S>],
executor: &Executor<T::Context, S>,
result: &mut Object<S>,
) -> bool
where
T: GraphQLValue<S> + ?Sized,
S: ScalarValue,
{
let meta_type = executor
.schema()
.concrete_type_by_name(
instance
.type_name(info)
.expect("Resolving named type's selection set")
.as_ref(),
)
.expect("Type not found in schema");
for selection in selection_set {
match *selection {
Selection::Field(Spanning {
item: ref f,
start: ref start_pos,
..
}) => {
if is_excluded(&f.directives, executor.variables()) {
continue;
}
let response_name = f.alias.as_ref().unwrap_or(&f.name).item;
if f.name.item == "__typename" {
result.add_field(
response_name,
Value::scalar(instance.concrete_type_name(executor.context(), info)),
);
continue;
}
let meta_field = meta_type.field_by_name(f.name.item).unwrap_or_else(|| {
panic!(
"Field {} not found on type {:?}",
f.name.item,
meta_type.name()
)
});
let exec_vars = executor.variables();
let sub_exec = executor.field_sub_executor(
response_name,
f.name.item,
*start_pos,
f.selection_set.as_ref().map(|v| &v[..]),
);
let field_result = instance.resolve_field(
info,
f.name.item,
&Arguments::new(
f.arguments.as_ref().map(|m| {
m.item
.iter()
.map(|&(ref k, ref v)| {
(k.item, v.item.clone().into_const(exec_vars))
})
.collect()
}),
&meta_field.arguments,
),
&sub_exec,
);
match field_result {
Ok(Value::Null) if meta_field.field_type.is_non_null() => return false,
Ok(v) => merge_key_into(result, response_name, v),
Err(e) => {
sub_exec.push_error_at(e, *start_pos);
if meta_field.field_type.is_non_null() {
return false;
}
result.add_field(response_name, Value::null());
}
}
}
Selection::FragmentSpread(Spanning {
item: ref spread,
start: ref start_pos,
..
}) => {
if is_excluded(&spread.directives, executor.variables()) {
continue;
}
let fragment = &executor
.fragment_by_name(spread.name.item)
.expect("Fragment could not be found");
let sub_exec = executor.type_sub_executor(
Some(fragment.type_condition.item),
Some(&fragment.selection_set[..]),
);
let concrete_type_name = instance.concrete_type_name(sub_exec.context(), info);
let type_name = instance.type_name(info);
if executor
.schema()
.is_named_subtype(&concrete_type_name, fragment.type_condition.item)
|| Some(fragment.type_condition.item) == type_name
{
let sub_result = instance.resolve_into_type(
info,
&concrete_type_name,
Some(&fragment.selection_set[..]),
&sub_exec,
);
if let Ok(Value::Object(object)) = sub_result {
for (k, v) in object {
merge_key_into(result, &k, v);
}
} else if let Err(e) = sub_result {
sub_exec.push_error_at(e, *start_pos);
}
}
}
Selection::InlineFragment(Spanning {
item: ref fragment,
start: ref start_pos,
..
}) => {
if is_excluded(&fragment.directives, executor.variables()) {
continue;
}
let sub_exec = executor.type_sub_executor(
fragment.type_condition.as_ref().map(|c| c.item),
Some(&fragment.selection_set[..]),
);
if let Some(ref type_condition) = fragment.type_condition {
// Check whether the type matches the type condition.
let concrete_type_name = instance.concrete_type_name(sub_exec.context(), info);
if executor
.schema()
.is_named_subtype(&concrete_type_name, type_condition.item)
{
let sub_result = instance.resolve_into_type(
info,
&concrete_type_name,
Some(&fragment.selection_set[..]),
&sub_exec,
);
if let Ok(Value::Object(object)) = sub_result {
for (k, v) in object {
merge_key_into(result, &k, v);
}
} else if let Err(e) = sub_result {
sub_exec.push_error_at(e, *start_pos);
}
}
} else if !resolve_selection_set_into(
instance,
info,
&fragment.selection_set[..],
&sub_exec,
result,
) {
return false;
}
}
}
}
true
}
pub(super) fn is_excluded<S>(
directives: &Option<Vec<Spanning<Directive<S>>>>,
vars: &Variables<S>,
) -> bool
where
S: ScalarValue,
{
if let Some(ref directives) = *directives {
for &Spanning {
item: ref directive,
..
} in directives
{
let condition: bool = directive
.arguments
.iter()
.flat_map(|m| m.item.get("if"))
.flat_map(|v| v.item.clone().into_const(vars).convert())
.next()
.unwrap();
if (directive.name.item == "skip" && condition)
|| (directive.name.item == "include" && !condition)
{
return true;
}
}
}
false
}
/// Merges `response_name`/`value` pair into `result`
pub(crate) fn merge_key_into<S>(result: &mut Object<S>, response_name: &str, value: Value<S>) {
if let Some(v) = result.get_mut_field_value(response_name) {
match v {
Value::Object(dest_obj) => {
if let Value::Object(src_obj) = value {
merge_maps(dest_obj, src_obj);
}
}
Value::List(dest_list) => {
if let Value::List(src_list) = value {
dest_list
.iter_mut()
.zip(src_list.into_iter())
.for_each(|(d, s)| {
if let Value::Object(d_obj) = d {
if let Value::Object(s_obj) = s {
merge_maps(d_obj, s_obj);
}
}
});
}
}
_ => {}
}
return;
}
result.add_field(response_name, value);
}
/// Merges `src` object's fields into `dest`
fn merge_maps<S>(dest: &mut Object<S>, src: Object<S>) {
for (key, value) in src {
if dest.contains_field(&key) {
merge_key_into(dest, &key, value);
} else {
dest.add_field(key, value);
}
}
}
| TypeKind | identifier_name |
master.py | import argparse
import time
import subprocess
import logging
from deep_architect import search_logging as sl
from deep_architect import utils as ut
from deep_architect.contrib.communicators.mongo_communicator import MongoCommunicator
from search_space_factory import name_to_search_space_factory_fn
from searcher import name_to_searcher_fn
logging.basicConfig(format='[%(levelname)s] %(asctime)s: %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
BUCKET_NAME = 'deep_architect'
RESULTS_TOPIC = 'results'
ARCH_TOPIC = 'architectures'
KILL_SIGNAL = 'kill'
PUBLISH_SIGNAL = 'publish'
def process_config_and_args():
parser = argparse.ArgumentParser("MPI Job for architecture search")
parser.add_argument('--config',
'-c',
action='store',
dest='config_name',
default='normal')
parser.add_argument(
'--config-file',
action='store',
dest='config_file',
default=
'/deep_architect/examples/contrib/kubernetes/experiment_config.json')
parser.add_argument('--bucket',
'-b',
action='store',
dest='bucket',
default=BUCKET_NAME)
# Other arguments
parser.add_argument('--resume',
'-r',
action='store_true',
dest='resume',
default=False)
parser.add_argument('--mongo-host',
'-m',
action='store',
dest='mongo_host',
default='127.0.0.1')
parser.add_argument('--mongo-port',
'-p',
action='store',
dest='mongo_port',
default=27017)
parser.add_argument('--log',
choices=['debug', 'info', 'warning', 'error'],
default='info')
parser.add_argument('--repetition', default=0)
options = parser.parse_args()
numeric_level = getattr(logging, options.log.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % options.log)
logging.getLogger().setLevel(numeric_level)
configs = ut.read_jsonfile(options.config_file)
config = configs[options.config_name]
config['bucket'] = options.bucket
comm = MongoCommunicator(host=options.mongo_host,
port=options.mongo_port,
data_refresher=True,
refresh_period=10)
datasets = {
'cifar10': ('data/cifar10/', 10),
}
_, num_classes = datasets[config['dataset']]
search_space_factory = name_to_search_space_factory_fn[
config['search_space']](num_classes)
config['save_every'] = 1 if 'save_every' not in config else config[
'save_every']
searcher = name_to_searcher_fn[config['searcher']](
search_space_factory.get_search_space)
config['num_epochs'] = -1 if 'epochs' not in config else config['epochs']
config['num_samples'] = -1 if 'samples' not in config else config['samples']
# SET UP GOOGLE STORE FOLDER
config['search_name'] = config['search_name'] + '_' + str(
options.repetition)
search_logger = sl.SearchLogger(config['search_folder'],
config['search_name'])
search_data_folder = search_logger.get_search_data_folderpath()
config['save_filepath'] = ut.join_paths(
(search_data_folder, config['searcher_file_name']))
config['eval_path'] = sl.get_all_evaluations_folderpath(
config['search_folder'], config['search_name'])
config['full_search_folder'] = sl.get_search_folderpath(
config['search_folder'], config['search_name'])
config['eval_hparams'] = {} if 'eval_hparams' not in config else config[
'eval_hparams']
state = {
'epochs': 0,
'models_sampled': 0,
'finished': 0,
'best_accuracy': 0.0
}
if options.resume:
try:
download_folder(search_data_folder, config['full_search_folder'],
config['bucket'])
searcher.load_state(search_data_folder)
if ut.file_exists(config['save_filepath']):
old_state = ut.read_jsonfile(config['save_filepath'])
state['epochs'] = old_state['epochs']
state['models_sampled'] = old_state['models_sampled']
state['finished'] = old_state['finished']
state['best_accuracy'] = old_state['best_accuracy']
except:
pass
return comm, search_logger, searcher, state, config
def download_folder(folder, location, bucket):
logger.info('Downloading gs://%s/%s to %s/', bucket, folder, location)
subprocess.check_call([
'gsutil', '-m', 'cp', '-r', 'gs://' + bucket + '/' + folder,
location + '/'
])
def upload_folder(folder, location, bucket):
subprocess.check_call([
'gsutil', '-m', 'cp', '-r', folder,
'gs://' + bucket + '/' + location + '/'
])
def get_topic_name(topic, config):
return config['search_folder'] + '_' + config['search_name'] + '_' + topic
def update_searcher(message, comm, search_logger, searcher, state, config):
data = message['data']
if not data == PUBLISH_SIGNAL:
results = data['results']
vs = data['vs']
evaluation_id = data['evaluation_id']
searcher_eval_token = data['searcher_eval_token']
log_results(results, vs, evaluation_id, searcher_eval_token,
search_logger, config)
searcher.update(results['validation_accuracy'], searcher_eval_token)
update_searcher_state(state, config, results)
save_searcher_state(searcher, state, config, search_logger)
publish_new_arch(comm, searcher, state, config)
comm.finish_processing(get_topic_name(RESULTS_TOPIC, config), message)
def save_searcher_state(searcher, state, config, search_logger):
logger.info('Models finished: %d Best Accuracy: %f', state['finished'],
state['best_accuracy'])
searcher.save_state(search_logger.get_search_data_folderpath())
state = {
'finished': state['finished'],
'models_sampled': state['models_sampled'],
'epochs': state['epochs'],
'best_accuracy': state['best_accuracy']
}
ut.write_jsonfile(state, config['save_filepath'])
upload_folder(search_logger.get_search_data_folderpath(),
config['full_search_folder'], config['bucket'])
return state
def update_searcher_state(state, config, results):
state['best_accuracy'] = max(state['best_accuracy'],
results['validation_accuracy'])
state['finished'] += 1
state['epochs'] += config['eval_epochs']
def log_results(results, vs, evaluation_id, searcher_eval_token, search_logger,
config):
logger.info("Updating searcher with evaluation %d and results %s",
evaluation_id, str(results))
eval_logger = search_logger.get_evaluation_logger(evaluation_id)
eval_logger.log_config(vs, searcher_eval_token)
eval_logger.log_results(results)
upload_folder(eval_logger.get_evaluation_folderpath(), config['eval_path'],
config['bucket'])
def publish_new_arch(comm, searcher, state, config):
while comm.check_data_exists(get_topic_name(ARCH_TOPIC, config),
'evaluation_id', state['models_sampled']):
state['models_sampled'] += 1
if should_end_searcher(state, config):
logger.info('Search finished, sending kill signal')
comm.publish(get_topic_name(ARCH_TOPIC, config), KILL_SIGNAL)
state['search_finished'] = True
elif should_continue(state, config):
logger.info('Publishing architecture number %d',
state['models_sampled'])
_, _, vs, searcher_eval_token = searcher.sample()
arch = {
'vs': vs,
'evaluation_id': state['models_sampled'],
'searcher_eval_token': searcher_eval_token,
'eval_hparams': config['eval_hparams']
}
comm.publish(get_topic_name(ARCH_TOPIC, config), arch)
state['models_sampled'] += 1
def should_continue(state, config):
cont = config[
'num_samples'] == -1 or state['models_sampled'] < config['num_samples']
cont = cont and (config['num_epochs'] == -1 or
state['epochs'] < config['num_epochs'])
return cont
def should_end_searcher(state, config):
kill = config['num_samples'] != -1 and state['finished'] >= config[
'num_samples']
kill = kill or (config['num_epochs'] != -1 and
state['epochs'] >= config['num_epochs'])
return kill
def | ():
comm, search_logger, searcher, state, config = process_config_and_args()
logger.info('Using config %s', str(config))
logger.info('Current state %s', str(state))
state['search_finished'] = False
comm.subscribe(get_topic_name(RESULTS_TOPIC, config),
callback=lambda message: update_searcher(
message, comm, search_logger, searcher, state, config))
while not state['search_finished']:
time.sleep(30)
comm.unsubscribe(get_topic_name(RESULTS_TOPIC, config))
if __name__ == "__main__":
main()
| main | identifier_name |
master.py | import argparse
import time
import subprocess
import logging
from deep_architect import search_logging as sl
from deep_architect import utils as ut
from deep_architect.contrib.communicators.mongo_communicator import MongoCommunicator
from search_space_factory import name_to_search_space_factory_fn
from searcher import name_to_searcher_fn
logging.basicConfig(format='[%(levelname)s] %(asctime)s: %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
BUCKET_NAME = 'deep_architect'
RESULTS_TOPIC = 'results'
ARCH_TOPIC = 'architectures'
KILL_SIGNAL = 'kill'
PUBLISH_SIGNAL = 'publish'
def process_config_and_args():
parser = argparse.ArgumentParser("MPI Job for architecture search")
parser.add_argument('--config',
'-c',
action='store',
dest='config_name',
default='normal')
parser.add_argument(
'--config-file',
action='store',
dest='config_file',
default=
'/deep_architect/examples/contrib/kubernetes/experiment_config.json')
parser.add_argument('--bucket',
'-b',
action='store',
dest='bucket',
default=BUCKET_NAME)
# Other arguments
parser.add_argument('--resume',
'-r',
action='store_true',
dest='resume',
default=False)
parser.add_argument('--mongo-host',
'-m',
action='store',
dest='mongo_host',
default='127.0.0.1')
parser.add_argument('--mongo-port',
'-p',
action='store',
dest='mongo_port',
default=27017)
parser.add_argument('--log',
choices=['debug', 'info', 'warning', 'error'],
default='info')
parser.add_argument('--repetition', default=0)
options = parser.parse_args()
numeric_level = getattr(logging, options.log.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % options.log)
logging.getLogger().setLevel(numeric_level)
configs = ut.read_jsonfile(options.config_file)
config = configs[options.config_name]
config['bucket'] = options.bucket
comm = MongoCommunicator(host=options.mongo_host,
port=options.mongo_port,
data_refresher=True,
refresh_period=10)
datasets = {
'cifar10': ('data/cifar10/', 10),
}
_, num_classes = datasets[config['dataset']]
search_space_factory = name_to_search_space_factory_fn[
config['search_space']](num_classes)
config['save_every'] = 1 if 'save_every' not in config else config[
'save_every']
searcher = name_to_searcher_fn[config['searcher']](
search_space_factory.get_search_space)
config['num_epochs'] = -1 if 'epochs' not in config else config['epochs']
config['num_samples'] = -1 if 'samples' not in config else config['samples']
# SET UP GOOGLE STORE FOLDER
config['search_name'] = config['search_name'] + '_' + str(
options.repetition)
search_logger = sl.SearchLogger(config['search_folder'],
config['search_name'])
search_data_folder = search_logger.get_search_data_folderpath()
config['save_filepath'] = ut.join_paths(
(search_data_folder, config['searcher_file_name']))
config['eval_path'] = sl.get_all_evaluations_folderpath(
config['search_folder'], config['search_name'])
config['full_search_folder'] = sl.get_search_folderpath(
config['search_folder'], config['search_name'])
config['eval_hparams'] = {} if 'eval_hparams' not in config else config[
'eval_hparams']
state = {
'epochs': 0,
'models_sampled': 0,
'finished': 0,
'best_accuracy': 0.0
}
if options.resume:
try:
download_folder(search_data_folder, config['full_search_folder'],
config['bucket'])
searcher.load_state(search_data_folder)
if ut.file_exists(config['save_filepath']):
old_state = ut.read_jsonfile(config['save_filepath'])
state['epochs'] = old_state['epochs']
state['models_sampled'] = old_state['models_sampled']
state['finished'] = old_state['finished']
state['best_accuracy'] = old_state['best_accuracy']
except:
pass
return comm, search_logger, searcher, state, config
def download_folder(folder, location, bucket):
logger.info('Downloading gs://%s/%s to %s/', bucket, folder, location)
subprocess.check_call([
'gsutil', '-m', 'cp', '-r', 'gs://' + bucket + '/' + folder,
location + '/'
])
def upload_folder(folder, location, bucket):
subprocess.check_call([
'gsutil', '-m', 'cp', '-r', folder,
'gs://' + bucket + '/' + location + '/'
])
def get_topic_name(topic, config):
return config['search_folder'] + '_' + config['search_name'] + '_' + topic
def update_searcher(message, comm, search_logger, searcher, state, config):
data = message['data']
if not data == PUBLISH_SIGNAL:
results = data['results']
vs = data['vs']
evaluation_id = data['evaluation_id']
searcher_eval_token = data['searcher_eval_token']
log_results(results, vs, evaluation_id, searcher_eval_token,
search_logger, config)
searcher.update(results['validation_accuracy'], searcher_eval_token)
update_searcher_state(state, config, results)
save_searcher_state(searcher, state, config, search_logger)
publish_new_arch(comm, searcher, state, config)
comm.finish_processing(get_topic_name(RESULTS_TOPIC, config), message) | searcher.save_state(search_logger.get_search_data_folderpath())
state = {
'finished': state['finished'],
'models_sampled': state['models_sampled'],
'epochs': state['epochs'],
'best_accuracy': state['best_accuracy']
}
ut.write_jsonfile(state, config['save_filepath'])
upload_folder(search_logger.get_search_data_folderpath(),
config['full_search_folder'], config['bucket'])
return state
def update_searcher_state(state, config, results):
state['best_accuracy'] = max(state['best_accuracy'],
results['validation_accuracy'])
state['finished'] += 1
state['epochs'] += config['eval_epochs']
def log_results(results, vs, evaluation_id, searcher_eval_token, search_logger,
config):
logger.info("Updating searcher with evaluation %d and results %s",
evaluation_id, str(results))
eval_logger = search_logger.get_evaluation_logger(evaluation_id)
eval_logger.log_config(vs, searcher_eval_token)
eval_logger.log_results(results)
upload_folder(eval_logger.get_evaluation_folderpath(), config['eval_path'],
config['bucket'])
def publish_new_arch(comm, searcher, state, config):
while comm.check_data_exists(get_topic_name(ARCH_TOPIC, config),
'evaluation_id', state['models_sampled']):
state['models_sampled'] += 1
if should_end_searcher(state, config):
logger.info('Search finished, sending kill signal')
comm.publish(get_topic_name(ARCH_TOPIC, config), KILL_SIGNAL)
state['search_finished'] = True
elif should_continue(state, config):
logger.info('Publishing architecture number %d',
state['models_sampled'])
_, _, vs, searcher_eval_token = searcher.sample()
arch = {
'vs': vs,
'evaluation_id': state['models_sampled'],
'searcher_eval_token': searcher_eval_token,
'eval_hparams': config['eval_hparams']
}
comm.publish(get_topic_name(ARCH_TOPIC, config), arch)
state['models_sampled'] += 1
def should_continue(state, config):
cont = config[
'num_samples'] == -1 or state['models_sampled'] < config['num_samples']
cont = cont and (config['num_epochs'] == -1 or
state['epochs'] < config['num_epochs'])
return cont
def should_end_searcher(state, config):
kill = config['num_samples'] != -1 and state['finished'] >= config[
'num_samples']
kill = kill or (config['num_epochs'] != -1 and
state['epochs'] >= config['num_epochs'])
return kill
def main():
comm, search_logger, searcher, state, config = process_config_and_args()
logger.info('Using config %s', str(config))
logger.info('Current state %s', str(state))
state['search_finished'] = False
comm.subscribe(get_topic_name(RESULTS_TOPIC, config),
callback=lambda message: update_searcher(
message, comm, search_logger, searcher, state, config))
while not state['search_finished']:
time.sleep(30)
comm.unsubscribe(get_topic_name(RESULTS_TOPIC, config))
if __name__ == "__main__":
main() |
def save_searcher_state(searcher, state, config, search_logger):
logger.info('Models finished: %d Best Accuracy: %f', state['finished'],
state['best_accuracy']) | random_line_split |
master.py | import argparse
import time
import subprocess
import logging
from deep_architect import search_logging as sl
from deep_architect import utils as ut
from deep_architect.contrib.communicators.mongo_communicator import MongoCommunicator
from search_space_factory import name_to_search_space_factory_fn
from searcher import name_to_searcher_fn
logging.basicConfig(format='[%(levelname)s] %(asctime)s: %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
BUCKET_NAME = 'deep_architect'
RESULTS_TOPIC = 'results'
ARCH_TOPIC = 'architectures'
KILL_SIGNAL = 'kill'
PUBLISH_SIGNAL = 'publish'
def process_config_and_args():
parser = argparse.ArgumentParser("MPI Job for architecture search")
parser.add_argument('--config',
'-c',
action='store',
dest='config_name',
default='normal')
parser.add_argument(
'--config-file',
action='store',
dest='config_file',
default=
'/deep_architect/examples/contrib/kubernetes/experiment_config.json')
parser.add_argument('--bucket',
'-b',
action='store',
dest='bucket',
default=BUCKET_NAME)
# Other arguments
parser.add_argument('--resume',
'-r',
action='store_true',
dest='resume',
default=False)
parser.add_argument('--mongo-host',
'-m',
action='store',
dest='mongo_host',
default='127.0.0.1')
parser.add_argument('--mongo-port',
'-p',
action='store',
dest='mongo_port',
default=27017)
parser.add_argument('--log',
choices=['debug', 'info', 'warning', 'error'],
default='info')
parser.add_argument('--repetition', default=0)
options = parser.parse_args()
numeric_level = getattr(logging, options.log.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % options.log)
logging.getLogger().setLevel(numeric_level)
configs = ut.read_jsonfile(options.config_file)
config = configs[options.config_name]
config['bucket'] = options.bucket
comm = MongoCommunicator(host=options.mongo_host,
port=options.mongo_port,
data_refresher=True,
refresh_period=10)
datasets = {
'cifar10': ('data/cifar10/', 10),
}
_, num_classes = datasets[config['dataset']]
search_space_factory = name_to_search_space_factory_fn[
config['search_space']](num_classes)
config['save_every'] = 1 if 'save_every' not in config else config[
'save_every']
searcher = name_to_searcher_fn[config['searcher']](
search_space_factory.get_search_space)
config['num_epochs'] = -1 if 'epochs' not in config else config['epochs']
config['num_samples'] = -1 if 'samples' not in config else config['samples']
# SET UP GOOGLE STORE FOLDER
config['search_name'] = config['search_name'] + '_' + str(
options.repetition)
search_logger = sl.SearchLogger(config['search_folder'],
config['search_name'])
search_data_folder = search_logger.get_search_data_folderpath()
config['save_filepath'] = ut.join_paths(
(search_data_folder, config['searcher_file_name']))
config['eval_path'] = sl.get_all_evaluations_folderpath(
config['search_folder'], config['search_name'])
config['full_search_folder'] = sl.get_search_folderpath(
config['search_folder'], config['search_name'])
config['eval_hparams'] = {} if 'eval_hparams' not in config else config[
'eval_hparams']
state = {
'epochs': 0,
'models_sampled': 0,
'finished': 0,
'best_accuracy': 0.0
}
if options.resume:
try:
download_folder(search_data_folder, config['full_search_folder'],
config['bucket'])
searcher.load_state(search_data_folder)
if ut.file_exists(config['save_filepath']):
old_state = ut.read_jsonfile(config['save_filepath'])
state['epochs'] = old_state['epochs']
state['models_sampled'] = old_state['models_sampled']
state['finished'] = old_state['finished']
state['best_accuracy'] = old_state['best_accuracy']
except:
pass
return comm, search_logger, searcher, state, config
def download_folder(folder, location, bucket):
logger.info('Downloading gs://%s/%s to %s/', bucket, folder, location)
subprocess.check_call([
'gsutil', '-m', 'cp', '-r', 'gs://' + bucket + '/' + folder,
location + '/'
])
def upload_folder(folder, location, bucket):
subprocess.check_call([
'gsutil', '-m', 'cp', '-r', folder,
'gs://' + bucket + '/' + location + '/'
])
def get_topic_name(topic, config):
return config['search_folder'] + '_' + config['search_name'] + '_' + topic
def update_searcher(message, comm, search_logger, searcher, state, config):
data = message['data']
if not data == PUBLISH_SIGNAL:
results = data['results']
vs = data['vs']
evaluation_id = data['evaluation_id']
searcher_eval_token = data['searcher_eval_token']
log_results(results, vs, evaluation_id, searcher_eval_token,
search_logger, config)
searcher.update(results['validation_accuracy'], searcher_eval_token)
update_searcher_state(state, config, results)
save_searcher_state(searcher, state, config, search_logger)
publish_new_arch(comm, searcher, state, config)
comm.finish_processing(get_topic_name(RESULTS_TOPIC, config), message)
def save_searcher_state(searcher, state, config, search_logger):
logger.info('Models finished: %d Best Accuracy: %f', state['finished'],
state['best_accuracy'])
searcher.save_state(search_logger.get_search_data_folderpath())
state = {
'finished': state['finished'],
'models_sampled': state['models_sampled'],
'epochs': state['epochs'],
'best_accuracy': state['best_accuracy']
}
ut.write_jsonfile(state, config['save_filepath'])
upload_folder(search_logger.get_search_data_folderpath(),
config['full_search_folder'], config['bucket'])
return state
def update_searcher_state(state, config, results):
state['best_accuracy'] = max(state['best_accuracy'],
results['validation_accuracy'])
state['finished'] += 1
state['epochs'] += config['eval_epochs']
def log_results(results, vs, evaluation_id, searcher_eval_token, search_logger,
config):
logger.info("Updating searcher with evaluation %d and results %s",
evaluation_id, str(results))
eval_logger = search_logger.get_evaluation_logger(evaluation_id)
eval_logger.log_config(vs, searcher_eval_token)
eval_logger.log_results(results)
upload_folder(eval_logger.get_evaluation_folderpath(), config['eval_path'],
config['bucket'])
def publish_new_arch(comm, searcher, state, config):
while comm.check_data_exists(get_topic_name(ARCH_TOPIC, config),
'evaluation_id', state['models_sampled']):
state['models_sampled'] += 1
if should_end_searcher(state, config):
logger.info('Search finished, sending kill signal')
comm.publish(get_topic_name(ARCH_TOPIC, config), KILL_SIGNAL)
state['search_finished'] = True
elif should_continue(state, config):
|
def should_continue(state, config):
cont = config[
'num_samples'] == -1 or state['models_sampled'] < config['num_samples']
cont = cont and (config['num_epochs'] == -1 or
state['epochs'] < config['num_epochs'])
return cont
def should_end_searcher(state, config):
kill = config['num_samples'] != -1 and state['finished'] >= config[
'num_samples']
kill = kill or (config['num_epochs'] != -1 and
state['epochs'] >= config['num_epochs'])
return kill
def main():
comm, search_logger, searcher, state, config = process_config_and_args()
logger.info('Using config %s', str(config))
logger.info('Current state %s', str(state))
state['search_finished'] = False
comm.subscribe(get_topic_name(RESULTS_TOPIC, config),
callback=lambda message: update_searcher(
message, comm, search_logger, searcher, state, config))
while not state['search_finished']:
time.sleep(30)
comm.unsubscribe(get_topic_name(RESULTS_TOPIC, config))
if __name__ == "__main__":
main()
| logger.info('Publishing architecture number %d',
state['models_sampled'])
_, _, vs, searcher_eval_token = searcher.sample()
arch = {
'vs': vs,
'evaluation_id': state['models_sampled'],
'searcher_eval_token': searcher_eval_token,
'eval_hparams': config['eval_hparams']
}
comm.publish(get_topic_name(ARCH_TOPIC, config), arch)
state['models_sampled'] += 1 | conditional_block |
master.py | import argparse
import time
import subprocess
import logging
from deep_architect import search_logging as sl
from deep_architect import utils as ut
from deep_architect.contrib.communicators.mongo_communicator import MongoCommunicator
from search_space_factory import name_to_search_space_factory_fn
from searcher import name_to_searcher_fn
logging.basicConfig(format='[%(levelname)s] %(asctime)s: %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
BUCKET_NAME = 'deep_architect'
RESULTS_TOPIC = 'results'
ARCH_TOPIC = 'architectures'
KILL_SIGNAL = 'kill'
PUBLISH_SIGNAL = 'publish'
def process_config_and_args():
parser = argparse.ArgumentParser("MPI Job for architecture search")
parser.add_argument('--config',
'-c',
action='store',
dest='config_name',
default='normal')
parser.add_argument(
'--config-file',
action='store',
dest='config_file',
default=
'/deep_architect/examples/contrib/kubernetes/experiment_config.json')
parser.add_argument('--bucket',
'-b',
action='store',
dest='bucket',
default=BUCKET_NAME)
# Other arguments
parser.add_argument('--resume',
'-r',
action='store_true',
dest='resume',
default=False)
parser.add_argument('--mongo-host',
'-m',
action='store',
dest='mongo_host',
default='127.0.0.1')
parser.add_argument('--mongo-port',
'-p',
action='store',
dest='mongo_port',
default=27017)
parser.add_argument('--log',
choices=['debug', 'info', 'warning', 'error'],
default='info')
parser.add_argument('--repetition', default=0)
options = parser.parse_args()
numeric_level = getattr(logging, options.log.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % options.log)
logging.getLogger().setLevel(numeric_level)
configs = ut.read_jsonfile(options.config_file)
config = configs[options.config_name]
config['bucket'] = options.bucket
comm = MongoCommunicator(host=options.mongo_host,
port=options.mongo_port,
data_refresher=True,
refresh_period=10)
datasets = {
'cifar10': ('data/cifar10/', 10),
}
_, num_classes = datasets[config['dataset']]
search_space_factory = name_to_search_space_factory_fn[
config['search_space']](num_classes)
config['save_every'] = 1 if 'save_every' not in config else config[
'save_every']
searcher = name_to_searcher_fn[config['searcher']](
search_space_factory.get_search_space)
config['num_epochs'] = -1 if 'epochs' not in config else config['epochs']
config['num_samples'] = -1 if 'samples' not in config else config['samples']
# SET UP GOOGLE STORE FOLDER
config['search_name'] = config['search_name'] + '_' + str(
options.repetition)
search_logger = sl.SearchLogger(config['search_folder'],
config['search_name'])
search_data_folder = search_logger.get_search_data_folderpath()
config['save_filepath'] = ut.join_paths(
(search_data_folder, config['searcher_file_name']))
config['eval_path'] = sl.get_all_evaluations_folderpath(
config['search_folder'], config['search_name'])
config['full_search_folder'] = sl.get_search_folderpath(
config['search_folder'], config['search_name'])
config['eval_hparams'] = {} if 'eval_hparams' not in config else config[
'eval_hparams']
state = {
'epochs': 0,
'models_sampled': 0,
'finished': 0,
'best_accuracy': 0.0
}
if options.resume:
try:
download_folder(search_data_folder, config['full_search_folder'],
config['bucket'])
searcher.load_state(search_data_folder)
if ut.file_exists(config['save_filepath']):
old_state = ut.read_jsonfile(config['save_filepath'])
state['epochs'] = old_state['epochs']
state['models_sampled'] = old_state['models_sampled']
state['finished'] = old_state['finished']
state['best_accuracy'] = old_state['best_accuracy']
except:
pass
return comm, search_logger, searcher, state, config
def download_folder(folder, location, bucket):
logger.info('Downloading gs://%s/%s to %s/', bucket, folder, location)
subprocess.check_call([
'gsutil', '-m', 'cp', '-r', 'gs://' + bucket + '/' + folder,
location + '/'
])
def upload_folder(folder, location, bucket):
subprocess.check_call([
'gsutil', '-m', 'cp', '-r', folder,
'gs://' + bucket + '/' + location + '/'
])
def get_topic_name(topic, config):
return config['search_folder'] + '_' + config['search_name'] + '_' + topic
def update_searcher(message, comm, search_logger, searcher, state, config):
data = message['data']
if not data == PUBLISH_SIGNAL:
results = data['results']
vs = data['vs']
evaluation_id = data['evaluation_id']
searcher_eval_token = data['searcher_eval_token']
log_results(results, vs, evaluation_id, searcher_eval_token,
search_logger, config)
searcher.update(results['validation_accuracy'], searcher_eval_token)
update_searcher_state(state, config, results)
save_searcher_state(searcher, state, config, search_logger)
publish_new_arch(comm, searcher, state, config)
comm.finish_processing(get_topic_name(RESULTS_TOPIC, config), message)
def save_searcher_state(searcher, state, config, search_logger):
|
def update_searcher_state(state, config, results):
state['best_accuracy'] = max(state['best_accuracy'],
results['validation_accuracy'])
state['finished'] += 1
state['epochs'] += config['eval_epochs']
def log_results(results, vs, evaluation_id, searcher_eval_token, search_logger,
config):
logger.info("Updating searcher with evaluation %d and results %s",
evaluation_id, str(results))
eval_logger = search_logger.get_evaluation_logger(evaluation_id)
eval_logger.log_config(vs, searcher_eval_token)
eval_logger.log_results(results)
upload_folder(eval_logger.get_evaluation_folderpath(), config['eval_path'],
config['bucket'])
def publish_new_arch(comm, searcher, state, config):
while comm.check_data_exists(get_topic_name(ARCH_TOPIC, config),
'evaluation_id', state['models_sampled']):
state['models_sampled'] += 1
if should_end_searcher(state, config):
logger.info('Search finished, sending kill signal')
comm.publish(get_topic_name(ARCH_TOPIC, config), KILL_SIGNAL)
state['search_finished'] = True
elif should_continue(state, config):
logger.info('Publishing architecture number %d',
state['models_sampled'])
_, _, vs, searcher_eval_token = searcher.sample()
arch = {
'vs': vs,
'evaluation_id': state['models_sampled'],
'searcher_eval_token': searcher_eval_token,
'eval_hparams': config['eval_hparams']
}
comm.publish(get_topic_name(ARCH_TOPIC, config), arch)
state['models_sampled'] += 1
def should_continue(state, config):
cont = config[
'num_samples'] == -1 or state['models_sampled'] < config['num_samples']
cont = cont and (config['num_epochs'] == -1 or
state['epochs'] < config['num_epochs'])
return cont
def should_end_searcher(state, config):
kill = config['num_samples'] != -1 and state['finished'] >= config[
'num_samples']
kill = kill or (config['num_epochs'] != -1 and
state['epochs'] >= config['num_epochs'])
return kill
def main():
comm, search_logger, searcher, state, config = process_config_and_args()
logger.info('Using config %s', str(config))
logger.info('Current state %s', str(state))
state['search_finished'] = False
comm.subscribe(get_topic_name(RESULTS_TOPIC, config),
callback=lambda message: update_searcher(
message, comm, search_logger, searcher, state, config))
while not state['search_finished']:
time.sleep(30)
comm.unsubscribe(get_topic_name(RESULTS_TOPIC, config))
if __name__ == "__main__":
main()
| logger.info('Models finished: %d Best Accuracy: %f', state['finished'],
state['best_accuracy'])
searcher.save_state(search_logger.get_search_data_folderpath())
state = {
'finished': state['finished'],
'models_sampled': state['models_sampled'],
'epochs': state['epochs'],
'best_accuracy': state['best_accuracy']
}
ut.write_jsonfile(state, config['save_filepath'])
upload_folder(search_logger.get_search_data_folderpath(),
config['full_search_folder'], config['bucket'])
return state | identifier_body |
platform.test.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
'use strict';
import assert = require('assert');
import Platform = require('vs/platform/platform');
import Types = require('vs/base/common/types');
suite('Platform / Registry', () => {
test('registry - api', function() {
assert.ok(Types.isFunction(Platform.Registry.add));
assert.ok(Types.isFunction(Platform.Registry.as));
assert.ok(Types.isFunction(Platform.Registry.knows));
});
test('registry - mixin', function() {
Platform.Registry.add('foo', { bar: true });
assert.ok(Platform.Registry.knows('foo')); |
test('registry - knows, as', function() {
let ext = {};
Platform.Registry.add('knows,as', ext);
assert.ok(Platform.Registry.knows('knows,as'));
assert.ok(!Platform.Registry.knows('knows,as1234'));
assert.ok(Platform.Registry.as('knows,as') === ext);
assert.ok(Platform.Registry.as('knows,as1234') === null);
});
test('registry - mixin, fails on duplicate ids', function() {
Platform.Registry.add('foo-dup', { bar: true });
try {
Platform.Registry.add('foo-dup', { bar: false });
assert.ok(false);
} catch (e) {
assert.ok(true);
}
});
}); | assert.ok(Platform.Registry.as('foo').bar);
assert.equal(Platform.Registry.as('foo').bar, true);
}); | random_line_split |
TextureDummy.js | /**
* Copyright (c) 2015, Alexander Orzechowski.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* Currently in beta stage. Changes can and will be made to the core mechanic
* making this not backwards compatible.
*
* Github: https://github.com/Need4Speed402/tessellator
*/
Tessellator.TextureDummy = function (ready){
this.super(null);
if (ready) | ;
};
Tessellator.extend(Tessellator.TextureDummy, Tessellator.Texture);
Tessellator.TextureDummy.prototype.configure = Tessellator.EMPTY_FUNC;
Tessellator.TextureDummy.prototype.bind = Tessellator.EMPTY_FUNC; | {
this.setReady();
} | conditional_block |
TextureDummy.js | /**
* Copyright (c) 2015, Alexander Orzechowski.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
/**
* Currently in beta stage. Changes can and will be made to the core mechanic
* making this not backwards compatible.
*
* Github: https://github.com/Need4Speed402/tessellator
*/
Tessellator.TextureDummy = function (ready){
this.super(null);
if (ready){
this.setReady();
};
};
Tessellator.extend(Tessellator.TextureDummy, Tessellator.Texture);
Tessellator.TextureDummy.prototype.configure = Tessellator.EMPTY_FUNC;
Tessellator.TextureDummy.prototype.bind = Tessellator.EMPTY_FUNC; | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
| random_line_split |
S15.3.2.1_A3_T14.js | // Copyright 2009 the Sputnik authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/**
* When the Function constructor is called with arguments p, body the following steps are taken:
* i) Let Result(i) be the first argument
* ii) Let P be ToString(Result(i))
* iii) Call ToString(body)
* iv) If P is not parsable as a FormalParameterList_opt then throw a SyntaxError exception
* v) If body is not parsable as FunctionBody then throw a SyntaxError exception
* vi) Create a new Function object as specified in 13.2 with parameters specified by parsing P as a FormalParameterListopt and body specified by parsing body as a FunctionBody
* Pass in a scope chain consisting of the global object as the Scope parameter
* vii) Return Result(vi)
*
* @path ch15/15.3/15.3.2/S15.3.2.1_A3_T14.js
* @description Values of the function constructor arguments are "a,b,c" and an undefined variable
*/
var p = "a,b,c";
//CHECK#1
try {
var f = new Function(p, body);
} catch (e) {
$FAIL('#1: test failed with error '+e);
}
//CHECK#2
if (f.constructor !== Function) |
//CHECK#3
if (f()!==undefined) {
$ERROR('#3: When the Function constructor is called with arguments p, body the following steps are taken...');
}
var body;
| {
$ERROR('#2: When the Function constructor is called with arguments p, body creates a new Function object as specified in 13.2');
} | conditional_block |
S15.3.2.1_A3_T14.js | // Copyright 2009 the Sputnik authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/**
* When the Function constructor is called with arguments p, body the following steps are taken:
* i) Let Result(i) be the first argument
* ii) Let P be ToString(Result(i))
* iii) Call ToString(body)
* iv) If P is not parsable as a FormalParameterList_opt then throw a SyntaxError exception
* v) If body is not parsable as FunctionBody then throw a SyntaxError exception
* vi) Create a new Function object as specified in 13.2 with parameters specified by parsing P as a FormalParameterListopt and body specified by parsing body as a FunctionBody
* Pass in a scope chain consisting of the global object as the Scope parameter | * @description Values of the function constructor arguments are "a,b,c" and an undefined variable
*/
var p = "a,b,c";
//CHECK#1
try {
var f = new Function(p, body);
} catch (e) {
$FAIL('#1: test failed with error '+e);
}
//CHECK#2
if (f.constructor !== Function) {
$ERROR('#2: When the Function constructor is called with arguments p, body creates a new Function object as specified in 13.2');
}
//CHECK#3
if (f()!==undefined) {
$ERROR('#3: When the Function constructor is called with arguments p, body the following steps are taken...');
}
var body; | * vii) Return Result(vi)
*
* @path ch15/15.3/15.3.2/S15.3.2.1_A3_T14.js | random_line_split |
error.rs | // Copyright 2016 evic Developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use std::io;
use std::fmt;
use std::error;
/// Error types.
#[derive(Debug)]
pub enum Error {
/// An error originating from reading or writing to the underlying buffer.
Io(io::Error),
/// An error related to the provided firmware.
Firmware(String),
/// An error originating from the main application.
CliError(String)
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::Io(ref err) => write!(f, "IO error: {}", err),
Error::Firmware(ref err) => write!(f, "Firmware error: {}", err),
Error::CliError(ref err) => write!(f, "CLI error: {}", err),
}
}
}
impl error::Error for Error {
fn description(&self) -> &str {
match *self {
Error::Io(..) => "eVic IO error",
Error::Firmware(..) => "eVic firmware error",
Error::CliError(..) => "eVic CLI error",
}
}
fn cause(&self) -> Option<&error::Error> {
match *self {
Error::Io(ref err) => Some(err),
_ => None
}
}
}
impl From<io::Error> for Error {
fn | (error: io::Error) -> Error {
Error::Io(error)
}
}
| from | identifier_name |
error.rs | // Copyright 2016 evic Developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use std::io;
use std::fmt;
use std::error;
/// Error types.
#[derive(Debug)]
pub enum Error {
/// An error originating from reading or writing to the underlying buffer.
Io(io::Error),
/// An error related to the provided firmware.
Firmware(String),
/// An error originating from the main application.
CliError(String)
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result |
}
impl error::Error for Error {
fn description(&self) -> &str {
match *self {
Error::Io(..) => "eVic IO error",
Error::Firmware(..) => "eVic firmware error",
Error::CliError(..) => "eVic CLI error",
}
}
fn cause(&self) -> Option<&error::Error> {
match *self {
Error::Io(ref err) => Some(err),
_ => None
}
}
}
impl From<io::Error> for Error {
fn from(error: io::Error) -> Error {
Error::Io(error)
}
}
| {
match *self {
Error::Io(ref err) => write!(f, "IO error: {}", err),
Error::Firmware(ref err) => write!(f, "Firmware error: {}", err),
Error::CliError(ref err) => write!(f, "CLI error: {}", err),
}
} | identifier_body |
error.rs | // Copyright 2016 evic Developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use std::io;
use std::fmt;
use std::error;
/// Error types.
#[derive(Debug)]
pub enum Error {
/// An error originating from reading or writing to the underlying buffer.
Io(io::Error),
/// An error related to the provided firmware.
Firmware(String),
/// An error originating from the main application.
CliError(String)
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::Io(ref err) => write!(f, "IO error: {}", err),
Error::Firmware(ref err) => write!(f, "Firmware error: {}", err),
Error::CliError(ref err) => write!(f, "CLI error: {}", err),
}
}
}
impl error::Error for Error {
fn description(&self) -> &str {
match *self {
Error::Io(..) => "eVic IO error",
Error::Firmware(..) => "eVic firmware error",
Error::CliError(..) => "eVic CLI error",
}
}
fn cause(&self) -> Option<&error::Error> {
match *self {
Error::Io(ref err) => Some(err),
_ => None
}
}
}
impl From<io::Error> for Error {
fn from(error: io::Error) -> Error {
Error::Io(error) | } | } | random_line_split |
morphology.py | import numpy as np
import scipy.interpolate as interp
import warnings
from astropy.io import fits
def concentration(radii, phot, eta_radius=0.2, eta_radius_factor=1.5, interp_kind='linear', add_zero=False):
"""
Calculates the concentration parameter
C = 5 * log10(r_80 / r2_0)
Inputs:
radii -- 1d array of aperture photometry radii
phot -- 1d array of aperture photometry fluxes
interp_kind -- kind of interpolation; passed to scipy.interpolate.interp1d.
Some options are linear, quadratic, and cubic.
add_zero -- add a 0 radius and zero flux point to their respective arrays
to help with interpolation at small radii; should only matter for quadratic or
cubic interpolation
"""
assert len(radii) == len(phot)
assert np.all(radii > 0)
assert np.all(phot > 0)
if add_zero:
radii = np.insert(radii, 0, 0)
phot = np.insert(phot, 0, 0)
eta_vals = eta(radii, phot)
if np.any(eta_vals < 0.2):
eta_interp = interp.interp1d(eta_vals, radii, kind=interp_kind)
eta_r = eta_radius_factor * eta_interp(eta_radius)
else:
warnings.warn("eta is never less than " + str(eta_radius) + ". Using lowest eta value as proxy")
eta_r = eta_radius_factor * radii[np.argmin(eta_vals)]
phot_interp = interp.interp1d(radii, phot, kind=interp_kind)
if eta_r < np.max(radii):
maxphot = phot_interp(eta_r)
else:
|
norm_phot = phot / maxphot
radius_interp = interp.interp1d(norm_phot, radii, kind=interp_kind)
r20 = radius_interp(0.2)
r80 = radius_interp(0.8)
assert r20 < r80 < np.max(radii)
c = 5 * np.log10(r80 / r20)
return c
def eta(radii, phot):
"""
eta = I(r) / \bar{I}(<r)
radii -- 1d array of aperture photometry radii
phot -- 1d array of aperture photometry fluxes
this is currently calculated quite naively, and probably could be done better
"""
phot_area = np.pi * radii**2
phot_area_diff = np.ediff1d(phot_area, to_begin=phot_area[0])
I_bar = phot / (phot_area)
I_delta_r = np.ediff1d(phot, to_begin=phot[0]) / phot_area_diff
I_r = (I_delta_r[:-1] + I_delta_r[1:]) / 2 #lost last array element here
I_r = np.append(I_r, I_delta_r[-1]) #added it back in here
eta = I_r / I_bar
return eta
def find_eta(eta_val, radii, phot):
eta_interp = interp.interp1d(eta(radii, phot), radii)
return eta_interp(eta_val)
def snr(name):
"""
name before fits and apphot files
"""
#first calculate the image uncertainty using the MAD
hdulist = fits.open(name + '_bs.fits')
im_med = np.median(hdulist[0].data)
im_err = np.median(np.abs(hdulist[0].data - im_med))
#now get the total flux
apphot = np.loadtxt(name + ".apphot", usecols=[0,1])
radii = apphot[:,0]
phot = apphot[:,1]
try:
eta_rad = find_eta(0.2, radii, phot)
if eta_rad > np.max(radii)/1.5:
eta_rad = np.max(radii)/1.5
except ValueError:
eta_rad = 1.0
phot_interp = interp.interp1d(radii, phot)
total_phot = phot_interp(1.5*eta_rad)
return total_phot / np.sqrt(np.pi*(1.5*eta_rad)**2 * im_err**2)
| maxphot = np.max(phot) | conditional_block |
morphology.py | import numpy as np
import scipy.interpolate as interp
import warnings
from astropy.io import fits
def concentration(radii, phot, eta_radius=0.2, eta_radius_factor=1.5, interp_kind='linear', add_zero=False):
"""
Calculates the concentration parameter
C = 5 * log10(r_80 / r2_0)
Inputs:
radii -- 1d array of aperture photometry radii
phot -- 1d array of aperture photometry fluxes
interp_kind -- kind of interpolation; passed to scipy.interpolate.interp1d.
Some options are linear, quadratic, and cubic.
add_zero -- add a 0 radius and zero flux point to their respective arrays
to help with interpolation at small radii; should only matter for quadratic or
cubic interpolation
"""
assert len(radii) == len(phot)
assert np.all(radii > 0)
assert np.all(phot > 0)
if add_zero:
radii = np.insert(radii, 0, 0)
phot = np.insert(phot, 0, 0)
eta_vals = eta(radii, phot)
if np.any(eta_vals < 0.2):
eta_interp = interp.interp1d(eta_vals, radii, kind=interp_kind)
eta_r = eta_radius_factor * eta_interp(eta_radius)
else:
warnings.warn("eta is never less than " + str(eta_radius) + ". Using lowest eta value as proxy")
eta_r = eta_radius_factor * radii[np.argmin(eta_vals)]
phot_interp = interp.interp1d(radii, phot, kind=interp_kind)
if eta_r < np.max(radii):
maxphot = phot_interp(eta_r)
else:
maxphot = np.max(phot)
norm_phot = phot / maxphot
radius_interp = interp.interp1d(norm_phot, radii, kind=interp_kind)
r20 = radius_interp(0.2)
r80 = radius_interp(0.8)
assert r20 < r80 < np.max(radii)
c = 5 * np.log10(r80 / r20)
return c
def eta(radii, phot):
"""
eta = I(r) / \bar{I}(<r)
radii -- 1d array of aperture photometry radii
phot -- 1d array of aperture photometry fluxes
this is currently calculated quite naively, and probably could be done better
"""
phot_area = np.pi * radii**2
phot_area_diff = np.ediff1d(phot_area, to_begin=phot_area[0])
I_bar = phot / (phot_area)
I_delta_r = np.ediff1d(phot, to_begin=phot[0]) / phot_area_diff
I_r = (I_delta_r[:-1] + I_delta_r[1:]) / 2 #lost last array element here
I_r = np.append(I_r, I_delta_r[-1]) #added it back in here
eta = I_r / I_bar
return eta
def find_eta(eta_val, radii, phot):
|
def snr(name):
"""
name before fits and apphot files
"""
#first calculate the image uncertainty using the MAD
hdulist = fits.open(name + '_bs.fits')
im_med = np.median(hdulist[0].data)
im_err = np.median(np.abs(hdulist[0].data - im_med))
#now get the total flux
apphot = np.loadtxt(name + ".apphot", usecols=[0,1])
radii = apphot[:,0]
phot = apphot[:,1]
try:
eta_rad = find_eta(0.2, radii, phot)
if eta_rad > np.max(radii)/1.5:
eta_rad = np.max(radii)/1.5
except ValueError:
eta_rad = 1.0
phot_interp = interp.interp1d(radii, phot)
total_phot = phot_interp(1.5*eta_rad)
return total_phot / np.sqrt(np.pi*(1.5*eta_rad)**2 * im_err**2)
| eta_interp = interp.interp1d(eta(radii, phot), radii)
return eta_interp(eta_val) | identifier_body |
morphology.py | import numpy as np
import scipy.interpolate as interp
import warnings
from astropy.io import fits
def concentration(radii, phot, eta_radius=0.2, eta_radius_factor=1.5, interp_kind='linear', add_zero=False):
"""
Calculates the concentration parameter
C = 5 * log10(r_80 / r2_0)
Inputs:
radii -- 1d array of aperture photometry radii
phot -- 1d array of aperture photometry fluxes
interp_kind -- kind of interpolation; passed to scipy.interpolate.interp1d.
Some options are linear, quadratic, and cubic.
add_zero -- add a 0 radius and zero flux point to their respective arrays
to help with interpolation at small radii; should only matter for quadratic or
cubic interpolation
"""
assert len(radii) == len(phot)
assert np.all(radii > 0)
assert np.all(phot > 0)
if add_zero:
radii = np.insert(radii, 0, 0)
phot = np.insert(phot, 0, 0)
eta_vals = eta(radii, phot)
if np.any(eta_vals < 0.2):
eta_interp = interp.interp1d(eta_vals, radii, kind=interp_kind)
eta_r = eta_radius_factor * eta_interp(eta_radius)
else:
warnings.warn("eta is never less than " + str(eta_radius) + ". Using lowest eta value as proxy")
eta_r = eta_radius_factor * radii[np.argmin(eta_vals)]
phot_interp = interp.interp1d(radii, phot, kind=interp_kind)
if eta_r < np.max(radii):
maxphot = phot_interp(eta_r)
else:
maxphot = np.max(phot)
norm_phot = phot / maxphot
radius_interp = interp.interp1d(norm_phot, radii, kind=interp_kind)
r20 = radius_interp(0.2)
r80 = radius_interp(0.8)
assert r20 < r80 < np.max(radii)
c = 5 * np.log10(r80 / r20)
return c
def | (radii, phot):
"""
eta = I(r) / \bar{I}(<r)
radii -- 1d array of aperture photometry radii
phot -- 1d array of aperture photometry fluxes
this is currently calculated quite naively, and probably could be done better
"""
phot_area = np.pi * radii**2
phot_area_diff = np.ediff1d(phot_area, to_begin=phot_area[0])
I_bar = phot / (phot_area)
I_delta_r = np.ediff1d(phot, to_begin=phot[0]) / phot_area_diff
I_r = (I_delta_r[:-1] + I_delta_r[1:]) / 2 #lost last array element here
I_r = np.append(I_r, I_delta_r[-1]) #added it back in here
eta = I_r / I_bar
return eta
def find_eta(eta_val, radii, phot):
eta_interp = interp.interp1d(eta(radii, phot), radii)
return eta_interp(eta_val)
def snr(name):
"""
name before fits and apphot files
"""
#first calculate the image uncertainty using the MAD
hdulist = fits.open(name + '_bs.fits')
im_med = np.median(hdulist[0].data)
im_err = np.median(np.abs(hdulist[0].data - im_med))
#now get the total flux
apphot = np.loadtxt(name + ".apphot", usecols=[0,1])
radii = apphot[:,0]
phot = apphot[:,1]
try:
eta_rad = find_eta(0.2, radii, phot)
if eta_rad > np.max(radii)/1.5:
eta_rad = np.max(radii)/1.5
except ValueError:
eta_rad = 1.0
phot_interp = interp.interp1d(radii, phot)
total_phot = phot_interp(1.5*eta_rad)
return total_phot / np.sqrt(np.pi*(1.5*eta_rad)**2 * im_err**2)
| eta | identifier_name |
morphology.py | import numpy as np
import scipy.interpolate as interp
import warnings
from astropy.io import fits
def concentration(radii, phot, eta_radius=0.2, eta_radius_factor=1.5, interp_kind='linear', add_zero=False):
"""
Calculates the concentration parameter
C = 5 * log10(r_80 / r2_0)
Inputs:
radii -- 1d array of aperture photometry radii
phot -- 1d array of aperture photometry fluxes
interp_kind -- kind of interpolation; passed to scipy.interpolate.interp1d.
Some options are linear, quadratic, and cubic.
add_zero -- add a 0 radius and zero flux point to their respective arrays
to help with interpolation at small radii; should only matter for quadratic or
cubic interpolation
"""
assert len(radii) == len(phot)
assert np.all(radii > 0)
assert np.all(phot > 0)
if add_zero:
radii = np.insert(radii, 0, 0)
phot = np.insert(phot, 0, 0)
eta_vals = eta(radii, phot)
if np.any(eta_vals < 0.2):
eta_interp = interp.interp1d(eta_vals, radii, kind=interp_kind)
eta_r = eta_radius_factor * eta_interp(eta_radius)
else:
warnings.warn("eta is never less than " + str(eta_radius) + ". Using lowest eta value as proxy")
eta_r = eta_radius_factor * radii[np.argmin(eta_vals)]
phot_interp = interp.interp1d(radii, phot, kind=interp_kind)
if eta_r < np.max(radii):
maxphot = phot_interp(eta_r)
else:
maxphot = np.max(phot)
norm_phot = phot / maxphot
radius_interp = interp.interp1d(norm_phot, radii, kind=interp_kind)
r20 = radius_interp(0.2)
r80 = radius_interp(0.8)
assert r20 < r80 < np.max(radii)
c = 5 * np.log10(r80 / r20)
return c
def eta(radii, phot):
"""
eta = I(r) / \bar{I}(<r)
radii -- 1d array of aperture photometry radii
phot -- 1d array of aperture photometry fluxes
this is currently calculated quite naively, and probably could be done better
"""
phot_area = np.pi * radii**2
phot_area_diff = np.ediff1d(phot_area, to_begin=phot_area[0])
I_bar = phot / (phot_area)
I_delta_r = np.ediff1d(phot, to_begin=phot[0]) / phot_area_diff
I_r = (I_delta_r[:-1] + I_delta_r[1:]) / 2 #lost last array element here
I_r = np.append(I_r, I_delta_r[-1]) #added it back in here
eta = I_r / I_bar
return eta
|
def snr(name):
"""
name before fits and apphot files
"""
#first calculate the image uncertainty using the MAD
hdulist = fits.open(name + '_bs.fits')
im_med = np.median(hdulist[0].data)
im_err = np.median(np.abs(hdulist[0].data - im_med))
#now get the total flux
apphot = np.loadtxt(name + ".apphot", usecols=[0,1])
radii = apphot[:,0]
phot = apphot[:,1]
try:
eta_rad = find_eta(0.2, radii, phot)
if eta_rad > np.max(radii)/1.5:
eta_rad = np.max(radii)/1.5
except ValueError:
eta_rad = 1.0
phot_interp = interp.interp1d(radii, phot)
total_phot = phot_interp(1.5*eta_rad)
return total_phot / np.sqrt(np.pi*(1.5*eta_rad)**2 * im_err**2) | def find_eta(eta_val, radii, phot):
eta_interp = interp.interp1d(eta(radii, phot), radii)
return eta_interp(eta_val) | random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.