code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
import numpy as np
vector = np.array([5, 10, 15, 20])
equal_to_ten = (vector == 10)
print(equal_to_ten)
matrix = np.array([[10, 25, 30], [45, 50, 55], [60, 65, 70]])
equal_to_25 = (matrix[:, 1]) == 25
print(equal_to_25)
##Lire le dataset world_alcohol.csv dans la variable world_alcohol
world_alcohol = np.genfromtxt('world_alcohol.csv', delimiter = ',', dtype = 'U75', skip_header = 1)
#Extraire le 3e colonne de world_alcohol et comparer la au pays "Canada". Assigner le résultat à la variable countries_canada
countries_canada = world_alcohol[:, 2]
print(countries_canada == 'Canada')
#Xtraire la première colonne de world_alcohol et comparer la chaine de caratères "1984". Assigner les résultat à la variable years_1984
years_1984 = world_alcohol[:, 0]
print(years_1984 == '1984') | [
"numpy.array",
"numpy.genfromtxt"
] | [((29, 54), 'numpy.array', 'np.array', (['[5, 10, 15, 20]'], {}), '([5, 10, 15, 20])\n', (37, 54), True, 'import numpy as np\n'), ((118, 170), 'numpy.array', 'np.array', (['[[10, 25, 30], [45, 50, 55], [60, 65, 70]]'], {}), '([[10, 25, 30], [45, 50, 55], [60, 65, 70]])\n', (126, 170), True, 'import numpy as np\n'), ((313, 390), 'numpy.genfromtxt', 'np.genfromtxt', (['"""world_alcohol.csv"""'], {'delimiter': '""","""', 'dtype': '"""U75"""', 'skip_header': '(1)'}), "('world_alcohol.csv', delimiter=',', dtype='U75', skip_header=1)\n", (326, 390), True, 'import numpy as np\n')] |
# Copyright 2021 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from . import circuit_blocks
import cirq
import cirq_google
import numpy as np
_qubits = cirq.LineQubit.range(2)
@pytest.mark.parametrize(
"known_circuit, compiled_ops",
(
([cirq.SWAP(*_qubits)], circuit_blocks.swap_block(_qubits)),
(
[cirq.H(_qubits[0]), cirq.CNOT(*_qubits)],
circuit_blocks.bell_pair_block(_qubits),
),
(
[cirq.CNOT(*_qubits), cirq.H(_qubits[0])],
circuit_blocks.un_bell_pair_block(_qubits),
),
(
[
cirq.X(_qubits[0]) ** 0.5,
cirq.PhasedXZGate(
axis_phase_exponent=0.25, x_exponent=0.5, z_exponent=0
)(_qubits[1]),
cirq_google.SycamoreGate()(*_qubits),
],
circuit_blocks.scrambling_block(_qubits, [0, 3]),
),
(
[
cirq.Y(_qubits[0]) ** 0.5,
cirq.Y(_qubits[1]) ** 0.5,
cirq_google.SycamoreGate()(*_qubits),
],
circuit_blocks.scrambling_block(_qubits, [2, 2]),
),
),
)
def test_known_blocks_equal(known_circuit, compiled_ops):
desired_u = cirq.unitary(cirq.Circuit(known_circuit))
actual_u = cirq.unitary(cirq.Circuit(compiled_ops))
assert cirq.equal_up_to_global_phase(actual_u, desired_u)
def test_tsym_block_real():
tsym_circuit = circuit_blocks.tsym_block(_qubits, [0, 0]) # no rotations.
tsym_u = cirq.unitary(cirq.Circuit(tsym_circuit))
assert np.all(tsym_u.imag < 1e-6)
def test_block_1d_circuit():
depth = 8
n_qubits = 11
qubits = cirq.LineQubit.range(n_qubits)
def _simple_fun(pairs, unused):
assert len(unused) == 1
return [cirq.CNOT(*pairs).with_tags(str(unused[0]))]
test_block_circuit = circuit_blocks.block_1d_circuit(
qubits, depth, _simple_fun, np.vstack(np.arange((depth * len(qubits) // 2)))
)
assert len(test_block_circuit) == depth
assert len(test_block_circuit.all_qubits()) == n_qubits
tot_i = 0
for i, mom in enumerate(test_block_circuit):
for op in mom:
assert isinstance(op.gate, type(cirq.CNOT))
assert op.tags[0] == str(tot_i)
tot_i += 1
# Number of operations and working depth will
# always have parity that disagrees if number of
# qubits is odd.
assert i % 2 != tot_i % 2
def test_z_basis_gate():
assert circuit_blocks.inv_z_basis_gate("Z") == cirq.I
assert circuit_blocks.inv_z_basis_gate("X") == cirq.H
assert circuit_blocks.inv_z_basis_gate("Y") == cirq.PhasedXZGate(
axis_phase_exponent=-0.5, x_exponent=0.5, z_exponent=-0.5
)
| [
"cirq.equal_up_to_global_phase",
"cirq.SWAP",
"cirq.CNOT",
"cirq.H",
"cirq.LineQubit.range",
"cirq.Circuit",
"cirq.PhasedXZGate",
"cirq.Y",
"cirq_google.SycamoreGate",
"cirq.X",
"numpy.all"
] | [((676, 699), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['(2)'], {}), '(2)\n', (696, 699), False, 'import cirq\n'), ((1893, 1943), 'cirq.equal_up_to_global_phase', 'cirq.equal_up_to_global_phase', (['actual_u', 'desired_u'], {}), '(actual_u, desired_u)\n', (1922, 1943), False, 'import cirq\n'), ((2118, 2145), 'numpy.all', 'np.all', (['(tsym_u.imag < 1e-06)'], {}), '(tsym_u.imag < 1e-06)\n', (2124, 2145), True, 'import numpy as np\n'), ((2221, 2251), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['n_qubits'], {}), '(n_qubits)\n', (2241, 2251), False, 'import cirq\n'), ((1797, 1824), 'cirq.Circuit', 'cirq.Circuit', (['known_circuit'], {}), '(known_circuit)\n', (1809, 1824), False, 'import cirq\n'), ((1854, 1880), 'cirq.Circuit', 'cirq.Circuit', (['compiled_ops'], {}), '(compiled_ops)\n', (1866, 1880), False, 'import cirq\n'), ((2079, 2105), 'cirq.Circuit', 'cirq.Circuit', (['tsym_circuit'], {}), '(tsym_circuit)\n', (2091, 2105), False, 'import cirq\n'), ((3211, 3287), 'cirq.PhasedXZGate', 'cirq.PhasedXZGate', ([], {'axis_phase_exponent': '(-0.5)', 'x_exponent': '(0.5)', 'z_exponent': '(-0.5)'}), '(axis_phase_exponent=-0.5, x_exponent=0.5, z_exponent=-0.5)\n', (3228, 3287), False, 'import cirq\n'), ((779, 798), 'cirq.SWAP', 'cirq.SWAP', (['*_qubits'], {}), '(*_qubits)\n', (788, 798), False, 'import cirq\n'), ((861, 879), 'cirq.H', 'cirq.H', (['_qubits[0]'], {}), '(_qubits[0])\n', (867, 879), False, 'import cirq\n'), ((881, 900), 'cirq.CNOT', 'cirq.CNOT', (['*_qubits'], {}), '(*_qubits)\n', (890, 900), False, 'import cirq\n'), ((990, 1009), 'cirq.CNOT', 'cirq.CNOT', (['*_qubits'], {}), '(*_qubits)\n', (999, 1009), False, 'import cirq\n'), ((1011, 1029), 'cirq.H', 'cirq.H', (['_qubits[0]'], {}), '(_qubits[0])\n', (1017, 1029), False, 'import cirq\n'), ((1139, 1157), 'cirq.X', 'cirq.X', (['_qubits[0]'], {}), '(_qubits[0])\n', (1145, 1157), False, 'import cirq\n'), ((1182, 1255), 'cirq.PhasedXZGate', 'cirq.PhasedXZGate', ([], {'axis_phase_exponent': 
'(0.25)', 'x_exponent': '(0.5)', 'z_exponent': '(0)'}), '(axis_phase_exponent=0.25, x_exponent=0.5, z_exponent=0)\n', (1199, 1255), False, 'import cirq\n'), ((1323, 1349), 'cirq_google.SycamoreGate', 'cirq_google.SycamoreGate', ([], {}), '()\n', (1347, 1349), False, 'import cirq_google\n'), ((1489, 1507), 'cirq.Y', 'cirq.Y', (['_qubits[0]'], {}), '(_qubits[0])\n', (1495, 1507), False, 'import cirq\n'), ((1532, 1550), 'cirq.Y', 'cirq.Y', (['_qubits[1]'], {}), '(_qubits[1])\n', (1538, 1550), False, 'import cirq\n'), ((1575, 1601), 'cirq_google.SycamoreGate', 'cirq_google.SycamoreGate', ([], {}), '()\n', (1599, 1601), False, 'import cirq_google\n'), ((2337, 2354), 'cirq.CNOT', 'cirq.CNOT', (['*pairs'], {}), '(*pairs)\n', (2346, 2354), False, 'import cirq\n')] |
from RecoEgamma.EgammaElectronProducers.gsfElectrons_cfi import ecalDrivenGsfElectrons
lowPtGsfElectronsPreRegression = ecalDrivenGsfElectrons.clone(gsfElectronCoresTag = "lowPtGsfElectronCores")
from Configuration.Eras.Modifier_fastSim_cff import fastSim
fastSim.toModify(lowPtGsfElectronsPreRegression,ctfTracksTag = "generalTracksBeforeMixing")
| [
"RecoEgamma.EgammaElectronProducers.gsfElectrons_cfi.ecalDrivenGsfElectrons.clone",
"Configuration.Eras.Modifier_fastSim_cff.fastSim.toModify"
] | [((121, 194), 'RecoEgamma.EgammaElectronProducers.gsfElectrons_cfi.ecalDrivenGsfElectrons.clone', 'ecalDrivenGsfElectrons.clone', ([], {'gsfElectronCoresTag': '"""lowPtGsfElectronCores"""'}), "(gsfElectronCoresTag='lowPtGsfElectronCores')\n", (149, 194), False, 'from RecoEgamma.EgammaElectronProducers.gsfElectrons_cfi import ecalDrivenGsfElectrons\n'), ((258, 353), 'Configuration.Eras.Modifier_fastSim_cff.fastSim.toModify', 'fastSim.toModify', (['lowPtGsfElectronsPreRegression'], {'ctfTracksTag': '"""generalTracksBeforeMixing"""'}), "(lowPtGsfElectronsPreRegression, ctfTracksTag=\n 'generalTracksBeforeMixing')\n", (274, 353), False, 'from Configuration.Eras.Modifier_fastSim_cff import fastSim\n')] |
# Generated by Django 3.0.5 on 2020-07-08 20:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('programs', '0003_program_total_lines'),
]
operations = [
migrations.AlterField(
model_name='program',
name='finish_date',
field=models.DateField(null=True),
),
migrations.AlterField(
model_name='program',
name='start_date',
field=models.DateField(),
),
]
| [
"django.db.models.DateField"
] | [((344, 371), 'django.db.models.DateField', 'models.DateField', ([], {'null': '(True)'}), '(null=True)\n', (360, 371), False, 'from django.db import migrations, models\n'), ((498, 516), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (514, 516), False, 'from django.db import migrations, models\n')] |
###
# Python http post example.
#
# License - MIT.
###
import os
# pip install requests.
import requests
# pip install lxml
# pip install beautifulsoup4
from bs4 import BeautifulSoup
# login github class.
class login_github():
# {
# Initialization function.
def __init__(self):
# {
# Chromium core browser user agent.
self._headers = {
'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.74 Safari/537.36'
}
self._login_page = 'https://github.com/login'
self._session_page = 'https://github.com/session'
self._session = requests.Session()
# }
# Close session.
def close(self):
# {
self._session.close()
# }
# Get html data.
def datas(self, url_path):
# {
datas = self._session.get(url = url_path, headers = self._headers)
return datas
# }
# Http Get.
def get(self):
# {
html = requests.get(url = self._login_page, headers = self._headers)
soup = BeautifulSoup(html.text, 'lxml')
tokens = soup.find_all('input', type="hidden")[1]
attrs = tokens.attrs['value']
return attrs
# }
# Http Post.
def post(self, Username, Password):
# {
data = {
'commit' : 'Sign in',
'utf8' : ' ✓',
'authenticity_token': self.get(),
'login' : Username,
'password' : Password,
'webauthn-support': ' supported'
}
# Post.
res = self._session.post(
url = self._session_page,
data = data,
headers = self._headers
)
print(res.status_code)
# }
# }
# Main function.
def main():
# {
test_page = 'https://github.com/torvalds/linux'
print('Login Github !')
Username = input('Username or email address: ')
Password = input('Password: ')
login = login_github()
# login.
login.post(Username, Password)
# get data.
datas = login.datas(test_page)
with open('test.html', 'wb') as fd:
fd.write(datas.content)
# close.
login.close()
# }
# Program entry.
if '__main__' == __name__:
main()
| [
"bs4.BeautifulSoup",
"requests.Session",
"requests.get"
] | [((683, 701), 'requests.Session', 'requests.Session', ([], {}), '()\n', (699, 701), False, 'import requests\n'), ((1030, 1087), 'requests.get', 'requests.get', ([], {'url': 'self._login_page', 'headers': 'self._headers'}), '(url=self._login_page, headers=self._headers)\n', (1042, 1087), False, 'import requests\n'), ((1110, 1142), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html.text', '"""lxml"""'], {}), "(html.text, 'lxml')\n", (1123, 1142), False, 'from bs4 import BeautifulSoup\n')] |
from yattag import Doc
from .CollectionEffort import CollectionEffort
from .MeasureCompact import MeasureCompact
from .NetInformation import NetInformation
from .SimpleContent import CollectionDescriptionText, PassCount
class BiologicalHabitatCollectionInformation:
"""
Allows for the reporting of biological habitat sample collection information.
"""
__collectionDuration: MeasureCompact
__collectionArea: MeasureCompact
__collectionEffort: CollectionEffort
__reachLengthMeasure: MeasureCompact
__reachWidthMeasure: MeasureCompact
__collectionDescriptionText: CollectionDescriptionText
__passCount: PassCount
__netInformation: NetInformation
def __init__(
self,
o: dict = None,
*,
collectionDuration: MeasureCompact = None,
collectionArea: MeasureCompact = None,
collectionEffort: CollectionEffort = None,
reachLengthMeasure: MeasureCompact = None,
reachWidthMeasure: MeasureCompact = None,
collectionDescriptionText: CollectionDescriptionText = None,
passCount: PassCount = None,
netInformation: NetInformation = None
):
if isinstance(o, BiologicalHabitatCollectionInformation):
# Assign attributes from objects without typechecking
self.__collectionDuration = o.collectionDuration
self.__collectionArea = o.collectionArea
self.__collectionEffort = o.collectionEffort
self.__reachLengthMeasure = o.reachLengthMeasure
self.__reachWidthMeasure = o.reachWidthMeasure
self.__collectionDescriptionText = o.collectionDescriptionText
self.__passCount = o.passCount
self.__netInformation = o.netInformation
elif isinstance(o, dict):
# Assign attributes from dictionary with typechecking
self.collectionDuration = o.get("collectionDuration")
self.collectionArea = o.get("collectionArea")
self.collectionEffort = o.get("collectionEffort")
self.reachLengthMeasure = o.get("reachLengthMeasure")
self.reachWidthMeasure = o.get("reachWidthMeasure")
self.collectionDescriptionText = o.get("collectionDescriptionText")
self.passCount = o.get("passCount")
self.netInformation = o.get("netInformation")
else:
# Assign attributes from named keywords with typechecking
self.collectionDuration = collectionDuration
self.collectionArea = collectionArea
self.collectionEffort = collectionEffort
self.reachLengthMeasure = reachLengthMeasure
self.reachWidthMeasure = reachWidthMeasure
self.collectionDescriptionText = collectionDescriptionText
self.passCount = passCount
self.netInformation = netInformation
@property
def collectionDuration(self) -> MeasureCompact:
"""
The length of time a collection procedure or protocol was performed (e.g. total
energized time for electrofishing, or total time kick net used).
"""
return self.__collectionDuration
@collectionDuration.setter
def collectionDuration(self, val: MeasureCompact) -> None:
"""
The length of time a collection procedure or protocol was performed (e.g. total
energized time for electrofishing, or total time kick net used).
"""
self.__collectionDuration = None if val is None else MeasureCompact(val)
@property
def collectionArea(self) -> MeasureCompact:
"""
The area of a collection procedure or protocol was performed (e.g. total area
coverage for electrofishing, or total area kick net used).
"""
return self.__collectionArea
@collectionArea.setter
def collectionArea(self, val: MeasureCompact) -> None:
"""
The area of a collection procedure or protocol was performed (e.g. total area
coverage for electrofishing, or total area kick net used).
"""
self.__collectionArea = None if val is None else MeasureCompact(val)
@property
def collectionEffort(self) -> CollectionEffort:
return self.__collectionEffort
@collectionEffort.setter
def collectionEffort(self, val: CollectionEffort) -> None:
self.__collectionEffort = None if val is None else CollectionEffort(val)
@property
def reachLengthMeasure(self) -> MeasureCompact:
"""
A measurement of the water body length distance in which the procedure or
protocol was performed.
"""
return self.__reachLengthMeasure
@reachLengthMeasure.setter
def reachLengthMeasure(self, val: MeasureCompact) -> None:
"""
A measurement of the water body length distance in which the procedure or
protocol was performed.
"""
self.__reachLengthMeasure = None if val is None else MeasureCompact(val)
@property
def reachWidthMeasure(self) -> MeasureCompact:
"""
A measurement of the reach width during collection procedures.
"""
return self.__reachWidthMeasure
@reachWidthMeasure.setter
def reachWidthMeasure(self, val: MeasureCompact) -> None:
"""
A measurement of the reach width during collection procedures.
"""
self.__reachWidthMeasure = None if val is None else MeasureCompact(val)
@property
def collectionDescriptionText(self) -> CollectionDescriptionText:
return self.__collectionDescriptionText
@collectionDescriptionText.setter
def collectionDescriptionText(self, val: CollectionDescriptionText) -> None:
self.__collectionDescriptionText = (
None if val is None else CollectionDescriptionText(val)
)
@property
def passCount(self) -> PassCount:
return self.__passCount
@passCount.setter
def passCount(self, val: PassCount) -> None:
self.__passCount = None if val is None else PassCount(val)
@property
def netInformation(self) -> NetInformation:
return self.__netInformation
@netInformation.setter
def netInformation(self, val: NetInformation) -> None:
self.__netInformation = None if val is None else NetInformation(val)
def generateXML(self, name: str = "BiologicalHabitatCollectionInformation") -> str:
doc = Doc()
asis = doc.asis
line = doc.line
tag = doc.tag
with tag(name):
if self.__collectionDuration is not None:
asis(self.__collectionDuration.generateXML("CollectionDuration"))
if self.__collectionArea is not None:
asis(self.__collectionArea.generateXML("CollectionArea"))
if self.__collectionEffort is not None:
asis(self.__collectionEffort.generateXML("CollectionEffort"))
if self.__reachLengthMeasure is not None:
asis(self.__reachLengthMeasure.generateXML("ReachLengthMeasure"))
if self.__reachWidthMeasure is not None:
asis(self.__reachWidthMeasure.generateXML("ReachWidthMeasure"))
if self.__collectionDescriptionText is not None:
line("CollectionDescriptionText", self.__collectionDescriptionText)
if self.__passCount is not None:
line("PassCount", self.__passCount)
if self.__netInformation is not None:
asis(self.__netInformation.generateXML("NetInformation"))
return doc.getvalue()
| [
"yattag.Doc"
] | [((6591, 6596), 'yattag.Doc', 'Doc', ([], {}), '()\n', (6594, 6596), False, 'from yattag import Doc\n')] |
"""title
https://adventofcode.com/2021/day/23
"""
from heapq import heappush, heappop
import itertools
entry_finder = {} # mapping of tasks to entries
REMOVED = '<removed-task>' # placeholder for a removed task
counter = itertools.count() # unique sequence count
def add_task(pq, task, priority=0):
'Add a new task or update the priority of an existing task'
if task in entry_finder:
remove_task(task)
count = next(counter)
entry = [priority, count, task]
entry_finder[task] = entry
heappush(pq, entry)
def remove_task(task):
'Mark an existing task as REMOVED. Raise KeyError if not found.'
entry = entry_finder.pop(task)
entry[-1] = REMOVED
def pop_task(pq):
'Remove and return the lowest priority task. Raise KeyError if empty.'
while pq:
priority, count, task = heappop(pq)
if task is not REMOVED:
del entry_finder[task]
return task
raise KeyError('pop from an empty priority queue')
COST = dict(zip('abcd', [1, 10, 100, 1000]))
HOME = ['a1', 'a2', 'b1', 'b2', 'c1', 'c2', 'd1', 'd2']
EMPTY = '-'
MAP = {
'a2': [('a1', 1, '')],
'a1': [('a2', 1, ''), ('h1', 3, '2'), ('h2', 2, ''), ('h3', 2, ''), ('h4', 4, '3'),
('h5', 6, '34'), ('h6', 8, '345'), ('h7', 9, '3456')],
'b2': [('b1', 1, '')],
'b1': [('b2', 1, ''), ('h1', 5, '23'), ('h2', 4, '3'), ('h3', 2, ''), ('h4', 2, ''),
('h5', 4, '4'), ('h6', 6, '45'), ('h7', 7, '456')],
'c2': [('c1', 1, '')],
'c1': [('c2', 1, ''), ('h1', 7, '234'), ('h2', 6, '34'), ('h3', 4, '4'), ('h4', 2, ''),
('h5', 2, ''), ('h6', 4, '5'), ('h7', 5, '56')],
'd2': [('d1', 1, '')],
'd1': [('d2', 1, ''), ('h1', 9, '2345'), ('h2', 8, '345'), ('h3', 6, '45'), ('h4', 4, '5'),
('h5', 2, ''), ('h6', 2, ''), ('h7', 3, '6')],
'h1': [('a1', 3, '2'), ('b1', 5, '23'), ('c1', 7, '234'), ('d1', 9, '2345')],
'h2': [('a1', 2, ''), ('b1', 4, '3'), ('c1', 6, '34'), ('d1', 8, '345')],
'h3': [('a1', 2, ''), ('b1', 2, ''), ('c1', 4, '4'), ('d1', 6, '45')],
'h4': [('a1', 4, '3'), ('b1', 2, ''), ('c1', 2, ''), ('d1', 4, '5')],
'h5': [('a1', 6, '34'), ('b1', 4, '4'), ('c1', 2, ''), ('d1', 2, '')],
'h6': [('a1', 8, '345'), ('b1', 6, '45'), ('c1', 4, '5'), ('d1', 2, '')],
'h7': [('a1', 9, '3456'), ('b1', 7, '456'), ('c1', 5, '56'), ('d1', 3, '6')],
}
MAP = {
'a2': [('a1', 1)],
'a1': [('a2', 1), ('h2', 2), ('h3', 2)],
'b2': [('b1', 1)],
'b1': [('b2', 1), ('h3', 2), ('h4', 2)],
'c2': [('c1', 1)],
'c1': [('c2', 1), ('h4', 2), ('h5', 2)],
'd2': [('d1', 1)],
'd1': [('d2', 1), ('h5', 2), ('h6', 2)],
'h1': [('h2', 1)],
'h2': [('h1', 1), ('a1', 2), ('h3', 2)],
'h3': [('a1', 2), ('b1', 2), ('h2', 2), ('h4', 2)],
'h4': [('b1', 2), ('c1', 2), ('h3', 2), ('h5', 2)],
'h5': [('c1', 2), ('d1', 2), ('h4', 2), ('h6', 2)],
'h6': [('d1', 2), ('h5', 2), ('h7', 1)],
'h7': [('h6', 1)],
}
POSITIONS = list(sorted(MAP))
class Amphipods:
def __init__(self, positions):
# {location: occupant}
self.cost = 0
self.pods = positions
def __repr__(self):
return ' '.join([pos + ':' + self.pods[pos] for pos in POSITIONS]).upper()
def get_str_hash(self):
return ''.join([self.pods[pos] for pos in POSITIONS])
def wins(self):
return self.get_str_hash()[:8] == 'aabbccdd'
def is_blocked(self, blocked):
return any([True for b in blocked if self.pods['h' + b] != EMPTY])
def is_move_valid(self, pos, target, char):
to_hallway = target[0] == 'h'
up_pocket = pos[1] == '2' and target[1] == '1'
into_right_pocket = target[0] == char and pos[0] == 'h'
deeper = pos[1] == '1' and target[1] == '2'
make_space = pos[0] == char and self.pods[pos[0] + '2'][0] != char
leave_good_position = char == pos[0] and (up_pocket or (to_hallway and not make_space))
return (into_right_pocket or deeper or to_hallway or up_pocket) and not leave_good_position
def get_possible_moves(self):
for pos in self.pods:
if self.pods[pos] != EMPTY:
for target, nsteps in MAP[pos]:
char = self.pods[pos]
if (
self.pods[target] == EMPTY and
self.is_move_valid(pos, target, char)
# and not self.is_blocked(blocked)
):
new = self.pods.copy()
new[pos] = EMPTY
new[target] = char
a = Amphipods(new)
a.cost = self.cost + nsteps * COST[char]
yield a
def solve(data):
start = Amphipods(data)
candidates = [] # pq
add_task(candidates, start, 0)
visited = set()
i = 0
while candidates:
i += 1
cand = pop_task(candidates)
if cand.wins():
return cand.cost
h = cand.get_str_hash()
if h in visited:
continue
visited.add(h)
for move in cand.get_possible_moves():
add_task(candidates, move, move.cost)
if i % 10000 == 0:
print(len(candidates), cand.cost)
def solve2(data):
return data
if __name__ == '__main__':
INPUT = {
'a1': 'd',
'a2': 'c',
'b1': 'b',
'b2': 'a',
'c1': 'c',
'c2': 'd',
'd1': 'a',
'd2': 'b',
'h1': '-',
'h2': '-',
'h3': '-',
'h4': '-',
'h5': '-',
'h6': '-',
'h7': '-',
}
result = solve(INPUT)
print(f'Example 1: {result}')
# 15338
# result = solve2(input_data)
# print(f'Example 2: {result}')
| [
"heapq.heappush",
"itertools.count",
"heapq.heappop"
] | [((244, 261), 'itertools.count', 'itertools.count', ([], {}), '()\n', (259, 261), False, 'import itertools\n'), ((543, 562), 'heapq.heappush', 'heappush', (['pq', 'entry'], {}), '(pq, entry)\n', (551, 562), False, 'from heapq import heappush, heappop\n'), ((856, 867), 'heapq.heappop', 'heappop', (['pq'], {}), '(pq)\n', (863, 867), False, 'from heapq import heappush, heappop\n')] |
'''
REST API for processing free-text diagnosis statements into either:
(1) OWL class expressions
(2) FHIR Condition Resources
(3) SNOMED CT Expressions
(4) Raw concept relationship graphs
'''
from flask import Flask, request
import api
from transformers.transform import FhirConditionTransformer, SnomedExpressionTransformer, OwlTransformer, DefaultTransformer
app = Flask(__name__)
transformers = {
'owl': OwlTransformer(),
'fhir': FhirConditionTransformer(),
'snomed': SnomedExpressionTransformer(),
'raw': DefaultTransformer()
}
get_args = {
'owl': lambda x: {
'expression': x.get('expression', 'true') == 'true',
'instances': x.get('instances', 'true') == 'true'
},
'fhir': lambda x: {},
'snomed': lambda x: {},
'raw': lambda x: {}
}
@app.route('/process', methods=['GET'])
def predict():
dx = request.args.get('dx')
format = request.args.get('format', default='snomed')
graph = api.process(dx)[1]
t = transformers[format]
return t.serialize(t.transform(graph, **get_args[format](request.args)))
# If we're running in stand alone mode, run the application
if __name__ == '__main__':
app.run(debug=False, port=5150) | [
"flask.request.args.get",
"flask.Flask",
"api.process",
"transformers.transform.DefaultTransformer",
"transformers.transform.FhirConditionTransformer",
"transformers.transform.SnomedExpressionTransformer",
"transformers.transform.OwlTransformer"
] | [((387, 402), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (392, 402), False, 'from flask import Flask, request\n'), ((432, 448), 'transformers.transform.OwlTransformer', 'OwlTransformer', ([], {}), '()\n', (446, 448), False, 'from transformers.transform import FhirConditionTransformer, SnomedExpressionTransformer, OwlTransformer, DefaultTransformer\n'), ((462, 488), 'transformers.transform.FhirConditionTransformer', 'FhirConditionTransformer', ([], {}), '()\n', (486, 488), False, 'from transformers.transform import FhirConditionTransformer, SnomedExpressionTransformer, OwlTransformer, DefaultTransformer\n'), ((504, 533), 'transformers.transform.SnomedExpressionTransformer', 'SnomedExpressionTransformer', ([], {}), '()\n', (531, 533), False, 'from transformers.transform import FhirConditionTransformer, SnomedExpressionTransformer, OwlTransformer, DefaultTransformer\n'), ((546, 566), 'transformers.transform.DefaultTransformer', 'DefaultTransformer', ([], {}), '()\n', (564, 566), False, 'from transformers.transform import FhirConditionTransformer, SnomedExpressionTransformer, OwlTransformer, DefaultTransformer\n'), ((877, 899), 'flask.request.args.get', 'request.args.get', (['"""dx"""'], {}), "('dx')\n", (893, 899), False, 'from flask import Flask, request\n'), ((914, 958), 'flask.request.args.get', 'request.args.get', (['"""format"""'], {'default': '"""snomed"""'}), "('format', default='snomed')\n", (930, 958), False, 'from flask import Flask, request\n'), ((972, 987), 'api.process', 'api.process', (['dx'], {}), '(dx)\n', (983, 987), False, 'import api\n')] |
from pyimagesearch.centroidtracker import CentroidTracker
from pyimagesearch.trackableobject import TrackableObject
from imutils.video import VideoStream
from imutils.video import FPS
import numpy as np
import argparse
import imutils
import time
import dlib
import cv2
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--prototxt", required=True,
help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", required=True,
help="path to Caffe pre-trained model")
ap.add_argument("-i", "--input", type=str,
help="path to optional input video file")
ap.add_argument("-o", "--output", type=str,
help="path to optional output video file")
ap.add_argument("-c", "--confidence", type=float, default=0.4,
help="minimum probability to filter weak detections")
ap.add_argument("-s", "--skip-frames", type=int, default=30,
help="# of skip frames between detections")
args = vars(ap.parse_args())
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
"sofa", "train", "tvmonitor"]
# load our serialized model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])
if not args.get("input", False):
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(2.0)
# otherwise, grab a reference to the video file
else:
print("[INFO] opening video file...")
vs = cv2.VideoCapture(args["input"])
writer = None
# initialize the frame dimensions (we'll set them as soon as we read
# the first frame from the video)
W = None
H = None
# instantiate our centroid tracker, then initialize a list to store
# each of our dlib correlation trackers, followed by a dictionary to
# map each unique object ID to a TrackableObject
ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
trackers = []
trackableObjects = {}
# initialize the total number of frames processed thus far, along
# with the total number of objects that have moved either up or down
totalFrames = 0
totalDown = 0
totalUp = 0
# start the frames per second throughput estimator
fps = FPS().start()
while True:
# grab the next frame and handle if we are reading from either
# VideoCapture or VideoStream
frame = vs.read()
frame = frame[1] if args.get("input", False) else frame
# if we are viewing a video and we did not grab a frame then we
# have reached the end of the video
if args["input"] is not None and frame is None:
break
# resize the frame to have a maximum width of 500 pixels (the
# less data we have, the faster we can process it), then convert
# the frame from BGR to RGB for dlib
frame = imutils.resize(frame, width=500)
rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# if the frame dimensions are empty, set them
if W is None or H is None:
(H, W) = frame.shape[:2]
# if we are supposed to be writing a video to disk, initialize
# the writer
if args["output"] is not None and writer is None:
fourcc = cv2.VideoWriter_fourcc(*"MJPG")
writer = cv2.VideoWriter(args["output"], fourcc, 30,
(W, H), True)
status = "Waiting"
rects = []
# check to see if we should run a more computationally expensive
# object detection method to aid our tracker
if totalFrames % args["skip_frames"] == 0:
# set the status and initialize our new set of object trackers
status = "Detecting"
trackers = []
# convert the frame to a blob and pass the blob through the
# network and obtain the detections
blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
net.setInput(blob)
detections = net.forward()
for i in np.arange(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated
# with the prediction
confidence = detections[0, 0, i, 2]
# filter out weak detections by requiring a minimum
# confidence
if confidence > args["confidence"]:
# extract the index of the class label from the
# detections list
idx = int(detections[0, 0, i, 1])
# if the class label is not a person, ignore it
if CLASSES[idx] != "person":
continue
box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
(startX, startY, endX, endY) = box.astype("int")
# construct a dlib rectangle object from the bounding
# box coordinates and then start the dlib correlation
# tracker
tracker = dlib.correlation_tracker()
rect = dlib.rectangle(startX, startY, endX, endY)
tracker.start_track(rgb, rect)
# add the tracker to our list of trackers so we can
# utilize it during skip frames
trackers.append(tracker)
else:
# loop over the trackers
for tracker in trackers:
# set the status of our system to be 'tracking' rather
# than 'waiting' or 'detecting'
status = "Tracking"
# update the tracker and grab the updated position
tracker.update(rgb)
pos = tracker.get_position()
# unpack the position object
startX = int(pos.left())
startY = int(pos.top())
endX = int(pos.right())
endY = int(pos.bottom())
# add the bounding box coordinates to the rectangles list
rects.append((startX, startY, endX, endY))
cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)
# use the centroid tracker to associate the (1) old object
# centroids with (2) the newly computed object centroids
objects = ct.update(rects)
text = "ID {}".format(objectID)
cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
# construct a tuple of information we will be displaying on the
# frame
info = [
("Up", totalUp),
("Down", totalDown),
("Status", status),
]
# loop over the info tuples and draw them on our frame
for (i, (k, v)) in enumerate(info):
text = "{}: {}".format(k, v)
cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
if writer is not None:
writer.write(frame)
# show the output frame
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
# if the q key was pressed, break from the loop
if key == ord("q"):
break
# increment the total number of frames processed thus far and
# then update the FPS counter
totalFrames += 1
fps.update()
fps.stop()
# check to see if we need to release the video writer pointer
if writer is not None:
writer.release()
# if we are not using a video file, stop the camera video stream
if not args.get("input", False):
vs.stop()
# otherwise, release the video file pointer
else:
vs.release()
# close any open windows
cv2.destroyAllWindows() | [
"time.sleep",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"numpy.arange",
"imutils.video.VideoStream",
"argparse.ArgumentParser",
"dlib.rectangle",
"cv2.line",
"cv2.dnn.readNetFromCaffe",
"cv2.VideoWriter",
"imutils.video.FPS",
"cv2.VideoWriter_fourcc",
"cv2.waitKey",
"cv2.dnn.... | [((276, 301), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (299, 301), False, 'import argparse\n'), ((1220, 1277), 'cv2.dnn.readNetFromCaffe', 'cv2.dnn.readNetFromCaffe', (["args['prototxt']", "args['model']"], {}), "(args['prototxt'], args['model'])\n", (1244, 1277), False, 'import cv2\n'), ((1874, 1924), 'pyimagesearch.centroidtracker.CentroidTracker', 'CentroidTracker', ([], {'maxDisappeared': '(40)', 'maxDistance': '(50)'}), '(maxDisappeared=40, maxDistance=50)\n', (1889, 1924), False, 'from pyimagesearch.centroidtracker import CentroidTracker\n'), ((3699, 3732), 'numpy.arange', 'np.arange', (['(0)', 'detections.shape[2]'], {}), '(0, detections.shape[2])\n', (3708, 3732), True, 'import numpy as np\n'), ((5362, 5421), 'cv2.line', 'cv2.line', (['frame', '(0, H // 2)', '(W, H // 2)', '(0, 255, 255)', '(2)'], {}), '(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)\n', (5370, 5421), False, 'import cv2\n'), ((5600, 5714), 'cv2.putText', 'cv2.putText', (['frame', 'text', '(centroid[0] - 10, centroid[1] - 10)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(0, 255, 0)', '(2)'], {}), '(frame, text, (centroid[0] - 10, centroid[1] - 10), cv2.\n FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n', (5611, 5714), False, 'import cv2\n'), ((5710, 5775), 'cv2.circle', 'cv2.circle', (['frame', '(centroid[0], centroid[1])', '(4)', '(0, 255, 0)', '(-1)'], {}), '(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)\n', (5720, 5775), False, 'import cv2\n'), ((6860, 6883), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (6881, 6883), False, 'import cv2\n'), ((1390, 1405), 'time.sleep', 'time.sleep', (['(2.0)'], {}), '(2.0)\n', (1400, 1405), False, 'import time\n'), ((1507, 1538), 'cv2.VideoCapture', 'cv2.VideoCapture', (["args['input']"], {}), "(args['input'])\n", (1523, 1538), False, 'import cv2\n'), ((2738, 2770), 'imutils.resize', 'imutils.resize', (['frame'], {'width': '(500)'}), '(frame, width=500)\n', (2752, 2770), False, 
'import imutils\n'), ((2778, 2816), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (2790, 2816), False, 'import cv2\n'), ((3581, 3634), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['frame', '(0.007843)', '(W, H)', '(127.5)'], {}), '(frame, 0.007843, (W, H), 127.5)\n', (3602, 3634), False, 'import cv2\n'), ((6059, 6159), 'cv2.putText', 'cv2.putText', (['frame', 'text', '(10, H - (i * 20 + 20))', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.6)', '(0, 0, 255)', '(2)'], {}), '(frame, text, (10, H - (i * 20 + 20)), cv2.FONT_HERSHEY_SIMPLEX,\n 0.6, (0, 0, 255), 2)\n', (6070, 6159), False, 'import cv2\n'), ((2199, 2204), 'imutils.video.FPS', 'FPS', ([], {}), '()\n', (2202, 2204), False, 'from imutils.video import FPS\n'), ((3063, 3094), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'MJPG'"], {}), "(*'MJPG')\n", (3085, 3094), False, 'import cv2\n'), ((3106, 3163), 'cv2.VideoWriter', 'cv2.VideoWriter', (["args['output']", 'fourcc', '(30)', '(W, H)', '(True)'], {}), "(args['output'], fourcc, 30, (W, H), True)\n", (3121, 3163), False, 'import cv2\n'), ((6256, 6282), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'frame'], {}), "('Frame', frame)\n", (6266, 6282), False, 'import cv2\n'), ((1362, 1380), 'imutils.video.VideoStream', 'VideoStream', ([], {'src': '(0)'}), '(src=0)\n', (1373, 1380), False, 'from imutils.video import VideoStream\n'), ((4481, 4507), 'dlib.correlation_tracker', 'dlib.correlation_tracker', ([], {}), '()\n', (4505, 4507), False, 'import dlib\n'), ((4520, 4562), 'dlib.rectangle', 'dlib.rectangle', (['startX', 'startY', 'endX', 'endY'], {}), '(startX, startY, endX, endY)\n', (4534, 4562), False, 'import dlib\n'), ((6297, 6311), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (6308, 6311), False, 'import cv2\n'), ((4222, 4244), 'numpy.array', 'np.array', (['[W, H, W, H]'], {}), '([W, H, W, H])\n', (4230, 4244), True, 'import numpy as np\n')] |
#!/usr/bin/python
from config.utils import *
from elementals import Prompter
from function_context import SourceContext, BinaryContext, IslandContext
import os
import sys
import argparse
import logging
from collections import defaultdict
def recordManualAnchors(library_config, knowledge_config, lib_name, prompter):
    """Record the list of user defined manual anchor matches.
    Args:
        library_config (json): json loaded data from the library's configuration
        knowledge_config (dict): a mapping of all of the accumulated knowledge for the currently analysed binary
        lib_name (str): name of the open source library that will contain these manual anchors
        prompter (prompter): prompter instance
    Return Value:
        Updated knowledge mapping (to be stored back as a *json file)
    """
    # Prepare & load the stats from each file (using the functions file)
    src_file_names = []
    prompter.info("Loading the information regarding the compiled source files")
    prompter.addIndent()
    files_config = library_config[JSON_TAG_FILES]
    for full_file_path in files_config:
        prompter.debug(f"Parsing the canonical representation of file: {full_file_path.split(os.path.sep)[-1]}")
        src_file_names.append(full_file_path)
        parseFileStats(full_file_path, files_config[full_file_path])
    prompter.removeIndent()
    # get the variables from the utils file
    src_functions_list, src_functions_ctx, src_file_mappings = getSourceFunctions()
    # pre-processed list indices (efficiency improvement)
    # func_indices: function name -> list of indices into src_functions_list
    # (the same name may repeat when it is defined in several source files)
    func_indices = defaultdict(list)
    for func_idx, func_name in enumerate(src_functions_list):
        func_indices[func_name].append(func_idx)
    # Start requesting the user to add his manual records
    # manual_anchors: source function index -> user supplied binary address (ea)
    manual_anchors = {}
    prompter.info("Starting the input loop")
    prompter.addIndent()
    finished = False
    while not finished:
        prompter.info("Enter the details for the current manual anchor:")
        parsed_correctly = True
        # Single-pass "loop": every path below ends in a ``break``; the flag
        # merely records whether the current anchor parsed successfully.
        while parsed_correctly:
            function_name = prompter.input("Function Name (case sensitive): ")
            # check existence
            if src_functions_list.count(function_name) == 0:
                prompter.error(f"Function \"{function_name}\" does not exist")
                parsed_correctly = False
                break
            # check uniqueness
            if src_functions_list.count(function_name) > 1:
                # ambiguous name - disambiguate by asking for the file as well
                file_name = prompter.input("File Name (case sensitive): ")
                src_indices = list(filter(lambda x: src_functions_ctx[x].file == file_name, func_indices[function_name]))
                if len(src_indices) == 0:
                    prompter.error(f"Function \"{function_name}\" does not exist in file \"{file_name}\"")
                    parsed_correctly = False
                    break
                src_index = src_indices[0]
            else:
                src_index = func_indices[function_name][0]
            # get the binary address
            bin_ea_str_raw = prompter.input("Function Address (ea in the form: 0x12345678): ")
            if bin_ea_str_raw.startswith("0x"):
                bin_ea_str = bin_ea_str_raw[2:]
            else:
                bin_ea_str = bin_ea_str_raw
            try:
                bin_ea = int(bin_ea_str, 16)
            except ValueError:
                prompter.error(f"Illegal hexa address: \"{bin_ea_str_raw}\"")
                parsed_correctly = False
                break
            # finished successfully :)
            manual_anchors[src_index] = bin_ea
            break
        should_continue = prompter.input("Do you want to add another manual anchor? <Y/N>: ")
        finished = should_continue.lower() != "y"
    prompter.removeIndent()
    # add the info to the json
    if len(manual_anchors) > 0:
        if JSON_TAG_MANUAL_ANCHORS not in knowledge_config:
            knowledge_config[JSON_TAG_MANUAL_ANCHORS] = {}
        all_manual_anchors = knowledge_config[JSON_TAG_MANUAL_ANCHORS]
        if lib_name not in all_manual_anchors:
            all_manual_anchors[lib_name] = {}
        cur_manual_anchors = all_manual_anchors[lib_name]
        # merge the results
        # record layout: [source file, function name, hex ea string, numeric ea]
        for new_index in manual_anchors:
            src_ctx = src_functions_ctx[new_index]
            cur_manual_anchors[str(new_index)] = [src_ctx.file, src_ctx.name, hex(manual_anchors[new_index]), manual_anchors[new_index]]
    # return back the data
    return knowledge_config
def main(args):
    """Run the manual anchors script.
    Args:
        args (list): list of command line arguments
    """
    global disas_cmd
    # argument parser
    parser = argparse.ArgumentParser(description=f"Enables the user to manually defined matches, acting as manual anchors, later to be used by {LIBRARY_NAME}'s Matcher.")
    parser.add_argument("bin", metavar="bin", type=str,
                        help="path to the disassembler's database for the wanted binary")
    parser.add_argument("name", metavar="lib-name", type=str,
                        help="name (case sensitive) of the relevant open source library")
    parser.add_argument("version", metavar="lib-version", type=str,
                        help="version string (case sensitive) as used by the identifier")
    parser.add_argument("config", metavar="configs", type=str,
                        help="path to the *.json \"configs\" directory")
    parser.add_argument("-D", "--debug", action="store_true", help="set logging level to logging.DEBUG")
    parser.add_argument("-W", "--windows", action="store_true", help="signals that the binary was compiled for Windows")
    # parse the args
    args = parser.parse_args(args)
    library_name = args.name
    library_version = args.version
    bin_path = args.bin
    config_path = args.config
    is_debug = args.debug
    is_windows = args.windows
    # open the log
    prompter = Prompter(min_log_level=logging.INFO if not is_debug else logging.DEBUG)
    prompter.info("Starting the Script")
    # use the user supplied flag
    if is_windows:
        setWindowsMode()
    # always init the utils before we start
    initUtils(prompter, None, invoked_before=True)
    # register our contexts
    registerContexts(SourceContext, BinaryContext, IslandContext)
    # Load the information from the relevant library
    lib_config_file = constructConfigPath(library_name, library_version)
    prompter.debug(f"Loading the configuration file for library: {library_name}")
    prompter.addIndent()
    cur_config_path = os.path.join(config_path, lib_config_file)
    if not os.path.exists(cur_config_path):
        prompter.error(f"Missing configuration file ({lib_config_file}) for \"{library_name}\" Version: \"{library_version}\"")
        return
    # Load the configuration file
    # NOTE(review): ``json`` is not imported in this module explicitly; it is
    # presumably re-exported by ``from config.utils import *`` - verify.
    fd = open(cur_config_path, "r")
    library_config = json.load(fd)
    fd.close()
    prompter.removeIndent()
    # Load the existing knowledge config, if exists
    prompter.debug(f"Opening knowledge configuration file from path: {accumulatedKnowledgePath(bin_path)}")
    prompter.addIndent()
    knowledge_config = loadKnowledge(bin_path)
    if knowledge_config is None:
        prompter.debug("Failed to find an existing configuration file")
        knowledge_config = {}
    prompter.removeIndent()
    # receive all of the couples from the user
    knowledge_config = recordManualAnchors(library_config, knowledge_config, library_name, prompter)
    prompter.info("Storing the data to the knowledge configuration file")
    storeKnowledge(knowledge_config, bin_path)
    # finished
    prompter.info("Finished Successfully")
if __name__ == "__main__":
main(sys.argv[1:])
| [
"os.path.exists",
"argparse.ArgumentParser",
"os.path.join",
"elementals.Prompter",
"collections.defaultdict"
] | [((1607, 1624), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1618, 1624), False, 'from collections import defaultdict\n'), ((4715, 4882), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'f"""Enables the user to manually defined matches, acting as manual anchors, later to be used by {LIBRARY_NAME}\'s Matcher."""'}), '(description=\n f"Enables the user to manually defined matches, acting as manual anchors, later to be used by {LIBRARY_NAME}\'s Matcher."\n )\n', (4738, 4882), False, 'import argparse\n'), ((5983, 6054), 'elementals.Prompter', 'Prompter', ([], {'min_log_level': '(logging.INFO if not is_debug else logging.DEBUG)'}), '(min_log_level=logging.INFO if not is_debug else logging.DEBUG)\n', (5991, 6054), False, 'from elementals import Prompter\n'), ((6620, 6662), 'os.path.join', 'os.path.join', (['config_path', 'lib_config_file'], {}), '(config_path, lib_config_file)\n', (6632, 6662), False, 'import os\n'), ((6674, 6705), 'os.path.exists', 'os.path.exists', (['cur_config_path'], {}), '(cur_config_path)\n', (6688, 6705), False, 'import os\n')] |
import copy
import json
import logging
import os
import time
import urllib.parse

import requests
import schedule
from colorama import Fore, init
from lxml import etree
def getICBCNews()->tuple:
    """Return (title, url) of the newest commemorative-coin article on ICBC."""
    logging.debug('Getting icbc news...')
    url = 'https://www.icbc.com.cn/ICBC/纪念币专区/default.htm'
    response = requests.get(url)
    tree = etree.HTML(response.content.decode('utf8'))
    logging.info(f'Response status is {response.status_code}')
    titles = tree.xpath('//span[@class="ChannelSummaryList-insty"]/a/@data-collecting-param')
    links = tree.xpath('//span[@class="ChannelSummaryList-insty"]/a/@href')
    latest_title = titles[0]
    # the href is URL-encoded and relative; decode it and prepend the host
    latest_url = 'https://www.icbc.com.cn' + urllib.parse.unquote(links[0])
    logging.info(f'Got latest news 《{colored(latest_title[:8])}...》')
    return latest_title, latest_url
def getPBCNews()->tuple:
    """Return (title, url) of the newest article from the PBC money & gold page."""
    logging.debug('Getting pbc news...')
    url = 'http://www.pbc.gov.cn/huobijinyinju/147948/147964/index.html'
    response = requests.post(url)
    tree = etree.HTML(response.content.decode('utf8'))
    logging.info(f'Response status is {response.status_code}')
    titles = tree.xpath('//font[@class="newslist_style"]/a/@title')
    links = tree.xpath('//font[@class="newslist_style"]/a/@href')
    latest_title = titles[0]
    # hrefs on this page are site-relative; prepend the host
    latest_url = 'http://www.pbc.gov.cn' + links[0]
    logging.info(f'Got latest news 《{colored(latest_title[:8])}...》')
    return latest_title, latest_url
def colored(string:str)->str:
    """Wrap *string* in ANSI escape codes so it renders red in a terminal."""
    return f'{Fore.RED}{string}{Fore.RESET}'
def main(msgChannel:list)->None:
    """Diff the latest ICBC/PBC articles against the JSON cache and notify.

    Fetches the newest article from each source, compares with the cached
    state in ``commemorativeCoins.json`` and either broadcasts the new
    articles or a heartbeat through every callable in *msgChannel*.

    Args:
        msgChannel: list of notifier callables ``f(title, contents, urls)``.
    """
    icbcNews, icbcUrl = getICBCNews()
    pbcNews, pbcUrl = getPBCNews()
    if not os.path.isfile('commemorativeCoins.json'):
        # Seed the cache with an empty-but-complete structure: the original
        # code created an *empty* file, which made json.load() below raise
        # JSONDecodeError on the very first run.
        with open('commemorativeCoins.json', 'w') as f:
            json.dump({'icbc': {'news': '', 'url': ''},
                       'pbc': {'news': '', 'url': ''}}, f, ensure_ascii=False)
        logging.info('Did not find json log, created one.')
    with open('commemorativeCoins.json', 'r') as f:
        oldVersion = json.load(f)
        logging.debug('Read json log.')
    # deepcopy is required: a shallow .copy() shares the nested dicts, so the
    # assignments below would mutate oldVersion too and the comparison could
    # never detect a change.
    newVersion = copy.deepcopy(oldVersion)
    newVersion['icbc']['news'] = icbcNews
    newVersion['icbc']['url'] = icbcUrl
    newVersion['pbc']['news'] = pbcNews
    newVersion['pbc']['url'] = pbcUrl
    if oldVersion != newVersion:
        # send change
        content = [icbcNews, pbcNews]
        url = [icbcUrl, pbcUrl]
        # colored() expects a str; join the titles first (passing the list
        # raised TypeError on string concatenation in the original code)
        logging.info(f'Found new article {colored(", ".join(content))}')
        for sendMsg in msgChannel:
            sendMsg('纪念币更新', content, url)
        with open('commemorativeCoins.json', 'w') as f:
            json.dump(newVersion, f, ensure_ascii=False)
            logging.info('Wrote to json log.')
    else:
        # send heartbeat
        for sendMsg in msgChannel:
            sendMsg('Heartbeat', ['plan is running, did not find new article'], ['a'])
        logging.info(f'did not find new article')
def sendMail(title:str, contents:list, urls:list)->None:
    """Send an HTML mail whose body is a list of <a> links built from (urls, contents) pairs."""
    import yagmail
    content = ''
    for url, con in zip(urls, contents):
        content += f'<a href="{url}">{con}</a>'
    # NOTE(review): SMTP credentials are hardcoded here - they should be
    # loaded from an environment variable or a secret store instead.
    yag = yagmail.SMTP('<EMAIL>', 'sfrjjkcpkhhsbefa', host='smtp.qq.com')
    yag.send(to='<EMAIL>', subject=title, contents=content)
    logging.info(f'sent mail {title}')
def showToast(title:str, content:list, *args)->None:
    """Pop a Windows 10 toast notification for every entry in *content*."""
    from win10toast import ToastNotifier
    notifier = ToastNotifier()
    for message in content:
        notifier.show_toast(title, message)
    logging.info(f'Made toast {title}')
def showNoti(title:str,content:list,urls:list)->None:
    """Show one termux (Android) notification per (content, url) pair.

    Tapping a notification opens the associated URL via termux-open-url.

    Args:
        title: notification title.
        content: list of message bodies.
        urls: list of URLs, paired element-wise with *content*.
    """
    for c, u in zip(content, urls):
        # Original bug: the command interpolated the whole ``content`` list
        # into --title and never used the per-item body ``c``.
        cmd = f'termux-notification --action "termux-open-url {u}" --content "{c}" --title "{title}"'
        os.system(cmd)
    logging.info(f'Sent notification {title}')
def getEnv()->list:
    """Read the NOTIFIER_CHANNEL env var and map it to notifier callables.

    Channel codes: '1' -> Windows 10 toast, '2' -> email,
    '3' -> Android (termux) notification.  Unknown codes are dropped
    with a warning.  Defaults to '23' when the variable is unset.
    """
    raw = os.getenv('NOTIFIER_CHANNEL','23')
    logging.debug(f'Get system variant NOTIFIER_CHANNEL:{raw}')
    NOTIFIER_CHANNEL = set(raw)
    allowed = set('123')
    if not NOTIFIER_CHANNEL.issubset(allowed):
        NOTIFIER_CHANNEL &= allowed
        logging.warning(colored(f'Only "1" "2" or "3" expected in NOTIFIER_CHANNEL, {NOTIFIER_CHANNEL} was given. Aborted illegal options.'))
    dispatch = {
        '1': showToast,
        '2': sendMail,
        '3': showNoti,
    }
    msgChannel = [dispatch[code] for code in NOTIFIER_CHANNEL]
    logging.info(f'Message sendMsg set as {NOTIFIER_CHANNEL}:{[f.__name__ for f in msgChannel]}')
    return msgChannel
def logPlan():
    """Log every scheduled job tagged 'mainPlan', one repr per line."""
    logging.info('\n'.join(repr(job) for job in schedule.get_jobs('mainPlan')))
if __name__ == '__main__':
    # Configure root logging before anything else emits records.
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s: <%(funcName)s> - %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    logging.debug('Started process.')
    #colorama init
    #init(autoreset=True)
    msgChannel=getEnv()
    # Log the active plan every 2 hours; run the main check daily at 10:00.
    schedule.every(2).hours.do(logPlan)
    schedule.every().day.at('10:00').do(main,msgChannel=msgChannel).tag('mainPlan')
    # run_all() fires every job once at startup so state is fresh immediately
    schedule.run_all()
    while True:
        schedule.run_pending()
        time.sleep(10)
| [
"logging.basicConfig",
"requests.post",
"logging.debug",
"os.getenv",
"schedule.run_pending",
"win10toast.ToastNotifier",
"schedule.run_all",
"requests.get",
"time.sleep",
"os.path.isfile",
"schedule.every",
"json.load",
"os.system",
"logging.info",
"yagmail.SMTP",
"json.dump",
"sche... | [((188, 225), 'logging.debug', 'logging.debug', (['"""Getting icbc news..."""'], {}), "('Getting icbc news...')\n", (201, 225), False, 'import logging\n'), ((295, 312), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (307, 312), False, 'import requests\n'), ((366, 418), 'logging.info', 'logging.info', (['f"""Response status is {re.status_code}"""'], {}), "(f'Response status is {re.status_code}')\n", (378, 418), False, 'import logging\n'), ((817, 853), 'logging.debug', 'logging.debug', (['"""Getting pbc news..."""'], {}), "('Getting pbc news...')\n", (830, 853), False, 'import logging\n'), ((937, 955), 'requests.post', 'requests.post', (['url'], {}), '(url)\n', (950, 955), False, 'import requests\n'), ((1009, 1061), 'logging.info', 'logging.info', (['f"""Response status is {re.status_code}"""'], {}), "(f'Response status is {re.status_code}')\n", (1021, 1061), False, 'import logging\n'), ((1808, 1839), 'logging.debug', 'logging.debug', (['"""Read json log."""'], {}), "('Read json log.')\n", (1821, 1839), False, 'import logging\n'), ((2870, 2933), 'yagmail.SMTP', 'yagmail.SMTP', (['"""<EMAIL>"""', '"""sfrjjkcpkhhsbefa"""'], {'host': '"""smtp.qq.com"""'}), "('<EMAIL>', 'sfrjjkcpkhhsbefa', host='smtp.qq.com')\n", (2882, 2933), False, 'import yagmail\n'), ((2998, 3032), 'logging.info', 'logging.info', (['f"""sent mail {title}"""'], {}), "(f'sent mail {title}')\n", (3010, 3032), False, 'import logging\n'), ((3142, 3157), 'win10toast.ToastNotifier', 'ToastNotifier', ([], {}), '()\n', (3155, 3157), False, 'from win10toast import ToastNotifier\n'), ((3219, 3254), 'logging.info', 'logging.info', (['f"""Made toast {title}"""'], {}), "(f'Made toast {title}')\n", (3231, 3254), False, 'import logging\n'), ((3487, 3529), 'logging.info', 'logging.info', (['f"""Sent notification {title}"""'], {}), "(f'Sent notification {title}')\n", (3499, 3529), False, 'import logging\n'), ((3647, 3682), 'os.getenv', 'os.getenv', (['"""NOTIFIER_CHANNEL"""', '"""23"""'], {}), 
"('NOTIFIER_CHANNEL', '23')\n", (3656, 3682), False, 'import os\n'), ((3686, 3758), 'logging.debug', 'logging.debug', (['f"""Get system variant NOTIFIER_CHANNEL:{NOTIFIER_CHANNEL}"""'], {}), "(f'Get system variant NOTIFIER_CHANNEL:{NOTIFIER_CHANNEL}')\n", (3699, 3758), False, 'import logging\n'), ((4283, 4386), 'logging.info', 'logging.info', (['f"""Message sendMsg set as {NOTIFIER_CHANNEL}:{[f.__name__ for f in msgChannel]}"""'], {}), "(\n f'Message sendMsg set as {NOTIFIER_CHANNEL}:{[f.__name__ for f in msgChannel]}'\n )\n", (4295, 4386), False, 'import logging\n'), ((4542, 4686), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s %(levelname)s: <%(funcName)s> - %(message)s"""', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "(level=logging.INFO, format=\n '%(asctime)s %(levelname)s: <%(funcName)s> - %(message)s', datefmt=\n '%Y-%m-%d %H:%M:%S')\n", (4561, 4686), False, 'import logging\n'), ((4705, 4738), 'logging.debug', 'logging.debug', (['"""Started process."""'], {}), "('Started process.')\n", (4718, 4738), False, 'import logging\n'), ((4942, 4960), 'schedule.run_all', 'schedule.run_all', ([], {}), '()\n', (4958, 4960), False, 'import schedule\n'), ((1561, 1602), 'os.path.isfile', 'os.path.isfile', (['"""commemorativeCoins.json"""'], {}), "('commemorativeCoins.json')\n", (1575, 1602), False, 'import os\n'), ((1665, 1716), 'logging.info', 'logging.info', (['"""Did not find json log, created one."""'], {}), "('Did not find json log, created one.')\n", (1677, 1716), False, 'import logging\n'), ((1791, 1803), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1800, 1803), False, 'import json\n'), ((2426, 2460), 'logging.info', 'logging.info', (['"""Wrote to json log."""'], {}), "('Wrote to json log.')\n", (2438, 2460), False, 'import logging\n'), ((2627, 2668), 'logging.info', 'logging.info', (['f"""did not find new article"""'], {}), "(f'did not find new article')\n", (2639, 2668), False, 'import logging\n'), ((3468, 
3482), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (3477, 3482), False, 'import os\n'), ((4985, 5007), 'schedule.run_pending', 'schedule.run_pending', ([], {}), '()\n', (5005, 5007), False, 'import schedule\n'), ((5016, 5030), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (5026, 5030), False, 'import time\n'), ((2372, 2416), 'json.dump', 'json.dump', (['newVersion', 'f'], {'ensure_ascii': '(False)'}), '(newVersion, f, ensure_ascii=False)\n', (2381, 2416), False, 'import json\n'), ((4445, 4474), 'schedule.get_jobs', 'schedule.get_jobs', (['"""mainPlan"""'], {}), "('mainPlan')\n", (4462, 4474), False, 'import schedule\n'), ((4813, 4830), 'schedule.every', 'schedule.every', (['(2)'], {}), '(2)\n', (4827, 4830), False, 'import schedule\n'), ((4853, 4869), 'schedule.every', 'schedule.every', ([], {}), '()\n', (4867, 4869), False, 'import schedule\n')] |
# -*- coding: utf-8 -*-
""" 东方财富网:流通股东爬虫(已废弃) """
import scrapy
import json
import time
from crawl import db
from crawl import helper
from crawl.models.Stock import Stock
from crawl.models.CirculationShareholder import CirculationShareholder
class CirculationShareholdersSpider(scrapy.Spider):
    """Scrape the circulation-shareholder (free-float holder) table from
    emweb.securities.eastmoney.com for every stock in the local database."""
    name = "circulation_shareholders"
    allowed_domains = ["emweb.securities.eastmoney.com"]
    date_tab = 1 # which report-date tab (1-based) of the shareholder section to scrape
    uri_tpl = 'http://emweb.securities.eastmoney.com/f10_v2/ShareholderResearch.aspx?type=web&code=%s%s'
    def start_requests(self):
        # one request per stock, ordered by stock code; the Stock row is
        # carried along in the request meta for parse()
        for stock in Stock.query().order_by('code asc').all():
            yield scrapy.Request(self.shareholder_url(stock), callback=self.parse, meta={'stock': stock})
    # Close the database connection once the spider finishes
    def closed(spider, reason):
        db.session.close()
    ###########################################################################################################################
    def parse(self, response):
        """Parse one shareholder-research page and persist each holder row."""
        stock = response.meta['stock']
        # XPath to the report date shown on the selected date tab
        tab_expression = '//div[@id="sdltgd"]/following-sibling::div[@class="content"]/div/ul/li[%d]/span/text()' % (self.date_tab,)
        date = helper.filter_value(response.xpath(tab_expression).extract_first(), str, 'utf-8')
        # Skip when this quarter's shareholder data is missing OR was already crawled
        if not date or self.alreadyCrawled(stock, date):
            return
        # every table row except the header (first) and footer (last)
        for tr in response.xpath('//div[@id="TTCS_Table_Div"]/table[1]/tr[position()>1 and position()<last()]'):
            circulation_shareholder = {
                'code': stock.code,
                'date': date,
                'index': helper.filter_value(tr.xpath('th[1]/em/text()').extract_first(), int),
                'name': helper.filter_value(tr.xpath('td[1]/text()').extract_first(), str, 'utf-8'),
                'nature': helper.filter_value(tr.xpath('td[2]/text()').extract_first(), str, 'utf-8'),
                'share_num': helper.filter_value(self.tr_get_share_num(tr), int),
                'share_ratio': self.tr_get_share_ratio(tr),
                'share_change': self.tr_get_share_change(tr),
                'share_change_ratio': self.tr_get_share_change_ratio(tr),
                'share_state': self.tr_get_share_state(tr),
            }
            self.save_circulation_shareholder(circulation_shareholder)
    def alreadyCrawled(self, stock, date):
        """Return True when a record for this stock & report date already exists."""
        crawled = CirculationShareholder.query()\
            .filter(CirculationShareholder.code==stock.code, CirculationShareholder.date==date)\
            .count() > 0
        return crawled
    def tr_get_share_num(self, tr):
        """Share count from column 4, with thousands separators stripped."""
        share_num = tr.xpath('td[4]/text()').extract_first().replace(',', '')
        return share_num
    def tr_get_share_change(self, tr):
        """Share-count delta from column 6; '不变' (unchanged) and '新进' (new entrant) mean 0."""
        raw_text = tr.xpath('td[6]/text()').extract_first()
        if raw_text in [u'不变', u'新进']:
            share_change = 0
        else:
            share_change = int(raw_text.replace(',', ''))
        return share_change
    def tr_get_share_ratio(self, tr):
        """Holding percentage from column 5, as a float without the '%' sign."""
        share_ratio = helper.filter_value(tr.xpath('td[5]/text()').extract_first().rstrip('%').replace(',', ''), float)
        return share_ratio
    def tr_get_share_change_ratio(self, tr):
        """Holding-change percentage from column 7, as a float without the '%' sign."""
        share_change_ratio = helper.filter_value(tr.xpath('td[7]/text()').extract_first().rstrip('%').replace(',', ''), float)
        return share_change_ratio
    def tr_get_share_state(self, tr):
        """Encode column 6 as a state: 0 unchanged, 2 new entrant, -1 decreased, 1 increased."""
        raw_text = tr.xpath('td[6]/text()').extract_first()
        if raw_text == u'不变':
            share_state = 0
        elif raw_text == u'新进':
            share_state = 2
        elif '-' in raw_text:
            share_state = -1
        else:
            share_state = 1
        return share_state
    def shareholder_url(self, stock):
        """Build the page URL from the exchange prefix and stock code."""
        url = self.uri_tpl % (stock.exchange, stock.code)
        return url
    def save_circulation_shareholder(self, circulation_shareholder_data):
        """Persist one shareholder record, stamping it with the current local time."""
        now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        circulation_shareholder = CirculationShareholder(**circulation_shareholder_data)
        circulation_shareholder.created_at = now
        circulation_shareholder.save()
| [
"crawl.models.CirculationShareholder.CirculationShareholder.query",
"crawl.models.CirculationShareholder.CirculationShareholder",
"crawl.db.session.close",
"crawl.models.Stock.Stock.query",
"time.localtime"
] | [((795, 813), 'crawl.db.session.close', 'db.session.close', ([], {}), '()\n', (811, 813), False, 'from crawl import db\n'), ((4028, 4082), 'crawl.models.CirculationShareholder.CirculationShareholder', 'CirculationShareholder', ([], {}), '(**circulation_shareholder_data)\n', (4050, 4082), False, 'from crawl.models.CirculationShareholder import CirculationShareholder\n'), ((3976, 3992), 'time.localtime', 'time.localtime', ([], {}), '()\n', (3990, 3992), False, 'import time\n'), ((587, 600), 'crawl.models.Stock.Stock.query', 'Stock.query', ([], {}), '()\n', (598, 600), False, 'from crawl.models.Stock import Stock\n'), ((2382, 2412), 'crawl.models.CirculationShareholder.CirculationShareholder.query', 'CirculationShareholder.query', ([], {}), '()\n', (2410, 2412), False, 'from crawl.models.CirculationShareholder import CirculationShareholder\n')] |
"""
Testing for textpipe doc.py
"""
import pytest
import random
import spacy
from textpipe.doc import Doc
TEXT_1 = """<p><b>Text mining</b>, also referred to as <i><b>text data mining</b></i>, roughly
equivalent to <b>text analytics</b>, is the process of deriving high-quality <a href="/wiki/Information"
title="Information">information</a> from <a href="/wiki/Plain_text" title="Plain text">text</a>.
High-quality information is typically derived through the devising of patterns and trends through means
such as <a href="/wiki/Pattern_recognition" title="Pattern recognition">statistical pattern learning</a>.
Text mining usually involves the process of structuring the input text (usually parsing, along with the
addition of some derived linguistic features and the removal of others, and subsequent insertion into a
<a href="/wiki/Database" title="Database">database</a>), deriving patterns within the
<a href="/wiki/Structured_data" class="mw-redirect" title="Structured data">structured data</a>, and
finally evaluation and interpretation of the output. Google is a company named Google.
"""
TEXT_2 = """<p><b>Textmining</b>, ook wel <i>textdatamining</i>, verwijst naar het proces om met
allerhande<a href="/wiki/Informatietechnologie" title="Informatietechnologie">ICT</a>-technieken
waardevolle informatie te halen uit grote hoeveelheden tekstmateriaal. Met deze technieken wordt
gepoogd patronen en tendensen te ontwaren. Concreet gaat men teksten softwarematig structureren
en ontleden, transformeren, vervolgens inbrengen in databanken, en ten slotte evalueren en
interpreteren. Philips is een bedrijf genaamd Philips.</p>
"""
TEXT_3 = ''
TEXT_4 = """this is a paragraph
this is a paragraph
"""
TEXT_5 = """<NAME> is sinds de oprichting van Facebook de directeur van het bedrijf."""
# Blank Dutch pipeline used as a stand-in "custom" model for entity extraction.
ents_model = spacy.blank('nl')
custom_spacy_nlps = {'nl': {'ents': ents_model}}
# Pre-built documents shared by all tests below.
DOC_1 = Doc(TEXT_1)
DOC_2 = Doc(TEXT_2)
DOC_3 = Doc(TEXT_3)
DOC_4 = Doc(TEXT_4)
DOC_5 = Doc(TEXT_5, spacy_nlps=custom_spacy_nlps)
def test_load_custom_model():
    """
    The custom spacy language modules should be correctly loaded into the doc.
    """
    # maps detected language -> name of the custom model to use
    model_mapping = {'nl': 'ents'}
    lang = DOC_5.language if DOC_5.is_reliable_language else DOC_5.hint_language
    assert lang == 'nl'
    # default model finds entities; the blank custom model finds none
    assert sorted(DOC_5.find_ents()) == sorted([('<NAME>', 'PER'), ('Facebook', 'MISC')])
    assert DOC_5.find_ents(model_mapping[lang]) == []
def test_nwords_nsents():
    """Word and sentence counts for the fixture documents."""
    assert DOC_1.nwords == 112
    assert DOC_2.nwords == 65
    assert DOC_3.nwords == 0
    assert DOC_1.nsents == 4
    assert DOC_2.nsents == 4
    assert DOC_3.nsents == 0
def test_entities():
    """Named entities found in English, Dutch and empty documents."""
    assert sorted(DOC_1.ents) == sorted([('Google', 'ORG')])
    assert sorted(DOC_2.ents) == sorted([('Concreet', 'LOC'), ('Textmining', 'PER'),
                                         ('Philips', 'ORG'), ('allerhandeICT', 'PER')])
    assert DOC_3.ents == []
def test_complexity():
    """Readability scores; an empty document scores the maximum (100)."""
    # NOTE(review): exact float equality is brittle across library versions
    assert DOC_1.complexity == 30.464548969072155
    assert DOC_2.complexity == 17.652500000000003
    assert DOC_3.complexity == 100
def test_clean():
    """Cleaning never makes the text longer than the raw HTML input."""
    assert len(TEXT_1) >= len(DOC_1.clean)
    assert len(TEXT_2) >= len(DOC_2.clean)
    assert len(TEXT_3) >= len(DOC_3.clean)
def test_clean_newlines():
    """Cleaning collapses all whitespace runs (incl. newlines) to single spaces."""
    assert ' '.join(TEXT_4.split()) == DOC_4.clean
def test_language():
    """Language detection: English, Dutch, and 'un' for empty text."""
    assert DOC_1.language == 'en'
    assert DOC_2.language == 'nl'
    assert DOC_3.language == 'un'
def test_extract_keyterms():
    """Keyterm extraction validates the ranker name and honours n_terms."""
    non_ranker = 'bulthaup'
    rankers = ['textrank', 'sgrank', 'singlerank']
    # pytest.raises() no longer accepts a ``message`` kwarg (deprecated in
    # pytest 4.1 and removed in 5.0); ``match`` is the supported way to
    # assert on the error text (it is a regex searched in str(excinfo)).
    with pytest.raises(ValueError, match=f'algorithm "{non_ranker}" not'):
        DOC_1.extract_keyterms(ranker=non_ranker)
    assert len(DOC_1.extract_keyterms()) == 10
    # limits number of keyterms
    assert len(DOC_1.extract_keyterms(n_terms=2)) == 2
    # works with empty documents
    assert DOC_3.extract_keyterms() == []
    # works with other rankers
    assert isinstance(DOC_2.extract_keyterms(ranker=random.choice(rankers)), list)
| [
"textpipe.doc.Doc",
"spacy.blank",
"pytest.raises",
"random.choice"
] | [((1816, 1833), 'spacy.blank', 'spacy.blank', (['"""nl"""'], {}), "('nl')\n", (1827, 1833), False, 'import spacy\n'), ((1892, 1903), 'textpipe.doc.Doc', 'Doc', (['TEXT_1'], {}), '(TEXT_1)\n', (1895, 1903), False, 'from textpipe.doc import Doc\n'), ((1912, 1923), 'textpipe.doc.Doc', 'Doc', (['TEXT_2'], {}), '(TEXT_2)\n', (1915, 1923), False, 'from textpipe.doc import Doc\n'), ((1932, 1943), 'textpipe.doc.Doc', 'Doc', (['TEXT_3'], {}), '(TEXT_3)\n', (1935, 1943), False, 'from textpipe.doc import Doc\n'), ((1952, 1963), 'textpipe.doc.Doc', 'Doc', (['TEXT_4'], {}), '(TEXT_4)\n', (1955, 1963), False, 'from textpipe.doc import Doc\n'), ((1972, 2013), 'textpipe.doc.Doc', 'Doc', (['TEXT_5'], {'spacy_nlps': 'custom_spacy_nlps'}), '(TEXT_5, spacy_nlps=custom_spacy_nlps)\n', (1975, 2013), False, 'from textpipe.doc import Doc\n'), ((3548, 3651), 'pytest.raises', 'pytest.raises', (['ValueError'], {'message': 'f"""algorithm "{non_ranker}" not available; use one of {rankers}"""'}), '(ValueError, message=\n f\'algorithm "{non_ranker}" not available; use one of {rankers}\')\n', (3561, 3651), False, 'import pytest\n'), ((4037, 4059), 'random.choice', 'random.choice', (['rankers'], {}), '(rankers)\n', (4050, 4059), False, 'import random\n')] |
from django.template import Library
from taggit.models import Tag
from ..models import Article, Category
register = Library()
@register.inclusion_tag('article/tags/recent_articles.html')
def get_recent_articles(number=5):
    """Render the *number* most recent published articles."""
    latest = Article.published.all()[:number]
    return {'articles': latest}
@register.inclusion_tag('article/tags/archive_articles.html')
def get_archive_articles():
archives = Article.published.all()
return {'archives': archives,}
@register.inclusion_tag('article/tags/category.html')
def get_categories():
categories = Category.objects.all()
return {'categories': categories,}
@register.inclusion_tag('article/tags/tag.html')
def get_tags():
tags = Tag.objects.all().order_by('name')
return {'tags': tags,}
| [
"taggit.models.Tag.objects.all",
"django.template.Library"
] | [((119, 128), 'django.template.Library', 'Library', ([], {}), '()\n', (126, 128), False, 'from django.template import Library\n'), ((707, 724), 'taggit.models.Tag.objects.all', 'Tag.objects.all', ([], {}), '()\n', (722, 724), False, 'from taggit.models import Tag\n')] |
from datetime import datetime
from django.db import models
from django.contrib import admin
from django.contrib.auth.models import AbstractUser
from django.conf import settings
from django_fsm import FSMField, transition
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from explorer.models import Challenge, Mission
IMAGE_STATUS_OPTIONS = ((0,'No Image'), (1, 'Quicklook'), (2, 'Final'), (3,'Poor Quality'))
COLOUR_STATE = {'New': '000',
'Submitted': 'F1C40F',
'Observed':'5DADE2',
'Identify':'E67E22',
'Analyse':'A569BD',
'Investigate':'aaa',
'Summary':'2ECC71',
'Failed':'E74C3C'
}
class Proposal(models.Model):
code = models.CharField(max_length=20, unique=True)
active = models.BooleanField(default=True)
def __str__(self):
if self.active:
state = ""
else:
state = "NOT "
return "{} is {}active".format(self.code, state)
class User(AbstractUser):
token = models.CharField(help_text=_('Authentication for Valhalla'), max_length=50, blank=True, null=True)
archive_token = models.CharField(help_text=_('Authentication for LCO archive'), max_length=50, blank=True, null=True)
default_proposal = models.ForeignKey(Proposal, null=True, blank=True, on_delete=models.CASCADE)
mission_1 = models.BooleanField(help_text=_('Has user competed Mission 1?'), default=False)
mission_2 = models.BooleanField(help_text=_('Has user competed Mission 2?'), default=False)
mission_3 = models.BooleanField(help_text=_('Has user competed Mission 3?'), default=False)
class Progress(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
challenge = models.ForeignKey(Challenge, on_delete=models.CASCADE)
requestid = models.CharField(max_length=20, null=True, blank=True)
frameids = models.CharField(max_length=20, null=True, blank=True)
status = FSMField(default='New', choices=settings.PROGRESS_OPTIONS)
last_update = models.DateTimeField(default=datetime.utcnow)
target = models.CharField(max_length=100)
image_file = models.FileField(null=True, blank=True, upload_to='images')
image_status = models.SmallIntegerField(default=0, choices=IMAGE_STATUS_OPTIONS)
def has_image(self):
if self.image_file is not None:
return True
else:
return False
has_image.boolean = True
def coloured_state(self):
return format_html(
'<span style="color: #{};">{}</span>',
COLOUR_STATE[self.status],
self.status,
)
def image_tag(self):
return mark_safe('<img src="{}" width="150" height="150" />'.format(self.image_file.url))
image_tag.short_description = 'Image'
def __str__(self):
return "{} is {} in {}".format(self.user.username, self.challenge, self.status)
class Meta:
unique_together = (("user","challenge"),)
verbose_name_plural = 'Challenge Progress'
@transition(field=status, source=['New'], target='Submitted')
def submit(self):
pass
@transition(field=status, source=['Submitted'], target='Failed')
def failed(self):
pass
@transition(field=status, source=['Failed'], target='New')
def retry(self):
self.requestid = ''
self.frameids = ''
pass
@transition(field=status, source=['Submitted'], target='Observed')
def observed(self):
pass
@transition(field=status, source=['Observed'], target='Identify')
def identify(self):
pass
@transition(field=status, source=['Identify'], target='Analyse')
def analyse(self):
pass
@transition(field=status, source=['Analyse'], target='Investigate')
def investigate(self):
pass
@transition(field=status, source=['Analyse','Investigate'], target='Summary')
def completed(self):
pass
class Question(models.Model):
text = models.TextField()
challenge = models.ForeignKey(Challenge, on_delete=models.CASCADE)
class Meta:
verbose_name_plural = 'questions'
class Answer(models.Model):
text = models.TextField()
question = models.ForeignKey(Question, on_delete=models.CASCADE)
class UserAnswer(models.Model):
answer = models.ForeignKey(Answer, on_delete=models.CASCADE)
user = models.ForeignKey(User, on_delete=models.CASCADE)
| [
"django_fsm.transition",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.FileField",
"django.db.models.BooleanField",
"django.utils.translation.ugettext",
"django.utils.html.format_html",
"django.db.models.SmallIntegerField",
"django_fsm.FSMField",
"django.db.models.... | [((848, 892), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'unique': '(True)'}), '(max_length=20, unique=True)\n', (864, 892), False, 'from django.db import models\n'), ((906, 939), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (925, 939), False, 'from django.db import models\n'), ((1393, 1469), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Proposal'], {'null': '(True)', 'blank': '(True)', 'on_delete': 'models.CASCADE'}), '(Proposal, null=True, blank=True, on_delete=models.CASCADE)\n', (1410, 1469), False, 'from django.db import models\n'), ((1800, 1849), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (1817, 1849), False, 'from django.db import models\n'), ((1866, 1920), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Challenge'], {'on_delete': 'models.CASCADE'}), '(Challenge, on_delete=models.CASCADE)\n', (1883, 1920), False, 'from django.db import models\n'), ((1937, 1991), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'null': '(True)', 'blank': '(True)'}), '(max_length=20, null=True, blank=True)\n', (1953, 1991), False, 'from django.db import models\n'), ((2007, 2061), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'null': '(True)', 'blank': '(True)'}), '(max_length=20, null=True, blank=True)\n', (2023, 2061), False, 'from django.db import models\n'), ((2075, 2133), 'django_fsm.FSMField', 'FSMField', ([], {'default': '"""New"""', 'choices': 'settings.PROGRESS_OPTIONS'}), "(default='New', choices=settings.PROGRESS_OPTIONS)\n", (2083, 2133), False, 'from django_fsm import FSMField, transition\n'), ((2152, 2197), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'datetime.utcnow'}), '(default=datetime.utcnow)\n', (2172, 2197), False, 'from django.db import 
models\n'), ((2211, 2243), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (2227, 2243), False, 'from django.db import models\n'), ((2261, 2320), 'django.db.models.FileField', 'models.FileField', ([], {'null': '(True)', 'blank': '(True)', 'upload_to': '"""images"""'}), "(null=True, blank=True, upload_to='images')\n", (2277, 2320), False, 'from django.db import models\n'), ((2340, 2405), 'django.db.models.SmallIntegerField', 'models.SmallIntegerField', ([], {'default': '(0)', 'choices': 'IMAGE_STATUS_OPTIONS'}), '(default=0, choices=IMAGE_STATUS_OPTIONS)\n', (2364, 2405), False, 'from django.db import models\n'), ((3150, 3210), 'django_fsm.transition', 'transition', ([], {'field': 'status', 'source': "['New']", 'target': '"""Submitted"""'}), "(field=status, source=['New'], target='Submitted')\n", (3160, 3210), False, 'from django_fsm import FSMField, transition\n'), ((3252, 3315), 'django_fsm.transition', 'transition', ([], {'field': 'status', 'source': "['Submitted']", 'target': '"""Failed"""'}), "(field=status, source=['Submitted'], target='Failed')\n", (3262, 3315), False, 'from django_fsm import FSMField, transition\n'), ((3357, 3414), 'django_fsm.transition', 'transition', ([], {'field': 'status', 'source': "['Failed']", 'target': '"""New"""'}), "(field=status, source=['Failed'], target='New')\n", (3367, 3414), False, 'from django_fsm import FSMField, transition\n'), ((3510, 3575), 'django_fsm.transition', 'transition', ([], {'field': 'status', 'source': "['Submitted']", 'target': '"""Observed"""'}), "(field=status, source=['Submitted'], target='Observed')\n", (3520, 3575), False, 'from django_fsm import FSMField, transition\n'), ((3619, 3683), 'django_fsm.transition', 'transition', ([], {'field': 'status', 'source': "['Observed']", 'target': '"""Identify"""'}), "(field=status, source=['Observed'], target='Identify')\n", (3629, 3683), False, 'from django_fsm import FSMField, transition\n'), ((3727, 3790), 
'django_fsm.transition', 'transition', ([], {'field': 'status', 'source': "['Identify']", 'target': '"""Analyse"""'}), "(field=status, source=['Identify'], target='Analyse')\n", (3737, 3790), False, 'from django_fsm import FSMField, transition\n'), ((3833, 3899), 'django_fsm.transition', 'transition', ([], {'field': 'status', 'source': "['Analyse']", 'target': '"""Investigate"""'}), "(field=status, source=['Analyse'], target='Investigate')\n", (3843, 3899), False, 'from django_fsm import FSMField, transition\n'), ((3946, 4023), 'django_fsm.transition', 'transition', ([], {'field': 'status', 'source': "['Analyse', 'Investigate']", 'target': '"""Summary"""'}), "(field=status, source=['Analyse', 'Investigate'], target='Summary')\n", (3956, 4023), False, 'from django_fsm import FSMField, transition\n'), ((4103, 4121), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (4119, 4121), False, 'from django.db import models\n'), ((4138, 4192), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Challenge'], {'on_delete': 'models.CASCADE'}), '(Challenge, on_delete=models.CASCADE)\n', (4155, 4192), False, 'from django.db import models\n'), ((4292, 4310), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (4308, 4310), False, 'from django.db import models\n'), ((4326, 4379), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Question'], {'on_delete': 'models.CASCADE'}), '(Question, on_delete=models.CASCADE)\n', (4343, 4379), False, 'from django.db import models\n'), ((4426, 4477), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Answer'], {'on_delete': 'models.CASCADE'}), '(Answer, on_delete=models.CASCADE)\n', (4443, 4477), False, 'from django.db import models\n'), ((4489, 4538), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (4506, 4538), False, 'from django.db import models\n'), ((2610, 2705), 'django.utils.html.format_html', 'format_html', 
(['"""<span style="color: #{};">{}</span>"""', 'COLOUR_STATE[self.status]', 'self.status'], {}), '(\'<span style="color: #{};">{}</span>\', COLOUR_STATE[self.status\n ], self.status)\n', (2621, 2705), False, 'from django.utils.html import format_html\n'), ((1176, 1208), 'django.utils.translation.ugettext', '_', (['"""Authentication for Valhalla"""'], {}), "('Authentication for Valhalla')\n", (1177, 1208), True, 'from django.utils.translation import ugettext as _\n'), ((1295, 1330), 'django.utils.translation.ugettext', '_', (['"""Authentication for LCO archive"""'], {}), "('Authentication for LCO archive')\n", (1296, 1330), True, 'from django.utils.translation import ugettext as _\n'), ((1516, 1549), 'django.utils.translation.ugettext', '_', (['"""Has user competed Mission 1?"""'], {}), "('Has user competed Mission 1?')\n", (1517, 1549), True, 'from django.utils.translation import ugettext as _\n'), ((1612, 1645), 'django.utils.translation.ugettext', '_', (['"""Has user competed Mission 2?"""'], {}), "('Has user competed Mission 2?')\n", (1613, 1645), True, 'from django.utils.translation import ugettext as _\n'), ((1708, 1741), 'django.utils.translation.ugettext', '_', (['"""Has user competed Mission 3?"""'], {}), "('Has user competed Mission 3?')\n", (1709, 1741), True, 'from django.utils.translation import ugettext as _\n')] |
# -*- coding: utf-8 -*-
"""
Module defines DataDog exporter class.
"""
from datadog import initialize, statsd
from twindb_backup.exporter.base_exporter import (
BaseExporter,
ExportCategory,
ExportMeasureType,
)
from twindb_backup.exporter.exceptions import DataDogExporterError
class DataDogExporter(BaseExporter): # pylint: disable=too-few-public-methods
"""
DataDog exporter class
"""
def __init__(self, app_key, api_key):
super(DataDogExporter, self).__init__()
options = {"api_key": api_key, "app_key": app_key}
initialize(**options)
self._suffix = "twindb."
def export(self, category, measure_type, data):
"""
Export data to DataDog
:param category: Data meant
:param measure_type: Type of measure
:param data: Data to posting
:raise: DataDogExporterError if data is invalid
"""
if isinstance(data, (int, float)):
metric_name = self._suffix
if category == ExportCategory.files:
metric_name += "files."
else:
metric_name += "mysql."
if measure_type == ExportMeasureType.backup:
metric_name += "backup_time"
else:
metric_name += "restore_time"
statsd.gauge(metric_name, data)
else:
raise DataDogExporterError("Invalid input data")
| [
"twindb_backup.exporter.exceptions.DataDogExporterError",
"datadog.initialize",
"datadog.statsd.gauge"
] | [((575, 596), 'datadog.initialize', 'initialize', ([], {}), '(**options)\n', (585, 596), False, 'from datadog import initialize, statsd\n'), ((1319, 1350), 'datadog.statsd.gauge', 'statsd.gauge', (['metric_name', 'data'], {}), '(metric_name, data)\n', (1331, 1350), False, 'from datadog import initialize, statsd\n'), ((1383, 1425), 'twindb_backup.exporter.exceptions.DataDogExporterError', 'DataDogExporterError', (['"""Invalid input data"""'], {}), "('Invalid input data')\n", (1403, 1425), False, 'from twindb_backup.exporter.exceptions import DataDogExporterError\n')] |
from src.utils.config import CONFIG
from discord.ext.commands import MemberConverter
from random import sample, random
async def kick_person(user):
await user.move_to(None)
async def random_kick(bot, ctx, user):
prob = random()
if user is not None:
if prob <= 0.5:
await ctx.send(f'โชคร้ายหน่อยนะ {str(user)} บายย')
await user.move_to(None)
else:
# choose only 1 line between 18 or 19 or keep both?
await ctx.send(f'โชคดีไป {str(user)}')
if user != ctx.author:
await ctx.send(f'มึงโดนแทนละกั้น {str(ctx.author)}')
await ctx.author.move_to(None)
else:
snap_emoji = CONFIG.EMOJI_ID.thanos_snap
bot_id = CONFIG.BOT_ID
await ctx.send(snap_emoji)
general_channel = ctx.author.voice.channel
member_ids = list(general_channel.voice_states.keys())
random_member_ids = sample(member_ids, len(member_ids)//2)
for member_id in random_member_ids:
if bot_id == member_id:
continue
player = await MemberConverter().convert(ctx, str(member_id))
print(str(player))
await player.move_to(None)
await ctx.send(f'Perfectly balanced, as all things should be')
| [
"random.random",
"discord.ext.commands.MemberConverter"
] | [((231, 239), 'random.random', 'random', ([], {}), '()\n', (237, 239), False, 'from random import sample, random\n'), ((1114, 1131), 'discord.ext.commands.MemberConverter', 'MemberConverter', ([], {}), '()\n', (1129, 1131), False, 'from discord.ext.commands import MemberConverter\n')] |
from aiogram import types
from aiogram.dispatcher import FSMContext
from aiogram.dispatcher.filters.builtin import Command
from TgBot.loader import dp
from TgBot.utils.misc import rate_limit
from TgBot.utils import chat_emails_keyboard
from TgBot.states.watch import WatchGmail
from loader import gmail_API, psqldb
from re import match
import logging
@rate_limit(5, 'watch')
@dp.message_handler(Command('watch'), state=None)
async def start_watch_email(message: types.Message):
"""
Add email to watch its updates
"""
text = 'Надішліть у відповідь вашу електронну пошту, з якої ' \
'бажаєте отримувати нові листи (тільки GMail)'
# get available emails and put it in a keyboard layout
await message.answer(text, reply_markup=await chat_emails_keyboard(message.chat.id))
await WatchGmail.Add.set()
@dp.message_handler(state=WatchGmail.Add)
async def add(message: types.Message, state: FSMContext):
email = message.text.strip()
chat_id = message.chat.id
if not match(r'^[\w\.-]+@gmail\.com$', email):
logging.info(f"Mail <{email}> was rejected")
await message.answer('Невідомий формат пошти',
reply_markup=types.ReplyKeyboardRemove())
else:
# check if that email is attached to the chat
match_email_chat = await psqldb.email_in_chat(email=email,
chat_id=chat_id)
if not match_email_chat:
await message.answer(
f'Пошта {email} не приєднана до чату',
reply_markup=types.ReplyKeyboardRemove()
)
else:
# link email to chat to send emails in the future
await psqldb.add_watched_chat_emails(email=email, chat_id=chat_id)
# then only watch email if it not already watched
is_email_watched = await psqldb.email_watched(email=email)
# if email already watched -- thats all, just add to the chat, not watch
if not is_email_watched:
creds = tuple(await psqldb.get_gmail_creds(email=email))
user_creds = gmail_API.make_user_creds(*creds)
watch_response = await gmail_API.start_watch(
user_creds=user_creds,
email=email)
# watch response example
# {'historyId': '1336627', 'expiration': '1612395124174'}
logging.info(str(watch_response))
if watch_response:
await psqldb.watch_email(
email=email,
history_id=int(watch_response["historyId"])
)
await message.answer(
f'Сповіщення від пошти {email} додані до чату',
reply_markup=types.ReplyKeyboardRemove())
else:
await message.answer(
f'Не вдалося додати сповіщення від пошти {email}',
reply_markup=types.ReplyKeyboardRemove()
)
else:
await message.answer(f'Сповіщення від пошти {email} прикріплені до чату',
reply_markup=types.ReplyKeyboardRemove())
await state.finish()
| [
"TgBot.utils.misc.rate_limit",
"loader.gmail_API.make_user_creds",
"aiogram.dispatcher.filters.builtin.Command",
"re.match",
"TgBot.utils.chat_emails_keyboard",
"aiogram.types.ReplyKeyboardRemove",
"loader.gmail_API.start_watch",
"loader.psqldb.get_gmail_creds",
"loader.psqldb.email_watched",
"loa... | [((355, 377), 'TgBot.utils.misc.rate_limit', 'rate_limit', (['(5)', '"""watch"""'], {}), "(5, 'watch')\n", (365, 377), False, 'from TgBot.utils.misc import rate_limit\n'), ((841, 881), 'TgBot.loader.dp.message_handler', 'dp.message_handler', ([], {'state': 'WatchGmail.Add'}), '(state=WatchGmail.Add)\n', (859, 881), False, 'from TgBot.loader import dp\n'), ((398, 414), 'aiogram.dispatcher.filters.builtin.Command', 'Command', (['"""watch"""'], {}), "('watch')\n", (405, 414), False, 'from aiogram.dispatcher.filters.builtin import Command\n'), ((817, 837), 'TgBot.states.watch.WatchGmail.Add.set', 'WatchGmail.Add.set', ([], {}), '()\n', (835, 837), False, 'from TgBot.states.watch import WatchGmail\n'), ((1014, 1054), 're.match', 'match', (['"""^[\\\\w\\\\.-]+@gmail\\\\.com$"""', 'email'], {}), "('^[\\\\w\\\\.-]+@gmail\\\\.com$', email)\n", (1019, 1054), False, 'from re import match\n'), ((1062, 1106), 'logging.info', 'logging.info', (['f"""Mail <{email}> was rejected"""'], {}), "(f'Mail <{email}> was rejected')\n", (1074, 1106), False, 'import logging\n'), ((1330, 1380), 'loader.psqldb.email_in_chat', 'psqldb.email_in_chat', ([], {'email': 'email', 'chat_id': 'chat_id'}), '(email=email, chat_id=chat_id)\n', (1350, 1380), False, 'from loader import gmail_API, psqldb\n'), ((1722, 1782), 'loader.psqldb.add_watched_chat_emails', 'psqldb.add_watched_chat_emails', ([], {'email': 'email', 'chat_id': 'chat_id'}), '(email=email, chat_id=chat_id)\n', (1752, 1782), False, 'from loader import gmail_API, psqldb\n'), ((1882, 1915), 'loader.psqldb.email_watched', 'psqldb.email_watched', ([], {'email': 'email'}), '(email=email)\n', (1902, 1915), False, 'from loader import gmail_API, psqldb\n'), ((2140, 2173), 'loader.gmail_API.make_user_creds', 'gmail_API.make_user_creds', (['*creds'], {}), '(*creds)\n', (2165, 2173), False, 'from loader import gmail_API, psqldb\n'), ((768, 805), 'TgBot.utils.chat_emails_keyboard', 'chat_emails_keyboard', (['message.chat.id'], {}), 
'(message.chat.id)\n', (788, 805), False, 'from TgBot.utils import chat_emails_keyboard\n'), ((1204, 1231), 'aiogram.types.ReplyKeyboardRemove', 'types.ReplyKeyboardRemove', ([], {}), '()\n', (1229, 1231), False, 'from aiogram import types\n'), ((2213, 2270), 'loader.gmail_API.start_watch', 'gmail_API.start_watch', ([], {'user_creds': 'user_creds', 'email': 'email'}), '(user_creds=user_creds, email=email)\n', (2234, 2270), False, 'from loader import gmail_API, psqldb\n'), ((1586, 1613), 'aiogram.types.ReplyKeyboardRemove', 'types.ReplyKeyboardRemove', ([], {}), '()\n', (1611, 1613), False, 'from aiogram import types\n'), ((2074, 2109), 'loader.psqldb.get_gmail_creds', 'psqldb.get_gmail_creds', ([], {'email': 'email'}), '(email=email)\n', (2096, 2109), False, 'from loader import gmail_API, psqldb\n'), ((3249, 3276), 'aiogram.types.ReplyKeyboardRemove', 'types.ReplyKeyboardRemove', ([], {}), '()\n', (3274, 3276), False, 'from aiogram import types\n'), ((2836, 2863), 'aiogram.types.ReplyKeyboardRemove', 'types.ReplyKeyboardRemove', ([], {}), '()\n', (2861, 2863), False, 'from aiogram import types\n'), ((3041, 3068), 'aiogram.types.ReplyKeyboardRemove', 'types.ReplyKeyboardRemove', ([], {}), '()\n', (3066, 3068), False, 'from aiogram import types\n')] |
#!/bin/python3
#Utilities for downloading and parsing Final Fantasy 14 Loadstone content
#Copyright <NAME> 2016 BSD 3 clause license
import requests
from bs4 import BeautifulSoup
import re
def loastone_login():
print('http://na.finalfantasyxiv.com/lodestone/account/login/')
#Get a page from the Loadstone
# returns a BeautifulSoup object
def get_loadstone_page(url,session_id):
#Time format used for cookies
#import time
#time.strftime('%a, %d-%b-%Y %H:%M:%S %Z')
#ldst_is_support_browser=1, ldst_touchstone=1, ldst_bypass_browser=1", expires=session_expiration
cookies = dict(ldst_sess=session_id,domain='finalfantasyxiv.com', path='/')
raw_page = requests.get(url, cookies=cookies)
if(raw_page.status_code != 200):
raise Exception("Unable to download web page!")
return BeautifulSoup(raw_page.text,'html.parser')
#Each item has a separate detail page that must be loaded to determine if it's HQ or not
def is_item_hq(raw_item,session_id):
tooltip_url = 'http://na.finalfantasyxiv.com/' + item.find('div', attrs={"class": 'item_txt'})['data-lazy_load_url']
tooltip_page = get_loadstone_page(tooltip_url,session_id)
return bool(tooltip_page.find("img", src = re.compile('http://img\.finalfantasyxiv\.com/lds/pc/global/images/common/ic/hq.png.*')))
#Debug function to write some data to 'test.html'
def write_data(data):
out_file=open('test.html','w')
#for i in data:
#out_file.write(str(i))
out_file.write(str(data))
out_file.close()
#Debug function to write a pretty parsed version of a Loadstone page
def write_loadstone_page(url,session_id):
soup_page = get_loadstone_page(url,session_id)
write_data(soup_page.prettify().encode('utf8'))
#Use this to convert the provided items into something useful
def list_items_table(items):
item_row_format='<tr><td><img src="{image}"></img></td><td>{name}</td><td>{quantity}</td><td>{location}</td><td>{sub_location}</td></tr>\n'
item_buffer = '<table>\n'
for i in items:
item_buffer += item_row_format.format(**i)
item_buffer += '</table>\n'
return item_buffer
#Get all items in the Free company chest (does not get number of crystals or gil)
#Does not handle HQ Items yet
def get_fc_items(fc_id,session_id):
url = 'http://na.finalfantasyxiv.com/lodestone/freecompany/'+str(fc_id)+'/chest/'
soup_page = get_loadstone_page(url,session_id)
#Get all items
raw_items=soup_page.find_all("tr", attrs={"data-default_sort": True})
#Parse the items
items=[]
for item in raw_items:
tmp = {}
tmp['name'] = item.find("h2", attrs={"class": 'db-tooltip__item__name'}).text.strip()
tmp['quantity'] = int(item['data-stack'])
tmp['image'] = item.find("img")['src']
tmp['location'] = 'Company Chest'
tmp['sub_location'] = item.find_parent('tbody')['id']
items.append(tmp)
return items
#Get all items in a retainers inventory (does not get number of crystals or gil)
#Does not handle HQ Items yet
def get_retainer_items(char_id,retainer_id,session_id):
url = 'http://na.finalfantasyxiv.com/lodestone/character/'+str(char_id)+'/retainer/'+retainer_id+'/baggage/'
soup_page = get_loadstone_page(url,session_id)
#Get retainers name
retainer_name = soup_page.find("div", attrs={"class": 'retainer--name'}).p.text.strip()
#Get all items
raw_items=soup_page.find_all("tr", attrs={"data-default_sort": True})
#Parse the items
items=[]
for item in raw_items:
#if(is_item_hq(item,session_id)):
#print("HQ")
tmp = {}
tmp['name'] = item.find("a", attrs={"class": 'highlight'}).text.strip()
tmp['quantity'] = int(item['data-stack'])
tmp['image'] = item.find("img")['src']
tmp['location'] = 'Retainer: ' + retainer_name
tmp['sub_location'] = 'Inventory'
items.append(tmp)
return items
#Get all items a retainer is selling (does not get number of crystals or gil)
#HQ Item handling is suspect
#Note: This may return already sold items:
# sale_inventory is supposed to filter those out, but I din't think it's working correctly
def get_retainer_selling(char_id,retainer_id,session_id):
url = 'http://na.finalfantasyxiv.com/lodestone/character/'+str(char_id)+'/retainer/'+retainer_id+'/market/'
soup_page = get_loadstone_page(url,session_id)
#Get retainers name
retainer_name = soup_page.find("div", attrs={"class": 'retainer--name'}).p.text.strip()
#Get all items
sale_inventory=soup_page.find("div", attrs={"class": 'active'}).find('tbody')
#If no items, just return an empty set
if not sale_inventory:
return []
raw_items=sale_inventory.find_all("tr")
#Parse the items
items=[]
for item in raw_items:
tmp = {}
tmp['name'] = item.find("a", attrs={"class": 'highlight'}).text.strip()
tmp['quantity'] = int(item.find("td", attrs={"class": 'even'}).text.strip())
tmp['image'] = item.find("img")['src']
tmp['location'] = 'Retainer: ' + retainer_name
tmp['sub_location'] = 'Selling'
tmp['is_hq'] = bool(item.find("img", src = re.compile('http://img\.finalfantasyxiv\.com/lds/pc/global/images/common/ic/hq.png.*')))
items.append(tmp)
return items
| [
"bs4.BeautifulSoup",
"requests.get",
"re.compile"
] | [((682, 716), 'requests.get', 'requests.get', (['url'], {'cookies': 'cookies'}), '(url, cookies=cookies)\n', (694, 716), False, 'import requests\n'), ((823, 866), 'bs4.BeautifulSoup', 'BeautifulSoup', (['raw_page.text', '"""html.parser"""'], {}), "(raw_page.text, 'html.parser')\n", (836, 866), False, 'from bs4 import BeautifulSoup\n'), ((1235, 1333), 're.compile', 're.compile', (['"""http://img\\\\.finalfantasyxiv\\\\.com/lds/pc/global/images/common/ic/hq.png.*"""'], {}), "(\n 'http://img\\\\.finalfantasyxiv\\\\.com/lds/pc/global/images/common/ic/hq.png.*'\n )\n", (1245, 1333), False, 'import re\n'), ((5274, 5372), 're.compile', 're.compile', (['"""http://img\\\\.finalfantasyxiv\\\\.com/lds/pc/global/images/common/ic/hq.png.*"""'], {}), "(\n 'http://img\\\\.finalfantasyxiv\\\\.com/lds/pc/global/images/common/ic/hq.png.*'\n )\n", (5284, 5372), False, 'import re\n')] |
from typing import List
from queue import Queue
import logging
import os
import os.path
from entity_info import EntityInfo, EntityType
class LocalFileCollector:
"""
This class is used to collect all paths that are need to be backed up.
"""
def __init__(self, source_list: List[str], queue_size: int = 15):
"""
:param source_list: List of paths on the local machine. A path can be a single file or a directory.
:param queue_size: Size of the output queue. See. queue.Queue
"""
super().__init__()
if (not source_list) or not isinstance(source_list, List): # It needs to be a list for copy
raise ValueError("No or invalid sources provided")
self._source_list = source_list.copy()
# Check sources beforehand and fail early
for i, source in enumerate(source_list):
if not os.path.isabs(source):
raise ValueError(
f"{i}. element of source list is not absolute path: {source}; Only absolute paths supported!")
if not (os.path.isfile(source) or os.path.isdir(source)):
raise ValueError(f"{i}. element of source list is not a supported type: {source}")
self._entity_queue = Queue(queue_size)
self._logger = logging.getLogger(self.__class__.__name__)
self._commonpath = os.path.commonpath(source_list)
def collect(self):
"""
Run the actual collection.
Goes trough the source list, and put every file with their full path into the output queue.
This is a blocking function.
"""
for source in self._source_list:
if os.path.isfile(source) or os.path.islink(source):
# Walk over dirs only
self._entity_queue.put(source)
continue
if not os.path.isdir(source):
# This should only happen if the filesystem contents changed during the backup
self._logger.warning(f"{source} is not supported type. Skipping!")
continue
potentially_empty_dirs = [source] # The root dir is potentially empty until it's visited
for root, dirs, files in os.walk(source):
potentially_empty_dirs.extend([os.path.join(root, d) for d in dirs])
if dirs or files:
potentially_empty_dirs.remove(root)
for file in files:
full_path = os.path.abspath(os.path.join(root, file))
self._entity_queue.put(
EntityInfo(
path=full_path,
type=EntityType.FILE
)
)
for empty_dir in potentially_empty_dirs:
self._entity_queue.put(
EntityInfo(
path=empty_dir,
type=EntityType.EMPTY_DIRECTORY
)
)
@property
def commonpath(self):
return self._commonpath
@property
def queue(self) -> Queue:
return self._entity_queue
| [
"logging.getLogger",
"os.path.isabs",
"os.path.islink",
"os.path.join",
"os.path.isfile",
"os.path.isdir",
"entity_info.EntityInfo",
"os.path.commonpath",
"queue.Queue",
"os.walk"
] | [((1261, 1278), 'queue.Queue', 'Queue', (['queue_size'], {}), '(queue_size)\n', (1266, 1278), False, 'from queue import Queue\n'), ((1303, 1345), 'logging.getLogger', 'logging.getLogger', (['self.__class__.__name__'], {}), '(self.__class__.__name__)\n', (1320, 1345), False, 'import logging\n'), ((1373, 1404), 'os.path.commonpath', 'os.path.commonpath', (['source_list'], {}), '(source_list)\n', (1391, 1404), False, 'import os\n'), ((2229, 2244), 'os.walk', 'os.walk', (['source'], {}), '(source)\n', (2236, 2244), False, 'import os\n'), ((889, 910), 'os.path.isabs', 'os.path.isabs', (['source'], {}), '(source)\n', (902, 910), False, 'import os\n'), ((1683, 1705), 'os.path.isfile', 'os.path.isfile', (['source'], {}), '(source)\n', (1697, 1705), False, 'import os\n'), ((1709, 1731), 'os.path.islink', 'os.path.islink', (['source'], {}), '(source)\n', (1723, 1731), False, 'import os\n'), ((1863, 1884), 'os.path.isdir', 'os.path.isdir', (['source'], {}), '(source)\n', (1876, 1884), False, 'import os\n'), ((1082, 1104), 'os.path.isfile', 'os.path.isfile', (['source'], {}), '(source)\n', (1096, 1104), False, 'import os\n'), ((1108, 1129), 'os.path.isdir', 'os.path.isdir', (['source'], {}), '(source)\n', (1121, 1129), False, 'import os\n'), ((2868, 2927), 'entity_info.EntityInfo', 'EntityInfo', ([], {'path': 'empty_dir', 'type': 'EntityType.EMPTY_DIRECTORY'}), '(path=empty_dir, type=EntityType.EMPTY_DIRECTORY)\n', (2878, 2927), False, 'from entity_info import EntityInfo, EntityType\n'), ((2293, 2314), 'os.path.join', 'os.path.join', (['root', 'd'], {}), '(root, d)\n', (2305, 2314), False, 'import os\n'), ((2506, 2530), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (2518, 2530), False, 'import os\n'), ((2601, 2649), 'entity_info.EntityInfo', 'EntityInfo', ([], {'path': 'full_path', 'type': 'EntityType.FILE'}), '(path=full_path, type=EntityType.FILE)\n', (2611, 2649), False, 'from entity_info import EntityInfo, EntityType\n')] |
from setuptools import setup
setup(
name='voice-commander',
version='0.0.2a',
packages=['voice_commander'],
install_requires=['fuzzywuzzy', 'fuzzywuzzy[speedup]', 'keyboard', 'easygui', 'pyaudio', 'SpeechRecognition'],
url='https://github.com/spyoungtech/voice-commander',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
description='cross-platform voice-activation hooks and keyboard macros'
)
| [
"setuptools.setup"
] | [((30, 425), 'setuptools.setup', 'setup', ([], {'name': '"""voice-commander"""', 'version': '"""0.0.2a"""', 'packages': "['voice_commander']", 'install_requires': "['fuzzywuzzy', 'fuzzywuzzy[speedup]', 'keyboard', 'easygui', 'pyaudio',\n 'SpeechRecognition']", 'url': '"""https://github.com/spyoungtech/voice-commander"""', 'license': '"""MIT"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'description': '"""cross-platform voice-activation hooks and keyboard macros"""'}), "(name='voice-commander', version='0.0.2a', packages=['voice_commander'\n ], install_requires=['fuzzywuzzy', 'fuzzywuzzy[speedup]', 'keyboard',\n 'easygui', 'pyaudio', 'SpeechRecognition'], url=\n 'https://github.com/spyoungtech/voice-commander', license='MIT', author\n ='<NAME>', author_email='<EMAIL>', description=\n 'cross-platform voice-activation hooks and keyboard macros')\n", (35, 425), False, 'from setuptools import setup\n')] |
from model.BBCluster import CustomSentenceTransformer, OptimCluster, euclid_dist
from experiments.treccar_run import prepare_cluster_data_train_only, prepare_cluster_data2, get_trec_dat, \
get_paratext_dict
from util.Data import InputTRECCARExample
import numpy as np
import torch
import torch.nn as nn
from torch import Tensor
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from typing import Dict, Iterable, List
import transformers
from sentence_transformers import SentenceTransformer
from sentence_transformers import models
from sentence_transformers.evaluation import SentenceEvaluator
from sentence_transformers.util import batch_to_device
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import adjusted_rand_score, normalized_mutual_info_score, adjusted_mutual_info_score
from tqdm.autonotebook import trange
from clearml import Task
import pickle
import argparse
import random
random.seed(42)
torch.manual_seed(42)
np.random.seed(42)
def prepare_cluster_data_train(pages_file, art_qrels, top_qrels, paratext):
page_paras, rev_para_top, _ = get_trec_dat(art_qrels, top_qrels, None)
ptext_dict = get_paratext_dict(paratext)
top_cluster_data = []
pages = []
with open(pages_file, 'r') as f:
for l in f:
pages.append(l.rstrip('\n'))
for i in trange(len(pages)):
page = pages[i]
paras = page_paras[page]
paratexts = [ptext_dict[p] for p in paras]
top_sections = list(set([rev_para_top[p] for p in paras]))
if len(top_sections) < 2:
continue
top_labels = [top_sections.index(rev_para_top[p]) for p in paras]
query_text = ' '.join(page.split('enwiki:')[1].split('%20'))
top_cluster_data.append(InputTRECCARExample(qid=page, q_context=query_text, pids=paras, texts=paratexts,
label=np.array(top_labels)))
print('Total data instances: %5d' % len(top_cluster_data))
return top_cluster_data
def prepare_cluster_data_for_eval(art_qrels, top_qrels, paratext, do_filter, val_samples):
page_paras, rev_para_top, _ = get_trec_dat(art_qrels, top_qrels, None)
len_paras = np.array([len(page_paras[page]) for page in page_paras.keys()])
print('mean paras: %.2f, std: %.2f, max paras: %.2f' % (np.mean(len_paras), np.std(len_paras), np.max(len_paras)))
ptext_dict = get_paratext_dict(paratext)
top_cluster_data = []
pages = list(page_paras.keys())
skipped_pages = 0
max_num_doc = max([len(page_paras[p]) for p in page_paras.keys()])
for i in trange(len(pages)):
page = pages[i]
paras = page_paras[page]
paratexts = [ptext_dict[p] for p in paras]
top_sections = list(set([rev_para_top[p] for p in paras]))
top_labels = [top_sections.index(rev_para_top[p]) for p in paras]
query_text = ' '.join(page.split('enwiki:')[1].split('%20'))
n = len(paras)
if do_filter:
if n < 20 or n > 200:
skipped_pages += 1
continue
paras = paras[:max_num_doc] if n >= max_num_doc else paras + ['dummy'] * (max_num_doc - n)
paratexts = paratexts[:max_num_doc] if n >= max_num_doc else paratexts + [''] * (max_num_doc - n)
top_labels = top_labels[:max_num_doc] if n >= max_num_doc else top_labels + [-1] * (max_num_doc - n)
if do_filter:
if len(set(top_labels)) < 2 or n / len(set(top_labels)) < 2.5:
## the page should have at least 2 top level sections and n/k should be at least 2.5
skipped_pages += 1
continue
top_cluster_data.append(InputTRECCARExample(qid=page, q_context=query_text, pids=paras, texts=paratexts,
label=np.array(top_labels)))
if val_samples > 0:
top_cluster_data = top_cluster_data[:val_samples]
print('Total data instances: %5d' % len(top_cluster_data))
return top_cluster_data
class QuerySpecificClusterModel(nn.Module):
def __init__(self, path:str=None, query_transformer:CustomSentenceTransformer=None,
psg_transformer:CustomSentenceTransformer=None, device:torch.device=None):
super(QuerySpecificClusterModel, self).__init__()
if path is not None:
self.query_model = CustomSentenceTransformer(path+'/query_model')
self.psg_model = CustomSentenceTransformer(path+'/psg_model')
else:
self.query_model = query_transformer
self.psg_model = psg_transformer
self.optim = OptimCluster
self.device = device
def save(self, path):
self.query_model.save(path+'/query_model')
self.psg_model.save(path+'/psg_model')
def _get_scheduler(self, optimizer, scheduler: str, warmup_steps: int, t_total: int):
"""
Taken from SentenceTransformers
Returns the correct learning rate scheduler
"""
scheduler = scheduler.lower()
if scheduler == 'constantlr':
return transformers.get_constant_schedule(optimizer)
elif scheduler == 'warmupconstant':
return transformers.get_constant_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps)
elif scheduler == 'warmuplinear':
return transformers.get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps,
num_training_steps=t_total)
elif scheduler == 'warmupcosine':
return transformers.get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps,
num_training_steps=t_total)
elif scheduler == 'warmupcosinewithhardrestarts':
return transformers.get_cosine_with_hard_restarts_schedule_with_warmup(optimizer,
num_warmup_steps=warmup_steps,
num_training_steps=t_total)
else:
raise ValueError("Unknown scheduler {}".format(scheduler))
def query_batch_collate_fn(self, batch):
num_texts = len(batch[0].texts)
queries = []
texts = [[] for _ in range(num_texts)]
labels = []
for example in batch:
queries.append(example.q_context)
for idx, text in enumerate(example.texts):
texts[idx].append(text)
labels.append(example.label)
labels = torch.tensor(labels).to(self.device)
q_tokenized = self.query_model.tokenize(queries)
batch_to_device(q_tokenized, self.device)
psg_features = []
for idx in range(num_texts):
p_tokenized = self.psg_model.tokenize(texts[idx])
batch_to_device(p_tokenized, self.device)
psg_features.append(p_tokenized)
return q_tokenized, psg_features, labels
def forward(self, query_feature: Dict[str, Tensor], passage_features: Iterable[Dict[str, Tensor]], labels: Tensor):
n = labels.shape[1]
query_embedding = self.query_model(query_feature)['sentence_embedding']
# its the scaling vector, so each element in vector should be [0, 1]
psg_embeddings = torch.stack([self.psg_model(passages)['sentence_embedding']
for passages in passage_features], dim=1)
scaled_psg_embeddings = torch.tile(query_embedding.unsqueeze(1), (1, n, 1)) * psg_embeddings
return scaled_psg_embeddings
class BBClusterLossModel(nn.Module):
def __init__(self, model: QuerySpecificClusterModel, device, lambda_val: float, reg_const: float):
super(BBClusterLossModel, self).__init__()
self.model = model
self.lambda_val = lambda_val
self.reg = reg_const
self.optim = OptimCluster()
self.device = device
def true_adj_mat(self, label):
n = label.numel()
adj_mat = torch.zeros((n, n))
for i in range(n):
for j in range(n):
if i == j or label[i] == label[j]:
adj_mat[i][j] = 1.0
return adj_mat
def forward(self, query_feature: Dict[str, Tensor], passage_features: Iterable[Dict[str, Tensor]], labels: Tensor):
batch_size = labels.shape[0]
n = labels.shape[1]
ks = [torch.unique(labels[i]).numel() for i in range(batch_size)]
true_adjacency_mats = torch.stack([self.true_adj_mat(labels[i]) for i in range(batch_size)]).to(self.device)
query_embedding = self.model.query_model(query_feature)['sentence_embedding']
# its the scaling vector, so each element in vector should be [0, 1]
psg_embeddings = torch.stack([self.model.psg_model(passages)['sentence_embedding']
for passages in passage_features], dim=1)
scaled_psg_embeddings = torch.tile(query_embedding.unsqueeze(1), (1, n, 1)) * psg_embeddings
embeddings_dist_mats = torch.stack([euclid_dist(scaled_psg_embeddings[i]) for i in range(batch_size)])
mean_similar_dist = (embeddings_dist_mats * true_adjacency_mats).sum() / true_adjacency_mats.sum()
mean_dissimilar_dist = (embeddings_dist_mats * (1.0 - true_adjacency_mats)).sum() / (
1 - true_adjacency_mats).sum()
adjacency_mats = self.optim.apply(embeddings_dist_mats, self.lambda_val, ks).to(self.device)
err_mats = adjacency_mats * (1.0 - true_adjacency_mats) + (1.0 - adjacency_mats) * true_adjacency_mats
err_mean = err_mats.mean(dim=0).sum()
loss = err_mean + self.reg * (mean_similar_dist - mean_dissimilar_dist)
return loss
class BBClusterRNNLossModel(nn.Module):
def __init__(self, model: QuerySpecificClusterModel, device, lambda_val: float, reg_const: float):
super(BBClusterRNNLossModel, self).__init__()
self.model = model
self.lambda_val = lambda_val
self.reg = reg_const
self.optim = OptimCluster()
self.device = device
def true_adj_mat(self, label):
n = label.numel()
adj_mat = torch.zeros((n, n))
for i in range(n):
for j in range(n):
if i == j or label[i] == label[j]:
adj_mat[i][j] = 1.0
return adj_mat
def forward(self, query_feature: Dict[str, Tensor], passage_features: Iterable[Dict[str, Tensor]], labels: Tensor):
batch_size = labels.shape[0]
n = labels.shape[1]
ks = [torch.unique(labels[i]).numel() for i in range(batch_size)]
true_adjacency_mats = torch.stack([self.true_adj_mat(labels[i]) for i in range(batch_size)]).to(self.device)
psg_embeddings = torch.stack([self.model.psg_model(passages)['sentence_embedding']
for passages in passage_features], dim=1)
scaling_vector = torch.tensor([0])
# obtain the scaling vector from psg_embeddngs using RNN
scaled_psg_embeddings = torch.tile(scaling_vector.unsqueeze(1), (1, n, 1)) * psg_embeddings
embeddings_dist_mats = torch.stack([euclid_dist(scaled_psg_embeddings[i]) for i in range(batch_size)])
mean_similar_dist = (embeddings_dist_mats * true_adjacency_mats).sum() / true_adjacency_mats.sum()
mean_dissimilar_dist = (embeddings_dist_mats * (1.0 - true_adjacency_mats)).sum() / (
1 - true_adjacency_mats).sum()
adjacency_mats = self.optim.apply(embeddings_dist_mats, self.lambda_val, ks).to(self.device)
err_mats = adjacency_mats * (1.0 - true_adjacency_mats) + (1.0 - adjacency_mats) * true_adjacency_mats
err_mean = err_mats.mean(dim=0).sum()
loss = err_mean + self.reg * (mean_similar_dist - mean_dissimilar_dist)
return loss
class QueryClusterEvaluator(SentenceEvaluator):
def __init__(self, queries: List[str], passages: List[List[str]], labels: List[Tensor], use_model_device=True):
self.queries = queries
self.passages = passages
self.labels = labels
self.use_model_device = use_model_device
@classmethod
def from_input_examples(cls, examples: List[InputTRECCARExample], use_model_device, **kwargs):
queries = []
passages = []
labels = []
for example in examples:
queries.append(example.q_context)
passages.append(example.texts)
labels.append(torch.from_numpy(example.label))
return cls(queries=queries, passages=passages, labels=labels, use_model_device=use_model_device, **kwargs)
def euclid_dist(self, x):
dist_mat = torch.norm(x[:, None] - x, dim=2, p=2)
return dist_mat
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
rand_scores, nmi_scores, ami_scores = [], [], []
model_device = model.device
if not self.use_model_device:
model.cpu()
for i in trange(len(self.queries), desc="Evaluating on val", smoothing=0.05):
query = self.queries[i]
passages_to_cluster = [self.passages[i][p] for p in range(len(self.passages[i]))
if len(self.passages[i][p])>0]
true_label = self.labels[i][:len(passages_to_cluster)]
query_feature = model.query_model.tokenize(query)
doc_features = model.psg_model.tokenize(passages_to_cluster)
if self.use_model_device:
batch_to_device(doc_features, model_device)
query_embedding = model.query_model(query_feature)['sentence_embedding']
psg_embeddings = model.psg_model(doc_features)['sentence_embedding']
scaled_psg_embeddings = query_embedding * psg_embeddings
embeddings_dist_mat = self.euclid_dist(scaled_psg_embeddings)
cl = AgglomerativeClustering(n_clusters=torch.unique(true_label).numel(), affinity='precomputed',
linkage='average')
cluster_label = cl.fit_predict(embeddings_dist_mat.detach().cpu().numpy())
rand_scores.append(adjusted_rand_score(true_label.numpy(), cluster_label))
nmi_scores.append(normalized_mutual_info_score(true_label.numpy(), cluster_label))
ami_scores.append(adjusted_mutual_info_score(true_label.numpy(), cluster_label))
mean_rand = np.mean(np.array(rand_scores))
mean_nmi = np.mean(np.array(nmi_scores))
mean_ami = np.mean(np.array(ami_scores))
print("\nRAND: %.5f, NMI: %.5f, AMI: %.5f\n" % (mean_rand, mean_nmi, mean_ami), flush=True)
if not self.use_model_device:
model.to(model_device)
return mean_rand
def train(train_cluster_data, val_cluster_data, test_cluster_data, output_path, eval_steps,
num_epochs, warmup_frac, lambda_val, reg, use_model_device, max_train_size=-1, train_psg_model=False,
model_name='distilbert-base-uncased', out_features=256, steps_per_epoch=None, weight_decay=0.01,
optimizer_class=transformers.AdamW, scheduler='WarmupLinear', optimizer_params={'lr':2e-5},
show_progress_bar=True, max_grad_norm=1, save_best_model=True):
tensorboard_writer = SummaryWriter('./tensorboard_logs')
task = Task.init(project_name='Query Specific BB Clustering', task_name='query_bbc_fixed_lambda')
config_dict = {'lambda_val': lambda_val, 'reg': reg}
config_dict = task.connect(config_dict)
if torch.cuda.is_available():
device = torch.device('cuda')
print('CUDA is available and using device: '+str(device))
else:
device = torch.device('cpu')
print('CUDA not available, using device: '+str(device))
### Configure sentence transformers for training and train on the provided dataset
# Use Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
query_word_embedding_model = models.Transformer(model_name)
# Apply mean pooling to get one fixed sized sentence vector
query_pooling_model = models.Pooling(query_word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False)
query_dense_model = models.Dense(in_features=query_pooling_model.get_sentence_embedding_dimension(),
out_features=out_features,
activation_function=nn.Sigmoid())
psg_word_embedding_model = models.Transformer(model_name)
# Apply mean pooling to get one fixed sized sentence vector
psg_pooling_model = models.Pooling(psg_word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False)
psg_dense_model = models.Dense(in_features=psg_pooling_model.get_sentence_embedding_dimension(),
out_features=out_features,
activation_function=nn.Tanh())
query_model = CustomSentenceTransformer(modules=[query_word_embedding_model, query_pooling_model,
query_dense_model])
psg_model = SentenceTransformer(modules=[psg_word_embedding_model, psg_pooling_model, psg_dense_model])
model = QuerySpecificClusterModel(query_transformer=query_model, psg_transformer=psg_model, device=device)
train_dataloader = DataLoader(train_cluster_data, shuffle=True, batch_size=1)
evaluator = QueryClusterEvaluator.from_input_examples(val_cluster_data, use_model_device)
test_evaluator = QueryClusterEvaluator.from_input_examples(test_cluster_data, use_model_device)
warmup_steps = int(len(train_dataloader) * num_epochs * warmup_frac) # 10% of train data
print("Untrained performance")
model.to(device)
evaluator(model)
train_dataloader.collate_fn = model.query_batch_collate_fn
# Train the model
best_score = -9999999
if steps_per_epoch is None or steps_per_epoch == 0:
steps_per_epoch = len(train_dataloader)
num_train_steps = int(steps_per_epoch * num_epochs)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'weight_decay': weight_decay},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
data_iter = iter(train_dataloader)
optimizer = optimizer_class(optimizer_grouped_parameters, **optimizer_params)
scheduler_obj = model._get_scheduler(optimizer, scheduler=scheduler, warmup_steps=warmup_steps,
t_total=num_train_steps)
config = {'epochs': num_epochs, 'steps_per_epoch': steps_per_epoch}
global_step = 0
loss_model = BBClusterLossModel(model, device, lambda_val, reg)
for epoch in trange(config.get('epochs'), desc="Epoch", disable=not show_progress_bar):
training_steps = 0
running_loss_0 = 0.0
model.zero_grad()
model.train()
if not train_psg_model:
for m in model.psg_model.modules():
m.training = False
for _ in trange(config.get('steps_per_epoch'), desc="Iteration", smoothing=0.05, disable=not show_progress_bar):
try:
data = next(data_iter)
except StopIteration:
data_iter = iter(train_dataloader)
data = next(data_iter)
query_feature, psg_features, labels = data
if max_train_size > 0 and labels.shape[1] > max_train_size:
print('skipping instance with '+str(labels.shape[1])+' passages')
continue
loss_val = loss_model(query_feature, psg_features, labels)
running_loss_0 += loss_val.item()
loss_val.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
optimizer.step()
optimizer.zero_grad()
scheduler_obj.step()
training_steps += 1
global_step += 1
if eval_steps > 0 and training_steps % eval_steps == 0:
tensorboard_writer.add_scalar('training_loss', running_loss_0 / eval_steps, global_step)
# logger.report_scalar('Loss', 'training_loss', iteration=global_step, v
# alue=running_loss_0/evaluation_steps)
running_loss_0 = 0.0
# self._eval_during_training(evaluator, output_path, save_best_model, epoch, training_steps, callback)
if evaluator is not None:
score = evaluator(model, output_path=output_path, epoch=epoch, steps=training_steps)
tensorboard_writer.add_scalar('val_ARI', score, global_step)
# logger.report_scalar('Training progress', 'val_ARI', iteration=global_step, value=score)
if score > best_score:
best_score = score
if save_best_model:
print('Saving model at: ' + output_path)
model.save(output_path)
model.zero_grad()
model.train()
if not train_psg_model:
for m in model.psg_model.modules():
m.training = False
if evaluator is not None:
score = evaluator(model, output_path=output_path, epoch=epoch, steps=training_steps)
tensorboard_writer.add_scalar('val_ARI', score, global_step)
# logger.report_scalar('Training progress', 'val_ARI', iteration=global_step, value=score)
if score > best_score:
best_score = score
if save_best_model:
model.save(output_path)
if test_evaluator is not None:
best_model = QuerySpecificClusterModel(output_path)
if torch.cuda.is_available():
model.to(torch.device('cpu'))
best_model.to(device)
test_ari = test_evaluator(best_model)
best_model.to(torch.device('cpu'))
model.to(device)
else:
test_ari = test_evaluator(best_model)
tensorboard_writer.add_scalar('test_ARI', test_ari, global_step)
# logger.report_scalar('Training progress', 'test_ARI', iteration=global_step, value=test_ari)
if evaluator is None and output_path is not None: # No evaluator, but output path: save final model version
model.save(output_path)
def save_sqst_dataset(train_pages_file, art_qrels, top_qrels, paratext, val_samples, outdir):
page_paras, rev_para_top, _ = get_trec_dat(art_qrels, top_qrels, None)
ptext_dict = get_paratext_dict(paratext)
train_cluster_data = []
test_cluster_data = []
pages = []
with open(train_pages_file, 'r') as f:
for l in f:
pages.append(l.rstrip('\n'))
for i in trange(len(pages)):
page = pages[i]
paras = page_paras[page]
page_sec_para_dict = {}
for p in paras:
sec = rev_para_top[p]
if sec not in page_sec_para_dict.keys():
page_sec_para_dict[sec] = [p]
else:
page_sec_para_dict[sec].append(p)
sections = list(set([rev_para_top[p] for p in paras]))
train_paras = []
test_paras = []
for s in page_sec_para_dict.keys():
test_paras += page_sec_para_dict[s][:len(page_sec_para_dict[s])//2]
train_paras += page_sec_para_dict[s][len(page_sec_para_dict[s])//2:]
test_labels = [sections.index(rev_para_top[p]) for p in test_paras]
train_labels = [sections.index(rev_para_top[p]) for p in train_paras]
test_paratexts = [ptext_dict[p] for p in test_paras]
train_paratexts = [ptext_dict[p] for p in train_paras]
query_text = ' '.join(page.split('enwiki:')[1].split('%20'))
test_cluster_data.append(InputTRECCARExample(qid=page, q_context=query_text, pids=test_paras,
texts=test_paratexts, label=np.array(test_labels)))
train_cluster_data.append(InputTRECCARExample(qid=page, q_context=query_text, pids=train_paras,
texts=train_paratexts, label=np.array(train_labels)))
random.shuffle(test_cluster_data)
val_cluster_data = test_cluster_data[:val_samples]
test_cluster_data = test_cluster_data[val_samples:]
with open(outdir + '/sqst_treccar_train.pkl', 'wb') as f:
pickle.dump(train_cluster_data, f)
with open(outdir + '/sqst_treccar_val.pkl', 'wb') as f:
pickle.dump(val_cluster_data, f)
with open(outdir + '/sqst_treccar_test.pkl', 'wb') as f:
pickle.dump(test_cluster_data, f)
print('No. of data instances - Train: %5d, Val: %5d, Test: %5d' % (len(train_cluster_data), len(val_cluster_data),
len(test_cluster_data)))
def save_squt_dataset(train_pages_file, art_qrels, top_qrels, paratext, val_samples, outdir):
page_paras, rev_para_top, _ = get_trec_dat(art_qrels, top_qrels, None)
ptext_dict = get_paratext_dict(paratext)
train_cluster_data = []
test_cluster_data = []
pages = []
with open(train_pages_file, 'r') as f:
for l in f:
pages.append(l.rstrip('\n'))
for i in trange(len(pages)):
page = pages[i]
paras = page_paras[page]
page_sec_para_dict = {}
for p in paras:
sec = rev_para_top[p]
if sec not in page_sec_para_dict.keys():
page_sec_para_dict[sec] = [p]
else:
page_sec_para_dict[sec].append(p)
sections = list(set([rev_para_top[p] for p in paras]))
random.shuffle(sections)
test_sections, train_sections = sections[:len(sections)//2], sections[len(sections)//2:]
train_paras = []
test_paras = []
for s in test_sections:
test_paras += page_sec_para_dict[s]
for s in train_sections:
train_paras += page_sec_para_dict[s]
test_labels = [sections.index(rev_para_top[p]) for p in test_paras]
train_labels = [sections.index(rev_para_top[p]) for p in train_paras]
test_paratexts = [ptext_dict[p] for p in test_paras]
train_paratexts = [ptext_dict[p] for p in train_paras]
query_text = ' '.join(page.split('enwiki:')[1].split('%20'))
test_cluster_data.append(InputTRECCARExample(qid=page, q_context=query_text, pids=test_paras,
texts=test_paratexts, label=np.array(test_labels)))
train_cluster_data.append(InputTRECCARExample(qid=page, q_context=query_text, pids=train_paras,
texts=train_paratexts, label=np.array(train_labels)))
random.shuffle(test_cluster_data)
val_cluster_data = test_cluster_data[:val_samples]
test_cluster_data = test_cluster_data[val_samples:]
with open(outdir + '/squt_treccar_train.pkl', 'wb') as f:
pickle.dump(train_cluster_data, f)
with open(outdir + '/squt_treccar_val.pkl', 'wb') as f:
pickle.dump(val_cluster_data, f)
with open(outdir + '/squt_treccar_test.pkl', 'wb') as f:
pickle.dump(test_cluster_data, f)
print(
'No. of data instances - Train: %5d, Val: %5d, Test: %5d' % (len(train_cluster_data), len(val_cluster_data),
len(test_cluster_data)))
def save_sbert_embeds(sbert_model_name, pages_path, art_qrels, paratext_file, outpath):
sbert = SentenceTransformer(sbert_model_name)
page_paras, _, _ = get_trec_dat(art_qrels, None, None)
paratext_dict = get_paratext_dict(paratext_file)
paras = []
paratexts = []
with open(pages_path, 'r') as f:
for l in f:
page = l.rstrip('\n')
paras += page_paras[page]
paratexts += [paratext_dict[p] for p in page_paras[page]]
print(str(len(paratexts))+' paras to be encoded')
para_embeddings = sbert.encode(paratexts, show_progress_bar=True)
para_data = {'paraids': paras, 'paravecs': para_embeddings}
with open(outpath, 'wb') as f:
pickle.dump(para_data, f)
def main():
parser = argparse.ArgumentParser(description='Run treccar experiments')
parser.add_argument('-in', '--input_dir', default='/home/sk1105/sumanta/trec_dataset/train')
parser.add_argument('-out', '--output_model_path', default='/home/sk1105/sumanta/bb_cluster_models/temp_model')
parser.add_argument('-mn', '--model_name', default='distilbert-base-uncased')
parser.add_argument('-ls', '--loss', default='bb')
parser.add_argument('-lm', '--lambda_val', type=float, default=200.0)
parser.add_argument('-b', '--beta', type=float, default=10.0)
parser.add_argument('-rg', '--reg_const', type=float, default=2.5)
parser.add_argument('-ep', '--num_epoch', type=int, default=3)
parser.add_argument('-ws', '--warmup', type=float, default=0.1)
parser.add_argument('-es', '--eval_steps', type=int, default=100)
parser.add_argument('-md', '--max_sample_size', type=int, default=-1)
parser.add_argument('-ext', '--exp_type', default='sqst')
parser.add_argument('--gpu_eval', default=False, action='store_true')
parser.add_argument('--train_psg_model', default=False, action='store_true')
args = parser.parse_args()
input_dir = args.input_dir
output_path = args.output_model_path
model_name = args.model_name
loss_name = args.loss
lambda_val = args.lambda_val
beta = args.beta
reg = args.reg_const
epochs = args.num_epoch
warmup_fraction = args.warmup
eval_steps = args.eval_steps
max_sample_size = args.max_sample_size
exp_type = args.exp_type
gpu_eval = args.gpu_eval
train_psg_model = args.train_psg_model
if exp_type == 'sqst':
with open(input_dir + '/sqst/sqst_treccar_train.pkl', 'rb') as f:
train_cluster_data = pickle.load(f)
with open(input_dir + '/sqst/sqst_treccar_val.pkl', 'rb') as f:
val_cluster_data = pickle.load(f)
with open(input_dir + '/sqst/sqst_treccar_test.pkl', 'rb') as f:
test_cluster_data = pickle.load(f)
elif exp_type == 'squt':
with open(input_dir + '/squt/squt_treccar_train.pkl', 'rb') as f:
train_cluster_data = pickle.load(f)
with open(input_dir + '/squt/squt_treccar_val.pkl', 'rb') as f:
val_cluster_data = pickle.load(f)
with open(input_dir + '/squt/squt_treccar_test.pkl', 'rb') as f:
test_cluster_data = pickle.load(f)
print('Data loaded, starting to train')
train(train_cluster_data, val_cluster_data, test_cluster_data, output_path, eval_steps, epochs, warmup_fraction,
lambda_val, reg, gpu_eval, max_train_size=max_sample_size, train_psg_model=train_psg_model,
model_name=model_name)
if __name__ == '__main__':
main() | [
"torch.nn.Tanh",
"sentence_transformers.models.Transformer",
"transformers.get_constant_schedule_with_warmup",
"sentence_transformers.util.batch_to_device",
"torch.from_numpy",
"numpy.array",
"torch.cuda.is_available",
"torch.utils.tensorboard.SummaryWriter",
"torch.nn.Sigmoid",
"numpy.mean",
"t... | [((960, 975), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (971, 975), False, 'import random\n'), ((976, 997), 'torch.manual_seed', 'torch.manual_seed', (['(42)'], {}), '(42)\n', (993, 997), False, 'import torch\n'), ((998, 1016), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1012, 1016), True, 'import numpy as np\n'), ((1128, 1168), 'experiments.treccar_run.get_trec_dat', 'get_trec_dat', (['art_qrels', 'top_qrels', 'None'], {}), '(art_qrels, top_qrels, None)\n', (1140, 1168), False, 'from experiments.treccar_run import prepare_cluster_data_train_only, prepare_cluster_data2, get_trec_dat, get_paratext_dict\n'), ((1186, 1213), 'experiments.treccar_run.get_paratext_dict', 'get_paratext_dict', (['paratext'], {}), '(paratext)\n', (1203, 1213), False, 'from experiments.treccar_run import prepare_cluster_data_train_only, prepare_cluster_data2, get_trec_dat, get_paratext_dict\n'), ((2170, 2210), 'experiments.treccar_run.get_trec_dat', 'get_trec_dat', (['art_qrels', 'top_qrels', 'None'], {}), '(art_qrels, top_qrels, None)\n', (2182, 2210), False, 'from experiments.treccar_run import prepare_cluster_data_train_only, prepare_cluster_data2, get_trec_dat, get_paratext_dict\n'), ((2427, 2454), 'experiments.treccar_run.get_paratext_dict', 'get_paratext_dict', (['paratext'], {}), '(paratext)\n', (2444, 2454), False, 'from experiments.treccar_run import prepare_cluster_data_train_only, prepare_cluster_data2, get_trec_dat, get_paratext_dict\n'), ((15382, 15417), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['"""./tensorboard_logs"""'], {}), "('./tensorboard_logs')\n", (15395, 15417), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((15429, 15524), 'clearml.Task.init', 'Task.init', ([], {'project_name': '"""Query Specific BB Clustering"""', 'task_name': '"""query_bbc_fixed_lambda"""'}), "(project_name='Query Specific BB Clustering', task_name=\n 'query_bbc_fixed_lambda')\n", (15438, 15524), False, 'from clearml 
import Task\n'), ((15628, 15653), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (15651, 15653), False, 'import torch\n'), ((16099, 16129), 'sentence_transformers.models.Transformer', 'models.Transformer', (['model_name'], {}), '(model_name)\n', (16117, 16129), False, 'from sentence_transformers import models\n'), ((16764, 16794), 'sentence_transformers.models.Transformer', 'models.Transformer', (['model_name'], {}), '(model_name)\n', (16782, 16794), False, 'from sentence_transformers import models\n'), ((17424, 17531), 'model.BBCluster.CustomSentenceTransformer', 'CustomSentenceTransformer', ([], {'modules': '[query_word_embedding_model, query_pooling_model, query_dense_model]'}), '(modules=[query_word_embedding_model,\n query_pooling_model, query_dense_model])\n', (17449, 17531), False, 'from model.BBCluster import CustomSentenceTransformer, OptimCluster, euclid_dist\n'), ((17597, 17692), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', ([], {'modules': '[psg_word_embedding_model, psg_pooling_model, psg_dense_model]'}), '(modules=[psg_word_embedding_model, psg_pooling_model,\n psg_dense_model])\n', (17616, 17692), False, 'from sentence_transformers import SentenceTransformer\n'), ((17825, 17883), 'torch.utils.data.DataLoader', 'DataLoader', (['train_cluster_data'], {'shuffle': '(True)', 'batch_size': '(1)'}), '(train_cluster_data, shuffle=True, batch_size=1)\n', (17835, 17883), False, 'from torch.utils.data import DataLoader\n'), ((23219, 23259), 'experiments.treccar_run.get_trec_dat', 'get_trec_dat', (['art_qrels', 'top_qrels', 'None'], {}), '(art_qrels, top_qrels, None)\n', (23231, 23259), False, 'from experiments.treccar_run import prepare_cluster_data_train_only, prepare_cluster_data2, get_trec_dat, get_paratext_dict\n'), ((23277, 23304), 'experiments.treccar_run.get_paratext_dict', 'get_paratext_dict', (['paratext'], {}), '(paratext)\n', (23294, 23304), False, 'from experiments.treccar_run import 
prepare_cluster_data_train_only, prepare_cluster_data2, get_trec_dat, get_paratext_dict\n'), ((24912, 24945), 'random.shuffle', 'random.shuffle', (['test_cluster_data'], {}), '(test_cluster_data)\n', (24926, 24945), False, 'import random\n'), ((25710, 25750), 'experiments.treccar_run.get_trec_dat', 'get_trec_dat', (['art_qrels', 'top_qrels', 'None'], {}), '(art_qrels, top_qrels, None)\n', (25722, 25750), False, 'from experiments.treccar_run import prepare_cluster_data_train_only, prepare_cluster_data2, get_trec_dat, get_paratext_dict\n'), ((25768, 25795), 'experiments.treccar_run.get_paratext_dict', 'get_paratext_dict', (['paratext'], {}), '(paratext)\n', (25785, 25795), False, 'from experiments.treccar_run import prepare_cluster_data_train_only, prepare_cluster_data2, get_trec_dat, get_paratext_dict\n'), ((27491, 27524), 'random.shuffle', 'random.shuffle', (['test_cluster_data'], {}), '(test_cluster_data)\n', (27505, 27524), False, 'import random\n'), ((28267, 28304), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['sbert_model_name'], {}), '(sbert_model_name)\n', (28286, 28304), False, 'from sentence_transformers import SentenceTransformer\n'), ((28328, 28363), 'experiments.treccar_run.get_trec_dat', 'get_trec_dat', (['art_qrels', 'None', 'None'], {}), '(art_qrels, None, None)\n', (28340, 28363), False, 'from experiments.treccar_run import prepare_cluster_data_train_only, prepare_cluster_data2, get_trec_dat, get_paratext_dict\n'), ((28384, 28416), 'experiments.treccar_run.get_paratext_dict', 'get_paratext_dict', (['paratext_file'], {}), '(paratext_file)\n', (28401, 28416), False, 'from experiments.treccar_run import prepare_cluster_data_train_only, prepare_cluster_data2, get_trec_dat, get_paratext_dict\n'), ((28934, 28996), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run treccar experiments"""'}), "(description='Run treccar experiments')\n", (28957, 28996), False, 'import argparse\n'), ((6756, 6797), 
'sentence_transformers.util.batch_to_device', 'batch_to_device', (['q_tokenized', 'self.device'], {}), '(q_tokenized, self.device)\n', (6771, 6797), False, 'from sentence_transformers.util import batch_to_device\n'), ((7991, 8005), 'model.BBCluster.OptimCluster', 'OptimCluster', ([], {}), '()\n', (8003, 8005), False, 'from model.BBCluster import CustomSentenceTransformer, OptimCluster, euclid_dist\n'), ((8115, 8134), 'torch.zeros', 'torch.zeros', (['(n, n)'], {}), '((n, n))\n', (8126, 8134), False, 'import torch\n'), ((10154, 10168), 'model.BBCluster.OptimCluster', 'OptimCluster', ([], {}), '()\n', (10166, 10168), False, 'from model.BBCluster import CustomSentenceTransformer, OptimCluster, euclid_dist\n'), ((10278, 10297), 'torch.zeros', 'torch.zeros', (['(n, n)'], {}), '((n, n))\n', (10289, 10297), False, 'import torch\n'), ((11045, 11062), 'torch.tensor', 'torch.tensor', (['[0]'], {}), '([0])\n', (11057, 11062), False, 'import torch\n'), ((12782, 12820), 'torch.norm', 'torch.norm', (['(x[:, None] - x)'], {'dim': '(2)', 'p': '(2)'}), '(x[:, None] - x, dim=2, p=2)\n', (12792, 12820), False, 'import torch\n'), ((15672, 15692), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (15684, 15692), False, 'import torch\n'), ((15786, 15805), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (15798, 15805), False, 'import torch\n'), ((25127, 25161), 'pickle.dump', 'pickle.dump', (['train_cluster_data', 'f'], {}), '(train_cluster_data, f)\n', (25138, 25161), False, 'import pickle\n'), ((25230, 25262), 'pickle.dump', 'pickle.dump', (['val_cluster_data', 'f'], {}), '(val_cluster_data, f)\n', (25241, 25262), False, 'import pickle\n'), ((25332, 25365), 'pickle.dump', 'pickle.dump', (['test_cluster_data', 'f'], {}), '(test_cluster_data, f)\n', (25343, 25365), False, 'import pickle\n'), ((26388, 26412), 'random.shuffle', 'random.shuffle', (['sections'], {}), '(sections)\n', (26402, 26412), False, 'import random\n'), ((27706, 27740), 'pickle.dump', 
'pickle.dump', (['train_cluster_data', 'f'], {}), '(train_cluster_data, f)\n', (27717, 27740), False, 'import pickle\n'), ((27809, 27841), 'pickle.dump', 'pickle.dump', (['val_cluster_data', 'f'], {}), '(val_cluster_data, f)\n', (27820, 27841), False, 'import pickle\n'), ((27911, 27944), 'pickle.dump', 'pickle.dump', (['test_cluster_data', 'f'], {}), '(test_cluster_data, f)\n', (27922, 27944), False, 'import pickle\n'), ((28881, 28906), 'pickle.dump', 'pickle.dump', (['para_data', 'f'], {}), '(para_data, f)\n', (28892, 28906), False, 'import pickle\n'), ((4389, 4437), 'model.BBCluster.CustomSentenceTransformer', 'CustomSentenceTransformer', (["(path + '/query_model')"], {}), "(path + '/query_model')\n", (4414, 4437), False, 'from model.BBCluster import CustomSentenceTransformer, OptimCluster, euclid_dist\n'), ((4465, 4511), 'model.BBCluster.CustomSentenceTransformer', 'CustomSentenceTransformer', (["(path + '/psg_model')"], {}), "(path + '/psg_model')\n", (4490, 4511), False, 'from model.BBCluster import CustomSentenceTransformer, OptimCluster, euclid_dist\n'), ((5108, 5153), 'transformers.get_constant_schedule', 'transformers.get_constant_schedule', (['optimizer'], {}), '(optimizer)\n', (5142, 5153), False, 'import transformers\n'), ((6936, 6977), 'sentence_transformers.util.batch_to_device', 'batch_to_device', (['p_tokenized', 'self.device'], {}), '(p_tokenized, self.device)\n', (6951, 6977), False, 'from sentence_transformers.util import batch_to_device\n'), ((14550, 14571), 'numpy.array', 'np.array', (['rand_scores'], {}), '(rand_scores)\n', (14558, 14571), True, 'import numpy as np\n'), ((14600, 14620), 'numpy.array', 'np.array', (['nmi_scores'], {}), '(nmi_scores)\n', (14608, 14620), True, 'import numpy as np\n'), ((14649, 14669), 'numpy.array', 'np.array', (['ami_scores'], {}), '(ami_scores)\n', (14657, 14669), True, 'import numpy as np\n'), ((16719, 16731), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (16729, 16731), True, 'import torch.nn as nn\n'), 
((17394, 17403), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (17401, 17403), True, 'import torch.nn as nn\n'), ((22440, 22465), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (22463, 22465), False, 'import torch\n'), ((30669, 30683), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (30680, 30683), False, 'import pickle\n'), ((30787, 30801), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (30798, 30801), False, 'import pickle\n'), ((30907, 30921), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (30918, 30921), False, 'import pickle\n'), ((2351, 2369), 'numpy.mean', 'np.mean', (['len_paras'], {}), '(len_paras)\n', (2358, 2369), True, 'import numpy as np\n'), ((2371, 2388), 'numpy.std', 'np.std', (['len_paras'], {}), '(len_paras)\n', (2377, 2388), True, 'import numpy as np\n'), ((2390, 2407), 'numpy.max', 'np.max', (['len_paras'], {}), '(len_paras)\n', (2396, 2407), True, 'import numpy as np\n'), ((5217, 5310), 'transformers.get_constant_schedule_with_warmup', 'transformers.get_constant_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': 'warmup_steps'}), '(optimizer, num_warmup_steps=\n warmup_steps)\n', (5263, 5310), False, 'import transformers\n'), ((6653, 6673), 'torch.tensor', 'torch.tensor', (['labels'], {}), '(labels)\n', (6665, 6673), False, 'import torch\n'), ((9166, 9203), 'model.BBCluster.euclid_dist', 'euclid_dist', (['scaled_psg_embeddings[i]'], {}), '(scaled_psg_embeddings[i])\n', (9177, 9203), False, 'from model.BBCluster import CustomSentenceTransformer, OptimCluster, euclid_dist\n'), ((11273, 11310), 'model.BBCluster.euclid_dist', 'euclid_dist', (['scaled_psg_embeddings[i]'], {}), '(scaled_psg_embeddings[i])\n', (11284, 11310), False, 'from model.BBCluster import CustomSentenceTransformer, OptimCluster, euclid_dist\n'), ((12584, 12615), 'torch.from_numpy', 'torch.from_numpy', (['example.label'], {}), '(example.label)\n', (12600, 12615), False, 'import torch\n'), ((13637, 13680), 
'sentence_transformers.util.batch_to_device', 'batch_to_device', (['doc_features', 'model_device'], {}), '(doc_features, model_device)\n', (13652, 13680), False, 'from sentence_transformers.util import batch_to_device\n'), ((31058, 31072), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (31069, 31072), False, 'import pickle\n'), ((31176, 31190), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (31187, 31190), False, 'import pickle\n'), ((31296, 31310), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (31307, 31310), False, 'import pickle\n'), ((1930, 1950), 'numpy.array', 'np.array', (['top_labels'], {}), '(top_labels)\n', (1938, 1950), True, 'import numpy as np\n'), ((3849, 3869), 'numpy.array', 'np.array', (['top_labels'], {}), '(top_labels)\n', (3857, 3869), True, 'import numpy as np\n'), ((5367, 5486), 'transformers.get_linear_schedule_with_warmup', 'transformers.get_linear_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': 'warmup_steps', 'num_training_steps': 't_total'}), '(optimizer, num_warmup_steps=\n warmup_steps, num_training_steps=t_total)\n', (5411, 5486), False, 'import transformers\n'), ((8508, 8531), 'torch.unique', 'torch.unique', (['labels[i]'], {}), '(labels[i])\n', (8520, 8531), False, 'import torch\n'), ((10671, 10694), 'torch.unique', 'torch.unique', (['labels[i]'], {}), '(labels[i])\n', (10683, 10694), False, 'import torch\n'), ((22492, 22511), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (22504, 22511), False, 'import torch\n'), ((22635, 22654), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (22647, 22654), False, 'import torch\n'), ((24673, 24694), 'numpy.array', 'np.array', (['test_labels'], {}), '(test_labels)\n', (24681, 24694), True, 'import numpy as np\n'), ((24883, 24905), 'numpy.array', 'np.array', (['train_labels'], {}), '(train_labels)\n', (24891, 24905), True, 'import numpy as np\n'), ((27251, 27272), 'numpy.array', 'np.array', (['test_labels'], {}), '(test_labels)\n', 
(27259, 27272), True, 'import numpy as np\n'), ((27462, 27484), 'numpy.array', 'np.array', (['train_labels'], {}), '(train_labels)\n', (27470, 27484), True, 'import numpy as np\n'), ((5607, 5726), 'transformers.get_cosine_schedule_with_warmup', 'transformers.get_cosine_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': 'warmup_steps', 'num_training_steps': 't_total'}), '(optimizer, num_warmup_steps=\n warmup_steps, num_training_steps=t_total)\n', (5651, 5726), False, 'import transformers\n'), ((5863, 6000), 'transformers.get_cosine_with_hard_restarts_schedule_with_warmup', 'transformers.get_cosine_with_hard_restarts_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': 'warmup_steps', 'num_training_steps': 't_total'}), '(optimizer,\n num_warmup_steps=warmup_steps, num_training_steps=t_total)\n', (5926, 6000), False, 'import transformers\n'), ((14042, 14066), 'torch.unique', 'torch.unique', (['true_label'], {}), '(true_label)\n', (14054, 14066), False, 'import torch\n')] |
import os
from django.core.files import File
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.conf import settings
from sme_management.models import *
def create_sample_sme(org_name='Andela', address='Ikorodu Rd'):
"""Create and return sample sme."""
return SME.objects.create(org_name=org_name, address=address)
def create_sample_user(email='<EMAIL>',
first_name='Test', last_name='User'):
"""Create and return a sample user"""
return get_user_model().objects.create_user(
first_name=first_name,
last_name=last_name,
email=email,
phone_no='09090909121',
password='<PASSWORD>**'
)
def create_sample_project_with_documents():
"""Create a sample project with documents."""
# Create a sample test file
fpath = "testfile.txt"
file = open(fpath, "w")
file.write("Hello World")
file.close()
file = open(fpath, "r")
f = File(file)
sme = create_sample_sme()
return SMEProject.objects.create(
project_name="Sample Project",
project_description="Sample Desc",
business_plan=f,
investment_tenure_end_date="2021-06-15",
cashflow_statement=f,
income_statement=f,
balance_sheet=f,
category='',
amount_required=30,
amount_raised=0,
equity_offering=15,
status="UNAPPROVED",
sme=sme
), fpath
def create_sample_project_without_documents():
"""Create a sample project with documents."""
sme = create_sample_sme()
return SMEProject.objects.create(
project_name="Sample Project",
project_description="Sample Desc",
investment_tenure_end_date="2021-06-15",
category='',
amount_required=30,
amount_raised=0,
equity_offering=15,
status="UNAPPROVED",
sme=sme
)
class SMEManagementModelTests(TestCase):
# def setUp(self):
# settings.MEDIA_ROOT = os.path.join(settings.BASE_DIR, "media-tmp")
def test_sme_created_successfully(self):
"""Test that sme is created successfully"""
sme = create_sample_sme()
self.assertEqual(str(sme), sme.org_name)
def test_sme_user_created_successfully(self):
"""Test that sme user is created successfully"""
SMEUser.objects.create(
sme=create_sample_sme(),
user=create_sample_user()
)
sme_users = SMEUser.objects.all()
self.assertEqual(len(sme_users), 1)
def test_sme_project_created_successfully(self):
"""Test that sme project is created successfully"""
sme_project, fpath = create_sample_project_with_documents()
self.assertEqual(sme_project.project_name, "Sample Project")
self.assertEqual(sme_project.status, "UNAPPROVED")
self.assertTrue(
os.path.exists(settings.BASE_DIR + sme_project.business_plan.url))
self.assertTrue(
os.path.exists(settings.BASE_DIR + sme_project.cashflow_statement.url))
self.assertTrue(
os.path.exists(settings.BASE_DIR + sme_project.income_statement.url))
self.assertTrue(
os.path.exists(settings.BASE_DIR + sme_project.balance_sheet.url))
os.remove(fpath)
os.remove(settings.BASE_DIR + sme_project.business_plan.url)
os.remove(settings.BASE_DIR + sme_project.cashflow_statement.url)
os.remove(settings.BASE_DIR + sme_project.income_statement.url)
os.remove(settings.BASE_DIR + sme_project.balance_sheet.url)
def test_sme_project_milestones_created_successfully(self):
"""Test that an sme project milestone is created successfully"""
sme_project = create_sample_project_without_documents()
project_milestone = SMEProjectMilestones.objects.create(
name="Sample Milestone",
description="Sample Desc",
amount_required=27,
sme_project=sme_project,
sme=sme_project.sme
)
self.assertEqual(project_milestone.name, "Sample Milestone")
| [
"os.path.exists",
"django.contrib.auth.get_user_model",
"django.core.files.File",
"os.remove"
] | [((981, 991), 'django.core.files.File', 'File', (['file'], {}), '(file)\n', (985, 991), False, 'from django.core.files import File\n'), ((3296, 3312), 'os.remove', 'os.remove', (['fpath'], {}), '(fpath)\n', (3305, 3312), False, 'import os\n'), ((3321, 3381), 'os.remove', 'os.remove', (['(settings.BASE_DIR + sme_project.business_plan.url)'], {}), '(settings.BASE_DIR + sme_project.business_plan.url)\n', (3330, 3381), False, 'import os\n'), ((3390, 3455), 'os.remove', 'os.remove', (['(settings.BASE_DIR + sme_project.cashflow_statement.url)'], {}), '(settings.BASE_DIR + sme_project.cashflow_statement.url)\n', (3399, 3455), False, 'import os\n'), ((3464, 3527), 'os.remove', 'os.remove', (['(settings.BASE_DIR + sme_project.income_statement.url)'], {}), '(settings.BASE_DIR + sme_project.income_statement.url)\n', (3473, 3527), False, 'import os\n'), ((3536, 3596), 'os.remove', 'os.remove', (['(settings.BASE_DIR + sme_project.balance_sheet.url)'], {}), '(settings.BASE_DIR + sme_project.balance_sheet.url)\n', (3545, 3596), False, 'import os\n'), ((2900, 2965), 'os.path.exists', 'os.path.exists', (['(settings.BASE_DIR + sme_project.business_plan.url)'], {}), '(settings.BASE_DIR + sme_project.business_plan.url)\n', (2914, 2965), False, 'import os\n'), ((3004, 3074), 'os.path.exists', 'os.path.exists', (['(settings.BASE_DIR + sme_project.cashflow_statement.url)'], {}), '(settings.BASE_DIR + sme_project.cashflow_statement.url)\n', (3018, 3074), False, 'import os\n'), ((3113, 3181), 'os.path.exists', 'os.path.exists', (['(settings.BASE_DIR + sme_project.income_statement.url)'], {}), '(settings.BASE_DIR + sme_project.income_statement.url)\n', (3127, 3181), False, 'import os\n'), ((3220, 3285), 'os.path.exists', 'os.path.exists', (['(settings.BASE_DIR + sme_project.balance_sheet.url)'], {}), '(settings.BASE_DIR + sme_project.balance_sheet.url)\n', (3234, 3285), False, 'import os\n'), ((525, 541), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (539, 
541), False, 'from django.contrib.auth import get_user_model\n')] |
import requests
import requests.exceptions
import requests_mock
from tieronepointfive.enums import State, Transition
from tieronepointfive.state_machine import StateMachineTick
from tieronepointfive.evaluation_helpers import HttpHelper
from ..mock_config import MockConfig
google = 'https://www.google.com'
bing = 'https://www.bing.com'
def _assert_ticks_equal(actual_tick, expected_tick):
assert actual_tick.is_complete() == expected_tick.is_complete()
assert actual_tick.is_steady() == expected_tick.is_steady()
assert actual_tick.start_state == expected_tick.start_state
assert actual_tick.transition == expected_tick.transition
assert actual_tick.end_state == expected_tick.end_state
assert actual_tick.is_terminal == expected_tick.is_terminal
def _test_http_helper(expected_transition, expected_end_state, expected_is_terminal):
config = MockConfig()
helper = HttpHelper(config)
start_tick = StateMachineTick(State.CONNECTION_WORKING)
actual_tick = helper.evaluate(start_tick)
expected_tick = StateMachineTick.create_completed(State.CONNECTION_WORKING, expected_transition, expected_end_state, expected_is_terminal)
_assert_ticks_equal(actual_tick, expected_tick)
def test_http_helper_working_connection():
with requests_mock.Mocker() as req_mock:
req_mock.get(google, text='sure')
req_mock.get(bing, text='sure')
_test_http_helper(Transition.ALL_SITES_REACHED, State.CONNECTION_WORKING, True)
def test_http_helper_partially_working():
with requests_mock.Mocker() as req_mock:
req_mock.get(google, text='sure')
req_mock.get(bing, exc=requests.exceptions.ConnectTimeout)
_test_http_helper(Transition.SOME_SITES_REACHED, State.CONNECTION_FAILED, True)
def test_http_helper_bad_return_codes():
with requests_mock.Mocker() as req_mock:
req_mock.get(google, text='bad', status_code=400)
req_mock.get(bing, text='bad', status_code=400)
_test_http_helper(Transition.NO_SITES_REACHED, State.CONNECTION_FAILED, True)
| [
"tieronepointfive.evaluation_helpers.HttpHelper",
"requests_mock.Mocker",
"tieronepointfive.state_machine.StateMachineTick.create_completed",
"tieronepointfive.state_machine.StateMachineTick"
] | [((927, 945), 'tieronepointfive.evaluation_helpers.HttpHelper', 'HttpHelper', (['config'], {}), '(config)\n', (937, 945), False, 'from tieronepointfive.evaluation_helpers import HttpHelper\n'), ((964, 1006), 'tieronepointfive.state_machine.StateMachineTick', 'StateMachineTick', (['State.CONNECTION_WORKING'], {}), '(State.CONNECTION_WORKING)\n', (980, 1006), False, 'from tieronepointfive.state_machine import StateMachineTick\n'), ((1075, 1201), 'tieronepointfive.state_machine.StateMachineTick.create_completed', 'StateMachineTick.create_completed', (['State.CONNECTION_WORKING', 'expected_transition', 'expected_end_state', 'expected_is_terminal'], {}), '(State.CONNECTION_WORKING,\n expected_transition, expected_end_state, expected_is_terminal)\n', (1108, 1201), False, 'from tieronepointfive.state_machine import StateMachineTick\n'), ((1309, 1331), 'requests_mock.Mocker', 'requests_mock.Mocker', ([], {}), '()\n', (1329, 1331), False, 'import requests_mock\n'), ((1577, 1599), 'requests_mock.Mocker', 'requests_mock.Mocker', ([], {}), '()\n', (1597, 1599), False, 'import requests_mock\n'), ((1871, 1893), 'requests_mock.Mocker', 'requests_mock.Mocker', ([], {}), '()\n', (1891, 1893), False, 'import requests_mock\n')] |
# Copyright 2019 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from glance.db.migration import CURRENT_RELEASE
from glance.version import version_info
class TestDataMigrationVersion(testtools.TestCase):
def test_migration_version(self):
"""Make sure the data migration version info has been updated."""
release_number = int(version_info.version_string().split('.', 1)[0])
# by rule, release names must be composed of the 26 letters of the
# ISO Latin alphabet (ord('A')==65, ord('Z')==90)
release_letter = str(CURRENT_RELEASE[:1].upper()).encode('ascii')
# Convert release letter into an int in [1:26]. The first
# glance release was 'Bexar'.
converted_release_letter = (ord(release_letter) -
ord(u'B'.encode('ascii')) + 1)
# Project the release number into [1:26]
converted_release_number = release_number % 26
# Prepare for the worst with a super-informative message
msg = ('\n\n'
'EMERGENCY!\n'
'glance.db.migration.CURRENT_RELEASE is out of sync '
'with the glance version.\n'
' CURRENT_RELEASE: %s\n'
' glance version: %s\n'
'glance.db.migration.CURRENT_RELEASE needs to be '
'updated IMMEDIATELY.\n'
'The gate will be wedged until the update is made.\n'
'EMERGENCY!\n'
'\n') % (CURRENT_RELEASE,
version_info.version_string())
self.assertEqual(converted_release_letter,
converted_release_number,
msg)
| [
"glance.version.version_info.version_string"
] | [((2106, 2135), 'glance.version.version_info.version_string', 'version_info.version_string', ([], {}), '()\n', (2133, 2135), False, 'from glance.version import version_info\n'), ((932, 961), 'glance.version.version_info.version_string', 'version_info.version_string', ([], {}), '()\n', (959, 961), False, 'from glance.version import version_info\n')] |
'''
Instructor control script for Project 5- Text Adventure Beta
@author: acbart
@requires: pedal
@title: Project 5- Text Adventure- Control Script
@version: 4/4/2019 10:29am
'''
__version__ = 1
from pedal.assertions.organizers import phase, postcondition, precondition
from pedal.assertions.setup import resolve_all
from pedal.assertions.runtime import *
from pedal.assertions.static import *
from pedal.sandbox.commands import evaluate, call, run
from pedal.toolkit.functions import *
from pedal.sandbox.mocked import disabled_builtin, MockPedal, BlockedModule
from pedal.cait import parse_program
from cisc108.assertions import _validate_type
import os
import sys
# Start with some sanity checks: run the submission in the pedal sandbox,
# confirm it executed without crashing, and confirm it imports cisc108.
student = run()
assert_ran()
ensure_import('cisc108')
# Expected student-defined API, checked by the phases below:
# Is there a World record? Is it a dictionary?
# Is there an INTRODUCTION? Is it a string?
# All functions defined? Documented? Covered?
# make_world() -> dict
# describe(world) -> str
# get_options(world) -> list[str]
# update(world, command) -> str
# describe_ending(world) -> str
# choose(list[str]) -> str
# WIN_PATH
# LOSE_PATH
# Can we play through the given paths and win/lose/quit them?
# Partial credit awarded per correctly implemented function.
FUNCTION_VALUE = 2
def assertType(data, name, score=None, message=None, report=MAIN_REPORT,
               contextualize=True):
    """
    Assert that the sandbox result ``data`` matches the student-declared
    type bound to ``name`` (e.g. the student's ``World`` or ``Player``
    type declaration), using cisc108's ``_validate_type``.

    On success, ``score`` partial credit is given to ``report``.
    On failure, an AssertionException is recorded and an "Instructor Test"
    feedback entry describing the mismatch is attached to ``report``.

    Args:
        data: A sandbox result wrapper; its ``_actual_value`` is validated.
        name (str): Name of the type declaration in the student's code.
        score: Partial credit to award when the assertion passes.
        message: Unused; kept for signature parity with the other helpers.
        report: The pedal report that collects feedback.
        contextualize (bool): Unused; kept for signature parity.

    Returns:
        bool: True if the value matched the declared type, False otherwise.
    """
    _setup_assertions(report)
    WorldType = student.data[name]
    reason = _validate_type(data._actual_value, WorldType, "The value")
    # PEP 8: comparisons to None use identity (``is``), not equality.
    if reason is None:
        report.give_partial(score)
        return True
    context = _build_context([data], "was", "to be a {}.\n".format(name), False)
    failure = AssertionException("{}".format(data._actual_value))
    report['assertions']['collected'].append(failure)
    context = context.format(data._actual_value, WorldType)
    report.attach('Instructor Test', category='student', tool='Assertions',
                  mistake={'message': "Student code failed instructor test.<br>\n" +
                           context + reason})
    return False
# Phase 1: the student must declare a "World" record type as a dictionary.
with phase("records", score=1/10):
    assert_has_variable(student, "World")
    # I expected the variable "World" to be a dict
    assert_is_instance(student['World'], dict)
# Phase 2: the student must declare a non-empty INTRODUCTION string.
with phase("introduction", score=1/10):
    assert_has_variable(student, "INTRODUCTION")
    assert_is_instance(student['INTRODUCTION'], str)
    # assert_true on a string checks that it is non-empty (truthy).
    assert_true(student['INTRODUCTION'])
# Phase 3: make_world() exists, takes no arguments, and returns a World.
with phase("make_world", before="make_world_components"):
    ensure_signature('make_world', 0, returns='World')
    assert_has_function(student, 'make_world')
    # Call make_world() and bind its result as 'initial_world' in the sandbox.
    call('make_world', target='initial_world')
    assert_is_instance(student["initial_world"], student["World"])
# Phase 4a: the new world's status must start as the string 'playing'.
# The grouping context keeps these sandbox calls together in the feedback.
with phase("make_world_components", after="win_and_lose_paths"):
    student.start_grouping_context()
    call('make_world', target="initial_world")
    assert_in("status", student['initial_world'])
    assert_equal(evaluate("initial_world['status']", target='status'),
                 'playing')
    student.stop_grouping_context()
# NOTE(review): same phase name as above, so pedal groups this with the other
# "make_world_components" checks. It only rebinds a fresh world; the decorated
# grader functions below re-call make_world themselves, so this with-block may
# be redundant -- confirm against the pedal phase semantics.
with phase("make_world_components", after="win_and_lose_paths"):
    initial_world = student.call('make_world', target='world')
@phase("make_world_components", after="win_and_lose_paths")
def grade_make_world_map():
    """Check the structure of the 'locations' map in the student's world.

    Each location must be a dict with 'neighbors' (list), 'about' (str),
    and 'stuff' (list) fields, keyed by a string location name.
    """
    initial_world = student.call('make_world', target='world')
    # Map of location-name -> location record
    assertIn('locations', initial_world)
    assertIsInstance(initial_world['locations'], dict)
    # (Removed an unused local that captured .keys() without using it.)
    assertGreaterEqual(initial_world['locations'], 1,
                       message="I expected there to be more than one location in your world.")
    # Confirm the record structure of every individual location.
    for name, location in initial_world['locations'].items():
        assertIsInstance(name, str)
        assertIsInstance(location, dict)
        # Neighbors: names of locations reachable from here
        assertIn('neighbors', location)
        assertIsInstance(location['neighbors'], list)
        # About: human-readable description of the location
        assertIn('about', location)
        assertIsInstance(location['about'], str)
        # Stuff: items present at this location
        assertIn('stuff', location)
        assertIsInstance(location['stuff'], list)
@phase("make_world_components", after="win_and_lose_paths")
def grade_make_world_player():
    """Check the structure of the player record ('you') in the student's world."""
    initial_world = student.call('make_world')
    # Player record must exist and match the student's declared Player type.
    assertIn('you', initial_world)
    assertType(initial_world['you'], 'Player')
    # Location: the name of the place the player currently occupies
    assertIn('at', initial_world['you'])
    assertIsInstance(initial_world['you']['at'], str)
    # Inventory: the list of items the player is carrying
    assertIn('inventory', initial_world['you'])
    assertIsInstance(initial_world['you']['inventory'], list)
@phase("make_world_done", after='make_world')
def grade_make_world_finished():
    """Award credit once the make_world phase has passed."""
    give_partial(FUNCTION_VALUE)
@phase("describe", after='make_world_done')
def grade_describe():
    """Check that describe(world) exists and returns a string."""
    assertGenerally(match_signature('describe', 1))
    assertHasFunction(student, 'describe', args=['World'], returns='str')
    initial_world = student.call('make_world', target='world')
    # keep_context preserves the sandbox state so the calls read as one session.
    message = student.call('describe', initial_world._actual_value, keep_context=True,
                           target='message', context='message = describe(world)')
    assertIsInstance(message, str)
    give_partial(FUNCTION_VALUE)
@phase("get_choices", after='make_world_done')
def grade_get_choices():
    """Check that get_choices(world) returns a non-empty list of command strings."""
    assertGenerally(match_signature('get_choices', 1))
    assertHasFunction(student, 'get_choices', args=['World'], returns='list[str]')
    initial_world = student.call('make_world', target='world')
    options = student.call('get_choices', initial_world._actual_value, keep_context=True,
                           target='commands', context='commands = get_choices(world)')
    assertIsInstance(options, list)
    assertGreater(options, 0,
                  message="I expected there to be more than one command.")
    assertIsInstance(options[0], str)
    # 'Quit' must always be one of the available commands.
    assertIn('Quit', options)
    give_partial(FUNCTION_VALUE)
@phase("choose", after='get_choices')
def grade_choose():
    """Check that choose(options) re-prompts until a valid option is entered."""
    assertGenerally(match_signature('choose', 1))
    assertHasFunction(student, 'choose', args=['list[str]'], returns='str')
    # Each case: (valid options, simulated keystrokes, expected final answer).
    # Invalid entries should be rejected until a valid option is typed.
    cases = [
        (['Quit', 'Run', 'Fight'], ['Walk', 'Skip', 'Fight'], 'Fight'),
        (['Quit', 'Run', 'Fight'], ['Quit'], 'Quit'),
        (['Open', 'Close', 'Sleep'], ['Walk', 'Walk', 'Sleep'], 'Sleep'),
    ]
    for options, typed, expected in cases:
        assertEqual(student.call('choose', options, inputs=typed), expected)
    give_partial(FUNCTION_VALUE)
@phase("update", after='get_choices')
def grade_update():
    """Check that update(world, command) accepts a valid command and returns a string."""
    assertGenerally(match_signature('update', 2))
    assertHasFunction(student, 'update', args=['World', 'str'], returns='str')
    initial_world = student.call('make_world', target='world')
    options = student.call('get_choices', initial_world._actual_value, keep_context=True,
                           target='commands', context='commands = get_choices(world)')
    # Feed the first available command back into update().
    message = student.call('update', initial_world._actual_value, options._actual_value[0], keep_context=True,
                           target='message', context='message = update(world, commands[0])')
    assertIsInstance(message, str)
    give_partial(FUNCTION_VALUE)
@phase("describe_ending", after='update')
def grade_describe_ending():
    """Check that describe_ending(world) returns a string after the game is quit."""
    assertGenerally(match_signature('describe_ending', 1))
    assertHasFunction(student, 'describe_ending', args=['World'], returns='str')
    initial_world = student.call('make_world', target='world')
    # End the game via the always-available 'Quit' command first.
    message = student.call('update', initial_world._actual_value, 'Quit', keep_context=True,
                           target='message', context='message = update(world, "Quit")')
    message = student.call('describe_ending', initial_world._actual_value, keep_context=True,
                           target='message', context='message = describe_ending(world)')
    assertIsInstance(message, str)
    give_partial(FUNCTION_VALUE)
def test_path(path, outcome, path_name):
    """Play through a scripted command path and check the final game status.

    Args:
        path (list[str]): Commands to feed to the game, in order.
        outcome (str): Expected final status ('winning' or 'losing').
        path_name (str): Label used in the failure message ('WIN_PATH'/'LOSE_PATH').
    """
    world = student.call('make_world', target='world', keep_context=True)
    for command in path:
        # The game must still be in progress before each command.
        assertIn('status', world)
        assertEqual(world['status'], 'playing')
        assertIsInstance(command, str)
        # Simulate one turn: describe, list choices, then apply the command.
        message = student.call('describe', world._actual_value, keep_context=True,
                               target='message', context='message = describe(world)')
        assertIsInstance(message, str)
        options = student.call('get_choices', world._actual_value, keep_context=True,
                               target='commands', context='commands = get_choices(world)')
        assertIsInstance(options, list)
        # The scripted command must be among the offered choices.
        assertIn(command, options)
        message = student.call('update', world._actual_value, command, keep_context=True,
                               target='message', context='message = update(world, {})'.format(repr(command)))
        assertIsInstance(message, str)
    # NOTE(review): the loop compares world['status'] directly, but here the
    # raw value is read via .value -- presumably unwrapping a sandbox proxy;
    # confirm both accesses are intentional.
    assertEqual(world['status'].value, outcome,
                message="I tried your {path_name} path, but your world's status ended as '{outcome}' instead of '{expected}'.".format(path_name=path_name, outcome=world['status'].value, expected=outcome))
@phase("win_and_lose_paths", after=['make_world', 'get_choices',
                                    'describe', 'choose', 'update',
                                    'describe_ending'])
def grade_win_and_lose_paths():
    """Play the student's declared WIN_PATH and LOSE_PATH to completion.

    Requires both module-level lists to exist, replays each through
    test_path, and awards double partial credit when both paths reach
    their expected endings ('winning' and 'losing' respectively).
    """
    assertHas(student, "WIN_PATH", types=list)
    assertHas(student, "LOSE_PATH", types=list)
    student.run("# I am going to try your WIN_PATH", context=None)
    test_path(student.data['WIN_PATH'], 'winning', 'WIN_PATH')
    student.run("# I am going to try your LOSE_PATH", context=None)
    test_path(student.data['LOSE_PATH'], 'losing', 'LOSE_PATH')
    compliment("I was able to play your game!")
    give_partial(FUNCTION_VALUE*2)
@phase('conclusion', after='make_world_components')
def finish_grading():
    """Final phase: require all definitions to be documented, then resolve feedback."""
    # 2
    assertGenerally(all_documented(), score=5)
    if sanity:
        resolve_all(set_success=True)
| [
"pedal.sandbox.commands.call",
"pedal.sandbox.commands.run",
"pedal.assertions.organizers.phase",
"pedal.assertions.setup.resolve_all",
"cisc108.assertions._validate_type",
"pedal.sandbox.commands.evaluate"
] | [((713, 718), 'pedal.sandbox.commands.run', 'run', ([], {}), '()\n', (716, 718), False, 'from pedal.sandbox.commands import evaluate, call, run\n'), ((3099, 3157), 'pedal.assertions.organizers.phase', 'phase', (['"""make_world_components"""'], {'after': '"""win_and_lose_paths"""'}), "('make_world_components', after='win_and_lose_paths')\n", (3104, 3157), False, 'from pedal.assertions.organizers import phase, postcondition, precondition\n'), ((4023, 4081), 'pedal.assertions.organizers.phase', 'phase', (['"""make_world_components"""'], {'after': '"""win_and_lose_paths"""'}), "('make_world_components', after='win_and_lose_paths')\n", (4028, 4081), False, 'from pedal.assertions.organizers import phase, postcondition, precondition\n'), ((4493, 4537), 'pedal.assertions.organizers.phase', 'phase', (['"""make_world_done"""'], {'after': '"""make_world"""'}), "('make_world_done', after='make_world')\n", (4498, 4537), False, 'from pedal.assertions.organizers import phase, postcondition, precondition\n'), ((4606, 4648), 'pedal.assertions.organizers.phase', 'phase', (['"""describe"""'], {'after': '"""make_world_done"""'}), "('describe', after='make_world_done')\n", (4611, 4648), False, 'from pedal.assertions.organizers import phase, postcondition, precondition\n'), ((5099, 5144), 'pedal.assertions.organizers.phase', 'phase', (['"""get_choices"""'], {'after': '"""make_world_done"""'}), "('get_choices', after='make_world_done')\n", (5104, 5144), False, 'from pedal.assertions.organizers import phase, postcondition, precondition\n'), ((5792, 5828), 'pedal.assertions.organizers.phase', 'phase', (['"""choose"""'], {'after': '"""get_choices"""'}), "('choose', after='get_choices')\n", (5797, 5828), False, 'from pedal.assertions.organizers import phase, postcondition, precondition\n'), ((6456, 6492), 'pedal.assertions.organizers.phase', 'phase', (['"""update"""'], {'after': '"""get_choices"""'}), "('update', after='get_choices')\n", (6461, 6492), False, 'from 
pedal.assertions.organizers import phase, postcondition, precondition\n'), ((7156, 7196), 'pedal.assertions.organizers.phase', 'phase', (['"""describe_ending"""'], {'after': '"""update"""'}), "('describe_ending', after='update')\n", (7161, 7196), False, 'from pedal.assertions.organizers import phase, postcondition, precondition\n'), ((9057, 9176), 'pedal.assertions.organizers.phase', 'phase', (['"""win_and_lose_paths"""'], {'after': "['make_world', 'get_choices', 'describe', 'choose', 'update', 'describe_ending'\n ]"}), "('win_and_lose_paths', after=['make_world', 'get_choices', 'describe',\n 'choose', 'update', 'describe_ending'])\n", (9062, 9176), False, 'from pedal.assertions.organizers import phase, postcondition, precondition\n'), ((9720, 9770), 'pedal.assertions.organizers.phase', 'phase', (['"""conclusion"""'], {'after': '"""make_world_components"""'}), "('conclusion', after='make_world_components')\n", (9725, 9770), False, 'from pedal.assertions.organizers import phase, postcondition, precondition\n'), ((1365, 1423), 'cisc108.assertions._validate_type', '_validate_type', (['data._actual_value', 'WorldType', '"""The value"""'], {}), "(data._actual_value, WorldType, 'The value')\n", (1379, 1423), False, 'from cisc108.assertions import _validate_type\n'), ((2004, 2034), 'pedal.assertions.organizers.phase', 'phase', (['"""records"""'], {'score': '(1 / 10)'}), "('records', score=1 / 10)\n", (2009, 2034), False, 'from pedal.assertions.organizers import phase, postcondition, precondition\n'), ((2180, 2215), 'pedal.assertions.organizers.phase', 'phase', (['"""introduction"""'], {'score': '(1 / 10)'}), "('introduction', score=1 / 10)\n", (2185, 2215), False, 'from pedal.assertions.organizers import phase, postcondition, precondition\n'), ((2364, 2415), 'pedal.assertions.organizers.phase', 'phase', (['"""make_world"""'], {'before': '"""make_world_components"""'}), "('make_world', before='make_world_components')\n", (2369, 2415), False, 'from 
pedal.assertions.organizers import phase, postcondition, precondition\n'), ((2523, 2565), 'pedal.sandbox.commands.call', 'call', (['"""make_world"""'], {'target': '"""initial_world"""'}), "('make_world', target='initial_world')\n", (2527, 2565), False, 'from pedal.sandbox.commands import evaluate, call, run\n'), ((2639, 2697), 'pedal.assertions.organizers.phase', 'phase', (['"""make_world_components"""'], {'after': '"""win_and_lose_paths"""'}), "('make_world_components', after='win_and_lose_paths')\n", (2644, 2697), False, 'from pedal.assertions.organizers import phase, postcondition, precondition\n'), ((2740, 2782), 'pedal.sandbox.commands.call', 'call', (['"""make_world"""'], {'target': '"""initial_world"""'}), "('make_world', target='initial_world')\n", (2744, 2782), False, 'from pedal.sandbox.commands import evaluate, call, run\n'), ((2974, 3032), 'pedal.assertions.organizers.phase', 'phase', (['"""make_world_components"""'], {'after': '"""win_and_lose_paths"""'}), "('make_world_components', after='win_and_lose_paths')\n", (2979, 3032), False, 'from pedal.assertions.organizers import phase, postcondition, precondition\n'), ((9864, 9893), 'pedal.assertions.setup.resolve_all', 'resolve_all', ([], {'set_success': '(True)'}), '(set_success=True)\n', (9875, 9893), False, 'from pedal.assertions.setup import resolve_all\n'), ((2850, 2902), 'pedal.sandbox.commands.evaluate', 'evaluate', (['"""initial_world[\'status\']"""'], {'target': '"""status"""'}), '("initial_world[\'status\']", target=\'status\')\n', (2858, 2902), False, 'from pedal.sandbox.commands import evaluate, call, run\n')] |
import csv
import pandas as pd
import numpy as np
import os
import sys
# Prepend the directory two levels above this file to sys.path
# (presumably the project root, so absolute imports work when run directly).
root_dir = os.path.dirname(__file__)
sys.path.insert(0, root_dir + '/../..')
class ETL:
    """Minimal extract step: load a CSV file into a pandas DataFrame."""

    def __init__(self):
        # No dataset until load_data() is called.
        self.data = None

    def load_data(self, path):
        """Read the CSV at ``path`` into ``self.data``; returns ``self`` for chaining."""
        frame = pd.read_csv(path)
        self.data = frame
        return self
| [
"os.path.dirname",
"sys.path.insert",
"pandas.read_csv"
] | [((83, 108), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (98, 108), False, 'import os\n'), ((109, 148), 'sys.path.insert', 'sys.path.insert', (['(0)', "(root_dir + '/../..')"], {}), "(0, root_dir + '/../..')\n", (124, 148), False, 'import sys\n'), ((262, 279), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (273, 279), True, 'import pandas as pd\n')] |
"""
Tornado server utilities
- LoggingApplication: a base Application class with logging and metrics
- RequestHandler: a base request handler with helpers
Dependencies:
- metrics
- servicelog
See tests/test_tornadoutil.py for usage example.
"""
import json
import datetime
import uuid
import httplib # for httplib.responses
import tornado.web
import tornado.options
import tornado.httpserver
import metrics
import servicelog
REQUEST_ID_HEADER = 'X-Request-Id'
AUTH_USER_HEADER = 'X-Auth-User'
class LoggingApplication(tornado.web.Application):
    """
    Tornado Application that logs every request to the JSON UDP collector
    (via ``servicelog``) and emits a per-status timing metric (via
    ``metrics``). Logged fields: method, uri, remote_ip, status, request
    time in ms, service id, and the X-Request-Id header. Handlers may add
    arbitrary key/value pairs by populating a ``logvalues`` dict attribute.

    >>> app = LoggingApplication('myservice')
    """
    def __init__(self, service_id, *args, **kwargs):
        # Fall back to 'undefined' so the metric prefix is never empty.
        self.service_id = service_id or 'undefined'
        metrics.configure(prefix=self.service_id)
        super(LoggingApplication, self).__init__(*args, **kwargs)
    def run(self, port): # pragma: no coverage
        """
        Run on given port. Parse standard options and start the http server.
        """
        tornado.options.parse_command_line()
        http_server = tornado.httpserver.HTTPServer(self)
        http_server.listen(port)
        tornado.ioloop.IOLoop.instance().start()
    def log_request(self, handler):
        """
        Override base method to log requests to the JSON UDP collector and
        emit a per-status timing metric (e.g. ``requests.200``).
        """
        packet = {'method': handler.request.method,
                  'uri': handler.request.uri,
                  'remote_ip': handler.request.remote_ip,
                  'status': handler.get_status(),
                  'request_time_ms': handler.request.request_time() * 1000.0,
                  'service_id': self.service_id,
                  'request_id': handler.request.headers.get(REQUEST_ID_HEADER,
                                                            'undefined')
                  }
        # handler can optionally define additional data to log
        if hasattr(handler, 'logvalues'):
            for key, value in handler.logvalues.iteritems():
                packet[key] = value
        servicelog.log(packet)
        metric = "requests." + str(handler.get_status())
        metrics.timing(metric, handler.request.request_time() * 1000.0)
        super(LoggingApplication, self).log_request(handler)
class RequestHandler(tornado.web.RequestHandler):
    """Base request handler with logging, JSON, and validation helpers."""
    def appurl(self):
        """Return the application's base URL (protocol + host)."""
        return self.request.protocol + "://" + self.request.host
    def caller(self):
        """Return the caller's ID from the X-Auth-User header ('undefined' if absent)."""
        return self.request.headers.get(AUTH_USER_HEADER, 'undefined')
    def logvalue(self, key, value):
        """Attach a key/value pair to this request's log record (see LoggingApplication)."""
        if not hasattr(self, 'logvalues'):
            self.logvalues = {}
        self.logvalues[key] = value
    def halt(self, code, msg=None):
        """Halt processing. Raise HTTP error with given code and message."""
        raise tornado.web.HTTPError(code, msg)
    def json(self, obj):
        """Serialize ``obj`` to JSON using our encoder (supports datetimes)."""
        return json.dumps(obj, cls=JSONEncoder)
    def write_error(self, status_code, **kwargs):
        """Log halt_reason in the service log and render a minimal error page."""
        # Default message is the standard reason phrase for the status code.
        message = default_message = httplib.responses.get(status_code, '')
        # HTTPError exceptions may have a log_message attribute
        if 'exc_info' in kwargs:
            (_, exc, _) = kwargs['exc_info']
            if hasattr(exc, 'log_message'):
                message = str(exc.log_message) or default_message
        self.logvalue('halt_reason', message)
        title = "{}: {}".format(status_code, default_message)
        body = "{}: {}".format(status_code, message)
        self.finish("<html><title>" + title + "</title>"
                    "<body>" + body + "</body></html>")
    def timeit(self, metric, func, *args, **kwargs):
        """Time execution of callable, emit the metric, then return the result."""
        return metrics.timeit(metric, func, *args, **kwargs)
    def require_json_content_type(self):
        """Raise a 400 error if the request content type is not JSON."""
        self.require_content_type('application/json')
    def require_content_type(self, content_type):
        """Raise a 400 if the request content type is not as specified."""
        if self.request.headers.get('content-type', '') != content_type:
            self.halt(400, 'Content type must be ' + content_type)
    def prepare(self):
        """Override base method to add a request ID header if needed."""
        self._ensure_request_id_header()
    @property
    def request_id(self):
        """Return the request ID header value ('undefined' if absent)."""
        return self.request.headers.get(REQUEST_ID_HEADER, 'undefined')
    def set_headers(self, headers):
        """Set multiple response headers from a dict."""
        for (header, value) in headers.iteritems():
            self.set_header(header, value)
    def _ensure_request_id_header(self):
        "Ensure request headers have a request ID (uuid1 hex). Set one if needed."
        if REQUEST_ID_HEADER not in self.request.headers:
            self.request.headers.add(REQUEST_ID_HEADER, uuid.uuid1().hex)
class JSONEncoder(json.JSONEncoder):
    """
    JSON encoder that also serializes ``datetime.datetime`` values as
    ISO-8601 strings with a trailing 'Z'; everything else is delegated
    to the base encoder.

    >>> json.dumps({'a': datetime.datetime(2013, 12, 10)}, cls=JSONEncoder)
    '{"a": "2013-12-10T00:00:00Z"}'
    >>> json.dumps({'a': datetime.time()}, cls=JSONEncoder)
    Traceback (most recent call last):
    ...
    TypeError: datetime.time(0, 0) is not JSON serializable
    """
    def default(self, obj):
        if not isinstance(obj, datetime.datetime):
            # Fall back to the base class, which raises TypeError.
            return json.JSONEncoder.default(self, obj)
        return obj.strftime("%Y-%m-%dT%H:%M:%SZ")
| [
"servicelog.log",
"json.JSONEncoder.default",
"metrics.configure",
"json.dumps",
"uuid.uuid1",
"httplib.responses.get",
"metrics.timeit"
] | [((948, 989), 'metrics.configure', 'metrics.configure', ([], {'prefix': 'self.service_id'}), '(prefix=self.service_id)\n', (965, 989), False, 'import metrics\n'), ((2262, 2284), 'servicelog.log', 'servicelog.log', (['packet'], {}), '(packet)\n', (2276, 2284), False, 'import servicelog\n'), ((3290, 3322), 'json.dumps', 'json.dumps', (['obj'], {'cls': 'JSONEncoder'}), '(obj, cls=JSONEncoder)\n', (3300, 3322), False, 'import json\n'), ((3477, 3515), 'httplib.responses.get', 'httplib.responses.get', (['status_code', '""""""'], {}), "(status_code, '')\n", (3498, 3515), False, 'import httplib\n'), ((4188, 4233), 'metrics.timeit', 'metrics.timeit', (['metric', 'func', '*args'], {}), '(metric, func, *args, **kwargs)\n', (4202, 4233), False, 'import metrics\n'), ((5898, 5933), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'obj'], {}), '(self, obj)\n', (5922, 5933), False, 'import json\n'), ((5330, 5342), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (5340, 5342), False, 'import uuid\n')] |
# Copyright (c) 2022 Exograd SAS.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
# IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import datetime
from typing import Any, Dict, List, Optional, TypeVar, Type
import dateutil.parser
FieldType = TypeVar("FieldType", str, datetime.datetime, int, bool, dict, list)
class InvalidAPIObjectError(Exception):
    """Raised when the data for an API object fails validation."""

    def __init__(self, object_name: str, value: Any, reason: str) -> None:
        # Keep the offending value and its context around for callers.
        self.object_name = object_name
        self.value = value
        self.reason = reason
        message = f"invalid {object_name}: {reason}"
        super().__init__(message)
class APIObject:
    """Base class for objects exposed by the Eventline API."""

    def __init__(self, object_name: str):
        self._object_name = object_name

    def __str__(self) -> str:
        return repr(self)

    def __repr__(self) -> str:
        # Include the object's id when a subclass has set a non-None ``id_``.
        identifier = getattr(self, "id_", None)
        if identifier is not None:
            return f"<eventline.{self._object_name} {identifier}>"
        return f"<eventline.{self._object_name}>"
class ReadableAPIObject(APIObject):
    """An API object which can be read from a JSON object.

    Subclasses override ``_read`` and use the ``_read_*`` helpers below.
    The ``_read_<type>`` variants raise :class:`InvalidAPIObjectError` when
    the field is missing; the ``_read_optional_<type>`` variants return
    ``None`` instead (``False`` for booleans).
    """
    def _read(self, data: Dict[str, Any]) -> None:
        """Populate this object from ``data``; overridden by subclasses."""
        pass
    def _get_optional_field(
        self,
        data: Dict[str, Any],
        key: str,
        class_type: Type[FieldType],
        class_name: str,
    ) -> Optional[FieldType]:
        """Return ``data[key]`` type-checked against ``class_type``, or None if absent."""
        if not isinstance(data, dict):
            raise InvalidAPIObjectError(
                "response", data, "response data are not an object"
            )
        if key not in data:
            return None
        value = data.get(key, None)
        if value is not None and not isinstance(value, class_type):
            # Pick 'a'/'an' so the error message reads naturally.
            article = "a"
            if class_name[0] in ("a", "e", "i", "o", "u"):
                article = "an"
            raise InvalidAPIObjectError(
                self._object_name,
                value,
                f"field '{key}' is not {article} {class_name}",
            )
        return value
    def _get_field(
        self,
        data: Dict[str, Any],
        key: str,
        class_type: Type[FieldType],
        class_name: str,
    ) -> FieldType:
        """Like :meth:`_get_optional_field`, but raise if the field is missing or null."""
        value = self._get_optional_field(data, key, class_type, class_name)
        if value is None:
            raise InvalidAPIObjectError(
                self._object_name, data, f"missing field '{key}'"
            )
        return value
    def _read_optional_string(
        self,
        data: Dict[str, Any],
        key: str,
    ) -> Optional[str]:
        return self._get_optional_field(data, key, str, "string")
    def _read_string(
        self,
        data: Dict[str, Any],
        key: str,
    ) -> str:
        return self._get_field(data, key, str, "string")
    def _read_optional_datetime(
        self,
        data: Dict[str, Any],
        key: str,
    ) -> Optional[datetime.datetime]:
        """Parse an ISO-8601 datetime field, or return None if the field is absent."""
        # BUGFIX: previously used _get_field, so a missing field raised an
        # error instead of returning None like the other optional readers.
        string = self._get_optional_field(data, key, str, "string")
        value = None
        if string is not None:
            try:
                value = dateutil.parser.isoparse(string)
            except Exception as ex:
                raise InvalidAPIObjectError(
                    self._object_name,
                    string,
                    f"field '{key}' is not a valid datetime",
                ) from ex
        return value
    def _read_datetime(
        self,
        data: Dict[str, Any],
        key: str,
    ) -> datetime.datetime:
        value = self._read_optional_datetime(data, key)
        if value is None:
            raise InvalidAPIObjectError(
                self._object_name, data, f"missing field '{key}'"
            )
        return value
    def _read_optional_integer(
        self,
        data: Dict[str, Any],
        key: str,
    ) -> Optional[int]:
        return self._get_optional_field(data, key, int, "integer")
    def _read_integer(
        self,
        data: Dict[str, Any],
        key: str,
    ) -> int:
        return self._get_field(data, key, int, "integer")
    def _read_optional_boolean(
        self,
        data: Dict[str, Any],
        key: str,
    ) -> Optional[bool]:
        # Note: a missing boolean deliberately defaults to False, not None.
        value = self._get_optional_field(data, key, bool, "boolean")
        if value is None:
            value = False
        return value
    def _read_boolean(
        self,
        data: Dict[str, Any],
        key: str,
    ) -> bool:
        return self._get_field(data, key, bool, "boolean")
    def _read_optional_object(
        self,
        data: Dict[str, Any],
        key: str,
        class_type: Any,
    ) -> Optional[object]:
        """Read a nested object field into a new ``class_type`` instance, or None."""
        obj = self._get_optional_field(data, key, dict, "object")
        value = None
        if obj is not None:
            value = class_type()
            value._read(obj)
        return value
    def _read_object(
        self,
        data: Dict[str, Any],
        key: str,
        class_type: Any,
    ) -> object:
        value = self._read_optional_object(data, key, class_type)
        if value is None:
            raise InvalidAPIObjectError(
                self._object_name, data, f"missing field '{key}'"
            )
        return value
    def _read_optional_object_array(
        self,
        data: Dict[str, Any],
        key: str,
        element_class_type: Any,
    ) -> Optional[List[object]]:
        """Read an array of objects into ``element_class_type`` instances, or None."""
        # BUGFIX: previously used _get_field, so a missing field raised an
        # error instead of returning None.
        array = self._get_optional_field(data, key, list, "array")
        value = None
        if array is not None:
            value = []
            for i, element in enumerate(array):
                if not isinstance(element, dict):
                    raise InvalidAPIObjectError(
                        self._object_name,
                        element,
                        f"element at index {i} of field '{key}' is not an "
                        "object",
                    )
                element_value = element_class_type()
                element_value._read(element)
                value.append(element_value)
        return value
    def _read_object_array(
        self,
        data: Dict[str, Any],
        key: str,
        element_class_type: Any,
    ) -> List[object]:
        value = self._read_optional_object_array(data, key, element_class_type)
        if value is None:
            raise InvalidAPIObjectError(
                self._object_name, data, f"missing field '{key}'"
            )
        return value
    def _read_optional_string_array(
        self,
        data: Dict[str, Any],
        key: str,
    ) -> Optional[List[str]]:
        """Read an array-of-strings field, or return None if the field is absent."""
        # BUGFIX: previously used _get_field, so a missing field raised an
        # error instead of returning None.
        array = self._get_optional_field(data, key, list, "array")
        value = None
        if array is not None:
            value = []
            for i, element in enumerate(array):
                if not isinstance(element, str):
                    raise InvalidAPIObjectError(
                        self._object_name,
                        element,
                        f"element at index {i} of field '{key}' is not a "
                        "string",
                    )
                value.append(element)
        return value
    def _read_string_array(
        self,
        data: Dict[str, Any],
        key: str,
    ) -> List[str]:
        value = self._read_optional_string_array(data, key)
        if value is None:
            raise InvalidAPIObjectError(
                self._object_name, data, f"missing field '{key}'"
            )
        return value
class SerializableAPIObject(APIObject):
    """An API object which can be serialized to a JSON object."""
    def _serialize(self) -> Dict[str, Any]:
        # Subclasses override this to return their JSON representation.
        return {}
| [
"typing.TypeVar"
] | [((869, 936), 'typing.TypeVar', 'TypeVar', (['"""FieldType"""', 'str', 'datetime.datetime', 'int', 'bool', 'dict', 'list'], {}), "('FieldType', str, datetime.datetime, int, bool, dict, list)\n", (876, 936), False, 'from typing import Any, Dict, List, Optional, TypeVar, Type\n')] |
# -*- coding: utf-8 -*-
import logging
from collections import defaultdict
from dplace_app.models import Language, ISOCode, Society, LanguageFamily
from util import delete_all
def xd_to_language(items, languoids):
    """Rebuild Language/LanguageFamily/ISOCode rows from glottolog data.

    Wipes the three language tables, groups existing societies by their
    cross-dataset id, then links each item's societies to a language derived
    from its glottocode. Returns the number of items successfully processed.
    """
    # Start from a clean slate.
    for model in (Language, LanguageFamily, ISOCode):
        delete_all(model)
    glottolog = {languoid['id']: languoid for languoid in languoids}
    # Group existing societies by their cross-dataset id.
    societies_by_xd_id = defaultdict(list)
    for society in Society.objects.all():
        societies_by_xd_id[society.xd_id].append(society)
    # Shared caches so _xd_to_language reuses rows it already created.
    families = {}
    languages = {}
    isocodes = {}
    count = 0
    for item in items:
        societies = societies_by_xd_id.get(item['xd_id'])
        if not societies:  # pragma: no cover
            logging.warning("No societies found with xd_id %(xd_id)s" % item)
            continue
        ldata = glottolog.get(item['DialectLanguageGlottocode'])
        if not ldata:  # pragma: no cover
            logging.warning("No language found for %s, skipping" % item)
            continue
        _xd_to_language(item, societies, ldata, languages, families, isocodes)
        count += 1
    return count
def _xd_to_language(dict_row, societies, ldata, languages, families, isocodes):
    """Link the given societies to a Language built from glottolog data.

    Gets or creates the LanguageFamily, Language, and ISOCode rows
    (caching created rows in the ``families``/``languages``/``isocodes``
    dicts passed in by the caller) and assigns the language to each society.
    """
    # get or create the language family:
    # Note: If the related languoid is an isolate or a top-level family, we create a
    # LanguageFamily object with the data of the languoid.
    family_id = ldata['family_id'] or ldata['id']
    family = families.get(family_id)
    if not family:
        family_name = ldata['family_name'] or ldata['name']
        family = LanguageFamily.objects.create(name=family_name, scheme='G')
        # NOTE(review): objects.create() already saves; this save() looks
        # redundant — confirm before removing.
        family.save()
        families[family_id] = family
    # get or create the language:
    language = languages.get(ldata['id'])
    if not language:
        language = Language.objects.create(name=ldata['name'], glotto_code=ldata['id'])
    language.family = family
    if ldata['iso_code']:
        if len(ldata['iso_code']) > 3:  # pragma: no cover
            # iso_code column holds at most 3 characters; skip longer codes.
            logging.warning("ISOCode too long, skipping %s" % ldata['iso_code'])
        else:
            isocode = isocodes.get(ldata['iso_code'])
            if not isocode:
                isocode = ISOCode.objects.create(iso_code=ldata['iso_code'])
                isocodes[ldata['iso_code']] = isocode
            language.iso_code = isocode
    language.save()
    languages[ldata['id']] = language
    # Point every society at the (possibly shared) language row.
    for soc in societies:
        soc.language = language
        soc.save()
| [
"dplace_app.models.LanguageFamily.objects.create",
"logging.warning",
"dplace_app.models.ISOCode.objects.create",
"util.delete_all",
"collections.defaultdict",
"dplace_app.models.Society.objects.all",
"dplace_app.models.Language.objects.create"
] | [((222, 242), 'util.delete_all', 'delete_all', (['Language'], {}), '(Language)\n', (232, 242), False, 'from util import delete_all\n'), ((247, 273), 'util.delete_all', 'delete_all', (['LanguageFamily'], {}), '(LanguageFamily)\n', (257, 273), False, 'from util import delete_all\n'), ((278, 297), 'util.delete_all', 'delete_all', (['ISOCode'], {}), '(ISOCode)\n', (288, 297), False, 'from util import delete_all\n'), ((365, 382), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (376, 382), False, 'from collections import defaultdict\n'), ((396, 417), 'dplace_app.models.Society.objects.all', 'Society.objects.all', ([], {}), '()\n', (415, 417), False, 'from dplace_app.models import Language, ISOCode, Society, LanguageFamily\n'), ((1513, 1572), 'dplace_app.models.LanguageFamily.objects.create', 'LanguageFamily.objects.create', ([], {'name': 'family_name', 'scheme': '"""G"""'}), "(name=family_name, scheme='G')\n", (1542, 1572), False, 'from dplace_app.models import Language, ISOCode, Society, LanguageFamily\n'), ((1749, 1817), 'dplace_app.models.Language.objects.create', 'Language.objects.create', ([], {'name': "ldata['name']", 'glotto_code': "ldata['id']"}), "(name=ldata['name'], glotto_code=ldata['id'])\n", (1772, 1817), False, 'from dplace_app.models import Language, ISOCode, Society, LanguageFamily\n'), ((658, 723), 'logging.warning', 'logging.warning', (["('No societies found with xd_id %(xd_id)s' % item)"], {}), "('No societies found with xd_id %(xd_id)s' % item)\n", (673, 723), False, 'import logging\n'), ((865, 925), 'logging.warning', 'logging.warning', (["('No language found for %s, skipping' % item)"], {}), "('No language found for %s, skipping' % item)\n", (880, 925), False, 'import logging\n'), ((1961, 2029), 'logging.warning', 'logging.warning', (["('ISOCode too long, skipping %s' % ldata['iso_code'])"], {}), "('ISOCode too long, skipping %s' % ldata['iso_code'])\n", (1976, 2029), False, 'import logging\n'), ((2168, 2218), 
'dplace_app.models.ISOCode.objects.create', 'ISOCode.objects.create', ([], {'iso_code': "ldata['iso_code']"}), "(iso_code=ldata['iso_code'])\n", (2190, 2218), False, 'from dplace_app.models import Language, ISOCode, Society, LanguageFamily\n')] |
"""
contains several utilities for error handling
allows for storing "chained error information" without copying the entire
traceback object.
Note: module is currently not used / within a later refactoring the following error-Approach will be
implemented:
- bellow-flow level errors are never ignored / rather: throw "chained exceptions"
- within flow handlers: error handling takes place. The strategy is as follows:
catch error / chain with "SOAnaFlowError" / log error / log trace / define default value
Author: `HBernigau <https://github.com/HBernigau>`_
Date: 01.2022
"""
import functools
import traceback
from dataclasses import dataclass, asdict
from typing import Optional, List

import yaml
def get_traceback(exc: Exception):
    """Return the formatted traceback lines for ``exc``.

    Equivalent to ``traceback.format_exception`` output (a list of strings).
    Uses the positional ``(etype, value, tb)`` calling convention, which
    works on all Python 3 versions; the ``etype=`` keyword was removed in
    Python 3.10, so the previous keyword-based call raised TypeError there.
    """
    return traceback.format_exception(type(exc), exc, exc.__traceback__)
@dataclass
class ExceptionInfo:
    """
    Lightweight, traceback-free description of an exception.

    :param exc_type: name of the exception class
    :param exc_args: the exception's arguments, each converted to a string
    :param exc_details: info about the chained root-cause exception, if any
    """
    exc_type: str
    exc_args: List[str]
    exc_details: Optional['ExceptionInfo']

    @classmethod
    def from_exception(cls, exc: Exception):
        """
        Build an :class:`ExceptionInfo` describing ``exc``.

        :param exc: the exception to describe
        :return: an instance of the current class
        """
        return cls(
            exc_type=type(exc).__name__,
            exc_args=[str(arg) for arg in getattr(exc, 'args', [])],
            exc_details=getattr(exc, 'exc_details', None),
        )
@dataclass
class ErrologEntry:
    """A single error-log record: structured exception info plus its formatted traceback lines."""
    error_info: ExceptionInfo
    trace_back: List[str]
class SoAnaException(Exception):
    """
    Base class for all user-defined exceptions.

    ``exc_details`` may be an :class:`ExceptionInfo`, an exception (which is
    converted to one), or None; it records the root cause without keeping a
    full traceback object alive.
    """

    def __init__(self, *args, exc_details=None, **kwargs):
        super().__init__(*args, **kwargs)
        if isinstance(exc_details, ExceptionInfo):
            details = exc_details
        elif isinstance(exc_details, Exception):
            details = ExceptionInfo.from_exception(exc_details)
        else:
            details = None
        self.exc_details = details

    @property
    def exc_trace(self):
        """Formatted traceback lines for this exception."""
        return get_traceback(self)

    @property
    def as_exc_info(self):
        """This exception summarized as an :class:`ExceptionInfo`."""
        return ExceptionInfo.from_exception(self)
def with_forward_error_func(func):
    """Decorator: return an :class:`ExceptionInfo` instead of raising.

    On success the wrapped function's result is returned unchanged. If the
    call raises, the exception is converted into an ``ExceptionInfo`` (with
    the formatted traceback as its args) and returned to the caller.
    """
    # BUGFIX: functools.wraps preserves the wrapped function's name/docstring.
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as exc:
            return ExceptionInfo(exc_type='FlowTaskError',
                                 exc_args=get_traceback(exc),
                                 exc_details=ExceptionInfo.from_exception(exc)
                                 )
    return wrapped
def print_formated_error_info(exc: SoAnaException):
    """Pretty-print ``exc`` (structured info plus traceback) as YAML to stdout."""
    info = ExceptionInfo.from_exception(exc)
    trace_lines = list(get_traceback(exc))
    entry = ErrologEntry(info, trace_lines)
    print(yaml.safe_dump(asdict(entry)))
if __name__ == '__main__':
    # some demo code...
    class SomeHighLevelError(SoAnaException):
        # Example domain-level exception built on the SoAnaException base.
        pass
    def throw_error():
        """
        Throw an error and chain it into a high-level error.
        :return:
        """
        try:
            x = 1 / 0.0
        except Exception as exc:
            # Chain the root cause both via exc_details and via __cause__/traceback.
            raise SomeHighLevelError('Custom exception caught', 42, exc_details=exc).with_traceback( exc.__traceback__) from exc
    @with_forward_error_func
    def throw_error_2():
        # Same failure, but the decorator converts it into an ExceptionInfo.
        throw_error()
    def main():
        """Demonstrate the various error-reporting helpers on a chained error."""
        try:
            throw_error()
        except Exception as exc:
            exc_info = ExceptionInfo.from_exception(exc)
            print('All')
            print(exc_info)
            print()
            print('Nicely formatted')
            print_formated_error_info(exc)
            print('Details:')
            print(exc_info.exc_details)
            print()
            print('Traceback: ')
            print(''.join(get_traceback(exc)))
            print('')
            print('Output of wrapped function:')
            print(throw_error_2())
    main()
main() | [
"yaml.safe_dump"
] | [((2969, 2988), 'yaml.safe_dump', 'yaml.safe_dump', (['res'], {}), '(res)\n', (2983, 2988), False, 'import yaml\n')] |
import os
import app.database.seed.seed_helper as helper
from app.translation.deserializer import Deserializer
from app.extensions import db
# Seed data: team name plus a roster loaded from the players.csv next to this file.
real_madrid = {
    'name': '<NAME>',
    'players': helper.read_csv_file(os.path.join(os.path.dirname(__file__), 'players.csv')),
}
# Build the team model from the raw dict and stage it on the session.
# NOTE(review): no db.session.commit() here — presumably the caller commits.
team = Deserializer().deserialize_team(real_madrid)
db.session.add(team)
| [
"os.path.dirname",
"app.translation.deserializer.Deserializer",
"app.extensions.db.session.add"
] | [((330, 350), 'app.extensions.db.session.add', 'db.session.add', (['team'], {}), '(team)\n', (344, 350), False, 'from app.extensions import db\n'), ((285, 299), 'app.translation.deserializer.Deserializer', 'Deserializer', ([], {}), '()\n', (297, 299), False, 'from app.translation.deserializer import Deserializer\n'), ((231, 256), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (246, 256), False, 'import os\n')] |
import solver.algorithms as alg
import numpy as np
def problem4(t0, tf, NA0, NB0, tauA, tauB, n, returnlist=False):
    """Uses Euler's method to model the solution to a radioactive decay problem where dNA/dt = -NA/tauA and dNB/dt = NA/tauA - NB/tauB.

    Args:
        t0 (float): Start time
        tf (float): End time
        NA0 (int): Initial number of NA nuclei
        NB0 (int): Initial number of NB nuclei
        tauA (float): Decay time constant for NA
        tauB (float): Decay time constant for NB
        n (int): Number of points to sample at
        returnlist (bool): Controls whether the function returns the list of points. Defaults to False (returns None)

    Returns:
        solution (list): Points on the graph of the approximate solution. Each element in the list has the form (t, array([NA, NB]))
        In the graph, NA is green and NB is blue
    """
    # BUGFIX: the banner previously printed "dNB/dt = NA/tauA - NB/tauA - NB/tauB",
    # which contradicts both the docstring and the matrix A actually solved below.
    print("Problem 4: ~Radioactive Decay~ dNA/dt = -NA/tauA & dNB/dt = NA/tauA - NB/tauB")
    N0 = np.array([NA0, NB0])
    # Coupled linear system dN/dt = A @ N:
    #   dNA/dt = -NA/tauA
    #   dNB/dt =  NA/tauA - NB/tauB
    A = np.array([[-1/tauA, 0],[1/tauA, -1/tauB]])
    def dN_dt(t, N):
        return A @ N
    # Uniform step over n sample points (n-1 intervals).
    h = (tf-t0)/(n-1)
    print("Time step of %f seconds." % h)
    solution = alg.euler(t0, tf, n, N0, dN_dt)
    if returnlist:
        return solution
| [
"numpy.array",
"solver.algorithms.euler"
] | [((985, 1005), 'numpy.array', 'np.array', (['[NA0, NB0]'], {}), '([NA0, NB0])\n', (993, 1005), True, 'import numpy as np\n'), ((1014, 1063), 'numpy.array', 'np.array', (['[[-1 / tauA, 0], [1 / tauA, -1 / tauB]]'], {}), '([[-1 / tauA, 0], [1 / tauA, -1 / tauB]])\n', (1022, 1063), True, 'import numpy as np\n'), ((1178, 1209), 'solver.algorithms.euler', 'alg.euler', (['t0', 'tf', 'n', 'N0', 'dN_dt'], {}), '(t0, tf, n, N0, dN_dt)\n', (1187, 1209), True, 'import solver.algorithms as alg\n')] |
# This example shows how a space can be modelled with loops.
import exhaust
def generate_numbers(state: exhaust.State):
    """Produce one point of the search space: five values, each drawn from 1..5."""
    return [state.randint(1, 5) for _ in range(5)]
# Enumerate every list the generator can produce (one per combination of
# the random choices made inside it — see the expected output below).
for numbers in exhaust.space(generate_numbers):
    print(numbers)
# Output:
# [1, 1, 1, 1, 1]
# [1, 1, 1, 1, 2]
# [1, 1, 1, 1, 3]
# [1, 1, 1, 1, 4]
# [1, 1, 1, 1, 5]
# [1, 1, 1, 2, 1]
# ...
# [5, 5, 5, 5, 5]
| [
"exhaust.space"
] | [((241, 272), 'exhaust.space', 'exhaust.space', (['generate_numbers'], {}), '(generate_numbers)\n', (254, 272), False, 'import exhaust\n')] |
from datetime import datetime, timedelta
from hamcrest.core.base_matcher import BaseMatcher
class WithinDatetimeMatcher(BaseMatcher):
    """Hamcrest matcher accepting datetimes within a closed [lower, upper] range."""

    def __init__(self, lower_limit_datetime: datetime, upper_limit_datetime: datetime):
        self.__lower = lower_limit_datetime
        self.__upper = upper_limit_datetime

    def _matches(self, item: datetime):
        # Inclusive on both ends.
        return self.__lower <= item <= self.__upper

    def describe_to(self, description):
        text = 'datetime between {0} and {1}'.format(self.__lower, self.__upper)
        description.append_text(text)
def within_an_hour():
    """Matcher for datetimes from roughly now until an hour from now.

    A 10-second slack is applied on both ends to absorb clock drift between
    the matched value and this call.
    """
    slack = timedelta(seconds=10)
    lower = datetime.utcnow() - slack
    upper = datetime.utcnow() + timedelta(hours=1) + slack
    return WithinDatetimeMatcher(lower, upper)
| [
"datetime.timedelta",
"datetime.datetime.utcnow"
] | [((611, 628), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (626, 628), False, 'from datetime import datetime, timedelta\n'), ((631, 652), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(10)'}), '(seconds=10)\n', (640, 652), False, 'from datetime import datetime, timedelta\n'), ((662, 679), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (677, 679), False, 'from datetime import datetime, timedelta\n'), ((682, 712), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)', 'seconds': '(10)'}), '(hours=1, seconds=10)\n', (691, 712), False, 'from datetime import datetime, timedelta\n')] |
import requests
from bs4 import BeautifulSoup
import pickle
import re, datetime
# 뉴스 중복 확인
def duplication_check(new_news, saved_news_list):
if new_news in saved_news_list:
return False
else:
saved_news_list.append(new_news)
return True
# 기사 날짜, 시간 표현 (시간정보가 '~전'인 경우)
def get_released_time1(current_time, time_info):
return_string = ''
p = re.compile('^[\d]*')
m = p.search(time_info)
number = int(time_info[:m.end()])
korean = time_info[m.end()]
# 뉴스사 페이지마다 날짜를 담은 태그가 다르고(=>태그나 class등으로 찾기 어려움), 페이지에 현재 날짜가 기사 입력 날짜보다 먼저 나오는
# 경우도 있어(뉴스1) 정규표현식으로도 정확한 기사 입력날짜를 얻기 힘듦.
# 기사의 발행 시각(released time)구하기
if korean == '분': # n분 전
released_time = current_time - datetime.timedelta(minutes=number)
return_string = released_time.strftime('%Y-%m-%d %H시')
elif korean == '시': # n시간 전
released_time = current_time - datetime.timedelta(hours=number)
return_string = released_time.strftime('%Y-%m-%d %H시')
elif korean == '일': # n일 전
released_time = current_time - datetime.timedelta(days=number)
return_string = released_time.strftime('%Y-%m-%d 00시') # 기사의 시간순 정렬을 위해 00시로 설정
else: # 몇 초전 기사일 수도 있음
released_time = current_time
return return_string
# 기사 날짜, 시간 표현 (시간정보가 '20xx.xx.xx'인 경우)
def get_released_time2(date_str):
yea = date_str[:4]
mon = date_str[5:7]
day = date_str[8:10]
return_string = yea + '-' + mon + '-' + day + ' 00시'
return return_string
# 기사의 시간순 정렬을 위한 연,월,일,시간 정보
def get_time_members(line):
p = re.compile('\d{4}-\d{2}-\d{2} \d{2}')
m = p.search(line)
yea = m.group()[:4]
mon = m.group()[5:7]
day = m.group()[8:10]
hou = m.group()[11:13]
return int(yea), int(mon), int(day), int(hou)
# 검색 페이지 request
url = 'https://search.naver.com/search.naver?where=news&query=%ED%8E%84%EC%96%B4%EB%B9%84%EC%8A%A4&sm=tab_tmr&nso=so:r,p:all,a:all&sort=0'
res = requests.get(url)
res.raise_for_status()
current_time = datetime.datetime.today()
# 뉴스 컨테이너 객체 설정
soup = BeautifulSoup(res.text, 'lxml')
news_container = soup.find('ul', attrs={'class':'list_news'})
list_news = news_container.find_all('li', attrs={'class':'bx'})
# 저장된 뉴스 제목 리스트 불러옴 (중복을 피하기 위해 기존에 저장된 뉴스의 제목으로 이루어진 리스트)
try:
saved_news_file = open('saved_news_list.pickle', 'rb')
saved_news_list = pickle.load(saved_news_file)
saved_news_file.close()
except Exception:
saved_news_list = list()
print('new list created')
finally:
print('list loaded successfully')
# 뉴스 제목의 앞 14글자로 중복을 파악한 후 중복이 아닌 뉴스는 html파일로 작성
with open('pana.html', 'a', encoding='utf-8') as f:
for news in list_news:
news_link = news.find('a', attrs={'class':'news_tit'})
if duplication_check(news_link.get_text()[:14], saved_news_list): # 제목이 길기 때문에 앞의 14글자만 비교
try:
time_info = news.find('span', text=re.compile(' 전$')) # class가 info인 span을 이용하면 신문의 몇면 몇단의 기사인지를 알려주는 내용도 있음
time_str = get_released_time1(current_time, time_info.get_text())
except AttributeError:
time_info = news.find('span', text=re.compile('\d{4}.\d{2}.\d{2}.')) # 일정기간 지난 뉴스는 time_info에 '~전'이 아니라 '2021.12.14'처럼 날짜의 형태로 나올 수 있음
time_str = get_released_time2(time_info.get_text())
finally:
f.write('<h3><a href="' + news_link['href'] + '" target="blank">' + news_link['title'] + ', ' +
time_str + '</a></h3>')
f.write('<br/>')
f.write('\n')
# saved_news_list.pickle 파일(저장된 뉴스의 제목을 담은 리스트) 갱신
with open('saved_news_list.pickle', 'wb') as f:
pickle.dump(saved_news_list, f)
print('dump successed')
# 여기부터 기사의 시간순 정렬
# lines list에 각 기사의 내용과 입력시간을 2차원 list로 저장 ([html내용, 연, 월, 일, 시]의 형태)
with open('pana.html', 'r', encoding='utf8') as f:
file_data = f.readlines()
lines = list()
for data in file_data:
lines.append([data])
for idx, data in enumerate(file_data):
year, month, day, hour = get_time_members(data)
lines[idx].append(year)
lines[idx].append(month)
lines[idx].append(day)
lines[idx].append(hour)
# lines를 기사 입력시간 순으로 정렬
lines.sort(key=lambda x: (-x[1], -x[2], -x[3], -x[4]))
# 정렬한 순서대로 내용을 덮어씀
with open('pana.html', 'w', encoding='utf8') as f:
for line in lines:
f.write(line[0]) | [
"pickle.dump",
"re.compile",
"pickle.load",
"requests.get",
"bs4.BeautifulSoup",
"datetime.datetime.today",
"datetime.timedelta"
] | [((1866, 1883), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1878, 1883), False, 'import requests\n'), ((1922, 1947), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (1945, 1947), False, 'import re, datetime\n'), ((1972, 2003), 'bs4.BeautifulSoup', 'BeautifulSoup', (['res.text', '"""lxml"""'], {}), "(res.text, 'lxml')\n", (1985, 2003), False, 'from bs4 import BeautifulSoup\n'), ((363, 384), 're.compile', 're.compile', (['"""^[\\\\d]*"""'], {}), "('^[\\\\d]*')\n", (373, 384), False, 'import re, datetime\n'), ((1500, 1541), 're.compile', 're.compile', (['"""\\\\d{4}-\\\\d{2}-\\\\d{2} \\\\d{2}"""'], {}), "('\\\\d{4}-\\\\d{2}-\\\\d{2} \\\\d{2}')\n", (1510, 1541), False, 'import re, datetime\n'), ((2272, 2300), 'pickle.load', 'pickle.load', (['saved_news_file'], {}), '(saved_news_file)\n', (2283, 2300), False, 'import pickle\n'), ((3461, 3492), 'pickle.dump', 'pickle.dump', (['saved_news_list', 'f'], {}), '(saved_news_list, f)\n', (3472, 3492), False, 'import pickle\n'), ((702, 736), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': 'number'}), '(minutes=number)\n', (720, 736), False, 'import re, datetime\n'), ((861, 893), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': 'number'}), '(hours=number)\n', (879, 893), False, 'import re, datetime\n'), ((1017, 1048), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'number'}), '(days=number)\n', (1035, 1048), False, 'import re, datetime\n'), ((2780, 2797), 're.compile', 're.compile', (['""" 전$"""'], {}), "(' 전$')\n", (2790, 2797), False, 'import re, datetime\n'), ((3000, 3035), 're.compile', 're.compile', (['"""\\\\d{4}.\\\\d{2}.\\\\d{2}."""'], {}), "('\\\\d{4}.\\\\d{2}.\\\\d{2}.')\n", (3010, 3035), False, 'import re, datetime\n')] |
"""
File name: extracted_features_gridsearch.py
Author: <NAME>
Date created: 29.04.2019
"""
import numpy as np
import sys
import os
import yaml
import pickle
import pandas as pd
import pandas.core.indexes
sys.modules['pandas.indexes'] = pandas.core.indexes
import json
import time
import keras
import tensorflow as tf
from keras.models import load_model,Sequential, Model
from keras.layers import Dense, Dropout, Input, concatenate
from keras.callbacks import EarlyStopping
from keras.backend.tensorflow_backend import set_session
from sklearn.model_selection import ParameterGrid
from sklearn.metrics import roc_auc_score
from helper import dataset, model
from imaging_predictive_models import imaging_dataset
from clinical_predictive_models import clinical_dataset, MLP
from multimodal_prediction_helper import multimodal_dataset
from plotting_helper import plot_evolution
#### ENVIRONMENT AND SESSION SET UP ####################################################################
# set the environment variable
os.environ["KERAS_BACKEND"] = "tensorflow"
# Silence INFO logs
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
# create a configuration protocol
config = tf.ConfigProto()
# set the allow_growth option to true in the protocol
config.gpu_options.allow_growth = True
# define GPU to use
config.gpu_options.visible_device_list = "0,1"
# start a sesstion that uses the configuration protocol
set_session(tf.Session(config=config))
#### READ CONFIGURATION FILE ##########
def join(loader,node):
seq = loader.construct_sequence(node)
return ''.join(str(i) for i in seq)
yaml.add_constructor('!join',join)
cfg = yaml.load(open('config.yml', 'r'))
#### ASSIGN PATHS AND VARIABLES #########################################################################
dataset_name = cfg['dataset name']
data_path = 'data/'
clin_feat_splits_path = data_path+ cfg['clinical dataset']['feature splits path']
img_feat_splits_path = data_path + cfg['imaging dataset']['feature splits path']
num_splits = cfg['number of runs']
model_name = cfg['model name']
def_params = cfg['hyperparameters'][model_name]
tuning_params = cfg['tuning parameters'][model_name]
performance_scores = cfg['final performance measures']
save_models = cfg['save options']['models path']
save_params = cfg['save options']['params path']
save_scores = cfg['save options']['scores path']
save_figures = cfg['save options']['figures path']
##### GET TRAINING,VALIDATION AND TEST DATA ##############################################################
data = multimodal_dataset(dataset_name)
data.load_feature_sets(img_feat_splits_path, clin_feat_splits_path)
#feature_sets = data.combine_features(combining_method = 'concat_and_normalize')
##### TRAIN AND SAVE MODELS #################################################################################
for i in range(num_splits):
#### ASSIGN TRAINING, TEST AND VALIDATION SETS FOR CURRENT SPLIT ##########################################
current_split_num = i+1
img_X_tr = data.img_sets[i]['train_data']
img_X_val = data.img_sets[i]['val_data']
img_X_te = data.img_sets[i]['test_data']
clin_X_tr = data.clin_sets[i]['train_data']
clin_X_val = data.clin_sets[i]['val_data']
clin_X_te = data.clin_sets[i]['test_data']
y_tr = data.img_sets[i]['train_labels']
y_val = data.img_sets[i]['val_labels']
y_te = data.img_sets[i]['test_labels']
if def_params['out_activation'] == 'softmax':
y_tr = pd.get_dummies(y_tr)
y_val = pd.get_dummies(y_val)
y_te = pd.get_dummies(y_te)
model_path = save_models + '/best_model_on_outer_training_set_split_'+str(current_split_num)+'.h5'
#params_path = save_params + '/best_parameters_run_'+str(current_split_num)+'.json'
tune_params_path = save_params + '/best_tuning_parameters_split_'+str(current_split_num)+'.json'
if os.path.isfile(model_path):
pass
else:
if not os.path.exists(save_models):
os.makedirs(save_models)
#### START GRID SEARCH #####################################################################################
start = time.time()
best_AUC = 0.5
i = 0
for tune in ParameterGrid(tuning_params):
img_input = Input(shape= (img_X_tr.shape[1],), name='image_input')
clin_input = Input(shape= (clin_X_tr.shape[1],), name='clinical_input')
dense1 = Dense(tune['num_neurons_embedding'][0], kernel_initializer = def_params['weight_init'], activation = def_params['hidden_activation'],
kernel_regularizer= keras.regularizers.l2(tune['l2_ratio']))(clin_input)
dense2 = Dense(tune['num_neurons_embedding'][1], kernel_initializer = def_params['weight_init'], activation = def_params['hidden_activation'],
kernel_regularizer= keras.regularizers.l2(tune['l2_ratio']))(img_input)
x = concatenate([dense1, dense2])
x = Dense(tune['num_neurons_final'], kernel_initializer = def_params['weight_init'], activation = def_params['hidden_activation'],
kernel_regularizer= keras.regularizers.l2(tune['l2_ratio']))(x)
x= Dropout(tune['dropout_rate'])(x)
if def_params['out_activation'] == 'softmax':
output = Dense(2,kernel_initializer = def_params['weight_init'],activation= def_params['out_activation'],
kernel_regularizer= keras.regularizers.l2(tune['l2_ratio']))(x)
else:
output = Dense(1,kernel_initializer = def_params['weight_init'],activation= def_params['out_activation'],
kernel_regularizer= keras.regularizers.l2(tune['l2_ratio']))(x)
optimizer = keras.optimizers.Adam(lr = tune['learning_rate'])
model = Model(inputs=[img_input, clin_input], outputs=[output])
model.compile(loss=def_params['loss_func'], optimizer = optimizer)
e_stop = EarlyStopping(monitor = 'val_loss', min_delta = def_params['min_delta'], patience = def_params['iter_patience'], mode='auto')
callbacks = [e_stop]
history = model.fit({'image_input' : img_X_tr,'clinical_input' : clin_X_tr}, y_tr, callbacks = callbacks,validation_data= ([img_X_val, clin_X_val],y_val),
epochs=def_params['epochs'], batch_size= tune['batch_size'], verbose=0)
probs_val = model.predict([img_X_val,clin_X_val],batch_size = 8)
score_val = roc_auc_score(y_val, probs_val)
i +=1
if i%10 == 0:
print(i)
if score_val > best_AUC:
best_AUC = score_val
best_params = tune
loss_tr = history.history['loss']
loss_val = history.history['val_loss']
model.save(save_models + '/best_model_on_inner_training_set_split_'+str(current_split_num)+'.h5')
keras.backend.clear_session()
best_model = load_model(save_models + '/best_model_on_inner_training_set_split_'+str(current_split_num)+'.h5')
probs_tr = best_model.predict([img_X_tr,clin_X_tr],batch_size = 8)
probs_val = best_model.predict([img_X_val,clin_X_val],batch_size = 8)
probs_te = best_model.predict([img_X_te,clin_X_te],batch_size = 8)
score_tr = roc_auc_score(y_tr, probs_tr)
score_val = roc_auc_score(y_val, probs_val)
score_te = roc_auc_score(y_te, probs_te)
# Save tuning parameters that resulted in the best model:
if not os.path.exists(save_params):
os.makedirs(save_params)
json.dump(best_params,open(tune_params_path,'w'))
# Save loss and auc scores calculated at each epoch during training:
if not os.path.exists(save_scores):
os.makedirs(save_scores)
np.savetxt(save_scores+'/inner_loop_loss_over_epochs_split_'+str(current_split_num)+'.csv', [loss_tr,loss_val], delimiter=",")
np.savetxt(save_scores+ "/inner_loop_auc_scores_split_"+str(current_split_num)+".csv", [score_tr, score_val, score_te], delimiter=",")
end = time.time()
print('Training time for split %s: %i minutes.'%(str(current_split_num),np.floor(((end-start)%3600)/60)))
| [
"helper.model.compile",
"sklearn.metrics.roc_auc_score",
"sklearn.model_selection.ParameterGrid",
"os.path.exists",
"tensorflow.Session",
"yaml.add_constructor",
"keras.layers.concatenate",
"keras.models.Model",
"keras.callbacks.EarlyStopping",
"keras.backend.clear_session",
"tensorflow.ConfigPr... | [((1166, 1182), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (1180, 1182), True, 'import tensorflow as tf\n'), ((1587, 1622), 'yaml.add_constructor', 'yaml.add_constructor', (['"""!join"""', 'join'], {}), "('!join', join)\n", (1607, 1622), False, 'import yaml\n'), ((2523, 2555), 'multimodal_prediction_helper.multimodal_dataset', 'multimodal_dataset', (['dataset_name'], {}), '(dataset_name)\n', (2541, 2555), False, 'from multimodal_prediction_helper import multimodal_dataset\n'), ((1412, 1437), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (1422, 1437), True, 'import tensorflow as tf\n'), ((3794, 3820), 'os.path.isfile', 'os.path.isfile', (['model_path'], {}), '(model_path)\n', (3808, 3820), False, 'import os\n'), ((3420, 3440), 'pandas.get_dummies', 'pd.get_dummies', (['y_tr'], {}), '(y_tr)\n', (3434, 3440), True, 'import pandas as pd\n'), ((3451, 3472), 'pandas.get_dummies', 'pd.get_dummies', (['y_val'], {}), '(y_val)\n', (3465, 3472), True, 'import pandas as pd\n'), ((3482, 3502), 'pandas.get_dummies', 'pd.get_dummies', (['y_te'], {}), '(y_te)\n', (3496, 3502), True, 'import pandas as pd\n'), ((4023, 4034), 'time.time', 'time.time', ([], {}), '()\n', (4032, 4034), False, 'import time\n'), ((4077, 4105), 'sklearn.model_selection.ParameterGrid', 'ParameterGrid', (['tuning_params'], {}), '(tuning_params)\n', (4090, 4105), False, 'from sklearn.model_selection import ParameterGrid\n'), ((6815, 6844), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_tr', 'probs_tr'], {}), '(y_tr, probs_tr)\n', (6828, 6844), False, 'from sklearn.metrics import roc_auc_score\n'), ((6859, 6890), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_val', 'probs_val'], {}), '(y_val, probs_val)\n', (6872, 6890), False, 'from sklearn.metrics import roc_auc_score\n'), ((6904, 6933), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_te', 'probs_te'], {}), '(y_te, probs_te)\n', (6917, 6933), False, 
'from sklearn.metrics import roc_auc_score\n'), ((7528, 7539), 'time.time', 'time.time', ([], {}), '()\n', (7537, 7539), False, 'import time\n'), ((3845, 3872), 'os.path.exists', 'os.path.exists', (['save_models'], {}), '(save_models)\n', (3859, 3872), False, 'import os\n'), ((3877, 3901), 'os.makedirs', 'os.makedirs', (['save_models'], {}), '(save_models)\n', (3888, 3901), False, 'import os\n'), ((4122, 4175), 'keras.layers.Input', 'Input', ([], {'shape': '(img_X_tr.shape[1],)', 'name': '"""image_input"""'}), "(shape=(img_X_tr.shape[1],), name='image_input')\n", (4127, 4175), False, 'from keras.layers import Dense, Dropout, Input, concatenate\n'), ((4193, 4250), 'keras.layers.Input', 'Input', ([], {'shape': '(clin_X_tr.shape[1],)', 'name': '"""clinical_input"""'}), "(shape=(clin_X_tr.shape[1],), name='clinical_input')\n", (4198, 4250), False, 'from keras.layers import Dense, Dropout, Input, concatenate\n'), ((4715, 4744), 'keras.layers.concatenate', 'concatenate', (['[dense1, dense2]'], {}), '([dense1, dense2])\n', (4726, 4744), False, 'from keras.layers import Dense, Dropout, Input, concatenate\n'), ((5431, 5478), 'keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'lr': "tune['learning_rate']"}), "(lr=tune['learning_rate'])\n", (5452, 5478), False, 'import keras\n'), ((5493, 5548), 'keras.models.Model', 'Model', ([], {'inputs': '[img_input, clin_input]', 'outputs': '[output]'}), '(inputs=[img_input, clin_input], outputs=[output])\n', (5498, 5548), False, 'from keras.models import load_model, Sequential, Model\n'), ((5552, 5616), 'helper.model.compile', 'model.compile', ([], {'loss': "def_params['loss_func']", 'optimizer': 'optimizer'}), "(loss=def_params['loss_func'], optimizer=optimizer)\n", (5565, 5616), False, 'from helper import dataset, model\n'), ((5632, 5755), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': "def_params['min_delta']", 'patience': "def_params['iter_patience']", 'mode': '"""auto"""'}), 
"(monitor='val_loss', min_delta=def_params['min_delta'],\n patience=def_params['iter_patience'], mode='auto')\n", (5645, 5755), False, 'from keras.callbacks import EarlyStopping\n'), ((5795, 6016), 'helper.model.fit', 'model.fit', (["{'image_input': img_X_tr, 'clinical_input': clin_X_tr}", 'y_tr'], {'callbacks': 'callbacks', 'validation_data': '([img_X_val, clin_X_val], y_val)', 'epochs': "def_params['epochs']", 'batch_size': "tune['batch_size']", 'verbose': '(0)'}), "({'image_input': img_X_tr, 'clinical_input': clin_X_tr}, y_tr,\n callbacks=callbacks, validation_data=([img_X_val, clin_X_val], y_val),\n epochs=def_params['epochs'], batch_size=tune['batch_size'], verbose=0)\n", (5804, 6016), False, 'from helper import dataset, model\n'), ((6037, 6089), 'helper.model.predict', 'model.predict', (['[img_X_val, clin_X_val]'], {'batch_size': '(8)'}), '([img_X_val, clin_X_val], batch_size=8)\n', (6050, 6089), False, 'from helper import dataset, model\n'), ((6105, 6136), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_val', 'probs_val'], {}), '(y_val, probs_val)\n', (6118, 6136), False, 'from sklearn.metrics import roc_auc_score\n'), ((6447, 6476), 'keras.backend.clear_session', 'keras.backend.clear_session', ([], {}), '()\n', (6474, 6476), False, 'import keras\n'), ((7005, 7032), 'os.path.exists', 'os.path.exists', (['save_params'], {}), '(save_params)\n', (7019, 7032), False, 'import os\n'), ((7037, 7061), 'os.makedirs', 'os.makedirs', (['save_params'], {}), '(save_params)\n', (7048, 7061), False, 'import os\n'), ((7195, 7222), 'os.path.exists', 'os.path.exists', (['save_scores'], {}), '(save_scores)\n', (7209, 7222), False, 'import os\n'), ((7227, 7251), 'os.makedirs', 'os.makedirs', (['save_scores'], {}), '(save_scores)\n', (7238, 7251), False, 'import os\n'), ((4956, 4985), 'keras.layers.Dropout', 'Dropout', (["tune['dropout_rate']"], {}), "(tune['dropout_rate'])\n", (4963, 4985), False, 'from keras.layers import Dense, Dropout, Input, concatenate\n'), ((7615, 
7650), 'numpy.floor', 'np.floor', (['((end - start) % 3600 / 60)'], {}), '((end - start) % 3600 / 60)\n', (7623, 7650), True, 'import numpy as np\n'), ((4427, 4466), 'keras.regularizers.l2', 'keras.regularizers.l2', (["tune['l2_ratio']"], {}), "(tune['l2_ratio'])\n", (4448, 4466), False, 'import keras\n'), ((4655, 4694), 'keras.regularizers.l2', 'keras.regularizers.l2', (["tune['l2_ratio']"], {}), "(tune['l2_ratio'])\n", (4676, 4694), False, 'import keras\n'), ((4906, 4945), 'keras.regularizers.l2', 'keras.regularizers.l2', (["tune['l2_ratio']"], {}), "(tune['l2_ratio'])\n", (4927, 4945), False, 'import keras\n'), ((5178, 5217), 'keras.regularizers.l2', 'keras.regularizers.l2', (["tune['l2_ratio']"], {}), "(tune['l2_ratio'])\n", (5199, 5217), False, 'import keras\n'), ((5371, 5410), 'keras.regularizers.l2', 'keras.regularizers.l2', (["tune['l2_ratio']"], {}), "(tune['l2_ratio'])\n", (5392, 5410), False, 'import keras\n')] |
# Generated by Django 2.1.1 on 2018-09-20 07:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('FootageManager', '0002_footage_length'),
]
operations = [
migrations.AddField(
model_name='footage',
name='staticpath',
field=models.CharField(default='', max_length=200, verbose_name='static_path'),
preserve_default=False,
),
]
| [
"django.db.models.CharField"
] | [((342, 414), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(200)', 'verbose_name': '"""static_path"""'}), "(default='', max_length=200, verbose_name='static_path')\n", (358, 414), False, 'from django.db import migrations, models\n')] |
import pandas as pd
import json
import numpy as np
#DEFINITIONS
NAMESPACE = "it.gov.daf.dataset.opendata"
def getData(path):
if (path.lower().endswith((".json", ".geojson"))):
with open(path) as data_file:
dataJson = json.load(data_file)
return pd.io.json.json_normalize(dataJson, sep='.|.')
elif (path.lower().endswith((".csv", ".txt", ".text"))):
separator = csvInferSep(path)
return pd.read_csv(path, sep=separator)
else:
return "-1"
def getFieldsSchema(data):
fields = list()
for c, t in zip(data.columns, data.dtypes):
field = {"name": c, "type": formatConv(t)}
fields.append(field)
return fields;
def getDataSchema(path, datasetName):
data = getData(path)
fields = getFieldsSchema(data)
avro = {
"namespace": NAMESPACE,
"type": "record",
"name": datasetName,
"fields": fields
}
return avro
def formatConv(typeIn):
dic = {
np.dtype('O'): "String",
np.dtype('float64'): 'double',
np.dtype('float32'): 'double',
np.dtype('int64'): 'int',
np.dtype('int32'): 'int',
}
return dic.get(typeIn, "String");
def csvInferSep(path):
f = open(path)
sepList = [",", ';', ':', '|']
first = f.readline()
ordTupleSep = sorted([(x, first.count(x)) for x in sepList], key=lambda x: -x[1])
return ordTupleSep[0][0]
#print(getDataSchema("data.json", "testData"))
| [
"json.load",
"numpy.dtype",
"pandas.io.json.json_normalize",
"pandas.read_csv"
] | [((279, 325), 'pandas.io.json.json_normalize', 'pd.io.json.json_normalize', (['dataJson'], {'sep': '""".|."""'}), "(dataJson, sep='.|.')\n", (304, 325), True, 'import pandas as pd\n'), ((991, 1004), 'numpy.dtype', 'np.dtype', (['"""O"""'], {}), "('O')\n", (999, 1004), True, 'import numpy as np\n'), ((1024, 1043), 'numpy.dtype', 'np.dtype', (['"""float64"""'], {}), "('float64')\n", (1032, 1043), True, 'import numpy as np\n'), ((1063, 1082), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (1071, 1082), True, 'import numpy as np\n'), ((1102, 1119), 'numpy.dtype', 'np.dtype', (['"""int64"""'], {}), "('int64')\n", (1110, 1119), True, 'import numpy as np\n'), ((1136, 1153), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (1144, 1153), True, 'import numpy as np\n'), ((243, 263), 'json.load', 'json.load', (['data_file'], {}), '(data_file)\n', (252, 263), False, 'import json\n'), ((441, 473), 'pandas.read_csv', 'pd.read_csv', (['path'], {'sep': 'separator'}), '(path, sep=separator)\n', (452, 473), True, 'import pandas as pd\n')] |
import redis
redis_client = redis.StrictRedis(host="127.0.0.1", port=6379)
input("") | [
"redis.StrictRedis"
] | [((29, 75), 'redis.StrictRedis', 'redis.StrictRedis', ([], {'host': '"""127.0.0.1"""', 'port': '(6379)'}), "(host='127.0.0.1', port=6379)\n", (46, 75), False, 'import redis\n')] |
import logging
import random
from abc import abstractmethod
from pycoin.serialize import b2h
from cert_issuer import tx_utils
from cert_issuer.errors import InsufficientFundsError
from cert_issuer.signer import FinalizableSigner
# Estimate fees assuming worst case 3 inputs
ESTIMATE_NUM_INPUTS = 3
# Estimate fees assuming 1 output for change.
# Note that tx_utils calculations add on cost due to OP_RETURN size, so it doesn't need to be added here.
V2_NUM_OUTPUTS = 1
class TransactionHandler(object):
@abstractmethod
def ensure_balance(self):
pass
@abstractmethod
def issue_transaction(self, op_return_bytes):
pass
class TransactionCreator(object):
@abstractmethod
def estimate_cost_for_certificate_batch(self, tx_cost_constants, num_inputs=ESTIMATE_NUM_INPUTS):
pass
@abstractmethod
def create_transaction(self, tx_cost_constants, issuing_address, inputs, op_return_value):
pass
class TransactionV2Creator(TransactionCreator):
def estimate_cost_for_certificate_batch(self, tx_cost_constants, num_inputs=ESTIMATE_NUM_INPUTS):
total = tx_utils.calculate_tx_fee(tx_cost_constants, num_inputs, V2_NUM_OUTPUTS)
return total
def create_transaction(self, tx_cost_constants, issuing_address, inputs, op_return_value):
fee = tx_utils.calculate_tx_fee(tx_cost_constants, len(inputs), V2_NUM_OUTPUTS)
transaction = tx_utils.create_trx(
op_return_value,
fee,
issuing_address,
[],
inputs)
return transaction
class BitcoinTransactionHandler(TransactionHandler):
def __init__(self, connector, tx_cost_constants, secret_manager, issuing_address, prepared_inputs=None,
transaction_creator=TransactionV2Creator()):
self.connector = connector
self.tx_cost_constants = tx_cost_constants
self.secret_manager = secret_manager
self.issuing_address = issuing_address
self.prepared_inputs = prepared_inputs
self.transaction_creator = transaction_creator
def ensure_balance(self):
# ensure the issuing address has sufficient balance
balance = self.connector.get_balance(self.issuing_address)
transaction_cost = self.transaction_creator.estimate_cost_for_certificate_batch(self.tx_cost_constants)
logging.info('Total cost will be %d satoshis', transaction_cost)
if transaction_cost > balance:
error_message = 'Please add {} satoshis to the address {}'.format(
transaction_cost - balance, self.issuing_address)
logging.error(error_message)
raise InsufficientFundsError(error_message)
def issue_transaction(self, op_return_bytes):
op_return_value = b2h(op_return_bytes)
prepared_tx = self.create_transaction(op_return_bytes)
signed_tx = self.sign_transaction(prepared_tx)
self.verify_transaction(signed_tx, op_return_value)
txid = self.broadcast_transaction(signed_tx)
logging.info('Broadcast transaction with txid %s', txid)
return txid
def create_transaction(self, op_return_bytes):
if self.prepared_inputs:
inputs = self.prepared_inputs
else:
spendables = self.connector.get_unspent_outputs(self.issuing_address)
if not spendables:
error_message = 'No money to spend at address {}'.format(self.issuing_address)
logging.error(error_message)
raise InsufficientFundsError(error_message)
cost = self.transaction_creator.estimate_cost_for_certificate_batch(self.tx_cost_constants)
current_total = 0
inputs = []
random.shuffle(spendables)
for s in spendables:
inputs.append(s)
current_total += s.coin_value
if current_total > cost:
break
tx = self.transaction_creator.create_transaction(self.tx_cost_constants, self.issuing_address, inputs,
op_return_bytes)
hex_tx = b2h(tx.serialize())
logging.info('Unsigned hextx=%s', hex_tx)
prepared_tx = tx_utils.prepare_tx_for_signing(hex_tx, inputs)
return prepared_tx
def sign_transaction(self, prepared_tx):
with FinalizableSigner(self.secret_manager) as signer:
signed_tx = signer.sign_transaction(prepared_tx)
# log the actual byte count
tx_byte_count = tx_utils.get_byte_count(signed_tx)
logging.info('The actual transaction size is %d bytes', tx_byte_count)
signed_hextx = signed_tx.as_hex()
logging.info('Signed hextx=%s', signed_hextx)
return signed_tx
def verify_transaction(self, signed_tx, op_return_value):
signed_hextx = signed_tx.as_hex()
logging.info('Signed hextx=%s', signed_hextx)
tx_utils.verify_transaction(signed_hextx, op_return_value)
def broadcast_transaction(self, signed_tx):
tx_id = self.connector.broadcast_tx(signed_tx)
return tx_id
class MockTransactionHandler(TransactionHandler):
def ensure_balance(self):
pass
def issue_transaction(self, op_return_bytes):
return 'This has not been issued on a blockchain and is for testing only'
| [
"random.shuffle",
"logging.info",
"cert_issuer.tx_utils.get_byte_count",
"pycoin.serialize.b2h",
"cert_issuer.tx_utils.create_trx",
"cert_issuer.signer.FinalizableSigner",
"cert_issuer.tx_utils.verify_transaction",
"cert_issuer.tx_utils.calculate_tx_fee",
"cert_issuer.tx_utils.prepare_tx_for_signing... | [((1124, 1196), 'cert_issuer.tx_utils.calculate_tx_fee', 'tx_utils.calculate_tx_fee', (['tx_cost_constants', 'num_inputs', 'V2_NUM_OUTPUTS'], {}), '(tx_cost_constants, num_inputs, V2_NUM_OUTPUTS)\n', (1149, 1196), False, 'from cert_issuer import tx_utils\n'), ((1424, 1494), 'cert_issuer.tx_utils.create_trx', 'tx_utils.create_trx', (['op_return_value', 'fee', 'issuing_address', '[]', 'inputs'], {}), '(op_return_value, fee, issuing_address, [], inputs)\n', (1443, 1494), False, 'from cert_issuer import tx_utils\n'), ((2368, 2432), 'logging.info', 'logging.info', (['"""Total cost will be %d satoshis"""', 'transaction_cost'], {}), "('Total cost will be %d satoshis', transaction_cost)\n", (2380, 2432), False, 'import logging\n'), ((2792, 2812), 'pycoin.serialize.b2h', 'b2h', (['op_return_bytes'], {}), '(op_return_bytes)\n', (2795, 2812), False, 'from pycoin.serialize import b2h\n'), ((3053, 3109), 'logging.info', 'logging.info', (['"""Broadcast transaction with txid %s"""', 'txid'], {}), "('Broadcast transaction with txid %s', txid)\n", (3065, 3109), False, 'import logging\n'), ((4192, 4233), 'logging.info', 'logging.info', (['"""Unsigned hextx=%s"""', 'hex_tx'], {}), "('Unsigned hextx=%s', hex_tx)\n", (4204, 4233), False, 'import logging\n'), ((4256, 4303), 'cert_issuer.tx_utils.prepare_tx_for_signing', 'tx_utils.prepare_tx_for_signing', (['hex_tx', 'inputs'], {}), '(hex_tx, inputs)\n', (4287, 4303), False, 'from cert_issuer import tx_utils\n'), ((4562, 4596), 'cert_issuer.tx_utils.get_byte_count', 'tx_utils.get_byte_count', (['signed_tx'], {}), '(signed_tx)\n', (4585, 4596), False, 'from cert_issuer import tx_utils\n'), ((4605, 4675), 'logging.info', 'logging.info', (['"""The actual transaction size is %d bytes"""', 'tx_byte_count'], {}), "('The actual transaction size is %d bytes', tx_byte_count)\n", (4617, 4675), False, 'import logging\n'), ((4727, 4772), 'logging.info', 'logging.info', (['"""Signed hextx=%s"""', 
'signed_hextx'], {}), "('Signed hextx=%s', signed_hextx)\n", (4739, 4772), False, 'import logging\n'), ((4911, 4956), 'logging.info', 'logging.info', (['"""Signed hextx=%s"""', 'signed_hextx'], {}), "('Signed hextx=%s', signed_hextx)\n", (4923, 4956), False, 'import logging\n'), ((4965, 5023), 'cert_issuer.tx_utils.verify_transaction', 'tx_utils.verify_transaction', (['signed_hextx', 'op_return_value'], {}), '(signed_hextx, op_return_value)\n', (4992, 5023), False, 'from cert_issuer import tx_utils\n'), ((2630, 2658), 'logging.error', 'logging.error', (['error_message'], {}), '(error_message)\n', (2643, 2658), False, 'import logging\n'), ((2677, 2714), 'cert_issuer.errors.InsufficientFundsError', 'InsufficientFundsError', (['error_message'], {}), '(error_message)\n', (2699, 2714), False, 'from cert_issuer.errors import InsufficientFundsError\n'), ((3755, 3781), 'random.shuffle', 'random.shuffle', (['spendables'], {}), '(spendables)\n', (3769, 3781), False, 'import random\n'), ((4390, 4428), 'cert_issuer.signer.FinalizableSigner', 'FinalizableSigner', (['self.secret_manager'], {}), '(self.secret_manager)\n', (4407, 4428), False, 'from cert_issuer.signer import FinalizableSigner\n'), ((3495, 3523), 'logging.error', 'logging.error', (['error_message'], {}), '(error_message)\n', (3508, 3523), False, 'import logging\n'), ((3546, 3583), 'cert_issuer.errors.InsufficientFundsError', 'InsufficientFundsError', (['error_message'], {}), '(error_message)\n', (3568, 3583), False, 'from cert_issuer.errors import InsufficientFundsError\n')] |
import pytest
import numpy as np
from csgo.analytics.distance import (
bombsite_distance,
point_distance,
polygon_area,
area_distance,
)
from csgo.analytics.coords import Encoder
class TestCSGOAnalytics:
"""Class to test CSGO analytics"""
def test_bombsite_distance(self):
"""Test bombsite distance function."""
assert bombsite_distance([0, 0, 0]) == 35
assert bombsite_distance([0, 0, 0], bombsite="B") == 38
assert bombsite_distance([0, 0, 0], bombsite="A", map="de_inferno") == 30
def test_point_distance(self):
"""Test point distance function"""
assert point_distance([0, 0], [1, 1], type="euclidean") == 1.4142135623730951
assert point_distance([0, 0], [1, 1], type="manhattan") == 2
assert point_distance([0, 0], [1, 1], type="canberra") == 2.0
assert point_distance([-1, 5], [2, 1], type="cosine") == 0.7368825942078912
assert point_distance([0, 0, 0], [100, 100, 100]) == 4
assert point_distance([0, 0, 0], [100, 100, 100], map="de_vertigo") == 1
def test_polygon_area(self):
"""Test polygon area function"""
assert polygon_area([0, 1, 2], [0, 1, 0]) == 1.0
def test_bombsite_invalid_map(self):
"""
Test bombsite function with an invalid map.
"""
with pytest.raises(ValueError):
bombsite_distance([0, 0, 0], map="dust2")
def test_point_invalid_map(self):
"""
Test point distance function with an invalid map.
"""
with pytest.raises(ValueError):
point_distance([0, 0, 0], [1, 1, 1], map="dust2")
def test_area_invalid_map(self):
"""
Test area distance function with an invalid map.
"""
with pytest.raises(ValueError):
area_distance(26, 42, map="dust2")
def test_area_dist(self):
"""
Tests that area distance returns correct value.
"""
assert area_distance(26, 42, map="de_mirage") == 26
def test_place_encode(self):
"""
Tests that place encoding works for correct values
"""
e = Encoder()
assert np.sum(e.encode("place", "TSpawn")) == 1
assert np.sum(e.encode("place", "TSpawnnn")) == 0
assert np.sum(e.encode("map", "de_dust2")) == 1
assert np.sum(e.encode("map", "de_dust0")) == 0
| [
"csgo.analytics.distance.area_distance",
"csgo.analytics.distance.polygon_area",
"pytest.raises",
"csgo.analytics.distance.point_distance",
"csgo.analytics.coords.Encoder",
"csgo.analytics.distance.bombsite_distance"
] | [((2150, 2159), 'csgo.analytics.coords.Encoder', 'Encoder', ([], {}), '()\n', (2157, 2159), False, 'from csgo.analytics.coords import Encoder\n'), ((364, 392), 'csgo.analytics.distance.bombsite_distance', 'bombsite_distance', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (381, 392), False, 'from csgo.analytics.distance import bombsite_distance, point_distance, polygon_area, area_distance\n'), ((414, 456), 'csgo.analytics.distance.bombsite_distance', 'bombsite_distance', (['[0, 0, 0]'], {'bombsite': '"""B"""'}), "([0, 0, 0], bombsite='B')\n", (431, 456), False, 'from csgo.analytics.distance import bombsite_distance, point_distance, polygon_area, area_distance\n'), ((478, 538), 'csgo.analytics.distance.bombsite_distance', 'bombsite_distance', (['[0, 0, 0]'], {'bombsite': '"""A"""', 'map': '"""de_inferno"""'}), "([0, 0, 0], bombsite='A', map='de_inferno')\n", (495, 538), False, 'from csgo.analytics.distance import bombsite_distance, point_distance, polygon_area, area_distance\n'), ((639, 687), 'csgo.analytics.distance.point_distance', 'point_distance', (['[0, 0]', '[1, 1]'], {'type': '"""euclidean"""'}), "([0, 0], [1, 1], type='euclidean')\n", (653, 687), False, 'from csgo.analytics.distance import bombsite_distance, point_distance, polygon_area, area_distance\n'), ((725, 773), 'csgo.analytics.distance.point_distance', 'point_distance', (['[0, 0]', '[1, 1]'], {'type': '"""manhattan"""'}), "([0, 0], [1, 1], type='manhattan')\n", (739, 773), False, 'from csgo.analytics.distance import bombsite_distance, point_distance, polygon_area, area_distance\n'), ((794, 841), 'csgo.analytics.distance.point_distance', 'point_distance', (['[0, 0]', '[1, 1]'], {'type': '"""canberra"""'}), "([0, 0], [1, 1], type='canberra')\n", (808, 841), False, 'from csgo.analytics.distance import bombsite_distance, point_distance, polygon_area, area_distance\n'), ((864, 910), 'csgo.analytics.distance.point_distance', 'point_distance', (['[-1, 5]', '[2, 1]'], {'type': '"""cosine"""'}), "([-1, 5], [2, 1], 
type='cosine')\n", (878, 910), False, 'from csgo.analytics.distance import bombsite_distance, point_distance, polygon_area, area_distance\n'), ((948, 990), 'csgo.analytics.distance.point_distance', 'point_distance', (['[0, 0, 0]', '[100, 100, 100]'], {}), '([0, 0, 0], [100, 100, 100])\n', (962, 990), False, 'from csgo.analytics.distance import bombsite_distance, point_distance, polygon_area, area_distance\n'), ((1011, 1071), 'csgo.analytics.distance.point_distance', 'point_distance', (['[0, 0, 0]', '[100, 100, 100]'], {'map': '"""de_vertigo"""'}), "([0, 0, 0], [100, 100, 100], map='de_vertigo')\n", (1025, 1071), False, 'from csgo.analytics.distance import bombsite_distance, point_distance, polygon_area, area_distance\n'), ((1167, 1201), 'csgo.analytics.distance.polygon_area', 'polygon_area', (['[0, 1, 2]', '[0, 1, 0]'], {}), '([0, 1, 2], [0, 1, 0])\n', (1179, 1201), False, 'from csgo.analytics.distance import bombsite_distance, point_distance, polygon_area, area_distance\n'), ((1340, 1365), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1353, 1365), False, 'import pytest\n'), ((1379, 1420), 'csgo.analytics.distance.bombsite_distance', 'bombsite_distance', (['[0, 0, 0]'], {'map': '"""dust2"""'}), "([0, 0, 0], map='dust2')\n", (1396, 1420), False, 'from csgo.analytics.distance import bombsite_distance, point_distance, polygon_area, area_distance\n'), ((1555, 1580), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1568, 1580), False, 'import pytest\n'), ((1594, 1643), 'csgo.analytics.distance.point_distance', 'point_distance', (['[0, 0, 0]', '[1, 1, 1]'], {'map': '"""dust2"""'}), "([0, 0, 0], [1, 1, 1], map='dust2')\n", (1608, 1643), False, 'from csgo.analytics.distance import bombsite_distance, point_distance, polygon_area, area_distance\n'), ((1776, 1801), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1789, 1801), False, 'import pytest\n'), ((1815, 1849), 
'csgo.analytics.distance.area_distance', 'area_distance', (['(26)', '(42)'], {'map': '"""dust2"""'}), "(26, 42, map='dust2')\n", (1828, 1849), False, 'from csgo.analytics.distance import bombsite_distance, point_distance, polygon_area, area_distance\n'), ((1976, 2014), 'csgo.analytics.distance.area_distance', 'area_distance', (['(26)', '(42)'], {'map': '"""de_mirage"""'}), "(26, 42, map='de_mirage')\n", (1989, 2014), False, 'from csgo.analytics.distance import bombsite_distance, point_distance, polygon_area, area_distance\n')] |
# coding: utf-8
import logging
import re
from itertools import chain
from textwrap import TextWrapper
from django.core import mail
from django.test import TestCase as DjangoTestCase
from django.views import debug
from six import string_types
from six.moves.urllib.parse import urlparse, urlunparse
from threadlocals.threadlocals import set_thread_variable
from .celery import app as celery_app
# patch the default formatter to use a unicode format string
logging._defaultFormatter = logging.Formatter("%(message)s")
logger = logging.getLogger(__name__)
HIDDEN_SETTING = re.compile(r"URL|BACKEND")
class TestCase(DjangoTestCase):
"""
Overrides the default Django TestCase to clear out the threadlocal request
variable during class setUp and tearDown.
"""
@classmethod
def setUpClass(cls):
super(TestCase, cls).setUpClass()
set_thread_variable("request", None)
@classmethod
def tearDownClass(cls):
set_thread_variable("request", None)
super(TestCase, cls).tearDownClass()
def setUp(self):
super(TestCase, self).setUp()
set_thread_variable("request", None)
def tearDown(self):
set_thread_variable("request", None)
super(TestCase, self).tearDown()
def monkey_patch_cleanse_setting():
# monkey-patch django.views.debug.cleanse_setting to check for CELERY_RESULT_BACKEND
_cleanse_setting = debug.cleanse_setting
def cleanse_setting(key, value):
cleansed = _cleanse_setting(key, value)
if HIDDEN_SETTING.search(key):
try:
parsed = None
if isinstance(value, string_types):
parsed = urlparse(value)
if parsed and parsed.password:
# urlparse returns a read-only tuple, use a list to rewrite parts
parsed_list = list(parsed)
parsed_list[1] = parsed.netloc.replace(
f":{parsed.password}", ":**********", 1
)
# put Humpty Dumpty back together again
cleansed = urlunparse(parsed_list)
except Exception:
logger.exception("Exception cleansing URLs for error reporting")
return cleansed
debug.cleanse_setting = cleanse_setting
def monkey_patch_mail_admins():
# monkey-patch django.core.mail.mail_admins to properly wrap long lines
_mail_admins = mail.mail_admins
def mail_admins(subject, message, *args, **kwargs):
"""
Wraps the mail_admins function from Django to wrap long lines in emails.
The exim mail server used in EDD dis-allows lines longer than 998 bytes.
"""
wrapper = TextWrapper(
width=79,
break_on_hyphens=False,
replace_whitespace=False,
subsequent_indent=" ",
)
message = "\n".join(
chain(*(wrapper.wrap(line) for line in message.splitlines()))
)
_mail_admins(subject, message, *args, **kwargs)
mail.mail_admins = mail_admins
monkey_patch_cleanse_setting()
monkey_patch_mail_admins()
__all__ = ("celery_app", "TestCase")
| [
"logging.getLogger",
"threadlocals.threadlocals.set_thread_variable",
"re.compile",
"logging.Formatter",
"textwrap.TextWrapper",
"six.moves.urllib.parse.urlparse",
"six.moves.urllib.parse.urlunparse"
] | [((487, 519), 'logging.Formatter', 'logging.Formatter', (['"""%(message)s"""'], {}), "('%(message)s')\n", (504, 519), False, 'import logging\n'), ((529, 556), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (546, 556), False, 'import logging\n'), ((574, 599), 're.compile', 're.compile', (['"""URL|BACKEND"""'], {}), "('URL|BACKEND')\n", (584, 599), False, 'import re\n'), ((869, 905), 'threadlocals.threadlocals.set_thread_variable', 'set_thread_variable', (['"""request"""', 'None'], {}), "('request', None)\n", (888, 905), False, 'from threadlocals.threadlocals import set_thread_variable\n'), ((960, 996), 'threadlocals.threadlocals.set_thread_variable', 'set_thread_variable', (['"""request"""', 'None'], {}), "('request', None)\n", (979, 996), False, 'from threadlocals.threadlocals import set_thread_variable\n'), ((1110, 1146), 'threadlocals.threadlocals.set_thread_variable', 'set_thread_variable', (['"""request"""', 'None'], {}), "('request', None)\n", (1129, 1146), False, 'from threadlocals.threadlocals import set_thread_variable\n'), ((1180, 1216), 'threadlocals.threadlocals.set_thread_variable', 'set_thread_variable', (['"""request"""', 'None'], {}), "('request', None)\n", (1199, 1216), False, 'from threadlocals.threadlocals import set_thread_variable\n'), ((2727, 2826), 'textwrap.TextWrapper', 'TextWrapper', ([], {'width': '(79)', 'break_on_hyphens': '(False)', 'replace_whitespace': '(False)', 'subsequent_indent': '""" """'}), "(width=79, break_on_hyphens=False, replace_whitespace=False,\n subsequent_indent=' ')\n", (2738, 2826), False, 'from textwrap import TextWrapper\n'), ((1683, 1698), 'six.moves.urllib.parse.urlparse', 'urlparse', (['value'], {}), '(value)\n', (1691, 1698), False, 'from six.moves.urllib.parse import urlparse, urlunparse\n'), ((2116, 2139), 'six.moves.urllib.parse.urlunparse', 'urlunparse', (['parsed_list'], {}), '(parsed_list)\n', (2126, 2139), False, 'from six.moves.urllib.parse import urlparse, 
urlunparse\n')] |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: networking/v1alpha3/service_dependency.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='networking/v1alpha3/service_dependency.proto',
package='istio.networking.v1alpha3',
syntax='proto3',
serialized_pb=_b('\n,networking/v1alpha3/service_dependency.proto\x12\x19istio.networking.v1alpha3\"\x92\x03\n\x11ServiceDependency\x12M\n\x0c\x64\x65pendencies\x18\x01 \x03(\x0b\x32\x37.istio.networking.v1alpha3.ServiceDependency.Dependency\x1a)\n\x06Import\x12\x11\n\tnamespace\x18\x01 \x01(\t\x12\x0c\n\x04host\x18\x02 \x01(\t\x1a\x82\x02\n\nDependency\x12q\n\x16source_workload_labels\x18\x01 \x03(\x0b\x32Q.istio.networking.v1alpha3.ServiceDependency.Dependency.SourceWorkloadLabelsEntry\x12\x44\n\x07imports\x18\x02 \x03(\x0b\x32\x33.istio.networking.v1alpha3.ServiceDependency.Import\x1a;\n\x19SourceWorkloadLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01*&\n\x0b\x43onfigScope\x12\n\n\x06PUBLIC\x10\x00\x12\x0b\n\x07PRIVATE\x10\x01\x42\"Z istio.io/api/networking/v1alpha3b\x06proto3')
)
_CONFIGSCOPE = _descriptor.EnumDescriptor(
name='ConfigScope',
full_name='istio.networking.v1alpha3.ConfigScope',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='PUBLIC', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PRIVATE', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=480,
serialized_end=518,
)
_sym_db.RegisterEnumDescriptor(_CONFIGSCOPE)
ConfigScope = enum_type_wrapper.EnumTypeWrapper(_CONFIGSCOPE)
PUBLIC = 0
PRIVATE = 1
_SERVICEDEPENDENCY_IMPORT = _descriptor.Descriptor(
name='Import',
full_name='istio.networking.v1alpha3.ServiceDependency.Import',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='namespace', full_name='istio.networking.v1alpha3.ServiceDependency.Import.namespace', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='host', full_name='istio.networking.v1alpha3.ServiceDependency.Import.host', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=176,
serialized_end=217,
)
_SERVICEDEPENDENCY_DEPENDENCY_SOURCEWORKLOADLABELSENTRY = _descriptor.Descriptor(
name='SourceWorkloadLabelsEntry',
full_name='istio.networking.v1alpha3.ServiceDependency.Dependency.SourceWorkloadLabelsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='istio.networking.v1alpha3.ServiceDependency.Dependency.SourceWorkloadLabelsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='istio.networking.v1alpha3.ServiceDependency.Dependency.SourceWorkloadLabelsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=419,
serialized_end=478,
)
_SERVICEDEPENDENCY_DEPENDENCY = _descriptor.Descriptor(
name='Dependency',
full_name='istio.networking.v1alpha3.ServiceDependency.Dependency',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='source_workload_labels', full_name='istio.networking.v1alpha3.ServiceDependency.Dependency.source_workload_labels', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='imports', full_name='istio.networking.v1alpha3.ServiceDependency.Dependency.imports', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_SERVICEDEPENDENCY_DEPENDENCY_SOURCEWORKLOADLABELSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=220,
serialized_end=478,
)
_SERVICEDEPENDENCY = _descriptor.Descriptor(
name='ServiceDependency',
full_name='istio.networking.v1alpha3.ServiceDependency',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dependencies', full_name='istio.networking.v1alpha3.ServiceDependency.dependencies', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_SERVICEDEPENDENCY_IMPORT, _SERVICEDEPENDENCY_DEPENDENCY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=76,
serialized_end=478,
)
_SERVICEDEPENDENCY_IMPORT.containing_type = _SERVICEDEPENDENCY
_SERVICEDEPENDENCY_DEPENDENCY_SOURCEWORKLOADLABELSENTRY.containing_type = _SERVICEDEPENDENCY_DEPENDENCY
_SERVICEDEPENDENCY_DEPENDENCY.fields_by_name['source_workload_labels'].message_type = _SERVICEDEPENDENCY_DEPENDENCY_SOURCEWORKLOADLABELSENTRY
_SERVICEDEPENDENCY_DEPENDENCY.fields_by_name['imports'].message_type = _SERVICEDEPENDENCY_IMPORT
_SERVICEDEPENDENCY_DEPENDENCY.containing_type = _SERVICEDEPENDENCY
_SERVICEDEPENDENCY.fields_by_name['dependencies'].message_type = _SERVICEDEPENDENCY_DEPENDENCY
DESCRIPTOR.message_types_by_name['ServiceDependency'] = _SERVICEDEPENDENCY
DESCRIPTOR.enum_types_by_name['ConfigScope'] = _CONFIGSCOPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ServiceDependency = _reflection.GeneratedProtocolMessageType('ServiceDependency', (_message.Message,), dict(
Import = _reflection.GeneratedProtocolMessageType('Import', (_message.Message,), dict(
DESCRIPTOR = _SERVICEDEPENDENCY_IMPORT,
__module__ = 'networking.v1alpha3.service_dependency_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.ServiceDependency.Import)
))
,
Dependency = _reflection.GeneratedProtocolMessageType('Dependency', (_message.Message,), dict(
SourceWorkloadLabelsEntry = _reflection.GeneratedProtocolMessageType('SourceWorkloadLabelsEntry', (_message.Message,), dict(
DESCRIPTOR = _SERVICEDEPENDENCY_DEPENDENCY_SOURCEWORKLOADLABELSENTRY,
__module__ = 'networking.v1alpha3.service_dependency_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.ServiceDependency.Dependency.SourceWorkloadLabelsEntry)
))
,
DESCRIPTOR = _SERVICEDEPENDENCY_DEPENDENCY,
__module__ = 'networking.v1alpha3.service_dependency_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.ServiceDependency.Dependency)
))
,
DESCRIPTOR = _SERVICEDEPENDENCY,
__module__ = 'networking.v1alpha3.service_dependency_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.ServiceDependency)
))
_sym_db.RegisterMessage(ServiceDependency)
_sym_db.RegisterMessage(ServiceDependency.Import)
_sym_db.RegisterMessage(ServiceDependency.Dependency)
_sym_db.RegisterMessage(ServiceDependency.Dependency.SourceWorkloadLabelsEntry)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('Z istio.io/api/networking/v1alpha3'))
_SERVICEDEPENDENCY_DEPENDENCY_SOURCEWORKLOADLABELSENTRY.has_options = True
_SERVICEDEPENDENCY_DEPENDENCY_SOURCEWORKLOADLABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
# @@protoc_insertion_point(module_scope)
| [
"google.protobuf.descriptor_pb2.MessageOptions",
"google.protobuf.symbol_database.Default",
"google.protobuf.descriptor.FieldDescriptor",
"google.protobuf.internal.enum_type_wrapper.EnumTypeWrapper",
"google.protobuf.descriptor.EnumValueDescriptor",
"google.protobuf.descriptor_pb2.FileOptions"
] | [((566, 592), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ([], {}), '()\n', (590, 592), True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((2141, 2188), 'google.protobuf.internal.enum_type_wrapper.EnumTypeWrapper', 'enum_type_wrapper.EnumTypeWrapper', (['_CONFIGSCOPE'], {}), '(_CONFIGSCOPE)\n', (2174, 2188), False, 'from google.protobuf.internal import enum_type_wrapper\n'), ((9355, 9383), 'google.protobuf.descriptor_pb2.FileOptions', 'descriptor_pb2.FileOptions', ([], {}), '()\n', (9381, 9383), False, 'from google.protobuf import descriptor_pb2\n'), ((9595, 9626), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ([], {}), '()\n', (9624, 9626), False, 'from google.protobuf import descriptor_pb2\n'), ((1761, 1856), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', ([], {'name': '"""PUBLIC"""', 'index': '(0)', 'number': '(0)', 'options': 'None', 'type': 'None'}), "(name='PUBLIC', index=0, number=0, options=\n None, type=None)\n", (1792, 1856), True, 'from google.protobuf import descriptor as _descriptor\n'), ((1876, 1972), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', ([], {'name': '"""PRIVATE"""', 'index': '(1)', 'number': '(1)', 'options': 'None', 'type': 'None'}), "(name='PRIVATE', index=1, number=1, options=\n None, type=None)\n", (1907, 1972), True, 'from google.protobuf import descriptor as _descriptor\n'), ((4658, 4689), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ([], {}), '()\n', (4687, 4689), False, 'from google.protobuf import descriptor_pb2\n'), ((5057, 5453), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""source_workload_labels"""', 'full_name': '"""istio.networking.v1alpha3.ServiceDependency.Dependency.source_workload_labels"""', 'index': '(0)', 'number': '(1)', 'type': '(11)', 'cpp_type': '(10)', 'label': 
'(3)', 'has_default_value': '(False)', 'default_value': '[]', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None', 'file': 'DESCRIPTOR'}), "(name='source_workload_labels', full_name=\n 'istio.networking.v1alpha3.ServiceDependency.Dependency.source_workload_labels'\n , index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=\n False, default_value=[], message_type=None, enum_type=None,\n containing_type=None, is_extension=False, extension_scope=None, options\n =None, file=DESCRIPTOR)\n", (5084, 5453), True, 'from google.protobuf import descriptor as _descriptor\n'), ((5472, 5838), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""imports"""', 'full_name': '"""istio.networking.v1alpha3.ServiceDependency.Dependency.imports"""', 'index': '(1)', 'number': '(2)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(3)', 'has_default_value': '(False)', 'default_value': '[]', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None', 'file': 'DESCRIPTOR'}), "(name='imports', full_name=\n 'istio.networking.v1alpha3.ServiceDependency.Dependency.imports', index\n =1, number=2, type=11, cpp_type=10, label=3, has_default_value=False,\n default_value=[], message_type=None, enum_type=None, containing_type=\n None, is_extension=False, extension_scope=None, options=None, file=\n DESCRIPTOR)\n", (5499, 5838), True, 'from google.protobuf import descriptor as _descriptor\n'), ((6327, 6691), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""dependencies"""', 'full_name': '"""istio.networking.v1alpha3.ServiceDependency.dependencies"""', 'index': '(0)', 'number': '(1)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(3)', 'has_default_value': '(False)', 'default_value': '[]', 'message_type': 'None', 'enum_type': 'None', 
'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None', 'file': 'DESCRIPTOR'}), "(name='dependencies', full_name=\n 'istio.networking.v1alpha3.ServiceDependency.dependencies', index=0,\n number=1, type=11, cpp_type=10, label=3, has_default_value=False,\n default_value=[], message_type=None, enum_type=None, containing_type=\n None, is_extension=False, extension_scope=None, options=None, file=\n DESCRIPTOR)\n", (6354, 6691), True, 'from google.protobuf import descriptor as _descriptor\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 29 14:37:54 2021
@author: Ham
"""
import more_itertools as mit
import io
STDIN_SIO = io.StringIO("""
abcd
""".strip())
def partition_mit(s: str) -> list:
# Copied from: https://stackoverflow.com/questions/4904430/find-all-list-permutations-of-splitting-a-string-in-python
if s:
for lst in mit.partitions(s):
yield ["".join(sublst) for sublst in lst]
else:
yield []
def partition_recursive(s: str) -> list:
# Ham's own: NOT working
if s:
res = []
for i in range(1,len(s)+1):
lft = [s[:i]]
res += [lft + rest for rest in partition_recursive(s[i:])]
return res
else:
return []
def partition_generator(s: str) -> list:
# Copied from: https://stackoverflow.com/questions/52167339/get-all-possible-str-partitions-of-any-length
if s:
for i in range(1, len(s)+1):
lft = s[:i]
for p in partition_generator(s[i:]):
yield [lft] + p
else:
yield []
def splitter(str):
# Copied from: https://stackoverflow.com/questions/4904430/find-all-list-permutations-of-splitting-a-string-in-python
# Bug: missing the 1-element list containing the entire str: [str]
for i in range(1, len(str)):
start = str[0:i]
end = str[i:]
yield [start, end]
for split in splitter(end):
result = [start]
result.extend(split)
yield result
if __name__ == '__main__':
while True:
if not (line := STDIN_SIO.readline().strip()):
break
print('Partitioning "' + line + '":')
#print(*list(partition_generator(line)), sep='\n')
#print(*list(splitter(line)), sep='\n')
print(*list(partition_mit(line)), sep='\n')
#print(*list(partition_recursive(line)), sep='\n')
| [
"more_itertools.partitions"
] | [((375, 392), 'more_itertools.partitions', 'mit.partitions', (['s'], {}), '(s)\n', (389, 392), True, 'import more_itertools as mit\n')] |
"""A extrem simple Packet Encoder for the BasicPacketEncodingLayer"""
from PiCN.Layers.PacketEncodingLayer.Encoder import BasicEncoder
from PiCN.Packets import Packet, Content, Interest, Name, Nack, NackReason, UnknownPacket
class SimpleStringEncoder(BasicEncoder):
"""An extreme simple Packet Encoder for the BasicPacketEncodingLayer"""
def __init__(self, log_level=255):
super().__init__(logger_name="SimpleEnc", log_level=log_level)
def encode(self, packet: Packet):
res = None
name = self.escape_name(packet.name)
if(isinstance(packet, Interest)):
self.logger.info("Encode interest")
res = "I:" + name.to_string() + ":"
elif(isinstance(packet, Content)):
self.logger.info("Encode content object")
content = packet.content
content = content.replace(":", "%58")
res = "C:" + name.to_string() + ":" + ":" + content
elif(isinstance(packet, Nack)):
self.logger.info("Encode NACK")
res = "N:" + name.to_string() + ":" + ":" + packet.reason.value
elif(isinstance(packet, UnknownPacket)):
res = packet.wire_format
if res is not None:
return res.encode()
return None
def decode(self, wire_data) -> Packet:
data: str = wire_data.decode()
if data[0] == "I":
self.logger.info("Decode interest")
name = data.split(":")[1]
return Interest(self.unescape_name(Name(name)))
elif data[0] == "C":
self.logger.info("Decode content object")
name = data.split(":")[1]
content = data.split(":")[3].replace("%58", ":")
return Content(self.unescape_name(Name(name)), content)
elif data[0] == "N":
self.logger.info("Decode NACK")
name = data.split(":")[1]
reason = NackReason(data.split(":")[3])
return Nack(self.unescape_name(Name(name)), reason, None)
else:
self.logger.info("Decode failed (unknown packet type)")
return UnknownPacket(wire_format=wire_data)
def escape_name(self, name: Name):
"""escape a name"""
n2 = Name()
for c in name.string_components:
n2 += c.replace("/", "%2F")
return n2
def unescape_name(self, name: Name):
"""unescape a name"""
n2 = Name()
for c in name.string_components:
n2 += c.replace("%2F", "/")
return n2
| [
"PiCN.Packets.UnknownPacket",
"PiCN.Packets.Name"
] | [((2228, 2234), 'PiCN.Packets.Name', 'Name', ([], {}), '()\n', (2232, 2234), False, 'from PiCN.Packets import Packet, Content, Interest, Name, Nack, NackReason, UnknownPacket\n'), ((2419, 2425), 'PiCN.Packets.Name', 'Name', ([], {}), '()\n', (2423, 2425), False, 'from PiCN.Packets import Packet, Content, Interest, Name, Nack, NackReason, UnknownPacket\n'), ((1512, 1522), 'PiCN.Packets.Name', 'Name', (['name'], {}), '(name)\n', (1516, 1522), False, 'from PiCN.Packets import Packet, Content, Interest, Name, Nack, NackReason, UnknownPacket\n'), ((2109, 2145), 'PiCN.Packets.UnknownPacket', 'UnknownPacket', ([], {'wire_format': 'wire_data'}), '(wire_format=wire_data)\n', (2122, 2145), False, 'from PiCN.Packets import Packet, Content, Interest, Name, Nack, NackReason, UnknownPacket\n'), ((1753, 1763), 'PiCN.Packets.Name', 'Name', (['name'], {}), '(name)\n', (1757, 1763), False, 'from PiCN.Packets import Packet, Content, Interest, Name, Nack, NackReason, UnknownPacket\n'), ((1981, 1991), 'PiCN.Packets.Name', 'Name', (['name'], {}), '(name)\n', (1985, 1991), False, 'from PiCN.Packets import Packet, Content, Interest, Name, Nack, NackReason, UnknownPacket\n')] |
from Task import Task
from Helper import Cli
class CliExecute(Task):
def __init__(self, logMethod, parent, params):
super().__init__("CLI Execute", parent, params, logMethod, None)
def Run(self):
parameters = self.params['Parameters']
cwd = self.params['CWD']
cli = Cli(parameters, cwd, self.logMethod)
cli.Execute()
| [
"Helper.Cli"
] | [((310, 346), 'Helper.Cli', 'Cli', (['parameters', 'cwd', 'self.logMethod'], {}), '(parameters, cwd, self.logMethod)\n', (313, 346), False, 'from Helper import Cli\n')] |
import airflow
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from datetime import datetime, timedelta
schedule_interval = timedelta(days=2)
default_args = {
'owner': '<NAME>',
'depends_on_past': False,
'start_date': datetime.now() - schedule_interval,
'email': ['<EMAIL>'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=5),
}
dag = DAG(
'scheduler',
default_args=default_args,
description='DAG for the Spark Batch Job',
schedule_interval=schedule_interval)
task = BashOperator(
task_id='run_batch_job',
bash_command='cd /home/ubuntu/Spot/airflow ; ./spark-run.sh',
dag=dag)
task.doc_md = """\
#### Task Documentation
Spark Batch Job is scheduled to start every other day
"""
dag.doc_md = __doc__
| [
"datetime.datetime.now",
"datetime.timedelta",
"airflow.operators.bash_operator.BashOperator",
"airflow.DAG"
] | [((158, 175), 'datetime.timedelta', 'timedelta', ([], {'days': '(2)'}), '(days=2)\n', (167, 175), False, 'from datetime import datetime, timedelta\n'), ((455, 583), 'airflow.DAG', 'DAG', (['"""scheduler"""'], {'default_args': 'default_args', 'description': '"""DAG for the Spark Batch Job"""', 'schedule_interval': 'schedule_interval'}), "('scheduler', default_args=default_args, description=\n 'DAG for the Spark Batch Job', schedule_interval=schedule_interval)\n", (458, 583), False, 'from airflow import DAG\n'), ((605, 718), 'airflow.operators.bash_operator.BashOperator', 'BashOperator', ([], {'task_id': '"""run_batch_job"""', 'bash_command': '"""cd /home/ubuntu/Spot/airflow ; ./spark-run.sh"""', 'dag': 'dag'}), "(task_id='run_batch_job', bash_command=\n 'cd /home/ubuntu/Spot/airflow ; ./spark-run.sh', dag=dag)\n", (617, 718), False, 'from airflow.operators.bash_operator import BashOperator\n'), ((424, 444), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(5)'}), '(minutes=5)\n', (433, 444), False, 'from datetime import datetime, timedelta\n'), ((265, 279), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (277, 279), False, 'from datetime import datetime, timedelta\n')] |
import logging
# Module-level logger for pump events.
logger = logging.getLogger(__name__)
# Pump rate in mL/s (4.3 L/min)
_PUMP_RATE_ML_PER_SEC = 4300.0 / 60.0
# Default amount of water to add to the plant (in mL) when pump manager detects
# low soil moisture.
DEFAULT_PUMP_AMOUNT = 200
class Pump(object):
    """Wrapper for a Seaflo 12V water pump."""

    def __init__(self, pi_io, clock, pump_pin):
        """Creates a new Pump wrapper.

        Args:
            pi_io: Raspberry Pi I/O interface.
            clock: A clock interface.
            pump_pin: Raspberry Pi pin to which the pump is connected.
        """
        self._pi_io = pi_io
        self._clock = clock
        self._pump_pin = pump_pin

    def pump_water(self, amount_ml):
        """Pumps the specified amount of water.

        The pump runs for amount_ml / _PUMP_RATE_ML_PER_SEC seconds and is
        guaranteed to be switched off afterwards, even if the wait is
        interrupted by an exception (e.g. KeyboardInterrupt).

        Args:
            amount_ml: Amount of water to pump (in mL). Zero is a no-op.

        Raises:
            ValueError: The amount of water to be pumped is negative.
        """
        if amount_ml == 0.0:
            return
        if amount_ml < 0.0:
            raise ValueError('Cannot pump a negative amount of water')
        logger.info('turning pump on (with GPIO pin %d)', self._pump_pin)
        self._pi_io.turn_pin_on(self._pump_pin)
        try:
            # Run the pump just long enough to move the requested volume.
            wait_time_seconds = amount_ml / _PUMP_RATE_ML_PER_SEC
            self._clock.wait(wait_time_seconds)
        finally:
            # Never leave the pump running, even if the wait above raised.
            logger.info('turning pump off (with GPIO pin %d)', self._pump_pin)
            self._pi_io.turn_pin_off(self._pump_pin)
        logger.info('pumped %.f mL of water', amount_ml)
class PumpManager(object):
    """Coordinates the water pump: decides when to run it and runs it."""

    def __init__(self, pump, pump_scheduler, moisture_threshold, pump_amount,
                 timer):
        """Creates a PumpManager object, which manages a water pump.

        Args:
            pump: Pump instance used to actually move water.
            pump_scheduler: Controls the time windows in which pumping is
                permitted at all.
            moisture_threshold: Soil moisture level below which the soil is
                considered dry enough to water.
            pump_amount: Amount (in mL) pumped on every watering.
            timer: Countdown until the next forced watering; when it expires
                the pump runs once regardless of the moisture level.
        """
        self._pump = pump
        self._pump_scheduler = pump_scheduler
        self._moisture_threshold = moisture_threshold
        self._pump_amount = pump_amount
        self._timer = timer

    def pump_if_needed(self, moisture):
        """Run the water pump if there is a need to run it.

        Args:
            moisture: Soil moisture level.

        Returns:
            The amount of water pumped, in mL (0 if nothing was pumped).
        """
        if not self._should_pump(moisture):
            return 0
        self._pump.pump_water(self._pump_amount)
        self._timer.reset()
        return self._pump_amount

    def _should_pump(self, moisture):
        """Returns True if the pump should be run."""
        if not self._pump_scheduler.is_running_pump_allowed():
            return False
        soil_is_dry = moisture < self._moisture_threshold
        return soil_is_dry or self._timer.expired()
class PumpScheduler(object):
    """Controls when the pump is allowed to run."""

    def __init__(self, local_clock, sleep_windows):
        """Creates new PumpScheduler instance.

        Args:
            local_clock: A local clock interface.
            sleep_windows: A list of 2-tuples, each representing a sleep
                window. Tuple items are datetime.time objects.
        """
        self._local_clock = local_clock
        self._sleep_windows = sleep_windows

    def is_running_pump_allowed(self):
        """Returns True if OK to run pump, otherwise False.

        Pumping is forbidden from the start of a sleep window (inclusive)
        to its end (exclusive). A window whose end precedes its start is
        treated as wrapping past midnight.
        """
        now = self._local_clock.now().time()
        for sleep_start, wake_time in self._sleep_windows:
            wraps_midnight = wake_time < sleep_start
            if wraps_midnight:
                asleep = now >= sleep_start or now < wake_time
            else:
                asleep = sleep_start <= now < wake_time
            if asleep:
                return False
        return True
| [
"logging.getLogger"
] | [((25, 52), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (42, 52), False, 'import logging\n')] |
from setuptools import setup, find_packages
# Distribution metadata for the dune.common Python bindings.
_SETUP_KWARGS = dict(
    name='dune.common',
    namespace_packages=['dune'],
    version='2.4',
    description='Python package accompanying the DUNE project',
    url='http://www.dune-project.org',
    author='<NAME>',
    author_email='<EMAIL>',
    license='BSD',
    packages=[
        'dune.common',
        'dune.common.parametertree',
        'dune.common.modules',
    ],
    install_requires=['pyparsing>=2.1.10'],
)

setup(**_SETUP_KWARGS)
| [
"setuptools.setup"
] | [((45, 400), 'setuptools.setup', 'setup', ([], {'name': '"""dune.common"""', 'namespace_packages': "['dune']", 'version': '"""2.4"""', 'description': '"""Python package accompanying the DUNE project"""', 'url': '"""http://www.dune-project.org"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': '"""BSD"""', 'packages': "['dune.common', 'dune.common.parametertree', 'dune.common.modules']", 'install_requires': "['pyparsing>=2.1.10']"}), "(name='dune.common', namespace_packages=['dune'], version='2.4',\n description='Python package accompanying the DUNE project', url=\n 'http://www.dune-project.org', author='<NAME>', author_email='<EMAIL>',\n license='BSD', packages=['dune.common', 'dune.common.parametertree',\n 'dune.common.modules'], install_requires=['pyparsing>=2.1.10'])\n", (50, 400), False, 'from setuptools import setup, find_packages\n')] |
from unittest import TestCase
from inlinestyler.utils import inline_css
class TestUtils(TestCase):
    """Tests for inlinestyler.utils.inline_css."""

    def setUp(self):
        # Source document: a <style> rule that inline_css should move inline.
        self.the_document = '<html><head><style>.turn_red{ color: red; }</style></head><body><p class="turn_red">This text should be red.</p></body></html>'

    def test_inline_css(self):
        """inline_css moves the .turn_red rule onto the matching element."""
        expected = '<html>\n <head/>\n <body>\n <p class="turn_red" style="color: red">This text should be red.</p>\n </body>\n</html>\n'
        self.assertEqual(inline_css(self.the_document), expected)
| [
"inlinestyler.utils.inline_css"
] | [((495, 524), 'inlinestyler.utils.inline_css', 'inline_css', (['self.the_document'], {}), '(self.the_document)\n', (505, 524), False, 'from inlinestyler.utils import inline_css\n')] |
import datetime
import app.helpers.helpers
from app.controllers.api import record as record_api
from app.helpers import helpers
class TestRecord:
    # Shared auth header for every request against the API.
    headers = {"X-Api-Key": "123"}

    def get_record(self, records, type_):
        """Return the first record of the given type, or None if absent."""
        for record in records:
            if record["type"] == type_:
                return record

    def _setup_zone(self, client, mocker):
        """Create a user and the company.com zone (Kafka producer mocked).

        Adding the domain also creates the default SOA, NS and CNAME records.

        Returns:
            dict: JSON payload of the domain-creation response.
        """
        mocker.patch("app.helpers.producer.kafka_producer")
        mocker.patch("app.helpers.producer.send")
        # create user
        data = {"email": "<EMAIL>"}
        post_res = client.post("/api/user/add", data=data, headers=self.headers)
        user_id = post_res.get_json()["data"]["id"]
        # add domain
        data = {"zone": "company.com", "user_id": user_id}
        res = client.post("/api/domain/add", data=data, headers=self.headers)
        return res.get_json()

    def _list_domains(self, client):
        """Return the JSON payload of the domain list endpoint."""
        res = client.get("/api/domain/list", headers=self.headers)
        return res.get_json()

    def _default_cname_id(self, client):
        """Return the id of the default CNAME record of the first zone."""
        records = self._list_domains(client)["data"][0]["records"]
        return self.get_record(records, "CNAME")["id"]

    def test_list_no_Record(self, client):
        """Test if db contains no record."""
        json_data = self._list_domains(client)
        assert json_data["code"] == 404

    def test_add_record(self, client, mocker):
        """Test adding record from its endpoint.

        - Create a user and a domain (with default SOA, NS, CNAME created)
        - Add an A record
        - Query the db to assure it's created
        """
        create_domain_data = self._setup_zone(client, mocker)
        data = {
            "zone": "company.com",
            "owner": "host",
            "rtype": "A",
            "rdata": "1.1.1.1",
            "ttl": 7200,
        }
        res = client.post("/api/record/add", data=data, headers=self.headers)
        add_record_data = res.get_json()
        list_record_data = self._list_domains(client)
        assert create_domain_data["code"] == 201
        assert create_domain_data["data"]["zone"] == "company.com"
        assert add_record_data["code"] == 201
        assert add_record_data["data"]["owner"] == "host"
        assert add_record_data["data"]["rdata"] == "1.1.1.1"
        assert list_record_data["code"] == 200
        assert list_record_data["data"][0]["zone"] == "company.com"
        assert list_record_data["data"][0]["user"]["email"] == "<EMAIL>"

    def test_edit_record(self, client, mocker):
        """Test editing record from its endpoint.

        - Create a user and a domain (with default SOA, NS, CNAME created)
        - Edit the default CNAME record
        - Query the db to assure it's edited
        """
        self._setup_zone(client, mocker)
        cname_record_id = self._default_cname_id(client)
        data = {
            "zone": "company.com",
            "owner": "www_edit",
            "rtype": "CNAME",
            "rdata": "company_edited.com",
            "ttl": 3600,
        }
        res = client.put(
            f"/api/record/edit/{cname_record_id}", data=data, headers=self.headers
        )
        edit_record_data = res.get_json()
        list_record_data = self._list_domains(client)
        records = list_record_data["data"][0]["records"]
        edited_record_data = self.get_record(records, "CNAME")
        assert edit_record_data["code"] == 200
        assert edit_record_data["data"]["owner"] == "www_edit"
        assert list_record_data["code"] == 200
        assert edited_record_data["rdata"] == "company_edited.com"

    def test_edit_record_no_ttl_change(self, client, mocker):
        """Editing the CNAME record with completely unchanged values fails."""
        self._setup_zone(client, mocker)
        cname_record_id = self._default_cname_id(client)
        # Identical owner/rdata/ttl -> duplicate record, must be rejected.
        data = {
            "zone": "company.com",
            "owner": "www",
            "rtype": "CNAME",
            "rdata": "company.com.",
            "ttl": "3600",
        }
        res = client.put(
            f"/api/record/edit/{cname_record_id}", data=data, headers=self.headers
        )
        edit_record_data = res.get_json()
        assert edit_record_data["code"] == 409
        assert edit_record_data["message"] == "The record already exists"

    def test_edit_record_with_ttl_change(self, client, mocker):
        """Editing the CNAME record with a different TTL must succeed."""
        self._setup_zone(client, mocker)
        cname_record_id = self._default_cname_id(client)
        data = {
            "zone": "company.com",
            "owner": "www",
            "rtype": "CNAME",
            "rdata": "company.com.",
            "ttl": "300",
        }
        res = client.put(
            f"/api/record/edit/{cname_record_id}", data=data, headers=self.headers
        )
        edit_record_data = res.get_json()
        list_record_data = self._list_domains(client)
        records = list_record_data["data"][0]["records"]
        edited_record_data = self.get_record(records, "CNAME")
        assert edit_record_data["code"] == 200
        assert edit_record_data["data"]["ttl"] == "300"
        assert list_record_data["code"] == 200
        assert edited_record_data["ttl"] == "300"

    def test_delete_record(self, client, mocker):
        """Deleting the default CNAME record must remove it from the zone."""
        self._setup_zone(client, mocker)
        cname_record_id = self._default_cname_id(client)
        delete_res = client.delete(
            f"/api/record/delete/{cname_record_id}", headers=self.headers
        )
        records = self._list_domains(client)["data"][0]["records"]
        assert delete_res.status_code == 204
        # it must be 3 after deletion
        assert len(records) == 3

    def test_edit_record_no_ttl_change_MX(self, client, mocker):
        """Editing an MX record with unchanged values must be rejected."""
        self._setup_zone(client, mocker)
        data = {
            "zone": "company.com",
            "owner": "mx1",
            "rtype": "MX",
            "rdata": "10 mail.example.com.",
            "ttl": 7200,
        }
        res = client.post("/api/record/add", data=data, headers=self.headers)
        record_id = res.get_json()["data"]["id"]
        # Re-submit the identical record payload.
        res = client.put(
            f"/api/record/edit/{record_id}", data=data, headers=self.headers
        )
        edit_record_data = res.get_json()
        assert edit_record_data["code"] == 409
        assert edit_record_data["message"] == "The record already exists"

    def test_edit_record_with_ttl_change_MX(self, client, mocker):
        """Editing an MX record with a different TTL must succeed."""
        self._setup_zone(client, mocker)
        data = {
            "zone": "company.com",
            "owner": "mx1",
            "rtype": "MX",
            "rdata": "10 mail.example.com.",
            "ttl": 7200,
        }
        res = client.post("/api/record/add", data=data, headers=self.headers)
        record_id = res.get_json()["data"]["id"]
        res = client.put(
            f"/api/record/edit/{record_id}",
            data=dict(data, ttl=14400),
            headers=self.headers,
        )
        edit_record_data = res.get_json()
        list_record_data = self._list_domains(client)
        records = list_record_data["data"][0]["records"]
        edited_record_data = self.get_record(records, "MX")
        assert edit_record_data["code"] == 200
        assert edit_record_data["data"]["ttl"] == "14400"
        assert list_record_data["code"] == 200
        assert edited_record_data["ttl"] == "14400"

    def test_edit_record_respect_zone_limit(self, client, monkeypatch, mocker):
        """Test edit record respecting the zone serial limit of 99.

        - Create a user, a domain and a TXT record
        - Edit the record until the daily serial counter hits the limit
        - Verify further edits fail today but succeed after the date rolls over
        """
        self._setup_zone(client, mocker)
        data = {
            "zone": "company.com",
            "owner": "txt1",
            "rtype": "TXT",
            "rdata": "0",
            "ttl": 7200,
        }
        res = client.post("/api/record/add", data=data, headers=self.headers)
        record_id = res.get_json()["data"]["id"]
        # Each record edit increments the zone serial twice, so 50 edits are
        # enough to push the daily serial counter past 99.
        for increment_serial in range(50):
            data["rdata"] = f"{increment_serial}"
            res = client.put(
                f"/api/record/edit/{record_id}", data=data, headers=self.headers
            )
            edit_record_data = res.get_json()
        assert edit_record_data["code"] == 429
        assert edit_record_data["message"] == "Zone Change Limit Reached"
        # ensure correct serial
        serial_resource = record_api.get_serial_resource("company.com")
        today_date = helpers.soa_time_set()
        assert serial_resource["serial_counter"] == "98"
        assert serial_resource["serial_date"] == today_date
        assert serial_resource["serial"] == f"{today_date}98"
        # If the user waits until tomorrow, the counter resets.
        def fake_soa_time_set():
            tomorrow_date = datetime.datetime.now() + datetime.timedelta(days=1)
            return tomorrow_date.strftime("%Y%m%d")

        monkeypatch.setattr(app.helpers.helpers, "soa_time_set", fake_soa_time_set)
        data["rdata"] = "random text"
        res = client.put(
            f"/api/record/edit/{record_id}", data=data, headers=self.headers
        )
        edit_record_data = res.get_json()
        assert edit_record_data["code"] == 200
        # ensure correct serial
        serial_resource = record_api.get_serial_resource("company.com")
        today_date = helpers.soa_time_set()
        assert serial_resource["serial_counter"] == "03"
        assert serial_resource["serial_date"] == today_date
        assert serial_resource["serial"] == f"{today_date}03"
| [
"app.controllers.api.record.get_serial_resource",
"datetime.timedelta",
"app.helpers.helpers.soa_time_set",
"datetime.datetime.now"
] | [((14870, 14915), 'app.controllers.api.record.get_serial_resource', 'record_api.get_serial_resource', (['"""company.com"""'], {}), "('company.com')\n", (14900, 14915), True, 'from app.controllers.api import record as record_api\n'), ((14937, 14959), 'app.helpers.helpers.soa_time_set', 'helpers.soa_time_set', ([], {}), '()\n', (14957, 14959), False, 'from app.helpers import helpers\n'), ((15866, 15911), 'app.controllers.api.record.get_serial_resource', 'record_api.get_serial_resource', (['"""company.com"""'], {}), "('company.com')\n", (15896, 15911), True, 'from app.controllers.api import record as record_api\n'), ((15933, 15955), 'app.helpers.helpers.soa_time_set', 'helpers.soa_time_set', ([], {}), '()\n', (15953, 15955), False, 'from app.helpers import helpers\n'), ((15261, 15284), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (15282, 15284), False, 'import datetime\n'), ((15287, 15313), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (15305, 15313), False, 'import datetime\n')] |
import sympy as simp
from sympy import sin, pi
import pandas as pd
from labtool_ex2 import Project
from uncertainties import ufloat
if __name__ == "__main__":
    # LaTeX symbol name for each quantity (used in table/plot labels).
    gm = {
        "P1": r"P_1",
        "PR": r"P_R",
        "theoPR": r"P_R",
        "U1": r"U_1",
        "U2": r"U_2",
        "I1": r"I_1",
        "I2": r"I_2",
        "U": r"U",
        "Ur": r"U_r",
        "R": r"R_L",
        "S": r"S_1",
        "P2": r"P_2",
        "Q1": r"Q_1",
        "PV": r"P_V",
        "n": r"\eta",
        "l": r"\lambda",
        "a": r"a",
        "b": r"b",
        "c": r"c",
        "XL": r"X_L",
        "xl": r"x_l",
    }
    # siunitx unit string for each quantity.
    gv = {
        "P1": r"\si{\watt}",
        "PR": r"\si{\watt}",
        "theoPR": r"\si{\watt}",
        "U1": r"\si{\volt}",
        "U2": r"\si{\volt}",
        "I1": r"\si{\ampere}",
        "I2": r"\si{\ampere}",
        "U": r"\si{\volt}",
        "Ur": r"\si{\volt}",
        "R": r"\si{\ohm}",
        "S": r"\si{\va}",
        "P2": r"\si{\watt}",
        "Q1": r"\si{\var}",
        "PV": r"\si{\watt}",
        "n": r"\si{\percent}",
        "l": r"1",
        "a": r"1",
        "b": r"1",
        "c": r"1",
        "XL": r"\si{\ohm}",
        "xl": r"\si{\ohm}",
    }
    # NOTE(review): simp.var injects sympy symbols named after the keys of gv
    # into the global namespace; expressions such as `U1 * I1` below rely on
    # this injection, so do not "fix" the seemingly undefined names.
    simp.var(list(gv))
    P = Project("Trafo", global_variables=gv, global_mapping=gm, font=13)
    ax = P.figure.add_subplot()
    # Versuch 1
    P.load_data("./Data/Trafo/versuch1.csv")
    print(P.data)
    P.print_table(
        P.data[["P1", "dP1", "U", "dU", "U1", "dU1", "I1", "dI1", "U2", "dU2"]],
        name="messwertversuch1",
    )
    # Apparent power S, reactive power Q1 and power factor l for Versuch 1.
    S = U1 * I1
    P.data["S"] = P.apply_df(S)
    P.data["dS"] = P.apply_df_err(S)
    Q1 = (S ** 2 - P1 ** 2) ** 0.5
    P.data["Q1"] = P.apply_df(Q1)
    P.data["dQ1"] = P.apply_df_err(Q1)
    l = P1 / S
    P.data["l"] = P.apply_df(l)
    P.data["dl"] = P.apply_df_err(l)
    P.print_table(
        P.data[["S", "dS", "Q1", "dQ1", "l", "dl"]],
        name="wertversuch1",
    )
    # P2 = U2 * I2
    # P.data["P2"] = P.apply_df(P2)
    # P.data["dP2"] = P.apply_df_err(P2)
    # PV = P - P2
    # P.data["PV"] = P.apply_df(PV)
    # P.data["dPV"] = P.apply_df_err(PV)
    # n = P2 / P1
    # P.data["n"] = P.apply_df(n)
    # P.data["dn"] = P.apply_df_err(n)
    # Versuch 2
    simp.var(list(gv))
    P.load_data("./Data/Trafo/versuch2.csv", loadnew=True)
    print(P.data)
    P.print_table(
        P.data[
            [
                "P1",
                "dP1",
                "U1",
                "dU1",
                "I1",
                "dI1",
                "U2",
                "dU2",
                "I2",
                "dI2",
            ]
        ],
        name="messwertversuch2",
    )
    # Same derived quantities as Versuch 1, plus secondary power, losses
    # and efficiency (in percent).
    S = U1 * I1
    P.data["S"] = P.apply_df(S)
    P.data["dS"] = P.apply_df_err(S)
    Q1 = (S ** 2 - P1 ** 2) ** 0.5
    P.data["Q1"] = P.apply_df(Q1)
    P.data["dQ1"] = P.apply_df_err(Q1)
    l = P1 / S
    P.data["l"] = P.apply_df(l)
    P.data["dl"] = P.apply_df_err(l)
    P2 = U2 * I2
    P.data["P2"] = P.apply_df(P2)
    P.data["dP2"] = P.apply_df_err(P2)
    PV = P1 - P2
    P.data["PV"] = P.apply_df(PV)
    P.data["dPV"] = P.apply_df_err(PV)
    n = P2 / P1 * 100
    P.data["n"] = P.apply_df(n)
    P.data["dn"] = P.apply_df_err(n)
    P.print_table(
        P.data[
            ["S", "dS", "Q1", "dQ1", "l", "dl", "P2", "dP2", "PV", "dPV", "n", "dn"]
        ],
        name="wertversuch2",
    )
    # Versuch 3
    P.load_data("./Data/Trafo/versuch3.csv", loadnew=True)
    # Load resistance R and the power PR dissipated in it.
    R = Ur / I2
    PR = Ur * I2
    P.data["R"] = P.apply_df(R)
    P.data["dR"] = P.apply_df_err(R)
    P.data["PR"] = P.apply_df(PR)
    P.data["dPR"] = P.apply_df_err(PR)
    P.print_table(
        P.data[
            [
                "P1",
                "dP1",
                "U",
                "dU",
                "U1",
                "dU1",
                "I1",
                "dI1",
            ]
        ],
        name="messwertversuch3_1",
    )
    P.print_table(
        P.data[
            [
                "U2",
                "dU2",
                "I2",
                "dI2",
                "Ur",
                "dUr",
            ]
        ],
        name="messwertversuch3_2",
    )
    P.print_table(
        P.data[["PR", "dPR", "R", "dR"]],
        name="wertversuch3",
    )
    # Derived quantities for Versuch 3; here the efficiency is based on PR.
    S = U1 * I1
    P.data["S"] = P.apply_df(S)
    P.data["dS"] = P.apply_df_err(S)
    Q1 = (S ** 2 - P1 ** 2) ** 0.5
    P.data["Q1"] = P.apply_df(Q1)
    P.data["dQ1"] = P.apply_df_err(Q1)
    l = P1 / S
    P.data["l"] = P.apply_df(l)
    P.data["dl"] = P.apply_df_err(l)
    P2 = U2 * I2
    P.data["P2"] = P.apply_df(P2)
    P.data["dP2"] = P.apply_df_err(P2)
    PV = P1 - PR
    P.data["PV"] = P.apply_df(PV)
    P.data["dPV"] = P.apply_df_err(PV)
    n = PR / P1 * 100
    P.data["n"] = P.apply_df(n)
    P.data["dn"] = P.apply_df_err(n)
    P.print_table(
        P.data[
            [
                "S",
                "dS",
                "Q1",
                "dQ1",
                "l",
                "dl",
                "P2",
                "dP2",
                "PV",
                "dPV",
                "n",
                "dn",
            ]
        ],
        name="wertversuch3_extra",
    )
    P.plot_data(
        ax,
        "R",
        "PR",
        label="Gemessene Daten",
        style="r",
        errors=True,
    )
    # P.plot_data(
    #     ax,
    #     "R",
    #     "P2",
    #     label="Nice",
    #     style="r",
    #     errors=True,
    # )
    simp.var(list(gv))
    print(P.data)
    # Power transferred to the load: fit expression uses the symbol XL,
    # the theoretical curve uses the data column xl.
    PR = U2 ** 2 * R / (R ** 2 + (XL) ** 2)
    P.print_expr(PR)
    theoPR = U2 ** 2 * R / (R ** 2 + xl ** 2)
    P.data["theoPR"] = P.apply_df(theoPR)
    P.data["dtheoPR"] = P.apply_df_err(theoPR)
    P.plot_function(
        axes=ax,
        x="R",
        y="theoPR",
        expr=theoPR,
        label="theo. Leistungskurven",
        style="green",
        errors=True,
    )
    P.plot_fit(
        axes=ax,
        x="R",
        y="PR",
        eqn=PR,
        style="r",
        label="Leistungskurven",
        use_all_known=False,
        offset=[30, 10],
        guess={"U2": 24, "XL": 31},
        bounds=[
            {"name": "U2", "min": "0", "max": "25"},
            {"name": "L", "min": "30", "max": "32"},
        ],
        add_fit_params=True,
        granularity=10000,
    )
    # Quick sanity checks of the uncertainties formatting helpers.
    test = ufloat(68, 1.4)
    df = pd.DataFrame(
        {"x": [ufloat(11, 1) * 1e8, ufloat(11, 1)], "y": [ufloat(11, 1), ufloat(11, 1)]}
    )
    print(df)
    # arr = unumpy.uarray([1, 2], [0.01, 0.002])
    P.print_ftable(df, name="test", split=True)
    print(test.__format__("").split(r"+/-"))
    ax.set_title(f"Leistungskurve am Lastwiderstand")
    P.ax_legend_all(loc=1)
    P.figure.savefig(f"./Output/{P.name}/leistungskurve.png", dpi=400)
    P.figure.clear()
| [
"uncertainties.ufloat",
"labtool_ex2.Project"
] | [((1250, 1315), 'labtool_ex2.Project', 'Project', (['"""Trafo"""'], {'global_variables': 'gv', 'global_mapping': 'gm', 'font': '(13)'}), "('Trafo', global_variables=gv, global_mapping=gm, font=13)\n", (1257, 1315), False, 'from labtool_ex2 import Project\n'), ((6348, 6363), 'uncertainties.ufloat', 'ufloat', (['(68)', '(1.4)'], {}), '(68, 1.4)\n', (6354, 6363), False, 'from uncertainties import ufloat\n'), ((6423, 6436), 'uncertainties.ufloat', 'ufloat', (['(11)', '(1)'], {}), '(11, 1)\n', (6429, 6436), False, 'from uncertainties import ufloat\n'), ((6445, 6458), 'uncertainties.ufloat', 'ufloat', (['(11)', '(1)'], {}), '(11, 1)\n', (6451, 6458), False, 'from uncertainties import ufloat\n'), ((6460, 6473), 'uncertainties.ufloat', 'ufloat', (['(11)', '(1)'], {}), '(11, 1)\n', (6466, 6473), False, 'from uncertainties import ufloat\n'), ((6402, 6415), 'uncertainties.ufloat', 'ufloat', (['(11)', '(1)'], {}), '(11, 1)\n', (6408, 6415), False, 'from uncertainties import ufloat\n')] |
""" Reference: https://matheusfacure.github.io/python-causality-handbook/11-Propensity-Score.html#
"""
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import NearestNeighbors
import seaborn as sns
from matplotlib import pyplot as plt
from causalinference import CausalModel
# Silence pandas' SettingWithCopyWarning for the assignments below.
pd.options.mode.chained_assignment = None  # default='warn'
def get_pscore(data_frame, solver='liblinear'):
    """Estimate propensity scores with a logistic regression.

    The 'nudge' column is the treatment indicator and 'outcome' the target;
    every other column is used as a predictor.

    Args:
        data_frame (pandas.DataFrame): dataframe with input data
        solver (str): solver passed to sklearn's LogisticRegression

    Returns:
        pandas.DataFrame: copy of the input with an added 'pscore' column
    """
    covariates = data_frame.columns.drop(['nudge', 'outcome'])
    features = data_frame[covariates].to_numpy().astype('int')
    labels = data_frame['nudge'].to_numpy().astype('int')
    model = LogisticRegression(solver=solver).fit(features, labels)
    scores = model.predict_proba(data_frame[covariates].values)[:, 1]
    return data_frame.assign(pscore=scores)
def check_weights(data_ps):
    """Check if the sum of propensity score weights matches the sample size.

    Args:
        data_ps (pandas.DataFrame): dataframe with 'nudge' and 'pscore' columns

    Returns:
        tuple: sample size, treated size from weights, untreated size from weights
    """
    treated = data_ps.query("nudge==1")
    control = data_ps.query("nudge==0")
    # Inverse-probability weights: 1/p for treated, 1/(1-p) for control.
    weight_t = 1./treated["pscore"]
    weight_nt = 1./(1.-control["pscore"])
    print(f"Original sample size {data_ps.shape[0]}")
    print("Original treatment sample size", treated.shape[0])
    print("Original control sample size", control.shape[0])
    print(f"Weighted treatment sample size {round(sum(weight_t), 1)}")
    print(f"Weighted control sample size {round(sum(weight_nt), 1)}")
    return data_ps.shape[0], sum(weight_t), sum(weight_nt)
def plot_confounding_evidence(data_ps):
    """Use the propensity score to look for evidence of confounding.

    Shows one boxplot of the propensity score per value of each covariate
    ('age' and 'gender').

    Args:
        data_ps (pandas.DataFrame): dataframe with propensity score
    """
    for covariate in ("age", "gender"):
        sns.boxplot(x=covariate, y="pscore", data=data_ps)
        plt.title("Confounding Evidence")
        plt.show()
def plot_overlap(data_ps):
    """Check that there is overlap between the treated and untreated population.

    Args:
        data_ps (pandas.DataFrame): dataframe with propensity score
    """
    # Overlay the propensity-score histograms of both groups; a positivity
    # violation would show as score ranges covered by only one group.
    sns.distplot(data_ps.query("nudge==0")["pscore"], kde=False, label="Non Nudged")
    sns.distplot(data_ps.query("nudge==1")["pscore"], kde=False, label="Nudged")
    plt.title("Positivity Check")
    plt.legend()
    plt.show()
def get_ate(data_ps):
    """Get the ATE (without bias correction) as a difference of group means.

    Args:
        data_ps (pandas.DataFrame): dataframe with 'nudge' and 'outcome' columns

    Returns:
        float: average treatment effect (nudged mean minus control mean)
    """
    group_means = data_ps.groupby("nudge")["outcome"].mean()
    return group_means[1] - group_means[0]
def get_psw_ate(data_ps):
    """Get the propensity-score-weighted ATE.

    Uses the inverse-probability weight (nudge - pscore) / (pscore * (1 - pscore)),
    which is positive for treated rows and negative for control rows.

    Args:
        data_ps (pandas.DataFrame): dataframe with 'nudge', 'pscore', 'outcome'

    Returns:
        float: weighted average treatment effect
    """
    pscore = data_ps["pscore"]
    ipw = (data_ps["nudge"] - pscore) / (pscore * (1. - pscore))
    return np.mean(ipw * data_ps["outcome"])
def get_psm_ate(data_ps):
    """Get propensity score matched ATE using CausalModel.

    Results are printed, not returned.

    Args:
        data_ps (pandas.DataFrame): dataframe with propensity score
    """
    # Outcome Y, treatment indicator D, and the propensity score as the
    # single matching covariate X.
    cmodel = CausalModel(
        Y=data_ps["outcome"].values,
        D=data_ps["nudge"].values,
        X=data_ps[["pscore"]].values
    )
    # OLS estimate plus 1-nearest-neighbour matching with bias adjustment.
    cmodel.est_via_ols()
    cmodel.est_via_matching(matches=1, bias_adj=True)
    print(cmodel.estimates)
def match_ps(data_ps):
    """Match each nudged participant to the control with the nearest pscore.

    Args:
        data_ps (pandas.DataFrame): dataframe with propensity score of all
            participants ('nudge', 'outcome', 'pscore' columns)

    Returns:
        pandas.DataFrame: one row per nudged participant, with the matched
        control outcome in a 'control' column
    """
    treated = data_ps.reset_index()[data_ps["nudge"] == 1]
    control = data_ps.reset_index()[data_ps["nudge"] == 0]
    # Nearest-neighbour join on the propensity score.
    matched = pd.merge_asof(
        treated.sort_values('pscore'),
        control.sort_values('pscore'),
        on='pscore',
        direction='nearest',
        suffixes=['', '_control'],
    )
    keep = list(treated) + ['control']
    matched = matched.rename(columns={"outcome_control": "control"})
    # Restore the original row order and drop the bookkeeping columns.
    matched = matched[keep].sort_values('index').reset_index(drop=True).drop(
        columns=['index', 'nudge', 'pscore'])
    return matched
| [
"numpy.mean",
"causalinference.CausalModel",
"sklearn.linear_model.LogisticRegression",
"seaborn.boxplot",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((2070, 2116), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': '"""age"""', 'y': '"""pscore"""', 'data': 'data_ps'}), "(x='age', y='pscore', data=data_ps)\n", (2081, 2116), True, 'import seaborn as sns\n'), ((2121, 2154), 'matplotlib.pyplot.title', 'plt.title', (['"""Confounding Evidence"""'], {}), "('Confounding Evidence')\n", (2130, 2154), True, 'from matplotlib import pyplot as plt\n'), ((2159, 2169), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2167, 2169), True, 'from matplotlib import pyplot as plt\n'), ((2174, 2223), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': '"""gender"""', 'y': '"""pscore"""', 'data': 'data_ps'}), "(x='gender', y='pscore', data=data_ps)\n", (2185, 2223), True, 'import seaborn as sns\n'), ((2228, 2261), 'matplotlib.pyplot.title', 'plt.title', (['"""Confounding Evidence"""'], {}), "('Confounding Evidence')\n", (2237, 2261), True, 'from matplotlib import pyplot as plt\n'), ((2266, 2276), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2274, 2276), True, 'from matplotlib import pyplot as plt\n'), ((2643, 2672), 'matplotlib.pyplot.title', 'plt.title', (['"""Positivity Check"""'], {}), "('Positivity Check')\n", (2652, 2672), True, 'from matplotlib import pyplot as plt\n'), ((2677, 2689), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2687, 2689), True, 'from matplotlib import pyplot as plt\n'), ((2694, 2704), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2702, 2704), True, 'from matplotlib import pyplot as plt\n'), ((3462, 3498), 'numpy.mean', 'np.mean', (["(weight * data_ps['outcome'])"], {}), "(weight * data_ps['outcome'])\n", (3469, 3498), True, 'import numpy as np\n'), ((4082, 4184), 'causalinference.CausalModel', 'CausalModel', ([], {'Y': "data_ps['outcome'].values", 'D': "data_ps['nudge'].values", 'X': "data_ps[['pscore']].values"}), "(Y=data_ps['outcome'].values, D=data_ps['nudge'].values, X=\n data_ps[['pscore']].values)\n", (4093, 4184), False, 'from causalinference import 
CausalModel\n'), ((791, 824), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': 'solver'}), '(solver=solver)\n', (809, 824), False, 'from sklearn.linear_model import LogisticRegression\n')] |
import subprocess
import os
def download_task_model(task):
    """Fetch the pretrained checkpoint for *task* from S3 into a local temp dir.

    Copies the three TensorFlow checkpoint artifacts (.data-00000-of-00001,
    .meta, .index) of ``model.permanent-ckpt`` from the
    ``taskonomy-unpacked-oregon`` bucket into ``/home/ubuntu/temp/<task>``.
    """
    m_path = os.path.join('/home/ubuntu/s3', "model_log_final", task,
                          "logs/model.permanent-ckpt")
    dirs, fname = os.path.split(m_path)
    dst_dir = dirs.replace('/home/ubuntu/s3', "s3://taskonomy-unpacked-oregon")
    tmp_path = "/home/ubuntu/temp/{}".format(task)
    os.makedirs(tmp_path, exist_ok=True)  # replaces shelling out to `mkdir -p`
    # One `aws s3 cp` per checkpoint artifact instead of three copy-pasted
    # stanzas; the previously unused local `tmp_fname` was dropped.
    for suffix in ('.data-00000-of-00001', '.meta', '.index'):
        src = os.path.join(dst_dir, fname) + suffix
        subprocess.call("aws s3 cp {} {}".format(src, tmp_path), shell=True)
# Full set of Taskonomy task names whose pretrained models can be fetched.
list_of_tasks = 'autoencoder curvature denoise edge2d edge3d \
    keypoint2d keypoint3d colorization jigsaw \
    reshade rgb2depth rgb2mist rgb2sfnorm \
    room_layout segment25d segment2d vanishing_point_well_defined \
    segmentsemantic_rb class_1000 class_places impainting_whole'
# NOTE(review): this reassignment overrides the full list so only a single
# task is downloaded -- looks like a debugging leftover; confirm before removing.
list_of_tasks = 'impainting_whole'
list_of_tasks = list_of_tasks.split()
for t in list_of_tasks:
    download_task_model(t)
| [
"os.path.join",
"subprocess.call",
"os.path.split"
] | [((72, 161), 'os.path.join', 'os.path.join', (['"""/home/ubuntu/s3"""', '"""model_log_final"""', 'task', '"""logs/model.permanent-ckpt"""'], {}), "('/home/ubuntu/s3', 'model_log_final', task,\n 'logs/model.permanent-ckpt')\n", (84, 161), False, 'import os\n'), ((214, 235), 'os.path.split', 'os.path.split', (['m_path'], {}), '(m_path)\n', (227, 235), False, 'import os\n'), ((447, 476), 'os.path.join', 'os.path.join', (['tmp_path', 'fname'], {}), '(tmp_path, fname)\n', (459, 476), False, 'import os\n'), ((587, 630), 'subprocess.call', 'subprocess.call', (['aws_cp_command'], {'shell': '(True)'}), '(aws_cp_command, shell=True)\n', (602, 630), False, 'import subprocess\n'), ((726, 769), 'subprocess.call', 'subprocess.call', (['aws_cp_command'], {'shell': '(True)'}), '(aws_cp_command, shell=True)\n', (741, 769), False, 'import subprocess\n'), ((866, 909), 'subprocess.call', 'subprocess.call', (['aws_cp_command'], {'shell': '(True)'}), '(aws_cp_command, shell=True)\n', (881, 909), False, 'import subprocess\n'), ((543, 571), 'os.path.join', 'os.path.join', (['dst_dir', 'fname'], {}), '(dst_dir, fname)\n', (555, 571), False, 'import os\n'), ((682, 710), 'os.path.join', 'os.path.join', (['dst_dir', 'fname'], {}), '(dst_dir, fname)\n', (694, 710), False, 'import os\n'), ((822, 850), 'os.path.join', 'os.path.join', (['dst_dir', 'fname'], {}), '(dst_dir, fname)\n', (834, 850), False, 'import os\n')] |
import base64
import hashlib
import inspect
import logging
import os
import random
import string
import sys
import textwrap
import re
import uuid
import warnings
from datetime import datetime
from secrets import token_bytes
from time import sleep
from urllib.parse import urlencode, quote_plus, unquote_plus
from dotenv import load_dotenv
def append_random_string(target, length=6, prefix='-'):
return f'{target}{random_string(length, prefix)}'
def bytes_md5(bytes : bytes):
    """Return the hexadecimal MD5 digest of *bytes*."""
    digest = hashlib.md5(bytes)
    return digest.hexdigest()
def bytes_sha256(bytes : bytes):
    """Return the hexadecimal SHA-256 digest of *bytes*."""
    digest = hashlib.sha256(bytes)
    return digest.hexdigest()
def base64_to_bytes(bytes_base64):
    """Decode a base64 payload (str or bytes) into raw bytes."""
    raw = bytes_base64.encode() if type(bytes_base64) is str else bytes_base64
    return base64.decodebytes(raw)
def base64_to_str(target):
return bytes_to_str(base64_to_bytes(target))
def bytes_to_base64(bytes):
    """Encode *bytes* as an ASCII base64 string."""
    encoded = base64.b64encode(bytes)
    return encoded.decode()
def bytes_to_str(target, encoding='ascii'):
    """Decode *target* bytes into a string (ASCII by default)."""
    return target.decode(encoding)
def chunks(items:list, split: int):
    """Yield consecutive slices of *items* of length *split* (last may be
    shorter).  Yields nothing when *items* is falsy or *split* is not a
    positive number."""
    if not items or not split or split <= 0:
        return
    start = 0
    total = len(items)
    while start < total:
        yield items[start:start + split]
        start += split
def class_name(target):
if target:
return type(target).__name__
def class_functions(target):
functions = {}
for function_name, function_ref in inspect.getmembers(type(target), predicate=inspect.isfunction):
functions[function_name] = function_ref
return functions
def class_functions_names(target):
return list_set(class_functions(target))
def convert_to_number(value):
    """Best-effort conversion of *value* to a float.

    Strips a leading currency symbol (£, $, €) together with thousands
    separators before parsing.  Returns 0 for falsy input or anything that
    cannot be parsed as a number (backwards-compatible behaviour).
    """
    if not value:
        return 0
    try:
        if value[0] in ['£','$','€']:
            # keep only digits and the decimal point, e.g. "£1,234.5" -> "1234.5"
            return float(re.sub(r'[^\d.]', '', value))
        return float(value)
    except (TypeError, ValueError, IndexError, KeyError):
        # the original bare `except` also swallowed KeyboardInterrupt/SystemExit
        return 0
def date_now(use_utc=True, return_str=True):
value = date_time_now(use_utc=use_utc, return_str=False)
if return_str:
return date_to_str(date=value)
return value
def date_time_now(use_utc=True, return_str=True, milliseconds_numbers=0):
if use_utc:
value = datetime.utcnow()
else:
value = datetime.now()
if return_str:
return date_time_to_str(value, milliseconds_numbers=milliseconds_numbers)
return value
def date_time_to_str(date_time, date_time_format='%Y-%m-%d %H:%M:%S.%f', milliseconds_numbers=3):
date_time_str = date_time.strftime(date_time_format)
return time_str_milliseconds(datetime_str=date_time_str, datetime_format=date_time_format, milliseconds_numbers=milliseconds_numbers)
def date_to_str(date, date_format='%Y-%m-%d'):
    """Format a date/datetime as a string (ISO date by default)."""
    return format(date, date_format)
def time_str_milliseconds(datetime_str, datetime_format, milliseconds_numbers=0):
    """Trim a formatted timestamp so its fractional-second part keeps
    *milliseconds_numbers* digits (0 also drops the '.' separator).

    Returns *datetime_str* unchanged when the format has no '.%f' component
    or *milliseconds_numbers* falls outside 0..5.
    """
    if '.%f' not in datetime_format or not (-1 < milliseconds_numbers < 6):
        return datetime_str
    # %f renders six digits; cut the surplus (plus the dot when none remain)
    surplus = 6 - milliseconds_numbers
    if milliseconds_numbers == 0:
        surplus += 1
    return datetime_str[:-surplus]
def env_value(var_name):
return env_vars().get(var_name, None)
def env_vars():
"""
reload data from .env file and return dictionary with current environment variables
"""
load_dotenv()
vars = os.environ
data = {}
for key in vars:
data[key] = vars[key]
return data
def env_vars_list():
return list_set(env_vars())
def flist(target):
from osbot_utils.fluent.Fluent_List import Fluent_List
return Fluent_List(target)
def get_field(target, field, default=None):
    """Return ``getattr(target, field)`` or *default*.

    Falls back to *default* when *target* is None, the attribute is missing
    or raises, or the attribute's value is None.
    """
    if target is None:
        return default
    try:
        value = getattr(target, field)
    except Exception:
        # the original bare `except` also trapped SystemExit/KeyboardInterrupt
        return default
    return value if value is not None else default
def get_value(target, key, default=None):
    """Return ``target.get(key)`` or *default* when *target* is None or the
    stored value is None."""
    if target is None:
        return default
    value = target.get(key)
    return default if value is None else value
# todo: check if this should still be here
def get_random_color(max=5):
if max > 5: max = 5 # add support for more than 5 colors
colors = ['skyblue', 'darkseagreen', 'palevioletred', 'coral', 'darkgray']
return colors[random_number(0, max-1)]
def get_missing_fields(target,field):
    """Return the subset of names in *field* that are missing/None on *target*.

    *field* is an iterable of attribute names (the parameter name is kept
    singular for backward compatibility with existing keyword callers).
    """
    missing_fields = []
    # the original looped `for field in field`, shadowing the parameter
    for field_name in field:
        if get_field(target, field_name) is None:
            missing_fields.append(field_name)
    return missing_fields
def is_number(value):
    """True only for int/float values that convert cleanly to int, so
    float('nan')/float('inf') report False.  bool and str are never numbers
    here (exact type check)."""
    if type(value) not in (int, float):
        return False
    try:
        int(value)
    except:
        return False
    return True
def ignore_warning__unclosed_ssl():
warnings.filterwarnings("ignore", category=ResourceWarning, message="unclosed.*<ssl.SSLSocket.*>")
def last_letter(text):
    """Return the final character of a non-empty string, else None."""
    if not text or type(text) is not str:
        return None
    return text[-1]
def len_list(target):
    """Number of elements in *target* once materialized as a list."""
    items = list(target)
    return len(items)
def list_add(array : list, value):
    """Append *value* to *array* unless it is None; return *value*."""
    if value is None:
        return None
    array.append(value)
    return value
def list_contains_list(array : list, values):
    """True when every element of the list *values* occurs in *array*.

    Returns False when *array* is None or *values* is not a list.
    """
    if array is None or type(values) is not list:
        return False
    for item in values:
        # BUG FIX: the original tested `if item in array is False`, which
        # Python chains as `(item in array) and (array is False)` -- that is
        # never true, so missing items were never detected and the function
        # returned True for any list input.
        if item not in array:
            return False
    return True
def list_find(array:list, item):
    """Index of the first occurrence of *item* in *array*, or -1 when absent."""
    found = item in array
    return array.index(item) if found else -1
def list_get_field(values, field):
return [item.get(field) for item in values]
def list_index_by(values, index_by):
from osbot_utils.fluent.Fluent_Dict import Fluent_Dict
results = {}
if values and index_by:
for item in values:
results[item.get(index_by)] = item
return Fluent_Dict(results)
def list_group_by(values, group_by):
results = {}
for item in values:
value = item.get(group_by)
if results.get(value) is None: results[value] = []
results[value].append(item)
return results
def list_get(array, position=None, default=None):
    """Safe ``array[position]``: returns *default* unless *array* is a list,
    *position* is a non-negative int, and the index is in range."""
    if type(array) is not list:
        return default
    if type(position) is not int or position < 0:
        return default
    if position >= len(array):
        return default
    return array[position]
def list_pop(array:list, position=None, default=None):
    """Pop and return an element from *array* -- the last one unless an int
    *position* is supplied; returns *default* when nothing can be popped."""
    if not array:
        return default
    if type(position) is not int:
        return array.pop()
    if position < len(array):
        return array.pop(position)
    return default
def list_pop_and_trim(array, position=None):
value = array_pop(array,position)
if type(value) is str:
return trim(value)
return value
def list_set(target):
    """Sorted list of the unique elements of *target*; [] for falsy input."""
    if not target:
        return []
    return sorted(set(target))
def list_zip(*args):
    """Materialize ``zip(*args)`` as a list of tuples."""
    zipped = zip(*args)
    return list(zipped)
def list_set_dict(target):
return sorted(list(set(obj_dict(target))))
def list_filter(target_list, filter_function):
    """Elements of *target_list* for which *filter_function* is truthy."""
    return [item for item in target_list if filter_function(item)]
def list_sorted(target_list, key, descending=False):
return list(sorted(target_list, key= lambda x:x.get(key,None) ,reverse=descending))
def list_filter_starts_with(target_list, prefix):
return list_filter(target_list, lambda x: x.startswith(prefix))
def list_filter_contains(target_list, value):
return list_filter(target_list, lambda x: x.find(value) > -1)
def log_critical(message): logger().critical(message) # level 50
def log_debug (message): logger().debug (message) # level 10
def log_error (message): logger().error (message) # level 40
def log_info (message): logger().info (message) # level 20
def log_warning (message): logger().warning (message) # level 30
def log_to_console(level="INFO"):
logger_set_level(level)
logger_add_handler__console()
print() # add extra print so that in pytest the first line is not hidden
def log_to_file(level="INFO"):
logger_set_level(level)
return logger_add_handler__file()
def logger():
return logging.getLogger()
def logger_add_handler(handler):
logger().addHandler(handler)
def logger_add_handler__console():
logger_add_handler(logging.StreamHandler())
def logger_add_handler__file(log_file=None):
from osbot_utils.utils.Files import temp_file
log_file = log_file or temp_file(extension=".log")
logger_add_handler(logging.FileHandler(filename=log_file))
return log_file
#logging.basicConfig(level=logging.DEBUG, filename='myapp.log', format='%(asctime)s %(levelname)s:%(message)s')
def logger_set_level(level):
logger().setLevel(level)
def logger_set_level_critical(): logger_set_level('CRITICAL') # level 50
def logger_set_level_debug (): logger_set_level('DEBUG' ) # level 10
def logger_set_level_error (): logger_set_level('ERROR' ) # level 40
def logger_set_level_info (): logger_set_level('INFO' ) # level 20
def logger_set_level_warning (): logger_set_level('WARNING' ) # level 30
def lower(target : str):
    """Lower-cased *target*, or '' for falsy input."""
    return target.lower() if target else ""
def obj_data(target=None):
data = {}
for key,value in obj_items(target):
data[key] = value
return data
def obj_dict(target=None):
if target and hasattr(target,'__dict__'):
return target.__dict__
return {}
def obj_items(target=None):
return sorted(list(obj_dict(target).items()))
def obj_keys(target=None):
return sorted(list(obj_dict(target).keys()))
def obj_get_value(target=None, key=None, default=None):
return get_field(target=target, field=key, default=default)
def obj_values(target=None):
return list(obj_dict(target).values())
def size(target=None):
if target and hasattr(target, '__len__'):
return len(target)
return 0
def str_index(target:str, source:str):
try:
return target.index(source)
except:
return -1
def sys_path_python(python_folder='lib/python'):
return list_contains(sys.path, python_folder)
def str_md5(text : str):
if text:
return bytes_md5(text.encode())
return None
def none_or_empty(target,field):
if target and field:
value = target.get(field)
return (value is None) or value == ''
return True
def print_date_now(use_utc=True):
print(date_time_now(use_utc=use_utc))
def print_object_members(target, max_width=120, show_internals=False):
print()
print(f"Members for object: {target}"[:max_width])
print()
print(f"{'field':<20} | value")
print(f"{'-' * max_width}")
for name, val in inspect.getmembers(target):
if not show_internals and name.startswith("__"):
continue
print(f"{name:<20} | {val}"[:max_width])
def print_time_now(use_utc=True):
print(time_now(use_utc=use_utc))
def str_sha256(text: str):
if text:
return bytes_sha256(text.encode())
#return hashlib.sha256('{0}'.format(text).encode()).hexdigest()
return None
def time_delta_to_str(time_delta):
microseconds = time_delta.microseconds
milliseconds = int(microseconds / 1000)
total_seconds = int(time_delta.total_seconds())
return f'{total_seconds}s {milliseconds}ms'
def time_now(use_utc=True, milliseconds_numbers=1):
if use_utc:
datetime_now = datetime.utcnow()
else:
datetime_now = datetime.now()
return time_to_str(datetime_value=datetime_now,milliseconds_numbers=milliseconds_numbers)
def time_to_str(datetime_value, time_format='%H:%M:%S.%f', milliseconds_numbers=3):
time_str = datetime_value.strftime(time_format)
return time_str_milliseconds(datetime_str=time_str, datetime_format=time_format, milliseconds_numbers=milliseconds_numbers)
def timestamp_utc_now():
    """Current time as integer milliseconds since the Unix epoch.

    NOTE(review): ``datetime.utcnow()`` is naive, and ``.timestamp()`` treats
    a naive datetime as local time, so the absolute value is shifted by the
    local UTC offset -- presumably callers only compare these values to each
    other; confirm before relying on epoch alignment.
    """
    # an unreachable second `return` using strftime('%s') was removed
    return int(datetime.utcnow().timestamp() * 1000)
def timestamp_to_datetime(timestamp):
return datetime.fromtimestamp(timestamp/1000)
def to_string(target):
if target:
return str(target)
return ''
def random_bytes(length=24):
return token_bytes(length)
def random_filename(extension='.tmp', length=10):
from osbot_utils.utils.Files import file_extension_fix
extension = file_extension_fix(extension)
return '{0}{1}'.format(''.join(random.choices(string.ascii_lowercase + string.digits, k=length)) , extension)
def random_port(min=20000,max=65000):
return random_number(min, max)
def random_number(min=1,max=65000):
return random.randint(min, max)
def random_password(length=24, prefix=''):
password = prefix + ''.join(random.choices(string.ascii_lowercase +
string.ascii_uppercase +
string.punctuation +
string.digits ,
k=length))
# replace these chars with _ (to make prevent errors in command prompts and urls)
items = ['"', '\'', '`','\\','/','}','?','#',';',':']
for item in items:
password = password.replace(item, '_')
return password
def random_string(length=8,prefix=''):
return prefix + ''.join(random.choices(string.ascii_uppercase, k=length)).lower()
def random_string_and_numbers(length=6,prefix=''):
return prefix + ''.join(random.choices(string.ascii_uppercase + string.digits, k=length))
def random_text(prefix=None,length=12):
if prefix is None: prefix = 'text_'
if last_letter(prefix) != '_':
prefix += '_'
return random_string_and_numbers(length=length, prefix=prefix)
def random_uuid():
return str(uuid.uuid4())
def remove(target_string, string_to_remove): # todo: refactor to str_*
return replace(target_string, string_to_remove, '')
def remove_multiple_spaces(target):     # todo: refactor to str_*
    """Collapse every run of two or more spaces into a single space."""
    return re.sub(' {2,}', ' ', target)
def replace(target_string, string_to_find, string_to_replace): # todo: refactor to str_*
return target_string.replace(string_to_find, string_to_replace)
def remove_html_tags(html):
if html:
TAG_RE = re.compile(r'<[^>]+>')
return TAG_RE.sub('', html).replace(' ', ' ')
def split_lines(text):
return text.replace('\r\n','\n').split('\n')
def split_spaces(target):
return remove_multiple_spaces(target).split(' ')
def sorted_set(target : object):
if target:
return sorted(set(target))
return []
def str_to_base64(target):
return bytes_to_base64(str_to_bytes(target))
def str_to_bytes(target):
return target.encode()
def str_to_date(str_date, format='%Y-%m-%d %H:%M:%S.%f'):
return datetime.strptime(str_date,format)
def to_int(value, default=0):
    """``int(value)``, or *default* when the conversion fails."""
    try:
        result = int(value)
    except:
        result = default
    return result
def trim(target):
    """Whitespace-stripped *target* when it is a str, else ''."""
    if type(target) is not str:
        return ""
    return target.strip()
def under_debugger():
return 'pydevd' in sys.modules
def unique(target):
return list_set(target)
def url_encode(data):
if type(data) is str:
return quote_plus(data)
def url_decode(data):
if type(data) is str:
return unquote_plus(data)
def upper(target : str):
    """Upper-cased *target*, or '' for falsy input."""
    return target.upper() if target else ""
def wait(seconds):
sleep(seconds)
def word_wrap(text,length = 40):
    """Wrap *text* at *length* columns, joining the wrapped lines with '\\n'."""
    wrapped_lines = textwrap.wrap(text, length)
    return '\n'.join(wrapped_lines)
def word_wrap_escaped(text,length = 40):
if text:
return '\\n'.join(textwrap.wrap(text, length))
# Backwards-compatible aliases: older call sites reference the helpers above
# under these names, so keep both spellings exported.
array_find          = list_find
array_get           = list_get
array_pop           = list_pop
array_pop_and_trim  = list_pop_and_trim
array_add           = list_add

bytes_to_string     = bytes_to_str
convert_to_float    = convert_to_number
datetime_now        = date_time_now
list_contains       = list_filter_contains
new_guid            = random_uuid
obj_list_set        = obj_keys
str_lines           = split_lines
str_remove          = remove
random_id           = random_string
wait_for            = wait
| [
"logging.getLogger",
"logging.StreamHandler",
"re.compile",
"base64.b64encode",
"time.sleep",
"random.choices",
"textwrap.wrap",
"base64.decodebytes",
"osbot_utils.fluent.Fluent_Dict.Fluent_Dict",
"urllib.parse.quote_plus",
"inspect.getmembers",
"dotenv.load_dotenv",
"logging.FileHandler",
... | [((738, 770), 'base64.decodebytes', 'base64.decodebytes', (['bytes_base64'], {}), '(bytes_base64)\n', (756, 770), False, 'import base64\n'), ((3201, 3214), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (3212, 3214), False, 'from dotenv import load_dotenv\n'), ((3462, 3481), 'osbot_utils.fluent.Fluent_List.Fluent_List', 'Fluent_List', (['target'], {}), '(target)\n', (3473, 3481), False, 'from osbot_utils.fluent.Fluent_List import Fluent_List\n'), ((4634, 4737), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'ResourceWarning', 'message': '"""unclosed.*<ssl.SSLSocket.*>"""'}), "('ignore', category=ResourceWarning, message=\n 'unclosed.*<ssl.SSLSocket.*>')\n", (4657, 4737), False, 'import warnings\n'), ((5666, 5686), 'osbot_utils.fluent.Fluent_Dict.Fluent_Dict', 'Fluent_Dict', (['results'], {}), '(results)\n', (5677, 5686), False, 'from osbot_utils.fluent.Fluent_Dict import Fluent_Dict\n'), ((7940, 7959), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (7957, 7959), False, 'import logging\n'), ((10457, 10483), 'inspect.getmembers', 'inspect.getmembers', (['target'], {}), '(target)\n', (10475, 10483), False, 'import inspect\n'), ((11783, 11823), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['(timestamp / 1000)'], {}), '(timestamp / 1000)\n', (11805, 11823), False, 'from datetime import datetime\n'), ((11943, 11962), 'secrets.token_bytes', 'token_bytes', (['length'], {}), '(length)\n', (11954, 11962), False, 'from secrets import token_bytes\n'), ((12089, 12118), 'osbot_utils.utils.Files.file_extension_fix', 'file_extension_fix', (['extension'], {}), '(extension)\n', (12107, 12118), False, 'from osbot_utils.utils.Files import file_extension_fix\n'), ((12356, 12380), 'random.randint', 'random.randint', (['min', 'max'], {}), '(min, max)\n', (12370, 12380), False, 'import random\n'), ((13790, 13815), 're.sub', 're.sub', (['""" +"""', '""" """', 'target'], {}), "(' +', ' ', target)\n", (13796, 
13815), False, 'import re\n'), ((14572, 14607), 'datetime.datetime.strptime', 'datetime.strptime', (['str_date', 'format'], {}), '(str_date, format)\n', (14589, 14607), False, 'from datetime import datetime\n'), ((15179, 15193), 'time.sleep', 'sleep', (['seconds'], {}), '(seconds)\n', (15184, 15193), False, 'from time import sleep\n'), ((2111, 2128), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (2126, 2128), False, 'from datetime import datetime\n'), ((2155, 2169), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2167, 2169), False, 'from datetime import datetime\n'), ((8086, 8109), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (8107, 8109), False, 'import logging\n'), ((8234, 8261), 'osbot_utils.utils.Files.temp_file', 'temp_file', ([], {'extension': '""".log"""'}), "(extension='.log')\n", (8243, 8261), False, 'from osbot_utils.utils.Files import temp_file\n'), ((8285, 8323), 'logging.FileHandler', 'logging.FileHandler', ([], {'filename': 'log_file'}), '(filename=log_file)\n', (8304, 8323), False, 'import logging\n'), ((11173, 11190), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (11188, 11190), False, 'from datetime import datetime\n'), ((11224, 11238), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (11236, 11238), False, 'from datetime import datetime\n'), ((13519, 13531), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (13529, 13531), False, 'import uuid\n'), ((14038, 14059), 're.compile', 're.compile', (['"""<[^>]+>"""'], {}), "('<[^>]+>')\n", (14048, 14059), False, 'import re\n'), ((14970, 14986), 'urllib.parse.quote_plus', 'quote_plus', (['data'], {}), '(data)\n', (14980, 14986), False, 'from urllib.parse import urlencode, quote_plus, unquote_plus\n'), ((15051, 15069), 'urllib.parse.unquote_plus', 'unquote_plus', (['data'], {}), '(data)\n', (15063, 15069), False, 'from urllib.parse import urlencode, quote_plus, unquote_plus\n'), ((15249, 15276), 'textwrap.wrap', 
'textwrap.wrap', (['text', 'length'], {}), '(text, length)\n', (15262, 15276), False, 'import textwrap\n'), ((502, 520), 'hashlib.md5', 'hashlib.md5', (['bytes'], {}), '(bytes)\n', (513, 520), False, 'import hashlib\n'), ((578, 599), 'hashlib.sha256', 'hashlib.sha256', (['bytes'], {}), '(bytes)\n', (592, 599), False, 'import hashlib\n'), ((888, 911), 'base64.b64encode', 'base64.b64encode', (['bytes'], {}), '(bytes)\n', (904, 911), False, 'import base64\n'), ((12154, 12218), 'random.choices', 'random.choices', (['(string.ascii_lowercase + string.digits)'], {'k': 'length'}), '(string.ascii_lowercase + string.digits, k=length)\n', (12168, 12218), False, 'import random\n'), ((12457, 12572), 'random.choices', 'random.choices', (['(string.ascii_lowercase + string.ascii_uppercase + string.punctuation +\n string.digits)'], {'k': 'length'}), '(string.ascii_lowercase + string.ascii_uppercase + string.\n punctuation + string.digits, k=length)\n', (12471, 12572), False, 'import random\n'), ((13213, 13277), 'random.choices', 'random.choices', (['(string.ascii_uppercase + string.digits)'], {'k': 'length'}), '(string.ascii_uppercase + string.digits, k=length)\n', (13227, 13277), False, 'import random\n'), ((15359, 15386), 'textwrap.wrap', 'textwrap.wrap', (['text', 'length'], {}), '(text, length)\n', (15372, 15386), False, 'import textwrap\n'), ((1676, 1704), 're.sub', 're.sub', (['"""[^\\\\d.]"""', '""""""', 'value'], {}), "('[^\\\\d.]', '', value)\n", (1682, 1704), False, 'import re\n'), ((11639, 11656), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (11654, 11656), False, 'from datetime import datetime\n'), ((11692, 11709), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (11707, 11709), False, 'from datetime import datetime\n'), ((13075, 13123), 'random.choices', 'random.choices', (['string.ascii_uppercase'], {'k': 'length'}), '(string.ascii_uppercase, k=length)\n', (13089, 13123), False, 'import random\n')] |
#coding=utf-8
'''
Created on 2015-9-24
@author: Devuser
'''
from doraemon.project.models import Project
class ProjectLeftNavBar(object):
    '''
    Base left navigation bar for a project: pre-computes the href of every
    section (task, settings, fortesting, version, archive, issue, statistics)
    and loads the project record itself.
    '''
    def __init__(self,request,projectid):
        self.request=request
#         self.dashboard_href='/project/'+str(projectid)+'/dashboard'
        self.task_href='/project/'+str(projectid)+'/task/'
        self.settings_href='/project/'+str(projectid)+'/settings/basic'
        self.fortesting_href='/project/'+str(projectid)+'/fortesting'
        self.version_href='/project/'+str(projectid)+'/version'
        self.archive_href='/project/'+str(projectid)+'/archive/all'
        self.issue_href='/project/'+str(projectid)+'/issue/all'
        self.statistics_href='/project/'+str(projectid)+'/statistics/all'
        self.project=Project.objects.get(projectid)  # NOTE(review): Django's get() normally takes keyword lookups (e.g. id=projectid); confirm a positional arg works with this model manager
class menuitem(object):
    """Simple name/key pair used to render a custom left-nav menu entry."""

    def __init__(self, namevalue, keyvalue):
        self.name = namevalue
        self.key = keyvalue

    def get_name(self):
        """Display text of the menu entry."""
        return self.name

    def get_key(self):
        """Lookup key of the menu entry."""
        return self.key

    def get_id(self):
        """Hard-coded placeholder id, kept verbatim for compatibility."""
        return "123456"
class ProjectTaskLeftNavBar(ProjectLeftNavBar):
    '''
    Left nav bar for the task pages: rebuilds task_href with the requested
    sub action and flags the task entry as active.
    Requires args['sub_nav_action'] (raises KeyError when absent).
    '''
    def __init__(self,request,projectid,**args):
        ProjectLeftNavBar.__init__(self,request,projectid)
        self.request=request
        self.task_href='/project/'+str(projectid)+'/task/'+args['sub_nav_action']
        self.task_active="leftmeunactive"
        self.custom_menu_list=list()
class ProjectDashboardLeftNavBar(ProjectLeftNavBar):
    '''
    Left nav bar for the dashboard page: flags the dashboard entry as active.
    **args is accepted but unused here -- presumably kept for signature
    uniformity with the sibling nav bars; confirm before removing.
    '''
    def __init__(self,request,projectid,**args):
        ProjectLeftNavBar.__init__(self,request,projectid)
        self.request=request
        self.dashboard_active="leftmeunactive"
        self.custom_menu_list=list()
class ProjectSettingsLeftNavBar(ProjectLeftNavBar):
    '''
    Left nav bar for the settings pages: rebuilds settings_href with the
    requested sub action and flags the settings entry as active.
    Requires args['sub_nav_action'] (raises KeyError when absent).
    '''
    def __init__(self,request,projectid,**args):
        ProjectLeftNavBar.__init__(self,request,projectid)
        self.request=request
        self.settings_href='/project/'+str(projectid)+'/settings/'+args['sub_nav_action']
        self.settings_active="leftmeunactive"
        self.custom_menu_list=list()
class ProjectForTestingLeftNavBar(ProjectLeftNavBar):
    '''
    Left nav bar for the fortesting pages: rebuilds fortesting_href with the
    requested sub action and flags the fortesting entry as active.
    Requires args['sub_nav_action'] (raises KeyError when absent).
    '''
    def __init__(self,request,projectid,**args):
        ProjectLeftNavBar.__init__(self,request,projectid)
        self.request=request
        self.fortesting_href='/project/'+str(projectid)+'/fortesting/'+args['sub_nav_action']
        self.fortesting_active="leftmeunactive"
        self.custom_menu_list=list()
class ProjectArchiveLeftNavBar(ProjectLeftNavBar):
    '''
    Left nav bar for the archive pages: rebuilds archive_href with the
    requested sub action and flags the archive entry as active.
    Requires args['sub_nav_action'] (raises KeyError when absent).
    '''
    def __init__(self,request,projectid,**args):
        ProjectLeftNavBar.__init__(self,request,projectid)
        self.request=request
        self.archive_href='/project/'+str(projectid)+'/archive/'+args['sub_nav_action']
        self.archive_active="leftmeunactive"
        self.custom_menu_list=list()
class ProjectIssueLeftNavBar(ProjectLeftNavBar):
    '''
    Left nav bar for the issue pages: rebuilds issue_href with the requested
    sub action and flags the issue entry as active.
    Requires args['sub_nav_action'] (raises KeyError when absent).
    '''
    def __init__(self,request,projectid,**args):
        ProjectLeftNavBar.__init__(self,request,projectid)
        self.request=request
        self.issue_href='/project/'+str(projectid)+'/issue/'+args['sub_nav_action']
        self.issue_active="leftmeunactive"
        self.custom_menu_list=list()
class ProjectStatisticsLeftNavBar(ProjectLeftNavBar):
    '''
    Left nav bar for the statistics pages: rebuilds statistics_href with the
    requested sub action and flags the statistics entry as active.
    Requires args['sub_nav_action'] (raises KeyError when absent).
    '''
    def __init__(self,request,projectid,**args):
        ProjectLeftNavBar.__init__(self,request,projectid)
        self.request=request
        self.statistics_href='/project/'+str(projectid)+'/statistics/'+args['sub_nav_action']
        self.statistics_active="leftmeunactive"
        self.custom_menu_list=list()
class ProjectVersionLeftNavBar(ProjectLeftNavBar):
    '''
    Left nav bar for the version page: flags the version entry as active.
    Unlike its siblings it ignores args['sub_nav_action'] and keeps a fixed
    version href.
    '''
    def __init__(self,request,projectid,**args):
        ProjectLeftNavBar.__init__(self,request,projectid)
        self.request=request
        self.version_href='/project/'+str(projectid)+'/version'
        self.version_active="leftmeunactive"
        self.custom_menu_list=list()
| [
"doraemon.project.models.Project.objects.get"
] | [((803, 833), 'doraemon.project.models.Project.objects.get', 'Project.objects.get', (['projectid'], {}), '(projectid)\n', (822, 833), False, 'from doraemon.project.models import Project\n')] |
#By @Joel_Noob
from pyrogram import Client, filters
from config import Config
@Client.on_message(
    filters.private
    & filters.command("broadcast")
    & filters.user(Config.ADMINS)
    & filters.reply
)
async def broadcast_(c, m):
    """Handle /broadcast from an admin in a private chat.

    The command must be sent as a reply; the replied-to message becomes the
    broadcast payload, attributed to the issuing admin.
    """
    await c.start_broadcast(
        broadcast_message=m.reply_to_message, admin_id=m.from_user.id
    )
| [
"pyrogram.filters.command",
"pyrogram.filters.user"
] | [((162, 189), 'pyrogram.filters.user', 'filters.user', (['Config.ADMINS'], {}), '(Config.ADMINS)\n', (174, 189), False, 'from pyrogram import Client, filters\n'), ((127, 155), 'pyrogram.filters.command', 'filters.command', (['"""broadcast"""'], {}), "('broadcast')\n", (142, 155), False, 'from pyrogram import Client, filters\n')] |
import numpy as np
import theano
import theano.tensor as tensor
from utils import _p, numpy_floatX
from utils import ortho_weight, uniform_weight, zero_bias
""" Encoder using LSTM Recurrent Neural Network. """
def param_init_encoder(options, params, prefix='lstm_encoder'):
    """Initialize LSTM encoder parameters into *params*.

    options['n_x'] is the input dimension, options['n_h'] the hidden size.
    Adds three entries keyed by *prefix*: 'W' (input-to-hidden, n_x x 4*n_h),
    'U' (hidden-to-hidden, n_h x 4*n_h) and 'b' (bias, 4*n_h), where the four
    column groups correspond to the i, f, o and c gate pre-activations.
    """
    n_x = options['n_x']
    n_h = options['n_h']
    # input-to-hidden weights: one uniform block per gate, concatenated
    W = np.concatenate([uniform_weight(n_x,n_h),
                        uniform_weight(n_x,n_h),
                        uniform_weight(n_x,n_h),
                        uniform_weight(n_x,n_h)], axis=1)
    params[_p(prefix, 'W')] = W
    # recurrent weights: one orthogonal block per gate, concatenated
    U = np.concatenate([ortho_weight(n_h),
                        ortho_weight(n_h),
                        ortho_weight(n_h),
                        ortho_weight(n_h)], axis=1)
    params[_p(prefix, 'U')] = U
    params[_p(prefix,'b')] = zero_bias(4*n_h)
    # It is observed that setting a high initial forget gate bias for LSTMs can
    # give slightly better results (Le et al., 2015). Hence, the initial forget
    # gate bias (the second gate block, columns n_h:2*n_h) is set to 3.
    params[_p(prefix, 'b')][n_h:2*n_h] = 3*np.ones((n_h,)).astype(theano.config.floatX)
    return params
def encoder(tparams, state_below, mask, seq_output=False, prefix='lstm_encoder'):
    """ state_below: size of n_steps * n_samples * n_x

    Builds the symbolic LSTM recurrence over `state_below` with `mask`
    (n_steps * n_samples) zeroing out padded positions so their state is
    carried over unchanged.  Returns the full hidden sequence when
    seq_output is True, otherwise the last hidden state (n_samples * n_h).
    """
    n_steps = state_below.shape[0]
    n_samples = state_below.shape[1]
    n_h = tparams[_p(prefix,'U')].shape[0]
    def _slice(_x, n, dim):
        # pick gate block n (of width dim) out of the concatenated pre-activations
        if _x.ndim == 3:
            return _x[:, :, n*dim:(n+1)*dim]
        return _x[:, n*dim:(n+1)*dim]
    # input projection + bias is precomputed for all steps at once
    state_below_ = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + \
            tparams[_p(prefix, 'b')]
    def _step(m_, x_, h_, c_, U):
        # one LSTM step: gates i/f/o and candidate c from the 4*n_h pre-activation
        preact = tensor.dot(h_, U)
        preact += x_
        i = tensor.nnet.sigmoid(_slice(preact, 0, n_h))
        f = tensor.nnet.sigmoid(_slice(preact, 1, n_h))
        o = tensor.nnet.sigmoid(_slice(preact, 2, n_h))
        c = tensor.tanh(_slice(preact, 3, n_h))
        c = f * c_ + i * c
        # masked positions keep the previous cell/hidden state
        c = m_[:, None] * c + (1. - m_)[:, None] * c_
        h = o * tensor.tanh(c)
        h = m_[:, None] * h + (1. - m_)[:, None] * h_
        return h, c
    seqs = [mask, state_below_]
    rval, updates = theano.scan(_step,
                                sequences=seqs,
                                outputs_info=[tensor.alloc(numpy_floatX(0.),
                                                           n_samples,n_h),
                                              tensor.alloc(numpy_floatX(0.),
                                                           n_samples,n_h)],
                                non_sequences = [tparams[_p(prefix, 'U')]],
                                name=_p(prefix, '_layers'),
                                n_steps=n_steps,
                                strict=True)
    h_rval = rval[0]
    if seq_output:
        return h_rval
    else:
        # size of n_samples * n_h
        return h_rval[-1]
| [
"utils.zero_bias",
"utils.ortho_weight",
"numpy.ones",
"utils.uniform_weight",
"utils._p",
"theano.tensor.tanh",
"utils.numpy_floatX",
"theano.tensor.dot"
] | [((826, 844), 'utils.zero_bias', 'zero_bias', (['(4 * n_h)'], {}), '(4 * n_h)\n', (835, 844), False, 'from utils import ortho_weight, uniform_weight, zero_bias\n'), ((553, 568), 'utils._p', '_p', (['prefix', '"""W"""'], {}), "(prefix, 'W')\n", (555, 568), False, 'from utils import _p, numpy_floatX\n'), ((771, 786), 'utils._p', '_p', (['prefix', '"""U"""'], {}), "(prefix, 'U')\n", (773, 786), False, 'from utils import _p, numpy_floatX\n'), ((808, 823), 'utils._p', '_p', (['prefix', '"""b"""'], {}), "(prefix, 'b')\n", (810, 823), False, 'from utils import _p, numpy_floatX\n'), ((1725, 1742), 'theano.tensor.dot', 'tensor.dot', (['h_', 'U'], {}), '(h_, U)\n', (1735, 1742), True, 'import theano.tensor as tensor\n'), ((361, 385), 'utils.uniform_weight', 'uniform_weight', (['n_x', 'n_h'], {}), '(n_x, n_h)\n', (375, 385), False, 'from utils import ortho_weight, uniform_weight, zero_bias\n'), ((410, 434), 'utils.uniform_weight', 'uniform_weight', (['n_x', 'n_h'], {}), '(n_x, n_h)\n', (424, 434), False, 'from utils import ortho_weight, uniform_weight, zero_bias\n'), ((459, 483), 'utils.uniform_weight', 'uniform_weight', (['n_x', 'n_h'], {}), '(n_x, n_h)\n', (473, 483), False, 'from utils import ortho_weight, uniform_weight, zero_bias\n'), ((508, 532), 'utils.uniform_weight', 'uniform_weight', (['n_x', 'n_h'], {}), '(n_x, n_h)\n', (522, 532), False, 'from utils import ortho_weight, uniform_weight, zero_bias\n'), ((603, 620), 'utils.ortho_weight', 'ortho_weight', (['n_h'], {}), '(n_h)\n', (615, 620), False, 'from utils import ortho_weight, uniform_weight, zero_bias\n'), ((646, 663), 'utils.ortho_weight', 'ortho_weight', (['n_h'], {}), '(n_h)\n', (658, 663), False, 'from utils import ortho_weight, uniform_weight, zero_bias\n'), ((689, 706), 'utils.ortho_weight', 'ortho_weight', (['n_h'], {}), '(n_h)\n', (701, 706), False, 'from utils import ortho_weight, uniform_weight, zero_bias\n'), ((732, 749), 'utils.ortho_weight', 'ortho_weight', (['n_h'], {}), '(n_h)\n', (744, 749), 
False, 'from utils import ortho_weight, uniform_weight, zero_bias\n'), ((1048, 1063), 'utils._p', '_p', (['prefix', '"""b"""'], {}), "(prefix, 'b')\n", (1050, 1063), False, 'from utils import _p, numpy_floatX\n'), ((1656, 1671), 'utils._p', '_p', (['prefix', '"""b"""'], {}), "(prefix, 'b')\n", (1658, 1671), False, 'from utils import _p, numpy_floatX\n'), ((2088, 2102), 'theano.tensor.tanh', 'tensor.tanh', (['c'], {}), '(c)\n', (2099, 2102), True, 'import theano.tensor as tensor\n'), ((2703, 2724), 'utils._p', '_p', (['prefix', '"""_layers"""'], {}), "(prefix, '_layers')\n", (2705, 2724), False, 'from utils import _p, numpy_floatX\n'), ((1080, 1095), 'numpy.ones', 'np.ones', (['(n_h,)'], {}), '((n_h,))\n', (1087, 1095), True, 'import numpy as np\n'), ((1392, 1407), 'utils._p', '_p', (['prefix', '"""U"""'], {}), "(prefix, 'U')\n", (1394, 1407), False, 'from utils import _p, numpy_floatX\n'), ((1606, 1621), 'utils._p', '_p', (['prefix', '"""W"""'], {}), "(prefix, 'W')\n", (1608, 1621), False, 'from utils import _p, numpy_floatX\n'), ((2358, 2375), 'utils.numpy_floatX', 'numpy_floatX', (['(0.0)'], {}), '(0.0)\n', (2370, 2375), False, 'from utils import _p, numpy_floatX\n'), ((2503, 2520), 'utils.numpy_floatX', 'numpy_floatX', (['(0.0)'], {}), '(0.0)\n', (2515, 2520), False, 'from utils import _p, numpy_floatX\n'), ((2647, 2662), 'utils._p', '_p', (['prefix', '"""U"""'], {}), "(prefix, 'U')\n", (2649, 2662), False, 'from utils import _p, numpy_floatX\n')] |
# coding=utf-8
from plugins.auth.crypto.algo_base import BaseAlgorithm
from pbkdf2 import crypt
__author__ = '<NAME>'
class PBKDF2Algo(BaseAlgorithm):
    """Password hashing backend based on the pbkdf2 package's crypt()."""

    def check(self, hash, value, salt=None):
        """Return True if *value* re-hashes to *hash*.

        The stored hash doubles as the salt input, which is the standard
        pbkdf2-crypt verification idiom; the *salt* parameter is unused here.
        """
        return self.hash(value, hash) == hash

    def hash(self, value, salt=None):
        """Derive a PBKDF2 crypt string for *value* with 400 iterations."""
        return crypt(value, salt=salt, iterations=400)

    def gen_salt(self):
        """Return None: no separate salt is kept, it is embedded in the hash."""
        return None
| [
"pbkdf2.crypt"
] | [((301, 340), 'pbkdf2.crypt', 'crypt', (['value'], {'salt': 'salt', 'iterations': '(400)'}), '(value, salt=salt, iterations=400)\n', (306, 340), False, 'from pbkdf2 import crypt\n')] |
import numpy as np
import statistics as stat
treshold = 3
def removeFalseData(input_data):
    """Strip every element equal to False from *input_data* in place.

    Note: list membership and list.remove both use ``==``, so ``0`` and
    ``0.0`` are removed too (``0 == False``). Returns the same list object.
    """
    while False in input_data:
        input_data.remove(False)
    return input_data
def removeNegatives(input_data):
    """Return a new list containing only the strictly positive values."""
    return [item for item in input_data if item > 0]
def removeOutliers(input_data):
    """Return the values of *input_data* whose absolute distance from the
    median is strictly less than 500.

    Raises statistics.StatisticsError when *input_data* is empty (median of
    an empty sequence).

    Note: the previous version also computed np.mean/np.std for a z-score
    filter that was commented out; that dead computation has been removed.
    """
    med = stat.median(input_data)
    return [val for val in input_data if abs(val - med) < 500]
def filterData(input_data):
    """Drop False entries and outliers from *input_data*, return the mean."""
    cleaned = removeOutliers(removeFalseData(input_data))
    return np.mean(cleaned)
| [
"statistics.median",
"numpy.mean",
"numpy.std"
] | [((528, 551), 'statistics.median', 'stat.median', (['input_data'], {}), '(input_data)\n', (539, 551), True, 'import statistics as stat\n'), ((563, 582), 'numpy.mean', 'np.mean', (['input_data'], {}), '(input_data)\n', (570, 582), True, 'import numpy as np\n'), ((596, 614), 'numpy.std', 'np.std', (['input_data'], {}), '(input_data)\n', (602, 614), True, 'import numpy as np\n'), ((1078, 1094), 'numpy.mean', 'np.mean', (['dataset'], {}), '(dataset)\n', (1085, 1094), True, 'import numpy as np\n')] |
import encoding
from torch import nn
import torch
class FCN(nn.Module):
    """
    Wrapper around the encnet FCN model, exposing a (seg, aux, None) result
    tuple to match the psp/psanet interface used elsewhere.
    """

    def __init__(self, pretrained=True):
        super(FCN, self).__init__()
        self.model = encoding.models.get_model('FCN_ResNet50s_ADE', pretrained=pretrained)

    def forward(self, x, y=None):
        # y is accepted for interface compatibility but ignored.
        seg, aux = self.model.forward(x)
        return seg, aux, None
if __name__ == "__main__":
    # Smoke test: load the pretrained model onto the GPU and run a single
    # forward pass with a random 473x473 RGB input (requires CUDA).
    model = FCN(pretrained=True).to("cuda")
    x = torch.rand(size=(1, 3, 473, 473)).to("cuda")
    model.forward(x)
    print("Done!")
"encoding.models.get_model",
"torch.rand"
] | [((281, 350), 'encoding.models.get_model', 'encoding.models.get_model', (['"""FCN_ResNet50s_ADE"""'], {'pretrained': 'pretrained'}), "('FCN_ResNet50s_ADE', pretrained=pretrained)\n", (306, 350), False, 'import encoding\n'), ((558, 591), 'torch.rand', 'torch.rand', ([], {'size': '(1, 3, 473, 473)'}), '(size=(1, 3, 473, 473))\n', (568, 591), False, 'import torch\n')] |
# Requires the openpyxl package. Reads the workbook and prints the minimum
# value found in column B over rows 7 through 27 of the active sheet.
import openpyxl

wb = openpyxl.load_workbook('inputs/input12.xlsx')
sh = wb.active
values = [sh.cell(row=r, column=2).value for r in range(7, 28)]
print(min(values))
| [
"openpyxl.load_workbook"
] | [((201, 246), 'openpyxl.load_workbook', 'openpyxl.load_workbook', (['"""inputs/input12.xlsx"""'], {}), "('inputs/input12.xlsx')\n", (223, 246), False, 'import openpyxl\n')] |
from os.path import join as pjoin
from .utils import save_dict_to_json_file, load_dict_from_json_file
class Status(object):
    """Tracks training progress (epoch/update counters, elapsed time, done
    flag) and supports JSON (de)serialization via status.json.

    Note: ``trainer`` is held in memory but is NOT serialized by save()/load().
    """

    def __init__(self, trainer=None, starting_epoch=0, starting_update=0):
        self.current_epoch = starting_epoch
        self.current_update = starting_update
        self.current_update_in_epoch = 1
        self.trainer = trainer
        self.training_time = 0
        self.done = False
        self.extra = {}

    def increment_update(self):
        """Advance both the global and the in-epoch update counters."""
        self.current_update += 1
        self.current_update_in_epoch += 1

    def increment_epoch(self):
        """Advance the epoch counter and reset the in-epoch update counter."""
        self.current_epoch += 1
        self.current_update_in_epoch = 0

    def __repr__(self):
        # Bug fix: the template previously had no 'training_time' line while
        # format() received seven arguments, so the 'done' placeholder showed
        # training_time and 'extra' showed done (format() silently ignores
        # surplus arguments).
        return ('Status object with state :\n' +\
                ' current_epoch = {!r}\n' +\
                ' current_update = {!r}\n' +\
                ' current_update_in_epoch = {!r}\n' +\
                ' trainer = {!r}\n' +\
                ' training_time = {!r}\n' +\
                ' done = {!r}\n' +\
                ' extra = {!r}\n').format(self.current_epoch, self.current_update, self.current_update_in_epoch,
                                           self.trainer, self.training_time, self.done, self.extra)

    def save(self, savedir="./"):
        """Write the serializable state to <savedir>/status.json."""
        state = {"version": 1,
                 "current_epoch": self.current_epoch,
                 "current_update": self.current_update,
                 "current_update_in_epoch": self.current_update_in_epoch,
                 "training_time": self.training_time,
                 "done": self.done,
                 "extra": self.extra,
                 }
        save_dict_to_json_file(pjoin(savedir, 'status.json'), state)

    def load(self, loaddir="./"):
        """Restore the state previously written by save() from <loaddir>."""
        state = load_dict_from_json_file(pjoin(loaddir, 'status.json'))
        self.current_epoch = state["current_epoch"]
        self.current_update = state["current_update"]
        self.current_update_in_epoch = state["current_update_in_epoch"]
        self.training_time = state["training_time"]
        self.done = state["done"]
        self.extra = state["extra"]
| [
"os.path.join"
] | [((1604, 1633), 'os.path.join', 'pjoin', (['savedir', '"""status.json"""'], {}), "(savedir, 'status.json')\n", (1609, 1633), True, 'from os.path import join as pjoin\n'), ((1718, 1747), 'os.path.join', 'pjoin', (['loaddir', '"""status.json"""'], {}), "(loaddir, 'status.json')\n", (1723, 1747), True, 'from os.path import join as pjoin\n')] |
"""URL Configuration"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.views.generic import TemplateView
from django.conf.urls.static import static
# Root URLConf: admin site plus the users and import_excel app URLConfs,
# with media-file routes appended via static().
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include(('apps.users.urls', 'users'), namespace='users')),
    path('', include(('apps.import_excel.urls', 'import_excel'), namespace='import_excel')),
]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

# Mount django-debug-toolbar only when DEBUG is enabled.
if settings.DEBUG:
    import debug_toolbar
    urlpatterns += [
        path('__debug__/', include(debug_toolbar.urls)),
    ]
| [
"django.conf.urls.static.static",
"django.urls.path",
"django.urls.include"
] | [((451, 512), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (457, 512), False, 'from django.conf.urls.static import static\n'), ((242, 273), 'django.urls.path', 'path', (['"""admin/"""', 'admin.site.urls'], {}), "('admin/', admin.site.urls)\n", (246, 273), False, 'from django.urls import path, include\n'), ((288, 344), 'django.urls.include', 'include', (["('apps.users.urls', 'users')"], {'namespace': '"""users"""'}), "(('apps.users.urls', 'users'), namespace='users')\n", (295, 344), False, 'from django.urls import path, include\n'), ((364, 441), 'django.urls.include', 'include', (["('apps.import_excel.urls', 'import_excel')"], {'namespace': '"""import_excel"""'}), "(('apps.import_excel.urls', 'import_excel'), namespace='import_excel')\n", (371, 441), False, 'from django.urls import path, include\n'), ((608, 635), 'django.urls.include', 'include', (['debug_toolbar.urls'], {}), '(debug_toolbar.urls)\n', (615, 635), False, 'from django.urls import path, include\n')] |
'''We shall say that an n-digit number is pandigital if it makes use of all the
digits 1 to n exactly once. For example, 2143 is a 4-digit pandigital and is
also prime. What is the largest n-digit pandigital prime that exists?'''
import itertools
import functools
def pandigital_generator(n):
    """Yield every n-digit pandigital number (digits 1..n, each used once)."""
    for digits in itertools.permutations(range(1, n + 1)):
        yield functools.reduce(lambda acc, d: acc * 10 + d, digits)
def is_prime(n):
    """Return True if *n* is prime, by trial division up to sqrt(n).

    Bug fix: values below 2 (0, 1, negatives) are now correctly rejected;
    the previous version returned True for them because its loop body never
    ran.
    """
    if n < 2:
        return False
    for x in range(2, int(n ** .5) + 1):
        if n % x == 0:
            return False
    return True
def solve_p041():
    """Return the largest pandigital prime (Project Euler problem 41)."""
    for n in range(9, 0, -1):
        # Scan candidates from largest to smallest; the first prime wins.
        for candidate in sorted(pandigital_generator(n), reverse=True):
            if is_prime(candidate):
                return candidate
| [
"functools.reduce"
] | [((382, 429), 'functools.reduce', 'functools.reduce', (['(lambda x, y: x * 10 + y)', 'perm'], {}), '(lambda x, y: x * 10 + y, perm)\n', (398, 429), False, 'import functools\n')] |
"""
Client creation action.
~~~~~~~~~~~~~~~~~~~~~~~
"""
from .action import Action
from .action import ActionExecutionException
from .action import InvalidActionConfigurationException
from .utils import get_user_roles
from .utils import InvalidUserResponse
from .utils import process_user_roles
import requests
import urllib
class CreateClientAction(Action):
    # Creates or updates a Keycloak client from the action's JSON config,
    # reconciles its protocol mappers, and assigns service-account roles.

    @staticmethod
    def valid_deploy_env(deploy_env):
        """
        Returns True if the provided deployment environment is valid for this action, False otherwise
        :param deploy_env: The target deployment environment.
        :return: True always, as this action is valid for all environments.
        """
        return True

    def __init__(self, name, config_file_dir, action_config_json, *args, **kwargs):
        """
        Constructor.
        :param name: The action name.
        :param config_file_dir: The directory containing the configuration file
        :param action_config_json: The JSON configuration for this action
        """
        super(CreateClientAction, self).__init__(name, *args, **kwargs)
        self.action_config_json = action_config_json
        if 'realmName' not in action_config_json:
            raise InvalidActionConfigurationException('Configuration "{0}" missing property "realmName"'.format(name))
        self.realm_name = action_config_json['realmName']
        if 'client' not in action_config_json:
            raise InvalidActionConfigurationException('Configuration "{0}" missing property "client"'.format(name))
        self.client_data = action_config_json['client']
        self.client_id = self.client_data.get('clientId', None)
        if not self.client_id:
            raise InvalidActionConfigurationException('Client configuration for "{0}" missing property "clientId"'.format(name))

    def execute(self, keycloak_client):
        """
        Execute this action. In this case, attempt to create a client.
        :param keycloak_client: The client to use when interacting with Keycloak
        """
        # Process the client data: create the client if absent, otherwise
        # update it in place (upsert by clientId).
        print('==== Creating client "{0}" in realm "{1}"...'.format(self.client_id, self.realm_name))
        existing_client_data = self.get_client_by_client_id(self.realm_name, self.client_id, keycloak_client)
        if not existing_client_data:
            print('==== Client "{0}" does not exist, creating...'.format(self.client_id))
            client_creation_path = '/admin/realms/{0}/clients'.format(urllib.parse.quote(self.realm_name))
            create_response = keycloak_client.post(client_creation_path, json=self.client_data)
            if create_response.status_code == requests.codes.created:
                print('==== Client "{0}" created.'.format(self.client_id))
                # Re-fetch to obtain the server-assigned UUID and defaults.
                existing_client_data = self.get_client_by_client_id(self.realm_name, self.client_id, keycloak_client)
                client_uuid = existing_client_data['id']
            else:
                raise ActionExecutionException('Unexpected response for client creation request ({0})'.format(create_response.status_code))
        else:
            print('==== Client "{0}" exists, updating...'.format(self.client_id))
            client_uuid = existing_client_data['id']
            client_update_path = '/admin/realms/{0}/clients/{1}'.format(
                urllib.parse.quote(self.realm_name),
                urllib.parse.quote(client_uuid)
            )
            update_response = keycloak_client.put(client_update_path, json=self.client_data)
            if update_response.status_code == requests.codes.no_content:
                print('==== Client "{0}" updated.'.format(self.client_id))
            else:
                raise ActionExecutionException('Unexpected response for client update request ({0})'.format(update_response.status_code))
        # Now update the secret.
        if 'secret' in self.client_data:
            print('==== NOT updating client "{0}" secret, as it is currently broken...'.format(self.client_id))
        # NOTE: the following code is disabled because it requires a custom keycloak extension, which is not currently working
        if False and 'secret' in self.client_data:
            print('==== Updating client "{0}" secret...'.format(self.client_id))
            client_secret_update_path = '/realms/{0}/clients-custom/{1}/client-secret'.format(
                urllib.parse.quote(self.realm_name),
                urllib.parse.quote(client_uuid)
            )
            client_secret_update_response = keycloak_client.put(
                client_secret_update_path, json={'secret': self.client_data['secret']}
            )
            if client_secret_update_response.status_code == requests.codes.no_content:
                print('==== Client "{0}" secret updated.'.format(self.client_id))
            else:
                raise ActionExecutionException('Unexpected response for client secret update request ({0})'.format(
                    client_secret_update_response.status_code
                ))
        # We always need to process mappers, as Keycloak adds default mappers on client creation calls.
        self.update_protocol_mappers(existing_client_data, keycloak_client)
        # Process the service account roles.
        self.process_service_account_roles(client_uuid, self.action_config_json.get('roles', []), keycloak_client)

    def update_protocol_mappers(self, existing_client_data, keycloak_client):
        """
        Update the protocol mappers for the client.
        :param existing_client_data: The existing client data
        :param keycloak_client: The client to use when interacting with Keycloak
        """
        print('==== Processing client "{0}" protocol mappers...'.format(self.client_id))
        client_uuid = existing_client_data['id']
        existing_mappers = existing_client_data['protocolMappers']
        new_mappers = self.client_data['protocolMappers']
        # Mapper names are unique, so we can use that field to see what needs to be updated, created, or deleted.
        existing_mappers_by_name = self.mapper_list_to_map_by_name(existing_mappers)
        new_mappers_by_name = self.mapper_list_to_map_by_name(new_mappers)
        # See what needs to be created or updated.
        for name, config in new_mappers_by_name.items():
            if name in existing_mappers_by_name:
                self.update_protocol_mapper(client_uuid, existing_mappers_by_name[name]['id'], config, keycloak_client)
            else:
                self.create_protocol_mapper(client_uuid, config, keycloak_client)
        # See what needs to be deleted.
        for name, config in existing_mappers_by_name.items():
            if name not in new_mappers_by_name:
                self.delete_protocol_mapper(client_uuid, existing_mappers_by_name[name]['id'], name, keycloak_client)
        print('==== Processed client "{0}" protocol mappers.'.format(self.client_id))

    @staticmethod
    def mapper_list_to_map_by_name(mapper_list):
        """
        Convert a list of protocol mappers to a map of mappers keyed by name.
        :param mapper_list: The list to convert
        :return: The resulting map
        """
        by_name = {}
        for mapper in mapper_list:
            by_name[mapper['name']] = mapper
        return by_name

    def update_protocol_mapper(self, client_uuid, mapper_id, mapper_config, keycloak_client):
        """
        Update a protocol mapper.
        :param client_uuid: The UUID of the client
        :param mapper_id: the UUID of the mapper
        :param mapper_config: The mapper config to use in the update request
        :param keycloak_client: The client to use when interacting with Keycloak
        """
        print('==== Updating client "{0}" protocol mapper "{1}".'.format(self.client_id, mapper_config['name']))
        path = '/admin/realms/{0}/clients/{1}/protocol-mappers/models/{2}'.format(
            urllib.parse.quote(self.realm_name),
            urllib.parse.quote(client_uuid),
            urllib.parse.quote(mapper_id)
        )
        # The update endpoint requires the mapper's id in the payload as well.
        mapper_config['id'] = mapper_id
        update_response = keycloak_client.put(path, json=mapper_config)
        if update_response.status_code != requests.codes.no_content:
            raise ActionExecutionException('Unexpected response for client protocol mapper update request ({0})'.format(update_response.status_code))

    def create_protocol_mapper(self, client_uuid, mapper_config, keycloak_client):
        """
        Create a protocol mapper.
        :param client_uuid: The UUID of the client
        :param mapper_config: The mapper config to use in the create request
        :param keycloak_client: The client to use when interacting with Keycloak.
        :return:
        """
        print('==== Creating client "{0}" protocol mapper "{1}".'.format(self.client_id, mapper_config['name']))
        path = '/admin/realms/{0}/clients/{1}/protocol-mappers/models'.format(
            urllib.parse.quote(self.realm_name),
            urllib.parse.quote(client_uuid)
        )
        create_response = keycloak_client.post(path, json=mapper_config)
        if create_response.status_code != requests.codes.created:
            raise ActionExecutionException('Unexpected response for client protocol mapper create request ({0})'.format(create_response.status_code))

    def delete_protocol_mapper(self, client_uuid, mapper_id, mapper_name, keycloak_client):
        """
        Delete a protocol mapper.
        :param client_uuid: The UUID of the client
        :param mapper_id: the UUID of the mapper
        :param mapper_name: The name of the mapper
        :param keycloak_client: The client to use when interacting with Keycloak
        """
        print('==== Deleting client "{0}" protocol mapper "{1}".'.format(self.client_id, mapper_name))
        path = '/admin/realms/{0}/clients/{1}/protocol-mappers/models/{2}'.format(
            urllib.parse.quote(self.realm_name),
            urllib.parse.quote(client_uuid),
            urllib.parse.quote(mapper_id)
        )
        delete_response = keycloak_client.delete(path)
        if delete_response.status_code != requests.codes.no_content:
            raise ActionExecutionException('Unexpected response for client protocol mapper delete request ({0})'.format(delete_response.status_code))

    def get_service_account_user(self, client_uuid, keycloak_client):
        """
        Get the service account user for the client.
        :param client_uuid: The client UUID
        :param keycloak_client: The client to use when interacting with Keycloak
        :return: The service account user configuration, or None when not found
        """
        path = '/admin/realms/{0}/clients/{1}/service-account-user'.format(self.realm_name, client_uuid)
        get_response = keycloak_client.get(path)
        if get_response.status_code == requests.codes.ok:
            return get_response.json()
        if get_response.status_code == requests.codes.not_found:
            return None
        raise InvalidUserResponse('Unexpected user get response ({0})'.format(get_response.status_code))

    def process_service_account_roles(self, client_uuid, service_account_roles, keycloak_client):
        """
        Process the service account roles for the client.
        :param client_uuid: The client UUID
        :param service_account_roles: The roles to assign to the service account
        :param keycloak_client: The client to use when interacting with Keycloak
        """
        print('==== Processing client "{0}" service account roles...'.format(self.client_id))
        user_config = self.get_service_account_user(client_uuid, keycloak_client)
        if not user_config and len(service_account_roles) > 0:
            raise ActionExecutionException('No service account user found for client "{0}"'.format(self.client_id))
        # NOTE(review): if user_config is None and no roles are requested, the
        # next line would still raise TypeError — presumably a service account
        # user always exists at this point; confirm against Keycloak behavior.
        user_id = user_config['id']
        existing_roles = get_user_roles(self.realm_name, user_id, keycloak_client)
        process_user_roles(self.realm_name, user_id, existing_roles, service_account_roles, keycloak_client)
        print('==== Processed client "{0}" service account roles.'.format(self.client_id))
| [
"urllib.parse.quote"
] | [((8061, 8096), 'urllib.parse.quote', 'urllib.parse.quote', (['self.realm_name'], {}), '(self.realm_name)\n', (8079, 8096), False, 'import urllib\n'), ((8114, 8145), 'urllib.parse.quote', 'urllib.parse.quote', (['client_uuid'], {}), '(client_uuid)\n', (8132, 8145), False, 'import urllib\n'), ((8163, 8192), 'urllib.parse.quote', 'urllib.parse.quote', (['mapper_id'], {}), '(mapper_id)\n', (8181, 8192), False, 'import urllib\n'), ((9113, 9148), 'urllib.parse.quote', 'urllib.parse.quote', (['self.realm_name'], {}), '(self.realm_name)\n', (9131, 9148), False, 'import urllib\n'), ((9166, 9197), 'urllib.parse.quote', 'urllib.parse.quote', (['client_uuid'], {}), '(client_uuid)\n', (9184, 9197), False, 'import urllib\n'), ((10084, 10119), 'urllib.parse.quote', 'urllib.parse.quote', (['self.realm_name'], {}), '(self.realm_name)\n', (10102, 10119), False, 'import urllib\n'), ((10137, 10168), 'urllib.parse.quote', 'urllib.parse.quote', (['client_uuid'], {}), '(client_uuid)\n', (10155, 10168), False, 'import urllib\n'), ((10186, 10215), 'urllib.parse.quote', 'urllib.parse.quote', (['mapper_id'], {}), '(mapper_id)\n', (10204, 10215), False, 'import urllib\n'), ((2491, 2526), 'urllib.parse.quote', 'urllib.parse.quote', (['self.realm_name'], {}), '(self.realm_name)\n', (2509, 2526), False, 'import urllib\n'), ((3344, 3379), 'urllib.parse.quote', 'urllib.parse.quote', (['self.realm_name'], {}), '(self.realm_name)\n', (3362, 3379), False, 'import urllib\n'), ((3401, 3432), 'urllib.parse.quote', 'urllib.parse.quote', (['client_uuid'], {}), '(client_uuid)\n', (3419, 3432), False, 'import urllib\n'), ((4438, 4473), 'urllib.parse.quote', 'urllib.parse.quote', (['self.realm_name'], {}), '(self.realm_name)\n', (4456, 4473), False, 'import urllib\n'), ((4499, 4530), 'urllib.parse.quote', 'urllib.parse.quote', (['client_uuid'], {}), '(client_uuid)\n', (4517, 4530), False, 'import urllib\n')] |
from yuuhpizzakebab import app, admin_required, login_required
from .models import Pizza, Topping
from flask import render_template, session, redirect, url_for, request, flash
@app.route('/pizzas')
def list_pizzas():
    """Shows a list of pizzas.

    The 'selecting' query parameter is passed through to the template.
    """
    return render_template('pizza/pizzas.html',
                           pizzas=Pizza.get_all(),
                           selecting=request.args.get('selecting'))
@app.route('/pizza/create', methods=['GET', 'POST'])
@admin_required
def create_pizza():
    """Creates a new pizza.

    Creates a new pizza with POST and associates any selected toppings with it.
    Shows a form to fill with GET.
    """
    if request.method == 'POST':
        name = request.form['pizza_name']
        price = request.form['pizza_price']
        image_url = request.form['pizza_image_url']
        selected_toppings = request.form.getlist('toppings')

        p = Pizza(None, name, price, image_url, [])
        success = p.save()

        if not success:
            flash('Some fields need to be filled', 'alert-danger')
            # Bug fix: this previously passed pizza=pizza, but 'pizza' was
            # never defined in this function (NameError on the error path);
            # the newly constructed instance is 'p'.
            return render_template('pizza/edit_pizza.html',
                                   pizza=p,
                                   available_toppings=Topping.get_all())

        for t in selected_toppings:
            topping_id = int(t)
            p.add_topping(topping_id)

        return redirect(url_for('list_pizzas'))

    return render_template('pizza/edit_pizza.html',
                           available_toppings=Topping.get_all())
@app.route('/pizza/edit/<int:pizza_id>', methods=['GET', 'POST'])
@admin_required
def edit_pizza(pizza_id):
    """Edits a pizza.

    arguments:
    pizza_id -- id of the pizza

    Saves the information with POST.
    Shows a form to edit the contents with GET.
    """
    if request.method == 'POST':
        name = request.form['pizza_name']
        price = request.form['pizza_price']
        image_url = request.form['pizza_image_url']
        selected_toppings = request.form.getlist('toppings')

        p = Pizza(pizza_id, name, price, image_url, [])
        success = p.save()

        if not success:
            flash('Some fields need to be filled', 'alert-danger')
            return render_template('pizza/edit_pizza.html',
                                   pizza=p,
                                   available_toppings=Topping.get_all())

        # Clear the old topping associations before adding the new selection.
        p.remove_toppings()

        for t in selected_toppings:
            topping_id = int(t)
            p.add_topping(topping_id)

        return redirect(url_for('list_pizzas'))

    pizza = Pizza.get_by_id(pizza_id)

    # Unknown id: silently fall back to the pizza list.
    if not pizza:
        return redirect(url_for('list_pizzas'))

    return render_template('pizza/edit_pizza.html',
                           pizza=pizza,
                           available_toppings=Topping.get_all())
@app.route('/pizza/delete/<int:pizza_id>')
@admin_required
def delete_pizza(pizza_id):
    """Deletes a pizza and redirects back to the pizza list.

    arguments:
    pizza_id -- id of the pizza
    """
    Pizza.delete_by_id(pizza_id)
    flash('Removed pizza', 'alert-success')
    return redirect(url_for('list_pizzas'))
| [
"flask.request.args.get",
"flask.flash",
"yuuhpizzakebab.app.route",
"flask.request.form.getlist",
"flask.url_for"
] | [((180, 200), 'yuuhpizzakebab.app.route', 'app.route', (['"""/pizzas"""'], {}), "('/pizzas')\n", (189, 200), False, 'from yuuhpizzakebab import app, admin_required, login_required\n'), ((424, 475), 'yuuhpizzakebab.app.route', 'app.route', (['"""/pizza/create"""'], {'methods': "['GET', 'POST']"}), "('/pizza/create', methods=['GET', 'POST'])\n", (433, 475), False, 'from yuuhpizzakebab import app, admin_required, login_required\n'), ((1527, 1591), 'yuuhpizzakebab.app.route', 'app.route', (['"""/pizza/edit/<int:pizza_id>"""'], {'methods': "['GET', 'POST']"}), "('/pizza/edit/<int:pizza_id>', methods=['GET', 'POST'])\n", (1536, 1591), False, 'from yuuhpizzakebab import app, admin_required, login_required\n'), ((2836, 2877), 'yuuhpizzakebab.app.route', 'app.route', (['"""/pizza/delete/<int:pizza_id>"""'], {}), "('/pizza/delete/<int:pizza_id>')\n", (2845, 2877), False, 'from yuuhpizzakebab import app, admin_required, login_required\n'), ((3039, 3078), 'flask.flash', 'flash', (['"""Removed pizza"""', '"""alert-success"""'], {}), "('Removed pizza', 'alert-success')\n", (3044, 3078), False, 'from flask import render_template, session, redirect, url_for, request, flash\n'), ((864, 896), 'flask.request.form.getlist', 'request.form.getlist', (['"""toppings"""'], {}), "('toppings')\n", (884, 896), False, 'from flask import render_template, session, redirect, url_for, request, flash\n'), ((1998, 2030), 'flask.request.form.getlist', 'request.form.getlist', (['"""toppings"""'], {}), "('toppings')\n", (2018, 2030), False, 'from flask import render_template, session, redirect, url_for, request, flash\n'), ((3100, 3122), 'flask.url_for', 'url_for', (['"""list_pizzas"""'], {}), "('list_pizzas')\n", (3107, 3122), False, 'from flask import render_template, session, redirect, url_for, request, flash\n'), ((390, 419), 'flask.request.args.get', 'request.args.get', (['"""selecting"""'], {}), "('selecting')\n", (406, 419), False, 'from flask import render_template, session, redirect, 
url_for, request, flash\n'), ((1014, 1068), 'flask.flash', 'flash', (['"""Some fields need to be filled"""', '"""alert-danger"""'], {}), "('Some fields need to be filled', 'alert-danger')\n", (1019, 1068), False, 'from flask import render_template, session, redirect, url_for, request, flash\n'), ((1382, 1404), 'flask.url_for', 'url_for', (['"""list_pizzas"""'], {}), "('list_pizzas')\n", (1389, 1404), False, 'from flask import render_template, session, redirect, url_for, request, flash\n'), ((2152, 2206), 'flask.flash', 'flash', (['"""Some fields need to be filled"""', '"""alert-danger"""'], {}), "('Some fields need to be filled', 'alert-danger')\n", (2157, 2206), False, 'from flask import render_template, session, redirect, url_for, request, flash\n'), ((2545, 2567), 'flask.url_for', 'url_for', (['"""list_pizzas"""'], {}), "('list_pizzas')\n", (2552, 2567), False, 'from flask import render_template, session, redirect, url_for, request, flash\n'), ((2651, 2673), 'flask.url_for', 'url_for', (['"""list_pizzas"""'], {}), "('list_pizzas')\n", (2658, 2673), False, 'from flask import render_template, session, redirect, url_for, request, flash\n')] |
# External module dependencies
from dataclasses import dataclass, field
from typing import Any, ParamSpec, Callable, Tuple, List, Dict, Set
from pathlib import Path
import hashlib
import json
import yaml
# Internal module dependencies
from . import dataspec
###############################################################################
# Datatypes
###############################################################################
@dataclass
class Task:
    # Human-readable description of this task.
    desc: str
    # Name of the proc (command template) this task runs; must exist in Agenda.procs.
    proc: str
    # Names of the flows this task participates in.
    flows: List[str]
    # Proc parameter name -> list of argument strings substituted into the template.
    args: Dict[str, List[str]]
    # Input file paths (relative; anchored under the target dir at compile time).
    inputs: List[str]
    # Output file paths (relative; anchored under the target dir at compile time).
    outputs: List[str]
@dataclass
class Agenda:
    # Proc name -> command template tokens ('$name' tokens mark parameters).
    procs: Dict[str, List[str]] = field(default_factory = dict)
    # Flow name -> ordered stages, each stage being a list of proc names.
    flows: Dict[str, List[List[str]]] = field(default_factory = dict)
    # The tasks to compile and schedule.
    tasks: List[Task] = field(default_factory = list)
@dataclass
class CompiledTask:
    # Content hash identifying this task (md5 over its canonical JSON form).
    hash: str
    # Human-readable description, copied from Task.desc.
    description: str
    # Flow name -> stage index that this task's proc occupies in that flow.
    flows: Dict[str, int]
    # Fully substituted command-line tokens, ready to execute.
    command: List[str]
    # Input/output paths anchored under the target directory.
    inputs: Set[Path]
    outputs: Set[Path]

# A compiled agenda is simply the list of compiled tasks.
CompiledAgenda = List[CompiledTask]
###############################################################################
# Functions
###############################################################################
def load(agenda_path : Path) -> Agenda:
    """Read the YAML file at *agenda_path* and decode it into an Agenda."""
    assert isinstance(agenda_path, Path)
    with agenda_path.open('r') as agenda_file:
        raw_data = yaml.safe_load(agenda_file)
    return dataspec.decode(Agenda, raw_data)
def store(agenda_path : Path, agenda_data : Agenda):
    """Encode *agenda_data* and write it to *agenda_path* as YAML."""
    assert isinstance(agenda_data, Agenda)
    with agenda_path.open('w+') as agenda_file:
        raw_data = dataspec.encode(Agenda, agenda_data)
        # Block style (default_flow_style=False) keeps the file human-editable.
        yaml.dump(
            raw_data,
            agenda_file,
            width = 80,
            indent = 2,
            default_flow_style = False
        )
P = ParamSpec('P')
def compile(target_dir : Path, agenda_data : Agenda) -> CompiledAgenda:
    """Resolve an Agenda into a list of CompiledTask entries.

    Each proc template is compiled into a callable that substitutes keyword
    arguments into the command line; every task's proc/flow references are
    validated; input/output paths are anchored under *target_dir*.

    :param target_dir: Directory the task input/output paths are relative to.
    :param agenda_data: The parsed agenda.
    :raises RuntimeError: On an undefined proc or flow reference, or a proc
        assigned to more than one stage of the same flow.
    :raises TypeError: When a task omits a required proc argument.
    """
    def _compile_proc(template : List[str]) -> Callable[P, List[str]]:
        # Turn a token template ('$name' marks a parameter) into a function
        # that substitutes keyword arguments into the command line.
        def _compile(template : List[str]) -> Tuple[List[str], str]:
            params : List[str] = []
            parts : List[str] = []
            for part in template:
                if len(part) == 0: continue
                if part.startswith('$'):
                    params.append(part[1:])
                    parts.append('%s')
                else:
                    parts.append(part)
            return params, ' '.join(parts)
        params, command = _compile(template)
        def _join(inputs : List[str]) -> str:
            # Quote arguments that contain spaces before joining them.
            def _wrap(input : str) -> str:
                if ' ' not in input: return input
                return '\"%s\"' % input
            return ' '.join([ _wrap(input) for input in inputs ])
        def _split(input : str) -> List[str]:
            # Inverse of _join: split on spaces while keeping quoted spans intact.
            result : List[str] = []
            subresult = ''
            worklist = input.split(' ')
            while len(worklist) != 0:
                part = worklist.pop(0)
                if len(subresult) != 0:
                    subresult = '%s %s' % (subresult, part)
                    if part.endswith('\"'):
                        result.append(subresult[1:-1])
                        subresult = ''
                    continue
                if part.startswith('\"'):
                    subresult += part
                    continue
                result.append(part)
            return result
        def _apply(**args : Any) -> List[str]:
            for param in params:
                if param in args: continue
                raise TypeError('Missing required argument \"%s\"' % param)
            return list(filter(
                lambda part: part != '',
                _split(command % tuple(
                    _join(args[param])
                    for param in params
                ))
            ))
        return _apply
    def _task_hash(task_data : Task) -> str:
        # Stable content hash; sorted keys/lists make it order-independent.
        data = json.dumps({
            'proc': task_data.proc,
            'args': task_data.args,
            'flows': list(sorted(task_data.flows)),
            'inputs': list(sorted(task_data.inputs)),
            'outputs': list(sorted(task_data.outputs))
        }, sort_keys = True)
        return hashlib.md5(data.encode('utf-8')).hexdigest()

    # Compile procs
    procs = {
        name : _compile_proc(proc)
        for name, proc in agenda_data.procs.items()
    }

    # Flow and stage to proc mapping
    flow_proc_stage : Dict[str, Dict[str, int]] = {}
    for flow in agenda_data.flows:
        if flow not in flow_proc_stage: flow_proc_stage[flow] = dict()
        for index, stage in enumerate(agenda_data.flows[flow]):
            for proc in stage:
                if proc not in procs:
                    raise RuntimeError(
                        'Undefined proc \"%s\" in stage %d of flow %s' % (
                            proc, index + 1, flow
                        )
                    )
                if proc in flow_proc_stage[flow]:
                    raise RuntimeError(
                        'Proc \"%s\" reserved for stage %d of flow %s' % (
                            proc, flow_proc_stage[flow][proc] + 1, flow
                        )
                    )
                flow_proc_stage[flow][proc] = index

    # Parse tasks
    agenda : CompiledAgenda = []
    for task_data in agenda_data.tasks:
        if task_data.proc not in procs:
            raise RuntimeError('Undefined proc \"%s\" for task \"%s\"' % (
                task_data.proc, task_data.desc
            ))
        for flow in task_data.flows:
            if flow not in flow_proc_stage:
                # Bug fix: the format string had only one '%s' for two
                # arguments, so this raise itself crashed with a TypeError
                # instead of reporting the undefined flow.
                raise RuntimeError('Undefined flow \"%s\" for task \"%s\"' % (
                    flow, task_data.desc
                ))
        agenda.append(CompiledTask(
            hash = _task_hash(task_data),
            description = task_data.desc,
            flows = {
                flow : flow_proc_stage[flow][task_data.proc]
                for flow in task_data.flows
            },
            command = procs[task_data.proc](**task_data.args),
            inputs = {
                Path(target_dir, input)
                for input in set(task_data.inputs)
            },
            outputs = {
                Path(target_dir, output)
                for output in set(task_data.outputs)
            }
        ))

    # Done
    return agenda
| [
"pathlib.Path",
"yaml.dump",
"yaml.safe_load",
"dataclasses.field",
"typing.ParamSpec"
] | [((1758, 1772), 'typing.ParamSpec', 'ParamSpec', (['"""P"""'], {}), "('P')\n", (1767, 1772), False, 'from typing import Any, ParamSpec, Callable, Tuple, List, Dict, Set\n'), ((640, 667), 'dataclasses.field', 'field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (645, 667), False, 'from dataclasses import dataclass, field\n'), ((710, 737), 'dataclasses.field', 'field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (715, 737), False, 'from dataclasses import dataclass, field\n'), ((764, 791), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (769, 791), False, 'from dataclasses import dataclass, field\n'), ((1312, 1339), 'yaml.safe_load', 'yaml.safe_load', (['agenda_file'], {}), '(agenda_file)\n', (1326, 1339), False, 'import yaml\n'), ((1598, 1676), 'yaml.dump', 'yaml.dump', (['raw_data', 'agenda_file'], {'width': '(80)', 'indent': '(2)', 'default_flow_style': '(False)'}), '(raw_data, agenda_file, width=80, indent=2, default_flow_style=False)\n', (1607, 1676), False, 'import yaml\n'), ((6025, 6048), 'pathlib.Path', 'Path', (['target_dir', 'input'], {}), '(target_dir, input)\n', (6029, 6048), False, 'from pathlib import Path\n'), ((6155, 6179), 'pathlib.Path', 'Path', (['target_dir', 'output'], {}), '(target_dir, output)\n', (6159, 6179), False, 'from pathlib import Path\n')] |
#!/usr/bin/env python
# coding:utf-8
"""
@version: 3.4.4
@author: linfeng
@file: crawl_video.py
@time: 2019/3/16 16:04
"""
import os
import re
import requests
from multiprocessing import Process
from urllib.parse import urlparse
from base_crawl import BaseCrawl
class CrawlVideo(Process, BaseCrawl):
    """Download a Panda.tv VOD by fetching its m3u8 playlist and appending
    every .ts segment, in order, into a single local .mp4 file.

    Runs as a separate process: ``start()`` triggers :meth:`run`, which
    downloads the playlist and then each segment it lists.
    ``self.headers`` and ``self.download_path`` are assumed to be provided
    by ``BaseCrawl`` — TODO confirm against base_crawl.py.
    """

    def __init__(self, m3u8_url: str, title: str, id: int) -> None:
        """
        :param m3u8_url: playlist URL, e.g.
            https://pl-vod28.live.panda.tv/transcode/.../index.m3u8
        :param title: raw video title; sanitized into a valid filename
        :param id: crawler id forwarded to BaseCrawl
        """
        Process.__init__(self)
        BaseCrawl.__init__(self, id)
        self.url = m3u8_url
        self.path_url = self.__get_path_url()
        # Strip characters that are invalid in filenames: / \ : * ? " < > |
        rstr = r"[\/\\\:\*\?\"\<\>\|]"
        title = re.sub(rstr, "", title)
        # Also drop whitespace and punctuation that survives the regex.
        self.title = "".join(title.split()).replace("!", "").replace("?", "") + ".mp4"

    def run(self):
        print("开始爬取:", self.title)
        m3u8_content = requests.get(self.url, headers=self.headers).text
        self.__parse_m3u8(m3u8_content)

    def __parse_m3u8(self, m3u8_content: str):
        """Walk the playlist and download every segment named after #EXTINF."""
        if "#EXTM3U" not in m3u8_content:
            print("m3u8 内容有误")
            return
        content_list = m3u8_content.split("\n")
        for index, item in enumerate(content_list):
            if "#EXTINF" in item:
                # The segment file name is on the line following #EXTINF.
                url = self.__get_ts_url(content_list[index + 1])
                self.__loop_try(url)
        print("爬取%s 完毕" % self.title)

    def __loop_try(self, url):
        """Download one segment, retrying forever on failure.

        BUGFIX: the original streamed chunks straight into the output file
        (opened in append mode) *inside* the retry loop, so an attempt that
        failed mid-stream left a partial prefix in the file and the retry
        appended the segment again, corrupting the output.  The segment is
        now buffered in memory and appended only once it downloaded
        completely.
        """
        file_path = os.path.join(self.download_path, self.title)
        while True:
            try:
                response = requests.get(url, headers=self.headers, stream=True, timeout=60)
                buffer = bytearray()
                for chunk in response.iter_content(chunk_size=102400):
                    if chunk:
                        buffer.extend(chunk)
                # Only touch the file after the whole segment arrived.
                with open(file_path, "ab") as data:
                    data.write(buffer)
                    data.flush()
                break
            except BaseException as exception:
                print("爬取:%s 出错:%s,重试中" % (url, exception))

    def __get_path_url(self):
        """Return the playlist URL's directory, used as the segment base URL."""
        url_parse = urlparse(self.url)
        path_list = url_parse.path.strip().split("/")
        path = "https://" + url_parse.hostname + "/" + "/".join(path_list[1:-1])
        return path

    def __get_ts_url(self, ts_name: str):
        """Join the base path with a segment name.

        :return: e.g. https://pl-vod28.live.panda.tv/transcode/.../index_0.ts
        """
        path = self.path_url + "/" + ts_name
        print("ts path:", path)
        return path
| [
"urllib.parse.urlparse",
"multiprocessing.Process.__init__",
"os.path.join",
"requests.get",
"re.sub",
"base_crawl.BaseCrawl.__init__"
] | [((557, 579), 'multiprocessing.Process.__init__', 'Process.__init__', (['self'], {}), '(self)\n', (573, 579), False, 'from multiprocessing import Process\n'), ((588, 616), 'base_crawl.BaseCrawl.__init__', 'BaseCrawl.__init__', (['self', 'id'], {}), '(self, id)\n', (606, 616), False, 'from base_crawl import BaseCrawl\n'), ((769, 792), 're.sub', 're.sub', (['rstr', '""""""', 'title'], {}), "(rstr, '', title)\n", (775, 792), False, 'import re\n'), ((2124, 2142), 'urllib.parse.urlparse', 'urlparse', (['self.url'], {}), '(self.url)\n', (2132, 2142), False, 'from urllib.parse import urlparse\n'), ((958, 1002), 'requests.get', 'requests.get', (['self.url'], {'headers': 'self.headers'}), '(self.url, headers=self.headers)\n', (970, 1002), False, 'import requests\n'), ((1558, 1622), 'requests.get', 'requests.get', (['url'], {'headers': 'self.headers', 'stream': '(True)', 'timeout': '(60)'}), '(url, headers=self.headers, stream=True, timeout=60)\n', (1570, 1622), False, 'import requests\n'), ((1651, 1695), 'os.path.join', 'os.path.join', (['self.download_path', 'self.title'], {}), '(self.download_path, self.title)\n', (1663, 1695), False, 'import os\n')] |
import parser
from coretypes import *
def dopen(fname):
    """Parse the extern datashape declarations contained in file *fname*.

    :param fname: path to a file of extern datashape declarations
    :return: result of ``parser.parse_extern`` on the file's contents
    """
    # Use a context manager so the handle is closed deterministically
    # (the original leaked it and relied on garbage collection).
    with open(fname) as f:
        contents = f.read()
    return parser.parse_extern(contents)
def dshape(o):
    """Coerce *o* into a DataShape.

    A string is parsed with the datashape grammar; a DataShape instance is
    returned unchanged; any other type is rejected.

    :raises TypeError: when *o* is neither a str nor a DataShape
    """
    # str and DataShape are disjoint types, so the check order is free.
    if isinstance(o, DataShape):
        return o
    if isinstance(o, str):
        return parser.parse(o)
    raise TypeError('Cannot create dshape from object of type %s' % type(o))


# Public alias kept for API compatibility.
datashape = dshape
| [
"parser.parse_extern",
"parser.parse"
] | [((102, 131), 'parser.parse_extern', 'parser.parse_extern', (['contents'], {}), '(contents)\n', (121, 131), False, 'import parser\n'), ((190, 205), 'parser.parse', 'parser.parse', (['o'], {}), '(o)\n', (202, 205), False, 'import parser\n')] |
# -*- coding: utf-8 -*-
import signal
from pyMagician import pyMagician
# Restore default Ctrl-C handling so the script can be interrupted even
# while blocked on serial I/O.
signal.signal(signal.SIGINT, signal.SIG_DFL)
# Serial port is hard-coded for a macOS USB-modem device; adjust per host.
magician = pyMagician(port='/dev/cu.usbmodem0121')
if magician.connect():
    print('- success connect')
    # Report the device firmware version.
    print('- read_version')
    print(magician.read_version())
    # Blink the on-board LED three times (0.5 s on / 0.5 s off).
    for n in range(3):
        print('-',n,'led')
        magician.led_on(wait=0.5)
        magician.led_off(wait=0.5)
    # Capture an IR signal on the device; capture() returns the stored size.
    print('- capture')
    received_size = magician.capture()
    print(received_size)
    # Read the captured IR data back; print only a 10-sample preview
    # when the capture is long.
    print('- read_ir')
    received_data = magician.read_ir()
    if len(received_data) > 10:
        data_str = str(received_data[0])
        for d in received_data[1:10]:
            data_str += ', ' + str(d)
        print('len:',len(received_data),'data: [' + data_str, '...]')
    else:
        print('len:', len(received_data), 'data:', received_data)
    # Round-trip check: write a known pattern, then read it back.
    print('- write_ir')
    magician.write_ir([1,2,3,4,5,6,7,8,9,10])
    read_data = magician.read_ir()
    print('check:',read_data)
    # Transmit the IR pattern currently stored on the device.
    print('- send_ir')
    magician.send_ir()
    magician.close()
else:
    print('failed connct')  # NOTE(review): typo ("connct") is a runtime string, kept as-is
"signal.signal",
"pyMagician.pyMagician"
] | [((75, 119), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'signal.SIG_DFL'], {}), '(signal.SIGINT, signal.SIG_DFL)\n', (88, 119), False, 'import signal\n'), ((132, 171), 'pyMagician.pyMagician', 'pyMagician', ([], {'port': '"""/dev/cu.usbmodem0121"""'}), "(port='/dev/cu.usbmodem0121')\n", (142, 171), False, 'from pyMagician import pyMagician\n')] |
from bokeh.plotting import figure, output_file, show
from Backtest.main.Utils.AssetBrackets import AssetBrackets
class StratVisual:
    """Plot per-asset backtest results with Bokeh, colouring each point by
    the asset's market-cap bracket (big/mid/small, anything else purple)."""

    def __init__(self, resultsDict):
        self.resultsDict = resultsDict
        self.A = AssetBrackets()

    def periodReturns(self):
        """Scatter-plot numPeriods vs. results for every asset except 'Total'."""
        brackets = self.A.getBrackets()
        rd = self.resultsDict
        p = figure(plot_width=1000, plot_height=700)
        # Bracket precedence mirrors the original if/elif chain.
        palette = (("big", "green"), ("mid", "orange"), ("small", "red"))
        for asset in (coin for coin in rd.keys() if coin != "Total"):
            color = "purple"
            for bracket, bracket_color in palette:
                if asset in brackets[bracket]:
                    color = bracket_color
                    break
            p.circle(rd[asset]["numPeriods"], rd[asset]["results"], size=5, color=color)
        output_file("tmp.html")
        show(p)

    def stratByAsset(self):
        pass
| [
"Backtest.main.Utils.AssetBrackets.AssetBrackets",
"bokeh.plotting.show",
"bokeh.plotting.figure",
"bokeh.plotting.output_file"
] | [((227, 242), 'Backtest.main.Utils.AssetBrackets.AssetBrackets', 'AssetBrackets', ([], {}), '()\n', (240, 242), False, 'from Backtest.main.Utils.AssetBrackets import AssetBrackets\n'), ((355, 395), 'bokeh.plotting.figure', 'figure', ([], {'plot_width': '(1000)', 'plot_height': '(700)'}), '(plot_width=1000, plot_height=700)\n', (361, 395), False, 'from bokeh.plotting import figure, output_file, show\n'), ((838, 861), 'bokeh.plotting.output_file', 'output_file', (['"""tmp.html"""'], {}), "('tmp.html')\n", (849, 861), False, 'from bokeh.plotting import figure, output_file, show\n'), ((870, 877), 'bokeh.plotting.show', 'show', (['p'], {}), '(p)\n', (874, 877), False, 'from bokeh.plotting import figure, output_file, show\n')] |
from functools import wraps
import flask
from flask import current_app as cap
from .builders.builder import Builder
from .config import DEFAULT_BUILDERS, set_default_config
class ResponseBuilder:
    """Flask extension that serializes view return values into HTTP
    responses through pluggable Builder instances.

    Builders are registered per name/mimetype and exposed both as
    decorators and as dynamically created attributes (one attribute per
    registered builder name).
    """
    def __init__(self, app=None, builders=None):
        """
        :param app: optional Flask app; when given, init_app runs immediately
        :param builders: optional mapping name -> Builder that extends or
            overrides the DEFAULT_BUILDERS set
        """
        self._builders = {}
        if app is not None:
            self.init_app(app, builders)
    def init_app(self, app, builders=None):
        """Register the extension on *app* and register all builders.
        :param app: Flask application
        :param builders: extra builders merged over DEFAULT_BUILDERS
        """
        set_default_config(app)
        if not hasattr(app, 'extensions'):
            app.extensions = dict()
        app.extensions['response_builder'] = self
        # User-supplied builders override defaults with the same name.
        for name, builder in {**DEFAULT_BUILDERS, **(builders or {})}.items():
            self.register_builder(name, builder, **app.config)
    def register_builder(self, name, builder, **kwargs):
        """Register *builder* under *name* and expose it as an attribute.
        :param name: builder name; becomes an attribute of this instance
        :param builder: instance of a Builder subclass
        :raises NameError: when *builder* does not extend Builder
        """
        if not issubclass(builder.__class__, Builder):
            raise NameError(
                "Invalid Builder: '{}'. "
                "You must extend class: '{}'".format(builder, Builder.__name__)
            )
        # First registration adopts the app config; later ones merge into it.
        if not builder.conf:
            builder.conf = kwargs
        else:
            builder.conf.update(kwargs)
        self._builders.update({name: builder})
        def _builder_attr(**params):
            def _wrapper(func=None, data=None):
                """
                func and data are mutual exclusive:
                if func is present means a decorator builder used
                if data is provided means decorator used as attribute
                :param func: view function being decorated
                :param data: payload to build directly
                :return: decorated function or built response
                """
                if func is not None:
                    @wraps(func)
                    def wrapped():
                        return self.build_response(name, func(), **params)
                    return wrapped
                return self.build_response(name, data, **params)
            return _wrapper
        # e.g. instance.json(...) yields the wrapper described above.
        setattr(self, name, _builder_attr)
    @staticmethod
    def _empty_response(status, headers):
        """Build a bodiless response (204-style replies).
        :param status: HTTP status code
        :param headers: mapping of response headers
        :return: flask Response with no body and no Content-Type
        """
        resp = flask.make_response(b'', status, headers)
        # An empty body must not advertise a content type.
        resp.headers.pop('Content-Type', None)
        return resp
    def build_response(self, builder=None, data=None, **kwargs):
        """Serialize *data* with *builder* into a flask Response.
        :param builder: Builder instance or registered builder name; when
            falsy, one is resolved from the Content-Type header or the
            RB_DEFAULT_RESPONSE_FORMAT config value
        :param data: payload; optionally a (body, status, headers) tuple
        :return: flask Response
        :raises NameError: when no suitable builder can be resolved
        """
        if isinstance(builder, str):
            builder = self._builders.get(builder)
        data, status, headers = self.normalize_response_data(data)
        if data is None:
            return self._empty_response(status, headers)
        if not builder:
            m = headers.get('Content-Type') or cap.config.get('RB_DEFAULT_RESPONSE_FORMAT')
            for value in self._builders.values():
                if value.mimetype == m:
                    builder = value
                    break
            else:
                raise NameError(
                    "Builder not found: using one of: '{}'".format(", ".join(self._builders.keys()))
                )
        elif not issubclass(builder.__class__, Builder):
            raise NameError(
                "Invalid Builder: '{}'. You must extend class: '{}'".format(builder, Builder.__name__)
            )
        builder.build(data, **kwargs)
        return builder.response(status=status, headers=headers)
    def get_mimetype_accept(self, default=None, acceptable=None, strict=True):
        """Negotiate a response mimetype from the request's Accept header.
        :param default: fallback format (defaults to RB_DEFAULT_RESPONSE_FORMAT)
        :param acceptable: allowed mimetypes (defaults to
            RB_DEFAULT_ACCEPTABLE_MIMETYPES)
        :param strict: when True, abort with 406 if nothing matches
        :return: (mimetype, builder) tuple
        """
        def find_builder(a):
            # Linear scan of registered builders for a matching mimetype.
            for b in self._builders.values():
                if a == b.mimetype:
                    return b
        mimetypes = flask.request.accept_mimetypes
        default = default or cap.config['RB_DEFAULT_RESPONSE_FORMAT']
        acceptable = acceptable or cap.config['RB_DEFAULT_ACCEPTABLE_MIMETYPES']
        # No preference expressed: serve the default format if we have it.
        if not mimetypes or str(mimetypes) == '*/*':
            builder = find_builder(default)
            if builder:
                return default, builder
        for m in mimetypes:
            m = m[0].split(';')[0]  # in order to remove encoding param
            accept = m if m in acceptable else None
            builder = find_builder(accept)
            if builder:
                return accept, builder
        if strict is True:
            flask.abort(406, "Not Acceptable: {}".format(flask.request.accept_mimetypes))
        return default, find_builder(default)
    @staticmethod
    def normalize_response_data(data):
        """Normalize a view return value into (data, status, headers).
        Accepts a bare payload, a bare int status, or a tuple in either
        (data, status, headers) or (data, headers, status) order.
        :param data: raw view return value
        :return: (data, status, headers) triple
        """
        if isinstance(data, tuple):
            # Pad to three elements, then detect whether status or headers
            # came second by checking for an int.
            v = data + (None,) * (3 - len(data))
            if isinstance(v[1], int):
                return v[0], v[1], v[2] or {}
            return v[0], v[2], v[1] or {}
        if isinstance(data, int):
            return None, data, {}
        return data, None, {}
    def no_content(self, func):
        """Decorator: return 204 No Content when the view returns no data.
        :param func: view function
        :return: wrapped view
        """
        @wraps(func)
        def wrapped(*args, **kwargs):
            resp = func(*args, **kwargs)
            data, status, headers = self.normalize_response_data(resp)
            if data:
                resp = self.build_response(data=resp)
            else:
                # Preserve an explicit non-204 status (e.g. 404) on empty data.
                status = 204 if status in (None, 204) else status
                resp = self._empty_response(status, headers)
            return resp
        return wrapped
    def on_format(self, default=None, acceptable=None):
        """Decorator: pick the builder from the request's format query arg.
        :param default: builder name used when the query arg is missing
        :param acceptable: iterable of allowed builder names
        :return: decorator
        """
        def response(fun):
            @wraps(fun)
            def wrapper(*args, **kwargs):
                builder = flask.request.args.get(cap.config.get('RB_FORMAT_KEY')) or default
                if builder not in (acceptable or self._builders.keys()):
                    # Unacceptable format requested: fall back to the builder
                    # whose mimetype matches the configured default.
                    for k, v in self._builders.items():
                        if v.mimetype == cap.config.get('RB_DEFAULT_RESPONSE_FORMAT'):
                            builder = k
                            break
                return self.build_response(builder, fun(*args, **kwargs))
            return wrapper
        return response
    def on_accept(self, default=None, acceptable=None, strict=True):
        """Decorator: pick the builder via Accept-header negotiation.
        :param default: fallback format
        :param acceptable: allowed mimetypes
        :param strict: abort with 406 when nothing matches
        :return: decorator
        """
        def response(fun):
            @wraps(fun)
            def wrapper(*args, **kwargs):
                mimetype, builder = self.get_mimetype_accept(default, acceptable, strict)
                return self.build_response(builder, fun(*args, **kwargs))
            return wrapper
        return response
    def response(self, builder, **kwargs):
        """Decorator: always serialize with the given *builder*.
        :param builder: builder name or Builder instance
        :return: decorator
        """
        def _response(f):
            @wraps(f)
            def wrapper(*args, **kw):
                return self.build_response(builder, f(*args, **kw), **kwargs)
            return wrapper
        return _response
    def template_or_json(self, template: str, as_table=False, to_dict=None):
        """Decorator: render *template* for XHR requests, JSON otherwise.
        :param template: template path used for the HTML builder
        :param as_table: forwarded to the HTML builder
        :param to_dict: forwarded to the HTML builder
        :return: decorator
        """
        def response(fun):
            @wraps(fun)
            def wrapper(*args, **kwargs):
                varargs = {}
                builder = self._builders.get('json')
                # check if request is XHR
                if flask.request.headers.get('X-Requested-With', '').lower() == "xmlhttprequest":
                    builder = self._builders.get('html')
                    varargs.update(dict(
                        template=template,
                        as_table=as_table,
                        to_dict=to_dict
                    ))
                resp = fun(*args, **kwargs)
                return self.build_response(builder, resp, **varargs)
            return wrapper
        return response
| [
"flask.make_response",
"flask.current_app.config.get",
"flask.request.headers.get",
"functools.wraps"
] | [((2277, 2318), 'flask.make_response', 'flask.make_response', (["b''", 'status', 'headers'], {}), "(b'', status, headers)\n", (2296, 2318), False, 'import flask\n'), ((5184, 5195), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (5189, 5195), False, 'from functools import wraps\n'), ((5807, 5817), 'functools.wraps', 'wraps', (['fun'], {}), '(fun)\n', (5812, 5817), False, 'from functools import wraps\n'), ((6598, 6608), 'functools.wraps', 'wraps', (['fun'], {}), '(fun)\n', (6603, 6608), False, 'from functools import wraps\n'), ((7018, 7026), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (7023, 7026), False, 'from functools import wraps\n'), ((7432, 7442), 'functools.wraps', 'wraps', (['fun'], {}), '(fun)\n', (7437, 7442), False, 'from functools import wraps\n'), ((2849, 2893), 'flask.current_app.config.get', 'cap.config.get', (['"""RB_DEFAULT_RESPONSE_FORMAT"""'], {}), "('RB_DEFAULT_RESPONSE_FORMAT')\n", (2863, 2893), True, 'from flask import current_app as cap\n'), ((1815, 1826), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (1820, 1826), False, 'from functools import wraps\n'), ((5909, 5940), 'flask.current_app.config.get', 'cap.config.get', (['"""RB_FORMAT_KEY"""'], {}), "('RB_FORMAT_KEY')\n", (5923, 5940), True, 'from flask import current_app as cap\n'), ((6123, 6167), 'flask.current_app.config.get', 'cap.config.get', (['"""RB_DEFAULT_RESPONSE_FORMAT"""'], {}), "('RB_DEFAULT_RESPONSE_FORMAT')\n", (6137, 6167), True, 'from flask import current_app as cap\n'), ((7629, 7678), 'flask.request.headers.get', 'flask.request.headers.get', (['"""X-Requested-With"""', '""""""'], {}), "('X-Requested-With', '')\n", (7654, 7678), False, 'import flask\n')] |
from django.shortcuts import render, get_object_or_404, redirect
from django.views.generic import TemplateView, ListView, DetailView, CreateView, UpdateView, DeleteView
from .models import EventListing
from django.contrib.auth.models import User
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.contrib.auth.decorators import login_required
from events.forms import EventCreateView, EventUpdateForm
from django.contrib import messages
from django.urls import reverse
@login_required
def eventCreate(request):
    """Create a new event owned by the logged-in user.

    GET renders an empty form.  A valid POST stamps the user as author,
    saves, flashes a success message, and redirects to the event's detail
    page; an invalid POST re-renders the bound form with its errors.
    """
    if request.method != 'POST':
        form = EventCreateView()
        return render(request, 'events/eventlisting_form.html', {'form': form})
    form = EventCreateView(request.POST, request.FILES)
    if not form.is_valid():
        return render(request, 'events/eventlisting_form.html', {'form': form})
    form.instance.author = request.user
    event = form.save()
    messages.success(request, "Your event has been created!")
    return redirect(reverse('event-detail', kwargs={'pk': event.pk}))
def eventUpdateView(request, pk):
    """Update an existing event identified by *pk*.

    A valid POST saves the event and redirects to its detail page.  On GET
    (the form is unbound, so is_valid() is False) — or on an invalid POST —
    a fresh, unbound form for the event is rendered.

    NOTE(review): unlike the class-based EventUpdateView below, this view
    performs no login/ownership check, and an invalid POST silently discards
    the submitted data (the template receives a re-fetched unbound form
    instead of the bound form with errors) — confirm whether this is
    intended.
    """
    instance = get_object_or_404(EventListing, id=pk)
    form = EventUpdateForm(request.POST or None, instance=instance)
    if form.is_valid():
        form.save()
        messages.success(request, "Your event has been updated!")
        return redirect(reverse('event-detail', kwargs={'pk': pk}))
    else:
        # Re-fetch the row: form validation may have mutated `instance`
        # with posted values, so render database state instead.
        e_form = EventUpdateForm(instance = EventListing.objects.get(pk=pk))
    return render(request, 'events/eventupdate_form.html', {'e_form': e_form})
class EventPageView(ListView):
    """Public events page: list every event, newest first."""
    model = EventListing
    template_name = 'events/events.html'
    context_object_name = 'data'
    ordering = ['-date_posted']  # newest events first
class UserEventPageView(ListView):
    """List events created by one user (username taken from the URL)."""
    model = EventListing
    template_name = 'events/user_event.html'
    context_object_name = 'data'
    paginate_by = 3  # three events per page
    def get_queryset(self):
        # 404 when the username in the URL does not exist.
        user = get_object_or_404(User, username=self.kwargs.get('username'))
        return EventListing.objects.filter(author=user).order_by('-date_posted')
class EventUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
    """Allow an event's author — and only the author — to edit it."""
    model = EventListing
    fields = []  # NOTE(review): no editable fields declared — confirm this is intentional
    def form_valid(self, form):
        # Re-stamp the author so ownership cannot be changed via the form.
        form.instance.author = self.request.user
        return super().form_valid(form)
    def test_func(self):
        # UserPassesTestMixin hook: only the author may update.
        event = self.get_object()
        return self.request.user == event.author
class EventDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
    """Allow an event's author — and only the author — to delete it."""
    model = EventListing
    success_url = '/'  # back to the home page after deletion
    def test_func(self):
        # UserPassesTestMixin hook: only the author may delete.
        event = self.get_object()
        return self.request.user == event.author
class EventDetailView(DetailView):
    """Detail page for a single EventListing (uses the default template)."""
    model = EventListing
| [
"django.shortcuts.render",
"events.forms.EventCreateView",
"django.urls.reverse",
"django.shortcuts.get_object_or_404",
"events.forms.EventUpdateForm",
"django.contrib.messages.success"
] | [((890, 954), 'django.shortcuts.render', 'render', (['request', '"""events/eventlisting_form.html"""', "{'form': form}"], {}), "(request, 'events/eventlisting_form.html', {'form': form})\n", (896, 954), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((1002, 1040), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['EventListing'], {'id': 'pk'}), '(EventListing, id=pk)\n', (1019, 1040), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((1049, 1105), 'events.forms.EventUpdateForm', 'EventUpdateForm', (['(request.POST or None)'], {'instance': 'instance'}), '(request.POST or None, instance=instance)\n', (1064, 1105), False, 'from events.forms import EventCreateView, EventUpdateForm\n'), ((1349, 1416), 'django.shortcuts.render', 'render', (['request', '"""events/eventupdate_form.html"""', "{'e_form': e_form}"], {}), "(request, 'events/eventupdate_form.html', {'e_form': e_form})\n", (1355, 1416), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((591, 635), 'events.forms.EventCreateView', 'EventCreateView', (['request.POST', 'request.FILES'], {}), '(request.POST, request.FILES)\n', (606, 635), False, 'from events.forms import EventCreateView, EventUpdateForm\n'), ((864, 881), 'events.forms.EventCreateView', 'EventCreateView', ([], {}), '()\n', (879, 881), False, 'from events.forms import EventCreateView, EventUpdateForm\n'), ((1143, 1200), 'django.contrib.messages.success', 'messages.success', (['request', '"""Your event has been updated!"""'], {}), "(request, 'Your event has been updated!')\n", (1159, 1200), False, 'from django.contrib import messages\n'), ((722, 779), 'django.contrib.messages.success', 'messages.success', (['request', '"""Your event has been created!"""'], {}), "(request, 'Your event has been created!')\n", (738, 779), False, 'from django.contrib import messages\n'), ((1219, 1261), 'django.urls.reverse', 'reverse', (['"""event-detail"""'], {'kwargs': 
"{'pk': pk}"}), "('event-detail', kwargs={'pk': pk})\n", (1226, 1261), False, 'from django.urls import reverse\n'), ((799, 846), 'django.urls.reverse', 'reverse', (['"""event-detail"""'], {'kwargs': "{'pk': form.pk}"}), "('event-detail', kwargs={'pk': form.pk})\n", (806, 846), False, 'from django.urls import reverse\n')] |
import datetime
from unittest import mock
import pytest
from h_matchers import Any
from h.tasks import indexer
class FakeSettingsService:
    """In-memory stand-in for the settings service used by these tests.

    Provides only the two methods the tests exercise: ``put`` stores a
    value under a key and ``get`` returns it (``None`` when absent).
    """

    def __init__(self):
        self._store = {}

    def get(self, key):
        """Return the value stored for *key*, or None when never stored."""
        return self._store.get(key)

    def put(self, key, value):
        """Store *value* under *key*, overwriting any previous value."""
        self._store[key] = value
class TestAddAnnotation:
    """Unit tests for the ``add_annotation`` Celery task."""
    def test_it_fetches_the_annotation(self, storage, annotation, celery):
        id_ = "test-annotation-id"
        storage.fetch_annotation.return_value = annotation
        indexer.add_annotation(id_)
        storage.fetch_annotation.assert_called_once_with(celery.request.db, id_)
    def test_it_calls_index_with_annotation(self, storage, annotation, index, celery):
        id_ = "test-annotation-id"
        storage.fetch_annotation.return_value = annotation
        indexer.add_annotation(id_)
        index.assert_any_call(celery.request.es, annotation, celery.request)
    def test_it_skips_indexing_when_annotation_cannot_be_loaded(
        self, storage, index, celery
    ):
        # fetch_annotation returning None simulates a missing/deleted row.
        storage.fetch_annotation.return_value = None
        indexer.add_annotation("test-annotation-id")
        assert index.called is False
    def test_during_reindex_adds_to_current_index(
        self, storage, annotation, index, celery, settings_service
    ):
        # A "reindex.new_index" setting marks a reindex as in progress.
        settings_service.put("reindex.new_index", "hypothesis-xyz123")
        storage.fetch_annotation.return_value = annotation
        indexer.add_annotation("test-annotation-id")
        index.assert_any_call(
            celery.request.es,
            annotation,
            celery.request,
            target_index="hypothesis-xyz123",
        )
    def test_during_reindex_adds_to_new_index(
        self, storage, annotation, index, celery, settings_service
    ):
        settings_service.put("reindex.new_index", "hypothesis-xyz123")
        storage.fetch_annotation.return_value = annotation
        indexer.add_annotation("test-annotation-id")
        index.assert_any_call(
            celery.request.es,
            annotation,
            celery.request,
            target_index="hypothesis-xyz123",
        )
    def test_it_indexes_thread_root(self, storage, reply, delay):
        # Indexing a reply re-enqueues its thread root for indexing.
        storage.fetch_annotation.return_value = reply
        indexer.add_annotation("test-annotation-id")
        delay.assert_called_once_with("root-id")
    @pytest.fixture
    def annotation(self):
        # Top-level annotation: is_reply is False, no thread root.
        return mock.Mock(spec_set=["is_reply"], is_reply=False)
    @pytest.fixture
    def reply(self):
        # Reply annotation pointing at thread root "root-id".
        return mock.Mock(
            spec_set=["is_reply", "thread_root_id"],
            is_reply=True,
            thread_root_id="root-id",
        )
    @pytest.fixture
    def delay(self, patch):
        # Mock of the task's own .delay used for the thread-root re-enqueue.
        return patch("h.tasks.indexer.add_annotation.delay")
class TestDeleteAnnotation:
    """Unit tests for the ``delete_annotation`` Celery task."""
    def test_it_deletes_from_index(self, delete, celery):
        id_ = "test-annotation-id"
        indexer.delete_annotation(id_)
        delete.assert_any_call(celery.request.es, id_)
    def test_during_reindex_deletes_from_current_index(
        self, delete, celery, settings_service
    ):
        # A "reindex.new_index" setting marks a reindex as in progress.
        settings_service.put("reindex.new_index", "hypothesis-xyz123")
        indexer.delete_annotation("test-annotation-id")
        delete.assert_any_call(
            celery.request.es, "test-annotation-id", target_index="hypothesis-xyz123"
        )
    def test_during_reindex_deletes_from_new_index(
        self, delete, celery, settings_service
    ):
        settings_service.put("reindex.new_index", "hypothesis-xyz123")
        indexer.delete_annotation("test-annotation-id")
        delete.assert_any_call(
            celery.request.es, "test-annotation-id", target_index="hypothesis-xyz123"
        )
class TestReindexUserAnnotations:
    """Unit tests for the ``reindex_user_annotations`` Celery task."""
    def test_it_creates_batch_indexer(self, BatchIndexer, annotation_ids, celery):
        userid = list(annotation_ids.keys())[0]
        indexer.reindex_user_annotations(userid)
        BatchIndexer.assert_any_call(
            celery.request.db, celery.request.es, celery.request
        )
    def test_it_reindexes_users_annotations(self, BatchIndexer, annotation_ids):
        userid = list(annotation_ids.keys())[0]
        indexer.reindex_user_annotations(userid)
        # Only the target user's annotation ids are passed to the indexer.
        args, _ = BatchIndexer.return_value.index.call_args
        actual = args[0]
        expected = annotation_ids[userid]
        assert sorted(expected) == sorted(actual)
    @pytest.fixture
    def annotation_ids(self, factories):
        # Two users' annotations; the second user's rows must NOT be indexed.
        userid1 = "acct:<EMAIL>"
        userid2 = "acct:<EMAIL>"
        return {
            userid1: [
                a.id for a in factories.Annotation.create_batch(3, userid=userid1)
            ],
            userid2: [
                a.id for a in factories.Annotation.create_batch(2, userid=userid2)
            ],
        }
class TestReindexAnnotationsInDateRange:
    """Unit tests for the ``reindex_annotations_in_date_range`` Celery task."""
    def test_it(self, BatchIndexer, celery, matching_annotations_ids):
        # Reindex the last seven days; only annotations updated inside the
        # window should be handed to the BatchIndexer.
        indexer.reindex_annotations_in_date_range(
            datetime.datetime.utcnow() - datetime.timedelta(days=7),
            datetime.datetime.utcnow(),
        )
        BatchIndexer.assert_called_once_with(
            celery.request.db, celery.request.es, celery.request,
        )
        BatchIndexer.return_value.index.assert_called_once_with(Any())
        indexed_annotations = list(BatchIndexer.return_value.index.call_args[0][0])
        assert sorted(indexed_annotations) == sorted(matching_annotations_ids)
    @pytest.fixture(autouse=True)
    def matching_annotations_ids(self, factories):
        """Annotations inside the timeframe that we're reindexing."""
        return [
            annotation.id
            for annotation in factories.Annotation.create_batch(
                3, updated=datetime.datetime.utcnow() - datetime.timedelta(days=3)
            )
        ]
    @pytest.fixture(autouse=True)
    def not_matching_annotations(self, factories):
        """Annotations outside the timeframe that we're reindexing."""
        before_annotations = factories.Annotation.build_batch(
            3, updated=datetime.datetime.utcnow() - datetime.timedelta(days=14)
        )
        after_annotations = factories.Annotation.build_batch(
            3, updated=datetime.datetime.utcnow() + datetime.timedelta(days=14)
        )
        return before_annotations + after_annotations
# Every test in this module runs with the fake settings service registered.
pytestmark = pytest.mark.usefixtures("settings_service")
@pytest.fixture(autouse=True)
def BatchIndexer(patch):
    # Mock of the BatchIndexer class used by the reindex tasks.
    return patch("h.tasks.indexer.BatchIndexer")
@pytest.fixture(autouse=True)
def celery(patch, pyramid_request):
    # The tasks read db/es off celery.request; point it at the test request.
    cel = patch("h.tasks.indexer.celery")
    cel.request = pyramid_request
    return cel
@pytest.fixture(autouse=True)
def delete(patch):
    # Mock of the low-level index delete function.
    return patch("h.tasks.indexer.delete")
@pytest.fixture(autouse=True)
def index(patch):
    # Mock of the low-level index function.
    return patch("h.tasks.indexer.index")
@pytest.fixture
def pyramid_request(pyramid_request):
    # Attach a fake Elasticsearch client to the request.
    pyramid_request.es = mock.Mock()
    return pyramid_request
@pytest.fixture
def settings_service(pyramid_config):
    # Register the in-memory settings double under the "settings" name.
    service = FakeSettingsService()
    pyramid_config.register_service(service, name="settings")
    return service
@pytest.fixture(autouse=True)
def storage(patch):
    # Mock of the storage module used to fetch annotations.
    return patch("h.tasks.indexer.storage")
| [
"unittest.mock.Mock",
"datetime.datetime.utcnow",
"h.tasks.indexer.add_annotation",
"h.tasks.indexer.reindex_user_annotations",
"pytest.mark.usefixtures",
"h_matchers.Any",
"pytest.fixture",
"datetime.timedelta",
"h.tasks.indexer.delete_annotation"
] | [((6377, 6420), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""settings_service"""'], {}), "('settings_service')\n", (6400, 6420), False, 'import pytest\n'), ((6424, 6452), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (6438, 6452), False, 'import pytest\n'), ((6530, 6558), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (6544, 6558), False, 'import pytest\n'), ((6689, 6717), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (6703, 6717), False, 'import pytest\n'), ((6783, 6811), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (6797, 6811), False, 'import pytest\n'), ((7168, 7196), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (7182, 7196), False, 'import pytest\n'), ((5465, 5493), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (5479, 5493), False, 'import pytest\n'), ((5844, 5872), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (5858, 5872), False, 'import pytest\n'), ((6953, 6964), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (6962, 6964), False, 'from unittest import mock\n'), ((519, 546), 'h.tasks.indexer.add_annotation', 'indexer.add_annotation', (['id_'], {}), '(id_)\n', (541, 546), False, 'from h.tasks import indexer\n'), ((820, 847), 'h.tasks.indexer.add_annotation', 'indexer.add_annotation', (['id_'], {}), '(id_)\n', (842, 847), False, 'from h.tasks import indexer\n'), ((1098, 1142), 'h.tasks.indexer.add_annotation', 'indexer.add_annotation', (['"""test-annotation-id"""'], {}), "('test-annotation-id')\n", (1120, 1142), False, 'from h.tasks import indexer\n'), ((1446, 1490), 'h.tasks.indexer.add_annotation', 'indexer.add_annotation', (['"""test-annotation-id"""'], {}), "('test-annotation-id')\n", (1468, 1490), False, 'from h.tasks import indexer\n'), ((1923, 1967), 
'h.tasks.indexer.add_annotation', 'indexer.add_annotation', (['"""test-annotation-id"""'], {}), "('test-annotation-id')\n", (1945, 1967), False, 'from h.tasks import indexer\n'), ((2269, 2313), 'h.tasks.indexer.add_annotation', 'indexer.add_annotation', (['"""test-annotation-id"""'], {}), "('test-annotation-id')\n", (2291, 2313), False, 'from h.tasks import indexer\n'), ((2426, 2474), 'unittest.mock.Mock', 'mock.Mock', ([], {'spec_set': "['is_reply']", 'is_reply': '(False)'}), "(spec_set=['is_reply'], is_reply=False)\n", (2435, 2474), False, 'from unittest import mock\n'), ((2532, 2627), 'unittest.mock.Mock', 'mock.Mock', ([], {'spec_set': "['is_reply', 'thread_root_id']", 'is_reply': '(True)', 'thread_root_id': '"""root-id"""'}), "(spec_set=['is_reply', 'thread_root_id'], is_reply=True,\n thread_root_id='root-id')\n", (2541, 2627), False, 'from unittest import mock\n'), ((2912, 2942), 'h.tasks.indexer.delete_annotation', 'indexer.delete_annotation', (['id_'], {}), '(id_)\n', (2937, 2942), False, 'from h.tasks import indexer\n'), ((3190, 3237), 'h.tasks.indexer.delete_annotation', 'indexer.delete_annotation', (['"""test-annotation-id"""'], {}), "('test-annotation-id')\n", (3215, 3237), False, 'from h.tasks import indexer\n'), ((3554, 3601), 'h.tasks.indexer.delete_annotation', 'indexer.delete_annotation', (['"""test-annotation-id"""'], {}), "('test-annotation-id')\n", (3579, 3601), False, 'from h.tasks import indexer\n'), ((3907, 3947), 'h.tasks.indexer.reindex_user_annotations', 'indexer.reindex_user_annotations', (['userid'], {}), '(userid)\n', (3939, 3947), False, 'from h.tasks import indexer\n'), ((4201, 4241), 'h.tasks.indexer.reindex_user_annotations', 'indexer.reindex_user_annotations', (['userid'], {}), '(userid)\n', (4233, 4241), False, 'from h.tasks import indexer\n'), ((5064, 5090), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (5088, 5090), False, 'import datetime\n'), ((5289, 5294), 'h_matchers.Any', 'Any', ([], {}), '()\n', 
(5292, 5294), False, 'from h_matchers import Any\n'), ((4995, 5021), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (5019, 5021), False, 'import datetime\n'), ((5024, 5050), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(7)'}), '(days=7)\n', (5042, 5050), False, 'import datetime\n'), ((6089, 6115), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (6113, 6115), False, 'import datetime\n'), ((6118, 6145), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(14)'}), '(days=14)\n', (6136, 6145), False, 'import datetime\n'), ((6241, 6267), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (6265, 6267), False, 'import datetime\n'), ((6270, 6297), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(14)'}), '(days=14)\n', (6288, 6297), False, 'import datetime\n'), ((5758, 5784), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (5782, 5784), False, 'import datetime\n'), ((5787, 5813), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(3)'}), '(days=3)\n', (5805, 5813), False, 'import datetime\n')] |
#
from __future__ import division, absolute_import
from __future__ import print_function, unicode_literals
import webbrowser
import random
from kivy.app import App
from kivy.clock import Clock
from kivy.uix.popup import Popup
from kivy.uix.button import Button
from kivy.core.window import Window
from kivy.core.text import LabelBase
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.screenmanager import Screen
from kivy.properties import ObjectProperty
from kivy.utils import get_color_from_hex
#from kivy.uix.camera import Camera
#from arithmetic import Arithmetic, json_settings
#-----pour la webam---
#from kivy.lang import Builder
from kivy.uix.image import Image
from kivy.graphics.texture import Texture
import cv2
import os
import sys
from random import shuffle
import time
#Recognition predictor
import model as rec
#For music
from kivy.core.audio import SoundLoader
#from camCapture import camCapture
# Color the background
Window.clearcolor = get_color_from_hex("#300000")  # dark maroon app background
# Register fonts
#LabelBase.register(
# name="Roboto",
# fn_regular="./fonts/Roboto-Thin.ttf",
# fn_bold="./fonts/Roboto-Medium.ttf"
#)
################################################################################
#NO NEED FOR NOW
#For Gifs
class MyImage(Image):
    """Kivy Image that plays an animated gif exactly once.

    After ``frame_number + 1`` texture updates the core image's animation
    is reset and frozen, so the gif stops on its last frame instead of
    looping forever.
    """
    # Counts texture updates seen so far for the current gif.
    frame_counter = 0
    frame_number = 7 # depends on the gif frames
    def on_texture(self, instance, value):
        # Fired by Kivy every time the displayed texture changes (one event
        # per gif frame). Once every frame has been shown, stop the loop.
        if self.frame_counter == self.frame_number + 1:
            self._coreimage.anim_reset(False)
        self.frame_counter += 1
class KivyTutorRoot(BoxLayout):
    """
    Root of all widgets.

    Holds navigation state (screen history), the current lesson state
    (which letter/sign is shown), the user's score, and the webcam loop
    that grades the user's hand sign via the recognition model.
    """
    hmi_screen = ObjectProperty(None)
    def __init__(self, **kwargs):
        super(KivyTutorRoot, self).__init__(**kwargs)
        # List of previous screens
        self.screen_list = []
        #Is the states order in the alphabetical order or not
        #NO NEED
        self.is_mix = False
        self.hmi_popup = HmiPopup()
        #self.myImage = MyImage(Image)
        self.current = 0
        #To campare with prediction result
        self.list = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y']
        self.score = 0
        #Recover alphabet states: (folders where each has the corresponding letter and image/gif)
        self.path = 'states'
        self.states = os.listdir(self.path)
        self.init_state = 'A'
        #Dialogue to interact with the user
        self.path_diaologue = 'dialogue'
        self.dialogues = os.listdir(self.path_diaologue)
        self.init_dialogue = True
        #Check if there is a next dialogue in the current state
        self.dialogue_idx = 0
    def changeScreen(self, next_screen):
        """Navigate to *next_screen*, resetting lesson state for the two
        game modes ('challenge' shuffles the letters, 'learn' sorts them)."""
        # If screen is not already in the list fo prevous screens
        if self.ids.kivy_screen_manager.current not in self.screen_list:
            self.screen_list.append(self.ids.kivy_screen_manager.current)
        if next_screen == "about this app":
            self.ids.kivy_screen_manager.current = "about_screen"
        else:
            #Is the states order in the alphabetical order or not, depends on mode
            if (next_screen == 'challenge'):
                shuffle(self.states)
                self.init_state = self.states[0]
                self.hmi_screen.question_image.text = self.states[0]
                #Reset
                self.hmi_screen.button.idx = 0
                self.score = 0
                self.dialogue_idx = 0
                self.current = 0
                #When you change from modes
                if(self.init_dialogue == False):
                    self.hmi_screen.interact_button.text = ""
            if (next_screen == 'learn'):
                self.states.sort()
                self.init_state = self.states[0]
                self.hmi_screen.question_image.text = self.states[0]
                #Reset
                self.hmi_screen.button.idx = 0
                self.score = 0
                self.dialogue_idx = 0
                self.current = 0
                #When you change from modes
                if(self.init_dialogue == False):
                    self.hmi_screen.interact_button.text = ""
            #image = self.path+'/'+self.init_state+'/image.png'
            image = self.path+'/'+self.init_state+"/gif.gif"
            self.hmi_screen.image.source = image
            self.ids.kivy_screen_manager.current = "hmi_screen"
    def changeState(self):
        """Advance to the next letter, or show the final popup when every
        letter has been played (then reset the loop)."""
        if (len(self.states) == 0):
            print ("There are no states!")
            # BUG FIX: sys.quit() does not exist; sys.exit() is the real call.
            sys.exit()
        idx = self.hmi_screen.button.idx
        self.current = idx
        if idx < len(self.states):
            letter = self.states[idx]
            #image = self.path+'/'+letter+'/image.png'
            #image = self.myImage(source = self.path+'/'+letter+"/gif.gif")
            image = self.path+'/'+letter+"/gif.gif"
            self.hmi_screen.image.source = image
            self.hmi_screen.question_image.text = letter
            self.hmi_screen.button.idx += 1
            #Reset the dialogue counter
            self.dialogue_idx = 0
            #Go the next dialogue state letter
        #Here if we exceed the len of states, we are done => finish == True
        else:
            self.hmi_popup.open('Done', self.score)
            print('done!')
            #Reset loop
            self.hmi_screen.button.idx = 0
            self.score = 0
    def onBackBtn(self):
        """Pop one screen from the history; return True while there is a
        screen to go back to (prevents the app from closing)."""
        # Check if there are any screen to go back to
        if self.screen_list:
            # if there are screens we can go back to, the just do it
            self.ids.kivy_screen_manager.current = self.screen_list.pop()
            # Saw we don't want to close
            return True
        # No more screens to go back to
        return False
    def camCapture(self):
        """Open the webcam and grade the user's hand sign.

        Scores +100 and shows a success popup on a correct prediction;
        after 200 frames without a match, scores -10 and shows a retry
        popup. 'q' aborts the capture window.
        """
        count = 0
        cap = cv2.VideoCapture(0)
        #fourcc = cv2.VideoWriter_fourcc(*'XVID') #pour enregistrer lw fichier codec
        #load recognition models just one time
        class_model, detect_model, args, class_names = rec.load_models()
        while(True):
            ret, frame = cap.read()
            cv2.imshow('Point you hand at me :)',frame)
            #Call predictor
            prediction = rec.predict(frame, class_model, detect_model, args, class_names)
            count += 1
            # HOW TO SAFELY CLOSE THE CAM WINDOW using a button not q
            if cv2.waitKey(1) & 0xFF == ord('q'):#to get out from the infinite loop
                break
            if (prediction == self.list[self.current]):
                #Go to the next letter
                #self.current += 1
                #increase score
                self.score += 100
                self.hmi_popup.open('Yes', self.score)
                break
            #If exceed count limit then display error message
            if(count >= 200):
                #decrease score
                self.score -= 10
                self.hmi_popup.open('No', self.score)
                break
        #Destroy window cam
        cap.release()
        cv2.destroyAllWindows()
    #Instructive and interactive Dialogue with the user
    #A button when you click on it, it displays the next message
    def interaction(self):
        """Show the next line of dialogue for the current letter (or the
        one-time intro dialogue on app launch)."""
        if (len(self.dialogues) == 0):
            print ("There are no dialogues!")
            # BUG FIX: sys.quit() does not exist; sys.exit() is the real call.
            sys.exit()
        #Read current dialogue using the states[idx] from dialogue folder
        if(self.init_dialogue == True):
            #Get the init dialogue file
            #Starts only one time during the app launch
            file = self.path_diaologue+"/init.txt"
        else:
            #Get the current state letter dialogue file
            file = self.path_diaologue+'/'+self.states[self.current]+'.txt'
        #Save lines of current states in a list
        with open(file) as f:
            list_dialogue = f.read().splitlines()
        if(self.dialogue_idx < len(list_dialogue)):
            self.hmi_screen.interact_button.text = list_dialogue[self.dialogue_idx]
            self.dialogue_idx += 1
        else:
            if(self.init_dialogue == True):
                #Go to the next dialogue states
                #init dialogue is done just one time during the app launch
                self.init_dialogue = False
            self.hmi_screen.interact_button.text = "Click to show description!"
################################################################################
class HmiScreen(Screen):
    #Widget that will act as a screen and hold funcs for hmi questions
    def __init__(self, *args, **kwargs):
        # Nothing beyond the stock Screen behaviour; layout comes from kv.
        super(HmiScreen, self).__init__(*args, **kwargs)
################################################################################
class HmiPopup(Popup):
    #Popup for telling user whether he got it right or wrong
    # Templates: the random response line is substituted for {}.
    GOOD = "{}\n:)"
    BAD = "{}\nTry again!!"
    #good_index = 0
    # Cycles sequentially through BAD_LIST (incremented before use).
    bad_index = -1
    #Read response messages from txt files
    # NOTE(review): these files are read at class-definition (import) time;
    # a missing file makes the whole module fail to import.
    with open('good_response.txt') as f:
        GOOD_LIST = f.read().splitlines()
    with open('bad_response.txt') as f:
        BAD_LIST = f.read().splitlines()
    message = ObjectProperty()
    wrapped_button = ObjectProperty()
    def __init__(self, *args, **kwargs):
        super(HmiPopup, self).__init__(*args, **kwargs)
    def open(self, answer, score):
        # Show feedback for answer ('Yes' | 'No' | 'Done') with the score.
        # If answer is correct take off button if its visible
        if answer == 'Yes':
            if self.wrapped_button in self.content.children:
                self.content.remove_widget(self.wrapped_button)
        # If answers is wrong, display button if not visible
        elif answer == 'No':
            if self.wrapped_button not in self.content.children:
                self.content.add_widget(self.wrapped_button)
        elif answer == 'Done':
            if self.wrapped_button not in self.content.children:
                self.content.add_widget(self.wrapped_button)
        # Set up text message
        self.message.text = self._prep_text(answer, score)
        # display popup
        super(HmiPopup, self).open()
        if answer == 'Yes':
            #pop up vanish after n sec
            Clock.schedule_once(self.dismiss, 10)
    def _prep_text(self, answer, score):
        # Build the popup message for the given answer kind.
        # Wrap bad_index around when the list is exhausted.
        if(self.bad_index >= len(self.BAD_LIST)-1):
            self.bad_index = -1
        if answer == 'Yes':
            index = random.randint(0, len(self.GOOD_LIST) - 1)
            return self.GOOD.format(self.GOOD_LIST[index])
        elif answer == 'No':
            #Dont do random
            # NOTE(review): hmi_screen is assigned but never used here.
            hmi_screen = App.get_running_app().root.hmi_screen
            self.bad_index += 1
            return self.BAD.format(self.BAD_LIST[self.bad_index])
        elif answer== 'Done':
            if(score > 0):
                return 'You did it, GOOD JOB!\n'+'Score: '+str(score)
            else:
                return 'Maybe if you try again, you\'ll get them all this time :)\nClick on "Next" to reset.'
################################################################################
class KivyTutorApp(App):
    """App object: wires the back button, background music, settings
    panel and the root widget together."""
    def __init__(self, **kwargs):
        super(KivyTutorApp, self).__init__(**kwargs)
        self.use_kivy_settings = False
        Window.bind(on_keyboard=self.onBackBtn)
        #Add background music
        self.sound = SoundLoader.load('driving.mp3')
        if self.sound:
            print("Sound found at %s" % self.sound.source)
            print("Sound is %.3f seconds" % self.sound.length)
            #loop the background music
            self.sound.loop = True
            #start with 50% volume
            self.sound.volume = 0.5
            self.sound.play()
    def stops(self):
        """Stop the background music."""
        self.sound.stop()
    def toggle(self):
        """Toggle the background music between 'play' and 'stop'.

        Returns the new state.
        """
        # BUG FIX: the original read the undefined attribute self.M and
        # returned 'play' in both branches, so the music never toggled.
        self.sound.state = 'play' if self.sound.state == 'stop' else 'stop'
        return self.sound.state
    def onBackBtn(self, window, key, *args):
        """Route the Android/ESC back button (key 27) to the root widget."""
        # user presses back button
        if key == 27:
            return self.root.onBackBtn()
    def build(self):
        """Create and return the root widget tree."""
        #TO DO: SETTINGS MUISC : VOLUME + MUTE
        return KivyTutorRoot()
    def getText(self):
        """Markup text shown on the about screen (refs handled below)."""
        return ("Hey There!\nThis App was built using "
                "[b][ref=kivy]kivy[/ref][/b]\n"
                "Feel free to look at the source code "
                "[b][ref=source]here[/ref][/b].\n"
                "This app is under the [b][ref=mit]MIT License[/ref][/b]\n"
                )
    def on_ref_press(self, instance, ref):
        """Open the URL bound to a [ref=...] markup link in the browser."""
        _dict = {
            "source": "https://github.com/faresbs/sign-language-tutor",
            "kivy": "http://kivy.org/#home",
            "mit": "https://github.com/faresbs/sign-language-tutor/blob/master/LICENSE"
        }
        webbrowser.open(_dict[ref])
    def build_config(self, config):
        """Default settings: full volume, music not muted."""
        config.setdefaults("General", {"volume_music": 1, "mute_music": False})
    def build_settings(self, settings):
        """Register the settings panel.

        NOTE(review): json_settings is not defined (its import is commented
        out at the top of the file), so opening settings raises NameError.
        """
        settings.add_json_panel("Sign Language Tutor", self.config,
                                data=json_settings)
    #def on_config_change(self, config, section, key, value):
    #    if key == "volume_music":
    #        self.root.hmi_screen.volume = int(value)
    #    elif key == "mute_music":
    #        self.root.hmi_screen.mute = int(value)
if __name__ == '__main__':
    # Launch the tutor application (blocks until the window is closed).
    KivyTutorApp().run()
| [
"os.listdir",
"random.shuffle",
"webbrowser.open",
"sys.quit",
"kivy.core.window.Window.bind",
"model.load_models",
"kivy.core.audio.SoundLoader.load",
"cv2.imshow",
"kivy.app.App.get_running_app",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"kivy.clock.Clock.schedule_once",
"kivy.utils.get... | [((1019, 1048), 'kivy.utils.get_color_from_hex', 'get_color_from_hex', (['"""#300000"""'], {}), "('#300000')\n", (1037, 1048), False, 'from kivy.utils import get_color_from_hex\n'), ((1701, 1721), 'kivy.properties.ObjectProperty', 'ObjectProperty', (['None'], {}), '(None)\n', (1715, 1721), False, 'from kivy.properties import ObjectProperty\n'), ((9395, 9411), 'kivy.properties.ObjectProperty', 'ObjectProperty', ([], {}), '()\n', (9409, 9411), False, 'from kivy.properties import ObjectProperty\n'), ((9433, 9449), 'kivy.properties.ObjectProperty', 'ObjectProperty', ([], {}), '()\n', (9447, 9449), False, 'from kivy.properties import ObjectProperty\n'), ((2477, 2498), 'os.listdir', 'os.listdir', (['self.path'], {}), '(self.path)\n', (2487, 2498), False, 'import os\n'), ((2641, 2672), 'os.listdir', 'os.listdir', (['self.path_diaologue'], {}), '(self.path_diaologue)\n', (2651, 2672), False, 'import os\n'), ((6062, 6081), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (6078, 6081), False, 'import cv2\n'), ((6270, 6287), 'model.load_models', 'rec.load_models', ([], {}), '()\n', (6285, 6287), True, 'import model as rec\n'), ((7305, 7328), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (7326, 7328), False, 'import cv2\n'), ((11477, 11516), 'kivy.core.window.Window.bind', 'Window.bind', ([], {'on_keyboard': 'self.onBackBtn'}), '(on_keyboard=self.onBackBtn)\n', (11488, 11516), False, 'from kivy.core.window import Window\n'), ((11569, 11600), 'kivy.core.audio.SoundLoader.load', 'SoundLoader.load', (['"""driving.mp3"""'], {}), "('driving.mp3')\n", (11585, 11600), False, 'from kivy.core.audio import SoundLoader\n'), ((12998, 13025), 'webbrowser.open', 'webbrowser.open', (['_dict[ref]'], {}), '(_dict[ref])\n', (13013, 13025), False, 'import webbrowser\n'), ((4707, 4717), 'sys.quit', 'sys.quit', ([], {}), '()\n', (4715, 4717), False, 'import sys\n'), ((6358, 6402), 'cv2.imshow', 'cv2.imshow', (['"""Point you hand at me 
:)"""', 'frame'], {}), "('Point you hand at me :)', frame)\n", (6368, 6402), False, 'import cv2\n'), ((6468, 6532), 'model.predict', 'rec.predict', (['frame', 'class_model', 'detect_model', 'args', 'class_names'], {}), '(frame, class_model, detect_model, args, class_names)\n', (6479, 6532), True, 'import model as rec\n'), ((7578, 7588), 'sys.quit', 'sys.quit', ([], {}), '()\n', (7586, 7588), False, 'import sys\n'), ((10409, 10446), 'kivy.clock.Clock.schedule_once', 'Clock.schedule_once', (['self.dismiss', '(10)'], {}), '(self.dismiss, 10)\n', (10428, 10446), False, 'from kivy.clock import Clock\n'), ((3344, 3364), 'random.shuffle', 'shuffle', (['self.states'], {}), '(self.states)\n', (3351, 3364), False, 'from random import shuffle\n'), ((6643, 6657), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (6654, 6657), False, 'import cv2\n'), ((10807, 10828), 'kivy.app.App.get_running_app', 'App.get_running_app', ([], {}), '()\n', (10826, 10828), False, 'from kivy.app import App\n')] |
from http.server import HTTPServer, BaseHTTPRequestHandler
from json import dumps as toJSON
from json import loads as formatJSON
class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
    """GET-only JSON API: /<ENTITY>/<CHART> returns chart-ready rows pulled
    from a (MongoDB-style) 'Ministerio'/'Grafica' collection."""
    def do_GET(self):
        # Always answer 200 with CORS enabled; the body depends on the path.
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.send_header('Access-Control-Allow-Origin', '*')
        self.end_headers()
        if self.path != '/favicon.ico':
            db_dump = []
            # Ministry/entity name -> numeric id used to build document _ids.
            entities = {
                'MAGA' : 1,
                'MCD' : 2,
                'MINDEF' : 3,
                'MINEDUC' : 4,
                'MEM' : 5,
                'MINFIN' : 6,
                'MINEX' : 7,
                'MSPAS' : 8,
                'MINTRAB' : 9,
                'USAC' : 10
            }
            # NOTE(review): 'PRIVATE-URL' is a redacted placeholder — as
            # written, myclient is a plain string and the subscript below
            # raises TypeError. The real code presumably builds a MongoClient.
            myclient = ('PRIVATE-URL')
            mydb = myclient['Ministerio']
            mycol = mydb['Grafica']
            # path is '/<ENTITY>/<CHART>': opts[1] = entity, opts[2] = chart id.
            opts = self.path.split('/')
            if opts[2] == '1':
                # Chart 1: expense detail rows, _id = entity*10 + 1.
                for item in mycol.find({ '_id': entities[opts[1]]*10 + 1 }, { '_id': 0, opts[1]: 1}):
                    db_dump.append(['Detalle', 'Egreso'])
                    for i in range(6):
                        db_dump.append([item[opts[1]][i]['detalle'], float(item[opts[1]][i]['egreso'])])
            elif opts[2] == '2':
                # Chart 2: assigned budget (scaled to thousands), _id = entity*10 + 2.
                for item in mycol.find({ '_id': entities[opts[1]]*10 + 2 }, { '_id': 0, opts[1]: 1}):
                    for i in range(6):
                        db_dump.append(float(item[opts[1]][i]['asignado'])/1000)
            elif opts[2] == '4':
                # Chart 4: income detail rows; note the string-form _id here.
                for item in mycol.find({ '_id': str(entities[opts[1]]) + '-4-10' }, { '_id': 0, opts[1]: 1}):
                    db_dump.append(['Detalle', 'Ingreso'])
                    for i in range(6):
                        db_dump.append([item[opts[1]][i]['detalle'], float(item[opts[1]][i]['ingreso'])])
            # Chart 5 is checked separately (treemap-style rows), _id = entity*10 + 5.
            if opts[2] == '5':
                for item in mycol.find({ '_id': entities[opts[1]]*10 + 5 }, { '_id': 0, opts[1]: 1}):
                    db_dump.append(['Location','Parent','Market trade volume (size)','Market increase/decrease (color)'])
                    db_dump.append(['Proveedores', None, 0, 0])
                    for i in range(6):
                        db_dump.append([item[opts[1]][i]['proveedor'],'Proveedores', float(item[opts[1]][i]['debito']), -i*10])
            else:
                pass
            self.wfile.write(toJSON(db_dump).encode('utf-8'))
print( 'SERVER>> started' )
# Listen on all interfaces, port 8080; serve_forever() below blocks.
httpd = HTTPServer(('0.0.0.0', 8080), SimpleHTTPRequestHandler)
httpd.serve_forever() | [
"json.dumps",
"http.server.HTTPServer"
] | [((2523, 2578), 'http.server.HTTPServer', 'HTTPServer', (["('0.0.0.0', 8080)", 'SimpleHTTPRequestHandler'], {}), "(('0.0.0.0', 8080), SimpleHTTPRequestHandler)\n", (2533, 2578), False, 'from http.server import HTTPServer, BaseHTTPRequestHandler\n'), ((2453, 2468), 'json.dumps', 'toJSON', (['db_dump'], {}), '(db_dump)\n', (2459, 2468), True, 'from json import dumps as toJSON\n')] |
from inspect import currentframe
__all__ = ["add_props_to_ns", "add_classprops_to_ns", "classproperty", "props_as_dict"]
def prop_getsetdel(property_name, prefix="_", read_only=False, deletable=False):
    """Build accessor functions for an attribute stored under a prefixed name.

    The accessors operate on ``prefix + property_name`` on the instance.
    The returned tuple matches what ``property(*result)`` expects:

    * writable                 -> (get, set)
    * writable,  deletable     -> (get, set, del)
    * read-only                -> (get,)
    * read-only, deletable     -> (get, None, del)
    """
    storage_name = prefix + property_name

    def getter_func(self):
        return getattr(self, storage_name)

    def setter_func(self, val):
        setattr(self, storage_name, val)

    def deleter_func(self):
        delattr(self, storage_name)

    if read_only:
        # No setter: either (get, None, del) or just (get,).
        return (getter_func, None, deleter_func) if deletable else (getter_func,)
    return (getter_func, setter_func, deleter_func) if deletable \
        else (getter_func, setter_func)
def property_maker(property_name, prefix="_", read_only=False, deletable=False):
    """Return a ``property`` backed by the ``prefix + property_name`` attribute."""
    return property(*prop_getsetdel(property_name, prefix, read_only, deletable))
def classproperty_maker(property_name, prefix="_", read_only=False, deletable=False):
    # Same as property_maker, but builds a classproperty descriptor (defined
    # at the bottom of this module) instead of a plain property.
    pgsd = prop_getsetdel(property_name, prefix, read_only, deletable)
    return classproperty(*pgsd)
def props_as_dict(prop_names, prefix="_", read_only=False, deletable=False):
    """Map each name in *prop_names* to a ready-made ``property`` object."""
    return {
        name: property_maker(name, prefix, read_only, deletable)
        for name in prop_names
    }
def classprops_as_dict(prop_names, prefix="_", read_only=False, deletable=False):
    # Like props_as_dict, but the values are classproperty descriptors.
    l = [(p, classproperty_maker(p, prefix, read_only, deletable)) for p in prop_names]
    return dict(l)
def add_props_to_ns(property_list, prefix="_", read_only=False, deletable=False):
    """Inject a ``property`` for each name in *property_list* into the
    caller's local namespace.

    Intended to be called from inside a class body, where f_locals is the
    class namespace being built (updates there do take effect).
    """
    try:
        frame = currentframe()
        # f_back is the caller's frame; mutate its locals mapping directly.
        callers_ns = frame.f_back.f_locals
        d = props_as_dict(property_list, prefix, read_only, deletable)
        callers_ns.update(d)
    finally:
        # Break the frame reference cycle, per the inspect module docs.
        del frame
    return
def add_classprops_to_ns(property_list, prefix="_", read_only=False, deletable=False):
    """Like add_props_to_ns, but injects classproperty descriptors instead
    of plain properties into the caller's (class-body) namespace."""
    try:
        frame = currentframe()
        # f_back is the caller's frame; mutate its locals mapping directly.
        callers_ns = frame.f_back.f_locals
        d = classprops_as_dict(property_list, prefix, read_only, deletable)
        callers_ns.update(d)
    finally:
        # Break the frame reference cycle, per the inspect module docs.
        del frame
    return
# use within a class definition as:
# add_props_to_ns(["attr1", "attr2"])
# Decorate a class method to get a static method @property,
# if used to access a __private attribute it makes it immutable
class classproperty(property):
    # A property that works on the class itself: __get__ ignores the
    # instance, binds fget as a classmethod on the owner class, and calls it.
    def __get__(self, cls, owner):
        return classmethod(self.fget).__get__(None, owner)()
| [
"inspect.currentframe"
] | [((1908, 1922), 'inspect.currentframe', 'currentframe', ([], {}), '()\n', (1920, 1922), False, 'from inspect import currentframe\n'), ((2221, 2235), 'inspect.currentframe', 'currentframe', ([], {}), '()\n', (2233, 2235), False, 'from inspect import currentframe\n')] |
# coding: utf-8
"""
Sematext Cloud API
API Explorer provides access and documentation for Sematext REST API. The REST API requires the API Key to be sent as part of `Authorization` header. E.g.: `Authorization : apiKey e5f18450-205a-48eb-8589-7d49edaea813`. # noqa: E501
OpenAPI spec version: v3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class AlertNotification(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> declared Swagger type.
    swagger_types = {
        'app_name': 'str',
        'app_type': 'str',
        'back_to_normal': 'bool',
        'create_time': 'str',
        'sent': 'bool',
        'text': 'str',
        'when': 'str'
    }
    # Python attribute name -> JSON key in the API payload.
    attribute_map = {
        'app_name': 'appName',
        'app_type': 'appType',
        'back_to_normal': 'backToNormal',
        'create_time': 'createTime',
        'sent': 'sent',
        'text': 'text',
        'when': 'when'
    }
    def __init__(self, app_name=None, app_type=None, back_to_normal=None, create_time=None, sent=None, text=None, when=None):  # noqa: E501
        """AlertNotification - a model defined in Swagger"""  # noqa: E501
        self._app_name = None
        self._app_type = None
        self._back_to_normal = None
        self._create_time = None
        self._sent = None
        self._text = None
        self._when = None
        self.discriminator = None
        # Only assign fields the caller actually provided; the rest stay None.
        if app_name is not None:
            self.app_name = app_name
        if app_type is not None:
            self.app_type = app_type
        if back_to_normal is not None:
            self.back_to_normal = back_to_normal
        if create_time is not None:
            self.create_time = create_time
        if sent is not None:
            self.sent = sent
        if text is not None:
            self.text = text
        if when is not None:
            self.when = when
    @property
    def app_name(self):
        """Gets the app_name of this AlertNotification.  # noqa: E501
        :return: The app_name of this AlertNotification.  # noqa: E501
        :rtype: str
        """
        return self._app_name
    @app_name.setter
    def app_name(self, app_name):
        """Sets the app_name of this AlertNotification.
        :param app_name: The app_name of this AlertNotification.  # noqa: E501
        :type: str
        """
        self._app_name = app_name
    @property
    def app_type(self):
        """Gets the app_type of this AlertNotification.  # noqa: E501
        :return: The app_type of this AlertNotification.  # noqa: E501
        :rtype: str
        """
        return self._app_type
    @app_type.setter
    def app_type(self, app_type):
        """Sets the app_type of this AlertNotification.
        :param app_type: The app_type of this AlertNotification.  # noqa: E501
        :type: str
        """
        self._app_type = app_type
    @property
    def back_to_normal(self):
        """Gets the back_to_normal of this AlertNotification.  # noqa: E501
        :return: The back_to_normal of this AlertNotification.  # noqa: E501
        :rtype: bool
        """
        return self._back_to_normal
    @back_to_normal.setter
    def back_to_normal(self, back_to_normal):
        """Sets the back_to_normal of this AlertNotification.
        :param back_to_normal: The back_to_normal of this AlertNotification.  # noqa: E501
        :type: bool
        """
        self._back_to_normal = back_to_normal
    @property
    def create_time(self):
        """Gets the create_time of this AlertNotification.  # noqa: E501
        :return: The create_time of this AlertNotification.  # noqa: E501
        :rtype: str
        """
        return self._create_time
    @create_time.setter
    def create_time(self, create_time):
        """Sets the create_time of this AlertNotification.
        :param create_time: The create_time of this AlertNotification.  # noqa: E501
        :type: str
        """
        self._create_time = create_time
    @property
    def sent(self):
        """Gets the sent of this AlertNotification.  # noqa: E501
        :return: The sent of this AlertNotification.  # noqa: E501
        :rtype: bool
        """
        return self._sent
    @sent.setter
    def sent(self, sent):
        """Sets the sent of this AlertNotification.
        :param sent: The sent of this AlertNotification.  # noqa: E501
        :type: bool
        """
        self._sent = sent
    @property
    def text(self):
        """Gets the text of this AlertNotification.  # noqa: E501
        :return: The text of this AlertNotification.  # noqa: E501
        :rtype: str
        """
        return self._text
    @text.setter
    def text(self, text):
        """Sets the text of this AlertNotification.
        :param text: The text of this AlertNotification.  # noqa: E501
        :type: str
        """
        self._text = text
    @property
    def when(self):
        """Gets the when of this AlertNotification.  # noqa: E501
        :return: The when of this AlertNotification.  # noqa: E501
        :rtype: str
        """
        return self._when
    @when.setter
    def when(self, when):
        """Sets the when of this AlertNotification.
        :param when: The when of this AlertNotification.  # noqa: E501
        :type: str
        """
        self._when = when
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models (anything exposing to_dict).
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(AlertNotification, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, AlertNotification):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"six.iteritems"
] | [((5875, 5908), 'six.iteritems', 'six.iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (5888, 5908), False, 'import six\n')] |
"""
Root PlotPlayer Package contains all the data structures, modules and classes used by the
PlotPlayer project.
Public Modules:
* plotplayer - Contains the PlotPlayer interface and functionality; this is the most
common entry point for most usages
Subpackages:
* helpers - Contains various modules containing miscellaneous helper methods
* managers - Contains modules related to managing the plotplayer functionality
* validators - Contains modules related to input and type validation
"""
# Prevent usage of backends other than Tkinter
import matplotlib
matplotlib.use("TkAgg")
| [
"matplotlib.use"
] | [((576, 599), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (590, 599), False, 'import matplotlib\n')] |
from copy import copy
import json
import os
import re
from pprint import pprint as print
from typing import Union
import networkx as nx
import numpy as np
from dpath.util import get
from matplotlib import pyplot as plt
def md_to_dict(text, order, n=0, max_depth=100):
    """Recursively parse a Markdown outline into a nested dict.

    Heading lines ('#'-prefixed) and bullet lines ('-'-prefixed) become
    keys whose values are the dicts parsed from their indented children;
    leaves map to {}.  *order* is the flat list of lines produced by
    gen_order; *n* tracks recursion depth (n == 0 handles the document
    title) and recursion stops past *max_depth*.
    """
    try:
        # NOTE(review): maxh is computed but never used below (only minh is);
        # the bare except swallows the ValueError from max() on empty input.
        maxh = max([x.count("#") for x in text.split("\n") if "-" not in x])
    except:
        pass
    try:
        # Shallowest heading level in this fragment; -1 when no headings left.
        minh = min(
            [
                x.count("#")
                for x in text.split("\n")
                if x.count("#") != 0 and "-" not in x
            ]
        )
    except:
        minh = -1
    if n == 0:
        # Top level: peel off the document title, recurse on the remainder.
        title = [x for x in text.split("\n") if "# " in x][0]
        mindmap = {}
        mindmap[title] = md_to_dict(
            text.replace(
                text[text.find(title) : text.find(title) + len(title) + 1], ""
            ).strip(),
            order,
            n + 1,
        )
        return mindmap
    elif text != "" and n <= max_depth:
        if "\n#" in text:
            # Split on the shallowest heading level; each branch recurses.
            mindmap = {}
            for branch in text.split("\n" + "#" * minh + " "):
                if branch.split("\n")[0] not in order:
                    # Re-attach the heading marker when the raw line is not
                    # literally present in the flat order list.
                    mindmap[
                        "#" * minh + " " + branch.split("\n")[0].replace("#", "")
                    ] = md_to_dict("\n".join(branch.split("\n")[1:])[:-1], order, n + 1)
                else:
                    mindmap[branch.split("\n")[0]] = md_to_dict(
                        "\n".join(branch.split("\n")[1:])[:-1], order, n + 1
                    )
            return mindmap
        else:
            # Only bullets remain: split on the outermost '-' indentation.
            l = min([x.find("-") for x in text.split("\n")])
            mindmap = {}
            for branch in text.split("\n" + " " * l + "-"):
                key = branch.split("\n")[0].strip()
                if key[0] == "-":
                    key = key[1:].strip()
                mindmap[key] = md_to_dict(
                    "\n".join(branch.split("\n")[1:]), order, n + 1
                )
            return mindmap
    else:
        # Empty fragment or depth limit reached: leaf node.
        return {}
def get_maxh(text):
    """Return the deepest Markdown heading level (0-6) present in *text*.

    Probes '#', '##', ... as substrings and returns level - 1 for the first
    level not found; 0 means no headings at all.

    Fixes over the original: the unreachable ``break`` after ``return`` is
    removed, and when every level up to '######' occurs the function now
    returns 6 instead of falling through and returning None.
    """
    for level in range(1, 7):
        if "#" * level not in text:
            return level - 1
    # All of '#'..'######' occur: Markdown caps heading depth at 6.
    return 6
def gen_order(text):
    """Return the non-blank lines of *text*, each stripped of surrounding
    whitespace, in document order."""
    stripped_lines = (line.strip() for line in text.split("\n"))
    return [line for line in stripped_lines if line != ""]
def get_config(fpath: str) -> dict:
    """Load an HTML template and split it on ``<!-- $$ -->`` markers into the
    four sections used by dict_to_mindmap: start, text, line, end.

    Each section is right-stripped; the 'a' prefix trick keeps any leading
    whitespace intact while ``strip()`` removes the trailing whitespace.
    """
    with open(fpath, "r") as handle:
        raw_sections = handle.read().split("<!-- $$ -->\n")
    sections = [("a" + part).strip()[1:] for part in raw_sections]
    return dict(zip(["start", "text", "line", "end"], sections))
def find_path(dict_obj, key, order):
    """Return the chain of ancestor keys (root-first) leading to *key*
    inside the nested dict *dict_obj*, walking *order* backwards to find
    the nearest enclosing key at each level.  Raises KeyError when no
    ancestor of *key* exists among the current level's keys.
    """
    idx = order.index(key)
    # NOTE(review): prevkey is assigned but never used.
    prevkey = None
    if idx == 0:
        # The very first line is the root of its own path.
        if key[0] == "-":
            key = key[1:].strip()
        return [key]
    keys = list(dict_obj.keys())
    # Strip a leading bullet marker before matching against this level's keys.
    if key[0] == "-":
        tmpkey = key[1:].strip()
    else:
        tmpkey = key
    if tmpkey in keys:
        return [key]
    # Walk backwards through earlier lines to find the nearest ancestor
    # present at this level, then recurse into its subtree.
    for org in order[:idx][::-1]:
        if len(org) > 0 and org[0] == "-":
            el = org[1:].strip()
        else:
            el = org
        if el in keys:
            if el == key:
                return [el]
            else:
                return [el] + find_path(
                    dict_obj[el], key, order[order.index(org) + 1 :]
                )
    # for/else: no ancestor found among this level's keys.
    else:
        raise KeyError(key)
def gen_coords(dic: dict, order: Union[list, np.array], maxh=3):
    """Assign layout metadata to every outline line.

    Returns a dict keyed by ``line + "@#$" + index`` (the index suffix makes
    duplicate lines unique) whose values hold the display text, the ancestor
    path from find_path, the heading level as a string ('0' for bullets),
    and x/y pixel coordinates as strings.  x grows with nesting depth;
    y advances more for shallower (larger) headings, scaled by *maxh*.
    """
    coords = {}
    y = 0
    for n, line in enumerate(order):
        hn = 0
        # Heading level: number of '#' on the line ('0' for bullet lines).
        if len(line) > 0 and line.replace("-", "").strip()[0] == "#":
            hn = line.count("#")
        text = line.strip()
        # Strip bullet markers and the '#' prefix to get the display text.
        if text[0] == "-":
            text = text[1:].strip()[hn:].strip()
        else:
            text = text[hn:].strip()
        if len(text) > 0 and text[0] == "-":
            text = text[1:].strip()
        path = find_path(dic, line, order)
        # Vertical advance: headings get extra space proportional to how
        # shallow they are; plain bullets advance a flat 25px.
        if hn != 0:
            y = y + 20 + int(7.5 * (maxh - line.count("#") + 1))
        else:
            y = y + 25
        line += "@#$" + str(n)
        coords[line] = {
            "text": text,
            "path": path,
            "type": str(hn),
            "x": str((len(path) - 1) * 100 + 10),
            "y": str(y),
        }
    return coords
def get_srtiped_ppath(uksorder, value, parent):
    """Find the nearest earlier entry in *uksorder* (a list of
    '<stripped line>@#$<index>' keys) whose line text ends with *parent*;
    falls back to the first entry when none matches.

    Bullet markers are stripped from both *value* and *parent* first.
    (Name typo 'srtiped' is kept for API compatibility.)
    """
    if len(value) > 0 and value[0] == "-":
        value = value[1:].strip()
    if len(parent) > 0 and parent[0] == "-":
        parent = parent[1:].strip()
    idx = uksorder.index(value)
    # Scan backwards from value's position for an exact suffix match.
    for val in uksorder[:idx][::-1]:
        if parent == val.split("@#$")[0][-len(parent) :]:
            return val
    # for/else: no match found before value.
    else:
        return uksorder[0]
def get_ppath(ukorder, uksorder, value, parent):
    """Like get_srtiped_ppath, but scans the *unstripped* key list
    *ukorder* and uses a containment ('in') match against the suffix
    instead of exact equality; falls back to ukorder[0] when no match.
    """
    if len(value) > 0 and value[0] == "-":
        value = value[1:].strip()
    if len(parent) > 0 and parent[0] == "-":
        parent = parent[1:].strip()
    idx = uksorder.index(value)
    for val in ukorder[:idx][::-1]:
        if parent in val.split("@#$")[0][-len(parent) :]:
            # NOTE(review): leftover debug output (print is pprint here).
            print(parent + " | " + val)
            return val
    # for/else: no match found before value.
    else:
        return ukorder[0]
def dict_to_mindmap(
    dic: dict, order: Union[list, np.array], name=None, config=None, n=0, maxh=3
):
    """Render the parsed outline *dic* / *order* into an HTML mind map.

    *config* supplies four template fragments (start/text/line/end, see
    get_config) with $1..$5 placeholders; *maxh* is the deepest heading
    level (see get_maxh).  *name* and *n* are currently unused.
    Returns the assembled HTML string.
    """
    if config == None:
        config = get_config("base.html")
    elif type(config) == str:
        config = get_config(config)
    coords = gen_coords(dic, order, maxh=maxh)
    # Document shell: $1 = title text, $2 = total height, $3 = width.
    html = (
        config["start"]
        .replace("$1", coords[order[0] + "@#$0"]["text"])
        .replace(
            "$2", str(int(coords[order[-1] + "@#$" + str(len(order) - 1)]["y"]) + 100)
        )
        .replace("$3", "1920")
    )
    # Parallel key lists: raw lines and bullet-stripped lines, each made
    # unique with the '@#$<index>' suffix used by gen_coords.
    stripedorder = [x[1:].strip() if x[0] == "-" else x for x in order]
    unic_key_order = [x + "@#$" + str(n) for n, x in enumerate(order)]
    unic_key_striped_order = [x + "@#$" + str(n) for n, x in enumerate(stripedorder)]
    for n, org in enumerate(order):
        post = "@#$" + str(n)
        org += post
        # NOTE(review): coordsval is assigned but never used.
        coordsval = coords[org]
        # Parent label: second-to-last path element (or the root for the
        # first/shallow entries).
        if len(coords[org]["path"]) == 0:
            ppath = order[0]
        elif len(coords[org]["path"]) < 2:
            ppath = 0
            ppath = coords[org]["path"][ppath]
        else:
            ppath = -2
            ppath = coords[org]["path"][ppath]
        # NOTE(review): el is assigned but never used.
        if len(org) > 0 and org[0] == "-":
            el = org[1:].strip()
        else:
            el = org
        # Node label: $1/$2 = x/y, $3 = heading level, $4 = text.
        html += "\n" + config["text"].replace("$1", coords[org]["x"]).replace(
            "$2", coords[org]["y"]
        ).replace("$3", coords[org]["type"]).replace("$4", coords[org]["text"])
        # Horizontal connector to the node, styled by the parent's level.
        html += "\n" + config["line"].replace(
            "$1", str(int(coords[org]["x"]) - 50)
        ).replace("$2", str(int(coords[org]["y"]) - 7)).replace(
            "$3", str(int(coords[org]["x"]) - 10)
        ).replace(
            "$4", str(int(coords[org]["y"]) - 7)
        ).replace(
            "$5",
            get_l_type(
                coords[
                    order[
                        unic_key_striped_order.index(
                            get_srtiped_ppath(unic_key_striped_order, org, ppath)
                        )
                    ]
                    + "@#$"
                    + str(
                        unic_key_striped_order.index(
                            get_srtiped_ppath(unic_key_striped_order, org, ppath)
                        )
                    )
                ]["type"]
            ),
        )
        norg = order[(n + 1) % len(order)] + "@#$" + str((n + 1) % len(order))
        # When the next entry is shallower, draw the vertical connector back
        # up to this node's parent.
        if len(coords[org]["path"]) > len(coords[norg]["path"]):
            html += "\n" + config["line"].replace(
                "$1", str(int(coords[org]["x"]) - 50)
            ).replace(
                "$2",
                str(
                    int(int(
                        coords[
                            order[
                                unic_key_striped_order.index(
                                    get_srtiped_ppath(
                                        unic_key_striped_order, org, ppath
                                    )
                                )
                            ]
                            + "@#$"
                            + str(
                                unic_key_striped_order.index(
                                    get_srtiped_ppath(
                                        unic_key_striped_order, org, ppath
                                    )
                                )
                            )
                        ]["y"]
                    )
                    + (
                        int(
                            coords[
                                get_ppath(
                                    unic_key_order, unic_key_striped_order, org, ppath
                                )
                            ]["type"]
                        )
                        != 0
                    )*(
                        maxh
                        - int(
                            coords[
                                get_ppath(
                                    unic_key_order, unic_key_striped_order, org, ppath
                                )
                            ]["type"]
                        )
                    )
                    % maxh * 2)+(
                        int(
                            coords[
                                get_ppath(
                                    unic_key_order, unic_key_striped_order, org, ppath
                                )
                            ]["type"]
                        )
                        == 0
                    )*(3)
                ),
            ).replace(
                "$3", str(int(coords[org]["x"]) - 50)
            ).replace(
                "$4", str(int(coords[org]["y"]) - 7)
            ).replace(
                "$5",
                get_l_type(
                    coords[
                        order[
                            unic_key_striped_order.index(
                                get_srtiped_ppath(unic_key_striped_order, org, ppath)
                            )
                        ]
                        + "@#$"
                        + str(
                            unic_key_striped_order.index(
                                get_srtiped_ppath(unic_key_striped_order, org, ppath)
                            )
                        )
                    ]["type"]
                ),
            )
    return html + "\n" + config["end"]
def get_l_type(type):
    """Map a heading level (int or numeric str) to its line-style code.

    Level 0 (plain bullet lines) maps to "9"; heading level k maps to
    str(16 - k).
    """
    level = int(type)
    return "9" if level == 0 else str(16 - level)
if __name__ == "__main__":
file = "test.md"
with open(file, "r") as f:
text = f.read()
order = gen_order(text)
dic = md_to_dict("\n".join([x for x in text.split("\n") if x != ""]), order)
maxh = get_maxh(text)
html = dict_to_mindmap(dic, order, maxh=maxh)
with open(file[:-3] + ".json", "w") as f:
json.dump(dic, f, indent=1)
with open(file[:-3] + ".html", "w") as f:
f.write(html)
| [
"pprint.pprint",
"json.dump"
] | [((10819, 10846), 'json.dump', 'json.dump', (['dic', 'f'], {'indent': '(1)'}), '(dic, f, indent=1)\n', (10828, 10846), False, 'import json\n'), ((4858, 4885), 'pprint.pprint', 'print', (["(parent + ' | ' + val)"], {}), "(parent + ' | ' + val)\n", (4863, 4885), True, 'from pprint import pprint as print\n')] |
"""
This script contains several car-following control models for flow-controlled
vehicles.
Controllers can have their output delayed by some duration. Each controller
includes functions
get_accel(self, env) -> acc
- using the current state of the world and existing parameters,
uses the control model to return a vehicle acceleration.
reset_delay(self) -> None
- clears the queue of acceleration outputs used to generate
delayed output. used when the experiment is reset to clear out
old actions based on old states.
"""
import random
import math
from flow.controllers.base_controller import BaseController
import collections
import numpy as np
class CFMController(BaseController):
    """Car-following model (CFM) controller.

    Acceleration is a weighted sum of three linear terms: headway error
    (k_d), speed difference to the leader (k_v), and deviation from the
    desired speed (k_c).  Outputs are passed through a FIFO queue to model
    actuation delay of tau/dt simulation steps.
    """
    def __init__(self, veh_id, k_d=1, k_v=1, k_c=1, d_des=1, v_des=8,
                 accel_max=20, decel_max=-5, tau=0.5, dt=0.1, noise=0):
        """
        Instantiates a CFM controller
        Attributes
        ----------
        veh_id: str
            Vehicle ID for SUMO identification
        k_d: float
            headway gain (default: 1)
        k_v: float, optional
            gain on difference between lead velocity and current (default: 1)
        k_c: float, optional
            gain on difference from desired velocity to current (default: 1)
        d_des: float, optional
            desired headway (default: 1)
        v_des: float, optional
            desired velocity (default: 8)
        accel_max: float
            max acceleration (default: 20)
        decel_max: float
            max deceleration (default: -5)
        tau: float, optional
            time delay (default: 0)
        dt: float, optional
            timestep (default: 0.1)
        noise: float, optional
            std dev of normal perturbation to the acceleration (default: 0)
        """
        # Delay is expressed in simulation steps (tau seconds / dt per step).
        controller_params = {"delay": tau/dt, "max_deaccel": decel_max,
                             "noise": noise}
        BaseController.__init__(self, veh_id, controller_params)
        self.veh_id = veh_id
        self.k_d = k_d
        self.k_v = k_v
        self.k_c = k_c
        self.d_des = d_des
        self.v_des = v_des
        self.accel_max = accel_max
        # FIFO of pending accelerations used to realize the actuation delay.
        self.accel_queue = collections.deque()
    def get_accel(self, env):
        """Return the (delayed) CFM acceleration for this vehicle, capped at
        accel_max; returns accel_max outright when there is no leader."""
        lead_id = env.vehicles.get_leader(self.veh_id)
        if not lead_id:  # no car ahead
            return self.accel_max
        lead_vel = env.vehicles.get_speed(lead_id)
        this_vel = env.vehicles.get_speed(self.veh_id)
        d_l = env.vehicles.get_headway(self.veh_id)
        # Linear CFM law: headway term + relative-speed term + desired-speed term.
        acc = self.k_d*(d_l - self.d_des) + self.k_v*(lead_vel - this_vel) + \
            self.k_c*(self.v_des - this_vel)
        while len(self.accel_queue) <= self.delay:
            # Some behavior here for initial states - extrapolation, dumb
            # filling (currently), etc
            self.accel_queue.appendleft(acc)
        return min(self.accel_queue.pop(), self.accel_max)
    def reset_delay(self, env):
        """Clear queued accelerations (called on experiment reset so stale
        actions from the previous episode are discarded)."""
        self.accel_queue.clear()
class BCMController(BaseController):
    """Bilateral car-following model (BCM) controller.

    Unlike a purely forward-looking model, the commanded acceleration
    balances the vehicle between its leader and its follower, using both
    the front and rear gaps and both relative speeds.
    """

    def __init__(self, veh_id, k_d=1, k_v=1, k_c=1, d_des=1, v_des=8,
                 accel_max=15, decel_max=-5, tau=0.5, dt=0.1, noise=0):
        """Instantiate a bilateral car-following controller.

        Parameters
        ----------
        veh_id : str
            vehicle ID for SUMO identification
        k_d : float
            gain on the distances to the lead/following cars (default: 1)
        k_v : float
            gain on the vehicle velocity differences (default: 1)
        k_c : float
            gain on the desired/current velocity difference (default: 1)
        d_des : float
            desired headway (default: 1)
        v_des : float
            desired velocity (default: 8)
        accel_max : float
            maximum acceleration (default: 15)
        decel_max : float
            maximum deceleration (default: -5)
        tau : float
            actuation time delay in seconds (default: 0.5)
        dt : float
            simulation timestep (default: 0.1)
        noise : float
            std dev of the normal perturbation on the acceleration (default: 0)
        """
        BaseController.__init__(
            self, veh_id,
            {"delay": tau / dt, "max_deaccel": decel_max, "noise": noise})
        self.veh_id = veh_id
        self.k_d = k_d
        self.k_v = k_v
        self.k_c = k_c
        self.d_des = d_des
        self.v_des = v_des
        self.accel_max = accel_max
        self.accel_queue = collections.deque()

    def get_accel(self, env):
        """Return the (possibly delayed) bilateral acceleration command.

        From the paper: additional control rules could also take into
        account minimum safe separation, relative speeds, speed limits,
        weather and lighting conditions, traffic density and traffic
        advisories.
        """
        leader = env.vehicles.get_leader(self.veh_id)
        if not leader:  # no vehicle ahead: accelerate freely
            return self.accel_max

        v_lead = env.vehicles.get_speed(leader)
        v_self = env.vehicles.get_speed(self.veh_id)
        follower = env.vehicles.get_follower(self.veh_id)
        v_trail = env.vehicles.get_speed(follower)
        front_gap = env.vehicles.get_headway(self.veh_id)
        rear_gap = env.vehicles.get_headway(follower)

        command = (self.k_d * (front_gap - rear_gap)
                   + self.k_v * ((v_lead - v_self) - (v_self - v_trail))
                   + self.k_c * (self.v_des - v_self))

        # Pad the delay queue (dumb filling for the initial steps).
        while len(self.accel_queue) <= self.delay:
            self.accel_queue.appendleft(command)
        return min(self.accel_queue.pop(), self.accel_max)

    def reset_delay(self, env):
        """Drop any queued (stale) acceleration commands."""
        self.accel_queue.clear()
class OVMController(BaseController):
    """Optimal Velocity Model (OVM) controller with actuation delay.

    The acceleration relaxes the vehicle toward a headway-dependent
    optimal velocity V(h) while also tracking the leader's relative
    speed.
    """

    def __init__(self, veh_id, alpha=1, beta=1, h_st=2, h_go=15, v_max=30,
                 accel_max=15, decel_max=-5, tau=0.5, dt=0.1, noise=0):
        """Instantiate an OVM controller.

        Parameters
        ----------
        veh_id : str
            vehicle ID for SUMO identification
        alpha : float
            gain on the (optimal - current) velocity term (default: 1)
        beta : float
            gain on the leader/self relative-velocity term (default: 1)
        h_st : float
            standstill headway below which V(h) = 0 (default: 2)
        h_go : float
            headway at which V(h) saturates at v_max (default: 15)
        v_max : float
            maximum velocity (default: 30)
        accel_max : float
            maximum acceleration (default: 15)
        decel_max : float
            maximum deceleration (default: -5)
        tau : float
            actuation time delay in seconds (default: 0.5)
        dt : float
            simulation timestep (default: 0.1)
        noise : float
            std dev of the normal perturbation on the acceleration (default: 0)
        """
        BaseController.__init__(
            self, veh_id,
            {"delay": tau / dt, "max_deaccel": decel_max, "noise": noise})
        self.accel_queue = collections.deque()
        self.decel_max = decel_max
        self.accel_max = accel_max
        self.veh_id = veh_id
        self.v_max = v_max
        self.alpha = alpha
        self.beta = beta
        self.h_st = h_st
        self.h_go = h_go
        self.tau = tau
        self.dt = dt

    def get_accel(self, env):
        """Return the (possibly delayed) OVM acceleration command."""
        leader = env.vehicles.get_leader(self.veh_id)
        if not leader:  # no vehicle ahead: accelerate freely
            return self.accel_max

        v_lead = env.vehicles.get_speed(leader)
        v_self = env.vehicles.get_speed(self.veh_id)
        h = env.vehicles.get_headway(self.veh_id)

        # Optimal velocity V(h): 0 up to h_st, v_max beyond h_go, smooth
        # cosine ramp in between.
        if h <= self.h_st:
            v_opt = 0
        elif h < self.h_go:
            v_opt = self.v_max / 2 * (
                1 - math.cos(math.pi * (h - self.h_st) /
                             (self.h_go - self.h_st)))
        else:
            v_opt = self.v_max

        command = self.alpha * (v_opt - v_self) + self.beta * (v_lead - v_self)

        # Pad the delay queue (dumb filling for the initial steps).
        while len(self.accel_queue) <= self.delay:
            self.accel_queue.appendleft(command)
        return max(min(self.accel_queue.pop(), self.accel_max),
                   -abs(self.decel_max))

    def reset_delay(self, env):
        """Drop any queued (stale) acceleration commands."""
        self.accel_queue.clear()
class LinearOVM(BaseController):
    """Piecewise-linear Optimal Velocity Model controller.

    Relaxes the current speed toward a piecewise-linear optimal velocity
    V(h) with a fixed adaptation time constant.
    """

    def __init__(self, veh_id, v_max=30, accel_max=15, decel_max=-5,
                 adaptation=0.65, h_st=5, tau=0.5, dt=0.1, noise=0):
        """Instantiate a linear OVM controller.

        Parameters
        ----------
        veh_id : str
            vehicle ID for SUMO identification
        v_max : float
            maximum velocity (default: 30); 4.8*1.85 for case I,
            3.8*1.85 for case II, per Nakayama
        accel_max : float
            maximum acceleration (default: 15)
        decel_max : float
            maximum deceleration (default: -5)
        adaptation : float
            adaptation time constant -- TAU in the Traffic Flow Dynamics
            textbook (default: 0.65)
        h_st : float
            standstill headway (default: 5)
        tau : float
            actuation time delay in seconds (default: 0.5)
        dt : float
            simulation timestep (default: 0.1)
        noise : float
            std dev of the normal perturbation on the acceleration (default: 0)
        """
        BaseController.__init__(
            self, veh_id,
            {"delay": tau / dt, "max_deaccel": decel_max, "noise": noise})
        self.accel_queue = collections.deque()
        self.decel_max = decel_max
        self.acc_max = accel_max
        self.veh_id = veh_id
        self.v_max = v_max
        self.adaptation = adaptation
        self.h_st = h_st
        self.delay_time = tau
        self.dt = dt

    def get_accel(self, env):
        """Return the (possibly delayed) linear-OVM acceleration command."""
        v_self = env.vehicles.get_speed(self.veh_id)
        h = env.vehicles.get_headway(self.veh_id)

        # Piecewise-linear optimal velocity: zero up to h_st, then slope
        # alpha until saturating at v_max.  alpha = 1.689 is the average
        # value from the Nakayama paper.
        alpha = 1.689
        if h < self.h_st:
            v_opt = 0
        elif h <= self.h_st + self.v_max / alpha:
            v_opt = alpha * (h - self.h_st)
        else:
            v_opt = self.v_max

        command = (v_opt - v_self) / self.adaptation

        # Pad the delay queue (dumb filling for the initial steps).
        while len(self.accel_queue) <= self.delay:
            self.accel_queue.appendleft(command)
        return max(min(self.accel_queue.pop(), self.acc_max),
                   -abs(self.decel_max))

    def reset_delay(self, env):
        """Drop any queued (stale) acceleration commands."""
        self.accel_queue.clear()
class IDMController(BaseController):
    """Intelligent Driver Model (IDM) car-following controller."""

    def __init__(self, veh_id, v0=30, T=1, a=1, b=1.5, delta=4, s0=2, s1=0,
                 decel_max=-5, dt=0.1, noise=0):
        """Instantiate an IDM controller.

        Parameters
        ----------
        veh_id : str
            vehicle ID for SUMO identification
        v0 : float
            desirable velocity, in m/s (default: 30)
        T : float
            safe time headway, in s (default: 1); also used as the
            controller's time delay
        a : float
            maximum acceleration, in m/s2 (default: 1)
        b : float
            comfortable deceleration, in m/s2 (default: 1.5)
        delta : float
            acceleration exponent (default: 4)
        s0 : float
            linear jam distance, in m (default: 2)
        s1 : float
            nonlinear jam distance, in m (default: 0)
        decel_max : float
            maximum deceleration, in m/s2 (default: -5)
        dt : float
            timestep, in s (default: 0.1)
        noise : float
            std dev of the normal perturbation on the acceleration (default: 0)
        """
        tau = T  # the time delay is taken to be the safe time headway
        BaseController.__init__(
            self, veh_id,
            {"delay": tau / dt, "max_deaccel": decel_max, "noise": noise})
        self.v0 = v0
        self.T = T
        self.a = a
        self.b = b
        self.delta = delta
        self.s0 = s0
        self.s1 = s1
        self.max_deaccel = decel_max
        self.dt = dt

    def get_accel(self, env):
        """Return the IDM acceleration for the current simulation state."""
        v_self = env.vehicles.get_speed(self.veh_id)
        leader = env.vehicles.get_leader(self.veh_id)
        h = env.vehicles.get_headway(self.veh_id)

        # sumo may register negative headways at intersections/junctions;
        # zeroing them would stop vehicles, so they are kept as-is and only
        # near-zero magnitudes are clamped to avoid dividing by ~0 below.
        if abs(h) < 1e-3:
            h = 1e-3

        if leader is None or leader == '':  # no car ahead
            s_star = 0
        else:
            v_lead = env.vehicles.get_speed(leader)
            # Desired dynamic gap s*(v, dv), floored at zero.
            s_star = self.s0 + max(
                0, v_self * self.T
                + v_self * (v_self - v_lead) / (2 * np.sqrt(self.a * self.b)))

        return self.a * (1 - (v_self / self.v0) ** self.delta
                         - (s_star / h) ** 2)

    def reset_delay(self, env):
        """IDM keeps no delayed-output state; nothing to reset."""
        pass
class RandomController(BaseController):
    """Controller that outputs random, clipped accelerations.

    The constructor signature intentionally mirrors ``IDMController`` so
    the two can be swapped in experiment configurations; only ``a`` (the
    upper acceleration bound and sampling scale) and ``decel_max`` (the
    lower bound) influence the output.

    FIX: the original docstring was a copy-paste of IDMController's and
    wrongly described this class as an IDM controller.
    """

    def __init__(self, veh_id, v0=30, T=1, a=1, b=1.5, delta=4, s0=2, s1=0,
                 decel_max=-5, dt=0.1, noise=0):
        """Instantiate a random-acceleration controller.

        Parameters
        ----------
        veh_id : str
            Vehicle ID for SUMO identification
        v0 : float, optional
            desirable velocity, in m/s (default: 30); unused by the
            random policy, kept for signature compatibility
        T : float, optional
            safe time headway, in s (default: 1); used as the time delay
        a : float, optional
            maximum acceleration and sampling scale, in m/s2 (default: 1)
        b : float, optional
            comfortable deceleration, in m/s2 (default: 1.5); unused
        delta : float, optional
            acceleration exponent (default: 4); unused
        s0 : float, optional
            linear jam distance, in m (default: 2); unused
        s1 : float, optional
            nonlinear jam distance, in m (default: 0); unused
        decel_max : float, optional
            max deceleration, in m/s2 (default: -5)
        dt : float, optional
            timestep, in s (default: 0.1)
        noise : float, optional
            std dev of normal perturbation to the acceleration (default: 0)
        """
        tau = T  # the time delay is taken to be the safe time headway
        controller_params = {"delay": tau / dt, "max_deaccel": decel_max,
                             "noise": noise}
        BaseController.__init__(self, veh_id, controller_params)
        self.v0 = v0
        self.T = T
        self.a = a
        self.b = b
        self.delta = delta
        self.s0 = s0
        self.s1 = s1
        self.max_deaccel = decel_max
        self.dt = dt

    def get_accel(self, env):
        """Sample a normal acceleration, clipped to [decel_max, a].

        The sample is drawn from N(a/2, 2*|a|), independent of the
        simulation state.
        """
        return np.clip(np.random.normal(self.a / 2, 2 * np.abs(self.a)),
                       self.max_deaccel, self.a)

    def reset_delay(self, env):
        """No delayed-output state to clear for this controller."""
        pass
| [
"numpy.abs",
"flow.controllers.base_controller.BaseController.__init__",
"collections.deque",
"numpy.sqrt",
"math.cos"
] | [((1966, 2022), 'flow.controllers.base_controller.BaseController.__init__', 'BaseController.__init__', (['self', 'veh_id', 'controller_params'], {}), '(self, veh_id, controller_params)\n', (1989, 2022), False, 'from flow.controllers.base_controller import BaseController\n'), ((2237, 2256), 'collections.deque', 'collections.deque', ([], {}), '()\n', (2254, 2256), False, 'import collections\n'), ((4403, 4459), 'flow.controllers.base_controller.BaseController.__init__', 'BaseController.__init__', (['self', 'veh_id', 'controller_params'], {}), '(self, veh_id, controller_params)\n', (4426, 4459), False, 'from flow.controllers.base_controller import BaseController\n'), ((4674, 4693), 'collections.deque', 'collections.deque', ([], {}), '()\n', (4691, 4693), False, 'import collections\n'), ((7337, 7393), 'flow.controllers.base_controller.BaseController.__init__', 'BaseController.__init__', (['self', 'veh_id', 'controller_params'], {}), '(self, veh_id, controller_params)\n', (7360, 7393), False, 'from flow.controllers.base_controller import BaseController\n'), ((7421, 7440), 'collections.deque', 'collections.deque', ([], {}), '()\n', (7438, 7440), False, 'import collections\n'), ((9941, 9997), 'flow.controllers.base_controller.BaseController.__init__', 'BaseController.__init__', (['self', 'veh_id', 'controller_params'], {}), '(self, veh_id, controller_params)\n', (9964, 9997), False, 'from flow.controllers.base_controller import BaseController\n'), ((10025, 10044), 'collections.deque', 'collections.deque', ([], {}), '()\n', (10042, 10044), False, 'import collections\n'), ((12657, 12713), 'flow.controllers.base_controller.BaseController.__init__', 'BaseController.__init__', (['self', 'veh_id', 'controller_params'], {}), '(self, veh_id, controller_params)\n', (12680, 12713), False, 'from flow.controllers.base_controller import BaseController\n'), ((15265, 15321), 'flow.controllers.base_controller.BaseController.__init__', 'BaseController.__init__', (['self', 'veh_id', 
'controller_params'], {}), '(self, veh_id, controller_params)\n', (15288, 15321), False, 'from flow.controllers.base_controller import BaseController\n'), ((15610, 15624), 'numpy.abs', 'np.abs', (['self.a'], {}), '(self.a)\n', (15616, 15624), True, 'import numpy as np\n'), ((8250, 8311), 'math.cos', 'math.cos', (['(math.pi * (h - self.h_st) / (self.h_go - self.h_st))'], {}), '(math.pi * (h - self.h_st) / (self.h_go - self.h_st))\n', (8258, 8311), False, 'import math\n'), ((13723, 13747), 'numpy.sqrt', 'np.sqrt', (['(self.a * self.b)'], {}), '(self.a * self.b)\n', (13730, 13747), True, 'import numpy as np\n')] |
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import redirect, render
from django.urls import reverse_lazy
from django.views import View
from django.views.generic import ListView, CreateView, DetailView, UpdateView, DeleteView
from bookstore.news.forms import ArticleForm, ArticleCommentForm
from .models import ArticleComment
from .signals import *
from django.db.models import signals
class ListArticlesView(ListView):
    """Paginated listing of all articles, newest first."""
    template_name = 'articles/news.html'
    context_object_name = 'articles'
    model = Article
    paginate_by = 12

    def get_queryset(self):
        # FIX: ordering is applied at queryset level so pagination slices
        # the ordered set.  The previous get_context_data override replaced
        # the paginated page in the context with the FULL queryset, which
        # silently defeated paginate_by=12.
        return Article.objects.order_by('-date_posted')
class AddArticleView(LoginRequiredMixin, CreateView):
    """Create a new article owned by the logged-in user.

    The ``delete_old_image_on_article_change`` pre_save receiver is
    suppressed during the initial save (a brand-new instance has no old
    image to clean up).
    """
    model = Article
    form_class = ArticleForm
    success_url = reverse_lazy('news')
    template_name = 'articles/add_article.html'

    def post(self, request, *args, **kwargs):
        """Validate and save the article, then redirect to the news list.

        FIX: the receiver is reconnected in a ``finally`` block so an
        exception during save can no longer leave the pre_save signal
        permanently disconnected for the whole process.
        """
        form = ArticleForm(request.POST, request.FILES)
        if not form.is_valid():
            return render(request, 'articles/add_article.html', {'form': form})
        signals.pre_save.disconnect(
            receiver=delete_old_image_on_article_change, sender=Article)
        try:
            article = form.save(commit=False)
            article.user = self.request.user
            article.save()
        finally:
            signals.pre_save.connect(
                receiver=delete_old_image_on_article_change, sender=Article)
        return redirect('news')
class ArticleDetailsView(DetailView):
    """Article page with its comments and a blank comment form."""
    model = Article
    template_name = 'articles/article_details.html'
    context_object_name = 'article'

    def get_context_data(self, **kwargs):
        """Add the comment form, ordered comments, comment count and an
        ownership flag for the current user to the template context."""
        context = super().get_context_data(**kwargs)
        article = context['article']
        context['form'] = ArticleCommentForm()
        context['comments'] = article.articlecomment_set.all().order_by('date_posted')
        context['comments_count'] = article.articlecomment_set.count()
        context['is_owner'] = article.user == self.request.user
        return context
class EditArticleView(LoginRequiredMixin, UpdateView):
    # Standard UpdateView: renders ArticleForm for an existing Article and
    # redirects to the news list on success.
    # NOTE(review): no ownership check — any logged-in user can edit any
    # article; confirm whether that is intended.
    model = Article
    form_class = ArticleForm
    success_url = reverse_lazy('news')
    template_name = 'articles/edit_article.html'
class DeleteArticleView(LoginRequiredMixin, DeleteView):
    # Deletes the article immediately on GET, bypassing DeleteView's usual
    # POST + confirmation-template flow.
    def get(self, request, *args, **kwargs):
        # NOTE(review): a destructive action on GET is CSRF-prone and there
        # is no ownership check; Article.objects.get also raises
        # DoesNotExist (HTTP 500) for unknown pks.  Consider POST-only
        # deletion with get_object_or_404 and an owner check.
        article = Article.objects.get(pk=self.kwargs['pk'])
        article.delete()
        return redirect('news')
class CommentArticleView(LoginRequiredMixin, View):
    """POST-only endpoint that attaches a comment to an article."""
    form_class = ArticleCommentForm

    def post(self, request, *args, **kwargs):
        """Create the comment when the form validates; in either case,
        redirect back to the article's detail page."""
        form = self.form_class(request.POST)
        if form.is_valid():
            article = Article.objects.get(pk=self.kwargs['pk'])
            new_comment = ArticleComment(
                text=form.cleaned_data['text'],
                article=article,
                user=self.request.user,
            )
            new_comment.save()
            return redirect('article details', article.id)
        return redirect('article details', self.kwargs['pk'])
class DeleteArticleCommentView(LoginRequiredMixin, DeleteView):
    # Deletes a comment immediately on GET; 'cpk' is the comment pk and
    # 'apk' the parent article pk used for the redirect.
    def get(self, request, *args, **kwargs):
        # NOTE(review): same concerns as DeleteArticleView — destructive
        # GET, no ownership check, and .get raises DoesNotExist (HTTP 500)
        # for unknown pks.
        comment = ArticleComment.objects.get(pk=self.kwargs['cpk'])
        comment.delete()
        return redirect('article details', self.kwargs['apk'])
| [
"django.shortcuts.render",
"bookstore.news.forms.ArticleForm",
"django.shortcuts.redirect",
"django.db.models.signals.pre_save.connect",
"bookstore.news.forms.ArticleCommentForm",
"django.urls.reverse_lazy",
"django.db.models.signals.pre_save.disconnect"
] | [((891, 911), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""news"""'], {}), "('news')\n", (903, 911), False, 'from django.urls import reverse_lazy\n'), ((2266, 2286), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""news"""'], {}), "('news')\n", (2278, 2286), False, 'from django.urls import reverse_lazy\n'), ((1022, 1062), 'bookstore.news.forms.ArticleForm', 'ArticleForm', (['request.POST', 'request.FILES'], {}), '(request.POST, request.FILES)\n', (1033, 1062), False, 'from bookstore.news.forms import ArticleForm, ArticleCommentForm\n'), ((1900, 1920), 'bookstore.news.forms.ArticleCommentForm', 'ArticleCommentForm', ([], {}), '()\n', (1918, 1920), False, 'from bookstore.news.forms import ArticleForm, ArticleCommentForm\n'), ((2541, 2557), 'django.shortcuts.redirect', 'redirect', (['"""news"""'], {}), "('news')\n", (2549, 2557), False, 'from django.shortcuts import redirect, render\n'), ((3108, 3154), 'django.shortcuts.redirect', 'redirect', (['"""article details"""', "self.kwargs['pk']"], {}), "('article details', self.kwargs['pk'])\n", (3116, 3154), False, 'from django.shortcuts import redirect, render\n'), ((3375, 3422), 'django.shortcuts.redirect', 'redirect', (['"""article details"""', "self.kwargs['apk']"], {}), "('article details', self.kwargs['apk'])\n", (3383, 3422), False, 'from django.shortcuts import redirect, render\n'), ((1103, 1195), 'django.db.models.signals.pre_save.disconnect', 'signals.pre_save.disconnect', ([], {'receiver': 'delete_old_image_on_article_change', 'sender': 'Article'}), '(receiver=delete_old_image_on_article_change,\n sender=Article)\n', (1130, 1195), False, 'from django.db.models import signals\n'), ((1322, 1411), 'django.db.models.signals.pre_save.connect', 'signals.pre_save.connect', ([], {'receiver': 'delete_old_image_on_article_change', 'sender': 'Article'}), '(receiver=delete_old_image_on_article_change,\n sender=Article)\n', (1346, 1411), False, 'from django.db.models import signals\n'), ((1427, 1443), 
'django.shortcuts.redirect', 'redirect', (['"""news"""'], {}), "('news')\n", (1435, 1443), False, 'from django.shortcuts import redirect, render\n'), ((1477, 1537), 'django.shortcuts.render', 'render', (['request', '"""articles/add_article.html"""', "{'form': form}"], {}), "(request, 'articles/add_article.html', {'form': form})\n", (1483, 1537), False, 'from django.shortcuts import redirect, render\n'), ((3052, 3091), 'django.shortcuts.redirect', 'redirect', (['"""article details"""', 'article.id'], {}), "('article details', article.id)\n", (3060, 3091), False, 'from django.shortcuts import redirect, render\n')] |
"""
Unchanged from https://github.com/hughsalimbeni/DGPs_with_IWVI/blob/master/tests/test_gp_layer.py
"""
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.FATAL)
import gpflow
from dgps_with_iwvi.layers import GPLayer
from dgps_with_iwvi.models import DGP_VI
def test_gp_layer():
    """A DGP_VI model with a single GPLayer must agree exactly with
    gpflow's SVGP — same predictive mean / full covariance and same
    marginal-likelihood bound — when both share the kernel, mean
    function, inducing points and variational parameters.
    """
    N = 10001
    M = 100
    Dy = 1
    np.random.seed(0)
    # 1-D inputs, M inducing points, test points Xs; target is sin(10x).
    X = np.linspace(0, 1, N).reshape(-1, 1)
    Z = np.linspace(0, 1, M).reshape(-1, 1)
    Xs = np.linspace(0, 1, N - 1).reshape(-1, 1)
    Y = np.concatenate([np.sin(10 * X), np.cos(10 * X)], 1)[:, 0:1]
    kern = gpflow.kernels.Matern52(1, lengthscales=0.1)
    mean_function = gpflow.mean_functions.Linear(A=np.random.randn(1, Dy))
    lik = gpflow.likelihoods.Gaussian(variance=1e-1)
    # Reference model: plain sparse variational GP.
    m_vgp = gpflow.models.SVGP(X, Y, kern, lik, Z=Z, mean_function=mean_function)
    # Shared (random) variational parameters for both models.
    q_mu = np.random.randn(M, Dy)
    q_sqrt = np.random.randn(Dy, M, M)
    m_vgp.q_mu = q_mu
    m_vgp.q_sqrt = q_sqrt
    m1, v1 = m_vgp.predict_f_full_cov(Xs)
    L1 = m_vgp.compute_log_likelihood()
    # Model under test: one-layer DGP with the same parameters.
    m_dgp = DGP_VI(X, Y, [GPLayer(kern, Z, Dy, mean_function)], lik, num_samples=1)
    m_dgp.layers[0].q_mu = q_mu
    m_dgp.layers[0].q_sqrt = q_sqrt
    m2, v2 = m_dgp.predict_f_full_cov(Xs)
    L2 = m_dgp.compute_log_likelihood()
    # Exact agreement expected (default allclose tolerances).
    np.testing.assert_allclose(L1, L2)
    np.testing.assert_allclose(m1, m2)
    np.testing.assert_allclose(v1, v2)
def test_dgp_zero_inner_layers():
    """A two-layer DGP whose inner layer is driven toward the identity
    map (tiny kernel variance, Identity mean function, near-zero q_sqrt)
    should reproduce a single-layer SVGP up to numerical tolerance.
    """
    N = 10
    Dy = 2
    X = np.linspace(0, 1, N).reshape(-1, 1)
    Xs = np.linspace(0, 1, N - 1).reshape(-1, 1)
    Y = np.concatenate([np.sin(10 * X), np.cos(10 * X)], 1)
    kern = gpflow.kernels.Matern52(1, lengthscales=0.1)
    mean_function = gpflow.mean_functions.Linear(A=np.random.randn(1, 2))
    lik = gpflow.likelihoods.Gaussian(variance=1e-1)
    # Reference model: single-layer SVGP with inducing points at X.
    m_vgp = gpflow.models.SVGP(X, Y, kern, lik, Z=X, mean_function=mean_function)
    q_mu = np.random.randn(N, Dy)
    q_sqrt = np.random.randn(Dy, N, N)
    m_vgp.q_mu = q_mu
    m_vgp.q_sqrt = q_sqrt
    m1, v1 = m_vgp.predict_f_full_cov(Xs)
    # Lower the jitter far below the default — presumably so jitter does
    # not dominate the near-zero-variance inner layer; TODO confirm.
    custom_config = gpflow.settings.get_settings()
    custom_config.numerics.jitter_level = 1e-18
    with gpflow.settings.temp_settings(custom_config):
        # Inner layer: RBF with variance 1e-6 and Identity mean, i.e.
        # approximately the identity map; outer layer matches the SVGP.
        m_dgp = DGP_VI(
            X,
            Y,
            [
                GPLayer(
                    gpflow.kernels.RBF(1, variance=1e-6), X, 1, gpflow.mean_functions.Identity()
                ),
                GPLayer(kern, X, Dy, mean_function),
            ],
            lik,
        )
        m_dgp.layers[-1].q_mu = q_mu
        m_dgp.layers[-1].q_sqrt = q_sqrt
        # Shrink the inner layer's q_sqrt so it contributes ~no variance.
        m_dgp.layers[0].q_sqrt = m_dgp.layers[0].q_sqrt.read_value() * 1e-12
        m2, v2 = m_dgp.predict_f_full_cov(Xs)
        # Loose tolerances: equality only holds in the limit.
        np.testing.assert_allclose(m1, m2, atol=1e-5, rtol=1e-5)
        np.testing.assert_allclose(v1, v2, atol=1e-5, rtol=1e-5)
| [
"gpflow.kernels.Matern52",
"gpflow.settings.get_settings",
"numpy.testing.assert_allclose",
"tensorflow.logging.set_verbosity",
"gpflow.settings.temp_settings",
"numpy.linspace",
"numpy.random.randn",
"gpflow.models.SVGP",
"numpy.random.seed",
"numpy.cos",
"gpflow.kernels.RBF",
"numpy.sin",
... | [((152, 194), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.FATAL'], {}), '(tf.logging.FATAL)\n', (176, 194), True, 'import tensorflow as tf\n'), ((359, 376), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (373, 376), True, 'import numpy as np\n'), ((596, 640), 'gpflow.kernels.Matern52', 'gpflow.kernels.Matern52', (['(1)'], {'lengthscales': '(0.1)'}), '(1, lengthscales=0.1)\n', (619, 640), False, 'import gpflow\n'), ((726, 767), 'gpflow.likelihoods.Gaussian', 'gpflow.likelihoods.Gaussian', ([], {'variance': '(0.1)'}), '(variance=0.1)\n', (753, 767), False, 'import gpflow\n'), ((782, 851), 'gpflow.models.SVGP', 'gpflow.models.SVGP', (['X', 'Y', 'kern', 'lik'], {'Z': 'Z', 'mean_function': 'mean_function'}), '(X, Y, kern, lik, Z=Z, mean_function=mean_function)\n', (800, 851), False, 'import gpflow\n'), ((864, 886), 'numpy.random.randn', 'np.random.randn', (['M', 'Dy'], {}), '(M, Dy)\n', (879, 886), True, 'import numpy as np\n'), ((900, 925), 'numpy.random.randn', 'np.random.randn', (['Dy', 'M', 'M'], {}), '(Dy, M, M)\n', (915, 925), True, 'import numpy as np\n'), ((1300, 1334), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['L1', 'L2'], {}), '(L1, L2)\n', (1326, 1334), True, 'import numpy as np\n'), ((1339, 1373), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['m1', 'm2'], {}), '(m1, m2)\n', (1365, 1373), True, 'import numpy as np\n'), ((1378, 1412), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['v1', 'v2'], {}), '(v1, v2)\n', (1404, 1412), True, 'import numpy as np\n'), ((1638, 1682), 'gpflow.kernels.Matern52', 'gpflow.kernels.Matern52', (['(1)'], {'lengthscales': '(0.1)'}), '(1, lengthscales=0.1)\n', (1661, 1682), False, 'import gpflow\n'), ((1767, 1808), 'gpflow.likelihoods.Gaussian', 'gpflow.likelihoods.Gaussian', ([], {'variance': '(0.1)'}), '(variance=0.1)\n', (1794, 1808), False, 'import gpflow\n'), ((1823, 1892), 'gpflow.models.SVGP', 
'gpflow.models.SVGP', (['X', 'Y', 'kern', 'lik'], {'Z': 'X', 'mean_function': 'mean_function'}), '(X, Y, kern, lik, Z=X, mean_function=mean_function)\n', (1841, 1892), False, 'import gpflow\n'), ((1905, 1927), 'numpy.random.randn', 'np.random.randn', (['N', 'Dy'], {}), '(N, Dy)\n', (1920, 1927), True, 'import numpy as np\n'), ((1941, 1966), 'numpy.random.randn', 'np.random.randn', (['Dy', 'N', 'N'], {}), '(Dy, N, N)\n', (1956, 1966), True, 'import numpy as np\n'), ((2080, 2110), 'gpflow.settings.get_settings', 'gpflow.settings.get_settings', ([], {}), '()\n', (2108, 2110), False, 'import gpflow\n'), ((2168, 2212), 'gpflow.settings.temp_settings', 'gpflow.settings.temp_settings', (['custom_config'], {}), '(custom_config)\n', (2197, 2212), False, 'import gpflow\n'), ((2731, 2789), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['m1', 'm2'], {'atol': '(1e-05)', 'rtol': '(1e-05)'}), '(m1, m2, atol=1e-05, rtol=1e-05)\n', (2757, 2789), True, 'import numpy as np\n'), ((2796, 2854), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['v1', 'v2'], {'atol': '(1e-05)', 'rtol': '(1e-05)'}), '(v1, v2, atol=1e-05, rtol=1e-05)\n', (2822, 2854), True, 'import numpy as np\n'), ((386, 406), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (397, 406), True, 'import numpy as np\n'), ((430, 450), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'M'], {}), '(0, 1, M)\n', (441, 450), True, 'import numpy as np\n'), ((475, 499), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(N - 1)'], {}), '(0, 1, N - 1)\n', (486, 499), True, 'import numpy as np\n'), ((692, 714), 'numpy.random.randn', 'np.random.randn', (['(1)', 'Dy'], {}), '(1, Dy)\n', (707, 714), True, 'import numpy as np\n'), ((1085, 1120), 'dgps_with_iwvi.layers.GPLayer', 'GPLayer', (['kern', 'Z', 'Dy', 'mean_function'], {}), '(kern, Z, Dy, mean_function)\n', (1092, 1120), False, 'from dgps_with_iwvi.layers import GPLayer\n'), ((1480, 1500), 'numpy.linspace', 'np.linspace', 
(['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (1491, 1500), True, 'import numpy as np\n'), ((1525, 1549), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(N - 1)'], {}), '(0, 1, N - 1)\n', (1536, 1549), True, 'import numpy as np\n'), ((1590, 1604), 'numpy.sin', 'np.sin', (['(10 * X)'], {}), '(10 * X)\n', (1596, 1604), True, 'import numpy as np\n'), ((1606, 1620), 'numpy.cos', 'np.cos', (['(10 * X)'], {}), '(10 * X)\n', (1612, 1620), True, 'import numpy as np\n'), ((1734, 1755), 'numpy.random.randn', 'np.random.randn', (['(1)', '(2)'], {}), '(1, 2)\n', (1749, 1755), True, 'import numpy as np\n'), ((540, 554), 'numpy.sin', 'np.sin', (['(10 * X)'], {}), '(10 * X)\n', (546, 554), True, 'import numpy as np\n'), ((556, 570), 'numpy.cos', 'np.cos', (['(10 * X)'], {}), '(10 * X)\n', (562, 570), True, 'import numpy as np\n'), ((2439, 2474), 'dgps_with_iwvi.layers.GPLayer', 'GPLayer', (['kern', 'X', 'Dy', 'mean_function'], {}), '(kern, X, Dy, mean_function)\n', (2446, 2474), False, 'from dgps_with_iwvi.layers import GPLayer\n'), ((2327, 2364), 'gpflow.kernels.RBF', 'gpflow.kernels.RBF', (['(1)'], {'variance': '(1e-06)'}), '(1, variance=1e-06)\n', (2345, 2364), False, 'import gpflow\n'), ((2371, 2403), 'gpflow.mean_functions.Identity', 'gpflow.mean_functions.Identity', ([], {}), '()\n', (2401, 2403), False, 'import gpflow\n')] |
import mne

# Load raw data and mark known-bad channels so later steps exclude them.
raw = mne.io.read_raw_fif('raw.fif', preload=True)  # load data
raw.info['bads'] = ['MEG 2443', 'EEG 053']  # mark bad channels
raw.filter(l_freq=None, h_freq=40.0)  # low-pass filter data

# Extract epochs and save them:
picks = mne.pick_types(raw.info, meg=True, eeg=True, eog=True,
                       exclude='bads')
events = mne.find_events(raw)
# Peak-to-peak rejection thresholds (T/m for grads, T for mags, V for EOG).
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id=1, tmin=-0.2, tmax=0.5,
                    proj=True, picks=picks, baseline=(None, 0),
                    preload=True, reject=reject)

# Compute evoked response and noise covariance (pre-stimulus interval only)
evoked = epochs.average()
cov = mne.compute_covariance(epochs, tmax=0)
evoked.plot()  # plot evoked

# Compute inverse operator:
# FIX: the filename previously contained Unicode minus signs (U+2212)
# instead of ASCII hyphens, so the file could never be found on disk.
fwd_fname = 'sample_audvis-meg-eeg-oct-6-fwd.fif'
fwd = mne.read_forward_solution(fwd_fname, surf_ori=True)
inv = mne.minimum_norm.make_inverse_operator(raw.info, fwd,
                                               cov, loose=0.2)

# Compute inverse solution (dSPM; lambda2 = 1/SNR^2 with SNR = 3):
stc = mne.minimum_norm.apply_inverse(evoked, inv, lambda2=1./9.,
                                      method='dSPM')

# Morph it to average brain for group study and plot it
stc_avg = mne.morph_data('sample', 'fsaverage', stc, 5, smooth=5)
stc_avg.plot()
stc_avg.plot() | [
"mne.minimum_norm.make_inverse_operator",
"mne.morph_data",
"mne.find_events",
"mne.pick_types",
"mne.io.read_raw_fif",
"mne.minimum_norm.apply_inverse",
"mne.Epochs",
"mne.read_forward_solution",
"mne.compute_covariance"
] | [((19, 63), 'mne.io.read_raw_fif', 'mne.io.read_raw_fif', (['"""raw.fif"""'], {'preload': '(True)'}), "('raw.fif', preload=True)\n", (38, 63), False, 'import mne\n'), ((248, 318), 'mne.pick_types', 'mne.pick_types', (['raw.info'], {'meg': '(True)', 'eeg': '(True)', 'eog': '(True)', 'exclude': '"""bads"""'}), "(raw.info, meg=True, eeg=True, eog=True, exclude='bads')\n", (262, 318), False, 'import mne\n'), ((355, 375), 'mne.find_events', 'mne.find_events', (['raw'], {}), '(raw)\n', (370, 375), False, 'import mne\n'), ((441, 575), 'mne.Epochs', 'mne.Epochs', (['raw', 'events'], {'event_id': '(1)', 'tmin': '(-0.2)', 'tmax': '(0.5)', 'proj': '(True)', 'picks': 'picks', 'baseline': '(None, 0)', 'preload': '(True)', 'reject': 'reject'}), '(raw, events, event_id=1, tmin=-0.2, tmax=0.5, proj=True, picks=\n picks, baseline=(None, 0), preload=True, reject=reject)\n', (451, 575), False, 'import mne\n'), ((698, 736), 'mne.compute_covariance', 'mne.compute_covariance', (['epochs'], {'tmax': '(0)'}), '(epochs, tmax=0)\n', (720, 736), False, 'import mne\n'), ((856, 907), 'mne.read_forward_solution', 'mne.read_forward_solution', (['fwd_fname'], {'surf_ori': '(True)'}), '(fwd_fname, surf_ori=True)\n', (881, 907), False, 'import mne\n'), ((916, 985), 'mne.minimum_norm.make_inverse_operator', 'mne.minimum_norm.make_inverse_operator', (['raw.info', 'fwd', 'cov'], {'loose': '(0.2)'}), '(raw.info, fwd, cov, loose=0.2)\n', (954, 985), False, 'import mne\n'), ((1069, 1146), 'mne.minimum_norm.apply_inverse', 'mne.minimum_norm.apply_inverse', (['evoked', 'inv'], {'lambda2': '(1.0 / 9.0)', 'method': '"""dSPM"""'}), "(evoked, inv, lambda2=1.0 / 9.0, method='dSPM')\n", (1099, 1146), False, 'import mne\n'), ((1250, 1305), 'mne.morph_data', 'mne.morph_data', (['"""sample"""', '"""fsaverage"""', 'stc', '(5)'], {'smooth': '(5)'}), "('sample', 'fsaverage', stc, 5, smooth=5)\n", (1264, 1305), False, 'import mne\n')] |
"""
The script that creates the neural network architecture based on the concept.txt. The script uses command-line arguments
for specifying the structure of the network and the hyperparameters for the training.
use ->
User:~$ python Additive_Network --help
for the usage information of this module
"""
from __future__ import print_function
from __future__ import division # this allows for the division to perform true division directly
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import os
# Paths used for the dataset and for model checkpoints (relative to this script).
base_data_path = "../../Data/"
mnist_data_path = os.path.join(base_data_path, "MNIST_data")
base_model_path = "../../Models/IDEA_4/"
# define the tensorflow flags mechanism
flags = tf.app.flags
FLAGS = flags.FLAGS  # parsed command-line flags; the flag definitions live in the __main__ block
# define the function that consturcts the tensorflow computational graph
def mk_graph(img_dim, num_labels, poly_width = 3, depth = 3, hidd_repr_size = 512):
    """ Build the additive (truncated-Taylor-series) network graph.
    img_dim = image dimensions (Note, that the image needs to be flattened out before feeding here)
    num_labels = no_of classes to classify into
    poly_width = the polynomial expansion goes up to degree (poly_width - 1)
    depth = number of stacked polynomial layers
    hidd_repr_size = dimensionality of the hidden representations
    Returns (graph, dict) where the dict exposes the "output", "labels" and
    "input" tensors for later use by the training loop.
    """
    comp_graph = tf.Graph()
    with comp_graph.as_default():
        # step 1: Create the input placeholders for the input to the computation
        with tf.name_scope("Input"):
            # BUG FIX: the image placeholder was also named "Input_Labels",
            # duplicating the labels placeholder name (TF silently renames the
            # second one, which makes TensorBoard confusing).
            tf_input_images = tf.placeholder(tf.float32, shape=(None, img_dim), name="Input_Images")
            tf_input_labels = tf.placeholder(tf.float32, shape=(None, num_labels), name="Input_Labels")
            print("\nInput Placeholder Tensors:", tf_input_images, tf_input_labels)
        # step 2: Construct the network architecture based on the width and the depth specified
        # Note that this is static graph creation
        # There doesn't seem to be any reason for dynamic graph building
        def neural_layer(layer_in, out_dim, step):
            """ A single additive layer:
            out = bias + sum_d matmul(layer_in**d / d!, W_d) for d in [1, poly_width)
            """
            # method to calculate the factorial of a number
            factorial = lambda x: 1 if(x <= 1) else x * factorial(x - 1)
            with tf.variable_scope("neural_layer"+str(step)):
                # additive bias
                bias = tf.get_variable("bias", shape=(out_dim), initializer=tf.zeros_initializer())
                # one weight matrix per polynomial degree
                inp_dim = layer_in.get_shape()[-1]
                weights = [tf.get_variable("weight"+str(i), shape=(inp_dim, out_dim),
                            initializer=tf.contrib.layers.xavier_initializer(seed = FLAGS.seed_value))
                                for i in range(1, poly_width)]
                # attach the summary ops to the biases and weights
                bias_summary = tf.summary.histogram("Layer"+str(step)+"/bias", bias)
                weights_summary = [tf.summary.histogram("Layer"+str(step)+"/"+weight.name, weight)
                                        for weight in weights]
                # accumulate: bias + the weighted polynomial terms
                out = bias
                for degree in range(1, poly_width):
                    out = out + tf.matmul(tf.pow(layer_in, degree) / factorial(degree), weights[degree - 1])
            return out # return the calculated tensor
        # first layer maps straight to the logits when there is no hidden layer
        if(depth > 1):
            lay1_out = neural_layer(tf_input_images, hidd_repr_size, 1)
        else:
            lay1_out = neural_layer(tf_input_images, num_labels, 1)
        # chain the remaining hidden layers
        lay_out = lay1_out # initialize to output of first layer
        for lay_no in range(2, depth):
            lay_out = neural_layer(lay_out, hidd_repr_size, lay_no)
        # define the output layer
        if(depth > 1):
            output = neural_layer(lay_out, num_labels, depth)
        else:
            output = lay1_out
        print("Final output:", output)
    return comp_graph, {"output": output, "labels": tf_input_labels, "input": tf_input_images}
def setup_MNIST_data():
    """ Download (if required) and load MNIST.
    Returns the tuple (train_X, train_Y, dev_X, dev_Y, test_X, test_Y)
    with one-hot encoded labels.
    """
    print("\nDownloading the dataset (if required) ...")
    data = input_data.read_data_sets(mnist_data_path, one_hot=True)
    # unpack the three splits into flat (features, labels) pairs
    return (data.train.images, data.train.labels,
            data.validation.images, data.validation.labels,
            data.test.images, data.test.labels)
def main(_):
    """ The main function for binding the app together.
    Builds the graph from the command-line FLAGS, trains it on MNIST,
    writes TensorBoard summaries and checkpoints the model every epoch.
    """
    # ========================================================================
    # || HYPERPARAMETERS TWEAKABLE THROUGH COMMAND_LINE ARGS
    # ========================================================================
    model_name = str(FLAGS.network_depth) + "-deep-"+str(FLAGS.network_width) + "-wide-"
    model_name += str(FLAGS.hidden_representation_size) + "-hdr-" + str(FLAGS.epochs) + "-epochs"
    no_of_epochs = FLAGS.epochs
    learning_rate = FLAGS.learning_rate
    training_batch_size = FLAGS.batch_size
    # ========================================================================
    # obtain the mnist data for working with
    train_X, train_Y, dev_X, dev_Y, test_X, test_Y = setup_MNIST_data()
    total_train_examples = train_X.shape[0]
    # print a description of the obtained data
    print("\n\nObtained Dataset Information")
    print("Training_set shapes:", train_X.shape, train_Y.shape)
    print("Development_set shapes:", dev_X.shape, dev_Y.shape)
    print("Test_set shapes:", test_X.shape, test_Y.shape)
    # get the computation Graph
    cmp_graph, int_dict = mk_graph(train_X.shape[-1], train_Y.shape[-1],
                    FLAGS.network_width, FLAGS.network_depth, FLAGS.hidden_representation_size)
    # add the training and runner ops to this computation graph
    with cmp_graph.as_default():
        # define the predictions from the output tensor
        output = int_dict["output"]; labels = int_dict["labels"]
        tf_input_images = int_dict["input"]
        with tf.name_scope("Predictions"):
            predictions = tf.nn.softmax(output) # obtain the softmax
        with tf.name_scope("Loss"):
            loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=labels))
            loss_summary = tf.summary.scalar("Loss", loss)
        # define the trainer
        with tf.name_scope("Trainer"):
            optmizer = tf.train.AdamOptimizer(learning_rate)
            train_step = optmizer.minimize(loss)
        # define the accuracy (fraction of correct argmax predictions)
        with tf.name_scope("Accuracy"):
            correct = tf.equal(tf.argmax(predictions, axis=-1), tf.argmax(labels, axis=-1))
            accuracy = tf.reduce_sum(tf.cast(correct, tf.float32)) / tf.cast(tf.shape(labels)[0], tf.float32)
            accuracy_summary = tf.summary.scalar("Accuracy", accuracy)
        # finally define the required errands:
        with tf.name_scope("Errands"):
            init = tf.global_variables_initializer()
            all_sums = tf.summary.merge_all()
    # Run the Session for training the graph
    with tf.Session(graph=cmp_graph) as sess:
        # create a tensorboard writer
        model_save_path = os.path.join(base_model_path, model_name)
        tensorboard_writer = tf.summary.FileWriter(logdir=model_save_path, graph=sess.graph, filename_suffix=".bot")
        # create a saver
        saver = tf.train.Saver(max_to_keep=2)
        # restore the session if the checkpoint exists:
        if(os.path.isfile(os.path.join(model_save_path, "checkpoint"))):
            saver.restore(sess, tf.train.latest_checkpoint(model_save_path))
        else: # initialize all the variables:
            sess.run(init)
        global_step = 0
        print("Starting the training process . . .")
        for epoch in range(no_of_epochs):
            # run through the batches of the data:
            accuracies = [] # initialize this to an empty list
            runs = int((total_train_examples / training_batch_size) + 0.5)
            # BUG FIX: `runs / 10` is true division (this file imports
            # __future__.division), so `batch % checkpoint == 0` was only
            # ever true at batch 0 for non-round values of `runs` and the
            # summaries were written just once per epoch.  Use an integer
            # interval of at least 1 instead.
            checkpoint = max(1, runs // 10)
            for batch in range(runs):
                start = batch * training_batch_size; end = start + training_batch_size
                # extract the relevant data:
                batch_data_X = train_X[start: end]
                batch_data_Y = train_Y[start: end]
                # run one optimization step on this mini-batch
                _, cost, acc, sums = sess.run([train_step, loss, accuracy, all_sums],
                        feed_dict={tf_input_images: batch_data_X,
                            labels: batch_data_Y})
                # append the acc to the accuracies list
                accuracies.append(acc)
                # save the summaries roughly every 10% of an epoch
                if(batch % checkpoint == 0):
                    tensorboard_writer.add_summary(sums, global_step)
                # increment the global step
                global_step += 1
            print("\nepoch = ", epoch, "cost = ", cost)
            # evaluate the accuracy of the whole dataset:
            print("accuracy = ", sum(accuracies) / len(accuracies))
            # evaluate the accuracy for the dev set
            dev_acc = sess.run(accuracy, feed_dict={tf_input_images: dev_X, labels: dev_Y})
            print("dev_accuracy = ", dev_acc)
            # save the model after every epoch
            saver.save(sess, os.path.join(model_save_path, model_name), global_step=(epoch + 10))
        # Once, the training is complete:
        # print the test accuracy:
        acc = sess.run(accuracy, feed_dict={tf_input_images: test_X, labels: test_Y})
        print("Training complete . . .")
        print("Obtained Test accuracy = ", acc)
if(__name__ == "__main__"):
    # use the FLAGS mechanism to parse the arguments and test
    flags.DEFINE_integer("network_width", 2,
            "The highest (degree - 1) till which the taylor series is expanded")
    flags.DEFINE_integer("network_depth", 1,
            "The depth of the composition of the polynomials")
    flags.DEFINE_integer("hidden_representation_size", 512,
            "The size (dimensionality) of the hidden representations")
    flags.DEFINE_integer("seed_value", 3,
            "The seed value for initialization of the weight matrices")
    flags.DEFINE_integer("epochs", 12,
            "The number of epochs for which the model is to be trained")
    # BUG FIX: the learning rate is a float (3e-4); declaring it with
    # DEFINE_integer rejects/truncates the default and AdamOptimizer would
    # get a wrong value.  Use DEFINE_float instead.
    flags.DEFINE_float("learning_rate", 3e-4,
            "The learning rate for Adam Optimizer")
    flags.DEFINE_integer("batch_size", 64,
            "The batch size for Adam in SGD settings")
    tf.app.run(main)
| [
"tensorflow.shape",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.nn.softmax",
"tensorflow.zeros_initializer",
"tensorflow.cast",
"tensorflow.app.run",
"tensorflow.Graph",
"tensorflow.pow",
"tensorflow.Session",
"tensorflow.placeholder",
"tensorflow.nn.softmax_cros... | [((603, 645), 'os.path.join', 'os.path.join', (['base_data_path', '"""MNIST_data"""'], {}), "(base_data_path, 'MNIST_data')\n", (615, 645), False, 'import os\n'), ((1176, 1186), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1184, 1186), True, 'import tensorflow as tf\n'), ((4257, 4313), 'tensorflow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', (['mnist_data_path'], {'one_hot': '(True)'}), '(mnist_data_path, one_hot=True)\n', (4282, 4313), False, 'from tensorflow.examples.tutorials.mnist import input_data\n'), ((10948, 10964), 'tensorflow.app.run', 'tf.app.run', (['main'], {}), '(main)\n', (10958, 10964), True, 'import tensorflow as tf\n'), ((7359, 7386), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'cmp_graph'}), '(graph=cmp_graph)\n', (7369, 7386), True, 'import tensorflow as tf\n'), ((7460, 7501), 'os.path.join', 'os.path.join', (['base_model_path', 'model_name'], {}), '(base_model_path, model_name)\n', (7472, 7501), False, 'import os\n'), ((7531, 7622), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', ([], {'logdir': 'model_save_path', 'graph': 'sess.graph', 'filename_suffix': '""".bot"""'}), "(logdir=model_save_path, graph=sess.graph,\n filename_suffix='.bot')\n", (7552, 7622), True, 'import tensorflow as tf\n'), ((7661, 7690), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(2)'}), '(max_to_keep=2)\n', (7675, 7690), True, 'import tensorflow as tf\n'), ((1316, 1338), 'tensorflow.name_scope', 'tf.name_scope', (['"""Input"""'], {}), "('Input')\n", (1329, 1338), True, 'import tensorflow as tf\n'), ((1370, 1440), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, img_dim)', 'name': '"""Input_Labels"""'}), "(tf.float32, shape=(None, img_dim), name='Input_Labels')\n", (1384, 1440), True, 'import tensorflow as tf\n'), ((1471, 1544), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, num_labels)', 'name': 
'"""Input_Labels"""'}), "(tf.float32, shape=(None, num_labels), name='Input_Labels')\n", (1485, 1544), True, 'import tensorflow as tf\n'), ((6293, 6321), 'tensorflow.name_scope', 'tf.name_scope', (['"""Predictions"""'], {}), "('Predictions')\n", (6306, 6321), True, 'import tensorflow as tf\n'), ((6349, 6370), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['output'], {}), '(output)\n', (6362, 6370), True, 'import tensorflow as tf\n'), ((6406, 6427), 'tensorflow.name_scope', 'tf.name_scope', (['"""Loss"""'], {}), "('Loss')\n", (6419, 6427), True, 'import tensorflow as tf\n'), ((6562, 6593), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Loss"""', 'loss'], {}), "('Loss', loss)\n", (6579, 6593), True, 'import tensorflow as tf\n'), ((6637, 6661), 'tensorflow.name_scope', 'tf.name_scope', (['"""Trainer"""'], {}), "('Trainer')\n", (6650, 6661), True, 'import tensorflow as tf\n'), ((6686, 6723), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (6708, 6723), True, 'import tensorflow as tf\n'), ((6817, 6842), 'tensorflow.name_scope', 'tf.name_scope', (['"""Accuracy"""'], {}), "('Accuracy')\n", (6830, 6842), True, 'import tensorflow as tf\n'), ((7078, 7117), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Accuracy"""', 'accuracy'], {}), "('Accuracy', accuracy)\n", (7095, 7117), True, 'import tensorflow as tf\n'), ((7179, 7203), 'tensorflow.name_scope', 'tf.name_scope', (['"""Errands"""'], {}), "('Errands')\n", (7192, 7203), True, 'import tensorflow as tf\n'), ((7224, 7257), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (7255, 7257), True, 'import tensorflow as tf\n'), ((7281, 7303), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (7301, 7303), True, 'import tensorflow as tf\n'), ((7774, 7817), 'os.path.join', 'os.path.join', (['model_save_path', '"""checkpoint"""'], {}), "(model_save_path, 'checkpoint')\n", (7786, 7817), False, 
'import os\n'), ((6463, 6532), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'output', 'labels': 'labels'}), '(logits=output, labels=labels)\n', (6502, 6532), True, 'import tensorflow as tf\n'), ((6875, 6906), 'tensorflow.argmax', 'tf.argmax', (['predictions'], {'axis': '(-1)'}), '(predictions, axis=-1)\n', (6884, 6906), True, 'import tensorflow as tf\n'), ((6908, 6934), 'tensorflow.argmax', 'tf.argmax', (['labels'], {'axis': '(-1)'}), '(labels, axis=-1)\n', (6917, 6934), True, 'import tensorflow as tf\n'), ((7853, 7896), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['model_save_path'], {}), '(model_save_path)\n', (7879, 7896), True, 'import tensorflow as tf\n'), ((9735, 9776), 'os.path.join', 'os.path.join', (['model_save_path', 'model_name'], {}), '(model_save_path, model_name)\n', (9747, 9776), False, 'import os\n'), ((6973, 7001), 'tensorflow.cast', 'tf.cast', (['correct', 'tf.float32'], {}), '(correct, tf.float32)\n', (6980, 7001), True, 'import tensorflow as tf\n'), ((2325, 2347), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (2345, 2347), True, 'import tensorflow as tf\n'), ((7013, 7029), 'tensorflow.shape', 'tf.shape', (['labels'], {}), '(labels)\n', (7021, 7029), True, 'import tensorflow as tf\n'), ((2574, 2633), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {'seed': 'FLAGS.seed_value'}), '(seed=FLAGS.seed_value)\n', (2610, 2633), True, 'import tensorflow as tf\n'), ((3212, 3233), 'tensorflow.pow', 'tf.pow', (['input', 'degree'], {}), '(input, degree)\n', (3218, 3233), True, 'import tensorflow as tf\n')] |
"""A collection of function for doing my project."""
import time
import random
#Background Introduction
def Intro():
    """Play the scripted opening dialogue, pausing between the lines."""
    script = [
        ("Hello, how are you doing after the long journey? ", 2),
        ("You are now at Pallet Town,Kanto.", 2),
        ("A place where is abounded with amazing creatures we call -Pokemon ", 2),
        ("Here pokemon and people work and live together peacefully", 2),
        ("I am Professor Oak, a Pokemon researcher. ", 2.5),
        ("Now it is your time to start your new own adventure.", 2),
        ("Before you go, I have some gifts for you ", 2),
        ("I have three adorable Pokemon for you. You can pick one of them as your friend! ", 3),
    ]
    for line, pause in script:
        print(line)
        time.sleep(pause)
    print()
def choosePokemon():
    """Prompt until the player names one of the three starters.

    Input is case-insensitive; the chosen name is returned in lower case.
    """
    starters = ("charmander", "squirtle", "bulbasaur")
    choice = ""
    while choice not in starters:  # re-prompt on anything else
        choice = input("Which Pokemon would you like to choose (Charmander or Squirtle or Bulbasaur): ").lower()
    return choice
def chooseSkill1():
    """Prompt until the player picks one of the two water skills.

    Input is case-insensitive; the skill is returned in lower case.
    """
    skill = ""
    while skill not in ("bubble", "aqua tail"):  # re-prompt on anything else
        skill = input("Which Skill will you use against? (Bubble or Aqua Tail): ").lower()
    return skill
def checkPath1(chosenSkill1):
    """Resolve the first battle: the player wins when the chosen
    skill matches a randomly drawn winning skill."""
    for message in ("OK....", "if this is your final decision"):
        print(message)
        time.sleep(2)
    print("Let's see what will happen")
    print()
    time.sleep(2)
    # the winning skill is drawn at random, so the outcome differs per run
    winning_skill = random.choice(["bubble", "aqua tail"])
    if chosenSkill1 == winning_skill:
        print("Oh, that skill seems super effective.")
        print("The enemy is down, you win your first fight!")
        print("Exp+100, Gold+50")
    else:
        print("What a pity")
        print("You missed!")
        print("The enemy fight back and you lose=_=")
def chooseSkill2():
    """Prompt until the player picks one of the two fire skills.

    Unlike the original, input is matched case-insensitively (consistent
    with chooseSkill1); the canonical capitalization ("Ember" /
    "Flame Charge") is returned so checkPath2's comparison still works.
    """
    canonical = {"ember": "Ember", "flame charge": "Flame Charge"}
    while True:
        answer = input("Which Skill will you use against? (Ember or Flame Charge): ")
        skill = canonical.get(answer.lower())
        if skill is not None:
            return skill
def checkPath2(chosenSkill2):
    """Resolve the fire battle: the player wins when the chosen
    skill matches a randomly drawn winning skill."""
    for message in ("OK....", "if this is your final decision"):
        print(message)
        time.sleep(2)
    print("Let's see what will happen")
    print()
    time.sleep(2)
    # the winning skill is drawn at random, so the outcome differs per run
    winning_skill = random.choice(["Ember", "Flame Charge"])
    if chosenSkill2 == winning_skill:
        print("Oh, that skill seems super effective.")
        print("The enemy is down, you win your first fight!")
        print("Exp+100, Gold+50")
    else:
        print("What a pity")
        print("You missed!")
        print("The enemy fight back and you lose=_=")
def chooseSkill3():
    """Prompt until the player picks one of the two grass skills.

    Unlike the original, input is matched case-insensitively (consistent
    with chooseSkill1); the canonical capitalization ("Seed Bomb" /
    "Wine Whip") is returned so checkPath3's comparison still works.
    """
    # NOTE(review): "Wine Whip" is probably a typo for "Vine Whip", but
    # checkPath3 uses the same spelling, so it is kept for consistency.
    canonical = {"seed bomb": "Seed Bomb", "wine whip": "Wine Whip"}
    while True:
        answer = input("Which Skill will you use against? (Seed Bomb or Wine Whip): ")
        skill = canonical.get(answer.lower())
        if skill is not None:
            return skill
def checkPath3(chosenSkill3):
    """Resolve the grass battle: the player wins when the chosen
    skill matches a randomly drawn winning skill."""
    for message in ("OK....", "if this is your final decision"):
        print(message)
        time.sleep(2)
    print("Let's see what will happen")
    print()
    time.sleep(2)
    # the winning skill is drawn at random, so the outcome differs per run
    winning_skill = random.choice(["Wine Whip", "Seed Bomb"])
    if chosenSkill3 == winning_skill:
        print("Oh, that skill seems super effective.")
        print("The enemy is down, you win your first fight!")
        print("Exp+100, Gold+50")
    else:
        print("What a pity")
        print("You missed!")
        print("The enemy fight back and you lose=_=")
| [
"random.choice",
"time.sleep"
] | [((192, 205), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (202, 205), False, 'import time\n'), ((257, 270), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (267, 270), False, 'import time\n'), ((355, 368), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (365, 368), False, 'import time\n'), ((444, 457), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (454, 457), False, 'import time\n'), ((518, 533), 'time.sleep', 'time.sleep', (['(2.5)'], {}), '(2.5)\n', (528, 533), False, 'import time\n'), ((604, 617), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (614, 617), False, 'import time\n'), ((677, 690), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (687, 690), False, 'import time\n'), ((790, 803), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (800, 803), False, 'import time\n'), ((1745, 1758), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1755, 1758), False, 'import time\n'), ((1807, 1820), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1817, 1820), False, 'import time\n'), ((1877, 1890), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1887, 1890), False, 'import time\n'), ((1949, 1970), 'random.choice', 'random.choice', (['List_1'], {}), '(List_1)\n', (1962, 1970), False, 'import random\n'), ((2667, 2680), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2677, 2680), False, 'import time\n'), ((2729, 2742), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2739, 2742), False, 'import time\n'), ((2799, 2812), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2809, 2812), False, 'import time\n'), ((2873, 2894), 'random.choice', 'random.choice', (['List_1'], {}), '(List_1)\n', (2886, 2894), False, 'import random\n'), ((3520, 3533), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (3530, 3533), False, 'import time\n'), ((3582, 3595), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (3592, 3595), False, 'import time\n'), ((3652, 3665), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (3662, 3665), 
False, 'import time\n'), ((3727, 3748), 'random.choice', 'random.choice', (['List_1'], {}), '(List_1)\n', (3740, 3748), False, 'import random\n')] |
from django.conf.urls import include
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import path
# Project URL routes.  Django matches these patterns top-down, so the order
# is significant.
urlpatterns = [
    path("", include("changelogs.urls")),  # main app, mounted at the site root
    path("admin/", admin.site.urls),  # Django admin site
    path("accounts/", include("django.contrib.auth.urls")),  # built-in auth URL set
    path("login/", auth_views.LoginView.as_view()),  # explicit login view
    path("logout/", auth_views.LogoutView.as_view()),  # explicit logout view
    path("oauth/", include("social_django.urls", namespace="social")),  # social (OAuth) login routes
]
| [
"django.conf.urls.include",
"django.contrib.auth.views.LoginView.as_view",
"django.urls.path",
"django.contrib.auth.views.LogoutView.as_view"
] | [((214, 245), 'django.urls.path', 'path', (['"""admin/"""', 'admin.site.urls'], {}), "('admin/', admin.site.urls)\n", (218, 245), False, 'from django.urls import path\n'), ((181, 207), 'django.conf.urls.include', 'include', (['"""changelogs.urls"""'], {}), "('changelogs.urls')\n", (188, 207), False, 'from django.conf.urls import include\n'), ((269, 304), 'django.conf.urls.include', 'include', (['"""django.contrib.auth.urls"""'], {}), "('django.contrib.auth.urls')\n", (276, 304), False, 'from django.conf.urls import include\n'), ((326, 356), 'django.contrib.auth.views.LoginView.as_view', 'auth_views.LoginView.as_view', ([], {}), '()\n', (354, 356), True, 'from django.contrib.auth import views as auth_views\n'), ((379, 410), 'django.contrib.auth.views.LogoutView.as_view', 'auth_views.LogoutView.as_view', ([], {}), '()\n', (408, 410), True, 'from django.contrib.auth import views as auth_views\n'), ((432, 481), 'django.conf.urls.include', 'include', (['"""social_django.urls"""'], {'namespace': '"""social"""'}), "('social_django.urls', namespace='social')\n", (439, 481), False, 'from django.conf.urls import include\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
import tensorflow as tf
import tensorflow_hub as hub
from rasa_nlu.featurizers import Featurizer
class UniversalSentenceEncoderFeaturizer(Featurizer):
    """Appends a universal sentence encoding to the message's text_features."""

    # URL of the TensorFlow Hub Module
    TFHUB_URL = "https://tfhub.dev/google/universal-sentence-encoder/2"

    name = "universal_sentence_encoder_featurizer"
    requires = []
    provides = ["text_features"]
    # filler tokens dropped before encoding (see _clean_stop_words);
    # matching is case-sensitive
    stopwords = set(
        ["thank", "you", "the", "please", "me", "her", "his", "will", "just", "myself", "ourselves", "I", "yes"])
    # puts a space in front of each of these symbols (used by _split_delimiter)
    spliter = re.compile("([#()!><])")

    def __init__(self, component_config):
        """Load the TF-Hub module and build the encoding sub-graph once."""
        super(UniversalSentenceEncoderFeaturizer, self).__init__(component_config)
        sentence_encoder = hub.Module(self.TFHUB_URL)
        # Create a TensorFlow placeholder for the input string
        self.input_string = tf.placeholder(tf.string, shape=[None])
        # Invoke `sentence_encoder` in order to create the encoding tensor
        self.encoding = sentence_encoder(self.input_string)
        # BUG FIX: the original pattern u"([.,!?\"'-<>:;)(])" contained the
        # unintended character RANGE '-<' (0x27-0x3C), which also matched the
        # digits 0-9 and characters like '*', '+' and '/'.  Moving the hyphen
        # to the end of the class makes it a literal hyphen as intended.
        self._WORD_SPLIT = re.compile(u"([.,!?\"'<>:;)(-])")
        self.session = tf.Session()
        self.session.run([tf.global_variables_initializer(),
                          tf.tables_initializer()])

    def train(self, training_data, config, **kwargs):
        """Featurize every training example in place."""
        for example in training_data.training_examples:
            self.process(example)

    def process(self, message, **kwargs):
        """Encode the message text and attach the vector as text_features."""
        # Get the sentence encoding by feeding the message text and computing
        # the encoding tensor.
        raw_text = message.text
        # text = self._split_delimiter(raw_text)
        text = self._clean_stop_words(raw_text)
        # fall back to the raw text when stop-word removal left too little
        if len(text) == 0 or len(text.split()) <= 2:
            text = message.text
        text = self._split(text)
        feature_vector = self.session.run(self.encoding,
                                          {self.input_string: [text]})[0]
        # Concatenate the feature vector with any existing text features
        features = self._combine_with_existing_text_features(message, feature_vector)
        # Set the feature, overwriting any existing `text_features`
        message.set("text_features", features)

    def _split(self, line):
        """Separate the punctuation in _WORD_SPLIT from surrounding words."""
        words = []
        for fragment in line.strip().split():
            for token in re.split(self._WORD_SPLIT, fragment):
                words.append(token)
        return " ".join(words)

    def _clean_stop_words(self, line):
        """Drop tokens listed in `stopwords` (case-sensitive match)."""
        tokens = [token for token in line.split(" ") if token not in self.stopwords]
        return " ".join(tokens)

    def _split_delimiter(self, text):
        """Insert a space before each of #()!>< (currently unused)."""
        return self.spliter.sub(" \\1", text)
| [
"re.split",
"tensorflow_hub.Module",
"re.compile",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.global_variables_initializer",
"tensorflow.tables_initializer"
] | [((757, 781), 're.compile', 're.compile', (['"""([#()!><])"""'], {}), "('([#()!><])')\n", (767, 781), False, 'import re\n'), ((935, 961), 'tensorflow_hub.Module', 'hub.Module', (['self.TFHUB_URL'], {}), '(self.TFHUB_URL)\n', (945, 961), True, 'import tensorflow_hub as hub\n'), ((1053, 1092), 'tensorflow.placeholder', 'tf.placeholder', (['tf.string'], {'shape': '[None]'}), '(tf.string, shape=[None])\n', (1067, 1092), True, 'import tensorflow as tf\n'), ((1255, 1288), 're.compile', 're.compile', (['u"""([.,!?"\'-<>:;)(])"""'], {}), '(u\'([.,!?"\\\'-<>:;)(])\')\n', (1265, 1288), False, 'import re\n'), ((1313, 1325), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1323, 1325), True, 'import tensorflow as tf\n'), ((2507, 2543), 're.split', 're.split', (['self._WORD_SPLIT', 'fragment'], {}), '(self._WORD_SPLIT, fragment)\n', (2515, 2543), False, 'import re\n'), ((1352, 1385), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1383, 1385), True, 'import tensorflow as tf\n'), ((1413, 1436), 'tensorflow.tables_initializer', 'tf.tables_initializer', ([], {}), '()\n', (1434, 1436), True, 'import tensorflow as tf\n')] |
import pandas as pd
import numpy as np
from automl_pn.utils.models import models_list, MODELS_STR_TO_OBJECT
from automl_pn.utils.metrics import score_func, higher_better
from automl_pn.utils.preprocessor import Preprocessor
from sklearn.model_selection import train_test_split
from joblib import Parallel, delayed
class BinaryClassifier:
    """
    Automated binary classifier that automates parts of a basic ml pipeline.

    Includes data preprocessing (scaling, one-hot encoding, feature generation,
    feature selection), training multiple models and choosing the best model.
    """

    def __init__(
            self,
            ensemble: bool = False,
            random_state: int = 42,
            n_jobs: int = -1,
            metric: str = 'roc_auc',
            preprocess_data: bool = True,
            test_size: float = 0.2,
            fill_method='mean',
    ):
        """
        Parameters
        ----------
        ensemble: bool
            Not implemented yet, does nothing
        random_state: int
            Random state
        n_jobs: int
            Number of parallel workers
        metric: str
            Which metric to use. Available metrics : 'roc_auc', 'accuracy', 'f1', 'precision', 'recall'
        preprocess_data: bool
            Set True to use built in data preprocessing
        test_size: float
            Test size used for the internal split when no test data is given
            on fit. Must satisfy 0 < test_size < 1.
        fill_method: str
            Method to fill missing values, only with preprocess_data=True. Available methods: 'mean', 'median', 'ffill',
            'bfill', 'interpolate'
        """
        self._ensemble = ensemble
        self._random_state = random_state
        self._n_jobs = n_jobs
        self._metric = metric
        self._preprocess_data = preprocess_data
        self._verbose_train = True
        self._fill_method = fill_method
        # BUG FIX: the original only assigned self._test_size inside
        # `if test_size:`, so a falsy value (0 or None) left the attribute
        # unset and fit() failed later with an AttributeError.  Validate
        # eagerly and always assign instead.
        if test_size is None or not (0 < test_size < 1):
            raise ValueError(' test_size must be > 0 and < 1')
        self._test_size = test_size
        self.cat_features = None
        self.best_model = None
        self.best_model_name = None
        self.models_list = models_list
        self._preprocessor = Preprocessor(fill_method)
        self.models = {}
        self.models_score = {}
        self._trained = False

    def fit(self,
            X: pd.DataFrame or np.ndarray,
            y: pd.DataFrame or np.ndarray,
            X_test: pd.DataFrame or np.array = None,
            y_test: pd.DataFrame or np.array = None,
            cat_features: [str] = None,
            verbose: bool = True):
        """
        Fit all candidate models and remember the best one.

        Parameters
        ----------
        X: pd.DataFrame or np.ndarray
            features array
        y: pd.DataFrame or np.ndarray
            targets array
        X_test: pd.DataFrame or np.ndarray
            optional test features array
        y_test: pd.DataFrame or np.ndarray
            optional test targets array
        cat_features: [str]
            optional list of categorical features
        verbose: bool
            Set True to have log during training
        """
        self._verbose_train = verbose
        self.cat_features = cat_features
        X_train, y_train, X_test, y_test = self._check_convert_inputs(X, y, X_test, y_test)
        # data preprocessing
        if self._preprocess_data:
            X_train = self._preprocess_train(X_train, y_train)
            X_test = self._preprocess_test(X_test)
        # start from the worst possible score for the chosen metric direction
        if higher_better[self._metric]:
            best_score = -np.inf
        else:
            best_score = np.inf
        # pool for parallel model training
        verb = 100 if self._verbose_train else 0
        pool = Parallel(n_jobs=self._n_jobs, verbose=verb, pre_dispatch='all', backend='multiprocessing')
        # train all candidate models in parallel, then score them
        models = pool(delayed(self._train_model)(X_train, y_train, X_test, y_test, model) for model in self.models_list)
        models_scores = pool(delayed(self._score_model)(model, X_test, y_test) for model in models)
        for i, model_name in enumerate(self.models_list):
            self.models[model_name] = models[i]
            self.models_score[model_name] = models_scores[i]
            score = models_scores[i]
            # keep the model whose score is best w.r.t. the metric direction
            if higher_better[self._metric]:
                is_better = score > best_score
            else:
                is_better = score < best_score
            if is_better:
                best_score = score
                self.best_model_name = model_name
                self.best_model = self.models[model_name]
        print(f'Fit done, best model: {self.best_model_name} with score '
              f'{self._metric}: {self.models_score[self.best_model_name]}')
        self._trained = True

    def _train_model(self, X: pd.DataFrame or np.ndarray,
                     y: pd.DataFrame or np.ndarray,
                     X_test: pd.DataFrame or np.ndarray,
                     y_test: pd.DataFrame or np.ndarray,
                     model: str):
        """
        Return the given model trained on X, y.

        Parameters
        ----------
        X: pd.DataFrame or np.ndarray
        y: pd.DataFrame or np.ndarray
        X_test: pd.DataFrame or np.ndarray
        y_test: pd.DataFrame or np.ndarray
        model: str
            str from models list

        Returns
        -------
        Trained model instance
        """
        # TODO make hyperparameters tuning with hyperopt, etc
        model = MODELS_STR_TO_OBJECT[model]
        model.fit(X, y)
        return model

    def _score_model(self, model, X_test, y_test) -> float:
        """
        Score a trained model on the test split with the configured metric.

        Parameters
        ----------
        model: Object with predict method
        X_test: pd.DataFrame or np.ndarray
        y_test: pd.DataFrame or np.ndarray

        Returns
        -------
        float
        """
        return score_func[self._metric](y_test, model.predict(X_test))

    def _preprocess_train(self,
                          X_train: pd.DataFrame or np.ndarray,
                          y_train: pd.DataFrame or np.ndarray) -> pd.DataFrame:
        """
        Fit the preprocessor on the training data and transform it.

        Parameters
        ----------
        X_train: pd.DataFrame or np.ndarray
        y_train: pd.DataFrame or np.ndarray

        Returns
        -------
        pd.DataFrame
            processed X_train
        """
        X_train = self._preprocessor.fit_transform(X_train, y_train, self.cat_features)
        return X_train

    def _preprocess_test(self, X_test: pd.DataFrame or np.ndarray) -> pd.DataFrame:
        """
        Transform test data with the already-fitted preprocessor.

        Parameters
        ----------
        X_test: pd.DataFrame or np.ndarray

        Returns
        -------
        pd.DataFrame
            processed X_test
        """
        X_test = self._preprocessor.transform(X_test)
        return X_test

    def predict(self, X: pd.DataFrame or np.ndarray) -> np.ndarray:
        """
        Predict classes with the best model found by fit().

        Parameters
        ----------
        X: pd.DataFrame or np.ndarray

        Returns
        -------
        np.ndarray
            array with predicted classes

        Raises
        ------
        NotImplementedError
            If called before fit().
        """
        if not self._trained:
            raise NotImplementedError("First do fit")
        if isinstance(X, np.ndarray):
            X = pd.DataFrame(X, columns=list(range(X.shape[1])))
        if self._preprocess_data:
            X = self._preprocessor.transform(X)
        return self.best_model.predict(X)

    def predict_proba(self, X: pd.DataFrame or np.ndarray) -> np.ndarray:
        """
        Predict class probabilities with the best model found by fit().

        Parameters
        ----------
        X: pd.DataFrame or np.ndarray

        Returns
        -------
        np.ndarray
            array with predicted class probabilities

        Raises
        ------
        NotImplementedError
            If called before fit().
        """
        if not self._trained:
            raise NotImplementedError("First do fit")
        if isinstance(X, np.ndarray):
            X = pd.DataFrame(X, columns=list(range(X.shape[1])))
        if self._preprocess_data:
            X = self._preprocessor.transform(X)
        return self.best_model.predict_proba(X)

    def _check_convert_inputs(self, X, y, X_test, y_test):
        """
        Check input types and do conversion if needed.
        Split X, y to X_train, y_train, X_test, y_test if no test data given.

        Parameters
        ----------
        X
        y
        X_test
        y_test

        Returns
        -------
        X_train, y_train, X_test, y_test
        """
        if isinstance(X, np.ndarray):
            X = pd.DataFrame(X, columns=list(range(X.shape[1])))
        if isinstance(y, pd.DataFrame):
            y = y.values.reshape(-1)
        if isinstance(X_test, np.ndarray):
            X_test = pd.DataFrame(X_test, columns=list(range(X.shape[1])))
        if isinstance(y_test, pd.DataFrame):
            y_test = y_test.values.reshape(-1)
        # BUG FIX: the original used `not X_test` / `not y_test`, which raises
        # "truth value of a DataFrame is ambiguous" whenever test data is
        # actually passed as a DataFrame.  Compare against None explicitly.
        if (X_test is None) != (y_test is None):
            raise AttributeError('X_test and y_test must be both set or unset')
        elif X_test is None:
            X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=self._test_size,
                                                                random_state=self._random_state)
        else:
            X_train, y_train = X, y
        return X_train, y_train, X_test, y_test
| [
"joblib.Parallel",
"joblib.delayed",
"sklearn.model_selection.train_test_split",
"automl_pn.utils.preprocessor.Preprocessor"
] | [((2227, 2252), 'automl_pn.utils.preprocessor.Preprocessor', 'Preprocessor', (['fill_method'], {}), '(fill_method)\n', (2239, 2252), False, 'from automl_pn.utils.preprocessor import Preprocessor\n'), ((3831, 3926), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self._n_jobs', 'verbose': 'verb', 'pre_dispatch': '"""all"""', 'backend': '"""multiprocessing"""'}), "(n_jobs=self._n_jobs, verbose=verb, pre_dispatch='all', backend=\n 'multiprocessing')\n", (3839, 3926), False, 'from joblib import Parallel, delayed\n'), ((9430, 9517), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'self._test_size', 'random_state': 'self._random_state'}), '(X, y, test_size=self._test_size, random_state=self.\n _random_state)\n', (9446, 9517), False, 'from sklearn.model_selection import train_test_split\n'), ((3968, 3994), 'joblib.delayed', 'delayed', (['self._train_model'], {}), '(self._train_model)\n', (3975, 3994), False, 'from joblib import Parallel, delayed\n'), ((4096, 4122), 'joblib.delayed', 'delayed', (['self._score_model'], {}), '(self._score_model)\n', (4103, 4122), False, 'from joblib import Parallel, delayed\n')] |
#!/usr/bin/python3
#-*- coding: utf-8 -*-
from PyQt5.QtWidgets import QApplication
from Main.SFA_main import SFA_window
import sys
if __name__ == "__main__":
    # Bootstrap the Qt application, show the main SFA window, and hand
    # control to the event loop until the user closes the window.
    qt_app = QApplication(sys.argv)
    main_window = SFA_window()
    main_window.show()
    sys.exit(qt_app.exec_())
| [
"Main.SFA_main.SFA_window",
"PyQt5.QtWidgets.QApplication"
] | [((170, 192), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (182, 192), False, 'from PyQt5.QtWidgets import QApplication\n'), ((206, 218), 'Main.SFA_main.SFA_window', 'SFA_window', ([], {}), '()\n', (216, 218), False, 'from Main.SFA_main import SFA_window\n')] |
__author__ = 'Jason'
import itertools
from Graph import *
from GraphStatistics import *
def get_cliques(graph, k, proc_pool):
    """Return every k-clique of *graph*, checking candidates in parallel.

    Each k-combination of node ids is handed to the worker function ``test``
    through *proc_pool*; combinations confirmed as cliques are collected.
    """
    member_ids = list(graph.dictionary)
    tasks = [(combo, graph) for combo in itertools.combinations(member_ids, k)]
    outcomes = proc_pool.map(test, tasks)
    return [combo for is_clique, combo in outcomes if is_clique]
def test(arg):
    """Worker: decide whether a node combination forms a clique.

    *arg* is a ``(combination, graph)`` pair. Each member is checked against
    the members after it only, so every unordered pair is examined once.
    Returns ``(True, combination)`` for a clique, ``(False, None)`` otherwise.
    """
    combination, graph = arg
    members = list(combination)
    for pos, person in enumerate(members):
        neighbours = {edge.edge_end for edge in graph.dictionary[person].links}
        for other in members[pos + 1:]:
            if other not in neighbours:
                return False, None
    return True, combination
def percolation_method(graph, k, proc_pool):
    """Detect communities by clique percolation.

    Builds a meta-graph with one node per k-clique, links cliques sharing at
    least k-1 members, and turns each connected component of that meta-graph
    into a community of sorted, de-duplicated person ids.

    Returns a list of ``(community_number, sorted_person_ids)`` tuples,
    numbered from 1.
    """
    cliques = get_cliques(graph, k, proc_pool)
    clique_graph = Graph()
    for clique_id in range(len(cliques)):
        clique_graph.insert_node(Node(clique_id, [], []))
    # Hyper-edge discovery (clique overlap counting) is farmed out to the pool.
    tasks = [(clique_id, cliques, k) for clique_id in range(len(cliques))]
    for hyper_edges in proc_pool.map(get_hyper_edges, tasks):
        for source, target in hyper_edges:
            clique_graph.insert_edge(source, Edge(target, []))
    communities = []
    components = get_connected_components(clique_graph)
    for number, component in enumerate(components, start=1):
        members = []
        for clique_id in component:
            for person_id in cliques[clique_id]:
                if person_id not in members:
                    members.append(person_id)
        members.sort()
        communities.append((number, members))
    return communities
def get_hyper_edges(arg):
    """Worker: find cliques overlapping a given clique in at least k-1 members.

    *arg* is ``(clique_id, l_cliques, k)``. Returns a list of
    ``(clique_id, other_id)`` pairs, one per sufficiently overlapping clique,
    in the order the cliques appear in *l_cliques*.
    """
    clique_id, cliques, k = arg
    own_clique = cliques[clique_id]
    hyper_edges = []
    for other_id, other_clique in enumerate(cliques):
        if other_id == clique_id:
            continue
        shared = sum(1 for member in own_clique if member in other_clique)
        if shared >= k - 1:
            hyper_edges.append((clique_id, other_id))
    return hyper_edges
| [
"itertools.combinations"
] | [((257, 291), 'itertools.combinations', 'itertools.combinations', (['persons', 'k'], {}), '(persons, k)\n', (279, 291), False, 'import itertools\n')] |
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import argparse
def main():
    """Post a text message to a Cisco Spark room and print the HTTP status code.

    Command-line flags: ``-token`` (bearer access token), ``-room`` (room id)
    and ``-text`` (message body).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-token", default="")
    parser.add_argument("-room")
    parser.add_argument("-text")
    args = parser.parse_args()
    # Spark REST API host.
    hostname = "api.ciscospark.com"
    # Token comes from the command line; never hard-code it in production.
    bearer_token = "Bearer " + args.token
    request_headers = {"Authorization": bearer_token, "content-type": "application/json"}
    post_message_url = "https://" + hostname + "/hydra/api/v1/messages"
    payload = {
        "roomId": args.room,
        "text": args.text
    }
    # NOTE: verify=False disables TLS certificate validation -- kept only for
    # the simplicity of this example; do not do this in production.
    api_response = requests.post(post_message_url, json=payload, headers=request_headers, verify=False)
    print(api_response.status_code)
if __name__ == '__main__':
main() | [
"requests.post",
"argparse.ArgumentParser"
] | [((598, 623), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (621, 623), False, 'import argparse\n'), ((1442, 1517), 'requests.post', 'requests.post', (['post_message_url'], {'json': 'payload', 'headers': 'header', 'verify': '(False)'}), '(post_message_url, json=payload, headers=header, verify=False)\n', (1455, 1517), False, 'import requests\n')] |
# -*- coding: utf-8 -*-
from django.db import models
from django.utils import timezone
from django.conf import settings
class Category(models.Model):
    """ Each blog post belongs to a pre-defined category.
    Categories are set up on the admin panel """
    # Display name of the category; also used as its string representation.
    name = models.CharField(max_length=100)
    def __unicode__(self):
        """Return the category name (Python 2 string representation)."""
        return self.name
class Post(models.Model):
    """
    Each blog post contains its own fields and is linked to a user
    (author) and to a category.
    Blog posts are created on the admin panel.
    Score is calculated on the post_voteup and post_votedown views
    when the user votes on the post_detail page.
    Views are incremented on the post_detail view every time the
    post_detail template is rendered.
    """
    # Author of the post; one user can have many posts.
    author = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='blog')
    title = models.CharField(max_length=255)
    content = models.TextField()
    # Stamped automatically when the row is first created.
    created_date = models.DateTimeField(auto_now_add=True)
    # Stays empty until publish() is called.
    published_date = models.DateTimeField(blank=True, null=True)
    category = models.ForeignKey(Category, related_name='blog')
    # Optional illustration stored under MEDIA_ROOT/blogimage/.
    image = models.ImageField(upload_to="blogimage/", blank=True, null=True)
    # View counter, incremented by the post_detail view.
    views = models.IntegerField(default=0)
    # Net vote score, updated by the post_voteup/post_votedown views.
    score = models.IntegerField(default=0)
    def publish(self):
        """Mark the post as published now and persist it."""
        self.published_date = timezone.now()
        self.save()
    def __unicode__(self):
        """Return the post title (Python 2 string representation)."""
        return self.title
class Vote(models.Model):
    """
    Votes for blog posts are stored in a different model to
    avoid a user voting multiple times.
    Votes are linked to users and posts, allowing only one
    vote per user per post.
    The vote value is saved on the post_voteup and post_votedown
    views when the user votes on the post_detail page.
    """
    # Voting user.
    user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='votes')
    # Post the vote applies to.
    post = models.ForeignKey(Post, related_name='votes')
    # Vote value (default 0); set by the post_voteup/post_votedown views.
    vote = models.IntegerField(default=0)
    def __unicode__(self):
        """Return "<pk>-<post title>" for admin/debug display."""
        return '%d-%s' % (self.pk, self.post.title)
| [
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.IntegerField",
"django.utils.timezone.now",
"django.db.models.ImageField",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((271, 303), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (287, 303), False, 'from django.db import models\n'), ((775, 839), 'django.db.models.ForeignKey', 'models.ForeignKey', (['settings.AUTH_USER_MODEL'], {'related_name': '"""blog"""'}), "(settings.AUTH_USER_MODEL, related_name='blog')\n", (792, 839), False, 'from django.db import models\n'), ((852, 884), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (868, 884), False, 'from django.db import models\n'), ((899, 917), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (915, 917), False, 'from django.db import models\n'), ((937, 976), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (957, 976), False, 'from django.db import models\n'), ((998, 1041), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1018, 1041), False, 'from django.db import models\n'), ((1057, 1105), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Category'], {'related_name': '"""blog"""'}), "(Category, related_name='blog')\n", (1074, 1105), False, 'from django.db import models\n'), ((1118, 1182), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""blogimage/"""', 'blank': '(True)', 'null': '(True)'}), "(upload_to='blogimage/', blank=True, null=True)\n", (1135, 1182), False, 'from django.db import models\n'), ((1195, 1225), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (1214, 1225), False, 'from django.db import models\n'), ((1238, 1268), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (1257, 1268), False, 'from django.db import models\n'), ((1773, 1838), 'django.db.models.ForeignKey', 'models.ForeignKey', 
(['settings.AUTH_USER_MODEL'], {'related_name': '"""votes"""'}), "(settings.AUTH_USER_MODEL, related_name='votes')\n", (1790, 1838), False, 'from django.db import models\n'), ((1850, 1895), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Post'], {'related_name': '"""votes"""'}), "(Post, related_name='votes')\n", (1867, 1895), False, 'from django.db import models\n'), ((1907, 1937), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (1926, 1937), False, 'from django.db import models\n'), ((1323, 1337), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1335, 1337), False, 'from django.utils import timezone\n')] |
import pytest
from lib.durak.draw_pile import DrawPile
@pytest.fixture
def cards():
    """Fixed six-card deck shared by the draw-pile tests (bottom card "2C")."""
    deck = ["10D", "10C", "2S", "5C", "8D", "2C"]
    return deck
@pytest.fixture
def mocked_draw_cards(cards, get_draw_pile_cards):
    """Stub the draw-pile card source so it yields the fixed ``cards`` deck.

    NOTE(review): ``get_draw_pile_cards`` looks like a conftest fixture that
    returns a mock factory -- confirm against the project's conftest.
    """
    mocked = get_draw_pile_cards(cards)
    return mocked
@pytest.fixture
def default_parameters():
    """Baseline keyword arguments for constructing a DrawPile."""
    params = {
        "drawn_cards": [],
        "seed": 0.4,
        "lowest_rank": "2",
    }
    return params
def test_draw_pile_size(default_parameters, mocked_draw_cards):
    """A fresh pile reports all six cards, the bottom card and the trump suit."""
    pile = DrawPile(**default_parameters)
    expected = {
        **default_parameters,
        "drawn_cards": set(),
        "cards_left": 6,
        "last_card": "2C",
        "trump_suit": "clubs",
    }
    assert pile.serialize() == expected
    mocked_draw_cards.assert_called_with(default_parameters["seed"])
def test_drawn_cards(default_parameters, mocked_draw_cards):
    """Cards listed in ``drawn_cards`` are excluded from the remaining count."""
    pile = DrawPile(**{**default_parameters, "drawn_cards": ["10D", "10C"]})
    expected = {
        **default_parameters,
        "drawn_cards": set(["10D", "10C"]),
        "cards_left": 4,
        "last_card": "2C",
        "trump_suit": "clubs",
    }
    assert pile.serialize() == expected
    mocked_draw_cards.assert_called_with(default_parameters["seed"])
def test_lowest_rank_is_six(default_parameters, mocked_draw_cards):
    """With lowest rank "6" the low cards drop out, changing size and trump."""
    pile = DrawPile(**{**default_parameters, "lowest_rank": "6"})
    expected = {
        **default_parameters,
        "drawn_cards": set(),
        "cards_left": 3,
        "last_card": "8D",
        "lowest_rank": "6",
        "trump_suit": "diamonds",
    }
    assert pile.serialize() == expected
    mocked_draw_cards.assert_called_with(default_parameters["seed"])
def test_draw_from_pile(default_parameters, mocked_draw_cards):
    """Drawing returns the top cards and records them as drawn."""
    pile = DrawPile(**default_parameters)
    assert pile.draw(count=2) == ["10D", "10C"]
    expected = {
        **default_parameters,
        "drawn_cards": set(["10D", "10C"]),
        "cards_left": 4,
        "last_card": "2C",
        "trump_suit": "clubs",
    }
    assert pile.serialize() == expected
    mocked_draw_cards.assert_called_with(default_parameters["seed"])
| [
"lib.durak.draw_pile.DrawPile"
] | [((463, 493), 'lib.durak.draw_pile.DrawPile', 'DrawPile', ([], {}), '(**default_parameters)\n', (471, 493), False, 'from lib.durak.draw_pile import DrawPile\n'), ((890, 906), 'lib.durak.draw_pile.DrawPile', 'DrawPile', ([], {}), '(**args)\n', (898, 906), False, 'from lib.durak.draw_pile import DrawPile\n'), ((1313, 1329), 'lib.durak.draw_pile.DrawPile', 'DrawPile', ([], {}), '(**args)\n', (1321, 1329), False, 'from lib.durak.draw_pile import DrawPile\n'), ((1695, 1725), 'lib.durak.draw_pile.DrawPile', 'DrawPile', ([], {}), '(**default_parameters)\n', (1703, 1725), False, 'from lib.durak.draw_pile import DrawPile\n')] |
from datetime import datetime
from email.policy import default
from flask import current_app
from api.extensions import db, Base
class Issue(db.Model):
    """
    This model holds information about an Issue

    An issue belongs to a project (``project_id``) and records its creator
    (``created_by``). It may optionally be assigned to a user
    (``assignee_id``) and/or a team (``team_id``), may reference a related
    record via ``entity_type``/``entity_id``, and carries a category, a
    priority (default "Low"), a status (default "Open") and an optional
    due date.
    """
    __tablename__ = "issue"
    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Short summary of the issue; required.
    title = db.Column(db.String(128), nullable=False)
    # Optional longer description.
    description = db.Column(db.String(256))
    # Owning project; cascades deletes/updates from the project row.
    project_id = db.Column(db.Integer,
                           db.ForeignKey('project.id', ondelete="cascade", onupdate="cascade"),
                           nullable=False)
    # User who created the issue; required.
    created_by = db.Column(db.Integer,
                           db.ForeignKey('user.id', ondelete="cascade", onupdate="cascade"),
                           nullable=False)
    # Optional user the issue is assigned to.
    assignee_id = db.Column(db.Integer,
                            db.ForeignKey('user.id', ondelete="cascade", onupdate="cascade"),
                            nullable=True)
    # Optional team the issue is assigned to.
    team_id = db.Column(db.Integer,
                        db.ForeignKey('team.id', ondelete="cascade", onupdate="cascade"),
                        nullable=True)
    # Free-text classification; required.
    category = db.Column(db.String(20), nullable=False)
    # Priority label; defaults to "Low".
    priority = db.Column(db.String(20), nullable=False, default="Low")
    # Workflow status; defaults to "Open".
    status = db.Column(db.String(20), nullable=False, default="Open")
    # Optional polymorphic reference to a related record.
    entity_type = db.Column(db.String(10))
    entity_id = db.Column(db.Integer)
    # Optional deadline.
    due_date = db.Column(db.DateTime)
    # Timestamps; note datetime.utcnow produces naive UTC datetimes.
    created_at = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
    def __init__(self, title, description, project_id, created_by, category, status="Open", priority="Low", team_id=None, assignee_id=None, entity_type=None, entity_id=None, due_date=None):
        """
        Initialize an Issue with the given attributes.

        Required: title, description, project_id, created_by, category.
        Optional: status ("Open"), priority ("Low"), team_id, assignee_id,
        entity_type, entity_id, due_date.
        """
        self.title = title
        self.description = description
        self.project_id = project_id
        self.created_by = created_by
        self.assignee_id = assignee_id
        self.team_id = team_id
        self.category = category
        self.status = status
        self.priority = priority
        self.entity_type = entity_type
        self.entity_id = entity_id
        self.due_date = due_date
def __repr__(self):
"""
Returns the object representation
"""
return "<Issue(issue_id='%s', issue_title='%s', issue_description='%s')>" % (self.id, self.title, self.description) | [
"api.extensions.db.Column",
"api.extensions.db.String",
"api.extensions.db.ForeignKey"
] | [((268, 307), 'api.extensions.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (277, 307), False, 'from api.extensions import db, Base\n'), ((1368, 1389), 'api.extensions.db.Column', 'db.Column', (['db.Integer'], {}), '(db.Integer)\n', (1377, 1389), False, 'from api.extensions import db, Base\n'), ((1406, 1428), 'api.extensions.db.Column', 'db.Column', (['db.DateTime'], {}), '(db.DateTime)\n', (1415, 1428), False, 'from api.extensions import db, Base\n'), ((1447, 1510), 'api.extensions.db.Column', 'db.Column', (['db.DateTime'], {'nullable': '(False)', 'default': 'datetime.utcnow'}), '(db.DateTime, nullable=False, default=datetime.utcnow)\n', (1456, 1510), False, 'from api.extensions import db, Base\n'), ((1529, 1602), 'api.extensions.db.Column', 'db.Column', (['db.DateTime'], {'default': 'datetime.utcnow', 'onupdate': 'datetime.utcnow'}), '(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)\n', (1538, 1602), False, 'from api.extensions import db, Base\n'), ((331, 345), 'api.extensions.db.String', 'db.String', (['(128)'], {}), '(128)\n', (340, 345), False, 'from api.extensions import db, Base\n'), ((392, 406), 'api.extensions.db.String', 'db.String', (['(256)'], {}), '(256)\n', (401, 406), False, 'from api.extensions import db, Base\n'), ((474, 541), 'api.extensions.db.ForeignKey', 'db.ForeignKey', (['"""project.id"""'], {'ondelete': '"""cascade"""', 'onupdate': '"""cascade"""'}), "('project.id', ondelete='cascade', onupdate='cascade')\n", (487, 541), False, 'from api.extensions import db, Base\n'), ((650, 714), 'api.extensions.db.ForeignKey', 'db.ForeignKey', (['"""user.id"""'], {'ondelete': '"""cascade"""', 'onupdate': '"""cascade"""'}), "('user.id', ondelete='cascade', onupdate='cascade')\n", (663, 714), False, 'from api.extensions import db, Base\n'), ((824, 888), 'api.extensions.db.ForeignKey', 'db.ForeignKey', (['"""user.id"""'], {'ondelete': '"""cascade"""', 'onupdate': 
'"""cascade"""'}), "('user.id', ondelete='cascade', onupdate='cascade')\n", (837, 888), False, 'from api.extensions import db, Base\n'), ((993, 1057), 'api.extensions.db.ForeignKey', 'db.ForeignKey', (['"""team.id"""'], {'ondelete': '"""cascade"""', 'onupdate': '"""cascade"""'}), "('team.id', ondelete='cascade', onupdate='cascade')\n", (1006, 1057), False, 'from api.extensions import db, Base\n'), ((1133, 1146), 'api.extensions.db.String', 'db.String', (['(20)'], {}), '(20)\n', (1142, 1146), False, 'from api.extensions import db, Base\n'), ((1190, 1203), 'api.extensions.db.String', 'db.String', (['(20)'], {}), '(20)\n', (1199, 1203), False, 'from api.extensions import db, Base\n'), ((1260, 1273), 'api.extensions.db.String', 'db.String', (['(20)'], {}), '(20)\n', (1269, 1273), False, 'from api.extensions import db, Base\n'), ((1336, 1349), 'api.extensions.db.String', 'db.String', (['(10)'], {}), '(10)\n', (1345, 1349), False, 'from api.extensions import db, Base\n')] |
def valentine_tellings():
    """Return a randomly chosen valentine telling.

    NOTE(review): the adjacent string literals below concatenate implicitly
    into a single element, so the pool currently holds exactly one telling;
    presumably separate, comma-separated entries were intended -- confirm
    before extending the list.
    """
    import random
    tellings = [
        "\033[1;36m"
        "All of How I met your Mother"
        "\nFlirts in One thread"
        "\033[0m"
        "\n\n"
        "I think you fell from heaven,"
        "\nbecause you look like an angel."
        "\n\n\n"
        "I thought you must have injured yourself"
        "\nwhen you fell from heaven, angel."
        "\n\n\n"
        "I'm awesome."
        "\nYou're awesome."
        "\nLet's be awesome together!"
        "\n\n\n"
        ""
    ]
    return random.choice(tellings)
| [
"random.choice"
] | [((450, 474), 'random.choice', 'random.choice', (['valentine'], {}), '(valentine)\n', (463, 474), False, 'import random\n')] |
from flask_restful import marshal, abort, Resource
from app.models import Schedule2
from app.apiv2.decorators import permission_sudo
from app.apiv2.marshal import tasking_schedule_fields
class MobiusTaskApi(Resource):
    """Sudo-only endpoints for Mobius task processing of schedules."""
    method_decorators = [permission_sudo]

    def get(self, schedule_id):
        """ Peek at a schedule """
        schedule = Schedule2.query.get_or_404(schedule_id)
        return marshal(schedule, tasking_schedule_fields)

    def delete(self, schedule_id):
        """ Mark a task as done """
        schedule = Schedule2.query.get_or_404(schedule_id)
        # Only schedules currently in Mobius processing may be completed.
        if schedule.state != "mobius-processing":
            abort(400)
        schedule.transition_to_published()
        return "{}", 204
| [
"flask_restful.marshal",
"app.models.Schedule2.query.get_or_404",
"flask_restful.abort"
] | [((343, 382), 'app.models.Schedule2.query.get_or_404', 'Schedule2.query.get_or_404', (['schedule_id'], {}), '(schedule_id)\n', (369, 382), False, 'from app.models import Schedule2\n'), ((399, 434), 'flask_restful.marshal', 'marshal', (['s', 'tasking_schedule_fields'], {}), '(s, tasking_schedule_fields)\n', (406, 434), False, 'from flask_restful import marshal, abort, Resource\n'), ((519, 558), 'app.models.Schedule2.query.get_or_404', 'Schedule2.query.get_or_404', (['schedule_id'], {}), '(schedule_id)\n', (545, 558), False, 'from app.models import Schedule2\n'), ((614, 624), 'flask_restful.abort', 'abort', (['(400)'], {}), '(400)\n', (619, 624), False, 'from flask_restful import marshal, abort, Resource\n')] |
import os, sys
def main():
    """Print the first line of each round's accuracy.json under sys.argv[1].

    Round results are expected in subdirectories named "1", "2", ... of the
    given path; entries without a directory or an accuracy.json are skipped.
    """
    base_path = sys.argv[1]
    total_rounds = len(os.listdir(base_path))
    for round_id in range(1, total_rounds + 1):
        round_dir = os.path.join(base_path, str(round_id))
        if not os.path.isdir(round_dir):
            continue
        acc_file = os.path.join(round_dir, "accuracy.json")
        if not os.path.exists(acc_file):
            continue
        with open(acc_file) as handle:
            first_line = handle.readline()
            print(round_id, first_line)


if __name__ == "__main__":
    main()
| [
"os.listdir",
"os.path.isdir",
"os.path.join"
] | [((170, 193), 'os.path.isdir', 'os.path.isdir', (['acc_path'], {}), '(acc_path)\n', (183, 193), False, 'import os, sys\n'), ((84, 100), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (94, 100), False, 'import os, sys\n'), ((225, 264), 'os.path.join', 'os.path.join', (['acc_path', '"""accuracy.json"""'], {}), "(acc_path, 'accuracy.json')\n", (237, 264), False, 'import os, sys\n'), ((293, 332), 'os.path.join', 'os.path.join', (['acc_path', '"""accuracy.json"""'], {}), "(acc_path, 'accuracy.json')\n", (305, 332), False, 'import os, sys\n')] |