Dataset columns (type and observed range; ⌀ marks nullable columns):

| Column | Type | Range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 2 to 1.02M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 245 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 245 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 245 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 2 to 1.02M |
| avg_line_length | float64 | 1 to 417k |
| max_line_length | int64 | 1 to 987k |
| alphanum_fraction | float64 | 0 to 1 |
| content_no_comment | string | length 0 to 1.01M |
| is_comment_constant_removed | bool | 1 class |
| is_sharp_comment_removed | bool | 1 class |
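The rows that follow are a preview of records in this schema. As a minimal sketch of how such a split could be loaded and filtered with the `datasets` library (the dataset identifier below is a placeholder, not the real name), assuming the column names in the table above:

```python
# Minimal sketch, assuming the schema above; "org/python-source-corpus" is a
# placeholder identifier, not the actual dataset name.
from datasets import load_dataset

ds = load_dataset("org/python-source-corpus", split="train")

# Keep small, MIT-licensed files only.
small_mit = ds.filter(
    lambda row: row["size"] < 10_000 and "MIT" in row["max_stars_repo_licenses"]
)

for row in small_mit.select(range(3)):
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
```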
**Row 1: MattJAshworth/UniSecrets, app.py**

| Field | Value |
|---|---|
| hexsha | f70aa1d2ea6632a54b826457990b43dc4fd8bddf |
| size | 753 |
| ext / lang | py / Python |
| repo path (stars, issues, forks) | app.py |
| repo name (stars, issues, forks) | MattJAshworth/UniSecrets |
| repo head hexsha (stars, issues, forks) | 9a6bd50cf32cf5231e68c7cd465ad19aa06a95df |
| licenses (stars, issues, forks) | ["MIT"] |
| max_stars_count / max_issues_count / max_forks_count | null / null / null |
| star, issue and fork event datetimes | null |

content:
import os
from flask import Flask, flash, render_template, request
from helpers import *

app = Flask(__name__)
app.secret_key = 'dkjkffksks'


@app.route('/', methods=["GET", "POST"])
def index():
    """Index page"""
    if request.method == "POST":
        msg = request.form.get("textarea")
        img = request.form.get("output_image")
        if msg:
            fbpost(msg, img)
            flash('Successfully posted!')
    return render_template('index.html')


@app.errorhandler(404)
def page_not_found(e):
    """Return a custom 404 error."""
    return 'Sorry, unexpected error: {}'.format(e), 404


@app.errorhandler(500)
def application_error(e):
    """Return a custom 500 error."""
    return 'Sorry, unexpected error: {}'.format(e), 500


if __name__ == '__main__':
    app.run()
avg_line_length: 22.147059 | max_line_length: 56 | alphanum_fraction: 0.671979
content_no_comment: same as the content above with the module and function docstrings removed.
is_comment_constant_removed: true | is_sharp_comment_removed: true
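The three numeric columns above are simple per-file statistics. A plausible way to recompute them from `content` (an assumption based on the column names, not a documented recipe) is:

```python
# Hedged sketch: recompute avg_line_length, max_line_length and
# alphanum_fraction from a file's text. These formulas are an assumption
# inferred from the column names, not the dataset's stated definition.
def line_stats(content: str) -> dict:
    lines = content.splitlines()
    line_lengths = [len(line) for line in lines] or [0]
    alphanum = sum(ch.isalnum() for ch in content)
    return {
        "avg_line_length": sum(line_lengths) / len(line_lengths),
        "max_line_length": max(line_lengths),
        "alphanum_fraction": alphanum / len(content) if content else 0.0,
    }


# Example: stats for a two-line snippet.
print(line_stats("import os\nprint(os.name)"))
```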
**Row 2: davidrpk/FlaskGallery, flaskgallery/models/photos_azuretable.py**

| Field | Value |
|---|---|
| hexsha | f70aa1f851e87de45e05a82e45e38f5a66e1e297 |
| size | 1,417 |
| ext / lang | py / Python |
| repo path (stars, issues, forks) | flaskgallery/models/photos_azuretable.py |
| repo name (stars, issues, forks) | davidrpk/FlaskGallery |
| repo head hexsha (stars, issues, forks) | f9d83e5fa4047b06bc30e0df2dd5372b3843ae52 |
| licenses (stars, issues, forks) | ["MIT"] |
| max_stars_count | 1 (2018-12-27T09:56:21.000Z to 2018-12-27T09:56:21.000Z) |
| max_issues_count | null |
| max_forks_count | null |

content:
from azure.cosmosdb.table.tableservice import TableService
from azure.cosmosdb.table.models import Entity
import uuid


class PhotoCollectionAzureTable:
    _connectionstring = ''

    def __init__(self, connectionstring):
        self._connectionstring = connectionstring

    def fetchall(self):
        table_service = TableService(connection_string=self._connectionstring)
        photos = table_service.query_entities('phototable').items
        [photo.pop('etag', None) for photo in photos]
        [photo.pop('Timestamp', None) for photo in photos]
        return photos

    def fetchone(self, objectID):
        table_service = TableService(connection_string=self._connectionstring)
        photos = table_service.query_entities('phototable',
                                              "RowKey eq '" + objectID + "'"
                                              ).items
        [photo.pop('etag', None) for photo in photos]
        [photo.pop('Timestamp', None) for photo in photos]
        if photos:
            return photos[0]
        return None

    def addone(self, photo):
        table_service = TableService(connection_string=self._connectionstring)
        photoAzure = photo
        photoAzure['PartitionKey'] = photo['taken']
        photoAzure['RowKey'] = str(uuid.uuid4())
        photoAzure['objectID'] = photoAzure['RowKey']
        table_service.insert_entity('phototable', photoAzure)
avg_line_length: 38.297297 | max_line_length: 78 | alphanum_fraction: 0.642202
content_no_comment: identical to the content above (the file contains no comments).
is_comment_constant_removed: true | is_sharp_comment_removed: true
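The `content_no_comment` column repeats each file with its comments stripped, and the two boolean flags record which kinds of comments were removed. A rough sketch of one way to strip `#` comments with the standard library (an illustration of the idea only, not the pipeline actually used to build the column) is:

```python
# Hedged sketch: drop "#" comments using tokenize. This only illustrates the
# idea behind content_no_comment; it is not the dataset's preprocessing code.
import io
import tokenize


def strip_sharp_comments(source: str) -> str:
    tokens = tokenize.generate_tokens(io.StringIO(source).readline)
    kept = [tok for tok in tokens if tok.type != tokenize.COMMENT]
    return tokenize.untokenize(kept)


print(strip_sharp_comments("x = 1  # answer\nprint(x)\n"))
```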
**Row 3: blocktorch/blocktorch, ml_source/src/blocktorch/blocktorch/tests/data_checks_tests/test_data_check_action.py**

| Field | Value |
|---|---|
| hexsha | f70aa22a002782b4022717ef4422ec8b2e7b9632 |
| size | 2,670 |
| ext / lang | py / Python |
| repo path (stars, issues, forks) | ml_source/src/blocktorch/blocktorch/tests/data_checks_tests/test_data_check_action.py |
| repo name (stars, issues, forks) | blocktorch/blocktorch |
| repo head hexsha (stars, issues, forks) | 044aa269813ab22c5fd27f84272e5fb540fc522b |
| licenses (stars, issues, forks) | ["MIT"] |
| max_stars_count | 1 (2021-09-23T12:23:02.000Z to 2021-09-23T12:23:02.000Z) |
| max_issues_count | null |
| max_forks_count | null |

content:
from blocktorch.data_checks import DataCheckAction, DataCheckActionCode


def test_data_check_action_attributes():
    data_check_action = DataCheckAction(DataCheckActionCode.DROP_COL)
    assert data_check_action.action_code == DataCheckActionCode.DROP_COL
    assert data_check_action.metadata == {}

    data_check_action = DataCheckAction(DataCheckActionCode.DROP_COL, {})
    assert data_check_action.action_code == DataCheckActionCode.DROP_COL
    assert data_check_action.metadata == {}

    data_check_action = DataCheckAction(
        DataCheckActionCode.DROP_COL, metadata={"columns": [1, 2]}
    )
    assert data_check_action.action_code == DataCheckActionCode.DROP_COL
    assert data_check_action.metadata == {"columns": [1, 2]}


def test_data_check_action_equality():
    data_check_action = DataCheckAction(DataCheckActionCode.DROP_COL)
    data_check_action_eq = DataCheckAction(DataCheckActionCode.DROP_COL)

    assert data_check_action == data_check_action
    assert data_check_action == data_check_action_eq
    assert data_check_action_eq == data_check_action

    data_check_action = DataCheckAction(
        DataCheckActionCode.DROP_COL, metadata={"same detail": "same same same"}
    )
    data_check_action_eq = DataCheckAction(
        DataCheckActionCode.DROP_COL, metadata={"same detail": "same same same"}
    )

    assert data_check_action == data_check_action
    assert data_check_action == data_check_action_eq
    assert data_check_action_eq == data_check_action


def test_data_check_action_inequality():
    data_check_action = DataCheckAction(DataCheckActionCode.DROP_COL)
    data_check_action_diff = DataCheckAction(
        DataCheckActionCode.DROP_COL, metadata={"metadata": ["this is different"]}
    )

    assert data_check_action != data_check_action_diff
    assert data_check_action_diff != data_check_action


def test_data_check_action_to_dict():
    data_check_action = DataCheckAction(DataCheckActionCode.DROP_COL)
    data_check_action_empty_metadata = DataCheckAction(
        DataCheckActionCode.DROP_COL, metadata={}
    )
    data_check_action_with_metadata = DataCheckAction(
        DataCheckActionCode.DROP_COL, metadata={"some detail": ["this is different"]}
    )

    assert data_check_action.to_dict() == {
        "code": DataCheckActionCode.DROP_COL.name,
        "metadata": {},
    }
    assert data_check_action_empty_metadata.to_dict() == {
        "code": DataCheckActionCode.DROP_COL.name,
        "metadata": {},
    }
    assert data_check_action_with_metadata.to_dict() == {
        "code": DataCheckActionCode.DROP_COL.name,
        "metadata": {"some detail": ["this is different"]},
    }
avg_line_length: 37.605634 | max_line_length: 85 | alphanum_fraction: 0.749438
content_no_comment: identical to the content above (the file contains no comments).
is_comment_constant_removed: true | is_sharp_comment_removed: true
**Row 4: irux/pdfgen-python, tests/test_api_sync.py**

| Field | Value |
|---|---|
| hexsha | f70aa42301809fd9f98ccec8576480d2fba80fc4 |
| size | 1,656 |
| ext / lang | py / Python |
| repo path (stars, issues, forks) | tests/test_api_sync.py |
| repo name (stars, issues, forks) | irux/pdfgen-python |
| repo head hexsha (stars, issues, forks) | fe7f6beb9dda8e1ddd23356ee44dd89c8367bc02 |
| licenses (stars, issues, forks) | ["MIT"] |
| max_stars_count / max_issues_count / max_forks_count | null / null / null |
| star, issue and fork event datetimes | null |

content:
# -*- coding: utf-8 -*-
import codecs
import io
import os
import sys
import unittest

import pytest

import pdfgen
from pdfgen.errors import InvalidSourceError

TEST_PATH = os.path.dirname(os.path.realpath(__file__))
EXAMPLE_HTML_FILE = f'{TEST_PATH}/fixtures/example.html'


class TestPdfGenerationSyncApi(unittest.TestCase):
    """Test to_pdf() method in Synchronous world"""

    def setUp(self):
        pass

    def tearDown(self):
        if os.path.exists('out.pdf'):
            os.remove('out.pdf')

    def test_pdf_generation_from_html(self):
        pdf = pdfgen.sync.from_string('html', 'out.pdf', options={'format': 'Letter'})
        self.assertEqual(pdf, 'out.pdf')

    def test_pdf_generation_from_url(self):
        pdf = pdfgen.sync.from_url('http://networkcheck.kde.org', 'out.pdf', options={'format': 'Letter'})
        self.assertEqual(pdf, 'out.pdf')

    def test_raise_error_with_invalid_url(self):
        with self.assertRaises(InvalidSourceError):
            pdf = pdfgen.sync.from_url('wrongurl.com', 'out.pdf')

    def test_raise_error_with_invalid_file_path(self):
        paths = ['frongpath.html', 'wrongpath2.html']
        with self.assertRaises(InvalidSourceError):
            pdfgen.sync.from_file(paths, 'file')

    def test_pdf_generation_from_file(self):
        pdf = pdfgen.sync.from_file(EXAMPLE_HTML_FILE, 'out.pdf')
        self.assertEqual(pdf, 'out.pdf')

    def test_pdf_generation_from_file_like(self):
        with open(EXAMPLE_HTML_FILE, 'r') as f:
            pdf = pdfgen.sync.from_file(f)
        self.assertEqual(pdf[:4].decode('utf-8'), '%PDF')


if __name__ == "__main__":
    unittest.main()
avg_line_length: 30.666667 | max_line_length: 106 | alphanum_fraction: 0.675725
content_no_comment: same as the content above with the coding declaration and the class docstring removed.
is_comment_constant_removed: true | is_sharp_comment_removed: true
**Row 5: gputtley/cmssw, SimG4CMS/HcalTestBeam/test/python/run2006_33_cfg.py**

| Field | Value |
|---|---|
| hexsha | f70aa4a2de2bf42538645b9735d80df41cc00a94 |
| size | 6,317 |
| ext / lang | py / Python |
| repo path (stars, issues, forks) | SimG4CMS/HcalTestBeam/test/python/run2006_33_cfg.py |
| repo name (stars, issues, forks) | gputtley/cmssw |
| repo head hexsha (stars, issues, forks) | c1ef8454804e4ebea8b65f59c4a952a6c94fde3b |
| licenses (stars, issues, forks) | ["Apache-2.0"] |
| max_stars_count | 2 (2020-01-27T15:21:37.000Z to 2020-05-11T11:13:18.000Z) |
| max_issues_count | 8 (2020-03-20T23:18:36.000Z to 2020-05-27T11:00:06.000Z) |
| max_forks_count | 3 (2017-06-07T15:22:28.000Z to 2019-02-28T20:48:30.000Z) |

content:
import FWCore.ParameterSet.Config as cms
process = cms.Process("PROD")
process.load('SimG4CMS.HcalTestBeam.TB2006Geometry33XML_cfi')
process.load('SimGeneral.HepPDTESSource.pdt_cfi')
process.load('Configuration.StandardSequences.Services_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load("Geometry.EcalCommonData.ecalSimulationParameters_cff")
process.load('Geometry.HcalTestBeamData.hcalDDDSimConstants_cff')
process.load('Configuration.EventContent.EventContent_cff')
process.load('IOMC.EventVertexGenerators.VtxSmearedFlat_cfi')
process.load('GeneratorInterface.Core.generatorSmeared_cfi')
process.load('SimG4Core.Application.g4SimHits_cfi')
process.load('IOMC.RandomEngine.IOMC_cff')
if hasattr(process, 'MessageLogger'):
    process.MessageLogger.categories.append('HCalGeom')
    process.MessageLogger.categories.append('HcalSim')
process.TFileService = cms.Service("TFileService",
fileName = cms.string('hcaltb06_33.root')
)
process.RandomNumberGeneratorService.generator.initialSeed = 456789
process.RandomNumberGeneratorService.g4SimHits.initialSeed = 9876
process.RandomNumberGeneratorService.VtxSmeared.initialSeed = 123456789
process.common_beam_direction_parameters = cms.PSet(
MinE = cms.double(50.0),
MaxE = cms.double(50.0),
PartID = cms.vint32(-211),
MinEta = cms.double(0.2175),
MaxEta = cms.double(0.2175),
MinPhi = cms.double(-0.1309),
MaxPhi = cms.double(-0.1309),
BeamPosition = cms.double(-800.0)
)
process.source = cms.Source("EmptySource",
firstRun = cms.untracked.uint32(1),
firstEvent = cms.untracked.uint32(1)
)
process.generator = cms.EDProducer("FlatRandomEGunProducer",
PGunParameters = cms.PSet(
process.common_beam_direction_parameters,
),
Verbosity = cms.untracked.int32(0),
AddAntiParticle = cms.bool(False)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(25000)
)
process.o1 = cms.OutputModule("PoolOutputModule",
process.FEVTSIMEventContent,
fileName = cms.untracked.string('sim2006_33.root')
)
process.Timing = cms.Service("Timing")
from IOMC.EventVertexGenerators.VtxSmearedParameters_cfi import *
process.VtxSmeared = cms.EDProducer("BeamProfileVtxGenerator",
process.common_beam_direction_parameters,
VtxSmearedCommon,
BeamMeanX = cms.double(0.0),
BeamMeanY = cms.double(0.0),
BeamSigmaX = cms.double(0.0001),
BeamSigmaY = cms.double(0.0001),
Psi = cms.double(999.9),
GaussianProfile = cms.bool(False),
BinX = cms.int32(50),
BinY = cms.int32(50),
File = cms.string('beam.profile'),
UseFile = cms.bool(False),
TimeOffset = cms.double(0.)
)
process.testbeam = cms.EDAnalyzer("HcalTB06Analysis",
process.common_beam_direction_parameters,
ECAL = cms.bool(True),
TestBeamAnalysis = cms.PSet(
EHCalMax = cms.untracked.double(400.0),
ETtotMax = cms.untracked.double(400.0),
beamEnergy = cms.untracked.double(50.),
TimeLimit = cms.double(180.0),
EcalWidth = cms.double(0.362),
HcalWidth = cms.double(0.640),
EcalFactor = cms.double(1.0),
HcalFactor = cms.double(100.0),
MIP = cms.double(0.8),
Verbose = cms.untracked.bool(True),
MakeTree = cms.untracked.bool(True)
)
)
process.p1 = cms.Path(process.generator*process.VtxSmeared*process.generatorSmeared*process.g4SimHits*process.testbeam)
#process.outpath = cms.EndPath(process.o1)
process.g4SimHits.NonBeamEvent = True
process.g4SimHits.UseMagneticField = False
process.g4SimHits.Physics.type = 'SimG4Core/Physics/QGSP_FTFP_BERT_EML'
process.g4SimHits.Physics.Region = 'HcalRegion'
process.g4SimHits.Physics.DefaultCutValue = 1.
process.g4SimHits.ECalSD.UseBirkLaw = True
process.g4SimHits.ECalSD.BirkL3Parametrization = True
process.g4SimHits.ECalSD.BirkC1 = 0.033
process.g4SimHits.ECalSD.BirkC2 = 0.0
process.g4SimHits.ECalSD.SlopeLightYield = 0.02
process.g4SimHits.HCalSD.UseBirkLaw = True
process.g4SimHits.HCalSD.BirkC1 = 0.0052
process.g4SimHits.HCalSD.BirkC2 = 0.142
process.g4SimHits.HCalSD.BirkC3 = 1.75
process.g4SimHits.HCalSD.UseLayerWt = False
process.g4SimHits.HCalSD.WtFile = ' '
process.g4SimHits.HCalSD.UseShowerLibrary = False
process.g4SimHits.HCalSD.TestNumberingScheme = False
process.g4SimHits.HCalSD.UseHF = False
process.g4SimHits.HCalSD.ForTBHCAL = True
process.g4SimHits.HCalSD.ForTBH2 = True
process.g4SimHits.CaloSD = cms.PSet(
process.common_beam_direction_parameters,
process.common_heavy_suppression,
EminTrack = cms.double(1.0),
TmaxHit = cms.double(1000.0),
EminHits = cms.vdouble(0.0,0.0,0.0,0.0),
EminHitsDepth = cms.vdouble(0.0,0.0,0.0,0.0),
TmaxHits = cms.vdouble(1000.0,1000.0,1000.0,1000.0),
HCNames = cms.vstring('EcalHitsEB','EcalHitsEE','EcalHitsES','HcalHits'),
UseResponseTables = cms.vint32(0,0,0,0),
SuppressHeavy = cms.bool(False),
CheckHits = cms.untracked.int32(25),
UseMap = cms.untracked.bool(True),
Verbosity = cms.untracked.int32(0),
DetailedTiming = cms.untracked.bool(False),
CorrectTOFBeam = cms.bool(False)
)
process.g4SimHits.CaloTrkProcessing.TestBeam = True
avg_line_length: 43.868056 | max_line_length: 119 | alphanum_fraction: 0.610891
content_no_comment: same as the content above with the commented-out EndPath line removed.
is_comment_constant_removed: true | is_sharp_comment_removed: true
**Row 6: bopopescu/drawquest-web, website/canvas/migrations/0200_auto__del_field_comment_reply_text.py**

| Field | Value |
|---|---|
| hexsha | f70aa50090b5c9275e05b9d12c46f122f2b59ef0 |
| size | 20,793 |
| ext / lang | py / Python |
| repo path (stars, issues, forks) | website/canvas/migrations/0200_auto__del_field_comment_reply_text.py |
| repo name (stars, issues, forks) | bopopescu/drawquest-web |
| repo head hexsha (stars, issues, forks) | 8d8f9149b6efeb65202809a5f8916386f58a1b3b |
| licenses (stars, issues, forks) | ["BSD-3-Clause"] |
| max_stars_count | 19 (2015-11-10T17:36:20.000Z to 2021-04-12T07:36:00.000Z) |
| max_issues_count | 1 (2021-06-09T03:45:34.000Z to 2021-06-09T03:45:34.000Z) |
| max_forks_count | 6 (2015-11-11T00:38:38.000Z to 2020-07-25T20:10:08.000Z) |

content:
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):

    def forwards(self, orm):
        # Deleting field 'Comment.reply_text'
        db.delete_column(u'canvas_comment', 'reply_text')

    def backwards(self, orm):
        # Adding field 'Comment.reply_text'
        db.add_column(u'canvas_comment', 'reply_text',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=2000, blank=True),
                      keep_default=False)

    models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'canvas.apiapp': {
'Meta': {'object_name': 'APIApp'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'canvas.apiauthtoken': {
'Meta': {'unique_together': "(('user', 'app'),)", 'object_name': 'APIAuthToken'},
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas.APIApp']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'canvas.category': {
'Meta': {'object_name': 'Category'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'founded': ('django.db.models.fields.FloatField', [], {'default': '1298956320'}),
'founder': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'founded_groups'", 'null': 'True', 'blank': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderators': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'moderated_categories'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'canvas.comment': {
'Meta': {'object_name': 'Comment'},
'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'attribution_copy': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'attribution_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': u"orm['auth.User']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': u"orm['canvas.Category']"}),
'created_on_iphone': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'judged': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ot_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'replies'", 'null': 'True', 'blank': 'True', 'to': u"orm['canvas.Comment']"}),
'posted_on_quest_of_the_day': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'replied_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
'reply_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'used_in_comments'", 'null': 'True', 'to': u"orm['canvas.Content']"}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_index': 'True'}),
'skip_moderation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'star_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True', 'blank': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'ugq': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'uuid': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'canvas.commentflag': {
'Meta': {'object_name': 'CommentFlag'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': u"orm['canvas.Comment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'type_id': ('django.db.models.fields.IntegerField', [], {}),
'undone': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': u"orm['auth.User']"})
},
u'canvas.commentmoderationlog': {
'Meta': {'object_name': 'CommentModerationLog'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas.Comment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
'note': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderated_comments_log'", 'to': u"orm['auth.User']"}),
'visibility': ('django.db.models.fields.IntegerField', [], {})
},
u'canvas.commentpin': {
'Meta': {'object_name': 'CommentPin'},
'auto': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas.Comment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'canvas.commentsticker': {
'Meta': {'object_name': 'CommentSticker'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stickers'", 'to': u"orm['canvas.Comment']"}),
'epic_message': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '140', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'type_id': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
u'canvas.commentstickerlog': {
'Meta': {'object_name': 'CommentStickerLog'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas.Comment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'canvas.content': {
'Meta': {'object_name': 'Content'},
'alpha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'animated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'remix_of': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'remixes'", 'null': 'True', 'to': u"orm['canvas.Content']"}),
'remix_text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'source_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4000', 'blank': 'True'}),
'stamps_used': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'used_as_stamp'", 'blank': 'True', 'to': u"orm['canvas.Content']"}),
'stroke_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'url_mapping': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas.ContentUrlMapping']", 'null': 'True', 'blank': 'True'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'canvas.contenturlmapping': {
'Meta': {'object_name': 'ContentUrlMapping'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'canvas.emailunsubscribe': {
'Meta': {'object_name': 'EmailUnsubscribe'},
'email': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'canvas.facebookinvite': {
'Meta': {'object_name': 'FacebookInvite'},
'fb_message_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invited_fbid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'invitee': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'facebook_invited_from'", 'null': 'True', 'blank': 'True', 'to': u"orm['auth.User']"}),
'inviter': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'facebook_sent_invites'", 'null': 'True', 'blank': 'True', 'to': u"orm['auth.User']"})
},
u'canvas.facebookuser': {
'Meta': {'object_name': 'FacebookUser'},
'email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'fb_uid': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'gender': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invited_by': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['canvas.FacebookUser']", 'symmetrical': 'False', 'blank': 'True'}),
'last_invited': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'canvas.followcategory': {
'Meta': {'unique_together': "(('user', 'category'),)", 'object_name': 'FollowCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'followers'", 'to': u"orm['canvas.Category']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'following'", 'to': u"orm['auth.User']"})
},
u'canvas.friendjoinednotificationreceipt': {
'Meta': {'unique_together': "(('actor', 'recipient'),)", 'object_name': 'FriendJoinedNotificationReceipt'},
'actor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['auth.User']"})
},
u'canvas.stashcontent': {
'Meta': {'object_name': 'StashContent'},
'content': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas.Content']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'canvas.userinfo': {
'Meta': {'object_name': 'UserInfo'},
'avatar': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas.Content']", 'null': 'True'}),
'bio_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'email_hash': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'enable_timeline': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'enable_timeline_posts': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'facebook_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'follower_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'free_invites': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invite_bypass': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'is_qa': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'post_anonymously': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'profile_image': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas.Comment']", 'null': 'True'}),
'trust_changed': ('canvas.util.UnixTimestampField', [], {'null': 'True', 'blank': 'True'}),
'trusted': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'canvas.usermoderationlog': {
'Meta': {'object_name': 'UserModerationLog'},
'action': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
'note': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderation_log'", 'to': u"orm['auth.User']"})
},
u'canvas.userwarning': {
'Meta': {'object_name': 'UserWarning'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
'confirmed': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'custom_message': ('django.db.models.fields.TextField', [], {}),
'disable_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issued': ('canvas.util.UnixTimestampField', [], {}),
'stock_message': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_warnings'", 'to': u"orm['auth.User']"}),
'viewed': ('canvas.util.UnixTimestampField', [], {'default': '0'})
},
u'canvas.welcomeemailrecipient': {
'Meta': {'object_name': 'WelcomeEmailRecipient'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
    }

    complete_apps = ['canvas']
avg_line_length: 80.281853 | max_line_length: 198 | alphanum_fraction: 0.559611
content_no_comment: same as the content above with the coding declaration and the inline comments removed (truncated in this preview).
},
u'canvas.userwarning': {
'Meta': {'object_name': 'UserWarning'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
'confirmed': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'custom_message': ('django.db.models.fields.TextField', [], {}),
'disable_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issued': ('canvas.util.UnixTimestampField', [], {}),
'stock_message': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_warnings'", 'to': u"orm['auth.User']"}),
'viewed': ('canvas.util.UnixTimestampField', [], {'default': '0'})
},
u'canvas.welcomeemailrecipient': {
'Meta': {'object_name': 'WelcomeEmailRecipient'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['canvas']
| true
| true
|
f70aa5dbf19b752d396afe92c981d393af43e061
| 496
|
py
|
Python
|
povary/apps/gallery/urls.py
|
TorinAsakura/cooking
|
cf0c78f613fa9ce0fcd4ec7a397ab880d9dd631a
|
[
"BSD-3-Clause"
] | null | null | null |
povary/apps/gallery/urls.py
|
TorinAsakura/cooking
|
cf0c78f613fa9ce0fcd4ec7a397ab880d9dd631a
|
[
"BSD-3-Clause"
] | null | null | null |
povary/apps/gallery/urls.py
|
TorinAsakura/cooking
|
cf0c78f613fa9ce0fcd4ec7a397ab880d9dd631a
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, url
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'povary.views.home', name='home'),
# url(r'^povary/', include('povary.foo.urls')),
url(r'^recipe_gallery/(?P<recipe_slug>.*)/$',
'gallery.views.recipe_gallery_upload',
name='recipe_gallery_upload'
),
# url(r'^$', 'recipes.views.recipe_list', name='recipe_list'),
# url(r'^(?P<recipe_slug>.*)/$', 'recipes.views.recipe_details', name='recipe_details'),
)
| 29.176471
| 89
| 0.643145
|
from django.conf.urls import patterns, url
urlpatterns = patterns('',
url(r'^recipe_gallery/(?P<recipe_slug>.*)/$',
'gallery.views.recipe_gallery_upload',
name='recipe_gallery_upload'
),
)
| true
| true
|
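For context on the urlconf above, here is a minimal sketch of how an app-level urls.py like this would typically be wired into a project-level urlconf under the same old-style patterns() API; the project module and the 'gallery/' prefix are assumptions, not taken from the repository.
# Hypothetical project-level urls.py (Django <1.8 style); the prefix is invented.
from django.conf.urls import patterns, include, url
urlpatterns = patterns('',
    url(r'^gallery/', include('gallery.urls')),  # exposes recipe_gallery_upload under /gallery/recipe_gallery/<slug>/
)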
f70aa5dfbb682865ae09f99631d5f9e4a343d737
| 3,128
|
py
|
Python
|
isi_sdk_9_0_0/isi_sdk_9_0_0/models/hdfs_fsimage_job.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24
|
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_9_0_0/isi_sdk_9_0_0/models/hdfs_fsimage_job.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46
|
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_9_0_0/isi_sdk_9_0_0/models/hdfs_fsimage_job.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29
|
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 10
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_9_0_0.models.hdfs_fsimage_job_job import HdfsFsimageJobJob # noqa: F401,E501
class HdfsFsimageJob(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'job': 'HdfsFsimageJobJob'
}
attribute_map = {
'job': 'job'
}
def __init__(self, job=None): # noqa: E501
"""HdfsFsimageJob - a model defined in Swagger""" # noqa: E501
self._job = None
self.discriminator = None
if job is not None:
self.job = job
@property
def job(self):
"""Gets the job of this HdfsFsimageJob. # noqa: E501
Information about job that generates FSImage. # noqa: E501
:return: The job of this HdfsFsimageJob. # noqa: E501
:rtype: HdfsFsimageJobJob
"""
return self._job
@job.setter
def job(self, job):
"""Sets the job of this HdfsFsimageJob.
Information about job that generates FSImage. # noqa: E501
:param job: The job of this HdfsFsimageJob. # noqa: E501
:type: HdfsFsimageJobJob
"""
self._job = job
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, HdfsFsimageJob):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 26.735043
| 90
| 0.563299
|
import pprint
import re
import six
from isi_sdk_9_0_0.models.hdfs_fsimage_job_job import HdfsFsimageJobJob
class HdfsFsimageJob(object):
swagger_types = {
'job': 'HdfsFsimageJobJob'
}
attribute_map = {
'job': 'job'
}
def __init__(self, job=None):
self._job = None
self.discriminator = None
if job is not None:
self.job = job
@property
def job(self):
return self._job
@job.setter
def job(self, job):
self._job = job
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, HdfsFsimageJob):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
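A short usage sketch for the swagger-generated model above; the nested HdfsFsimageJobJob value is a placeholder, not real SDK output.
# Hypothetical usage of the generated model (payload is a placeholder).
from isi_sdk_9_0_0.models.hdfs_fsimage_job import HdfsFsimageJob
from isi_sdk_9_0_0.models.hdfs_fsimage_job_job import HdfsFsimageJobJob
job_info = HdfsFsimageJobJob()             # nested model describing the FSImage-generating job
fsimage_job = HdfsFsimageJob(job=job_info)
print(fsimage_job.to_dict())               # serialized via the generic to_dict() walker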
f70aa6ade73961c39dcb9421c207c2719fa61e4d
| 4,492
|
py
|
Python
|
src/you_get/extractors/universal.py
|
whohow123/you-get
|
00f3dfa71f53abd495424c527b5ef3debc6fb6d2
|
[
"MIT"
] | 1
|
2021-01-18T06:10:46.000Z
|
2021-01-18T06:10:46.000Z
|
src/you_get/extractors/universal.py
|
whohow123/you-get
|
00f3dfa71f53abd495424c527b5ef3debc6fb6d2
|
[
"MIT"
] | null | null | null |
src/you_get/extractors/universal.py
|
whohow123/you-get
|
00f3dfa71f53abd495424c527b5ef3debc6fb6d2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
__all__ = ['universal_download']
from ..common import *
from .embed import *
def universal_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
try:
content_type = get_head(url, headers=fake_headers)['Content-Type']
except:
content_type = get_head(url, headers=fake_headers, get_method='GET')['Content-Type']
if content_type.startswith('text/html'):
try:
embed_download(url, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs)
except Exception:
pass
else:
return
domains = url.split('/')[2].split('.')
if len(domains) > 2: domains = domains[1:]
site_info = '.'.join(domains)
if content_type.startswith('text/html'):
# extract an HTML page
response = get_response(url, faker=True)
page = str(response.data)
page_title = r1(r'<title>([^<]*)', page)
if page_title:
page_title = unescape_html(page_title)
hls_urls = re.findall(r'(https?://[^;"\'\\]+' + '\.m3u8?' +
r'[^;"\'\\]*)', page)
if hls_urls:
for hls_url in hls_urls:
type_, ext, size = url_info(hls_url)
print_info(site_info, page_title, type_, size)
if not info_only:
download_url_ffmpeg(url=hls_url, title=page_title,
ext='mp4', output_dir=output_dir)
return
# most common media file extensions on the Internet
media_exts = ['\.flv', '\.mp3', '\.mp4', '\.webm',
'[-_]1\d\d\d\.jpe?g', '[-_][6-9]\d\d\.jpe?g', # tumblr
'[-_]1\d\d\dx[6-9]\d\d\.jpe?g',
'[-_][6-9]\d\dx1\d\d\d\.jpe?g',
'[-_][6-9]\d\dx[6-9]\d\d\.jpe?g',
's1600/[\w%]+\.jpe?g', # blogger
'img[6-9]\d\d/[\w%]+\.jpe?g' # oricon?
]
urls = []
for i in media_exts:
urls += re.findall(r'(https?://[^;"\'\\]+' + i + r'[^;"\'\\]*)', page)
p_urls = re.findall(r'(https?%3A%2F%2F[^;&]+' + i + r'[^;&]*)', page)
urls += [parse.unquote(url) for url in p_urls]
q_urls = re.findall(r'(https?:\\\\/\\\\/[^;"\']+' + i + r'[^;"\']*)', page)
urls += [url.replace('\\\\/', '/') for url in q_urls]
# a link href to an image is often an interesting one
urls += re.findall(r'href="(https?://[^"]+\.jpe?g)"', page)
urls += re.findall(r'href="(https?://[^"]+\.png)"', page)
urls += re.findall(r'href="(https?://[^"]+\.gif)"', page)
# MPEG-DASH MPD
mpd_urls = re.findall(r'src="(https?://[^"]+\.mpd)"', page)
for mpd_url in mpd_urls:
cont = get_content(mpd_url)
base_url = r1(r'<BaseURL>(.*)</BaseURL>', cont)
urls += [ r1(r'(.*/)[^/]*', mpd_url) + base_url ]
# have some candy!
candies = []
i = 1
for url in set(urls):
filename = parse.unquote(url.split('/')[-1])
if 5 <= len(filename) <= 80:
title = '.'.join(filename.split('.')[:-1])
else:
title = '%s' % i
i += 1
candies.append({'url': url,
'title': title})
for candy in candies:
try:
mime, ext, size = url_info(candy['url'], faker=True)
                if not size: size = float('Inf')
except:
continue
else:
print_info(site_info, candy['title'], ext, size)
if not info_only:
download_urls([candy['url']], candy['title'], ext, size,
output_dir=output_dir, merge=merge,
faker=True)
return
else:
# direct download
filename = parse.unquote(url.split('/')[-1])
title = '.'.join(filename.split('.')[:-1])
ext = filename.split('.')[-1]
_, _, size = url_info(url, faker=True)
print_info(site_info, title, ext, size)
if not info_only:
download_urls([url], title, ext, size,
output_dir=output_dir, merge=merge,
faker=True)
return
site_info = None
download = universal_download
download_playlist = playlist_not_supported('universal')
| 37.123967
| 98
| 0.477293
|
__all__ = ['universal_download']
from ..common import *
from .embed import *
def universal_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
try:
content_type = get_head(url, headers=fake_headers)['Content-Type']
except:
content_type = get_head(url, headers=fake_headers, get_method='GET')['Content-Type']
if content_type.startswith('text/html'):
try:
embed_download(url, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs)
except Exception:
pass
else:
return
domains = url.split('/')[2].split('.')
if len(domains) > 2: domains = domains[1:]
site_info = '.'.join(domains)
if content_type.startswith('text/html'):
response = get_response(url, faker=True)
page = str(response.data)
page_title = r1(r'<title>([^<]*)', page)
if page_title:
page_title = unescape_html(page_title)
hls_urls = re.findall(r'(https?://[^;"\'\\]+' + '\.m3u8?' +
r'[^;"\'\\]*)', page)
if hls_urls:
for hls_url in hls_urls:
type_, ext, size = url_info(hls_url)
print_info(site_info, page_title, type_, size)
if not info_only:
download_url_ffmpeg(url=hls_url, title=page_title,
ext='mp4', output_dir=output_dir)
return
media_exts = ['\.flv', '\.mp3', '\.mp4', '\.webm',
'[-_]1\d\d\d\.jpe?g', '[-_][6-9]\d\d\.jpe?g', '[-_]1\d\d\dx[6-9]\d\d\.jpe?g',
'[-_][6-9]\d\dx1\d\d\d\.jpe?g',
'[-_][6-9]\d\dx[6-9]\d\d\.jpe?g',
's1600/[\w%]+\.jpe?g', 'img[6-9]\d\d/[\w%]+\.jpe?g' ]
urls = []
for i in media_exts:
urls += re.findall(r'(https?://[^;"\'\\]+' + i + r'[^;"\'\\]*)', page)
p_urls = re.findall(r'(https?%3A%2F%2F[^;&]+' + i + r'[^;&]*)', page)
urls += [parse.unquote(url) for url in p_urls]
q_urls = re.findall(r'(https?:\\\\/\\\\/[^;"\']+' + i + r'[^;"\']*)', page)
urls += [url.replace('\\\\/', '/') for url in q_urls]
urls += re.findall(r'href="(https?://[^"]+\.jpe?g)"', page)
urls += re.findall(r'href="(https?://[^"]+\.png)"', page)
urls += re.findall(r'href="(https?://[^"]+\.gif)"', page)
# MPEG-DASH MPD
mpd_urls = re.findall(r'src="(https?://[^"]+\.mpd)"', page)
for mpd_url in mpd_urls:
cont = get_content(mpd_url)
base_url = r1(r'<BaseURL>(.*)</BaseURL>', cont)
urls += [ r1(r'(.*/)[^/]*', mpd_url) + base_url ]
candies = []
i = 1
for url in set(urls):
filename = parse.unquote(url.split('/')[-1])
if 5 <= len(filename) <= 80:
title = '.'.join(filename.split('.')[:-1])
else:
title = '%s' % i
i += 1
candies.append({'url': url,
'title': title})
for candy in candies:
try:
mime, ext, size = url_info(candy['url'], faker=True)
                if not size: size = float('Inf')
except:
continue
else:
print_info(site_info, candy['title'], ext, size)
if not info_only:
download_urls([candy['url']], candy['title'], ext, size,
output_dir=output_dir, merge=merge,
faker=True)
return
else:
filename = parse.unquote(url.split('/')[-1])
title = '.'.join(filename.split('.')[:-1])
ext = filename.split('.')[-1]
_, _, size = url_info(url, faker=True)
print_info(site_info, title, ext, size)
if not info_only:
download_urls([url], title, ext, size,
output_dir=output_dir, merge=merge,
faker=True)
return
site_info = None
download = universal_download
download_playlist = playlist_not_supported('universal')
| true
| true
|
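A minimal sketch of driving the extractor above directly from Python; the URL and output directory are placeholders.
# Hypothetical driver for the universal extractor (URL and paths are invented).
from you_get.extractors.universal import universal_download
universal_download('https://example.com/some-page',   # placeholder URL
                   output_dir='/tmp/downloads',
                   merge=True,
                   info_only=True)                    # print media info without downloading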
f70aa74839e42512f38a961e1c90480641251648
| 618
|
py
|
Python
|
dataset/corpus_to_txts.py
|
fubiye/edgar-abs-kg
|
3973059c7b1cdaab8a4e857a43c702ac0be7e725
|
[
"MIT"
] | null | null | null |
dataset/corpus_to_txts.py
|
fubiye/edgar-abs-kg
|
3973059c7b1cdaab8a4e857a43c702ac0be7e725
|
[
"MIT"
] | null | null | null |
dataset/corpus_to_txts.py
|
fubiye/edgar-abs-kg
|
3973059c7b1cdaab8a4e857a43c702ac0be7e725
|
[
"MIT"
] | null | null | null |
# coding=utf-8
import pandas as pd
from pathlib import Path
# extract corpus to separate files
OUT_PUT_DIR = r'D:\data\edgar\example\documents'
df = pd.read_csv(r'D:\data\edgar\example\corpus.csv')
# def write_to_file(cik,filingId,fileName,content):
def write_to_file(cik,filingId,fileName,content):
base_dir = Path(OUT_PUT_DIR)
file_name = str(cik) + '+' + str(filingId) + '+' + str(fileName)
file_name = file_name.replace('.htm', '.txt')
(base_dir/file_name).write_text(content,encoding='utf-8')
df.apply(lambda row: write_to_file(row['CIK'],row['FilingId'],row['FileName'],row['Content']), axis=1)
| 38.625
| 102
| 0.718447
|
import pandas as pd
from pathlib import Path
OUT_PUT_DIR = r'D:\data\edgar\example\documents'
df = pd.read_csv(r'D:\data\edgar\example\corpus.csv')
def write_to_file(cik,filingId,fileName,content):
base_dir = Path(OUT_PUT_DIR)
file_name = str(cik) + '+' + str(filingId) + '+' + str(fileName)
file_name = file_name.replace('.htm', '.txt')
(base_dir/file_name).write_text(content,encoding='utf-8')
df.apply(lambda row: write_to_file(row['CIK'],row['FilingId'],row['FileName'],row['Content']), axis=1)
| true
| true
|
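As a sanity check on the export script above, a small sketch that reads the written .txt documents back using the script's 'CIK+FilingId+FileName' naming convention; the directory path simply mirrors the constant in the script.
# Hypothetical read-back of the exported documents.
from pathlib import Path
out_dir = Path(r'D:\data\edgar\example\documents')
for txt in sorted(out_dir.glob('*.txt')):
    cik, filing_id, file_name = txt.name.split('+', 2)  # matches 'CIK+FilingId+FileName'
    content = txt.read_text(encoding='utf-8')
    print(cik, filing_id, file_name, len(content))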
f70aa7996c2228497dd2bb330beb6f83430d396c
| 1,084
|
py
|
Python
|
var/spack/repos/builtin/packages/r-adsplit/package.py
|
nkianggiss/spack
|
3477d3375142a30f5714bb5966a6d8bb22c33c06
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 3
|
2019-06-27T13:26:50.000Z
|
2019-07-01T16:24:54.000Z
|
var/spack/repos/builtin/packages/r-adsplit/package.py
|
openbiox/spack
|
bb6ec7fb40c14b37e094a860e3625af53f633174
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 75
|
2016-07-27T11:43:00.000Z
|
2020-12-08T15:56:53.000Z
|
var/spack/repos/builtin/packages/r-adsplit/package.py
|
openbiox/spack
|
bb6ec7fb40c14b37e094a860e3625af53f633174
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 8
|
2015-10-16T13:51:49.000Z
|
2021-10-18T13:58:03.000Z
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RAdsplit(RPackage):
"""This package implements clustering of microarray gene expression
profiles according to functional annotations. For each term genes
are annotated to, splits into two subclasses are computed and a
significance of the supporting gene set is determined."""
homepage = "https://www.bioconductor.org/packages/adSplit/"
git = "https://git.bioconductor.org/packages/adSplit.git"
version('1.46.0', commit='7e81a83f34d371447f491b3a146bf6851e260c7c')
depends_on('r@3.4.0:3.4.9', when='@1.46.0')
depends_on('r-annotationdbi', type=('build', 'run'))
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-cluster', type=('build', 'run'))
depends_on('r-go-db', type=('build', 'run'))
depends_on('r-kegg-db', type=('build', 'run'))
depends_on('r-multtest', type=('build', 'run'))
| 40.148148
| 73
| 0.697417
|
from spack import *
class RAdsplit(RPackage):
homepage = "https://www.bioconductor.org/packages/adSplit/"
git = "https://git.bioconductor.org/packages/adSplit.git"
version('1.46.0', commit='7e81a83f34d371447f491b3a146bf6851e260c7c')
depends_on('r@3.4.0:3.4.9', when='@1.46.0')
depends_on('r-annotationdbi', type=('build', 'run'))
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-cluster', type=('build', 'run'))
depends_on('r-go-db', type=('build', 'run'))
depends_on('r-kegg-db', type=('build', 'run'))
depends_on('r-multtest', type=('build', 'run'))
| true
| true
|
f70aa8f33cdb5f92221c46798f909eb1746b2cac
| 3,187
|
py
|
Python
|
paintball-pointofsale/group.py
|
alecspringel/paintball-pointofsale
|
a1f776e36881afa2815db8babdb1c0aa04a3a33b
|
[
"MIT"
] | 1
|
2020-10-28T01:49:16.000Z
|
2020-10-28T01:49:16.000Z
|
paintball-pointofsale/group.py
|
alecspringel/paintball-pointofsale
|
a1f776e36881afa2815db8babdb1c0aa04a3a33b
|
[
"MIT"
] | null | null | null |
paintball-pointofsale/group.py
|
alecspringel/paintball-pointofsale
|
a1f776e36881afa2815db8babdb1c0aa04a3a33b
|
[
"MIT"
] | null | null | null |
class Group:
"""
name: Name of group (String)
deposit: $ Amount required to book the group (Float)
type: Speedball, Recball, Rental (String)
players: ([Object])
paint_bags: list of paint the group has purchased ([Int])
transactions: ([Object])
"""
def __init__(self, name, deposit, type):
self.name = name
self.deposit = deposit
self.type = type
self.players = []
self.paint_bags = []
self.transactions = []
def get_name(self):
return self.name
def get_type(self):
return self.type
def number_of_players(self):
return len(self.players)
def total_spent(self):
total_spent_by_group = 0.0
for transaction in self.transactions:
total_spent_by_group += transaction.amount
return total_spent_by_group
def get_deposit(self):
return self.deposit
def grand_total(self):
return self.total_spent() + self.deposit
def check_if_players_paid(self):
if len(self.players) == 0:
return False
for player in self.players:
if not player.paid:
return False
return True
def number_players_paid(self):
players_who_paid = 0
for player in self.players:
if player.paid:
players_who_paid += 1
return players_who_paid
def total_bags_and_cases(self):
cases = sum(self.paint_bags) // 4
bags = sum(self.paint_bags) % 4
return bags, cases
def get_players(self):
return self.players
def add_player(self, player):
self.players.append(player)
def get_transactions(self):
return self.transactions
def paint_length(self):
return len(self.paint_bags)
def delete_last_paint(self):
del self.paint_bags[-1]
class Player:
def __init__(self, name):
self.name = name
        self.paid = False  # whether the player has paid
        self.selected = False  # whether the player is currently selected in the UI
def change_select_status(self):
if not self.selected:
self.selected = True
else:
self.selected = False
def get_name(self):
return self.name
def mark_paid(self):
self.paid = True
def mark_unpaid(self):
self.paid = False
def did_pay(self):
return self.paid
def change_pay_status(self):
if self.paid:
self.paid = False
else:
self.paid = True
def is_selected(self):
return self.selected
def deselect(self):
self.selected = False
class Transaction:
def __init__(self, amount, type):
self.amount = amount
self.type = type
self.selected = False
def change_select_status(self):
if not self.selected:
self.selected = True
else:
self.selected = False
def get_type(self):
return self.type
def get_amount(self):
return self.amount
def is_selected(self):
return self.selected
| 24.328244
| 62
| 0.566991
|
class Group:
def __init__(self, name, deposit, type):
self.name = name
self.deposit = deposit
self.type = type
self.players = []
self.paint_bags = []
self.transactions = []
def get_name(self):
return self.name
def get_type(self):
return self.type
def number_of_players(self):
return len(self.players)
def total_spent(self):
total_spent_by_group = 0.0
for transaction in self.transactions:
total_spent_by_group += transaction.amount
return total_spent_by_group
def get_deposit(self):
return self.deposit
def grand_total(self):
return self.total_spent() + self.deposit
def check_if_players_paid(self):
if len(self.players) == 0:
return False
for player in self.players:
if not player.paid:
return False
return True
def number_players_paid(self):
players_who_paid = 0
for player in self.players:
if player.paid:
players_who_paid += 1
return players_who_paid
def total_bags_and_cases(self):
cases = sum(self.paint_bags) // 4
bags = sum(self.paint_bags) % 4
return bags, cases
def get_players(self):
return self.players
def add_player(self, player):
self.players.append(player)
def get_transactions(self):
return self.transactions
def paint_length(self):
return len(self.paint_bags)
def delete_last_paint(self):
del self.paint_bags[-1]
class Player:
def __init__(self, name):
self.name = name
        self.paid = False
        self.selected = False
def change_select_status(self):
if not self.selected:
self.selected = True
else:
self.selected = False
def get_name(self):
return self.name
def mark_paid(self):
self.paid = True
def mark_unpaid(self):
self.paid = False
def did_pay(self):
return self.paid
def change_pay_status(self):
if self.paid:
self.paid = False
else:
self.paid = True
def is_selected(self):
return self.selected
def deselect(self):
self.selected = False
class Transaction:
def __init__(self, amount, type):
self.amount = amount
self.type = type
self.selected = False
def change_select_status(self):
if not self.selected:
self.selected = True
else:
self.selected = False
def get_type(self):
return self.type
def get_amount(self):
return self.amount
def is_selected(self):
return self.selected
| true
| true
|
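A short usage sketch of the Group/Player/Transaction classes above; names and amounts are invented.
# Hypothetical point-of-sale flow (all values are made up).
group = Group(name='Birthday Party', deposit=50.0, type='Rental')
group.add_player(Player('Alex'))
group.add_player(Player('Sam'))
group.get_players()[0].mark_paid()
group.get_transactions().append(Transaction(120.0, 'Paint'))
group.paint_bags.extend([2, 3])                    # 5 bags -> 1 case and 1 loose bag
bags, cases = group.total_bags_and_cases()
print(group.grand_total())                         # 170.0 = 120.0 spent + 50.0 deposit
print(group.number_players_paid(), bags, cases)    # 1 1 1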
f70aa9af67a7af7f95fc82fb4874f9b5bfbd4072
| 21,125
|
py
|
Python
|
ezclimate/optimization.py
|
Yili-Yang/Litterman_Carbon_Pricing
|
71eeefc5e2d9b4c1473a9a6ae85c33b019e32d84
|
[
"MIT"
] | null | null | null |
ezclimate/optimization.py
|
Yili-Yang/Litterman_Carbon_Pricing
|
71eeefc5e2d9b4c1473a9a6ae85c33b019e32d84
|
[
"MIT"
] | null | null | null |
ezclimate/optimization.py
|
Yili-Yang/Litterman_Carbon_Pricing
|
71eeefc5e2d9b4c1473a9a6ae85c33b019e32d84
|
[
"MIT"
] | null | null | null |
from __future__ import division, print_function
import numpy as np
import multiprocessing
from tools import _pickle_method, _unpickle_method
try:
import copy_reg
except:
import copyreg as copy_reg
import types
copy_reg.pickle(types.MethodType, _pickle_method, _unpickle_method)
class GeneticAlgorithm(object):
"""Optimization algorithm for the EZ-Climate model.
Parameters
----------
pop_amount : int
number of individuals in the population
num_feature : int
number of elements in each individual, i.e. number of nodes in tree-model
num_generations : int
number of generations of the populations to be evaluated
bound : float
upper bound of mitigation in each node
cx_prob : float
probability of mating
mut_prob : float
probability of mutation.
utility : `Utility` object
object of utility class
fixed_values : ndarray, optional
nodes to keep fixed
fixed_indicies : ndarray, optional
        indices of nodes to keep fixed
print_progress : bool, optional
if the progress of the evolution should be printed
Attributes
----------
pop_amount : int
number of individuals in the population
num_feature : int
number of elements in each individual, i.e. number of nodes in tree-model
num_generations : int
number of generations of the populations to be evaluated
bound : float
upper bound of mitigation in each node
cx_prob : float
probability of mating
mut_prob : float
probability of mutation.
u : `Utility` object
object of utility class
fixed_values : ndarray, optional
nodes to keep fixed
fixed_indicies : ndarray, optional
        indices of nodes to keep fixed
print_progress : bool, optional
if the progress of the evolution should be printed
"""
def __init__(self, pop_amount, num_generations, cx_prob, mut_prob, bound, num_feature, utility,
fixed_values=None, fixed_indicies=None, print_progress=False):
self.num_feature = num_feature
self.pop_amount = pop_amount
self.num_gen = num_generations
self.cx_prob = cx_prob
self.mut_prob = mut_prob
self.u = utility
self.bound = bound
self.fixed_values = fixed_values
self.fixed_indicies = fixed_indicies
self.print_progress = print_progress
def _generate_population(self, size):
"""Return 1D-array of random values in the given bound as the initial population."""
pop = np.random.random([size, self.num_feature])*self.bound
if self.fixed_values is not None:
for ind in pop:
ind[self.fixed_indicies] = self.fixed_values # override fix values
return pop
    def _evaluate(self, individual):
        """Returns the utility of the given individual."""
        return self.u.utility(individual)
def _select(self, pop, rate):
"""Returns a 1D-array of selected individuals.
Parameters
----------
pop : ndarray
population given by 2D-array with shape ('pop_amount', 'num_feature')
rate : float
the probability of an individual being selected
Returns
-------
ndarray
selected individuals
"""
index = np.random.choice(self.pop_amount, int(rate*self.pop_amount), replace=False)
        return pop[index,:]  # return a random subset of rows (individuals) from pop
def _random_index(self, individuals, size):
"""Generate a random index of individuals of size 'size'.
Parameters
----------
individuals : ndarray or list
2D-array of individuals
size : int
number of indices to generate
Returns
-------
ndarray
1D-array of indices
"""
inds_size = len(individuals)
return np.random.choice(inds_size, size)
def _selection_tournament(self, pop, k, tournsize, fitness):
"""Select `k` individuals from the input `individuals` using `k`
tournaments of `tournsize` individuals.
Parameters
----------
        pop : ndarray or list
            2D-array of individuals to select from
k : int
number of individuals to select
tournsize : int
number of individuals participating in each tournament
        fitness : ndarray
            fitness (utility) values corresponding to the rows of `pop`
Returns
-------
        ndarray
selected individuals
"""
chosen = []
        # repeat k times: randomly draw tournsize indices and keep the individual with the highest fitness
for i in xrange(k):
index = self._random_index(pop, tournsize)
aspirants = pop[index]
aspirants_fitness = fitness[index]
chosen_index = np.where(aspirants_fitness == np.max(aspirants_fitness))[0]
if len(chosen_index) != 0:
chosen_index = chosen_index[0]
chosen.append(aspirants[chosen_index])
return np.array(chosen)
def _two_point_cross_over(self, pop):
"""Performs a two-point cross-over of the population.
Parameters
----------
pop : ndarray
population given by 2D-array with shape ('pop_amount', 'num_feature')
"""
child_group1 = pop[::2] # instance with even index
child_group2 = pop[1::2]# instance with odd index
for child1, child2 in zip(child_group1, child_group2):
if np.random.random() <= self.cx_prob:
                # generate two random crossover points for the swap
cxpoint1 = np.random.randint(1, self.num_feature)
cxpoint2 = np.random.randint(1, self.num_feature - 1)
if cxpoint2 >= cxpoint1:
cxpoint2 += 1
else: # Swap the two cx points
cxpoint1, cxpoint2 = cxpoint2, cxpoint1
child1[cxpoint1:cxpoint2], child2[cxpoint1:cxpoint2] \
= child2[cxpoint1:cxpoint2].copy(), child1[cxpoint1:cxpoint2].copy()
if self.fixed_values is not None:
child1[self.fixed_indicies] = self.fixed_values
child2[self.fixed_indicies] = self.fixed_values
def _uniform_cross_over(self, pop, ind_prob):
"""Performs a uniform cross-over of the population.
Parameters
----------
pop : ndarray
population given by 2D-array with shape ('pop_amount', 'num_feature')
ind_prob : float
probability of feature cross-over
"""
child_group1 = pop[::2]
child_group2 = pop[1::2]
for child1, child2 in zip(child_group1, child_group2):
size = min(len(child1), len(child2))
for i in range(size):
if np.random.random() < ind_prob:
child1[i], child2[i] = child2[i], child1[i]
def _mutate(self, pop, ind_prob, scale=2.0):
"""Mutates individual's elements. The individual has a probability of `mut_prob` of
beeing selected and every element in this individual has a probability `ind_prob` of beeing
mutated. The mutated value is a random number.
Parameters
----------
pop : ndarray
population given by 2D-array with shape ('pop_amount', 'num_feature')
ind_prob : float
probability of feature mutation
scale : float
scaling constant of the random generated number for mutation
"""
        # mutates an expected fraction of individuals and features rather than testing each one individually
pop_tmp = np.copy(pop)
mutate_index = np.random.choice(self.pop_amount, int(self.mut_prob * self.pop_amount), replace=False)
for i in mutate_index:
feature_index = np.random.choice(self.num_feature, int(ind_prob * self.num_feature), replace=False)
for j in feature_index:
if self.fixed_indicies is not None and j in self.fixed_indicies:
continue
else:
pop[i][j] = max(0.0, pop[i][j]+(np.random.random()-0.5)*scale)
def _uniform_mutation(self, pop, ind_prob, scale=2.0):
"""Mutates individual's elements. The individual has a probability of `mut_prob` of
beeing selected and every element in this individual has a probability `ind_prob` of beeing
mutated. The mutated value is the current value plus a scaled uniform [-0.5,0.5] random value.
Parameters
----------
pop : ndarray
population given by 2D-array with shape ('pop_amount', 'num_feature')
ind_prob : float
probability of feature mutation
scale : float
scaling constant of the random generated number for mutation
"""
pop_len = len(pop)
mutate_index = np.random.choice(pop_len, int(self.mut_prob * pop_len), replace=False)
for i in mutate_index:
prob = np.random.random(self.num_feature)
inc = (np.random.random(self.num_feature) - 0.5)*scale
pop[i] += (prob > (1.0-ind_prob)).astype(int)*inc
pop[i] = np.maximum(1e-5, pop[i])
if self.fixed_values is not None:
pop[i][self.fixed_indicies] = self.fixed_values
def _show_evolution(self, fits, pop):
"""Print statistics of the evolution of the population."""
length = len(pop)
mean = fits.mean()
std = fits.std()
min_val = fits.min()
max_val = fits.max()
print (" Min {} \n Max {} \n Avg {}".format(min_val, max_val, mean))
print (" Std {} \n Population Size {}".format(std, length))
print (" Best Individual: ", pop[np.argmax(fits)])
def _survive(self, pop_tmp, fitness_tmp):
"""The 80 percent of the individuals with best fitness survives to
the next generation.
Parameters
----------
pop_tmp : ndarray
population
fitness_tmp : ndarray
fitness values of `pop_temp`
Returns
-------
ndarray
individuals that survived
"""
index_fits = np.argsort(fitness_tmp)[::-1]
fitness = fitness_tmp[index_fits]
pop = pop_tmp[index_fits]
num_survive = int(0.8*self.pop_amount)
survive_pop = np.copy(pop[:num_survive])
survive_fitness = np.copy(fitness[:num_survive])
return np.copy(survive_pop), np.copy(survive_fitness)
def run(self):
"""Start the evolution process.
The evolution steps are:
1. Select the individuals to perform cross-over and mutation.
        2. Cross over among the selected candidates.
        3. Mutate the results to produce offspring.
        4. Combine the offspring with the parents and keep the top
           80 percent of the original population amount.
        5. Randomly generate new individuals amounting to 20 percent of the
           original population amount and add them to the surviving population.
Returns
-------
tuple
final population and the fitness for the final population
Note
----
Uses the :mod:`~multiprocessing` package.
"""
print("----------------Genetic Evolution Starting----------------")
pop = self._generate_population(self.pop_amount)
pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
        fitness = pool.map(self._evaluate, pop)  # pool.map preserves input order, so fitness[i] corresponds to pop[i]
fitness = np.array([val[0] for val in fitness])
        u_hist = np.zeros(self.num_gen)  # per-generation best-fitness history (recorded below but never returned)
for g in range(0, self.num_gen):
print ("-- Generation {} --".format(g+1))
pop_select = self._select(np.copy(pop), rate=1)
self._uniform_cross_over(pop_select, 0.50)
self._uniform_mutation(pop_select, 0.25, np.exp(-float(g)/self.num_gen)**2)
#self._mutate(pop_select, 0.05)
fitness_select = pool.map(self._evaluate, pop_select)
fitness_select = np.array([val[0] for val in fitness_select])
pop_tmp = np.append(pop, pop_select, axis=0)
fitness_tmp = np.append(fitness, fitness_select, axis=0)
pop_survive, fitness_survive = self._survive(pop_tmp, fitness_tmp)
pop_new = self._generate_population(self.pop_amount - len(pop_survive))
fitness_new = pool.map(self._evaluate, pop_new)
fitness_new = np.array([val[0] for val in fitness_new])
pop = np.append(pop_survive, pop_new, axis=0)
fitness = np.append(fitness_survive, fitness_new, axis=0)
if self.print_progress:
self._show_evolution(fitness, pop)
u_hist[g] = fitness[0]
fitness = pool.map(self._evaluate, pop)
fitness = np.array([val[0] for val in fitness])
return pop, fitness
class GradientSearch(object):
"""Gradient search optimization algorithm for the EZ-Climate model.
Parameters
----------
utility : `Utility` object
object of utility class
learning_rate : float
starting learning rate of gradient descent
var_nums : int
number of elements in array to optimize
accuracy : float
stop value for the gradient descent
fixed_values : ndarray, optional
nodes to keep fixed
fixed_indicies : ndarray, optional
        indices of nodes to keep fixed
print_progress : bool, optional
if the progress of the evolution should be printed
scale_alpha : ndarray, optional
array to scale the learning rate
Attributes
----------
utility : `Utility` object
object of utility class
learning_rate : float
starting learning rate of gradient descent
var_nums : int
number of elements in array to optimize
accuracy : float
stop value for the gradient descent
fixed_values : ndarray, optional
nodes to keep fixed
fixed_indicies : ndarray, optional
        indices of nodes to keep fixed
print_progress : bool, optional
if the progress of the evolution should be printed
scale_alpha : ndarray, optional
array to scale the learning rate
"""
def __init__(self, utility, var_nums, accuracy=1e-06, iterations=100, fixed_values=None,
fixed_indicies=None, print_progress=False, scale_alpha=None):
self.u = utility
self.var_nums = var_nums
self.accuracy = accuracy
self.iterations = iterations
self.fixed_values = fixed_values
self.fixed_indicies = fixed_indicies
self.print_progress = print_progress
self.scale_alpha = scale_alpha
if scale_alpha is None:
self.scale_alpha = np.exp(np.linspace(0.0, 3.0, var_nums))
def _partial_grad(self, i):
"""Calculate the ith element of the gradient vector."""
m_copy = self.m.copy()
m_copy[i] = m_copy[i] - self.delta if (m_copy[i] - self.delta)>=0 else 0.0
minus_utility = self.u.utility(m_copy)
m_copy[i] += 2*self.delta
plus_utility = self.u.utility(m_copy)
        grad = (plus_utility-minus_utility) / (2*self.delta)  # central-difference approximation of the partial derivative
return grad, i
def numerical_gradient(self, m, delta=1e-08, fixed_indicies=None):
"""Calculate utility gradient numerically.
Parameters
----------
m : ndarray or list
array of mitigation
delta : float, optional
change in mitigation
fixed_indicies : ndarray or list, optional
            indices of the gradient that should not be calculated
Returns
-------
ndarray
gradient
"""
self.delta = delta
self.m = m
if fixed_indicies is None:
fixed_indicies = []
grad = np.zeros(len(m))
if not isinstance(m, np.ndarray):
self.m = np.array(m)
pool = multiprocessing.Pool()
indicies = np.delete(range(len(m)), fixed_indicies)
res = pool.map(self._partial_grad, indicies)
for g, i in res:
grad[i] = g
pool.close()
pool.join()
del self.m
del self.delta
return grad
def _partial_grad_cons(self, i):
"""Calculate the ith element of the gradient vector."""
m_copy = self.m.copy()
m_copy[i] = m_copy[i] - self.delta if (m_copy[i] - self.delta)>=0 else 0.0
minus_utility = self.u.adjusted_utility(m_copy,first_period_consadj=self.cons)
m_copy[i] += 2*self.delta
plus_utility = self.u.adjusted_utility(m_copy,first_period_consadj=self.cons)
        grad = (plus_utility-minus_utility) / (2*self.delta)  # central-difference approximation of the partial derivative
return grad, i
def numerical_gradient_cons(self, m, cons,delta=1e-08):
"""Calculate utility gradient numerically.
Parameters
----------
m : ndarray or list
array of mitigation
delta : float, optional
change in mitigation
        cons : float
            first-period consumption adjustment, passed as `first_period_consadj` to the utility
Returns
-------
ndarray
gradient
"""
self.delta = delta
self.m = m
self.cons = cons
grad = np.zeros(len(m))
if not isinstance(m, np.ndarray):
self.m = np.array(m)
pool = multiprocessing.Pool()
indicies = np.array(range(len(m)))
res = pool.map(self._partial_grad_cons, indicies)
for g, i in res:
grad[i] = g
pool.close()
pool.join()
del self.m
del self.delta
del self.cons
return grad
def _accelerate_scale(self, accelerator, prev_grad, grad):
sign_vector = np.sign(prev_grad * grad)
scale_vector = np.ones(self.var_nums) * ( 1 + 0.10)
accelerator[sign_vector <= 0] = 1
accelerator *= scale_vector
return accelerator
def gradient_descent(self, initial_point, return_last=False):
"""Gradient descent algorithm. The `initial_point` is updated using the
Adam algorithm. Adam uses the history of the gradient to compute individual
step sizes for each element in the mitigation vector. The vector of step
sizes are calculated using estimates of the first and second moments of
the gradient.
Parameters
----------
initial_point : ndarray
initial guess of the mitigation
return_last : bool, optional
if True the function returns the last point, else the point
with highest utility
Returns
-------
tuple
(best point, best utility)
"""
num_decision_nodes = initial_point.shape[0]
x_hist = np.zeros((self.iterations+1, num_decision_nodes))
u_hist = np.zeros(self.iterations+1)
u_hist[0] = self.u.utility(initial_point)
x_hist[0] = initial_point
beta1, beta2 = 0.90, 0.90
eta = 0.0015 # learning rate
eps = 1e-3
m_t, v_t = 0, 0
prev_grad = 0.0
accelerator = np.ones(self.var_nums)
# formula at http://sebastianruder.com/optimizing-gradient-descent/index.html#fnref:15
for i in range(self.iterations):
grad = self.numerical_gradient(x_hist[i], fixed_indicies=self.fixed_indicies)
m_t = beta1*m_t + (1-beta1)*grad
v_t = beta2*v_t + (1-beta2)*np.power(grad, 2)
m_hat = m_t / (1-beta1**(i+1))
v_hat = v_t / (1-beta2**(i+1))
if i != 0:
accelerator = self._accelerate_scale(accelerator, prev_grad, grad)
            new_x = x_hist[i] + ((eta*m_hat)/(np.sqrt(v_hat)+eps)) * accelerator  # Adam ascent step (sqrt of v_hat, per the documented formula); the empirical 1.1 accelerator still needs justification
new_x[new_x < 0] = 0.0
if self.fixed_values is not None:
new_x[self.fixed_indicies] = self.fixed_values
x_hist[i+1] = new_x
u_hist[i+1] = self.u.utility(new_x)[0]
prev_grad = grad.copy()
if self.print_progress:
print("-- Iteration {} -- \n Current Utility: {}".format(i+1, u_hist[i+1]))
print(new_x)
if return_last:
return x_hist[i+1], u_hist[i+1]
best_index = np.argmax(u_hist)
return x_hist[best_index], u_hist[best_index]
def run(self, initial_point_list, topk=4):
"""Initiate the gradient search algorithm.
Parameters
----------
initial_point_list : list
list of initial points to select from
topk : int, optional
select and run gradient descent on the `topk` first points of
`initial_point_list`
Returns
-------
tuple
best mitigation point and the utility of the best mitigation point
Raises
------
ValueError
If `topk` is larger than the length of `initial_point_list`.
Note
----
Uses the :mod:`~multiprocessing` package.
"""
print("----------------Gradient Search Starting----------------")
if topk > len(initial_point_list):
raise ValueError("topk {} > number of initial points {}".format(topk, len(initial_point_list)))
candidate_points = initial_point_list[:topk]
mitigations = []
utilities = np.zeros(topk)
for cp, count in zip(candidate_points, range(topk)):
if not isinstance(cp, np.ndarray):
cp = np.array(cp)
print("Starting process {} of Gradient Descent".format(count+1))
m, u = self.gradient_descent(cp)
mitigations.append(m)
utilities[count] = u
best_index = np.argmax(utilities)
return mitigations[best_index], utilities[best_index]
class CoordinateDescent(object):
"""Coordinate descent optimization algorithm for the EZ-Climate model.
Parameters
----------
utility : `Utility` object
object of utility class
var_nums : int
number of elements in array to optimize
accuracy : float
stop value for the utility increase
iterations : int
maximum number of iterations
Attributes
----------
utility : `Utility` object
object of utility class
var_nums : int
number of elements in array to optimize
accuracy : float
stop value for the utility increase
iterations : int
maximum number of iterations
"""
def __init__(self, utility, var_nums, accuracy=1e-4, iterations=100):
self.u = utility
self.var_nums = var_nums
self.accuracy = accuracy
self.iterations = iterations
def _min_func(self, x, m, i):
m_copy = m.copy()
m_copy[i] = x
return -self.u.utility(m_copy)[0]
def _minimize_node(self, node, m):
from scipy.optimize import fmin
return fmin(self._min_func, x0=m[node], args=(m, node), disp=False)
def run(self, m):
"""Run the coordinate descent iterations.
Parameters
----------
m : initial point
Returns
-------
tuple
best mitigation point and the utility of the best mitigation point
Note
----
Uses the :mod:`~scipy` package.
"""
num_decision_nodes = m.shape[0]
x_hist = []
u_hist = []
nodes = range(self.var_nums)
x_hist.append(m.copy())
u_hist.append(self.u.utility(m)[0])
print("----------------Coordinate Descent Starting----------------")
print("Starting Utility: {}".format(u_hist[0]))
for i in range(self.iterations):
print("-- Iteration {} --".format(i+1))
node_iteration = np.random.choice(nodes, replace=False, size=len(nodes))
for node in node_iteration:
m[node] = max(0.0, self._minimize_node(node, m))
x_hist.append(m.copy())
u_hist.append(self.u.utility(m)[0])
print("Current Utility: {}".format(u_hist[i+1]))
if np.abs(u_hist[i+1] - u_hist[i]) < self.accuracy:
break
return x_hist[-1], u_hist[-1]
| 30.660377
| 144
| 0.696
|
from __future__ import division, print_function
import numpy as np
import multiprocessing
from tools import _pickle_method, _unpickle_method
try:
import copy_reg
except:
import copyreg as copy_reg
import types
copy_reg.pickle(types.MethodType, _pickle_method, _unpickle_method)
class GeneticAlgorithm(object):
def __init__(self, pop_amount, num_generations, cx_prob, mut_prob, bound, num_feature, utility,
fixed_values=None, fixed_indicies=None, print_progress=False):
self.num_feature = num_feature
self.pop_amount = pop_amount
self.num_gen = num_generations
self.cx_prob = cx_prob
self.mut_prob = mut_prob
self.u = utility
self.bound = bound
self.fixed_values = fixed_values
self.fixed_indicies = fixed_indicies
self.print_progress = print_progress
def _generate_population(self, size):
pop = np.random.random([size, self.num_feature])*self.bound
if self.fixed_values is not None:
for ind in pop:
                ind[self.fixed_indicies] = self.fixed_values
        return pop
    def _evaluate(self, individual):
        return self.u.utility(individual)
def _select(self, pop, rate):
index = np.random.choice(self.pop_amount, int(rate*self.pop_amount), replace=False)
return pop[index,:]
def _random_index(self, individuals, size):
inds_size = len(individuals)
return np.random.choice(inds_size, size)
def _selection_tournament(self, pop, k, tournsize, fitness):
chosen = []
for i in xrange(k):
index = self._random_index(pop, tournsize)
aspirants = pop[index]
aspirants_fitness = fitness[index]
chosen_index = np.where(aspirants_fitness == np.max(aspirants_fitness))[0]
if len(chosen_index) != 0:
chosen_index = chosen_index[0]
chosen.append(aspirants[chosen_index])
return np.array(chosen)
def _two_point_cross_over(self, pop):
        child_group1 = pop[::2]
        child_group2 = pop[1::2]
        for child1, child2 in zip(child_group1, child_group2):
if np.random.random() <= self.cx_prob:
cxpoint1 = np.random.randint(1, self.num_feature)
cxpoint2 = np.random.randint(1, self.num_feature - 1)
if cxpoint2 >= cxpoint1:
cxpoint2 += 1
else: cxpoint1, cxpoint2 = cxpoint2, cxpoint1
child1[cxpoint1:cxpoint2], child2[cxpoint1:cxpoint2] \
= child2[cxpoint1:cxpoint2].copy(), child1[cxpoint1:cxpoint2].copy()
if self.fixed_values is not None:
child1[self.fixed_indicies] = self.fixed_values
child2[self.fixed_indicies] = self.fixed_values
def _uniform_cross_over(self, pop, ind_prob):
child_group1 = pop[::2]
child_group2 = pop[1::2]
for child1, child2 in zip(child_group1, child_group2):
size = min(len(child1), len(child2))
for i in range(size):
if np.random.random() < ind_prob:
child1[i], child2[i] = child2[i], child1[i]
def _mutate(self, pop, ind_prob, scale=2.0):
pop_tmp = np.copy(pop)
mutate_index = np.random.choice(self.pop_amount, int(self.mut_prob * self.pop_amount), replace=False)
for i in mutate_index:
feature_index = np.random.choice(self.num_feature, int(ind_prob * self.num_feature), replace=False)
for j in feature_index:
if self.fixed_indicies is not None and j in self.fixed_indicies:
continue
else:
pop[i][j] = max(0.0, pop[i][j]+(np.random.random()-0.5)*scale)
def _uniform_mutation(self, pop, ind_prob, scale=2.0):
pop_len = len(pop)
mutate_index = np.random.choice(pop_len, int(self.mut_prob * pop_len), replace=False)
for i in mutate_index:
prob = np.random.random(self.num_feature)
inc = (np.random.random(self.num_feature) - 0.5)*scale
pop[i] += (prob > (1.0-ind_prob)).astype(int)*inc
pop[i] = np.maximum(1e-5, pop[i])
if self.fixed_values is not None:
pop[i][self.fixed_indicies] = self.fixed_values
def _show_evolution(self, fits, pop):
length = len(pop)
mean = fits.mean()
std = fits.std()
min_val = fits.min()
max_val = fits.max()
print (" Min {} \n Max {} \n Avg {}".format(min_val, max_val, mean))
print (" Std {} \n Population Size {}".format(std, length))
print (" Best Individual: ", pop[np.argmax(fits)])
def _survive(self, pop_tmp, fitness_tmp):
index_fits = np.argsort(fitness_tmp)[::-1]
fitness = fitness_tmp[index_fits]
pop = pop_tmp[index_fits]
num_survive = int(0.8*self.pop_amount)
survive_pop = np.copy(pop[:num_survive])
survive_fitness = np.copy(fitness[:num_survive])
return np.copy(survive_pop), np.copy(survive_fitness)
def run(self):
print("----------------Genetic Evolution Starting----------------")
pop = self._generate_population(self.pop_amount)
pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
        fitness = pool.map(self._evaluate, pop)
        fitness = np.array([val[0] for val in fitness])
        u_hist = np.zeros(self.num_gen)
        for g in range(0, self.num_gen):
print ("-- Generation {} --".format(g+1))
pop_select = self._select(np.copy(pop), rate=1)
self._uniform_cross_over(pop_select, 0.50)
self._uniform_mutation(pop_select, 0.25, np.exp(-float(g)/self.num_gen)**2)
fitness_select = pool.map(self._evaluate, pop_select)
fitness_select = np.array([val[0] for val in fitness_select])
pop_tmp = np.append(pop, pop_select, axis=0)
fitness_tmp = np.append(fitness, fitness_select, axis=0)
pop_survive, fitness_survive = self._survive(pop_tmp, fitness_tmp)
pop_new = self._generate_population(self.pop_amount - len(pop_survive))
fitness_new = pool.map(self._evaluate, pop_new)
fitness_new = np.array([val[0] for val in fitness_new])
pop = np.append(pop_survive, pop_new, axis=0)
fitness = np.append(fitness_survive, fitness_new, axis=0)
if self.print_progress:
self._show_evolution(fitness, pop)
u_hist[g] = fitness[0]
fitness = pool.map(self._evaluate, pop)
fitness = np.array([val[0] for val in fitness])
return pop, fitness
class GradientSearch(object):
def __init__(self, utility, var_nums, accuracy=1e-06, iterations=100, fixed_values=None,
fixed_indicies=None, print_progress=False, scale_alpha=None):
self.u = utility
self.var_nums = var_nums
self.accuracy = accuracy
self.iterations = iterations
self.fixed_values = fixed_values
self.fixed_indicies = fixed_indicies
self.print_progress = print_progress
self.scale_alpha = scale_alpha
if scale_alpha is None:
self.scale_alpha = np.exp(np.linspace(0.0, 3.0, var_nums))
def _partial_grad(self, i):
m_copy = self.m.copy()
m_copy[i] = m_copy[i] - self.delta if (m_copy[i] - self.delta)>=0 else 0.0
minus_utility = self.u.utility(m_copy)
m_copy[i] += 2*self.delta
plus_utility = self.u.utility(m_copy)
        grad = (plus_utility-minus_utility) / (2*self.delta)
        return grad, i
def numerical_gradient(self, m, delta=1e-08, fixed_indicies=None):
self.delta = delta
self.m = m
if fixed_indicies is None:
fixed_indicies = []
grad = np.zeros(len(m))
if not isinstance(m, np.ndarray):
self.m = np.array(m)
pool = multiprocessing.Pool()
indicies = np.delete(range(len(m)), fixed_indicies)
res = pool.map(self._partial_grad, indicies)
for g, i in res:
grad[i] = g
pool.close()
pool.join()
del self.m
del self.delta
return grad
def _partial_grad_cons(self, i):
m_copy = self.m.copy()
m_copy[i] = m_copy[i] - self.delta if (m_copy[i] - self.delta)>=0 else 0.0
minus_utility = self.u.adjusted_utility(m_copy,first_period_consadj=self.cons)
m_copy[i] += 2*self.delta
plus_utility = self.u.adjusted_utility(m_copy,first_period_consadj=self.cons)
        grad = (plus_utility-minus_utility) / (2*self.delta)
        return grad, i
def numerical_gradient_cons(self, m, cons,delta=1e-08):
self.delta = delta
self.m = m
self.cons = cons
grad = np.zeros(len(m))
if not isinstance(m, np.ndarray):
self.m = np.array(m)
pool = multiprocessing.Pool()
indicies = np.array(range(len(m)))
res = pool.map(self._partial_grad_cons, indicies)
for g, i in res:
grad[i] = g
pool.close()
pool.join()
del self.m
del self.delta
del self.cons
return grad
def _accelerate_scale(self, accelerator, prev_grad, grad):
sign_vector = np.sign(prev_grad * grad)
scale_vector = np.ones(self.var_nums) * ( 1 + 0.10)
accelerator[sign_vector <= 0] = 1
accelerator *= scale_vector
return accelerator
def gradient_descent(self, initial_point, return_last=False):
num_decision_nodes = initial_point.shape[0]
x_hist = np.zeros((self.iterations+1, num_decision_nodes))
u_hist = np.zeros(self.iterations+1)
u_hist[0] = self.u.utility(initial_point)
x_hist[0] = initial_point
beta1, beta2 = 0.90, 0.90
        eta = 0.0015
        eps = 1e-3
m_t, v_t = 0, 0
prev_grad = 0.0
accelerator = np.ones(self.var_nums)
for i in range(self.iterations):
grad = self.numerical_gradient(x_hist[i], fixed_indicies=self.fixed_indicies)
m_t = beta1*m_t + (1-beta1)*grad
v_t = beta2*v_t + (1-beta2)*np.power(grad, 2)
m_hat = m_t / (1-beta1**(i+1))
v_hat = v_t / (1-beta2**(i+1))
if i != 0:
accelerator = self._accelerate_scale(accelerator, prev_grad, grad)
            new_x = x_hist[i] + ((eta*m_hat)/(np.sqrt(v_hat)+eps)) * accelerator
            new_x[new_x < 0] = 0.0
if self.fixed_values is not None:
new_x[self.fixed_indicies] = self.fixed_values
x_hist[i+1] = new_x
u_hist[i+1] = self.u.utility(new_x)[0]
prev_grad = grad.copy()
if self.print_progress:
print("-- Iteration {} -- \n Current Utility: {}".format(i+1, u_hist[i+1]))
print(new_x)
if return_last:
return x_hist[i+1], u_hist[i+1]
best_index = np.argmax(u_hist)
return x_hist[best_index], u_hist[best_index]
def run(self, initial_point_list, topk=4):
print("----------------Gradient Search Starting----------------")
if topk > len(initial_point_list):
raise ValueError("topk {} > number of initial points {}".format(topk, len(initial_point_list)))
candidate_points = initial_point_list[:topk]
mitigations = []
utilities = np.zeros(topk)
for cp, count in zip(candidate_points, range(topk)):
if not isinstance(cp, np.ndarray):
cp = np.array(cp)
print("Starting process {} of Gradient Descent".format(count+1))
m, u = self.gradient_descent(cp)
mitigations.append(m)
utilities[count] = u
best_index = np.argmax(utilities)
return mitigations[best_index], utilities[best_index]
class CoordinateDescent(object):
def __init__(self, utility, var_nums, accuracy=1e-4, iterations=100):
self.u = utility
self.var_nums = var_nums
self.accuracy = accuracy
self.iterations = iterations
def _min_func(self, x, m, i):
m_copy = m.copy()
m_copy[i] = x
return -self.u.utility(m_copy)[0]
def _minimize_node(self, node, m):
from scipy.optimize import fmin
return fmin(self._min_func, x0=m[node], args=(m, node), disp=False)
def run(self, m):
num_decision_nodes = m.shape[0]
x_hist = []
u_hist = []
nodes = range(self.var_nums)
x_hist.append(m.copy())
u_hist.append(self.u.utility(m)[0])
print("----------------Coordinate Descent Starting----------------")
print("Starting Utility: {}".format(u_hist[0]))
for i in range(self.iterations):
print("-- Iteration {} --".format(i+1))
node_iteration = np.random.choice(nodes, replace=False, size=len(nodes))
for node in node_iteration:
m[node] = max(0.0, self._minimize_node(node, m))
x_hist.append(m.copy())
u_hist.append(self.u.utility(m)[0])
print("Current Utility: {}".format(u_hist[i+1]))
if np.abs(u_hist[i+1] - u_hist[i]) < self.accuracy:
break
return x_hist[-1], u_hist[-1]
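# Illustrative usage sketch (not part of the original module): `utility_obj` is a
# hypothetical object exposing .utility(m) -> array, as both optimizers above assume,
# and 63 decision variables is only an example size.
#   initial = np.zeros(63)
#   cd = CoordinateDescent(utility_obj, var_nums=63, accuracy=1e-4, iterations=100)
#   best_m, best_u = cd.run(initial)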
| true
| true
|
f70aa9c8456546362c9a960d81b4e7ddc3d4290f
| 56,584
|
py
|
Python
|
tests/test_disparity.py
|
steuxyo/Pandora
|
57db04f31d6cecba93fa3bc0091f624c8b8ec5f1
|
[
"Apache-2.0"
] | 1
|
2021-03-05T17:35:43.000Z
|
2021-03-05T17:35:43.000Z
|
tests/test_disparity.py
|
steuxyo/Pandora
|
57db04f31d6cecba93fa3bc0091f624c8b8ec5f1
|
[
"Apache-2.0"
] | null | null | null |
tests/test_disparity.py
|
steuxyo/Pandora
|
57db04f31d6cecba93fa3bc0091f624c8b8ec5f1
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf8
#
# Copyright (c) 2020 Centre National d'Etudes Spatiales (CNES).
#
# This file is part of PANDORA
#
# https://github.com/CNES/Pandora
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains functions to test the disparity module.
"""
import unittest
import numpy as np
import xarray as xr
import common
import pandora
import pandora.constants as cst
import pandora.disparity as disparity
import pandora.matching_cost as matching_cost
from pandora.img_tools import read_img
from pandora.state_machine import PandoraMachine
class TestDisparity(unittest.TestCase):
"""
TestDisparity class allows to test the disparity module
"""
def setUp(self):
"""
Method called to prepare the test fixture
"""
# Create stereo images
data = np.array(([[1, 2, 4, 6],
[2, 4, 1, 6],
[6, 7, 8, 10]]), dtype=np.float64)
self.left = xr.Dataset({'im': (['row', 'col'], data)},
coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
self.left.attrs = {'valid_pixels': 0, 'no_data_mask': 1}
data = np.array(([[6, 1, 2, 4],
[6, 2, 4, 1],
[10, 6, 7, 8]]), dtype=np.float64)
self.right = xr.Dataset({'im': (['row', 'col'], data)},
coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
self.right.attrs = {'valid_pixels': 0, 'no_data_mask': 1}
def test_to_disp(self):
"""
Test the to disp method
"""
# Create the left cost volume, with SAD measure window size 1, subpixel 1, disp_min -3 disp_max 1
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 1,
'subpix': 1})
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, 1)
# Disparity map ground truth, for the images described in the setUp method
gt_disp = np.array([[1, 1, 1, -3],
[1, 1, 1, -3],
[1, 1, 1, -3]])
# Compute the disparity
disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})
disp = disparity_.to_disp(cv)
# Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(disp['disparity_map'].data, gt_disp)
#
# Test the to_disp method with negative disparity range
#
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, -1)
# Disparity map ground truth
gt_disp = np.array([[0, -1, -2, -3],
[0, -1, -1, -3],
[0, -1, -2, -3]])
# Compute the disparity
disp = disparity_.to_disp(cv)
# Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(disp['disparity_map'].data, gt_disp)
#
# Test the to_disp method with positive disparity range
#
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, 1, 3)
# Disparity map ground truth
gt_disp = np.array([[1, 1, 1, 0],
[1, 1, 1, 0],
[1, 1, 1, 0]])
# Compute the disparity
disp = disparity_.to_disp(cv)
# Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(disp['disparity_map'].data, gt_disp)
# Test disp_indices copy
# Modify the disparity map
disp['disparity_map'].data[0, 0] = -95
# Check if the xarray disp_indices is equal to the ground truth disparity map
np.testing.assert_array_equal(cv['disp_indices'].data, gt_disp)
def test_to_disp_with_offset(self):
"""
Test the to disp method with window_size > 1
"""
# Create the left cost volume, with SAD measure window size 3, subpixel 1, disp_min -3 disp_max 1
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 3,
'subpix': 1})
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, 1)
# Disparity map ground truth, for the images described in the setUp method
# Check that gt is full size and that the border (i.e. outside [offset:-offset]) is equal to invalid_disparity
gt_disp = np.array([[-99, -99, -99, -99],
[-99, 1, 0, -99],
[-99, -99, -99, -99]])
# Compute the disparity
disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': -99})
disp = disparity_.to_disp(cv)
# Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(disp['disparity_map'].data, gt_disp)
#
# Test the to_disp method with negative disparity range
#
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, -1)
# Disparity map ground truth
gt_disp = np.array([[-99, -99, -99, -99],
[-99, -99, -1, -99],
[-99, -99, -99, -99]])
# Compute the disparity
disp = disparity_.to_disp(cv)
# Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(disp['disparity_map'].data, gt_disp)
#
# Test the to_disp method with positive disparity range
#
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, 1, 3)
# Disparity map ground truth
gt_disp = np.array([[-99, -99, -99, -99],
[-99, 1, -99, -99],
[-99, -99, -99, -99]])
# Compute the disparity
disp = disparity_.to_disp(cv)
# Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(disp['disparity_map'].data, gt_disp)
# Test disp_indices copy
# Modify the disparity map
disp['disparity_map'].data[0, 0] = -95
# Check if the xarray disp_indices is equal to the ground truth disparity map
np.testing.assert_array_equal(cv['disp_indices'].data, gt_disp)
def test_argmin_split(self):
"""
Test the argmin_split method
"""
# Create the left cost volume, with SAD measure, window size 1, subpixel 2, disp_min -3 disp_max 1
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 1,
'subpix': 2})
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, 1)
indices_nan = np.isnan(cv['cost_volume'].data)
cv['cost_volume'].data[indices_nan] = np.inf
# ground truth
gt_disp = np.array([[1., 1., 1., -3.],
[1., -0.5, 1., -3.],
[1., 1., -1.5, -3]], dtype=np.float32)
# Compute the disparity
disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})
disp = disparity_.argmin_split(cv)
# Check if the calculated coefficient map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(gt_disp, disp)
def test_argmax_split(self):
"""
Test the argmax_split method
"""
# Create the left cost volume, with ZNCC measure, window size 1, subpixel 2, disp_min -3 disp_max 1
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'zncc', 'window_size': 1,
'subpix': 2})
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, 1)
indices_nan = np.isnan(cv['cost_volume'].data)
cv['cost_volume'].data[indices_nan] = -np.inf
# ground truth
gt_disp = np.array([[0., -1., -2., -3.],
[0., -1., -2., -3.],
[0., -1., -2., -3.]], dtype=np.float32)
# Compute the disparity
disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})
disp = disparity_.argmax_split(cv)
# Check if the calculated coefficient map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(gt_disp, disp)
def test_coefficient_map(self):
"""
Test the method coefficient map
"""
# Create the left cost volume, with SAD measure window size 1, subpixel 1, disp_min -3 disp_max 1
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 1,
'subpix': 1})
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, 1)
# Compute the disparity
disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})
disparity_.to_disp(cv)
# Coefficient map ground truth, for the images described in the setUp method
gt_coeff = np.array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
# Compute the disparity, and the coefficient map
coeff = disparity_.coefficient_map(cv)
# Check if the calculated coefficient map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(coeff.data, gt_coeff)
def test_approximate_right_disparity(self):
"""
Test the approximate_right_disparity method
"""
# Create the left cost volume, with SAD measure window size 3 and subpixel 1
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 3,
'subpix': 1})
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -2, 1)
# Right disparity map ground truth, for the images described in the setUp method
gt_disp = np.array([[0, 0, 0, 0],
[0, 0, -1, 0],
[0, 0, 0, 0]])
# Compute the right disparity map
disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})
disp_r = disparity_.approximate_right_disparity(cv, self.right)
# Check if the calculated right disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(disp_r['disparity_map'].data, gt_disp)
def test_right_disparity_subpixel(self):
"""
Test the right disparity method, with subpixel disparity
"""
# Create the left cost volume, with SAD measure window size 3 and subpixel 4
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 3,
'subpix': 4})
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -2, 1)
# Right disparity map ground truth
gt_disp = np.array([[0, 0, 0, 0],
[0, 0, -1, 0],
[0, 0, 0, 0]])
# Compute the right disparity map
disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})
disp_r = disparity_.approximate_right_disparity(cv, self.right)
# Check if the calculated right disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(disp_r['disparity_map'].data, gt_disp)
@staticmethod
def test_right_disparity_comparaison():
"""
Test the right disparity method by comparing the right disparity map calculated from scratch with the one
calculated with the fast method
"""
# Build the default configuration
default_cfg = pandora.check_json.default_short_configuration
pandora_left = read_img('tests/pandora/left.png', no_data=np.nan, mask=None)
pandora_right = read_img('tests/pandora/right.png', no_data=np.nan, mask=None)
fast_cfg = {
'pipeline': {
'right_disp_map': {
'method': 'accurate'
},
'matching_cost': {
'matching_cost_method': 'census'
},
'disparity': {
'disparity_method': 'wta'
},
'refinement': {
'refinement_method': 'vfit'
},
'validation': {
'validation_method': 'cross_checking',
'right_left_mode': 'approximate'
}
}
}
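# fast_cfg above and acc_cfg below are identical except for 'right_left_mode'
# ('approximate' vs 'accurate'), which is the behaviour compared by this test.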
pandora_machine_fast = PandoraMachine()
cfg = pandora.check_json.update_conf(default_cfg, fast_cfg)
left, right_fast = \
pandora.run(pandora_machine_fast, pandora_left, pandora_right, -60, 0, cfg['pipeline']) # pylint: disable=unused-variable
acc_cfg = {
'pipeline':
{
'right_disp_map': {
'method': 'accurate'
},
'matching_cost': {
'matching_cost_method': 'census'
},
'disparity': {
'disparity_method': 'wta'
},
'refinement': {
'refinement_method': 'vfit'
},
'validation': {
'validation_method': 'cross_checking',
'right_left_mode': 'accurate',
}
}
}
pandora_machine_acc = PandoraMachine()
cfg = pandora.check_json.update_conf(default_cfg, acc_cfg)
left, right_acc = pandora.run(pandora_machine_acc, pandora_left, pandora_right, -60, 0, cfg['pipeline'])
# Check if the calculated disparity map in fast mode is equal to the disparity map in accurate mode
np.testing.assert_array_equal(right_fast['disparity_map'].data, right_acc['disparity_map'].data)
# Check if the calculated coefficient map in fast mode is equal to the coefficient map in accurate mode
np.testing.assert_array_equal(right_fast['interpolated_coeff'].data, right_acc['interpolated_coeff'].data)
def test_to_disp_validity_mask(self):
"""
Test the generated validity mask in the to_disp method
# If bit 1 == 1 : Invalid pixel : the disparity interval is missing in the right image
# If bit 2 == 1 : Information: the disparity interval is incomplete (edge reached in the right image)
"""
# ------ Negative disparities ------
# Create the left cost volume, with SAD measure window size 1, subpixel 1, disp_min -3 disp_max -1
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 1,
'subpix': 1})
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, -1)
# Compute the disparity map and validity mask
disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, self.left, self.right, cv)
# Validity mask ground truth
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0],
[cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0],
[cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0]], dtype=np.uint16)
# Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ------ Positive disparities ------
# Create the left cost volume, with SAD measure window size 1, subpixel 1, disp_min 1 disp_max 2
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, 1, 2)
# Compute the disparity map and validity mask
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, self.left, self.right, cv)
# Validity mask ground truth
gt_mask = np.array([[0, 0, 1 << 2, cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING],
[0, 0, 1 << 2, cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING],
[0, 0, 1 << 2, cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING]],
dtype=np.uint16)
# Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ------ Negative and positive disparities ------
# Create the left cost volume, with SAD measure window size 1, subpixel 1, disp_min -1 disp_max 1
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -1, 1)
# Compute the disparity map and validity mask
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, self.left, self.right, cv)
# Validity mask ground truth
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE],
[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE],
[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE]],
dtype=np.uint16)
# Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ------ Variable grids of disparities ------
# Disp_min and disp_max
disp_min_grid = np.array([[-3, -2, -3, -1],
[-2, -2, -1, -3],
[-1, -2, -2, -3]])
disp_max_grid = np.array([[-1, -1, -2, 0],
[0, -1, 0, 0],
[0, 0, -1, -1]])
# Create the left cost volume, with SAD measure window size 1, subpixel 1, disp_min -3 disp_max -1
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 1,
'subpix': 1})
dmin, dmax = matching_cost_plugin.dmin_dmax(disp_min_grid, disp_max_grid)
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, dmin, dmax)
matching_cost_plugin.cv_masked(self.left, self.right, cv, disp_min_grid, disp_max_grid)
# Compute the disparity map and validity mask
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, self.left, self.right, cv)
# Validity mask ground truth
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0],
[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0],
[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0]], dtype=np.uint16)
# Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
def test_to_disp_validity_mask_with_offset(self):
"""
Test the generated validity mask in the to_disp method
# If bit 1 == 1 : Invalid pixel : the disparity interval is missing in the right image
# If bit 2 == 1 : Information: the disparity interval is incomplete (edge reached in the right image)
"""
# ------ Negative disparities ------
# Create the left cost volume, with SAD measure window size 1, subpixel 1, disp_min -3 disp_max -1
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 3,
'subpix': 1})
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, -1)
# Compute the disparity map and validity mask
disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, self.left, self.right, cv)
# Validity mask ground truth
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER]], dtype=np.uint16)
# Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ------ Positive disparities ------
# Create the left cost volume, with SAD measure window size 1, subpixel 1, disp_min 1 disp_max 2
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, 1, 2)
# Compute the disparity map and validity mask
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, self.left, self.right, cv)
# Validity mask ground truth
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER]], dtype=np.uint16)
# Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ------ Negative and positive disparities ------
# Create the left cost volume, with SAD measure window size 1, subpixel 1, disp_min -1 disp_max 1
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -1, 1)
# Compute the disparity map and validity mask
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, self.left, self.right, cv)
# Validity mask ground truth
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER]], dtype=np.uint16)
# Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ------ Variable grids of disparities ------
# Disp_min and disp_max
disp_min_grid = np.array([[-3, -2, -3, -1],
[-2, -2, -1, -3],
[-1, -2, -2, -3]])
disp_max_grid = np.array([[-1, -1, -2, 0],
[0, -1, 0, 0],
[0, 0, -1, -1]])
# Create the left cost volume, with SAD measure window size 1, subpixel 1, disp_min -3 disp_max -1
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 3,
'subpix': 1})
dmin, dmax = matching_cost_plugin.dmin_dmax(disp_min_grid, disp_max_grid)
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, dmin, dmax)
matching_cost_plugin.cv_masked(self.left, self.right, cv, disp_min_grid, disp_max_grid)
# Compute the disparity map and validity mask
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, self.left, self.right, cv)
# Validity mask ground truth
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER]], dtype=np.uint16)
# Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
def test_approximate_right_disparity_validity_mask(self):
"""
Test the generated validity mask in the right_disparity method
# If bit 1 == 1 : Invalid pixel : the disparity interval is missing in the right image
# If bit 2 == 1 : Information: the disparity interval is incomplete (edge reached in the right image)
"""
# Create the left cost volume, with SAD measure window size 1 and subpixel 1
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 1,
'subpix': 1})
# ------ Negative and positive disparities ------
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -2, 1)
# Validity mask ground truth ( for disparities -1 0 1 2 )
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE],
[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE],
[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE]], dtype=np.uint16)
# Compute the right disparity map and the validity mask
disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})
dataset = disparity_.approximate_right_disparity(cv, self.right)
# Check if the calculated right disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ------ Negative disparities ------
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, 1, 2)
# Validity mask ground truth ( for disparities -2 -1 )
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
0, 0],
[cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
0, 0],
[cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
0, 0]], dtype=np.uint16)
# Compute the right disparity map and the validity mask
dataset = disparity_.approximate_right_disparity(cv, self.right)
# Check if the calculated right disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ------ Positive disparities ------
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -2, -1)
# Validity mask ground truth ( for disparities 1 2 )
gt_mask = np.array([[0, 0, cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING],
[0, 0, cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING],
[0, 0, cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING]], dtype=np.uint16)
# Compute the right disparity map and the validity mask
dataset = disparity_.approximate_right_disparity(cv, self.right)
# Check if the calculated right disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
@staticmethod
def test_validity_mask():
"""
# If bit 0 == 1 : Invalid pixel : the disparity interval is missing in the right image
# If bit 1 == 1 : Invalid pixel : the disparity interval is missing in the right image
# If bit 2 == 1 : Information: the disparity interval is incomplete (edge reached in the right image)
# If bit 6 == 1 : Invalid pixel : invalidated by the validity mask of the left image given as input
# If bit 7 == 1 : Invalid pixel : right positions invalidated by the mask of the right image given as
# input
"""
# Masks convention
# 1 = valid
# 2 = no_data
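# These values are wired in through the 'valid_pixels' and 'no_data_mask' dataset
# attributes set below; any other mask value marks the pixel as invalidated by the
# input mask (PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_LEFT / _RIGHT in the expected masks).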
# ---------------------- Test with positive and negative disparity range ----------------------
data = np.array(([[1, 2, 4, 6],
[2, 4, 1, 6],
[6, 7, 8, 10]]), dtype=np.float64)
left_mask = np.array([[2, 1, 1, 1],
[1, 2, 4, 1],
[5, 1, 1, 2]], dtype=np.uint8)
left = xr.Dataset({'im': (['row', 'col'], data),
'msk': (['row', 'col'], left_mask)},
coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
left.attrs = {'valid_pixels': 1, 'no_data_mask': 2}
data = np.array(([[6, 1, 2, 4],
[6, 2, 4, 1],
[10, 6, 7, 8]]), dtype=np.float64)
right_mask = np.array([[1, 1, 3, 5],
[4, 1, 1, 1],
[2, 2, 4, 6]], dtype=np.uint8)
right = xr.Dataset({'im': (['row', 'col'], data),
'msk': (['row', 'col'], right_mask)},
coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
right.attrs = {'valid_pixels': 1, 'no_data_mask': 2}
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 1,
'subpix': 1})
cv = matching_cost_plugin.compute_cost_volume(left, right, -1, 1)
# Compute the disparity map and validity mask
disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, left, right, cv)
# Validity mask ground truth
gt_mask = np.array(
[[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE + cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
0, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE + cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_RIGHT],
[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_LEFT, cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE],
[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE + cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_LEFT +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING, 0, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE + cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER +
cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_RIGHT]], dtype=np.uint16)
# Check if the calculated validity mask is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ---------------------- Test with negative disparity range ----------------------
cv = matching_cost_plugin.compute_cost_volume(left, right, -2, -1)
# Compute the disparity map and validity mask
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, left, right, cv)
# Validity mask ground truth
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING +
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0, 0],
[cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER +
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_RIGHT,
cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_LEFT, 0],
[cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING +
cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_LEFT,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER]],
dtype=np.uint16)
# Check if the calculated validity mask is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ---------------------- Test with positive disparity range ----------------------
cv = matching_cost_plugin.compute_cost_volume(left, right, 1, 2)
# Compute the disparity map and validity mask
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, left, right, cv)
# Validity mask ground truth
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER, cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_RIGHT,
cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_RIGHT +
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING],
[0, cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_LEFT,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING],
[cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_LEFT, cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_RIGHT,
cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_RIGHT +
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING]],
dtype=np.uint16)
# Check if the calculated validity mask is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ---------------------- Test with positive and negative disparity range and window size = 3----------------
data = np.array(([[1, 2, 4, 6, 1],
[2, 4, 1, 6, 1],
[6, 7, 8, 10, 1],
[0, 5, 6, 7, 8]]), dtype=np.float64)
left_mask = np.array([[2, 1, 1, 1, 1],
[1, 2, 4, 1, 1],
[5, 2, 1, 1, 1],
[1, 1, 1, 1, 1]], dtype=np.uint8)
left = xr.Dataset({'im': (['row', 'col'], data),
'msk': (['row', 'col'], left_mask)},
coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
left.attrs = {'valid_pixels': 1, 'no_data_mask': 2}
data = np.array(([[6, 1, 2, 4, 1],
[6, 2, 4, 1, 6],
[10, 6, 7, 8, 1],
[5, 6, 7, 8, 0]]), dtype=np.float64)
right_mask = np.array([[1, 1, 1, 2, 1],
[5, 1, 1, 1, 1],
[2, 1, 1, 6, 1],
[0, 1, 1, 1, 1]], dtype=np.uint8)
right = xr.Dataset({'im': (['row', 'col'], data),
'msk': (['row', 'col'], right_mask)},
coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
right.attrs = {'valid_pixels': 1, 'no_data_mask': 2}
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 3,
'subpix': 1})
cv = matching_cost_plugin.compute_cost_volume(left, right, -1, 1)
# Compute the disparity map and validity mask
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, left, right, cv)
# Validity mask ground truth
gt_mask = np.array(
[[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE + cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING +
cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_LEFT,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE + cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER, cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
],
dtype=np.uint16)
# Check if the calculated validity mask is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ---------------------- Test with positive and negative disparity range on flag 1 ----------------------
# Masks convention
# 1 = valid
# 0 = no_data
data = np.ones((10, 10), dtype=np.float64)
left_mask = np.ones((10, 10), dtype=np.uint8)
left = xr.Dataset({'im': (['row', 'col'], data),
'msk': (['row', 'col'], left_mask)},
coords={'row': np.arange(5, data.shape[0] + 5), 'col': np.arange(4, data.shape[1] + 4)})
left.attrs = {'valid_pixels': 1, 'no_data_mask': 0}
data = np.ones((10, 10), dtype=np.float64)
right_mask = np.ones((10, 10), dtype=np.uint8)
right_mask = np.tril(right_mask, -1.5)
right = xr.Dataset({'im': (['row', 'col'], data),
'msk': (['row', 'col'], right_mask)},
coords={'row': np.arange(5, data.shape[0] + 5), 'col': np.arange(4, data.shape[1] + 4)})
right.attrs = {'valid_pixels': 1, 'no_data_mask': 0}
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 3,
'subpix': 1})
cv = matching_cost_plugin.compute_cost_volume(left, right, -3, 2)
# Compute the disparity map and validity mask
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, left, right, cv)
# Validity mask ground truth
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0, 0, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0, 0, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0, 0, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER]
],
dtype=np.uint8)
# Check if the calculated validity mask is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
if __name__ == '__main__':
common.setup_logging()
unittest.main()
| 56.358566
| 134
| 0.598579
|
#
# This file is part of PANDORA
#
# https://github.com/CNES/Pandora
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import numpy as np
import xarray as xr
import common
import pandora
import pandora.constants as cst
import pandora.disparity as disparity
import pandora.matching_cost as matching_cost
from pandora.img_tools import read_img
from pandora.state_machine import PandoraMachine
class TestDisparity(unittest.TestCase):
def setUp(self):
# Create stereo images
data = np.array(([[1, 2, 4, 6],
[2, 4, 1, 6],
[6, 7, 8, 10]]), dtype=np.float64)
self.left = xr.Dataset({'im': (['row', 'col'], data)},
coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
self.left.attrs = {'valid_pixels': 0, 'no_data_mask': 1}
data = np.array(([[6, 1, 2, 4],
[6, 2, 4, 1],
[10, 6, 7, 8]]), dtype=np.float64)
self.right = xr.Dataset({'im': (['row', 'col'], data)},
coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
self.right.attrs = {'valid_pixels': 0, 'no_data_mask': 1}
def test_to_disp(self):
# Create the left cost volume, with SAD measure window size 1, subpixel 1, disp_min -3 disp_max 1
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 1,
'subpix': 1})
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, 1)
# Disparity map ground truth, for the images described in the setUp method
gt_disp = np.array([[1, 1, 1, -3],
[1, 1, 1, -3],
[1, 1, 1, -3]])
# Compute the disparity
disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})
disp = disparity_.to_disp(cv)
# Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(disp['disparity_map'].data, gt_disp)
#
# Test the to_disp method with negative disparity range
#
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, -1)
# Disparity map ground truth
gt_disp = np.array([[0, -1, -2, -3],
[0, -1, -1, -3],
[0, -1, -2, -3]])
# Compute the disparity
disp = disparity_.to_disp(cv)
# Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(disp['disparity_map'].data, gt_disp)
#
# Test the to_disp method with positive disparity range
#
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, 1, 3)
# Disparity map ground truth
gt_disp = np.array([[1, 1, 1, 0],
[1, 1, 1, 0],
[1, 1, 1, 0]])
# Compute the disparity
disp = disparity_.to_disp(cv)
# Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(disp['disparity_map'].data, gt_disp)
# Test disp_indices copy
# Modify the disparity map
disp['disparity_map'].data[0, 0] = -95
# Check if the xarray disp_indices is equal to the ground truth disparity map
np.testing.assert_array_equal(cv['disp_indices'].data, gt_disp)
def test_to_disp_with_offset(self):
# Create the left cost volume, with SAD measure window size 3, subpixel 1, disp_min -3 disp_max 1
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 3,
'subpix': 1})
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, 1)
# Disparity map ground truth, for the images described in the setUp method
# Check that gt is full size and that the border (i.e. outside [offset:-offset]) is equal to invalid_disparity
gt_disp = np.array([[-99, -99, -99, -99],
[-99, 1, 0, -99],
[-99, -99, -99, -99]])
# Compute the disparity
disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': -99})
disp = disparity_.to_disp(cv)
# Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(disp['disparity_map'].data, gt_disp)
#
# Test the to_disp method with negative disparity range
#
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, -1)
# Disparity map ground truth
gt_disp = np.array([[-99, -99, -99, -99],
[-99, -99, -1, -99],
[-99, -99, -99, -99]])
# Compute the disparity
disp = disparity_.to_disp(cv)
# Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(disp['disparity_map'].data, gt_disp)
#
# Test the to_disp method with positive disparity range
#
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, 1, 3)
# Disparity map ground truth
gt_disp = np.array([[-99, -99, -99, -99],
[-99, 1, -99, -99],
[-99, -99, -99, -99]])
# Compute the disparity
disp = disparity_.to_disp(cv)
# Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(disp['disparity_map'].data, gt_disp)
# Test disp_indices copy
# Modify the disparity map
disp['disparity_map'].data[0, 0] = -95
# Check if the xarray disp_indices is equal to the ground truth disparity map
np.testing.assert_array_equal(cv['disp_indices'].data, gt_disp)
def test_argmin_split(self):
# Create the left cost volume, with SAD measure, window size 1, subpixel 2, disp_min -3 disp_max 1
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 1,
'subpix': 2})
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, 1)
indices_nan = np.isnan(cv['cost_volume'].data)
cv['cost_volume'].data[indices_nan] = np.inf
# ground truth
gt_disp = np.array([[1., 1., 1., -3.],
[1., -0.5, 1., -3.],
[1., 1., -1.5, -3]], dtype=np.float32)
# Compute the disparity
disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})
disp = disparity_.argmin_split(cv)
# Check if the calculated coefficient map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(gt_disp, disp)
def test_argmax_split(self):
# Create the left cost volume, with ZNCC measure, window size 1, subpixel 2, disp_min -3 disp_max 1
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'zncc', 'window_size': 1,
'subpix': 2})
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, 1)
indices_nan = np.isnan(cv['cost_volume'].data)
cv['cost_volume'].data[indices_nan] = -np.inf
# ground truth
gt_disp = np.array([[0., -1., -2., -3.],
[0., -1., -2., -3.],
[0., -1., -2., -3.]], dtype=np.float32)
# Compute the disparity
disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})
disp = disparity_.argmax_split(cv)
# Check if the calculated coefficient map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(gt_disp, disp)
def test_coefficient_map(self):
# Create the left cost volume, with SAD measure window size 1, subpixel 1, disp_min -3 disp_max 1
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 1,
'subpix': 1})
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, 1)
# Compute the disparity
disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})
disparity_.to_disp(cv)
# Coefficient map ground truth, for the images described in the setUp method
gt_coeff = np.array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
# Compute the disparity, and the coefficient map
coeff = disparity_.coefficient_map(cv)
# Check if the calculated coefficient map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(coeff.data, gt_coeff)
def test_approximate_right_disparity(self):
# Create the left cost volume, with SAD measure window size 3 and subpixel 1
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 3,
'subpix': 1})
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -2, 1)
# Right disparity map ground truth, for the images described in the setUp method
gt_disp = np.array([[0, 0, 0, 0],
[0, 0, -1, 0],
[0, 0, 0, 0]])
# Compute the right disparity map
disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})
disp_r = disparity_.approximate_right_disparity(cv, self.right)
# Check if the calculated right disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(disp_r['disparity_map'].data, gt_disp)
def test_right_disparity_subpixel(self):
# Create the left cost volume, with SAD measure window size 3 and subpixel 4
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 3,
'subpix': 4})
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -2, 1)
# Right disparity map ground truth
gt_disp = np.array([[0, 0, 0, 0],
[0, 0, -1, 0],
[0, 0, 0, 0]])
# Compute the right disparity map
disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})
disp_r = disparity_.approximate_right_disparity(cv, self.right)
# Check if the calculated right disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(disp_r['disparity_map'].data, gt_disp)
@staticmethod
def test_right_disparity_comparaison():
# Build the default configuration
default_cfg = pandora.check_json.default_short_configuration
pandora_left = read_img('tests/pandora/left.png', no_data=np.nan, mask=None)
pandora_right = read_img('tests/pandora/right.png', no_data=np.nan, mask=None)
fast_cfg = {
'pipeline': {
'right_disp_map': {
'method': 'accurate'
},
'matching_cost': {
'matching_cost_method': 'census'
},
'disparity': {
'disparity_method': 'wta'
},
'refinement': {
'refinement_method': 'vfit'
},
'validation': {
'validation_method': 'cross_checking',
'right_left_mode': 'approximate'
}
}
}
pandora_machine_fast = PandoraMachine()
cfg = pandora.check_json.update_conf(default_cfg, fast_cfg)
left, right_fast = \
pandora.run(pandora_machine_fast, pandora_left, pandora_right, -60, 0, cfg['pipeline']) # pylint: disable=unused-variable
acc_cfg = {
'pipeline':
{
'right_disp_map': {
'method': 'accurate'
},
'matching_cost': {
'matching_cost_method': 'census'
},
'disparity': {
'disparity_method': 'wta'
},
'refinement': {
'refinement_method': 'vfit'
},
'validation': {
'validation_method': 'cross_checking',
'right_left_mode': 'accurate',
}
}
}
pandora_machine_acc = PandoraMachine()
cfg = pandora.check_json.update_conf(default_cfg, acc_cfg)
left, right_acc = pandora.run(pandora_machine_acc, pandora_left, pandora_right, -60, 0, cfg['pipeline'])
# Check if the calculated disparity map in fast mode is equal to the disparity map in accurate mode
np.testing.assert_array_equal(right_fast['disparity_map'].data, right_acc['disparity_map'].data)
# Check if the calculated coefficient map in fast mode is equal to the coefficient map in accurate mode
np.testing.assert_array_equal(right_fast['interpolated_coeff'].data, right_acc['interpolated_coeff'].data)
def test_to_disp_validity_mask(self):
# ------ Negative disparities ------
# Create the left cost volume, with SAD measure window size 1, subpixel 1, disp_min -3 disp_max -1
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 1,
'subpix': 1})
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, -1)
# Compute the disparity map and validity mask
disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, self.left, self.right, cv)
# Validity mask ground truth
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0],
[cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0],
[cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0]], dtype=np.uint16)
# Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ------ Positive disparities ------
# Create the left cost volume, with SAD measure window size 1, subpixel 1, disp_min 1 disp_max 2
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, 1, 2)
# Compute the disparity map and validity mask
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, self.left, self.right, cv)
# Validity mask ground truth
gt_mask = np.array([[0, 0, 1 << 2, cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING],
[0, 0, 1 << 2, cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING],
[0, 0, 1 << 2, cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING]],
dtype=np.uint16)
# Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ------ Negative and positive disparities ------
# Create the left cost volume, with SAD measure window size 1, subpixel 1, disp_min -1 disp_max 1
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -1, 1)
# Compute the disparity map and validity mask
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, self.left, self.right, cv)
# Validity mask ground truth
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE],
[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE],
[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE]],
dtype=np.uint16)
        # Check if the calculated validity mask is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ------ Variable grids of disparities ------
# Disp_min and disp_max
disp_min_grid = np.array([[-3, -2, -3, -1],
[-2, -2, -1, -3],
[-1, -2, -2, -3]])
disp_max_grid = np.array([[-1, -1, -2, 0],
[0, -1, 0, 0],
[0, 0, -1, -1]])
# Create the left cost volume, with SAD measure window size 1, subpixel 1, disp_min -3 disp_max -1
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 1,
'subpix': 1})
dmin, dmax = matching_cost_plugin.dmin_dmax(disp_min_grid, disp_max_grid)
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, dmin, dmax)
matching_cost_plugin.cv_masked(self.left, self.right, cv, disp_min_grid, disp_max_grid)
# Compute the disparity map and validity mask
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, self.left, self.right, cv)
# Validity mask ground truth
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0],
[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0],
[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0]], dtype=np.uint16)
        # Check if the calculated validity mask is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
def test_to_disp_validity_mask_with_offset(self):
# ------ Negative disparities ------
        # Create the left cost volume, with SAD measure window size 3, subpixel 1, disp_min -3 disp_max -1
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 3,
'subpix': 1})
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, -1)
# Compute the disparity map and validity mask
disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, self.left, self.right, cv)
# Validity mask ground truth
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER]], dtype=np.uint16)
        # Check if the calculated validity mask is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ------ Positive disparities ------
        # Create the left cost volume, with SAD measure window size 3, subpixel 1, disp_min 1 disp_max 2
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, 1, 2)
# Compute the disparity map and validity mask
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, self.left, self.right, cv)
# Validity mask ground truth
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER]], dtype=np.uint16)
        # Check if the calculated validity mask is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ------ Negative and positive disparities ------
        # Create the left cost volume, with SAD measure window size 3, subpixel 1, disp_min -1 disp_max 1
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -1, 1)
# Compute the disparity map and validity mask
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, self.left, self.right, cv)
# Validity mask ground truth
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER]], dtype=np.uint16)
        # Check if the calculated validity mask is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ------ Variable grids of disparities ------
# Disp_min and disp_max
disp_min_grid = np.array([[-3, -2, -3, -1],
[-2, -2, -1, -3],
[-1, -2, -2, -3]])
disp_max_grid = np.array([[-1, -1, -2, 0],
[0, -1, 0, 0],
[0, 0, -1, -1]])
        # Create the left cost volume, with SAD measure window size 3, subpixel 1, disp_min -3 disp_max -1
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 3,
'subpix': 1})
dmin, dmax = matching_cost_plugin.dmin_dmax(disp_min_grid, disp_max_grid)
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, dmin, dmax)
matching_cost_plugin.cv_masked(self.left, self.right, cv, disp_min_grid, disp_max_grid)
# Compute the disparity map and validity mask
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, self.left, self.right, cv)
# Validity mask ground truth
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER]], dtype=np.uint16)
        # Check if the calculated validity mask is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
def test_approximate_right_disparity_validity_mask(self):
# Create the left cost volume, with SAD measure window size 1 and subpixel 1
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 1,
'subpix': 1})
# ------ Negative and positive disparities ------
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -2, 1)
# Validity mask ground truth ( for disparities -1 0 1 2 )
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE],
[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE],
[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE]], dtype=np.uint16)
# Compute the right disparity map and the validity mask
disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})
dataset = disparity_.approximate_right_disparity(cv, self.right)
        # Check if the calculated validity mask is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ------ Negative disparities ------
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, 1, 2)
# Validity mask ground truth ( for disparities -2 -1 )
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
0, 0],
[cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
0, 0],
[cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
0, 0]], dtype=np.uint16)
# Compute the right disparity map and the validity mask
dataset = disparity_.approximate_right_disparity(cv, self.right)
        # Check if the calculated validity mask is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ------ Positive disparities ------
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -2, -1)
# Validity mask ground truth ( for disparities 1 2 )
gt_mask = np.array([[0, 0, cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING],
[0, 0, cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING],
[0, 0, cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING]], dtype=np.uint16)
# Compute the right disparity map and the validity mask
dataset = disparity_.approximate_right_disparity(cv, self.right)
        # Check if the calculated validity mask is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
@staticmethod
def test_validity_mask():
# Masks convention
# 1 = valid
# 2 = no_data
# ---------------------- Test with positive and negative disparity range ----------------------
data = np.array(([[1, 2, 4, 6],
[2, 4, 1, 6],
[6, 7, 8, 10]]), dtype=np.float64)
left_mask = np.array([[2, 1, 1, 1],
[1, 2, 4, 1],
[5, 1, 1, 2]], dtype=np.uint8)
left = xr.Dataset({'im': (['row', 'col'], data),
'msk': (['row', 'col'], left_mask)},
coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
left.attrs = {'valid_pixels': 1, 'no_data_mask': 2}
data = np.array(([[6, 1, 2, 4],
[6, 2, 4, 1],
[10, 6, 7, 8]]), dtype=np.float64)
right_mask = np.array([[1, 1, 3, 5],
[4, 1, 1, 1],
[2, 2, 4, 6]], dtype=np.uint8)
right = xr.Dataset({'im': (['row', 'col'], data),
'msk': (['row', 'col'], right_mask)},
coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
right.attrs = {'valid_pixels': 1, 'no_data_mask': 2}
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 1,
'subpix': 1})
cv = matching_cost_plugin.compute_cost_volume(left, right, -1, 1)
# Compute the disparity map and validity mask
disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, left, right, cv)
# Validity mask ground truth
gt_mask = np.array(
[[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE + cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
0, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE + cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_RIGHT],
[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_LEFT, cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE],
[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE + cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_LEFT +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING, 0, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE + cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER +
cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_RIGHT]], dtype=np.uint16)
# Check if the calculated validity mask is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ---------------------- Test with negative disparity range ----------------------
cv = matching_cost_plugin.compute_cost_volume(left, right, -2, -1)
# Compute the disparity map and validity mask
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, left, right, cv)
# Validity mask ground truth
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING +
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0, 0],
[cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER +
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_RIGHT,
cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_LEFT, 0],
[cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING +
cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_LEFT,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER]],
dtype=np.uint16)
# Check if the calculated validity mask is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ---------------------- Test with positive disparity range ----------------------
cv = matching_cost_plugin.compute_cost_volume(left, right, 1, 2)
# Compute the disparity map and validity mask
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, left, right, cv)
# Validity mask ground truth
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER, cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_RIGHT,
cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_RIGHT +
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING],
[0, cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_LEFT,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING],
[cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_LEFT, cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_RIGHT,
cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_RIGHT +
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING]],
dtype=np.uint16)
# Check if the calculated validity mask is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ---------------------- Test with positive and negative disparity range and window size = 3----------------
data = np.array(([[1, 2, 4, 6, 1],
[2, 4, 1, 6, 1],
[6, 7, 8, 10, 1],
[0, 5, 6, 7, 8]]), dtype=np.float64)
left_mask = np.array([[2, 1, 1, 1, 1],
[1, 2, 4, 1, 1],
[5, 2, 1, 1, 1],
[1, 1, 1, 1, 1]], dtype=np.uint8)
left = xr.Dataset({'im': (['row', 'col'], data),
'msk': (['row', 'col'], left_mask)},
coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
left.attrs = {'valid_pixels': 1, 'no_data_mask': 2}
data = np.array(([[6, 1, 2, 4, 1],
[6, 2, 4, 1, 6],
[10, 6, 7, 8, 1],
[5, 6, 7, 8, 0]]), dtype=np.float64)
right_mask = np.array([[1, 1, 1, 2, 1],
[5, 1, 1, 1, 1],
[2, 1, 1, 6, 1],
[0, 1, 1, 1, 1]], dtype=np.uint8)
right = xr.Dataset({'im': (['row', 'col'], data),
'msk': (['row', 'col'], right_mask)},
coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
right.attrs = {'valid_pixels': 1, 'no_data_mask': 2}
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 3,
'subpix': 1})
cv = matching_cost_plugin.compute_cost_volume(left, right, -1, 1)
# Compute the disparity map and validity mask
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, left, right, cv)
# Validity mask ground truth
gt_mask = np.array(
[[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE + cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING +
cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_LEFT,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE + cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER, cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
],
dtype=np.uint16)
# Check if the calculated validity mask is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ---------------------- Test with positive and negative disparity range on flag 1 ----------------------
# Masks convention
# 1 = valid
# 0 = no_data
data = np.ones((10, 10), dtype=np.float64)
left_mask = np.ones((10, 10), dtype=np.uint8)
left = xr.Dataset({'im': (['row', 'col'], data),
'msk': (['row', 'col'], left_mask)},
coords={'row': np.arange(5, data.shape[0] + 5), 'col': np.arange(4, data.shape[1] + 4)})
left.attrs = {'valid_pixels': 1, 'no_data_mask': 0}
data = np.ones((10, 10), dtype=np.float64)
right_mask = np.ones((10, 10), dtype=np.uint8)
right_mask = np.tril(right_mask, -1.5)
right = xr.Dataset({'im': (['row', 'col'], data),
'msk': (['row', 'col'], right_mask)},
coords={'row': np.arange(5, data.shape[0] + 5), 'col': np.arange(4, data.shape[1] + 4)})
right.attrs = {'valid_pixels': 1, 'no_data_mask': 0}
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 3,
'subpix': 1})
cv = matching_cost_plugin.compute_cost_volume(left, right, -3, 2)
# Compute the disparity map and validity mask
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, left, right, cv)
# Validity mask ground truth
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0, 0, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0, 0, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0, 0, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER]
],
dtype=np.uint8)
# Check if the calculated validity mask is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
if __name__ == '__main__':
common.setup_logging()
unittest.main()
| true
| true
|
f70aaa0071d96e3d126bd914751dd5bae717ae54
| 3,238
|
py
|
Python
|
first_lambda/service.py
|
mylar-pr/DaaS
|
e41fa9e9fbda66d7150f00e6db13dd3a76cd3501
|
[
"MIT"
] | null | null | null |
first_lambda/service.py
|
mylar-pr/DaaS
|
e41fa9e9fbda66d7150f00e6db13dd3a76cd3501
|
[
"MIT"
] | null | null | null |
first_lambda/service.py
|
mylar-pr/DaaS
|
e41fa9e9fbda66d7150f00e6db13dd3a76cd3501
|
[
"MIT"
] | null | null | null |
import datetime
import json
import os
import boto3
import pandas as pd
import io
import requests
import numpy as np
from io import StringIO
import uuid
s3 = boto3.resource(
service_name='s3',
region_name='us-east-2')
bucket_name = 'secom-daas-bucket' # already created on S3
link1 = 'https://archive.ics.uci.edu/ml/machine-learning-databases/secom/secom.data'
link2 = "https://archive.ics.uci.edu/ml/machine-learning-databases/secom/secom_labels.data"
links = [link1,link2]
path = "/tmp/"
timestamp = str(int(datetime.datetime.timestamp(datetime.datetime.now())))
def timestampify(link,timestamp):
return link.split("/")[-1].split(".")[0]+"_"+timestamp+".data"
data_filename = timestampify(link1,timestamp)
label_filename = timestampify(link2,timestamp)
def download_data():
url = link1
r = requests.get(url)
with open(path + data_filename, 'wb') as f:
f.write(r.content)
files = r.content
f.close()
print("Downloaded Secom data.")
url = link2
r = requests.get(url)
with open(path + label_filename, 'wb') as f:
f.write(r.content)
files = r.content
f.close()
print("Downloaded Secom labels.")
#time_stamp = str(int(datetime.datetime.timestamp(datetime.datetime.now())))
def process_time(secom_labels):
return [" ".join(i.decode("utf-8").split()[1:]).split('"')[1] for i in secom_labels]
def process_data(secom):
return np.array([pd.to_numeric(bytearray(i).decode("UTF-8").split(),errors='coerce') for i in secom]).astype(str)
def process_dataset(secom_path,secom_labels_path):
print("processing dataset from {} and {}".format(secom_path,secom_labels_path))
#read the downloaded .data files
with open(secom_path,'rb') as myfile:
secom= myfile.readlines()
myfile.close()
with open(secom_labels_path,'rb') as myfile:
secom_labels= myfile.readlines()
myfile.close()
columns1= ["Time"]
df1 = pd.DataFrame(data=process_time(secom_labels),
columns=columns1)
df1
features_size = len(secom[0].split())
columns2 = ["feature "+ str(i) for i in range(features_size)]
df2 = pd.DataFrame(data=process_data(secom),
columns=columns2)
df2.fillna(df2.mean(),inplace=True)
df3 = pd.concat([df1,df2],axis=1).reset_index()
df3 = df3.rename(columns = {'index':'secomId'})
#set the secomId as unique ids
df3['secomId'] = pd.Series([int(uuid.uuid4().int/(10**30)) for i in range(df3.shape[0])])
return df3
#bucket = 'my_bucket_name' # already created on S3
def upload_to_s3(df,bucket_name,dest_path='df.csv'):
csv_buffer = StringIO()
df.to_csv(csv_buffer)
#s3_resource = boto3.resource('s3')
s3.Object(bucket_name, dest_path).put(Body=csv_buffer.getvalue())
print("Succesfully stored csv file into S3...")
def handler(event, context):
# Your code goes here!
startTime = datetime.datetime.now()
download_data()
df = process_dataset(path + data_filename,path + label_filename)
upload_to_s3(df, bucket_name, 'processed/processed_'+timestamp+".csv" )
print(datetime.datetime.now() - startTime)
handler(1,1)
| 26.540984
| 117
| 0.665843
|
import datetime
import json
import os
import boto3
import pandas as pd
import io
import requests
import numpy as np
from io import StringIO
import uuid
s3 = boto3.resource(
service_name='s3',
region_name='us-east-2')
bucket_name = 'secom-daas-bucket'
link1 = 'https://archive.ics.uci.edu/ml/machine-learning-databases/secom/secom.data'
link2 = "https://archive.ics.uci.edu/ml/machine-learning-databases/secom/secom_labels.data"
links = [link1,link2]
path = "/tmp/"
timestamp = str(int(datetime.datetime.timestamp(datetime.datetime.now())))
def timestampify(link,timestamp):
return link.split("/")[-1].split(".")[0]+"_"+timestamp+".data"
data_filename = timestampify(link1,timestamp)
label_filename = timestampify(link2,timestamp)
def download_data():
url = link1
r = requests.get(url)
with open(path + data_filename, 'wb') as f:
f.write(r.content)
files = r.content
f.close()
print("Downloaded Secom data.")
url = link2
r = requests.get(url)
with open(path + label_filename, 'wb') as f:
f.write(r.content)
files = r.content
f.close()
print("Downloaded Secom labels.")
def process_time(secom_labels):
return [" ".join(i.decode("utf-8").split()[1:]).split('"')[1] for i in secom_labels]
def process_data(secom):
return np.array([pd.to_numeric(bytearray(i).decode("UTF-8").split(),errors='coerce') for i in secom]).astype(str)
def process_dataset(secom_path,secom_labels_path):
print("processing dataset from {} and {}".format(secom_path,secom_labels_path))
#read the downloaded .data files
with open(secom_path,'rb') as myfile:
secom= myfile.readlines()
myfile.close()
with open(secom_labels_path,'rb') as myfile:
secom_labels= myfile.readlines()
myfile.close()
columns1= ["Time"]
df1 = pd.DataFrame(data=process_time(secom_labels),
columns=columns1)
df1
features_size = len(secom[0].split())
columns2 = ["feature "+ str(i) for i in range(features_size)]
df2 = pd.DataFrame(data=process_data(secom),
columns=columns2)
df2.fillna(df2.mean(),inplace=True)
df3 = pd.concat([df1,df2],axis=1).reset_index()
df3 = df3.rename(columns = {'index':'secomId'})
#set the secomId as unique ids
df3['secomId'] = pd.Series([int(uuid.uuid4().int/(10**30)) for i in range(df3.shape[0])])
return df3
#bucket = 'my_bucket_name' # already created on S3
def upload_to_s3(df,bucket_name,dest_path='df.csv'):
csv_buffer = StringIO()
df.to_csv(csv_buffer)
#s3_resource = boto3.resource('s3')
s3.Object(bucket_name, dest_path).put(Body=csv_buffer.getvalue())
print("Succesfully stored csv file into S3...")
def handler(event, context):
# Your code goes here!
startTime = datetime.datetime.now()
download_data()
df = process_dataset(path + data_filename,path + label_filename)
upload_to_s3(df, bucket_name, 'processed/processed_'+timestamp+".csv" )
print(datetime.datetime.now() - startTime)
handler(1,1)
| true
| true
|
f70aaad9e57fa7eb04163e7602797b675c9f999e
| 1,763
|
py
|
Python
|
import_dataset/check-triggers.py
|
MarliesG/Alice
|
661a010a2ecf56aec48dcb407d07ae1b0df6915a
|
[
"MIT"
] | 3
|
2021-08-14T16:18:12.000Z
|
2022-01-11T01:27:34.000Z
|
import_dataset/check-triggers.py
|
MarliesG/Alice
|
661a010a2ecf56aec48dcb407d07ae1b0df6915a
|
[
"MIT"
] | 1
|
2021-12-15T09:18:42.000Z
|
2021-12-29T21:00:58.000Z
|
import_dataset/check-triggers.py
|
MarliesG/Alice
|
661a010a2ecf56aec48dcb407d07ae1b0df6915a
|
[
"MIT"
] | 2
|
2021-08-12T13:32:28.000Z
|
2021-12-10T10:01:47.000Z
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Check alignments
# Check alignments of stimuli with the EEG data. The EEG recording contains a record of the acoustic stimulus, which can be compared with the stimulus itself. This loads the events through the pipeline in `alice.py`, i.e. the trigger correction is already applied and all subjects should have the correct alignment.
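# As a rough numerical cross-check (a minimal sketch added here, not part of the original pipeline): if the alignment is correct, the lag that maximises the cross-correlation between the audio channel extracted from the EEG and the stimulus envelope should be close to zero. `eeg_audio` and `envelope` below are hypothetical 1-D numpy arrays assumed to be sampled at the same rate.

# +
import numpy as np


def estimate_lag(eeg_audio, envelope):
    """Return the lag (in samples) at which the cross-correlation of the two signals peaks; 0 means aligned."""
    a = eeg_audio - eeg_audio.mean()
    b = envelope - envelope.mean()
    xcorr = np.correlate(a, b, mode='full')  # cross-correlation evaluated at every possible lag
    return int(np.argmax(xcorr)) - (len(b) - 1)  # index of the zero-lag term is len(b) - 1
# -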
# +
# %matplotlib inline
from eelbrain import *
from alice import alice
# load the acoustic envelope predictor for each stimulus
gt = {f'{i}': alice.load_predictor(f'{i}~gammatone-1', 0.002, 1000, name='WAV') for i in range(1, 13)}
for y in gt.values():
y /= y.std()
# -
for subject in alice:
events = alice.load_events(raw='raw', data_raw=True)
raw = events.info['raw']
raw.load_data()
# S16, S22 have broken AUX channels
if subject in ['S05', 'S38']:
continue # no AUD channel
for name in ['AUD', 'Aux5']:
if name in raw.ch_names:
ch = raw.ch_names.index(name)
break
else:
print(subject, raw.ch_names)
raise
xs = []
# extract audio from EEG
for segment, i0 in events.zip('event', 'i_start'):
x = NDVar(raw._data[ch, i0:i0+1000], UTS(0, 0.002, 1000), name='EEG')
x -= x.min()
x /= x.std()
xs.append([x, gt[segment]])
p = plot.UTS(xs, axh=2, w=10, ncol=1, title=subject, axtitle=events['trigger'])
# display and close to avoid having too many open figures
display(p)
p.close()
| 30.396552
| 314
| 0.626773
|
from eelbrain import *
from alice import alice
gt = {f'{i}': alice.load_predictor(f'{i}~gammatone-1', 0.002, 1000, name='WAV') for i in range(1, 13)}
for y in gt.values():
y /= y.std()
for subject in alice:
events = alice.load_events(raw='raw', data_raw=True)
raw = events.info['raw']
raw.load_data()
if subject in ['S05', 'S38']:
        continue
    for name in ['AUD', 'Aux5']:
if name in raw.ch_names:
ch = raw.ch_names.index(name)
break
else:
print(subject, raw.ch_names)
raise
xs = []
for segment, i0 in events.zip('event', 'i_start'):
x = NDVar(raw._data[ch, i0:i0+1000], UTS(0, 0.002, 1000), name='EEG')
x -= x.min()
x /= x.std()
xs.append([x, gt[segment]])
p = plot.UTS(xs, axh=2, w=10, ncol=1, title=subject, axtitle=events['trigger'])
display(p)
p.close()
| true
| true
|
f70aad19b18d8123fb6c2b4551fa9c099adc5484
| 10,940
|
py
|
Python
|
pytorch_lightning/callbacks/lr_monitor.py
|
calebrob6/pytorch-lightning
|
4c79b3a5b343866217784c66d122819c59a92c1d
|
[
"Apache-2.0"
] | 1
|
2021-07-22T14:06:43.000Z
|
2021-07-22T14:06:43.000Z
|
pytorch_lightning/callbacks/lr_monitor.py
|
calebrob6/pytorch-lightning
|
4c79b3a5b343866217784c66d122819c59a92c1d
|
[
"Apache-2.0"
] | null | null | null |
pytorch_lightning/callbacks/lr_monitor.py
|
calebrob6/pytorch-lightning
|
4c79b3a5b343866217784c66d122819c59a92c1d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Learning Rate Monitor
=====================
Monitor and log the learning rate for lr schedulers during training.
"""
from collections import defaultdict
from typing import Any, DefaultDict, Dict, List, Optional, Set, Type
from torch.optim.optimizer import Optimizer
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.utilities import rank_zero_warn
from pytorch_lightning.utilities.exceptions import MisconfigurationException
class LearningRateMonitor(Callback):
r"""
    Automatically monitors and logs the learning rate for learning rate schedulers during training.
Args:
logging_interval: set to ``'epoch'`` or ``'step'`` to log ``lr`` of all optimizers
at the same interval, set to ``None`` to log at individual interval
according to the ``interval`` key of each scheduler. Defaults to ``None``.
log_momentum: option to also log the momentum values of the optimizer, if the optimizer
has the ``momentum`` or ``betas`` attribute. Defaults to ``False``.
Raises:
MisconfigurationException:
If ``logging_interval`` is none of ``"step"``, ``"epoch"``, or ``None``.
Example::
>>> from pytorch_lightning import Trainer
>>> from pytorch_lightning.callbacks import LearningRateMonitor
>>> lr_monitor = LearningRateMonitor(logging_interval='step')
>>> trainer = Trainer(callbacks=[lr_monitor])
Logging names are automatically determined based on optimizer class name.
    In case of multiple optimizers of the same type, they will be named ``Adam``,
    ``Adam-1`` etc. If an optimizer has multiple parameter groups they will
    be named ``Adam/pg1``, ``Adam/pg2`` etc. To control naming, pass in a
``name`` keyword in the construction of the learning rate schedulers.
A ``name`` keyword can also be used for parameter groups in the
construction of the optimizer.
Example::
        def configure_optimizers(self):
optimizer = torch.optim.Adam(...)
lr_scheduler = {
                'scheduler': torch.optim.lr_scheduler.LambdaLR(optimizer, ...),
'name': 'my_logging_name'
}
return [optimizer], [lr_scheduler]
Example::
        def configure_optimizers(self):
optimizer = torch.optim.SGD(
[{
'params': [p for p in self.parameters()],
'name': 'my_parameter_group_name'
}],
lr=0.1
)
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, ...)
return [optimizer], [lr_scheduler]
"""
def __init__(self, logging_interval: Optional[str] = None, log_momentum: bool = False):
if logging_interval not in (None, 'step', 'epoch'):
raise MisconfigurationException('logging_interval should be `step` or `epoch` or `None`.')
self.logging_interval = logging_interval
self.log_momentum = log_momentum
self.lrs = None
self.lr_sch_names = []
def on_train_start(self, trainer, *args, **kwargs):
"""
Called before training, determines unique names for all lr
schedulers in the case of multiple of the same type or in
the case of multiple parameter groups
Raises:
MisconfigurationException:
If ``Trainer`` has no ``logger``.
"""
if not trainer.logger:
raise MisconfigurationException(
'Cannot use `LearningRateMonitor` callback with `Trainer` that has no logger.'
)
if not trainer.lr_schedulers:
rank_zero_warn(
'You are using `LearningRateMonitor` callback with models that'
' have no learning rate schedulers. Please see documentation'
' for `configure_optimizers` method.', RuntimeWarning
)
if self.log_momentum:
def _check_no_key(key):
return any(key not in sch['scheduler'].optimizer.defaults for sch in trainer.lr_schedulers)
if _check_no_key('momentum') and _check_no_key('betas'):
rank_zero_warn(
"You have set log_momentum=True, but some optimizers do not"
" have momentum. This will log a value 0 for the momentum.", RuntimeWarning
)
# Find names for schedulers
names = self._find_names(trainer.lr_schedulers)
# Initialize for storing values
self.lrs = {name: [] for name in names}
self.last_momentum_values = {name + "-momentum": None for name in names}
def on_train_batch_start(self, trainer, *args, **kwargs):
if not self._should_log(trainer):
return
if self.logging_interval != 'epoch':
interval = 'step' if self.logging_interval is None else 'any'
latest_stat = self._extract_stats(trainer, interval)
if latest_stat:
trainer.logger.log_metrics(latest_stat, step=trainer.global_step)
def on_train_epoch_start(self, trainer, *args, **kwargs):
if self.logging_interval != 'step':
interval = 'epoch' if self.logging_interval is None else 'any'
latest_stat = self._extract_stats(trainer, interval)
if latest_stat:
trainer.logger.log_metrics(latest_stat, step=trainer.global_step)
def _extract_stats(self, trainer, interval: str) -> Dict[str, float]:
latest_stat = {}
names = self._find_names(trainer.lr_schedulers, add_lr_sch_names=False)
self._remap_keys(names)
for name, scheduler in zip(self.lr_sch_names, trainer.lr_schedulers):
if scheduler['interval'] == interval or interval == 'any':
opt = scheduler['scheduler'].optimizer
param_groups = opt.param_groups
use_betas = 'betas' in opt.defaults
for i, pg in enumerate(param_groups):
name_and_suffix = self._add_suffix(name, param_groups, i)
lr = self._extract_lr(pg, name_and_suffix)
latest_stat.update(lr)
momentum = self._extract_momentum(
param_group=pg, name=name_and_suffix.replace(name, f'{name}-momentum'), use_betas=use_betas
)
latest_stat.update(momentum)
return latest_stat
def _extract_lr(self, param_group: Dict[str, Any], name: str) -> Dict[str, Any]:
lr = param_group.get('lr')
self.lrs[name].append(lr)
return {name: lr}
def _remap_keys(self, names: List[str], token: str = '/pg1') -> None:
"""
        This function is used to remap the keys if the number of param groups for a given optimizer increased.
"""
for new_name in names:
old_name = new_name.replace(token, '')
if token in new_name and old_name in self.lrs:
self.lrs[new_name] = self.lrs.pop(old_name)
elif new_name not in self.lrs:
self.lrs[new_name] = []
def _extract_momentum(self, param_group: Dict[str, Any], name: str, use_betas: bool) -> Dict[str, float]:
if not self.log_momentum:
return {}
momentum = param_group.get('betas')[0] if use_betas else param_group.get('momentum', 0)
self.last_momentum_values[name] = momentum
return {name: momentum}
def _add_prefix(
self, name: str, optimizer_cls: Type[Optimizer], seen_optimizer_types: DefaultDict[Type[Optimizer], int]
) -> str:
if optimizer_cls not in seen_optimizer_types:
return name
count = seen_optimizer_types[optimizer_cls]
return name + f'-{count - 1}' if count > 1 else name
def _add_suffix(self, name: str, param_groups: List[Dict], param_group_index: int, use_names: bool = True) -> str:
if len(param_groups) > 1:
if not use_names:
return f'{name}/pg{param_group_index+1}'
pg_name = param_groups[param_group_index].get('name', f'pg{param_group_index+1}')
return f'{name}/{pg_name}'
elif use_names:
pg_name = param_groups[param_group_index].get('name')
return f'{name}/{pg_name}' if pg_name else name
return name
def _duplicate_param_group_names(self, param_groups: List[Dict]) -> Set[str]:
names = [pg.get('name', f'pg{i}') for i, pg in enumerate(param_groups, start=1)]
unique = set(names)
if len(names) == len(unique):
return set()
return {n for n in names if names.count(n) > 1}
def _find_names(self, lr_schedulers: List, add_lr_sch_names: bool = True) -> List[str]:
# Create unique names in the case we have multiple of the same learning
# rate scheduler + multiple parameter groups
names = []
seen_optimizers = []
seen_optimizer_types = defaultdict(int)
for scheduler in lr_schedulers:
sch = scheduler['scheduler']
if scheduler['name'] is not None:
name = scheduler['name']
else:
name = 'lr-' + sch.optimizer.__class__.__name__
seen_optimizers.append(sch.optimizer)
optimizer_cls = type(sch.optimizer)
if scheduler['name'] is None:
seen_optimizer_types[optimizer_cls] += 1
# Multiple param groups for the same scheduler
param_groups = sch.optimizer.param_groups
duplicates = self._duplicate_param_group_names(param_groups)
if duplicates:
raise MisconfigurationException(
'A single `Optimizer` cannot have multiple parameter groups with identical '
f'`name` values. {name} has duplicated parameter group names {duplicates}'
)
name = self._add_prefix(name, optimizer_cls, seen_optimizer_types)
names.extend(self._add_suffix(name, param_groups, i) for i in range(len(param_groups)))
if add_lr_sch_names:
self.lr_sch_names.append(name)
return names
@staticmethod
def _should_log(trainer) -> bool:
return (trainer.global_step + 1) % trainer.log_every_n_steps == 0 or trainer.should_stop
| 40.973783
| 118
| 0.625686
|
from collections import defaultdict
from typing import Any, DefaultDict, Dict, List, Optional, Set, Type
from torch.optim.optimizer import Optimizer
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.utilities import rank_zero_warn
from pytorch_lightning.utilities.exceptions import MisconfigurationException
class LearningRateMonitor(Callback):
def __init__(self, logging_interval: Optional[str] = None, log_momentum: bool = False):
if logging_interval not in (None, 'step', 'epoch'):
raise MisconfigurationException('logging_interval should be `step` or `epoch` or `None`.')
self.logging_interval = logging_interval
self.log_momentum = log_momentum
self.lrs = None
self.lr_sch_names = []
def on_train_start(self, trainer, *args, **kwargs):
if not trainer.logger:
raise MisconfigurationException(
'Cannot use `LearningRateMonitor` callback with `Trainer` that has no logger.'
)
if not trainer.lr_schedulers:
rank_zero_warn(
'You are using `LearningRateMonitor` callback with models that'
' have no learning rate schedulers. Please see documentation'
' for `configure_optimizers` method.', RuntimeWarning
)
if self.log_momentum:
def _check_no_key(key):
return any(key not in sch['scheduler'].optimizer.defaults for sch in trainer.lr_schedulers)
if _check_no_key('momentum') and _check_no_key('betas'):
rank_zero_warn(
"You have set log_momentum=True, but some optimizers do not"
" have momentum. This will log a value 0 for the momentum.", RuntimeWarning
)
names = self._find_names(trainer.lr_schedulers)
self.lrs = {name: [] for name in names}
self.last_momentum_values = {name + "-momentum": None for name in names}
def on_train_batch_start(self, trainer, *args, **kwargs):
if not self._should_log(trainer):
return
if self.logging_interval != 'epoch':
interval = 'step' if self.logging_interval is None else 'any'
latest_stat = self._extract_stats(trainer, interval)
if latest_stat:
trainer.logger.log_metrics(latest_stat, step=trainer.global_step)
def on_train_epoch_start(self, trainer, *args, **kwargs):
if self.logging_interval != 'step':
interval = 'epoch' if self.logging_interval is None else 'any'
latest_stat = self._extract_stats(trainer, interval)
if latest_stat:
trainer.logger.log_metrics(latest_stat, step=trainer.global_step)
def _extract_stats(self, trainer, interval: str) -> Dict[str, float]:
latest_stat = {}
names = self._find_names(trainer.lr_schedulers, add_lr_sch_names=False)
self._remap_keys(names)
for name, scheduler in zip(self.lr_sch_names, trainer.lr_schedulers):
if scheduler['interval'] == interval or interval == 'any':
opt = scheduler['scheduler'].optimizer
param_groups = opt.param_groups
use_betas = 'betas' in opt.defaults
for i, pg in enumerate(param_groups):
name_and_suffix = self._add_suffix(name, param_groups, i)
lr = self._extract_lr(pg, name_and_suffix)
latest_stat.update(lr)
momentum = self._extract_momentum(
param_group=pg, name=name_and_suffix.replace(name, f'{name}-momentum'), use_betas=use_betas
)
latest_stat.update(momentum)
return latest_stat
def _extract_lr(self, param_group: Dict[str, Any], name: str) -> Dict[str, Any]:
lr = param_group.get('lr')
self.lrs[name].append(lr)
return {name: lr}
def _remap_keys(self, names: List[str], token: str = '/pg1') -> None:
for new_name in names:
old_name = new_name.replace(token, '')
if token in new_name and old_name in self.lrs:
self.lrs[new_name] = self.lrs.pop(old_name)
elif new_name not in self.lrs:
self.lrs[new_name] = []
def _extract_momentum(self, param_group: Dict[str, Any], name: str, use_betas: bool) -> Dict[str, float]:
if not self.log_momentum:
return {}
momentum = param_group.get('betas')[0] if use_betas else param_group.get('momentum', 0)
self.last_momentum_values[name] = momentum
return {name: momentum}
def _add_prefix(
self, name: str, optimizer_cls: Type[Optimizer], seen_optimizer_types: DefaultDict[Type[Optimizer], int]
) -> str:
if optimizer_cls not in seen_optimizer_types:
return name
count = seen_optimizer_types[optimizer_cls]
return name + f'-{count - 1}' if count > 1 else name
def _add_suffix(self, name: str, param_groups: List[Dict], param_group_index: int, use_names: bool = True) -> str:
if len(param_groups) > 1:
if not use_names:
return f'{name}/pg{param_group_index+1}'
pg_name = param_groups[param_group_index].get('name', f'pg{param_group_index+1}')
return f'{name}/{pg_name}'
elif use_names:
pg_name = param_groups[param_group_index].get('name')
return f'{name}/{pg_name}' if pg_name else name
return name
def _duplicate_param_group_names(self, param_groups: List[Dict]) -> Set[str]:
names = [pg.get('name', f'pg{i}') for i, pg in enumerate(param_groups, start=1)]
unique = set(names)
if len(names) == len(unique):
return set()
return {n for n in names if names.count(n) > 1}
def _find_names(self, lr_schedulers: List, add_lr_sch_names: bool = True) -> List[str]:
names = []
seen_optimizers = []
seen_optimizer_types = defaultdict(int)
for scheduler in lr_schedulers:
sch = scheduler['scheduler']
if scheduler['name'] is not None:
name = scheduler['name']
else:
name = 'lr-' + sch.optimizer.__class__.__name__
seen_optimizers.append(sch.optimizer)
optimizer_cls = type(sch.optimizer)
if scheduler['name'] is None:
seen_optimizer_types[optimizer_cls] += 1
param_groups = sch.optimizer.param_groups
duplicates = self._duplicate_param_group_names(param_groups)
if duplicates:
raise MisconfigurationException(
'A single `Optimizer` cannot have multiple parameter groups with identical '
f'`name` values. {name} has duplicated parameter group names {duplicates}'
)
name = self._add_prefix(name, optimizer_cls, seen_optimizer_types)
names.extend(self._add_suffix(name, param_groups, i) for i in range(len(param_groups)))
if add_lr_sch_names:
self.lr_sch_names.append(name)
return names
@staticmethod
def _should_log(trainer) -> bool:
return (trainer.global_step + 1) % trainer.log_every_n_steps == 0 or trainer.should_stop
| true
| true
|
f70aaedf8077bde4e0dc6f024456789860057f34
| 16,381
|
py
|
Python
|
vsurfree/lib/python2.7/site-packages/fire/fire_test.py
|
hexnor/SurfFree----Web-Proxy----Django
|
ab3d0f6d3a3eb06bd532ac2b4f8c0950608b90ba
|
[
"MIT"
] | 1
|
2021-02-08T07:49:35.000Z
|
2021-02-08T07:49:35.000Z
|
vsurfree/lib/python2.7/site-packages/fire/fire_test.py
|
yokeshrana/SurfFree
|
ab3d0f6d3a3eb06bd532ac2b4f8c0950608b90ba
|
[
"MIT"
] | 2
|
2021-06-01T22:03:20.000Z
|
2022-01-13T00:43:38.000Z
|
vsurfree/lib/python2.7/site-packages/fire/fire_test.py
|
yokeshrana/SurfFree
|
ab3d0f6d3a3eb06bd532ac2b4f8c0950608b90ba
|
[
"MIT"
] | 1
|
2020-11-04T08:39:52.000Z
|
2020-11-04T08:39:52.000Z
|
# Copyright (C) 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import fire
from fire import test_components as tc
from fire import trace
import unittest
class FireTest(unittest.TestCase):
def testFire(self):
fire.Fire(tc.Empty)
fire.Fire(tc.OldStyleEmpty)
fire.Fire(tc.WithInit)
self.assertEqual(fire.Fire(tc.NoDefaults, 'double 2'), 4)
self.assertEqual(fire.Fire(tc.NoDefaults, 'triple 4'), 12)
self.assertEqual(fire.Fire(tc.WithDefaults, 'double 2'), 4)
self.assertEqual(fire.Fire(tc.WithDefaults, 'triple 4'), 12)
self.assertEqual(fire.Fire(tc.OldStyleWithDefaults, 'double 2'), 4)
self.assertEqual(fire.Fire(tc.OldStyleWithDefaults, 'triple 4'), 12)
def testFireNoArgs(self):
self.assertEqual(fire.Fire(tc.MixedDefaults, 'ten'), 10)
def testFireExceptions(self):
# Exceptions of Fire are printed to stderr and None is returned.
self.assertIsNone(fire.Fire(tc.Empty, 'nomethod')) # Member doesn't exist.
self.assertIsNone(fire.Fire(tc.NoDefaults, 'double')) # Missing argument.
self.assertIsNone(fire.Fire(tc.TypedProperties, 'delta x')) # Missing key.
# Exceptions of the target components are still raised.
with self.assertRaises(ZeroDivisionError):
fire.Fire(tc.NumberDefaults, 'reciprocal 0.0')
def testFireNamedArgs(self):
self.assertEqual(fire.Fire(tc.WithDefaults, 'double --count 5'), 10)
self.assertEqual(fire.Fire(tc.WithDefaults, 'triple --count 5'), 15)
self.assertEqual(fire.Fire(tc.OldStyleWithDefaults, 'double --count 5'), 10)
self.assertEqual(fire.Fire(tc.OldStyleWithDefaults, 'triple --count 5'), 15)
def testFireNamedArgsWithEquals(self):
self.assertEqual(fire.Fire(tc.WithDefaults, 'double --count=5'), 10)
self.assertEqual(fire.Fire(tc.WithDefaults, 'triple --count=5'), 15)
def testFireAllNamedArgs(self):
self.assertEqual(fire.Fire(tc.MixedDefaults, 'sum 1 2'), 5)
self.assertEqual(fire.Fire(tc.MixedDefaults, 'sum --alpha 1 2'), 5)
self.assertEqual(fire.Fire(tc.MixedDefaults, 'sum --beta 1 2'), 4)
self.assertEqual(fire.Fire(tc.MixedDefaults, 'sum 1 --alpha 2'), 4)
self.assertEqual(fire.Fire(tc.MixedDefaults, 'sum 1 --beta 2'), 5)
self.assertEqual(fire.Fire(tc.MixedDefaults, 'sum --alpha 1 --beta 2'), 5)
self.assertEqual(fire.Fire(tc.MixedDefaults, 'sum --beta 1 --alpha 2'), 4)
def testFireAllNamedArgsOneMissing(self):
self.assertEqual(fire.Fire(tc.MixedDefaults, 'sum'), 0)
self.assertEqual(fire.Fire(tc.MixedDefaults, 'sum 1'), 1)
self.assertEqual(fire.Fire(tc.MixedDefaults, 'sum --alpha 1'), 1)
self.assertEqual(fire.Fire(tc.MixedDefaults, 'sum --beta 2'), 4)
def testFirePartialNamedArgs(self):
self.assertEqual(fire.Fire(tc.MixedDefaults, 'identity 1 2'), (1, 2))
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity --alpha 1 2'), (1, 2))
self.assertEqual(fire.Fire(tc.MixedDefaults, 'identity --beta 1 2'), (2, 1))
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity 1 --alpha 2'), (2, 1))
self.assertEqual(fire.Fire(tc.MixedDefaults, 'identity 1 --beta 2'), (1, 2))
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity --alpha 1 --beta 2'), (1, 2))
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity --beta 1 --alpha 2'), (2, 1))
def testFirePartialNamedArgsOneMissing(self):
# By default, errors are written to standard out and None is returned.
self.assertIsNone( # Identity needs an arg.
fire.Fire(tc.MixedDefaults, 'identity'))
self.assertIsNone( # Identity needs a value for alpha.
fire.Fire(tc.MixedDefaults, 'identity --beta 2'))
self.assertEqual(fire.Fire(tc.MixedDefaults, 'identity 1'), (1, '0'))
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity --alpha 1'), (1, '0'))
def testFireProperties(self):
self.assertEqual(fire.Fire(tc.TypedProperties, 'alpha'), True)
self.assertEqual(fire.Fire(tc.TypedProperties, 'beta'), (1, 2, 3))
def testFireRecursion(self):
self.assertEqual(
fire.Fire(tc.TypedProperties, 'charlie double hello'), 'hellohello')
self.assertEqual(fire.Fire(tc.TypedProperties, 'charlie triple w'), 'www')
def testFireVarArgs(self):
self.assertEqual(
fire.Fire(tc.VarArgs, 'cumsums a b c d'), ['a', 'ab', 'abc', 'abcd'])
self.assertEqual(fire.Fire(tc.VarArgs, 'cumsums 1 2 3 4'), [1, 3, 6, 10])
def testFireVarArgsWithNamedArgs(self):
self.assertEqual(fire.Fire(tc.VarArgs, 'varchars 1 2 c d'), (1, 2, 'cd'))
self.assertEqual(fire.Fire(tc.VarArgs, 'varchars 3 4 c d e'), (3, 4, 'cde'))
def testFireKeywordArgs(self):
self.assertEqual(fire.Fire(tc.Kwargs, 'props --name David --age 24'),
{'name': 'David', 'age': 24})
self.assertEqual(
fire.Fire(tc.Kwargs,
'props --message "This is a message it has -- in it"'),
{'message': 'This is a message it has -- in it'})
self.assertEqual(fire.Fire(tc.Kwargs, 'upper --alpha A --beta B'),
'ALPHA BETA')
self.assertEqual(fire.Fire(tc.Kwargs, 'upper --alpha A --beta B - lower'),
'alpha beta')
def testFireKeywordArgsWithMissingPositionalArgs(self):
self.assertEqual(fire.Fire(tc.Kwargs, 'run Hello World --cell is'),
('Hello', 'World', {'cell': 'is'}))
self.assertEqual(fire.Fire(tc.Kwargs, 'run Hello --cell ok'),
('Hello', None, {'cell': 'ok'}))
def testFireObject(self):
self.assertEqual(fire.Fire(tc.WithDefaults(), 'double --count 5'), 10)
self.assertEqual(fire.Fire(tc.WithDefaults(), 'triple --count 5'), 15)
def testFireDict(self):
component = {
'double': lambda x=0: 2 * x,
'cheese': 'swiss',
}
self.assertEqual(fire.Fire(component, 'double 5'), 10)
self.assertEqual(fire.Fire(component, 'cheese'), 'swiss')
def testFireObjectWithDict(self):
self.assertEqual(fire.Fire(tc.TypedProperties, 'delta echo'), 'E')
self.assertEqual(fire.Fire(tc.TypedProperties, 'delta echo lower'), 'e')
self.assertIsInstance(fire.Fire(tc.TypedProperties, 'delta nest'), dict)
self.assertEqual(fire.Fire(tc.TypedProperties, 'delta nest 0'), 'a')
def testFireList(self):
component = ['zero', 'one', 'two', 'three']
self.assertEqual(fire.Fire(component, '2'), 'two')
self.assertEqual(fire.Fire(component, '3'), 'three')
self.assertEqual(fire.Fire(component, '-1'), 'three')
def testFireObjectWithList(self):
self.assertEqual(fire.Fire(tc.TypedProperties, 'echo 0'), 'alex')
self.assertEqual(fire.Fire(tc.TypedProperties, 'echo 1'), 'bethany')
def testFireObjectWithTuple(self):
self.assertEqual(fire.Fire(tc.TypedProperties, 'fox 0'), 'carry')
self.assertEqual(fire.Fire(tc.TypedProperties, 'fox 1'), 'divide')
def testFireNoComponent(self):
self.assertEqual(fire.Fire(command='tc WithDefaults double 10'), 20)
last_char = lambda text: text[-1] # pylint: disable=unused-variable
self.assertEqual(fire.Fire(command='last_char "Hello"'), 'o')
self.assertEqual(fire.Fire(command='last-char "World"'), 'd')
rset = lambda count=0: set(range(count)) # pylint: disable=unused-variable
self.assertEqual(fire.Fire(command='rset 5'), {0, 1, 2, 3, 4})
def testFireUnderscores(self):
self.assertEqual(
fire.Fire(tc.Underscores, 'underscore-example'), 'fish fingers')
self.assertEqual(
fire.Fire(tc.Underscores, 'underscore_example'), 'fish fingers')
def testFireUnderscoresInArg(self):
self.assertEqual(
fire.Fire(tc.Underscores, 'underscore-function example'), 'example')
self.assertEqual(
fire.Fire(tc.Underscores, 'underscore_function --underscore-arg=score'),
'score')
self.assertEqual(
fire.Fire(tc.Underscores, 'underscore_function --underscore_arg=score'),
'score')
def testBoolParsing(self):
self.assertEqual(fire.Fire(tc.BoolConverter, 'as-bool True'), True)
self.assertEqual(fire.Fire(tc.BoolConverter, 'as-bool False'), False)
self.assertEqual(fire.Fire(tc.BoolConverter, 'as-bool --arg=True'), True)
self.assertEqual(fire.Fire(tc.BoolConverter, 'as-bool --arg=False'), False)
self.assertEqual(fire.Fire(tc.BoolConverter, 'as-bool --arg'), True)
self.assertEqual(fire.Fire(tc.BoolConverter, 'as-bool --noarg'), False)
def testBoolParsingContinued(self):
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity True False'), (True, False))
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity --alpha=False 10'), (False, 10))
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity --alpha --beta 10'), (True, 10))
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity --alpha --beta=10'), (True, 10))
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity --noalpha --beta'), (False, True))
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity 10 --beta'), (10, True))
def testBoolParsingLessExpectedCases(self):
# Note: Does not return (True, 10).
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity --alpha 10'), (10, '0'))
# To get (True, 10), use one of the following:
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity --alpha --beta=10'), (True, 10))
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity True 10'), (True, 10))
# Note: Does not return ('--test', '0').
self.assertEqual(fire.Fire(tc.MixedDefaults, 'identity --alpha --test'),
(True, '--test'))
# To get ('--test', '0'), use one of the following:
self.assertEqual(fire.Fire(tc.MixedDefaults, 'identity --alpha=--test'),
('--test', '0'))
self.assertEqual(
fire.Fire(tc.MixedDefaults, r'identity --alpha \"--test\"'),
('--test', '0'))
def testBoolParsingWithNo(self):
# In these examples --nothing always refers to the nothing argument:
def fn1(thing, nothing):
return thing, nothing
self.assertEqual(fire.Fire(fn1, '--thing --nothing'), (True, True))
self.assertEqual(fire.Fire(fn1, '--thing --nonothing'), (True, False))
# In the next example nothing=False (since rightmost setting of a flag gets
# precedence), but it errors because thing has no value.
self.assertEqual(fire.Fire(fn1, '--nothing --nonothing'), None)
# In these examples, --nothing sets thing=False:
def fn2(thing, **kwargs):
return thing, kwargs
self.assertEqual(fire.Fire(fn2, '--thing'), (True, {}))
self.assertEqual(fire.Fire(fn2, '--nothing'), (False, {}))
# In the next one, nothing=True, but it errors because thing has no value.
self.assertEqual(fire.Fire(fn2, '--nothing=True'), None)
self.assertEqual(fire.Fire(fn2, '--nothing --nothing=True'),
(False, {'nothing': True}))
def fn3(arg, **kwargs):
return arg, kwargs
self.assertEqual(fire.Fire(fn3, '--arg=value --thing'),
('value', {'thing': True}))
self.assertEqual(fire.Fire(fn3, '--arg=value --nothing'),
('value', {'thing': False}))
self.assertEqual(fire.Fire(fn3, '--arg=value --nonothing'),
('value', {'nothing': False}))
def testTraceFlag(self):
self.assertIsInstance(
fire.Fire(tc.BoolConverter, 'as-bool True -- --trace'), trace.FireTrace)
self.assertIsInstance(
fire.Fire(tc.BoolConverter, 'as-bool True -- -t'), trace.FireTrace)
self.assertIsInstance(
fire.Fire(tc.BoolConverter, '-- --trace'), trace.FireTrace)
def testHelpFlag(self):
self.assertIsNone(fire.Fire(tc.BoolConverter, 'as-bool True -- --help'))
self.assertIsNone(fire.Fire(tc.BoolConverter, 'as-bool True -- -h'))
self.assertIsNone(fire.Fire(tc.BoolConverter, '-- --help'))
def testHelpFlagAndTraceFlag(self):
self.assertIsInstance(
fire.Fire(tc.BoolConverter, 'as-bool True -- --help --trace'),
trace.FireTrace)
self.assertIsInstance(
fire.Fire(tc.BoolConverter, 'as-bool True -- -h -t'), trace.FireTrace)
self.assertIsInstance(
fire.Fire(tc.BoolConverter, '-- -h --trace'), trace.FireTrace)
def testTabCompletionNoName(self):
with self.assertRaises(ValueError):
fire.Fire(tc.NoDefaults, '-- --completion')
def testTabCompletion(self):
completion_script = fire.Fire(tc.NoDefaults, '-- --completion', name='c')
self.assertIn('double', completion_script)
self.assertIn('triple', completion_script)
def testTabCompletionWithDict(self):
actions = {'multiply': lambda a, b: a * b}
completion_script = fire.Fire(actions, '-- --completion', name='actCLI')
self.assertIn('actCLI', completion_script)
self.assertIn('multiply', completion_script)
def testBasicSeparator(self):
# '-' is the default separator.
self.assertEqual(fire.Fire(tc.MixedDefaults, 'identity + _'), ('+', '_'))
self.assertEqual(fire.Fire(tc.MixedDefaults, 'identity _ + -'), ('_', '+'))
# If we change the separator we can use '-' as an argument.
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity - _ -- --separator &'),
('-', '_'))
# The separator triggers a function call, but there aren't enough arguments.
self.assertEqual(fire.Fire(tc.MixedDefaults, 'identity - _ +'), None)
def testExtraSeparators(self):
self.assertEqual(
fire.Fire(tc.ReturnsObj, 'get-obj arg1 arg2 - - as-bool True'), True)
self.assertEqual(
fire.Fire(tc.ReturnsObj, 'get-obj arg1 arg2 - - - as-bool True'), True)
def testSeparatorForChaining(self):
# Without a separator all args are consumed by get_obj.
self.assertIsInstance(
fire.Fire(tc.ReturnsObj, 'get-obj arg1 arg2 as-bool True'),
tc.BoolConverter)
    # With a separator only the preceding args are consumed by get_obj.
self.assertEqual(
fire.Fire(tc.ReturnsObj, 'get-obj arg1 arg2 - as-bool True'), True)
self.assertEqual(
fire.Fire(tc.ReturnsObj,
'get-obj arg1 arg2 & as-bool True -- --separator &'),
True)
self.assertEqual(
fire.Fire(tc.ReturnsObj,
'get-obj arg1 $$ as-bool True -- --separator $$'),
True)
def testFloatForExpectedInt(self):
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'sum --alpha 2.2 --beta 3.0'), 8.2)
self.assertEqual(
fire.Fire(tc.NumberDefaults, 'integer_reciprocal --divisor 5.0'), 0.2)
self.assertEqual(
fire.Fire(tc.NumberDefaults, 'integer_reciprocal 4.0'), 0.25)
def testClassInstantiation(self):
self.assertIsInstance(fire.Fire(tc.InstanceVars, '--arg1=a1 --arg2=a2'),
tc.InstanceVars)
# Cannot instantiate a class with positional args by default.
self.assertIsNone(fire.Fire(tc.InstanceVars, 'a1 a2'))
def testTraceErrors(self):
# Class needs additional value but runs out of args.
self.assertIsNone(fire.Fire(tc.InstanceVars, 'a1'))
self.assertIsNone(fire.Fire(tc.InstanceVars, '--arg1=a1'))
# Routine needs additional value but runs out of args.
self.assertIsNone(fire.Fire(tc.InstanceVars, 'a1 a2 - run b1'))
self.assertIsNone(
fire.Fire(tc.InstanceVars, '--arg1=a1 --arg2=a2 - run b1'))
# Extra args cannot be consumed.
self.assertIsNone(fire.Fire(tc.InstanceVars, 'a1 a2 - run b1 b2 b3'))
self.assertIsNone(
fire.Fire(tc.InstanceVars, '--arg1=a1 --arg2=a2 - run b1 b2 b3'))
# Cannot find member to access.
self.assertIsNone(fire.Fire(tc.InstanceVars, 'a1 a2 - jog'))
self.assertIsNone(fire.Fire(tc.InstanceVars, '--arg1=a1 --arg2=a2 - jog'))
if __name__ == '__main__':
unittest.main()
| 44.034946
| 80
| 0.663635
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import fire
from fire import test_components as tc
from fire import trace
import unittest
class FireTest(unittest.TestCase):
def testFire(self):
fire.Fire(tc.Empty)
fire.Fire(tc.OldStyleEmpty)
fire.Fire(tc.WithInit)
self.assertEqual(fire.Fire(tc.NoDefaults, 'double 2'), 4)
self.assertEqual(fire.Fire(tc.NoDefaults, 'triple 4'), 12)
self.assertEqual(fire.Fire(tc.WithDefaults, 'double 2'), 4)
self.assertEqual(fire.Fire(tc.WithDefaults, 'triple 4'), 12)
self.assertEqual(fire.Fire(tc.OldStyleWithDefaults, 'double 2'), 4)
self.assertEqual(fire.Fire(tc.OldStyleWithDefaults, 'triple 4'), 12)
def testFireNoArgs(self):
self.assertEqual(fire.Fire(tc.MixedDefaults, 'ten'), 10)
def testFireExceptions(self):
    self.assertIsNone(fire.Fire(tc.Empty, 'nomethod'))
    self.assertIsNone(fire.Fire(tc.NoDefaults, 'double'))  # Missing argument.
self.assertIsNone(fire.Fire(tc.TypedProperties, 'delta x')) # Missing key.
# Exceptions of the target components are still raised.
with self.assertRaises(ZeroDivisionError):
fire.Fire(tc.NumberDefaults, 'reciprocal 0.0')
def testFireNamedArgs(self):
self.assertEqual(fire.Fire(tc.WithDefaults, 'double --count 5'), 10)
self.assertEqual(fire.Fire(tc.WithDefaults, 'triple --count 5'), 15)
self.assertEqual(fire.Fire(tc.OldStyleWithDefaults, 'double --count 5'), 10)
self.assertEqual(fire.Fire(tc.OldStyleWithDefaults, 'triple --count 5'), 15)
def testFireNamedArgsWithEquals(self):
self.assertEqual(fire.Fire(tc.WithDefaults, 'double --count=5'), 10)
self.assertEqual(fire.Fire(tc.WithDefaults, 'triple --count=5'), 15)
def testFireAllNamedArgs(self):
self.assertEqual(fire.Fire(tc.MixedDefaults, 'sum 1 2'), 5)
self.assertEqual(fire.Fire(tc.MixedDefaults, 'sum --alpha 1 2'), 5)
self.assertEqual(fire.Fire(tc.MixedDefaults, 'sum --beta 1 2'), 4)
self.assertEqual(fire.Fire(tc.MixedDefaults, 'sum 1 --alpha 2'), 4)
self.assertEqual(fire.Fire(tc.MixedDefaults, 'sum 1 --beta 2'), 5)
self.assertEqual(fire.Fire(tc.MixedDefaults, 'sum --alpha 1 --beta 2'), 5)
self.assertEqual(fire.Fire(tc.MixedDefaults, 'sum --beta 1 --alpha 2'), 4)
def testFireAllNamedArgsOneMissing(self):
self.assertEqual(fire.Fire(tc.MixedDefaults, 'sum'), 0)
self.assertEqual(fire.Fire(tc.MixedDefaults, 'sum 1'), 1)
self.assertEqual(fire.Fire(tc.MixedDefaults, 'sum --alpha 1'), 1)
self.assertEqual(fire.Fire(tc.MixedDefaults, 'sum --beta 2'), 4)
def testFirePartialNamedArgs(self):
self.assertEqual(fire.Fire(tc.MixedDefaults, 'identity 1 2'), (1, 2))
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity --alpha 1 2'), (1, 2))
self.assertEqual(fire.Fire(tc.MixedDefaults, 'identity --beta 1 2'), (2, 1))
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity 1 --alpha 2'), (2, 1))
self.assertEqual(fire.Fire(tc.MixedDefaults, 'identity 1 --beta 2'), (1, 2))
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity --alpha 1 --beta 2'), (1, 2))
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity --beta 1 --alpha 2'), (2, 1))
def testFirePartialNamedArgsOneMissing(self):
# By default, errors are written to standard out and None is returned.
self.assertIsNone( # Identity needs an arg.
fire.Fire(tc.MixedDefaults, 'identity'))
self.assertIsNone( # Identity needs a value for alpha.
fire.Fire(tc.MixedDefaults, 'identity --beta 2'))
self.assertEqual(fire.Fire(tc.MixedDefaults, 'identity 1'), (1, '0'))
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity --alpha 1'), (1, '0'))
def testFireProperties(self):
self.assertEqual(fire.Fire(tc.TypedProperties, 'alpha'), True)
self.assertEqual(fire.Fire(tc.TypedProperties, 'beta'), (1, 2, 3))
def testFireRecursion(self):
self.assertEqual(
fire.Fire(tc.TypedProperties, 'charlie double hello'), 'hellohello')
self.assertEqual(fire.Fire(tc.TypedProperties, 'charlie triple w'), 'www')
def testFireVarArgs(self):
self.assertEqual(
fire.Fire(tc.VarArgs, 'cumsums a b c d'), ['a', 'ab', 'abc', 'abcd'])
self.assertEqual(fire.Fire(tc.VarArgs, 'cumsums 1 2 3 4'), [1, 3, 6, 10])
def testFireVarArgsWithNamedArgs(self):
self.assertEqual(fire.Fire(tc.VarArgs, 'varchars 1 2 c d'), (1, 2, 'cd'))
self.assertEqual(fire.Fire(tc.VarArgs, 'varchars 3 4 c d e'), (3, 4, 'cde'))
def testFireKeywordArgs(self):
self.assertEqual(fire.Fire(tc.Kwargs, 'props --name David --age 24'),
{'name': 'David', 'age': 24})
self.assertEqual(
fire.Fire(tc.Kwargs,
'props --message "This is a message it has -- in it"'),
{'message': 'This is a message it has -- in it'})
self.assertEqual(fire.Fire(tc.Kwargs, 'upper --alpha A --beta B'),
'ALPHA BETA')
self.assertEqual(fire.Fire(tc.Kwargs, 'upper --alpha A --beta B - lower'),
'alpha beta')
def testFireKeywordArgsWithMissingPositionalArgs(self):
self.assertEqual(fire.Fire(tc.Kwargs, 'run Hello World --cell is'),
('Hello', 'World', {'cell': 'is'}))
self.assertEqual(fire.Fire(tc.Kwargs, 'run Hello --cell ok'),
('Hello', None, {'cell': 'ok'}))
def testFireObject(self):
self.assertEqual(fire.Fire(tc.WithDefaults(), 'double --count 5'), 10)
self.assertEqual(fire.Fire(tc.WithDefaults(), 'triple --count 5'), 15)
def testFireDict(self):
component = {
'double': lambda x=0: 2 * x,
'cheese': 'swiss',
}
self.assertEqual(fire.Fire(component, 'double 5'), 10)
self.assertEqual(fire.Fire(component, 'cheese'), 'swiss')
def testFireObjectWithDict(self):
self.assertEqual(fire.Fire(tc.TypedProperties, 'delta echo'), 'E')
self.assertEqual(fire.Fire(tc.TypedProperties, 'delta echo lower'), 'e')
self.assertIsInstance(fire.Fire(tc.TypedProperties, 'delta nest'), dict)
self.assertEqual(fire.Fire(tc.TypedProperties, 'delta nest 0'), 'a')
def testFireList(self):
component = ['zero', 'one', 'two', 'three']
self.assertEqual(fire.Fire(component, '2'), 'two')
self.assertEqual(fire.Fire(component, '3'), 'three')
self.assertEqual(fire.Fire(component, '-1'), 'three')
def testFireObjectWithList(self):
self.assertEqual(fire.Fire(tc.TypedProperties, 'echo 0'), 'alex')
self.assertEqual(fire.Fire(tc.TypedProperties, 'echo 1'), 'bethany')
def testFireObjectWithTuple(self):
self.assertEqual(fire.Fire(tc.TypedProperties, 'fox 0'), 'carry')
self.assertEqual(fire.Fire(tc.TypedProperties, 'fox 1'), 'divide')
def testFireNoComponent(self):
self.assertEqual(fire.Fire(command='tc WithDefaults double 10'), 20)
last_char = lambda text: text[-1] # pylint: disable=unused-variable
self.assertEqual(fire.Fire(command='last_char "Hello"'), 'o')
self.assertEqual(fire.Fire(command='last-char "World"'), 'd')
rset = lambda count=0: set(range(count)) # pylint: disable=unused-variable
self.assertEqual(fire.Fire(command='rset 5'), {0, 1, 2, 3, 4})
def testFireUnderscores(self):
self.assertEqual(
fire.Fire(tc.Underscores, 'underscore-example'), 'fish fingers')
self.assertEqual(
fire.Fire(tc.Underscores, 'underscore_example'), 'fish fingers')
def testFireUnderscoresInArg(self):
self.assertEqual(
fire.Fire(tc.Underscores, 'underscore-function example'), 'example')
self.assertEqual(
fire.Fire(tc.Underscores, 'underscore_function --underscore-arg=score'),
'score')
self.assertEqual(
fire.Fire(tc.Underscores, 'underscore_function --underscore_arg=score'),
'score')
def testBoolParsing(self):
self.assertEqual(fire.Fire(tc.BoolConverter, 'as-bool True'), True)
self.assertEqual(fire.Fire(tc.BoolConverter, 'as-bool False'), False)
self.assertEqual(fire.Fire(tc.BoolConverter, 'as-bool --arg=True'), True)
self.assertEqual(fire.Fire(tc.BoolConverter, 'as-bool --arg=False'), False)
self.assertEqual(fire.Fire(tc.BoolConverter, 'as-bool --arg'), True)
self.assertEqual(fire.Fire(tc.BoolConverter, 'as-bool --noarg'), False)
def testBoolParsingContinued(self):
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity True False'), (True, False))
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity --alpha=False 10'), (False, 10))
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity --alpha --beta 10'), (True, 10))
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity --alpha --beta=10'), (True, 10))
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity --noalpha --beta'), (False, True))
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity 10 --beta'), (10, True))
def testBoolParsingLessExpectedCases(self):
# Note: Does not return (True, 10).
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity --alpha 10'), (10, '0'))
# To get (True, 10), use one of the following:
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity --alpha --beta=10'), (True, 10))
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity True 10'), (True, 10))
# Note: Does not return ('--test', '0').
self.assertEqual(fire.Fire(tc.MixedDefaults, 'identity --alpha --test'),
(True, '--test'))
# To get ('--test', '0'), use one of the following:
self.assertEqual(fire.Fire(tc.MixedDefaults, 'identity --alpha=--test'),
('--test', '0'))
self.assertEqual(
fire.Fire(tc.MixedDefaults, r'identity --alpha \"--test\"'),
('--test', '0'))
def testBoolParsingWithNo(self):
# In these examples --nothing always refers to the nothing argument:
def fn1(thing, nothing):
return thing, nothing
self.assertEqual(fire.Fire(fn1, '--thing --nothing'), (True, True))
self.assertEqual(fire.Fire(fn1, '--thing --nonothing'), (True, False))
# In the next example nothing=False (since rightmost setting of a flag gets
# precedence), but it errors because thing has no value.
self.assertEqual(fire.Fire(fn1, '--nothing --nonothing'), None)
# In these examples, --nothing sets thing=False:
def fn2(thing, **kwargs):
return thing, kwargs
self.assertEqual(fire.Fire(fn2, '--thing'), (True, {}))
self.assertEqual(fire.Fire(fn2, '--nothing'), (False, {}))
# In the next one, nothing=True, but it errors because thing has no value.
self.assertEqual(fire.Fire(fn2, '--nothing=True'), None)
self.assertEqual(fire.Fire(fn2, '--nothing --nothing=True'),
(False, {'nothing': True}))
def fn3(arg, **kwargs):
return arg, kwargs
self.assertEqual(fire.Fire(fn3, '--arg=value --thing'),
('value', {'thing': True}))
self.assertEqual(fire.Fire(fn3, '--arg=value --nothing'),
('value', {'thing': False}))
self.assertEqual(fire.Fire(fn3, '--arg=value --nonothing'),
('value', {'nothing': False}))
def testTraceFlag(self):
self.assertIsInstance(
fire.Fire(tc.BoolConverter, 'as-bool True -- --trace'), trace.FireTrace)
self.assertIsInstance(
fire.Fire(tc.BoolConverter, 'as-bool True -- -t'), trace.FireTrace)
self.assertIsInstance(
fire.Fire(tc.BoolConverter, '-- --trace'), trace.FireTrace)
def testHelpFlag(self):
self.assertIsNone(fire.Fire(tc.BoolConverter, 'as-bool True -- --help'))
self.assertIsNone(fire.Fire(tc.BoolConverter, 'as-bool True -- -h'))
self.assertIsNone(fire.Fire(tc.BoolConverter, '-- --help'))
def testHelpFlagAndTraceFlag(self):
self.assertIsInstance(
fire.Fire(tc.BoolConverter, 'as-bool True -- --help --trace'),
trace.FireTrace)
self.assertIsInstance(
fire.Fire(tc.BoolConverter, 'as-bool True -- -h -t'), trace.FireTrace)
self.assertIsInstance(
fire.Fire(tc.BoolConverter, '-- -h --trace'), trace.FireTrace)
def testTabCompletionNoName(self):
with self.assertRaises(ValueError):
fire.Fire(tc.NoDefaults, '-- --completion')
def testTabCompletion(self):
completion_script = fire.Fire(tc.NoDefaults, '-- --completion', name='c')
self.assertIn('double', completion_script)
self.assertIn('triple', completion_script)
def testTabCompletionWithDict(self):
actions = {'multiply': lambda a, b: a * b}
completion_script = fire.Fire(actions, '-- --completion', name='actCLI')
self.assertIn('actCLI', completion_script)
self.assertIn('multiply', completion_script)
def testBasicSeparator(self):
# '-' is the default separator.
self.assertEqual(fire.Fire(tc.MixedDefaults, 'identity + _'), ('+', '_'))
self.assertEqual(fire.Fire(tc.MixedDefaults, 'identity _ + -'), ('_', '+'))
# If we change the separator we can use '-' as an argument.
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity - _ -- --separator &'),
('-', '_'))
# The separator triggers a function call, but there aren't enough arguments.
self.assertEqual(fire.Fire(tc.MixedDefaults, 'identity - _ +'), None)
def testExtraSeparators(self):
self.assertEqual(
fire.Fire(tc.ReturnsObj, 'get-obj arg1 arg2 - - as-bool True'), True)
self.assertEqual(
fire.Fire(tc.ReturnsObj, 'get-obj arg1 arg2 - - - as-bool True'), True)
def testSeparatorForChaining(self):
self.assertIsInstance(
fire.Fire(tc.ReturnsObj, 'get-obj arg1 arg2 as-bool True'),
tc.BoolConverter)
self.assertEqual(
fire.Fire(tc.ReturnsObj, 'get-obj arg1 arg2 - as-bool True'), True)
self.assertEqual(
fire.Fire(tc.ReturnsObj,
'get-obj arg1 arg2 & as-bool True -- --separator &'),
True)
self.assertEqual(
fire.Fire(tc.ReturnsObj,
'get-obj arg1 $$ as-bool True -- --separator $$'),
True)
def testFloatForExpectedInt(self):
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'sum --alpha 2.2 --beta 3.0'), 8.2)
self.assertEqual(
fire.Fire(tc.NumberDefaults, 'integer_reciprocal --divisor 5.0'), 0.2)
self.assertEqual(
fire.Fire(tc.NumberDefaults, 'integer_reciprocal 4.0'), 0.25)
def testClassInstantiation(self):
self.assertIsInstance(fire.Fire(tc.InstanceVars, '--arg1=a1 --arg2=a2'),
tc.InstanceVars)
self.assertIsNone(fire.Fire(tc.InstanceVars, 'a1 a2'))
def testTraceErrors(self):
self.assertIsNone(fire.Fire(tc.InstanceVars, 'a1'))
self.assertIsNone(fire.Fire(tc.InstanceVars, '--arg1=a1'))
self.assertIsNone(fire.Fire(tc.InstanceVars, 'a1 a2 - run b1'))
self.assertIsNone(
fire.Fire(tc.InstanceVars, '--arg1=a1 --arg2=a2 - run b1'))
self.assertIsNone(fire.Fire(tc.InstanceVars, 'a1 a2 - run b1 b2 b3'))
self.assertIsNone(
fire.Fire(tc.InstanceVars, '--arg1=a1 --arg2=a2 - run b1 b2 b3'))
self.assertIsNone(fire.Fire(tc.InstanceVars, 'a1 a2 - jog'))
self.assertIsNone(fire.Fire(tc.InstanceVars, '--arg1=a1 --arg2=a2 - jog'))
if __name__ == '__main__':
unittest.main()
| true
| true
|
f70aaf1faf473c2707952e2460f03972a42dbb44
| 69
|
py
|
Python
|
app/config/secure.py
|
ZeroLoo/FlaskAPI
|
3dd89e83bd99b2de1796a9dfb52dad7b659e6ad2
|
[
"MIT"
] | null | null | null |
app/config/secure.py
|
ZeroLoo/FlaskAPI
|
3dd89e83bd99b2de1796a9dfb52dad7b659e6ad2
|
[
"MIT"
] | null | null | null |
app/config/secure.py
|
ZeroLoo/FlaskAPI
|
3dd89e83bd99b2de1796a9dfb52dad7b659e6ad2
|
[
"MIT"
] | null | null | null |
# -*-coding:utf-8-*-
from flask import Flask
__author__ = 'ZeroLoo'
| 13.8
| 23
| 0.681159
|
from flask import Flask
__author__ = 'ZeroLoo'
| true
| true
|
f70ab0237408983ce5e23acbdb327225ffb587d6
| 4,041
|
py
|
Python
|
others/pytrends.py
|
thorwhalen/ut
|
353a4629c35a2cca76ef91a4d5209afe766433b4
|
[
"MIT"
] | 4
|
2016-12-17T20:06:10.000Z
|
2021-11-19T04:45:29.000Z
|
others/pytrends.py
|
thorwhalen/ut
|
353a4629c35a2cca76ef91a4d5209afe766433b4
|
[
"MIT"
] | 11
|
2021-01-06T05:35:11.000Z
|
2022-03-11T23:28:31.000Z
|
others/pytrends.py
|
thorwhalen/ut
|
353a4629c35a2cca76ef91a4d5209afe766433b4
|
[
"MIT"
] | 3
|
2015-06-12T10:44:16.000Z
|
2021-07-26T18:39:47.000Z
|
import http.client
import urllib.request, urllib.parse, urllib.error
import urllib.request, urllib.error, urllib.parse
import re
import csv
from http.cookiejar import CookieJar
class pyGTrends(object):
"""
Google Trends API
Recommended usage:
from csv import DictReader
r = pyGTrends(username, password)
r.download_report(('pants', 'skirt'))
d = DictReader(r.csv().split('\n'))
"""
def __init__(self, username, password):
"""
        provide login and password to be used to connect to Google Trends;
        all immutable system variables are also defined here
"""
self.login_params = {
"continue": 'http://www.google.com/trends',
"PersistentCookie": "yes",
"Email": username,
"Passwd": password,
}
self.headers = [("Referrer", "https://www.google.com/accounts/ServiceLoginBoxAuth"),
("Content-type", "application/x-www-form-urlencoded"),
('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.21 (KHTML, like Gecko) Chrome/19.0.1042.0 Safari/535.21'),
("Accept", "text/plain")]
self.url_ServiceLoginBoxAuth = 'https://accounts.google.com/ServiceLoginBoxAuth'
self.url_Export = 'http://www.google.com/trends/viz'
self.url_CookieCheck = 'https://www.google.com/accounts/CheckCookie?chtml=LoginDoneHtml'
self.header_dictionary = {}
self._connect()
def _connect(self):
"""
connect to Google Trends
"""
self.cj = CookieJar()
self.opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(self.cj))
self.opener.addheaders = self.headers
galx = re.compile('<input type="hidden" name="GALX" value="(?P<galx>[a-zA-Z0-9_-]+)">')
resp = self.opener.open(self.url_ServiceLoginBoxAuth).read()
m = galx.search(resp)
if not m:
raise Exception("Cannot parse GALX out of login page")
self.login_params['GALX'] = m.group('galx')
params = urllib.parse.urlencode(self.login_params)
self.opener.open(self.url_ServiceLoginBoxAuth, params)
self.opener.open(self.url_CookieCheck)
def download_report(self, keywords, date='all', geo='all', geor='all', graph = 'all_csv', sort=0, scale=0, sa='N'):
"""
download a specific report
date, geo, geor, graph, sort, scale and sa
are all Google Trends specific ways to slice the data
"""
if type(keywords) not in (type([]), type(('tuple',))):
keywords = [keywords]
params = urllib.parse.urlencode({
'q': ",".join(keywords),
'date': date,
'graph': graph,
'geo': geo,
'geor': geor,
'sort': str(sort),
'scale': str(scale),
'sa': sa
})
self.raw_data = self.opener.open('http://www.google.com/trends/viz?' + params).read()
if self.raw_data in ['You must be signed in to export data from Google Trends']:
raise Exception(self.raw_data)
def csv(self, section="main", as_list=False):
"""
Returns a CSV of a specific segment of the data.
Available segments include Main, Language, City and Region.
"""
if section == "main":
section = ("Week","Year","Day","Month")
else:
section = (section,)
segments = self.raw_data.split('\n\n\n')
for s in segments:
if s.partition(',')[0] in section:
if as_list:
return [line for line in csv.reader(s.split('\n'))]
else:
return s
raise Exception("Could not find requested section")
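# Minimal usage sketch following the recommendation in the class docstring; it
# assumes valid Google credentials ('my_username' / 'my_password' are
# placeholders) and that the legacy Trends export endpoint still responds.
# Note that urllib returns bytes under Python 3, so the raw report may need
# decoding before csv() can split it.
if __name__ == '__main__':
    from csv import DictReader

    connector = pyGTrends('my_username', 'my_password')
    connector.download_report(('pants', 'skirt'))
    for row in DictReader(connector.csv().split('\n')):
        print(row)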
| 38.122642
| 146
| 0.560257
|
import http.client
import urllib.request, urllib.parse, urllib.error
import urllib.request, urllib.error, urllib.parse
import re
import csv
from http.cookiejar import CookieJar
class pyGTrends(object):
def __init__(self, username, password):
self.login_params = {
"continue": 'http://www.google.com/trends',
"PersistentCookie": "yes",
"Email": username,
"Passwd": password,
}
self.headers = [("Referrer", "https://www.google.com/accounts/ServiceLoginBoxAuth"),
("Content-type", "application/x-www-form-urlencoded"),
('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.21 (KHTML, like Gecko) Chrome/19.0.1042.0 Safari/535.21'),
("Accept", "text/plain")]
self.url_ServiceLoginBoxAuth = 'https://accounts.google.com/ServiceLoginBoxAuth'
self.url_Export = 'http://www.google.com/trends/viz'
self.url_CookieCheck = 'https://www.google.com/accounts/CheckCookie?chtml=LoginDoneHtml'
self.header_dictionary = {}
self._connect()
def _connect(self):
self.cj = CookieJar()
self.opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(self.cj))
self.opener.addheaders = self.headers
galx = re.compile('<input type="hidden" name="GALX" value="(?P<galx>[a-zA-Z0-9_-]+)">')
resp = self.opener.open(self.url_ServiceLoginBoxAuth).read()
m = galx.search(resp)
if not m:
raise Exception("Cannot parse GALX out of login page")
self.login_params['GALX'] = m.group('galx')
params = urllib.parse.urlencode(self.login_params)
self.opener.open(self.url_ServiceLoginBoxAuth, params)
self.opener.open(self.url_CookieCheck)
def download_report(self, keywords, date='all', geo='all', geor='all', graph = 'all_csv', sort=0, scale=0, sa='N'):
if type(keywords) not in (type([]), type(('tuple',))):
keywords = [keywords]
params = urllib.parse.urlencode({
'q': ",".join(keywords),
'date': date,
'graph': graph,
'geo': geo,
'geor': geor,
'sort': str(sort),
'scale': str(scale),
'sa': sa
})
self.raw_data = self.opener.open('http://www.google.com/trends/viz?' + params).read()
if self.raw_data in ['You must be signed in to export data from Google Trends']:
raise Exception(self.raw_data)
def csv(self, section="main", as_list=False):
if section == "main":
section = ("Week","Year","Day","Month")
else:
section = (section,)
segments = self.raw_data.split('\n\n\n')
for s in segments:
if s.partition(',')[0] in section:
if as_list:
return [line for line in csv.reader(s.split('\n'))]
else:
return s
raise Exception("Could not find requested section")
| true
| true
|
f70ab04d45cd2b3288ee4efa477201d0066849a2
| 1,293
|
py
|
Python
|
app/api/utils/readInstanceDetails.py
|
nurely/lxdui
|
8cb31dc1117719b140f440f8a705282781db7b35
|
[
"Apache-2.0"
] | null | null | null |
app/api/utils/readInstanceDetails.py
|
nurely/lxdui
|
8cb31dc1117719b140f440f8a705282781db7b35
|
[
"Apache-2.0"
] | null | null | null |
app/api/utils/readInstanceDetails.py
|
nurely/lxdui
|
8cb31dc1117719b140f440f8a705282781db7b35
|
[
"Apache-2.0"
] | null | null | null |
import platform, sys, os, subprocess
import psutil
from app.api.models.LXDModule import LXDModule
import logging
def readInstanceDetails():
instanceDetails = ("Python Version: {}".format(platform.python_version()))
instanceDetails +=("\nPython Path: {}".format(' '.join(path for path in sys.path)))
instanceDetails +=("\nLXD Version: {}".format(getLXDInfo()['environment']['server_version']))
instanceDetails +=("\nLXD Status: {}".format(getLXDInfo()['api_status']))
instanceDetails +=("\nOS: {}".format(platform.platform()))
instanceDetails +=("\nLXDUI Path: {}".format(sys.path[0]))
instanceDetails +=("\nCPU Count: {}".format(getProcessorDetails()))
instanceDetails +=("\nMemory: {}MB".format(getMemory()))
instanceDetails +=("\nDisk used percent: {}".format(getDiskDetails()))
logging.info(instanceDetails)
def getLXDInfo():
try:
info = LXDModule().config()
return info
    except Exception:
return {
'environment': {
'server_version': 'N/A'
},
'api_status': 'N/A'
}
def getMemory():
return int(psutil.virtual_memory().total / (1024*1024))
def getProcessorDetails():
return psutil.cpu_count()
def getDiskDetails():
return psutil.disk_usage('/').percent
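# Minimal invocation sketch; it assumes the caller has configured logging,
# since readInstanceDetails() only reports through logging.info.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    readInstanceDetails()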
| 33.153846
| 97
| 0.641145
|
import platform, sys, os, subprocess
import psutil
from app.api.models.LXDModule import LXDModule
import logging
def readInstanceDetails():
instanceDetails = ("Python Version: {}".format(platform.python_version()))
instanceDetails +=("\nPython Path: {}".format(' '.join(path for path in sys.path)))
instanceDetails +=("\nLXD Version: {}".format(getLXDInfo()['environment']['server_version']))
instanceDetails +=("\nLXD Status: {}".format(getLXDInfo()['api_status']))
instanceDetails +=("\nOS: {}".format(platform.platform()))
instanceDetails +=("\nLXDUI Path: {}".format(sys.path[0]))
instanceDetails +=("\nCPU Count: {}".format(getProcessorDetails()))
instanceDetails +=("\nMemory: {}MB".format(getMemory()))
instanceDetails +=("\nDisk used percent: {}".format(getDiskDetails()))
logging.info(instanceDetails)
def getLXDInfo():
try:
info = LXDModule().config()
return info
    except Exception:
return {
'environment': {
'server_version': 'N/A'
},
'api_status': 'N/A'
}
def getMemory():
return int(psutil.virtual_memory().total / (1024*1024))
def getProcessorDetails():
return psutil.cpu_count()
def getDiskDetails():
return psutil.disk_usage('/').percent
| true
| true
|
f70ab0747198d4d8109e8dca9b2fd2a0a75a1492
| 241
|
py
|
Python
|
python_work/salvar_nun_predileto.py
|
lucas-jsvd/python_crash_course_2nd
|
8404e7769bef7b90b9b0897996c3a3f969bb72bd
|
[
"Unlicense"
] | null | null | null |
python_work/salvar_nun_predileto.py
|
lucas-jsvd/python_crash_course_2nd
|
8404e7769bef7b90b9b0897996c3a3f969bb72bd
|
[
"Unlicense"
] | null | null | null |
python_work/salvar_nun_predileto.py
|
lucas-jsvd/python_crash_course_2nd
|
8404e7769bef7b90b9b0897996c3a3f969bb72bd
|
[
"Unlicense"
] | null | null | null |
import json
filename = "num_predileto.txt"
try:
numero = int(input("Qual o seu numero predileto? "))
except ValueError:
print("Você digitou um valor incorreto.")
else:
with open(filename, "w") as f:
json.dump(numero, f)
| 21.909091
| 56
| 0.66805
|
import json
filename = "num_predileto.txt"
try:
numero = int(input("Qual o seu numero predileto? "))
except ValueError:
print("Você digitou um valor incorreto.")
else:
with open(filename, "w") as f:
json.dump(numero, f)
| true
| true
|
f70ab27bcb01927f35c8740cbd127b50bca48faf
| 4,338
|
py
|
Python
|
fastai2/data/block.py
|
bearpelican/fastai2
|
445fa28e42b8d6205adc135527c22883fcfbef41
|
[
"Apache-2.0"
] | null | null | null |
fastai2/data/block.py
|
bearpelican/fastai2
|
445fa28e42b8d6205adc135527c22883fcfbef41
|
[
"Apache-2.0"
] | null | null | null |
fastai2/data/block.py
|
bearpelican/fastai2
|
445fa28e42b8d6205adc135527c22883fcfbef41
|
[
"Apache-2.0"
] | 1
|
2020-08-20T14:20:47.000Z
|
2020-08-20T14:20:47.000Z
|
#AUTOGENERATED! DO NOT EDIT! File to edit: dev/07_data.block.ipynb (unless otherwise specified).
__all__ = ['TransformBlock', 'CategoryBlock', 'MultiCategoryBlock', 'DataBlock']
#Cell
from ..torch_basics import *
from ..test import *
from .core import *
from .load import *
from .external import *
from .transforms import *
#Cell
class TransformBlock():
"A basic wrapper that links defaults transforms for the data block API"
def __init__(self, type_tfms=None, item_tfms=None, batch_tfms=Cuda, dl_type=None, dbunch_kwargs=None):
self.type_tfms = L(type_tfms)
self.item_tfms = ToTensor + L(item_tfms)
self.batch_tfms = Cuda + L(batch_tfms)
self.dl_type,self.dbunch_kwargs = dl_type,({} if dbunch_kwargs is None else dbunch_kwargs)
#Cell
def CategoryBlock(vocab=None, add_na=False):
"`TransformBlock` for single-label categorical targets"
return TransformBlock(type_tfms=Categorize(vocab=vocab, add_na=add_na))
#Cell
def MultiCategoryBlock(encoded=False, vocab=None, add_na=False):
"`TransformBlock` for multi-label categorical targets"
tfm = EncodedMultiCategorize(vocab=vocab) if encoded else [MultiCategorize(vocab=vocab, add_na=add_na), OneHotEncode]
return TransformBlock(type_tfms=tfm)
#Cell
from inspect import isfunction,ismethod
#Cell
def _merge_tfms(*tfms):
"Group the `tfms` in a single list, removing duplicates (from the same class) and instantiating"
g = groupby(concat(*tfms), lambda o:
o if isinstance(o, type) else o.__qualname__ if (isfunction(o) or ismethod(o)) else o.__class__)
return L(v[-1] for k,v in g.items()).map(instantiate)
#Cell
@docs
@funcs_kwargs
class DataBlock():
"Generic container to quickly build `DataSource` and `DataBunch`"
get_x=get_items=splitter=get_y = None
dl_type = TfmdDL
_methods = 'get_items splitter get_y get_x'.split()
def __init__(self, blocks=None, dl_type=None, getters=None, n_inp=None, **kwargs):
blocks = L(getattr(self,'blocks',(TransformBlock,TransformBlock)) if blocks is None else blocks)
blocks = L(b() if callable(b) else b for b in blocks)
self.default_type_tfms = blocks.attrgot('type_tfms', L())
self.default_item_tfms = _merge_tfms(*blocks.attrgot('item_tfms', L()))
self.default_batch_tfms = _merge_tfms(*blocks.attrgot('batch_tfms', L()))
for t in blocks:
if getattr(t, 'dl_type', None) is not None: self.dl_type = t.dl_type
if dl_type is not None: self.dl_type = dl_type
self.databunch = delegates(self.dl_type.__init__)(self.databunch)
self.dbunch_kwargs = merge(*blocks.attrgot('dbunch_kwargs', {}))
self.n_inp,self.getters = n_inp,L(getters)
if getters is not None: assert self.get_x is None and self.get_y is None
assert not kwargs
def datasource(self, source, type_tfms=None):
self.source = source
items = (self.get_items or noop)(source)
if isinstance(items,tuple):
items = L(items).zip()
labellers = [itemgetter(i) for i in range_of(self.default_type_tfms)]
else: labellers = [noop] * len(self.default_type_tfms)
splits = (self.splitter or noop)(items)
if self.get_x: labellers[0] = self.get_x
if self.get_y: labellers[1] = self.get_y
if self.getters: labellers = self.getters
if type_tfms is None: type_tfms = [L() for t in self.default_type_tfms]
type_tfms = L([self.default_type_tfms, type_tfms, labellers]).map_zip(
lambda tt,tfm,l: L(l) + _merge_tfms(tt, tfm))
return DataSource(items, tfms=type_tfms, splits=splits, dl_type=self.dl_type, n_inp=self.n_inp)
def databunch(self, source, path='.', type_tfms=None, item_tfms=None, batch_tfms=None, **kwargs):
dsrc = self.datasource(source, type_tfms=type_tfms)
item_tfms = _merge_tfms(self.default_item_tfms, item_tfms)
batch_tfms = _merge_tfms(self.default_batch_tfms, batch_tfms)
kwargs = {**self.dbunch_kwargs, **kwargs}
return dsrc.databunch(path=path, after_item=item_tfms, after_batch=batch_tfms, **kwargs)
_docs = dict(datasource="Create a `Datasource` from `source` with `type_tfms`",
databunch="Create a `DataBunch` from `source` with `item_tfms` and `batch_tfms`")
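# Sketch of how the pieces above are meant to compose (illustrative only; the
# ImageBlock, get_image_files, RandomSplitter and parent_label helpers are
# assumed to come from elsewhere in the fastai2 library, not from this module):
#
#   dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
#                      get_items=get_image_files,
#                      splitter=RandomSplitter(),
#                      get_y=parent_label)
#   dsrc = dblock.datasource(path_to_images)          # builds a DataSource
#   dbunch = dblock.databunch(path_to_images, bs=64)  # builds a DataBunch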
| 48.2
| 121
| 0.693407
|
__all__ = ['TransformBlock', 'CategoryBlock', 'MultiCategoryBlock', 'DataBlock']
from ..torch_basics import *
from ..test import *
from .core import *
from .load import *
from .external import *
from .transforms import *
class TransformBlock():
def __init__(self, type_tfms=None, item_tfms=None, batch_tfms=Cuda, dl_type=None, dbunch_kwargs=None):
self.type_tfms = L(type_tfms)
self.item_tfms = ToTensor + L(item_tfms)
self.batch_tfms = Cuda + L(batch_tfms)
self.dl_type,self.dbunch_kwargs = dl_type,({} if dbunch_kwargs is None else dbunch_kwargs)
def CategoryBlock(vocab=None, add_na=False):
return TransformBlock(type_tfms=Categorize(vocab=vocab, add_na=add_na))
def MultiCategoryBlock(encoded=False, vocab=None, add_na=False):
tfm = EncodedMultiCategorize(vocab=vocab) if encoded else [MultiCategorize(vocab=vocab, add_na=add_na), OneHotEncode]
return TransformBlock(type_tfms=tfm)
from inspect import isfunction,ismethod
def _merge_tfms(*tfms):
g = groupby(concat(*tfms), lambda o:
o if isinstance(o, type) else o.__qualname__ if (isfunction(o) or ismethod(o)) else o.__class__)
return L(v[-1] for k,v in g.items()).map(instantiate)
@docs
@funcs_kwargs
class DataBlock():
get_x=get_items=splitter=get_y = None
dl_type = TfmdDL
_methods = 'get_items splitter get_y get_x'.split()
def __init__(self, blocks=None, dl_type=None, getters=None, n_inp=None, **kwargs):
blocks = L(getattr(self,'blocks',(TransformBlock,TransformBlock)) if blocks is None else blocks)
blocks = L(b() if callable(b) else b for b in blocks)
self.default_type_tfms = blocks.attrgot('type_tfms', L())
self.default_item_tfms = _merge_tfms(*blocks.attrgot('item_tfms', L()))
self.default_batch_tfms = _merge_tfms(*blocks.attrgot('batch_tfms', L()))
for t in blocks:
if getattr(t, 'dl_type', None) is not None: self.dl_type = t.dl_type
if dl_type is not None: self.dl_type = dl_type
self.databunch = delegates(self.dl_type.__init__)(self.databunch)
self.dbunch_kwargs = merge(*blocks.attrgot('dbunch_kwargs', {}))
self.n_inp,self.getters = n_inp,L(getters)
if getters is not None: assert self.get_x is None and self.get_y is None
assert not kwargs
def datasource(self, source, type_tfms=None):
self.source = source
items = (self.get_items or noop)(source)
if isinstance(items,tuple):
items = L(items).zip()
labellers = [itemgetter(i) for i in range_of(self.default_type_tfms)]
else: labellers = [noop] * len(self.default_type_tfms)
splits = (self.splitter or noop)(items)
if self.get_x: labellers[0] = self.get_x
if self.get_y: labellers[1] = self.get_y
if self.getters: labellers = self.getters
if type_tfms is None: type_tfms = [L() for t in self.default_type_tfms]
type_tfms = L([self.default_type_tfms, type_tfms, labellers]).map_zip(
lambda tt,tfm,l: L(l) + _merge_tfms(tt, tfm))
return DataSource(items, tfms=type_tfms, splits=splits, dl_type=self.dl_type, n_inp=self.n_inp)
def databunch(self, source, path='.', type_tfms=None, item_tfms=None, batch_tfms=None, **kwargs):
dsrc = self.datasource(source, type_tfms=type_tfms)
item_tfms = _merge_tfms(self.default_item_tfms, item_tfms)
batch_tfms = _merge_tfms(self.default_batch_tfms, batch_tfms)
kwargs = {**self.dbunch_kwargs, **kwargs}
return dsrc.databunch(path=path, after_item=item_tfms, after_batch=batch_tfms, **kwargs)
_docs = dict(datasource="Create a `Datasource` from `source` with `type_tfms`",
databunch="Create a `DataBunch` from `source` with `item_tfms` and `batch_tfms`")
| true
| true
|
f70ab2806f62f35dba6a9c4d850e2fbfb76dd7b6
| 840
|
py
|
Python
|
setup.py
|
qri-io/qri-python
|
ed7b9a0047b3d50623cef40211e9aebf45c05e42
|
[
"MIT"
] | 6
|
2019-09-25T20:35:04.000Z
|
2021-02-12T16:33:25.000Z
|
setup.py
|
qri-io/qri-python
|
ed7b9a0047b3d50623cef40211e9aebf45c05e42
|
[
"MIT"
] | 27
|
2018-08-29T13:50:02.000Z
|
2020-10-28T16:52:54.000Z
|
setup.py
|
qri-io/qri-python
|
ed7b9a0047b3d50623cef40211e9aebf45c05e42
|
[
"MIT"
] | 3
|
2020-07-21T20:18:09.000Z
|
2021-01-16T09:31:20.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import setuptools
with open('README.md', 'r') as fp:
long_description = fp.read()
pos = long_description.find('# Development')
if pos > -1:
long_description = long_description[:pos]
setuptools.setup(
name='qri',
version='0.1.5',
author='Dustin Long',
author_email='dustmop@qri.io',
description='qri python client',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/qri-io/qri-python',
packages=setuptools.find_packages(),
install_requires=[
'pandas==1.0.0',
'Markdown==3.2.2',
'requests==2.24.0',
],
classifiers=[
'Programming Language :: Python',
'License :: OSI Approved :: MIT License',
],
python_requires='>=3.6'
)
| 25.454545
| 50
| 0.620238
|
import setuptools
with open('README.md', 'r') as fp:
long_description = fp.read()
pos = long_description.find('# Development')
if pos > -1:
long_description = long_description[:pos]
setuptools.setup(
name='qri',
version='0.1.5',
author='Dustin Long',
author_email='dustmop@qri.io',
description='qri python client',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/qri-io/qri-python',
packages=setuptools.find_packages(),
install_requires=[
'pandas==1.0.0',
'Markdown==3.2.2',
'requests==2.24.0',
],
classifiers=[
'Programming Language :: Python',
'License :: OSI Approved :: MIT License',
],
python_requires='>=3.6'
)
| true
| true
|
f70ab361101331e9cbcc4a524b6be7427a7c3df4
| 40
|
py
|
Python
|
projeto0/test_testando.py
|
Matheus-Zauza-Maschietto/Python-Django
|
8f42489ebadbef53863ad00ab474bb213a6cc4bc
|
[
"MIT"
] | null | null | null |
projeto0/test_testando.py
|
Matheus-Zauza-Maschietto/Python-Django
|
8f42489ebadbef53863ad00ab474bb213a6cc4bc
|
[
"MIT"
] | null | null | null |
projeto0/test_testando.py
|
Matheus-Zauza-Maschietto/Python-Django
|
8f42489ebadbef53863ad00ab474bb213a6cc4bc
|
[
"MIT"
] | null | null | null |
def test_something():
assert 1 == 1
| 13.333333
| 21
| 0.625
|
def test_something():
assert 1 == 1
| true
| true
|
f70ab5c7458892a86950720ad6a41431776a170d
| 355
|
py
|
Python
|
tests/views.py
|
iLoveTux/django-slick-reporting
|
ef88f3bab3094e976bd306a112501d547c88fed1
|
[
"BSD-3-Clause"
] | null | null | null |
tests/views.py
|
iLoveTux/django-slick-reporting
|
ef88f3bab3094e976bd306a112501d547c88fed1
|
[
"BSD-3-Clause"
] | null | null | null |
tests/views.py
|
iLoveTux/django-slick-reporting
|
ef88f3bab3094e976bd306a112501d547c88fed1
|
[
"BSD-3-Clause"
] | null | null | null |
from slick_reporting.views import SampleReportView
from .models import OrderLine
class MonthlyProductSales(SampleReportView):
report_model = OrderLine
date_field = 'date_placed' # or 'order__date_placed'
group_by = 'product'
columns = ['name', 'sku']
time_series_pattern = 'monthly'
time_series_columns = ['__total_quantity__']
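# Sketch of exposing this report in a URLconf (assumes SampleReportView follows
# Django's class-based-view convention; the route and name are illustrative):
#
#   from django.urls import path
#   urlpatterns = [
#       path('reports/monthly-product-sales/',
#            MonthlyProductSales.as_view(),
#            name='monthly-product-sales'),
#   ]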
| 29.583333
| 57
| 0.746479
|
from slick_reporting.views import SampleReportView
from .models import OrderLine
class MonthlyProductSales(SampleReportView):
report_model = OrderLine
    date_field = 'date_placed'
    group_by = 'product'
columns = ['name', 'sku']
time_series_pattern = 'monthly'
time_series_columns = ['__total_quantity__']
| true
| true
|
f70ab5e049ad23ad9364f4d20975e6dd5a3fcac6
| 7,595
|
py
|
Python
|
utilities/generate_schema.py
|
hep-gc/cloud-scheduler-2
|
180d9dc4f8751cf8c8254518e46f83f118187e84
|
[
"Apache-2.0"
] | 3
|
2020-03-03T03:25:36.000Z
|
2021-12-03T15:31:39.000Z
|
utilities/generate_schema.py
|
hep-gc/cloud-scheduler-2
|
180d9dc4f8751cf8c8254518e46f83f118187e84
|
[
"Apache-2.0"
] | 341
|
2017-06-08T17:27:59.000Z
|
2022-01-28T19:37:57.000Z
|
utilities/generate_schema.py
|
hep-gc/cloud-scheduler-2
|
180d9dc4f8751cf8c8254518e46f83f118187e84
|
[
"Apache-2.0"
] | 3
|
2018-04-25T16:13:20.000Z
|
2020-04-15T20:03:46.000Z
|
#!/usr/bin/env python3
"""
Synopsis: utilities/generate_schema.py > lib/schema.py
This routine pulls the current table definitions from the csv2 database and writes the
schema to stdout. To use the schema definitions:
from lib.schema import <view_or_table_name_1>, <view_or_table_name_2>, ...
"""
from subprocess import Popen, PIPE
from tempfile import mkdtemp
import json
import os
import sys
import yaml
REMOVE_BRACKETS = str.maketrans('()', ' ')
def main(args):
"""
This does everything:
o Writes the schema header to stdout.
o Retrieves the list of tables from the csv2 database.
o Then for each table:
- Resets the variable _stdout to just the table header.
- Retrieves the column list for the table.
- Then for each column:
          + Appends the column definition to _stdout.
- Appends the table footer to _stdout.
- Writes the table definition to stdout.
"""
gvar = {}
fd = open('/etc/cloudscheduler/cloudscheduler.yaml')
gvar['csv2_config'] = yaml.full_load(fd.read())
fd.close()
# Schema_na_path has been updated to point to the same file as the original schema
# half of this code can probably be removed since it's overwriting the same file
# we need to check if there is any required computing done in the first loop that is reused in the second
# or if we can just remove the first (sqlalchemy) version
gvar['cmd_path'] = os.path.abspath(args[0])
gvar['cmd_path_stat'] = os.stat(gvar['cmd_path'])
gvar['path_info'] = gvar['cmd_path'].split('/')
gvar['ix'] = gvar['path_info'].index('cloudscheduler')
gvar['schema_path'] = '%s/lib/schema.py' % '/'.join(gvar['path_info'][:gvar['ix']+1])
gvar['schema_na_path'] = '%s/lib/schema.py' % '/'.join(gvar['path_info'][:gvar['ix']+1])
gvar['fd'] = open(gvar['schema_path'], 'w')
gvar['schema_na'] = {}
_p1 = Popen(
[
'mysql',
'-u%s' % gvar['csv2_config']['database']['db_user'],
'-p%s' % gvar['csv2_config']['database']['db_password'],
'-h%s' % gvar['csv2_config']['database']['db_host'],
'-e',
'show tables;',
gvar['csv2_config']['database']['db_name']
],
stdout=PIPE,
stderr=PIPE
)
_p2 = Popen(
[
'awk',
'!/Tables_in_csv2/ {print $1}'
],
stdin=_p1.stdout,
stdout=PIPE,
stderr=PIPE
)
stdout, stderr = _p2.communicate()
if _p2.returncode != 0:
print('Failed to retrieve table list.')
exit(1)
gvar['fd'].write(
"if 'Table' not in locals() and 'Table' not in globals():\n" + \
" from sqlalchemy import Table, Column, Float, Integer, String, MetaData, ForeignKey\n" + \
" metadata = MetaData()\n\n"
)
tables = stdout.decode('ascii').split()
for table in tables:
_stdout = ["%s = Table('%s', metadata,\n" % (table, table)]
gvar['schema_na'][table] = {'keys': [], 'columns': {}}
_p1 = Popen(
[
'mysql',
'-u%s' % gvar['csv2_config']['database']['db_user'],
'-p%s' % gvar['csv2_config']['database']['db_password'],
'-h%s' % gvar['csv2_config']['database']['db_host'],
'-e',
'show columns from %s;' % table,
gvar['csv2_config']['database']['db_name']
],
stdout=PIPE,
stderr=PIPE
)
_p2 = Popen(
[
'awk',
'!/^+/'
],
stdin=_p1.stdout,
stdout=PIPE,
stderr=PIPE
)
stdout, stderr = _p2.communicate()
if _p2.returncode != 0:
print('Failed to retrieve table columns.')
exit(1)
columns = stdout.decode('ascii').split("\n")
for _ix in range(1, len(columns)):
_w = columns[_ix].split()
if len(_w) > 2:
_stdout.append(" Column('%s'," % _w[0])
# gvar['schema_na'][table]['columns'][_w[0]] = []
if _w[1][:5] == 'char(' or \
_w[1][:8] == 'varchar(':
_w2 = _w[1].translate(REMOVE_BRACKETS).split()
_stdout.append(" String(%s)" % _w2[1])
gvar['schema_na'][table]['columns'][_w[0]] = {'type': 'str', 'len': _w2[1], 'nulls': _w[2]}
elif _w[1][:4] == 'int(' or \
_w[1][:6] == 'bigint' or \
_w[1][:7] == 'decimal' or \
_w[1][:8] == 'smallint' or \
_w[1][:7] == 'tinyint':
_stdout.append(" Integer")
gvar['schema_na'][table]['columns'][_w[0]] = {'type': 'int'}
elif _w[1] == 'text' or \
_w[1][:4] == 'date' or \
_w[1][:8] == 'datetime' or \
_w[1][:4] == 'time' or \
_w[1][:9] == 'timestamp' or \
_w[1] == 'tinytext' or \
_w[1] == 'longtext' or \
_w[1] == 'mediumtext':
_stdout.append(" String")
gvar['schema_na'][table]['columns'][_w[0]] = {'type': 'str', 'nulls': _w[2]}
elif _w[1][:7] == 'double' or \
_w[1][:5] == 'float':
_stdout.append(" Float")
gvar['schema_na'][table]['columns'][_w[0]] = {'type': 'float'}
else:
print('Table %s, unknown data type for column: %s' % (table, columns[_ix]))
exit(1)
if len(_w) > 3 and _w[3] == 'PRI':
_stdout.append(", primary_key=True")
gvar['schema_na'][table]['keys'].append(_w[0])
if _ix < len(columns) - 2:
_stdout.append("),\n")
else:
_stdout.append(")\n )\n")
gvar['fd'].write('%s\n' % ''.join(_stdout))
gvar['fd'].close()
gvar['fd'] = open(gvar['schema_na_path'], 'w')
gvar['fd'].write('schema = {\n')
tix = 0
for table in sorted(gvar['schema_na']):
gvar['fd'].write(' "%s": {\n "keys": [\n' % table)
ix = 0
for key in gvar['schema_na'][table]['keys']:
if ix < len(gvar['schema_na'][table]['keys'])-1:
gvar['fd'].write(' "%s",\n' % key)
else:
gvar['fd'].write(' "%s"\n' % key)
ix += 1
gvar['fd'].write(' ],\n "columns": {\n')
ix = 0
for column in gvar['schema_na'][table]['columns']:
if ix < len(gvar['schema_na'][table]['columns'])-1:
gvar['fd'].write(' "%s": %s,\n' % (column, json.dumps(gvar['schema_na'][table]['columns'][column])))
else:
gvar['fd'].write(' "%s": %s\n' % (column, json.dumps(gvar['schema_na'][table]['columns'][column])))
ix += 1
if tix < len(gvar['schema_na'])-1:
gvar['fd'].write(' }\n },\n')
else:
gvar['fd'].write(' }\n }\n }\n')
tix += 1
gvar['fd'].close()
_p1 = Popen(
[
'chown',
'%s.%s' % (gvar['cmd_path_stat'].st_uid, gvar['cmd_path_stat'].st_gid),
gvar['schema_path']
]
)
_p1.communicate()
if __name__ == "__main__":
main(sys.argv)
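# Illustrative shape of the output: the first pass writes SQLAlchemy Table
# definitions and the second pass rewrites the same lib/schema.py with a plain
# dict, so the dict form is what ends up on disk. Table and column names below
# are placeholders, not real csv2 tables:
#
#   schema = {
#       "example_table": {
#           "keys": [
#               "example_id"
#           ],
#           "columns": {
#               "example_id": {"type": "str", "len": "32", "nulls": "NO"},
#               "created": {"type": "str", "nulls": "YES"}
#           }
#       }
#   }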
| 35.325581
| 127
| 0.474523
|
from subprocess import Popen, PIPE
from tempfile import mkdtemp
import json
import os
import sys
import yaml
REMOVE_BRACKETS = str.maketrans('()', ' ')
def main(args):
gvar = {}
fd = open('/etc/cloudscheduler/cloudscheduler.yaml')
gvar['csv2_config'] = yaml.full_load(fd.read())
fd.close()
# we need to check if there is any required computing done in the first loop that is reused in the second
# or if we can just remove the first (sqlalchemy) version
gvar['cmd_path'] = os.path.abspath(args[0])
gvar['cmd_path_stat'] = os.stat(gvar['cmd_path'])
gvar['path_info'] = gvar['cmd_path'].split('/')
gvar['ix'] = gvar['path_info'].index('cloudscheduler')
gvar['schema_path'] = '%s/lib/schema.py' % '/'.join(gvar['path_info'][:gvar['ix']+1])
gvar['schema_na_path'] = '%s/lib/schema.py' % '/'.join(gvar['path_info'][:gvar['ix']+1])
gvar['fd'] = open(gvar['schema_path'], 'w')
gvar['schema_na'] = {}
_p1 = Popen(
[
'mysql',
'-u%s' % gvar['csv2_config']['database']['db_user'],
'-p%s' % gvar['csv2_config']['database']['db_password'],
'-h%s' % gvar['csv2_config']['database']['db_host'],
'-e',
'show tables;',
gvar['csv2_config']['database']['db_name']
],
stdout=PIPE,
stderr=PIPE
)
_p2 = Popen(
[
'awk',
'!/Tables_in_csv2/ {print $1}'
],
stdin=_p1.stdout,
stdout=PIPE,
stderr=PIPE
)
stdout, stderr = _p2.communicate()
if _p2.returncode != 0:
print('Failed to retrieve table list.')
exit(1)
gvar['fd'].write(
"if 'Table' not in locals() and 'Table' not in globals():\n" + \
" from sqlalchemy import Table, Column, Float, Integer, String, MetaData, ForeignKey\n" + \
" metadata = MetaData()\n\n"
)
tables = stdout.decode('ascii').split()
for table in tables:
_stdout = ["%s = Table('%s', metadata,\n" % (table, table)]
gvar['schema_na'][table] = {'keys': [], 'columns': {}}
_p1 = Popen(
[
'mysql',
'-u%s' % gvar['csv2_config']['database']['db_user'],
'-p%s' % gvar['csv2_config']['database']['db_password'],
'-h%s' % gvar['csv2_config']['database']['db_host'],
'-e',
'show columns from %s;' % table,
gvar['csv2_config']['database']['db_name']
],
stdout=PIPE,
stderr=PIPE
)
_p2 = Popen(
[
'awk',
'!/^+/'
],
stdin=_p1.stdout,
stdout=PIPE,
stderr=PIPE
)
stdout, stderr = _p2.communicate()
if _p2.returncode != 0:
print('Failed to retrieve table columns.')
exit(1)
columns = stdout.decode('ascii').split("\n")
for _ix in range(1, len(columns)):
_w = columns[_ix].split()
if len(_w) > 2:
_stdout.append(" Column('%s'," % _w[0])
# gvar['schema_na'][table]['columns'][_w[0]] = []
if _w[1][:5] == 'char(' or \
_w[1][:8] == 'varchar(':
_w2 = _w[1].translate(REMOVE_BRACKETS).split()
_stdout.append(" String(%s)" % _w2[1])
gvar['schema_na'][table]['columns'][_w[0]] = {'type': 'str', 'len': _w2[1], 'nulls': _w[2]}
elif _w[1][:4] == 'int(' or \
_w[1][:6] == 'bigint' or \
_w[1][:7] == 'decimal' or \
_w[1][:8] == 'smallint' or \
_w[1][:7] == 'tinyint':
_stdout.append(" Integer")
gvar['schema_na'][table]['columns'][_w[0]] = {'type': 'int'}
elif _w[1] == 'text' or \
_w[1][:4] == 'date' or \
_w[1][:8] == 'datetime' or \
_w[1][:4] == 'time' or \
_w[1][:9] == 'timestamp' or \
_w[1] == 'tinytext' or \
_w[1] == 'longtext' or \
_w[1] == 'mediumtext':
_stdout.append(" String")
gvar['schema_na'][table]['columns'][_w[0]] = {'type': 'str', 'nulls': _w[2]}
elif _w[1][:7] == 'double' or \
_w[1][:5] == 'float':
_stdout.append(" Float")
gvar['schema_na'][table]['columns'][_w[0]] = {'type': 'float'}
else:
print('Table %s, unknown data type for column: %s' % (table, columns[_ix]))
exit(1)
if len(_w) > 3 and _w[3] == 'PRI':
_stdout.append(", primary_key=True")
gvar['schema_na'][table]['keys'].append(_w[0])
if _ix < len(columns) - 2:
_stdout.append("),\n")
else:
_stdout.append(")\n )\n")
gvar['fd'].write('%s\n' % ''.join(_stdout))
gvar['fd'].close()
gvar['fd'] = open(gvar['schema_na_path'], 'w')
gvar['fd'].write('schema = {\n')
tix = 0
for table in sorted(gvar['schema_na']):
gvar['fd'].write(' "%s": {\n "keys": [\n' % table)
ix = 0
for key in gvar['schema_na'][table]['keys']:
if ix < len(gvar['schema_na'][table]['keys'])-1:
gvar['fd'].write(' "%s",\n' % key)
else:
gvar['fd'].write(' "%s"\n' % key)
ix += 1
gvar['fd'].write(' ],\n "columns": {\n')
ix = 0
for column in gvar['schema_na'][table]['columns']:
if ix < len(gvar['schema_na'][table]['columns'])-1:
gvar['fd'].write(' "%s": %s,\n' % (column, json.dumps(gvar['schema_na'][table]['columns'][column])))
else:
gvar['fd'].write(' "%s": %s\n' % (column, json.dumps(gvar['schema_na'][table]['columns'][column])))
ix += 1
if tix < len(gvar['schema_na'])-1:
gvar['fd'].write(' }\n },\n')
else:
gvar['fd'].write(' }\n }\n }\n')
tix += 1
gvar['fd'].close()
_p1 = Popen(
[
'chown',
'%s.%s' % (gvar['cmd_path_stat'].st_uid, gvar['cmd_path_stat'].st_gid),
gvar['schema_path']
]
)
_p1.communicate()
if __name__ == "__main__":
main(sys.argv)
| true
| true
|
f70ab5e26ff2ae94d430049a05b5c236c13075a0
| 8,872
|
py
|
Python
|
train.py
|
antonyvigouret/Text-Recognition-PyTorch
|
7576480684612e856602169b3229fe6c8f4b4b9d
|
[
"MIT"
] | 2
|
2020-11-12T17:28:30.000Z
|
2020-11-13T14:45:52.000Z
|
train.py
|
antonyvigouret/Text-Recognition-PyTorch
|
7576480684612e856602169b3229fe6c8f4b4b9d
|
[
"MIT"
] | null | null | null |
train.py
|
antonyvigouret/Text-Recognition-PyTorch
|
7576480684612e856602169b3229fe6c8f4b4b9d
|
[
"MIT"
] | null | null | null |
import string
import torch
from torch.nn import CrossEntropyLoss
from torch.nn import CTCLoss
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from torchsummary import summary
from tqdm import tqdm
from cnn_seq2seq import ConvSeq2Seq
from cnn_seq2seq import Decoder
from cnn_seq2seq import Encoder
from cnn_seq2seq_att import ConvSeq2SeqAtt
from crnn import CRNN
from data_utils import FakeTextImageGenerator
from utils import labels_to_text
from utils import text_to_labels
def train(path=None):
dataset = FakeTextImageGenerator(batch_size=16).iter()
criterion = CTCLoss(reduction="mean", zero_infinity=True)
net = CRNN(nclass=100).float()
optimizer = optim.Adam(net.parameters(), lr=0.001)
if path:
checkpoint = torch.load(path)
net.load_state_dict(checkpoint["model_state_dict"])
optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
epoch = checkpoint["epoch"]
loss = checkpoint["loss"]
print(f"model current epoch: {epoch} with loss: {loss}")
# loop over the dataset multiple times
for epoch in range(1, 1000):
running_loss = 0.0
loop = tqdm(range(100))
for i in loop:
data = next(dataset)
images = data["the_inputs"]
labels = data["the_labels"]
input_length = data["input_length"]
label_length = data["label_length"]
targets = data["targets"]
# print("target", targets)
# print("target l", targets.size())
# print("label_l", label_length)
# print("label_l l", label_length.size())
# print("pred_l", input_length)
# print("pred_l l", input_length.size())
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(images.float())
# print(outputs[8, 0, :])
# print(outputs[:, 0, :])
# print(outputs.size())
loss = criterion(outputs, labels, input_length, label_length)
# print(loss.item())
loss.backward()
optimizer.step()
running_loss += loss.item()
loop.set_postfix(epoch=epoch, loss=(running_loss / (i + 1)))
# print(f"Epoch: {epoch} | Loss: {running_loss/100}")
torch.save(
{
"epoch": epoch,
"model_state_dict": net.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"loss": running_loss,
},
"checkpoint5.pt",
)
print("Finished Training")
def train_cs2s(path=None):
alphabet = string.printable
nclass = len(alphabet)
writer = SummaryWriter()
dataset = FakeTextImageGenerator(batch_size=4).iter()
criterion = CrossEntropyLoss(ignore_index=97)
encoder = Encoder(512, 512, 1, 0)
decoder = Decoder(512, 100, 100, 1, 0)
net = ConvSeq2Seq(encoder, decoder, nclass=nclass).float()
optimizer = optim.Adam(net.parameters(), lr=0.003)
if path:
net2 = CRNN(nclass=100).float()
checkpoint = torch.load(path)
net2.load_state_dict(checkpoint["model_state_dict"])
# optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
# epoch = checkpoint["epoch"]
# loss = checkpoint["loss"]
# print(f"model current epoch: {epoch} with loss: {loss}")
print(net2)
net.conv1.load_state_dict(net2.conv1.state_dict())
net.conv2.load_state_dict(net2.conv2.state_dict())
net.conv3.load_state_dict(net2.conv3.state_dict())
net.conv4.load_state_dict(net2.conv4.state_dict())
net.conv5.load_state_dict(net2.conv5.state_dict())
net.conv6.load_state_dict(net2.conv6.state_dict())
net.conv7.load_state_dict(net2.conv7.state_dict())
net.train()
# loop over the dataset multiple times
step = 0
for epoch in range(1, 1000):
running_loss = 0.0
loop = tqdm(range(100))
for i in loop:
data = next(dataset)
images = data["the_inputs"]
labels = data["the_labels"]
input_length = data["input_length"]
label_length = data["label_length"]
targets = data["targets"]
# print("target", targets)
# print("target l", targets.size())
# print("label_l", label_length)
# print("label_l l", label_length.size())
# print("pred_l", input_length)
# print("pred_l l", input_length.size())
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(images.float(), labels, 0.5)
# permute batchsize and seq_len dim to match labels when using .view(-1, output.size()[2])
outputs = outputs.permute(1, 0, 2)
# print(outputs[8, 0, :])
# print(outputs[:, 0, :])
# print(outputs.size())
# print(labels.size())
output_argmax = outputs.argmax(2)
# print(output_argmax.view(-1))
# print(labels.reshape(-1))
loss = criterion(outputs.reshape(-1, 100), labels.reshape(-1))
writer.add_scalar("loss", loss.item(), step)
step += 1
loss.backward()
# torch.nn.utils.clip_grad_norm_(net.parameters(), 1)
optimizer.step()
running_loss += loss.item()
loop.set_postfix(epoch=epoch, Loss=(running_loss / (i + 1)))
# print(f"Epoch: {epoch} | Loss: {running_loss/100}")
torch.save(
{
"epoch": epoch,
"model_state_dict": net.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"loss": running_loss,
},
"cs2s_good.pt",
)
torch.save(net, "model_test_pretrained.pt")
print("Finished Training")
def train_cs2satt(path=None):
writer = SummaryWriter()
dataset = FakeTextImageGenerator(batch_size=8).iter()
criterion = CrossEntropyLoss(ignore_index=97)
net = ConvSeq2SeqAtt(nclass=100).float()
optimizer = optim.Adam(net.parameters(), lr=3e-4)
if path:
checkpoint = torch.load(path)
net.load_state_dict(checkpoint["model_state_dict"])
optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
epoch = checkpoint["epoch"]
loss = checkpoint["loss"]
print(f"model current epoch: {epoch} with loss: {loss}")
net.train()
# loop over the dataset multiple times
step = 0
for epoch in range(1, 1000):
running_loss = 0.0
loop = tqdm(range(100))
for i in loop:
data = next(dataset)
images = data["the_inputs"]
labels = data["the_labels"]
input_length = data["input_length"]
label_length = data["label_length"]
targets = data["targets"]
# print("target", targets)
# print("target l", targets.size())
# print("label_l", label_length)
# print("label_l l", label_length.size())
# print("pred_l", input_length)
# print("pred_l l", input_length.size())
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(images.float(), labels, 0.5)
# permute batchsize and seq_len dim to match labels when using .view(-1, output.size()[2])
outputs = outputs.permute(1, 0, 2)
# print(outputs[8, 0, :])
# print(outputs[:, 0, :])
# print(outputs.size())
# print(labels.size())
output_argmax = outputs.argmax(2)
# print(output_argmax.view(-1))
# print(labels.reshape(-1))
loss = criterion(outputs.reshape(-1, 100), labels.reshape(-1))
# print(loss.item())
writer.add_scalar("loss", loss.item(), step)
step += 1
loss.backward()
torch.nn.utils.clip_grad_norm_(net.parameters(), 1)
optimizer.step()
running_loss += loss.item()
loop.set_postfix(epoch=epoch, Loss=(running_loss / (i + 1)))
print(f"Epoch: {epoch} | Loss: {running_loss/100}")
torch.save(
{
"epoch": epoch,
"model_state_dict": net.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"loss": running_loss,
},
"cs2satt_good.pt",
)
# torch.save(net, "model_test_pretrained.pt")
print("Finished Training")
if __name__ == "__main__":
train_cs2satt("cs2satt_good.pt")
| 33.479245
| 102
| 0.577435
|
import string
import torch
from torch.nn import CrossEntropyLoss
from torch.nn import CTCLoss
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from torchsummary import summary
from tqdm import tqdm
from cnn_seq2seq import ConvSeq2Seq
from cnn_seq2seq import Decoder
from cnn_seq2seq import Encoder
from cnn_seq2seq_att import ConvSeq2SeqAtt
from crnn import CRNN
from data_utils import FakeTextImageGenerator
from utils import labels_to_text
from utils import text_to_labels
def train(path=None):
dataset = FakeTextImageGenerator(batch_size=16).iter()
criterion = CTCLoss(reduction="mean", zero_infinity=True)
net = CRNN(nclass=100).float()
optimizer = optim.Adam(net.parameters(), lr=0.001)
if path:
checkpoint = torch.load(path)
net.load_state_dict(checkpoint["model_state_dict"])
optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
epoch = checkpoint["epoch"]
loss = checkpoint["loss"]
print(f"model current epoch: {epoch} with loss: {loss}")
for epoch in range(1, 1000):
running_loss = 0.0
loop = tqdm(range(100))
for i in loop:
data = next(dataset)
images = data["the_inputs"]
labels = data["the_labels"]
input_length = data["input_length"]
label_length = data["label_length"]
targets = data["targets"]
optimizer.zero_grad()
outputs = net(images.float())
loss = criterion(outputs, labels, input_length, label_length)
loss.backward()
optimizer.step()
running_loss += loss.item()
loop.set_postfix(epoch=epoch, loss=(running_loss / (i + 1)))
torch.save(
{
"epoch": epoch,
"model_state_dict": net.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"loss": running_loss,
},
"checkpoint5.pt",
)
print("Finished Training")
def train_cs2s(path=None):
alphabet = string.printable
nclass = len(alphabet)
writer = SummaryWriter()
dataset = FakeTextImageGenerator(batch_size=4).iter()
criterion = CrossEntropyLoss(ignore_index=97)
encoder = Encoder(512, 512, 1, 0)
decoder = Decoder(512, 100, 100, 1, 0)
net = ConvSeq2Seq(encoder, decoder, nclass=nclass).float()
optimizer = optim.Adam(net.parameters(), lr=0.003)
if path:
net2 = CRNN(nclass=100).float()
checkpoint = torch.load(path)
net2.load_state_dict(checkpoint["model_state_dict"])
print(net2)
net.conv1.load_state_dict(net2.conv1.state_dict())
net.conv2.load_state_dict(net2.conv2.state_dict())
net.conv3.load_state_dict(net2.conv3.state_dict())
net.conv4.load_state_dict(net2.conv4.state_dict())
net.conv5.load_state_dict(net2.conv5.state_dict())
net.conv6.load_state_dict(net2.conv6.state_dict())
net.conv7.load_state_dict(net2.conv7.state_dict())
net.train()
step = 0
for epoch in range(1, 1000):
running_loss = 0.0
loop = tqdm(range(100))
for i in loop:
data = next(dataset)
images = data["the_inputs"]
labels = data["the_labels"]
input_length = data["input_length"]
label_length = data["label_length"]
targets = data["targets"]
optimizer.zero_grad()
outputs = net(images.float(), labels, 0.5)
outputs = outputs.permute(1, 0, 2)
output_argmax = outputs.argmax(2)
loss = criterion(outputs.reshape(-1, 100), labels.reshape(-1))
writer.add_scalar("loss", loss.item(), step)
step += 1
loss.backward()
optimizer.step()
running_loss += loss.item()
loop.set_postfix(epoch=epoch, Loss=(running_loss / (i + 1)))
torch.save(
{
"epoch": epoch,
"model_state_dict": net.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"loss": running_loss,
},
"cs2s_good.pt",
)
torch.save(net, "model_test_pretrained.pt")
print("Finished Training")
def train_cs2satt(path=None):
writer = SummaryWriter()
dataset = FakeTextImageGenerator(batch_size=8).iter()
criterion = CrossEntropyLoss(ignore_index=97)
net = ConvSeq2SeqAtt(nclass=100).float()
optimizer = optim.Adam(net.parameters(), lr=3e-4)
if path:
checkpoint = torch.load(path)
net.load_state_dict(checkpoint["model_state_dict"])
optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
epoch = checkpoint["epoch"]
loss = checkpoint["loss"]
print(f"model current epoch: {epoch} with loss: {loss}")
net.train()
step = 0
for epoch in range(1, 1000):
running_loss = 0.0
loop = tqdm(range(100))
for i in loop:
data = next(dataset)
images = data["the_inputs"]
labels = data["the_labels"]
input_length = data["input_length"]
label_length = data["label_length"]
targets = data["targets"]
optimizer.zero_grad()
outputs = net(images.float(), labels, 0.5)
outputs = outputs.permute(1, 0, 2)
output_argmax = outputs.argmax(2)
loss = criterion(outputs.reshape(-1, 100), labels.reshape(-1))
writer.add_scalar("loss", loss.item(), step)
step += 1
loss.backward()
torch.nn.utils.clip_grad_norm_(net.parameters(), 1)
optimizer.step()
running_loss += loss.item()
loop.set_postfix(epoch=epoch, Loss=(running_loss / (i + 1)))
print(f"Epoch: {epoch} | Loss: {running_loss/100}")
torch.save(
{
"epoch": epoch,
"model_state_dict": net.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"loss": running_loss,
},
"cs2satt_good.pt",
)
print("Finished Training")
if __name__ == "__main__":
train_cs2satt("cs2satt_good.pt")
| true
| true
|
f70ab5f20cd07c566f212fef5ff14cea04806a5f
| 4,204
|
py
|
Python
|
tests/services/test_file_svc.py
|
emmanvg/caldera
|
633a3f1ab06543d737791186f9a22c6587e5fa44
|
[
"Apache-2.0"
] | 1
|
2021-05-24T08:44:09.000Z
|
2021-05-24T08:44:09.000Z
|
tests/services/test_file_svc.py
|
watchmen-coder/caldera
|
f13521b69ce959dad31911d4afa4e20a21790875
|
[
"Apache-2.0"
] | 1
|
2021-04-16T00:03:00.000Z
|
2021-04-16T00:03:00.000Z
|
tests/services/test_file_svc.py
|
watchmen-coder/caldera
|
f13521b69ce959dad31911d4afa4e20a21790875
|
[
"Apache-2.0"
] | null | null | null |
import os
import pytest
import yaml
from tests import AsyncMock
from asyncio import Future
from app.utility.file_decryptor import decrypt
@pytest.mark.usefixtures(
'init_base_world'
)
class TestFileService:
def test_save_file(self, loop, file_svc, tmp_path):
filename = "test_file.txt"
payload = b'These are the file contents.'
# Save temporary test file
loop.run_until_complete(file_svc.save_file(filename, payload, tmp_path, encrypt=False))
file_location = tmp_path / filename
# Read file contents from saved file
file_contents = open(file_location, "r")
assert os.path.isfile(file_location)
assert payload.decode("utf-8") == file_contents.read()
def test_create_exfil_sub_directory(self, loop, file_svc):
exfil_dir_name = 'unit-testing-Rocks'
new_dir = loop.run_until_complete(file_svc.create_exfil_sub_directory(exfil_dir_name))
assert os.path.isdir(new_dir)
os.rmdir(new_dir)
def test_read_write_result_file(self, tmpdir, file_svc):
link_id = '12345'
output = 'output testing unit'
# write output data
file_svc.write_result_file(link_id=link_id, output=output, location=tmpdir)
# read output data
output_data = file_svc.read_result_file(link_id=link_id, location=tmpdir)
assert output_data == output
def test_pack_file(self, loop, mocker, tmpdir, file_svc, data_svc):
payload = 'unittestpayload'
payload_content = b'content'
new_payload_content = b'new_content'
packer_name = 'test'
# create temp files
file = tmpdir.join(payload)
file.write(payload_content)
# start mocking up methods
packer = mocker.Mock(return_value=Future())
packer.return_value = packer
packer.pack = AsyncMock(return_value=(payload, new_payload_content))
data_svc.locate = AsyncMock(return_value=[])
module = mocker.Mock()
module.Packer = packer
file_svc.packers[packer_name] = module
file_svc.data_svc = data_svc
file_svc.read_file = AsyncMock(return_value=(payload, payload_content))
file_path, content, display_name = loop.run_until_complete(file_svc.get_file(headers=dict(file='%s:%s' % (packer_name, payload))))
packer.pack.assert_called_once()
assert payload == file_path
assert content == new_payload_content
def test_upload_file(self, loop, file_svc):
upload_dir = loop.run_until_complete(file_svc.create_exfil_sub_directory('test-upload'))
upload_filename = 'uploadedfile.txt'
upload_content = b'this is a test upload file'
loop.run_until_complete(file_svc.save_file(upload_filename, upload_content, upload_dir, encrypt=False))
uploaded_file_path = os.path.join(upload_dir, upload_filename)
assert os.path.isfile(uploaded_file_path)
with open(uploaded_file_path, 'rb') as file:
written_data = file.read()
assert written_data == upload_content
os.remove(uploaded_file_path)
os.rmdir(upload_dir)
def test_encrypt_upload(self, loop, file_svc):
upload_dir = loop.run_until_complete(file_svc.create_exfil_sub_directory('test-encrypted-upload'))
upload_filename = 'encryptedupload.txt'
upload_content = b'this is a test upload file'
loop.run_until_complete(file_svc.save_file(upload_filename, upload_content, upload_dir))
uploaded_file_path = os.path.join(upload_dir, upload_filename)
decrypted_file_path = upload_filename + '_decrypted'
config_to_use = 'conf/default.yml'
with open(config_to_use, encoding='utf-8') as conf:
config = list(yaml.load_all(conf, Loader=yaml.FullLoader))[0]
decrypt(uploaded_file_path, config, output_file=decrypted_file_path)
assert os.path.isfile(decrypted_file_path)
with open(decrypted_file_path, 'rb') as decrypted_file:
decrypted_data = decrypted_file.read()
assert decrypted_data == upload_content
os.remove(uploaded_file_path)
os.remove(decrypted_file_path)
os.rmdir(upload_dir)
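# --- Editor's note (illustration only, not part of the original tests) ---
# test_pack_file above swaps async methods for AsyncMock objects so that code which awaits
# them can still run under loop.run_until_complete. A minimal, self-contained sketch of the
# same idea using the standard-library AsyncMock (Python 3.8+); the names here are made up:
def _example_async_mock_usage(loop):
    from unittest.mock import AsyncMock as StdAsyncMock
    fetch = StdAsyncMock(return_value=b"payload")        # stand-in for an async method
    result = loop.run_until_complete(fetch("some-id"))   # awaiting the mock returns the canned value
    assert result == b"payload"
    fetch.assert_awaited_once_with("some-id")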
| 41.623762
| 138
| 0.697193
|
import os
import pytest
import yaml
from tests import AsyncMock
from asyncio import Future
from app.utility.file_decryptor import decrypt
@pytest.mark.usefixtures(
'init_base_world'
)
class TestFileService:
def test_save_file(self, loop, file_svc, tmp_path):
filename = "test_file.txt"
payload = b'These are the file contents.'
loop.run_until_complete(file_svc.save_file(filename, payload, tmp_path, encrypt=False))
file_location = tmp_path / filename
file_contents = open(file_location, "r")
assert os.path.isfile(file_location)
assert payload.decode("utf-8") == file_contents.read()
def test_create_exfil_sub_directory(self, loop, file_svc):
exfil_dir_name = 'unit-testing-Rocks'
new_dir = loop.run_until_complete(file_svc.create_exfil_sub_directory(exfil_dir_name))
assert os.path.isdir(new_dir)
os.rmdir(new_dir)
def test_read_write_result_file(self, tmpdir, file_svc):
link_id = '12345'
output = 'output testing unit'
file_svc.write_result_file(link_id=link_id, output=output, location=tmpdir)
output_data = file_svc.read_result_file(link_id=link_id, location=tmpdir)
assert output_data == output
def test_pack_file(self, loop, mocker, tmpdir, file_svc, data_svc):
payload = 'unittestpayload'
payload_content = b'content'
new_payload_content = b'new_content'
packer_name = 'test'
file = tmpdir.join(payload)
file.write(payload_content)
packer = mocker.Mock(return_value=Future())
packer.return_value = packer
packer.pack = AsyncMock(return_value=(payload, new_payload_content))
data_svc.locate = AsyncMock(return_value=[])
module = mocker.Mock()
module.Packer = packer
file_svc.packers[packer_name] = module
file_svc.data_svc = data_svc
file_svc.read_file = AsyncMock(return_value=(payload, payload_content))
file_path, content, display_name = loop.run_until_complete(file_svc.get_file(headers=dict(file='%s:%s' % (packer_name, payload))))
packer.pack.assert_called_once()
assert payload == file_path
assert content == new_payload_content
def test_upload_file(self, loop, file_svc):
upload_dir = loop.run_until_complete(file_svc.create_exfil_sub_directory('test-upload'))
upload_filename = 'uploadedfile.txt'
upload_content = b'this is a test upload file'
loop.run_until_complete(file_svc.save_file(upload_filename, upload_content, upload_dir, encrypt=False))
uploaded_file_path = os.path.join(upload_dir, upload_filename)
assert os.path.isfile(uploaded_file_path)
with open(uploaded_file_path, 'rb') as file:
written_data = file.read()
assert written_data == upload_content
os.remove(uploaded_file_path)
os.rmdir(upload_dir)
def test_encrypt_upload(self, loop, file_svc):
upload_dir = loop.run_until_complete(file_svc.create_exfil_sub_directory('test-encrypted-upload'))
upload_filename = 'encryptedupload.txt'
upload_content = b'this is a test upload file'
loop.run_until_complete(file_svc.save_file(upload_filename, upload_content, upload_dir))
uploaded_file_path = os.path.join(upload_dir, upload_filename)
decrypted_file_path = upload_filename + '_decrypted'
config_to_use = 'conf/default.yml'
with open(config_to_use, encoding='utf-8') as conf:
config = list(yaml.load_all(conf, Loader=yaml.FullLoader))[0]
decrypt(uploaded_file_path, config, output_file=decrypted_file_path)
assert os.path.isfile(decrypted_file_path)
with open(decrypted_file_path, 'rb') as decrypted_file:
decrypted_data = decrypted_file.read()
assert decrypted_data == upload_content
os.remove(uploaded_file_path)
os.remove(decrypted_file_path)
os.rmdir(upload_dir)
| true
| true
|
f70ab6303c20b904700391f719907f8e8cc2decf
| 1,225
|
py
|
Python
|
cifrari-kali/cbc_decode.py
|
mfranzil/unitn-reti-avanzate
|
802438239b3b5ff2bdce6e50a60da1c945892def
|
[
"MIT"
] | null | null | null |
cifrari-kali/cbc_decode.py
|
mfranzil/unitn-reti-avanzate
|
802438239b3b5ff2bdce6e50a60da1c945892def
|
[
"MIT"
] | null | null | null |
cifrari-kali/cbc_decode.py
|
mfranzil/unitn-reti-avanzate
|
802438239b3b5ff2bdce6e50a60da1c945892def
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# cbc_decode.py
#
# Minimal program that applies a block cipher to a message in CBC
# (Cipher Block Chaining) mode, where each block is XORed with the
# ciphertext of the previous block before being encrypted.
#
# In our case a block corresponds to one byte, and the encryption
# algorithm is an exclusive OR with a fixed 8-bit key.
#
# Instructions:
#
# - create the file codice.bin as described in cbc_encode.py
# - decrypt the ciphertext with the command:
#   python cbc_decode.py codice.bin 154 decodifica.txt
# - check that decodifica.txt and messaggio.txt are identical.
#
# Warning: this code is for demonstration purposes only.
###################
#
# Import the required packages
#
import sys
######################
#
# Read the input data (ciphertext and key)
#
f = open(sys.argv[1], 'r')
c = f.read()
f.close()
k = int(sys.argv[2])
#########################
#
# Decrypt the ciphertext
#
m = ''
c0 = 0
for i in range(len(c)):
v = ord(c[i])
m = m + chr((v ^ k) ^ c0)
c0 = v
##########################
#
# Write the decrypted message
#
f = open(sys.argv[3], 'w')
f.write(m)
f.close()
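# --- Editor's note (illustration only, not part of the original script) ---
# The loop above inverts the CBC-style encoding: each plaintext byte is recovered as
# (ciphertext_byte XOR key) XOR previous_ciphertext_byte. A tiny self-contained round trip
# with a made-up message (the key 154 matches the example in the header):
def _cbc_xor_round_trip(message="ciao", key=154):
    prev = 0
    cipher = []
    for ch in message:                      # encode: c_i = (m_i XOR c_{i-1}) XOR key
        byte = (ord(ch) ^ prev) ^ key
        cipher.append(byte)
        prev = byte
    prev = 0
    plain = ""
    for byte in cipher:                     # decode: m_i = (c_i XOR key) XOR c_{i-1}
        plain += chr((byte ^ key) ^ prev)
        prev = byte
    return plain == message                 # True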
| 18.560606
| 71
| 0.634286
|
# block cipher applied to a message in CBC (Cipher Block Chaining) mode,
# where each block is XORed with the ciphertext of the previous block
# before being encrypted.
#
# In our case a block corresponds to one byte, and the encryption algorithm
#
# Instructions:
#
# - create the file codice.bin as described in cbc_encode.py
# - decrypt the ciphertext with the command:
#   python cbc_decode.py codice.bin 154 decodifica.txt
# - check that decodifica.txt and messaggio.txt are identical.
#
# Warning: this code is for demonstration purposes only.
###################
#
# Import the required packages
#
import sys
######################
#
# Read the input data (ciphertext and key)
#
f = open(sys.argv[1], 'r')
c = f.read()
f.close()
k = int(sys.argv[2])
#########################
#
# Decrypt the ciphertext
#
m = ''
c0 = 0
for i in range(len(c)):
v = ord(c[i])
m = m + chr((v ^ k) ^ c0)
c0 = v
##########################
#
# Write the decrypted message
#
f = open(sys.argv[3], 'w')
f.write(m)
f.close()
| true
| true
|
f70ab708cedc7d676fb9539fa566730b57172e01
| 3,437
|
py
|
Python
|
uvicorn/_handlers/http.py
|
pacoyang/uvicorn
|
27f76476a14dac68a62dc2998717997607a36197
|
[
"BSD-3-Clause"
] | 1
|
2021-07-05T21:49:51.000Z
|
2021-07-05T21:49:51.000Z
|
uvicorn/_handlers/http.py
|
pacoyang/uvicorn
|
27f76476a14dac68a62dc2998717997607a36197
|
[
"BSD-3-Clause"
] | null | null | null |
uvicorn/_handlers/http.py
|
pacoyang/uvicorn
|
27f76476a14dac68a62dc2998717997607a36197
|
[
"BSD-3-Clause"
] | 1
|
2022-02-03T09:38:16.000Z
|
2022-02-03T09:38:16.000Z
|
import asyncio
from typing import TYPE_CHECKING
from uvicorn.config import Config
if TYPE_CHECKING: # pragma: no cover
from uvicorn.server import ServerState
async def handle_http(
reader: asyncio.StreamReader,
writer: asyncio.StreamWriter,
server_state: "ServerState",
config: Config,
) -> None:
# Run transport/protocol session from streams.
#
# This is a bit fiddly, so let me explain why we do this in the first place.
#
# This was introduced to switch to the asyncio streams API while retaining our
# existing protocols-based code.
#
# The aim was to:
# * Make it easier to support alternative async libraries (all of which expose
# a streams API, rather than anything similar to asyncio's transports and
# protocols) while keeping the change footprint (and risk) at a minimum.
# * Keep a "fast track" for asyncio that's as efficient as possible, by reusing
# our asyncio-optimized protocols-based implementation.
#
# See: https://github.com/encode/uvicorn/issues/169
# See: https://github.com/encode/uvicorn/pull/869
# Use a future to coordinate between the protocol and this handler task.
# https://docs.python.org/3/library/asyncio-protocol.html#connecting-existing-sockets
loop = asyncio.get_event_loop()
connection_lost = loop.create_future()
# Switch the protocol from the stream reader to our own HTTP protocol class.
protocol = config.http_protocol_class( # type: ignore[call-arg, operator]
config=config,
server_state=server_state,
on_connection_lost=lambda: connection_lost.set_result(True),
)
transport = writer.transport
transport.set_protocol(protocol)
# Asyncio stream servers don't `await` handler tasks (like the one we're currently
# running), so we must make sure exceptions that occur in protocols but outside the
# ASGI cycle (e.g. bugs) are properly retrieved and logged.
# Vanilla asyncio handles exceptions properly out-of-the-box, but uvloop doesn't.
# So we need to attach a callback to handle exceptions ourselves for that case.
# (It's not easy to know which loop we're effectively running on, so we attach the
# callback in all cases. In practice it won't be called on vanilla asyncio.)
task = asyncio.current_task()
assert task is not None
@task.add_done_callback
def retrieve_exception(task: asyncio.Task) -> None:
exc = task.exception()
if exc is None:
return
loop.call_exception_handler(
{
"message": "Fatal error in server handler",
"exception": exc,
"transport": transport,
"protocol": protocol,
}
)
# Hang up the connection so the client doesn't wait forever.
transport.close()
# Kick off the HTTP protocol.
protocol.connection_made(transport)
# Pass any data already in the read buffer.
# The assumption here is that we haven't read any data off the stream reader
# yet: all data that the client might have already sent since the connection has
# been established is in the `_buffer`.
data = reader._buffer # type: ignore
if data:
protocol.data_received(data)
# Let the transport run in the background. When closed, this future will complete
# and we'll exit here.
await connection_lost
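# --- Editor's note (illustration only, not part of the original module) ---
# The handler above reuses an asyncio stream's transport but drives it with a custom Protocol.
# A minimal, self-contained sketch of that handoff using only the standard library; the
# EchoProtocol name and the handler below are hypothetical, not uvicorn APIs:
async def _example_stream_to_protocol_handoff(
    reader: asyncio.StreamReader, writer: asyncio.StreamWriter
) -> None:
    loop = asyncio.get_event_loop()
    closed = loop.create_future()

    class EchoProtocol(asyncio.Protocol):
        def connection_made(self, transport):
            self.transport = transport

        def data_received(self, data):
            self.transport.write(data)  # echo bytes straight back

        def connection_lost(self, exc):
            closed.set_result(True)

    protocol = EchoProtocol()
    transport = writer.transport
    transport.set_protocol(protocol)     # swap the stream protocol for our own
    protocol.connection_made(transport)  # kick it off explicitly, as handle_http does above
    await closed                         # keep the handler task alive until disconnect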
| 38.617978
| 89
| 0.686936
|
import asyncio
from typing import TYPE_CHECKING
from uvicorn.config import Config
if TYPE_CHECKING:
    from uvicorn.server import ServerState
async def handle_http(
reader: asyncio.StreamReader,
writer: asyncio.StreamWriter,
server_state: "ServerState",
config: Config,
) -> None:
# protocols) while keeping the change footprint (and risk) at a minimum.
# * Keep a "fast track" for asyncio that's as efficient as possible, by reusing
loop = asyncio.get_event_loop()
connection_lost = loop.create_future()
protocol = config.http_protocol_class(
    config=config,
server_state=server_state,
on_connection_lost=lambda: connection_lost.set_result(True),
)
transport = writer.transport
transport.set_protocol(protocol)
# So we need to attach a callback to handle exceptions ourselves for that case.
# (It's not easy to know which loop we're effectively running on, so we attach the
# callback in all cases. In practice it won't be called on vanilla asyncio.)
task = asyncio.current_task()
assert task is not None
@task.add_done_callback
def retrieve_exception(task: asyncio.Task) -> None:
exc = task.exception()
if exc is None:
return
loop.call_exception_handler(
{
"message": "Fatal error in server handler",
"exception": exc,
"transport": transport,
"protocol": protocol,
}
)
transport.close()
# Kick off the HTTP protocol.
protocol.connection_made(transport)
# Pass any data already in the read buffer.
# The assumption here is that we haven't read any data off the stream reader
data = reader._buffer
if data:
protocol.data_received(data)
await connection_lost
| true
| true
|
f70ab8d503bc1a07f2845052f215ca548687a1c6
| 985
|
py
|
Python
|
axelrod/tests/unit/test_appeaser.py
|
lipingzhu/Zero-determinant
|
6e30aa72358d5dfc3975abe433d0d13cc3a750a1
|
[
"MIT"
] | null | null | null |
axelrod/tests/unit/test_appeaser.py
|
lipingzhu/Zero-determinant
|
6e30aa72358d5dfc3975abe433d0d13cc3a750a1
|
[
"MIT"
] | null | null | null |
axelrod/tests/unit/test_appeaser.py
|
lipingzhu/Zero-determinant
|
6e30aa72358d5dfc3975abe433d0d13cc3a750a1
|
[
"MIT"
] | null | null | null |
"""Test for the appeaser strategy."""
import axelrod
from .test_player import TestPlayer
C, D = axelrod.Actions.C, axelrod.Actions.D
class TestAppeaser(TestPlayer):
name = "Appeaser"
player = axelrod.Appeaser
expected_classifier = {
'memory_depth': float('inf'), # Depends on internal memory.
'stochastic': False,
'makes_use_of': set(),
'inspects_source': False,
'manipulates_source': False,
'manipulates_state': False
}
def test_strategy(self):
"""Starts by cooperating."""
self.first_play_test(C)
def test_effect_of_strategy(self):
P1 = axelrod.Appeaser()
P2 = axelrod.Cooperator()
self.assertEqual(P1.strategy(P2), C)
self.responses_test([C], [C], [C, C, C])
self.responses_test([C, D, C, D], [C, C, D], [D])
self.responses_test([C, D, C, D, C], [C, C, D, D], [C])
self.responses_test([C, D, C, D, C, D], [C, C, D, D, D], [D])
| 26.621622
| 69
| 0.587817
|
import axelrod
from .test_player import TestPlayer
C, D = axelrod.Actions.C, axelrod.Actions.D
class TestAppeaser(TestPlayer):
name = "Appeaser"
player = axelrod.Appeaser
expected_classifier = {
'memory_depth': float('inf'),
'stochastic': False,
'makes_use_of': set(),
'inspects_source': False,
'manipulates_source': False,
'manipulates_state': False
}
def test_strategy(self):
self.first_play_test(C)
def test_effect_of_strategy(self):
P1 = axelrod.Appeaser()
P2 = axelrod.Cooperator()
self.assertEqual(P1.strategy(P2), C)
self.responses_test([C], [C], [C, C, C])
self.responses_test([C, D, C, D], [C, C, D], [D])
self.responses_test([C, D, C, D, C], [C, C, D, D], [C])
self.responses_test([C, D, C, D, C, D], [C, C, D, D, D], [D])
| true
| true
|
f70ab935221e9aefaf57a36d017a5208e50b8892
| 2,327
|
py
|
Python
|
_Figure_S18.py
|
aspuru-guzik-group/routescore
|
3adedbc1d6193751bd1cd0af33395572b35a8e43
|
[
"MIT"
] | 1
|
2021-11-05T00:49:40.000Z
|
2021-11-05T00:49:40.000Z
|
_Figure_S18.py
|
aspuru-guzik-group/routescore
|
3adedbc1d6193751bd1cd0af33395572b35a8e43
|
[
"MIT"
] | null | null | null |
_Figure_S18.py
|
aspuru-guzik-group/routescore
|
3adedbc1d6193751bd1cd0af33395572b35a8e43
|
[
"MIT"
] | 1
|
2021-08-18T02:54:49.000Z
|
2021-08-18T02:54:49.000Z
|
#!/usr/bin/env python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Custom style
plt.style.use('scientific')
# absolute tolerances for chimera
absolutes = np.array([0.67, 1080000, 0.2, 0.15848931924611134])
# load in gryffin runs with Naive score as objective
df_naive = pd.read_pickle('Optimization/runs/gryffin_runs_naive.pkl')
# make the plot
fig, axes = plt.subplots(nrows=4, ncols=1, sharex=True, figsize=(8, 10))
sns.lineplot(x='eval', y='peak_score', data=df_naive, ax=axes[0], label='Naive Score Included')
axes[0].axhline(absolutes[0], ls='--', linewidth=2, c='k', alpha=0.6)
axes[0].fill_between(df_naive['eval'], absolutes[0], np.amin(df_naive['peak_score']), color='#8C9196', alpha=0.25)
axes[0].set_ylim(0.25, 0.9)
axes[0].set_ylabel('Peak score ', fontsize=15)
axes[0].tick_params(labelsize=13)
axes[0].legend(loc='lower right', ncol=1, fontsize=15)
sns.lineplot(x='eval', y='naive_score', data=df_naive, ax=axes[1])
axes[1].set_yscale('log')
axes[1].axhline(absolutes[1], ls='--', linewidth=2, c='k', alpha=0.6)
axes[1].fill_between(df_naive['eval'], absolutes[1], np.amax(df_naive['naive_score']), color='#8C9196', alpha=0.25)
axes[1].set_ylim(np.amin(df_naive['naive_score']), np.amax(df_naive['naive_score']))
axes[1].set_ylabel('Naive score \n$( \$ \cdot (mol \ target)^{-1}$)', fontsize=15)
axes[1].tick_params(labelsize=13)
sns.lineplot(x='eval', y='spectral_overlap', data=df_naive, ax=axes[2])
axes[2].axhline(absolutes[2], ls='--', linewidth=2, c='k', alpha=0.6)
axes[2].fill_between(df_naive['eval'], absolutes[2], np.amax(df_naive['spectral_overlap']), color='#8C9196', alpha=0.25)
axes[2].set_ylim(0., 0.3)
axes[2].set_ylabel('Spectral \noverlap', fontsize=15)
axes[2].tick_params(labelsize=13)
sns.lineplot(x='eval', y='fluo_rate', data=df_naive, ax=axes[3])
axes[3].axhline(absolutes[3], ls='--', linewidth=2, c='k', alpha=0.6)
axes[3].fill_between(df_naive['eval'], absolutes[3], np.amin(df_naive['fluo_rate']), color='#8C9196', alpha=0.25)
axes[3].set_ylim(0., 0.6)
axes[3].set_ylabel('Fluorescence \nrate (ns$^{-1}$)', fontsize=15)
axes[3].tick_params(labelsize=13)
axes[3].set_xlabel('Number of evaluations', fontsize=15)
for ax in axes:
ax.set_xlim(0, 500)
plt.tight_layout()
plt.savefig('Figure_S18.png', dpi=300)
plt.show()
| 39.440678
| 120
| 0.706919
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('scientific')
absolutes = np.array([0.67, 1080000, 0.2, 0.15848931924611134])
df_naive = pd.read_pickle('Optimization/runs/gryffin_runs_naive.pkl')
fig, axes = plt.subplots(nrows=4, ncols=1, sharex=True, figsize=(8, 10))
sns.lineplot(x='eval', y='peak_score', data=df_naive, ax=axes[0], label='Naive Score Included')
axes[0].axhline(absolutes[0], ls='--', linewidth=2, c='k', alpha=0.6)
axes[0].fill_between(df_naive['eval'], absolutes[0], np.amin(df_naive['peak_score']), color='#8C9196', alpha=0.25)
axes[0].set_ylim(0.25, 0.9)
axes[0].set_ylabel('Peak score ', fontsize=15)
axes[0].tick_params(labelsize=13)
axes[0].legend(loc='lower right', ncol=1, fontsize=15)
sns.lineplot(x='eval', y='naive_score', data=df_naive, ax=axes[1])
axes[1].set_yscale('log')
axes[1].axhline(absolutes[1], ls='--', linewidth=2, c='k', alpha=0.6)
axes[1].fill_between(df_naive['eval'], absolutes[1], np.amax(df_naive['naive_score']), color='#8C9196', alpha=0.25)
axes[1].set_ylim(np.amin(df_naive['naive_score']), np.amax(df_naive['naive_score']))
axes[1].set_ylabel('Naive score \n$( \$ \cdot (mol \ target)^{-1}$)', fontsize=15)
axes[1].tick_params(labelsize=13)
sns.lineplot(x='eval', y='spectral_overlap', data=df_naive, ax=axes[2])
axes[2].axhline(absolutes[2], ls='--', linewidth=2, c='k', alpha=0.6)
axes[2].fill_between(df_naive['eval'], absolutes[2], np.amax(df_naive['spectral_overlap']), color='#8C9196', alpha=0.25)
axes[2].set_ylim(0., 0.3)
axes[2].set_ylabel('Spectral \noverlap', fontsize=15)
axes[2].tick_params(labelsize=13)
sns.lineplot(x='eval', y='fluo_rate', data=df_naive, ax=axes[3])
axes[3].axhline(absolutes[3], ls='--', linewidth=2, c='k', alpha=0.6)
axes[3].fill_between(df_naive['eval'], absolutes[3], np.amin(df_naive['fluo_rate']), color='#8C9196', alpha=0.25)
axes[3].set_ylim(0., 0.6)
axes[3].set_ylabel('Fluorescence \nrate (ns$^{-1}$)', fontsize=15)
axes[3].tick_params(labelsize=13)
axes[3].set_xlabel('Number of evaluations', fontsize=15)
for ax in axes:
ax.set_xlim(0, 500)
plt.tight_layout()
plt.savefig('Figure_S18.png', dpi=300)
plt.show()
| true
| true
|
f70ab9f4ce715b85813408d26899167161dfba41
| 205
|
py
|
Python
|
save_df.py
|
ersilia-os/bioassay-db
|
095ceb93e31577085c23105929ccf271b0dcd8f3
|
[
"MIT"
] | null | null | null |
save_df.py
|
ersilia-os/bioassay-db
|
095ceb93e31577085c23105929ccf271b0dcd8f3
|
[
"MIT"
] | null | null | null |
save_df.py
|
ersilia-os/bioassay-db
|
095ceb93e31577085c23105929ccf271b0dcd8f3
|
[
"MIT"
] | null | null | null |
from src.json2df import PubChemBioAssayJsonConverter
c = PubChemBioAssayJsonConverter("./examples", "PUBCHEM400.json")
df = c.get_all_results()
c.save_df(df, "./examples")
c.get_description("./examples")
| 29.285714
| 65
| 0.77561
|
from src.json2df import PubChemBioAssayJsonConverter
c = PubChemBioAssayJsonConverter("./examples", "PUBCHEM400.json")
df = c.get_all_results()
c.save_df(df, "./examples")
c.get_description("./examples")
| true
| true
|
f70aba518e557932c07883c2cc83635918beed14
| 5,378
|
py
|
Python
|
sctransfer/network.py
|
jingshuw/sctransfer
|
380c3f26934c26cd177e63aacf4f3bdcf9a29c47
|
[
"MIT"
] | 4
|
2019-10-22T21:21:14.000Z
|
2022-01-05T01:10:37.000Z
|
sctransfer/network.py
|
jingshuw/sctransfer
|
380c3f26934c26cd177e63aacf4f3bdcf9a29c47
|
[
"MIT"
] | 2
|
2020-03-08T03:27:24.000Z
|
2020-03-23T21:43:27.000Z
|
sctransfer/network.py
|
jingshuw/sctransfer
|
380c3f26934c26cd177e63aacf4f3bdcf9a29c47
|
[
"MIT"
] | null | null | null |
## code simplified from the dca package
import os
import numpy as np
import scanpy.api as sc
import keras
from keras.layers import Input, Dense, Dropout, Activation, BatchNormalization
from keras.models import Model
from keras.objectives import mean_squared_error
from keras import backend as K
import tensorflow as tf
from .loss import NB
from .layers import ConstantDispersionLayer, ColWiseMultLayer
MeanAct = lambda x: tf.clip_by_value(K.exp(x), 1e-5, 1e6)
DispAct = lambda x: tf.clip_by_value(tf.nn.softplus(x), 1e-4, 1e4)
class Autoencoder():
def __init__(self,
input_size,
output_size=None,
hidden_size=(64, 32, 64),
hidden_dropout=0.,
input_dropout=0.,
batchnorm=True,
activation='relu',
init='glorot_uniform',
nonmissing_indicator = None,
debug = False):
self.input_size = input_size
self.output_size = output_size
self.hidden_size = hidden_size
self.hidden_dropout = hidden_dropout
self.input_dropout = input_dropout
self.batchnorm = batchnorm
self.activation = activation
self.init = init
self.loss = None
self.extra_models = {}
self.model = None
self.input_layer = None
self.sf_layer = None
self.debug = debug
self.nonmissing_indicator = nonmissing_indicator
if self.output_size is None:
self.output_size = input_size
if isinstance(self.hidden_dropout, list):
assert len(self.hidden_dropout) == len(self.hidden_size)
else:
self.hidden_dropout = [self.hidden_dropout]*len(self.hidden_size)
def build(self):
self.input_layer = Input(shape=(self.input_size,), name='count')
self.sf_layer = Input(shape=(1,), name='size_factors')
last_hidden = self.input_layer
if self.input_dropout > 0.0:
last_hidden = Dropout(self.input_dropout, name='input_dropout')(last_hidden)
for i, (hid_size, hid_drop) in enumerate(zip(self.hidden_size, self.hidden_dropout)):
center_idx = int(np.floor(len(self.hidden_size) / 2.0))
if i == center_idx:
layer_name = 'center'
stage = 'center' # let downstream know where we are
elif i < center_idx:
layer_name = 'enc%s' % i
stage = 'encoder'
else:
layer_name = 'dec%s' % (i-center_idx)
stage = 'decoder'
last_hidden = Dense(hid_size, activation=None, kernel_initializer=self.init,
name=layer_name)(last_hidden)
if self.batchnorm:
last_hidden = BatchNormalization(center=True, scale=False)(last_hidden)
### TODO: check why scale = False
last_hidden = Activation(self.activation, name='%s_act'%layer_name)(last_hidden)
if hid_drop > 0.0:
last_hidden = Dropout(hid_drop, name='%s_drop'%layer_name)(last_hidden)
self.decoder_output = last_hidden
self.build_output()
def build_output(self):
## For Gaussian loss
self.loss = mean_squared_error
mean = Dense(self.output_size, activation=MeanAct, kernel_initializer=self.init,
name='mean')(self.decoder_output)
output = ColWiseMultLayer(name='output')([mean, self.sf_layer])
# keep unscaled output as an extra model
self.extra_models['mean_norm'] = Model(inputs=self.input_layer, outputs=mean)
self.model = Model(inputs=[self.input_layer, self.sf_layer], outputs=output)
######## ADD WEIGHTS ###########
def load_weights(self, filename):
self.model.load_weights(filename)
def predict(self, adata, colnames=None, dimreduce=True, reconstruct=True, error=True):
res = {}
colnames = adata.var_names.values if colnames is None else colnames
rownames = adata.obs_names.values
# print('Calculating reconstructions...')
res['mean_norm'] = self.extra_models['mean_norm'].predict(adata.X)
return res
class NBConstantDispAutoencoder(Autoencoder):
def build_output(self):
mean = Dense(self.output_size, activation=MeanAct, kernel_initializer=self.init,
name='mean')(self.decoder_output)
# Plug in dispersion parameters via fake dispersion layer
disp = ConstantDispersionLayer(name='dispersion')
mean = disp(mean)
output = ColWiseMultLayer(name='output')([mean, self.sf_layer])
nb = NB(disp.theta_exp, nonmissing_indicator = self.nonmissing_indicator)
self.extra_models['dispersion'] = lambda :K.function([], [nb.theta])([])[0].squeeze()
self.extra_models['mean_norm'] = Model(inputs=self.input_layer, outputs=mean)
self.model = Model(inputs=[self.input_layer, self.sf_layer], outputs=output)
def predict(self, adata, colnames=None, **kwargs):
colnames = adata.var_names.values if colnames is None else colnames
rownames = adata.obs_names.values
res = super().predict(adata, colnames=colnames, **kwargs)
res['dispersion'] = self.extra_models['dispersion']()
return res
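# --- Editor's note (illustration only, not part of the original module) ---
# A minimal usage sketch with made-up sizes: build() wires input -> encoder -> bottleneck ->
# decoder, multiplies the mean output by the per-cell size factors, and exposes the unscaled
# mean through extra_models['mean_norm'].
def _example_build_autoencoder():
    ae = NBConstantDispAutoencoder(input_size=1000, hidden_size=(64, 32, 64))
    ae.build()
    return ae.model  # Keras Model taking [counts, size_factors] and returning the scaled mean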
| 34.254777
| 93
| 0.626255
|
import os
import numpy as np
import scanpy.api as sc
import keras
from keras.layers import Input, Dense, Dropout, Activation, BatchNormalization
from keras.models import Model
from keras.objectives import mean_squared_error
from keras import backend as K
import tensorflow as tf
from .loss import NB
from .layers import ConstantDispersionLayer, ColWiseMultLayer
MeanAct = lambda x: tf.clip_by_value(K.exp(x), 1e-5, 1e6)
DispAct = lambda x: tf.clip_by_value(tf.nn.softplus(x), 1e-4, 1e4)
class Autoencoder():
def __init__(self,
input_size,
output_size=None,
hidden_size=(64, 32, 64),
hidden_dropout=0.,
input_dropout=0.,
batchnorm=True,
activation='relu',
init='glorot_uniform',
nonmissing_indicator = None,
debug = False):
self.input_size = input_size
self.output_size = output_size
self.hidden_size = hidden_size
self.hidden_dropout = hidden_dropout
self.input_dropout = input_dropout
self.batchnorm = batchnorm
self.activation = activation
self.init = init
self.loss = None
self.extra_models = {}
self.model = None
self.input_layer = None
self.sf_layer = None
self.debug = debug
self.nonmissing_indicator = nonmissing_indicator
if self.output_size is None:
self.output_size = input_size
if isinstance(self.hidden_dropout, list):
assert len(self.hidden_dropout) == len(self.hidden_size)
else:
self.hidden_dropout = [self.hidden_dropout]*len(self.hidden_size)
def build(self):
self.input_layer = Input(shape=(self.input_size,), name='count')
self.sf_layer = Input(shape=(1,), name='size_factors')
last_hidden = self.input_layer
if self.input_dropout > 0.0:
last_hidden = Dropout(self.input_dropout, name='input_dropout')(last_hidden)
for i, (hid_size, hid_drop) in enumerate(zip(self.hidden_size, self.hidden_dropout)):
center_idx = int(np.floor(len(self.hidden_size) / 2.0))
if i == center_idx:
layer_name = 'center'
stage = 'center'
elif i < center_idx:
layer_name = 'enc%s' % i
stage = 'encoder'
else:
layer_name = 'dec%s' % (i-center_idx)
stage = 'decoder'
last_hidden = Dense(hid_size, activation=None, kernel_initializer=self.init,
name=layer_name)(last_hidden)
if self.batchnorm:
last_hidden = BatchNormalization(center=True, scale=False)(last_hidden)
last_hidden = Activation(self.activation, name='%s_act'%layer_name)(last_hidden)
if hid_drop > 0.0:
last_hidden = Dropout(hid_drop, name='%s_drop'%layer_name)(last_hidden)
self.decoder_output = last_hidden
self.build_output()
def build_output(self):
self.loss = mean_squared_error
mean = Dense(self.output_size, activation=MeanAct, kernel_initializer=self.init,
name='mean')(self.decoder_output)
output = ColWiseMultLayer(name='output')([mean, self.sf_layer])
self.extra_models['mean_norm'] = Model(inputs=self.input_layer, outputs=mean)
self.model = Model(inputs=[self.input_layer, self.sf_layer], outputs=output)
def load_weights(self, filename):
self.model.load_weights(filename)
def predict(self, adata, colnames=None, dimreduce=True, reconstruct=True, error=True):
res = {}
colnames = adata.var_names.values if colnames is None else colnames
rownames = adata.obs_names.values
res['mean_norm'] = self.extra_models['mean_norm'].predict(adata.X)
return res
class NBConstantDispAutoencoder(Autoencoder):
def build_output(self):
mean = Dense(self.output_size, activation=MeanAct, kernel_initializer=self.init,
name='mean')(self.decoder_output)
disp = ConstantDispersionLayer(name='dispersion')
mean = disp(mean)
output = ColWiseMultLayer(name='output')([mean, self.sf_layer])
nb = NB(disp.theta_exp, nonmissing_indicator = self.nonmissing_indicator)
self.extra_models['dispersion'] = lambda :K.function([], [nb.theta])([])[0].squeeze()
self.extra_models['mean_norm'] = Model(inputs=self.input_layer, outputs=mean)
self.model = Model(inputs=[self.input_layer, self.sf_layer], outputs=output)
def predict(self, adata, colnames=None, **kwargs):
colnames = adata.var_names.values if colnames is None else colnames
rownames = adata.obs_names.values
res = super().predict(adata, colnames=colnames, **kwargs)
res['dispersion'] = self.extra_models['dispersion']()
return res
| true
| true
|
f70aba529a4df60d1cdd1a8cfd66159d60f34dc8
| 4,649
|
py
|
Python
|
seamm_dashboard/routes/admin/forms.py
|
paulsaxe/seamm_dashboard
|
66049c8c58fd34af3bd143157d0138e8fb737f9b
|
[
"BSD-3-Clause"
] | 5
|
2020-04-17T16:34:13.000Z
|
2021-12-09T17:24:01.000Z
|
seamm_dashboard/routes/admin/forms.py
|
paulsaxe/seamm_dashboard
|
66049c8c58fd34af3bd143157d0138e8fb737f9b
|
[
"BSD-3-Clause"
] | 55
|
2020-02-26T20:47:52.000Z
|
2022-03-12T14:22:10.000Z
|
seamm_dashboard/routes/admin/forms.py
|
paulsaxe/seamm_dashboard
|
66049c8c58fd34af3bd143157d0138e8fb737f9b
|
[
"BSD-3-Clause"
] | 4
|
2019-10-15T18:34:14.000Z
|
2022-01-04T20:50:43.000Z
|
from flask_wtf import FlaskForm
from wtforms import (
StringField,
PasswordField,
SubmitField,
SelectMultipleField,
BooleanField,
)
try:
from wtforms.fields import EmailField
except ImportError:
from wtforms.fields.html5 import EmailField
from wtforms.validators import DataRequired, Length, Email, Regexp, EqualTo
from wtforms import ValidationError
from seamm_datastore.database.models import User, Group
def _validate_group(self, field):
if Group.query.filter(Group.name == field.data).first():
raise ValidationError(
f"Group name '{field.data}' already in use. Please pick a different group "
"name."
)
def _validate_user_delete(self, field):
raise ValidationError("Input username does not match user ID.")
def _validate_group_delete(self, field):
raise ValidationError("Input group name does not match group ID.")
def _validate_username(self, field):
if User.query.filter(User.username == field.data).first():
raise ValidationError(
f"Username {field.data} already in use. Please pick a different username"
)
def _validate_email(self, field):
if User.query.filter(User.email == field.data).first():
raise ValidationError(
f"Email address {field.data} already in use. Please pick a different email "
"address."
)
def _password_none_or_usual(self, field):
"""
This validator is for the manage user form. Either the password is not changed
(len 0), or the password is changed and should meet the usual length requirement.
"""
if 0 < len(field.data) < 7:
raise ValidationError("Passwords must be at least 7 characters in length.")
# Common username field
_username = StringField(
"Username",
validators=[
_validate_username,
DataRequired(),
Length(3, 64),
Regexp(
"^[A-Za-z][A-Za-z0-9_.]*$",
0,
"Usernames must have only letters, numbers, dots or " "underscores",
),
],
)
class CreateUsernamePasswordForm(FlaskForm):
"""
A subform for creating a new username and password.
"""
username = _username
password2 = PasswordField("Confirm password", validators=[DataRequired()])
password = PasswordField(
"Password",
validators=[
DataRequired(),
Length(min=7),
EqualTo("password2", message="Passwords must match."),
],
)
class EditUsernamePasswordForm(FlaskForm):
"""
A subform for editing username and password.
"""
username = _username
password = PasswordField(
"Password",
validators=[
_password_none_or_usual,
EqualTo("password2", message="Passwords must match."),
],
)
password2 = PasswordField("Confirm Password")
class ContactInformationForm(FlaskForm):
"""
A form for adding or updating contact information.
"""
first_name = StringField("First Name", validators=[Length(2, 64)])
last_name = StringField("Last Name", validators=[Length(2, 64)])
email = EmailField(
"Email Address",
validators=[
DataRequired(),
Email(),
_validate_email,
],
)
class CreateUserForm(CreateUsernamePasswordForm, ContactInformationForm):
"""
Form for adding or updating a user
"""
roles = SelectMultipleField("User Roles", choices=[])
groups = SelectMultipleField("User Groups", choices=[])
submit = SubmitField("Create New User")
class ManageUserFormAdmin(EditUsernamePasswordForm, ContactInformationForm):
"""
Form for adding or updating a user
"""
roles = SelectMultipleField("User Roles", choices=[])
groups = SelectMultipleField("User Groups", choices=[])
submit = SubmitField("Update User Information")
class EditGroupForm(FlaskForm):
"""
Form for adding or editing a group
"""
group_name = StringField(
"Group Name", validators=[Length(2, 64), DataRequired(), _validate_group]
)
group_members = SelectMultipleField("Group Members", choices=[])
submit = SubmitField("Submit")
class DeleteUserForm(FlaskForm):
"""
Form for deleting a user.
"""
username = _username
confirm = BooleanField("Confirm")
submit = SubmitField("Delete User")
class DeleteGroupForm(FlaskForm):
"""
Form for deleting a group.
"""
group_name = StringField("Group Name", validators=[Length(2, 64), DataRequired()])
confirm = BooleanField("Confirm")
submit = SubmitField("Delete Group")
| 24.860963
| 88
| 0.649817
|
from flask_wtf import FlaskForm
from wtforms import (
StringField,
PasswordField,
SubmitField,
SelectMultipleField,
BooleanField,
)
try:
from wtforms.fields import EmailField
except ImportError:
from wtforms.fields.html5 import EmailField
from wtforms.validators import DataRequired, Length, Email, Regexp, EqualTo
from wtforms import ValidationError
from seamm_datastore.database.models import User, Group
def _validate_group(self, field):
if Group.query.filter(Group.name == field.data).first():
raise ValidationError(
f"Group name '{field.data}' already in use. Please pick a different group "
"name."
)
def _validate_user_delete(self, field):
raise ValidationError("Input username does not match user ID.")
def _validate_group_delete(self, field):
raise ValidationError("Input group name does not match group ID.")
def _validate_username(self, field):
if User.query.filter(User.username == field.data).first():
raise ValidationError(
f"Username {field.data} already in use. Please pick a different username"
)
def _validate_email(self, field):
if User.query.filter(User.email == field.data).first():
raise ValidationError(
f"Email address {field.data} already in use. Please pick a different email "
"address."
)
def _password_none_or_usual(self, field):
if 0 < len(field.data) < 7:
raise ValidationError("Passwords must be at least 7 characters in length.")
_username = StringField(
"Username",
validators=[
_validate_username,
DataRequired(),
Length(3, 64),
Regexp(
"^[A-Za-z][A-Za-z0-9_.]*$",
0,
"Usernames must have only letters, numbers, dots or " "underscores",
),
],
)
class CreateUsernamePasswordForm(FlaskForm):
username = _username
password2 = PasswordField("Confirm password", validators=[DataRequired()])
password = PasswordField(
"Password",
validators=[
DataRequired(),
Length(min=7),
EqualTo("password2", message="Passwords must match."),
],
)
class EditUsernamePasswordForm(FlaskForm):
username = _username
password = PasswordField(
"Password",
validators=[
_password_none_or_usual,
EqualTo("password2", message="Passwords must match."),
],
)
password2 = PasswordField("Confirm Password")
class ContactInformationForm(FlaskForm):
first_name = StringField("First Name", validators=[Length(2, 64)])
last_name = StringField("Last Name", validators=[Length(2, 64)])
email = EmailField(
"Email Address",
validators=[
DataRequired(),
Email(),
_validate_email,
],
)
class CreateUserForm(CreateUsernamePasswordForm, ContactInformationForm):
roles = SelectMultipleField("User Roles", choices=[])
groups = SelectMultipleField("User Groups", choices=[])
submit = SubmitField("Create New User")
class ManageUserFormAdmin(EditUsernamePasswordForm, ContactInformationForm):
roles = SelectMultipleField("User Roles", choices=[])
groups = SelectMultipleField("User Groups", choices=[])
submit = SubmitField("Update User Information")
class EditGroupForm(FlaskForm):
group_name = StringField(
"Group Name", validators=[Length(2, 64), DataRequired(), _validate_group]
)
group_members = SelectMultipleField("Group Members", choices=[])
submit = SubmitField("Submit")
class DeleteUserForm(FlaskForm):
username = _username
confirm = BooleanField("Confirm")
submit = SubmitField("Delete User")
class DeleteGroupForm(FlaskForm):
group_name = StringField("Group Name", validators=[Length(2, 64), DataRequired()])
confirm = BooleanField("Confirm")
submit = SubmitField("Delete Group")
| true
| true
|
f70abbfb7844e4c8dd5a47e9d91ebc2a9a7fe405
| 3,910
|
py
|
Python
|
aiohue/lights.py
|
spasche/aiohue
|
65798ed56f6f123a24a961ac87f604d79a221540
|
[
"Apache-2.0"
] | 27
|
2020-04-15T18:08:49.000Z
|
2022-03-30T10:12:05.000Z
|
aiohue/lights.py
|
spasche/aiohue
|
65798ed56f6f123a24a961ac87f604d79a221540
|
[
"Apache-2.0"
] | 38
|
2020-06-29T20:32:47.000Z
|
2022-03-24T16:23:17.000Z
|
aiohue/lights.py
|
spasche/aiohue
|
65798ed56f6f123a24a961ac87f604d79a221540
|
[
"Apache-2.0"
] | 10
|
2020-05-26T07:34:09.000Z
|
2022-03-29T10:59:39.000Z
|
from collections import namedtuple
from .api import APIItems
# Represents a CIE 1931 XY coordinate pair.
XYPoint = namedtuple("XYPoint", ["x", "y"])
# Represents the Gamut of a light.
GamutType = namedtuple("GamutType", ["red", "green", "blue"])
class Lights(APIItems):
"""Represents Hue Lights.
https://developers.meethue.com/documentation/lights-api
"""
def __init__(self, logger, raw, v2_resources, request):
super().__init__(logger, raw, v2_resources, request, "lights", Light)
class Light:
"""Represents a Hue light."""
ITEM_TYPE = "lights"
def __init__(self, id, raw, v2_resources, request):
self.id = id
self.raw = raw
self._request = request
@property
def uniqueid(self):
return self.raw["uniqueid"]
@property
def manufacturername(self):
return self.raw["manufacturername"]
@property
def modelid(self):
return self.raw["modelid"]
@property
def productname(self):
# productname added in Bridge API 1.24 (published 03/05/2018)
return self.raw.get("productname")
@property
def name(self):
return self.raw["name"]
@property
def state(self):
return self.raw["state"]
@property
def type(self):
return self.raw["type"]
@property
def swversion(self):
"""Software version of the light."""
return self.raw["swversion"]
@property
def swupdatestate(self):
"""Software update state of the light."""
return self.raw.get("swupdate", {}).get("state")
@property
def controlcapabilities(self):
"""Capabilities that the light has to control it."""
return self.raw.get("capabilities", {}).get("control", {})
@property
def colorgamuttype(self):
"""The color gamut type of the light."""
light_spec = self.controlcapabilities
return light_spec.get("colorgamuttype", "None")
@property
def colorgamut(self):
"""The color gamut information of the light."""
try:
light_spec = self.controlcapabilities
gtup = tuple([XYPoint(*x) for x in light_spec["colorgamut"]])
color_gamut = GamutType(*gtup)
except KeyError:
color_gamut = None
return color_gamut
def process_update_event(self, update):
state = dict(self.state)
if color := update.get("color"):
state["xy"] = [color["xy"]["x"], color["xy"]["y"]]
if ct := update.get("color_temperature"):
state["ct"] = ct["mirek"]
if "on" in update:
state["on"] = update["on"]["on"]
if dimming := update.get("dimming"):
state["bri"] = int(dimming["brightness"] / 100 * 254)
state["reachable"] = True
self.raw = {**self.raw, "state": state}
async def set_state(
self,
on=None,
bri=None,
hue=None,
sat=None,
xy=None,
ct=None,
alert=None,
effect=None,
transitiontime=None,
bri_inc=None,
sat_inc=None,
hue_inc=None,
ct_inc=None,
xy_inc=None,
):
"""Change state of a light."""
data = {
key: value
for key, value in {
"on": on,
"bri": bri,
"hue": hue,
"sat": sat,
"xy": xy,
"ct": ct,
"alert": alert,
"effect": effect,
"transitiontime": transitiontime,
"bri_inc": bri_inc,
"sat_inc": sat_inc,
"hue_inc": hue_inc,
"ct_inc": ct_inc,
"xy_inc": xy_inc,
}.items()
if value is not None
}
await self._request("put", "lights/{}/state".format(self.id), json=data)
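# --- Editor's note (illustration only, not part of the original module) ---
# process_update_event above folds v2-style event payloads into the v1 "state" dict; for
# example, a 50% brightness arrives as {"dimming": {"brightness": 50}} and is stored as
# bri = int(50 / 100 * 254) = 127. A self-contained sketch with made-up values:
def _example_process_update_event():
    light = Light("1", {"state": {"on": False, "bri": 254}}, None, None)
    light.process_update_event({"on": {"on": True}, "dimming": {"brightness": 50}})
    return light.state  # {'on': True, 'bri': 127, 'reachable': True}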
| 25.38961
| 80
| 0.541944
|
from collections import namedtuple
from .api import APIItems
XYPoint = namedtuple("XYPoint", ["x", "y"])
GamutType = namedtuple("GamutType", ["red", "green", "blue"])
class Lights(APIItems):
def __init__(self, logger, raw, v2_resources, request):
super().__init__(logger, raw, v2_resources, request, "lights", Light)
class Light:
ITEM_TYPE = "lights"
def __init__(self, id, raw, v2_resources, request):
self.id = id
self.raw = raw
self._request = request
@property
def uniqueid(self):
return self.raw["uniqueid"]
@property
def manufacturername(self):
return self.raw["manufacturername"]
@property
def modelid(self):
return self.raw["modelid"]
@property
def productname(self):
return self.raw.get("productname")
@property
def name(self):
return self.raw["name"]
@property
def state(self):
return self.raw["state"]
@property
def type(self):
return self.raw["type"]
@property
def swversion(self):
return self.raw["swversion"]
@property
def swupdatestate(self):
return self.raw.get("swupdate", {}).get("state")
@property
def controlcapabilities(self):
return self.raw.get("capabilities", {}).get("control", {})
@property
def colorgamuttype(self):
light_spec = self.controlcapabilities
return light_spec.get("colorgamuttype", "None")
@property
def colorgamut(self):
try:
light_spec = self.controlcapabilities
gtup = tuple([XYPoint(*x) for x in light_spec["colorgamut"]])
color_gamut = GamutType(*gtup)
except KeyError:
color_gamut = None
return color_gamut
def process_update_event(self, update):
state = dict(self.state)
if color := update.get("color"):
state["xy"] = [color["xy"]["x"], color["xy"]["y"]]
if ct := update.get("color_temperature"):
state["ct"] = ct["mirek"]
if "on" in update:
state["on"] = update["on"]["on"]
if dimming := update.get("dimming"):
state["bri"] = int(dimming["brightness"] / 100 * 254)
state["reachable"] = True
self.raw = {**self.raw, "state": state}
async def set_state(
self,
on=None,
bri=None,
hue=None,
sat=None,
xy=None,
ct=None,
alert=None,
effect=None,
transitiontime=None,
bri_inc=None,
sat_inc=None,
hue_inc=None,
ct_inc=None,
xy_inc=None,
):
data = {
key: value
for key, value in {
"on": on,
"bri": bri,
"hue": hue,
"sat": sat,
"xy": xy,
"ct": ct,
"alert": alert,
"effect": effect,
"transitiontime": transitiontime,
"bri_inc": bri_inc,
"sat_inc": sat_inc,
"hue_inc": hue_inc,
"ct_inc": ct_inc,
"xy_inc": xy_inc,
}.items()
if value is not None
}
await self._request("put", "lights/{}/state".format(self.id), json=data)
| true
| true
|
f70abd05078edcca034b41322d960ead8ee31528
| 44
|
py
|
Python
|
python-tkinter-card-game/python-tkinter-card-game/main (i.e. start here).py
|
lull-the-unknown/python-tkinter-card-game
|
bc7a1e62e8d6e29017af505dcab4dda2bd73be52
|
[
"Unlicense"
] | 2
|
2019-10-13T23:36:06.000Z
|
2020-04-08T12:40:30.000Z
|
python-tkinter-card-game/python-tkinter-card-game/main (i.e. start here).py
|
lull-the-unknown/python-tkinter-card-game
|
bc7a1e62e8d6e29017af505dcab4dda2bd73be52
|
[
"Unlicense"
] | null | null | null |
python-tkinter-card-game/python-tkinter-card-game/main (i.e. start here).py
|
lull-the-unknown/python-tkinter-card-game
|
bc7a1e62e8d6e29017af505dcab4dda2bd73be52
|
[
"Unlicense"
] | 2
|
2020-04-10T13:05:53.000Z
|
2020-07-01T08:15:41.000Z
|
import app
gameApp = app.app()
gameApp.Run()
| 14.666667
| 19
| 0.727273
|
import app
gameApp = app.app()
gameApp.Run()
| true
| true
|
f70abd71a15afc4c6c020151c56ad1c6df1a0f50
| 14,347
|
py
|
Python
|
lux/vis/Vis.py
|
thyneb19/lux
|
07a282d6a5f60c05942d866fa6f33636c3428abc
|
[
"Apache-2.0"
] | null | null | null |
lux/vis/Vis.py
|
thyneb19/lux
|
07a282d6a5f60c05942d866fa6f33636c3428abc
|
[
"Apache-2.0"
] | null | null | null |
lux/vis/Vis.py
|
thyneb19/lux
|
07a282d6a5f60c05942d866fa6f33636c3428abc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019-2020 The Lux Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Callable, Union
from lux.vis.Clause import Clause
from lux.utils.utils import check_import_lux_widget
import lux
import warnings
class Vis:
"""
Vis Object represents a collection of fully fleshed out specifications required for data fetching and visualization.
"""
def __init__(self, intent, source=None, title="", score=0.0):
self._intent = intent # user's original intent to Vis
self._inferred_intent = intent # re-written, expanded version of user's original intent
self._source = source # original data attached to the Vis
self._vis_data = None # processed data for Vis (e.g., selected, aggregated, binned)
self._code = None
self._mark = ""
self._min_max = {}
self._postbin = None
self.title = title
self.score = score
self.refresh_source(self._source)
def __repr__(self):
all_clause = all([isinstance(unit, lux.Clause) for unit in self._inferred_intent])
if all_clause:
filter_intents = None
channels, additional_channels = [], []
for clause in self._inferred_intent:
if hasattr(clause, "value"):
if clause.value != "":
filter_intents = clause
if hasattr(clause, "attribute"):
if clause.attribute != "":
if clause.aggregation != "" and clause.aggregation is not None:
attribute = f"{clause._aggregation_name.upper()}({clause.attribute})"
elif clause.bin_size > 0:
attribute = f"BIN({clause.attribute})"
else:
attribute = clause.attribute
if clause.channel == "x":
channels.insert(0, [clause.channel, attribute])
elif clause.channel == "y":
channels.insert(1, [clause.channel, attribute])
elif clause.channel != "":
additional_channels.append([clause.channel, attribute])
channels.extend(additional_channels)
str_channels = ""
for channel in channels:
str_channels += f"{channel[0]}: {channel[1]}, "
if filter_intents:
return f"<Vis ({str_channels[:-2]} -- [{filter_intents.attribute}{filter_intents.filter_op}{filter_intents.value}]) mark: {self._mark}, score: {self.score} >"
else:
return f"<Vis ({str_channels[:-2]}) mark: {self._mark}, score: {self.score} >"
else:
# When Vis not compiled (e.g., when self._source not populated), print original intent
return f"<Vis ({str(self._intent)}) mark: {self._mark}, score: {self.score} >"
@property
def data(self):
return self._vis_data
@property
def code(self):
return self._code
@property
def mark(self):
return self._mark
@property
def min_max(self):
return self._min_max
@property
def intent(self):
return self._intent
@intent.setter
def intent(self, intent: List[Clause]) -> None:
self.set_intent(intent)
def set_intent(self, intent: List[Clause]) -> None:
"""
Sets the intent of the Vis and refresh the source based on the new intent
Parameters
----------
intent : List[Clause]
Query specifying the desired VisList
"""
self._intent = intent
self.refresh_source(self._source)
def _repr_html_(self):
from IPython.display import display
check_import_lux_widget()
import luxwidget
if self.data is None:
raise Exception(
"No data is populated in Vis. In order to generate data required for the vis, use the 'refresh_source' function to populate the Vis with a data source (e.g., vis.refresh_source(df))."
)
else:
from lux.core.frame import LuxDataFrame
widget = luxwidget.LuxWidget(
currentVis=LuxDataFrame.current_vis_to_JSON([self]),
recommendations=[],
intent="",
message="",
)
display(widget)
def get_attr_by_attr_name(self, attr_name):
return list(filter(lambda x: x.attribute == attr_name, self._inferred_intent))
def get_attr_by_channel(self, channel):
spec_obj = list(
filter(
lambda x: x.channel == channel and x.value == "" if hasattr(x, "channel") else False,
self._inferred_intent,
)
)
return spec_obj
def get_attr_by_data_model(self, dmodel, exclude_record=False):
if exclude_record:
return list(
filter(
lambda x: x.data_model == dmodel and x.value == ""
if x.attribute != "Record" and hasattr(x, "data_model")
else False,
self._inferred_intent,
)
)
else:
return list(
filter(
lambda x: x.data_model == dmodel and x.value == ""
if hasattr(x, "data_model")
else False,
self._inferred_intent,
)
)
def get_attr_by_data_type(self, dtype):
return list(
filter(
lambda x: x.data_type == dtype and x.value == "" if hasattr(x, "data_type") else False,
self._inferred_intent,
)
)
def remove_filter_from_spec(self, value):
new_intent = list(filter(lambda x: x.value != value, self._inferred_intent))
self.set_intent(new_intent)
def remove_column_from_spec(self, attribute, remove_first: bool = False):
"""
Removes an attribute from the Vis's clause
Parameters
----------
attribute : str
attribute to be removed
remove_first : bool, optional
Boolean flag to determine whether to remove all instances of the attribute or only one (first) instance, by default False
"""
if not remove_first:
new_inferred = list(filter(lambda x: x.attribute != attribute, self._inferred_intent))
self._inferred_intent = new_inferred
self._intent = new_inferred
elif remove_first:
new_inferred = []
skip_check = False
for i in range(0, len(self._inferred_intent)):
if self._inferred_intent[i].value == "": # clause is type attribute
column_spec = []
column_names = self._inferred_intent[i].attribute
# if only one variable in a column, columnName results in a string and not a list so
# you need to differentiate the cases
if isinstance(column_names, list):
for column in column_names:
if (column != attribute) or skip_check:
column_spec.append(column)
elif remove_first:
remove_first = True
new_inferred.append(Clause(column_spec))
else:
if column_names != attribute or skip_check:
new_inferred.append(Clause(attribute=column_names))
elif remove_first:
skip_check = True
else:
new_inferred.append(self._inferred_intent[i])
self._intent = new_inferred
self._inferred_intent = new_inferred
def to_Altair(self, standalone=False) -> str:
"""
Generate minimal Altair code to visualize the Vis
Parameters
----------
standalone : bool, optional
Flag to determine if outputted code uses user-defined variable names or can be run independently, by default False
Returns
-------
str
String version of the Altair code. Need to print out the string to apply formatting.
"""
from lux.vislib.altair.AltairRenderer import AltairRenderer
renderer = AltairRenderer(output_type="Altair")
self._code = renderer.create_vis(self, standalone)
return self._code
def to_matplotlib(self) -> str:
"""
Generate minimal Matplotlib code to visualize the Vis
Returns
-------
str
String version of the Matplotlib code. Need to print out the string to apply formatting.
"""
from lux.vislib.matplotlib.MatplotlibRenderer import MatplotlibRenderer
renderer = MatplotlibRenderer(output_type="matplotlib")
self._code = renderer.create_vis(self)
return self._code
def to_matplotlib_code(self) -> str:
"""
Generate minimal Matplotlib code to visualize the Vis
Returns
-------
str
String version of the Matplotlib code. Need to print out the string to apply formatting.
"""
from lux.vislib.matplotlib.MatplotlibRenderer import MatplotlibRenderer
renderer = MatplotlibRenderer(output_type="matplotlib_code")
self._code = renderer.create_vis(self)
return self._code
def to_VegaLite(self, prettyOutput=True) -> Union[dict, str]:
"""
Generate minimal Vega-Lite code to visualize the Vis
Returns
-------
Union[dict,str]
String or Dictionary of the VegaLite JSON specification
"""
import json
from lux.vislib.altair.AltairRenderer import AltairRenderer
renderer = AltairRenderer(output_type="VegaLite")
self._code = renderer.create_vis(self)
if prettyOutput:
return (
"** Remove this comment -- Copy Text Below to Vega Editor(vega.github.io/editor) to visualize and edit **\n"
+ json.dumps(self._code, indent=2)
)
else:
return self._code
def to_code(self, language="vegalite", **kwargs):
"""
Export Vis object to code specification
Parameters
----------
language : str, optional
choice of target language to produce the visualization code in, by default "vegalite"
Returns
-------
spec:
visualization specification corresponding to the Vis object
"""
if language == "vegalite":
return self.to_VegaLite(**kwargs)
elif language == "altair":
return self.to_Altair(**kwargs)
elif language == "matplotlib":
return self.to_matplotlib()
elif language == "matplotlib_code":
return self.to_matplotlib_code()
else:
warnings.warn(
"Unsupported plotting backend. Lux currently only support 'altair', 'vegalite', or 'matplotlib'",
stacklevel=2,
)
def refresh_source(self, ldf): # -> Vis:
"""
Loading the source data into the Vis by instantiating the specification and
populating the Vis based on the source data, effectively "materializing" the Vis.
Parameters
----------
ldf : LuxDataframe
Input Dataframe to be attached to the Vis
Returns
-------
Vis
Complete Vis with fully-specified fields
See Also
--------
lux.Vis.VisList.refresh_source
Note
----
Function derives a new _inferred_intent by instantiating the intent specification on the new data
"""
if ldf is not None:
from lux.processor.Parser import Parser
from lux.processor.Validator import Validator
from lux.processor.Compiler import Compiler
self.check_not_vislist_intent()
ldf.maintain_metadata()
self._source = ldf
self._inferred_intent = Parser.parse(self._intent)
Validator.validate_intent(self._inferred_intent, ldf)
vlist = [Compiler.compile_vis(ldf, self)]
lux.config.executor.execute(vlist, ldf)
# Copying properties over since we can not redefine `self` within class function
if len(vlist) > 0:
vis = vlist[0]
self.title = vis.title
self._mark = vis._mark
self._inferred_intent = vis._inferred_intent
self._vis_data = vis.data
self._min_max = vis._min_max
self._postbin = vis._postbin
Compiler.compile_vis(ldf, self)
lux.config.executor.execute([self], ldf)
def check_not_vislist_intent(self):
syntaxMsg = (
"The intent that you specified corresponds to more than one visualization. "
"Please replace the Vis constructor with VisList to generate a list of visualizations. "
"For more information, see: https://lux-api.readthedocs.io/en/latest/source/guide/vis.html#working-with-collections-of-visualization-with-vislist"
)
for i in range(len(self._intent)):
clause = self._intent[i]
if isinstance(clause, str):
if "|" in clause or "?" in clause:
raise TypeError(syntaxMsg)
if isinstance(clause, list):
raise TypeError(syntaxMsg)
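# --- Hedged illustration (not part of the original module) ---
# check_not_vislist_intent() above rejects any clause that encodes several
# visualizations at once: "|" alternatives, "?" wildcards, or a nested list.
# A minimal standalone sketch of the same rule, using a hypothetical helper
# name (_looks_like_vislist_intent is not part of lux):
def _looks_like_vislist_intent(intent):
    for clause in intent:
        if isinstance(clause, str) and ("|" in clause or "?" in clause):
            return True
        if isinstance(clause, list):
            return True
    return False

# _looks_like_vislist_intent(["Horsepower", "Origin=USA|Japan"])  -> True  (needs VisList)
# _looks_like_vislist_intent(["Horsepower", "MilesPerGal"])       -> False (Vis is fine)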
| 37.45953
| 199
| 0.572454
|
from typing import List, Callable, Union
from lux.vis.Clause import Clause
from lux.utils.utils import check_import_lux_widget
import lux
import warnings
class Vis:
def __init__(self, intent, source=None, title="", score=0.0):
        self._intent = intent
        self._inferred_intent = intent
        self._source = source
        self._vis_data = None
        self._code = None
self._mark = ""
self._min_max = {}
self._postbin = None
self.title = title
self.score = score
self.refresh_source(self._source)
def __repr__(self):
all_clause = all([isinstance(unit, lux.Clause) for unit in self._inferred_intent])
if all_clause:
filter_intents = None
channels, additional_channels = [], []
for clause in self._inferred_intent:
if hasattr(clause, "value"):
if clause.value != "":
filter_intents = clause
if hasattr(clause, "attribute"):
if clause.attribute != "":
if clause.aggregation != "" and clause.aggregation is not None:
attribute = f"{clause._aggregation_name.upper()}({clause.attribute})"
elif clause.bin_size > 0:
attribute = f"BIN({clause.attribute})"
else:
attribute = clause.attribute
if clause.channel == "x":
channels.insert(0, [clause.channel, attribute])
elif clause.channel == "y":
channels.insert(1, [clause.channel, attribute])
elif clause.channel != "":
additional_channels.append([clause.channel, attribute])
channels.extend(additional_channels)
str_channels = ""
for channel in channels:
str_channels += f"{channel[0]}: {channel[1]}, "
if filter_intents:
return f"<Vis ({str_channels[:-2]} -- [{filter_intents.attribute}{filter_intents.filter_op}{filter_intents.value}]) mark: {self._mark}, score: {self.score} >"
else:
return f"<Vis ({str_channels[:-2]}) mark: {self._mark}, score: {self.score} >"
else:
return f"<Vis ({str(self._intent)}) mark: {self._mark}, score: {self.score} >"
@property
def data(self):
return self._vis_data
@property
def code(self):
return self._code
@property
def mark(self):
return self._mark
@property
def min_max(self):
return self._min_max
@property
def intent(self):
return self._intent
@intent.setter
def intent(self, intent: List[Clause]) -> None:
self.set_intent(intent)
def set_intent(self, intent: List[Clause]) -> None:
self._intent = intent
self.refresh_source(self._source)
def _repr_html_(self):
from IPython.display import display
check_import_lux_widget()
import luxwidget
if self.data is None:
raise Exception(
"No data is populated in Vis. In order to generate data required for the vis, use the 'refresh_source' function to populate the Vis with a data source (e.g., vis.refresh_source(df))."
)
else:
from lux.core.frame import LuxDataFrame
widget = luxwidget.LuxWidget(
currentVis=LuxDataFrame.current_vis_to_JSON([self]),
recommendations=[],
intent="",
message="",
)
display(widget)
def get_attr_by_attr_name(self, attr_name):
return list(filter(lambda x: x.attribute == attr_name, self._inferred_intent))
def get_attr_by_channel(self, channel):
spec_obj = list(
filter(
lambda x: x.channel == channel and x.value == "" if hasattr(x, "channel") else False,
self._inferred_intent,
)
)
return spec_obj
def get_attr_by_data_model(self, dmodel, exclude_record=False):
if exclude_record:
return list(
filter(
lambda x: x.data_model == dmodel and x.value == ""
if x.attribute != "Record" and hasattr(x, "data_model")
else False,
self._inferred_intent,
)
)
else:
return list(
filter(
lambda x: x.data_model == dmodel and x.value == ""
if hasattr(x, "data_model")
else False,
self._inferred_intent,
)
)
def get_attr_by_data_type(self, dtype):
return list(
filter(
lambda x: x.data_type == dtype and x.value == "" if hasattr(x, "data_type") else False,
self._inferred_intent,
)
)
def remove_filter_from_spec(self, value):
new_intent = list(filter(lambda x: x.value != value, self._inferred_intent))
self.set_intent(new_intent)
def remove_column_from_spec(self, attribute, remove_first: bool = False):
if not remove_first:
new_inferred = list(filter(lambda x: x.attribute != attribute, self._inferred_intent))
self._inferred_intent = new_inferred
self._intent = new_inferred
elif remove_first:
new_inferred = []
skip_check = False
for i in range(0, len(self._inferred_intent)):
                if self._inferred_intent[i].value == "":
                    column_spec = []
column_names = self._inferred_intent[i].attribute
if isinstance(column_names, list):
for column in column_names:
if (column != attribute) or skip_check:
column_spec.append(column)
elif remove_first:
remove_first = True
new_inferred.append(Clause(column_spec))
else:
if column_names != attribute or skip_check:
new_inferred.append(Clause(attribute=column_names))
elif remove_first:
skip_check = True
else:
new_inferred.append(self._inferred_intent[i])
self._intent = new_inferred
self._inferred_intent = new_inferred
def to_Altair(self, standalone=False) -> str:
from lux.vislib.altair.AltairRenderer import AltairRenderer
renderer = AltairRenderer(output_type="Altair")
self._code = renderer.create_vis(self, standalone)
return self._code
def to_matplotlib(self) -> str:
from lux.vislib.matplotlib.MatplotlibRenderer import MatplotlibRenderer
renderer = MatplotlibRenderer(output_type="matplotlib")
self._code = renderer.create_vis(self)
return self._code
def to_matplotlib_code(self) -> str:
from lux.vislib.matplotlib.MatplotlibRenderer import MatplotlibRenderer
renderer = MatplotlibRenderer(output_type="matplotlib_code")
self._code = renderer.create_vis(self)
return self._code
def to_VegaLite(self, prettyOutput=True) -> Union[dict, str]:
import json
from lux.vislib.altair.AltairRenderer import AltairRenderer
renderer = AltairRenderer(output_type="VegaLite")
self._code = renderer.create_vis(self)
if prettyOutput:
return (
"** Remove this comment -- Copy Text Below to Vega Editor(vega.github.io/editor) to visualize and edit **\n"
+ json.dumps(self._code, indent=2)
)
else:
return self._code
def to_code(self, language="vegalite", **kwargs):
if language == "vegalite":
return self.to_VegaLite(**kwargs)
elif language == "altair":
return self.to_Altair(**kwargs)
elif language == "matplotlib":
return self.to_matplotlib()
elif language == "matplotlib_code":
return self.to_matplotlib_code()
else:
warnings.warn(
"Unsupported plotting backend. Lux currently only support 'altair', 'vegalite', or 'matplotlib'",
stacklevel=2,
)
    def refresh_source(self, ldf):
        if ldf is not None:
from lux.processor.Parser import Parser
from lux.processor.Validator import Validator
from lux.processor.Compiler import Compiler
self.check_not_vislist_intent()
ldf.maintain_metadata()
self._source = ldf
self._inferred_intent = Parser.parse(self._intent)
Validator.validate_intent(self._inferred_intent, ldf)
vlist = [Compiler.compile_vis(ldf, self)]
lux.config.executor.execute(vlist, ldf)
if len(vlist) > 0:
vis = vlist[0]
self.title = vis.title
self._mark = vis._mark
self._inferred_intent = vis._inferred_intent
self._vis_data = vis.data
self._min_max = vis._min_max
self._postbin = vis._postbin
Compiler.compile_vis(ldf, self)
lux.config.executor.execute([self], ldf)
def check_not_vislist_intent(self):
syntaxMsg = (
"The intent that you specified corresponds to more than one visualization. "
"Please replace the Vis constructor with VisList to generate a list of visualizations. "
"For more information, see: https://lux-api.readthedocs.io/en/latest/source/guide/vis.html#working-with-collections-of-visualization-with-vislist"
)
for i in range(len(self._intent)):
clause = self._intent[i]
if isinstance(clause, str):
if "|" in clause or "?" in clause:
raise TypeError(syntaxMsg)
if isinstance(clause, list):
raise TypeError(syntaxMsg)
| true
| true
|
f70abe164281395130c469a9d83bf0c6bc202f8f
| 1,855
|
py
|
Python
|
stubs/m5stack_flowui-v1_4_0-beta/flowlib/m5mqtt.py
|
mattytrentini/micropython-stubs
|
4d596273823b69e9e5bcf5fa67f249c374ee0bbc
|
[
"MIT"
] | null | null | null |
stubs/m5stack_flowui-v1_4_0-beta/flowlib/m5mqtt.py
|
mattytrentini/micropython-stubs
|
4d596273823b69e9e5bcf5fa67f249c374ee0bbc
|
[
"MIT"
] | null | null | null |
stubs/m5stack_flowui-v1_4_0-beta/flowlib/m5mqtt.py
|
mattytrentini/micropython-stubs
|
4d596273823b69e9e5bcf5fa67f249c374ee0bbc
|
[
"MIT"
] | null | null | null |
"""
Module: 'flowlib.m5mqtt' on M5 FlowUI v1.4.0-beta
"""
# MCU: (sysname='esp32', nodename='esp32', release='1.11.0', version='v1.11-284-g5d8e1c867 on 2019-08-30', machine='ESP32 module with ESP32')
# Stubber: 1.3.1 - updated
from typing import Any
class M5mqtt:
""""""
def _daemonTask(self, *argv) -> Any:
pass
def _msg_deal(self, *argv) -> Any:
pass
def _on_data(self, *argv) -> Any:
pass
def on_connect(self, *argv) -> Any:
pass
def publish(self, *argv) -> Any:
pass
def start(self, *argv) -> Any:
pass
def subscribe(self, *argv) -> Any:
pass
def unsubscribe(self, *argv) -> Any:
pass
class MQTTClient:
""""""
def _clean_sock_buffer(self, *argv) -> Any:
pass
def _recv_len(self, *argv) -> Any:
pass
def _send_str(self, *argv) -> Any:
pass
def check_msg(self, *argv) -> Any:
pass
def connect(self, *argv) -> Any:
pass
def disconnect(self, *argv) -> Any:
pass
def lock_msg_rec(self, *argv) -> Any:
pass
def ping(self, *argv) -> Any:
pass
def publish(self, *argv) -> Any:
pass
def set_block(self, *argv) -> Any:
pass
def set_callback(self, *argv) -> Any:
pass
def set_last_will(self, *argv) -> Any:
pass
def socket_connect(self, *argv) -> Any:
pass
def subscribe(self, *argv) -> Any:
pass
def topic_get(self, *argv) -> Any:
pass
def topic_msg_get(self, *argv) -> Any:
pass
def unlock_msg_rec(self, *argv) -> Any:
pass
def wait_msg(self, *argv) -> Any:
pass
_thread = None
def autoConnect():
pass
lcd = None
m5base = None
machine = None
def reconnect():
pass
time = None
wlan_sta = None
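# --- Hedged usage sketch (not part of the stub) ---
# The stub only records that M5mqtt exposes start/publish/subscribe; every
# method is captured as *argv, so the real parameter names and order are not
# visible here. The lines below mirror the usual UIFlow pattern but should be
# treated as assumptions, not as signatures derived from this file:
#
#     m5mqtt = M5mqtt('client_id', 'broker.example.com', 1883, '', '', 300)
#     m5mqtt.subscribe('topic/in', on_message_callback)
#     m5mqtt.start()
#     m5mqtt.publish('topic/out', 'hello')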
| 16.415929
| 141
| 0.547709
|
from typing import Any
class M5mqtt:
def _daemonTask(self, *argv) -> Any:
pass
def _msg_deal(self, *argv) -> Any:
pass
def _on_data(self, *argv) -> Any:
pass
def on_connect(self, *argv) -> Any:
pass
def publish(self, *argv) -> Any:
pass
def start(self, *argv) -> Any:
pass
def subscribe(self, *argv) -> Any:
pass
def unsubscribe(self, *argv) -> Any:
pass
class MQTTClient:
def _clean_sock_buffer(self, *argv) -> Any:
pass
def _recv_len(self, *argv) -> Any:
pass
def _send_str(self, *argv) -> Any:
pass
def check_msg(self, *argv) -> Any:
pass
def connect(self, *argv) -> Any:
pass
def disconnect(self, *argv) -> Any:
pass
def lock_msg_rec(self, *argv) -> Any:
pass
def ping(self, *argv) -> Any:
pass
def publish(self, *argv) -> Any:
pass
def set_block(self, *argv) -> Any:
pass
def set_callback(self, *argv) -> Any:
pass
def set_last_will(self, *argv) -> Any:
pass
def socket_connect(self, *argv) -> Any:
pass
def subscribe(self, *argv) -> Any:
pass
def topic_get(self, *argv) -> Any:
pass
def topic_msg_get(self, *argv) -> Any:
pass
def unlock_msg_rec(self, *argv) -> Any:
pass
def wait_msg(self, *argv) -> Any:
pass
_thread = None
def autoConnect():
pass
lcd = None
m5base = None
machine = None
def reconnect():
pass
time = None
wlan_sta = None
| true
| true
|
f70abea11c4ac66c4b8b1ef3a65628f1877a4566
| 3,940
|
py
|
Python
|
guilded/ext/commands/context.py
|
DakshG07/KOOLIOMAN
|
84d851f9d88e99e884dc6cc38a5638af0c29da9c
|
[
"MIT"
] | null | null | null |
guilded/ext/commands/context.py
|
DakshG07/KOOLIOMAN
|
84d851f9d88e99e884dc6cc38a5638af0c29da9c
|
[
"MIT"
] | null | null | null |
guilded/ext/commands/context.py
|
DakshG07/KOOLIOMAN
|
84d851f9d88e99e884dc6cc38a5638af0c29da9c
|
[
"MIT"
] | null | null | null |
"""
MIT License
Copyright (c) 2020-present shay (shayypy)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
------------------------------------------------------------------------------
This project includes code from https://github.com/Rapptz/discord.py, which is
available under the MIT license:
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import guilded.abc
class Context(guilded.abc.Messageable):
def __init__(self, **attrs):
self.message = attrs.pop('message', None)
self._state = attrs.pop('state', self.message._state)
self.bot = attrs.pop('bot', None)
self.args = attrs.pop('args', [])
self.kwargs = attrs.pop('kwargs', {})
self.prefix = attrs.pop('prefix')
self.command = attrs.pop('command', None)
self.view = attrs.pop('view', None)
self.invoked_with = attrs.pop('invoked_with', None)
self.invoked_parents = attrs.pop('invoked_parents', [])
self.invoked_subcommand = attrs.pop('invoked_subcommand', None)
self.subcommand_passed = attrs.pop('subcommand_passed', None)
self.command_failed = attrs.pop('command_failed', False)
@property
def valid(self):
return self.prefix is not None and self.command is not None
@property
def cog(self):
if self.command is None:
return None
return self.command.cog
@property
def channel(self):
return self.message.channel
@property
def _channel_id(self):
return self.message.channel_id
@property
def team(self):
return self.message.team
@property
def guild(self):
return self.team
@property
def author(self):
return self.message.author
@property
def me(self):
return self.team.me if self.team else self.bot.user
#def reply(self, *content, **kwargs):
# return self.message.reply(*content, **kwargs)
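    # --- Hedged usage sketch (not part of the original module) ---
    # Context is normally built by the commands framework from a parsed
    # message; the keyword names below mirror the attrs.pop() calls in
    # __init__. Behaviour that follows directly from the properties above:
    #
    #     ctx = Context(message=message, prefix='!', bot=bot)  # no command resolved
    #     ctx.valid   # False: prefix is set but ctx.command is still None
    #     ctx.guild   # alias for ctx.team
    #     ctx.me      # team.me inside a team, otherwise falls back to bot.user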
| 36.82243
| 78
| 0.712944
|
import guilded.abc
class Context(guilded.abc.Messageable):
def __init__(self, **attrs):
self.message = attrs.pop('message', None)
self._state = attrs.pop('state', self.message._state)
self.bot = attrs.pop('bot', None)
self.args = attrs.pop('args', [])
self.kwargs = attrs.pop('kwargs', {})
self.prefix = attrs.pop('prefix')
self.command = attrs.pop('command', None)
self.view = attrs.pop('view', None)
self.invoked_with = attrs.pop('invoked_with', None)
self.invoked_parents = attrs.pop('invoked_parents', [])
self.invoked_subcommand = attrs.pop('invoked_subcommand', None)
self.subcommand_passed = attrs.pop('subcommand_passed', None)
self.command_failed = attrs.pop('command_failed', False)
@property
def valid(self):
return self.prefix is not None and self.command is not None
@property
def cog(self):
if self.command is None:
return None
return self.command.cog
@property
def channel(self):
return self.message.channel
@property
def _channel_id(self):
return self.message.channel_id
@property
def team(self):
return self.message.team
@property
def guild(self):
return self.team
@property
def author(self):
return self.message.author
@property
def me(self):
return self.team.me if self.team else self.bot.user
| true
| true
|
f70ac048c7ab163d01374fa17be97ba5e98dd62a
| 5,492
|
py
|
Python
|
ioos_qc/results.py
|
glos/ioos_qc
|
17e69ad582275be7ad0f5a2af40c11d810b344e8
|
[
"Apache-2.0"
] | 31
|
2019-10-09T15:08:38.000Z
|
2022-01-21T23:45:22.000Z
|
ioos_qc/results.py
|
glos/ioos_qc
|
17e69ad582275be7ad0f5a2af40c11d810b344e8
|
[
"Apache-2.0"
] | 49
|
2019-10-09T18:58:29.000Z
|
2022-02-08T22:52:34.000Z
|
ioos_qc/results.py
|
glos/ioos_qc
|
17e69ad582275be7ad0f5a2af40c11d810b344e8
|
[
"Apache-2.0"
] | 13
|
2019-10-08T19:47:34.000Z
|
2022-03-19T18:42:25.000Z
|
#!/usr/bin/env python
# coding=utf-8
import logging
from typing import NamedTuple, List
from dataclasses import dataclass
from collections import OrderedDict as odict, defaultdict
import numpy as np
from ioos_qc.qartod import QartodFlags
L = logging.getLogger(__name__) # noqa
class CallResult(NamedTuple):
package: str
test: str
function: callable
results: np.ndarray
def __repr__(self):
return f'<CallResult package={self.package} test={self.test}>'
class ContextResult(NamedTuple):
stream_id: str
results: List[CallResult]
subset_indexes: np.ndarray
data: np.ndarray = None
tinp: np.ndarray = None
zinp: np.ndarray = None
lat: np.ndarray = None
lon: np.ndarray = None
def __repr__(self):
return f'<ContextResult stream_id={self.stream_id}>'
@dataclass
class CollectedResult:
stream_id: str
package: str
test: str
function: callable
results: np.ma.core.MaskedArray = None
data: np.ndarray = None
tinp: np.ndarray = None
zinp: np.ndarray = None
lat: np.ndarray = None
lon: np.ndarray = None
def __repr__(self):
return f'<CollectedResult stream_id={self.stream_id} package={self.package} test={self.test}>'
def function_name(self) -> str:
return self.function.__name__
@property
def hash_key(self) -> str:
return f'{self.stream_id}:{self.package}.{self.test}'
def collect_results(results, how='list'):
if how in ['list', list]:
return collect_results_list(results)
elif how in ['dict', dict]:
return collect_results_dict(results)
def collect_results_list(results):
""" Turns a list of ContextResult objects into an iterator of CollectedResult objects
by combining the subset_index information in each ContextResult together into
a single array of results.
"""
collected = odict()
# ContextResults
for r in results:
cr = None
# Shortcut for CallResult objects when someone uses QcConfig.run() directly
# and doesn't go through a Stream object
if isinstance(r, CallResult):
cr = CollectedResult(
stream_id=None,
package=r.package,
test=r.test,
function=r.function,
results=r.results,
)
collected[cr.hash_key] = cr
continue
# CallResults
for tr in r.results:
cr = CollectedResult(
stream_id=r.stream_id,
package=tr.package,
test=tr.test,
function=tr.function
)
if cr.hash_key not in collected:
# Set the initial values
cr.results = np.ma.masked_all(shape=r.subset_indexes.shape, dtype=tr.results.dtype)
cr.data = np.ma.masked_all(shape=r.subset_indexes.shape, dtype=r.data.dtype)
cr.tinp = np.ma.masked_all(shape=r.subset_indexes.shape, dtype=r.tinp.dtype)
cr.zinp = np.ma.masked_all(shape=r.subset_indexes.shape, dtype=r.zinp.dtype)
cr.lat = np.ma.masked_all(shape=r.subset_indexes.shape, dtype=r.lat.dtype)
cr.lon = np.ma.masked_all(shape=r.subset_indexes.shape, dtype=r.lon.dtype)
collected[cr.hash_key] = cr
collected[cr.hash_key].results[r.subset_indexes] = tr.results
if cr is not None:
if r.subset_indexes.all():
collected[cr.hash_key].data = r.data
collected[cr.hash_key].tinp = r.tinp
collected[cr.hash_key].zinp = r.zinp
collected[cr.hash_key].lat = r.lat
collected[cr.hash_key].lon = r.lon
else:
collected[cr.hash_key].data[r.subset_indexes] = r.data
collected[cr.hash_key].tinp[r.subset_indexes] = r.tinp
collected[cr.hash_key].zinp[r.subset_indexes] = r.zinp
collected[cr.hash_key].lat[r.subset_indexes] = r.lat
collected[cr.hash_key].lon[r.subset_indexes] = r.lon
return list(collected.values())
def collect_results_dict(results):
""" Turns a list of ContextResult objects into a dictionary of test results
by combining the subset_index information in each ContextResult together into
a single array of results. This is mostly here for historical purposes. Users
should migrate to using the Result objects directly.
"""
# Magic for nested key generation
# https://stackoverflow.com/a/27809959
collected = defaultdict(lambda: defaultdict(odict))
# ContextResults
for r in results:
# Shortcut for CallResult objects when someone uses QcConfig.run() directly
# and doesn't go through a Stream object
if isinstance(r, CallResult):
collected[r.package][r.test] = r.results
continue
flag_arr = np.ma.empty_like(r.subset_indexes, dtype='uint8')
flag_arr.fill(QartodFlags.UNKNOWN)
# iterate over the CallResults
for tr in r.results:
testpackage = tr.package
testname = tr.test
testresults = tr.results
if testname not in collected[r.stream_id][testpackage]:
collected[r.stream_id][testpackage][testname] = np.copy(flag_arr)
collected[r.stream_id][testpackage][testname][r.subset_indexes] = testresults
return collected
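# --- Hedged usage sketch (not part of the original module) ---
# Shows the CallResult shortcut path through collect_results(): results that
# come straight from a QcConfig.run()-style call (no Stream/ContextResult)
# are repackaged either as CollectedResult objects or as a nested dict keyed
# by package and test. The flag values below are made up for the demo.
if __name__ == "__main__":
    fake = CallResult(
        package="qartod",
        test="gross_range_test",
        function=lambda obs: obs,  # stand-in for the real test callable
        results=np.array([1, 1, 3, 4], dtype="uint8"),
    )
    as_list = collect_results([fake], how="list")
    print(as_list[0].package, as_list[0].test, as_list[0].results)
    as_dict = collect_results([fake], how="dict")
    print(as_dict["qartod"]["gross_range_test"])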
| 33.284848
| 102
| 0.629825
|
import logging
from typing import NamedTuple, List
from dataclasses import dataclass
from collections import OrderedDict as odict, defaultdict
import numpy as np
from ioos_qc.qartod import QartodFlags
L = logging.getLogger(__name__)
class CallResult(NamedTuple):
package: str
test: str
function: callable
results: np.ndarray
def __repr__(self):
return f'<CallResult package={self.package} test={self.test}>'
class ContextResult(NamedTuple):
stream_id: str
results: List[CallResult]
subset_indexes: np.ndarray
data: np.ndarray = None
tinp: np.ndarray = None
zinp: np.ndarray = None
lat: np.ndarray = None
lon: np.ndarray = None
def __repr__(self):
return f'<ContextResult stream_id={self.stream_id}>'
@dataclass
class CollectedResult:
stream_id: str
package: str
test: str
function: callable
results: np.ma.core.MaskedArray = None
data: np.ndarray = None
tinp: np.ndarray = None
zinp: np.ndarray = None
lat: np.ndarray = None
lon: np.ndarray = None
def __repr__(self):
return f'<CollectedResult stream_id={self.stream_id} package={self.package} test={self.test}>'
def function_name(self) -> str:
return self.function.__name__
@property
def hash_key(self) -> str:
return f'{self.stream_id}:{self.package}.{self.test}'
def collect_results(results, how='list'):
if how in ['list', list]:
return collect_results_list(results)
elif how in ['dict', dict]:
return collect_results_dict(results)
def collect_results_list(results):
collected = odict()
for r in results:
cr = None
if isinstance(r, CallResult):
cr = CollectedResult(
stream_id=None,
package=r.package,
test=r.test,
function=r.function,
results=r.results,
)
collected[cr.hash_key] = cr
continue
# CallResults
for tr in r.results:
cr = CollectedResult(
stream_id=r.stream_id,
package=tr.package,
test=tr.test,
function=tr.function
)
if cr.hash_key not in collected:
# Set the initial values
cr.results = np.ma.masked_all(shape=r.subset_indexes.shape, dtype=tr.results.dtype)
cr.data = np.ma.masked_all(shape=r.subset_indexes.shape, dtype=r.data.dtype)
cr.tinp = np.ma.masked_all(shape=r.subset_indexes.shape, dtype=r.tinp.dtype)
cr.zinp = np.ma.masked_all(shape=r.subset_indexes.shape, dtype=r.zinp.dtype)
cr.lat = np.ma.masked_all(shape=r.subset_indexes.shape, dtype=r.lat.dtype)
cr.lon = np.ma.masked_all(shape=r.subset_indexes.shape, dtype=r.lon.dtype)
collected[cr.hash_key] = cr
collected[cr.hash_key].results[r.subset_indexes] = tr.results
if cr is not None:
if r.subset_indexes.all():
collected[cr.hash_key].data = r.data
collected[cr.hash_key].tinp = r.tinp
collected[cr.hash_key].zinp = r.zinp
collected[cr.hash_key].lat = r.lat
collected[cr.hash_key].lon = r.lon
else:
collected[cr.hash_key].data[r.subset_indexes] = r.data
collected[cr.hash_key].tinp[r.subset_indexes] = r.tinp
collected[cr.hash_key].zinp[r.subset_indexes] = r.zinp
collected[cr.hash_key].lat[r.subset_indexes] = r.lat
collected[cr.hash_key].lon[r.subset_indexes] = r.lon
return list(collected.values())
def collect_results_dict(results):
# Magic for nested key generation
# https://stackoverflow.com/a/27809959
collected = defaultdict(lambda: defaultdict(odict))
# ContextResults
for r in results:
# Shortcut for CallResult objects when someone uses QcConfig.run() directly
# and doesn't go through a Stream object
if isinstance(r, CallResult):
collected[r.package][r.test] = r.results
continue
flag_arr = np.ma.empty_like(r.subset_indexes, dtype='uint8')
flag_arr.fill(QartodFlags.UNKNOWN)
for tr in r.results:
testpackage = tr.package
testname = tr.test
testresults = tr.results
if testname not in collected[r.stream_id][testpackage]:
collected[r.stream_id][testpackage][testname] = np.copy(flag_arr)
collected[r.stream_id][testpackage][testname][r.subset_indexes] = testresults
return collected
| true
| true
|
f70ac14f2dee5acf23ce8ff4dca5cf048c116003
| 7,350
|
py
|
Python
|
getProductInfo.py
|
dongil618/Cafe24toSmartstore
|
909e2cdf2927d5ecacb7a6484c84f18de67cb47e
|
[
"MIT"
] | null | null | null |
getProductInfo.py
|
dongil618/Cafe24toSmartstore
|
909e2cdf2927d5ecacb7a6484c84f18de67cb47e
|
[
"MIT"
] | null | null | null |
getProductInfo.py
|
dongil618/Cafe24toSmartstore
|
909e2cdf2927d5ecacb7a6484c84f18de67cb47e
|
[
"MIT"
] | null | null | null |
from bs4 import BeautifulSoup as bs
import requests
from urllib.request import urlopen
from urllib.parse import quote
import re
import time
def getProductInfo(productNameIndex):
headers = {"User-Agent": "Mozilla/5.0"}
color = []
size = []
price = ""
instruction = ""
sizeGuide = ""
category = ""
url = (
"https://www.ficelle.co.kr/product/"
+ quote(productNameIndex["productName"])
+ "/"
+ quote(productNameIndex["productIndex"])
+ "/category/25/display/1/"
)
response = requests.get(url, headers=headers)
if response.status_code == 200:
html = urlopen(url)
soup = bs(html, "html.parser")
# Color Crawling
c = soup.find("ul", attrs={"ec-dev-id": "product_option_id1"})
colors = c.find_all("span")
# print(colors)
for i in colors:
productColor = i.text
# print("productColor : ", productColor)
color.append(productColor)
# c = soup.find_all("ul", attrs={"class": "ec-product-button ec-product-preview"})
# if not c:
# print(soup)
# c = soup.find_all("select", attrs={"id": "product_option_id1"})
# if c:
# colors = c[0].find_all("option")
# for i in range(2, len(colors)):
# productColor = colors[i].text
# print(productColor)
# color.append(productColor)
# else:
# colors = c[0].find_all("li")
# for i in colors:
# productColor = i.find("span").text
# print(productColor)
# color.append(productColor)
# Size Crawling
sizes = soup.find_all("li", attrs={"class": "ec-product-disabled"})
if not sizes:
sizes = soup.find_all("select", attrs={"id": "product_option_id2"})
if sizes:
s = sizes[0].find_all("option")
for i in range(2, len(s)):
productSize = s[i].text
# print(productSize)
size.append(productSize)
else:
size.append("Free")
else:
for i in sizes:
productSize = i.find("span").text
# print(productSize)
size.append(productSize)
# Product Name Crawling
# productName = soup.find(
# "span", attrs={"style": "font-size:16px;color:#555555;"}
# ).text
# category
        # classify by productName!
try:
productNameSplitList = productNameIndex["productName"].split(" ")
# print(productNameSplitList)
productNameSplitList.sort()
# print(productNameSplitList)
pants = ["Pants", "Slacks"]
knit_sweater = ["Knit", "Sweater"]
blouse_shirt = ["Blouse", "Shirt", "Shirts"]
skirt = ["Skirt"]
onepiece = ["Onepiece", "Dress"]
jacket = ["Jacket"]
jumper = ["Jumper"]
jumpsuit = ["Jumpsuit"]
jeans = ["Denim", "Jeans"]
cardigan = ["Cardigan"]
coat = ["Coat"]
sports_wear = ["Jogger"]
t_shirt = ["T", "Sweat shirt", "Top", "Sleeveless", "MTM"]
codie_set = ["Set", "&"]
bag = ["Bag"]
sandal = ["Sandal"]
slipper = ["slipper", "Flip"]
middle_boots = ["Middle"]
long_boots = ["Long"]
bloafaer = ["Bloafer"]
flat = ["Flat"]
for productNameValue in productNameSplitList:
if productNameValue in codie_set:
category = "패션의류 여성의류 코디세트"
break
else:
if productNameValue in pants:
category = "패션의류 여성의류 바지"
break
elif productNameValue in blouse_shirt:
category = "패션의류 여성의류 블라우스/셔츠"
break
elif productNameValue in skirt:
category = "패션의류 여성의류 스커트"
break
elif productNameValue in onepiece:
category = "패션의류 여성의류 원피스"
break
elif productNameValue in jacket:
category = "패션의류 여성의류 재킷"
break
elif productNameValue in jumper:
category = "패션의류 여성의류 점퍼"
break
elif productNameValue in jeans:
category = "패션의류 여성의류 청바지"
break
elif productNameValue in cardigan:
category = "패션의류 여성의류 카디건"
break
elif productNameValue in coat:
category = "패션의류 여성의류 코트"
break
elif productNameValue in sports_wear:
category = "패션의류 여성의류 트레이닝복"
break
elif productNameValue in knit_sweater:
category = "패션의류 여성의류 니트/스웨터"
break
elif productNameValue in jumpsuit:
category = "패션의류 여성의류 점프슈트"
break
elif productNameValue in t_shirt:
category = "패션의류 여성의류 티셔츠"
break
elif productNameValue in bag:
category = "패션잡화 여성가방 숄더백"
break
elif productNameValue in sandal:
category = "패션잡화 여성신발 샌들 스트랩샌들"
break
elif productNameValue in slipper:
category = "패션잡화 여성신발 슬리퍼"
break
elif productNameValue in middle_boots:
category = "패션잡화 여성신발 부츠 미들부츠"
break
elif productNameValue in long_boots:
category = "패션잡화 여성신발 부츠 롱부츠"
break
elif productNameValue in bloafaer:
category = "패션잡화 여성신발 샌들 뮬/블로퍼"
break
elif productNameValue in flat:
category = "패션잡화 여성신발 단화 플랫"
break
except:
print("Non-Existent Categories")
# Instruction and Size Guide Crawling
price = soup.find("strong", attrs={"id": "span_product_price_text"}).text
# price string process
price = re.sub(",|원", "", price)
price = int(price) + 500
instruction = soup.find("div", attrs={"id": "view1"}).find("p").text
sizeGuide = soup.find("div", attrs={"id": "view2"}).find("p").text
time.sleep(3)
return {
"productName": productNameIndex["productName"],
"price": price,
"colors": color,
"sizes": size,
"instruction": instruction,
"sizeGuide": sizeGuide,
"category": category,
}
else:
print(response.status_code)
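# --- Hedged usage example (not part of the original script) ---
# getProductInfo() expects a dict with the product name and the numeric index
# used in the Cafe24 product URL. The values below are placeholders, not real
# catalogue entries, so a live call with them will most likely get a non-200
# response and only print the status code.
if __name__ == "__main__":
    sample = {
        "productName": "Linen Long Skirt",  # hypothetical product name
        "productIndex": "1234",             # hypothetical product index
    }
    info = getProductInfo(sample)
    if info:
        print(info["category"], info["price"], info["colors"], info["sizes"])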
| 37.692308
| 90
| 0.460408
|
from bs4 import BeautifulSoup as bs
import requests
from urllib.request import urlopen
from urllib.parse import quote
import re
import time
def getProductInfo(productNameIndex):
headers = {"User-Agent": "Mozilla/5.0"}
color = []
size = []
price = ""
instruction = ""
sizeGuide = ""
category = ""
url = (
"https://www.ficelle.co.kr/product/"
+ quote(productNameIndex["productName"])
+ "/"
+ quote(productNameIndex["productIndex"])
+ "/category/25/display/1/"
)
response = requests.get(url, headers=headers)
if response.status_code == 200:
html = urlopen(url)
soup = bs(html, "html.parser")
c = soup.find("ul", attrs={"ec-dev-id": "product_option_id1"})
colors = c.find_all("span")
for i in colors:
productColor = i.text
color.append(productColor)
sizes = soup.find_all("li", attrs={"class": "ec-product-disabled"})
if not sizes:
sizes = soup.find_all("select", attrs={"id": "product_option_id2"})
if sizes:
s = sizes[0].find_all("option")
for i in range(2, len(s)):
productSize = s[i].text
size.append(productSize)
else:
size.append("Free")
else:
for i in sizes:
productSize = i.find("span").text
size.append(productSize)
try:
productNameSplitList = productNameIndex["productName"].split(" ")
productNameSplitList.sort()
pants = ["Pants", "Slacks"]
knit_sweater = ["Knit", "Sweater"]
blouse_shirt = ["Blouse", "Shirt", "Shirts"]
skirt = ["Skirt"]
onepiece = ["Onepiece", "Dress"]
jacket = ["Jacket"]
jumper = ["Jumper"]
jumpsuit = ["Jumpsuit"]
jeans = ["Denim", "Jeans"]
cardigan = ["Cardigan"]
coat = ["Coat"]
sports_wear = ["Jogger"]
t_shirt = ["T", "Sweat shirt", "Top", "Sleeveless", "MTM"]
codie_set = ["Set", "&"]
bag = ["Bag"]
sandal = ["Sandal"]
slipper = ["slipper", "Flip"]
middle_boots = ["Middle"]
long_boots = ["Long"]
bloafaer = ["Bloafer"]
flat = ["Flat"]
for productNameValue in productNameSplitList:
if productNameValue in codie_set:
category = "패션의류 여성의류 코디세트"
break
else:
if productNameValue in pants:
category = "패션의류 여성의류 바지"
break
elif productNameValue in blouse_shirt:
category = "패션의류 여성의류 블라우스/셔츠"
break
elif productNameValue in skirt:
category = "패션의류 여성의류 스커트"
break
elif productNameValue in onepiece:
category = "패션의류 여성의류 원피스"
break
elif productNameValue in jacket:
category = "패션의류 여성의류 재킷"
break
elif productNameValue in jumper:
category = "패션의류 여성의류 점퍼"
break
elif productNameValue in jeans:
category = "패션의류 여성의류 청바지"
break
elif productNameValue in cardigan:
category = "패션의류 여성의류 카디건"
break
elif productNameValue in coat:
category = "패션의류 여성의류 코트"
break
elif productNameValue in sports_wear:
category = "패션의류 여성의류 트레이닝복"
break
elif productNameValue in knit_sweater:
category = "패션의류 여성의류 니트/스웨터"
break
elif productNameValue in jumpsuit:
category = "패션의류 여성의류 점프슈트"
break
elif productNameValue in t_shirt:
category = "패션의류 여성의류 티셔츠"
break
elif productNameValue in bag:
category = "패션잡화 여성가방 숄더백"
break
elif productNameValue in sandal:
category = "패션잡화 여성신발 샌들 스트랩샌들"
break
elif productNameValue in slipper:
category = "패션잡화 여성신발 슬리퍼"
break
elif productNameValue in middle_boots:
category = "패션잡화 여성신발 부츠 미들부츠"
break
elif productNameValue in long_boots:
category = "패션잡화 여성신발 부츠 롱부츠"
break
elif productNameValue in bloafaer:
category = "패션잡화 여성신발 샌들 뮬/블로퍼"
break
elif productNameValue in flat:
category = "패션잡화 여성신발 단화 플랫"
break
except:
print("Non-Existent Categories")
price = soup.find("strong", attrs={"id": "span_product_price_text"}).text
price = re.sub(",|원", "", price)
price = int(price) + 500
instruction = soup.find("div", attrs={"id": "view1"}).find("p").text
sizeGuide = soup.find("div", attrs={"id": "view2"}).find("p").text
time.sleep(3)
return {
"productName": productNameIndex["productName"],
"price": price,
"colors": color,
"sizes": size,
"instruction": instruction,
"sizeGuide": sizeGuide,
"category": category,
}
else:
print(response.status_code)
| true
| true
|
f70ac22e6b088bf21a6bc6c89c1e2ab6834b5bfe
| 5,592
|
py
|
Python
|
alchemist_py/project_manager.py
|
Kenta11/alchemist_py
|
49d013dde4688f663eb2d35519347047739ecace
|
[
"MIT"
] | null | null | null |
alchemist_py/project_manager.py
|
Kenta11/alchemist_py
|
49d013dde4688f663eb2d35519347047739ecace
|
[
"MIT"
] | 1
|
2021-08-04T14:14:09.000Z
|
2021-08-04T14:14:09.000Z
|
alchemist_py/project_manager.py
|
Kenta11/alchemist_py
|
49d013dde4688f663eb2d35519347047739ecace
|
[
"MIT"
] | 1
|
2021-07-15T07:05:42.000Z
|
2021-07-15T07:05:42.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import git
import os
import re
import sys
import toml
from pathlib import Path
from alchemist_py.brokergen import createProject
from alchemist_py.deviceinfo import searchDevice
from alchemist_py.plugin_manager import PluginManager
class Manager(object):
def __init__(self):
config = toml.load(open("Alchemist.toml"))
self.board = config["board"]
self.nodes = config["nodes"]
self.topics = config["topics"]
self.fpga, self.clock = searchDevice(self.board)
self.topic_table = {}
for topic in self.topics:
self.topic_table[topic["name"]] =\
"struct {name} {{\n {message}}};".format(
name=topic["name"], message=topic["message"]
)
self.p_manager = PluginManager()
self.ports = []
for ps in list(map(lambda x:x["ports"], self.nodes)):
self.ports.extend(ps)
def updateNode(self, node):
path_to_project = Path("nodes")/node["name"]
# make mini alchemist data for the node
mini_alchemist = {
"device": {
"board": self.board,
"fpga": self.fpga,
"clock": self.clock
},
"node": node,
"topics": []
}
for port in node["ports"]:
for topic in self.topics:
if port["attribute"] in ["wire"]:
break
elif port["attribute"] in ["publisher", "subscriber"] and port["topic"] == topic["name"]:
mini_alchemist["topics"].append(topic)
break
else:
print("Unknown topic:", port["topic"], file=sys.stderr)
print("node:", node["name"])
exit(1)
# write mini alchemist to TOML
os.makedirs(path_to_project)
toml.dump(mini_alchemist, open(path_to_project/".Alchemist.toml", "w"))
# update project
plugin = self.p_manager.loadPlugin(node["plugin"])
plugin.createProject(node["name"])
def updateNodes(self):
# update projects for nodes
for node in self.nodes:
path_to_project = Path("nodes")/node["name"]
# if no project for a node, make a directory and Alchemist.toml
if not os.path.exists(path_to_project):
if "repo" in node.keys():
git.Repo.clone_from(node["repo"], "nodes")
else:
self.updateNode(node)
# if Alchemist.toml was updated, update mini Alchemist.toml
t_alchemist = os.path.getatime("Alchemist.toml")
t_mini_alchemist = os.path.getatime(path_to_project/".Alchemist.toml")
if t_alchemist > t_mini_alchemist:
if "repo" in node.keys():
git.Repo.clone_from(node["repo"], "nodes")
else:
self.updateNode(node)
def updateTopic(self, topic:dict):
path_to_project = Path("brokers") / ("broker"+topic["name"])
if not os.path.exists(path_to_project):
byte = 0
for m in re.finditer(r"(?P<type>((unsigned\s+){0,1}(char|short|int|long)|(float|double)|(ap_(u){0,1}int\s*\<\s*[1-9]{1,4}\s*>)))\s+(?P<var>([a-zA-Z_][a-zA-Z0-9_]*(\s*\[\s*([0-9]|[1-9][0-9]*)\s*\]){0,1}))\s*;", topic["message"]):
byte += self.getByte(m.group("type"), m.group("var"))
mini_alchemist = {
"device": {
"board": self.board,
"fpga": self.fpga,
"clock": self.clock
},
"topic": topic,
}
mini_alchemist["topic"]["pub"] = len(list(filter(
lambda x: x["attribute"] == "publisher" and x["topic"] == topic["name"],
self.ports
)))
mini_alchemist["topic"]["sub"] = len(list(filter(
lambda x: x["attribute"] == "subscriber" and x["topic"] == topic["name"],
self.ports
)))
mini_alchemist["topic"]["width"] = 64
mini_alchemist["topic"]["count"] = int(byte / 8)
os.makedirs(path_to_project)
toml.dump(mini_alchemist, open(path_to_project / ".Alchemist.toml", "w"))
createProject(topic["name"])
def updateTopics(self):
for topic in self.topics:
self.updateTopic(topic)
def getByte(self, vType:str, var:str):
width_of_type = 0
if vType == "char":
width_of_type = 1
elif vType == "short":
width_of_type = 2
elif vType == "int":
width_of_type = 4
elif vType == "long":
width_of_type = 8
elif vType.split()[0] == "unsigned":
if vType.split()[1] == "char":
width_of_type = 1
elif vType.split()[1] == "short":
width_of_type = 2
elif vType.split()[1] == "int":
width_of_type = 4
elif vType.split()[1] == "long":
width_of_type = 8
else:
print("Unknown type!")
exit(1)
else:
print("Unknown type!")
exit(1)
length_of_var = 1
m = re.match(
r"[a-zA-Z_][a-zA-Z0-9_]*\s*\[\s*(?P<length>[1-9][0-9]*)\s*\]",
var
)
if m:
length_of_var = int(m.group("length"))
return width_of_type * length_of_var
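# --- Hedged sanity check (not part of the original module) ---
# getByte() multiplies the C-type width by the optional array length parsed
# from the variable declaration, so "unsigned short buf[16];" contributes
# 2 * 16 = 32 bytes to the topic size. Since the method never reads instance
# state, it can be exercised unbound without building a Manager (which would
# need a real Alchemist.toml):
if __name__ == "__main__":
    print(Manager.getByte(None, "int", "counter"))             # 4
    print(Manager.getByte(None, "unsigned short", "buf[16]"))  # 32
    print(Manager.getByte(None, "char", "name[ 8 ]"))          # 8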
| 34.732919
| 240
| 0.505722
|
import git
import os
import re
import sys
import toml
from pathlib import Path
from alchemist_py.brokergen import createProject
from alchemist_py.deviceinfo import searchDevice
from alchemist_py.plugin_manager import PluginManager
class Manager(object):
def __init__(self):
config = toml.load(open("Alchemist.toml"))
self.board = config["board"]
self.nodes = config["nodes"]
self.topics = config["topics"]
self.fpga, self.clock = searchDevice(self.board)
self.topic_table = {}
for topic in self.topics:
self.topic_table[topic["name"]] =\
"struct {name} {{\n {message}}};".format(
name=topic["name"], message=topic["message"]
)
self.p_manager = PluginManager()
self.ports = []
for ps in list(map(lambda x:x["ports"], self.nodes)):
self.ports.extend(ps)
def updateNode(self, node):
path_to_project = Path("nodes")/node["name"]
mini_alchemist = {
"device": {
"board": self.board,
"fpga": self.fpga,
"clock": self.clock
},
"node": node,
"topics": []
}
for port in node["ports"]:
for topic in self.topics:
if port["attribute"] in ["wire"]:
break
elif port["attribute"] in ["publisher", "subscriber"] and port["topic"] == topic["name"]:
mini_alchemist["topics"].append(topic)
break
else:
print("Unknown topic:", port["topic"], file=sys.stderr)
print("node:", node["name"])
exit(1)
os.makedirs(path_to_project)
toml.dump(mini_alchemist, open(path_to_project/".Alchemist.toml", "w"))
plugin = self.p_manager.loadPlugin(node["plugin"])
plugin.createProject(node["name"])
def updateNodes(self):
for node in self.nodes:
path_to_project = Path("nodes")/node["name"]
if not os.path.exists(path_to_project):
if "repo" in node.keys():
git.Repo.clone_from(node["repo"], "nodes")
else:
self.updateNode(node)
t_alchemist = os.path.getatime("Alchemist.toml")
t_mini_alchemist = os.path.getatime(path_to_project/".Alchemist.toml")
if t_alchemist > t_mini_alchemist:
if "repo" in node.keys():
git.Repo.clone_from(node["repo"], "nodes")
else:
self.updateNode(node)
def updateTopic(self, topic:dict):
path_to_project = Path("brokers") / ("broker"+topic["name"])
if not os.path.exists(path_to_project):
byte = 0
for m in re.finditer(r"(?P<type>((unsigned\s+){0,1}(char|short|int|long)|(float|double)|(ap_(u){0,1}int\s*\<\s*[1-9]{1,4}\s*>)))\s+(?P<var>([a-zA-Z_][a-zA-Z0-9_]*(\s*\[\s*([0-9]|[1-9][0-9]*)\s*\]){0,1}))\s*;", topic["message"]):
byte += self.getByte(m.group("type"), m.group("var"))
mini_alchemist = {
"device": {
"board": self.board,
"fpga": self.fpga,
"clock": self.clock
},
"topic": topic,
}
mini_alchemist["topic"]["pub"] = len(list(filter(
lambda x: x["attribute"] == "publisher" and x["topic"] == topic["name"],
self.ports
)))
mini_alchemist["topic"]["sub"] = len(list(filter(
lambda x: x["attribute"] == "subscriber" and x["topic"] == topic["name"],
self.ports
)))
mini_alchemist["topic"]["width"] = 64
mini_alchemist["topic"]["count"] = int(byte / 8)
os.makedirs(path_to_project)
toml.dump(mini_alchemist, open(path_to_project / ".Alchemist.toml", "w"))
createProject(topic["name"])
def updateTopics(self):
for topic in self.topics:
self.updateTopic(topic)
def getByte(self, vType:str, var:str):
width_of_type = 0
if vType == "char":
width_of_type = 1
elif vType == "short":
width_of_type = 2
elif vType == "int":
width_of_type = 4
elif vType == "long":
width_of_type = 8
elif vType.split()[0] == "unsigned":
if vType.split()[1] == "char":
width_of_type = 1
elif vType.split()[1] == "short":
width_of_type = 2
elif vType.split()[1] == "int":
width_of_type = 4
elif vType.split()[1] == "long":
width_of_type = 8
else:
print("Unknown type!")
exit(1)
else:
print("Unknown type!")
exit(1)
length_of_var = 1
m = re.match(
r"[a-zA-Z_][a-zA-Z0-9_]*\s*\[\s*(?P<length>[1-9][0-9]*)\s*\]",
var
)
if m:
length_of_var = int(m.group("length"))
return width_of_type * length_of_var
| true
| true
|
f70ac248b9ef28a76627a9460455648ecfe49916
| 1,177
|
py
|
Python
|
src/services/incomesService.py
|
TTIP-UNQ-Team6/gastapp_back
|
0613aba610f765b55cb3bb10fec4d0d5f3685f88
|
[
"MIT"
] | null | null | null |
src/services/incomesService.py
|
TTIP-UNQ-Team6/gastapp_back
|
0613aba610f765b55cb3bb10fec4d0d5f3685f88
|
[
"MIT"
] | null | null | null |
src/services/incomesService.py
|
TTIP-UNQ-Team6/gastapp_back
|
0613aba610f765b55cb3bb10fec4d0d5f3685f88
|
[
"MIT"
] | null | null | null |
import pymongo
from bson import ObjectId
from src.services import config
collection = config.db.incomes
def search_by_user_email(user_email, itype):
return collection.find({"user_email": user_email, "itype": itype})
def sum_amounts_by_user(user_email, itype):
pipeline = [{"$match": {"user_email": user_email, "itype": itype}}, {"$group": {"_id": "null", "total": {"$sum": "$amount"}}}]
return collection.aggregate(pipeline)
def save(income):
collection.insert_one(income.__dict__)
def save_all(incomes):
collection.insert_many(incomes)
def update(income_id, income):
collection.find_one_and_update(
{"_id": ObjectId(income_id)},
{"$set": income.__dict__},
upsert=True)
def delete(income_id):
collection.delete_one({"_id": ObjectId(income_id)})
def filter(user_email, category, date, account, itype):
pipeline = [{
"$match": {
"user_email": user_email,
"category": category,
"date": date,
"account": account,
"itype": itype
}},
{"$sort": {"date": pymongo.DESCENDING}}
]
return collection.aggregate(pipeline)
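# Usage sketch (not part of the original service; the argument values below are
# illustrative assumptions, only the field names come from this module):
#   docs = filter("user@example.com", "salary", "2021-05-01", "savings", "income")
#   totals = sum_amounts_by_user("user@example.com", "income")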
| 24.020408
| 130
| 0.634664
|
import pymongo
from bson import ObjectId
from src.services import config
collection = config.db.incomes
def search_by_user_email(user_email, itype):
return collection.find({"user_email": user_email, "itype": itype})
def sum_amounts_by_user(user_email, itype):
pipeline = [{"$match": {"user_email": user_email, "itype": itype}}, {"$group": {"_id": "null", "total": {"$sum": "$amount"}}}]
return collection.aggregate(pipeline)
def save(income):
collection.insert_one(income.__dict__)
def save_all(incomes):
collection.insert_many(incomes)
def update(income_id, income):
collection.find_one_and_update(
{"_id": ObjectId(income_id)},
{"$set": income.__dict__},
upsert=True)
def delete(income_id):
collection.delete_one({"_id": ObjectId(income_id)})
def filter(user_email, category, date, account, itype):
pipeline = [{
"$match": {
"user_email": user_email,
"category": category,
"date": date,
"account": account,
"itype": itype
}},
{"$sort": {"date": pymongo.DESCENDING}}
]
return collection.aggregate(pipeline)
| true
| true
|
f70ac2d9c5ad8d01fcd7341073e93a56903015f3
| 737
|
py
|
Python
|
Operations/SC_DFA.py
|
ClarkLabUVA/hctsa-py
|
4382a7e852d21cdfefdac1a4a09ea6e11abd9be1
|
[
"MIT"
] | 6
|
2020-08-14T00:16:19.000Z
|
2022-01-20T05:49:12.000Z
|
Operations/SC_DFA.py
|
fairscape/hctsa-py
|
4382a7e852d21cdfefdac1a4a09ea6e11abd9be1
|
[
"MIT"
] | null | null | null |
Operations/SC_DFA.py
|
fairscape/hctsa-py
|
4382a7e852d21cdfefdac1a4a09ea6e11abd9be1
|
[
"MIT"
] | 4
|
2020-08-14T00:22:45.000Z
|
2021-02-18T05:31:14.000Z
|
import numpy as np

def SC_DFA(y):
N = len(y)
tau = int(np.floor(N/2))
y = y - np.mean(y)
x = np.cumsum(y)
taus = np.arange(5,tau+1)
ntau = len(taus)
F = np.zeros(ntau)
for i in range(ntau):
t = int(taus[i])
x_buff = x[:N - N % t]
x_buff = x_buff.reshape((int(N / t),t))
y_buff = np.zeros((int(N / t),t))
for j in range(int(N / t)):
tt = range(0,int(t))
p = np.polyfit(tt,x_buff[j,:],1)
y_buff[j,:] = np.power(x_buff[j,:] - np.polyval(p,tt),2)
y_buff.reshape((N - N % t,1))
F[i] = np.sqrt(np.mean(y_buff))
logtaur = np.log(taus)
logF = np.log(F)
p = np.polyfit(logtaur,logF,1)
return p[0]
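# Usage sketch (not part of the original module): SC_DFA expects a 1-D series
# and returns the detrended-fluctuation scaling exponent, i.e. the slope of
# log F(tau) versus log tau; for uncorrelated noise the result should be close
# to 0.5. For example:
#   y = np.random.randn(1000)
#   alpha = SC_DFA(y)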
| 14.45098
| 69
| 0.464043
|
import numpy as np

def SC_DFA(y):
N = len(y)
tau = int(np.floor(N/2))
y = y - np.mean(y)
x = np.cumsum(y)
taus = np.arange(5,tau+1)
ntau = len(taus)
F = np.zeros(ntau)
for i in range(ntau):
t = int(taus[i])
x_buff = x[:N - N % t]
x_buff = x_buff.reshape((int(N / t),t))
y_buff = np.zeros((int(N / t),t))
for j in range(int(N / t)):
tt = range(0,int(t))
p = np.polyfit(tt,x_buff[j,:],1)
y_buff[j,:] = np.power(x_buff[j,:] - np.polyval(p,tt),2)
y_buff.reshape((N - N % t,1))
F[i] = np.sqrt(np.mean(y_buff))
logtaur = np.log(taus)
logF = np.log(F)
p = np.polyfit(logtaur,logF,1)
return p[0]
| true
| true
|
f70ac35da3aea0122566e6991b7d2d9cdf82c5b6
| 473
|
py
|
Python
|
chemreg/utils/management/commands/lint.py
|
Chemical-Curation/chemcurator
|
bcd7fab84e407f06502e6873c38820724d4e54e7
|
[
"MIT"
] | 1
|
2020-10-05T18:02:24.000Z
|
2020-10-05T18:02:24.000Z
|
chemreg/utils/management/commands/lint.py
|
Chemical-Curation/chemcurator_django
|
bcd7fab84e407f06502e6873c38820724d4e54e7
|
[
"MIT"
] | 207
|
2020-01-30T19:17:44.000Z
|
2021-02-24T19:45:29.000Z
|
chemreg/utils/management/commands/lint.py
|
Chemical-Curation/chemcurator_django
|
bcd7fab84e407f06502e6873c38820724d4e54e7
|
[
"MIT"
] | null | null | null |
import subprocess
from django.conf import settings
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = "Automatically fixes formatting issues and reports any other linting errors"
def handle(self, *args, **options):
subprocess.run(["isort", "--apply", "--quiet"], cwd=settings.ROOT_DIR)
subprocess.run(["black", "--quiet", "."], cwd=settings.ROOT_DIR)
subprocess.run(["flake8"], cwd=settings.ROOT_DIR)
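# Usage note (not part of the original file): as a Django management command,
# this module is typically invoked from the project root as
#   python manage.py lint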
| 33.785714
| 87
| 0.701903
|
import subprocess
from django.conf import settings
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = "Automatically fixes formatting issues and reports any other linting errors"
def handle(self, *args, **options):
subprocess.run(["isort", "--apply", "--quiet"], cwd=settings.ROOT_DIR)
subprocess.run(["black", "--quiet", "."], cwd=settings.ROOT_DIR)
subprocess.run(["flake8"], cwd=settings.ROOT_DIR)
| true
| true
|
f70ac41ed03f636596fe6dd578ad492699f6b41c
| 410
|
py
|
Python
|
template_extends/template_extends/wsgi.py
|
BillionsRichard/pycharmWorkspace
|
709e2681fc6d85ff52fb25717215a365f51073aa
|
[
"Apache-2.0"
] | null | null | null |
template_extends/template_extends/wsgi.py
|
BillionsRichard/pycharmWorkspace
|
709e2681fc6d85ff52fb25717215a365f51073aa
|
[
"Apache-2.0"
] | null | null | null |
template_extends/template_extends/wsgi.py
|
BillionsRichard/pycharmWorkspace
|
709e2681fc6d85ff52fb25717215a365f51073aa
|
[
"Apache-2.0"
] | null | null | null |
"""
WSGI config for template_extends project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "template_extends.settings")
application = get_wsgi_application()
| 24.117647
| 78
| 0.795122
|
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "template_extends.settings")
application = get_wsgi_application()
| true
| true
|
f70ac69af650810765016d290bfa000e9f4b2f74
| 30,759
|
py
|
Python
|
tests/test_assets.py
|
semio/zipline
|
f13e9fd1253a500771bf10217b1d37031272c03c
|
[
"Apache-2.0"
] | null | null | null |
tests/test_assets.py
|
semio/zipline
|
f13e9fd1253a500771bf10217b1d37031272c03c
|
[
"Apache-2.0"
] | null | null | null |
tests/test_assets.py
|
semio/zipline
|
f13e9fd1253a500771bf10217b1d37031272c03c
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for the zipline.assets package
"""
import sys
from unittest import TestCase
from datetime import datetime, timedelta
import pickle
import uuid
import warnings
import pandas as pd
from pandas.tseries.tools import normalize_date
from pandas.util.testing import assert_frame_equal
from nose_parameterized import parameterized
from numpy import full
from zipline.assets import Asset, Equity, Future, AssetFinder
from zipline.assets.futures import FutureChain
from zipline.errors import (
SymbolNotFound,
MultipleSymbolsFound,
SidAssignmentError,
RootSymbolNotFound,
)
from zipline.finance.trading import with_environment
from zipline.utils.test_utils import (
all_subindices,
make_rotating_asset_info,
)
def build_lookup_generic_cases():
"""
Generate test cases for AssetFinder test_lookup_generic.
"""
unique_start = pd.Timestamp('2013-01-01', tz='UTC')
unique_end = pd.Timestamp('2014-01-01', tz='UTC')
dupe_0_start = pd.Timestamp('2013-01-01', tz='UTC')
dupe_0_end = dupe_0_start + timedelta(days=1)
dupe_1_start = pd.Timestamp('2013-01-03', tz='UTC')
dupe_1_end = dupe_1_start + timedelta(days=1)
frame = pd.DataFrame.from_records(
[
{
'sid': 0,
'file_name': 'duplicated',
'company_name': 'duplicated_0',
'start_date_nano': dupe_0_start.value,
'end_date_nano': dupe_0_end.value,
'exchange': '',
},
{
'sid': 1,
'file_name': 'duplicated',
'company_name': 'duplicated_1',
'start_date_nano': dupe_1_start.value,
'end_date_nano': dupe_1_end.value,
'exchange': '',
},
{
'sid': 2,
'file_name': 'unique',
'company_name': 'unique',
'start_date_nano': unique_start.value,
'end_date_nano': unique_end.value,
'exchange': '',
},
],
)
finder = AssetFinder(metadata=frame)
dupe_0, dupe_1, unique = assets = [
finder.retrieve_asset(i)
for i in range(3)
]
dupe_0_start = dupe_0.start_date
dupe_1_start = dupe_1.start_date
cases = [
##
# Scalars
# Asset object
(finder, assets[0], None, assets[0]),
(finder, assets[1], None, assets[1]),
(finder, assets[2], None, assets[2]),
# int
(finder, 0, None, assets[0]),
(finder, 1, None, assets[1]),
(finder, 2, None, assets[2]),
# Duplicated symbol with resolution date
(finder, 'duplicated', dupe_0_start, dupe_0),
(finder, 'duplicated', dupe_1_start, dupe_1),
# Unique symbol, with or without resolution date.
(finder, 'unique', unique_start, unique),
(finder, 'unique', None, unique),
##
# Iterables
# Iterables of Asset objects.
(finder, assets, None, assets),
(finder, iter(assets), None, assets),
# Iterables of ints
(finder, (0, 1), None, assets[:-1]),
(finder, iter((0, 1)), None, assets[:-1]),
# Iterables of symbols.
(finder, ('duplicated', 'unique'), dupe_0_start, [dupe_0, unique]),
(finder, ('duplicated', 'unique'), dupe_1_start, [dupe_1, unique]),
# Mixed types
(finder,
('duplicated', 2, 'unique', 1, dupe_1),
dupe_0_start,
[dupe_0, assets[2], unique, assets[1], dupe_1]),
]
return cases
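# Descriptive note (not in the original source): each case is a tuple of
# (finder, asset-convertible input, as-of date, expected result) and is fed to
# AssetFinderTestCase.test_lookup_generic below via parameterized.expand.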
class AssetTestCase(TestCase):
def test_asset_object(self):
self.assertEquals({5061: 'foo'}[Asset(5061)], 'foo')
self.assertEquals(Asset(5061), 5061)
self.assertEquals(5061, Asset(5061))
self.assertEquals(Asset(5061), Asset(5061))
self.assertEquals(int(Asset(5061)), 5061)
self.assertEquals(str(Asset(5061)), 'Asset(5061)')
def test_asset_is_pickleable(self):
# Very wow
s = Asset(
1337,
symbol="DOGE",
asset_name="DOGECOIN",
start_date=pd.Timestamp('2013-12-08 9:31AM', tz='UTC'),
end_date=pd.Timestamp('2014-06-25 11:21AM', tz='UTC'),
first_traded=pd.Timestamp('2013-12-08 9:31AM', tz='UTC'),
exchange='THE MOON',
)
s_unpickled = pickle.loads(pickle.dumps(s))
attrs_to_check = ['end_date',
'exchange',
'first_traded',
'end_date',
'asset_name',
'start_date',
'sid',
'start_date',
'symbol']
for attr in attrs_to_check:
self.assertEqual(getattr(s, attr), getattr(s_unpickled, attr))
def test_asset_comparisons(self):
s_23 = Asset(23)
s_24 = Asset(24)
self.assertEqual(s_23, s_23)
self.assertEqual(s_23, 23)
self.assertEqual(23, s_23)
self.assertNotEqual(s_23, s_24)
self.assertNotEqual(s_23, 24)
self.assertNotEqual(s_23, "23")
self.assertNotEqual(s_23, 23.5)
self.assertNotEqual(s_23, [])
self.assertNotEqual(s_23, None)
self.assertLess(s_23, s_24)
self.assertLess(s_23, 24)
self.assertGreater(24, s_23)
self.assertGreater(s_24, s_23)
def test_lt(self):
self.assertTrue(Asset(3) < Asset(4))
self.assertFalse(Asset(4) < Asset(4))
self.assertFalse(Asset(5) < Asset(4))
def test_le(self):
self.assertTrue(Asset(3) <= Asset(4))
self.assertTrue(Asset(4) <= Asset(4))
self.assertFalse(Asset(5) <= Asset(4))
def test_eq(self):
self.assertFalse(Asset(3) == Asset(4))
self.assertTrue(Asset(4) == Asset(4))
self.assertFalse(Asset(5) == Asset(4))
def test_ge(self):
self.assertFalse(Asset(3) >= Asset(4))
self.assertTrue(Asset(4) >= Asset(4))
self.assertTrue(Asset(5) >= Asset(4))
def test_gt(self):
self.assertFalse(Asset(3) > Asset(4))
self.assertFalse(Asset(4) > Asset(4))
self.assertTrue(Asset(5) > Asset(4))
def test_type_mismatch(self):
if sys.version_info.major < 3:
self.assertIsNotNone(Asset(3) < 'a')
self.assertIsNotNone('a' < Asset(3))
else:
with self.assertRaises(TypeError):
Asset(3) < 'a'
with self.assertRaises(TypeError):
'a' < Asset(3)
class TestFuture(TestCase):
future = Future(
2468,
symbol='OMH15',
root_symbol='OM',
notice_date=pd.Timestamp('2014-01-20', tz='UTC'),
expiration_date=pd.Timestamp('2014-02-20', tz='UTC'),
contract_multiplier=500
)
def test_str(self):
strd = self.future.__str__()
self.assertEqual("Future(2468 [OMH15])", strd)
def test_repr(self):
reprd = self.future.__repr__()
self.assertTrue("Future" in reprd)
self.assertTrue("2468" in reprd)
self.assertTrue("OMH15" in reprd)
self.assertTrue("root_symbol='OM'" in reprd)
self.assertTrue(("notice_date=Timestamp('2014-01-20 00:00:00+0000', "
"tz='UTC')") in reprd)
self.assertTrue("expiration_date=Timestamp('2014-02-20 00:00:00+0000'"
in reprd)
self.assertTrue("contract_multiplier=500" in reprd)
def test_reduce(self):
reduced = self.future.__reduce__()
self.assertEqual(Future, reduced[0])
def test_to_and_from_dict(self):
dictd = self.future.to_dict()
self.assertTrue('root_symbol' in dictd)
self.assertTrue('notice_date' in dictd)
self.assertTrue('expiration_date' in dictd)
self.assertTrue('contract_multiplier' in dictd)
from_dict = Future.from_dict(dictd)
self.assertTrue(isinstance(from_dict, Future))
self.assertEqual(self.future, from_dict)
def test_root_symbol(self):
self.assertEqual('OM', self.future.root_symbol)
class AssetFinderTestCase(TestCase):
def test_lookup_symbol_fuzzy(self):
as_of = pd.Timestamp('2013-01-01', tz='UTC')
frame = pd.DataFrame.from_records(
[
{
'sid': i,
'file_name': 'TEST@%d' % i,
'company_name': "company%d" % i,
'start_date_nano': as_of.value,
'end_date_nano': as_of.value,
'exchange': uuid.uuid4().hex,
}
for i in range(3)
]
)
finder = AssetFinder(frame, fuzzy_char='@')
asset_0, asset_1, asset_2 = (
finder.retrieve_asset(i) for i in range(3)
)
for i in range(2): # we do it twice to test for caching bugs
self.assertIsNone(finder.lookup_symbol('test', as_of))
self.assertEqual(
asset_1,
finder.lookup_symbol('test@1', as_of)
)
# Adding an unnecessary fuzzy shouldn't matter.
self.assertEqual(
asset_1,
finder.lookup_symbol('test@1', as_of, fuzzy=True)
)
# Shouldn't find this with no fuzzy_str passed.
self.assertIsNone(finder.lookup_symbol('test1', as_of))
# Should find exact match.
self.assertEqual(
asset_1,
finder.lookup_symbol('test1', as_of, fuzzy=True),
)
def test_lookup_symbol_resolve_multiple(self):
# Incrementing by two so that start and end dates for each
# generated Asset don't overlap (each Asset's end_date is the
# day after its start date.)
dates = pd.date_range('2013-01-01', freq='2D', periods=5, tz='UTC')
df = pd.DataFrame.from_records(
[
{
'sid': i,
'file_name': 'existing',
'company_name': 'existing',
'start_date_nano': date.value,
'end_date_nano': (date + timedelta(days=1)).value,
'exchange': 'NYSE',
}
for i, date in enumerate(dates)
]
)
finder = AssetFinder(df)
for _ in range(2): # Run checks twice to test for caching bugs.
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol_resolve_multiple('non_existing', dates[0])
with self.assertRaises(MultipleSymbolsFound):
finder.lookup_symbol_resolve_multiple('existing', None)
for i, date in enumerate(dates):
# Verify that we correctly resolve multiple symbols using
# the supplied date
result = finder.lookup_symbol_resolve_multiple(
'existing',
date,
)
self.assertEqual(result.symbol, 'existing')
self.assertEqual(result.sid, i)
@parameterized.expand(
build_lookup_generic_cases()
)
def test_lookup_generic(self, finder, symbols, reference_date, expected):
"""
Ensure that lookup_generic works with various permutations of inputs.
"""
results, missing = finder.lookup_generic(symbols, reference_date)
self.assertEqual(results, expected)
self.assertEqual(missing, [])
def test_lookup_generic_handle_missing(self):
data = pd.DataFrame.from_records(
[
# Sids that will be found when we do lookups.
{
'sid': 0,
'file_name': 'real',
'company_name': 'real',
'start_date_nano': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date_nano': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': '',
},
{
'sid': 1,
'file_name': 'also_real',
'company_name': 'also_real',
'start_date_nano': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date_nano': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': '',
},
# Sid whose end date is before our query date. We should
# still correctly find it.
{
'sid': 2,
'file_name': 'real_but_old',
'company_name': 'real_but_old',
'start_date_nano': pd.Timestamp('2002-1-1', tz='UTC'),
'end_date_nano': pd.Timestamp('2003-1-1', tz='UTC'),
'exchange': '',
},
                # Sid whose start date is after our query date. We should
                # not find it in the lookup results.
{
'sid': 3,
'file_name': 'real_but_in_the_future',
'company_name': 'real_but_in_the_future',
'start_date_nano': pd.Timestamp('2014-1-1', tz='UTC'),
'end_date_nano': pd.Timestamp('2020-1-1', tz='UTC'),
'exchange': 'THE FUTURE',
},
]
)
finder = AssetFinder(data)
results, missing = finder.lookup_generic(
['real', 1, 'fake', 'real_but_old', 'real_but_in_the_future'],
pd.Timestamp('2013-02-01', tz='UTC'),
)
self.assertEqual(len(results), 3)
self.assertEqual(results[0].symbol, 'real')
self.assertEqual(results[0].sid, 0)
self.assertEqual(results[1].symbol, 'also_real')
self.assertEqual(results[1].sid, 1)
self.assertEqual(len(missing), 2)
self.assertEqual(missing[0], 'fake')
self.assertEqual(missing[1], 'real_but_in_the_future')
def test_insert_metadata(self):
finder = AssetFinder()
finder.insert_metadata(0,
asset_type='equity',
start_date='2014-01-01',
end_date='2015-01-01',
symbol="PLAY",
foo_data="FOO",)
# Test proper insertion
equity = finder.retrieve_asset(0)
self.assertIsInstance(equity, Equity)
self.assertEqual('PLAY', equity.symbol)
self.assertEqual(pd.Timestamp('2015-01-01', tz='UTC'),
equity.end_date)
# Test invalid field
self.assertFalse('foo_data' in finder.metadata_cache[0])
def test_consume_metadata(self):
# Test dict consumption
finder = AssetFinder()
dict_to_consume = {0: {'symbol': 'PLAY'},
1: {'symbol': 'MSFT'}}
finder.consume_metadata(dict_to_consume)
equity = finder.retrieve_asset(0)
self.assertIsInstance(equity, Equity)
self.assertEqual('PLAY', equity.symbol)
finder = AssetFinder()
# Test dataframe consumption
df = pd.DataFrame(columns=['asset_name', 'exchange'], index=[0, 1])
df['asset_name'][0] = "Dave'N'Busters"
df['exchange'][0] = "NASDAQ"
df['asset_name'][1] = "Microsoft"
df['exchange'][1] = "NYSE"
finder.consume_metadata(df)
self.assertEqual('NASDAQ', finder.metadata_cache[0]['exchange'])
self.assertEqual('Microsoft', finder.metadata_cache[1]['asset_name'])
def test_consume_asset_as_identifier(self):
# Build some end dates
eq_end = pd.Timestamp('2012-01-01', tz='UTC')
fut_end = pd.Timestamp('2008-01-01', tz='UTC')
# Build some simple Assets
equity_asset = Equity(1, symbol="TESTEQ", end_date=eq_end)
future_asset = Future(200, symbol="TESTFUT", end_date=fut_end)
# Consume the Assets
finder = AssetFinder()
finder.consume_identifiers([equity_asset, future_asset])
# Test equality with newly built Assets
self.assertEqual(equity_asset, finder.retrieve_asset(1))
self.assertEqual(future_asset, finder.retrieve_asset(200))
self.assertEqual(eq_end, finder.retrieve_asset(1).end_date)
self.assertEqual(fut_end, finder.retrieve_asset(200).end_date)
def test_sid_assignment(self):
# This metadata does not contain SIDs
metadata = {'PLAY': {'symbol': 'PLAY'},
'MSFT': {'symbol': 'MSFT'}}
today = normalize_date(pd.Timestamp('2015-07-09', tz='UTC'))
# Build a finder that is allowed to assign sids
finder = AssetFinder(metadata=metadata,
allow_sid_assignment=True)
# Verify that Assets were built and different sids were assigned
play = finder.lookup_symbol('PLAY', today)
msft = finder.lookup_symbol('MSFT', today)
self.assertEqual('PLAY', play.symbol)
self.assertIsNotNone(play.sid)
self.assertNotEqual(play.sid, msft.sid)
def test_sid_assignment_failure(self):
# This metadata does not contain SIDs
metadata = {'PLAY': {'symbol': 'PLAY'},
'MSFT': {'symbol': 'MSFT'}}
# Build a finder that is not allowed to assign sids, asserting failure
with self.assertRaises(SidAssignmentError):
AssetFinder(metadata=metadata, allow_sid_assignment=False)
def test_security_dates_warning(self):
# Build an asset with an end_date
eq_end = pd.Timestamp('2012-01-01', tz='UTC')
equity_asset = Equity(1, symbol="TESTEQ", end_date=eq_end)
# Catch all warnings
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered
warnings.simplefilter("always")
equity_asset.security_start_date
equity_asset.security_end_date
equity_asset.security_name
# Verify the warning
self.assertEqual(3, len(w))
for warning in w:
self.assertTrue(issubclass(warning.category,
DeprecationWarning))
def test_lookup_future_chain(self):
metadata = {
# Notice day is today, so not valid
2: {
'symbol': 'ADN15',
'root_symbol': 'AD',
'asset_type': 'future',
'notice_date': pd.Timestamp('2015-05-14', tz='UTC'),
'start_date': pd.Timestamp('2015-01-01', tz='UTC')
},
1: {
'symbol': 'ADV15',
'root_symbol': 'AD',
'asset_type': 'future',
'notice_date': pd.Timestamp('2015-08-14', tz='UTC'),
'start_date': pd.Timestamp('2015-01-01', tz='UTC')
},
# Starts trading today, so should be valid.
0: {
'symbol': 'ADF16',
'root_symbol': 'AD',
'asset_type': 'future',
'notice_date': pd.Timestamp('2015-11-16', tz='UTC'),
'start_date': pd.Timestamp('2015-05-14', tz='UTC')
},
# Copy of the above future, but starts trading in August,
# so it isn't valid.
3: {
'symbol': 'ADF16',
'root_symbol': 'AD',
'asset_type': 'future',
'notice_date': pd.Timestamp('2015-11-16', tz='UTC'),
'start_date': pd.Timestamp('2015-08-01', tz='UTC')
},
}
finder = AssetFinder(metadata=metadata)
dt = pd.Timestamp('2015-05-14', tz='UTC')
last_year = pd.Timestamp('2014-01-01', tz='UTC')
first_day = pd.Timestamp('2015-01-01', tz='UTC')
# Check that we get the expected number of contracts, in the
# right order
ad_contracts = finder.lookup_future_chain('AD', dt, dt)
self.assertEqual(len(ad_contracts), 2)
self.assertEqual(ad_contracts[0].sid, 1)
self.assertEqual(ad_contracts[1].sid, 0)
# Check that we get nothing if our knowledge date is last year
ad_contracts = finder.lookup_future_chain('AD', dt, last_year)
self.assertEqual(len(ad_contracts), 0)
# Check that we get things that start on the knowledge date
ad_contracts = finder.lookup_future_chain('AD', dt, first_day)
self.assertEqual(len(ad_contracts), 1)
def test_map_identifier_index_to_sids(self):
# Build an empty finder and some Assets
dt = pd.Timestamp('2014-01-01', tz='UTC')
finder = AssetFinder()
asset1 = Equity(1, symbol="AAPL")
asset2 = Equity(2, symbol="GOOG")
asset200 = Future(200, symbol="CLK15")
asset201 = Future(201, symbol="CLM15")
# Check for correct mapping and types
pre_map = [asset1, asset2, asset200, asset201]
post_map = finder.map_identifier_index_to_sids(pre_map, dt)
self.assertListEqual([1, 2, 200, 201], post_map)
for sid in post_map:
self.assertIsInstance(sid, int)
# Change order and check mapping again
pre_map = [asset201, asset2, asset200, asset1]
post_map = finder.map_identifier_index_to_sids(pre_map, dt)
self.assertListEqual([201, 2, 200, 1], post_map)
@with_environment()
def test_compute_lifetimes(self, env=None):
num_assets = 4
trading_day = env.trading_day
first_start = pd.Timestamp('2015-04-01', tz='UTC')
frame = make_rotating_asset_info(
num_assets=num_assets,
first_start=first_start,
frequency=env.trading_day,
periods_between_starts=3,
asset_lifetime=5
)
finder = AssetFinder(frame)
all_dates = pd.date_range(
start=first_start,
end=frame.end_date.max(),
freq=trading_day,
)
for dates in all_subindices(all_dates):
expected_mask = full(
shape=(len(dates), num_assets),
fill_value=False,
dtype=bool,
)
for i, date in enumerate(dates):
it = frame[['start_date', 'end_date']].itertuples()
for j, start, end in it:
if start <= date <= end:
expected_mask[i, j] = True
# Filter out columns with all-empty columns.
expected_result = pd.DataFrame(
data=expected_mask,
index=dates,
columns=frame.sid.values,
)
actual_result = finder.lifetimes(dates)
assert_frame_equal(actual_result, expected_result)
class TestFutureChain(TestCase):
metadata = {
0: {
'symbol': 'CLG06',
'root_symbol': 'CL',
'asset_type': 'future',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2005-12-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-01-20', tz='UTC')},
1: {
'root_symbol': 'CL',
'symbol': 'CLK06',
'asset_type': 'future',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-03-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-04-20', tz='UTC')},
2: {
'symbol': 'CLQ06',
'root_symbol': 'CL',
'asset_type': 'future',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-06-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-07-20', tz='UTC')},
3: {
'symbol': 'CLX06',
'root_symbol': 'CL',
'asset_type': 'future',
'start_date': pd.Timestamp('2006-02-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-09-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-10-20', tz='UTC')}
}
asset_finder = AssetFinder(metadata=metadata)
def test_len(self):
""" Test the __len__ method of FutureChain.
"""
# None of the contracts have started yet.
cl = FutureChain(self.asset_finder, lambda: '2005-11-30', 'CL')
self.assertEqual(len(cl), 0)
# Sids 0, 1, & 2 have started, 3 has not yet started.
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
self.assertEqual(len(cl), 3)
# Sid 0 is still valid the day before its notice date.
cl = FutureChain(self.asset_finder, lambda: '2005-12-19', 'CL')
self.assertEqual(len(cl), 3)
# Sid 0 is now invalid, leaving only Sids 1 & 2 valid.
cl = FutureChain(self.asset_finder, lambda: '2005-12-20', 'CL')
self.assertEqual(len(cl), 2)
# Sid 3 has started, so 1, 2, & 3 are now valid.
cl = FutureChain(self.asset_finder, lambda: '2006-02-01', 'CL')
self.assertEqual(len(cl), 3)
# All contracts are no longer valid.
cl = FutureChain(self.asset_finder, lambda: '2006-09-20', 'CL')
self.assertEqual(len(cl), 0)
def test_getitem(self):
""" Test the __getitem__ method of FutureChain.
"""
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
self.assertEqual(cl[0], 0)
self.assertEqual(cl[1], 1)
self.assertEqual(cl[2], 2)
with self.assertRaises(IndexError):
cl[3]
cl = FutureChain(self.asset_finder, lambda: '2005-12-19', 'CL')
self.assertEqual(cl[0], 0)
cl = FutureChain(self.asset_finder, lambda: '2005-12-20', 'CL')
self.assertEqual(cl[0], 1)
cl = FutureChain(self.asset_finder, lambda: '2006-02-01', 'CL')
self.assertEqual(cl[-1], 3)
def test_root_symbols(self):
""" Test that different variations on root symbols are handled
as expected.
"""
# Make sure this successfully gets the chain for CL.
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
self.assertEqual(cl.root_symbol, 'CL')
# These root symbols don't exist, so RootSymbolNotFound should
# be raised immediately.
with self.assertRaises(RootSymbolNotFound):
FutureChain(self.asset_finder, lambda: '2005-12-01', 'CLZ')
with self.assertRaises(RootSymbolNotFound):
FutureChain(self.asset_finder, lambda: '2005-12-01', '')
def test_repr(self):
""" Test the __repr__ method of FutureChain.
"""
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
cl_feb = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL',
as_of_date='2006-02-01')
# The default chain should not include the as of date.
self.assertEqual(repr(cl), "FutureChain(root_symbol='CL')")
# An explicit as of date should show up in the repr.
self.assertEqual(
repr(cl_feb),
("FutureChain(root_symbol='CL', "
"as_of_date='2006-02-01 00:00:00+00:00')")
)
def test_as_of(self):
""" Test the as_of method of FutureChain.
"""
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
# Test that the as_of_date is set correctly to the future
feb = '2006-02-01'
cl_feb = cl.as_of(feb)
self.assertEqual(
cl_feb.as_of_date,
pd.Timestamp(feb, tz='UTC')
)
# Test that the as_of_date is set correctly to the past, with
# args of str, datetime.datetime, and pd.Timestamp.
feb_prev = '2005-02-01'
cl_feb_prev = cl.as_of(feb_prev)
self.assertEqual(
cl_feb_prev.as_of_date,
pd.Timestamp(feb_prev, tz='UTC')
)
feb_prev = datetime(year=2005, month=2, day=1)
cl_feb_prev = cl.as_of(feb_prev)
self.assertEqual(
cl_feb_prev.as_of_date,
pd.Timestamp(feb_prev, tz='UTC')
)
feb_prev = pd.Timestamp('2005-02-01')
cl_feb_prev = cl.as_of(feb_prev)
self.assertEqual(
cl_feb_prev.as_of_date,
pd.Timestamp(feb_prev, tz='UTC')
)
# The chain as of the current dt should always be the same as
        # the default chain. Tests date as str, pd.Timestamp, and
# datetime.datetime.
self.assertEqual(cl[0], cl.as_of('2005-12-01')[0])
self.assertEqual(cl[0], cl.as_of(pd.Timestamp('2005-12-01'))[0])
self.assertEqual(
cl[0],
cl.as_of(datetime(year=2005, month=12, day=1))[0]
)
def test_offset(self):
""" Test the offset method of FutureChain.
"""
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
# Test that an offset forward sets as_of_date as expected
self.assertEqual(
cl.offset('3 days').as_of_date,
cl.as_of_date + pd.Timedelta(days=3)
)
# Test that an offset backward sets as_of_date as expected, with
# time delta given as str, datetime.timedelta, and pd.Timedelta.
self.assertEqual(
cl.offset('-1000 days').as_of_date,
cl.as_of_date + pd.Timedelta(days=-1000)
)
self.assertEqual(
cl.offset(timedelta(days=-1000)).as_of_date,
cl.as_of_date + pd.Timedelta(days=-1000)
)
self.assertEqual(
cl.offset(pd.Timedelta('-1000 days')).as_of_date,
cl.as_of_date + pd.Timedelta(days=-1000)
)
# An offset of zero should give the original chain.
self.assertEqual(cl[0], cl.offset(0)[0])
self.assertEqual(cl[0], cl.offset("0 days")[0])
# A string that doesn't represent a time delta should raise a
# ValueError.
with self.assertRaises(ValueError):
cl.offset("blah")
| 35.933411
| 79
| 0.559934
|
import sys
from unittest import TestCase
from datetime import datetime, timedelta
import pickle
import uuid
import warnings
import pandas as pd
from pandas.tseries.tools import normalize_date
from pandas.util.testing import assert_frame_equal
from nose_parameterized import parameterized
from numpy import full
from zipline.assets import Asset, Equity, Future, AssetFinder
from zipline.assets.futures import FutureChain
from zipline.errors import (
SymbolNotFound,
MultipleSymbolsFound,
SidAssignmentError,
RootSymbolNotFound,
)
from zipline.finance.trading import with_environment
from zipline.utils.test_utils import (
all_subindices,
make_rotating_asset_info,
)
def build_lookup_generic_cases():
unique_start = pd.Timestamp('2013-01-01', tz='UTC')
unique_end = pd.Timestamp('2014-01-01', tz='UTC')
dupe_0_start = pd.Timestamp('2013-01-01', tz='UTC')
dupe_0_end = dupe_0_start + timedelta(days=1)
dupe_1_start = pd.Timestamp('2013-01-03', tz='UTC')
dupe_1_end = dupe_1_start + timedelta(days=1)
frame = pd.DataFrame.from_records(
[
{
'sid': 0,
'file_name': 'duplicated',
'company_name': 'duplicated_0',
'start_date_nano': dupe_0_start.value,
'end_date_nano': dupe_0_end.value,
'exchange': '',
},
{
'sid': 1,
'file_name': 'duplicated',
'company_name': 'duplicated_1',
'start_date_nano': dupe_1_start.value,
'end_date_nano': dupe_1_end.value,
'exchange': '',
},
{
'sid': 2,
'file_name': 'unique',
'company_name': 'unique',
'start_date_nano': unique_start.value,
'end_date_nano': unique_end.value,
'exchange': '',
},
],
)
finder = AssetFinder(metadata=frame)
dupe_0, dupe_1, unique = assets = [
finder.retrieve_asset(i)
for i in range(3)
]
dupe_0_start = dupe_0.start_date
dupe_1_start = dupe_1.start_date
cases = [
(finder, assets[0], None, assets[0]),
(finder, assets[1], None, assets[1]),
(finder, assets[2], None, assets[2]),
(finder, 0, None, assets[0]),
(finder, 1, None, assets[1]),
(finder, 2, None, assets[2]),
(finder, 'duplicated', dupe_0_start, dupe_0),
(finder, 'duplicated', dupe_1_start, dupe_1),
(finder, 'unique', unique_start, unique),
(finder, 'unique', None, unique),
(finder, assets, None, assets),
(finder, iter(assets), None, assets),
(finder, (0, 1), None, assets[:-1]),
(finder, iter((0, 1)), None, assets[:-1]),
(finder, ('duplicated', 'unique'), dupe_0_start, [dupe_0, unique]),
(finder, ('duplicated', 'unique'), dupe_1_start, [dupe_1, unique]),
(finder,
('duplicated', 2, 'unique', 1, dupe_1),
dupe_0_start,
[dupe_0, assets[2], unique, assets[1], dupe_1]),
]
return cases
class AssetTestCase(TestCase):
def test_asset_object(self):
self.assertEquals({5061: 'foo'}[Asset(5061)], 'foo')
self.assertEquals(Asset(5061), 5061)
self.assertEquals(5061, Asset(5061))
self.assertEquals(Asset(5061), Asset(5061))
self.assertEquals(int(Asset(5061)), 5061)
self.assertEquals(str(Asset(5061)), 'Asset(5061)')
def test_asset_is_pickleable(self):
s = Asset(
1337,
symbol="DOGE",
asset_name="DOGECOIN",
start_date=pd.Timestamp('2013-12-08 9:31AM', tz='UTC'),
end_date=pd.Timestamp('2014-06-25 11:21AM', tz='UTC'),
first_traded=pd.Timestamp('2013-12-08 9:31AM', tz='UTC'),
exchange='THE MOON',
)
s_unpickled = pickle.loads(pickle.dumps(s))
attrs_to_check = ['end_date',
'exchange',
'first_traded',
'end_date',
'asset_name',
'start_date',
'sid',
'start_date',
'symbol']
for attr in attrs_to_check:
self.assertEqual(getattr(s, attr), getattr(s_unpickled, attr))
def test_asset_comparisons(self):
s_23 = Asset(23)
s_24 = Asset(24)
self.assertEqual(s_23, s_23)
self.assertEqual(s_23, 23)
self.assertEqual(23, s_23)
self.assertNotEqual(s_23, s_24)
self.assertNotEqual(s_23, 24)
self.assertNotEqual(s_23, "23")
self.assertNotEqual(s_23, 23.5)
self.assertNotEqual(s_23, [])
self.assertNotEqual(s_23, None)
self.assertLess(s_23, s_24)
self.assertLess(s_23, 24)
self.assertGreater(24, s_23)
self.assertGreater(s_24, s_23)
def test_lt(self):
self.assertTrue(Asset(3) < Asset(4))
self.assertFalse(Asset(4) < Asset(4))
self.assertFalse(Asset(5) < Asset(4))
def test_le(self):
self.assertTrue(Asset(3) <= Asset(4))
self.assertTrue(Asset(4) <= Asset(4))
self.assertFalse(Asset(5) <= Asset(4))
def test_eq(self):
self.assertFalse(Asset(3) == Asset(4))
self.assertTrue(Asset(4) == Asset(4))
self.assertFalse(Asset(5) == Asset(4))
def test_ge(self):
self.assertFalse(Asset(3) >= Asset(4))
self.assertTrue(Asset(4) >= Asset(4))
self.assertTrue(Asset(5) >= Asset(4))
def test_gt(self):
self.assertFalse(Asset(3) > Asset(4))
self.assertFalse(Asset(4) > Asset(4))
self.assertTrue(Asset(5) > Asset(4))
def test_type_mismatch(self):
if sys.version_info.major < 3:
self.assertIsNotNone(Asset(3) < 'a')
self.assertIsNotNone('a' < Asset(3))
else:
with self.assertRaises(TypeError):
Asset(3) < 'a'
with self.assertRaises(TypeError):
'a' < Asset(3)
class TestFuture(TestCase):
future = Future(
2468,
symbol='OMH15',
root_symbol='OM',
notice_date=pd.Timestamp('2014-01-20', tz='UTC'),
expiration_date=pd.Timestamp('2014-02-20', tz='UTC'),
contract_multiplier=500
)
def test_str(self):
strd = self.future.__str__()
self.assertEqual("Future(2468 [OMH15])", strd)
def test_repr(self):
reprd = self.future.__repr__()
self.assertTrue("Future" in reprd)
self.assertTrue("2468" in reprd)
self.assertTrue("OMH15" in reprd)
self.assertTrue("root_symbol='OM'" in reprd)
self.assertTrue(("notice_date=Timestamp('2014-01-20 00:00:00+0000', "
"tz='UTC')") in reprd)
self.assertTrue("expiration_date=Timestamp('2014-02-20 00:00:00+0000'"
in reprd)
self.assertTrue("contract_multiplier=500" in reprd)
def test_reduce(self):
reduced = self.future.__reduce__()
self.assertEqual(Future, reduced[0])
def test_to_and_from_dict(self):
dictd = self.future.to_dict()
self.assertTrue('root_symbol' in dictd)
self.assertTrue('notice_date' in dictd)
self.assertTrue('expiration_date' in dictd)
self.assertTrue('contract_multiplier' in dictd)
from_dict = Future.from_dict(dictd)
self.assertTrue(isinstance(from_dict, Future))
self.assertEqual(self.future, from_dict)
def test_root_symbol(self):
self.assertEqual('OM', self.future.root_symbol)
class AssetFinderTestCase(TestCase):
def test_lookup_symbol_fuzzy(self):
as_of = pd.Timestamp('2013-01-01', tz='UTC')
frame = pd.DataFrame.from_records(
[
{
'sid': i,
'file_name': 'TEST@%d' % i,
'company_name': "company%d" % i,
'start_date_nano': as_of.value,
'end_date_nano': as_of.value,
'exchange': uuid.uuid4().hex,
}
for i in range(3)
]
)
finder = AssetFinder(frame, fuzzy_char='@')
asset_0, asset_1, asset_2 = (
finder.retrieve_asset(i) for i in range(3)
)
        for i in range(2):
            self.assertIsNone(finder.lookup_symbol('test', as_of))
self.assertEqual(
asset_1,
finder.lookup_symbol('test@1', as_of)
)
self.assertEqual(
asset_1,
finder.lookup_symbol('test@1', as_of, fuzzy=True)
)
# Shouldn't find this with no fuzzy_str passed.
self.assertIsNone(finder.lookup_symbol('test1', as_of))
self.assertEqual(
asset_1,
finder.lookup_symbol('test1', as_of, fuzzy=True),
)
def test_lookup_symbol_resolve_multiple(self):
dates = pd.date_range('2013-01-01', freq='2D', periods=5, tz='UTC')
df = pd.DataFrame.from_records(
[
{
'sid': i,
'file_name': 'existing',
'company_name': 'existing',
'start_date_nano': date.value,
'end_date_nano': (date + timedelta(days=1)).value,
'exchange': 'NYSE',
}
for i, date in enumerate(dates)
]
)
finder = AssetFinder(df)
        for _ in range(2):
            with self.assertRaises(SymbolNotFound):
finder.lookup_symbol_resolve_multiple('non_existing', dates[0])
with self.assertRaises(MultipleSymbolsFound):
finder.lookup_symbol_resolve_multiple('existing', None)
for i, date in enumerate(dates):
result = finder.lookup_symbol_resolve_multiple(
'existing',
date,
)
self.assertEqual(result.symbol, 'existing')
self.assertEqual(result.sid, i)
@parameterized.expand(
build_lookup_generic_cases()
)
def test_lookup_generic(self, finder, symbols, reference_date, expected):
results, missing = finder.lookup_generic(symbols, reference_date)
self.assertEqual(results, expected)
self.assertEqual(missing, [])
def test_lookup_generic_handle_missing(self):
data = pd.DataFrame.from_records(
[
{
'sid': 0,
'file_name': 'real',
'company_name': 'real',
'start_date_nano': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date_nano': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': '',
},
{
'sid': 1,
'file_name': 'also_real',
'company_name': 'also_real',
'start_date_nano': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date_nano': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': '',
},
{
'sid': 2,
'file_name': 'real_but_old',
'company_name': 'real_but_old',
'start_date_nano': pd.Timestamp('2002-1-1', tz='UTC'),
'end_date_nano': pd.Timestamp('2003-1-1', tz='UTC'),
'exchange': '',
},
{
'sid': 3,
'file_name': 'real_but_in_the_future',
'company_name': 'real_but_in_the_future',
'start_date_nano': pd.Timestamp('2014-1-1', tz='UTC'),
'end_date_nano': pd.Timestamp('2020-1-1', tz='UTC'),
'exchange': 'THE FUTURE',
},
]
)
finder = AssetFinder(data)
results, missing = finder.lookup_generic(
['real', 1, 'fake', 'real_but_old', 'real_but_in_the_future'],
pd.Timestamp('2013-02-01', tz='UTC'),
)
self.assertEqual(len(results), 3)
self.assertEqual(results[0].symbol, 'real')
self.assertEqual(results[0].sid, 0)
self.assertEqual(results[1].symbol, 'also_real')
self.assertEqual(results[1].sid, 1)
self.assertEqual(len(missing), 2)
self.assertEqual(missing[0], 'fake')
self.assertEqual(missing[1], 'real_but_in_the_future')
def test_insert_metadata(self):
finder = AssetFinder()
finder.insert_metadata(0,
asset_type='equity',
start_date='2014-01-01',
end_date='2015-01-01',
symbol="PLAY",
foo_data="FOO",)
equity = finder.retrieve_asset(0)
self.assertIsInstance(equity, Equity)
self.assertEqual('PLAY', equity.symbol)
self.assertEqual(pd.Timestamp('2015-01-01', tz='UTC'),
equity.end_date)
self.assertFalse('foo_data' in finder.metadata_cache[0])
def test_consume_metadata(self):
finder = AssetFinder()
dict_to_consume = {0: {'symbol': 'PLAY'},
1: {'symbol': 'MSFT'}}
finder.consume_metadata(dict_to_consume)
equity = finder.retrieve_asset(0)
self.assertIsInstance(equity, Equity)
self.assertEqual('PLAY', equity.symbol)
finder = AssetFinder()
df = pd.DataFrame(columns=['asset_name', 'exchange'], index=[0, 1])
df['asset_name'][0] = "Dave'N'Busters"
df['exchange'][0] = "NASDAQ"
df['asset_name'][1] = "Microsoft"
df['exchange'][1] = "NYSE"
finder.consume_metadata(df)
self.assertEqual('NASDAQ', finder.metadata_cache[0]['exchange'])
self.assertEqual('Microsoft', finder.metadata_cache[1]['asset_name'])
def test_consume_asset_as_identifier(self):
eq_end = pd.Timestamp('2012-01-01', tz='UTC')
fut_end = pd.Timestamp('2008-01-01', tz='UTC')
equity_asset = Equity(1, symbol="TESTEQ", end_date=eq_end)
future_asset = Future(200, symbol="TESTFUT", end_date=fut_end)
finder = AssetFinder()
finder.consume_identifiers([equity_asset, future_asset])
self.assertEqual(equity_asset, finder.retrieve_asset(1))
self.assertEqual(future_asset, finder.retrieve_asset(200))
self.assertEqual(eq_end, finder.retrieve_asset(1).end_date)
self.assertEqual(fut_end, finder.retrieve_asset(200).end_date)
def test_sid_assignment(self):
metadata = {'PLAY': {'symbol': 'PLAY'},
'MSFT': {'symbol': 'MSFT'}}
today = normalize_date(pd.Timestamp('2015-07-09', tz='UTC'))
finder = AssetFinder(metadata=metadata,
allow_sid_assignment=True)
play = finder.lookup_symbol('PLAY', today)
msft = finder.lookup_symbol('MSFT', today)
self.assertEqual('PLAY', play.symbol)
self.assertIsNotNone(play.sid)
self.assertNotEqual(play.sid, msft.sid)
def test_sid_assignment_failure(self):
metadata = {'PLAY': {'symbol': 'PLAY'},
'MSFT': {'symbol': 'MSFT'}}
with self.assertRaises(SidAssignmentError):
AssetFinder(metadata=metadata, allow_sid_assignment=False)
def test_security_dates_warning(self):
eq_end = pd.Timestamp('2012-01-01', tz='UTC')
equity_asset = Equity(1, symbol="TESTEQ", end_date=eq_end)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
equity_asset.security_start_date
equity_asset.security_end_date
equity_asset.security_name
self.assertEqual(3, len(w))
for warning in w:
self.assertTrue(issubclass(warning.category,
DeprecationWarning))
def test_lookup_future_chain(self):
metadata = {
2: {
'symbol': 'ADN15',
'root_symbol': 'AD',
'asset_type': 'future',
'notice_date': pd.Timestamp('2015-05-14', tz='UTC'),
'start_date': pd.Timestamp('2015-01-01', tz='UTC')
},
1: {
'symbol': 'ADV15',
'root_symbol': 'AD',
'asset_type': 'future',
'notice_date': pd.Timestamp('2015-08-14', tz='UTC'),
'start_date': pd.Timestamp('2015-01-01', tz='UTC')
},
0: {
'symbol': 'ADF16',
'root_symbol': 'AD',
'asset_type': 'future',
'notice_date': pd.Timestamp('2015-11-16', tz='UTC'),
'start_date': pd.Timestamp('2015-05-14', tz='UTC')
},
3: {
'symbol': 'ADF16',
'root_symbol': 'AD',
'asset_type': 'future',
'notice_date': pd.Timestamp('2015-11-16', tz='UTC'),
'start_date': pd.Timestamp('2015-08-01', tz='UTC')
},
}
finder = AssetFinder(metadata=metadata)
dt = pd.Timestamp('2015-05-14', tz='UTC')
last_year = pd.Timestamp('2014-01-01', tz='UTC')
first_day = pd.Timestamp('2015-01-01', tz='UTC')
# Check that we get the expected number of contracts, in the
# right order
ad_contracts = finder.lookup_future_chain('AD', dt, dt)
self.assertEqual(len(ad_contracts), 2)
self.assertEqual(ad_contracts[0].sid, 1)
self.assertEqual(ad_contracts[1].sid, 0)
# Check that we get nothing if our knowledge date is last year
ad_contracts = finder.lookup_future_chain('AD', dt, last_year)
self.assertEqual(len(ad_contracts), 0)
# Check that we get things that start on the knowledge date
ad_contracts = finder.lookup_future_chain('AD', dt, first_day)
self.assertEqual(len(ad_contracts), 1)
def test_map_identifier_index_to_sids(self):
# Build an empty finder and some Assets
dt = pd.Timestamp('2014-01-01', tz='UTC')
finder = AssetFinder()
asset1 = Equity(1, symbol="AAPL")
asset2 = Equity(2, symbol="GOOG")
asset200 = Future(200, symbol="CLK15")
asset201 = Future(201, symbol="CLM15")
# Check for correct mapping and types
pre_map = [asset1, asset2, asset200, asset201]
post_map = finder.map_identifier_index_to_sids(pre_map, dt)
self.assertListEqual([1, 2, 200, 201], post_map)
for sid in post_map:
self.assertIsInstance(sid, int)
# Change order and check mapping again
pre_map = [asset201, asset2, asset200, asset1]
post_map = finder.map_identifier_index_to_sids(pre_map, dt)
self.assertListEqual([201, 2, 200, 1], post_map)
@with_environment()
def test_compute_lifetimes(self, env=None):
num_assets = 4
trading_day = env.trading_day
first_start = pd.Timestamp('2015-04-01', tz='UTC')
frame = make_rotating_asset_info(
num_assets=num_assets,
first_start=first_start,
frequency=env.trading_day,
periods_between_starts=3,
asset_lifetime=5
)
finder = AssetFinder(frame)
all_dates = pd.date_range(
start=first_start,
end=frame.end_date.max(),
freq=trading_day,
)
for dates in all_subindices(all_dates):
expected_mask = full(
shape=(len(dates), num_assets),
fill_value=False,
dtype=bool,
)
for i, date in enumerate(dates):
it = frame[['start_date', 'end_date']].itertuples()
for j, start, end in it:
if start <= date <= end:
expected_mask[i, j] = True
# Filter out columns with all-empty columns.
expected_result = pd.DataFrame(
data=expected_mask,
index=dates,
columns=frame.sid.values,
)
actual_result = finder.lifetimes(dates)
assert_frame_equal(actual_result, expected_result)
class TestFutureChain(TestCase):
metadata = {
0: {
'symbol': 'CLG06',
'root_symbol': 'CL',
'asset_type': 'future',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2005-12-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-01-20', tz='UTC')},
1: {
'root_symbol': 'CL',
'symbol': 'CLK06',
'asset_type': 'future',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-03-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-04-20', tz='UTC')},
2: {
'symbol': 'CLQ06',
'root_symbol': 'CL',
'asset_type': 'future',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-06-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-07-20', tz='UTC')},
3: {
'symbol': 'CLX06',
'root_symbol': 'CL',
'asset_type': 'future',
'start_date': pd.Timestamp('2006-02-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-09-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-10-20', tz='UTC')}
}
asset_finder = AssetFinder(metadata=metadata)
def test_len(self):
# None of the contracts have started yet.
cl = FutureChain(self.asset_finder, lambda: '2005-11-30', 'CL')
self.assertEqual(len(cl), 0)
# Sids 0, 1, & 2 have started, 3 has not yet started.
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
self.assertEqual(len(cl), 3)
# Sid 0 is still valid the day before its notice date.
cl = FutureChain(self.asset_finder, lambda: '2005-12-19', 'CL')
self.assertEqual(len(cl), 3)
# Sid 0 is now invalid, leaving only Sids 1 & 2 valid.
cl = FutureChain(self.asset_finder, lambda: '2005-12-20', 'CL')
self.assertEqual(len(cl), 2)
# Sid 3 has started, so 1, 2, & 3 are now valid.
cl = FutureChain(self.asset_finder, lambda: '2006-02-01', 'CL')
self.assertEqual(len(cl), 3)
# All contracts are no longer valid.
cl = FutureChain(self.asset_finder, lambda: '2006-09-20', 'CL')
self.assertEqual(len(cl), 0)
def test_getitem(self):
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
self.assertEqual(cl[0], 0)
self.assertEqual(cl[1], 1)
self.assertEqual(cl[2], 2)
with self.assertRaises(IndexError):
cl[3]
cl = FutureChain(self.asset_finder, lambda: '2005-12-19', 'CL')
self.assertEqual(cl[0], 0)
cl = FutureChain(self.asset_finder, lambda: '2005-12-20', 'CL')
self.assertEqual(cl[0], 1)
cl = FutureChain(self.asset_finder, lambda: '2006-02-01', 'CL')
self.assertEqual(cl[-1], 3)
def test_root_symbols(self):
# Make sure this successfully gets the chain for CL.
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
self.assertEqual(cl.root_symbol, 'CL')
# These root symbols don't exist, so RootSymbolNotFound should
with self.assertRaises(RootSymbolNotFound):
FutureChain(self.asset_finder, lambda: '2005-12-01', 'CLZ')
with self.assertRaises(RootSymbolNotFound):
FutureChain(self.asset_finder, lambda: '2005-12-01', '')
def test_repr(self):
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
cl_feb = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL',
as_of_date='2006-02-01')
self.assertEqual(repr(cl), "FutureChain(root_symbol='CL')")
self.assertEqual(
repr(cl_feb),
("FutureChain(root_symbol='CL', "
"as_of_date='2006-02-01 00:00:00+00:00')")
)
def test_as_of(self):
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
feb = '2006-02-01'
cl_feb = cl.as_of(feb)
self.assertEqual(
cl_feb.as_of_date,
pd.Timestamp(feb, tz='UTC')
)
feb_prev = '2005-02-01'
cl_feb_prev = cl.as_of(feb_prev)
self.assertEqual(
cl_feb_prev.as_of_date,
pd.Timestamp(feb_prev, tz='UTC')
)
feb_prev = datetime(year=2005, month=2, day=1)
cl_feb_prev = cl.as_of(feb_prev)
self.assertEqual(
cl_feb_prev.as_of_date,
pd.Timestamp(feb_prev, tz='UTC')
)
feb_prev = pd.Timestamp('2005-02-01')
cl_feb_prev = cl.as_of(feb_prev)
self.assertEqual(
cl_feb_prev.as_of_date,
pd.Timestamp(feb_prev, tz='UTC')
)
self.assertEqual(cl[0], cl.as_of('2005-12-01')[0])
self.assertEqual(cl[0], cl.as_of(pd.Timestamp('2005-12-01'))[0])
self.assertEqual(
cl[0],
cl.as_of(datetime(year=2005, month=12, day=1))[0]
)
def test_offset(self):
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
self.assertEqual(
cl.offset('3 days').as_of_date,
cl.as_of_date + pd.Timedelta(days=3)
)
self.assertEqual(
cl.offset('-1000 days').as_of_date,
cl.as_of_date + pd.Timedelta(days=-1000)
)
self.assertEqual(
cl.offset(timedelta(days=-1000)).as_of_date,
cl.as_of_date + pd.Timedelta(days=-1000)
)
self.assertEqual(
cl.offset(pd.Timedelta('-1000 days')).as_of_date,
cl.as_of_date + pd.Timedelta(days=-1000)
)
self.assertEqual(cl[0], cl.offset(0)[0])
self.assertEqual(cl[0], cl.offset("0 days")[0])
# ValueError.
with self.assertRaises(ValueError):
cl.offset("blah")
| true
| true
|
f70ac6e62fb9f9e5e0c7c2c31fe3b1b5c0bbd9d6
| 539
|
py
|
Python
|
manage.py
|
Yaawei/allfeed
|
1739c975e541eead4c14b7b4fc28ccf755c356f3
|
[
"MIT"
] | null | null | null |
manage.py
|
Yaawei/allfeed
|
1739c975e541eead4c14b7b4fc28ccf755c356f3
|
[
"MIT"
] | null | null | null |
manage.py
|
Yaawei/allfeed
|
1739c975e541eead4c14b7b4fc28ccf755c356f3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "allfeed.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 33.6875
| 73
| 0.686456
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "allfeed.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| true
| true
|
f70ac71ee9ad98039917cae3b7458fec58f7ca1d
| 760
|
py
|
Python
|
mvpsite/users/constants.py
|
mianamir/advance_django_rest_framework_project
|
3870f2dbe7b585a236928f90c1792cd337ce8911
|
[
"MIT"
] | null | null | null |
mvpsite/users/constants.py
|
mianamir/advance_django_rest_framework_project
|
3870f2dbe7b585a236928f90c1792cd337ce8911
|
[
"MIT"
] | null | null | null |
mvpsite/users/constants.py
|
mianamir/advance_django_rest_framework_project
|
3870f2dbe7b585a236928f90c1792cd337ce8911
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
__author__ = "Amir Savvy"
__copyright__ = "Copyright 2021, MVP Vending Machine Project"
__credits__ = ["amir savvy"]
__license__ = "GPL"
__version__ = "1.0.1"
__maintainer__ = "Amir Savvy"
__email__ = "mianamirlahore@gmail.com"
__status__ = "Production"
# User info
TEST_NORMAL_USER_EMAIL = f"normal@user.com"
TEST_SUPER_USER_EMAIL = f"super@user.com"
TEST_PASSWORD = f"@#$%123456)(*!@#$"
ADMIN = 1
SELLER = 2
BUYER = 3
AMOUNT_DATA = (5, 10, 20, 50, 100)
UNSAFE_REQUEST_METHODS = ('POST', 'PUT', 'PATCH', 'DELETE')
SAFE_REQUEST_METHODS = ('GET', 'HEAD', 'OPTIONS')
EMPTY_RESPONSE = dict()
MESSAGE_KEY = f'message'
ERROR_MESSAGE_KEY = f'error_message'
DATA_KEY = f'data'
IS_SUCCESSFULL = "is_successfull"
IS_FAILED = "is_failed"
| 21.111111
| 61
| 0.717105
|
__author__ = "Amir Savvy"
__copyright__ = "Copyright 2021, MVP Vending Machine Project"
__credits__ = ["amir savvy"]
__license__ = "GPL"
__version__ = "1.0.1"
__maintainer__ = "Amir Savvy"
__email__ = "mianamirlahore@gmail.com"
__status__ = "Production"
TEST_NORMAL_USER_EMAIL = f"normal@user.com"
TEST_SUPER_USER_EMAIL = f"super@user.com"
TEST_PASSWORD = f"@#$%123456)(*!@#$"
ADMIN = 1
SELLER = 2
BUYER = 3
AMOUNT_DATA = (5, 10, 20, 50, 100)
UNSAFE_REQUEST_METHODS = ('POST', 'PUT', 'PATCH', 'DELETE')
SAFE_REQUEST_METHODS = ('GET', 'HEAD', 'OPTIONS')
EMPTY_RESPONSE = dict()
MESSAGE_KEY = f'message'
ERROR_MESSAGE_KEY = f'error_message'
DATA_KEY = f'data'
IS_SUCCESSFULL = "is_successfull"
IS_FAILED = "is_failed"
| true
| true
|
f70ac738976aa113d492dc6d741bdbfabbc75b3f
| 852
|
py
|
Python
|
air_pollution_death_rate_related/scripts/air_pollution/feature_generating.py
|
nghitrampham/air_pollution_death_rate_related
|
3fd72b9684e8362de5706ba37c1d90b844d4afe0
|
[
"MIT"
] | null | null | null |
air_pollution_death_rate_related/scripts/air_pollution/feature_generating.py
|
nghitrampham/air_pollution_death_rate_related
|
3fd72b9684e8362de5706ba37c1d90b844d4afe0
|
[
"MIT"
] | 15
|
2019-12-10T02:05:58.000Z
|
2022-03-12T00:06:38.000Z
|
air_pollution_death_rate_related/scripts/air_pollution/feature_generating.py
|
nghitrampham/CSE583_FinalProject
|
3fd72b9684e8362de5706ba37c1d90b844d4afe0
|
[
"MIT"
] | 1
|
2020-06-04T17:48:21.000Z
|
2020-06-04T17:48:21.000Z
|
"""
This module is mainly used to conduct feature engineering for the air quality index prediction model.
"""
import warnings
import helpers
warnings.filterwarnings('ignore')
if __name__ == '__main__':
PATH = r'air_pollution_death_rate_related/data/data_air_raw/daily_aqi_by_county_'
### use most recent 3 years to train model
RAW_DATA = helpers.read_raw_data(PATH, [2016, 2017, 2018])
DATA = helpers.data_cleaning(RAW_DATA) ### clean data before doing feature engineering
for county_name in list(DATA["state_county"].unique()): #### we do feature engineering
#### on each county independently
#### feature engineering for model
df = (helpers.feature_engineering_for_aqi(DATA, 30, county_name,\
"air_pollution_death_rate_related/data/county_features_data/county_features_train/"))
| 42.6
| 96
| 0.725352
|
import warnings
import helpers
warnings.filterwarnings('ignore')
if __name__ == '__main__':
PATH = r'air_pollution_death_rate_related/data/data_air_raw/daily_aqi_by_county_'
RAW_DATA = helpers.read_raw_data(PATH, [2016, 2017, 2018])
DATA = helpers.data_cleaning(RAW_DATA)
    for county_name in list(DATA["state_county"].unique()):
        df = (helpers.feature_engineering_for_aqi(DATA, 30, county_name,\
            "air_pollution_death_rate_related/data/county_features_data/county_features_train/"))
| true
| true
|
f70ac98cc6fb632c2f2d00ce1693c59f019da7a1
| 2,537
|
py
|
Python
|
xlsxwriter/test/comparison/test_autofilter09.py
|
Aeon1/XlsxWriter
|
6871b6c3fe6c294632054ea91f23d9e27068bcc1
|
[
"BSD-2-Clause-FreeBSD"
] | 2
|
2019-07-25T06:08:09.000Z
|
2019-11-01T02:33:56.000Z
|
xlsxwriter/test/comparison/test_autofilter09.py
|
Aeon1/XlsxWriter
|
6871b6c3fe6c294632054ea91f23d9e27068bcc1
|
[
"BSD-2-Clause-FreeBSD"
] | 13
|
2019-07-14T00:29:05.000Z
|
2019-11-26T06:16:46.000Z
|
xlsxwriter/test/comparison/test_autofilter09.py
|
Aeon1/XlsxWriter
|
6871b6c3fe6c294632054ea91f23d9e27068bcc1
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2019, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('autofilter09.xlsx')
self.set_text_file('autofilter_data.txt')
def test_create_file(self):
"""
Test the creation of a simple XlsxWriter file with an autofilter.
This test checks a filter list.
"""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
# Set the autofilter.
worksheet.autofilter('A1:D51')
# Add filter criteria.
worksheet.filter_column_list(0, ['East', 'South', 'North'])
# Open a text file with autofilter example data.
textfile = open(self.txt_filename)
# Read the headers from the first line of the input file.
headers = textfile.readline().strip("\n").split()
# Write out the headers.
worksheet.write_row('A1', headers)
# Start writing data after the headers.
row = 1
# Read the rest of the text file and write it to the worksheet.
for line in textfile:
# Split the input data based on whitespace.
data = line.strip("\n").split()
# Convert the number data from the text file.
for i, item in enumerate(data):
try:
data[i] = float(item)
except ValueError:
pass
# Simulate a blank cell in the data.
if row == 6:
data[0] = ''
# Get some of the field data.
region = data[0]
# Check for rows that match the filter.
if region == 'North' or region == 'South' or region == 'East':
# Row matches the filter, no further action required.
pass
else:
# We need to hide rows that don't match the filter.
worksheet.set_row(row, options={'hidden': True})
# Write out the row data.
worksheet.write_row(row, 0, data)
# Move on to the next worksheet row.
row += 1
textfile.close()
workbook.close()
self.assertExcelEqual()
| 28.829545
| 79
| 0.551439
|
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
def setUp(self):
self.set_filename('autofilter09.xlsx')
self.set_text_file('autofilter_data.txt')
def test_create_file(self):
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.autofilter('A1:D51')
worksheet.filter_column_list(0, ['East', 'South', 'North'])
textfile = open(self.txt_filename)
headers = textfile.readline().strip("\n").split()
worksheet.write_row('A1', headers)
row = 1
for line in textfile:
data = line.strip("\n").split()
for i, item in enumerate(data):
try:
data[i] = float(item)
except ValueError:
pass
if row == 6:
data[0] = ''
region = data[0]
if region == 'North' or region == 'South' or region == 'East':
pass
else:
worksheet.set_row(row, options={'hidden': True})
# Write out the row data.
worksheet.write_row(row, 0, data)
# Move on to the next worksheet row.
row += 1
textfile.close()
workbook.close()
self.assertExcelEqual()
| true
| true
|
f70ac9c888e434188cbef3a72ff9b7b53e5fafd7
| 6,079
|
py
|
Python
|
moai/metadata/didl.py
|
TPY17/moai
|
6a57069489bcbb0d084f3220bfae5b5d7aac945d
|
[
"BSD-3-Clause"
] | 10
|
2015-05-10T21:23:04.000Z
|
2020-07-01T05:49:15.000Z
|
moai/metadata/didl.py
|
TPY17/moai
|
6a57069489bcbb0d084f3220bfae5b5d7aac945d
|
[
"BSD-3-Clause"
] | 4
|
2015-01-13T20:53:51.000Z
|
2022-03-15T10:28:51.000Z
|
moai/metadata/didl.py
|
TPY17/moai
|
6a57069489bcbb0d084f3220bfae5b5d7aac945d
|
[
"BSD-3-Clause"
] | 11
|
2015-04-08T13:29:28.000Z
|
2021-06-25T10:31:27.000Z
|
from lxml.builder import ElementMaker
from moai.metadata.mods import NL_MODS, XSI_NS
class DIDL(object):
"""A metadata prefix implementing the DARE DIDL metadata format
this format is registered under the name "didl"
Note that this format re-uses oai_dc and mods formats that come with
MOAI by default
"""
def __init__(self, prefix, config, db):
self.prefix = prefix
self.config = config
self.db = db
self.ns = {'didl': "urn:mpeg:mpeg21:2002:02-DIDL-NS",
'dii': "urn:mpeg:mpeg21:2002:01-DII-NS",
'dip': "urn:mpeg:mpeg21:2005:01-DIP-NS",
'dcterms': "http://purl.org/dc/terms/",
'xsi': "http://www.w3.org/2001/XMLSchema-instance",
'rdf': "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
'dc': 'http://purl.org/dc/elements/1.1/',
}
self.schemas = {'didl':'http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-21_schema_files/did/didl.xsd',
'dii': 'http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-21_schema_files/dii/dii.xsd',
'dip': 'http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-21_schema_files/dip/dip.xsd'}
def get_namespace(self):
return self.ns[self.prefix]
def get_schema_location(self):
return self.schemas[self.prefix]
def __call__(self, element, metadata):
data = metadata.record
DIDL = ElementMaker(namespace=self.ns['didl'], nsmap=self.ns)
DII = ElementMaker(namespace=self.ns['dii'])
DIP = ElementMaker(namespace=self.ns['dip'])
RDF = ElementMaker(namespace=self.ns['rdf'])
DCTERMS = ElementMaker(namespace=self.ns['dcterms'])
oai_url = (self.config.url+'?verb=GetRecord&'
'metadataPrefix=%s&identifier=%s' % (
self.prefix,
data['id']))
id_url = data['metadata'].get('url', [None])[0]
# generate mods for this feed
mods_data = DIDL.Resource(mimeType="application/xml")
NL_MODS('mods', self.config, self.db)(mods_data, metadata)
asset_data = []
descriptive_metadata = RDF.type()
descriptive_metadata.attrib['{%s}resource' % self.ns['rdf']] = (
'info:eu-repo/semantics/descriptiveMetadata')
didl = DIDL.DIDL(
DIDL.Item(
DIDL.Descriptor(
DIDL.Statement(
DCTERMS.modified(data['modified'].isoformat().split('.')[0]),
mimeType="application/xml"
)
),
DIDL.Component(
DIDL.Resource(ref=id_url or oai_url,mimeType="application/xml")
),
DIDL.Item(
DIDL.Descriptor(
DIDL.Statement(descriptive_metadata, mimeType="application/xml")
),
DIDL.Component(
DIDL.Descriptor(
DIDL.Statement("mods", mimeType="text/plain")),
mods_data)
),
)
)
object_file = RDF.type()
object_file.attrib['{%s}resource' % self.ns['rdf']] = (
'info:eu-repo/semantics/objectFile')
for root_item in didl:
for asset in data['metadata'].get('asset', []):
url = asset['url']
if not url.startswith('http://'):
url = self.config.url.rstrip('/') + '/' + url.lstrip('/')
item = DIDL.Item(
DIDL.Descriptor(
DIDL.Statement(object_file, mimeType="application/xml")
)
)
access = asset.get('access')
if access == 'open':
access = (
'http://purl.org/eprint/accessRights/OpenAccess')
elif access == 'restricted':
access = (
'http://purl.org/eprint/accessRights/RestrictedAccess')
elif access == 'closed':
access = (
'http://purl.org/eprint/accessRights/ClosedAccess')
if access:
item.append(
DIDL.Descriptor(
DIDL.Statement(DCTERMS.accessRights(access),
mimeType="application/xml")))
for modified in asset.get('modified', []):
item.append(
DIDL.Descriptor(
DIDL.Statement(DCTERMS.modified(modified),
mimeType="application/xml")))
item.append(
DIDL.Component(
DIDL.Resource(mimeType=asset['mimetype'],
ref=url)
)
)
root_item.append(item)
break
human_start_page = RDF.type()
human_start_page.attrib['{%s}resource' % self.ns['rdf']] = (
'info:eu-repo/semantics/humanStartPage')
if data['metadata'].get('url'):
item = DIDL.Item(
DIDL.Descriptor(
DIDL.Statement(human_start_page, mimeType="application/xml")
),
DIDL.Component(
DIDL.Resource(mimeType="text/html", ref=data['metadata']['url'][0])
)
)
root_item.append(item)
didl.attrib['{%s}schemaLocation' % XSI_NS] = (
'%s %s %s %s %s %s' % (self.ns['didl'],
self.schemas['didl'],
self.ns['dii'],
self.schemas['dii'],
self.ns['dip'],
self.schemas['dip']))
element.append(didl)
| 39.474026
| 124
| 0.481165
|
from lxml.builder import ElementMaker
from moai.metadata.mods import NL_MODS, XSI_NS
class DIDL(object):
def __init__(self, prefix, config, db):
self.prefix = prefix
self.config = config
self.db = db
self.ns = {'didl': "urn:mpeg:mpeg21:2002:02-DIDL-NS",
'dii': "urn:mpeg:mpeg21:2002:01-DII-NS",
'dip': "urn:mpeg:mpeg21:2005:01-DIP-NS",
'dcterms': "http://purl.org/dc/terms/",
'xsi': "http://www.w3.org/2001/XMLSchema-instance",
'rdf': "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
'dc': 'http://purl.org/dc/elements/1.1/',
}
self.schemas = {'didl':'http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-21_schema_files/did/didl.xsd',
'dii': 'http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-21_schema_files/dii/dii.xsd',
'dip': 'http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-21_schema_files/dip/dip.xsd'}
def get_namespace(self):
return self.ns[self.prefix]
def get_schema_location(self):
return self.schemas[self.prefix]
def __call__(self, element, metadata):
data = metadata.record
DIDL = ElementMaker(namespace=self.ns['didl'], nsmap=self.ns)
DII = ElementMaker(namespace=self.ns['dii'])
DIP = ElementMaker(namespace=self.ns['dip'])
RDF = ElementMaker(namespace=self.ns['rdf'])
DCTERMS = ElementMaker(namespace=self.ns['dcterms'])
oai_url = (self.config.url+'?verb=GetRecord&'
'metadataPrefix=%s&identifier=%s' % (
self.prefix,
data['id']))
id_url = data['metadata'].get('url', [None])[0]
mods_data = DIDL.Resource(mimeType="application/xml")
NL_MODS('mods', self.config, self.db)(mods_data, metadata)
asset_data = []
descriptive_metadata = RDF.type()
descriptive_metadata.attrib['{%s}resource' % self.ns['rdf']] = (
'info:eu-repo/semantics/descriptiveMetadata')
didl = DIDL.DIDL(
DIDL.Item(
DIDL.Descriptor(
DIDL.Statement(
DCTERMS.modified(data['modified'].isoformat().split('.')[0]),
mimeType="application/xml"
)
),
DIDL.Component(
DIDL.Resource(ref=id_url or oai_url,mimeType="application/xml")
),
DIDL.Item(
DIDL.Descriptor(
DIDL.Statement(descriptive_metadata, mimeType="application/xml")
),
DIDL.Component(
DIDL.Descriptor(
DIDL.Statement("mods", mimeType="text/plain")),
mods_data)
),
)
)
object_file = RDF.type()
object_file.attrib['{%s}resource' % self.ns['rdf']] = (
'info:eu-repo/semantics/objectFile')
for root_item in didl:
for asset in data['metadata'].get('asset', []):
url = asset['url']
if not url.startswith('http://'):
url = self.config.url.rstrip('/') + '/' + url.lstrip('/')
item = DIDL.Item(
DIDL.Descriptor(
DIDL.Statement(object_file, mimeType="application/xml")
)
)
access = asset.get('access')
if access == 'open':
access = (
'http://purl.org/eprint/accessRights/OpenAccess')
elif access == 'restricted':
access = (
'http://purl.org/eprint/accessRights/RestrictedAccess')
elif access == 'closed':
access = (
'http://purl.org/eprint/accessRights/ClosedAccess')
if access:
item.append(
DIDL.Descriptor(
DIDL.Statement(DCTERMS.accessRights(access),
mimeType="application/xml")))
for modified in asset.get('modified', []):
item.append(
DIDL.Descriptor(
DIDL.Statement(DCTERMS.modified(modified),
mimeType="application/xml")))
item.append(
DIDL.Component(
DIDL.Resource(mimeType=asset['mimetype'],
ref=url)
)
)
root_item.append(item)
break
human_start_page = RDF.type()
human_start_page.attrib['{%s}resource' % self.ns['rdf']] = (
'info:eu-repo/semantics/humanStartPage')
if data['metadata'].get('url'):
item = DIDL.Item(
DIDL.Descriptor(
DIDL.Statement(human_start_page, mimeType="application/xml")
),
DIDL.Component(
DIDL.Resource(mimeType="text/html", ref=data['metadata']['url'][0])
)
)
root_item.append(item)
didl.attrib['{%s}schemaLocation' % XSI_NS] = (
'%s %s %s %s %s %s' % (self.ns['didl'],
self.schemas['didl'],
self.ns['dii'],
self.schemas['dii'],
self.ns['dip'],
self.schemas['dip']))
element.append(didl)
| true
| true
|
f70acb1d9d6ab4d5e2a9f26d917e68d79dd1a235
| 4,081
|
py
|
Python
|
pyleecan/Generator/run_generate_classes.py
|
stephane-eisen/pyleecan
|
8444b8131c9eff11a616da8277fb1f280c8f70e5
|
[
"Apache-2.0"
] | 1
|
2021-07-08T01:27:24.000Z
|
2021-07-08T01:27:24.000Z
|
pyleecan/Generator/run_generate_classes.py
|
ecs-kev/pyleecan
|
1faedde4b24acc6361fa1fdd4e980eaec4ca3a62
|
[
"Apache-2.0"
] | null | null | null |
pyleecan/Generator/run_generate_classes.py
|
ecs-kev/pyleecan
|
1faedde4b24acc6361fa1fdd4e980eaec4ca3a62
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
from os.path import dirname, abspath, normpath, join, realpath
from os import listdir, remove, system
import json
from datetime import datetime
begin = len(normpath(abspath(join(dirname(__file__), "../.."))))
end = len(normpath(abspath(join(dirname(__file__), ".."))))
MAIN_DIR = dirname(realpath(__file__))
package_name = MAIN_DIR[begin + 1 : end]
# Add the directory to the python path
sys.path.append(MAIN_DIR[:begin])
exec(
"from "
+ package_name
+ ".Generator.ClassGenerator.class_generator import generate_class"
)
exec("from " + package_name + ".Generator.read_fct import read_all")
exec("from " + package_name + ".definitions import MAIN_DIR, DOC_DIR, INT_DIR")
# List of the main packages (to sort the classes)
PACKAGE_LIST = ["Geometry", "Machine", "Material", "Slot", "Import"]
def generate_code(root_path, gen_dict=None):
"""Generate pyleecan Classes code according to doc in root_path
Parameters
----------
root_path : str
Path to the main folder of Pyleecan
gen_dict : dict
Generation dictionary (contains all the csv data)
Returns
-------
None
"""
CLASS_DIR = join(root_path, "Classes")
FUNC_DIR = join(root_path, "Functions")
DOC_DIR = join(root_path, "Generator", "ClassesRef")
print("Reading classes csv in: " + DOC_DIR)
print("Saving generated files in: " + CLASS_DIR)
path = __file__[__file__.index(package_name) :]
path = path.replace("\\", "/")
# Deleting all the previous class
print("Deleting old class files...")
for file_name in listdir(CLASS_DIR):
if file_name[0] != "_":
remove(join(CLASS_DIR, file_name))
# A file to import every classes quickly
import_file = open(join(CLASS_DIR, "import_all.py"), "w")
import_file.write("# -*- coding: utf-8 -*-\n\n")
import_file.write('"""File generated by generate_code() - \n')
import_file.write('WARNING! All changes made in this file will be lost!\n"""\n\n')
# A file to select the constructor according to a string
load_file = open(join(FUNC_DIR, "load_switch.py"), "w")
load_file.write("# -*- coding: utf-8 -*-\n")
load_file.write('"""File generated by generate_code() - \n')
load_file.write('WARNING! All changes made in this file will be lost!\n"""\n\n')
load_file.write("from ..Classes.import_all import *\n\n")
load_file.write("load_switch = {\n")
# Read all the csv files
if gen_dict is None:
gen_dict = read_all(DOC_DIR)
# Generate all the class files (sorted to remove "commit noise")
for class_name, _ in iter(sorted(list(gen_dict.items()))):
import_file.write(
"from ..Classes." + class_name + " import " + class_name + "\n"
)
load_file.write(' "' + class_name + '": ' + class_name + ",\n")
print("Generation of " + class_name + " class")
generate_class(gen_dict, class_name, CLASS_DIR)
import_file.close()
load_file.write("}\n")
load_file.close()
print("Generation of load_switch.py")
print("Generation of import_all.py")
# Save gen_dict
class_dict_file = join(CLASS_DIR, "Class_Dict.json")
with open(class_dict_file, "w") as json_file:
json.dump(gen_dict, json_file, sort_keys=True, indent=4, separators=(",", ": "))
if __name__ == "__main__":
gen_dict = read_all(DOC_DIR, is_internal=False, in_path=INT_DIR)
generate_code(MAIN_DIR, gen_dict)
# Run black
try:
import black
system('"{}" -m black .'.format(sys.executable))
if black.__version__.split(".")[0] != "20":
print("\n############################################")
print(
"WARNING: The official version of black for pyleecan is 20, please update your black version"
)
print("############################################\n")
except ImportError:
print("/!\\ Please install and run black (version 20) /!\\")
now = datetime.now()
print("End at: ", now.strftime("%H:%M:%S"))
| 34.880342
| 109
| 0.626562
|
import sys
from os.path import dirname, abspath, normpath, join, realpath
from os import listdir, remove, system
import json
from datetime import datetime
begin = len(normpath(abspath(join(dirname(__file__), "../.."))))
end = len(normpath(abspath(join(dirname(__file__), ".."))))
MAIN_DIR = dirname(realpath(__file__))
package_name = MAIN_DIR[begin + 1 : end]
sys.path.append(MAIN_DIR[:begin])
exec(
"from "
+ package_name
+ ".Generator.ClassGenerator.class_generator import generate_class"
)
exec("from " + package_name + ".Generator.read_fct import read_all")
exec("from " + package_name + ".definitions import MAIN_DIR, DOC_DIR, INT_DIR")
PACKAGE_LIST = ["Geometry", "Machine", "Material", "Slot", "Import"]
def generate_code(root_path, gen_dict=None):
CLASS_DIR = join(root_path, "Classes")
FUNC_DIR = join(root_path, "Functions")
DOC_DIR = join(root_path, "Generator", "ClassesRef")
print("Reading classes csv in: " + DOC_DIR)
print("Saving generated files in: " + CLASS_DIR)
path = __file__[__file__.index(package_name) :]
path = path.replace("\\", "/")
print("Deleting old class files...")
for file_name in listdir(CLASS_DIR):
if file_name[0] != "_":
remove(join(CLASS_DIR, file_name))
import_file = open(join(CLASS_DIR, "import_all.py"), "w")
import_file.write("# -*- coding: utf-8 -*-\n\n")
import_file.write('"""File generated by generate_code() - \n')
import_file.write('WARNING! All changes made in this file will be lost!\n"""\n\n')
load_file = open(join(FUNC_DIR, "load_switch.py"), "w")
load_file.write("# -*- coding: utf-8 -*-\n")
load_file.write('"""File generated by generate_code() - \n')
load_file.write('WARNING! All changes made in this file will be lost!\n"""\n\n')
load_file.write("from ..Classes.import_all import *\n\n")
load_file.write("load_switch = {\n")
if gen_dict is None:
gen_dict = read_all(DOC_DIR)
for class_name, _ in iter(sorted(list(gen_dict.items()))):
import_file.write(
"from ..Classes." + class_name + " import " + class_name + "\n"
)
load_file.write(' "' + class_name + '": ' + class_name + ",\n")
print("Generation of " + class_name + " class")
generate_class(gen_dict, class_name, CLASS_DIR)
import_file.close()
load_file.write("}\n")
load_file.close()
print("Generation of load_switch.py")
print("Generation of import_all.py")
class_dict_file = join(CLASS_DIR, "Class_Dict.json")
with open(class_dict_file, "w") as json_file:
json.dump(gen_dict, json_file, sort_keys=True, indent=4, separators=(",", ": "))
if __name__ == "__main__":
gen_dict = read_all(DOC_DIR, is_internal=False, in_path=INT_DIR)
generate_code(MAIN_DIR, gen_dict)
try:
import black
system('"{}" -m black .'.format(sys.executable))
if black.__version__.split(".")[0] != "20":
print("\n############################################")
print(
"WARNING: The official version of black for pyleecan is 20, please update your black version"
)
print("############################################\n")
except ImportError:
print("/!\\ Please install and run black (version 20) /!\\")
now = datetime.now()
print("End at: ", now.strftime("%H:%M:%S"))
| true
| true
|
f70accc5c7667aed9b51008137aea4509675a89c
| 432
|
py
|
Python
|
CONTENT/DS-n-Algos/ALGO/__PYTHON/word_count.py
|
Bryan-Guner-Backup/DS-ALGO-OFFICIAL
|
2ef3b4518389274da25f526c928d880b4e4ec26f
|
[
"Apache-2.0"
] | null | null | null |
CONTENT/DS-n-Algos/ALGO/__PYTHON/word_count.py
|
Bryan-Guner-Backup/DS-ALGO-OFFICIAL
|
2ef3b4518389274da25f526c928d880b4e4ec26f
|
[
"Apache-2.0"
] | null | null | null |
CONTENT/DS-n-Algos/ALGO/__PYTHON/word_count.py
|
Bryan-Guner-Backup/DS-ALGO-OFFICIAL
|
2ef3b4518389274da25f526c928d880b4e4ec26f
|
[
"Apache-2.0"
] | null | null | null |
file=open("sample.txt","r")
d=dict()
for lines in file:
lines=lines.strip()
lines=lines.lower()
words=lines.split(" ")
for word in words:
if word in d:
d[word]=d[word]+1
else:
d[word]=1
find=str(input("enter the word to count: "))
find=find.lower()
if find in list(d.keys()):
print(f"{find} : "+ str(d.get(find)))
else:
print("word not present!! ")
| 22.736842
| 45
| 0.534722
|
file=open("sample.txt","r")
d=dict()
for lines in file:
lines=lines.strip()
lines=lines.lower()
words=lines.split(" ")
for word in words:
if word in d:
d[word]=d[word]+1
else:
d[word]=1
find=str(input("enter the word to count: "))
find=find.lower()
if find in list(d.keys()):
print(f"{find} : "+ str(d.get(find)))
else:
print("word not present!! ")
| true
| true
|
f70acd48249c6db38e2012a0abcc3a8d09d9c8f7
| 2,069
|
py
|
Python
|
pybamm/models/submodels/thermal/x_full/x_full_no_current_collector.py
|
danieljtait/PyBaMM
|
f9d6143770e4a01099f06e3574142424730f731a
|
[
"BSD-3-Clause"
] | null | null | null |
pybamm/models/submodels/thermal/x_full/x_full_no_current_collector.py
|
danieljtait/PyBaMM
|
f9d6143770e4a01099f06e3574142424730f731a
|
[
"BSD-3-Clause"
] | null | null | null |
pybamm/models/submodels/thermal/x_full/x_full_no_current_collector.py
|
danieljtait/PyBaMM
|
f9d6143770e4a01099f06e3574142424730f731a
|
[
"BSD-3-Clause"
] | null | null | null |
#
# Class for full thermal submodel
#
import pybamm
from .base_x_full import BaseModel
class NoCurrentCollector(BaseModel):
"""Class for full x-direction thermal submodel without current collectors
Parameters
----------
param : parameter class
The parameters to use for this submodel
**Extends:** :class:`pybamm.thermal.x_full.BaseModel`
"""
def __init__(self, param):
super().__init__(param)
def set_rhs(self, variables):
T = variables["Cell temperature"]
q = variables["Heat flux"]
Q = variables["Total heating"]
self.rhs = {
T: (-pybamm.div(q) / self.param.delta ** 2 + self.param.B * Q)
/ (self.param.C_th * self.param.rho_k)
}
def set_boundary_conditions(self, variables):
T = variables["Cell temperature"]
T_n_left = pybamm.boundary_value(T, "left")
T_p_right = pybamm.boundary_value(T, "right")
T_amb = variables["Ambient temperature"]
self.boundary_conditions = {
T: {
"left": (
self.param.h * (T_n_left - T_amb) / self.param.lambda_n,
"Neumann",
),
"right": (
-self.param.h * (T_p_right - T_amb) / self.param.lambda_p,
"Neumann",
),
}
}
def _current_collector_heating(self, variables):
"""Returns zeros for current collector heat source terms"""
Q_s_cn = pybamm.Scalar(0)
Q_s_cp = pybamm.Scalar(0)
return Q_s_cn, Q_s_cp
def _yz_average(self, var):
"""
Computes the y-z average by integration over y and z
In this case this is just equal to the input variable
"""
return var
def _x_average(self, var, var_cn, var_cp):
"""
Computes the X-average over the whole cell *not* including current
collectors. This overwrites the default behaviour of 'base_thermal'.
"""
return pybamm.x_average(var)
| 28.736111
| 78
| 0.569841
|
import pybamm
from .base_x_full import BaseModel
class NoCurrentCollector(BaseModel):
def __init__(self, param):
super().__init__(param)
def set_rhs(self, variables):
T = variables["Cell temperature"]
q = variables["Heat flux"]
Q = variables["Total heating"]
self.rhs = {
T: (-pybamm.div(q) / self.param.delta ** 2 + self.param.B * Q)
/ (self.param.C_th * self.param.rho_k)
}
def set_boundary_conditions(self, variables):
T = variables["Cell temperature"]
T_n_left = pybamm.boundary_value(T, "left")
T_p_right = pybamm.boundary_value(T, "right")
T_amb = variables["Ambient temperature"]
self.boundary_conditions = {
T: {
"left": (
self.param.h * (T_n_left - T_amb) / self.param.lambda_n,
"Neumann",
),
"right": (
-self.param.h * (T_p_right - T_amb) / self.param.lambda_p,
"Neumann",
),
}
}
def _current_collector_heating(self, variables):
Q_s_cn = pybamm.Scalar(0)
Q_s_cp = pybamm.Scalar(0)
return Q_s_cn, Q_s_cp
def _yz_average(self, var):
return var
def _x_average(self, var, var_cn, var_cp):
return pybamm.x_average(var)
| true
| true
|
f70acd8bcb229f5b9f12c1e63b3ec56b34848bb9
| 1,023
|
py
|
Python
|
leetcode/array/medium/combinationSum.py
|
joway/PyAlgorithm
|
0420fbcbebad3b746db63b9e9a5878b4af8ad6ac
|
[
"MIT"
] | 1
|
2016-08-23T14:24:44.000Z
|
2016-08-23T14:24:44.000Z
|
leetcode/array/medium/combinationSum.py
|
joway/PyAlgorithm
|
0420fbcbebad3b746db63b9e9a5878b4af8ad6ac
|
[
"MIT"
] | null | null | null |
leetcode/array/medium/combinationSum.py
|
joway/PyAlgorithm
|
0420fbcbebad3b746db63b9e9a5878b4af8ad6ac
|
[
"MIT"
] | null | null | null |
""" Summary
"""
class Solution(object):
"""
Problem:
https://leetcode.com/problems/combination-sum/
Example:
given candidate set [2, 3, 6, 7] and target 7,
A solution set is:
[
[7],
[2, 2, 3]
]
"""
def combinationSum(self, candidates, target):
"""
:type candidates: List[int]
:type target: int
:rtype: List[List[int]]
"""
candidates.sort()
rets = []
for i in candidates:
if i > target:
break
elif i == target:
rets.append([i])
else:
rets += ([sorted([i] + x) for x in self.combinationSum(candidates, target - i)])
result = []
for r in rets:
if r not in result:
result.append(r)
return result
if __name__ == '__main__':
candidates = [2, 3, 6, 7]
target = 7
result = Solution().combinationSum(candidates, 7)
print(result)
| 21.765957
| 96
| 0.474096
|
class Solution(object):
def combinationSum(self, candidates, target):
candidates.sort()
rets = []
for i in candidates:
if i > target:
break
elif i == target:
rets.append([i])
else:
rets += ([sorted([i] + x) for x in self.combinationSum(candidates, target - i)])
result = []
for r in rets:
if r not in result:
result.append(r)
return result
if __name__ == '__main__':
candidates = [2, 3, 6, 7]
target = 7
result = Solution().combinationSum(candidates, 7)
print(result)
| true
| true
|
f70ace3bc9ff4b2e0804d716b66fba3887fa8cf9
| 582
|
py
|
Python
|
kpop_project/data_preprocessing_execution.py
|
chunjuihsu/chunjuihsu.github.io
|
2256b7d340393351a484215f3d23841944d4b3ea
|
[
"CC-BY-3.0"
] | null | null | null |
kpop_project/data_preprocessing_execution.py
|
chunjuihsu/chunjuihsu.github.io
|
2256b7d340393351a484215f3d23841944d4b3ea
|
[
"CC-BY-3.0"
] | null | null | null |
kpop_project/data_preprocessing_execution.py
|
chunjuihsu/chunjuihsu.github.io
|
2256b7d340393351a484215f3d23841944d4b3ea
|
[
"CC-BY-3.0"
] | null | null | null |
import pandas as pd
import youtube_api_comments_to_mongodb as ym
import text_classification_and_sentiment_analysis as ta
dbpw = 'kpop'
collection_name = 'comments'
data = ym.mongo_to_dataframe(dbpw, collection_name)
allcomments, englishcomments = ta.dataframe_preparation(data)
tt_set, englishcomments = ta.classify_facilitator(englishcomments, 300,
['quality', 'nationalist_ethnicist', 'kpop'])
allcomments.to_pickle('allcomments.pickle')
englishcomments.to_pickle('englishcomments.pickle')
tt_set.to_pickle('tt_set.pickle')
| 30.631579
| 78
| 0.752577
|
import pandas as pd
import youtube_api_comments_to_mongodb as ym
import text_classification_and_sentiment_analysis as ta
dbpw = 'kpop'
collection_name = 'comments'
data = ym.mongo_to_dataframe(dbpw, collection_name)
allcomments, englishcomments = ta.dataframe_preparation(data)
tt_set, englishcomments = ta.classify_facilitator(englishcomments, 300,
['quality', 'nationalist_ethnicist', 'kpop'])
allcomments.to_pickle('allcomments.pickle')
englishcomments.to_pickle('englishcomments.pickle')
tt_set.to_pickle('tt_set.pickle')
| true
| true
|
f70acec2724ccc45198a62b6a42519afcda8cad5
| 205
|
py
|
Python
|
basic-lang-fun/oop.py
|
diegopacheco/python-playground
|
8e6ba427df6922fb578c2328babbf3466687ccbf
|
[
"Unlicense"
] | null | null | null |
basic-lang-fun/oop.py
|
diegopacheco/python-playground
|
8e6ba427df6922fb578c2328babbf3466687ccbf
|
[
"Unlicense"
] | null | null | null |
basic-lang-fun/oop.py
|
diegopacheco/python-playground
|
8e6ba427df6922fb578c2328babbf3466687ccbf
|
[
"Unlicense"
] | null | null | null |
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
def __str__(self):
return "{" + self.name + " " + str(self.age) + "}"
p1 = Person("John", 36)
print(p1)
| 18.636364
| 58
| 0.556098
|
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
def __str__(self):
return "{" + self.name + " " + str(self.age) + "}"
p1 = Person("John", 36)
print(p1)
| true
| true
|
f70acf373ecdb330c0ce11c3f2115bd7f4f066b1
| 6,494
|
py
|
Python
|
toolbox/sampling/__init__.py
|
keunhong/toolbox
|
e8d1dadab4d9ccf8d78fe86ea933819ac6a07fca
|
[
"MIT"
] | null | null | null |
toolbox/sampling/__init__.py
|
keunhong/toolbox
|
e8d1dadab4d9ccf8d78fe86ea933819ac6a07fca
|
[
"MIT"
] | null | null | null |
toolbox/sampling/__init__.py
|
keunhong/toolbox
|
e8d1dadab4d9ccf8d78fe86ea933819ac6a07fca
|
[
"MIT"
] | null | null | null |
import logging
import random
from typing import List, Tuple
import numpy as np
from skimage.transform import resize
from scipy.ndimage import zoom
from toolbox import images
from toolbox.images import crop, mask_bbox
from .poisson_disk import sample_poisson_uniform
logger = logging.getLogger(__name__)
class PatchType:
S2F_MASKED_BLACK = 'cropped_scaled_to_fit'
S2F_MASKED_WHITE = 'cropped_scaled_to_fit_white'
S2F = 'scaled_to_fit'
RANDOM = 'random2'
def sample_poisson_mask(mask, r, k):
ymin, ymax, xmin, xmax = mask_bbox(mask)
height = ymax - ymin
width = xmax - xmin
points = np.array(sample_poisson_uniform(height, width, r, k,
mask[ymin:ymax, xmin:xmax]))
points[:, 0] += ymin
points[:, 1] += xmin
points = np.floor(points).astype(int)
return points
def generate_dense_bboxes(
mask: np.ndarray,
scale=0.23,
min_dist=0.091):
mask_height, mask_width = mask.shape
min_length = min(mask_height, mask_width)
patch_sample_size = scale * min_length
centers = sample_poisson_mask(mask, min_length * min_dist, 1000)
half = int(patch_sample_size / 2)
bboxes = []
for center in centers:
ycent, xcent = center
bbox = (ycent - half,
ycent + half + 1,
xcent - half,
xcent + half + 1)
if (bbox[0] >= 0 and bbox[1] < mask_height
and bbox[2] >= 0 and bbox[3] < mask_width):
bboxes.append(bbox)
print('bboxes={} centers={}, mask_size={}, min_dist={}'.format(
len(bboxes), len(centers), mask.shape, min_length * min_dist))
return bboxes
def random_crops(image, patch_size, num_crops):
border_mask = np.ones(image.shape[:2], dtype=bool)
left = patch_size/2
right = image.shape[1] - patch_size/2
top = patch_size/2
bottom = image.shape[0] - patch_size/2
border_mask[:, :left] = False
border_mask[:, right:] = False
border_mask[:top, :] = False
border_mask[bottom:, :] = False
yinds, xinds = np.where(border_mask)
bboxes = []
for i in range(num_crops):
point_idx = np.random.randint(0, len(yinds))
ycent, xcent = yinds[point_idx], xinds[point_idx]
half = int(patch_size / 2)
# Just squash the patch if it's out of bounds.
bbox = (ycent - half,
ycent + half + 1,
xcent - half,
xcent + half + 1)
bboxes.append(bbox)
return bboxes_to_patches(image, bboxes, patch_size)
def generate_random_bboxes(mask: np.ndarray, scale_range=(1.0, 1.0),
num_patches=5, fixed_size=None):
"""
Generates random bounding boxes at random scales with centroid within the
mask.
    :param mask: The constrained area for the centroid of the patch.
    :param scale_range: The (min, max) scale (multiple of the minimum length
    of the input mask, or of fixed_size when given) of the sampling.
    :param num_patches: Number of patches to generate.
    :param fixed_size: Optional fixed base size to scale instead of the mask's
    minimum length.
    :return: Bounding boxes and the scale used for each.
"""
mask_height, mask_width = mask.shape[:2]
min_length = min(mask_height, mask_width)
yinds, xinds = np.where(mask)
patch_bboxes = []
patch_scales = []
tries = 0
while len(patch_bboxes) < num_patches:
scale = random.uniform(*scale_range)
patch_scales.append(scale)
patch_size = scale * fixed_size if fixed_size else int(scale * min_length)
point_idx = np.random.randint(0, len(yinds))
ycent, xcent = yinds[point_idx], xinds[point_idx]
half = int(patch_size / 2)
# Just squash the patch if it's out of bounds.
if (ycent - half < 0 or ycent + half > mask.shape[0] or
xcent - half < 0 or xcent + half > mask.shape[1]):
if tries < 100:
tries += 1
continue
bbox = (max(ycent - half, 0),
min(ycent + half + 1, mask.shape[0]),
max(xcent - half, 0),
min(xcent + half + 1, mask.shape[1]))
patch_bboxes.append(bbox)
return patch_bboxes, patch_scales
def bboxes_to_patches(im: np.ndarray,
bboxes: List[Tuple[int, int, int, int]],
patch_size: int, use_pil=False):
"""
Converts bounding boxes to actual patches. Patches are all resized to the
patch size regardless of the original bounding box size.
:param im: To crop patch from.
:param bboxes: Boxes defining the patch.
:param patch_size: Patch size to return.
:return: Image patches.
"""
patches = []
for bbox in bboxes:
cropped = crop(im, bbox)
if cropped.shape[0] != patch_size or cropped.shape[1] != patch_size:
scale = [patch_size/cropped.shape[0], patch_size/cropped.shape[1]]
if len(im.shape) == 3:
scale.append(1.0)
if use_pil:
cropped = resize(cropped, (patch_size, patch_size)) \
.astype(dtype=np.float32)
else:
cropped = zoom(cropped, scale, im.dtype, order=1)
patches.append(cropped)
return patches
def compute_mask_tight_patch(im: np.ndarray,
mask: np.ndarray,
patch_size: int):
"""
Computes a patch which contains all the pixels active in the mask scaled to
the patch size.
:param im:
:param mask:
:param patch_size:
:return:
"""
bbox = images.compute_mask_bbox(mask)
cropped = images.crop(im, bbox)
resized = imresize(cropped, (patch_size, patch_size, cropped.shape[2]))
return resized
def compute_minmax_thickness(mask):
max_width = 0
max_height = 0
for row_id in range(mask.shape[0]):
row = mask[row_id, :]
split_locs = np.where(np.diff(row) != 0)[0] + 1
for segment in (np.split(row, split_locs)):
if segment[0] != 0:
max_width = max(max_width, len(segment))
for col_id in range(mask.shape[1]):
col = mask[:, col_id]
split_locs = np.where(np.diff(col) != 0)[0] + 1
for segment in (np.split(col, split_locs)):
if segment[0] != 0:
max_height = max(max_height, len(segment))
return min(max_width, max_height), max(max_width, max_height)
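# A minimal usage sketch for the sampling helpers above (not part of the
# original module). The synthetic mask, image, and all parameter values are
# assumptions for illustration; the import path follows this file's location
# (toolbox/sampling/__init__.py).
import numpy as np
from toolbox.sampling import generate_random_bboxes, bboxes_to_patches

# Synthetic 256x256 RGB image and a circular mask constraining patch centroids.
yy, xx = np.mgrid[:256, :256]
mask = (yy - 128) ** 2 + (xx - 128) ** 2 < 80 ** 2
image = np.random.rand(256, 256, 3).astype(np.float32)

# Sample 8 boxes at 20-40% of the mask's shorter side, then cut patches that
# are all resized to 64x64 regardless of the sampled box size.
bboxes, scales = generate_random_bboxes(mask, scale_range=(0.2, 0.4), num_patches=8)
patches = bboxes_to_patches(image, bboxes, patch_size=64)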
| 33.474227
| 82
| 0.59963
|
import logging
import random
from typing import List, Tuple
import numpy as np
from skimage.transform import resize
from scipy.ndimage import zoom
from toolbox import images
from toolbox.images import crop, mask_bbox
from .poisson_disk import sample_poisson_uniform
logger = logging.getLogger(__name__)
class PatchType:
S2F_MASKED_BLACK = 'cropped_scaled_to_fit'
S2F_MASKED_WHITE = 'cropped_scaled_to_fit_white'
S2F = 'scaled_to_fit'
RANDOM = 'random2'
def sample_poisson_mask(mask, r, k):
ymin, ymax, xmin, xmax = mask_bbox(mask)
height = ymax - ymin
width = xmax - xmin
points = np.array(sample_poisson_uniform(height, width, r, k,
mask[ymin:ymax, xmin:xmax]))
points[:, 0] += ymin
points[:, 1] += xmin
points = np.floor(points).astype(int)
return points
def generate_dense_bboxes(
mask: np.ndarray,
scale=0.23,
min_dist=0.091):
mask_height, mask_width = mask.shape
min_length = min(mask_height, mask_width)
patch_sample_size = scale * min_length
centers = sample_poisson_mask(mask, min_length * min_dist, 1000)
half = int(patch_sample_size / 2)
bboxes = []
for center in centers:
ycent, xcent = center
bbox = (ycent - half,
ycent + half + 1,
xcent - half,
xcent + half + 1)
if (bbox[0] >= 0 and bbox[1] < mask_height
and bbox[2] >= 0 and bbox[3] < mask_width):
bboxes.append(bbox)
print('bboxes={} centers={}, mask_size={}, min_dist={}'.format(
len(bboxes), len(centers), mask.shape, min_length * min_dist))
return bboxes
def random_crops(image, patch_size, num_crops):
border_mask = np.ones(image.shape[:2], dtype=bool)
left = patch_size/2
right = image.shape[1] - patch_size/2
top = patch_size/2
bottom = image.shape[0] - patch_size/2
border_mask[:, :left] = False
border_mask[:, right:] = False
border_mask[:top, :] = False
border_mask[bottom:, :] = False
yinds, xinds = np.where(border_mask)
bboxes = []
for i in range(num_crops):
point_idx = np.random.randint(0, len(yinds))
ycent, xcent = yinds[point_idx], xinds[point_idx]
half = int(patch_size / 2)
bbox = (ycent - half,
ycent + half + 1,
xcent - half,
xcent + half + 1)
bboxes.append(bbox)
return bboxes_to_patches(image, bboxes, patch_size)
def generate_random_bboxes(mask: np.ndarray, scale_range=(1.0, 1.0),
num_patches=5, fixed_size=None):
mask_height, mask_width = mask.shape[:2]
min_length = min(mask_height, mask_width)
yinds, xinds = np.where(mask)
patch_bboxes = []
patch_scales = []
tries = 0
while len(patch_bboxes) < num_patches:
scale = random.uniform(*scale_range)
patch_scales.append(scale)
patch_size = scale * fixed_size if fixed_size else int(scale * min_length)
point_idx = np.random.randint(0, len(yinds))
ycent, xcent = yinds[point_idx], xinds[point_idx]
half = int(patch_size / 2)
# Just squash the patch if it's out of bounds.
if (ycent - half < 0 or ycent + half > mask.shape[0] or
xcent - half < 0 or xcent + half > mask.shape[1]):
if tries < 100:
tries += 1
continue
bbox = (max(ycent - half, 0),
min(ycent + half + 1, mask.shape[0]),
max(xcent - half, 0),
min(xcent + half + 1, mask.shape[1]))
patch_bboxes.append(bbox)
return patch_bboxes, patch_scales
def bboxes_to_patches(im: np.ndarray,
bboxes: List[Tuple[int, int, int, int]],
patch_size: int, use_pil=False):
patches = []
for bbox in bboxes:
cropped = crop(im, bbox)
if cropped.shape[0] != patch_size or cropped.shape[1] != patch_size:
scale = [patch_size/cropped.shape[0], patch_size/cropped.shape[1]]
if len(im.shape) == 3:
scale.append(1.0)
if use_pil:
cropped = resize(cropped, (patch_size, patch_size)) \
.astype(dtype=np.float32)
else:
cropped = zoom(cropped, scale, im.dtype, order=1)
patches.append(cropped)
return patches
def compute_mask_tight_patch(im: np.ndarray,
mask: np.ndarray,
patch_size: int):
bbox = images.compute_mask_bbox(mask)
cropped = images.crop(im, bbox)
resized = imresize(cropped, (patch_size, patch_size, cropped.shape[2]))
return resized
def compute_minmax_thickness(mask):
max_width = 0
max_height = 0
for row_id in range(mask.shape[0]):
row = mask[row_id, :]
split_locs = np.where(np.diff(row) != 0)[0] + 1
for segment in (np.split(row, split_locs)):
if segment[0] != 0:
max_width = max(max_width, len(segment))
for col_id in range(mask.shape[1]):
col = mask[:, col_id]
split_locs = np.where(np.diff(col) != 0)[0] + 1
for segment in (np.split(col, split_locs)):
if segment[0] != 0:
max_height = max(max_height, len(segment))
return min(max_width, max_height), max(max_width, max_height)
| true
| true
|
f70acf587d8569458f8fa30ddf05f354f81bb523
| 3,453
|
py
|
Python
|
SCANNER_FTX_PERP.py
|
medialandstudio/bias
|
9548a2b66c0134c797fa3d00de3711cfef9dbb70
|
[
"MIT"
] | null | null | null |
SCANNER_FTX_PERP.py
|
medialandstudio/bias
|
9548a2b66c0134c797fa3d00de3711cfef9dbb70
|
[
"MIT"
] | null | null | null |
SCANNER_FTX_PERP.py
|
medialandstudio/bias
|
9548a2b66c0134c797fa3d00de3711cfef9dbb70
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 7 12:02:50 2021
@author: ministudio
"""
from datetime import datetime, timezone
import pandas as pd
import numpy as np
from alive_progress import alive_bar
def get_all_futures(ftx_client):
tickers = ftx_client.fetchMarkets()
list_perp =[]
#with alive_bar(len(tickers),length=20) as bar:
for ticker in tickers:
if 'PERP' in ticker['id']:
list_perp.append(ticker['id'])
#bar()
return list_perp
def scanner(day,month,year,ticker,ftx):
results = pd.DataFrame(columns=['P/L %'])
start_trade = datetime(year, month, day, 0, 0, 0)
timestamp = start_trade.replace(tzinfo=timezone.utc).timestamp()
candles = ftx.fetchOHLCV(ticker, timeframe='1h', since=timestamp*1000, limit=5000)
candles_df = pd.DataFrame(candles, columns=['MTS','OPEN','HIGH','LOW','CLOSE','VOLUME'])
volume = candles_df.VOLUME.sum()
for j in range(0,24):
# algoritmo per andare di candela in candela
ledger = pd.DataFrame(columns=['POSITION','ENTRY PRICE','P_L SINGLE','P_L TOTAL'])
long = True
time_scanner = ''
# calcolo l'offset tra una candela e l'altra di mio interesse
offset = 12
if j != 0:
candles = candles[1:]
try:
for i in range(0,len(candles),offset):
entry_price = candles[i][1]
if i == 0:
start = datetime.utcfromtimestamp(candles[i][0]/1000)
end = datetime.utcfromtimestamp(candles[i+offset][0]/1000) #datetime.utcfromtimestamp(candles[i+offset+10][0]/1000)
#print('FROM',start.strftime("%H:%M"),'TO',end.strftime("%H:%M"))
var_pct = p_l_total = 0
position = 'LONG'
time_scanner = f'{start.strftime("%H:%M")} to {end.strftime("%H:%M")}'
else:
#r_exit_entry = candles[i][4]/candles[i-offset][4] #if not long else candles[i][4]/candles[i-offset][4]
# calcolo il profitto
if long:
var_pct = round((candles[i-offset][1] - candles[i][1])/candles[i-offset][1]*100, 3)
p_l_total = ledger['P_L TOTAL'].iloc[-1] + var_pct
if not long:
var_pct = round((candles[i][1]-candles[i-offset][1])/candles[i][1]*100, 3)
p_l_total = ledger['P_L TOTAL'].iloc[-1] + var_pct
if long:
date = datetime.utcfromtimestamp(candles[i][0]/1000)
position = 'LONG'
long = False
else:
# quindi vado in short
date = datetime.utcfromtimestamp(candles[i][0]/1000) #candles[i+10][0]/1000
position = 'SHORT'
long = True
ledger.loc[date] = [position, entry_price, var_pct, p_l_total]
results.loc[time_scanner] = round(ledger['P_L TOTAL'][-1],2)
#print('P/L TOTAL :\t',round(ledger['P_L TOTAL'][-1],2), '%\n')
except Exception as e:
results.loc[time_scanner] = np.NAN
return results, volume
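# A hypothetical driver for the functions above (not part of the original
# script). It assumes a ccxt build that still ships the FTX exchange class,
# which is where fetchMarkets()/fetchOHLCV() come from; the start date and
# ticker choice are arbitrary.
import ccxt

ftx = ccxt.ftx()
perps = get_all_futures(ftx)
results, volume = scanner(7, 12, 2021, perps[0], ftx)
print(results.sort_values("P/L %", ascending=False).head())
print("total 1h volume since start:", volume)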
| 37.532609
| 135
| 0.515204
|
from datetime import datetime, timezone
import pandas as pd
import numpy as np
from alive_progress import alive_bar
def get_all_futures(ftx_client):
tickers = ftx_client.fetchMarkets()
list_perp =[]
for ticker in tickers:
if 'PERP' in ticker['id']:
list_perp.append(ticker['id'])
return list_perp
def scanner(day,month,year,ticker,ftx):
results = pd.DataFrame(columns=['P/L %'])
start_trade = datetime(year, month, day, 0, 0, 0)
timestamp = start_trade.replace(tzinfo=timezone.utc).timestamp()
candles = ftx.fetchOHLCV(ticker, timeframe='1h', since=timestamp*1000, limit=5000)
candles_df = pd.DataFrame(candles, columns=['MTS','OPEN','HIGH','LOW','CLOSE','VOLUME'])
volume = candles_df.VOLUME.sum()
for j in range(0,24):
ledger = pd.DataFrame(columns=['POSITION','ENTRY PRICE','P_L SINGLE','P_L TOTAL'])
long = True
time_scanner = ''
offset = 12
if j != 0:
candles = candles[1:]
try:
for i in range(0,len(candles),offset):
entry_price = candles[i][1]
if i == 0:
start = datetime.utcfromtimestamp(candles[i][0]/1000)
                    end = datetime.utcfromtimestamp(candles[i+offset][0]/1000)
                    var_pct = p_l_total = 0
position = 'LONG'
time_scanner = f'{start.strftime("%H:%M")} to {end.strftime("%H:%M")}'
else:
if long:
var_pct = round((candles[i-offset][1] - candles[i][1])/candles[i-offset][1]*100, 3)
p_l_total = ledger['P_L TOTAL'].iloc[-1] + var_pct
if not long:
var_pct = round((candles[i][1]-candles[i-offset][1])/candles[i][1]*100, 3)
p_l_total = ledger['P_L TOTAL'].iloc[-1] + var_pct
if long:
date = datetime.utcfromtimestamp(candles[i][0]/1000)
position = 'LONG'
long = False
else:
                    date = datetime.utcfromtimestamp(candles[i][0]/1000)
                    position = 'SHORT'
long = True
ledger.loc[date] = [position, entry_price, var_pct, p_l_total]
results.loc[time_scanner] = round(ledger['P_L TOTAL'][-1],2)
except Exception as e:
results.loc[time_scanner] = np.NAN
return results, volume
| true
| true
|
f70ad0a021e9df323e1559c8795babc352e2834b
| 144
|
py
|
Python
|
app/http/http_statuses.py
|
dimamik/AGH_Learning_Cards
|
bef1ce8e763fb7b21058f918ec6f02be41bb7a11
|
[
"PostgreSQL",
"MIT"
] | null | null | null |
app/http/http_statuses.py
|
dimamik/AGH_Learning_Cards
|
bef1ce8e763fb7b21058f918ec6f02be41bb7a11
|
[
"PostgreSQL",
"MIT"
] | null | null | null |
app/http/http_statuses.py
|
dimamik/AGH_Learning_Cards
|
bef1ce8e763fb7b21058f918ec6f02be41bb7a11
|
[
"PostgreSQL",
"MIT"
] | null | null | null |
HTTP_OK = 200
HTTP_CREATED = 201
HTTP_NO_CONTENT = 204
HTTP_BAD_REQUEST = 400
HTTP_UNAUTHORIZED = 401
HTTP_NOT_FOUND = 404
HTTP_CONFLICT = 409
| 16
| 23
| 0.798611
|
HTTP_OK = 200
HTTP_CREATED = 201
HTTP_NO_CONTENT = 204
HTTP_BAD_REQUEST = 400
HTTP_UNAUTHORIZED = 401
HTTP_NOT_FOUND = 404
HTTP_CONFLICT = 409
| true
| true
|
f70ad21fcb02d7452db608ef78fa61ed151d9c8f
| 1,560
|
py
|
Python
|
Cryptocurrency/Dashcoin/dash-gbp.py
|
uberfastman/bitbar-plugins
|
b61903dc31360d67c63ed24abdba3ba71ace3d56
|
[
"MIT"
] | null | null | null |
Cryptocurrency/Dashcoin/dash-gbp.py
|
uberfastman/bitbar-plugins
|
b61903dc31360d67c63ed24abdba3ba71ace3d56
|
[
"MIT"
] | 1
|
2019-11-21T07:31:36.000Z
|
2019-11-21T07:31:36.000Z
|
Cryptocurrency/Dashcoin/dash-gbp.py
|
uberfastman/bitbar-plugins
|
b61903dc31360d67c63ed24abdba3ba71ace3d56
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# coding=utf-8
#
# <bitbar.title>Dashcoin Ticker (£1GBP)</bitbar.title>
# <bitbar.version>v1.0</bitbar.version>
# <bitbar.author>impshum</bitbar.author>
# <bitbar.author.github>impshum</bitbar.author.github>
# <bitbar.desc>Displays current Dashcoin price for £1 from Coinmarketcap</bitbar.desc>
# <bitbar.image>https://i.imgur.com/KZH5B8s.jpg/bitbar.image>
#
# by impshum
from urllib import urlopen
url = urlopen('https://coinmarketcap-nexuist.rhcloud.com/api/dash').read()
import json
result = json.loads(url)
def flow():
if result ['change'] > '0':
print (' £%.4f | image=iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABmJLR0QAyQACAALwzISXAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH4AQHACkSBTjB+AAAALNJREFUOMvVk70NAjEMhb87WYiGBZAQU7ABNSVSWpZgEEagsJDoKBELUCEKFuBuCKTw0xyQC0lICe5i+/k9/wT+3opUUJQhcAUqa8I5ZQT4tANwioGTCkQZA9vmOQE2oUJFhL0DXBz33RpKUfCLfLTQJMx9IlEWuQr6QB3prGtNS1lwiMvEYo7ekNsKRBkB+y+rH1hDFVOwy7ids+gbVzrsM6CXeYDTF85xroB1ZoHb73ymB5RhJkpZTihGAAAAAElFTkSuQmCC color=#000000'% float(result['price']['gbp']))
else:
print (' £%.4f | image=iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABmJLR0QABACnAADQ9FZaAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH4AQHACQ1FZwK3gAAAMRJREFUOMvNkjEKAjEQRZ+jKNjYKh5AbzCdjVcQj+BFPIKlp7EMeAJrUbASQVCEr80uG9cNbqe/Cgn/5WUI/DqNfBHM+kCzbs+lPUAr2pwBq5qABbB+M8gszkDvS/kOdAG5VBgEM4ApsP0CGLukjxlEoA0wSZR3Lo0qhxhZDIBDAmDA0wsBLD51CZeOwLKivHbprZx6AkAHuEXbD5fawYwywMqAzOKeDTTPvKqcTGZBMLsGs0utn5gADYEHcKp9e9ni//MCDtNCE3qjsIwAAAAASUVORK5CYII= color=#000000'% float(result['price']['gbp']))
flow()
| 60
| 494
| 0.830769
|
from urllib import urlopen
url = urlopen('https://coinmarketcap-nexuist.rhcloud.com/api/dash').read()
import json
result = json.loads(url)
def flow():
if result ['change'] > '0':
print (' £%.4f | image=iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABmJLR0QAyQACAALwzISXAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH4AQHACkSBTjB+AAAALNJREFUOMvVk70NAjEMhb87WYiGBZAQU7ABNSVSWpZgEEagsJDoKBELUCEKFuBuCKTw0xyQC0lICe5i+/k9/wT+3opUUJQhcAUqa8I5ZQT4tANwioGTCkQZA9vmOQE2oUJFhL0DXBz33RpKUfCLfLTQJMx9IlEWuQr6QB3prGtNS1lwiMvEYo7ekNsKRBkB+y+rH1hDFVOwy7ids+gbVzrsM6CXeYDTF85xroB1ZoHb73ymB5RhJkpZTihGAAAAAElFTkSuQmCC color=#000000'% float(result['price']['gbp']))
else:
print (' £%.4f | image=iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABmJLR0QABACnAADQ9FZaAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH4AQHACQ1FZwK3gAAAMRJREFUOMvNkjEKAjEQRZ+jKNjYKh5AbzCdjVcQj+BFPIKlp7EMeAJrUbASQVCEr80uG9cNbqe/Cgn/5WUI/DqNfBHM+kCzbs+lPUAr2pwBq5qABbB+M8gszkDvS/kOdAG5VBgEM4ApsP0CGLukjxlEoA0wSZR3Lo0qhxhZDIBDAmDA0wsBLD51CZeOwLKivHbprZx6AkAHuEXbD5fawYwywMqAzOKeDTTPvKqcTGZBMLsGs0utn5gADYEHcKp9e9ni//MCDtNCE3qjsIwAAAAASUVORK5CYII= color=#000000'% float(result['price']['gbp']))
flow()
| true
| true
|
f70ad326400c79c347d2103ccb8c135960a427fe
| 3,622
|
py
|
Python
|
e2e/Tests/Consensus/Verification/UnknownInBlockTest.py
|
kayabaNerve/Currency
|
260ebc20f1704f42ad6183fee39ad58ec6d07961
|
[
"CC0-1.0"
] | 66
|
2019-01-14T08:39:52.000Z
|
2022-01-06T11:39:15.000Z
|
e2e/Tests/Consensus/Verification/UnknownInBlockTest.py
|
kayabaNerve/Currency
|
260ebc20f1704f42ad6183fee39ad58ec6d07961
|
[
"CC0-1.0"
] | 228
|
2019-01-16T15:42:44.000Z
|
2022-02-05T07:48:07.000Z
|
e2e/Tests/Consensus/Verification/UnknownInBlockTest.py
|
kayabaNerve/Currency
|
260ebc20f1704f42ad6183fee39ad58ec6d07961
|
[
"CC0-1.0"
] | 19
|
2019-01-14T08:53:04.000Z
|
2021-11-03T20:19:28.000Z
|
#Tests proper handling of Verifications with Transactions which don't exist.
from typing import Dict, List, Any
import json
from pytest import raises
from e2e.Libs.Minisketch import Sketch
from e2e.Classes.Merit.Block import Block
from e2e.Classes.Merit.Merit import Merit
from e2e.Classes.Consensus.VerificationPacket import VerificationPacket
from e2e.Meros.RPC import RPC
from e2e.Meros.Meros import MessageType
from e2e.Meros.Liver import Liver
from e2e.Tests.Errors import TestError, SuccessError
#pylint: disable=too-many-statements
def VUnknownInBlockTest(
rpc: RPC
) -> None:
vectors: Dict[str, Any]
with open("e2e/Vectors/Consensus/Verification/Parsable.json", "r") as file:
vectors = json.loads(file.read())
merit: Merit = Merit.fromJSON(vectors["blockchain"])
#Custom function to send the last Block and verify it errors at the right place.
def checkFail() -> None:
#This Block should cause the node to disconnect us AFTER it attempts to sync our Transaction.
syncedTX: bool = False
#Grab the Block.
block: Block = merit.blockchain.blocks[2]
#Send the Block.
rpc.meros.liveBlockHeader(block.header)
rpc.meros.handleBlockBody(block)
#Handle sync requests.
reqHash: bytes = bytes()
while True:
if syncedTX:
#Try receiving from the Live socket, where Meros sends keep-alives.
try:
if len(rpc.meros.live.recv()) != 0:
raise Exception()
except TestError:
raise SuccessError("Node disconnected us after we sent a parsable, yet invalid, Verification.")
except Exception:
raise TestError("Meros sent a keep-alive.")
msg: bytes = rpc.meros.sync.recv()
if MessageType(msg[0]) == MessageType.SketchHashesRequest:
if not block.body.packets:
raise TestError("Meros asked for Sketch Hashes from a Block without any.")
reqHash = msg[1 : 33]
if reqHash != block.header.hash:
raise TestError("Meros asked for Sketch Hashes that didn't belong to the Block we just sent it.")
#Create the hashes.
hashes: List[int] = []
for packet in block.body.packets:
hashes.append(Sketch.hash(block.header.sketchSalt, packet))
#Send the Sketch Hashes.
rpc.meros.sketchHashes(hashes)
elif MessageType(msg[0]) == MessageType.SketchHashRequests:
if not block.body.packets:
raise TestError("Meros asked for Verification Packets from a Block without any.")
reqHash = msg[1 : 33]
if reqHash != block.header.hash:
raise TestError("Meros asked for Verification Packets that didn't belong to the Block we just sent it.")
#Create a lookup of hash to packets.
packets: Dict[int, VerificationPacket] = {}
for packet in block.body.packets:
packets[Sketch.hash(block.header.sketchSalt, packet)] = packet
#Look up each requested packet and respond accordingly.
for h in range(int.from_bytes(msg[33 : 37], byteorder="little")):
sketchHash: int = int.from_bytes(msg[37 + (h * 8) : 45 + (h * 8)], byteorder="little")
if sketchHash not in packets:
raise TestError("Meros asked for a non-existent Sketch Hash.")
rpc.meros.packet(packets[sketchHash])
elif MessageType(msg[0]) == MessageType.TransactionRequest:
rpc.meros.dataMissing()
syncedTX = True
else:
raise TestError("Unexpected message sent: " + msg.hex().upper())
with raises(SuccessError):
Liver(rpc, vectors["blockchain"], callbacks={1: checkFail}).live()
| 35.861386
| 114
| 0.67725
|
from typing import Dict, List, Any
import json
from pytest import raises
from e2e.Libs.Minisketch import Sketch
from e2e.Classes.Merit.Block import Block
from e2e.Classes.Merit.Merit import Merit
from e2e.Classes.Consensus.VerificationPacket import VerificationPacket
from e2e.Meros.RPC import RPC
from e2e.Meros.Meros import MessageType
from e2e.Meros.Liver import Liver
from e2e.Tests.Errors import TestError, SuccessError
#pylint: disable=too-many-statements
def VUnknownInBlockTest(
rpc: RPC
) -> None:
vectors: Dict[str, Any]
with open("e2e/Vectors/Consensus/Verification/Parsable.json", "r") as file:
vectors = json.loads(file.read())
merit: Merit = Merit.fromJSON(vectors["blockchain"])
#Custom function to send the last Block and verify it errors at the right place.
def checkFail() -> None:
#This Block should cause the node to disconnect us AFTER it attempts to sync our Transaction.
syncedTX: bool = False
#Grab the Block.
block: Block = merit.blockchain.blocks[2]
#Send the Block.
rpc.meros.liveBlockHeader(block.header)
rpc.meros.handleBlockBody(block)
#Handle sync requests.
reqHash: bytes = bytes()
while True:
if syncedTX:
#Try receiving from the Live socket, where Meros sends keep-alives.
try:
if len(rpc.meros.live.recv()) != 0:
raise Exception()
except TestError:
raise SuccessError("Node disconnected us after we sent a parsable, yet invalid, Verification.")
except Exception:
raise TestError("Meros sent a keep-alive.")
msg: bytes = rpc.meros.sync.recv()
if MessageType(msg[0]) == MessageType.SketchHashesRequest:
if not block.body.packets:
raise TestError("Meros asked for Sketch Hashes from a Block without any.")
reqHash = msg[1 : 33]
if reqHash != block.header.hash:
raise TestError("Meros asked for Sketch Hashes that didn't belong to the Block we just sent it.")
hashes: List[int] = []
for packet in block.body.packets:
hashes.append(Sketch.hash(block.header.sketchSalt, packet))
rpc.meros.sketchHashes(hashes)
elif MessageType(msg[0]) == MessageType.SketchHashRequests:
if not block.body.packets:
raise TestError("Meros asked for Verification Packets from a Block without any.")
reqHash = msg[1 : 33]
if reqHash != block.header.hash:
raise TestError("Meros asked for Verification Packets that didn't belong to the Block we just sent it.")
#Create a lookup of hash to packets.
packets: Dict[int, VerificationPacket] = {}
for packet in block.body.packets:
packets[Sketch.hash(block.header.sketchSalt, packet)] = packet
#Look up each requested packet and respond accordingly.
for h in range(int.from_bytes(msg[33 : 37], byteorder="little")):
sketchHash: int = int.from_bytes(msg[37 + (h * 8) : 45 + (h * 8)], byteorder="little")
if sketchHash not in packets:
raise TestError("Meros asked for a non-existent Sketch Hash.")
rpc.meros.packet(packets[sketchHash])
elif MessageType(msg[0]) == MessageType.TransactionRequest:
rpc.meros.dataMissing()
syncedTX = True
else:
raise TestError("Unexpected message sent: " + msg.hex().upper())
with raises(SuccessError):
Liver(rpc, vectors["blockchain"], callbacks={1: checkFail}).live()
| true
| true
|
f70ad3439734e192b877fd56e646278509bf7ab3
| 5,191
|
py
|
Python
|
epytope/Data/pssms/arb/mat/B_4002_10.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 7
|
2021-02-01T18:11:28.000Z
|
2022-01-31T19:14:07.000Z
|
epytope/Data/pssms/arb/mat/B_4002_10.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 22
|
2021-01-02T15:25:23.000Z
|
2022-03-14T11:32:53.000Z
|
epytope/Data/pssms/arb/mat/B_4002_10.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 4
|
2021-05-28T08:50:38.000Z
|
2022-03-14T11:45:32.000Z
|
B_4002_10 = {0: {'A': 0.22337100816803507, 'C': -0.08721732138853625, 'E': -0.05776024940539231, 'D': -0.8062336491499029, 'G': -0.22235775138309136, 'F': 0.41616940014979253, 'I': -0.2625598958640791, 'H': -0.2842266678402531, 'K': -0.11806916630138095, 'M': 0.3503963704784862, 'L': -0.11175681610077592, 'N': -0.6559751061375433, 'Q': 0.42709232284615184, 'P': -0.6562206710837208, 'S': -0.02028872713419685, 'R': 0.7053425369818895, 'T': -0.16988396865190242, 'W': 0.5294490014218092, 'V': -0.5397379396163317, 'Y': 0.6224062516023391}, 1: {'A': -4.0, 'C': -1.4792466334792438, 'E': 1.9520597704545073, 'D': -1.3065688764576122, 'G': -4.0, 'F': -1.2998595004729445, 'I': -1.7137811098439848, 'H': -1.5503864274444812, 'K': -1.5503864274444812, 'M': -1.7137811098439848, 'L': -1.5873303359490254, 'N': -4.0, 'Q': -4.0, 'P': -1.485221375614014, 'S': -1.4792466334792438, 'R': -1.516442783779311, 'T': -1.3722901525124491, 'W': -1.2998595004729445, 'V': -1.7027070282103178, 'Y': -1.2998595004729445}, 2: {'A': 0.2563438176533894, 'C': 0.005946632197619582, 'E': -0.01583980936870634, 'D': -0.28769896803687756, 'G': -0.6954066625927517, 'F': 0.12061097626119485, 'I': 0.3350620409473355, 'H': -0.1694896011839807, 'K': 0.3362119351909843, 'M': 0.21123743247221569, 'L': 0.3569719895865599, 'N': 0.5952112576301342, 'Q': 0.07334278898807628, 'P': -0.4863848827377961, 'S': 0.5774056906967757, 'R': -0.6603164669657029, 'T': -0.5333967423641524, 'W': 0.07155631156720803, 'V': -0.3025209922484634, 'Y': -0.3412456411532286}, 3: {'A': 0.5977554346220839, 'C': 0.014950504192544605, 'E': -0.14276836811036525, 'D': -0.566829217593357, 'G': 0.43366216673597924, 'F': 0.18735599610913023, 'I': -0.5941476733420843, 'H': 0.7148611685591905, 'K': -0.25892998681258395, 'M': 0.24255037248622957, 'L': 0.1922371468778731, 'N': -0.8992543313554157, 'Q': -0.0066294697791371, 'P': -0.17868447116149977, 'S': 0.5575118094930324, 'R': 0.4354350798832712, 'T': -0.6863999529014213, 'W': -0.9040043361451602, 'V': -0.09610557652101945, 'Y': 0.25017165150118675}, 4: {'A': -0.36211990116689025, 'C': 1.0140227890402689, 'E': -0.10425685745040862, 'D': 0.07615994218018855, 'G': -0.8009857839941741, 'F': 0.24667787490362253, 'I': -0.17420628325418536, 'H': -0.7132294203626169, 'K': 0.12801265861459374, 'M': 0.7179341611730891, 'L': 0.2421722453426991, 'N': 0.25183605308664186, 'Q': -0.7166747952837604, 'P': -0.130679376377445, 'S': 0.3742768715087381, 'R': -0.44531439192302325, 'T': 1.0778574915916541, 'W': -0.7242004826221311, 'V': -0.10127761501276543, 'Y': -0.5187144195614529}, 5: {'A': 0.5744121217096468, 'C': 0.5415630199991066, 'E': -0.5530536302143234, 'D': -0.28640127477331273, 'G': -0.729203233404597, 'F': -0.11418154127673937, 'I': 0.37603616107858134, 'H': -0.9148359315846256, 'K': -0.18293738749007366, 'M': 0.4430551493441148, 'L': 0.028940205572376906, 'N': -0.015688177174764985, 'Q': 0.3334643637995271, 'P': 0.3653661968849176, 'S': 0.24310899420114637, 'R': 0.3683640838816875, 'T': -0.41546224081805533, 'W': -0.17011116673092944, 'V': -0.16028570036270884, 'Y': 0.037898267913432676}, 6: {'A': -0.2449698680605102, 'C': 0.21891284135185457, 'E': -0.1914970740107789, 'D': -0.6845824833815898, 'G': -0.23680284287562992, 'F': -0.047735228056870374, 'I': -0.14535092817743472, 'H': -0.7904575078177513, 'K': -0.3522379408807177, 'M': 0.4651584476619752, 'L': 0.3633365762076467, 'N': -0.1906297329391477, 'Q': -0.17917612566613594, 'P': -0.09502957757299856, 'S': 0.1613073465286862, 'R': -0.2735299808531706, 'T': 0.577678919761243, 'W': 
0.21111680192906904, 'V': 0.24561358020466897, 'Y': 0.5422742451008902}, 7: {'A': -0.26126715312869003, 'C': 0.04523513286573463, 'E': 0.3009863034413461, 'D': -0.23595975352186618, 'G': -0.04401611182001157, 'F': 0.8106155298979435, 'I': -0.6959114020958657, 'H': 0.7274217689457967, 'K': -1.0948083223532759, 'M': 0.7971560783910433, 'L': -0.4799785717728068, 'N': -1.047191366836869, 'Q': 0.03006318067260729, 'P': 0.6499374087495984, 'S': 0.09020424788565452, 'R': -0.6399431218454593, 'T': 0.09387374649172615, 'W': 0.38231537787910685, 'V': 0.29085420864742834, 'Y': 0.10502029689790073}, 8: {'A': -0.17624591714060261, 'C': -0.44594096205809025, 'E': 0.2717227979727722, 'D': -0.012845762584315317, 'G': -0.2375535720710233, 'F': 0.16487310250932152, 'I': 0.00804494192498933, 'H': -0.8499150101901889, 'K': -0.8296394058347988, 'M': -0.5893452296325081, 'L': 0.24782037761046985, 'N': -0.42682194513580807, 'Q': -0.2002625627126248, 'P': 0.7689731259954051, 'S': 0.29368829704065275, 'R': -0.6530871271743546, 'T': 0.4318928627874784, 'W': -0.9240865611446291, 'V': 0.26557804589733297, 'Y': 0.038742804015257794}, 9: {'A': 0.39435936592627074, 'C': -0.2506580204205583, 'E': -4.0, 'D': -4.0, 'G': -0.3259787590539208, 'F': -0.16992688879892542, 'I': 0.2967586631856834, 'H': -1.787811063406423, 'K': -1.757918849655444, 'M': 0.8174924984834613, 'L': 1.0839069131169379, 'N': -4.0, 'Q': -4.0, 'P': -4.0, 'S': -0.007550469044766957, 'R': -1.787811063406423, 'T': -0.31612993413343005, 'W': -0.9384717898759554, 'V': 0.1245946934278608, 'Y': -1.046424620597781}, -1: {'slope': 0.13311575086207222, 'intercept': -0.5859339389711538}}
| 5,191
| 5,191
| 0.695434
|
B_4002_10 = {0: {'A': 0.22337100816803507, 'C': -0.08721732138853625, 'E': -0.05776024940539231, 'D': -0.8062336491499029, 'G': -0.22235775138309136, 'F': 0.41616940014979253, 'I': -0.2625598958640791, 'H': -0.2842266678402531, 'K': -0.11806916630138095, 'M': 0.3503963704784862, 'L': -0.11175681610077592, 'N': -0.6559751061375433, 'Q': 0.42709232284615184, 'P': -0.6562206710837208, 'S': -0.02028872713419685, 'R': 0.7053425369818895, 'T': -0.16988396865190242, 'W': 0.5294490014218092, 'V': -0.5397379396163317, 'Y': 0.6224062516023391}, 1: {'A': -4.0, 'C': -1.4792466334792438, 'E': 1.9520597704545073, 'D': -1.3065688764576122, 'G': -4.0, 'F': -1.2998595004729445, 'I': -1.7137811098439848, 'H': -1.5503864274444812, 'K': -1.5503864274444812, 'M': -1.7137811098439848, 'L': -1.5873303359490254, 'N': -4.0, 'Q': -4.0, 'P': -1.485221375614014, 'S': -1.4792466334792438, 'R': -1.516442783779311, 'T': -1.3722901525124491, 'W': -1.2998595004729445, 'V': -1.7027070282103178, 'Y': -1.2998595004729445}, 2: {'A': 0.2563438176533894, 'C': 0.005946632197619582, 'E': -0.01583980936870634, 'D': -0.28769896803687756, 'G': -0.6954066625927517, 'F': 0.12061097626119485, 'I': 0.3350620409473355, 'H': -0.1694896011839807, 'K': 0.3362119351909843, 'M': 0.21123743247221569, 'L': 0.3569719895865599, 'N': 0.5952112576301342, 'Q': 0.07334278898807628, 'P': -0.4863848827377961, 'S': 0.5774056906967757, 'R': -0.6603164669657029, 'T': -0.5333967423641524, 'W': 0.07155631156720803, 'V': -0.3025209922484634, 'Y': -0.3412456411532286}, 3: {'A': 0.5977554346220839, 'C': 0.014950504192544605, 'E': -0.14276836811036525, 'D': -0.566829217593357, 'G': 0.43366216673597924, 'F': 0.18735599610913023, 'I': -0.5941476733420843, 'H': 0.7148611685591905, 'K': -0.25892998681258395, 'M': 0.24255037248622957, 'L': 0.1922371468778731, 'N': -0.8992543313554157, 'Q': -0.0066294697791371, 'P': -0.17868447116149977, 'S': 0.5575118094930324, 'R': 0.4354350798832712, 'T': -0.6863999529014213, 'W': -0.9040043361451602, 'V': -0.09610557652101945, 'Y': 0.25017165150118675}, 4: {'A': -0.36211990116689025, 'C': 1.0140227890402689, 'E': -0.10425685745040862, 'D': 0.07615994218018855, 'G': -0.8009857839941741, 'F': 0.24667787490362253, 'I': -0.17420628325418536, 'H': -0.7132294203626169, 'K': 0.12801265861459374, 'M': 0.7179341611730891, 'L': 0.2421722453426991, 'N': 0.25183605308664186, 'Q': -0.7166747952837604, 'P': -0.130679376377445, 'S': 0.3742768715087381, 'R': -0.44531439192302325, 'T': 1.0778574915916541, 'W': -0.7242004826221311, 'V': -0.10127761501276543, 'Y': -0.5187144195614529}, 5: {'A': 0.5744121217096468, 'C': 0.5415630199991066, 'E': -0.5530536302143234, 'D': -0.28640127477331273, 'G': -0.729203233404597, 'F': -0.11418154127673937, 'I': 0.37603616107858134, 'H': -0.9148359315846256, 'K': -0.18293738749007366, 'M': 0.4430551493441148, 'L': 0.028940205572376906, 'N': -0.015688177174764985, 'Q': 0.3334643637995271, 'P': 0.3653661968849176, 'S': 0.24310899420114637, 'R': 0.3683640838816875, 'T': -0.41546224081805533, 'W': -0.17011116673092944, 'V': -0.16028570036270884, 'Y': 0.037898267913432676}, 6: {'A': -0.2449698680605102, 'C': 0.21891284135185457, 'E': -0.1914970740107789, 'D': -0.6845824833815898, 'G': -0.23680284287562992, 'F': -0.047735228056870374, 'I': -0.14535092817743472, 'H': -0.7904575078177513, 'K': -0.3522379408807177, 'M': 0.4651584476619752, 'L': 0.3633365762076467, 'N': -0.1906297329391477, 'Q': -0.17917612566613594, 'P': -0.09502957757299856, 'S': 0.1613073465286862, 'R': -0.2735299808531706, 'T': 0.577678919761243, 'W': 
0.21111680192906904, 'V': 0.24561358020466897, 'Y': 0.5422742451008902}, 7: {'A': -0.26126715312869003, 'C': 0.04523513286573463, 'E': 0.3009863034413461, 'D': -0.23595975352186618, 'G': -0.04401611182001157, 'F': 0.8106155298979435, 'I': -0.6959114020958657, 'H': 0.7274217689457967, 'K': -1.0948083223532759, 'M': 0.7971560783910433, 'L': -0.4799785717728068, 'N': -1.047191366836869, 'Q': 0.03006318067260729, 'P': 0.6499374087495984, 'S': 0.09020424788565452, 'R': -0.6399431218454593, 'T': 0.09387374649172615, 'W': 0.38231537787910685, 'V': 0.29085420864742834, 'Y': 0.10502029689790073}, 8: {'A': -0.17624591714060261, 'C': -0.44594096205809025, 'E': 0.2717227979727722, 'D': -0.012845762584315317, 'G': -0.2375535720710233, 'F': 0.16487310250932152, 'I': 0.00804494192498933, 'H': -0.8499150101901889, 'K': -0.8296394058347988, 'M': -0.5893452296325081, 'L': 0.24782037761046985, 'N': -0.42682194513580807, 'Q': -0.2002625627126248, 'P': 0.7689731259954051, 'S': 0.29368829704065275, 'R': -0.6530871271743546, 'T': 0.4318928627874784, 'W': -0.9240865611446291, 'V': 0.26557804589733297, 'Y': 0.038742804015257794}, 9: {'A': 0.39435936592627074, 'C': -0.2506580204205583, 'E': -4.0, 'D': -4.0, 'G': -0.3259787590539208, 'F': -0.16992688879892542, 'I': 0.2967586631856834, 'H': -1.787811063406423, 'K': -1.757918849655444, 'M': 0.8174924984834613, 'L': 1.0839069131169379, 'N': -4.0, 'Q': -4.0, 'P': -4.0, 'S': -0.007550469044766957, 'R': -1.787811063406423, 'T': -0.31612993413343005, 'W': -0.9384717898759554, 'V': 0.1245946934278608, 'Y': -1.046424620597781}, -1: {'slope': 0.13311575086207222, 'intercept': -0.5859339389711538}}
| true
| true
|
f70ad3c71c856f0bf20b70565cd7cc3539e3b885
| 1,071
|
py
|
Python
|
examples/ethercat/ecat_load_save_config.py
|
ingeniamc/ingenialink-python
|
6011931697e48456f5638c2848303aac2e5bcb75
|
[
"MIT"
] | 15
|
2017-08-30T13:43:14.000Z
|
2022-03-29T07:04:30.000Z
|
examples/ethercat/ecat_load_save_config.py
|
ingeniamc/ingenialink-python
|
6011931697e48456f5638c2848303aac2e5bcb75
|
[
"MIT"
] | 11
|
2017-08-28T11:23:18.000Z
|
2022-03-28T23:48:11.000Z
|
examples/ethercat/ecat_load_save_config.py
|
ingeniamc/ingenialink-python
|
6011931697e48456f5638c2848303aac2e5bcb75
|
[
"MIT"
] | 9
|
2017-09-30T08:28:42.000Z
|
2022-03-12T19:11:43.000Z
|
import sys
from ingenialink.ethercat.network import EthercatNetwork
def connect_slave():
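    """Connect to the first slave on the given network interface and return the servo and network instances."""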
net = EthercatNetwork("\\Device\\NPF_{192D1D2F-C684-467D-A637-EC07BD434A63}")
servo = net.connect_to_slave(
target=1,
dictionary='../../resources/dictionaries/cap-net-e_eoe_0.7.1.xdf')
return servo, net
def load_config_example():
"""Loads a given configuration file into the drive."""
servo, net = connect_slave()
servo.load_configuration('ecat_config.xcf')
servo.load_configuration('ecat_config_0.xcf', subnode=0)
servo.load_configuration('ecat_config_1.xcf', subnode=1)
net.disconnect_from_slave(servo)
def save_config_example():
"""Saves the drive configuration into a file."""
servo, net = connect_slave()
servo.save_configuration('ecat_config.xcf')
servo.save_configuration('ecat_config_0.xcf', subnode=0)
servo.save_configuration('ecat_config_1.xcf', subnode=1)
net.disconnect_from_slave(servo)
if __name__ == '__main__':
save_config_example()
load_config_example()
sys.exit()
| 28.184211
| 81
| 0.722689
|
import sys
from ingenialink.ethercat.network import EthercatNetwork
def connect_slave():
net = EthercatNetwork("\\Device\\NPF_{192D1D2F-C684-467D-A637-EC07BD434A63}")
servo = net.connect_to_slave(
target=1,
dictionary='../../resources/dictionaries/cap-net-e_eoe_0.7.1.xdf')
return servo, net
def load_config_example():
servo, net = connect_slave()
servo.load_configuration('ecat_config.xcf')
servo.load_configuration('ecat_config_0.xcf', subnode=0)
servo.load_configuration('ecat_config_1.xcf', subnode=1)
net.disconnect_from_slave(servo)
def save_config_example():
servo, net = connect_slave()
servo.save_configuration('ecat_config.xcf')
servo.save_configuration('ecat_config_0.xcf', subnode=0)
servo.save_configuration('ecat_config_1.xcf', subnode=1)
net.disconnect_from_slave(servo)
if __name__ == '__main__':
save_config_example()
load_config_example()
sys.exit()
| true
| true
|
f70ad3db5e51eb1972ea537132216676ff99ecf8
| 917
|
py
|
Python
|
test/python/importer/jit_ir/node_import/function-block-arg-adjustment.py
|
denolf/torch-mlir
|
d3a4a7f5d40e11f5dc3fb33fcfee4c2305ccb7c3
|
[
"Apache-2.0"
] | 213
|
2021-09-24T03:26:53.000Z
|
2022-03-30T07:11:48.000Z
|
test/python/importer/jit_ir/node_import/function-block-arg-adjustment.py
|
denolf/torch-mlir
|
d3a4a7f5d40e11f5dc3fb33fcfee4c2305ccb7c3
|
[
"Apache-2.0"
] | 247
|
2021-09-23T18:49:45.000Z
|
2022-03-31T17:19:02.000Z
|
test/python/importer/jit_ir/node_import/function-block-arg-adjustment.py
|
denolf/torch-mlir
|
d3a4a7f5d40e11f5dc3fb33fcfee4c2305ccb7c3
|
[
"Apache-2.0"
] | 68
|
2021-09-23T18:23:20.000Z
|
2022-03-29T11:18:58.000Z
|
# -*- Python -*-
# This file is licensed under a pytorch-style license
# See LICENSE.pytorch for license information.
from torch_mlir.dialects.torch.importer.jit_ir import ModuleBuilder
from utils import create_script_function
# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s
mb = ModuleBuilder()
# CHECK-LABEL: func @__torch__.refined_block_arg(
# CHECK-SAME: %[[ARG:.*]]: !torch.tensor) -> !torch.tensor {
# CHECK: %[[REFINED:.*]] = torch.tensor_static_info_cast %[[ARG]] : !torch.tensor to !torch.tensor<[1,384],f32>
# CHECK: %[[RESULT:.*]] = torch.tensor_static_info_cast %[[REFINED]] : !torch.tensor<[1,384],f32> to !torch.tensor
# CHECK: return %[[RESULT]] : !torch.tensor
mb.import_function(create_script_function("__torch__.refined_block_arg", """
graph(%0 : Float(1, 384)):
return (%0)
"""))
mb.module.operation.print()
print()
| 36.68
| 124
| 0.652126
|
from torch_mlir.dialects.torch.importer.jit_ir import ModuleBuilder
from utils import create_script_function
mb = ModuleBuilder()
mb.import_function(create_script_function("__torch__.refined_block_arg", """
graph(%0 : Float(1, 384)):
return (%0)
"""))
mb.module.operation.print()
print()
| true
| true
|
f70ad4082dd1bb7084d23df4a8316b5464b670e8
| 198
|
py
|
Python
|
Rank of a matrix.py
|
srijithmass/RANK-OF-A-MATRIX
|
f0b2dacac02159a1385cfa23b180859444013911
|
[
"BSD-3-Clause"
] | null | null | null |
Rank of a matrix.py
|
srijithmass/RANK-OF-A-MATRIX
|
f0b2dacac02159a1385cfa23b180859444013911
|
[
"BSD-3-Clause"
] | null | null | null |
Rank of a matrix.py
|
srijithmass/RANK-OF-A-MATRIX
|
f0b2dacac02159a1385cfa23b180859444013911
|
[
"BSD-3-Clause"
] | null | null | null |
#Program to find the rank of a matrix.
#Developed by: SRIJITH R
#RegisterNumber: 21004191
import numpy as np
A=np.array([[5,-3,-10],[2,2,-3],[-3,-1,5]])
val=np.linalg.matrix_rank(A)
print(val)
| 28.285714
| 44
| 0.676768
|
import numpy as np
A=np.array([[5,-3,-10],[2,2,-3],[-3,-1,5]])
val=np.linalg.matrix_rank(A)
print(val)
| true
| true
|
f70ad45594920c02ebd62ebd037cc86e54c3965b
| 386
|
py
|
Python
|
OOP_formy/src/pages/file_upload.py
|
AntonioIonica/Automation_testing
|
6f7c94c55677b0958e6fada24058f1a00d2c0d0e
|
[
"MIT"
] | null | null | null |
OOP_formy/src/pages/file_upload.py
|
AntonioIonica/Automation_testing
|
6f7c94c55677b0958e6fada24058f1a00d2c0d0e
|
[
"MIT"
] | null | null | null |
OOP_formy/src/pages/file_upload.py
|
AntonioIonica/Automation_testing
|
6f7c94c55677b0958e6fada24058f1a00d2c0d0e
|
[
"MIT"
] | null | null | null |
"""
File upload page using a png file
"""
from selenium.webdriver.common.by import By
from pages.base_page import BasePage
class FileUpload(BasePage):
FILE_UP = (By.ID, 'file-upload-field')
def upload_file(self):
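        # Locate the upload input and send it the local path of the file to attach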
file_up = self.driver.find_element(*self.FILE_UP)
file_up.send_keys('C:/Users/anton/PycharmProjects/Automation_testing/exercices_todo/blue.png')
| 25.733333
| 102
| 0.735751
|
from selenium.webdriver.common.by import By
from pages.base_page import BasePage
class FileUpload(BasePage):
FILE_UP = (By.ID, 'file-upload-field')
def upload_file(self):
file_up = self.driver.find_element(*self.FILE_UP)
file_up.send_keys('C:/Users/anton/PycharmProjects/Automation_testing/exercices_todo/blue.png')
| true
| true
|
f70ad46c0497f4e4a064bac799bd8fe96b0efbdf
| 24,834
|
py
|
Python
|
pandaclient/PdbUtils.py
|
matthewfeickert/panda-client
|
077bb692a6f42ced0b388c96b8fd64ca032d6df7
|
[
"Apache-2.0"
] | 7
|
2016-01-26T21:37:26.000Z
|
2020-09-10T07:44:54.000Z
|
pandaclient/PdbUtils.py
|
matthewfeickert/panda-client
|
077bb692a6f42ced0b388c96b8fd64ca032d6df7
|
[
"Apache-2.0"
] | 12
|
2017-10-11T09:15:01.000Z
|
2021-11-17T00:23:18.000Z
|
pandaclient/PdbUtils.py
|
matthewfeickert/panda-client
|
077bb692a6f42ced0b388c96b8fd64ca032d6df7
|
[
"Apache-2.0"
] | 9
|
2017-07-20T08:06:36.000Z
|
2021-11-15T04:22:06.000Z
|
import os
import re
import sys
import time
import datetime
from .MiscUtils import commands_get_status_output
try:
long()
except Exception:
long = int
from . import PLogger
from .LocalJobSpec import LocalJobSpec
from .LocalJobsetSpec import LocalJobsetSpec
class PdbProxy:
# constructor
def __init__(self,verbose=False):
# database engine
self.engine = 'sqlite3'
# version of database schema
self.version = '0_0_1'
# database file name
self.filename = 'pandajob.db'
# database dir
self.database_dir = os.path.expanduser(os.environ['PANDA_CONFIG_ROOT'])
# full path of database file
self.database = '%s/%s' % (self.database_dir,self.filename)
# table name
self.tablename = 'jobtable_%s' % self.version
# verbose
self.verbose = verbose
# connection
self.con = None
# logger
self.log = PLogger.getPandaLogger()
# set verbose
def setVerbose(self,verbose):
# verbose
self.verbose = verbose
# execute SQL
def execute(self,sql,var={}):
# logger
tmpLog = PLogger.getPandaLogger()
# expand variables
for tmpKey in var:
tmpVal = var[tmpKey]
            sql = sql.replace(tmpKey,str(tmpVal))
# construct command
com = '%s %s "%s"' % (self.engine,self.database,sql)
if self.verbose:
tmpLog.debug("DB Req : " + com)
# execute
nTry = 5
        status = 0
for iTry in range(nTry):
if self.verbose:
tmpLog.debug(" Try : %s/%s" % (iTry,nTry))
status,output = commands_get_status_output(com)
status %= 255
if status == 0:
break
if iTry+1 < nTry:
time.sleep(2)
# return
if status != 0:
tmpLog.error(status)
tmpLog.error(output)
return False,output
else:
if self.verbose:
tmpLog.debug(" Ret : " + output)
outList = output.split('\n')
# remove ''
try:
outList.remove('')
except Exception:
pass
# remove junk messages
ngStrings = ['Loading resources from']
for tmpStr in tuple(outList):
# look for NG strings
flagNG = False
for ngStr in ngStrings:
match = re.search(ngStr,tmpStr,re.I)
if match is not None:
flagNG = True
break
# remove
if flagNG:
try:
outList.remove(tmpStr)
except Exception:
pass
return True,outList
# execute SQL
def execute_direct(self, sql, var=None, fetch=False):
if self.con is None:
import sqlite3
self.con = sqlite3.connect(self.database, check_same_thread=False)
if self.verbose:
self.log.debug("DB Req : {0} var={1}".format(sql, str(var)))
cur = self.con.cursor()
try:
if var is None:
var = {}
cur.execute(sql, var)
retVal = True
except Exception:
retVal = False
if not self.verbose:
self.log.error("DB Req : {0} var={1}".format(sql, str(var)))
err_type, err_value = sys.exc_info()[:2]
err_str = "{0} {1}".format(err_type.__name__, err_value)
self.log.error(err_str)
if self.verbose:
self.log.debug(retVal)
outList = []
if retVal:
if fetch:
outList = cur.fetchall()
if self.verbose:
for item in outList:
self.log.debug(" Ret : " + str(item))
self.con.commit()
return retVal, outList
# remove old database
def deleteDatabase(self):
commands_get_status_output('rm -f %s' % self.database)
# initialize database
def initialize(self):
# import sqlite3
# check if sqlite3 is available
com = 'which %s' % self.engine
status,output = commands_get_status_output(com)
if status != 0:
errstr = "\n\n"
errstr += "ERROR : %s is not available in PATH\n\n" % self.engine
errstr += "There are some possible solutions\n"
errstr += " * run this application under Athena runtime with Release 14 or higher. e.g.,\n"
errstr += " $ source setup.sh -tag=14.2.24,32,setup\n"
errstr += " $ source .../etc/panda/panda_setup.sh\n\n"
errstr += " * set PATH and LD_LIBRARY_PATH to include %s. e.g., at CERN\n" % self.engine
errstr += " $ export PATH=/afs/cern.ch/sw/lcg/external/sqlite/3.4.0/slc3_ia32_gcc323/bin:$PATH\n"
errstr += " $ export LD_LIBRARY_PATH=/afs/cern.ch/sw/lcg/external/sqlite/3.4.0/slc3_ia32_gcc323/lib:$LD_LIBRARY_PATH\n"
errstr += " $ source .../etc/panda/panda_setup.sh\n\n"
errstr += " * install %s from the standard SL4 repository. e.g.,\n" % self.engine
errstr += " $ yum install %s\n\n" % self.engine
errstr += " * use SLC5\n"
raise RuntimeError(errstr)
# create dir for DB
if not os.path.exists(self.database_dir):
os.makedirs(self.database_dir)
# the table already exist
if self.checkTable():
return
# create table
self.createTable()
return
# check table
def checkTable(self):
# get tables
retS,retV = self.execute('.table')
if not retS:
raise RuntimeError("cannot get tables")
# the table already exist or not
if retV == []:
return False
if self.tablename not in retV[-1].split():
return False
# check schema
self.checkSchema()
return True
# check schema
def checkSchema(self,noAdd=False):
# get colum names
retS,retV = self.execute('PRAGMA table_info(%s)' % self.tablename)
if not retS:
raise RuntimeError("cannot get table_info")
# parse
columns = []
for line in retV:
items = line.split('|')
if len(items) > 1:
columns.append(items[1])
# check
for tmpC in LocalJobSpec.appended:
tmpA = LocalJobSpec.appended[tmpC]
if tmpC not in columns:
if noAdd:
raise RuntimeError("%s not found in database schema" % tmpC)
# add column
retS,retV = self.execute("ALTER TABLE %s ADD COLUMN '%s' %s" % \
(self.tablename,tmpC,tmpA))
if not retS:
raise RuntimeError("cannot add %s to database schema" % tmpC)
if noAdd:
return
# check whole schema just in case
self.checkSchema(noAdd=True)
# create table
def createTable(self):
# ver 0_1_1
sql = "CREATE TABLE %s (" % self.tablename
sql += "'id' INTEGER PRIMARY KEY,"
sql += "'JobID' INTEGER,"
sql += "'PandaID' TEXT,"
sql += "'jobStatus' TEXT,"
sql += "'site' VARCHAR(128),"
sql += "'cloud' VARCHAR(20),"
sql += "'jobType' VARCHAR(20),"
sql += "'jobName' VARCHAR(128),"
sql += "'inDS' TEXT,"
sql += "'outDS' TEXT,"
sql += "'libDS' VARCHAR(255),"
sql += "'jobParams' TEXT,"
sql += "'retryID' INTEGER,"
sql += "'provenanceID' INTEGER,"
sql += "'creationTime' TIMESTAMP,"
sql += "'lastUpdate' TIMESTAMP,"
sql += "'dbStatus' VARCHAR(20),"
sql += "'buildStatus' VARCHAR(20),"
sql += "'commandToPilot' VARCHAR(20),"
for tmpC in LocalJobSpec.appended:
tmpA = LocalJobSpec.appended[tmpC]
sql += "'%s' %s," % (tmpC,tmpA)
sql = sql[:-1]
sql += ")"
# execute
retS,retV = self.execute(sql)
if not retS:
raise RuntimeError("failed to create %s" % self.tablename)
# confirm
if not self.checkTable():
raise RuntimeError("failed to confirm %s" % self.tablename)
# convert Panda jobs to DB representation
def convertPtoD(pandaJobList,pandaIDstatus,localJob=None,fileInfo={},pandaJobForSiteID=None):
statusOnly = False
if localJob is not None:
# update status only
ddata = localJob
statusOnly = True
else:
# create new spec
ddata = LocalJobSpec()
# sort by PandaID
pandIDs = list(pandaIDstatus)
pandIDs.sort()
pStr = ''
sStr = ''
ddata.commandToPilot = ''
for tmpID in pandIDs:
# PandaID
pStr += '%s,' % tmpID
# status
sStr += '%s,' % pandaIDstatus[tmpID][0]
# commandToPilot
if pandaIDstatus[tmpID][1] == 'tobekilled':
ddata.commandToPilot = 'tobekilled'
pStr = pStr[:-1]
sStr = sStr[:-1]
# job status
ddata.jobStatus = sStr
# PandaID
ddata.PandaID = pStr
# get panda Job
pandaJob = None
if pandaJobList != []:
# look for buildJob since it doesn't have the first PandaID when retried
for pandaJob in pandaJobList:
if pandaJob.prodSourceLabel == 'panda':
break
elif pandaJobForSiteID is not None:
pandaJob = pandaJobForSiteID
# extract libDS
if pandaJob is not None:
if pandaJob.prodSourceLabel == 'panda':
# build Jobs
ddata.buildStatus = pandaJob.jobStatus
for tmpFile in pandaJob.Files:
if tmpFile.type == 'output':
ddata.libDS = tmpFile.dataset
break
else:
# noBuild or libDS
ddata.buildStatus = ''
for tmpFile in pandaJob.Files:
if tmpFile.type == 'input' and tmpFile.lfn.endswith('.lib.tgz'):
ddata.libDS = tmpFile.dataset
break
# release
ddata.releaseVar = pandaJob.AtlasRelease
# cache
tmpCache = re.sub('^[^-]+-*','',pandaJob.homepackage)
tmpCache = re.sub('_','-',tmpCache)
ddata.cacheVar = tmpCache
# return if update status only
if statusOnly:
# build job
if ddata.buildStatus != '':
ddata.buildStatus = sStr.split(',')[0]
# set computingSite mainly for rebrokerage
if pandaJobForSiteID is not None:
ddata.site = pandaJobForSiteID.computingSite
ddata.nRebro = pandaJobForSiteID.specialHandling.split(',').count('rebro') + \
pandaJobForSiteID.specialHandling.split(',').count('sretry')
# return
return ddata
# job parameters
ddata.jobParams = pandaJob.metadata
# extract datasets
iDSlist = []
oDSlist = []
if fileInfo != {}:
if 'inDS' in fileInfo:
iDSlist = fileInfo['inDS']
if 'outDS' in fileInfo:
oDSlist = fileInfo['outDS']
else:
for pandaJob in pandaJobList:
for tmpFile in pandaJob.Files:
if tmpFile.type == 'input' and not tmpFile.lfn.endswith('.lib.tgz'):
if tmpFile.dataset not in iDSlist:
iDSlist.append(tmpFile.dataset)
elif tmpFile.type == 'output' and not tmpFile.lfn.endswith('.lib.tgz'):
if tmpFile.dataset not in oDSlist:
oDSlist.append(tmpFile.dataset)
# convert to string
ddata.inDS = ''
for iDS in iDSlist:
ddata.inDS += '%s,' % iDS
ddata.inDS = ddata.inDS[:-1]
ddata.outDS = ''
for oDS in oDSlist:
ddata.outDS += '%s,' % oDS
ddata.outDS = ddata.outDS[:-1]
# job name
ddata.jobName = pandaJob.jobName
# creation time
ddata.creationTime = pandaJob.creationTime
# job type
ddata.jobType = pandaJob.prodSeriesLabel
# site
ddata.site = pandaJob.computingSite
# cloud
ddata.cloud = pandaJob.cloud
# job ID
ddata.JobID = pandaJob.jobDefinitionID
# retry ID
ddata.retryID = 0
# provenance ID
ddata.provenanceID = pandaJob.jobExecutionID
# groupID
ddata.groupID = pandaJob.jobsetID
ddata.retryJobsetID = -1
if pandaJob.sourceSite not in ['NULL',None,'']:
ddata.parentJobsetID = long(pandaJob.sourceSite)
else:
ddata.parentJobsetID = -1
# job type
ddata.jobType = pandaJob.processingType
# the number of rebrokerage actions
ddata.nRebro = pandaJob.specialHandling.split(',').count('rebro')
# jediTaskID
ddata.jediTaskID = -1
# return
return ddata
# convert JediTask to DB representation
def convertJTtoD(jediTaskDict,localJob=None):
statusOnly = False
if localJob is not None:
# update status only
ddata = localJob
statusOnly = True
else:
# create new spec
ddata = LocalJobSpec()
# max IDs
maxIDs = 20
# task status
ddata.taskStatus = jediTaskDict['status']
# statistic
ddata.jobStatus = jediTaskDict['statistics']
# PandaID
ddata.PandaID = ''
for tmpPandaID in jediTaskDict['PandaID'][:maxIDs]:
ddata.PandaID += '%s,' % tmpPandaID
ddata.PandaID = ddata.PandaID[:-1]
if len(jediTaskDict['PandaID']) > maxIDs:
ddata.PandaID += ',+%sIDs' % (len(jediTaskDict['PandaID'])-maxIDs)
# merge status
if 'mergeStatus' not in jediTaskDict or jediTaskDict['mergeStatus'] is None:
ddata.mergeJobStatus = 'NA'
else:
ddata.mergeJobStatus = jediTaskDict['mergeStatus']
# merge PandaID
ddata.mergeJobID = ''
for tmpPandaID in jediTaskDict['mergePandaID'][:maxIDs]:
ddata.mergeJobID += '%s,' % tmpPandaID
ddata.mergeJobID = ddata.mergeJobID[:-1]
if len(jediTaskDict['mergePandaID']) > maxIDs:
ddata.mergeJobID += ',+%sIDs' % (len(jediTaskDict['mergePandaID'])-maxIDs)
# return if update status only
if statusOnly:
return ddata
# release
ddata.releaseVar = jediTaskDict['transUses']
# cache
if jediTaskDict['transHome'] is None:
tmpCache = ''
else:
tmpCache = re.sub('^[^-]+-*','',jediTaskDict['transHome'])
tmpCache = re.sub('_','-',tmpCache)
ddata.cacheVar = tmpCache
# job parameters
try:
if isinstance(jediTaskDict['cliParams'],unicode):
ddata.jobParams = jediTaskDict['cliParams'].encode('utf_8')
else:
ddata.jobParams = jediTaskDict['cliParams']
# truncate
ddata.jobParams = ddata.jobParams[:1024]
except Exception:
pass
# input datasets
try:
# max number of datasets to show
maxDS = 20
inDSs = jediTaskDict['inDS'].split(',')
strInDS = ''
# concatenate
for tmpInDS in inDSs[:maxDS]:
strInDS += "%s," % tmpInDS
strInDS = strInDS[:-1]
# truncate
if len(inDSs) > maxDS:
strInDS += ',+{0}DSs'.format(len(inDSs)-maxDS)
ddata.inDS = strInDS
except Exception:
ddata.inDS = jediTaskDict['inDS']
# output datasets
ddata.outDS = jediTaskDict['outDS']
# job name
ddata.jobName = jediTaskDict['taskName']
# creation time
ddata.creationTime = jediTaskDict['creationDate']
# job type
ddata.jobType = jediTaskDict['processingType']
# site
ddata.site = jediTaskDict['site']
# cloud
ddata.cloud = jediTaskDict['cloud']
# job ID
ddata.JobID = jediTaskDict['reqID']
# retry ID
ddata.retryID = 0
# provenance ID
ddata.provenanceID = 0
# groupID
ddata.groupID = jediTaskDict['reqID']
# jediTaskID
ddata.jediTaskID = jediTaskDict['jediTaskID']
# IDs for retry
ddata.retryJobsetID = -1
ddata.parentJobsetID = -1
# the number of rebrokerage actions
ddata.nRebro = 0
# return
return ddata
# instantiate database proxy
pdbProxy = PdbProxy()
# just initialize DB
def initialzieDB(verbose=False,restoreDB=False):
if restoreDB:
pdbProxy.deleteDatabase()
pdbProxy.initialize()
pdbProxy.setVerbose(verbose)
# insert job info to DB
def insertJobDB(job,verbose=False):
tmpLog = PLogger.getPandaLogger()
# set update time
job.lastUpdate = datetime.datetime.utcnow()
# make sql
sql1 = "INSERT INTO %s (%s) " % (pdbProxy.tablename,LocalJobSpec.columnNames())
sql1+= "VALUES " + job.values()
status,out = pdbProxy.execute_direct(sql1)
if not status:
raise RuntimeError("failed to insert job")
# update job info in DB
def updateJobDB(job,verbose=False,updateTime=None):
# make sql
sql1 = "UPDATE %s SET " % pdbProxy.tablename
sql1 += job.values(forUpdate=True)
sql1 += " WHERE JobID=%s " % job.JobID
# set update time
if updateTime is not None:
job.lastUpdate = updateTime
sql1 += " AND lastUpdate<'%s' " % updateTime.strftime('%Y-%m-%d %H:%M:%S')
else:
job.lastUpdate = datetime.datetime.utcnow()
status,out = pdbProxy.execute_direct(sql1)
if not status:
raise RuntimeError("failed to update job")
# set retryID
def setRetryID(job,verbose=False):
# make sql
sql1 = "UPDATE %s SET " % pdbProxy.tablename
sql1 += "retryID=%s,retryJobsetID=%s " % (job.JobID,job.groupID)
sql1 += " WHERE JobID=%s AND (nRebro IS NULL OR nRebro=%s)" % (job.provenanceID,job.nRebro)
status,out = pdbProxy.execute(sql1)
if not status:
raise RuntimeError("failed to set retryID")
# delete old jobs
def deleteOldJobs(days,verbose=False):
# time limit
limit = datetime.datetime.utcnow() - datetime.timedelta(days=days)
# make sql
sql1 = "DELETE FROM %s " % pdbProxy.tablename
sql1 += " WHERE creationTime<'%s' " % limit.strftime('%Y-%m-%d %H:%M:%S')
status,out = pdbProxy.execute_direct(sql1)
if not status:
raise RuntimeError("failed to delete old jobs")
# read job info from DB
def readJobDB(JobID,verbose=False):
# make sql
sql1 = "SELECT %s FROM %s " % (LocalJobSpec.columnNames(),pdbProxy.tablename)
sql1+= "WHERE JobID=%s" % JobID
# execute
status,out = pdbProxy.execute_direct(sql1, fetch=True)
if not status:
raise RuntimeError("failed to get JobID=%s" % JobID)
if len(out) == 0:
return None
# instantiate LocalJobSpec
for values in out:
job = LocalJobSpec()
job.pack(values)
# return frozen job if exists
if job.dbStatus == 'frozen':
return job
# return any
return job
# read jobset info from DB
def readJobsetDB(JobsetID,verbose=False):
# make sql
sql1 = "SELECT %s FROM %s " % (LocalJobSpec.columnNames(),pdbProxy.tablename)
sql1+= "WHERE groupID=%s" % JobsetID
# execute
status,out = pdbProxy.execute(sql1)
if not status:
raise RuntimeError("failed to get JobsetID=%s" % JobsetID)
if len(out) == 0:
return None
# instantiate LocalJobSpec
tmpJobMap = {}
for tmpStr in out:
values = tmpStr.split('|')
job = LocalJobSpec()
job.pack(values)
# return frozen job if exists
if job.dbStatus == 'frozen' or job.JobID not in tmpJobMap:
tmpJobMap[job.JobID] = job
# make jobset
jobset = LocalJobsetSpec()
# set jobs
jobset.setJobs(tmpJobMap.values())
# return any
return jobset
# check jobset status in DB
def checkJobsetStatus(JobsetID,verbose=False):
# logger
tmpLog = PLogger.getPandaLogger()
# make sql
sql1 = "SELECT %s FROM %s " % (LocalJobSpec.columnNames(),pdbProxy.tablename)
sql1+= "WHERE groupID=%s" % JobsetID
failedRet = False,None
# execute
status,out = pdbProxy.execute(sql1)
if not status:
tmpLog.error(out)
tmpLog.error("failed to access local DB")
return failedRet
if len(out) == 0:
tmpLog.error("failed to get JobsetID=%s from local DB" % JobsetID)
return None
# instantiate LocalJobSpec
jobMap = {}
for tmpStr in out:
values = tmpStr.split('|')
job = LocalJobSpec()
job.pack(values)
# use frozen job if exists
if job.JobID not in jobMap or job.dbStatus == 'frozen':
jobMap[job.JobID] = job
# check all job status
for tmpJobID in jobMap:
tmpJobSpec = jobMap[tmpJobID]
        if tmpJobSpec.dbStatus != 'frozen':
return True,'running'
# return
return True,'frozen'
# bulk read job info from DB
def bulkReadJobDB(verbose=False):
# make sql
sql1 = "SELECT %s FROM %s " % (LocalJobSpec.columnNames(),pdbProxy.tablename)
# execute
status,out = pdbProxy.execute_direct(sql1, fetch=True)
if not status:
raise RuntimeError("failed to get jobs")
if len(out) == 0:
return []
# instantiate LocalJobSpec
retMap = {}
jobsetMap = {}
for values in out:
job = LocalJobSpec()
job.pack(values)
# use frozen job if exists
if job.JobID not in retMap or job.dbStatus == 'frozen':
if job.groupID in [0,'0','NULL',-1,'-1']:
retMap[long(job.JobID)] = job
else:
# add jobset
tmpJobsetID = long(job.groupID)
if tmpJobsetID not in retMap or tmpJobsetID not in jobsetMap:
jobsetMap[tmpJobsetID] = []
jobset = LocalJobsetSpec()
retMap[tmpJobsetID] = jobset
# add job
jobsetMap[tmpJobsetID].append(job)
# add jobs to jobset
for tmpJobsetID in jobsetMap:
tmpJobList = jobsetMap[tmpJobsetID]
retMap[tmpJobsetID].setJobs(tmpJobList)
# sort
ids = list(retMap)
ids.sort()
retVal = []
for id in ids:
retVal.append(retMap[id])
# return
return retVal
# get list of JobID
def getListOfJobIDs(nonFrozen=False,verbose=False):
# make sql
sql1 = "SELECT JobID,dbStatus FROM %s " % pdbProxy.tablename
# execute
status,out = pdbProxy.execute_direct(sql1, fetch=True)
if not status:
raise RuntimeError("failed to get list of JobIDs")
allList = []
frozenList = []
for item in out:
# extract JobID
tmpID = long(item[0])
# status in DB
tmpStatus = item[-1]
# keep all jobs
if tmpID not in allList:
allList.append(tmpID)
# keep frozen jobs
if nonFrozen and tmpStatus == 'frozen':
if tmpID not in frozenList:
frozenList.append(tmpID)
# remove redundant jobs
retVal = []
for item in allList:
if item not in frozenList:
retVal.append(item)
# sort
retVal.sort()
# return
return retVal
# get map of jobsetID and JobIDs
def getMapJobsetIDJobIDs(verbose=False):
# make sql
sql1 = "SELECT groupID,JobID FROM %s WHERE groupID is not NULL and groupID != 0 and groupID != ''" % pdbProxy.tablename
# execute
status,out = pdbProxy.execute(sql1)
if not status:
raise RuntimeError("failed to get list of JobIDs")
allMap = {}
for item in out:
# JobsetID
tmpJobsetID = long(item.split('|')[0])
# JobID
tmpJobID = long(item.split('|')[-1])
# append
if tmpJobsetID not in allMap:
allMap[tmpJobsetID] = []
if tmpJobID not in allMap[tmpJobsetID]:
allMap[tmpJobsetID].append(tmpJobID)
# sort
for tmpKey in allMap.keys():
allMap[tmpKey].sort()
# return
return allMap
# make JobSetSpec
def makeJobsetSpec(jobList):
jobset = LocalJobsetSpec()
jobset.setJobs(jobList)
return jobset
# get map of jobsetID and jediTaskID
def getJobsetTaskMap(verbose=False):
# make sql
sql1 = "SELECT groupID,jediTaskID FROM %s WHERE groupID is not NULL and groupID != 0 and groupID != '' and jediTaskID is not null and jediTaskID != ''" % pdbProxy.tablename
# execute
status,out = pdbProxy.execute_direct(sql1, fetch=True)
if not status:
raise RuntimeError("failed to get list of JobIDs")
allMap = {}
for item in out:
# JobsetID
tmpJobsetID = long(item[0])
# JobID
jediTaskID = long(item[-1])
# append
allMap[jediTaskID] = tmpJobsetID
# return
return allMap
| 32.168394
| 176
| 0.573488
|
import os
import re
import sys
import time
import datetime
from .MiscUtils import commands_get_status_output
try:
long()
except Exception:
long = int
from . import PLogger
from .LocalJobSpec import LocalJobSpec
from .LocalJobsetSpec import LocalJobsetSpec
class PdbProxy:
def __init__(self,verbose=False):
self.engine = 'sqlite3'
self.version = '0_0_1'
self.filename = 'pandajob.db'
self.database_dir = os.path.expanduser(os.environ['PANDA_CONFIG_ROOT'])
self.database = '%s/%s' % (self.database_dir,self.filename)
self.tablename = 'jobtable_%s' % self.version
self.verbose = verbose
self.con = None
self.log = PLogger.getPandaLogger()
def setVerbose(self,verbose):
self.verbose = verbose
def execute(self,sql,var={}):
tmpLog = PLogger.getPandaLogger()
for tmpKey in var:
tmpVal = var[tmpKey]
            sql = sql.replace(tmpKey,str(tmpVal))
com = '%s %s "%s"' % (self.engine,self.database,sql)
if self.verbose:
tmpLog.debug("DB Req : " + com)
nTry = 5
        status = 0
for iTry in range(nTry):
if self.verbose:
tmpLog.debug(" Try : %s/%s" % (iTry,nTry))
status,output = commands_get_status_output(com)
status %= 255
if status == 0:
break
if iTry+1 < nTry:
time.sleep(2)
if status != 0:
tmpLog.error(status)
tmpLog.error(output)
return False,output
else:
if self.verbose:
tmpLog.debug(" Ret : " + output)
outList = output.split('\n')
try:
outList.remove('')
except Exception:
pass
ngStrings = ['Loading resources from']
for tmpStr in tuple(outList):
flagNG = False
for ngStr in ngStrings:
match = re.search(ngStr,tmpStr,re.I)
if match is not None:
flagNG = True
break
if flagNG:
try:
outList.remove(tmpStr)
except Exception:
pass
return True,outList
def execute_direct(self, sql, var=None, fetch=False):
if self.con is None:
import sqlite3
self.con = sqlite3.connect(self.database, check_same_thread=False)
if self.verbose:
self.log.debug("DB Req : {0} var={1}".format(sql, str(var)))
cur = self.con.cursor()
try:
if var is None:
var = {}
cur.execute(sql, var)
retVal = True
except Exception:
retVal = False
if not self.verbose:
self.log.error("DB Req : {0} var={1}".format(sql, str(var)))
err_type, err_value = sys.exc_info()[:2]
err_str = "{0} {1}".format(err_type.__name__, err_value)
self.log.error(err_str)
if self.verbose:
self.log.debug(retVal)
outList = []
if retVal:
if fetch:
outList = cur.fetchall()
if self.verbose:
for item in outList:
self.log.debug(" Ret : " + str(item))
self.con.commit()
return retVal, outList
def deleteDatabase(self):
commands_get_status_output('rm -f %s' % self.database)
def initialize(self):
com = 'which %s' % self.engine
status,output = commands_get_status_output(com)
if status != 0:
errstr = "\n\n"
errstr += "ERROR : %s is not available in PATH\n\n" % self.engine
errstr += "There are some possible solutions\n"
errstr += " * run this application under Athena runtime with Release 14 or higher. e.g.,\n"
errstr += " $ source setup.sh -tag=14.2.24,32,setup\n"
errstr += " $ source .../etc/panda/panda_setup.sh\n\n"
errstr += " * set PATH and LD_LIBRARY_PATH to include %s. e.g., at CERN\n" % self.engine
errstr += " $ export PATH=/afs/cern.ch/sw/lcg/external/sqlite/3.4.0/slc3_ia32_gcc323/bin:$PATH\n"
errstr += " $ export LD_LIBRARY_PATH=/afs/cern.ch/sw/lcg/external/sqlite/3.4.0/slc3_ia32_gcc323/lib:$LD_LIBRARY_PATH\n"
errstr += " $ source .../etc/panda/panda_setup.sh\n\n"
errstr += " * install %s from the standard SL4 repository. e.g.,\n" % self.engine
errstr += " $ yum install %s\n\n" % self.engine
errstr += " * use SLC5\n"
raise RuntimeError(errstr)
if not os.path.exists(self.database_dir):
os.makedirs(self.database_dir)
if self.checkTable():
return
self.createTable()
return
def checkTable(self):
retS,retV = self.execute('.table')
if not retS:
raise RuntimeError("cannot get tables")
if retV == []:
return False
if self.tablename not in retV[-1].split():
return False
self.checkSchema()
return True
def checkSchema(self,noAdd=False):
retS,retV = self.execute('PRAGMA table_info(%s)' % self.tablename)
if not retS:
raise RuntimeError("cannot get table_info")
columns = []
for line in retV:
items = line.split('|')
if len(items) > 1:
columns.append(items[1])
for tmpC in LocalJobSpec.appended:
tmpA = LocalJobSpec.appended[tmpC]
if tmpC not in columns:
if noAdd:
raise RuntimeError("%s not found in database schema" % tmpC)
retS,retV = self.execute("ALTER TABLE %s ADD COLUMN '%s' %s" % \
(self.tablename,tmpC,tmpA))
if not retS:
raise RuntimeError("cannot add %s to database schema" % tmpC)
if noAdd:
return
self.checkSchema(noAdd=True)
def createTable(self):
sql = "CREATE TABLE %s (" % self.tablename
sql += "'id' INTEGER PRIMARY KEY,"
sql += "'JobID' INTEGER,"
sql += "'PandaID' TEXT,"
sql += "'jobStatus' TEXT,"
sql += "'site' VARCHAR(128),"
sql += "'cloud' VARCHAR(20),"
sql += "'jobType' VARCHAR(20),"
sql += "'jobName' VARCHAR(128),"
sql += "'inDS' TEXT,"
sql += "'outDS' TEXT,"
sql += "'libDS' VARCHAR(255),"
sql += "'jobParams' TEXT,"
sql += "'retryID' INTEGER,"
sql += "'provenanceID' INTEGER,"
sql += "'creationTime' TIMESTAMP,"
sql += "'lastUpdate' TIMESTAMP,"
sql += "'dbStatus' VARCHAR(20),"
sql += "'buildStatus' VARCHAR(20),"
sql += "'commandToPilot' VARCHAR(20),"
for tmpC in LocalJobSpec.appended:
tmpA = LocalJobSpec.appended[tmpC]
sql += "'%s' %s," % (tmpC,tmpA)
sql = sql[:-1]
sql += ")"
retS,retV = self.execute(sql)
if not retS:
raise RuntimeError("failed to create %s" % self.tablename)
if not self.checkTable():
raise RuntimeError("failed to confirm %s" % self.tablename)
def convertPtoD(pandaJobList,pandaIDstatus,localJob=None,fileInfo={},pandaJobForSiteID=None):
statusOnly = False
if localJob is not None:
ddata = localJob
statusOnly = True
else:
ddata = LocalJobSpec()
pandIDs = list(pandaIDstatus)
pandIDs.sort()
pStr = ''
sStr = ''
ddata.commandToPilot = ''
for tmpID in pandIDs:
pStr += '%s,' % tmpID
sStr += '%s,' % pandaIDstatus[tmpID][0]
if pandaIDstatus[tmpID][1] == 'tobekilled':
ddata.commandToPilot = 'tobekilled'
pStr = pStr[:-1]
sStr = sStr[:-1]
ddata.jobStatus = sStr
ddata.PandaID = pStr
pandaJob = None
if pandaJobList != []:
for pandaJob in pandaJobList:
if pandaJob.prodSourceLabel == 'panda':
break
elif pandaJobForSiteID is not None:
pandaJob = pandaJobForSiteID
# extract libDS
if pandaJob is not None:
if pandaJob.prodSourceLabel == 'panda':
# build Jobs
ddata.buildStatus = pandaJob.jobStatus
for tmpFile in pandaJob.Files:
if tmpFile.type == 'output':
ddata.libDS = tmpFile.dataset
break
else:
# noBuild or libDS
ddata.buildStatus = ''
for tmpFile in pandaJob.Files:
if tmpFile.type == 'input' and tmpFile.lfn.endswith('.lib.tgz'):
ddata.libDS = tmpFile.dataset
break
# release
ddata.releaseVar = pandaJob.AtlasRelease
# cache
tmpCache = re.sub('^[^-]+-*','',pandaJob.homepackage)
tmpCache = re.sub('_','-',tmpCache)
ddata.cacheVar = tmpCache
# return if update status only
if statusOnly:
# build job
if ddata.buildStatus != '':
ddata.buildStatus = sStr.split(',')[0]
# set computingSite mainly for rebrokerage
if pandaJobForSiteID is not None:
ddata.site = pandaJobForSiteID.computingSite
ddata.nRebro = pandaJobForSiteID.specialHandling.split(',').count('rebro') + \
pandaJobForSiteID.specialHandling.split(',').count('sretry')
# return
return ddata
# job parameters
ddata.jobParams = pandaJob.metadata
# extract datasets
iDSlist = []
oDSlist = []
if fileInfo != {}:
if 'inDS' in fileInfo:
iDSlist = fileInfo['inDS']
if 'outDS' in fileInfo:
oDSlist = fileInfo['outDS']
else:
for pandaJob in pandaJobList:
for tmpFile in pandaJob.Files:
if tmpFile.type == 'input' and not tmpFile.lfn.endswith('.lib.tgz'):
if tmpFile.dataset not in iDSlist:
iDSlist.append(tmpFile.dataset)
elif tmpFile.type == 'output' and not tmpFile.lfn.endswith('.lib.tgz'):
if tmpFile.dataset not in oDSlist:
oDSlist.append(tmpFile.dataset)
# convert to string
ddata.inDS = ''
for iDS in iDSlist:
ddata.inDS += '%s,' % iDS
ddata.inDS = ddata.inDS[:-1]
ddata.outDS = ''
for oDS in oDSlist:
ddata.outDS += '%s,' % oDS
ddata.outDS = ddata.outDS[:-1]
# job name
ddata.jobName = pandaJob.jobName
# creation time
ddata.creationTime = pandaJob.creationTime
# job type
ddata.jobType = pandaJob.prodSeriesLabel
# site
ddata.site = pandaJob.computingSite
# cloud
ddata.cloud = pandaJob.cloud
# job ID
ddata.JobID = pandaJob.jobDefinitionID
# retry ID
ddata.retryID = 0
# provenance ID
ddata.provenanceID = pandaJob.jobExecutionID
# groupID
ddata.groupID = pandaJob.jobsetID
ddata.retryJobsetID = -1
if pandaJob.sourceSite not in ['NULL',None,'']:
ddata.parentJobsetID = long(pandaJob.sourceSite)
else:
ddata.parentJobsetID = -1
# job type
ddata.jobType = pandaJob.processingType
# the number of rebrokerage actions
ddata.nRebro = pandaJob.specialHandling.split(',').count('rebro')
# jediTaskID
ddata.jediTaskID = -1
# return
return ddata
# convert JediTask to DB representation
def convertJTtoD(jediTaskDict,localJob=None):
statusOnly = False
if localJob is not None:
# update status only
ddata = localJob
statusOnly = True
else:
# create new spec
ddata = LocalJobSpec()
# max IDs
maxIDs = 20
# task status
ddata.taskStatus = jediTaskDict['status']
# statistic
ddata.jobStatus = jediTaskDict['statistics']
# PandaID
ddata.PandaID = ''
for tmpPandaID in jediTaskDict['PandaID'][:maxIDs]:
ddata.PandaID += '%s,' % tmpPandaID
ddata.PandaID = ddata.PandaID[:-1]
if len(jediTaskDict['PandaID']) > maxIDs:
ddata.PandaID += ',+%sIDs' % (len(jediTaskDict['PandaID'])-maxIDs)
# merge status
if 'mergeStatus' not in jediTaskDict or jediTaskDict['mergeStatus'] is None:
ddata.mergeJobStatus = 'NA'
else:
ddata.mergeJobStatus = jediTaskDict['mergeStatus']
# merge PandaID
ddata.mergeJobID = ''
for tmpPandaID in jediTaskDict['mergePandaID'][:maxIDs]:
ddata.mergeJobID += '%s,' % tmpPandaID
ddata.mergeJobID = ddata.mergeJobID[:-1]
if len(jediTaskDict['mergePandaID']) > maxIDs:
ddata.mergeJobID += ',+%sIDs' % (len(jediTaskDict['mergePandaID'])-maxIDs)
# return if update status only
if statusOnly:
return ddata
# release
ddata.releaseVar = jediTaskDict['transUses']
# cache
if jediTaskDict['transHome'] is None:
tmpCache = ''
else:
tmpCache = re.sub('^[^-]+-*','',jediTaskDict['transHome'])
tmpCache = re.sub('_','-',tmpCache)
ddata.cacheVar = tmpCache
# job parameters
try:
if isinstance(jediTaskDict['cliParams'],unicode):
ddata.jobParams = jediTaskDict['cliParams'].encode('utf_8')
else:
ddata.jobParams = jediTaskDict['cliParams']
# truncate
ddata.jobParams = ddata.jobParams[:1024]
except Exception:
pass
# input datasets
try:
# max number of datasets to show
maxDS = 20
inDSs = jediTaskDict['inDS'].split(',')
strInDS = ''
# concatenate
for tmpInDS in inDSs[:maxDS]:
strInDS += "%s," % tmpInDS
strInDS = strInDS[:-1]
# truncate
if len(inDSs) > maxDS:
strInDS += ',+{0}DSs'.format(len(inDSs)-maxDS)
ddata.inDS = strInDS
except Exception:
ddata.inDS = jediTaskDict['inDS']
# output datasets
ddata.outDS = jediTaskDict['outDS']
# job name
ddata.jobName = jediTaskDict['taskName']
# creation time
ddata.creationTime = jediTaskDict['creationDate']
# job type
ddata.jobType = jediTaskDict['processingType']
# site
ddata.site = jediTaskDict['site']
# cloud
ddata.cloud = jediTaskDict['cloud']
# job ID
ddata.JobID = jediTaskDict['reqID']
# retry ID
ddata.retryID = 0
# provenance ID
ddata.provenanceID = 0
# groupID
ddata.groupID = jediTaskDict['reqID']
# jediTaskID
ddata.jediTaskID = jediTaskDict['jediTaskID']
# IDs for retry
ddata.retryJobsetID = -1
ddata.parentJobsetID = -1
# the number of rebrokerage actions
ddata.nRebro = 0
# return
return ddata
# instantiate database proxy
pdbProxy = PdbProxy()
# just initialize DB
def initialzieDB(verbose=False,restoreDB=False):
if restoreDB:
pdbProxy.deleteDatabase()
pdbProxy.initialize()
pdbProxy.setVerbose(verbose)
# insert job info to DB
def insertJobDB(job,verbose=False):
tmpLog = PLogger.getPandaLogger()
# set update time
job.lastUpdate = datetime.datetime.utcnow()
# make sql
sql1 = "INSERT INTO %s (%s) " % (pdbProxy.tablename,LocalJobSpec.columnNames())
sql1+= "VALUES " + job.values()
status,out = pdbProxy.execute_direct(sql1)
if not status:
raise RuntimeError("failed to insert job")
# update job info in DB
def updateJobDB(job,verbose=False,updateTime=None):
# make sql
sql1 = "UPDATE %s SET " % pdbProxy.tablename
sql1 += job.values(forUpdate=True)
sql1 += " WHERE JobID=%s " % job.JobID
# set update time
if updateTime is not None:
job.lastUpdate = updateTime
sql1 += " AND lastUpdate<'%s' " % updateTime.strftime('%Y-%m-%d %H:%M:%S')
else:
job.lastUpdate = datetime.datetime.utcnow()
status,out = pdbProxy.execute_direct(sql1)
if not status:
raise RuntimeError("failed to update job")
# set retryID
def setRetryID(job,verbose=False):
# make sql
sql1 = "UPDATE %s SET " % pdbProxy.tablename
sql1 += "retryID=%s,retryJobsetID=%s " % (job.JobID,job.groupID)
sql1 += " WHERE JobID=%s AND (nRebro IS NULL OR nRebro=%s)" % (job.provenanceID,job.nRebro)
status,out = pdbProxy.execute(sql1)
if not status:
raise RuntimeError("failed to set retryID")
# delete old jobs
def deleteOldJobs(days,verbose=False):
# time limit
limit = datetime.datetime.utcnow() - datetime.timedelta(days=days)
# make sql
sql1 = "DELETE FROM %s " % pdbProxy.tablename
sql1 += " WHERE creationTime<'%s' " % limit.strftime('%Y-%m-%d %H:%M:%S')
status,out = pdbProxy.execute_direct(sql1)
if not status:
raise RuntimeError("failed to delete old jobs")
# read job info from DB
def readJobDB(JobID,verbose=False):
# make sql
sql1 = "SELECT %s FROM %s " % (LocalJobSpec.columnNames(),pdbProxy.tablename)
sql1+= "WHERE JobID=%s" % JobID
# execute
status,out = pdbProxy.execute_direct(sql1, fetch=True)
if not status:
raise RuntimeError("failed to get JobID=%s" % JobID)
if len(out) == 0:
return None
# instantiate LocalJobSpec
for values in out:
job = LocalJobSpec()
job.pack(values)
# return frozen job if exists
if job.dbStatus == 'frozen':
return job
# return any
return job
# read jobset info from DB
def readJobsetDB(JobsetID,verbose=False):
# make sql
sql1 = "SELECT %s FROM %s " % (LocalJobSpec.columnNames(),pdbProxy.tablename)
sql1+= "WHERE groupID=%s" % JobsetID
# execute
status,out = pdbProxy.execute(sql1)
if not status:
raise RuntimeError("failed to get JobsetID=%s" % JobsetID)
if len(out) == 0:
return None
# instantiate LocalJobSpec
tmpJobMap = {}
for tmpStr in out:
values = tmpStr.split('|')
job = LocalJobSpec()
job.pack(values)
# return frozen job if exists
if job.dbStatus == 'frozen' or job.JobID not in tmpJobMap:
tmpJobMap[job.JobID] = job
# make jobset
jobset = LocalJobsetSpec()
# set jobs
jobset.setJobs(tmpJobMap.values())
# return any
return jobset
# check jobset status in DB
def checkJobsetStatus(JobsetID,verbose=False):
# logger
tmpLog = PLogger.getPandaLogger()
# make sql
sql1 = "SELECT %s FROM %s " % (LocalJobSpec.columnNames(),pdbProxy.tablename)
sql1+= "WHERE groupID=%s" % JobsetID
failedRet = False,None
# execute
status,out = pdbProxy.execute(sql1)
if not status:
tmpLog.error(out)
tmpLog.error("failed to access local DB")
return failedRet
if len(out) == 0:
tmpLog.error("failed to get JobsetID=%s from local DB" % JobsetID)
return None
# instantiate LocalJobSpec
jobMap = {}
for tmpStr in out:
values = tmpStr.split('|')
job = LocalJobSpec()
job.pack(values)
# use frozen job if exists
if job.JobID not in jobMap or job.dbStatus == 'frozen':
jobMap[job.JobID] = job
# check all job status
for tmpJobID in jobMap:
tmpJobSpec = jobMap[tmpJobID]
        if tmpJobSpec.dbStatus != 'frozen':
return True,'running'
# return
return True,'frozen'
# bulk read job info from DB
def bulkReadJobDB(verbose=False):
# make sql
sql1 = "SELECT %s FROM %s " % (LocalJobSpec.columnNames(),pdbProxy.tablename)
# execute
status,out = pdbProxy.execute_direct(sql1, fetch=True)
if not status:
raise RuntimeError("failed to get jobs")
if len(out) == 0:
return []
# instantiate LocalJobSpec
retMap = {}
jobsetMap = {}
for values in out:
job = LocalJobSpec()
job.pack(values)
# use frozen job if exists
if job.JobID not in retMap or job.dbStatus == 'frozen':
if job.groupID in [0,'0','NULL',-1,'-1']:
retMap[long(job.JobID)] = job
else:
# add jobset
tmpJobsetID = long(job.groupID)
if tmpJobsetID not in retMap or tmpJobsetID not in jobsetMap:
jobsetMap[tmpJobsetID] = []
jobset = LocalJobsetSpec()
retMap[tmpJobsetID] = jobset
# add job
jobsetMap[tmpJobsetID].append(job)
# add jobs to jobset
for tmpJobsetID in jobsetMap:
tmpJobList = jobsetMap[tmpJobsetID]
retMap[tmpJobsetID].setJobs(tmpJobList)
# sort
ids = list(retMap)
ids.sort()
retVal = []
for id in ids:
retVal.append(retMap[id])
# return
return retVal
# get list of JobID
def getListOfJobIDs(nonFrozen=False,verbose=False):
# make sql
sql1 = "SELECT JobID,dbStatus FROM %s " % pdbProxy.tablename
# execute
status,out = pdbProxy.execute_direct(sql1, fetch=True)
if not status:
raise RuntimeError("failed to get list of JobIDs")
allList = []
frozenList = []
for item in out:
# extract JobID
tmpID = long(item[0])
# status in DB
tmpStatus = item[-1]
# keep all jobs
if tmpID not in allList:
allList.append(tmpID)
# keep frozen jobs
if nonFrozen and tmpStatus == 'frozen':
if tmpID not in frozenList:
frozenList.append(tmpID)
# remove redundant jobs
retVal = []
for item in allList:
if item not in frozenList:
retVal.append(item)
# sort
retVal.sort()
# return
return retVal
# get map of jobsetID and JobIDs
def getMapJobsetIDJobIDs(verbose=False):
# make sql
sql1 = "SELECT groupID,JobID FROM %s WHERE groupID is not NULL and groupID != 0 and groupID != ''" % pdbProxy.tablename
# execute
status,out = pdbProxy.execute(sql1)
if not status:
raise RuntimeError("failed to get list of JobIDs")
allMap = {}
for item in out:
# JobsetID
tmpJobsetID = long(item.split('|')[0])
# JobID
tmpJobID = long(item.split('|')[-1])
# append
if tmpJobsetID not in allMap:
allMap[tmpJobsetID] = []
if tmpJobID not in allMap[tmpJobsetID]:
allMap[tmpJobsetID].append(tmpJobID)
# sort
for tmpKey in allMap.keys():
allMap[tmpKey].sort()
# return
return allMap
# make JobSetSpec
def makeJobsetSpec(jobList):
jobset = LocalJobsetSpec()
jobset.setJobs(jobList)
return jobset
# get map of jobsetID and jediTaskID
def getJobsetTaskMap(verbose=False):
# make sql
sql1 = "SELECT groupID,jediTaskID FROM %s WHERE groupID is not NULL and groupID != 0 and groupID != '' and jediTaskID is not null and jediTaskID != ''" % pdbProxy.tablename
# execute
status,out = pdbProxy.execute_direct(sql1, fetch=True)
if not status:
raise RuntimeError("failed to get list of JobIDs")
allMap = {}
for item in out:
# JobsetID
tmpJobsetID = long(item[0])
# JobID
jediTaskID = long(item[-1])
# append
allMap[jediTaskID] = tmpJobsetID
# return
return allMap
| true
| true
|
f70ad47ffabe1481941f0bc8e4a61baa6b6b05a1
| 1,012
|
py
|
Python
|
examples/projections/azim/azim_gnomonic.py
|
jbusecke/pygmt
|
9ef6338dbb9bdd4c31dda94da6d4126852a6cd85
|
[
"BSD-3-Clause"
] | 326
|
2019-02-13T09:33:39.000Z
|
2022-03-25T17:24:05.000Z
|
examples/projections/azim/azim_gnomonic.py
|
jbusecke/pygmt
|
9ef6338dbb9bdd4c31dda94da6d4126852a6cd85
|
[
"BSD-3-Clause"
] | 1,153
|
2019-01-22T19:14:32.000Z
|
2022-03-31T22:07:03.000Z
|
examples/projections/azim/azim_gnomonic.py
|
jbusecke/pygmt
|
9ef6338dbb9bdd4c31dda94da6d4126852a6cd85
|
[
"BSD-3-Clause"
] | 160
|
2019-02-10T15:24:19.000Z
|
2022-03-31T09:07:41.000Z
|
r"""
Gnomonic
========
The point of perspective of the gnomonic projection lies at the center of the
earth. As a consequence great circles (orthodromes) on the surface of the Earth
are displayed as straight lines, which makes it suitable for distance
estimation for navigational purposes. It is neither conformal nor equal-area
and the distortion increases greatly with distance to the projection center. It
follows that the scope of application is restricted to a small area around the
projection center (at a maximum of 60°).
**f**\ *lon0/lat0*\ [*/horizon*\ ]\ */scale*
or **F**\ *lon0/lat0*\ [*/horizon*\ ]\ */width*
**f** or **F** specifies the projection type, *lon0/lat0* specifies the
projection center, the optional parameter *horizon* specifies the maximum
distance from projection center (in degrees, < 90, default 60), and *scale* or
*width* sets the size of the figure.
"""
import pygmt
fig = pygmt.Figure()
fig.coast(projection="F-90/15/12c", region="g", frame="20g20", land="gray")
fig.show()
| 38.923077
| 79
| 0.737154
|
import pygmt
fig = pygmt.Figure()
fig.coast(projection="F-90/15/12c", region="g", frame="20g20", land="gray")
fig.show()
| true
| true
|
f70ad5093842eab9fb077c1fdda2fe3c11e10e3c
| 656
|
py
|
Python
|
Data Science salary prediction/FlaskAPI/app.py
|
negiaditya/PROJECTS-Data_Science
|
d26e1fdfc6ce51f02e65c4dbca3edfb5cd97f0a1
|
[
"Apache-2.0"
] | null | null | null |
Data Science salary prediction/FlaskAPI/app.py
|
negiaditya/PROJECTS-Data_Science
|
d26e1fdfc6ce51f02e65c4dbca3edfb5cd97f0a1
|
[
"Apache-2.0"
] | null | null | null |
Data Science salary prediction/FlaskAPI/app.py
|
negiaditya/PROJECTS-Data_Science
|
d26e1fdfc6ce51f02e65c4dbca3edfb5cd97f0a1
|
[
"Apache-2.0"
] | null | null | null |
import flask
from flask import Flask,jsonify,request
import json
from data_input import data_in
import numpy as np
import pickle
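# Load the pickled estimator saved under ./models/ and return it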
def load_models():
file_name = './models/model_file.p'
with open(file_name,'rb') as pickled:
data = pickle.load(pickled)
model = data['model']
return model
app = Flask(__name__)
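# Prediction endpoint: expects JSON {"input": [...]} and returns the model prediction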
@app.route('/predict',methods=['GET'])
def predict():
request_json = request.get_json()
x = request_json['input']
x_in = np.array(x).reshape(1,-1)
model = load_models()
prediction = model.predict(x_in)[0]
response = json.dumps({'response': prediction})
return response,200
if __name__ == '__main__':
    app.run(debug=True)
| 21.16129
| 48
| 0.72561
|
import flask
from flask import Flask,jsonify,request
import json
from data_input import data_in
import numpy as np
import pickle
def load_models():
file_name = './models/model_file.p'
with open(file_name,'rb') as pickled:
data = pickle.load(pickled)
model = data['model']
return model
app = Flask(__name__)
@app.route('/predict',methods=['GET'])
def predict():
request_json = request.get_json()
x = request_json['input']
x_in = np.array(x).reshape(1,-1)
model = load_models()
prediction = model.predict(x_in)[0]
response = json.dumps({'response': prediction})
return response,200
if __name__ == '__main__':
    app.run(debug=True)
| true
| true
|
f70ad5c86243064fffc2399ecd32d4857976c4ce
| 1,585
|
py
|
Python
|
system_tests/conftest.py
|
Juliana-Morais/data-attribute-recommendation-python-sdk
|
95afcfff97ec4f71c5bf10953c0dfa813635636e
|
[
"Apache-2.0"
] | null | null | null |
system_tests/conftest.py
|
Juliana-Morais/data-attribute-recommendation-python-sdk
|
95afcfff97ec4f71c5bf10953c0dfa813635636e
|
[
"Apache-2.0"
] | null | null | null |
system_tests/conftest.py
|
Juliana-Morais/data-attribute-recommendation-python-sdk
|
95afcfff97ec4f71c5bf10953c0dfa813635636e
|
[
"Apache-2.0"
] | null | null | null |
import os
import pytest
from sap.aibus.dar.client.data_manager_client import DataManagerClient
from sap.aibus.dar.client.inference_client import InferenceClient
from sap.aibus.dar.client.model_manager_client import ModelManagerClient
from sap.aibus.dar.client.util.credentials import OnlineCredentialsSource
from sap.aibus.dar.client.workflow.model import ModelCreator
@pytest.fixture()
def dar_url():
return os.environ["DAR_URL"]
@pytest.fixture()
def dar_client_id():
return os.environ["DAR_CLIENT_ID"]
@pytest.fixture()
def dar_client_secret():
return os.environ["DAR_CLIENT_SECRET"]
@pytest.fixture()
def dar_uaa_url():
return os.environ["DAR_AUTH_URL"]
# For the following fixtures, the parameters to the functions
# will be provided by existing fixtures of the same name!
@pytest.fixture()
def credentials_source(dar_client_id, dar_client_secret, dar_uaa_url):
return OnlineCredentialsSource(dar_uaa_url, dar_client_id, dar_client_secret)
@pytest.fixture()
def data_manager_client(dar_url, credentials_source):
client = DataManagerClient(dar_url, credentials_source)
return client
@pytest.fixture()
def model_manager_client(dar_url, credentials_source):
client = ModelManagerClient(dar_url, credentials_source)
return client
@pytest.fixture()
def inference_client(dar_url, credentials_source):
client = InferenceClient(dar_url, credentials_source)
return client
@pytest.fixture()
def model_creator(dar_url, credentials_source):
create_model = ModelCreator(dar_url, credentials_source)
return create_model
| 25.15873
| 81
| 0.796215
|
import os
import pytest
from sap.aibus.dar.client.data_manager_client import DataManagerClient
from sap.aibus.dar.client.inference_client import InferenceClient
from sap.aibus.dar.client.model_manager_client import ModelManagerClient
from sap.aibus.dar.client.util.credentials import OnlineCredentialsSource
from sap.aibus.dar.client.workflow.model import ModelCreator
@pytest.fixture()
def dar_url():
return os.environ["DAR_URL"]
@pytest.fixture()
def dar_client_id():
return os.environ["DAR_CLIENT_ID"]
@pytest.fixture()
def dar_client_secret():
return os.environ["DAR_CLIENT_SECRET"]
@pytest.fixture()
def dar_uaa_url():
return os.environ["DAR_AUTH_URL"]
@pytest.fixture()
def credentials_source(dar_client_id, dar_client_secret, dar_uaa_url):
return OnlineCredentialsSource(dar_uaa_url, dar_client_id, dar_client_secret)
@pytest.fixture()
def data_manager_client(dar_url, credentials_source):
client = DataManagerClient(dar_url, credentials_source)
return client
@pytest.fixture()
def model_manager_client(dar_url, credentials_source):
client = ModelManagerClient(dar_url, credentials_source)
return client
@pytest.fixture()
def inference_client(dar_url, credentials_source):
client = InferenceClient(dar_url, credentials_source)
return client
@pytest.fixture()
def model_creator(dar_url, credentials_source):
create_model = ModelCreator(dar_url, credentials_source)
return create_model
| true
| true
|
f70ad74e8814bb9a9280d0b92fbb15dd2c7d28a8
| 12,779
|
py
|
Python
|
parse_scripts/parquet_parsers/galaxy_to_parquet.py
|
lfdversluis/wta-tools
|
e9d505df03fff9bb57208dfb82212977ef5e7ca2
|
[
"Apache-2.0"
] | 3
|
2019-08-19T10:38:36.000Z
|
2020-06-18T10:36:36.000Z
|
parse_scripts/parquet_parsers/galaxy_to_parquet.py
|
lfdversluis/wta-tools
|
e9d505df03fff9bb57208dfb82212977ef5e7ca2
|
[
"Apache-2.0"
] | 8
|
2020-02-12T09:53:53.000Z
|
2021-03-29T11:16:20.000Z
|
parse_scripts/parquet_parsers/galaxy_to_parquet.py
|
lfdversluis/wta-tools
|
e9d505df03fff9bb57208dfb82212977ef5e7ca2
|
[
"Apache-2.0"
] | 2
|
2020-06-17T08:46:02.000Z
|
2020-11-26T11:23:48.000Z
|
import json
import os
import sys
from datetime import datetime
import numpy as np
import pandas as pd
from objects.task import Task
from objects.workflow import Workflow
from objects.workload import Workload
pd.set_option('display.max_columns', None)
USAGE = 'Usage: python(3) ./galaxy_to_parquet.py galaxy_folder'
NAME = 'Galaxy'
TARGET_DIR = os.path.join(os.path.dirname(os.getcwd()), 'output_parquet', NAME)
DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S.%f'
EPOCH = datetime(1970, 1, 1)
JOBS = None
METRICS = None
WORKFLOWS = None
WORKFLOW_INVOCATIONS = None
WORKFLOW_STEPS = None
WORKFLOW_INVOKE_STEPS = None
WORKFLOW_CONNECTIONS = None
WORKFLOW_STEP_INPUT = None
def read_files(folder_path):
global METRICS
METRICS = pd.read_csv(os.path.join(folder_path, 'job_metrics_numeric.csv'),
names=["id", "job_id", "plugin", "metric_name", "metric_value"], dtype={
"id": np.float,
"job_id": np.float,
"plugin": np.str,
"metric_name": np.str,
"metric_value": np.float,
})
print("Done with reading metrics")
global WORKFLOWS
WORKFLOWS = pd.read_csv(os.path.join(folder_path, 'workflows.csv'),
names=["id", "create_time", "update_time", "stored_workflow_id", "has_cycles", "has_errors",
"parent_workflow_id", "uuid"], dtype={
"id": np.float,
"create_time": np.str,
"update_time": np.str,
"stored_workflow_id": np.float,
"has_cycles": np.str,
"has_errors": np.str,
"parent_workflow_id": np.float,
"uuid": np.str,
})
print("Done with reading workflows")
global WORKFLOW_INVOCATIONS
WORKFLOW_INVOCATIONS = pd.read_csv(os.path.join(folder_path, 'workflow-invocations.csv'),
names=["id", "create_time", "update_time", "workflow_id", "state", "scheduler",
"handler"], dtype={
"id": np.float,
"create_time": np.str,
"update_time": np.str,
"workflow_id": np.float,
"state": np.str,
"scheduler": np.str,
"handler": np.str,
})
print("Done with reading workflow invocations")
global WORKFLOW_STEPS
WORKFLOW_STEPS = pd.read_csv(os.path.join(folder_path, 'workflow-steps.csv'),
names=["id", "create_time", "update_time", "workflow_id", "type", "tool_id",
"tool_version", "order_index", "subworkflow_id", "dynamic_tool_id"], dtype={
"id": np.float,
"create_time": np.str,
"update_time": np.str,
"workflow_id": np.float,
"type": np.str,
"tool_id": np.str,
"tool_version": np.str,
"order_index": np.float,
"subworkflow_id": np.str,
"dynamic_tool_id": np.str,
})
print("Done with reading workflow steps")
global WORKFLOW_INVOKE_STEPS
WORKFLOW_INVOKE_STEPS = pd.read_csv(os.path.join(folder_path, 'workflow-invoke-steps.csv'), keep_default_na=True,
names=["id", "create_time", "update_time", "workflow_invocation_id",
"workflow_step_id", "job_id", "state"], dtype={
"id": np.float,
"create_time": np.str,
"update_time": np.str,
"workflow_invocation_id": np.float,
"workflow_step_id": np.float,
"job_id": np.float,
"state": np.str,
})
print("Done with reading workflow invocation steps")
global WORKFLOW_CONNECTIONS
WORKFLOW_CONNECTIONS = pd.read_csv(os.path.join(folder_path, 'workflow-connections.csv'),
names=["id", "output_step_id", "input_step_input_id", "output_name",
"input_subworkflow_step_id"], dtype={
"id": np.float,
"output_step_id": np.float,
"input_step_input_id": np.float,
"output_name": np.str,
"input_subworkflow_step_id": np.float,
})
print("Done with reading workflow connections")
global WORKFLOW_STEP_INPUT
WORKFLOW_STEP_INPUT = pd.read_csv(os.path.join(folder_path, 'workflow-step-input.csv'),
names=["id", "workflow_step_id", "name"], dtype={
"id": np.float,
"workflow_step_id": np.float,
"name": np.str,
})
print("Done with reading workflow step input")
def check_if_empty(*args):
for field in args:
if np.isnan(field):
return True
def compute_children(step_job_ids, tasks_in_workflow):
for task in tasks_in_workflow:
step_id = None
for pair in step_job_ids:
# find task id's corresponding step id
if pair[1] == task.id:
step_id = pair[0]
children = set()
df = WORKFLOW_CONNECTIONS.loc[(WORKFLOW_CONNECTIONS["output_step_id"] == step_id)]
if df.empty:
task.children = children
continue
for wc_row in df.itertuples():
# find id for subsequent connected step
row = WORKFLOW_STEP_INPUT.loc[(WORKFLOW_STEP_INPUT["id"] == wc_row[3])]
child_step_id = row.iloc[0]["workflow_step_id"]
# find child_step_id in step-job pairs and add corresponding job_id to children set
for pair2 in step_job_ids:
if pair2[0] == child_step_id:
children.add(np.int64(pair2[1]))
for child in tasks_in_workflow:
if child.id == pair2[1]:
child.parents.append(np.int64(task.id))
break
break
task.children = children
for task2 in tasks_in_workflow:
unique_parents = set(task2.parents)
unique_parents_list = list(unique_parents)
task2.parents = unique_parents_list
return tasks_in_workflow
def parse():
os.makedirs(TARGET_DIR, exist_ok=True)
task_counter = 0
workflow_counter = 0
processed_workflows = []
final_workflows = []
final_tasks = []
task_offset = 0
workflow_offset = None
for wi_row in WORKFLOW_INVOCATIONS.itertuples():
flag = False
# only use one execution of a workflow
if wi_row[4] in processed_workflows:
continue
# check if workflow contains cycles
workflow_row = WORKFLOWS.loc[(WORKFLOWS["id"] == getattr(wi_row, "workflow_id"))]
if workflow_row.iloc[0]["has_cycles"] == "t":
continue
# workflows contain a number of workflow steps but this is not the ID of their actual execution
# this list is used to tie the workflow steps to their actual execution ID
step_job_ids = []
tasks_in_workflow = []
workflow_index = wi_row[4]
# check if workflow id is null
if pd.isnull(workflow_index):
continue
df = WORKFLOW_INVOKE_STEPS.loc[(WORKFLOW_INVOKE_STEPS["workflow_invocation_id"] == getattr(wi_row, "id"))]
# check if workflow is not empty
if df.empty:
processed_workflows.append(workflow_index)
continue
for wis_row in df.itertuples():
# check if entry in WF_INVOKE_STEPS has the same wf_invocation_id
if getattr(wis_row, "workflow_invocation_id") == getattr(wi_row, "id"):
# check if required fields are not empty
if check_if_empty(getattr(wis_row, "workflow_step_id"), getattr(wis_row, "job_id")):
processed_workflows.append(workflow_index)
flag = True
break
# get step id and corresponding execution id
step_job_pair = [getattr(wis_row, "workflow_step_id"), getattr(wis_row, "job_id")]
step_job_ids.append(step_job_pair)
job_id = getattr(wis_row, "job_id")
submit_time = int(((datetime.strptime(getattr(wis_row, "create_time"),DATETIME_FORMAT) - EPOCH).total_seconds()) * 1000)
job_metrics = METRICS.loc[(METRICS["job_id"] == job_id)]
runtime = job_metrics.loc[(job_metrics["metric_name"] == "runtime_seconds"), 'metric_value'] * 1000
memory = job_metrics.loc[(job_metrics["metric_name"] == "memory.memsw.max_usage_in_bytes"), 'metric_value']
cpu_time = job_metrics.loc[(job_metrics["metric_name"] == "cpuacct.usage"), 'metric_value']
# check if any required fields are empty
if runtime.empty or memory.empty or cpu_time.empty:
processed_workflows.append(workflow_index)
flag = True
break
                # used to find the task with the lowest submit time; this time will be used as the offset
if task_offset == 0:
task_offset = submit_time
elif submit_time < task_offset:
task_offset = submit_time
runtime = runtime.iloc[0]
memory = memory.iloc[0]
cpu_time = cpu_time.iloc[0] / 1000000
if cpu_time > runtime:
cpu_time = runtime
task = Task(np.int64(job_id), "Composite", submit_time, 0, runtime, 1, None, workflow_index, -1, "cpu-time",resource=cpu_time, memory_requested=memory)
task_counter += 1
tasks_in_workflow.append(task)
flag = False
        # if flag is true, a task in the workflow is not usable so we skip it
if flag:
processed_workflows.append((workflow_index))
continue
# compute children of tasks
final_tasks.extend(compute_children(step_job_ids, tasks_in_workflow))
workflow_submit_time = int(((datetime.strptime(getattr(wi_row, "create_time"),DATETIME_FORMAT) - EPOCH).total_seconds()) * 1000)
# find smallest workflow submit time as offset
if workflow_offset is None:
workflow_offset = workflow_submit_time
elif workflow_submit_time < workflow_offset:
workflow_offset = workflow_submit_time
workflow = Workflow(workflow_index, workflow_submit_time, tasks_in_workflow, "core", "Engineering",
"Galaxy", "Biological Engineering")
workflow.compute_critical_path()
processed_workflows.append(workflow_index)
final_workflows.append(workflow)
workflow_counter += 1
# apply offset
for x in final_tasks:
x.ts_submit = x.ts_submit - task_offset
# apply offset
for y in final_workflows:
y.ts_submit = y.ts_submit - workflow_offset
# make tasks dataframe
task_df = pd.DataFrame([t.get_parquet_dict() for t in final_tasks])
# create parquet file in specified folder
os.makedirs(os.path.join(TARGET_DIR, Task.output_path()), exist_ok=True)
task_df.to_parquet(os.path.join(TARGET_DIR, Task.output_path(), "part.0.parquet"), engine="pyarrow")
# make workflows dataframe
workflow_df = pd.DataFrame([w.get_parquet_dict() for w in final_workflows])
# create parquet file in specified folder
os.makedirs(os.path.join(TARGET_DIR, Workflow.output_path()), exist_ok=True)
workflow_df.to_parquet(os.path.join(TARGET_DIR, Workflow.output_path(), "part.0.parquet"), engine="pyarrow")
json_dict = Workload.get_json_dict_from_pandas_task_dataframe(task_df,
domain="Biological Engineering",
authors=["Jaro Bosch", "Laurens Versluis"],
workload_description="Traces from different biomedical research workflows, executed on the public Galaxy server in Europe."
)
os.makedirs(os.path.join(TARGET_DIR, Workload.output_path()), exist_ok=True)
with open(os.path.join(TARGET_DIR, Workload.output_path(), "generic_information.json"), "w") as file:
# Need this on 32-bit python.
def default(o):
if isinstance(o, np.int64): return int(o)
raise TypeError
file.write(json.dumps(json_dict, default=default))
if __name__ == '__main__':
if len(sys.argv) != 2:
print(USAGE)
sys.exit(1)
folder_path = sys.argv[1]
read_files(folder_path)
parse()
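# Minimal sketch for inspecting the generated trace; it only reuses TARGET_DIR and
# the output_path() helpers defined above, and assumes parse() has already run.
def preview_output():
    tasks = pd.read_parquet(os.path.join(TARGET_DIR, Task.output_path(), "part.0.parquet"))
    workflows = pd.read_parquet(os.path.join(TARGET_DIR, Workflow.output_path(), "part.0.parquet"))
    print(tasks.head())
    print(workflows.head())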
| 39.686335
| 189
| 0.587683
|
import json
import os
import sys
from datetime import datetime
import numpy as np
import pandas as pd
from objects.task import Task
from objects.workflow import Workflow
from objects.workload import Workload
pd.set_option('display.max_columns', None)
USAGE = 'Usage: python(3) ./galaxy_to_parquet.py galaxy_folder'
NAME = 'Galaxy'
TARGET_DIR = os.path.join(os.path.dirname(os.getcwd()), 'output_parquet', NAME)
DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S.%f'
EPOCH = datetime(1970, 1, 1)
JOBS = None
METRICS = None
WORKFLOWS = None
WORKFLOW_INVOCATIONS = None
WORKFLOW_STEPS = None
WORKFLOW_INVOKE_STEPS = None
WORKFLOW_CONNECTIONS = None
WORKFLOW_STEP_INPUT = None
def read_files(folder_path):
global METRICS
METRICS = pd.read_csv(os.path.join(folder_path, 'job_metrics_numeric.csv'),
names=["id", "job_id", "plugin", "metric_name", "metric_value"], dtype={
"id": np.float,
"job_id": np.float,
"plugin": np.str,
"metric_name": np.str,
"metric_value": np.float,
})
print("Done with reading metrics")
global WORKFLOWS
WORKFLOWS = pd.read_csv(os.path.join(folder_path, 'workflows.csv'),
names=["id", "create_time", "update_time", "stored_workflow_id", "has_cycles", "has_errors",
"parent_workflow_id", "uuid"], dtype={
"id": np.float,
"create_time": np.str,
"update_time": np.str,
"stored_workflow_id": np.float,
"has_cycles": np.str,
"has_errors": np.str,
"parent_workflow_id": np.float,
"uuid": np.str,
})
print("Done with reading workflows")
global WORKFLOW_INVOCATIONS
WORKFLOW_INVOCATIONS = pd.read_csv(os.path.join(folder_path, 'workflow-invocations.csv'),
names=["id", "create_time", "update_time", "workflow_id", "state", "scheduler",
"handler"], dtype={
"id": np.float,
"create_time": np.str,
"update_time": np.str,
"workflow_id": np.float,
"state": np.str,
"scheduler": np.str,
"handler": np.str,
})
print("Done with reading workflow invocations")
global WORKFLOW_STEPS
WORKFLOW_STEPS = pd.read_csv(os.path.join(folder_path, 'workflow-steps.csv'),
names=["id", "create_time", "update_time", "workflow_id", "type", "tool_id",
"tool_version", "order_index", "subworkflow_id", "dynamic_tool_id"], dtype={
"id": np.float,
"create_time": np.str,
"update_time": np.str,
"workflow_id": np.float,
"type": np.str,
"tool_id": np.str,
"tool_version": np.str,
"order_index": np.float,
"subworkflow_id": np.str,
"dynamic_tool_id": np.str,
})
print("Done with reading workflow steps")
global WORKFLOW_INVOKE_STEPS
WORKFLOW_INVOKE_STEPS = pd.read_csv(os.path.join(folder_path, 'workflow-invoke-steps.csv'), keep_default_na=True,
names=["id", "create_time", "update_time", "workflow_invocation_id",
"workflow_step_id", "job_id", "state"], dtype={
"id": np.float,
"create_time": np.str,
"update_time": np.str,
"workflow_invocation_id": np.float,
"workflow_step_id": np.float,
"job_id": np.float,
"state": np.str,
})
print("Done with reading workflow invocation steps")
global WORKFLOW_CONNECTIONS
WORKFLOW_CONNECTIONS = pd.read_csv(os.path.join(folder_path, 'workflow-connections.csv'),
names=["id", "output_step_id", "input_step_input_id", "output_name",
"input_subworkflow_step_id"], dtype={
"id": np.float,
"output_step_id": np.float,
"input_step_input_id": np.float,
"output_name": np.str,
"input_subworkflow_step_id": np.float,
})
print("Done with reading workflow connections")
global WORKFLOW_STEP_INPUT
WORKFLOW_STEP_INPUT = pd.read_csv(os.path.join(folder_path, 'workflow-step-input.csv'),
names=["id", "workflow_step_id", "name"], dtype={
"id": np.float,
"workflow_step_id": np.float,
"name": np.str,
})
print("Done with reading workflow step input")
def check_if_empty(*args):
for field in args:
if np.isnan(field):
return True
def compute_children(step_job_ids, tasks_in_workflow):
for task in tasks_in_workflow:
step_id = None
for pair in step_job_ids:
if pair[1] == task.id:
step_id = pair[0]
children = set()
df = WORKFLOW_CONNECTIONS.loc[(WORKFLOW_CONNECTIONS["output_step_id"] == step_id)]
if df.empty:
task.children = children
continue
for wc_row in df.itertuples():
# find id for subsequent connected step
row = WORKFLOW_STEP_INPUT.loc[(WORKFLOW_STEP_INPUT["id"] == wc_row[3])]
child_step_id = row.iloc[0]["workflow_step_id"]
# find child_step_id in step-job pairs and add corresponding job_id to children set
for pair2 in step_job_ids:
if pair2[0] == child_step_id:
children.add(np.int64(pair2[1]))
for child in tasks_in_workflow:
if child.id == pair2[1]:
child.parents.append(np.int64(task.id))
break
break
task.children = children
for task2 in tasks_in_workflow:
unique_parents = set(task2.parents)
unique_parents_list = list(unique_parents)
task2.parents = unique_parents_list
return tasks_in_workflow
def parse():
os.makedirs(TARGET_DIR, exist_ok=True)
task_counter = 0
workflow_counter = 0
processed_workflows = []
final_workflows = []
final_tasks = []
task_offset = 0
workflow_offset = None
for wi_row in WORKFLOW_INVOCATIONS.itertuples():
flag = False
# only use one execution of a workflow
if wi_row[4] in processed_workflows:
continue
# check if workflow contains cycles
workflow_row = WORKFLOWS.loc[(WORKFLOWS["id"] == getattr(wi_row, "workflow_id"))]
if workflow_row.iloc[0]["has_cycles"] == "t":
continue
# workflows contain a number of workflow steps but this is not the ID of their actual execution
# this list is used to tie the workflow steps to their actual execution ID
step_job_ids = []
tasks_in_workflow = []
workflow_index = wi_row[4]
# check if workflow id is null
if pd.isnull(workflow_index):
continue
df = WORKFLOW_INVOKE_STEPS.loc[(WORKFLOW_INVOKE_STEPS["workflow_invocation_id"] == getattr(wi_row, "id"))]
# check if workflow is not empty
if df.empty:
processed_workflows.append(workflow_index)
continue
for wis_row in df.itertuples():
# check if entry in WF_INVOKE_STEPS has the same wf_invocation_id
if getattr(wis_row, "workflow_invocation_id") == getattr(wi_row, "id"):
# check if required fields are not empty
if check_if_empty(getattr(wis_row, "workflow_step_id"), getattr(wis_row, "job_id")):
processed_workflows.append(workflow_index)
flag = True
break
# get step id and corresponding execution id
step_job_pair = [getattr(wis_row, "workflow_step_id"), getattr(wis_row, "job_id")]
step_job_ids.append(step_job_pair)
job_id = getattr(wis_row, "job_id")
submit_time = int(((datetime.strptime(getattr(wis_row, "create_time"),DATETIME_FORMAT) - EPOCH).total_seconds()) * 1000)
job_metrics = METRICS.loc[(METRICS["job_id"] == job_id)]
runtime = job_metrics.loc[(job_metrics["metric_name"] == "runtime_seconds"), 'metric_value'] * 1000
memory = job_metrics.loc[(job_metrics["metric_name"] == "memory.memsw.max_usage_in_bytes"), 'metric_value']
cpu_time = job_metrics.loc[(job_metrics["metric_name"] == "cpuacct.usage"), 'metric_value']
# check if any required fields are empty
if runtime.empty or memory.empty or cpu_time.empty:
processed_workflows.append(workflow_index)
flag = True
break
                # used to find the task with the lowest submit time; this time will be used as the offset
if task_offset == 0:
task_offset = submit_time
elif submit_time < task_offset:
task_offset = submit_time
runtime = runtime.iloc[0]
memory = memory.iloc[0]
cpu_time = cpu_time.iloc[0] / 1000000
if cpu_time > runtime:
cpu_time = runtime
task = Task(np.int64(job_id), "Composite", submit_time, 0, runtime, 1, None, workflow_index, -1, "cpu-time",resource=cpu_time, memory_requested=memory)
task_counter += 1
tasks_in_workflow.append(task)
flag = False
        # if flag is true, a task in the workflow is not usable so we skip it
if flag:
processed_workflows.append((workflow_index))
continue
# compute children of tasks
final_tasks.extend(compute_children(step_job_ids, tasks_in_workflow))
workflow_submit_time = int(((datetime.strptime(getattr(wi_row, "create_time"),DATETIME_FORMAT) - EPOCH).total_seconds()) * 1000)
# find smallest workflow submit time as offset
if workflow_offset is None:
workflow_offset = workflow_submit_time
elif workflow_submit_time < workflow_offset:
workflow_offset = workflow_submit_time
workflow = Workflow(workflow_index, workflow_submit_time, tasks_in_workflow, "core", "Engineering",
"Galaxy", "Biological Engineering")
workflow.compute_critical_path()
processed_workflows.append(workflow_index)
final_workflows.append(workflow)
workflow_counter += 1
# apply offset
for x in final_tasks:
x.ts_submit = x.ts_submit - task_offset
# apply offset
for y in final_workflows:
y.ts_submit = y.ts_submit - workflow_offset
# make tasks dataframe
task_df = pd.DataFrame([t.get_parquet_dict() for t in final_tasks])
# create parquet file in specified folder
os.makedirs(os.path.join(TARGET_DIR, Task.output_path()), exist_ok=True)
task_df.to_parquet(os.path.join(TARGET_DIR, Task.output_path(), "part.0.parquet"), engine="pyarrow")
# make workflows dataframe
workflow_df = pd.DataFrame([w.get_parquet_dict() for w in final_workflows])
# create parquet file in specified folder
os.makedirs(os.path.join(TARGET_DIR, Workflow.output_path()), exist_ok=True)
workflow_df.to_parquet(os.path.join(TARGET_DIR, Workflow.output_path(), "part.0.parquet"), engine="pyarrow")
json_dict = Workload.get_json_dict_from_pandas_task_dataframe(task_df,
domain="Biological Engineering",
authors=["Jaro Bosch", "Laurens Versluis"],
workload_description="Traces from different biomedical research workflows, executed on the public Galaxy server in Europe."
)
os.makedirs(os.path.join(TARGET_DIR, Workload.output_path()), exist_ok=True)
with open(os.path.join(TARGET_DIR, Workload.output_path(), "generic_information.json"), "w") as file:
# Need this on 32-bit python.
def default(o):
if isinstance(o, np.int64): return int(o)
raise TypeError
file.write(json.dumps(json_dict, default=default))
if __name__ == '__main__':
if len(sys.argv) != 2:
print(USAGE)
sys.exit(1)
folder_path = sys.argv[1]
read_files(folder_path)
parse()
| true
| true
|
f70ad7a5a099f3526ce640efd8badbc902145d66
| 4,566
|
py
|
Python
|
DeNN/visualization/gradcam.py
|
KillerStrike17/PyDeNN
|
2f0dfaf3e092a4f995ed30e2f8db946e30724551
|
[
"MIT"
] | null | null | null |
DeNN/visualization/gradcam.py
|
KillerStrike17/PyDeNN
|
2f0dfaf3e092a4f995ed30e2f8db946e30724551
|
[
"MIT"
] | null | null | null |
DeNN/visualization/gradcam.py
|
KillerStrike17/PyDeNN
|
2f0dfaf3e092a4f995ed30e2f8db946e30724551
|
[
"MIT"
] | null | null | null |
import seaborn as sns
import matplotlib.pyplot as plt
import torch
import numpy as np
import cv2
from .cam import GradCAM
# def load_gradcam(images, labels, model, device, target_layers):
def load_gradcam(test, model, device, target_layers,size = 25,classified = True):
_images = []
_target = []
_pred = []
# model, device = self.trainer.model, self.trainer.device
# set the model to evaluation mode
model.eval()
# turn off gradients
with torch.no_grad():
for data, target in test:
# move them to respective device
data, target = data.to(device), target.to(device)
# do inferencing
output = model(data)
# print("output:",output[0])
# get the predicted output
pred = output.argmax(dim=1, keepdim=True)
# print(pred,pred.view_as(target))
# get the current misclassified in this batch
list_images = (target.eq(pred.view_as(target)) == classified)
batch_misclassified = data[list_images]
batch_mis_pred = pred[list_images]
batch_mis_target = target[list_images]
# batch_misclassified =
_images.append(batch_misclassified)
_pred.append(batch_mis_pred)
_target.append(batch_mis_target)
# group all the batched together
img = torch.cat(_images)
pred = torch.cat(_pred)
tar = torch.cat(_target)
# move the model to device
images = img[:size]
labels = tar[:size]
model.to(device)
# set the model in evaluation mode
model.eval()
# get the grad cam
gcam = GradCAM(model=model, candidate_layers=target_layers)
# images = torch.stack(images).to(device)
# predicted probabilities and class ids
pred_probs, pred_ids = gcam.forward(images)
# actual class ids
# target_ids = torch.LongTensor(labels).view(len(images), -1).to(device)
target_ids = labels.view(len(images), -1).to(device)
# backward pass wrt to the actual ids
gcam.backward(ids=target_ids)
# we will store the layers and correspondings images activations here
layers_region = {}
# fetch the grad cam layers of all the images
for target_layer in target_layers:
# Grad-CAM
regions = gcam.generate(target_layer=target_layer)
layers_region[target_layer] = regions
# we are done here, remove the hooks
gcam.remove_hook()
return layers_region, pred_probs, pred_ids,images, labels
sns.set()
# plt.style.use("dark_background")
def plot_gradcam(gcam_layers, images, target_labels, predicted_labels, class_labels, denormalize):
images = images.cpu()
    # convert BCHW to BHWC for plotting
images = images.permute(0, 2, 3, 1)
target_labels = target_labels.cpu()
fig, axs = plt.subplots(nrows=len(images), ncols=len(
gcam_layers.keys())+1, figsize=((len(gcam_layers.keys()) + 2)*3, len(images)*3))
fig.suptitle("Grad-CAM", fontsize=16)
for image_idx, image in enumerate(images):
        # denormalize the image
denorm_img = denormalize(image.permute(2, 0, 1)).permute(1, 2, 0)
# axs[image_idx, 0].text(
# 0.5, 0.5, f'predicted: {class_labels[predicted_labels[image_idx][0] ]}\nactual: {class_labels[target_labels[image_idx]] }', horizontalalignment='center', verticalalignment='center', fontsize=14, )
# axs[image_idx, 0].axis('off')
axs[image_idx, 0].imshow(
(denorm_img.numpy() * 255).astype(np.uint8), interpolation='bilinear')
axs[image_idx, 0].axis('off')
for layer_idx, layer_name in enumerate(gcam_layers.keys()):
# gets H X W of the cam layer
_layer = gcam_layers[layer_name][image_idx].cpu().numpy()[0]
heatmap = 1 - _layer
heatmap = np.uint8(255 * heatmap)
heatmap_img = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
superimposed_img = cv2.addWeighted(
(denorm_img.numpy() * 255).astype(np.uint8), 0.6, heatmap_img, 0.4, 0)
axs[image_idx, layer_idx +
1].imshow(superimposed_img, interpolation='bilinear')
axs[image_idx, layer_idx+1].set_title(f'layer: {layer_name}')
axs[image_idx, layer_idx+1].axis('off')
axs[image_idx, 0].set_title(f'Predicted: {class_labels[predicted_labels[image_idx][0] ]}\nTarget: {class_labels[target_labels[image_idx]] }')
plt.tight_layout()
plt.subplots_adjust(top=0.95, wspace=0.2, hspace=0.2)
plt.show()
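# Minimal sketch wiring the two helpers together; the "layer4" target layer, the
# loader and the denormalize callable are assumptions for illustration.
def run_gradcam(test_loader, model, device, class_labels, denormalize):
    layers, pred_probs, pred_ids, images, targets = load_gradcam(
        test_loader, model, device, target_layers=["layer4"], size=5, classified=False)
    plot_gradcam(layers, images, targets, pred_ids, class_labels, denormalize)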
| 33.086957
| 210
| 0.644985
|
import seaborn as sns
import matplotlib.pyplot as plt
import torch
import numpy as np
import cv2
from .cam import GradCAM
def load_gradcam(test, model, device, target_layers,size = 25,classified = True):
_images = []
_target = []
_pred = []
model.eval()
with torch.no_grad():
for data, target in test:
data, target = data.to(device), target.to(device)
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
list_images = (target.eq(pred.view_as(target)) == classified)
batch_misclassified = data[list_images]
batch_mis_pred = pred[list_images]
batch_mis_target = target[list_images]
_images.append(batch_misclassified)
_pred.append(batch_mis_pred)
_target.append(batch_mis_target)
img = torch.cat(_images)
pred = torch.cat(_pred)
tar = torch.cat(_target)
images = img[:size]
labels = tar[:size]
model.to(device)
model.eval()
gcam = GradCAM(model=model, candidate_layers=target_layers)
pred_probs, pred_ids = gcam.forward(images)
target_ids = labels.view(len(images), -1).to(device)
gcam.backward(ids=target_ids)
layers_region = {}
for target_layer in target_layers:
regions = gcam.generate(target_layer=target_layer)
layers_region[target_layer] = regions
gcam.remove_hook()
return layers_region, pred_probs, pred_ids,images, labels
sns.set()
def plot_gradcam(gcam_layers, images, target_labels, predicted_labels, class_labels, denormalize):
images = images.cpu()
images = images.permute(0, 2, 3, 1)
target_labels = target_labels.cpu()
fig, axs = plt.subplots(nrows=len(images), ncols=len(
gcam_layers.keys())+1, figsize=((len(gcam_layers.keys()) + 2)*3, len(images)*3))
fig.suptitle("Grad-CAM", fontsize=16)
for image_idx, image in enumerate(images):
denorm_img = denormalize(image.permute(2, 0, 1)).permute(1, 2, 0)
axs[image_idx, 0].imshow(
(denorm_img.numpy() * 255).astype(np.uint8), interpolation='bilinear')
axs[image_idx, 0].axis('off')
for layer_idx, layer_name in enumerate(gcam_layers.keys()):
_layer = gcam_layers[layer_name][image_idx].cpu().numpy()[0]
heatmap = 1 - _layer
heatmap = np.uint8(255 * heatmap)
heatmap_img = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
superimposed_img = cv2.addWeighted(
(denorm_img.numpy() * 255).astype(np.uint8), 0.6, heatmap_img, 0.4, 0)
axs[image_idx, layer_idx +
1].imshow(superimposed_img, interpolation='bilinear')
axs[image_idx, layer_idx+1].set_title(f'layer: {layer_name}')
axs[image_idx, layer_idx+1].axis('off')
axs[image_idx, 0].set_title(f'Predicted: {class_labels[predicted_labels[image_idx][0] ]}\nTarget: {class_labels[target_labels[image_idx]] }')
plt.tight_layout()
plt.subplots_adjust(top=0.95, wspace=0.2, hspace=0.2)
plt.show()
| true
| true
|
f70ad7d05a2e436c22816e9e6a1f162afbc6f7d6
| 1,239
|
py
|
Python
|
tests/common/factories/__init__.py
|
tgiardina/rpp-h
|
fece590f901b052a59c19a24acfeba52cee33c84
|
[
"BSD-2-Clause"
] | 2,103
|
2015-01-07T12:47:49.000Z
|
2022-03-29T02:38:25.000Z
|
tests/common/factories/__init__.py
|
tgiardina/rpp-h
|
fece590f901b052a59c19a24acfeba52cee33c84
|
[
"BSD-2-Clause"
] | 4,322
|
2015-01-04T17:18:01.000Z
|
2022-03-31T17:06:02.000Z
|
tests/common/factories/__init__.py
|
tgiardina/rpp-h
|
fece590f901b052a59c19a24acfeba52cee33c84
|
[
"BSD-2-Clause"
] | 389
|
2015-01-24T04:10:02.000Z
|
2022-03-28T08:00:16.000Z
|
"""Factory classes for easily generating test objects."""
from .activation import Activation
from .annotation import Annotation
from .annotation_moderation import AnnotationModeration
from .auth_client import AuthClient, ConfidentialAuthClient
from .auth_ticket import AuthTicket
from .authz_code import AuthzCode
from .base import set_session
from .document import Document, DocumentMeta, DocumentURI
from .feature import Feature
from .flag import Flag
from .group import Group, OpenGroup, RestrictedGroup
from .group_scope import GroupScope
from .job import Job, SyncAnnotationJob
from .organization import Organization
from .setting import Setting
from .token import DeveloperToken, OAuth2Token
from .user import User
from .user_identity import UserIdentity
__all__ = (
"Activation",
"Annotation",
"AnnotationModeration",
"AuthClient",
"AuthTicket",
"AuthzCode",
"ConfidentialAuthClient",
"DeveloperToken",
"Document",
"DocumentMeta",
"DocumentURI",
"Feature",
"Flag",
"Group",
"GroupScope",
"Job",
"OAuth2Token",
"OpenGroup",
"Organization",
"RestrictedGroup",
"Setting",
"SyncAnnotationJob",
"User",
"UserIdentity",
"set_session",
)
| 25.8125
| 59
| 0.736885
|
from .activation import Activation
from .annotation import Annotation
from .annotation_moderation import AnnotationModeration
from .auth_client import AuthClient, ConfidentialAuthClient
from .auth_ticket import AuthTicket
from .authz_code import AuthzCode
from .base import set_session
from .document import Document, DocumentMeta, DocumentURI
from .feature import Feature
from .flag import Flag
from .group import Group, OpenGroup, RestrictedGroup
from .group_scope import GroupScope
from .job import Job, SyncAnnotationJob
from .organization import Organization
from .setting import Setting
from .token import DeveloperToken, OAuth2Token
from .user import User
from .user_identity import UserIdentity
__all__ = (
"Activation",
"Annotation",
"AnnotationModeration",
"AuthClient",
"AuthTicket",
"AuthzCode",
"ConfidentialAuthClient",
"DeveloperToken",
"Document",
"DocumentMeta",
"DocumentURI",
"Feature",
"Flag",
"Group",
"GroupScope",
"Job",
"OAuth2Token",
"OpenGroup",
"Organization",
"RestrictedGroup",
"Setting",
"SyncAnnotationJob",
"User",
"UserIdentity",
"set_session",
)
| true
| true
|
f70ad80bd35605612bc45255182035b9ed96ec72
| 26,266
|
py
|
Python
|
sdk/search/azure-search-documents/azure/search/documents/indexes/models/_models.py
|
ankitarorabit/azure-sdk-for-python
|
dd90281cbad9400f8080754a5ef2f56791a5a88f
|
[
"MIT"
] | null | null | null |
sdk/search/azure-search-documents/azure/search/documents/indexes/models/_models.py
|
ankitarorabit/azure-sdk-for-python
|
dd90281cbad9400f8080754a5ef2f56791a5a88f
|
[
"MIT"
] | null | null | null |
sdk/search/azure-search-documents/azure/search/documents/indexes/models/_models.py
|
ankitarorabit/azure-sdk-for-python
|
dd90281cbad9400f8080754a5ef2f56791a5a88f
|
[
"MIT"
] | null | null | null |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import msrest.serialization
from .._generated.models import (
LexicalAnalyzer,
LexicalTokenizer,
AnalyzeRequest,
CustomAnalyzer as _CustomAnalyzer,
PatternAnalyzer as _PatternAnalyzer,
PatternTokenizer as _PatternTokenizer,
SearchResourceEncryptionKey as _SearchResourceEncryptionKey,
SearchIndexerDataSource as _SearchIndexerDataSource,
SynonymMap as _SynonymMap,
DataSourceCredentials,
AzureActiveDirectoryApplicationCredentials
)
DELIMITER = "|"
class AnalyzeTextOptions(msrest.serialization.Model):
"""Specifies some text and analysis components used to break that text into tokens.
All required parameters must be populated in order to send to Azure.
:param text: Required. The text to break into tokens.
:type text: str
:param analyzer_name: The name of the analyzer to use to break the given text. If this parameter is
not specified, you must specify a tokenizer instead. The tokenizer and analyzer parameters are
mutually exclusive. Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene",
"bn.microsoft", "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh-
Hans.microsoft", "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft",
"cs.microsoft", "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene",
"en.microsoft", "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft",
"fr.lucene", "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene",
"gu.microsoft", "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene",
"is.microsoft", "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene",
"ja.microsoft", "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft",
"lv.lucene", "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft",
"no.lucene", "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-
PT.microsoft", "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft",
"ru.lucene", "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft",
"es.microsoft", "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft",
"th.microsoft", "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft",
"vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern",
"simple", "stop", "whitespace".
:type analyzer_name: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
:param tokenizer_name: The name of the tokenizer to use to break the given text. If this parameter
is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters
are mutually exclusive. Possible values include: "classic", "edgeNGram", "keyword_v2",
"letter", "lowercase", "microsoft_language_tokenizer", "microsoft_language_stemming_tokenizer",
"nGram", "path_hierarchy_v2", "pattern", "standard_v2", "uax_url_email", "whitespace".
:type tokenizer_name: str or ~azure.search.documents.indexes.models.LexicalTokenizerName
:param token_filters: An optional list of token filters to use when breaking the given text.
This parameter can only be set when using the tokenizer parameter.
:type token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName]
:param char_filters: An optional list of character filters to use when breaking the given text.
This parameter can only be set when using the tokenizer parameter.
:type char_filters: list[str]
"""
_validation = {
'text': {'required': True},
}
_attribute_map = {
'text': {'key': 'text', 'type': 'str'},
'analyzer_name': {'key': 'analyzerName', 'type': 'str'},
'tokenizer_name': {'key': 'tokenizerName', 'type': 'str'},
'token_filters': {'key': 'tokenFilters', 'type': '[str]'},
'char_filters': {'key': 'charFilters', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(AnalyzeTextOptions, self).__init__(**kwargs)
self.text = kwargs['text']
self.analyzer_name = kwargs.get('analyzer_name', None)
self.tokenizer_name = kwargs.get('tokenizer_name', None)
self.token_filters = kwargs.get('token_filters', None)
self.char_filters = kwargs.get('char_filters', None)
def _to_analyze_request(self):
return AnalyzeRequest(
text=self.text,
analyzer=self.analyzer_name,
tokenizer=self.tokenizer_name,
token_filters=self.token_filters,
char_filters=self.char_filters
)
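# Minimal usage sketch; the text and the "en.lucene" analyzer name are example
# values picked from the documented options, not taken from this module.
def _example_analyze_text_options():
    return AnalyzeTextOptions(text="The quick brown fox", analyzer_name="en.lucene")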
class CustomAnalyzer(LexicalAnalyzer):
"""Allows you to take control over the process of converting text into indexable/searchable tokens.
It's a user-defined configuration consisting of a single predefined tokenizer and one or more filters.
The tokenizer is responsible for breaking text into tokens, and the filters for modifying tokens
emitted by the tokenizer.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by
server.
:type odata_type: str
:param name: Required. The name of the analyzer. It must only contain letters, digits, spaces,
dashes or underscores, can only start and end with alphanumeric characters, and is limited to
128 characters.
:type name: str
:param tokenizer_name: Required. The name of the tokenizer to use to divide continuous text into a
sequence of tokens, such as breaking a sentence into words. Possible values include: "classic",
"edgeNGram", "keyword_v2", "letter", "lowercase", "microsoft_language_tokenizer",
"microsoft_language_stemming_tokenizer", "nGram", "path_hierarchy_v2", "pattern",
"standard_v2", "uax_url_email", "whitespace".
:type tokenizer_name: str or ~azure.search.documents.indexes.models.LexicalTokenizerName
:param token_filters: A list of token filters used to filter out or modify the tokens generated
by a tokenizer. For example, you can specify a lowercase filter that converts all characters to
lowercase. The filters are run in the order in which they are listed.
:type token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName]
:param char_filters: A list of character filters used to prepare input text before it is
processed by the tokenizer. For instance, they can replace certain characters or symbols. The
filters are run in the order in which they are listed.
:type char_filters: list[str]
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
'tokenizer_name': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'tokenizer_name': {'key': 'tokenizerName', 'type': 'str'},
'token_filters': {'key': 'tokenFilters', 'type': '[str]'},
'char_filters': {'key': 'charFilters', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(CustomAnalyzer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.CustomAnalyzer'
self.tokenizer_name = kwargs['tokenizer_name']
self.token_filters = kwargs.get('token_filters', None)
self.char_filters = kwargs.get('char_filters', None)
def _to_generated(self):
return _CustomAnalyzer(
name=self.name,
odata_type=self.odata_type,
tokenizer=self.tokenizer_name,
token_filters=self.token_filters,
char_filters=self.char_filters
)
@classmethod
def _from_generated(cls, custom_analyzer):
if not custom_analyzer:
return None
return cls(
name=custom_analyzer.name,
odata_type=custom_analyzer.odata_type,
tokenizer_name=custom_analyzer.tokenizer,
token_filters=custom_analyzer.token_filters,
char_filters=custom_analyzer.char_filters
)
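# Minimal usage sketch; the analyzer name, tokenizer and token filter are example
# values picked from the documented options, not taken from this module.
def _example_custom_analyzer():
    return CustomAnalyzer(
        name="my_custom_analyzer",
        tokenizer_name="standard_v2",
        token_filters=["lowercase"],
    )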
class PatternAnalyzer(LexicalAnalyzer):
"""Flexibly separates text into terms via a regular expression.
This analyzer is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the analyzer. It must only contain letters, digits, spaces,
dashes or underscores, can only start and end with alphanumeric characters, and is limited to
128 characters.
:type name: str
:param lower_case_terms: A value indicating whether terms should be lower-cased. Default is
true.
:type lower_case_terms: bool
:param pattern: A regular expression to match token separators. Default is an
expression that matches one or more white space characters.
:type pattern: str
:param flags: List of regular expression flags. Possible values of each flag include: 'CANON_EQ',
'CASE_INSENSITIVE', 'COMMENTS', 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'.
:type flags: list[str] or list[~search_service_client.models.RegexFlags]
:param stopwords: A list of stopwords.
:type stopwords: list[str]
"""
_validation = {"odata_type": {"required": True}, "name": {"required": True}}
_attribute_map = {
"odata_type": {"key": "@odata\\.type", "type": "str"},
"name": {"key": "name", "type": "str"},
"lower_case_terms": {"key": "lowercase", "type": "bool"},
"pattern": {"key": "pattern", "type": "str"},
"flags": {"key": "flags", "type": "[str]"},
"stopwords": {"key": "stopwords", "type": "[str]"},
}
def __init__(self, **kwargs):
super(PatternAnalyzer, self).__init__(**kwargs)
self.odata_type = "#Microsoft.Azure.Search.PatternAnalyzer"
self.lower_case_terms = kwargs.get("lower_case_terms", True)
self.pattern = kwargs.get("pattern", r"\W+")
self.flags = kwargs.get("flags", None)
self.stopwords = kwargs.get("stopwords", None)
def _to_generated(self):
if not self.flags:
flags = None
else:
flags = DELIMITER.join(self.flags)
return _PatternAnalyzer(
name=self.name,
lower_case_terms=self.lower_case_terms,
pattern=self.pattern,
flags=flags,
stopwords=self.stopwords,
)
@classmethod
def _from_generated(cls, pattern_analyzer):
if not pattern_analyzer:
return None
if not pattern_analyzer.flags:
flags = None
else:
flags = pattern_analyzer.flags.split(DELIMITER)
return cls(
name=pattern_analyzer.name,
lower_case_terms=pattern_analyzer.lower_case_terms,
pattern=pattern_analyzer.pattern,
flags=flags,
stopwords=pattern_analyzer.stopwords,
)
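# Minimal usage sketch of an analyzer that splits comma-separated fields; the
# name, pattern and stopwords are illustrative assumptions.
def _example_pattern_analyzer():
    return PatternAnalyzer(
        name="comma_fields",
        pattern=r",",
        lower_case_terms=True,
        stopwords=["n/a"],
    )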
class PatternTokenizer(LexicalTokenizer):
"""Tokenizer that uses regex pattern matching to construct distinct tokens.
This tokenizer is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
dashes or underscores, can only start and end with alphanumeric characters, and is limited to
128 characters.
:type name: str
:param pattern: A regular expression to match token separators. Default is an
expression that matches one or more white space characters.
:type pattern: str
:param flags: List of regular expression flags. Possible values of each flag include: 'CANON_EQ',
'CASE_INSENSITIVE', 'COMMENTS', 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'.
:type flags: list[str] or list[~search_service_client.models.RegexFlags]
:param group: The zero-based ordinal of the matching group in the regular expression to
extract into tokens. Use -1 if you want to use the entire pattern to split the input into
tokens, irrespective of matching groups. Default is -1.
:type group: int
"""
_validation = {"odata_type": {"required": True}, "name": {"required": True}}
_attribute_map = {
"odata_type": {"key": "@odata\\.type", "type": "str"},
"name": {"key": "name", "type": "str"},
"pattern": {"key": "pattern", "type": "str"},
"flags": {"key": "flags", "type": "[str]"},
"group": {"key": "group", "type": "int"},
}
def __init__(self, **kwargs):
super(PatternTokenizer, self).__init__(**kwargs)
self.odata_type = "#Microsoft.Azure.Search.PatternTokenizer"
self.pattern = kwargs.get("pattern", r"\W+")
self.flags = kwargs.get("flags", None)
self.group = kwargs.get("group", -1)
def _to_generated(self):
if not self.flags:
flags = None
else:
flags = DELIMITER.join(self.flags)
return _PatternTokenizer(
name=self.name,
pattern=self.pattern,
flags=flags,
group=self.group,
)
@classmethod
def _from_generated(cls, pattern_tokenizer):
if not pattern_tokenizer:
return None
if not pattern_tokenizer.flags:
flags = None
else:
flags = pattern_tokenizer.flags.split(DELIMITER)
return cls(
name=pattern_tokenizer.name,
pattern=pattern_tokenizer.pattern,
flags=flags,
group=pattern_tokenizer.group,
)
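# Minimal usage sketch; the name and pattern are illustrative assumptions, and
# group=-1 keeps the whole-pattern splitting behaviour described above.
def _example_pattern_tokenizer():
    return PatternTokenizer(name="comma_tokenizer", pattern=r",", group=-1)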
class SearchResourceEncryptionKey(msrest.serialization.Model):
"""A customer-managed encryption key in Azure Key Vault. Keys that you create and manage can be
used to encrypt or decrypt data-at-rest in Azure Cognitive Search, such as indexes and synonym maps.
All required parameters must be populated in order to send to Azure.
:param key_name: Required. The name of your Azure Key Vault key to be used to encrypt your data
at rest.
:type key_name: str
:param key_version: Required. The version of your Azure Key Vault key to be used to encrypt
your data at rest.
:type key_version: str
:param vault_uri: Required. The URI of your Azure Key Vault, also referred to as DNS name, that
contains the key to be used to encrypt your data at rest. An example URI might be https://my-
keyvault-name.vault.azure.net.
:type vault_uri: str
:param application_id: Required. An AAD Application ID that was granted the required access
permissions to the Azure Key Vault that is to be used when encrypting your data at rest. The
Application ID should not be confused with the Object ID for your AAD Application.
:type application_id: str
:param application_secret: The authentication key of the specified AAD application.
:type application_secret: str
"""
_validation = {
'key_name': {'required': True},
'key_version': {'required': True},
'vault_uri': {'required': True},
}
_attribute_map = {
'key_name': {'key': 'keyVaultKeyName', 'type': 'str'},
'key_version': {'key': 'keyVaultKeyVersion', 'type': 'str'},
'vault_uri': {'key': 'keyVaultUri', 'type': 'str'},
'application_id': {'key': 'applicationId', 'type': 'str'},
'application_secret': {'key': 'applicationSecret', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SearchResourceEncryptionKey, self).__init__(**kwargs)
self.key_name = kwargs['key_name']
self.key_version = kwargs['key_version']
self.vault_uri = kwargs['vault_uri']
self.application_id = kwargs.get('application_id', None)
self.application_secret = kwargs.get('application_secret', None)
def _to_generated(self):
if self.application_id and self.application_secret:
access_credentials = AzureActiveDirectoryApplicationCredentials(
application_id=self.application_id,
application_secret=self.application_secret
)
else:
access_credentials = None
return _SearchResourceEncryptionKey(
key_name=self.key_name,
key_version=self.key_version,
vault_uri=self.vault_uri,
access_credentials=access_credentials
)
@classmethod
def _from_generated(cls, search_resource_encryption_key):
if not search_resource_encryption_key:
return None
if search_resource_encryption_key.access_credentials:
application_id = search_resource_encryption_key.access_credentials.application_id
application_secret = search_resource_encryption_key.access_credentials.application_secret
else:
application_id = None
application_secret = None
return cls(
key_name=search_resource_encryption_key.key_name,
key_version=search_resource_encryption_key.key_version,
vault_uri=search_resource_encryption_key.vault_uri,
application_id=application_id,
application_secret=application_secret
)
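# Minimal usage sketch; every value below is a placeholder assumption - real key
# names, versions, vault URIs and AAD credentials come from your own Key Vault.
def _example_encryption_key():
    return SearchResourceEncryptionKey(
        key_name="my-key",
        key_version="0123456789abcdef",
        vault_uri="https://my-keyvault-name.vault.azure.net",
        application_id="00000000-0000-0000-0000-000000000000",
        application_secret="<application-secret>",
    )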
class SynonymMap(msrest.serialization.Model):
"""Represents a synonym map definition.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the synonym map.
:type name: str
:ivar format: Required. The format of the synonym map. Only the 'solr' format is currently
supported. Default value: "solr".
:vartype format: str
:param synonyms: Required. A series of synonym rules in the specified synonym map format. The
rules must be separated by newlines.
:type synonyms: list[str]
:param encryption_key: A description of an encryption key that you create in Azure Key Vault.
This key is used to provide an additional level of encryption-at-rest for your data when you
want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive
Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive
Search will ignore attempts to set this property to null. You can change this property as
needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with
customer-managed keys is not available for free search services, and is only available for paid
services created on or after January 1, 2019.
:type encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
:param e_tag: The ETag of the synonym map.
:type e_tag: str
"""
_validation = {
'name': {'required': True},
'format': {'required': True, 'constant': True},
'synonyms': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'format': {'key': 'format', 'type': 'str'},
'synonyms': {'key': 'synonyms', 'type': '[str]'},
'encryption_key': {'key': 'encryptionKey', 'type': 'SearchResourceEncryptionKey'},
'e_tag': {'key': '@odata\\.etag', 'type': 'str'},
}
format = "solr"
def __init__(
self,
**kwargs
):
super(SynonymMap, self).__init__(**kwargs)
self.name = kwargs['name']
self.synonyms = kwargs['synonyms']
self.encryption_key = kwargs.get('encryption_key', None)
self.e_tag = kwargs.get('e_tag', None)
def _to_generated(self):
return _SynonymMap(
name=self.name,
synonyms="\n".join(self.synonyms),
encryption_key=self.encryption_key._to_generated() if self.encryption_key else None, # pylint:disable=protected-access
e_tag=self.e_tag
)
@classmethod
def _from_generated(cls, synonym_map):
if not synonym_map:
return None
return cls(
name=synonym_map.name,
synonyms=synonym_map.synonyms.split("\n"),
# pylint:disable=protected-access
encryption_key=SearchResourceEncryptionKey._from_generated(synonym_map.encryption_key),
e_tag=synonym_map.e_tag
)
@classmethod
def create_from_file(cls, name, file_path):
with open(file_path, "r") as f:
solr_format_synonyms = f.read()
synonyms = solr_format_synonyms.split("\n")
return cls(
name=name,
synonyms=synonyms
)
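# Minimal usage sketch; the map name and the single solr-format rule are
# illustrative assumptions.
def _example_synonym_map():
    return SynonymMap(
        name="country-synonyms",
        synonyms=["USA, United States, United States of America"],
    )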
class SearchIndexerDataSourceConnection(msrest.serialization.Model):
"""Represents a datasource connection definition, which can be used to configure an indexer.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the datasource connection.
:type name: str
:param description: The description of the datasource connection.
:type description: str
:param type: Required. The type of the datasource connection. Possible values include: "azuresql",
"cosmosdb", "azureblob", "azuretable", "mysql", "adlsgen2".
:type type: str or ~azure.search.documents.indexes.models.SearchIndexerDataSourceType
:param connection_string: The connection string for the datasource connection.
:type connection_string: str
:param container: Required. The data container for the datasource connection.
:type container: ~azure.search.documents.indexes.models.SearchIndexerDataContainer
:param data_change_detection_policy: The data change detection policy for the datasource connection.
:type data_change_detection_policy: ~azure.search.documents.models.DataChangeDetectionPolicy
:param data_deletion_detection_policy: The data deletion detection policy for the datasource connection.
:type data_deletion_detection_policy:
~azure.search.documents.models.DataDeletionDetectionPolicy
:param e_tag: The ETag of the data source.
:type e_tag: str
"""
_validation = {
'name': {'required': True},
'type': {'required': True},
'connection_string': {'required': True},
'container': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'connection_string': {'key': 'connectionString', 'type': 'str'},
'container': {'key': 'container', 'type': 'SearchIndexerDataContainer'},
'data_change_detection_policy': {'key': 'dataChangeDetectionPolicy', 'type': 'DataChangeDetectionPolicy'},
'data_deletion_detection_policy': {'key': 'dataDeletionDetectionPolicy', 'type': 'DataDeletionDetectionPolicy'},
'e_tag': {'key': '@odata\\.etag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SearchIndexerDataSourceConnection, self).__init__(**kwargs)
self.name = kwargs['name']
self.description = kwargs.get('description', None)
self.type = kwargs['type']
self.connection_string = kwargs['connection_string']
self.container = kwargs['container']
self.data_change_detection_policy = kwargs.get('data_change_detection_policy', None)
self.data_deletion_detection_policy = kwargs.get('data_deletion_detection_policy', None)
self.e_tag = kwargs.get('e_tag', None)
def _to_generated(self):
if self.connection_string is None or self.connection_string == "":
connection_string = "<unchanged>"
else:
connection_string = self.connection_string
credentials = DataSourceCredentials(
connection_string=connection_string
)
return _SearchIndexerDataSource(
name=self.name,
description=self.description,
type=self.type,
credentials=credentials,
container=self.container,
data_change_detection_policy=self.data_change_detection_policy,
data_deletion_detection_policy=self.data_deletion_detection_policy,
e_tag=self.e_tag
)
@classmethod
def _from_generated(cls, search_indexer_data_source):
if not search_indexer_data_source:
return None
connection_string = search_indexer_data_source.credentials.connection_string \
if search_indexer_data_source.credentials else None
return cls(
name=search_indexer_data_source.name,
description=search_indexer_data_source.description,
type=search_indexer_data_source.type,
connection_string=connection_string,
container=search_indexer_data_source.container,
data_change_detection_policy=search_indexer_data_source.data_change_detection_policy,
data_deletion_detection_policy=search_indexer_data_source.data_deletion_detection_policy,
e_tag=search_indexer_data_source.e_tag
)
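# Minimal usage sketch; the connection string is a placeholder and `container` is
# assumed to be a SearchIndexerDataContainer built elsewhere - only the keyword
# names come from the class above.
def _example_data_source_connection(container):
    return SearchIndexerDataSourceConnection(
        name="my-blob-datasource",
        type="azureblob",
        connection_string="DefaultEndpointsProtocol=https;AccountName=<account>;AccountKey=<key>",
        container=container,
    )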
def pack_analyzer(analyzer):
if not analyzer:
return None
if isinstance(analyzer, (PatternAnalyzer, CustomAnalyzer)):
return analyzer._to_generated() # pylint:disable=protected-access
return analyzer
def unpack_analyzer(analyzer):
if not analyzer:
return None
if isinstance(analyzer, _PatternAnalyzer):
return PatternAnalyzer._from_generated(analyzer) # pylint:disable=protected-access
if isinstance(analyzer, _CustomAnalyzer):
return CustomAnalyzer._from_generated(analyzer) # pylint:disable=protected-access
return analyzer
| 44.822526
| 130
| 0.666299
|
import msrest.serialization
from .._generated.models import (
LexicalAnalyzer,
LexicalTokenizer,
AnalyzeRequest,
CustomAnalyzer as _CustomAnalyzer,
PatternAnalyzer as _PatternAnalyzer,
PatternTokenizer as _PatternTokenizer,
SearchResourceEncryptionKey as _SearchResourceEncryptionKey,
SearchIndexerDataSource as _SearchIndexerDataSource,
SynonymMap as _SynonymMap,
DataSourceCredentials,
AzureActiveDirectoryApplicationCredentials
)
DELIMITER = "|"
class AnalyzeTextOptions(msrest.serialization.Model):
_validation = {
'text': {'required': True},
}
_attribute_map = {
'text': {'key': 'text', 'type': 'str'},
'analyzer_name': {'key': 'analyzerName', 'type': 'str'},
'tokenizer_name': {'key': 'tokenizerName', 'type': 'str'},
'token_filters': {'key': 'tokenFilters', 'type': '[str]'},
'char_filters': {'key': 'charFilters', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(AnalyzeTextOptions, self).__init__(**kwargs)
self.text = kwargs['text']
self.analyzer_name = kwargs.get('analyzer_name', None)
self.tokenizer_name = kwargs.get('tokenizer_name', None)
self.token_filters = kwargs.get('token_filters', None)
self.char_filters = kwargs.get('char_filters', None)
def _to_analyze_request(self):
return AnalyzeRequest(
text=self.text,
analyzer=self.analyzer_name,
tokenizer=self.tokenizer_name,
token_filters=self.token_filters,
char_filters=self.char_filters
)
class CustomAnalyzer(LexicalAnalyzer):
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
'tokenizer_name': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'tokenizer_name': {'key': 'tokenizerName', 'type': 'str'},
'token_filters': {'key': 'tokenFilters', 'type': '[str]'},
'char_filters': {'key': 'charFilters', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(CustomAnalyzer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.CustomAnalyzer'
self.tokenizer_name = kwargs['tokenizer_name']
self.token_filters = kwargs.get('token_filters', None)
self.char_filters = kwargs.get('char_filters', None)
def _to_generated(self):
return _CustomAnalyzer(
name=self.name,
odata_type=self.odata_type,
tokenizer=self.tokenizer_name,
token_filters=self.token_filters,
char_filters=self.char_filters
)
@classmethod
def _from_generated(cls, custom_analyzer):
if not custom_analyzer:
return None
return cls(
name=custom_analyzer.name,
odata_type=custom_analyzer.odata_type,
tokenizer_name=custom_analyzer.tokenizer,
token_filters=custom_analyzer.token_filters,
char_filters=custom_analyzer.char_filters
)
class PatternAnalyzer(LexicalAnalyzer):
_validation = {"odata_type": {"required": True}, "name": {"required": True}}
_attribute_map = {
"odata_type": {"key": "@odata\\.type", "type": "str"},
"name": {"key": "name", "type": "str"},
"lower_case_terms": {"key": "lowercase", "type": "bool"},
"pattern": {"key": "pattern", "type": "str"},
"flags": {"key": "flags", "type": "[str]"},
"stopwords": {"key": "stopwords", "type": "[str]"},
}
def __init__(self, **kwargs):
super(PatternAnalyzer, self).__init__(**kwargs)
self.odata_type = "#Microsoft.Azure.Search.PatternAnalyzer"
self.lower_case_terms = kwargs.get("lower_case_terms", True)
self.pattern = kwargs.get("pattern", r"\W+")
self.flags = kwargs.get("flags", None)
self.stopwords = kwargs.get("stopwords", None)
def _to_generated(self):
if not self.flags:
flags = None
else:
flags = DELIMITER.join(self.flags)
return _PatternAnalyzer(
name=self.name,
lower_case_terms=self.lower_case_terms,
pattern=self.pattern,
flags=flags,
stopwords=self.stopwords,
)
@classmethod
def _from_generated(cls, pattern_analyzer):
if not pattern_analyzer:
return None
if not pattern_analyzer.flags:
flags = None
else:
flags = pattern_analyzer.flags.split(DELIMITER)
return cls(
name=pattern_analyzer.name,
lower_case_terms=pattern_analyzer.lower_case_terms,
pattern=pattern_analyzer.pattern,
flags=flags,
stopwords=pattern_analyzer.stopwords,
)
class PatternTokenizer(LexicalTokenizer):
_validation = {"odata_type": {"required": True}, "name": {"required": True}}
_attribute_map = {
"odata_type": {"key": "@odata\\.type", "type": "str"},
"name": {"key": "name", "type": "str"},
"pattern": {"key": "pattern", "type": "str"},
"flags": {"key": "flags", "type": "[str]"},
"group": {"key": "group", "type": "int"},
}
def __init__(self, **kwargs):
super(PatternTokenizer, self).__init__(**kwargs)
self.odata_type = "#Microsoft.Azure.Search.PatternTokenizer"
self.pattern = kwargs.get("pattern", r"\W+")
self.flags = kwargs.get("flags", None)
self.group = kwargs.get("group", -1)
def _to_generated(self):
if not self.flags:
flags = None
else:
flags = DELIMITER.join(self.flags)
return _PatternTokenizer(
name=self.name,
pattern=self.pattern,
flags=flags,
group=self.group,
)
@classmethod
def _from_generated(cls, pattern_tokenizer):
if not pattern_tokenizer:
return None
if not pattern_tokenizer.flags:
flags = None
else:
flags = pattern_tokenizer.flags.split(DELIMITER)
return cls(
name=pattern_tokenizer.name,
pattern=pattern_tokenizer.pattern,
flags=flags,
group=pattern_tokenizer.group,
)
class SearchResourceEncryptionKey(msrest.serialization.Model):
_validation = {
'key_name': {'required': True},
'key_version': {'required': True},
'vault_uri': {'required': True},
}
_attribute_map = {
'key_name': {'key': 'keyVaultKeyName', 'type': 'str'},
'key_version': {'key': 'keyVaultKeyVersion', 'type': 'str'},
'vault_uri': {'key': 'keyVaultUri', 'type': 'str'},
'application_id': {'key': 'applicationId', 'type': 'str'},
'application_secret': {'key': 'applicationSecret', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SearchResourceEncryptionKey, self).__init__(**kwargs)
self.key_name = kwargs['key_name']
self.key_version = kwargs['key_version']
self.vault_uri = kwargs['vault_uri']
self.application_id = kwargs.get('application_id', None)
self.application_secret = kwargs.get('application_secret', None)
def _to_generated(self):
if self.application_id and self.application_secret:
access_credentials = AzureActiveDirectoryApplicationCredentials(
application_id=self.application_id,
application_secret=self.application_secret
)
else:
access_credentials = None
return _SearchResourceEncryptionKey(
key_name=self.key_name,
key_version=self.key_version,
vault_uri=self.vault_uri,
access_credentials=access_credentials
)
@classmethod
def _from_generated(cls, search_resource_encryption_key):
if not search_resource_encryption_key:
return None
if search_resource_encryption_key.access_credentials:
application_id = search_resource_encryption_key.access_credentials.application_id
application_secret = search_resource_encryption_key.access_credentials.application_secret
else:
application_id = None
application_secret = None
return cls(
key_name=search_resource_encryption_key.key_name,
key_version=search_resource_encryption_key.key_version,
vault_uri=search_resource_encryption_key.vault_uri,
application_id=application_id,
application_secret=application_secret
)
class SynonymMap(msrest.serialization.Model):
_validation = {
'name': {'required': True},
'format': {'required': True, 'constant': True},
'synonyms': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'format': {'key': 'format', 'type': 'str'},
'synonyms': {'key': 'synonyms', 'type': '[str]'},
'encryption_key': {'key': 'encryptionKey', 'type': 'SearchResourceEncryptionKey'},
'e_tag': {'key': '@odata\\.etag', 'type': 'str'},
}
format = "solr"
def __init__(
self,
**kwargs
):
super(SynonymMap, self).__init__(**kwargs)
self.name = kwargs['name']
self.synonyms = kwargs['synonyms']
self.encryption_key = kwargs.get('encryption_key', None)
self.e_tag = kwargs.get('e_tag', None)
def _to_generated(self):
return _SynonymMap(
name=self.name,
synonyms="\n".join(self.synonyms),
encryption_key=self.encryption_key._to_generated() if self.encryption_key else None,
e_tag=self.e_tag
)
@classmethod
def _from_generated(cls, synonym_map):
if not synonym_map:
return None
return cls(
name=synonym_map.name,
synonyms=synonym_map.synonyms.split("\n"),
encryption_key=SearchResourceEncryptionKey._from_generated(synonym_map.encryption_key),
e_tag=synonym_map.e_tag
)
@classmethod
def create_from_file(cls, name, file_path):
with open(file_path, "r") as f:
solr_format_synonyms = f.read()
synonyms = solr_format_synonyms.split("\n")
return cls(
name=name,
synonyms=synonyms
)
class SearchIndexerDataSourceConnection(msrest.serialization.Model):
_validation = {
'name': {'required': True},
'type': {'required': True},
'connection_string': {'required': True},
'container': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'connection_string': {'key': 'connectionString', 'type': 'str'},
'container': {'key': 'container', 'type': 'SearchIndexerDataContainer'},
'data_change_detection_policy': {'key': 'dataChangeDetectionPolicy', 'type': 'DataChangeDetectionPolicy'},
'data_deletion_detection_policy': {'key': 'dataDeletionDetectionPolicy', 'type': 'DataDeletionDetectionPolicy'},
'e_tag': {'key': '@odata\\.etag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SearchIndexerDataSourceConnection, self).__init__(**kwargs)
self.name = kwargs['name']
self.description = kwargs.get('description', None)
self.type = kwargs['type']
self.connection_string = kwargs['connection_string']
self.container = kwargs['container']
self.data_change_detection_policy = kwargs.get('data_change_detection_policy', None)
self.data_deletion_detection_policy = kwargs.get('data_deletion_detection_policy', None)
self.e_tag = kwargs.get('e_tag', None)
def _to_generated(self):
if self.connection_string is None or self.connection_string == "":
connection_string = "<unchanged>"
else:
connection_string = self.connection_string
credentials = DataSourceCredentials(
connection_string=connection_string
)
return _SearchIndexerDataSource(
name=self.name,
description=self.description,
type=self.type,
credentials=credentials,
container=self.container,
data_change_detection_policy=self.data_change_detection_policy,
data_deletion_detection_policy=self.data_deletion_detection_policy,
e_tag=self.e_tag
)
@classmethod
def _from_generated(cls, search_indexer_data_source):
if not search_indexer_data_source:
return None
connection_string = search_indexer_data_source.credentials.connection_string \
if search_indexer_data_source.credentials else None
return cls(
name=search_indexer_data_source.name,
description=search_indexer_data_source.description,
type=search_indexer_data_source.type,
connection_string=connection_string,
container=search_indexer_data_source.container,
data_change_detection_policy=search_indexer_data_source.data_change_detection_policy,
data_deletion_detection_policy=search_indexer_data_source.data_deletion_detection_policy,
e_tag=search_indexer_data_source.e_tag
)
def pack_analyzer(analyzer):
if not analyzer:
return None
if isinstance(analyzer, (PatternAnalyzer, CustomAnalyzer)):
return analyzer._to_generated()
return analyzer
def unpack_analyzer(analyzer):
if not analyzer:
return None
if isinstance(analyzer, _PatternAnalyzer):
return PatternAnalyzer._from_generated(analyzer)
if isinstance(analyzer, _CustomAnalyzer):
return CustomAnalyzer._from_generated(analyzer)
return analyzer
| true
| true
|
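A minimal usage sketch for the analyzer and synonym-map wrappers defined in the file above. It assumes those classes are already in scope (for example imported from the module shown); the analyzer name, flag value, and synonym file path are illustrative placeholders, and the private _to_generated/_from_generated helpers are exercised only to show the round-trip they implement.

# Sketch under the assumptions stated above; all names are examples only.
pattern = PatternAnalyzer(
    name="my-pattern-analyzer",
    lower_case_terms=True,
    pattern=r"\W+",
    flags=["CASE_INSENSITIVE"],
)

# The wrapper joins the flags list with "|" for the generated model and
# splits it again when converting back.
generated = pattern._to_generated()
roundtrip = PatternAnalyzer._from_generated(generated)
assert roundtrip.flags == ["CASE_INSENSITIVE"]

# SynonymMap.create_from_file reads a Solr-format synonym file, one rule per line.
# synonym_map = SynonymMap.create_from_file("hotel-synonyms", "synonyms.txt")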
f70ad8f76ec5067e05cba3acf173b8f2bee21594
| 648
|
py
|
Python
|
train.py
|
21171-somesh/Document-Clusturer
|
43183c0b44b848e75999cee23e2dd8f8504f3c93
|
[
"MIT"
] | 2
|
2019-04-22T18:59:45.000Z
|
2019-06-03T15:45:00.000Z
|
train.py
|
21171-somesh/Document-Clusturer
|
43183c0b44b848e75999cee23e2dd8f8504f3c93
|
[
"MIT"
] | null | null | null |
train.py
|
21171-somesh/Document-Clusturer
|
43183c0b44b848e75999cee23e2dd8f8504f3c93
|
[
"MIT"
] | null | null | null |
import os
import nltk
import re
from gensim import corpora, models, similarities
from cleaning import clean
def train():
#Loads the data from the local storage
synopses = []
for filename in os.listdir('cnn-stories'):
with open('cnn-stories/' + filename, 'r') as infile:
synopses.append(infile.read())
#Cleans the data
corpus, dictionary = clean(synopses)
#Saves the model and the dictionary in local storage
corpora.Dictionary.save(dictionary, 'dictionary.dict')
lda = models.LdaModel(corpus, num_topics=10, id2word=dictionary, update_every=5, chunksize=10000, passes=100)
lda.save('lda.model')
if __name__ == "__main__":
train()
| 28.173913
| 110
| 0.746914
|
import os
import nltk
import re
from gensim import corpora, models, similarities
from cleaning import clean
def train():
synopses = []
for filename in os.listdir('cnn-stories'):
with open('cnn-stories/' + filename, 'r') as infile:
synopses.append(infile.read())
corpus, dictionary = clean(synopses)
corpora.Dictionary.save(dictionary, 'dictionary.dict')
lda = models.LdaModel(corpus, num_topics=10, id2word=dictionary, update_every=5, chunksize=10000, passes=100)
lda.save('lda.model')
if __name__ == "__main__":
train()
| true
| true
|
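The script above persists the gensim dictionary and LDA model to disk. The sketch below, assuming train() has already produced dictionary.dict and lda.model in the working directory, shows one way to load them back and score an unseen document; the simple tokenisation is only a stand-in for the clean() preprocessing used at training time.

from gensim import corpora, models

# Assumes the two files written by train() above are present.
dictionary = corpora.Dictionary.load('dictionary.dict')
lda = models.LdaModel.load('lda.model')

# Map a new document into the dictionary's bag-of-words space and print its topics.
tokens = "breaking news about the economy and markets".lower().split()
bow = dictionary.doc2bow(tokens)
for topic_id, weight in lda.get_document_topics(bow):
    print(topic_id, round(float(weight), 3))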
f70ad93723bc0cc59c9e4a2393a8c832aca01a12
| 17,701
|
py
|
Python
|
google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py
|
sakagarwal/python-aiplatform
|
62b4a1ea589235910c6e87f027899a29bf1bacb1
|
[
"Apache-2.0"
] | 1
|
2022-03-30T05:23:29.000Z
|
2022-03-30T05:23:29.000Z
|
google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py
|
sakagarwal/python-aiplatform
|
62b4a1ea589235910c6e87f027899a29bf1bacb1
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py
|
sakagarwal/python-aiplatform
|
62b4a1ea589235910c6e87f027899a29bf1bacb1
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import operations_v1
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.aiplatform_v1.types import specialist_pool
from google.cloud.aiplatform_v1.types import specialist_pool_service
from google.longrunning import operations_pb2 # type: ignore
from .base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO
class SpecialistPoolServiceGrpcTransport(SpecialistPoolServiceTransport):
"""gRPC backend transport for SpecialistPoolService.
A service for creating and managing Customer SpecialistPools.
When customers start Data Labeling jobs, they can reuse/create
Specialist Pools to bring their own Specialists to label the
data. Customers can add/remove Managers for the Specialist Pool
on Cloud console, then Managers will get email notifications to
manage Specialists and tasks on CrowdCompute console.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
# Return the client from cache.
return self._operations_client
@property
def create_specialist_pool(
self,
) -> Callable[
[specialist_pool_service.CreateSpecialistPoolRequest], operations_pb2.Operation
]:
r"""Return a callable for the create specialist pool method over gRPC.
Creates a SpecialistPool.
Returns:
Callable[[~.CreateSpecialistPoolRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_specialist_pool" not in self._stubs:
self._stubs["create_specialist_pool"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.SpecialistPoolService/CreateSpecialistPool",
request_serializer=specialist_pool_service.CreateSpecialistPoolRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_specialist_pool"]
@property
def get_specialist_pool(
self,
) -> Callable[
[specialist_pool_service.GetSpecialistPoolRequest],
specialist_pool.SpecialistPool,
]:
r"""Return a callable for the get specialist pool method over gRPC.
Gets a SpecialistPool.
Returns:
Callable[[~.GetSpecialistPoolRequest],
~.SpecialistPool]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_specialist_pool" not in self._stubs:
self._stubs["get_specialist_pool"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.SpecialistPoolService/GetSpecialistPool",
request_serializer=specialist_pool_service.GetSpecialistPoolRequest.serialize,
response_deserializer=specialist_pool.SpecialistPool.deserialize,
)
return self._stubs["get_specialist_pool"]
@property
def list_specialist_pools(
self,
) -> Callable[
[specialist_pool_service.ListSpecialistPoolsRequest],
specialist_pool_service.ListSpecialistPoolsResponse,
]:
r"""Return a callable for the list specialist pools method over gRPC.
Lists SpecialistPools in a Location.
Returns:
Callable[[~.ListSpecialistPoolsRequest],
~.ListSpecialistPoolsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_specialist_pools" not in self._stubs:
self._stubs["list_specialist_pools"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.SpecialistPoolService/ListSpecialistPools",
request_serializer=specialist_pool_service.ListSpecialistPoolsRequest.serialize,
response_deserializer=specialist_pool_service.ListSpecialistPoolsResponse.deserialize,
)
return self._stubs["list_specialist_pools"]
@property
def delete_specialist_pool(
self,
) -> Callable[
[specialist_pool_service.DeleteSpecialistPoolRequest], operations_pb2.Operation
]:
r"""Return a callable for the delete specialist pool method over gRPC.
Deletes a SpecialistPool as well as all Specialists
in the pool.
Returns:
Callable[[~.DeleteSpecialistPoolRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_specialist_pool" not in self._stubs:
self._stubs["delete_specialist_pool"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.SpecialistPoolService/DeleteSpecialistPool",
request_serializer=specialist_pool_service.DeleteSpecialistPoolRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_specialist_pool"]
@property
def update_specialist_pool(
self,
) -> Callable[
[specialist_pool_service.UpdateSpecialistPoolRequest], operations_pb2.Operation
]:
r"""Return a callable for the update specialist pool method over gRPC.
Updates a SpecialistPool.
Returns:
Callable[[~.UpdateSpecialistPoolRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_specialist_pool" not in self._stubs:
self._stubs["update_specialist_pool"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.SpecialistPoolService/UpdateSpecialistPool",
request_serializer=specialist_pool_service.UpdateSpecialistPoolRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_specialist_pool"]
def close(self):
self.grpc_channel.close()
__all__ = ("SpecialistPoolServiceGrpcTransport",)
| 44.032338
| 102
| 0.647591
|
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import operations_v1
from google.api_core import gapic_v1
import google.auth
from google.auth import credentials as ga_credentials
from google.auth.transport.grpc import SslCredentials
import grpc
from google.cloud.aiplatform_v1.types import specialist_pool
from google.cloud.aiplatform_v1.types import specialist_pool_service
from google.longrunning import operations_pb2
from .base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO
class SpecialistPoolServiceGrpcTransport(SpecialistPoolServiceTransport):
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
credentials = False
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
return self._operations_client
@property
def create_specialist_pool(
self,
) -> Callable[
[specialist_pool_service.CreateSpecialistPoolRequest], operations_pb2.Operation
]:
if "create_specialist_pool" not in self._stubs:
self._stubs["create_specialist_pool"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.SpecialistPoolService/CreateSpecialistPool",
request_serializer=specialist_pool_service.CreateSpecialistPoolRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_specialist_pool"]
@property
def get_specialist_pool(
self,
) -> Callable[
[specialist_pool_service.GetSpecialistPoolRequest],
specialist_pool.SpecialistPool,
]:
if "get_specialist_pool" not in self._stubs:
self._stubs["get_specialist_pool"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.SpecialistPoolService/GetSpecialistPool",
request_serializer=specialist_pool_service.GetSpecialistPoolRequest.serialize,
response_deserializer=specialist_pool.SpecialistPool.deserialize,
)
return self._stubs["get_specialist_pool"]
@property
def list_specialist_pools(
self,
) -> Callable[
[specialist_pool_service.ListSpecialistPoolsRequest],
specialist_pool_service.ListSpecialistPoolsResponse,
]:
if "list_specialist_pools" not in self._stubs:
self._stubs["list_specialist_pools"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.SpecialistPoolService/ListSpecialistPools",
request_serializer=specialist_pool_service.ListSpecialistPoolsRequest.serialize,
response_deserializer=specialist_pool_service.ListSpecialistPoolsResponse.deserialize,
)
return self._stubs["list_specialist_pools"]
@property
def delete_specialist_pool(
self,
) -> Callable[
[specialist_pool_service.DeleteSpecialistPoolRequest], operations_pb2.Operation
]:
if "delete_specialist_pool" not in self._stubs:
self._stubs["delete_specialist_pool"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.SpecialistPoolService/DeleteSpecialistPool",
request_serializer=specialist_pool_service.DeleteSpecialistPoolRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_specialist_pool"]
@property
def update_specialist_pool(
self,
) -> Callable[
[specialist_pool_service.UpdateSpecialistPoolRequest], operations_pb2.Operation
]:
if "update_specialist_pool" not in self._stubs:
self._stubs["update_specialist_pool"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.SpecialistPoolService/UpdateSpecialistPool",
request_serializer=specialist_pool_service.UpdateSpecialistPoolRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_specialist_pool"]
def close(self):
self.grpc_channel.close()
__all__ = ("SpecialistPoolServiceGrpcTransport",)
| true
| true
|
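A hedged sketch of how this transport is typically handed to the generated client. The regional endpoint and resource name are placeholders, and application default credentials are assumed to be available in the environment.

from google.cloud.aiplatform_v1.services.specialist_pool_service import (
    SpecialistPoolServiceClient,
)
from google.cloud.aiplatform_v1.services.specialist_pool_service.transports.grpc import (
    SpecialistPoolServiceGrpcTransport,
)

# Example regional endpoint; the default host in the class above is the global one.
transport = SpecialistPoolServiceGrpcTransport(
    host="us-central1-aiplatform.googleapis.com",
)
client = SpecialistPoolServiceClient(transport=transport)

# Placeholder parent resource; iterating the pager issues ListSpecialistPools RPCs.
parent = "projects/my-project/locations/us-central1"
for pool in client.list_specialist_pools(parent=parent):
    print(pool.name)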
f70ad9d8555a185e24666047cca27b4352cd70d8
| 1,322
|
py
|
Python
|
packages/weevely/core/config.py
|
ZooAtmosphereGroup/HelloPackages
|
0ccffd33bf927b13d28c8f715ed35004c33465d9
|
[
"Apache-2.0"
] | null | null | null |
packages/weevely/core/config.py
|
ZooAtmosphereGroup/HelloPackages
|
0ccffd33bf927b13d28c8f715ed35004c33465d9
|
[
"Apache-2.0"
] | null | null | null |
packages/weevely/core/config.py
|
ZooAtmosphereGroup/HelloPackages
|
0ccffd33bf927b13d28c8f715ed35004c33465d9
|
[
"Apache-2.0"
] | null | null | null |
# Base path for log files and sessions
base_path = '~/.weevely/'
# History path
history_path = '~/.weevely/history'
# Session path
sessions_path = '~/.weevely/sessions/'
sessions_ext = '.session'
# Supported Channels
channels = [
# Obfuscated channel inside POST requests introduced
# in Weevely 3.6
'ObfPost',
]
# Append random GET parameters to every request to
# make sure the page is not cache by proxies.
add_random_param_nocache = False
# Add additional headers to be sent at every request e.g.
# additional_headers = [
# ( 'Authentication', 'Basic QWxhZGRpbjpvcGVuIHNlc2FtBl==' )
# ]
additional_headers = []
# Agents and obfuscators used by generator.py
agent_templates_folder_path = 'bd/agents/'
obfuscators_templates_folder_path = 'bd/obfuscators/'
#######################################
# Resolve given paths - DO NOT CHANGE #
#######################################
import os, sys
base_path = os.path.expanduser(base_path)
history_path = os.path.expanduser(history_path)
sessions_path = os.path.expanduser(sessions_path)
weevely_path = os.path.dirname(os.path.realpath(sys.argv[0]))
agent_templates_folder_path = os.path.join(
weevely_path,
agent_templates_folder_path
)
obfuscators_templates_folder_path = os.path.join(
weevely_path,
obfuscators_templates_folder_path
)
| 26.979592
| 62
| 0.712557
|
base_path = '~/.weevely/'
history_path = '~/.weevely/history'
sessions_path = '~/.weevely/sessions/'
sessions_ext = '.session'
channels = [
'ObfPost',
]
add_random_param_nocache = False
additional_headers = []
agent_templates_folder_path = 'bd/agents/'
obfuscators_templates_folder_path = 'bd/obfuscators/'
import os, sys
base_path = os.path.expanduser(base_path)
history_path = os.path.expanduser(history_path)
sessions_path = os.path.expanduser(sessions_path)
weevely_path = os.path.dirname(os.path.realpath(sys.argv[0]))
agent_templates_folder_path = os.path.join(
weevely_path,
agent_templates_folder_path
)
obfuscators_templates_folder_path = os.path.join(
weevely_path,
obfuscators_templates_folder_path
)
| true
| true
|
f70ada9d3216e0fafca4da86d8a5c4c9d69bc80e
| 1,428
|
py
|
Python
|
demos/text_classification/train_text_classification_bert.py
|
yangheng95/LCF-ABSA
|
0eeb4788269a498d34c2aff942e03af78026617e
|
[
"MIT"
] | 31
|
2019-10-07T03:05:39.000Z
|
2020-06-17T01:34:21.000Z
|
demos/text_classification/train_text_classification_bert.py
|
yangheng95/LCF-ABSA
|
0eeb4788269a498d34c2aff942e03af78026617e
|
[
"MIT"
] | 7
|
2019-10-16T13:37:52.000Z
|
2020-03-30T03:40:56.000Z
|
demos/text_classification/train_text_classification_bert.py
|
yangheng95/LCF-ABSA
|
0eeb4788269a498d34c2aff942e03af78026617e
|
[
"MIT"
] | 3
|
2020-01-12T13:03:35.000Z
|
2020-06-11T08:26:01.000Z
|
# -*- coding: utf-8 -*-
# file: train_text_classification_bert.py
# time: 2021/8/5
# author: yangheng <yangheng@m.scnu.edu.cn>
# github: https://github.com/yangheng95
# Copyright (C) 2021. All Rights Reserved.
from pyabsa import TextClassificationTrainer, ClassificationConfigManager, ClassificationDatasetList
from pyabsa.functional import BERTClassificationModelList
classification_config_english = ClassificationConfigManager.get_classification_config_english()
classification_config_english.model = BERTClassificationModelList.BERT
classification_config_english.num_epoch = 10
classification_config_english.evaluate_begin = 0
classification_config_english.max_seq_len = 512
classification_config_english.log_step = 200
classification_config_english.dropout = 0.5
classification_config_english.cache_dataset = False
classification_config_english.seed = {42, 56, 1}
classification_config_english.l2reg = 1e-5
classification_config_english.learning_rate = 1e-5
classification_config_english.cross_validate_fold = 5
dataset = ClassificationDatasetList.SST2
text_classifier = TextClassificationTrainer(config=classification_config_english,
dataset=dataset,
checkpoint_save_mode=1,
auto_device=True
).load_trained_model()
| 47.6
| 101
| 0.734594
|
from pyabsa import TextClassificationTrainer, ClassificationConfigManager, ClassificationDatasetList
from pyabsa.functional import BERTClassificationModelList
classification_config_english = ClassificationConfigManager.get_classification_config_english()
classification_config_english.model = BERTClassificationModelList.BERT
classification_config_english.num_epoch = 10
classification_config_english.evaluate_begin = 0
classification_config_english.max_seq_len = 512
classification_config_english.log_step = 200
classification_config_english.dropout = 0.5
classification_config_english.cache_dataset = False
classification_config_english.seed = {42, 56, 1}
classification_config_english.l2reg = 1e-5
classification_config_english.learning_rate = 1e-5
classification_config_english.cross_validate_fold = 5
dataset = ClassificationDatasetList.SST2
text_classifier = TextClassificationTrainer(config=classification_config_english,
dataset=dataset,
checkpoint_save_mode=1,
auto_device=True
).load_trained_model()
| true
| true
|
f70adcd6f0c2d4785c71d8c4122e7bd260cf8c8b
| 1,559
|
py
|
Python
|
tests/cornac/datasets/test_movielens.py
|
carmanzhang/cornac
|
215efd0ffa7b8ee1afe1ac6b5cc650ee6303ace3
|
[
"Apache-2.0"
] | 597
|
2018-07-17T10:59:56.000Z
|
2022-03-31T07:59:36.000Z
|
tests/cornac/datasets/test_movielens.py
|
carmanzhang/cornac
|
215efd0ffa7b8ee1afe1ac6b5cc650ee6303ace3
|
[
"Apache-2.0"
] | 137
|
2018-10-12T10:52:11.000Z
|
2022-03-04T15:26:49.000Z
|
tests/cornac/datasets/test_movielens.py
|
carmanzhang/cornac
|
215efd0ffa7b8ee1afe1ac6b5cc650ee6303ace3
|
[
"Apache-2.0"
] | 112
|
2018-07-26T04:36:34.000Z
|
2022-03-31T02:29:34.000Z
|
# Copyright 2018 The Cornac Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import unittest
import random
import time
from cornac.datasets import movielens
class TestMovieLens(unittest.TestCase):
def test_load_feedback(self):
# only run data download tests 20% of the time to speed up frequent testing
random.seed(time.time())
if random.random() > 0.8:
ml_100k = movielens.load_feedback()
self.assertEqual(len(ml_100k), 100000)
if random.random() > 0.8:
ml_1m = movielens.load_feedback(variant='1M')
self.assertEqual(len(ml_1m), 1000209)
def test_load_plot(self):
# only run data download tests 20% of the time to speed up frequent testing
random.seed(time.time())
if random.random() > 0.8:
plots, ids = movielens.load_plot()
self.assertEqual(len(ids), 10076)
if __name__ == '__main__':
unittest.main()
| 33.891304
| 83
| 0.654907
|
import unittest
import random
import time
from cornac.datasets import movielens
class TestMovieLens(unittest.TestCase):
def test_load_feedback(self):
random.seed(time.time())
if random.random() > 0.8:
ml_100k = movielens.load_feedback()
self.assertEqual(len(ml_100k), 100000)
if random.random() > 0.8:
ml_1m = movielens.load_feedback(variant='1M')
self.assertEqual(len(ml_1m), 1000209)
def test_load_plot(self):
random.seed(time.time())
if random.random() > 0.8:
plots, ids = movielens.load_plot()
self.assertEqual(len(ids), 10076)
if __name__ == '__main__':
unittest.main()
| true
| true
|
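The test above only asserts row counts; for context, a typical consumer of the same loader wires it into a cornac evaluation, roughly as sketched below (the split and model settings are arbitrary examples, not part of the test file).

import cornac
from cornac.datasets import movielens
from cornac.eval_methods import RatioSplit

# Downloads MovieLens 100K on first use; the 80/20 split and MF settings are illustrative.
feedback = movielens.load_feedback()
ratio_split = RatioSplit(data=feedback, test_size=0.2, seed=123)
mf = cornac.models.MF(k=10, max_iter=25, seed=123)

cornac.Experiment(
    eval_method=ratio_split,
    models=[mf],
    metrics=[cornac.metrics.RMSE()],
).run()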
f70add09c6753971def6cb61aa3311e6e70eaed2
| 2,289
|
py
|
Python
|
store_data.py
|
jaeckie/covid19-containment-embeddings
|
e27e63266113231ee399f3a55f76b823d514c6f7
|
[
"MIT"
] | null | null | null |
store_data.py
|
jaeckie/covid19-containment-embeddings
|
e27e63266113231ee399f3a55f76b823d514c6f7
|
[
"MIT"
] | null | null | null |
store_data.py
|
jaeckie/covid19-containment-embeddings
|
e27e63266113231ee399f3a55f76b823d514c6f7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 4 15:37:43 2020
@author: moder
"""
import os
from datetime import datetime
import pandas as pd
import urllib.request
from bs4 import BeautifulSoup
user_agent = "user_agent = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)"
def scrap_wikipedia_text(url):
request = urllib.request.Request(url, data=None, headers={'User-Agent' : user_agent})
html = urllib.request.urlopen(request).read().decode('utf-8')
soup = BeautifulSoup(html, 'html.parser')
content_div = soup.find('div', attrs={'id': 'mw-content-text'})
# remove tables and graphs
if content_div is not None:
for s in content_div.select('table'):
s.extract()
for s in content_div.select('img'):
s.extract()
# remove references
for s in content_div.select('div.reflist'):
s.extract()
print('div.reflist extracted from %s...' % url)
# iterate all p tags and append to text
tags = ['h1', 'h2', 'h3', 'li', 'p']
bodytext = ''
for con in content_div.find_all(tags):
bodytext += con.text
return bodytext
return None
if __name__ == '__main__':
print('store data started...')
# load containment history file from kaggle
df_contain = pd.read_csv(r'data/COVID 19 Containment measures data.csv')
# cfilter = df_contain['Country'].isin(['Austria', 'Germany', 'Italy', 'Spain', 'Denmark'])
# df_c = df_contain[cfilter]
df_c = df_contain
df = df_c[df_c['Source'].notna()]
df_drop = df.drop_duplicates(subset='Source', keep='last')
wfilter = df_drop['Source'].str.contains('en.wikipedia.org')
df_red = df_drop[wfilter]
df_res = df_red[['Date Start', 'Country', 'Keywords', 'Source']]
df_res.to_csv(r'data/covid19-all-countries.csv')
for index, row in df_res.iterrows():
text = scrap_wikipedia_text(row['Source'])
time = datetime.now().strftime('%Y%m%d_%H%M%S')
filename = '%s_%s_covid19-wikipedia.txt' % (time, row['Country'])
with open(os.path.join('data',filename), 'w', encoding='utf-8') as file:
file.write(text)
print('saved file %s ...' % filename)
file.close()
# \[\d+\]
| 34.681818
| 95
| 0.606815
|
import os
from datetime import datetime
import pandas as pd
import urllib.request
from bs4 import BeautifulSoup
user_agent = "user_agent = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)"
def scrap_wikipedia_text(url):
request = urllib.request.Request(url, data=None, headers={'User-Agent' : user_agent})
html = urllib.request.urlopen(request).read().decode('utf-8')
soup = BeautifulSoup(html, 'html.parser')
content_div = soup.find('div', attrs={'id': 'mw-content-text'})
# remove tables and graphs
if content_div is not None:
for s in content_div.select('table'):
s.extract()
for s in content_div.select('img'):
s.extract()
# remove references
for s in content_div.select('div.reflist'):
s.extract()
print('div.reflist extracted from %s...' % url)
# iterate all p tags and append to text
tags = ['h1', 'h2', 'h3', 'li', 'p']
bodytext = ''
for con in content_div.find_all(tags):
bodytext += con.text
return bodytext
return None
if __name__ == '__main__':
print('store data started...')
# load containment history file from kaggle
df_contain = pd.read_csv(r'data/COVID 19 Containment measures data.csv')
# cfilter = df_contain['Country'].isin(['Austria', 'Germany', 'Italy', 'Spain', 'Denmark'])
# df_c = df_contain[cfilter]
df_c = df_contain
df = df_c[df_c['Source'].notna()]
df_drop = df.drop_duplicates(subset='Source', keep='last')
wfilter = df_drop['Source'].str.contains('en.wikipedia.org')
df_red = df_drop[wfilter]
df_res = df_red[['Date Start', 'Country', 'Keywords', 'Source']]
df_res.to_csv(r'data/covid19-all-countries.csv')
for index, row in df_res.iterrows():
text = scrap_wikipedia_text(row['Source'])
time = datetime.now().strftime('%Y%m%d_%H%M%S')
filename = '%s_%s_covid19-wikipedia.txt' % (time, row['Country'])
with open(os.path.join('data',filename), 'w', encoding='utf-8') as file:
file.write(text)
print('saved file %s ...' % filename)
file.close()
# \[\d+\]
| true
| true
|
f70add5cf3160d549f4b2591ff4c1811d0af48bb
| 5,189
|
py
|
Python
|
loss.py
|
VIROBO-15/yolov1
|
b7824a6cc7e89a6c29ab63f636a236d923fa0a64
|
[
"MIT"
] | null | null | null |
loss.py
|
VIROBO-15/yolov1
|
b7824a6cc7e89a6c29ab63f636a236d923fa0a64
|
[
"MIT"
] | null | null | null |
loss.py
|
VIROBO-15/yolov1
|
b7824a6cc7e89a6c29ab63f636a236d923fa0a64
|
[
"MIT"
] | null | null | null |
import torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
LAMBDA_COORD = 5
LAMBDA_NOOBJ = 0.5
def calc_loss(inp , target, opt):
if inp.size(0) != target.size(0):
raise Exception("Batch size does not match")
total_loss = torch.tensor(0.0)
#total_loss = total_loss.dtype(tensor)
for i in range(inp.size(0)):
inp = inp[i]
target = target[i]
Q = predict_one_bbox(inp, target, opt)
total_loss = total_loss + calc_loss_single(Q, target, opt)
return total_loss
def predict_one_bbox(inp, target, opt):
Q = torch.zeros(opt.S, opt.S, 5 + opt.C)
select = torch.tensor(0).to(device)
for i in range(opt.S):
for j in range(opt.S):
for b in range(opt.B):
if b==0:
boxes = inp[i, j, b*5 : b*5+5].to(device)
else:
boxes = torch.stack((boxes, inp[i, j, b*5 : b*5+5])).to(device)
if len(target[i, j, :].nonzero()) > 1:
max_iou = torch.tensor([0.]).to(device)
groundtruth_box = target[i, j, :4].clone()
for b in range(opt.B):
iou = calc_IOU(groundtruth_box, boxes[b][:-1], device)
if iou > max_iou:
max_iou = iou
select = torch.tensor(b).to(device)
else:
max_confidence = torch.tensor(0.).to(device)
for b in range(opt.B):
confidence = boxes[b][-1]
if confidence > max_confidence:
max_confidence = confidence
select = torch.tensor(b).to(device)
Q[i, j, :5] = boxes[select]
Q[i, j, 5:] = inp[i, j, -opt.C:]
return Q
def calc_loss_single(inp, target, opt):
loss = torch.zeros(1)
for i in range(opt.S):
for j in range(opt.S):
# case 1: grid cell HAS object
if len(target[i, j, :].nonzero()) > 1:
# localization
loss = loss + LAMBDA_COORD * (torch.pow(inp[i, j, 0] - target[i, j, 0], 2) + torch.pow(inp[i, j, 1] - target[i, j, 1], 2))
loss = loss + LAMBDA_COORD * (torch.pow(torch.sqrt(torch.abs(inp[i, j, 2])) - torch.sqrt(torch.abs(target[i, j,2])), 2) \
+ torch.pow(torch.sqrt(torch.abs(inp[i, j, 3])) - torch.sqrt(torch.abs(target[i, j, 3])), 2)) # org
# loss = loss + LAMBDA_COORD * (torch.sqrt(torch.abs(P[i, j, 2] - G[i, j, 2])) +
# torch.sqrt(torch.abs(P[i, j, 3] - G[i, j, 3]))) # ZZ
loss = loss + torch.pow(inp[i, j, 4]-1, 2) # Ground truth confidence is constant 1
# classification
true_cls = target[i, j, -1].type(torch.int64)
true_cls_vec = torch.zeros(opt.C)
true_cls_vec[true_cls] = torch.tensor(1)
pred_cls_vec = inp[i, j, -opt.C:]
loss = loss + torch.sum(torch.pow(pred_cls_vec - true_cls_vec, 2))
# case 2: grid cell NO object
# classification
else:
loss = loss + LAMBDA_NOOBJ * torch.pow(inp[i, j, 4] - 0, 2) # Ground truth confidence is constant 0
return loss
def calc_IOU(box_1, box_2, device=torch.device('cpu'), use_float64=False):
"""
Tensor version of calc_IOU()
compute IOU between two bounding boxes
:param box_1: Detection x, y, w, h image coordinates in [0, 1]
:param box_2: GroundTruth x, y, w, h image coordinates in [0, 1]
:return:
"""
'''
x_min_1 = torch.clamp((box_1[0] - box_1[2] / 2), 0, 1).to(device)
x_max_1 = torch.clamp((box_1[0] + box_1[2] / 2), 0, 1).to(device)
y_min_1 = torch.clamp((box_1[1] - box_1[3] / 2), 0, 1).to(device)
y_max_1 = torch.clamp((box_1[1] + box_1[3] / 2), 0, 1).to(device)
'''
x_min_1 = torch.clamp((abs(box_1[0]) - abs(box_1[2]) / 2), 0, 1).to(device)
x_max_1 = torch.clamp((abs(box_1[0]) + abs(box_1[2]) / 2), 0, 1).to(device)
y_min_1 = torch.clamp((abs(box_1[1]) - abs(box_1[3]) / 2), 0, 1).to(device)
y_max_1 = torch.clamp((abs(box_1[1]) + abs(box_1[3]) / 2), 0, 1).to(device)
x_min_2 = torch.clamp((box_2[0] - box_2[2] / 2), 0, 1).to(device)
x_max_2 = torch.clamp((box_2[0] + box_2[2] / 2), 0, 1).to(device)
y_min_2 = torch.clamp((box_2[1] - box_2[3] / 2), 0, 1).to(device)
y_max_2 = torch.clamp((box_2[1] + box_2[3] / 2), 0, 1).to(device)
# z = torch.tensor(0, dtype=torch.float).to(device)
z = torch.tensor(0.).to(device)
a = torch.min(x_max_1, x_max_2)
b = torch.max(x_min_1, x_min_2)
c = torch.min(y_max_1, y_max_2)
d = torch.max(y_min_1, y_min_2)
overlap_width = torch.max(a-b, z)
overlap_height = torch.max(c-d, z)
overlap_area = overlap_width * overlap_height
union_area = (x_max_1 - x_min_1) * (y_max_1 - y_min_1) \
+ (x_max_2 - x_min_2) * (y_max_2 - y_min_2) \
- overlap_area
intersection_over_union = overlap_area / union_area
return intersection_over_union
| 31.259036
| 138
| 0.532665
|
import torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
LAMBDA_COORD = 5
LAMBDA_NOOBJ = 0.5
def calc_loss(inp , target, opt):
if inp.size(0) != target.size(0):
raise Exception("Batch size does not match")
total_loss = torch.tensor(0.0)
for i in range(inp.size(0)):
inp = inp[i]
target = target[i]
Q = predict_one_bbox(inp, target, opt)
total_loss = total_loss + calc_loss_single(Q, target, opt)
return total_loss
def predict_one_bbox(inp, target, opt):
Q = torch.zeros(opt.S, opt.S, 5 + opt.C)
select = torch.tensor(0).to(device)
for i in range(opt.S):
for j in range(opt.S):
for b in range(opt.B):
if b==0:
boxes = inp[i, j, b*5 : b*5+5].to(device)
else:
boxes = torch.stack((boxes, inp[i, j, b*5 : b*5+5])).to(device)
if len(target[i, j, :].nonzero()) > 1:
max_iou = torch.tensor([0.]).to(device)
groundtruth_box = target[i, j, :4].clone()
for b in range(opt.B):
iou = calc_IOU(groundtruth_box, boxes[b][:-1], device)
if iou > max_iou:
max_iou = iou
select = torch.tensor(b).to(device)
else:
max_confidence = torch.tensor(0.).to(device)
for b in range(opt.B):
confidence = boxes[b][-1]
if confidence > max_confidence:
max_confidence = confidence
select = torch.tensor(b).to(device)
Q[i, j, :5] = boxes[select]
Q[i, j, 5:] = inp[i, j, -opt.C:]
return Q
def calc_loss_single(inp, target, opt):
loss = torch.zeros(1)
for i in range(opt.S):
for j in range(opt.S):
if len(target[i, j, :].nonzero()) > 1:
loss = loss + LAMBDA_COORD * (torch.pow(inp[i, j, 0] - target[i, j, 0], 2) + torch.pow(inp[i, j, 1] - target[i, j, 1], 2))
loss = loss + LAMBDA_COORD * (torch.pow(torch.sqrt(torch.abs(inp[i, j, 2])) - torch.sqrt(torch.abs(target[i, j,2])), 2) \
+ torch.pow(torch.sqrt(torch.abs(inp[i, j, 3])) - torch.sqrt(torch.abs(target[i, j, 3])), 2))
loss = loss + torch.pow(inp[i, j, 4]-1, 2)
true_cls = target[i, j, -1].type(torch.int64)
true_cls_vec = torch.zeros(opt.C)
true_cls_vec[true_cls] = torch.tensor(1)
pred_cls_vec = inp[i, j, -opt.C:]
loss = loss + torch.sum(torch.pow(pred_cls_vec - true_cls_vec, 2))
else:
loss = loss + LAMBDA_NOOBJ * torch.pow(inp[i, j, 4] - 0, 2)
return loss
def calc_IOU(box_1, box_2, device=torch.device('cpu'), use_float64=False):
x_min_1 = torch.clamp((abs(box_1[0]) - abs(box_1[2]) / 2), 0, 1).to(device)
x_max_1 = torch.clamp((abs(box_1[0]) + abs(box_1[2]) / 2), 0, 1).to(device)
y_min_1 = torch.clamp((abs(box_1[1]) - abs(box_1[3]) / 2), 0, 1).to(device)
y_max_1 = torch.clamp((abs(box_1[1]) + abs(box_1[3]) / 2), 0, 1).to(device)
x_min_2 = torch.clamp((box_2[0] - box_2[2] / 2), 0, 1).to(device)
x_max_2 = torch.clamp((box_2[0] + box_2[2] / 2), 0, 1).to(device)
y_min_2 = torch.clamp((box_2[1] - box_2[3] / 2), 0, 1).to(device)
y_max_2 = torch.clamp((box_2[1] + box_2[3] / 2), 0, 1).to(device)
z = torch.tensor(0.).to(device)
a = torch.min(x_max_1, x_max_2)
b = torch.max(x_min_1, x_min_2)
c = torch.min(y_max_1, y_max_2)
d = torch.max(y_min_1, y_min_2)
overlap_width = torch.max(a-b, z)
overlap_height = torch.max(c-d, z)
overlap_area = overlap_width * overlap_height
union_area = (x_max_1 - x_min_1) * (y_max_1 - y_min_1) \
+ (x_max_2 - x_min_2) * (y_max_2 - y_min_2) \
- overlap_area
intersection_over_union = overlap_area / union_area
return intersection_over_union
| true
| true
|
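A small worked check of the IoU helper defined above. It assumes the file is importable as loss.py; the two boxes are arbitrary examples chosen so the expected value can be verified by hand.

import torch
from loss import calc_IOU  # assumes the file above is on the path as loss.py

# Boxes are (cx, cy, w, h) in [0, 1]: A covers 0.2..0.6 and B covers 0.4..0.8 on both axes.
box_a = torch.tensor([0.4, 0.4, 0.4, 0.4])
box_b = torch.tensor([0.6, 0.6, 0.4, 0.4])

# Overlap is the 0.2 x 0.2 square (area 0.04); union is 0.16 + 0.16 - 0.04 = 0.28,
# so the expected IoU is 0.04 / 0.28 ≈ 0.1429.
iou = calc_IOU(box_a, box_b)
print(float(iou))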
f70adda29dacc58b9008a96759b8020e2da89fb5
| 149
|
py
|
Python
|
Basic_API/myproject/basic_api/apps.py
|
garimazthakur/Learning_Django
|
1be5115a4fa9802993824b16ddc1009d1d1fc148
|
[
"Apache-2.0"
] | null | null | null |
Basic_API/myproject/basic_api/apps.py
|
garimazthakur/Learning_Django
|
1be5115a4fa9802993824b16ddc1009d1d1fc148
|
[
"Apache-2.0"
] | null | null | null |
Basic_API/myproject/basic_api/apps.py
|
garimazthakur/Learning_Django
|
1be5115a4fa9802993824b16ddc1009d1d1fc148
|
[
"Apache-2.0"
] | null | null | null |
from django.apps import AppConfig
class BasicApiConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'basic_api'
| 21.285714
| 56
| 0.765101
|
from django.apps import AppConfig
class BasicApiConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'basic_api'
| true
| true
|
f70addf5f1bdab1c36a8baa247be36b38e7ec07a
| 6,089
|
py
|
Python
|
law/job/dashboard.py
|
mschnepf/law
|
7e9e54bb13984a22226ed6f2313780af8dde118a
|
[
"BSD-3-Clause"
] | null | null | null |
law/job/dashboard.py
|
mschnepf/law
|
7e9e54bb13984a22226ed6f2313780af8dde118a
|
[
"BSD-3-Clause"
] | null | null | null |
law/job/dashboard.py
|
mschnepf/law
|
7e9e54bb13984a22226ed6f2313780af8dde118a
|
[
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
"""
Definition of the job dashboard interface.
"""
__all__ = ["BaseJobDashboard", "NoJobDashboard", "cache_by_status"]
import time
import functools
from contextlib import contextmanager
from abc import ABCMeta, abstractmethod
import six
def cache_by_status(func):
"""
Decorator for :py:meth:`BaseJobDashboard.publish` (and inheriting classes) that caches the last
published status to decide if a new publication is necessary or not. When the status did not
change since the last call, the actual publish method is not invoked and *None* is returned.
"""
@functools.wraps(func)
def wrapper(self, job_data, event, job_num, *args, **kwargs):
job_id = job_data["job_id"]
dashboard_status = self.map_status(job_data.get("status"), event)
# nothing to do when the status is invalid or did not change
if not dashboard_status or self._last_states.get(job_id) == dashboard_status:
return None
# set the new status
self._last_states[job_id] = dashboard_status
return func(self, job_data, event, job_num, *args, **kwargs)
return wrapper
@six.add_metaclass(ABCMeta)
class BaseJobDashboard(object):
"""
Base class of a minimal job dashboard interface that is used from within
:py:class:`law.workflow.remote.BaseRemoteWorkflow`'s.
.. py:classattribute:: persistent_attributes
type: list
List of instance attributes that should be marked as being persistent. This is (e.g.) used in
the :py:class:`law.workflow.remote.BaseRemoteWorkflow` when saving job and submission
information to submission files. Common use cases are user information.
.. py:attribute:: max_rate
type: int
Maximum number of events that can be published per second. :py:meth:`rate_guard` uses this
value to delay function calls.
"""
cache_by_status = None
persistent_attributes = []
def __init__(self, max_rate=0):
super(BaseJobDashboard, self).__init__()
# maximum number of events per second
self.max_rate = max_rate
# timestamp of last event, used to ensure that max_rate is not exceeded
self._last_event_time = 0.
# last dashboard status per job_id, used to prevent subsequent requests for jobs
# without any status change
self._last_states = {}
def get_persistent_config(self):
"""
Returns the values of all :py:attr:`persistent_attributes` of this instance in a dictionary.
"""
return {attr: getattr(self, attr) for attr in self.persistent_attributes}
def apply_config(self, config):
"""
Sets all attributes in a dictionary *config* to this instance. This can be understood as the
counterpart of :py:meth:`get_persistent_config`.
"""
for attr, value in six.iteritems(config):
if hasattr(self, attr):
setattr(self, attr, value)
@contextmanager
def rate_guard(self):
"""
Context guard that ensures that decorated contexts are delayed in order to limit the number
of status publications per second, defined by :py:attr:`max_rate`. Example:
.. code-block:: python
# print some numbers, which will take 10 / max_rate seconds
for i in range(10):
with self.rate_guard():
print(i)
"""
now = 0.
if self.max_rate > 0:
now = time.time()
diff = self._last_event_time + 1. / self.max_rate - now
if diff > 0:
time.sleep(diff)
try:
yield
finally:
self._last_event_time = now
def remote_hook_file(self):
"""
This method can return the path to a file that is considered as an input file to remote
jobs. This file can contain bash functions, environment variables, etc., that are necessary
to communicate with the implemented job dashboard. When *None* is returned, no file is sent.
"""
return None
def remote_hook_data(self, job_num, attempt):
"""
This method can return a dictionary that is sent with remote jobs in the format
``key1=value1 key2=value2 ...``. The returned dictionary should (but does not have to)
include the job number *job_num* and the retry *attempt*.
"""
return None
def create_tracking_url(self):
"""
This method can return a tracking url that refers to a web page that visualizes jobs. When
set, the url is shown in the central luigi scheduler.
"""
return None
@abstractmethod
def map_status(self, job_status, event):
"""
Maps the *job_status* (see :py:class:`law.job.base.BaseJobManager`) for a particular *event*
to the status name that is accepted by the implemented job dashboard. Possible events are:
- action.submit
- action.cancel
- status.pending
- status.running
- status.finished
- status.retry
- status.failed
"""
return
@abstractmethod
def publish(self, job_data, event, job_num, *args, **kwargs):
"""
Publishes the status of a job to the implemented job dashboard. *job_data* is a dictionary
that contains a *job_id* and a *status* string (see
:py:meth:`law.workflow.remote.StatusData.job_data`).
"""
return
BaseJobDashboard.cache_by_status = staticmethod(cache_by_status)
class NoJobDashboard(BaseJobDashboard):
"""
    Null job dashboard implementation. Instances of this class do not actually publish any job
    status. They can rather be used as a placeholder in situations where a job dashboard is required,
such as in :py:class:`law.workflow.remote.BaseRemoteWorkflow`.
"""
def map_status(self, *args, **kwargs):
""""""
return
def publish(self, *args, **kwargs):
""""""
return
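

# A minimal sketch of a concrete dashboard, assuming only the interface defined
# above; the class name and the plain-text output are hypothetical and not part
# of law itself.
class PrintJobDashboard(BaseJobDashboard):

    def map_status(self, job_status, event):
        # pass job manager states through unchanged
        return job_status

    @cache_by_status
    def publish(self, job_data, event, job_num, *args, **kwargs):
        # respect the configured publication rate
        with self.rate_guard():
            print("job {}: {} ({})".format(job_data["job_id"], job_data["status"], event))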
| 32.736559
| 100
| 0.642634
|
__all__ = ["BaseJobDashboard", "NoJobDashboard", "cache_by_status"]
import time
import functools
from contextlib import contextmanager
from abc import ABCMeta, abstractmethod
import six
def cache_by_status(func):
@functools.wraps(func)
def wrapper(self, job_data, event, job_num, *args, **kwargs):
job_id = job_data["job_id"]
dashboard_status = self.map_status(job_data.get("status"), event)
if not dashboard_status or self._last_states.get(job_id) == dashboard_status:
return None
self._last_states[job_id] = dashboard_status
return func(self, job_data, event, job_num, *args, **kwargs)
return wrapper
@six.add_metaclass(ABCMeta)
class BaseJobDashboard(object):
cache_by_status = None
persistent_attributes = []
def __init__(self, max_rate=0):
super(BaseJobDashboard, self).__init__()
self.max_rate = max_rate
self._last_event_time = 0.
self._last_states = {}
def get_persistent_config(self):
return {attr: getattr(self, attr) for attr in self.persistent_attributes}
def apply_config(self, config):
for attr, value in six.iteritems(config):
if hasattr(self, attr):
setattr(self, attr, value)
@contextmanager
def rate_guard(self):
now = 0.
if self.max_rate > 0:
now = time.time()
diff = self._last_event_time + 1. / self.max_rate - now
if diff > 0:
time.sleep(diff)
try:
yield
finally:
self._last_event_time = now
def remote_hook_file(self):
return None
def remote_hook_data(self, job_num, attempt):
return None
def create_tracking_url(self):
return None
@abstractmethod
def map_status(self, job_status, event):
return
@abstractmethod
def publish(self, job_data, event, job_num, *args, **kwargs):
return
BaseJobDashboard.cache_by_status = staticmethod(cache_by_status)
class NoJobDashboard(BaseJobDashboard):
def map_status(self, *args, **kwargs):
return
def publish(self, *args, **kwargs):
return
| true
| true
|
f70adf2b66abb9478b88c14aa93f488e2872631b
| 5,168
|
py
|
Python
|
src/ansiblelint/formatters/__init__.py
|
xoxys/ansible-lint
|
a009515d2f9cebc147fb02a00ef897526018f1dd
|
[
"MIT"
] | null | null | null |
src/ansiblelint/formatters/__init__.py
|
xoxys/ansible-lint
|
a009515d2f9cebc147fb02a00ef897526018f1dd
|
[
"MIT"
] | null | null | null |
src/ansiblelint/formatters/__init__.py
|
xoxys/ansible-lint
|
a009515d2f9cebc147fb02a00ef897526018f1dd
|
[
"MIT"
] | null | null | null |
"""Output formatters."""
import os
from pathlib import Path
from typing import TYPE_CHECKING, Generic, TypeVar, Union
import rich
if TYPE_CHECKING:
from ansiblelint.errors import MatchError
T = TypeVar('T', bound='BaseFormatter')
class BaseFormatter(Generic[T]):
"""Formatter of ansible-lint output.
Base class for output formatters.
Args:
base_dir (str|Path): reference directory against which display relative path.
display_relative_path (bool): whether to show path as relative or absolute
"""
def __init__(self, base_dir: Union[str, Path], display_relative_path: bool) -> None:
"""Initialize a BaseFormatter instance."""
if isinstance(base_dir, str):
base_dir = Path(base_dir)
if base_dir: # can be None
base_dir = base_dir.absolute()
# Required 'cause os.path.relpath() does not accept Path before 3.6
if isinstance(base_dir, Path):
base_dir = str(base_dir) # Drop when Python 3.5 is no longer supported
self._base_dir = base_dir if display_relative_path else None
def _format_path(self, path: Union[str, Path]) -> str:
# Required 'cause os.path.relpath() does not accept Path before 3.6
if isinstance(path, Path):
path = str(path) # Drop when Python 3.5 is no longer supported
if not self._base_dir:
return path
# Use os.path.relpath 'cause Path.relative_to() misbehaves
return os.path.relpath(path, start=self._base_dir)
def format(self, match: "MatchError") -> str:
return str(match)
def escape(self, text: str) -> str:
"""Escapes a string to avoid processing it as markup."""
return rich.markup.escape(text)
class Formatter(BaseFormatter):
def format(self, match: "MatchError") -> str:
_id = getattr(match.rule, 'id', '000')
result = (
f"[error_code]{_id}[/][dim]:[/] [error_title]{self.escape(match.message)}[/]")
if match.tag:
result += f" [dim][error_code]({match.tag})[/][/]"
result += (
"\n"
f"[filename]{self._format_path(match.filename or '')}[/]:{match.position}")
if match.details:
result += f" [dim]{match.details}[/]"
result += "\n"
return result
class QuietFormatter(BaseFormatter):
def format(self, match: "MatchError") -> str:
return (
f"[error_code]{match.rule.id}[/] "
f"[filename]{self._format_path(match.filename or '')}[/]:{match.position}")
class ParseableFormatter(BaseFormatter):
"""Parseable uses PEP8 compatible format."""
def format(self, match: "MatchError") -> str:
result = (
f"[filename]{self._format_path(match.filename or '')}[/]:{match.position}: "
f"[error_code]E{match.rule.id}[/] [dim]{self.escape(match.message)}[/]")
if match.tag:
result += f" [dim][error_code]({match.tag})[/][/]"
return result
class AnnotationsFormatter(BaseFormatter):
# https://docs.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-a-warning-message
"""Formatter for emitting violations as GitHub Workflow Commands.
    These commands trigger the GHA Workflow runner platform to post violations
    in the form of GitHub Checks API annotations that appear rendered in the
    pull-request files view.
::debug file={name},line={line},col={col},severity={severity}::{message}
::warning file={name},line={line},col={col},severity={severity}::{message}
::error file={name},line={line},col={col},severity={severity}::{message}
Supported levels: debug, warning, error
"""
def format(self, match: "MatchError") -> str:
"""Prepare a match instance for reporting as a GitHub Actions annotation."""
level = self._severity_to_level(match.rule.severity)
file_path = self._format_path(match.filename or "")
line_num = match.linenumber
rule_id = match.rule.id
severity = match.rule.severity
violation_details = self.escape(match.message)
if match.column:
col = f",col={match.column}"
else:
col = ""
return (
f"::{level} file={file_path},line={line_num}{col},severity={severity}"
f"::E{rule_id} {violation_details}"
)
@staticmethod
def _severity_to_level(severity: str) -> str:
if severity in ['VERY_LOW', 'LOW']:
return 'warning'
if severity in ['INFO']:
return 'debug'
# ['MEDIUM', 'HIGH', 'VERY_HIGH'] or anything else
return 'error'
class ParseableSeverityFormatter(BaseFormatter):
def format(self, match: "MatchError") -> str:
filename = self._format_path(match.filename or "")
position = match.position
rule_id = u"E{0}".format(match.rule.id)
severity = match.rule.severity
message = self.escape(str(match.message))
return (
f"[filename]{filename}[/]:{position}: [[error_code]{rule_id}[/]] "
f"[[error_code]{severity}[/]] [dim]{message}[/]")
| 35.156463
| 113
| 0.620937
|
import os
from pathlib import Path
from typing import TYPE_CHECKING, Generic, TypeVar, Union
import rich
if TYPE_CHECKING:
from ansiblelint.errors import MatchError
T = TypeVar('T', bound='BaseFormatter')
class BaseFormatter(Generic[T]):
def __init__(self, base_dir: Union[str, Path], display_relative_path: bool) -> None:
if isinstance(base_dir, str):
base_dir = Path(base_dir)
        if base_dir:
            base_dir = base_dir.absolute()
if isinstance(base_dir, Path):
base_dir = str(base_dir) # Drop when Python 3.5 is no longer supported
self._base_dir = base_dir if display_relative_path else None
def _format_path(self, path: Union[str, Path]) -> str:
# Required 'cause os.path.relpath() does not accept Path before 3.6
if isinstance(path, Path):
path = str(path)
if not self._base_dir:
return path
return os.path.relpath(path, start=self._base_dir)
def format(self, match: "MatchError") -> str:
return str(match)
def escape(self, text: str) -> str:
return rich.markup.escape(text)
class Formatter(BaseFormatter):
def format(self, match: "MatchError") -> str:
_id = getattr(match.rule, 'id', '000')
result = (
f"[error_code]{_id}[/][dim]:[/] [error_title]{self.escape(match.message)}[/]")
if match.tag:
result += f" [dim][error_code]({match.tag})[/][/]"
result += (
"\n"
f"[filename]{self._format_path(match.filename or '')}[/]:{match.position}")
if match.details:
result += f" [dim]{match.details}[/]"
result += "\n"
return result
class QuietFormatter(BaseFormatter):
def format(self, match: "MatchError") -> str:
return (
f"[error_code]{match.rule.id}[/] "
f"[filename]{self._format_path(match.filename or '')}[/]:{match.position}")
class ParseableFormatter(BaseFormatter):
def format(self, match: "MatchError") -> str:
result = (
f"[filename]{self._format_path(match.filename or '')}[/]:{match.position}: "
f"[error_code]E{match.rule.id}[/] [dim]{self.escape(match.message)}[/]")
if match.tag:
result += f" [dim][error_code]({match.tag})[/][/]"
return result
class AnnotationsFormatter(BaseFormatter):
# https://docs.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-a-warning-message
def format(self, match: "MatchError") -> str:
level = self._severity_to_level(match.rule.severity)
file_path = self._format_path(match.filename or "")
line_num = match.linenumber
rule_id = match.rule.id
severity = match.rule.severity
violation_details = self.escape(match.message)
if match.column:
col = f",col={match.column}"
else:
col = ""
return (
f"::{level} file={file_path},line={line_num}{col},severity={severity}"
f"::E{rule_id} {violation_details}"
)
@staticmethod
def _severity_to_level(severity: str) -> str:
if severity in ['VERY_LOW', 'LOW']:
return 'warning'
if severity in ['INFO']:
return 'debug'
# ['MEDIUM', 'HIGH', 'VERY_HIGH'] or anything else
return 'error'
class ParseableSeverityFormatter(BaseFormatter):
def format(self, match: "MatchError") -> str:
filename = self._format_path(match.filename or "")
position = match.position
rule_id = u"E{0}".format(match.rule.id)
severity = match.rule.severity
message = self.escape(str(match.message))
return (
f"[filename]{filename}[/]:{position}: [[error_code]{rule_id}[/]] "
f"[[error_code]{severity}[/]] [dim]{message}[/]")
| true
| true
|
f70adf6cbf6163a3f4aa41d1344c3e513b4e7594
| 6,397
|
py
|
Python
|
src/pymap3d/tests/test_latitude.py
|
EpicWink/pymap3d
|
021e9924f94b2bb5b7148cd00f03d3557619fe27
|
[
"BSD-2-Clause"
] | 1
|
2021-05-05T20:17:17.000Z
|
2021-05-05T20:17:17.000Z
|
src/pymap3d/tests/test_latitude.py
|
EpicWink/pymap3d
|
021e9924f94b2bb5b7148cd00f03d3557619fe27
|
[
"BSD-2-Clause"
] | null | null | null |
src/pymap3d/tests/test_latitude.py
|
EpicWink/pymap3d
|
021e9924f94b2bb5b7148cd00f03d3557619fe27
|
[
"BSD-2-Clause"
] | null | null | null |
import pytest
from pytest import approx
from math import radians, inf
import pymap3d as pm
@pytest.mark.parametrize(
"geodetic_lat,alt_m,geocentric_lat",
[(0, 0, 0), (90, 0, 90), (-90, 0, -90), (45, 0, 44.80757678), (-45, 0, -44.80757678)],
)
def test_geodetic_alt_geocentric(geodetic_lat, alt_m, geocentric_lat):
assert pm.geod2geoc(geodetic_lat, alt_m) == approx(geocentric_lat)
r = pm.geocentric_radius(geodetic_lat)
assert pm.geoc2geod(geocentric_lat, r) == approx(geodetic_lat)
assert pm.geoc2geod(geocentric_lat, 1e5 + r) == approx(
pm.geocentric2geodetic(geocentric_lat, 1e5 + alt_m)
)
assert pm.geod2geoc(geodetic_lat, 1e5 + alt_m) == approx(
pm.geodetic2geocentric(geodetic_lat, 1e5 + alt_m)
)
@pytest.mark.parametrize(
"geodetic_lat,geocentric_lat",
[(0, 0), (90, 90), (-90, -90), (45, 44.80757678), (-45, -44.80757678)],
)
def test_geodetic_geocentric(geodetic_lat, geocentric_lat):
assert pm.geodetic2geocentric(geodetic_lat, 0) == approx(geocentric_lat)
assert pm.geodetic2geocentric(radians(geodetic_lat), 0, deg=False) == approx(
radians(geocentric_lat)
)
assert pm.geocentric2geodetic(geocentric_lat, 0) == approx(geodetic_lat)
assert pm.geocentric2geodetic(radians(geocentric_lat), 0, deg=False) == approx(
radians(geodetic_lat)
)
def test_numpy_geodetic_geocentric():
pytest.importorskip("numpy")
assert pm.geodetic2geocentric([45, 0], 0) == approx([44.80757678, 0])
assert pm.geocentric2geodetic([44.80757678, 0], 0) == approx([45, 0])
@pytest.mark.parametrize(
"geodetic_lat, isometric_lat",
[(0, 0), (90, inf), (-90, -inf), (45, 50.227466), (-45, -50.227466), (89, 271.275)],
)
def test_geodetic_isometric(geodetic_lat, isometric_lat):
isolat = pm.geodetic2isometric(geodetic_lat)
assert isolat == approx(isometric_lat)
assert isinstance(isolat, float)
assert pm.geodetic2isometric(radians(geodetic_lat), deg=False) == approx(radians(isometric_lat))
assert pm.isometric2geodetic(isometric_lat) == approx(geodetic_lat)
assert pm.isometric2geodetic(radians(isometric_lat), deg=False) == approx(radians(geodetic_lat))
def test_numpy_geodetic_isometric():
pytest.importorskip("numpy")
assert pm.geodetic2isometric([45, 0]) == approx([50.227466, 0])
assert pm.isometric2geodetic([50.227466, 0]) == approx([45, 0])
@pytest.mark.parametrize(
"geodetic_lat,conformal_lat",
[(0, 0), (90, 90), (-90, -90), (45, 44.80768406), (-45, -44.80768406), (89, 88.99327)],
)
def test_geodetic_conformal(geodetic_lat, conformal_lat):
clat = pm.geodetic2conformal(geodetic_lat)
assert clat == approx(conformal_lat)
assert isinstance(clat, float)
assert pm.geodetic2conformal(radians(geodetic_lat), deg=False) == approx(radians(conformal_lat))
assert pm.conformal2geodetic(conformal_lat) == approx(geodetic_lat)
assert pm.conformal2geodetic(radians(conformal_lat), deg=False) == approx(radians(geodetic_lat))
def test_numpy_geodetic_conformal():
pytest.importorskip("numpy")
assert pm.geodetic2conformal([45, 0]) == approx([44.80768406, 0])
assert pm.conformal2geodetic([44.80768406, 0]) == approx([45, 0])
@pytest.mark.parametrize(
"geodetic_lat,rectifying_lat",
[(0, 0), (90, 90), (-90, -90), (45, 44.855682), (-45, -44.855682)],
)
def test_geodetic_rectifying(geodetic_lat, rectifying_lat):
assert pm.geodetic2rectifying(geodetic_lat) == approx(rectifying_lat)
assert pm.geodetic2rectifying(radians(geodetic_lat), deg=False) == approx(
radians(rectifying_lat)
)
assert pm.rectifying2geodetic(rectifying_lat) == approx(geodetic_lat)
assert pm.rectifying2geodetic(radians(rectifying_lat), deg=False) == approx(
radians(geodetic_lat)
)
def test_numpy_geodetic_rectifying():
pytest.importorskip("numpy")
assert pm.geodetic2rectifying([45, 0]) == approx([44.855682, 0])
assert pm.rectifying2geodetic([44.855682, 0]) == approx([45, 0])
@pytest.mark.parametrize(
"geodetic_lat,authalic_lat",
[(0, 0), (90, 90), (-90, -90), (45, 44.87170288), (-45, -44.87170288)],
)
def test_geodetic_authalic(geodetic_lat, authalic_lat):
assert pm.geodetic2authalic(geodetic_lat) == approx(authalic_lat)
assert pm.geodetic2authalic(radians(geodetic_lat), deg=False) == approx(radians(authalic_lat))
assert pm.authalic2geodetic(authalic_lat) == approx(geodetic_lat)
assert pm.authalic2geodetic(radians(authalic_lat), deg=False) == approx(radians(geodetic_lat))
def test_numpy_geodetic_authalic():
pytest.importorskip("numpy")
assert pm.geodetic2authalic([45, 0]) == approx([44.87170288, 0])
assert pm.authalic2geodetic([44.87170288, 0]) == approx([45, 0])
@pytest.mark.parametrize(
"geodetic_lat,parametric_lat",
[(0, 0), (90, 90), (-90, -90), (45, 44.9037878), (-45, -44.9037878)],
)
def test_geodetic_parametric(geodetic_lat, parametric_lat):
assert pm.geodetic2parametric(geodetic_lat) == approx(parametric_lat)
assert pm.geodetic2parametric(radians(geodetic_lat), deg=False) == approx(
radians(parametric_lat)
)
assert pm.parametric2geodetic(parametric_lat) == approx(geodetic_lat)
assert pm.parametric2geodetic(radians(parametric_lat), deg=False) == approx(
radians(geodetic_lat)
)
def test_numpy_geodetic_parametric():
pytest.importorskip("numpy")
assert pm.geodetic2parametric([45, 0]) == approx([44.9037878, 0])
assert pm.parametric2geodetic([44.9037878, 0]) == approx([45, 0])
@pytest.mark.parametrize("lat", [91, -91])
def test_badvals(lat):
# geodetic_isometric is not included on purpose
with pytest.raises(ValueError):
pm.geodetic2geocentric(lat, 0)
with pytest.raises(ValueError):
pm.geocentric2geodetic(lat, 0)
with pytest.raises(ValueError):
pm.geodetic2conformal(lat)
with pytest.raises(ValueError):
pm.conformal2geodetic(lat)
with pytest.raises(ValueError):
pm.geodetic2rectifying(lat)
with pytest.raises(ValueError):
pm.rectifying2geodetic(lat)
with pytest.raises(ValueError):
pm.geodetic2authalic(lat)
with pytest.raises(ValueError):
pm.authalic2geodetic(lat)
with pytest.raises(ValueError):
pm.geodetic2parametric(lat)
with pytest.raises(ValueError):
pm.parametric2geodetic(lat)
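

# An extra sketch (editor's assumption based on standard geodesy, not a claim about
# pymap3d internals): on the ellipsoid surface tan(geocentric) = (1 - f)**2 * tan(geodetic),
# which is why 45 deg geodetic corresponds to ~44.8076 deg geocentric in the tests above.
def test_geocentric_relation_sketch():
    from math import atan, degrees, tan

    f = 1 / 298.257223563  # WGS84 flattening, assumed to match the default ellipsoid
    assert degrees(atan((1 - f) ** 2 * tan(radians(45)))) == approx(44.80757678)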
| 36.346591
| 100
| 0.706425
|
import pytest
from pytest import approx
from math import radians, inf
import pymap3d as pm
@pytest.mark.parametrize(
"geodetic_lat,alt_m,geocentric_lat",
[(0, 0, 0), (90, 0, 90), (-90, 0, -90), (45, 0, 44.80757678), (-45, 0, -44.80757678)],
)
def test_geodetic_alt_geocentric(geodetic_lat, alt_m, geocentric_lat):
assert pm.geod2geoc(geodetic_lat, alt_m) == approx(geocentric_lat)
r = pm.geocentric_radius(geodetic_lat)
assert pm.geoc2geod(geocentric_lat, r) == approx(geodetic_lat)
assert pm.geoc2geod(geocentric_lat, 1e5 + r) == approx(
pm.geocentric2geodetic(geocentric_lat, 1e5 + alt_m)
)
assert pm.geod2geoc(geodetic_lat, 1e5 + alt_m) == approx(
pm.geodetic2geocentric(geodetic_lat, 1e5 + alt_m)
)
@pytest.mark.parametrize(
"geodetic_lat,geocentric_lat",
[(0, 0), (90, 90), (-90, -90), (45, 44.80757678), (-45, -44.80757678)],
)
def test_geodetic_geocentric(geodetic_lat, geocentric_lat):
assert pm.geodetic2geocentric(geodetic_lat, 0) == approx(geocentric_lat)
assert pm.geodetic2geocentric(radians(geodetic_lat), 0, deg=False) == approx(
radians(geocentric_lat)
)
assert pm.geocentric2geodetic(geocentric_lat, 0) == approx(geodetic_lat)
assert pm.geocentric2geodetic(radians(geocentric_lat), 0, deg=False) == approx(
radians(geodetic_lat)
)
def test_numpy_geodetic_geocentric():
pytest.importorskip("numpy")
assert pm.geodetic2geocentric([45, 0], 0) == approx([44.80757678, 0])
assert pm.geocentric2geodetic([44.80757678, 0], 0) == approx([45, 0])
@pytest.mark.parametrize(
"geodetic_lat, isometric_lat",
[(0, 0), (90, inf), (-90, -inf), (45, 50.227466), (-45, -50.227466), (89, 271.275)],
)
def test_geodetic_isometric(geodetic_lat, isometric_lat):
isolat = pm.geodetic2isometric(geodetic_lat)
assert isolat == approx(isometric_lat)
assert isinstance(isolat, float)
assert pm.geodetic2isometric(radians(geodetic_lat), deg=False) == approx(radians(isometric_lat))
assert pm.isometric2geodetic(isometric_lat) == approx(geodetic_lat)
assert pm.isometric2geodetic(radians(isometric_lat), deg=False) == approx(radians(geodetic_lat))
def test_numpy_geodetic_isometric():
pytest.importorskip("numpy")
assert pm.geodetic2isometric([45, 0]) == approx([50.227466, 0])
assert pm.isometric2geodetic([50.227466, 0]) == approx([45, 0])
@pytest.mark.parametrize(
"geodetic_lat,conformal_lat",
[(0, 0), (90, 90), (-90, -90), (45, 44.80768406), (-45, -44.80768406), (89, 88.99327)],
)
def test_geodetic_conformal(geodetic_lat, conformal_lat):
clat = pm.geodetic2conformal(geodetic_lat)
assert clat == approx(conformal_lat)
assert isinstance(clat, float)
assert pm.geodetic2conformal(radians(geodetic_lat), deg=False) == approx(radians(conformal_lat))
assert pm.conformal2geodetic(conformal_lat) == approx(geodetic_lat)
assert pm.conformal2geodetic(radians(conformal_lat), deg=False) == approx(radians(geodetic_lat))
def test_numpy_geodetic_conformal():
pytest.importorskip("numpy")
assert pm.geodetic2conformal([45, 0]) == approx([44.80768406, 0])
assert pm.conformal2geodetic([44.80768406, 0]) == approx([45, 0])
@pytest.mark.parametrize(
"geodetic_lat,rectifying_lat",
[(0, 0), (90, 90), (-90, -90), (45, 44.855682), (-45, -44.855682)],
)
def test_geodetic_rectifying(geodetic_lat, rectifying_lat):
assert pm.geodetic2rectifying(geodetic_lat) == approx(rectifying_lat)
assert pm.geodetic2rectifying(radians(geodetic_lat), deg=False) == approx(
radians(rectifying_lat)
)
assert pm.rectifying2geodetic(rectifying_lat) == approx(geodetic_lat)
assert pm.rectifying2geodetic(radians(rectifying_lat), deg=False) == approx(
radians(geodetic_lat)
)
def test_numpy_geodetic_rectifying():
pytest.importorskip("numpy")
assert pm.geodetic2rectifying([45, 0]) == approx([44.855682, 0])
assert pm.rectifying2geodetic([44.855682, 0]) == approx([45, 0])
@pytest.mark.parametrize(
"geodetic_lat,authalic_lat",
[(0, 0), (90, 90), (-90, -90), (45, 44.87170288), (-45, -44.87170288)],
)
def test_geodetic_authalic(geodetic_lat, authalic_lat):
assert pm.geodetic2authalic(geodetic_lat) == approx(authalic_lat)
assert pm.geodetic2authalic(radians(geodetic_lat), deg=False) == approx(radians(authalic_lat))
assert pm.authalic2geodetic(authalic_lat) == approx(geodetic_lat)
assert pm.authalic2geodetic(radians(authalic_lat), deg=False) == approx(radians(geodetic_lat))
def test_numpy_geodetic_authalic():
pytest.importorskip("numpy")
assert pm.geodetic2authalic([45, 0]) == approx([44.87170288, 0])
assert pm.authalic2geodetic([44.87170288, 0]) == approx([45, 0])
@pytest.mark.parametrize(
"geodetic_lat,parametric_lat",
[(0, 0), (90, 90), (-90, -90), (45, 44.9037878), (-45, -44.9037878)],
)
def test_geodetic_parametric(geodetic_lat, parametric_lat):
assert pm.geodetic2parametric(geodetic_lat) == approx(parametric_lat)
assert pm.geodetic2parametric(radians(geodetic_lat), deg=False) == approx(
radians(parametric_lat)
)
assert pm.parametric2geodetic(parametric_lat) == approx(geodetic_lat)
assert pm.parametric2geodetic(radians(parametric_lat), deg=False) == approx(
radians(geodetic_lat)
)
def test_numpy_geodetic_parametric():
pytest.importorskip("numpy")
assert pm.geodetic2parametric([45, 0]) == approx([44.9037878, 0])
assert pm.parametric2geodetic([44.9037878, 0]) == approx([45, 0])
@pytest.mark.parametrize("lat", [91, -91])
def test_badvals(lat):
with pytest.raises(ValueError):
pm.geodetic2geocentric(lat, 0)
with pytest.raises(ValueError):
pm.geocentric2geodetic(lat, 0)
with pytest.raises(ValueError):
pm.geodetic2conformal(lat)
with pytest.raises(ValueError):
pm.conformal2geodetic(lat)
with pytest.raises(ValueError):
pm.geodetic2rectifying(lat)
with pytest.raises(ValueError):
pm.rectifying2geodetic(lat)
with pytest.raises(ValueError):
pm.geodetic2authalic(lat)
with pytest.raises(ValueError):
pm.authalic2geodetic(lat)
with pytest.raises(ValueError):
pm.geodetic2parametric(lat)
with pytest.raises(ValueError):
pm.parametric2geodetic(lat)
| true
| true
|
f70adf83b566478b793627f9a744332eef59285a
| 649
|
py
|
Python
|
py2c/abc/manager.py
|
timgates42/Py2C
|
b5c9fd238db589f6d7709482901e33ffebb764eb
|
[
"BSD-3-Clause"
] | 149
|
2015-01-03T14:21:20.000Z
|
2022-03-19T06:23:26.000Z
|
py2c/abc/manager.py
|
timgates42/Py2C
|
b5c9fd238db589f6d7709482901e33ffebb764eb
|
[
"BSD-3-Clause"
] | 5
|
2019-06-15T18:52:25.000Z
|
2021-07-18T18:19:56.000Z
|
py2c/abc/manager.py
|
timgates42/Py2C
|
b5c9fd238db589f6d7709482901e33ffebb764eb
|
[
"BSD-3-Clause"
] | 62
|
2015-03-02T08:15:31.000Z
|
2022-03-14T04:02:35.000Z
|
"""An Abstract Base Class for Managers
"""
import abc
from py2c.utils import verify_attribute
__all__ = ["Manager"]
class Manager(object, metaclass=abc.ABCMeta):
"""Base class of all managers
"""
def __init__(self):
super().__init__()
verify_attribute(self, "options", dict)
@abc.abstractmethod # coverage: no partial
def run(self, options, *args, **kwargs):
"""Perform the task that manager is supposed to do.
Arguments:
options
                A dictionary object with the relevant options and their values.
"""
raise NotImplementedError()
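

# A minimal sketch of a concrete manager, assuming only the contract shown above
# (an ``options`` dictionary checked by verify_attribute plus an implemented
# ``run``); the manager name and its single option are hypothetical.
class EchoManager(Manager):
    """Example manager that merely reports the options it was given."""

    options = {"verbose": False}

    def run(self, options, *args, **kwargs):
        """Report and return the options the manager was invoked with."""
        if options.get("verbose"):
            print("EchoManager options: {}".format(options))
        return options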
| 22.37931
| 73
| 0.617874
|
import abc
from py2c.utils import verify_attribute
__all__ = ["Manager"]
class Manager(object, metaclass=abc.ABCMeta):
def __init__(self):
super().__init__()
verify_attribute(self, "options", dict)
    @abc.abstractmethod
    def run(self, options, *args, **kwargs):
raise NotImplementedError()
| true
| true
|
f70adfb5e0c2020651397d903b579cb71d6d8d6a
| 6,408
|
py
|
Python
|
tsai/data/mixed.py
|
dnth/tsai
|
641d5bb75f3aa75889c00a4bb60d96510b4c5605
|
[
"Apache-2.0"
] | 1
|
2021-12-03T20:44:55.000Z
|
2021-12-03T20:44:55.000Z
|
tsai/data/mixed.py
|
dnth/tsai
|
641d5bb75f3aa75889c00a4bb60d96510b4c5605
|
[
"Apache-2.0"
] | null | null | null |
tsai/data/mixed.py
|
dnth/tsai
|
641d5bb75f3aa75889c00a4bb60d96510b4c5605
|
[
"Apache-2.0"
] | 1
|
2021-11-14T02:58:25.000Z
|
2021-11-14T02:58:25.000Z
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/022_data.mixed.ipynb (unless otherwise specified).
__all__ = ['MixedDataLoader', 'MixedDataLoaders', 'get_mixed_dls']
# Cell
from ..imports import *
# Cell
# This implementation of a mixed dataloader is based on a great implementation created by Zach Mueller in this fastai thread:
# https://forums.fast.ai/t/combining-tabular-images-in-fastai2-and-should-work-with-almost-any-other-type/73197
from packaging import version
from fastai.data.load import _FakeLoader
from torch.utils.data.dataloader import _MultiProcessingDataLoaderIter, _SingleProcessDataLoaderIter, _DatasetKind
_loaders = (_MultiProcessingDataLoaderIter, _SingleProcessDataLoaderIter)
class MixedDataLoader():
def __init__(self, *loaders, path='.', shuffle=False, device=None, bs=None):
"Accepts any number of `DataLoader` and a device"
self.path = path
device = ifnone(device, default_device())
self.device = device
self.c = None
self.d = None
self.bs = ifnone(bs, min([dl.bs for dl in loaders]))
for i, dl in enumerate(loaders): # ensure all dls have the same bs
if hasattr(dl, 'vars'):
self.vars = dl.vars
if hasattr(dl, 'len'):
self.len = dl.len
if hasattr(dl, 'split_idxs'):
self.split_idxs = dl.split_idxs
dl.bs = self.bs
dl.shuffle_fn = self.shuffle_fn
if self.c is None and hasattr(dl, "c"):
self.c = dl.c
if self.d is None and hasattr(dl, "d"):
self.d = dl.d
if i == 0:
self.dataset = dl.dataset
dl.to(device=device)
self.shuffle = shuffle
if not self.shuffle:
self.rng = np.arange(len(self.dataset)).tolist()
self.loaders = loaders
self.count = 0
self.fake_l = _FakeLoader(self, False, 0, 0, 0) if version.parse(
fastai.__version__) >= version.parse("2.1") else _FakeLoader(self, False, 0, 0)
if sum([len(dl.dataset) for dl in loaders]) > 0:
self._get_idxs() # Do not apply on an empty dataset
def new(self, *args, **kwargs):
loaders = [dl.new(*args, **kwargs) for dl in self.loaders]
return type(self)(*loaders, path=self.path, device=self.device)
# def __len__(self): return len(self.loaders[0])
def __len__(self): return self.loaders[0].__len__()
def _get_vals(self, x):
"Checks for duplicates in batches"
idxs, new_x = [], []
for i, o in enumerate(x):
x[i] = o.cpu().numpy().flatten()
for idx, o in enumerate(x):
if not self._arrayisin(o, new_x):
idxs.append(idx)
new_x.append(o)
return idxs
def _get_idxs(self):
"Get `x` and `y` indices for batches of data"
self.n_inps = [dl.n_inp for dl in self.loaders]
self.x_idxs = self._split_idxs(self.n_inps)
# Identify duplicate targets
dl_dict = dict(zip(range(0, len(self.loaders)), self.n_inps))
outs = L([])
for key, n_inp in dl_dict.items():
b = next(iter(self.loaders[key]))
outs += L(b[n_inp:])
self.y_idxs = self._get_vals(outs)
def __iter__(self):
z = zip(*[_loaders[i.fake_l.num_workers == 0](i.fake_l) for i in self.loaders])
for b in z:
inps = []
outs = []
if self.device is not None:
b = to_device(b, self.device)
for batch, dl in zip(b, self.loaders):
if hasattr(dl, 'idxs'): self.idxs = dl.idxs
if hasattr(dl, 'input_idxs'): self.input_idxs = dl.input_idxs
batch = dl.after_batch(batch)
inps += batch[:dl.n_inp]
outs += batch[dl.n_inp:]
inps = tuple([tuple(L(inps)[idx]) if isinstance(idx, list) else inps[idx]
for idx in self.x_idxs]) if len(self.x_idxs) > 1 else tuple(L(outs)[self.x_idxs][0])
outs = tuple(L(outs)[self.y_idxs]) if len(self.y_idxs) > 1 else L(outs)[self.y_idxs][0]
yield inps, outs
def one_batch(self):
"Grab one batch of data"
with self.fake_l.no_multiproc():
res = first(self)
if hasattr(self, 'it'):
delattr(self, 'it')
return res
def shuffle_fn(self, idxs):
"Generate the same idxs for all dls in each batch when shuffled"
if self.count == 0:
self.shuffled_idxs = np.random.permutation(idxs)
# sort each batch
for i in range(len(self.shuffled_idxs)//self.bs + 1):
self.shuffled_idxs[i*self.bs:(i+1)*self.bs] = np.sort(self.shuffled_idxs[i*self.bs:(i+1)*self.bs])
self.count += 1
if self.count == len(self.loaders):
self.count = 0
return self.shuffled_idxs
def show_batch(self):
"Show a batch of data"
for dl in self.loaders:
dl.show_batch()
def to(self, device): self.device = device
def _arrayisin(self, arr, arr_list):
"Checks if `arr` is in `arr_list`"
for a in arr_list:
if np.array_equal(arr, a):
return True
return False
def _split_idxs(self, a):
a_cum = np.array(a).cumsum().tolist()
b = np.arange(sum(a)).tolist()
start = 0
b_ = []
for i, idx in enumerate(range(len(a))):
end = a_cum[i]
b_.append(b[start:end] if end - start > 1 else b[start])
start = end
return b_
class MixedDataLoaders(DataLoaders):
pass
# Cell
def get_mixed_dls(*dls, device=None, shuffle_train=None, shuffle_valid=None, **kwargs):
_mixed_train_dls = []
_mixed_valid_dls = []
for dl in dls:
_mixed_train_dls.append(dl.train)
_mixed_valid_dls.append(dl.valid)
if shuffle_train is None: shuffle_train = dl.train.shuffle
if shuffle_valid is None: shuffle_valid = dl.valid.shuffle
if device is None: device = dl.train.device
mixed_train_dl = MixedDataLoader(*_mixed_train_dls, shuffle=shuffle_train, **kwargs)
mixed_valid_dl = MixedDataLoader(*_mixed_valid_dls, shuffle=shuffle_valid, **kwargs)
mixed_dls = MixedDataLoaders(mixed_train_dl, mixed_valid_dl, device=device)
return mixed_dls
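
# A minimal usage sketch, assuming `ts_dls` and `tab_dls` are fastai DataLoaders
# built over the same samples (e.g. a time-series view and a tabular view); the
# variable names are hypothetical.
#
#     mixed_dls = get_mixed_dls(ts_dls, tab_dls, bs=64)
#     xb, yb = mixed_dls.train.one_batch()  # inputs grouped per dataloader, shared target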
| 38.836364
| 125
| 0.593945
|
__all__ = ['MixedDataLoader', 'MixedDataLoaders', 'get_mixed_dls']
from ..imports import *
from packaging import version
from fastai.data.load import _FakeLoader
from torch.utils.data.dataloader import _MultiProcessingDataLoaderIter, _SingleProcessDataLoaderIter, _DatasetKind
_loaders = (_MultiProcessingDataLoaderIter, _SingleProcessDataLoaderIter)
class MixedDataLoader():
def __init__(self, *loaders, path='.', shuffle=False, device=None, bs=None):
self.path = path
device = ifnone(device, default_device())
self.device = device
self.c = None
self.d = None
self.bs = ifnone(bs, min([dl.bs for dl in loaders]))
        for i, dl in enumerate(loaders):
            if hasattr(dl, 'vars'):
self.vars = dl.vars
if hasattr(dl, 'len'):
self.len = dl.len
if hasattr(dl, 'split_idxs'):
self.split_idxs = dl.split_idxs
dl.bs = self.bs
dl.shuffle_fn = self.shuffle_fn
if self.c is None and hasattr(dl, "c"):
self.c = dl.c
if self.d is None and hasattr(dl, "d"):
self.d = dl.d
if i == 0:
self.dataset = dl.dataset
dl.to(device=device)
self.shuffle = shuffle
if not self.shuffle:
self.rng = np.arange(len(self.dataset)).tolist()
self.loaders = loaders
self.count = 0
self.fake_l = _FakeLoader(self, False, 0, 0, 0) if version.parse(
fastai.__version__) >= version.parse("2.1") else _FakeLoader(self, False, 0, 0)
if sum([len(dl.dataset) for dl in loaders]) > 0:
self._get_idxs()
def new(self, *args, **kwargs):
loaders = [dl.new(*args, **kwargs) for dl in self.loaders]
return type(self)(*loaders, path=self.path, device=self.device)
def __len__(self): return self.loaders[0].__len__()
def _get_vals(self, x):
idxs, new_x = [], []
for i, o in enumerate(x):
x[i] = o.cpu().numpy().flatten()
for idx, o in enumerate(x):
if not self._arrayisin(o, new_x):
idxs.append(idx)
new_x.append(o)
return idxs
def _get_idxs(self):
self.n_inps = [dl.n_inp for dl in self.loaders]
self.x_idxs = self._split_idxs(self.n_inps)
dl_dict = dict(zip(range(0, len(self.loaders)), self.n_inps))
outs = L([])
for key, n_inp in dl_dict.items():
b = next(iter(self.loaders[key]))
outs += L(b[n_inp:])
self.y_idxs = self._get_vals(outs)
def __iter__(self):
z = zip(*[_loaders[i.fake_l.num_workers == 0](i.fake_l) for i in self.loaders])
for b in z:
inps = []
outs = []
if self.device is not None:
b = to_device(b, self.device)
for batch, dl in zip(b, self.loaders):
if hasattr(dl, 'idxs'): self.idxs = dl.idxs
if hasattr(dl, 'input_idxs'): self.input_idxs = dl.input_idxs
batch = dl.after_batch(batch)
inps += batch[:dl.n_inp]
outs += batch[dl.n_inp:]
inps = tuple([tuple(L(inps)[idx]) if isinstance(idx, list) else inps[idx]
for idx in self.x_idxs]) if len(self.x_idxs) > 1 else tuple(L(outs)[self.x_idxs][0])
outs = tuple(L(outs)[self.y_idxs]) if len(self.y_idxs) > 1 else L(outs)[self.y_idxs][0]
yield inps, outs
def one_batch(self):
with self.fake_l.no_multiproc():
res = first(self)
if hasattr(self, 'it'):
delattr(self, 'it')
return res
def shuffle_fn(self, idxs):
if self.count == 0:
self.shuffled_idxs = np.random.permutation(idxs)
for i in range(len(self.shuffled_idxs)//self.bs + 1):
self.shuffled_idxs[i*self.bs:(i+1)*self.bs] = np.sort(self.shuffled_idxs[i*self.bs:(i+1)*self.bs])
self.count += 1
if self.count == len(self.loaders):
self.count = 0
return self.shuffled_idxs
def show_batch(self):
for dl in self.loaders:
dl.show_batch()
def to(self, device): self.device = device
def _arrayisin(self, arr, arr_list):
for a in arr_list:
if np.array_equal(arr, a):
return True
return False
def _split_idxs(self, a):
a_cum = np.array(a).cumsum().tolist()
b = np.arange(sum(a)).tolist()
start = 0
b_ = []
for i, idx in enumerate(range(len(a))):
end = a_cum[i]
b_.append(b[start:end] if end - start > 1 else b[start])
start = end
return b_
class MixedDataLoaders(DataLoaders):
pass
def get_mixed_dls(*dls, device=None, shuffle_train=None, shuffle_valid=None, **kwargs):
_mixed_train_dls = []
_mixed_valid_dls = []
for dl in dls:
_mixed_train_dls.append(dl.train)
_mixed_valid_dls.append(dl.valid)
if shuffle_train is None: shuffle_train = dl.train.shuffle
if shuffle_valid is None: shuffle_valid = dl.valid.shuffle
if device is None: device = dl.train.device
mixed_train_dl = MixedDataLoader(*_mixed_train_dls, shuffle=shuffle_train, **kwargs)
mixed_valid_dl = MixedDataLoader(*_mixed_valid_dls, shuffle=shuffle_valid, **kwargs)
mixed_dls = MixedDataLoaders(mixed_train_dl, mixed_valid_dl, device=device)
return mixed_dls
| true
| true
|
f70adfb91065c384fd4247793ef097d44b87aa12
| 1,838
|
bzl
|
Python
|
deps/prebuilt_protoc_deps.bzl
|
heartless-clown/rules_proto
|
99c0d0c7a00c1df7221afc3331b5d859a02c420f
|
[
"Apache-2.0"
] | 249
|
2018-10-24T21:11:08.000Z
|
2022-03-31T03:28:34.000Z
|
deps/prebuilt_protoc_deps.bzl
|
heartless-clown/rules_proto
|
99c0d0c7a00c1df7221afc3331b5d859a02c420f
|
[
"Apache-2.0"
] | 147
|
2018-12-05T18:58:13.000Z
|
2022-03-26T15:41:07.000Z
|
deps/prebuilt_protoc_deps.bzl
|
heartless-clown/rules_proto
|
99c0d0c7a00c1df7221afc3331b5d859a02c420f
|
[
"Apache-2.0"
] | 126
|
2018-11-20T22:34:48.000Z
|
2022-03-18T13:42:05.000Z
|
"""
GENERATED FILE - DO NOT EDIT (created via @build_stack_rules_proto//cmd/depsgen)
"""
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def _maybe(repo_rule, name, **kwargs):
if name not in native.existing_rules():
repo_rule(name = name, **kwargs)
def prebuilt_protoc_deps():
prebuilt_protoc_linux() # via <TOP>
prebuilt_protoc_osx() # via <TOP>
prebuilt_protoc_windows() # via <TOP>
def prebuilt_protoc_linux():
_maybe(
http_archive,
name = "prebuilt_protoc_linux",
sha256 = "6003de742ea3fcf703cfec1cd4a3380fd143081a2eb0e559065563496af27807",
urls = [
"https://github.com/google/protobuf/releases/download/v3.6.1/protoc-3.6.1-linux-x86_64.zip",
],
build_file_content = """
filegroup(
name = "protoc",
srcs = ["bin/protoc"],
visibility = ["//visibility:public"],
)
""",
)
def prebuilt_protoc_osx():
_maybe(
http_archive,
name = "prebuilt_protoc_osx",
sha256 = "0decc6ce5beed07f8c20361ddeb5ac7666f09cf34572cca530e16814093f9c0c",
urls = [
"https://github.com/google/protobuf/releases/download/v3.6.1/protoc-3.6.1-osx-x86_64.zip",
],
build_file_content = """
filegroup(
name = "protoc",
srcs = ["bin/protoc"],
visibility = ["//visibility:public"],
)
""",
)
def prebuilt_protoc_windows():
_maybe(
http_archive,
name = "prebuilt_protoc_windows",
sha256 = "0decc6ce5beed07f8c20361ddeb5ac7666f09cf34572cca530e16814093f9c0c",
urls = [
"https://github.com/google/protobuf/releases/download/v3.6.1/protoc-3.6.1-win32.zip",
],
build_file_content = """
filegroup(
name = "protoc",
srcs = ["bin/protoc.exe"],
visibility = ["//visibility:public"],
)
""",
)
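
# A minimal WORKSPACE sketch (assumption: this repository is available under the
# name @build_stack_rules_proto and this file keeps its deps/ path):
#
#   load("@build_stack_rules_proto//deps:prebuilt_protoc_deps.bzl", "prebuilt_protoc_deps")
#   prebuilt_protoc_deps()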
| 27.848485
| 104
| 0.632753
|
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def _maybe(repo_rule, name, **kwargs):
if name not in native.existing_rules():
repo_rule(name = name, **kwargs)
def prebuilt_protoc_deps():
    prebuilt_protoc_linux()
    prebuilt_protoc_osx()
    prebuilt_protoc_windows()
def prebuilt_protoc_linux():
_maybe(
http_archive,
name = "prebuilt_protoc_linux",
sha256 = "6003de742ea3fcf703cfec1cd4a3380fd143081a2eb0e559065563496af27807",
urls = [
"https://github.com/google/protobuf/releases/download/v3.6.1/protoc-3.6.1-linux-x86_64.zip",
],
build_file_content = """
filegroup(
name = "protoc",
srcs = ["bin/protoc"],
visibility = ["//visibility:public"],
)
""",
)
def prebuilt_protoc_osx():
_maybe(
http_archive,
name = "prebuilt_protoc_osx",
sha256 = "0decc6ce5beed07f8c20361ddeb5ac7666f09cf34572cca530e16814093f9c0c",
urls = [
"https://github.com/google/protobuf/releases/download/v3.6.1/protoc-3.6.1-osx-x86_64.zip",
],
build_file_content = """
filegroup(
name = "protoc",
srcs = ["bin/protoc"],
visibility = ["//visibility:public"],
)
""",
)
def prebuilt_protoc_windows():
_maybe(
http_archive,
name = "prebuilt_protoc_windows",
sha256 = "0decc6ce5beed07f8c20361ddeb5ac7666f09cf34572cca530e16814093f9c0c",
urls = [
"https://github.com/google/protobuf/releases/download/v3.6.1/protoc-3.6.1-win32.zip",
],
build_file_content = """
filegroup(
name = "protoc",
srcs = ["bin/protoc.exe"],
visibility = ["//visibility:public"],
)
""",
)
| true
| true
|
f70adff244a996e05d8cdfd8c5098172e41ab655
| 30,557
|
py
|
Python
|
trunk/MOPS_Timings.py
|
n5iln/railmops
|
f7d3b446435b31bad8cddf343f18ca7efb9eac10
|
[
"Unlicense"
] | 1
|
2015-03-30T12:10:56.000Z
|
2015-03-30T12:10:56.000Z
|
trunk/MOPS_Timings.py
|
n5iln/railmops
|
f7d3b446435b31bad8cddf343f18ca7efb9eac10
|
[
"Unlicense"
] | null | null | null |
trunk/MOPS_Timings.py
|
n5iln/railmops
|
f7d3b446435b31bad8cddf343f18ca7efb9eac10
|
[
"Unlicense"
] | null | null | null |
'''
Timings Class
Arrival and departure times for all Route Sections on a Route for a particular
schedule, showing the time into a section and the time out of a section.
Model Operations Processing System. Copyright Brian Fairbairn 2009-2010. Licenced under the EUPL.
You may not use this work except in compliance with the Licence. You may obtain a copy of the
Licence at http://ec.europa.eu/idabc/eupl or as attached with this application (see Licence file).
Unless required by applicable law or agreed to in writing, software distributed under the Licence
is distributed on an 'AS IS' basis WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either expressed
or implied. See the Licence governing permissions and limitations under the Licence.
Changes:
15/08/2010 Ver 1 Removed unused variables
Added handling of bad database return codes
'''
import MOPS_Element
class cTimings(MOPS_Element.cElement):
"""Details about Timings. Inherits from ListHandler class.
Timings are contained in fixed-length data records.
Id 10 Automatically generated reference
Section 10 link to Section that timing is for
Schedule 10 Link to Schedule
DepartStation 10 Copied from Route Section.
ArrivalStation 10 Copied from Route Section.
PlannedDepartTime 12 Planned departure time from station
PlannedArriveTime 12 Planned arrival time at station
"""
extract_code = 'select * from timings'
extract_header = 'id|section|schedule|depart_station|arrive_station|planned_depart|planned_arrive\n'
def adtims(self, message):
"""add timings to a section. this is a basic addition process;
other facilities will help copy/duplicate timings. this process is a special
process as, having been given a route, it will prompt for subsequent departure
and arrival times until the route is complete. the process can be abandoned by
entering an x at the input prompt
"""
if self.show_access(message, 'ADTIMS schedule', 'S') != 0:
return
#schedule code -----------------------------------------------------------------------------
schedule, rc = self.extract_field(message, 0, 'SCHEDULE CODE')
if rc > 0:
return
#check it exists
data = (schedule, 'I')
sql = 'select id, direction, route from schedule where schedule = ? and status = ?'
count, dummy = self.db_read(sql, data)
if count < 0:
return
if count == 0:
print('* SCHEDULE CODE DOES NOT EXIST OR NOT IN INACTIVE STATUS')
return
print('SCHEDULE ENTRY MODE: ENTER TIME HHMM OR <X> TO QUIT')
data = (schedule,)
sql = 'select id, section, depart_station, arrive_station from timings ' +\
'where schedule = ? order by id'
count, ds_timings = self.db_read(sql, data)
if count < 0:
return
last_time = '0000'
for timing_row in ds_timings:
#build the input prompt strings
depart_station = timing_row[2]
arrive_station = timing_row[3]
t2 = (depart_station,)
sql = 'select short_name from station where station = ?'
count, ds_departs = self.db_read(sql, t2)
if count < 0:
return
for station_row in ds_departs:
depart_name = station_row[0]
t2 = (arrive_station,)
sql = 'select short_name from station where station = ?'
count, ds_arrives = self.db_read(sql, t2)
if count < 0:
return
for station_row in ds_arrives:
arrive_name = station_row[0]
#get the departing time
re_enter = True
while re_enter:
new_time = raw_input('TIME DEPARTING ' + depart_station + ' ' + depart_name + ' >')
if new_time == 'x':
print('EXITING INPUT OF TIMINGS FOR SCHEDULE')
return
if self.validate_time(new_time, last_time) == 0:
departure_time = new_time
last_time = new_time
re_enter = False
#get the arriving time
re_enter = True
while re_enter:
new_time = raw_input('TIME ARRIVING ' + arrive_station + ' ' + arrive_name + ' >')
if new_time == 'x':
print('EXITING INPUT OF TIMINGS FOR SCHEDULE')
return
if self.validate_time(new_time, last_time) == 0:
arrival_time = new_time
last_time = new_time
re_enter = False
data = (departure_time, arrival_time, timing_row[0])
sql = 'update timings set planned_depart = ?, planned_arrive = ? where id = ?'
if self.db_update(sql, data) != 0:
return
print('UPDATE OF SCHEDULE TIMINGS FOR ' + schedule + ' COMPLETED')
return
def chtims(self, message):
"""allows changes to the timings of an individual section. This routine can also
be used for batch loading times from a file. Enter the route, section and depart
and arrive times. note that there is no validation on timings on previous or
following sections, only within the section itself.
"""
if self.show_access(message, 'CHTIMS schedule;section;depart;arrive', 'S') != 0:
return
#schedule code -----------------------------------------------------------------------------
schedule, rc = self.extract_field(message, 0, 'SCHEDULE CODE')
if rc > 0:
return
#read the database
data = (schedule, 'I')
sql = 'select id from schedule where schedule = ? and status = ?'
count, dummy = self.db_read(sql, data)
if count < 0:
return
if count == 0:
print('* SCHEDULE DOES NOT EXIST OR IS ACTIVE AND CANNOT BE AMENDED')
return
#section code-------------------------------------------------------------------------------
section, rc = self.extract_field(message, 1, 'SECTION CODE')
if rc > 0:
return
#read the database
data = (schedule, section)
sql = 'select depart_station, arrive_station, id from timings ' +\
'where schedule = ? and section = ?'
count, ds_sections = self.db_read(sql, data)
if count < 0:
return
if count == 0:
print('* SCHEDULE/SECTION DOES NOT EXIST')
return
for row in ds_sections:
departing = row[0]
arriving = row[1]
timings_id = row[2]
#depart time -----------------------------------------------------------------
depart_time, rc = self.extract_field(message, 2, 'DEPARTURE TIME')
if rc > 0:
return
if len(depart_time) != 4:
print('* TIME MUST BE ENTERED IN FORMAT HHMM')
return
hours = int(depart_time[0:2])
if hours < 0 or hours > 23:
print('* HOURS MUST BE ENTERED IN RANGE 00-23')
return
minutes = int(depart_time[2:4])
if minutes < 0 or minutes > 59:
print('* MINUTES MUST BE ENTERED IN RANGE 00-59')
return
#arrival time -----------------------------------------------------------------
arrive_time, rc = self.extract_field(message, 3, 'ARRIVAL TIME')
if rc > 0:
return
if self.validate_time(arrive_time, depart_time) != 0:
return
#carry out the update and report ----------------------------------------------
data = (depart_time, arrive_time, timings_id)
sql = 'update timings set planned_depart = ?, planned_arrive = ? where id = ?'
if self.db_update(sql, data) != 0:
return
print('SCHEDULE TIMINGS CHANGED FOR:' + schedule, departing + ':' + depart_time + arriving + ':' + arrive_time)
return
def validate_time(self, hhmm, prev_time):
"""internal routine to validate a given time to make sure it corresponds
to an hhmm format. if a previous_time is entered then it makes sure that the
        new time is later, unless the previous time is after 2100 (9pm) and the new time is
        earlier than 0300 (3am), in which case a new day is assumed
"""
if len(hhmm) != 4:
print('* TIME MUST BE ENTERED IN FORMAT HHMM')
return 1
try:
hours = int(hhmm[0:2])
if hours < 0 or hours > 23:
print('* HOURS MUST BE ENTERED IN RANGE 00-23')
return 2
minutes = int(hhmm[2:4])
if minutes < 0 or minutes > 59:
print('* MINUTES MUST BE ENTERED IN RANGE 00-59')
return 3
except:
print('* TIME MUST BE ENTERED IN MINUTES AND HOURS')
return 5
if prev_time > '2100':
if hhmm < '0300':
return 0
if hhmm < prev_time:
            print('* NEW TIME MUST BE LATER THAN PREVIOUS TIME')
return 4
return 0
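
    # Worked examples of the rules above (times are HHMM strings):
    #   validate_time('0930', '0900') -> 0 (accepted: later the same day)
    #   validate_time('0830', '0900') -> 4 (rejected: earlier than the previous time)
    #   validate_time('0230', '2130') -> 0 (accepted: previous after 2100, new before 0300, so next day)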
def timing(self, message):
"""Lists times and associated information for a schedule, including station type,
instructions
"""
if self.show_access(message, 'TIMING schedule', 'R') != 0:
return
#schedule code -----------------------------------------------------------------------------
schedule, rc = self.extract_field(message, 0, 'SCHEDULE CODE')
if rc > 0:
return
#get the schedule detail to display
data = (schedule,)
sql = 'select name, direction, status, route, run_days from schedule where schedule = ?'
count, ds_schedules = self.db_read(sql, data)
if count < 0:
return
if count == 0:
print('NO SCHEDULE TO DISPLAY')
return
else:
for row in ds_schedules:
schedule_name = row[0]
schedule_dirn = row[1]
schedule_stat = row[2]
schedule_route = row[3]
schedule_days = row[4]
data = (schedule_route,)
sql = 'select default_direction from route where route = ?'
count, ds_routes = self.db_read(sql, data)
if count < 0:
return
for row in ds_routes:
default_direction = row[0]
if schedule_dirn == 'N':
direction = 'NORTH'
elif schedule_dirn == 'S':
direction = 'SOUTH'
elif schedule_dirn == 'E':
direction = 'EAST'
elif schedule_dirn == 'W':
direction = 'WEST'
elif schedule_dirn == 'U':
direction = 'UP'
elif schedule_dirn == 'D':
direction = 'DOWN'
else:
direction = 'NOT KNOWN'
if schedule_stat == 'I':
status = 'INACTIVE'
elif schedule_stat == 'A':
status = 'ACTIVE'
elif schedule_stat == 'R':
status = 'RUNNING'
else:
status = 'NOT KNOWN'
rundays = ''
if schedule_days[0:1] == '1':
rundays = ' MON'
if schedule_days[1:2] == '2':
rundays = rundays + ' TUE'
if schedule_days[2:3] == '3':
rundays = rundays + ' WED'
if schedule_days[3:4] == '4':
rundays = rundays + ' THU'
if schedule_days[4:5] == '5':
rundays = rundays + ' FRI'
if schedule_days[5:6] == '6':
rundays = rundays + ' SAT'
if schedule_days[6:7] == '7':
rundays = rundays + ' SUN'
if schedule_days[7:8] == '8':
rundays = rundays + ' HOL'
print('SCHEDULE:', schedule, schedule_name,' (SCHEDULE STATUS:' + status + ')')
print('DIRECTION:',direction, ' RUNS:', rundays)
data = (schedule,)
sql = 'select instruction from instructions where schedule = ?'
count, ds_instructions = self.db_read(sql, data)
for row in ds_instructions:
print(' - ', row[0])
data = (schedule_route,)
sql = 'select instruction from instructions where route = ?'
count, ds_instructions = self.db_read(sql, data)
for row in ds_instructions:
print(' - ', row[0])
print(' ' )
# build the column titles ------------------------------------------
titles = self.x_field('STATION===', self.staxsize) + ' ' + \
self.x_field('NAME====', 8) + ' ' +\
self.x_field('TYPE======', self.statsize) + ' ' +\
self.x_field('=ARR', 4) + ' ' +\
self.x_field('=DEP', 4) + ' ' +\
self.x_field('INSTRUCTIONS =========================', 40)
data = (schedule,)
if default_direction == schedule_dirn:
sql = 'select id, section, depart_station, arrive_station, planned_depart, ' +\
'planned_arrive from timings where schedule = ? order by section'
else:
sql = 'select id, section, depart_station, arrive_station, planned_depart, ' +\
'planned_arrive from timings where schedule = ? order by section DESC'
timing_count, ds_timings = self.db_read(sql, data)
        if timing_count < 0:
return
#report the extracted data -----------------------------------------
line_count = 0
arrival = ' '
depart_station = ''
arrive_station = ''
arrive_name = ''
depart_name = ''
station_type = ''
planned_arrive = ''
dummy = ''
instructions = ''
for row in ds_timings:
depart_station = row[2]
arrive_station = row[3]
planned_depart = row[4]
planned_arrive = row[5]
if line_count == 0:
print(titles)
#get the name for the departure station
data = (depart_station,)
sql = 'select short_name, stationtype from station where station = ?'
stax_count, ds_departs = self.db_read(sql, data)
if stax_count < 0:
return
for stax_row in ds_departs:
depart_name = stax_row[0]
station_type = stax_row[1]
#get any station instructions - just print the first one
sql = 'select instruction from instructions where station = ? limit 1'
count, ds_instructions = self.db_read(sql, data)
instructions = ' '
for inst_row in ds_instructions:
instructions = inst_row[0]
if not(planned_depart.strip() == '' and planned_arrive.strip() == ''):
print(self.x_field(row[2], self.staxsize) + " " +
self.x_field(depart_name, 8) + " " +
self.x_field(station_type, self.statsize) + " " +
self.x_field(arrival, 4) + " " +
self.x_field(row[4], 4) + " " +
self.x_field(instructions, 40))
arrival = planned_arrive
#get any station instructions - now print the rest
sql = 'select instruction from instructions where station = ?'
count, ds_instructions = self.db_read(sql, data)
line = 0
dummy = ' '
for inst_row in ds_instructions:
line = line + 1
instructions = inst_row[0]
if line != 1:
print(self.x_field(dummy, self.staxsize) + " " +
self.x_field(dummy, 8) + " " +
self.x_field(dummy, self.statsize) + " " +
self.x_field(dummy, 4) + " " +
self.x_field(dummy, 4) + " " +
self.x_field(instructions, 40))
line_count = line_count + 1
if line_count > 20:
line_count = 0
reply = raw_input('+')
if reply == 'x':
break
#get the long name for the arrive station (for the last entry)
sql = 'select short_name, stationtype from station where station = ?'
data = (arrive_station,)
stax_count, ds_arrives = self.db_read(sql, data)
for stax_row in ds_arrives:
arrive_name = stax_row[0]
station_type = stax_row[1]
#get any station instructions - just print the first one
sql = 'select instruction from instructions where station = ? limit 1'
instructions = ' '
count, ds_instructions = self.db_read(sql, data)
for row in ds_instructions:
instructions = row[0]
print(self.x_field(arrive_station, self.staxsize) + " " +
self.x_field(arrive_name, 8) + " " +
self.x_field(station_type, self.statsize) + " " +
self.x_field(planned_arrive, 4) + " " +
self.x_field(dummy, 4) + " " +
self.x_field(instructions, 40))
#get any station instructions - now print the rest
sql = 'select instruction from instructions where station = ?'
count, ds_instructions = self.db_read(sql, data)
line = 0
for row in ds_instructions:
line = line + 1
instructions = row[0]
if line != 1:
print(self.x_field(dummy, self.staxsize) + " " +
self.x_field(dummy, 8) + " " +
self.x_field(dummy, self.statsize) + " " +
self.x_field(dummy, 4) + " " +
self.x_field(dummy, 4) + " " +
self.x_field(instructions, 40))
print(' ** END OF DATA: ' + str(timing_count) + ' RECORDS DISPLAYED **')
return
def ldtims(self, message):
"""Gives detail of Timing records for checking timetables vs routes
"""
if self.show_access(message, 'LDTIMS schedule', 'R') != 0:
return
#schedule code -----------------------------------------------------------------------------
schedule, rc = self.extract_field(message, 0, 'SCHEDULE CODE')
if rc > 0:
return
#get the schedule detail to display
data = (schedule,)
sql = 'select name, direction, status, route from schedule where schedule = ?'
count, ds_schedules = self.db_read(sql, data)
if count < 0:
return
if count == 0:
print('NO SCHEDULE TO DISPLAY')
return
else:
for row in ds_schedules:
schedule_name = row[0]
schedule_dirn = row[1]
schedule_stat = row[2]
if schedule_dirn == 'N':
direction = 'NORTH'
elif schedule_dirn == 'S':
direction = 'SOUTH'
elif schedule_dirn == 'E':
direction = 'EAST'
            elif schedule_dirn == 'W':
direction = 'WEST'
elif schedule_dirn == 'U':
direction = 'UP'
elif schedule_dirn == 'D':
direction = 'DOWN'
else:
direction = 'NOT KNOWN'
if schedule_stat == 'I':
status = 'INACTIVE'
elif schedule_stat == 'A':
status = 'ACTIVE'
elif schedule_stat == 'R':
status = 'RUNNING'
else:
status = 'NOT KNOWN'
print('SCHEDULE: ', schedule, schedule_name,' (SCHEDULE STATUS: ' + status + ')')
print(' DIRECTION:',direction)
# build the column titles ------------------------------------------
titles = self.x_field('SECTION===', 10) + ' ' + \
self.x_field('DEPARTS===', self.staxsize) + ' ' +\
self.x_field('=DEP', 4) + ' ' +\
self.x_field('ARRIVES===', self.staxsize) + ' ' +\
self.x_field('=ARR', 4)
data = (schedule,)
sql = 'select id, section, depart_station, arrive_station, planned_depart, ' +\
'planned_arrive from timings where schedule = ? order by section'
timing_count, ds_timings = self.db_read(sql, data)
        if timing_count < 0:
return
#report the extracted data -----------------------------------------
line_count = 0
for row in ds_timings:
section = row[1]
depart_station = row[2]
arrive_station = row[3]
planned_depart = row[4]
planned_arrive = row[5]
if line_count == 0:
print(titles)
print(self.x_field(section , 10) + " " +
self.x_field(depart_station, self.staxsize) + " " +
self.x_field(planned_depart, 4) + " " +
self.x_field(arrive_station, self.staxsize) + " " +
self.x_field(planned_arrive, 4))
line_count = line_count + 1
if line_count > 20:
line_count = 0
reply = raw_input('+')
if reply == 'x':
break
print(' ** END OF DATA: ' + str(timing_count) + ' RECORDS DISPLAYED **')
return
def prtims(self, message, Params):
"""Prints times and associated information for a schedule, including station type,
instructions
"""
if self.show_access(message, 'PRTIMS schedule', 'R') != 0:
return
#schedule code -----------------------------------------------------------------------------
schedule, rc = self.extract_field(message, 0, 'SCHEDULE CODE')
if rc > 0:
return
self.temp = {}
i = 0
#get the schedule detail to display
data = (schedule,)
sql = 'select name, direction, status, route from schedule where schedule = ?'
count, ds_schedules = self.db_read(sql, data)
if count < 0:
return
if count == 0:
print('NO SCHEDULE TO DISPLAY')
return
else:
for row in ds_schedules:
schedule_name = row[0]
schedule_dirn = row[1]
schedule_stat = row[2]
schedule_route = row[3]
if schedule_dirn == 'N':
direction = 'NORTH'
elif schedule_dirn == 'S':
direction = 'SOUTH'
elif schedule_dirn == 'E':
direction = 'EAST'
                elif schedule_dirn == 'W':
direction = 'WEST'
elif schedule_dirn == 'U':
direction = 'UP'
elif schedule_dirn == 'D':
direction = 'DOWN'
else:
direction = 'NOT KNOWN'
if schedule_stat == 'I':
status = 'INACTIVE'
elif schedule_stat == 'A':
status = 'ACTIVE'
elif schedule_stat == 'R':
status = 'RUNNING'
else:
status = 'NOT KNOWN'
print_line = ('SCHEDULE: ' + schedule + ' ' + schedule_name +' (SCHEDULE STATUS:' + status + ')')
self.temp[i]= print_line
i = i + 1
print_line = (' DIRECTION: ' + direction)
self.temp[i]= print_line
i = i + 1
t = (schedule,)
sql = 'select instruction from instructions where schedule = ?'
count, ds_instructions = self.db_read(sql, t)
for row in ds_instructions:
print_line = (' - ' + row[0])
self.temp[i]= print_line
i = i + 1
t = (schedule_route,)
sql = 'select instruction from instructions where route = ?'
count, ds_instructions = self.db_read(sql, t)
for row in ds_instructions:
print_line = (' - ' + row[0])
self.temp[i]= print_line
i = i + 1
print_line = (' ' )
self.temp[i]= print_line
i = i + 1
# build the column titles ------------------------------------------
titles = self.x_field('STATION===', self.staxsize) + ' ' + \
self.x_field('NAME====', 8) + ' ' +\
self.x_field('TYPE======', self.statsize) + ' ' +\
self.x_field('=ARR', 4) + ' ' +\
self.x_field('=DEP', 4) + ' ' +\
self.x_field('INSTRUCTIONS =========================', 40)
data = (schedule,)
sql = 'select id, section, depart_station, arrive_station, planned_depart, ' +\
'planned_arrive from timings where schedule = ? order by id'
timing_count, ds_timings = self.db_read(sql, data)
if timing_count < 0:
return
#report the extracted data -----------------------------------------
arrival = ' '
for row in ds_timings:
depart_station = row[2]
arrive_station = row[3]
planned_depart = row[4]
planned_arrive = row[5]
#get the name for the departure station
data = (depart_station,)
sql = 'select short_name, stationtype from station where station = ?'
stax_count, ds_departs = self.db_read(sql, data)
if stax_count < 0:
return
for stax_row in ds_departs:
depart_name = stax_row[0]
station_type = stax_row[1]
#get any station instructions - just print the first one
sql = 'select instruction from instructions where station = ? limit 1'
count, ds_instructions = self.db_read(sql, data)
instructions = ' '
for inst_row in ds_instructions:
instructions = inst_row[0]
if not(planned_depart.strip() == '' and planned_arrive.strip() == ''):
print_line = (self.x_field(depart_station, self.staxsize) + ' ' +
self.x_field(depart_name, 8) + ' ' +
self.x_field(station_type, self.statsize) + ' ' +
self.x_field(arrival, 4) + ' ' +
self.x_field(planned_depart, 4) + ' ' +
self.x_field(instructions, 40))
arrival = planned_arrive
self.temp[i]= print_line
i = i + 1
#get any station instructions - now print the rest
sql = 'select instruction from instructions where station = ?'
count, ds_instructions = self.db_read(sql, data)
line = 0
dummy = ' '
for inst_row in ds_instructions:
line = line + 1
instructions = inst_row[0]
if line != 1:
print_line = (self.x_field(dummy, self.staxsize) + ' ' +
self.x_field(dummy, 8) + ' ' +
self.x_field(dummy, self.statsize) + ' ' +
self.x_field(dummy, 4) + ' ' +
self.x_field(dummy, 4) + ' ' +
self.x_field(instructions, 40))
self.temp[i]= print_line
i = i + 1
#get the long name for the arrive station (for the last entry)
sql = 'select short_name, stationtype from station where station = ?'
data = (arrive_station,)
stax_count, ds_arrives = self.db_read(sql, data)
for stax_row in ds_arrives:
arrive_name = stax_row[0]
station_type = stax_row[1]
#get any station instructions - just print the first one
sql = 'select instruction from instructions where station = ? limit 1'
instructions = ' '
count, ds_instructions = self.db_read(sql, data)
for row in ds_instructions:
instructions = row[0]
print_line = (self.x_field(arrive_station, self.staxsize) + ' ' +
self.x_field(arrive_name, 8) + ' ' +
self.x_field(station_type, self.statsize) + ' ' +
self.x_field(planned_arrive, 4) + ' ' +
self.x_field(dummy, 4) + ' ' +
self.x_field(instructions, 40))
self.temp[i]= print_line
i = i + 1
#get any station instructions - now print the rest
sql = 'select instruction from instructions where station = ?'
count, ds_instructions = self.db_read(sql, data)
line = 0
for row in ds_instructions:
line = line + 1
instructions = row[0]
if line != 1:
print_line = (self.x_field(dummy, self.staxsize) + ' ' +
self.x_field(dummy, 8) + ' ' +
self.x_field(dummy, self.statsize) + ' ' +
self.x_field(dummy, 4) + ' ' +
self.x_field(dummy, 4) + ' ' +
self.x_field(instructions, 40))
self.temp[i]= print_line
i = i + 1
#report the extracted data ---------------------------------------
self.print_report (titles = titles,
report_id = 'PRTIMS',
report_name = 'TIMETABLE FOR ' + schedule,
Params = Params)
return
| 40.526525
| 120
| 0.491148
|
import MOPS_Element
class cTimings(MOPS_Element.cElement):
extract_code = 'select * from timings'
extract_header = 'id|section|schedule|depart_station|arrive_station|planned_depart|planned_arrive\n'
def adtims(self, message):
if self.show_access(message, 'ADTIMS schedule', 'S') != 0:
return
schedule, rc = self.extract_field(message, 0, 'SCHEDULE CODE')
if rc > 0:
return
data = (schedule, 'I')
sql = 'select id, direction, route from schedule where schedule = ? and status = ?'
count, dummy = self.db_read(sql, data)
if count < 0:
return
if count == 0:
print('* SCHEDULE CODE DOES NOT EXIST OR NOT IN INACTIVE STATUS')
return
print('SCHEDULE ENTRY MODE: ENTER TIME HHMM OR <X> TO QUIT')
data = (schedule,)
sql = 'select id, section, depart_station, arrive_station from timings ' +\
'where schedule = ? order by id'
count, ds_timings = self.db_read(sql, data)
if count < 0:
return
last_time = '0000'
for timing_row in ds_timings:
depart_station = timing_row[2]
arrive_station = timing_row[3]
t2 = (depart_station,)
sql = 'select short_name from station where station = ?'
count, ds_departs = self.db_read(sql, t2)
if count < 0:
return
for station_row in ds_departs:
depart_name = station_row[0]
t2 = (arrive_station,)
sql = 'select short_name from station where station = ?'
count, ds_arrives = self.db_read(sql, t2)
if count < 0:
return
for station_row in ds_arrives:
arrive_name = station_row[0]
re_enter = True
while re_enter:
new_time = raw_input('TIME DEPARTING ' + depart_station + ' ' + depart_name + ' >')
if new_time == 'x':
print('EXITING INPUT OF TIMINGS FOR SCHEDULE')
return
if self.validate_time(new_time, last_time) == 0:
departure_time = new_time
last_time = new_time
re_enter = False
re_enter = True
while re_enter:
new_time = raw_input('TIME ARRIVING ' + arrive_station + ' ' + arrive_name + ' >')
if new_time == 'x':
print('EXITING INPUT OF TIMINGS FOR SCHEDULE')
return
if self.validate_time(new_time, last_time) == 0:
arrival_time = new_time
last_time = new_time
re_enter = False
data = (departure_time, arrival_time, timing_row[0])
sql = 'update timings set planned_depart = ?, planned_arrive = ? where id = ?'
if self.db_update(sql, data) != 0:
return
print('UPDATE OF SCHEDULE TIMINGS FOR ' + schedule + ' COMPLETED')
return
def chtims(self, message):
if self.show_access(message, 'CHTIMS schedule;section;depart;arrive', 'S') != 0:
return
schedule, rc = self.extract_field(message, 0, 'SCHEDULE CODE')
if rc > 0:
return
data = (schedule, 'I')
sql = 'select id from schedule where schedule = ? and status = ?'
count, dummy = self.db_read(sql, data)
if count < 0:
return
if count == 0:
print('* SCHEDULE DOES NOT EXIST OR IS ACTIVE AND CANNOT BE AMENDED')
return
section, rc = self.extract_field(message, 1, 'SECTION CODE')
if rc > 0:
return
data = (schedule, section)
sql = 'select depart_station, arrive_station, id from timings ' +\
'where schedule = ? and section = ?'
count, ds_sections = self.db_read(sql, data)
if count < 0:
return
if count == 0:
print('* SCHEDULE/SECTION DOES NOT EXIST')
return
for row in ds_sections:
departing = row[0]
arriving = row[1]
timings_id = row[2]
depart_time, rc = self.extract_field(message, 2, 'DEPARTURE TIME')
if rc > 0:
return
if len(depart_time) != 4:
print('* TIME MUST BE ENTERED IN FORMAT HHMM')
return
hours = int(depart_time[0:2])
if hours < 0 or hours > 23:
print('* HOURS MUST BE ENTERED IN RANGE 00-23')
return
minutes = int(depart_time[2:4])
if minutes < 0 or minutes > 59:
print('* MINUTES MUST BE ENTERED IN RANGE 00-59')
return
arrive_time, rc = self.extract_field(message, 3, 'ARRIVAL TIME')
if rc > 0:
return
if self.validate_time(arrive_time, depart_time) != 0:
return
data = (depart_time, arrive_time, timings_id)
sql = 'update timings set planned_depart = ?, planned_arrive = ? where id = ?'
if self.db_update(sql, data) != 0:
return
        print('SCHEDULE TIMINGS CHANGED FOR: ' + schedule, departing + ':' + depart_time + ' ' + arriving + ':' + arrive_time)
return
def validate_time(self, hhmm, prev_time):
if len(hhmm) != 4:
print('* TIME MUST BE ENTERED IN FORMAT HHMM')
return 1
try:
hours = int(hhmm[0:2])
if hours < 0 or hours > 23:
print('* HOURS MUST BE ENTERED IN RANGE 00-23')
return 2
minutes = int(hhmm[2:4])
if minutes < 0 or minutes > 59:
print('* MINUTES MUST BE ENTERED IN RANGE 00-59')
return 3
except:
print('* TIME MUST BE ENTERED IN MINUTES AND HOURS')
return 5
if prev_time > '2100':
if hhmm < '0300':
return 0
if hhmm < prev_time:
            print('* NEW TIME MUST BE LATER THAN PREVIOUS TIME')
return 4
return 0
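    # Worked examples of validate_time (hypothetical values, shown only to
    # illustrate the rules implemented above):
    #   validate_time('0930', '0900') -> 0   (valid, later than previous)
    #   validate_time('930', '0900')  -> 1   (not 4 characters)
    #   validate_time('2515', '0900') -> 2   (hours outside 00-23)
    #   validate_time('0145', '2230') -> 0   (overnight wrap: previous after
    #                                         2100, new before 0300 accepted)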
def timing(self, message):
if self.show_access(message, 'TIMING schedule', 'R') != 0:
return
schedule, rc = self.extract_field(message, 0, 'SCHEDULE CODE')
if rc > 0:
return
data = (schedule,)
sql = 'select name, direction, status, route, run_days from schedule where schedule = ?'
count, ds_schedules = self.db_read(sql, data)
if count < 0:
return
if count == 0:
print('NO SCHEDULE TO DISPLAY')
return
else:
for row in ds_schedules:
schedule_name = row[0]
schedule_dirn = row[1]
schedule_stat = row[2]
schedule_route = row[3]
schedule_days = row[4]
data = (schedule_route,)
sql = 'select default_direction from route where route = ?'
count, ds_routes = self.db_read(sql, data)
if count < 0:
return
for row in ds_routes:
default_direction = row[0]
if schedule_dirn == 'N':
direction = 'NORTH'
elif schedule_dirn == 'S':
direction = 'SOUTH'
elif schedule_dirn == 'E':
direction = 'EAST'
elif schedule_dirn == 'W':
direction = 'WEST'
elif schedule_dirn == 'U':
direction = 'UP'
elif schedule_dirn == 'D':
direction = 'DOWN'
else:
direction = 'NOT KNOWN'
if schedule_stat == 'I':
status = 'INACTIVE'
elif schedule_stat == 'A':
status = 'ACTIVE'
elif schedule_stat == 'R':
status = 'RUNNING'
else:
status = 'NOT KNOWN'
rundays = ''
if schedule_days[0:1] == '1':
rundays = ' MON'
if schedule_days[1:2] == '2':
rundays = rundays + ' TUE'
if schedule_days[2:3] == '3':
rundays = rundays + ' WED'
if schedule_days[3:4] == '4':
rundays = rundays + ' THU'
if schedule_days[4:5] == '5':
rundays = rundays + ' FRI'
if schedule_days[5:6] == '6':
rundays = rundays + ' SAT'
if schedule_days[6:7] == '7':
rundays = rundays + ' SUN'
if schedule_days[7:8] == '8':
rundays = rundays + ' HOL'
print('SCHEDULE:', schedule, schedule_name,' (SCHEDULE STATUS:' + status + ')')
print('DIRECTION:',direction, ' RUNS:', rundays)
data = (schedule,)
sql = 'select instruction from instructions where schedule = ?'
count, ds_instructions = self.db_read(sql, data)
for row in ds_instructions:
print(' - ', row[0])
data = (schedule_route,)
sql = 'select instruction from instructions where route = ?'
count, ds_instructions = self.db_read(sql, data)
for row in ds_instructions:
print(' - ', row[0])
print(' ' )
titles = self.x_field('STATION===', self.staxsize) + ' ' + \
self.x_field('NAME====', 8) + ' ' +\
self.x_field('TYPE======', self.statsize) + ' ' +\
self.x_field('=ARR', 4) + ' ' +\
self.x_field('=DEP', 4) + ' ' +\
self.x_field('INSTRUCTIONS =========================', 40)
data = (schedule,)
if default_direction == schedule_dirn:
sql = 'select id, section, depart_station, arrive_station, planned_depart, ' +\
'planned_arrive from timings where schedule = ? order by section'
else:
sql = 'select id, section, depart_station, arrive_station, planned_depart, ' +\
'planned_arrive from timings where schedule = ? order by section DESC'
timing_count, ds_timings = self.db_read(sql, data)
        if timing_count < 0:
return
line_count = 0
arrival = ' '
depart_station = ''
arrive_station = ''
arrive_name = ''
depart_name = ''
station_type = ''
planned_arrive = ''
dummy = ''
instructions = ''
for row in ds_timings:
depart_station = row[2]
arrive_station = row[3]
planned_depart = row[4]
planned_arrive = row[5]
if line_count == 0:
print(titles)
data = (depart_station,)
sql = 'select short_name, stationtype from station where station = ?'
stax_count, ds_departs = self.db_read(sql, data)
if stax_count < 0:
return
for stax_row in ds_departs:
depart_name = stax_row[0]
station_type = stax_row[1]
sql = 'select instruction from instructions where station = ? limit 1'
count, ds_instructions = self.db_read(sql, data)
instructions = ' '
for inst_row in ds_instructions:
instructions = inst_row[0]
if not(planned_depart.strip() == '' and planned_arrive.strip() == ''):
print(self.x_field(row[2], self.staxsize) + " " +
self.x_field(depart_name, 8) + " " +
self.x_field(station_type, self.statsize) + " " +
self.x_field(arrival, 4) + " " +
self.x_field(row[4], 4) + " " +
self.x_field(instructions, 40))
arrival = planned_arrive
sql = 'select instruction from instructions where station = ?'
count, ds_instructions = self.db_read(sql, data)
line = 0
dummy = ' '
for inst_row in ds_instructions:
line = line + 1
instructions = inst_row[0]
if line != 1:
print(self.x_field(dummy, self.staxsize) + " " +
self.x_field(dummy, 8) + " " +
self.x_field(dummy, self.statsize) + " " +
self.x_field(dummy, 4) + " " +
self.x_field(dummy, 4) + " " +
self.x_field(instructions, 40))
line_count = line_count + 1
if line_count > 20:
line_count = 0
reply = raw_input('+')
if reply == 'x':
break
sql = 'select short_name, stationtype from station where station = ?'
data = (arrive_station,)
stax_count, ds_arrives = self.db_read(sql, data)
for stax_row in ds_arrives:
arrive_name = stax_row[0]
station_type = stax_row[1]
sql = 'select instruction from instructions where station = ? limit 1'
instructions = ' '
count, ds_instructions = self.db_read(sql, data)
for row in ds_instructions:
instructions = row[0]
print(self.x_field(arrive_station, self.staxsize) + " " +
self.x_field(arrive_name, 8) + " " +
self.x_field(station_type, self.statsize) + " " +
self.x_field(planned_arrive, 4) + " " +
self.x_field(dummy, 4) + " " +
self.x_field(instructions, 40))
sql = 'select instruction from instructions where station = ?'
count, ds_instructions = self.db_read(sql, data)
line = 0
for row in ds_instructions:
line = line + 1
instructions = row[0]
if line != 1:
print(self.x_field(dummy, self.staxsize) + " " +
self.x_field(dummy, 8) + " " +
self.x_field(dummy, self.statsize) + " " +
self.x_field(dummy, 4) + " " +
self.x_field(dummy, 4) + " " +
self.x_field(instructions, 40))
print(' ** END OF DATA: ' + str(timing_count) + ' RECORDS DISPLAYED **')
return
def ldtims(self, message):
if self.show_access(message, 'LDTIMS schedule', 'R') != 0:
return
schedule, rc = self.extract_field(message, 0, 'SCHEDULE CODE')
if rc > 0:
return
data = (schedule,)
sql = 'select name, direction, status, route from schedule where schedule = ?'
count, ds_schedules = self.db_read(sql, data)
if count < 0:
return
if count == 0:
print('NO SCHEDULE TO DISPLAY')
return
else:
for row in ds_schedules:
schedule_name = row[0]
schedule_dirn = row[1]
schedule_stat = row[2]
if schedule_dirn == 'N':
direction = 'NORTH'
elif schedule_dirn == 'S':
direction = 'SOUTH'
elif schedule_dirn == 'E':
direction = 'EAST'
                elif schedule_dirn == 'W':
direction = 'WEST'
elif schedule_dirn == 'U':
direction = 'UP'
elif schedule_dirn == 'D':
direction = 'DOWN'
else:
direction = 'NOT KNOWN'
if schedule_stat == 'I':
status = 'INACTIVE'
elif schedule_stat == 'A':
status = 'ACTIVE'
elif schedule_stat == 'R':
status = 'RUNNING'
else:
status = 'NOT KNOWN'
print('SCHEDULE: ', schedule, schedule_name,' (SCHEDULE STATUS: ' + status + ')')
print(' DIRECTION:',direction)
titles = self.x_field('SECTION===', 10) + ' ' + \
self.x_field('DEPARTS===', self.staxsize) + ' ' +\
self.x_field('=DEP', 4) + ' ' +\
self.x_field('ARRIVES===', self.staxsize) + ' ' +\
self.x_field('=ARR', 4)
data = (schedule,)
sql = 'select id, section, depart_station, arrive_station, planned_depart, ' +\
'planned_arrive from timings where schedule = ? order by section'
timing_count, ds_timings = self.db_read(sql, data)
        if timing_count < 0:
return
line_count = 0
for row in ds_timings:
section = row[1]
depart_station = row[2]
arrive_station = row[3]
planned_depart = row[4]
planned_arrive = row[5]
if line_count == 0:
print(titles)
print(self.x_field(section , 10) + " " +
self.x_field(depart_station, self.staxsize) + " " +
self.x_field(planned_depart, 4) + " " +
self.x_field(arrive_station, self.staxsize) + " " +
self.x_field(planned_arrive, 4))
line_count = line_count + 1
if line_count > 20:
line_count = 0
reply = raw_input('+')
if reply == 'x':
break
print(' ** END OF DATA: ' + str(timing_count) + ' RECORDS DISPLAYED **')
return
def prtims(self, message, Params):
if self.show_access(message, 'PRTIMS schedule', 'R') != 0:
return
schedule, rc = self.extract_field(message, 0, 'SCHEDULE CODE')
if rc > 0:
return
self.temp = {}
i = 0
data = (schedule,)
sql = 'select name, direction, status, route from schedule where schedule = ?'
count, ds_schedules = self.db_read(sql, data)
if count < 0:
return
if count == 0:
print('NO SCHEDULE TO DISPLAY')
return
else:
for row in ds_schedules:
schedule_name = row[0]
schedule_dirn = row[1]
schedule_stat = row[2]
schedule_route = row[3]
if schedule_dirn == 'N':
direction = 'NORTH'
elif schedule_dirn == 'S':
direction = 'SOUTH'
elif schedule_dirn == 'E':
direction = 'EAST'
                elif schedule_dirn == 'W':
direction = 'WEST'
elif schedule_dirn == 'U':
direction = 'UP'
elif schedule_dirn == 'D':
direction = 'DOWN'
else:
direction = 'NOT KNOWN'
if schedule_stat == 'I':
status = 'INACTIVE'
elif schedule_stat == 'A':
status = 'ACTIVE'
elif schedule_stat == 'R':
status = 'RUNNING'
else:
status = 'NOT KNOWN'
print_line = ('SCHEDULE: ' + schedule + ' ' + schedule_name +' (SCHEDULE STATUS:' + status + ')')
self.temp[i]= print_line
i = i + 1
print_line = (' DIRECTION: ' + direction)
self.temp[i]= print_line
i = i + 1
t = (schedule,)
sql = 'select instruction from instructions where schedule = ?'
count, ds_instructions = self.db_read(sql, t)
for row in ds_instructions:
print_line = (' - ' + row[0])
self.temp[i]= print_line
i = i + 1
t = (schedule_route,)
sql = 'select instruction from instructions where route = ?'
count, ds_instructions = self.db_read(sql, t)
for row in ds_instructions:
print_line = (' - ' + row[0])
self.temp[i]= print_line
i = i + 1
print_line = (' ' )
self.temp[i]= print_line
i = i + 1
titles = self.x_field('STATION===', self.staxsize) + ' ' + \
self.x_field('NAME====', 8) + ' ' +\
self.x_field('TYPE======', self.statsize) + ' ' +\
self.x_field('=ARR', 4) + ' ' +\
self.x_field('=DEP', 4) + ' ' +\
self.x_field('INSTRUCTIONS =========================', 40)
data = (schedule,)
sql = 'select id, section, depart_station, arrive_station, planned_depart, ' +\
'planned_arrive from timings where schedule = ? order by id'
timing_count, ds_timings = self.db_read(sql, data)
if timing_count < 0:
return
arrival = ' '
for row in ds_timings:
depart_station = row[2]
arrive_station = row[3]
planned_depart = row[4]
planned_arrive = row[5]
data = (depart_station,)
sql = 'select short_name, stationtype from station where station = ?'
stax_count, ds_departs = self.db_read(sql, data)
if stax_count < 0:
return
for stax_row in ds_departs:
depart_name = stax_row[0]
station_type = stax_row[1]
sql = 'select instruction from instructions where station = ? limit 1'
count, ds_instructions = self.db_read(sql, data)
instructions = ' '
for inst_row in ds_instructions:
instructions = inst_row[0]
if not(planned_depart.strip() == '' and planned_arrive.strip() == ''):
print_line = (self.x_field(depart_station, self.staxsize) + ' ' +
self.x_field(depart_name, 8) + ' ' +
self.x_field(station_type, self.statsize) + ' ' +
self.x_field(arrival, 4) + ' ' +
self.x_field(planned_depart, 4) + ' ' +
self.x_field(instructions, 40))
arrival = planned_arrive
self.temp[i]= print_line
i = i + 1
sql = 'select instruction from instructions where station = ?'
count, ds_instructions = self.db_read(sql, data)
line = 0
dummy = ' '
for inst_row in ds_instructions:
line = line + 1
instructions = inst_row[0]
if line != 1:
print_line = (self.x_field(dummy, self.staxsize) + ' ' +
self.x_field(dummy, 8) + ' ' +
self.x_field(dummy, self.statsize) + ' ' +
self.x_field(dummy, 4) + ' ' +
self.x_field(dummy, 4) + ' ' +
self.x_field(instructions, 40))
self.temp[i]= print_line
i = i + 1
sql = 'select short_name, stationtype from station where station = ?'
data = (arrive_station,)
stax_count, ds_arrives = self.db_read(sql, data)
for stax_row in ds_arrives:
arrive_name = stax_row[0]
station_type = stax_row[1]
sql = 'select instruction from instructions where station = ? limit 1'
instructions = ' '
count, ds_instructions = self.db_read(sql, data)
for row in ds_instructions:
instructions = row[0]
print_line = (self.x_field(arrive_station, self.staxsize) + ' ' +
self.x_field(arrive_name, 8) + ' ' +
self.x_field(station_type, self.statsize) + ' ' +
self.x_field(planned_arrive, 4) + ' ' +
self.x_field(dummy, 4) + ' ' +
self.x_field(instructions, 40))
self.temp[i]= print_line
i = i + 1
sql = 'select instruction from instructions where station = ?'
count, ds_instructions = self.db_read(sql, data)
line = 0
for row in ds_instructions:
line = line + 1
instructions = row[0]
if line != 1:
print_line = (self.x_field(dummy, self.staxsize) + ' ' +
self.x_field(dummy, 8) + ' ' +
self.x_field(dummy, self.statsize) + ' ' +
self.x_field(dummy, 4) + ' ' +
self.x_field(dummy, 4) + ' ' +
self.x_field(instructions, 40))
self.temp[i]= print_line
i = i + 1
self.print_report (titles = titles,
report_id = 'PRTIMS',
report_name = 'TIMETABLE FOR ' + schedule,
Params = Params)
return
| true
| true
|
f70ae05e1e355304e928e6fc4760453b28642856
| 757
|
py
|
Python
|
python/test/test_biadjacent.py
|
EQt/graphidx
|
9716488cf29f6235072fc920fa1a473bf88e954f
|
[
"MIT"
] | 4
|
2020-04-03T15:18:30.000Z
|
2022-01-06T15:22:48.000Z
|
python/test/test_biadjacent.py
|
EQt/graphidx
|
9716488cf29f6235072fc920fa1a473bf88e954f
|
[
"MIT"
] | null | null | null |
python/test/test_biadjacent.py
|
EQt/graphidx
|
9716488cf29f6235072fc920fa1a473bf88e954f
|
[
"MIT"
] | null | null | null |
import numpy as np
from graphidx.idx import BiAdjacent
def square():
head = np.array([0, 0, 1, 2])
tail = np.array([1, 2, 3, 3])
return BiAdjacent(head, tail)
def test_square():
neigh = square()
assert repr(neigh) == "BiAdjacent[m = 4, n = 4]"
assert set(neigh[0]) == {1, 2}
assert set(neigh[1]) == {0, 3}
assert set(neigh[2]) == {0, 3}
assert set(neigh[3]) == {1, 2}
def test_1():
head = np.array([0, 1, 2, 3], dtype=np.int32)
tail = np.array([1, 3, 1, 2], dtype=np.int32)
index = BiAdjacent(head, tail)
assert repr(index) == "BiAdjacent[m = 4, n = 4]"
i2 = index[2]
assert len(i2) == 2
assert list(i2) == [1, 3]
assert list(index[0]) == [1]
assert list(index[1]) == [0, 3, 2]
| 23.65625
| 52
| 0.548217
|
import numpy as np
from graphidx.idx import BiAdjacent
def square():
head = np.array([0, 0, 1, 2])
tail = np.array([1, 2, 3, 3])
return BiAdjacent(head, tail)
def test_square():
neigh = square()
assert repr(neigh) == "BiAdjacent[m = 4, n = 4]"
assert set(neigh[0]) == {1, 2}
assert set(neigh[1]) == {0, 3}
assert set(neigh[2]) == {0, 3}
assert set(neigh[3]) == {1, 2}
def test_1():
head = np.array([0, 1, 2, 3], dtype=np.int32)
tail = np.array([1, 3, 1, 2], dtype=np.int32)
index = BiAdjacent(head, tail)
assert repr(index) == "BiAdjacent[m = 4, n = 4]"
i2 = index[2]
assert len(i2) == 2
assert list(i2) == [1, 3]
assert list(index[0]) == [1]
assert list(index[1]) == [0, 3, 2]
| true
| true
|
f70ae09d3d78cf87978fb48a1ad8112b54e656d2
| 3,833
|
py
|
Python
|
src/Products/PluginRegistry/interfaces.py
|
zopefoundation/Products.PluginRegistry
|
5093cec2ef2c0769ac19e854d19acd9cae27c878
|
[
"ZPL-2.1"
] | null | null | null |
src/Products/PluginRegistry/interfaces.py
|
zopefoundation/Products.PluginRegistry
|
5093cec2ef2c0769ac19e854d19acd9cae27c878
|
[
"ZPL-2.1"
] | 13
|
2016-02-27T22:32:34.000Z
|
2021-09-21T06:46:05.000Z
|
src/Products/PluginRegistry/interfaces.py
|
zopefoundation/Products.PluginRegistry
|
5093cec2ef2c0769ac19e854d19acd9cae27c878
|
[
"ZPL-2.1"
] | 2
|
2015-04-03T05:26:05.000Z
|
2015-10-16T08:22:24.000Z
|
##############################################################################
#
# Copyright (c) 2001 Zope Foundation and Contributors
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this
# distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
""" PluginRegistry interface declarations
"""
from zope.interface import Interface
class IPluginRegistry(Interface):
""" Manage a set of plugin definitions, grouped by type.
"""
def listPluginTypeInfo():
""" Return a sequence of mappings describing our plugin types.
o Keys for the mappings must include:
'id' -- a string used to identify the plugin type (should be
the __name__ of the interface)
'interface' -- the plugin type interface
'methods' -- the methods expected by the plugin type interface
'title' -- a display title for the plugin type
'description' -- a description of what the plugins do
"""
def listPlugins(plugin_type):
""" Return a sequence of tuples, one for each plugin of the given type.
o 'plugin_type' must be one of the known types, else raise KeyError.
o Tuples will be of the form, '(plugin_id, plugin)'.
"""
def listPluginIds(plugin_type):
""" Return a sequence of plugin ids
o Return ids for each active plugin of the given type.
o 'plugin_type' must be one of the known types, else raise KeyError.
"""
def activatePlugin(plugin_type, plugin_id):
""" Activate a plugin of the given type.
o 'plugin_type' must be one of the known types, else raise KeyError.
o 'plugin_id' must be the ID of an available plugin, else raise
KeyError.
o Append 'plugin_id' to the list of active plugins for the given
'plugin_type'.
"""
def deactivatePlugin(plugin_type, plugin_id):
""" Deactivate a plugin of the given type.
o 'plugin_type' must be one of the known types, else raise KeyError.
o 'plugin_id' must be an ID of an existing plugin of that type,
else raise KeyError.
"""
def movePluginsUp(plugin_type, ids_to_move):
""" Move a set of plugins "up" in their list.
o 'plugin_type' must be one of the known types, else raise KeyError.
o 'ids_to_move' must be a sequence of ids of current plugins
for that type.
- If any item is not the ID of a current plugin, raise ValueError.
"""
def movePluginsTop(plugin_type, ids_to_move):
""" Move a set of plugins to the "top" in their list.
o 'plugin_type' must be one of the known types, else raise KeyError.
o 'ids_to_move' must be a sequence of ids of current plugins
for that type.
- If any item is not the ID of a current plugin, raise ValueError.
          - Moving one plugin to the top has the obvious result; moving more than
            one plugin to the top places them there one by one, so the last ID in
            the list ends up at the top.
"""
def movePluginsDown(plugin_type, ids_to_move):
""" Move a set of plugins "down" in their list.
o 'plugin_type' must be one of the known types, else raise KeyError.
o 'ids_to_move' must be a sequence of indexes of items in the current
list of plugins for that type.
- If any item is not the ID of a current plugin, raise ValueError.
"""
| 31.941667
| 79
| 0.623272
|
from zope.interface import Interface
class IPluginRegistry(Interface):
    def listPluginTypeInfo(): ...
    def listPlugins(plugin_type): ...
    def listPluginIds(plugin_type): ...
    def activatePlugin(plugin_type, plugin_id): ...
    def deactivatePlugin(plugin_type, plugin_id): ...
    def movePluginsUp(plugin_type, ids_to_move): ...
    def movePluginsTop(plugin_type, ids_to_move): ...
    def movePluginsDown(plugin_type, ids_to_move): ...
| true
| true
|
f70ae09eab8088111ac2aa5d19572f95f48b55f4
| 1,923
|
py
|
Python
|
tests/test_sock.py
|
xxNB/gunicorn
|
fde9fcfaaaa5db628bcb16644de524122ef0f057
|
[
"MIT"
] | 1
|
2020-04-03T18:00:08.000Z
|
2020-04-03T18:00:08.000Z
|
tests/test_sock.py
|
xgfone/gunicorn
|
3857ebc4a7ca52cc7ad5a89a62f2cf94519e426b
|
[
"MIT"
] | 1
|
2016-08-04T09:36:31.000Z
|
2016-08-04T09:36:31.000Z
|
tests/test_sock.py
|
xgfone/gunicorn
|
3857ebc4a7ca52cc7ad5a89a62f2cf94519e426b
|
[
"MIT"
] | 1
|
2021-02-22T14:46:39.000Z
|
2021-02-22T14:46:39.000Z
|
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
try:
import unittest.mock as mock
except ImportError:
import mock
from gunicorn import sock
@mock.patch('os.stat')
def test_create_sockets_unix_bytes(stat):
conf = mock.Mock(address=[b'127.0.0.1:8000'])
log = mock.Mock()
with mock.patch.object(sock.UnixSocket, '__init__', lambda *args: None):
listeners = sock.create_sockets(conf, log)
assert len(listeners) == 1
print(type(listeners[0]))
assert isinstance(listeners[0], sock.UnixSocket)
@mock.patch('os.stat')
def test_create_sockets_unix_strings(stat):
conf = mock.Mock(address=['127.0.0.1:8000'])
log = mock.Mock()
with mock.patch.object(sock.UnixSocket, '__init__', lambda *args: None):
listeners = sock.create_sockets(conf, log)
assert len(listeners) == 1
assert isinstance(listeners[0], sock.UnixSocket)
def test_socket_close():
listener1 = mock.Mock()
listener1.getsockname.return_value = ('127.0.0.1', '80')
listener2 = mock.Mock()
listener2.getsockname.return_value = ('192.168.2.5', '80')
sock.close_sockets([listener1, listener2])
listener1.close.assert_called_with()
listener2.close.assert_called_with()
@mock.patch('os.unlink')
def test_unix_socket_close_unlink(unlink):
listener = mock.Mock()
listener.getsockname.return_value = '/var/run/test.sock'
sock.close_sockets([listener])
listener.close.assert_called_with()
unlink.assert_called_once_with('/var/run/test.sock')
@mock.patch('os.unlink')
def test_unix_socket_close_without_unlink(unlink):
listener = mock.Mock()
listener.getsockname.return_value = '/var/run/test.sock'
sock.close_sockets([listener], False)
listener.close.assert_called_with()
assert not unlink.called, 'unlink should not have been called'
| 31.52459
| 76
| 0.703588
|
try:
import unittest.mock as mock
except ImportError:
import mock
from gunicorn import sock
@mock.patch('os.stat')
def test_create_sockets_unix_bytes(stat):
conf = mock.Mock(address=[b'127.0.0.1:8000'])
log = mock.Mock()
with mock.patch.object(sock.UnixSocket, '__init__', lambda *args: None):
listeners = sock.create_sockets(conf, log)
assert len(listeners) == 1
print(type(listeners[0]))
assert isinstance(listeners[0], sock.UnixSocket)
@mock.patch('os.stat')
def test_create_sockets_unix_strings(stat):
conf = mock.Mock(address=['127.0.0.1:8000'])
log = mock.Mock()
with mock.patch.object(sock.UnixSocket, '__init__', lambda *args: None):
listeners = sock.create_sockets(conf, log)
assert len(listeners) == 1
assert isinstance(listeners[0], sock.UnixSocket)
def test_socket_close():
listener1 = mock.Mock()
listener1.getsockname.return_value = ('127.0.0.1', '80')
listener2 = mock.Mock()
listener2.getsockname.return_value = ('192.168.2.5', '80')
sock.close_sockets([listener1, listener2])
listener1.close.assert_called_with()
listener2.close.assert_called_with()
@mock.patch('os.unlink')
def test_unix_socket_close_unlink(unlink):
listener = mock.Mock()
listener.getsockname.return_value = '/var/run/test.sock'
sock.close_sockets([listener])
listener.close.assert_called_with()
unlink.assert_called_once_with('/var/run/test.sock')
@mock.patch('os.unlink')
def test_unix_socket_close_without_unlink(unlink):
listener = mock.Mock()
listener.getsockname.return_value = '/var/run/test.sock'
sock.close_sockets([listener], False)
listener.close.assert_called_with()
assert not unlink.called, 'unlink should not have been called'
| true
| true
|
f70ae137d2eb886399cbe83df02b37e5d3c5be8f
| 11,876
|
py
|
Python
|
arkane/encorr/ae.py
|
tza0035/RMG-Py
|
38c49f7107d1b19e4a534408a1040ddd313b8596
|
[
"MIT"
] | 250
|
2015-06-06T23:32:00.000Z
|
2022-03-22T16:45:16.000Z
|
arkane/encorr/ae.py
|
tza0035/RMG-Py
|
38c49f7107d1b19e4a534408a1040ddd313b8596
|
[
"MIT"
] | 1,781
|
2015-05-26T23:52:00.000Z
|
2022-03-31T19:07:54.000Z
|
arkane/encorr/ae.py
|
tza0035/RMG-Py
|
38c49f7107d1b19e4a534408a1040ddd313b8596
|
[
"MIT"
] | 161
|
2015-06-02T14:28:59.000Z
|
2022-03-02T19:37:14.000Z
|
#!/usr/bin/env python3
###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2021 Prof. William H. Green (whgreen@mit.edu), #
# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
"""
This module provides classes for fitting atom energies based on a very
small, predetermined set of molecules.
"""
import importlib
import json
import logging
from collections import Counter
from typing import Dict, Hashable, List, Union
import numpy as np
from scipy.stats import distributions
from rmgpy import constants
from rmgpy.molecule import get_element, Molecule
import arkane.encorr.data as data
from arkane.encorr.reference import ReferenceDatabase
from arkane.modelchem import LevelOfTheory, CompositeLevelOfTheory
# List of species labels that will be used for fitting (labels should match reference database)
SPECIES_LABELS = [
'Dihydrogen',
'Dinitrogen',
'Dioxygen',
'Disulfur',
'Difluorine',
'Dichlorine',
'Dibromine',
'Hydrogen fluoride',
'Hydrogen chloride',
'Hydrogen bromide',
'Hydrogen sulfide',
'Water',
'Methane',
'Methyl',
'Ammonia',
'Chloromethane'
]
class AEJob:
"""
A job for fitting atom energies.
"""
def __init__(self,
species_energies: Dict[str, float],
level_of_theory: Union[LevelOfTheory, CompositeLevelOfTheory] = None,
write_to_database: bool = False,
overwrite: bool = False):
"""
Initialize an AEJob instance.
Notes:
The species energies should be provided as a dictionary
containing the species labels as keys and their single-
point electronic energies in Hartree as values. The
energies should be calculated using the experimental
geometry provided for the species in the reference
database, and the zero-point energy should not be included
in the electronic energy.
Args:
species_energies: Dictionary of species labels with single-point electronic energies (Hartree).
level_of_theory: Dictionary key for saving atom energies to the database.
write_to_database: Save the fitted atom energies directly to the RMG database.
overwrite: Overwrite atom energies in the RMG database if they already exist.
"""
self.spcs_energies = species_energies
self.level_of_theory = level_of_theory
self.write_to_database = write_to_database
self.overwrite = overwrite
self.ae = AE(species_energies)
def execute(self, output_file: str = None):
"""
Execute the atom energy job.
Args:
output_file: Write the fitted energies to this file.
"""
if self.level_of_theory is None:
logging.info('Fitting atom energies')
else:
logging.info(f'Fitting atom energies for {self.level_of_theory}')
self.ae.fit()
if output_file is not None:
with open(output_file, 'a') as f:
if self.level_of_theory is not None:
f.write(f'# {self.level_of_theory}\n')
for element, energy in self.ae.atom_energies.items():
f.write(f'# {element:2}: {energy:15.8f} +/- {self.ae.confidence_intervals[element]:.8f} Hartree\n')
f.writelines(self.ae.format_atom_energies(
'atom_energies' if self.level_of_theory is None else self.level_of_theory))
if self.write_to_database:
if self.level_of_theory is None:
raise Exception('Level of theory is required for writing to database')
try:
self.ae.write_to_database(self.level_of_theory, overwrite=self.overwrite)
except ValueError as e:
logging.warning('Could not write atom energies to database. Captured error:')
logging.warning(str(e))
class AE:
"""
A class for fitting atom energies.
"""
ref_data_src = 'CCCBDB' # Use CCCBDB data
ref_data = None # Dictionary of reference data entries
def __init__(self, species_energies: Dict[str, float]):
self.species_energies = species_energies # Hartree
self.atom_energies = None
self.confidence_intervals = None
for lbl in SPECIES_LABELS:
if lbl not in self.species_energies:
logging.warning(f'{lbl} missing from provided species energies!')
@classmethod
def _load_refdata(cls):
if cls.ref_data is None:
logging.info('Loading reference database')
db = ReferenceDatabase()
db.load()
cls.ref_data = {lbl: spc for lbl, spc in zip(SPECIES_LABELS, db.get_species_from_label(SPECIES_LABELS))}
def fit(self):
"""
Fit atom energies using the provided species energies and
corresponding atomization energies from the reference data.
"""
self._load_refdata()
mols = [
Molecule().from_adjacency_list(
self.ref_data[lbl].adjacency_list,
raise_atomtype_exception=False,
raise_charge_exception=False
) for lbl in self.species_energies
]
atom_counts = [Counter(atom.element.symbol for atom in mol.atoms) for mol in mols]
elements = sorted({element for ac in atom_counts for element in ac}, key=lambda s: get_element(s).number)
x = np.array([[ac[element] for element in elements] for ac in atom_counts]) # Nmols x Nelements
atomization_energies = np.array([
self.ref_data[lbl].reference_data[self.ref_data_src].atomization_energy.value_si
/ constants.E_h / constants.Na for lbl in self.species_energies
])
zpes = np.array([
self.ref_data[lbl].reference_data[self.ref_data_src].zpe.value_si
/ constants.E_h / constants.Na for lbl in self.species_energies
])
elec_energies = np.array(list(self.species_energies.values())) # Should already be in Hartree
y = atomization_energies + elec_energies + zpes
w = np.linalg.solve(x.T @ x, x.T @ y)
self.atom_energies = dict(zip(elements, w))
# Get confidence intervals
n = len(y) # Ndata
k = len(w) # Nparam
ypred = x @ w
sigma2 = np.sum((y - ypred)**2) / (n - k - 1) # MSE
cov = sigma2 * np.linalg.inv(x.T @ x) # covariance matrix
se = np.sqrt(np.diag(cov)) # standard error
alpha = 0.05 # 95% confidence level
tdist = distributions.t.ppf(1 - alpha/2, n - k - 1) # student-t
ci = tdist * se # confidence interval half-width
self.confidence_intervals = dict(zip(elements, ci)) # Parameter estimates are w +/- ci
def write_to_database(self, key: Hashable, overwrite: bool = False, alternate_path: str = None):
"""
Write atom energies to database.
Args:
key: Dictionary key to use for atom energies in database.
overwrite: Overwrite existing atom energies.
alternate_path: Write atom energies and existing database to this path instead.
"""
if self.atom_energies is None:
raise ValueError('No atom energies available for writing')
data_path = data.quantum_corrections_path
with open(data_path) as f:
lines = f.readlines()
ae_formatted = self.format_atom_energies(key, indent=True)
# Add new atom energies to file without changing existing formatting
for i, line in enumerate(lines):
if 'atom_energies' in line:
if key in data.atom_energies:
if overwrite:
# Does not overwrite comments
del_idx_start = del_idx_end = None
for j, line2 in enumerate(lines[i:]):
if repr(key) in line2:
del_idx_start = i + j
del_idx_end = None
elif line2.rstrip() == ' },': # Can't have a comment after final brace
del_idx_end = i + j + 1
if del_idx_start is not None and del_idx_end is not None:
if (lines[del_idx_start - 1].lstrip().startswith('#')
or lines[del_idx_end + 1].lstrip().startswith('#')):
logging.warning('There may be left over comments from previous atom energies')
lines[del_idx_start:del_idx_end] = ae_formatted
break
else:
raise ValueError(f'{key} already exists. Set `overwrite` to True.')
else:
lines[(i+1):(i+1)] = ['\n'] + ae_formatted
break
with open(data_path if alternate_path is None else alternate_path, 'w') as f:
f.writelines(lines)
# Reload data to update atom energy dictionary
if alternate_path is None:
importlib.reload(data)
def format_atom_energies(self, key: Hashable, indent: bool = False) -> List[str]:
"""
Obtain a list of nicely formatted atom energies suitable for
writelines.
Args:
key: Dictionary key to use for formatting dictionary.
indent: Indent each line.
Returns:
Formatted list of atom energies.
"""
ae_formatted = json.dumps(self.atom_energies, indent=4).replace('"', "'").split('\n')
ae_formatted[0] = f'"{key}": ' + ae_formatted[0]
ae_formatted[-1] += ','
ae_formatted = [e + '\n' for e in ae_formatted]
if indent:
ae_formatted = [' ' + e for e in ae_formatted]
return ae_formatted
| 43.028986
| 119
| 0.575867
|
import importlib
import json
import logging
from collections import Counter
from typing import Dict, Hashable, List, Union
import numpy as np
from scipy.stats import distributions
from rmgpy import constants
from rmgpy.molecule import get_element, Molecule
import arkane.encorr.data as data
from arkane.encorr.reference import ReferenceDatabase
from arkane.modelchem import LevelOfTheory, CompositeLevelOfTheory
SPECIES_LABELS = [
'Dihydrogen',
'Dinitrogen',
'Dioxygen',
'Disulfur',
'Difluorine',
'Dichlorine',
'Dibromine',
'Hydrogen fluoride',
'Hydrogen chloride',
'Hydrogen bromide',
'Hydrogen sulfide',
'Water',
'Methane',
'Methyl',
'Ammonia',
'Chloromethane'
]
class AEJob:
def __init__(self,
species_energies: Dict[str, float],
level_of_theory: Union[LevelOfTheory, CompositeLevelOfTheory] = None,
write_to_database: bool = False,
overwrite: bool = False):
self.spcs_energies = species_energies
self.level_of_theory = level_of_theory
self.write_to_database = write_to_database
self.overwrite = overwrite
self.ae = AE(species_energies)
def execute(self, output_file: str = None):
if self.level_of_theory is None:
logging.info('Fitting atom energies')
else:
logging.info(f'Fitting atom energies for {self.level_of_theory}')
self.ae.fit()
if output_file is not None:
with open(output_file, 'a') as f:
if self.level_of_theory is not None:
f.write(f'# {self.level_of_theory}\n')
for element, energy in self.ae.atom_energies.items():
f.write(f'# {element:2}: {energy:15.8f} +/- {self.ae.confidence_intervals[element]:.8f} Hartree\n')
f.writelines(self.ae.format_atom_energies(
'atom_energies' if self.level_of_theory is None else self.level_of_theory))
if self.write_to_database:
if self.level_of_theory is None:
raise Exception('Level of theory is required for writing to database')
try:
self.ae.write_to_database(self.level_of_theory, overwrite=self.overwrite)
except ValueError as e:
logging.warning('Could not write atom energies to database. Captured error:')
logging.warning(str(e))
class AE:
    ref_data_src = 'CCCBDB'
    ref_data = None
def __init__(self, species_energies: Dict[str, float]):
        self.species_energies = species_energies
        self.atom_energies = None
self.confidence_intervals = None
for lbl in SPECIES_LABELS:
if lbl not in self.species_energies:
logging.warning(f'{lbl} missing from provided species energies!')
@classmethod
def _load_refdata(cls):
if cls.ref_data is None:
logging.info('Loading reference database')
db = ReferenceDatabase()
db.load()
cls.ref_data = {lbl: spc for lbl, spc in zip(SPECIES_LABELS, db.get_species_from_label(SPECIES_LABELS))}
def fit(self):
self._load_refdata()
mols = [
Molecule().from_adjacency_list(
self.ref_data[lbl].adjacency_list,
raise_atomtype_exception=False,
raise_charge_exception=False
) for lbl in self.species_energies
]
atom_counts = [Counter(atom.element.symbol for atom in mol.atoms) for mol in mols]
elements = sorted({element for ac in atom_counts for element in ac}, key=lambda s: get_element(s).number)
x = np.array([[ac[element] for element in elements] for ac in atom_counts])
atomization_energies = np.array([
self.ref_data[lbl].reference_data[self.ref_data_src].atomization_energy.value_si
/ constants.E_h / constants.Na for lbl in self.species_energies
])
zpes = np.array([
self.ref_data[lbl].reference_data[self.ref_data_src].zpe.value_si
/ constants.E_h / constants.Na for lbl in self.species_energies
])
        elec_energies = np.array(list(self.species_energies.values()))
        y = atomization_energies + elec_energies + zpes
w = np.linalg.solve(x.T @ x, x.T @ y)
self.atom_energies = dict(zip(elements, w))
        n = len(y)
        k = len(w)
        ypred = x @ w
        sigma2 = np.sum((y - ypred)**2) / (n - k - 1)
        cov = sigma2 * np.linalg.inv(x.T @ x)
        se = np.sqrt(np.diag(cov))
        alpha = 0.05
        tdist = distributions.t.ppf(1 - alpha/2, n - k - 1)
        ci = tdist * se
        self.confidence_intervals = dict(zip(elements, ci))
def write_to_database(self, key: Hashable, overwrite: bool = False, alternate_path: str = None):
if self.atom_energies is None:
raise ValueError('No atom energies available for writing')
data_path = data.quantum_corrections_path
with open(data_path) as f:
lines = f.readlines()
ae_formatted = self.format_atom_energies(key, indent=True)
for i, line in enumerate(lines):
if 'atom_energies' in line:
if key in data.atom_energies:
if overwrite:
del_idx_start = del_idx_end = None
for j, line2 in enumerate(lines[i:]):
if repr(key) in line2:
del_idx_start = i + j
del_idx_end = None
                            elif line2.rstrip() == '    },':
                                del_idx_end = i + j + 1
if del_idx_start is not None and del_idx_end is not None:
                                if (lines[del_idx_start - 1].lstrip().startswith('#')
                                        or lines[del_idx_end + 1].lstrip().startswith('#')):
                                    logging.warning('There may be left over comments from previous atom energies')
lines[del_idx_start:del_idx_end] = ae_formatted
break
else:
raise ValueError(f'{key} already exists. Set `overwrite` to True.')
else:
lines[(i+1):(i+1)] = ['\n'] + ae_formatted
break
with open(data_path if alternate_path is None else alternate_path, 'w') as f:
f.writelines(lines)
# Reload data to update atom energy dictionary
if alternate_path is None:
importlib.reload(data)
def format_atom_energies(self, key: Hashable, indent: bool = False) -> List[str]:
ae_formatted = json.dumps(self.atom_energies, indent=4).replace('"', "'").split('\n')
ae_formatted[0] = f'"{key}": ' + ae_formatted[0]
ae_formatted[-1] += ','
ae_formatted = [e + '\n' for e in ae_formatted]
if indent:
ae_formatted = [' ' + e for e in ae_formatted]
return ae_formatted
| true
| true
|
f70ae202c9bbc57106e38b3a87518d56915eb222
| 1,997
|
py
|
Python
|
guillotina/tests/test_commands.py
|
diefenbach/guillotina
|
a8c7247fca8294752901f643b35c5ed1c5dee76d
|
[
"BSD-2-Clause"
] | null | null | null |
guillotina/tests/test_commands.py
|
diefenbach/guillotina
|
a8c7247fca8294752901f643b35c5ed1c5dee76d
|
[
"BSD-2-Clause"
] | null | null | null |
guillotina/tests/test_commands.py
|
diefenbach/guillotina
|
a8c7247fca8294752901f643b35c5ed1c5dee76d
|
[
"BSD-2-Clause"
] | null | null | null |
import json
import os
from tempfile import mkstemp
import pytest
from guillotina import testing
from guillotina.commands import get_settings
from guillotina.commands.run import RunCommand
DATABASE = os.environ.get('DATABASE', 'DUMMY')
def test_run_command(command_arguments):
_, filepath = mkstemp(suffix='.py')
_, filepath2 = mkstemp()
with open(filepath, 'w') as fi:
fi.write(f'''
async def run(app):
with open("{filepath2}", 'w') as fi:
fi.write("foobar")
''')
command_arguments.script = filepath
command = RunCommand(command_arguments)
settings = testing.get_settings()
command.run_command(settings=settings)
with open(filepath2) as fi:
assert fi.read() == 'foobar'
@pytest.mark.skipif(DATABASE != 'postgres', reason="Cockroach does not have cascade support")
def test_run_command_with_container(command_arguments, container_command):
_, filepath = mkstemp(suffix='.py')
_, filepath2 = mkstemp()
with open(filepath, 'w') as fi:
fi.write(f'''
async def run(container):
with open("{filepath2}", 'w') as fi:
fi.write('foobar')
''')
command_arguments.script = filepath
command = RunCommand(command_arguments)
command.run_command(settings=container_command['settings'])
with open(filepath2) as fi:
assert fi.read() == 'foobar'
def test_get_settings():
settings = get_settings('doesnotexist.json', [
'foobar=foobar',
'foo.bar=foobar'
])
assert settings['foobar'] == 'foobar'
assert settings['foo']['bar'] == 'foobar'
def test_get_settings_with_environment_variables():
os.environ.update({
'G_foobar': 'foobar',
'G_foo__bar': 'foobar',
'G_foo__bar1__bar2': json.dumps({
'foo': 'bar'
})
})
settings = get_settings('doesnotexist.json')
assert settings['foobar'] == 'foobar'
assert settings['foo']['bar'] == 'foobar'
assert settings['foo']['bar1']['bar2'] == {'foo': 'bar'}
| 28.126761
| 93
| 0.656485
|
import json
import os
from tempfile import mkstemp
import pytest
from guillotina import testing
from guillotina.commands import get_settings
from guillotina.commands.run import RunCommand
DATABASE = os.environ.get('DATABASE', 'DUMMY')
def test_run_command(command_arguments):
_, filepath = mkstemp(suffix='.py')
_, filepath2 = mkstemp()
with open(filepath, 'w') as fi:
fi.write(f'''
async def run(app):
with open("{filepath2}", 'w') as fi:
fi.write("foobar")
''')
command_arguments.script = filepath
command = RunCommand(command_arguments)
settings = testing.get_settings()
command.run_command(settings=settings)
with open(filepath2) as fi:
assert fi.read() == 'foobar'
@pytest.mark.skipif(DATABASE != 'postgres', reason="Cockroach does not have cascade support")
def test_run_command_with_container(command_arguments, container_command):
_, filepath = mkstemp(suffix='.py')
_, filepath2 = mkstemp()
with open(filepath, 'w') as fi:
fi.write(f'''
async def run(container):
with open("{filepath2}", 'w') as fi:
fi.write('foobar')
''')
command_arguments.script = filepath
command = RunCommand(command_arguments)
command.run_command(settings=container_command['settings'])
with open(filepath2) as fi:
assert fi.read() == 'foobar'
def test_get_settings():
settings = get_settings('doesnotexist.json', [
'foobar=foobar',
'foo.bar=foobar'
])
assert settings['foobar'] == 'foobar'
assert settings['foo']['bar'] == 'foobar'
def test_get_settings_with_environment_variables():
os.environ.update({
'G_foobar': 'foobar',
'G_foo__bar': 'foobar',
'G_foo__bar1__bar2': json.dumps({
'foo': 'bar'
})
})
settings = get_settings('doesnotexist.json')
assert settings['foobar'] == 'foobar'
assert settings['foo']['bar'] == 'foobar'
assert settings['foo']['bar1']['bar2'] == {'foo': 'bar'}
| true
| true
|
f70ae20e0c6f0ebe03040dfd7db2eca4e293191c
| 37,560
|
py
|
Python
|
test/test_adaptor_pytorch.py
|
intel/lp-opt-tool
|
130eefa3586b38df6c0ff78cc8807ae273f6a63f
|
[
"Apache-2.0"
] | 52
|
2020-08-04T04:31:48.000Z
|
2020-11-29T02:34:32.000Z
|
test/test_adaptor_pytorch.py
|
intel/lp-opt-tool
|
130eefa3586b38df6c0ff78cc8807ae273f6a63f
|
[
"Apache-2.0"
] | null | null | null |
test/test_adaptor_pytorch.py
|
intel/lp-opt-tool
|
130eefa3586b38df6c0ff78cc8807ae273f6a63f
|
[
"Apache-2.0"
] | 7
|
2020-08-21T01:08:55.000Z
|
2020-11-29T03:36:55.000Z
|
import torch
import torch.nn as nn
import torch.nn.quantized as nnq
from torch.quantization import QuantStub, DeQuantStub
import torchvision
import unittest
import os
from neural_compressor.adaptor import FRAMEWORKS
from neural_compressor.model import MODELS
from neural_compressor.adaptor.pytorch import PyTorchVersionMode
import neural_compressor.adaptor.pytorch as nc_torch
from neural_compressor.experimental import Quantization, common
from neural_compressor.conf.config import Quantization_Conf
from neural_compressor.utils.pytorch import load
from neural_compressor.utils.utility import recover
import shutil
import copy
import numpy as np
import yaml
try:
try:
import intel_pytorch_extension as ipex
except:
import intel_extension_for_pytorch as ipex
TEST_IPEX = True
except:
TEST_IPEX = False
PT_VERSION = nc_torch.get_torch_version()
if PT_VERSION >= PyTorchVersionMode.PT18.value:
FX_MODE = True
else:
FX_MODE = False
fake_dyn_yaml = '''
model:
name: imagenet
framework: pytorch
quantization:
approach: post_training_dynamic_quant
op_wise: {
'decoder': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
}
}
evaluation:
accuracy:
metric:
topk: 1
performance:
warmup: 5
iteration: 10
tuning:
accuracy_criterion:
relative: 0.01
exit_policy:
timeout: 0
random_seed: 9527
workspace:
path: saved
'''
fake_ptq_yaml = '''
model:
name: imagenet
framework: pytorch
quantization:
op_wise: {
'quant': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer1.0.conv1': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer1.0.conv2': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer2.0.conv1': {
'activation': {'dtype': ['uint8'], 'algorithm': ['minmax'], 'granularity': ['per_tensor'], 'scheme':['sym']},
'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']}
},
'layer3.0.conv1': {
'activation': {'dtype': ['uint8'], 'algorithm': ['kl'], 'granularity': ['per_tensor'], 'scheme':['sym']},
'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']}
},
'layer1.0.add_relu': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
}
evaluation:
accuracy:
metric:
topk: 1
performance:
warmup: 1
iteration: 10
tuning:
accuracy_criterion:
relative: 0.01
exit_policy:
timeout: 0
random_seed: 9527
workspace:
path: saved
'''
fake_ptq_yaml_for_fx = '''
model:
name: imagenet
framework: pytorch_fx
quantization:
op_wise: {
'quant': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer1.0.conv1': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer1.0.conv2': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer2.0.conv1': {
'activation': {'dtype': ['uint8'], 'algorithm': ['minmax'], 'granularity': ['per_tensor'], 'scheme':['sym']},
'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']}
},
'layer3.0.conv1': {
'activation': {'dtype': ['uint8'], 'algorithm': ['kl'], 'granularity': ['per_tensor'], 'scheme':['sym']},
'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']}
},
'layer1.0.add_relu': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'default_qconfig': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
}
}
evaluation:
accuracy:
metric:
topk: 1
performance:
warmup: 5
iteration: 10
tuning:
accuracy_criterion:
relative: 0.01
exit_policy:
timeout: 0
random_seed: 9527
workspace:
path: saved
'''
fake_qat_yaml = '''
model:
name: imagenet
framework: pytorch
quantization:
approach: quant_aware_training
train:
end_epoch: 1
iteration: 1
optimizer:
SGD:
learning_rate: 0.0001
criterion:
CrossEntropyLoss:
reduction: mean
op_wise: {
'quant': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer1.0.conv1': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer1.0.conv2': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer2.0.conv1': {
'activation': {'dtype': ['uint8'], 'algorithm': ['minmax'], 'granularity': ['per_tensor'], 'scheme':['sym']},
'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']}
},
'layer3.0.conv1': {
'activation': {'dtype': ['uint8'], 'algorithm': ['kl'], 'granularity': ['per_tensor'], 'scheme':['sym']},
'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']}
},
'layer1.0.add_relu': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
}
}
evaluation:
accuracy:
metric:
topk: 1
tuning:
accuracy_criterion:
relative: 0.01
exit_policy:
timeout: 0
random_seed: 9527
workspace:
path: saved
'''
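# Illustrative op_wise entry following the same schema as the yaml strings
# above (hypothetical layer name, not used by these tests):
#
#   'layer4.0.conv1': {
#       'activation': {'dtype': ['uint8'], 'scheme': ['asym']},
#       'weight': {'dtype': ['int8'], 'granularity': ['per_channel']}
#   }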
def build_pytorch_yaml():
with open('ptq_yaml.yaml', 'w', encoding="utf-8") as f:
f.write(fake_ptq_yaml)
with open('dynamic_yaml.yaml', 'w', encoding="utf-8") as f:
f.write(fake_dyn_yaml)
with open('qat_yaml.yaml', 'w', encoding="utf-8") as f:
f.write(fake_qat_yaml)
def build_pytorch_fx_yaml():
if PT_VERSION >= PyTorchVersionMode.PT19.value:
fake_fx_ptq_yaml = fake_ptq_yaml_for_fx
else:
fake_fx_ptq_yaml = fake_ptq_yaml.replace('pytorch', 'pytorch_fx')
with open('fx_ptq_yaml.yaml', 'w', encoding="utf-8") as f:
f.write(fake_fx_ptq_yaml)
fake_fx_dyn_yaml = fake_dyn_yaml.replace('pytorch', 'pytorch_fx')
with open('fx_dynamic_yaml.yaml', 'w', encoding="utf-8") as f:
f.write(fake_fx_dyn_yaml)
fake_fx_qat_yaml = fake_qat_yaml.replace('pytorch', 'pytorch_fx')
with open('fx_qat_yaml.yaml', 'w', encoding="utf-8") as f:
f.write(fake_fx_qat_yaml)
def build_ipex_yaml():
fake_yaml = '''
model:
name: imagenet
framework: pytorch_ipex
evaluation:
accuracy:
metric:
topk: 1
performance:
warmup: 5
iteration: 10
tuning:
accuracy_criterion:
relative: 0.01
exit_policy:
timeout: 0
random_seed: 9527
workspace:
path: saved
'''
with open('ipex_yaml.yaml', 'w', encoding="utf-8") as f:
f.write(fake_yaml)
def build_dump_tensors_yaml():
fake_yaml = '''
model:
name: imagenet
framework: pytorch
evaluation:
accuracy:
metric:
topk: 1
tuning:
accuracy_criterion:
relative: 0.01
exit_policy:
timeout: 0
random_seed: 9527
workspace:
path: saved
tensorboard: true
'''
with open('dump_yaml.yaml', 'w', encoding="utf-8") as f:
f.write(fake_yaml)
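# Toy models used as quantization targets throughout the tests below.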
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.quant = QuantStub()
self.conv = nn.Conv2d(3, 1, 1)
self.linear = nn.Linear(224 * 224, 5)
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.conv(x)
x = x.view(1, -1)
x = self.linear(x)
x = self.dequant(x)
return x
class FP32Model(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
times = x.size(1)
if times == 1:
return x + x
return x
class DynamicModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 1, 1)
def forward(self, x):
if x is not None:
x = self.conv(x)
return x
class SubModel(torch.nn.Module):
def __init__(self, bypass=True):
super().__init__()
self.quant = QuantStub()
self.conv = nn.Conv2d(1, 1, 1)
self.conv1 = nn.Conv2d(1, 1, 1)
self.bn = nn.BatchNorm2d(1)
self.relu = nn.ReLU()
self.fp32 = FP32Model()
self.norm = nn.LayerNorm([1, 224, 224])
self.dequant = DeQuantStub()
self.bypass = bypass
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.quant(x)
x = self.relu(x)
x = self.conv1(x)
x = self.dequant(x)
if not self.bypass:
x = self.fp32(x)
x = self.norm(x)
return x
class PartialQuantModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.quant = QuantStub()
self.conv = nn.Conv2d(3, 1, 1)
self.bn = nn.BatchNorm2d(1)
self.conv1 = nn.Conv2d(1, 1, 1)
self.bn1 = nn.BatchNorm2d(1)
self.conv2 = nn.Conv2d(1, 1, 1)
self.linear = nn.Linear(224 * 224, 1)
self.dequant = DeQuantStub()
self.sub = SubModel(bypass=False)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.conv1(x)
x = self.bn1(x)
x = self.sub(x)
x = self.quant(x)
x = self.conv2(x)
x = x.view(1, -1)
x = self.linear(x)
x = self.dequant(x)
return x
class DynamicControlModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(3, 1, 1)
self.bn = nn.BatchNorm2d(1)
self.linear = nn.Linear(224 * 224, 1)
self.sub = SubModel()
self.fp32 = FP32Model()
self.dyn = DynamicModel()
def forward(self, x):
x = self.conv(x)
x = self.dyn(x)
x = self.bn(x)
x = self.sub(x)
x = self.fp32(x)
x = x.view(1, -1)
x = self.linear(x)
return x
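# Dummy evaluation/training hooks for the tuning loop: eval_func reports a
# fixed accuracy of 0.0, q_func runs a single SGD step and returns the model.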
def eval_func(model):
# switch to evaluate mode
model.eval()
with torch.no_grad():
input = torch.randn(1, 3, 224, 224)
# compute output
output = model(input)
return 0.0
def q_func(model):
optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
# switch to evaluate mode
model.train()
input = torch.randn(1, 3, 224, 224)
# compute output
output = model(input)
loss = output.mean()
optimizer.zero_grad()
loss.backward()
optimizer.step()
return model
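# Eager-mode PyTorch adaptor tests, built around torchvision's quantizable ResNet-18.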
class TestPytorchAdaptor(unittest.TestCase):
framework_specific_info = {"device": "cpu",
"approach": "post_training_static_quant",
"random_seed": 1234,
"q_dataloader": None,
"workspace_path": "./"}
framework = "pytorch"
adaptor = FRAMEWORKS[framework](framework_specific_info)
model = torchvision.models.quantization.resnet18()
nc_model = MODELS['pytorch'](model)
@classmethod
def setUpClass(self):
build_pytorch_yaml()
build_dump_tensors_yaml()
@classmethod
def tearDownClass(self):
os.remove('ptq_yaml.yaml')
os.remove('dynamic_yaml.yaml')
os.remove('qat_yaml.yaml')
os.remove('dump_yaml.yaml')
shutil.rmtree('./saved', ignore_errors=True)
shutil.rmtree('runs', ignore_errors=True)
def test_get_all_weight_name(self):
assert len(list(self.nc_model.get_all_weight_names())) == 62
def test_get_weight(self):
for name, param in self.model.named_parameters():
if name == "layer4.1.conv2.weight":
param.data.fill_(0.0)
if name == "fc.bias":
param.data.fill_(0.1)
assert int(torch.sum(self.nc_model.get_weight("layer4.1.conv2.weight"))) == 0
assert torch.allclose(
torch.sum(
self.nc_model.get_weight("fc.bias")),
torch.tensor(100.))
def test_get_input(self):
model = MODELS['pytorch'](torchvision.models.quantization.resnet18())
model.model.eval().fuse_model()
model.register_forward_pre_hook()
rand_input = torch.rand(100, 3, 224, 224).float()
model.model(rand_input)
assert torch.equal(model.get_inputs('x'), rand_input)
model.remove_hooks()
def test_update_weights(self):
self.nc_model.update_weights('fc.bias', torch.zeros([1000]))
assert int(torch.sum(self.nc_model.get_weight("fc.bias"))) == 0
def test_get_gradient(self):
with self.assertRaises(AssertionError):
self.nc_model.get_gradient('fc.bias')
for name, tensor in self.nc_model._model.named_parameters():
if name == 'fc.bias':
tensor.grad = torch.zeros_like(tensor)
break
assert torch.equal(torch.Tensor(self.nc_model.get_gradient('fc.bias')), torch.zeros_like(tensor))
rand_input = torch.rand(100, 3, 224, 224).float()
rand_input.grad = torch.ones_like(rand_input)
assert torch.equal(torch.Tensor(self.nc_model.get_gradient(rand_input)),
torch.ones_like(rand_input))
def test_report_sparsity(self):
df, total_sparsity = self.nc_model.report_sparsity()
self.assertTrue(total_sparsity > 0)
self.assertTrue(len(df) == 22)
def test_quantization_saved(self):
for fake_yaml in ['dynamic_yaml.yaml', 'qat_yaml.yaml', 'ptq_yaml.yaml']:
model = M()
quantizer = Quantization(fake_yaml)
quantizer.conf.usr_cfg.tuning.exit_policy['performance_only'] = True
dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)
quantizer.model = model
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
q_model = quantizer.fit()
eval_func(q_model)
q_model.save('./saved')
# Load configure and weights by neural_compressor.utils
saved_model = load("./saved", model)
eval_func(saved_model)
# recover int8 model from history
history_file = './saved/history.snapshot'
model_recover = recover(model, history_file, 0)
eval_func(model_recover)
self.assertEqual(type(saved_model.conv), \
type(model_recover.conv))
shutil.rmtree('./saved', ignore_errors=True)
from neural_compressor.experimental import Benchmark
evaluator = Benchmark('ptq_yaml.yaml')
# Load configure and weights by neural_compressor.model
evaluator.model = model
evaluator.b_dataloader = common.DataLoader(dataset)
evaluator()
evaluator.model = model
evaluator()
for fake_yaml in ['qat_yaml.yaml', 'ptq_yaml.yaml']:
model = copy.deepcopy(self.model)
if fake_yaml == 'ptq_yaml.yaml':
model.eval().fuse_model()
conf = Quantization_Conf(fake_yaml)
quantizer = Quantization(conf)
dataset = quantizer.dataset('dummy', (100, 3, 224, 224))
quantizer.model = model
if fake_yaml == 'qat_yaml.yaml':
quantizer.q_func = q_func
else:
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_func = eval_func
q_model = quantizer.fit()
q_model.save('./saved')
# Load configure and weights by neural_compressor.utils
saved_model = load("./saved", model)
eval_func(saved_model)
shutil.rmtree('./saved', ignore_errors=True)
def test_quantization_new_saved(self):
for fake_yaml in ['dynamic_yaml.yaml', 'qat_yaml.yaml', 'ptq_yaml.yaml']:
model = M()
quantizer = Quantization(fake_yaml)
quantizer.conf.usr_cfg.tuning.exit_policy['performance_only'] = True
dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)
quantizer.model = model
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
q_model = quantizer.fit()
eval_func(q_model)
torch.save(q_model.quantized_state_dict(), './saved/model.pt')
# Load configure and weights by neural_compressor.utils
from neural_compressor.experimental.common import Model
common_model = Model(model)
common_model.load_quantized_state_dict(torch.load('./saved/model.pt'))
eval_func(common_model)
self.assertEqual(type(q_model._model.linear), \
type(common_model._model.linear))
shutil.rmtree('./saved', ignore_errors=True)
def test_non_quant_module(self):
for fake_yaml in ['qat_yaml.yaml', 'ptq_yaml.yaml']:
model = PartialQuantModel()
conf = Quantization_Conf(fake_yaml)
quantizer = Quantization(conf)
dataset = quantizer.dataset('dummy', (1, 3, 224, 224))
non_quant_dict = {'non_quant_module_name': ['conv', 'conv1', 'sub.conv'], \
'non_quant_module_class': ['BatchNorm2d', 'FP32Model']}
quantizer.model = common.Model(model, **non_quant_dict)
if fake_yaml == 'qat_yaml.yaml':
quantizer.q_func = q_func
else:
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_func = eval_func
q_model = quantizer.fit()
q_model.save('./saved')
saved_model = load("./saved", model, **non_quant_dict)
eval_func(saved_model)
shutil.rmtree('./saved', ignore_errors=True)
def test_workspace_path(self):
model = M()
quantizer = Quantization('ptq_yaml.yaml')
quantizer.conf.usr_cfg.tuning.exit_policy['performance_only'] = True
dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)
quantizer.model = model
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
q_model = quantizer.fit()
eval_func(q_model)
torch.save(q_model.quantized_state_dict(), './saved/best_model.pt')
# Load configure and weights by workspace_path
from neural_compressor.experimental.common import Model
common_model = Model(model)
common_model.workspace_path = './saved'
eval_func(common_model)
self.assertEqual(type(q_model._model.linear), \
type(common_model._model.linear))
shutil.rmtree('./saved', ignore_errors=True)
def test_get_graph_info(self):
from neural_compressor.model.torch_model import PyTorchModel
model = PyTorchModel(self.model)
op_map = model.graph_info
self.assertTrue(op_map['conv1'] == 'Conv2d')
def test_tensorboard(self):
model = copy.deepcopy(self.nc_model)
model.model.eval().fuse_model()
quantizer = Quantization('dump_yaml.yaml')
dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)
quantizer.model = model.model
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_func = eval_func
quantizer.fit()
self.assertTrue(os.path.exists('runs/eval/baseline_acc0.0'))
quantizer.eval_dataloader = common.DataLoader(dataset)
quantizer.eval_func = None
quantizer.fit()
self.assertTrue(os.path.exists('runs/eval/baseline_acc0.0'))
def test_tensor_dump_and_set(self):
model = copy.deepcopy(self.nc_model)
model.model.eval().fuse_model()
quantizer = Quantization('ptq_yaml.yaml')
dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)
dataloader = common.DataLoader(dataset)
dataloader = common._generate_common_dataloader(dataloader, 'pytorch')
quantizer.eval_dataloader = dataloader
quantizer.calib_dataloader = dataloader
quantizer.model = model.model
q_model = quantizer.fit()
quantizer.strategy.adaptor.inspect_tensor(
model, dataloader, op_list=['conv1.0', 'layer1.0.conv1.0'],
iteration_list=[1, 2], inspect_type='all', save_to_disk=True)
load_array = lambda *a, **k: np.load(*a, allow_pickle=True, **k)
a = load_array('saved/dump_tensor/activation_iter1.npz')
w = load_array('saved/dump_tensor/weight.npz')
if PT_VERSION >= PyTorchVersionMode.PT18.value:
self.assertTrue(w['conv1.0'].item()['conv1.0.weight'].shape[0] ==
a['conv1.0'].item()['conv1.0.output0'].shape[1])
else:
self.assertTrue(w['conv1.0'].item()['conv1.0.weight'].shape[0] ==
a['conv1.0'].item()['conv1.1.output0'].shape[1])
data = np.random.random(w['conv1.0'].item()['conv1.0.weight'].shape).astype(np.float32)
quantizer.strategy.adaptor.set_tensor(q_model, {'conv1.0.weight': data})
changed_tensor = q_model.get_weight('conv1.weight')
scales = changed_tensor.q_per_channel_scales()
changed_tensor_fp32 = torch.dequantize(changed_tensor)
self.assertTrue(np.allclose(data, changed_tensor_fp32.numpy(), atol=2 / np.min(scales.numpy())))
quantizer.strategy.adaptor.inspect_tensor(
q_model, dataloader, op_list=['conv1.0', 'layer1.0.conv1.0'],
iteration_list=[1, 2], inspect_type='all', save_to_disk=False)
def test_get_graph_info(self):
from neural_compressor.adaptor.pytorch import get_ops_recursively
model = copy.deepcopy(self.model)
op_map = {}
get_ops_recursively(model, '', op_map)
self.assertTrue(op_map['conv1'] == 'Conv2d')
def test_forward_wrapper(self):
vision_model = torchvision.models.resnet18()
class dummymodel(torch.nn.Module):
def __init__(self, model):
super(dummymodel, self).__init__()
self._model = model
def forward(self,input=None):
return self._model(input)
data = [[{'input': torch.rand(3,224,224)}, torch.ones(1,1)], ]
# dataloader.batch_size=100
dataloader = common.DataLoader(data, batch_size=1)
quantizer = Quantization('dynamic_yaml.yaml')
model = dummymodel(vision_model)
quantizer.model = model
quantizer.calib_dataloader = dataloader
quantizer.eval_dataloader = dataloader
quantizer.fit()
def test_floatfunctions_fallback(self):
class ModelWithFunctionals(torch.nn.Module):
def __init__(self):
super(ModelWithFunctionals, self).__init__()
self.mycat = nnq.FloatFunctional()
self.myadd = nnq.FloatFunctional()
self.myadd_relu = nnq.FloatFunctional()
# Tracing doesn't work yet for c10 ops with scalar inputs
# https://github.com/pytorch/pytorch/issues/27097
self.my_scalar_add = nnq.FloatFunctional()
self.mymul = nnq.FloatFunctional()
self.my_scalar_mul = nnq.FloatFunctional()
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
y = self.mycat.cat([x, x, x])
z = self.myadd.add(y, y)
w = self.myadd_relu.add_relu(z, z)
# Tracing doesn't work yet for c10 ops with scalar inputs
# https://github.com/pytorch/pytorch/issues/27097
w = self.my_scalar_add.add_scalar(w, -0.5)
w = self.mymul.mul(w, w)
w = self.my_scalar_mul.mul_scalar(w, 0.5)
w = self.dequant(w)
return w
model = ModelWithFunctionals()
model = MODELS['pytorch'](model)
x = torch.rand(10, 1, dtype=torch.float)
y = model.model(x)
fallback_ops = []
q_capability = self.adaptor.query_fw_capability(model)
for k, v in q_capability["opwise"].items():
if k[0] != "quant" and k[0] != "dequant":
fallback_ops.append(k[0])
model.model.qconfig = torch.quantization.default_qconfig
model.model.quant.qconfig = torch.quantization.default_qconfig
if PT_VERSION >= PyTorchVersionMode.PT18.value:
model.model.dequant.qconfig = torch.quantization.default_qconfig
nc_torch._fallback_quantizable_ops_recursively(
model.model, '', fallback_ops, op_qcfgs={})
torch.quantization.add_observer_(model.model)
model.model(x)
torch.quantization.convert(model.model, self.adaptor.q_mapping, inplace=True)
qy = model.model(x)
tol = {'atol': 1e-01, 'rtol': 1e-03}
self.assertTrue(np.allclose(y, qy, **tol))
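# IPEX backend tests; skipped when intel_pytorch_extension /
# intel_extension_for_pytorch cannot be imported (TEST_IPEX is False).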
@unittest.skipIf(not TEST_IPEX, "Unsupported Intel PyTorch Extension")
class TestPytorchIPEXAdaptor(unittest.TestCase):
@classmethod
def setUpClass(self):
build_ipex_yaml()
@classmethod
def tearDownClass(self):
os.remove('ipex_yaml.yaml')
shutil.rmtree('./saved', ignore_errors=True)
shutil.rmtree('runs', ignore_errors=True)
def test_tuning_ipex(self):
from neural_compressor.experimental import Quantization
model = M()
quantizer = Quantization('ipex_yaml.yaml')
quantizer.conf.usr_cfg.tuning.exit_policy['performance_only'] = True
dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)
quantizer.model = model
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
nc_model = quantizer.fit()
nc_model.save('./saved')
try:
script_model = torch.jit.script(model.to(ipex.DEVICE))
except:
script_model = torch.jit.trace(model.to(ipex.DEVICE), torch.randn(10, 3, 224, 224).to(ipex.DEVICE))
from neural_compressor.experimental import Benchmark
evaluator = Benchmark('ipex_yaml.yaml')
evaluator.model = script_model
evaluator.b_dataloader = common.DataLoader(dataset)
results = evaluator()
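# FX graph mode tests; require PyTorch >= 1.8 (FX_MODE is True).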
@unittest.skipIf(not FX_MODE, "Unsupported Fx Mode with PyTorch Version Below 1.8")
class TestPytorchFXAdaptor(unittest.TestCase):
@classmethod
def setUpClass(self):
build_pytorch_fx_yaml()
@classmethod
def tearDownClass(self):
os.remove('fx_ptq_yaml.yaml')
os.remove('fx_dynamic_yaml.yaml')
shutil.rmtree('./saved', ignore_errors=True)
shutil.rmtree('runs', ignore_errors=True)
def test_fx_quant(self):
for fake_yaml in ['fx_qat_yaml.yaml', 'fx_ptq_yaml.yaml']:
model_origin = torchvision.models.resnet18()
# run fx_quant in neural_compressor and save the quantized GraphModule
quantizer = Quantization(fake_yaml)
dataset = quantizer.dataset('dummy', (10, 3, 224, 224), label=True)
quantizer.eval_func = eval_func
if fake_yaml == 'fx_qat_yaml.yaml':
quantizer.q_func = q_func
else:
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.model = common.Model(model_origin,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
q_model = quantizer.fit()
q_model.save('./saved')
# Load configure and weights with neural_compressor.utils
model_fx = load('./saved', model_origin,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
self.assertTrue(isinstance(model_fx, torch.fx.graph_module.GraphModule))
# recover int8 model with only tune_cfg
history_file = './saved/history.snapshot'
model_fx_recover = recover(model_origin, history_file, 0,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
self.assertEqual(model_fx.code, model_fx_recover.code)
shutil.rmtree('./saved', ignore_errors=True)
for fake_yaml in ['fx_qat_yaml.yaml', 'fx_ptq_yaml.yaml']:
model_origin = M()
# run fx_quant in neural_compressor and save the quantized GraphModule
quantizer = Quantization(fake_yaml)
quantizer.conf.usr_cfg.tuning.exit_policy['performance_only'] = True
dataset = quantizer.dataset('dummy', (10, 3, 224, 224), label=True)
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
quantizer.model = common.Model(model_origin,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
q_model = quantizer.fit()
q_model.save('./saved')
# Load configure and weights with neural_compressor.utils
model_fx = load('./saved', model_origin,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
self.assertTrue(isinstance(model_fx, torch.fx.graph_module.GraphModule))
shutil.rmtree('./saved', ignore_errors=True)
@unittest.skipIf(PT_VERSION < PyTorchVersionMode.PT19.value,
"Please use PyTroch 1.9 or higher version for dynamic quantization with pytorch_fx backend")
def test_fx_dynamic_quant(self):
# Model Definition
class LSTMModel(nn.Module):
'''Container module with an encoder, a recurrent module, and a decoder.'''
def __init__(self, ntoken, ninp, nhid, nlayers, dropout=0.5):
super(LSTMModel, self).__init__()
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(ntoken, ninp)
self.rnn = nn.LSTM(ninp, nhid, nlayers, dropout=dropout)
self.decoder = nn.Linear(nhid, ntoken)
self.init_weights()
self.nhid = nhid
self.nlayers = nlayers
def init_weights(self):
initrange = 0.1
self.encoder.weight.data.uniform_(-initrange, initrange)
self.decoder.bias.data.zero_()
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, input, hidden):
emb = self.drop(self.encoder(input))
output, hidden = self.rnn(emb, hidden)
output = self.drop(output)
decoded = self.decoder(output)
return decoded, hidden
model = LSTMModel(
ntoken = 10,
ninp = 512,
nhid = 256,
nlayers = 5,
)
# run fx_quant in neural_compressor and save the quantized GraphModule
model.eval()
quantizer = Quantization('fx_dynamic_yaml.yaml')
quantizer.model = common.Model(model,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
q_model = quantizer.fit()
q_model.save('./saved')
# Load configure and weights by neural_compressor.utils
model_fx = load("./saved", model,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
self.assertTrue(isinstance(model_fx, torch.fx.graph_module.GraphModule))
# recover int8 model with only tune_cfg
history_file = './saved/history.snapshot'
model_fx_recover = recover(model, history_file, 0,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
self.assertEqual(model_fx.code, model_fx_recover.code)
shutil.rmtree('./saved', ignore_errors=True)
def test_fx_sub_module_quant(self):
for fake_yaml in ['fx_qat_yaml.yaml', 'fx_ptq_yaml.yaml']:
model_origin = DynamicControlModel()
# run fx_quant in neural_compressor and save the quantized GraphModule
quantizer = Quantization(fake_yaml)
dataset = quantizer.dataset('dummy', (1, 3, 224, 224), label=True)
quantizer.eval_func = eval_func
if fake_yaml == 'fx_qat_yaml.yaml':
quantizer.q_func = q_func
else:
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.model = common.Model(model_origin,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
q_model = quantizer.fit()
q_model.save('./saved')
# Load configure and weights with neural_compressor.utils
model_fx = load('./saved/best_model.pt', model_origin,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
self.assertTrue(isinstance(model_fx.sub, torch.fx.graph_module.GraphModule))
# recover int8 model with only tune_cfg
history_file = './saved/history.snapshot'
model_fx_recover = recover(model_origin, history_file, 0,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
self.assertEqual(model_fx.sub.code, model_fx_recover.sub.code)
shutil.rmtree('./saved', ignore_errors=True)
if __name__ == "__main__":
unittest.main()
| 38.13198
| 125
| 0.557535
|
import torch
import torch.nn as nn
import torch.nn.quantized as nnq
from torch.quantization import QuantStub, DeQuantStub
import torchvision
import unittest
import os
from neural_compressor.adaptor import FRAMEWORKS
from neural_compressor.model import MODELS
from neural_compressor.adaptor.pytorch import PyTorchVersionMode
import neural_compressor.adaptor.pytorch as nc_torch
from neural_compressor.experimental import Quantization, common
from neural_compressor.conf.config import Quantization_Conf
from neural_compressor.utils.pytorch import load
from neural_compressor.utils.utility import recover
import shutil
import copy
import numpy as np
import yaml
try:
try:
import intel_pytorch_extension as ipex
except:
import intel_extension_for_pytorch as ipex
TEST_IPEX = True
except:
TEST_IPEX = False
PT_VERSION = nc_torch.get_torch_version()
if PT_VERSION >= PyTorchVersionMode.PT18.value:
FX_MODE = True
else:
FX_MODE = False
fake_dyn_yaml = '''
model:
name: imagenet
framework: pytorch
quantization:
approach: post_training_dynamic_quant
op_wise: {
'decoder': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
}
}
evaluation:
accuracy:
metric:
topk: 1
performance:
warmup: 5
iteration: 10
tuning:
accuracy_criterion:
relative: 0.01
exit_policy:
timeout: 0
random_seed: 9527
workspace:
path: saved
'''
fake_ptq_yaml = '''
model:
name: imagenet
framework: pytorch
quantization:
op_wise: {
'quant': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer1.0.conv1': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer1.0.conv2': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer2.0.conv1': {
'activation': {'dtype': ['uint8'], 'algorithm': ['minmax'], 'granularity': ['per_tensor'], 'scheme':['sym']},
'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']}
},
'layer3.0.conv1': {
'activation': {'dtype': ['uint8'], 'algorithm': ['kl'], 'granularity': ['per_tensor'], 'scheme':['sym']},
'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']}
},
'layer1.0.add_relu': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
}
evaluation:
accuracy:
metric:
topk: 1
performance:
warmup: 1
iteration: 10
tuning:
accuracy_criterion:
relative: 0.01
exit_policy:
timeout: 0
random_seed: 9527
workspace:
path: saved
'''
fake_ptq_yaml_for_fx = '''
model:
name: imagenet
framework: pytorch_fx
quantization:
op_wise: {
'quant': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer1.0.conv1': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer1.0.conv2': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer2.0.conv1': {
'activation': {'dtype': ['uint8'], 'algorithm': ['minmax'], 'granularity': ['per_tensor'], 'scheme':['sym']},
'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']}
},
'layer3.0.conv1': {
'activation': {'dtype': ['uint8'], 'algorithm': ['kl'], 'granularity': ['per_tensor'], 'scheme':['sym']},
'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']}
},
'layer1.0.add_relu': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'default_qconfig': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
}
}
evaluation:
accuracy:
metric:
topk: 1
performance:
warmup: 5
iteration: 10
tuning:
accuracy_criterion:
relative: 0.01
exit_policy:
timeout: 0
random_seed: 9527
workspace:
path: saved
'''
fake_qat_yaml = '''
model:
name: imagenet
framework: pytorch
quantization:
approach: quant_aware_training
train:
end_epoch: 1
iteration: 1
optimizer:
SGD:
learning_rate: 0.0001
criterion:
CrossEntropyLoss:
reduction: mean
op_wise: {
'quant': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer1.0.conv1': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer1.0.conv2': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer2.0.conv1': {
'activation': {'dtype': ['uint8'], 'algorithm': ['minmax'], 'granularity': ['per_tensor'], 'scheme':['sym']},
'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']}
},
'layer3.0.conv1': {
'activation': {'dtype': ['uint8'], 'algorithm': ['kl'], 'granularity': ['per_tensor'], 'scheme':['sym']},
'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']}
},
'layer1.0.add_relu': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
}
}
evaluation:
accuracy:
metric:
topk: 1
tuning:
accuracy_criterion:
relative: 0.01
exit_policy:
timeout: 0
random_seed: 9527
workspace:
path: saved
'''
def build_pytorch_yaml():
with open('ptq_yaml.yaml', 'w', encoding="utf-8") as f:
f.write(fake_ptq_yaml)
with open('dynamic_yaml.yaml', 'w', encoding="utf-8") as f:
f.write(fake_dyn_yaml)
with open('qat_yaml.yaml', 'w', encoding="utf-8") as f:
f.write(fake_qat_yaml)
def build_pytorch_fx_yaml():
if PT_VERSION >= PyTorchVersionMode.PT19.value:
fake_fx_ptq_yaml = fake_ptq_yaml_for_fx
else:
fake_fx_ptq_yaml = fake_ptq_yaml.replace('pytorch', 'pytorch_fx')
with open('fx_ptq_yaml.yaml', 'w', encoding="utf-8") as f:
f.write(fake_fx_ptq_yaml)
fake_fx_dyn_yaml = fake_dyn_yaml.replace('pytorch', 'pytorch_fx')
with open('fx_dynamic_yaml.yaml', 'w', encoding="utf-8") as f:
f.write(fake_fx_dyn_yaml)
fake_fx_qat_yaml = fake_qat_yaml.replace('pytorch', 'pytorch_fx')
with open('fx_qat_yaml.yaml', 'w', encoding="utf-8") as f:
f.write(fake_fx_qat_yaml)
def build_ipex_yaml():
fake_yaml = '''
model:
name: imagenet
framework: pytorch_ipex
evaluation:
accuracy:
metric:
topk: 1
performance:
warmup: 5
iteration: 10
tuning:
accuracy_criterion:
relative: 0.01
exit_policy:
timeout: 0
random_seed: 9527
workspace:
path: saved
'''
with open('ipex_yaml.yaml', 'w', encoding="utf-8") as f:
f.write(fake_yaml)
def build_dump_tensors_yaml():
fake_yaml = '''
model:
name: imagenet
framework: pytorch
evaluation:
accuracy:
metric:
topk: 1
tuning:
accuracy_criterion:
relative: 0.01
exit_policy:
timeout: 0
random_seed: 9527
workspace:
path: saved
tensorboard: true
'''
with open('dump_yaml.yaml', 'w', encoding="utf-8") as f:
f.write(fake_yaml)
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.quant = QuantStub()
self.conv = nn.Conv2d(3, 1, 1)
self.linear = nn.Linear(224 * 224, 5)
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.conv(x)
x = x.view(1, -1)
x = self.linear(x)
x = self.dequant(x)
return x
class FP32Model(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
times = x.size(1)
if times == 1:
return x + x
return x
class DynamicModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 1, 1)
def forward(self, x):
if x is not None:
x = self.conv(x)
return x
class SubModel(torch.nn.Module):
def __init__(self, bypass=True):
super().__init__()
self.quant = QuantStub()
self.conv = nn.Conv2d(1, 1, 1)
self.conv1 = nn.Conv2d(1, 1, 1)
self.bn = nn.BatchNorm2d(1)
self.relu = nn.ReLU()
self.fp32 = FP32Model()
self.norm = nn.LayerNorm([1, 224, 224])
self.dequant = DeQuantStub()
self.bypass = bypass
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.quant(x)
x = self.relu(x)
x = self.conv1(x)
x = self.dequant(x)
if not self.bypass:
x = self.fp32(x)
x = self.norm(x)
return x
class PartialQuantModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.quant = QuantStub()
self.conv = nn.Conv2d(3, 1, 1)
self.bn = nn.BatchNorm2d(1)
self.conv1 = nn.Conv2d(1, 1, 1)
self.bn1 = nn.BatchNorm2d(1)
self.conv2 = nn.Conv2d(1, 1, 1)
self.linear = nn.Linear(224 * 224, 1)
self.dequant = DeQuantStub()
self.sub = SubModel(bypass=False)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.conv1(x)
x = self.bn1(x)
x = self.sub(x)
x = self.quant(x)
x = self.conv2(x)
x = x.view(1, -1)
x = self.linear(x)
x = self.dequant(x)
return x
class DynamicControlModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(3, 1, 1)
self.bn = nn.BatchNorm2d(1)
self.linear = nn.Linear(224 * 224, 1)
self.sub = SubModel()
self.fp32 = FP32Model()
self.dyn = DynamicModel()
def forward(self, x):
x = self.conv(x)
x = self.dyn(x)
x = self.bn(x)
x = self.sub(x)
x = self.fp32(x)
x = x.view(1, -1)
x = self.linear(x)
return x
def eval_func(model):
model.eval()
with torch.no_grad():
input = torch.randn(1, 3, 224, 224)
output = model(input)
return 0.0
def q_func(model):
optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
model.train()
input = torch.randn(1, 3, 224, 224)
output = model(input)
loss = output.mean()
optimizer.zero_grad()
loss.backward()
optimizer.step()
return model
class TestPytorchAdaptor(unittest.TestCase):
framework_specific_info = {"device": "cpu",
"approach": "post_training_static_quant",
"random_seed": 1234,
"q_dataloader": None,
"workspace_path": "./"}
framework = "pytorch"
adaptor = FRAMEWORKS[framework](framework_specific_info)
model = torchvision.models.quantization.resnet18()
nc_model = MODELS['pytorch'](model)
@classmethod
def setUpClass(self):
build_pytorch_yaml()
build_dump_tensors_yaml()
@classmethod
def tearDownClass(self):
os.remove('ptq_yaml.yaml')
os.remove('dynamic_yaml.yaml')
os.remove('qat_yaml.yaml')
os.remove('dump_yaml.yaml')
shutil.rmtree('./saved', ignore_errors=True)
shutil.rmtree('runs', ignore_errors=True)
def test_get_all_weight_name(self):
assert len(list(self.nc_model.get_all_weight_names())) == 62
def test_get_weight(self):
for name, param in self.model.named_parameters():
if name == "layer4.1.conv2.weight":
param.data.fill_(0.0)
if name == "fc.bias":
param.data.fill_(0.1)
assert int(torch.sum(self.nc_model.get_weight("layer4.1.conv2.weight"))) == 0
assert torch.allclose(
torch.sum(
self.nc_model.get_weight("fc.bias")),
torch.tensor(100.))
def test_get_input(self):
model = MODELS['pytorch'](torchvision.models.quantization.resnet18())
model.model.eval().fuse_model()
model.register_forward_pre_hook()
rand_input = torch.rand(100, 3, 224, 224).float()
model.model(rand_input)
assert torch.equal(model.get_inputs('x'), rand_input)
model.remove_hooks()
def test_update_weights(self):
self.nc_model.update_weights('fc.bias', torch.zeros([1000]))
assert int(torch.sum(self.nc_model.get_weight("fc.bias"))) == 0
def test_get_gradient(self):
with self.assertRaises(AssertionError):
self.nc_model.get_gradient('fc.bias')
for name, tensor in self.nc_model._model.named_parameters():
if name == 'fc.bias':
tensor.grad = torch.zeros_like(tensor)
break
assert torch.equal(torch.Tensor(self.nc_model.get_gradient('fc.bias')), torch.zeros_like(tensor))
rand_input = torch.rand(100, 3, 224, 224).float()
rand_input.grad = torch.ones_like(rand_input)
assert torch.equal(torch.Tensor(self.nc_model.get_gradient(rand_input)),
torch.ones_like(rand_input))
def test_report_sparsity(self):
df, total_sparsity = self.nc_model.report_sparsity()
self.assertTrue(total_sparsity > 0)
self.assertTrue(len(df) == 22)
def test_quantization_saved(self):
for fake_yaml in ['dynamic_yaml.yaml', 'qat_yaml.yaml', 'ptq_yaml.yaml']:
model = M()
quantizer = Quantization(fake_yaml)
quantizer.conf.usr_cfg.tuning.exit_policy['performance_only'] = True
dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)
quantizer.model = model
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
q_model = quantizer.fit()
eval_func(q_model)
q_model.save('./saved')
saved_model = load("./saved", model)
eval_func(saved_model)
history_file = './saved/history.snapshot'
model_recover = recover(model, history_file, 0)
eval_func(model_recover)
self.assertEqual(type(saved_model.conv), \
type(model_recover.conv))
shutil.rmtree('./saved', ignore_errors=True)
from neural_compressor.experimental import Benchmark
evaluator = Benchmark('ptq_yaml.yaml')
evaluator.model = model
evaluator.b_dataloader = common.DataLoader(dataset)
evaluator()
evaluator.model = model
evaluator()
for fake_yaml in ['qat_yaml.yaml', 'ptq_yaml.yaml']:
model = copy.deepcopy(self.model)
if fake_yaml == 'ptq_yaml.yaml':
model.eval().fuse_model()
conf = Quantization_Conf(fake_yaml)
quantizer = Quantization(conf)
dataset = quantizer.dataset('dummy', (100, 3, 224, 224))
quantizer.model = model
if fake_yaml == 'qat_yaml.yaml':
quantizer.q_func = q_func
else:
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_func = eval_func
q_model = quantizer.fit()
q_model.save('./saved')
saved_model = load("./saved", model)
eval_func(saved_model)
shutil.rmtree('./saved', ignore_errors=True)
def test_quantization_new_saved(self):
for fake_yaml in ['dynamic_yaml.yaml', 'qat_yaml.yaml', 'ptq_yaml.yaml']:
model = M()
quantizer = Quantization(fake_yaml)
quantizer.conf.usr_cfg.tuning.exit_policy['performance_only'] = True
dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)
quantizer.model = model
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
q_model = quantizer.fit()
eval_func(q_model)
torch.save(q_model.quantized_state_dict(), './saved/model.pt')
from neural_compressor.experimental.common import Model
common_model = Model(model)
common_model.load_quantized_state_dict(torch.load('./saved/model.pt'))
eval_func(common_model)
self.assertEqual(type(q_model._model.linear), \
type(common_model._model.linear))
shutil.rmtree('./saved', ignore_errors=True)
def test_non_quant_module(self):
for fake_yaml in ['qat_yaml.yaml', 'ptq_yaml.yaml']:
model = PartialQuantModel()
conf = Quantization_Conf(fake_yaml)
quantizer = Quantization(conf)
dataset = quantizer.dataset('dummy', (1, 3, 224, 224))
non_quant_dict = {'non_quant_module_name': ['conv', 'conv1', 'sub.conv'], \
'non_quant_module_class': ['BatchNorm2d', 'FP32Model']}
quantizer.model = common.Model(model, **non_quant_dict)
if fake_yaml == 'qat_yaml.yaml':
quantizer.q_func = q_func
else:
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_func = eval_func
q_model = quantizer.fit()
q_model.save('./saved')
saved_model = load("./saved", model, **non_quant_dict)
eval_func(saved_model)
shutil.rmtree('./saved', ignore_errors=True)
def test_workspace_path(self):
model = M()
quantizer = Quantization('ptq_yaml.yaml')
quantizer.conf.usr_cfg.tuning.exit_policy['performance_only'] = True
dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)
quantizer.model = model
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
q_model = quantizer.fit()
eval_func(q_model)
torch.save(q_model.quantized_state_dict(), './saved/best_model.pt')
from neural_compressor.experimental.common import Model
common_model = Model(model)
common_model.workspace_path = './saved'
eval_func(common_model)
self.assertEqual(type(q_model._model.linear), \
type(common_model._model.linear))
shutil.rmtree('./saved', ignore_errors=True)
def test_get_graph_info(self):
from neural_compressor.model.torch_model import PyTorchModel
model = PyTorchModel(self.model)
op_map = model.graph_info
self.assertTrue(op_map['conv1'] == 'Conv2d')
def test_tensorboard(self):
model = copy.deepcopy(self.nc_model)
model.model.eval().fuse_model()
quantizer = Quantization('dump_yaml.yaml')
dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)
quantizer.model = model.model
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_func = eval_func
quantizer.fit()
self.assertTrue(os.path.exists('runs/eval/baseline_acc0.0'))
quantizer.eval_dataloader = common.DataLoader(dataset)
quantizer.eval_func = None
quantizer.fit()
self.assertTrue(os.path.exists('runs/eval/baseline_acc0.0'))
def test_tensor_dump_and_set(self):
model = copy.deepcopy(self.nc_model)
model.model.eval().fuse_model()
quantizer = Quantization('ptq_yaml.yaml')
dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)
dataloader = common.DataLoader(dataset)
dataloader = common._generate_common_dataloader(dataloader, 'pytorch')
quantizer.eval_dataloader = dataloader
quantizer.calib_dataloader = dataloader
quantizer.model = model.model
q_model = quantizer.fit()
quantizer.strategy.adaptor.inspect_tensor(
model, dataloader, op_list=['conv1.0', 'layer1.0.conv1.0'],
iteration_list=[1, 2], inspect_type='all', save_to_disk=True)
load_array = lambda *a, **k: np.load(*a, allow_pickle=True, **k)
a = load_array('saved/dump_tensor/activation_iter1.npz')
w = load_array('saved/dump_tensor/weight.npz')
if PT_VERSION >= PyTorchVersionMode.PT18.value:
self.assertTrue(w['conv1.0'].item()['conv1.0.weight'].shape[0] ==
a['conv1.0'].item()['conv1.0.output0'].shape[1])
else:
self.assertTrue(w['conv1.0'].item()['conv1.0.weight'].shape[0] ==
a['conv1.0'].item()['conv1.1.output0'].shape[1])
data = np.random.random(w['conv1.0'].item()['conv1.0.weight'].shape).astype(np.float32)
quantizer.strategy.adaptor.set_tensor(q_model, {'conv1.0.weight': data})
changed_tensor = q_model.get_weight('conv1.weight')
scales = changed_tensor.q_per_channel_scales()
changed_tensor_fp32 = torch.dequantize(changed_tensor)
self.assertTrue(np.allclose(data, changed_tensor_fp32.numpy(), atol=2 / np.min(scales.numpy())))
quantizer.strategy.adaptor.inspect_tensor(
q_model, dataloader, op_list=['conv1.0', 'layer1.0.conv1.0'],
iteration_list=[1, 2], inspect_type='all', save_to_disk=False)
def test_get_graph_info(self):
from neural_compressor.adaptor.pytorch import get_ops_recursively
model = copy.deepcopy(self.model)
op_map = {}
get_ops_recursively(model, '', op_map)
self.assertTrue(op_map['conv1'] == 'Conv2d')
def test_forward_wrapper(self):
vision_model = torchvision.models.resnet18()
class dummymodel(torch.nn.Module):
def __init__(self, model):
super(dummymodel, self).__init__()
self._model = model
def forward(self,input=None):
return self._model(input)
data = [[{'input': torch.rand(3,224,224)}, torch.ones(1,1)], ]
dataloader = common.DataLoader(data, batch_size=1)
quantizer = Quantization('dynamic_yaml.yaml')
model = dummymodel(vision_model)
quantizer.model = model
quantizer.calib_dataloader = dataloader
quantizer.eval_dataloader = dataloader
quantizer.fit()
def test_floatfunctions_fallback(self):
class ModelWithFunctionals(torch.nn.Module):
def __init__(self):
super(ModelWithFunctionals, self).__init__()
self.mycat = nnq.FloatFunctional()
self.myadd = nnq.FloatFunctional()
self.myadd_relu = nnq.FloatFunctional()
self.my_scalar_add = nnq.FloatFunctional()
self.mymul = nnq.FloatFunctional()
self.my_scalar_mul = nnq.FloatFunctional()
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
y = self.mycat.cat([x, x, x])
z = self.myadd.add(y, y)
w = self.myadd_relu.add_relu(z, z)
w = self.my_scalar_add.add_scalar(w, -0.5)
w = self.mymul.mul(w, w)
w = self.my_scalar_mul.mul_scalar(w, 0.5)
w = self.dequant(w)
return w
model = ModelWithFunctionals()
model = MODELS['pytorch'](model)
x = torch.rand(10, 1, dtype=torch.float)
y = model.model(x)
fallback_ops = []
q_capability = self.adaptor.query_fw_capability(model)
for k, v in q_capability["opwise"].items():
if k[0] != "quant" and k[0] != "dequant":
fallback_ops.append(k[0])
model.model.qconfig = torch.quantization.default_qconfig
model.model.quant.qconfig = torch.quantization.default_qconfig
if PT_VERSION >= PyTorchVersionMode.PT18.value:
model.model.dequant.qconfig = torch.quantization.default_qconfig
nc_torch._fallback_quantizable_ops_recursively(
model.model, '', fallback_ops, op_qcfgs={})
torch.quantization.add_observer_(model.model)
model.model(x)
torch.quantization.convert(model.model, self.adaptor.q_mapping, inplace=True)
qy = model.model(x)
tol = {'atol': 1e-01, 'rtol': 1e-03}
self.assertTrue(np.allclose(y, qy, **tol))
@unittest.skipIf(not TEST_IPEX, "Unsupported Intel PyTorch Extension")
class TestPytorchIPEXAdaptor(unittest.TestCase):
@classmethod
def setUpClass(self):
build_ipex_yaml()
@classmethod
def tearDownClass(self):
os.remove('ipex_yaml.yaml')
shutil.rmtree('./saved', ignore_errors=True)
shutil.rmtree('runs', ignore_errors=True)
def test_tuning_ipex(self):
from neural_compressor.experimental import Quantization
model = M()
quantizer = Quantization('ipex_yaml.yaml')
quantizer.conf.usr_cfg.tuning.exit_policy['performance_only'] = True
dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)
quantizer.model = model
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
nc_model = quantizer.fit()
nc_model.save('./saved')
try:
script_model = torch.jit.script(model.to(ipex.DEVICE))
except:
script_model = torch.jit.trace(model.to(ipex.DEVICE), torch.randn(10, 3, 224, 224).to(ipex.DEVICE))
from neural_compressor.experimental import Benchmark
evaluator = Benchmark('ipex_yaml.yaml')
evaluator.model = script_model
evaluator.b_dataloader = common.DataLoader(dataset)
results = evaluator()
@unittest.skipIf(not FX_MODE, "Unsupported Fx Mode with PyTorch Version Below 1.8")
class TestPytorchFXAdaptor(unittest.TestCase):
@classmethod
def setUpClass(self):
build_pytorch_fx_yaml()
@classmethod
def tearDownClass(self):
os.remove('fx_ptq_yaml.yaml')
os.remove('fx_dynamic_yaml.yaml')
shutil.rmtree('./saved', ignore_errors=True)
shutil.rmtree('runs', ignore_errors=True)
def test_fx_quant(self):
for fake_yaml in ['fx_qat_yaml.yaml', 'fx_ptq_yaml.yaml']:
model_origin = torchvision.models.resnet18()
quantizer = Quantization(fake_yaml)
dataset = quantizer.dataset('dummy', (10, 3, 224, 224), label=True)
quantizer.eval_func = eval_func
if fake_yaml == 'fx_qat_yaml.yaml':
quantizer.q_func = q_func
else:
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.model = common.Model(model_origin,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
q_model = quantizer.fit()
q_model.save('./saved')
model_fx = load('./saved', model_origin,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
self.assertTrue(isinstance(model_fx, torch.fx.graph_module.GraphModule))
history_file = './saved/history.snapshot'
model_fx_recover = recover(model_origin, history_file, 0,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
self.assertEqual(model_fx.code, model_fx_recover.code)
shutil.rmtree('./saved', ignore_errors=True)
for fake_yaml in ['fx_qat_yaml.yaml', 'fx_ptq_yaml.yaml']:
model_origin = M()
quantizer = Quantization(fake_yaml)
quantizer.conf.usr_cfg.tuning.exit_policy['performance_only'] = True
dataset = quantizer.dataset('dummy', (10, 3, 224, 224), label=True)
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
quantizer.model = common.Model(model_origin,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
q_model = quantizer.fit()
q_model.save('./saved')
model_fx = load('./saved', model_origin,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
self.assertTrue(isinstance(model_fx, torch.fx.graph_module.GraphModule))
shutil.rmtree('./saved', ignore_errors=True)
@unittest.skipIf(PT_VERSION < PyTorchVersionMode.PT19.value,
"Please use PyTroch 1.9 or higher version for dynamic quantization with pytorch_fx backend")
def test_fx_dynamic_quant(self):
class LSTMModel(nn.Module):
def __init__(self, ntoken, ninp, nhid, nlayers, dropout=0.5):
super(LSTMModel, self).__init__()
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(ntoken, ninp)
self.rnn = nn.LSTM(ninp, nhid, nlayers, dropout=dropout)
self.decoder = nn.Linear(nhid, ntoken)
self.init_weights()
self.nhid = nhid
self.nlayers = nlayers
def init_weights(self):
initrange = 0.1
self.encoder.weight.data.uniform_(-initrange, initrange)
self.decoder.bias.data.zero_()
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, input, hidden):
emb = self.drop(self.encoder(input))
output, hidden = self.rnn(emb, hidden)
output = self.drop(output)
decoded = self.decoder(output)
return decoded, hidden
model = LSTMModel(
ntoken = 10,
ninp = 512,
nhid = 256,
nlayers = 5,
)
model.eval()
quantizer = Quantization('fx_dynamic_yaml.yaml')
quantizer.model = common.Model(model,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
q_model = quantizer.fit()
q_model.save('./saved')
model_fx = load("./saved", model,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
self.assertTrue(isinstance(model_fx, torch.fx.graph_module.GraphModule))
history_file = './saved/history.snapshot'
model_fx_recover = recover(model, history_file, 0,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
self.assertEqual(model_fx.code, model_fx_recover.code)
shutil.rmtree('./saved', ignore_errors=True)
def test_fx_sub_module_quant(self):
for fake_yaml in ['fx_qat_yaml.yaml', 'fx_ptq_yaml.yaml']:
model_origin = DynamicControlModel()
quantizer = Quantization(fake_yaml)
dataset = quantizer.dataset('dummy', (1, 3, 224, 224), label=True)
quantizer.eval_func = eval_func
if fake_yaml == 'fx_qat_yaml.yaml':
quantizer.q_func = q_func
else:
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.model = common.Model(model_origin,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
q_model = quantizer.fit()
q_model.save('./saved')
model_fx = load('./saved/best_model.pt', model_origin,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
self.assertTrue(isinstance(model_fx.sub, torch.fx.graph_module.GraphModule))
history_file = './saved/history.snapshot'
model_fx_recover = recover(model_origin, history_file, 0,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
self.assertEqual(model_fx.sub.code, model_fx_recover.sub.code)
shutil.rmtree('./saved', ignore_errors=True)
if __name__ == "__main__":
unittest.main()
| true
| true
|
f70ae3517f3d8b2963d6bc5c320c15fd5a4c04f2
| 988
|
py
|
Python
|
Apteki/migrations/0001_initial.py
|
Daneev/Django_test
|
7c0cf5ab28b3faba3cd8dfad60a3194a3eff11d6
|
[
"Apache-2.0"
] | null | null | null |
Apteki/migrations/0001_initial.py
|
Daneev/Django_test
|
7c0cf5ab28b3faba3cd8dfad60a3194a3eff11d6
|
[
"Apache-2.0"
] | null | null | null |
Apteki/migrations/0001_initial.py
|
Daneev/Django_test
|
7c0cf5ab28b3faba3cd8dfad60a3194a3eff11d6
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-23 16:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Lekarstv',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=150, verbose_name='Наименование')),
('price', models.IntegerField(verbose_name='цена')),
('address', models.TextField(verbose_name='Адрес аптеки')),
('photo', models.ImageField(blank=True, default='', upload_to='Lekarstv/images', verbose_name='изображение')),
],
options={
'verbose_name': 'Лекарство',
'verbose_name_plural': 'Лекарства',
},
),
]
| 31.870968
| 126
| 0.581984
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Lekarstv',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=150, verbose_name='Наименование')),
('price', models.IntegerField(verbose_name='цена')),
('address', models.TextField(verbose_name='Адрес аптеки')),
('photo', models.ImageField(blank=True, default='', upload_to='Lekarstv/images', verbose_name='изображение')),
],
options={
'verbose_name': 'Лекарство',
'verbose_name_plural': 'Лекарства',
},
),
]
| true
| true
|