Integer, key="b"),
Column("b", Integer),
Column("c", Integer, key="a"),
)
self.assert_compile(
select([a, b, c, a, b, c]),
"SELECT a, b, c",
dialect=default.DefaultDialect(),
)
self.assert_compile(
select([bindparam("a"), bindparam("b"), bindparam("c")]),
"SELECT :a AS anon_1, :b AS anon_2, :c AS anon_3",
dialect=default.DefaultDialect(paramstyle="named"),
)
self.assert_compile(
select([bindparam("a"), bindparam("b"), bindparam("c")]),
"SELECT ? AS anon_1, ? AS anon_2, ? AS anon_3",
dialect=default.DefaultDialect(paramstyle="qmark"),
)
self.assert_compile(
select([column("a"), column("a"), column("a")]), "SELECT a, a, a"
)
s = select([bindparam("a"), bindparam("b"), bindparam("c")])
s = s.compile(dialect=default.DefaultDialect(paramstyle="qmark"))
eq_(s.positiontup, ["a", "b", "c"])
def test_nested_label_targeting(self):
"""test nested anonymous label generation.
"""
s1 = table1.select()
s2 = s1.alias()
s3 = select([s2], use_labels=True)
s4 = s3.alias()
s5 = select([s4], use_labels=True)
self.assert_compile(
s5,
"SELECT anon_1.anon_2_myid AS "
"anon_1_anon_2_myid, anon_1.anon_2_name AS "
"anon_1_anon_2_name, anon_1.anon_2_descript"
"ion AS anon_1_anon_2_description FROM "
"(SELECT anon_2.myid AS anon_2_myid, "
"anon_2.name AS anon_2_name, "
"anon_2.description AS anon_2_description "
"FROM (SELECT mytable.myid AS myid, "
"mytable.name AS name, mytable.description "
"AS description FROM mytable) AS anon_2) "
"AS anon_1",
)
def test_nested_label_targeting_keyed(self):
s1 = keyed.select()
s2 = s1.alias()
s3 = select([s2], use_labels=True)
self.assert_compile(
s3,
"SELECT anon_1.x AS anon_1_x, "
"anon_1.y AS anon_1_y, "
"anon_1.z AS anon_1_z FROM "
"(SELECT keyed.x AS x, keyed.y "
"AS y, keyed.z AS z FROM keyed) AS anon_1",
)
s4 = s3.alias()
s5 = select([s4], use_labels=True)
self.assert_compile(
s5,
"SELECT anon_1.anon_2_x AS anon_1_anon_2_x, "
"anon_1.anon_2_y AS anon_1_anon_2_y, "
"anon_1.anon_2_z AS anon_1_anon_2_z "
"FROM (SELECT anon_2.x AS anon_2_x, "
"anon_2.y AS anon_2_y, "
"anon_2.z AS anon_2_z FROM "
"(SELECT keyed.x AS x, keyed.y AS y, keyed.z "
"AS z FROM keyed) AS anon_2) AS anon_1",
)
def test_exists(self):
s = select([table1.c.myid]).where(table1.c.myid == 5)
self.assert_compile(
exists(s),
"EXISTS (SELECT mytable.myid FROM mytable "
"WHERE mytable.myid = :myid_1)",
)
self.assert_compile(
exists(s.as_scalar()),
"EXISTS (SELECT mytable.myid FROM mytable "
"WHERE mytable.myid = :myid_1)",
)
self.assert_compile(
exists([table1.c.myid], table1.c.myid == 5).select(),
"SELECT EXISTS (SELECT mytable.myid FROM "
"mytable WHERE mytable.myid = :myid_1) AS anon_1",
params={"mytable_myid": 5},
)
self.assert_compile(
select([table1, exists([1], from_obj=table2)]),
"SELECT mytable.myid, mytable.name, "
"mytable.description, EXISTS (SELECT 1 "
"FROM myothertable) AS anon_1 FROM mytable",
params={},
)
self.assert_compile(
select([table1, exists([1], from_obj=table2).label("foo")]),
"SELECT mytable.myid, mytable.name, "
"mytable.description, EXISTS (SELECT 1 "
"FROM myothertable) AS foo FROM mytable",
params={},
)
self.assert_compile(
table1.select(
exists()
.where(table2.c.otherid == table1.c.myid)
.correlate(table1)
),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable WHERE "
"EXISTS (SELECT * FROM myothertable WHERE "
"myothertable.otherid = mytable.myid)",
)
self.assert_compile(
table1.select(
exists()
.where(table2.c.otherid == table1.c.myid)
.correlate(table1)
),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable WHERE "
"EXISTS (SELECT * FROM myothertable WHERE "
"myothertable.otherid = mytable.myid)",
)
self.assert_compile(
table1.select(
exists()
.where(table2.c.otherid == table1.c.myid)
.correlate(table1)
).replace_selectable(table2, table2.alias()),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable WHERE "
"EXISTS (SELECT * FROM myothertable AS "
"myothertable_1 WHERE myothertable_1.otheri"
"d = mytable.myid)",
)
self.assert_compile(
table1.select(
exists()
.where(table2.c.otherid == table1.c.myid)
.correlate(table1)
)
.select_from(
table1.join(table2, table1.c.myid == table2.c.otherid)
)
.replace_selectable(table2, table2.alias()),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable JOIN "
"myothertable AS myothertable_1 ON "
"mytable.myid = myothertable_1.otherid "
"WHERE EXISTS (SELECT * FROM myothertable "
"AS myothertable_1 WHERE "
"myothertable_1.otherid = mytable.myid)",
)
self.assert_compile(
select(
[
or_(
exists().where(table2.c.otherid == "foo"),
exists().where(table2.c.otherid == "bar"),
)
]
),
"SELECT (EXISTS (SELECT * FROM myothertable "
"WHERE myothertable.otherid = :otherid_1)) "
"OR (EXISTS (SELECT * FROM myothertable WHERE "
"myothertable.otherid = :otherid_2)) AS anon_1",
)
self.assert_compile(
select([exists([1])]), "SELECT EXISTS (SELECT 1) AS anon_1"
)
self.assert_compile(
select([~exists([1])]), "SELECT NOT (EXISTS (SELECT 1)) AS anon_1"
)
self.assert_compile(
select([~(~exists([1]))]),
"SELECT NOT (NOT (EXISTS (SELECT 1))) AS anon_1",
)
def test_where_subquery(self):
s = select(
[addresses.c.street],
addresses.c.user_id == users.c.user_id,
correlate=True,
).alias("s")
# don't correlate in a FROM list
self.assert_compile(
select([users, s.c.street], from_obj=s),
"SELECT users.user_id, users.user_name, "
"users.password, s.street FROM users, "
"(SELECT addresses.street AS street FROM "
"addresses, users WHERE addresses.user_id = "
"users.user_id) AS s",
)
self.assert_compile(
table1.select(
table1.c.myid
== select([table1.c.myid], table1.c.name == "jack")
),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable WHERE "
"mytable.myid = (SELECT mytable.myid FROM "
"mytable WHERE mytable.name = :name_1)",
)
self.assert_compile(
table1.select(
table1.c.myid
== select(
[table2.c.otherid], table1.c.name == table2.c.othername
)
),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable WHERE "
"mytable.myid = (SELECT "
"myothertable.otherid FROM myothertable "
"WHERE mytable.name = myothertable.othernam"
"e)",
)
self.assert_compile(
table1.select(exists([1], table2.c.otherid == table1.c.myid)),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable WHERE "
"EXISTS (SELECT 1 FROM myothertable WHERE "
"myothertable.otherid = mytable.myid)",
)
talias = table1.alias("ta")
s = subquery(
"sq2", [talias], exists([1], table2.c.otherid == talias.c.myid)
)
self.assert_compile(
select([s, table1]),
"SELECT sq2.myid, sq2.name, "
"sq2.description, mytable.myid, "
"mytable.name, mytable.description FROM "
"(SELECT ta.myid AS myid, ta.name AS name, "
"ta.description AS description FROM "
"mytable AS ta WHERE EXISTS (SELECT 1 FROM "
"myothertable WHERE myothertable.otherid = "
"ta.myid)) AS sq2, mytable",
)
# test constructing the outer query via append_column(), which
# occurs in the ORM's Query object
s = select(
[], exists([1], table2.c.otherid == table1.c.myid), from_obj=table1
)
s.append_column(table1)
self.assert_compile(
s,
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable WHERE "
"EXISTS (SELECT 1 FROM myothertable WHERE "
"myothertable.otherid = mytable.myid)",
)
def test_orderby_subquery(self):
self.assert_compile(
table1.select(
order_by=[
select(
[table2.c.otherid], table1.c.myid == table2.c.otherid
)
]
),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable ORDER BY "
"(SELECT myothertable.otherid FROM "
"myothertable WHERE mytable.myid = "
"myothertable.otherid)",
)
self.assert_compile(
table1.select(
order_by=[
desc(
select(
[table2.c.otherid],
table1.c.myid == table2.c.otherid,
)
)
]
),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable ORDER BY "
"(SELECT myothertable.otherid FROM "
"myothertable WHERE mytable.myid = "
"myothertable.otherid) DESC",
)
def test_scalar_select(self):
assert_raises_message(
exc.InvalidRequestError,
r"Select objects don't have a type\. Call as_scalar\(\) "
r"on this Select object to return a 'scalar' "
r"version of this Select\.",
func.coalesce,
select([table1.c.myid]),
)
s = select([table1.c.myid], correlate=False).as_scalar()
self.assert_compile(
select([table1, s]),
"SELECT mytable.myid, mytable.name, "
"mytable.description, (SELECT mytable.myid "
"FROM mytable) AS anon_1 FROM mytable",
)
s = select([table1.c.myid]).as_scalar()
self.assert_compile(
select([table2, s]),
"SELECT myothertable.otherid, "
"myothertable.othername, (SELECT "
"mytable.myid FROM mytable) AS anon_1 FROM "
"myothertable",
)
s = select([table1.c.myid]).correlate(None).as_scalar()
self.assert_compile(
select([table1, s]),
"SELECT mytable.myid, mytable.name, "
"mytable.description, (SELECT mytable.myid "
"FROM mytable) AS anon_1 FROM mytable",
)
s = select([table1.c.myid]).as_scalar()
s2 = s.where(table1.c.myid == 5)
self.assert_compile(
s2,
"(SELECT mytable.myid FROM mytable WHERE mytable.myid = :myid_1)",
)
self.assert_compile(s, "(SELECT mytable.myid FROM mytable)")
# test that aliases use as_scalar() when used in an explicitly
# scalar context
s = select([table1.c.myid]).alias()
self.assert_compile(
select([table1.c.myid]).where(table1.c.myid == s),
"SELECT mytable.myid FROM mytable WHERE "
"mytable.myid = (SELECT mytable.myid FROM "
"mytable)",
)
self.assert_compile(
select([table1.c.myid]).where(s > table1.c.myid),
"SELECT mytable.myid FROM mytable WHERE "
"mytable.myid < (SELECT mytable.myid FROM "
"mytable)",
)
s = select([table1.c.myid]).as_scalar()
self.assert_compile(
select([table2, s]),
"SELECT myothertable.otherid, "
"myothertable.othername, (SELECT "
"mytable.myid FROM mytable) AS anon_1 FROM "
"myothertable",
)
# test expressions against scalar selects
self.assert_compile(
select([s - literal(8)]),
"SELECT (SELECT mytable.myid FROM mytable) "
"- :param_1 AS anon_1",
)
self.assert_compile(
select([select([table1.c.name]).as_scalar() + literal("x")]),
"SELECT (SELECT mytable.name FROM mytable) "
"|| :param_1 AS anon_1",
)
self.assert_compile(
select([s > literal(8)]),
"SELECT (SELECT mytable.myid FROM mytable) "
"> :param_1 AS anon_1",
)
self.assert_compile(
select([select([table1.c.name]).label("foo")]),
"SELECT (SELECT mytable.name FROM mytable) " "AS foo",
)
# scalar selects should not have any attributes on their 'c' or
# 'columns' attribute
s = select([table1.c.myid]).as_scalar()
try:
s.c.foo
except exc.InvalidRequestError as err:
assert (
str(err)
== "Scalar Select expression has no columns; use this "
"object directly within a column-level expression."
)
try:
s.columns.foo
except exc.InvalidRequestError as err:
assert (
str(err)
== "Scalar Select expression has no columns; use this "
"object directly within a column-level expression."
)
zips = table(
"zips", column("zipcode"), column("latitude"), column("longitude")
)
places = table("places", column("id"), column("nm"))
zipcode = "12345"
qlat = (
select([zips.c.latitude], zips.c.zipcode == zipcode)
.correlate(None)
.as_scalar()
)
qlng = (
select([zips.c.longitude], zips.c.zipcode == zipcode)
.correlate(None)
.as_scalar()
)
q = select(
[
places.c.id,
places.c.nm,
zips.c.zipcode,
func.latlondist(qlat, qlng).label("dist"),
],
zips.c.zipcode == zipcode,
order_by=["dist", places.c.nm],
)
self.assert_compile(
q,
"SELECT places.id, places.nm, "
"zips.zipcode, latlondist((SELECT "
"zips.latitude FROM zips WHERE "
"zips.zipcode = :zipcode_1), (SELECT "
"zips.longitude FROM zips WHERE "
"zips.zipcode = :zipcode_2)) AS dist FROM "
"places, zips WHERE zips.zipcode = "
":zipcode_3 ORDER BY dist, places.nm",
)
# add pkginfo to packages
package_dict = packages.setdefault(pkginfo["name"], {})
package_dict.setdefault("pkgsinfo", []).append(
(pkginfo, status, excluded_tags, shard_repr, default_shard_repr, prepared_tag_shards, included)
)
for sub_manifest, smo_list in sub_manifest_objects.items():
for name, key, excluded_tag_names, shard_repr, default_shard_repr, tag_shards, included in smo_list:
# rehydrate excluded tags using seen tags
excluded_tags = []
if excluded_tag_names:
for excluded_tag_name in excluded_tag_names:
try:
excluded_tags.append(seen_tags[excluded_tag_name])
except KeyError:
logger.warning("Unknown excluded tag name")
# rehydrate tag shards using seen tags
prepared_tag_shards = []
if tag_shards:
for tag_name in sorted(tag_shards.keys()):
try:
prepared_tag_shards.append((seen_tags[tag_name], tag_shards[tag_name]))
except KeyError:
logger.warning("Unknown tag shard name")
# add sub manifest to packages
package_dict = packages.setdefault(name, {})
package_dict.setdefault("sub_manifests", []).append(
(sub_manifest,
key,
excluded_tags,
shard_repr,
default_shard_repr,
prepared_tag_shards,
included)
)
# root keys
manifest_data = manifest.build(machine.tags)
for key, _ in SUB_MANIFEST_PKG_INFO_KEY_CHOICES:
for name in manifest_data.get(key, []):
packages.setdefault(name, {})["manifest"] = key.replace("_", " ")
ctx["packages"] = [(name, packages[name]) for name in sorted(packages.keys(), key=lambda n: n.lower())]
return ctx
# manifest catalogs
class BaseManifestM2MView(FormView):
m2m_model = None
def dispatch(self, request, *args, **kwargs):
self.manifest = Manifest.objects.get(pk=kwargs['pk'])
if self.m2m_model and 'm2m_pk' in kwargs:
self.m2m_object = self.m2m_model.objects.get(pk=kwargs['m2m_pk'])
else:
self.m2m_object = None
return super(BaseManifestM2MView, self).dispatch(request, *args, **kwargs)
def get_form_kwargs(self):
kwargs = super(BaseManifestM2MView, self).get_form_kwargs()
kwargs['manifest'] = self.manifest
return kwargs
def get_context_data(self, **kwargs):
context = super(BaseManifestM2MView, self).get_context_data(**kwargs)
context['monolith'] = True
context['manifest'] = self.manifest
context['m2m_object'] = self.m2m_object
return context
def get_success_url(self):
return self.manifest.get_absolute_url()
def form_valid(self, form):
form.save()
self.manifest.bump_version()
return HttpResponseRedirect(self.get_success_url())
class AddManifestCatalogView(PermissionRequiredMixin, BaseManifestM2MView):
permission_required = "monolith.add_manifestcatalog"
form_class = AddManifestCatalogForm
template_name = "monolith/manifest_catalog_form.html"
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["title"] = "Add catalog"
return ctx
class EditManifestCatalogView(PermissionRequiredMixin, BaseManifestM2MView):
permission_required = "monolith.change_manifestcatalog"
form_class = EditManifestCatalogForm
template_name = "monolith/manifest_catalog_form.html"
m2m_model = Catalog
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs["catalog"] = self.m2m_object
return kwargs
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["title"] = f"Edit {self.m2m_object} catalog tags"
return ctx
class DeleteManifestCatalogView(PermissionRequiredMixin, BaseManifestM2MView):
permission_required = "monolith.delete_manifestcatalog"
form_class = DeleteManifestCatalogForm
template_name = "monolith/delete_manifest_catalog.html"
m2m_model = Catalog
def get_initial(self):
return {'catalog': self.m2m_object}
# manifest enrollment packages
class BaseEditManifestEnrollmentPackageView(TemplateView):
template_name = "monolith/manifest_enrollment_package_forms.html"
def dispatch(self, request, *args, **kwargs):
self.manifest = get_object_or_404(Manifest, pk=kwargs["pk"])
if "mep_pk" in kwargs:
self.manifest_enrollment_package = get_object_or_404(ManifestEnrollmentPackage,
manifest=self.manifest,
pk=kwargs["mep_pk"])
builder = self.manifest_enrollment_package.builder
self.builder_config = monolith_conf.enrollment_package_builders[builder]
self.builder_class = self.manifest_enrollment_package.builder_class
else:
self.manifest_enrollment_package = None
try:
self.builder = request.GET["builder"]
self.builder_config = monolith_conf.enrollment_package_builders[self.builder]
self.builder_class = self.builder_config["class"]
except KeyError:
raise Http404
return super().dispatch(request, *args, **kwargs)
def get_forms(self):
builder_form_kwargs = {
"prefix": "builder",
"enrollment_only": len(self.builder_config["requires"]) > 0
}
mep_form_kwargs = {
"prefix": "mep",
"manifest": self.manifest
}
if self.request.method == "POST":
for kwargs in (builder_form_kwargs, mep_form_kwargs):
kwargs["data"] = self.request.POST
if self.manifest_enrollment_package:
builder_form_kwargs["instance"] = self.manifest_enrollment_package.get_enrollment()
mep_form_kwargs["initial"] = {"tags": self.manifest_enrollment_package.tags.all()}
return (self.builder_class.form(**builder_form_kwargs),
AddManifestEnrollmentPackageForm(**mep_form_kwargs))
def forms_invalid(self, builder_form, mep_form):
return self.render_to_response(self.get_context_data(builder_form=builder_form,
mep_form=mep_form))
def get_context_data(self, **kwargs):
kwargs["manifest"] = self.manifest
if hasattr(self, "manifest_enrollment_package"):
kwargs["manifest_enrollment_package"] = self.manifest_enrollment_package
kwargs["builder_name"] = self.builder_class.name
if "builder_form" not in kwargs or "mep_form" not in kwargs:
kwargs["builder_form"], kwargs["mep_form"] = self.get_forms()
return super().get_context_data(**kwargs)
def post(self, request, *args, **kwargs):
builder_form, mep_form = self.get_forms()
if builder_form.is_valid() and mep_form.is_valid():
return self.forms_valid(builder_form, mep_form)
else:
return self.forms_invalid(builder_form, mep_form)
class AddManifestEnrollmentPackageView(PermissionRequiredMixin, BaseEditManifestEnrollmentPackageView):
permission_required = "monolith.add_manifestenrollmentpackage"
def forms_valid(self, builder_form, mep_form):
# enrollment secret
enrollment_secret = EnrollmentSecret.objects.create(meta_business_unit=self.manifest.meta_business_unit)
# enrollment
enrollment = builder_form.save(commit=False)
enrollment.version = 0 # will be saved one extra time, and start at 1
enrollment.secret = enrollment_secret
enrollment.save()
# manifest enrollment package
mep = ManifestEnrollmentPackage.objects.create(
manifest=self.manifest,
builder=self.builder,
enrollment_pk=enrollment.pk,
version=0 # will be updated by the callback call in enrollment.save()
)
mep.tags.set(mep_form.cleaned_data["tags"])
# link from enrollment to manifest enrollment package, for config update propagation
enrollment.distributor = mep
enrollment.save() # bump mep and manifest versions, and build package via callback call
return redirect(self.manifest)
class UpdateManifestEnrollmentPackageView(PermissionRequiredMixin, BaseEditManifestEnrollmentPackageView):
permission_required = "monolith.change_manifestenrollmentpackage"
def forms_valid(self, builder_form, mep_form):
self.manifest_enrollment_package.tags.set(mep_form.cleaned_data["tags"])
self.manifest_enrollment_package.save()
builder_form.save() # bump mep and manifest versions, and build package via callback call
return redirect(self.manifest)
class DeleteManifestEnrollmentPackageView(PermissionRequiredMixin, TemplateView):
permission_required = "monolith.delete_manifestenrollmentpackage"
template_name = "monolith/delete_manifest_enrollment_package.html"
def dispatch(self, request, *args, **kwargs):
self.manifest_enrollment_package = get_object_or_404(
ManifestEnrollmentPackage,
manifest__id=kwargs["pk"], pk=kwargs["mep_pk"]
)
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['manifest_enrollment_package'] = self.manifest_enrollment_package
context['manifest'] = self.manifest_enrollment_package.manifest
return context
def post(self, request, *args, **kwargs):
manifest = self.manifest_enrollment_package.manifest
self.manifest_enrollment_package.delete()
manifest.bump_version()
return redirect(manifest)
# manifest printers
class AddManifestPrinterView(PermissionRequiredMixin, CreateView):
permission_required = "monolith.add_printer"
model = Printer
form_class = ManifestPrinterForm
template_name = "monolith/manifest_printer_form.html"
def dispatch(self, request, *args, **kwargs):
self.manifest = get_object_or_404(Manifest, pk=kwargs["m_pk"])
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["manifest"] = self.manifest
return ctx
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['manifest'] = self.manifest
return kwargs
def form_valid(self, form):
printer = form.save(commit=False)
printer.manifest = self.manifest
printer.save()
form.save_m2m()
self.manifest.bump_version()
return HttpResponseRedirect("{}#printers".format(self.manifest.get_absolute_url()))
class UpdateManifestPrinterView(PermissionRequiredMixin, UpdateView):
permission_required = "monolith.change_printer"
model = Printer
form_class = ManifestPrinterForm
template_name = "monolith/manifest_printer_form.html"
def dispatch(self, request, *args, **kwargs):
self.manifest = get_object_or_404(Manifest, pk=kwargs["m_pk"])
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["manifest"] = self.manifest
return ctx
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['manifest'] = self.manifest
return kwargs
def form_valid(self, *args, **kwargs):
response = super().form_valid(*args, **kwargs)
self.manifest.bump_version()
return response
def get_success_url(self):
return "{}#printers".format(self.manifest.get_absolute_url())
class DeleteManifestPrinterView(PermissionRequiredMixin, DeleteView):
permission_required = "monolith.delete_printer"
model = Printer
template_name = "monolith/delete_manifest_printer.html"
def dispatch(self, request, *args, **kwargs):
self.manifest = get_object_or_404(Manifest, pk=kwargs["m_pk"])
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["manifest"] = self.manifest
return ctx
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
self.object.mark_as_trashed()
self.manifest.bump_version()
return HttpResponseRedirect("{}#printers".format(self.manifest.get_absolute_url()))
# manifest sub manifests
class AddManifestSubManifestView(PermissionRequiredMixin, BaseManifestM2MView):
permission_required = "monolith.add_manifestsubmanifest"
form_class = AddManifestSubManifestForm
template_name = "monolith/manifest_sub_manifest_form.html"
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["title"] = "Add sub manifest"
return ctx
class EditManifestSubManifestView(PermissionRequiredMixin, BaseManifestM2MView):
permission_required = "monolith.change_manifestsubmanifest"
form_class = EditManifestSubManifestForm
template_name = "monolith/manifest_sub_manifest_form.html"
m2m_model = SubManifest
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs["sub_manifest"] = self.m2m_object
return kwargs
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["title"] = f"Edit {self.m2m_object} sub manifest tags"
return ctx
class DeleteManifestSubManifestView(PermissionRequiredMixin, BaseManifestM2MView):
permission_required = "monolith.delete_manifestsubmanifest"
form_class = DeleteManifestSubManifestForm
template_name = "monolith/delete_manifest_sub_manifest.html"
m2m_model = SubManifest
def get_initial(self):
return {'sub_manifest': self.m2m_object}
class DeleteManifestCacheServerView(PermissionRequiredMixin, View):
permission_required = "monolith.delete_cacheserver"
def post(self, request, *args, **kwargs):
cache_server = get_object_or_404(CacheServer, pk=kwargs["cs_pk"], manifest__pk=kwargs["pk"])
manifest = cache_server.manifest
cache_server.delete()
return HttpResponseRedirect("{}#cache-servers".format(manifest.get_absolute_url()))
# extra
class DownloadPrinterPPDView(View):
@cached_property
def _redirect_to_files(self):
return file_storage_has_signed_urls()
def get(self, request, *args, **kwargs):
try:
printer_ppd = PrinterPPD.objects.get_with_token(kwargs["token"])
except ValueError:
logger.error("Invalid token %s", kwargs["token"])
raise Http404
except PrinterPPD.DoesNotExist:
logger.warning("Could not find printer PPD with token %s", kwargs["token"])
raise Http404
else:
if self._redirect_to_files:
return HttpResponseRedirect(default_storage.url(printer_ppd.file.name))
else:
return FileResponse(printer_ppd.file)
# managedsoftwareupdate API
class MRBaseView(View):
def post_monolith_munki_request(self, **payload):
payload["manifest"] = {"id": self.manifest.id,
"name": str(self.manifest),
"version": self.manifest.version}
post_monolith_munki_request(self.machine_serial_number, self.user_agent, self.ip, **payload)
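# The enrollment secret is expected as the last whitespace-separated token of the
# Authorization header; a hypothetical "Authorization: Bearer abc123" (scheme name
# assumed, not required by the parsing below) would yield "abc123".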
def get_secret(self, request):
try:
return request.META["HTTP_AUTHORIZATION"].strip().split()[-1]
except (AttributeError, IndexError, KeyError):
raise PermissionDenied("Could not read enrollment secret")
def get_serial_number(self, request):
try:
return request.META["HTTP_X_ZENTRAL_SERIAL_NUMBER"].strip()
except (AttributeError, KeyError):
raise PermissionDenied("Missing custom serial number header")
def get_uuid(self, request):
try:
return request.META["HTTP_X_ZENTRAL_UUID"].strip()
except (AttributeError, KeyError):
raise PermissionDenied("Missing custom UUID header")
def enroll_machine(self, request, secret, serial_number):
uuid = self.get_uuid(request)
try:
es_request = verify_enrollment_secret(
"monolith_enrollment", secret,
self.user_agent, self.ip, serial_number, uuid
)
except EnrollmentSecretVerificationFailed:
raise PermissionDenied("Enrollment secret verification failed")
enrollment = es_request.enrollment_secret.monolith_enrollment
# get or create enrolled machine
enrolled_machine, enrolled_machine_created = EnrolledMachine.objects.get_or_create(
enrollment=enrollment,
serial_number=serial_number,
)
if enrolled_machine_created:
# apply enrollment secret tags
for tag in es_request.enrollment_secret.tags.all():
MachineTag.objects.get_or_create(serial_number=serial_number, tag=tag)
post_monolith_enrollment_event(serial_number, self.user_agent, self.ip, {'action': "enrollment"})
return enrolled_machine
def get_enrolled_machine_and_tags(self, request):
secret = self.get_secret(request)
serial_number = self.get_serial_number(request)
cache_key = "{}{}".format(secret, serial_number)
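# cache.get() returns None on a miss, so unpacking the result below raises TypeError,
# which is caught to fall back to the database lookup / enrollment path.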
try:
enrolled_machine, tags = cache.get(cache_key)
except TypeError:
try:
enrolled_machine = (EnrolledMachine.objects.select_related("enrollment__secret",
"enrollment__manifest")
.get(enrollment__secret__secret=secret,
serial_number=serial_number))
except EnrolledMachine.DoesNotExist:
enrolled_machine = self.enroll_machine(request, secret, serial_number)
machine = MetaMachine(serial_number)
tags = machine.tags
cache.set(cache_key, (enrolled_machine, tags), 600)
return enrolled_machine, tags
def dispatch(self, request, *args, **kwargs):
self.user_agent, self.ip = user_agent_and_ip_address_from_request(request)
enrolled_machine, self.tags = self.get_enrolled_machine_and_tags(request)
self.machine_serial_number = enrolled_machine.serial_number
self.manifest = enrolled_machine.enrollment.manifest
return super().dispatch(request, *args, **kwargs)
class MRNameView(MRBaseView):
def get_request_args(self, name):
try:
model, key = parse_munki_name(name)
except MunkiNameError:
model = key = None
return model, key
def get_cache_key(self, model, key):
items = ["monolith",
self.manifest.pk, self.manifest.version]
items.extend(sorted(t.id for t in self.tags))
items.append(model)
if isinstance(key, list):
items.extend(key)
else:
items.append(key)
return ".".join(str(i) for i in items)
def get(self, request, *args, **kwargs):
name = kwargs["name"]
event_payload = {"type": self.event_payload_type,
"name": name}
model, key = self.get_request_args(name)
if model is None or key is None:
error = True
response = HttpResponseForbidden("No no no!")
else:
cache_key = self.get_cache_key(model, key)
event_payload.update({
"subtype": model,
"cache": {
"key": cache_key,
"hit": False
}
})
response = self.do_get(model, key, cache_key, event_payload)
if not response:
error = True
response = HttpResponseNotFound("Not found!")
else:
error = False
event_payload["error"] = error
self.post_monolith_munki_request(**event_payload)
return response
class MRCatalogView(MRNameView):
event_payload_type = "catalog"
def do_get(self, model, key, cache_key, event_payload):
if model == "manifest_catalog" and key == self.manifest.pk:
catalog_data = cache.get(cache_key)
if not isinstance(catalog_data,
#!/usr/bin/env python3
""" Bounding box
Provides a variable dimensionaly bounding box
"""
import abc
import numpy as np
from kmeans.activation import Activation
class BoundingBox(metaclass=abc.ABCMeta):
"""
Abstract data type representing a bounding box.
"""
# Margins for rounding numbers to allow for float variance.
# Used in Lowerbound to determine how close to 0 an activation needs to be
# before it is treated as 0
margin = 0.0001
zero_margin = 0.0000001
def __init__(self, ll: np.array=None, ur: np.array=None):
"""
Base constructor. Note that we can't do much until we have a point added.
"""
self.lower_left = ll
self.upper_right = ur
@abc.abstractmethod
def expand(self, point: Activation) -> None:
"""
Expand the bounding box so that it includes the supplied point
:param point: to add
"""
raise NotImplementedError("Use a subtype")
@abc.abstractmethod
def get_centre(self) -> np.array:
"""
calculate the centre point of the bounding box
:return: a numpy array
"""
if self.lower_left is None:
# Not yet initialised
return None
return np.mean((self.lower_left, self.upper_right), axis=0)
@abc.abstractmethod
def contains(self, point: Activation) -> bool:
"""
test whether the given box contains a specific point
:param point: the point to add
:return: true iff the point is contained within the bounding box
"""
raise NotImplementedError("Use a subtype")
@abc.abstractmethod
def escape_distance_L1(self, point: Activation) -> float:
"""
calculate an approximation of how far outside the cluster a given point falls.
this is defined as the sum of the distance from the closest limit to the point on each axis
for all axes where the point falls outside the cluster.
L1 distance or block distance (sum of errors)
:param point: a point to test
:return: the escape distance for the point
"""
under = self.lower_left - point.vector
under_distance = np.sum(np.where(under > 0, under, 0.0))
over = point.vector - self.upper_right
over_distance = np.sum(np.where(over > 0, over, 0.0))
return under_distance + over_distance
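# Worked example (hypothetical values): with lower_left = [0, 0], upper_right = [2, 2]
# and a point at [3, -1], under = [-3, 1] contributes 1 and over = [1, -3] contributes 1,
# so the L1 escape distance is 2 (one unit outside on each axis).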
@abc.abstractmethod
def _get_shortest_edge_vector(self, point:Activation):
"""
Calculate the shortest vector to the edge of the bounding box from a _contained_ point.
:param point:
:return: a vector representing the distance, or None
"""
if not self.contains(point):
return None
to_under = point.vector - self.lower_left
to_upper = self.upper_right - point.vector
# As we _know_ the point is contained, all values in to_under and to_upper are >=0
return np.minimum(to_under, to_upper)
def internal_distance_L1(self, point:Activation) -> float:
""" Calculates the minimum distance from a _contained_ point to the
edge of the bounding box using the L1 metric
"""
closest = self._get_shortest_edge_vector(point)
if closest is None:
raise ValueError("Point must be contained")
return np.sum(closest)
def internal_distance_L2(self, point: Activation) -> float:
""" Calculates the minimum distance from a _contained_ point to the
edge of the bounding box using the L2 metric
"""
closest = self._get_shortest_edge_vector(point)
if closest is None:
raise ValueError("Point must be contained")
return np.linalg.norm(closest)
@abc.abstractmethod
def escape_distance_L0(self, point: Activation) -> int:
"""
count the number of dimensions on which a given point falls outside the cluster.
this is defined as the number of axes on which the point falls outside the
cluster's limits.
L0 distance or number of nonzero errors, equiv. to weight in binary vectors or Hamming distance
:param point: a point to test
:return: the escape distance for the point
"""
under = self.lower_left - point.vector
under_distance = np.sum(np.where(under > 0, 1.0, 0.0))
over = point.vector - self.upper_right
over_distance = np.sum(np.where(over > 0, 1.0, 0.0))
return under_distance + over_distance
@abc.abstractmethod
def escape_distance_L2(self, point: Activation) -> float:
"""
calculate how far outside the cluster a given point falls.
this is defined as the Euclidean (L2) norm of the per-axis distances from the closest
limit to the point, for all axes where the point falls outside the cluster.
L2 (Pythagorean) distance
:param point: a point to test
:return: the escape distance for the point
"""
under = self.lower_left - point.vector
under_distance = np.where(under > 0, under, 0.0)
over = point.vector - self.upper_right
over_distance = np.where(over > 0, over, 0.0)
return np.linalg.norm(under_distance + over_distance)
def __eq__(self, other):
"""
Equality test
:param other: the object to test against
:return: true iff the object is considered equal
"""
if not isinstance(other, self.__class__):
return False
return np.all(self.lower_left == other.lower_left) and \
np.all(self.upper_right == other.upper_right)
def __hash__(self):
""" Hash function for sets"""
return hash((tuple(self.upper_right),
tuple(self.lower_left)))
def __repr__(self):
""" bounding box representation string"""
return 'BoundingBox: {} to {}'.format(
self.lower_left, self.upper_right)
class LowerDimensionedBoundingBox(BoundingBox):
""" LowerDimensionedBoundingBox
Defines a bounding box to contain a region of point space.
These bounding boxes only exist in a subset of point space's dimensions,
specifically those to which it has been shown a non-0 value in a point
"""
def __init__(self) -> None:
""" create an empty bounding box """
super().__init__()
self.upper_right = None
# an array of all dimensions in which we have seen a non-zero
# activation.
self.filter = None
# an array of all dimensions in which we have seen a zero activation.
self.zero_filter = None
self.count = 0
def expand(self, activation: Activation):
""" Increase the boundingbox to include the passed activation."""
self.count += 1
if self.lower_left is None:
# Make sure that we actually have a box
self.lower_left = np.where(
activation.vector != 0, activation.vector - self.margin, 0.0)
self.upper_right = np.where(
activation.vector != 0, activation.vector + self.margin, 0.0)
# self.filter = np.logical_or(activation.vector > self.zero_margin, activation.vector < -self.zero_margin)
self.filter = activation.vector != 0
self.zero_filter = np.logical_not(self.filter)
return
# Note: we assume all points have the same dimensionality.
self.filter = np.logical_or(self.filter, (activation.vector != 0.0))
self.zero_filter = np.logical_or(
self.zero_filter, (activation.vector == 0.0))
self.lower_left = np.min((self.lower_left, np.where(
activation.vector != 0, activation.vector - self.margin, 0.0)), axis=0)
self.upper_right = np.max((self.upper_right, np.where(
activation.vector != 0, activation.vector + self.margin, 0.0)), axis=0)
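# Worked example (hypothetical activations): expanding with [0.5, 0.0] and then [0.3, 0.2]
# (margin 0.0001) leaves filter = [True, True], zero_filter = [False, True],
# lower_left ~ [0.2999, 0.0] and upper_right ~ [0.5001, 0.2001]; the second axis keeps
# its zero lower bound because a zero activation was seen there.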
def get_centre(self) -> np.array:
""" calculate the point at the centre of the bounding box."""
if self.lower_left is None:
return None
return np.where(self.filter, np.mean(
(self.lower_left, self.upper_right), axis=0), 0.0)
def contains(self, activation: Activation) -> bool:
""" Determine if a activation lies within the box.
Note that a activation on the lower border is considered 'in',
while one on the upper border is considered 'out'.
"""
# if it has a value on an axis in which we don't exist, skip
if np.any(np.where(self.filter, 0.0, activation.vector)):
return False
allowed_zeros = np.logical_and(
self.zero_filter, activation.vector == 0.0)
# Note that we take containment as inclusive on the lower bound only
contain = np.logical_and(
self.lower_left <= activation.vector, self.upper_right > activation.vector)
# ignore filtered overlaps
distance = np.size(
self.filter) - np.sum(np.logical_or(np.where(self.filter, contain, True),
allowed_zeros))
return distance == 0
def _get_shortest_edge_vector(self, point:Activation):
"""
Calculate the shortest vector to the edge of the bounding box from a _contained_ point.
:param point:
:return: a vector representing the distance, or None
"""
if not self.contains(point):
return None
to_under = np.where(self.filter, point.vector - self.lower_left, 0)
to_upper = np.where(self.filter, self.upper_right - point.vector, 0)
# As we _know_ the point is contained, all values in to_under and to_upper are >=0
return np.minimum(to_under, to_upper)
def escape_distance_L1(self, point: Activation) -> float:
"""L1 Norm
"""
outside_distance = abs(np.sum(point.vector[self.zero_filter]))
under = np.where(self.filter, self.lower_left - point.vector, 0.0)
under_distance = np.sum(np.where(under > 0, under, 0.0))
over = np.where(self.filter, point.vector - self.upper_right, 0.0)
over_distance = np.sum(np.where(over > 0, over, 0.0))
return outside_distance + under_distance + over_distance
def escape_distance_L2(self, point: Activation) -> float:
"""L2 Norm - pythaogorean distance
"""
mask = np.ones(len(self.filter), np.bool)
mask[self.filter] = False
outside_distance = np.where(mask, point.vector, 0.0)
zero_values = np.logical_and(self.zero_filter, point.vector)
under = np.where(zero_values, 0.0, self.lower_left - point.vector)
under_distance = np.where(under > 0, under, 0.0)
over = np.where(self.filter, point.vector - self.upper_right, 0.0)
over_distance = np.where(over > 0, over, 0.0)
return np.linalg.norm(
outside_distance + under_distance + over_distance)
def escape_distance_L0(self, point: Activation) -> int:
"""L0 Norm - no of non zero dimensions
"""
mask = np.ones(len(self.filter), np.bool)
mask[self.filter] = False
# vector of dimensions which have always been zero
outside_vector = point.vector[mask]
outside_distance = np.sum(np.where(outside_vector != 0, 1, 0))
zero_values = np.logical_and(self.zero_filter, point.vector)
under = np.where(zero_values, 0.0, self.lower_left - point.vector)
under_distance = np.sum(np.where(under > 0, 1.0, 0.0))
over = np.where(self.filter, point.vector - self.upper_right, 0.0)
over_distance = np.sum(np.where(over > 0, 1.0, 0.0))
return outside_distance + under_distance + over_distance
Healthcare"),
("American College of Traditional Chinese Medicine","American College of Traditional Chinese Medicine"),
("American Commercial College-Abilene","American Commercial College-Abilene"),
("American Commercial College-Odessa","American Commercial College-Odessa"),
("American Commercial College-San Angelo","American Commercial College-San Angelo"),
("American Commercial College-Wichita Falls","American Commercial College-Wichita Falls"),
("American Conservatory Theater","American Conservatory Theater"),
("American Educational College","American Educational College"),
("American Film Institute Conservatory","American Film Institute Conservatory"),
("American Hair Academy","American Hair Academy"),
("American Health Institute","American Health Institute"),
("American Indian College of the Assemblies of God Inc","American Indian College of the Assemblies of God Inc"),
("American Indian OIC Inc","American Indian OIC Inc"),
("American Institute of Alternative Medicine","American Institute of Alternative Medicine"),
("American Institute of Beauty","American Institute of Beauty"),
("American Institute of Interior Design","American Institute of Interior Design"),
("American Institute of Massage Therapy","American Institute of Massage Therapy"),
("American Institute of Medical Sciences & Education","American Institute of Medical Sciences & Education"),
("American Institute of Medical Sonography","American Institute of Medical Sonography"),
("American Institute of Medical Technology","American Institute of Medical Technology"),
("American Institute of Technology","American Institute of Technology"),
("American Institute","American Institute"),
("American Institute-Margate","American Institute-Margate"),
("American Institute-Toms River","American Institute-Toms River"),
("American InterContinental University-Atlanta","American InterContinental University-Atlanta"),
("American InterContinental University-Houston","American InterContinental University-Houston"),
("American InterContinental University-Online","American InterContinental University-Online"),
("American InterContinental University-South Florida","American InterContinental University-South Florida"),
("American International College","American International College"),
("American Jewish University","American Jewish University"),
("American Medical Academy","American Medical Academy"),
("American Medical Sciences Center","American Medical Sciences Center"),
("American Musical and Dramatic Academy","American Musical and Dramatic Academy"),
("American National College","American National College"),
("American National University","American National University"),
("American Public University System","American Public University System"),
("American River College","American River College"),
("American Samoa Community College","American Samoa Community College"),
("American School of Business","American School of Business"),
("American School of Technology","American School of Technology"),
("American Sentinel University","American Sentinel University"),
("American Technical Institute","American Technical Institute"),
("American Trade School","American Trade School"),
("American University of Health Sciences","American University of Health Sciences"),
("American University of Puerto Rico","American University of Puerto Rico"),
("American University of Puerto Rico","American University of Puerto Rico"),
("American University","American University"),
("Amherst College","Amherst College"),
("Amridge University","Amridge University"),
("Anabaptist Mennonite Biblical Seminary","Anabaptist Mennonite Biblical Seminary"),
("Anamarc College-El Paso Central","Anamarc College-El Paso Central"),
("Anamarc College-El Paso East","Anamarc College-El Paso East"),
("Anamarc College-Santa Teresa","Anamarc College-Santa Teresa"),
("Ancilla College","Ancilla College"),
("Anderson University","Anderson University"),
("Anderson University","Anderson University"),
("Andover Newton Theological School","Andover Newton Theological School"),
("Andrew College","Andrew College"),
("Andrews University","Andrews University"),
("Angeles College","Angeles College"),
("Angeles Institute","Angeles Institute"),
("Angelina College","Angelina College"),
("Angelo State University","Angelo State University"),
("Ann Arbor Institute of Massage Therapy","Ann Arbor Institute of Massage Therapy"),
("Ann Marie's World of Beauty School","Ann Marie's World of Beauty School"),
("Anna Maria College","Anna Maria College"),
("Anne Arundel Community College","Anne Arundel Community College"),
("Annenberg School of Nursing","Annenberg School of Nursing"),
("Anoka Technical College","Anoka Technical College"),
("Anoka-Ramsey Community College","Anoka-Ramsey Community College"),
("Antelope Valley College","Antelope Valley College"),
("Anthem Career College-Memphis","Anthem Career College-Memphis"),
("Anthem Career College-Nashville","Anthem Career College-Nashville"),
("Anthem College-Atlanta","Anthem College-Atlanta"),
("Anthem College-Brookfield","Anthem College-Brookfield"),
("Anthem College-Denver","Anthem College-Denver"),
("Anthem College-Fenton","Anthem College-Fenton"),
("Anthem College-Irving","Anthem College-Irving"),
("Anthem College-Kansas City","Anthem College-Kansas City"),
("Anthem College-Maryland Heights","Anthem College-Maryland Heights"),
("Anthem College-Minnesota","Anthem College-Minnesota"),
("Anthem College-Orlando","Anthem College-Orlando"),
("Anthem College-Phoenix","Anthem College-Phoenix"),
("Anthem College-Portland","Anthem College-Portland"),
("Anthem College-Sacramento","Anthem College-Sacramento"),
("Anthem Institute-Cherry Hill","Anthem Institute-Cherry Hill"),
("Anthem Institute-Jersey City","Anthem Institute-Jersey City"),
("Anthem Institute-Las Vegas","Anthem Institute-Las Vegas"),
("Anthem Institute-North Brunswick","Anthem Institute-North Brunswick"),
("Anthem Institute-Parsippany","Anthem Institute-Parsippany"),
("Anthem Institute-Springfield","Anthem Institute-Springfield"),
("Antilles School of Technical Careers","Antilles School of Technical Careers"),
("Antioch College","Antioch College"),
("Antioch University-Los Angeles","Antioch University-Los Angeles"),
("Antioch University-Midwest","Antioch University-Midwest"),
("Antioch University-New England","Antioch University-New England"),
("Antioch University-PhD Program in Leadership and Change","Antioch University-PhD Program in Leadership and Change"),
("Antioch University-Santa Barbara","Antioch University-Santa Barbara"),
("Antioch University-Seattle","Antioch University-Seattle"),
("Antioch University-System Administration","Antioch University-System Administration"),
("Antonelli College-Cincinnati","Antonelli College-Cincinnati"),
("Antonelli College-Hattiesburg","Antonelli College-Hattiesburg"),
("Antonelli College-Jackson","Antonelli College-Jackson"),
("Antonelli Institute","Antonelli Institute"),
("Antonelli Medical and Professional Institute","Antonelli Medical and Professional Institute"),
("Apex Academy of Hair Design Inc","Apex Academy of Hair Design Inc"),
("Apex School of Theology","Apex School of Theology"),
("Apex Technical School","Apex Technical School"),
("Apollo Career Center","Apollo Career Center"),
("Apostolic Bible Institute Inc","Apostolic Bible Institute Inc"),
("Appalachian Beauty School","Appalachian Beauty School"),
("Appalachian Bible College","Appalachian Bible College"),
("Appalachian College of Pharmacy","Appalachian College of Pharmacy"),
("Appalachian School of Law","Appalachian School of Law"),
("Appalachian State University","Appalachian State University"),
("Applied Technology Services","Applied Technology Services"),
("Aquinas College","Aquinas College"),
("Aquinas College","Aquinas College"),
("Aquinas Institute of Theology","Aquinas Institute of Theology"),
("Arapahoe Community College","Arapahoe Community College"),
("Arcadia University","Arcadia University"),
("Argosy University-Atlanta","Argosy University-Atlanta"),
("Argosy University-Chicago","Argosy University-Chicago"),
("Argosy University-Dallas","Argosy University-Dallas"),
("Argosy University-Denver","Argosy University-Denver"),
("Argosy University-Hawaii","Argosy University-Hawaii"),
("Argosy University-Inland Empire","Argosy University-Inland Empire"),
("Argosy University-Los Angeles","Argosy University-Los Angeles"),
("Argosy University-Nashville","Argosy University-Nashville"),
("Argosy University-Orange County","Argosy University-Orange County"),
("Argosy University-Phoenix Online Division","Argosy University-Phoenix Online Division"),
("Argosy University-Phoenix","Argosy University-Phoenix"),
("Argosy University-Salt Lake City","Argosy University-Salt Lake City"),
("Argosy University-San Diego","Argosy University-San Diego"),
("Argosy University-San Francisco Bay Area","Argosy University-San Francisco Bay Area"),
("Argosy University-Sarasota","Argosy University-Sarasota"),
("Argosy University-Schaumburg","Argosy University-Schaumburg"),
("Argosy University-Seattle","Argosy University-Seattle"),
("Argosy University-Tampa","Argosy University-Tampa"),
("Argosy University-Twin Cities","Argosy University-Twin Cities"),
("Argosy University-Washington DC","Argosy University-Washington DC"),
("Aria Health School of Nursing","Aria Health School of Nursing"),
("Arizona Academy of Beauty-East","Arizona Academy of Beauty-East"),
("Arizona Academy of Beauty-North","Arizona Academy of Beauty-North"),
("Arizona Automotive Institute","Arizona Automotive Institute"),
("Arizona Board of Regents","Arizona Board of Regents"),
("Arizona Christian University","Arizona Christian University"),
("Arizona College-Glendale","Arizona College-Glendale"),
("Arizona College-Mesa","Arizona College-Mesa"),
("Arizona Culinary Institute","Arizona Culinary Institute"),
("Arizona School of Acupuncture and Oriental Medicine","Arizona School of Acupuncture and Oriental Medicine"),
("Arizona State University-Downtown Phoenix","Arizona State University-Downtown Phoenix"),
("Arizona State University-Polytechnic","Arizona State University-Polytechnic"),
("Arizona State University-Skysong","Arizona State University-Skysong"),
("Arizona State University-Tempe","Arizona State University-Tempe"),
("Arizona State University-West","Arizona State University-West"),
("Arizona Summit Law School","Arizona Summit Law School"),
("Arizona Western College","Arizona Western College"),
("Arkansas Baptist College","Arkansas Baptist College"),
("Arkansas Beauty College","Arkansas Beauty College"),
("Arkansas Beauty School","Arkansas Beauty School"),
("Arkansas Beauty School","Arkansas Beauty School"),
("Arkansas College of Barbering and Hair Design","Arkansas College of Barbering and Hair Design"),
("Arkansas Northeastern College","Arkansas Northeastern College"),
("Arkansas State University-Beebe","Arkansas State University-Beebe"),
("Arkansas State University-Main Campus","Arkansas State University-Main Campus"),
("Arkansas State University-Mountain Home","Arkansas State University-Mountain Home"),
("Arkansas State University-Newport","Arkansas State University-Newport"),
("Arkansas State University-System Office","Arkansas State University-System Office"),
("Arkansas Tech University","Arkansas Tech University"),
("Arlington Baptist College","Arlington Baptist College"),
("Arlington Career Institute","Arlington Career Institute"),
("Arlington Medical Institute","Arlington Medical Institute"),
("Armstrong Atlantic State University","Armstrong Atlantic State University"),
("Arnolds Beauty School","Arnolds Beauty School"),
("Arnot Ogden Medical Center","Arnot Ogden Medical Center"),
("Arrojo Cosmetology School","Arrojo Cosmetology School"),
("Art Academy of Cincinnati","Art Academy of Cincinnati"),
("Art Center College of Design","Art Center College of Design"),
("Arthur's Beauty College Inc-Fort Smith","Arthur's Beauty College Inc-Fort Smith"),
("Arthur's Beauty College Inc-Jacksonville","Arthur's Beauty College Inc-Jacksonville"),
("Arthur's Beauty School Inc-Conway","Arthur's Beauty School Inc-Conway"),
("Arthur's Beauty School Inc-Pine Bluff","Arthur's Beauty School Inc-Pine Bluff"),
("Artistic Academy of Hair Design","Artistic Academy of Hair Design"),
("Artistic Nails and Beauty Academy-Lakeland","Artistic Nails and Beauty Academy-Lakeland"),
("Artistic Nails and Beauty Academy-Tampa","Artistic Nails and Beauty Academy-Tampa"),
("Asbury Theological Seminary","Asbury Theological Seminary"),
("Asbury University","Asbury University"),
("Ashdown College of Health Sciences","Ashdown College of Health Sciences"),
("Asher College","Asher College"),
("Asheville-Buncombe Technical Community College","Asheville-Buncombe Technical Community College"),
("Ashford University","Ashford University"),
("Ashland Community and Technical College","Ashland Community and Technical College"),
("Ashland County-West Holmes Career Center","Ashland County-West Holmes Career Center"),
("Ashland University","Ashland University"),
("Ashtabula County Technical and Career Campus","Ashtabula County Technical and Career Campus"),
("Asian American International Beauty College","Asian American International Beauty College"),
("Asnuntuck Community College","Asnuntuck Community College"),
("Aspen Beauty Academy of Laurel","Aspen Beauty Academy of Laurel"),
("Aspen University","Aspen University"),
("Assabet Valley Regional Technical School","Assabet Valley Regional Technical School"),
("Assemblies of God Theological Seminary","Assemblies of God Theological Seminary"),
("Associated Beth Rivkah Schools","Associated Beth Rivkah Schools"),
("Associated Technical College-Los Angeles","Associated Technical College-Los Angeles"),
("Associated Technical College-San Diego","Associated Technical College-San Diego"),
("Assumption College for Sisters","Assumption College for Sisters"),
("Assumption College","Assumption College"),
("Astrodome Career Centers","Astrodome Career Centers"),
("Atelier Esthetique Institute of Esthetics","Atelier Esthetique Institute of Esthetics"),
("Atenas College","Atenas College"),
("Athena Career Academy","Athena Career Academy"),
("Athenaeum of Ohio","Athenaeum of Ohio"),
("Athens State University","Athens State University"),
("Athens Technical College","Athens Technical College"),
("Atlanta Beauty & Barber Academy","Atlanta Beauty & Barber Academy"),
("Atlanta Institute of Music","Atlanta Institute of Music"),
("Atlanta Metropolitan State College","Atlanta Metropolitan State College"),
("Atlanta School of Massage","Atlanta School of Massage"),
("Atlanta Technical College","Atlanta Technical College"),
("Atlanta's John Marshall Law School","Atlanta's John Marshall Law School"),
("Atlantic Beauty & Spa Academy LLC","Atlantic Beauty & Spa Academy LLC"),
("Atlantic Cape Community College","Atlantic Cape Community College"),
("Atlantic Institute of Oriental Medicine","Atlantic Institute of Oriental Medicine"),
("Atlantic Technical Center","Atlantic Technical Center"),
("Atlantic University College","Atlantic University College"),
("Auburn Career Center","Auburn Career Center"),
("Auburn University at Montgomery","Auburn University at Montgomery"),
("Auburn University","Auburn University"),
("Augsburg College","Augsburg College"),
("Augusta Area Dietetic Internship-University Hospital","Augusta Area Dietetic Internship-University Hospital"),
("Augusta School of Massage","Augusta School of Massage"),
("Augusta State University","Augusta State University"),
("Augusta Technical College","Augusta Technical College"),
("Augustana College","Augustana College"),
("Augustana College","Augustana College"),
("Auguste Escoffier School of Culinary Arts-Austin","Auguste Escoffier School of Culinary Arts-Austin"),
("Auguste Escoffier School of Culinary Arts-Boulder","Auguste Escoffier School of Culinary Arts-Boulder"),
("Aultman College of Nursing and Health Sciences","Aultman College of Nursing and Health Sciences"),
("Aurora University","Aurora University"),
("Austin College","Austin College"),
("Austin Community College District","Austin Community College District"),
("Austin Graduate School of Theology","Austin Graduate School of Theology"),
("Austin Kade Academy","Austin Kade Academy"),
("Austin Peay State University","Austin Peay State University"),
("Austin Presbyterian Theological Seminary","Austin Presbyterian Theological Seminary"),
("Austin's School of Spa Technology","Austin's School of Spa Technology"),
("Automeca Technical College-Aguadilla","Automeca Technical College-Aguadilla"),
("Automeca Technical College-Bayamon","Automeca Technical College-Bayamon"),
("Automeca Technical College-Caguas","Automeca Technical College-Caguas"),
("Automeca Technical College-Ponce","Automeca Technical College-Ponce"),
("Automotive Training Center-Exton","Automotive Training Center-Exton"),
("Automotive Training Center-Warminster","Automotive Training Center-Warminster"),
("Autry Technology Center","Autry Technology Center"),
("Avalon School of Cosmetology","Avalon School of Cosmetology"),
("Avalon School of Cosmetology-Alameda","Avalon School of Cosmetology-Alameda"),
("Avalon School of Cosmetology-Layton","Avalon School of Cosmetology-Layton"),
("Avalon School of Cosmetology-Mesa","Avalon School of Cosmetology-Mesa"),
("Avalon School of Cosmetology-Phoenix","Avalon School of Cosmetology-Phoenix"),
("Avance Beauty College","Avance Beauty College"),
("Avant Gard The School","Avant Gard The School"),
("Ave Maria School of Law","Ave Maria School of Law"),
("Ave Maria University","Ave Maria University"),
("Aveda Fredric's Institute-Cincinnati","Aveda Fredric's Institute-Cincinnati"),
("Aveda Fredric's | |
# dataset/utils.py
from math import ceil
from skimage import io, img_as_ubyte
from skimage.transform import resize
import cv2
from PIL import ImageFile
import os
import json
from shutil import copyfile, move
import numpy as np
from matplotlib import image
from sklearn.neighbors import KDTree
from sklearn import decomposition
from sklearn import preprocessing
ImageFile.LOAD_TRUNCATED_IMAGES = True
def resize_image(img_dir, new_img_dir_name, x, y):
"""
Resizes the image at the given path and saves the result to a new path
Inputs:
:param img_dir: path of the image you want to resize
:param new_img_dir_name: new path for the resized image at XxY resolution
:param x: X resolution of XxY resolution
:param y: Y resolution of XxY resolution
:return: None
"""
if img_dir:
img = io.imread(img_dir)
img_rescaled = resize(img, (x, y, 3), anti_aliasing=True)
io.imsave(new_img_dir_name, img_as_ubyte(img_rescaled))
def resize_path(path, new_path, x, y):
"""
Resize all images within a directory
:param path: directory path
:param new_path: new directory path
:param x: X resolution of XxY resolution
:param y: Y resolution of xxY resolution
:return: None
"""
for root, dirs, files in os.walk(path):
for file in files:
split = root.replace(os.sep, '/').split('/')[3]
genre = root.replace(os.sep, '/').split('/')[4]
#resize_image function declared above
resize_image(root+'/'+file, new_path+'/'+split+'/'+genre+'/'+file, x, y)
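# Illustrative call for resize_path (added as an example; the paths are
# placeholders, not the project's real directories). The split/genre indexing
# above assumes a layout like <drive>/<dataset>/<name>/<split>/<genre>/<file>,
# and the matching split/genre folders must already exist under the target.
def example_resize_dataset():
    resize_path('E:/dataset/cover', 'E:/dataset/cover_256', 256, 256)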
def dhash(image, hashSize=8):
"""
Hash algorithm function for images. Copied from :
https://www.pyimagesearch.com/2020/04/20/detect-and-remove-duplicate-images-from-a-dataset-for-deep-learning/
Inputs:
:param image: image to hash
:param hashSize: size of output hash
:return: hash
"""
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
resized = cv2.resize(gray, (hashSize + 1, hashSize))
# compute the (relative) horizontal gradient between adjacent
# column pixels
diff = resized[:, 1:] > resized[:, :-1]
# convert the difference image to a hash and return it
return sum([2 ** i for (i, v) in enumerate(diff.flatten()) if v])
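# Small usage sketch for dhash (added for illustration; the file paths are
# placeholders). Two covers are treated as duplicates when their difference
# hashes are equal, which is how check_same_covers_hash below groups images.
def example_compare_covers(path_a, path_b):
    """Return True if the two images hash to the same dhash value."""
    image_a = cv2.imread(path_a)
    image_b = cv2.imread(path_b)
    if image_a is None or image_b is None:
        raise ValueError("one of the images could not be read")
    return dhash(image_a) == dhash(image_b)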
def split_set_genre(path, genre, new_path):
"""
Split dataset into three splits: train, test and val
Inputs:
:param path: initial dataset directory
:param genre: genre of the dataset directory
:param new_path: new dataset directory
:return: None
"""
    # Create split directories if they don't exist
if not os.path.exists(new_path + '/train'):
os.mkdir(new_path + '/train')
if not os.path.exists(new_path + '/val'):
os.mkdir(new_path + '/val')
if not os.path.exists(new_path + '/test'):
os.mkdir(new_path + '/test')
# Create genre directory for each split
if not os.path.exists(new_path + '/train/' + genre):
os.mkdir(new_path + '/train/' + genre)
if not os.path.exists(new_path + '/val/' + genre):
os.mkdir(new_path + '/val/' + genre)
if not os.path.exists(new_path + '/test/' + genre):
os.mkdir(new_path + '/test/' + genre)
count = 0
for root, dirs, files in os.walk(path):
train_len = int(len(files) * 0.8)
val_len = ceil(len(files) * 0.1)
test_len = len(files)-train_len-val_len-1
for file in files:
count += 1
if count < train_len:
copyfile(root + '/' + file, new_path + '/train/' + genre + '/' + file)
elif train_len <= count < (train_len + val_len):
copyfile(root + '/' + file, new_path + '/val/' + genre + '/' + file)
elif count >= train_len + val_len:
copyfile(root + '/' + file, new_path + '/test/' + genre + '/' + file)
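# Illustrative call for split_set_genre (added as an example; the directories
# are placeholders, not the project's real paths).
def example_split_one_genre():
    split_set_genre('E:/cover_dataset/rock', 'rock', 'E:/dataset/cover')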
def check_compilation(path_audio, root, genre, file, count_comp, count_new_file, count_repeat):
    '''Check if a cover is a compilation by reading the tags in the json file (path_audio)
    and copy covers that are unique and not compilations
    Inputs:
    - path_audio: path of the json file with tags
    - root: root of cover file
    - genre: name of genre currently checking
    - file: file name of cover file
    - count_comp: counter of compilations
    - count_new_file: counter of new files added
    - count_repeat: counter used to disambiguate repeated file names
    :return number of compilations and new files added
    '''
with open(path_audio + '/' + file.split('__')[0] + '.json') as json_file:
data = json.load(json_file)
check = data['metadata']['tags']
        # Assume the cover can be copied unless a tag marks it as a compilation.
        copy = True
        if 'releasetype' in check:
            album_type = check['releasetype']
            for entry in album_type:
                if 'compilation' in entry or 'Compilation' in entry:
                    print('Compilation, not copying...', file)
                    copy = False
                    count_comp += 1
                    break
        if copy and 'musicbrainz album type' in check:
            album_type = check['musicbrainz album type']
            for entry in album_type:
                if 'compilation' in entry or 'Compilation' in entry:
                    print('Compilation, not copying...', file)
                    copy = False
                    count_comp += 1
                    break
if copy:
print("Copying new file")
count_new_file += 1
copyfile(root + '/' + file, 'E:/cover_dataset/' + genre + '/' + file + '__' + str(count_repeat) + '.jpg')
return count_comp, count_new_file
def check_same_covers_hash(path):
"""
Saves into a dictionary the paths of the images with an specific hash
Input:
:param path: dataset directory
:return: hash dictionary
"""
hashes = {}
count = 0
for root, dir, files in os.walk(path):
for file in files:
count += 1
print(count)
imagePath = root + '/' + file
# load the input image and compute the hash
image = cv2.imread(imagePath)
#if image is None:
#os.remove(imagePath)
#else:
h = dhash(image)
# grab all image paths with that hash, add the current image
# path to it, and store the list back in the hashes dictionary
p = hashes.get(h, [])
p.append(imagePath)
hashes[h] = p
return hashes
def delete_repeated_covers(hash):
"""
Deletes cover with the same hash
Input:
:param hash: hash dictionary extracted with the previous function
:return: None
"""
    for key in hash.keys():
        if len(hash[key]) > 1:
            # remove all but the last image that shares this hash
            for duplicate_path in hash[key][:-1]:
                os.remove(duplicate_path)
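# Typical de-duplication pass combining the two helpers above (added as an
# illustration; the directory path is a placeholder).
def example_deduplicate_dataset():
    hashes = check_same_covers_hash('E:/dataset/cover')
    delete_repeated_covers(hashes)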
def json_to_npy(json_path, new_npy_path):
"""
Converts json files from the lowlevel dumps of AcousticBrainz into numpy arrays
Inputs:
:param json_path: json file path
:param new_npy_path: new numpy array path
:return: None
"""
keys = ['lowlevel','rhythm','tonal']
l = []
with open(json_path) as f:
data = json.load(f)
for key in keys:
for item, key_1 in data[key].items():
if type(key_1) == float or type(key_1) == int:
l.append(key_1)
elif type(key_1) == list:
mean = np.mean(key_1)
std = np.std(key_1)
l.append(mean)
l.append(std)
elif type(key_1) == dict:
for k, key_2 in key_1.items():
if type(key_2) == list:
mean = np.mean(key_2)
std = np.std(key_2)
l.append(mean)
l.append(std)
else:
if item == 'spectral_spread' and (k == 'dvar2' or k == 'dvar' or k == 'var'):
break
elif item == 'spectral_rolloff' and (k == 'dvar2' or k == 'dvar' or k == 'var'):
break
elif item == 'spectral_centroid' and (k == 'dvar2' or k == 'dvar' or k == 'var'):
break
else:
l.append(key_2)
n = np.array(l)
np.save(new_npy_path, n)
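# Example of converting a whole folder of AcousticBrainz lowlevel JSON dumps
# into numpy feature arrays (added for illustration; the folder names are
# placeholders, not the project's real paths).
def example_convert_json_folder(json_dir='E:/acousticbrainz_json', npy_dir='E:/acousticbrainz_npy'):
    for name in os.listdir(json_dir):
        if name.endswith('.json'):
            json_to_npy(os.path.join(json_dir, name),
                        os.path.join(npy_dir, name.replace('.json', '.npy')))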
def concat_4x4_images(path):
"""
Concatenates 4x4 images into the same file
Input:
:param path: directory of covers dataset
:return: array with all images and a list with the paths for all images
"""
count = 0
images = np.empty((1, 48))
dir_images = []
for root, dirs, files in os.walk(path):
for file in files:
count += 1
print(count)
new_image = image.imread(root + '/' + file)
new_image = new_image.reshape(1, -1)
images = np.concatenate((images, new_image), axis=0)
dir_images.append(root + '/' + file)
return images[1:], dir_images
def similar_4x4_images(images, dir_images, threshold):
"""
Moves similar images into the same split
Inputs:
:param images: array of 4x4 images calculated with the previous function
:param dir_images: a list with the paths for all images extracted with the previous function
    :param threshold: distance threshold on the KDTree query used to decide whether two covers belong to the same split
:return: None
"""
count = 0
c = 0
tree = KDTree(images, metric='manhattan')
for i in range(images.shape[0]):
c += 1
print(c)
dist, ind = tree.query(images[i:i + 1], k=10)
for count, d in enumerate(dist[0]):
if 0 < count < (len(dist[0]) - 1):
interval = dist[0][count + 1] - d
if interval > 150:
num_similar_int = count + 1
break
else:
num_similar_int = 0
num_similar_thr = len([x for x in dist[0] if x < threshold])
num_similar = min(num_similar_int, num_similar_thr)
if 1 < num_similar:
ind_split = ind.tolist()[0][0]
split = dir_images[ind_split].split('\\')[1]
current_file = 'E:/dataset/cover' + dir_images[ind_split].split('E:/dataset/cover_4')[1]
if os.path.exists(current_file):
for j in range(num_similar):
if j > 0:
index = ind.tolist()[0][j]
dir_4x4 = dir_images[index]
new_dir = 'E:/dataset/cover' + '\\' + split + '\\' + dir_4x4.split('\\')[2]
di = 'E:/dataset/cover' + dir_4x4.split('E:/dataset/cover_4')[1]
if os.path.exists(di):
move(di, new_dir)
def PCA(path, new_path):
"""
Calculates PCA for arrays contained in a certain directory
Inputs:
:param path: directory of arrays
:param new_path: new directory to save compressed arrays
:return: None
"""
file_names = []
genres = []
splits = []
c = 0
tot = np.zeros((1,2048))
for root, dirs, files in os.walk(path):
for file in files:
x = np.load(root+'/'+file)
x = x.reshape(1,-1)
tot = np.concatenate((tot,x), axis=0)
genre = root.replace(os.sep, '/').split('/')[11]
split = root.replace(os.sep, '/').split('/')[12]
| |
<gh_stars>1-10
import asyncio
import json
import os
from unittest.mock import patch
import pytest
from eth_utils import encode_hex, to_canonical_address, to_checksum_address
from tornado.httpclient import HTTPRequest
from tornado.websocket import websocket_connect
from raiden_installer import load_settings
from raiden_installer.account import find_keystore_folder_path
from raiden_installer.base import RaidenConfigurationFile
from raiden_installer.ethereum_rpc import Infura, make_web3_provider
from raiden_installer.network import Network
from raiden_installer.raiden import RaidenClient
from raiden_installer.shared_handlers import get_passphrase, set_passphrase
from raiden_installer.tokens import ETH, Erc20Token, EthereumAmount, TokenAmount, Wei
from raiden_installer.transactions import get_token_balance, get_token_deposit
from raiden_installer.utils import TransactionTimeoutError
from raiden_installer.web import get_app
from raiden_installer.web_testnet import get_app as get_app_testnet
from tests.constants import TESTING_TEMP_FOLDER
from tests.fixtures import create_account, test_account, test_password
from tests.utils import empty_account
INFURA_PROJECT_ID = os.getenv("TEST_RAIDEN_INSTALLER_INFURA_PROJECT_ID")
UNLOCK_PAGE_HEADLINE = "<h2>Unlock your Raiden Account</h2>"
pytestmark = pytest.mark.skipif(not INFURA_PROJECT_ID, reason="missing configuration for infura")
def successful_html_response(response):
return response.code == 200 and response.headers["Content-Type"] == "text/html; charset=UTF-8"
def successful_json_response(response):
return (response.code == 200 and
response.headers["Content-Type"] == "application/json")
def is_unlock_page(body):
return UNLOCK_PAGE_HEADLINE in body.decode("utf-8")
def check_balances(w3, account, settings, check_func):
balance = account.get_ethereum_balance(w3)
service_token = Erc20Token.find_by_ticker(settings.service_token.ticker, settings.network)
udc_balance = get_token_deposit(w3, account, service_token)
transfer_token = Erc20Token.find_by_ticker(settings.transfer_token.ticker, settings.network)
transfer_token_balance = get_token_balance(w3, account, transfer_token)
return (
check_func(balance.as_wei) and
check_func(udc_balance.as_wei) and
check_func(transfer_token_balance.as_wei)
)
class SharedHandlersTests:
@pytest.fixture
def infura(self, test_account, network_name):
assert INFURA_PROJECT_ID
network = Network.get_by_name(network_name)
return Infura.make(network, INFURA_PROJECT_ID)
@pytest.fixture
def settings(self, settings_name):
return load_settings(settings_name)
@pytest.fixture
def patch_config_folder(self, monkeypatch):
monkeypatch.setattr(
RaidenConfigurationFile,
"FOLDER_PATH",
TESTING_TEMP_FOLDER.joinpath("config")
)
@pytest.fixture
def config(self, patch_config_folder, test_account, infura, settings):
config = RaidenConfigurationFile(
test_account.keystore_file_path,
settings,
infura.url,
)
config.save()
yield config
config.path.unlink()
@pytest.fixture
def unlocked(self, test_password):
set_passphrase(test_password)
yield
set_passphrase(None)
@pytest.fixture
def ws_client(self, http_client, http_port):
loop = asyncio.get_event_loop()
url = f"ws://localhost:{http_port}/ws"
client = loop.run_until_complete(websocket_connect(url))
yield client
client.close()
for task in asyncio.all_tasks(loop):
task.cancel()
@pytest.mark.gen_test
def test_index_handler(self, http_client, base_url):
response = yield http_client.fetch(base_url)
assert successful_html_response(response)
@pytest.mark.gen_test
def test_home_handler(self, http_client, base_url):
response = yield http_client.fetch(f"{base_url}/home")
assert successful_html_response(response)
@pytest.mark.gen_test
def test_terms_handler(self, http_client, base_url):
response = yield http_client.fetch(f"{base_url}/terms")
assert successful_html_response(response)
@pytest.mark.gen_test
def test_create_wallet_handler(self, http_client, base_url):
response = yield http_client.fetch(f"{base_url}/create_wallet")
assert successful_html_response(response)
@pytest.mark.gen_test
def test_setup_handler(self, http_client, base_url, test_account):
response = yield http_client.fetch(f"{base_url}/setup/{test_account.keystore_file_path}")
assert successful_html_response(response)
@pytest.mark.gen_test
def test_account_handler(self, http_client, base_url, config, unlocked):
response = yield http_client.fetch(f"{base_url}/account/{config.file_name}")
assert successful_html_response(response)
assert not is_unlock_page(response.body)
@pytest.mark.gen_test
def test_locked_account_handler(self, http_client, base_url, config):
response = yield http_client.fetch(f"{base_url}/account/{config.file_name}")
assert successful_html_response(response)
assert is_unlock_page(response.body)
@pytest.mark.gen_test
def test_launch_handler(self, http_client, base_url, config, unlocked):
response = yield http_client.fetch(f"{base_url}/launch/{config.file_name}")
assert successful_html_response(response)
assert not is_unlock_page(response.body)
@pytest.mark.gen_test
def test_locked_launch_handler(self, http_client, base_url, config):
response = yield http_client.fetch(f"{base_url}/launch/{config.file_name}")
assert successful_html_response(response)
assert is_unlock_page(response.body)
@pytest.mark.gen_test
def test_keystore_handler(self, http_client, base_url, test_account, config):
response = yield http_client.fetch(
f"{base_url}/keystore/{config.file_name}/{test_account.keystore_file_path.name}"
)
json_response = json.loads(response.body)
assert successful_json_response(response)
assert to_canonical_address(json_response["address"]) == test_account.address
@pytest.mark.gen_test(timeout=10)
def test_gas_price_handler(self, http_client, base_url, config):
response = yield http_client.fetch(
f"{base_url}/gas_price/{config.file_name}"
)
json_response = json.loads(response.body)
assert successful_json_response(response)
assert json_response["gas_price"] > 0
@pytest.mark.gen_test
def test_configuration_item_handler(self, http_client, base_url, config):
response = yield http_client.fetch(
f"{base_url}/api/configuration/{config.file_name}"
)
json_response = json.loads(response.body)
assert successful_json_response(response)
assert json_response["file_name"] == config.file_name
assert to_canonical_address(json_response["account"]) == config.account.address
assert json_response["network"] == config.network.name
assert json_response["balance"]["ETH"]["as_wei"] == 0
assert json_response["balance"]["service_token"]["as_wei"] == 0
assert json_response["balance"]["transfer_token"]["as_wei"] == 0
# Websocket methods tests
@pytest.mark.gen_test
def test_create_wallet(self, ws_client, test_password, test_account):
with patch(
"raiden_installer.account.Account.create",
return_value=test_account
) as mock_create_account:
data = {
"method": "create_wallet",
"passphrase1": <PASSWORD>,
"passphrase2": <PASSWORD>
}
ws_client.write_message(json.dumps(data))
message = json.loads((yield ws_client.read_message()))
assert message["type"] == "status-update"
message = json.loads((yield ws_client.read_message()))
assert message["type"] == "redirect"
assert message["redirect_url"] == f"/setup/{test_account.keystore_file_path}"
mock_create_account.assert_called_once_with(
find_keystore_folder_path(),
test_password
)
@pytest.mark.gen_test
def test_unlock(self, ws_client, test_password, test_account):
data = {
"method": "unlock",
"passphrase": <PASSWORD>,
"keystore_file_path": str(test_account.keystore_file_path),
"return_to": f"/setup/{test_account.keystore_file_path}"
}
ws_client.write_message(json.dumps(data))
message = json.loads((yield ws_client.read_message()))
assert message["type"] == "redirect"
assert message["redirect_url"] == f"/setup/{test_account.keystore_file_path}"
assert get_passphrase() == test_password
set_passphrase(None)
@pytest.mark.gen_test
def test_unlock_with_wrong_passphrase(self, ws_client, test_password, test_account):
data = {
"method": "unlock",
"passphrase": "<PASSWORD>" + test_password,
"keystore_file_path": str(test_account.keystore_file_path),
"return_to": f"/setup/{test_account.keystore_file_path}"
}
ws_client.write_message(json.dumps(data))
message = json.loads((yield ws_client.read_message()))
assert message["type"] == "error-message"
assert get_passphrase() == None
@pytest.mark.gen_test
def test_setup(
self,
ws_client,
test_account,
infura,
network_name,
patch_config_folder,
settings
):
data = {
"method": "setup",
"endpoint": f"https://{network_name}.infura.io/v3/{infura.project_id}",
"account_file": str(test_account.keystore_file_path)
}
ws_client.write_message(json.dumps(data))
message = json.loads((yield ws_client.read_message()))
assert message["type"] == "status-update"
account_address = to_checksum_address(test_account.address)
config_file_name = f"config-{account_address}-{settings.name}.toml"
message = json.loads((yield ws_client.read_message()))
assert message["type"] == "redirect"
assert message["redirect_url"] == f"/account/{config_file_name}"
config = RaidenConfigurationFile.get_by_filename(config_file_name)
assert config.account.keystore_file_path == test_account.keystore_file_path
assert config.settings == settings
assert config.ethereum_client_rpc_endpoint == infura.url
assert config.routing_mode == settings.routing_mode
assert config.enable_monitoring == settings.monitoring_enabled
config.path.unlink()
@pytest.mark.gen_test
def test_setup_with_invalid_infura_url(
self,
ws_client,
test_account,
infura,
patch_config_folder,
settings
):
data = {
"method": "setup",
"endpoint": f"https://invalid-network.infura.io/v3/{infura.project_id}",
"account_file": str(test_account.keystore_file_path)
}
ws_client.write_message(json.dumps(data))
message = json.loads((yield ws_client.read_message()))
assert message["type"] == "error-message"
with pytest.raises(ValueError):
RaidenConfigurationFile.get_by_filename(
f"config-{to_checksum_address(test_account.address)}-{settings.name}.toml"
)
@pytest.mark.gen_test
def test_setup_with_invalid_endpoint(
self,
ws_client,
test_account,
patch_config_folder,
settings
):
data = {
"method": "setup",
"endpoint": "https://no.infura.node",
"account_file": str(test_account.keystore_file_path)
}
ws_client.write_message(json.dumps(data))
message = json.loads((yield ws_client.read_message()))
assert message["type"] == "error-message"
with pytest.raises(ValueError):
RaidenConfigurationFile.get_by_filename(
f"config-{to_checksum_address(test_account.address)}-{settings.name}.toml"
)
@pytest.mark.gen_test
def test_launch(self, ws_client, config, unlocked):
with patch("raiden_installer.raiden.RaidenClient.get_client") as mock_get_client:
mock_client = mock_get_client()
mock_client.is_installed = False
mock_client.is_running = False
data = {
"method": "launch",
"configuration_file_name": config.file_name,
}
ws_client.write_message(json.dumps(data))
for _ in range(3):
message = json.loads((yield ws_client.read_message()))
assert message["type"] == "status-update"
message = json.loads((yield ws_client.read_message()))
assert message["type"] == "task-complete"
message = json.loads((yield ws_client.read_message()))
assert message["type"] == "redirect"
assert message["redirect_url"] == RaidenClient.WEB_UI_INDEX_URL
mock_client.install.assert_called_once()
mock_client.launch.assert_called_once()
mock_client.wait_for_web_ui_ready.assert_called_once()
@pytest.mark.gen_test
def test_locked_launch(self, ws_client, config):
with patch("raiden_installer.raiden.RaidenClient.get_client") as mock_get_client:
mock_client = mock_get_client()
mock_client.is_installed = False
mock_client.is_running = False
data = {
"method": "launch",
"configuration_file_name": config.file_name,
}
ws_client.write_message(json.dumps(data))
message = json.loads((yield ws_client.read_message()))
assert message["type"] == "error-message"
mock_client.install.assert_not_called()
mock_client.launch.assert_not_called()
mock_client.wait_for_web_ui_ready.assert_not_called()
class TestWeb(SharedHandlersTests):
@pytest.fixture
def app(self):
return get_app()
@pytest.fixture
def network_name(self):
return "mainnet"
@pytest.fixture
def settings_name(self):
return "mainnet"
@pytest.fixture
def mock_get_exchange(self):
with patch("raiden_installer.token_exchange.Exchange.get_by_name") as mock_get_exchange:
yield mock_get_exchange
@pytest.fixture
def mock_deposit_service_tokens(self):
with patch(
"raiden_installer.shared_handlers.deposit_service_tokens",
return_value=os.urandom(32)
) as mock_deposit_service_tokens:
yield mock_deposit_service_tokens
@pytest.fixture
def mock_wait_for_transaction(self):
with patch(
"raiden_installer.web.wait_for_transaction"
), patch(
"raiden_installer.shared_handlers.wait_for_transaction"
):
yield
@pytest.mark.gen_test
def test_swap_handler(self, http_client, base_url, config, settings, unlocked):
response = yield http_client.fetch(
f"{base_url}/swap/{config.file_name}/{settings.service_token.ticker}"
)
assert successful_html_response(response)
assert not is_unlock_page(response.body)
@pytest.mark.gen_test
def test_locked_swap_handler(self, http_client, base_url, config, settings):
response = yield http_client.fetch(
f"{base_url}/swap/{config.file_name}/{settings.service_token.ticker}"
)
assert successful_html_response(response)
assert is_unlock_page(response.body)
@pytest.mark.gen_test(timeout=15)
def test_cost_estimation_handler(
self,
http_client,
base_url,
config,
settings,
mock_get_exchange
):
exchange = "Kyber"
exchange_costs = {
"gas_price": EthereumAmount(Wei(1000000000)),
"gas": Wei(500000),
"eth_sold": EthereumAmount(0.5),
"total": EthereumAmount(0.505),
"exchange_rate": EthereumAmount(0.05),
}
mock_exchange = mock_get_exchange()()
mock_exchange.name = exchange
mock_exchange.calculate_transaction_costs.return_value = exchange_costs
currency = settings.transfer_token.ticker
target_amount = 3
data = {
"exchange": exchange,
"currency": currency,
"target_amount": target_amount,
}
request = HTTPRequest(
url=f"{base_url}/api/cost-estimation/{config.file_name}",
method="POST",
body=json.dumps(data)
)
response = yield http_client.fetch(request)
json_response = json.loads(response.body)
assert successful_json_response(response)
assert json_response["exchange"] == exchange
assert json_response["currency"] == currency
assert json_response["target_amount"] == target_amount
assert json_response["as_wei"] == exchange_costs["total"].as_wei
assert json_response["formatted"] == exchange_costs["total"].formatted
# Websocket methods tests
@pytest.mark.gen_test
def test_track_transaction(self, ws_client, config, settings):
with patch("raiden_installer.web.wait_for_transaction") as mock_wait_for_transaction:
tx_hash_bytes = os.urandom(32)
tx_hash = encode_hex(tx_hash_bytes)
data = {
"method": "track_transaction",
"configuration_file_name": config.file_name,
"tx_hash": tx_hash
}
ws_client.write_message(json.dumps(data))
message = json.loads((yield ws_client.read_message()))
assert message["type"] == "hash"
assert message["tx_hash"] == tx_hash
message = json.loads((yield ws_client.read_message()))
assert message["type"] == "status-update"
message = json.loads((yield ws_client.read_message()))
assert message["type"] == "redirect"
assert message["redirect_url"] == (
f"/swap/{config.file_name}/{settings.service_token.ticker}"
)
mock_wait_for_transaction.assert_called_once()
args, _ = mock_wait_for_transaction.call_args
assert tx_hash_bytes in args
loaded_config = RaidenConfigurationFile.get_by_filename(config.file_name)
assert loaded_config._initial_funding_txhash == None
@pytest.mark.gen_test
def test_track_transaction_with_invalid_config(self, ws_client, config, io_loop):
with patch("raiden_installer.web.wait_for_transaction") as mock_wait_for_transaction:
tx_hash = encode_hex(os.urandom(32))
data = {
"method": "track_transaction",
"configuration_file_name": "invalid" + config.file_name,
"tx_hash": tx_hash
}
ws_client.write_message(json.dumps(data))
message = json.loads((yield ws_client.read_message()))
assert message["type"] == "error-message"
mock_wait_for_transaction.assert_not_called()
@pytest.mark.gen_test(timeout=10)
def test_swap(
self,
ws_client,
config,
settings,
unlocked,
mock_get_exchange,
mock_deposit_service_tokens,
mock_wait_for_transaction
):
def token_balance(w3, account, token):
return (
TokenAmount(0, token)
if token.ticker == settings.transfer_token.ticker
else TokenAmount(10, token)
)
eth_balance_patch = patch(
"raiden_installer.account.Account.get_ethereum_balance",
return_value=EthereumAmount(100)
)
token_balance_patch = patch(
"raiden_installer.web.get_token_balance",
side_effect=token_balance
)
total_tokens_patch = patch(
"raiden_installer.web.get_total_token_owned",
side_effect=lambda w3, account, token: TokenAmount(10, token)
)
token_deposit_patch = patch(
"raiden_installer.shared_handlers.get_token_deposit",
side_effect=lambda w3, account, token: TokenAmount(10, token)
)
with eth_balance_patch, token_balance_patch, total_tokens_patch, token_deposit_patch:
mock_exchange = mock_get_exchange()()
mock_exchange.calculate_transaction_costs.return_value = {
"gas_price": EthereumAmount(Wei(1000000000)),
"gas": Wei(500000),
"eth_sold": EthereumAmount(0.5),
"total": EthereumAmount(0.505),
"exchange_rate": EthereumAmount(0.05),
}
mock_exchange.buy_tokens.return_value = os.urandom(32)
mock_exchange.name = "uniswap"
data = {
"method": "swap",
"configuration_file_name": config.file_name,
"amount": "10000000000000000000",
"token": settings.service_token.ticker,
"exchange": "uniswap"
}
ws_client.write_message(json.dumps(data))
for _ in range(8):
message = json.loads((yield ws_client.read_message()))
assert message["type"] == "status-update"
message = json.loads((yield ws_client.read_message()))
assert message["type"] == "summary"
message = json.loads((yield ws_client.read_message()))
assert message["type"] == "redirect"
assert message["redirect_url"] == (
f"/swap/{config.file_name}/{settings.transfer_token.ticker}"
)
mock_exchange.calculate_transaction_costs.assert_called_once()
mock_exchange.buy_tokens.assert_called_once()
mock_deposit_service_tokens.assert_called_once()
@pytest.mark.gen_test
def test_swap_with_invalid_exchange(
self,
ws_client,
config,
settings,
unlocked,
mock_get_exchange,
mock_deposit_service_tokens
):
mock_exchange = mock_get_exchange()()
data = {
"method": "swap",
"configuration_file_name": config.file_name,
"amount": "10000000000000000000",
"token": settings.service_token.ticker,
"exchange": "invalid exchange"
}
ws_client.write_message(json.dumps(data))
message = json.loads((yield ws_client.read_message()))
assert message["type"] == "error-message"
mock_exchange.calculate_transaction_costs.assert_not_called()
mock_exchange.buy_tokens.assert_not_called()
mock_deposit_service_tokens.assert_not_called()
@pytest.mark.gen_test(timeout=10)
def test_swap_without_enough_eth(
self,
ws_client,
config,
settings,
unlocked,
mock_get_exchange,
mock_deposit_service_tokens,
mock_wait_for_transaction
):
with patch(
"raiden_installer.account.Account.get_ethereum_balance",
return_value=EthereumAmount(0)
):
mock_exchange = mock_get_exchange()()
mock_exchange.calculate_transaction_costs.return_value = {
"gas_price": EthereumAmount(Wei(1000000000)),
"gas": | |
497, 445),
(4, -3, 497, 365),
(3.2, 2.1, 1.9, 0.1031015682350942),
(S(3) / 2, 5, S(5) / 6, S(3) / 32),
]:
assert pow(S(s), t, u) == v
assert pow(S(s), S(t), u) == v
assert pow(S(s), t, S(u)) == v
assert pow(S(s), S(t), S(u)) == v
assert pow(S(2), S(10000000000), S(3)) == 1
assert pow(x, y, z) == x ** y % z
raises(TypeError, lambda: pow(S(4), "13", 497))
raises(TypeError, lambda: pow(S(4), 13, "497"))
def test_pow_E():
assert 2 ** (y / log(2)) == S.Exp1 ** y
assert 2 ** (y / log(2) / 3) == S.Exp1 ** (y / 3)
assert 3 ** (1 / log(-3)) != S.Exp1
assert (3 + 2 * I) ** (1 / (log(-3 - 2 * I) + I * pi)) == S.Exp1
assert (4 + 2 * I) ** (1 / (log(-4 - 2 * I) + I * pi)) == S.Exp1
assert (3 + 2 * I) ** (1 / (log(-3 - 2 * I, 3) / 2 + I * pi / log(3) / 2)) == 9
assert (3 + 2 * I) ** (1 / (log(3 + 2 * I, 3) / 2)) == 9
# every time tests are run they will affirm with a different random
# value that this identity holds
while 1:
b = x._random()
r, i = b.as_real_imag()
if i:
break
assert verify_numerically(b ** (1 / (log(-b) + sign(i) * I * pi).n()), S.Exp1)
def test_pow_issue_3516():
assert 4 ** Rational(1, 4) == sqrt(2)
def test_pow_im():
for m in (-2, -1, 2):
for d in (3, 4, 5):
b = m * I
for i in range(1, 4 * d + 1):
e = Rational(i, d)
assert (b ** e - b.n() ** e.n()).n(2, chop=1e-10) == 0
e = Rational(7, 3)
assert (2 * x * I) ** e == 4 * 2 ** Rational(1, 3) * (
I * x
) ** e # same as Wolfram Alpha
im = symbols("im", imaginary=True)
assert (2 * im * I) ** e == 4 * 2 ** Rational(1, 3) * (I * im) ** e
args = [I, I, I, I, 2]
e = Rational(1, 3)
ans = 2 ** e
assert Mul(*args, evaluate=False) ** e == ans
assert Mul(*args) ** e == ans
args = [I, I, I, 2]
e = Rational(1, 3)
ans = 2 ** e * (-I) ** e
assert Mul(*args, evaluate=False) ** e == ans
assert Mul(*args) ** e == ans
args.append(-3)
ans = (6 * I) ** e
assert Mul(*args, evaluate=False) ** e == ans
assert Mul(*args) ** e == ans
args.append(-1)
ans = (-6 * I) ** e
assert Mul(*args, evaluate=False) ** e == ans
assert Mul(*args) ** e == ans
args = [I, I, 2]
e = Rational(1, 3)
ans = (-2) ** e
assert Mul(*args, evaluate=False) ** e == ans
assert Mul(*args) ** e == ans
args.append(-3)
ans = (6) ** e
assert Mul(*args, evaluate=False) ** e == ans
assert Mul(*args) ** e == ans
args.append(-1)
ans = (-6) ** e
assert Mul(*args, evaluate=False) ** e == ans
assert Mul(*args) ** e == ans
assert Mul(Pow(-1, Rational(3, 2), evaluate=False), I, I) == I
assert Mul(I * Pow(I, S.Half, evaluate=False)) == sqrt(I) * I
def test_real_mul():
assert Float(0) * pi * x == 0
assert set((Float(1) * pi * x).args) == {Float(1), pi, x}
def test_ncmul():
A = Symbol("A", commutative=False)
B = Symbol("B", commutative=False)
C = Symbol("C", commutative=False)
assert A * B != B * A
assert A * B * C != C * B * A
assert A * b * B * 3 * C == 3 * b * A * B * C
assert A * b * B * 3 * C != 3 * b * B * A * C
assert A * b * B * 3 * C == 3 * A * B * C * b
assert A + B == B + A
assert (A + B) * C != C * (A + B)
assert C * (A + B) * C != C * C * (A + B)
assert A * A == A ** 2
assert (A + B) * (A + B) == (A + B) ** 2
assert A ** -1 * A == 1
assert A / A == 1
assert A / (A ** 2) == 1 / A
assert A / (1 + A) == A / (1 + A)
assert set((A + B + 2 * (A + B)).args) == {A, B, 2 * (A + B)}
def test_ncpow():
x = Symbol("x", commutative=False)
y = Symbol("y", commutative=False)
z = Symbol("z", commutative=False)
a = Symbol("a")
b = Symbol("b")
c = Symbol("c")
assert (x ** 2) * (y ** 2) != (y ** 2) * (x ** 2)
assert (x ** -2) * y != y * (x ** 2)
assert 2 ** x * 2 ** y != 2 ** (x + y)
assert 2 ** x * 2 ** y * 2 ** z != 2 ** (x + y + z)
assert 2 ** x * 2 ** (2 * x) == 2 ** (3 * x)
assert 2 ** x * 2 ** (2 * x) * 2 ** x == 2 ** (4 * x)
assert exp(x) * exp(y) != exp(y) * exp(x)
assert exp(x) * exp(y) * exp(z) != exp(y) * exp(x) * exp(z)
assert exp(x) * exp(y) * exp(z) != exp(x + y + z)
assert x ** a * x ** b != x ** (a + b)
assert x ** a * x ** b * x ** c != x ** (a + b + c)
assert x ** 3 * x ** 4 == x ** 7
assert x ** 3 * x ** 4 * x ** 2 == x ** 9
assert x ** a * x ** (4 * a) == x ** (5 * a)
assert x ** a * x ** (4 * a) * x ** a == x ** (6 * a)
def test_powerbug():
x = Symbol("x")
assert x ** 1 != (-x) ** 1
assert x ** 2 == (-x) ** 2
assert x ** 3 != (-x) ** 3
assert x ** 4 == (-x) ** 4
assert x ** 5 != (-x) ** 5
assert x ** 6 == (-x) ** 6
assert x ** 128 == (-x) ** 128
assert x ** 129 != (-x) ** 129
assert (2 * x) ** 2 == (-2 * x) ** 2
def test_Mul_doesnt_expand_exp():
x = Symbol("x")
y = Symbol("y")
assert unchanged(Mul, exp(x), exp(y))
assert unchanged(Mul, 2 ** x, 2 ** y)
assert x ** 2 * x ** 3 == x ** 5
assert 2 ** x * 3 ** x == 6 ** x
assert x ** (y) * x ** (2 * y) == x ** (3 * y)
assert sqrt(2) * sqrt(2) == 2
assert 2 ** x * 2 ** (2 * x) == 2 ** (3 * x)
assert sqrt(2) * 2 ** Rational(1, 4) * 5 ** Rational(3, 4) == 10 ** Rational(3, 4)
assert (x ** (-log(5) / log(3)) * x) / (x * x ** (-log(5) / log(3))) == sympify(1)
def test_Add_Mul_is_integer():
x = Symbol("x")
k = Symbol("k", integer=True)
n = Symbol("n", integer=True)
assert (2 * k).is_integer is True
assert (-k).is_integer is True
assert (k / 3).is_integer is None
assert (x * k * n).is_integer is None
assert (k + n).is_integer is True
assert (k + x).is_integer is None
assert (k + n * x).is_integer is None
assert (k + n / 3).is_integer is None
assert ((1 + sqrt(3)) * (-sqrt(3) + 1)).is_integer is not False
assert (1 + (1 + sqrt(3)) * | |
<gh_stars>0
"""Module for preparing data from NCBI. Most low layer module for manipulating data."""
import os
import pickle
from collections import defaultdict
from Bio import Entrez
from Bio import SeqIO
# TODO: move to init
CACHE_DIR = "../../Diploma/cache"
if not os.path.isdir(CACHE_DIR):
CACHE_DIR = "cache/"
if not os.path.isdir(CACHE_DIR):
os.makedirs(CACHE_DIR)
# ************ NCBI RECORD OPERATIONS ************ #
def get_gids(term="Viruses[Organism] AND srcdb_refseq[PROP] AND complete_genome"):
"""
Get genome IDs for given search term.
:param term: search term for NCBI query
:return: list of genome IDs for given term
"""
# term = "Viruses[Organism] AND srcdb_refseq[PROP] AND complete_genome"
handle = Entrez.esearch(db="nucleotide", term=term, retmax=100000)
record = Entrez.read(handle)
id_list = sorted(set(record["IdList"]))
print((record["Count"], len(record["IdList"]), len(id_list)))
return id_list
def get_rec(rec_id):
"""
Get record for given genome id.
:param rec_id: genome id
:return: record
"""
try:
rec = pickle.load(open(os.path.join(CACHE_DIR, "%s.pkl.gz" % rec_id), "rb"))
except IOError: # , FileNotFoundError:
print(("downloading sequence id:", rec_id))
handle = Entrez.efetch(db="nucleotide", rettype="gb", id=rec_id)
rec = SeqIO.read(handle, "gb")
handle.close()
pickle.dump(rec, open(os.path.join(CACHE_DIR, "%s.pkl.gz" % rec_id), "wb"), -1)
print(("genome size:", len(rec.seq), rec.seq[:20] + "..."))
print(("Taxonomy:", rec.annotations['taxonomy']))
for a, t in list(rec.annotations.items()):
print((" %s: %s" % (a, str(t)[:15])))
print()
return rec
def get_gene(rec):
"""
Get record and return gene sequence.
:param rec: record
:return: gene sequence
"""
sequence = ""
for f in rec.features:
if f.type == "gene":
start = f.location.nofuzzy_start
end = f.location.nofuzzy_end
if f.location.strand == 1:
sequence += rec.seq[start:end]
            else:
                # features on the minus strand need the reverse complement
                sequence += rec.seq[start:end].reverse_complement()
return str(sequence)
def load_oid_seq_classification(ids):
"""
Build dictionary of sequences and taxonomies for every genome ID.
:param ids: genome IDs
:return: sequences and taxonomy annotations dictionaries for every genome ID
"""
seq = defaultdict(list)
tax = {}
for oid in ids:
rec = get_rec(oid)
seq[oid] = str(rec.seq)
tax[oid] = ';'.join(rec.annotations["taxonomy"])
return seq, tax
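# Minimal usage sketch (not part of the original module): Entrez requires an
# e-mail address before any query, and the genome IDs are fetched live, so this
# only illustrates the call order; the address below is a placeholder.
def example_fetch_sequences():
    Entrez.email = "your.name@example.com"  # placeholder address, set your own
    ids = get_gids()[:5]                    # only a handful of IDs for the demo
    sequences, taxonomies = load_oid_seq_classification(ids)
    for oid in ids:
        print(oid, taxonomies[oid], sequences[oid][:30])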
# ************ TAXONOMY OPERATIONS ************ #
def rec_dd():
"""
Create dictionary of dictionaries to 'simulate' tree.
:return: dictionary of dictionaries
"""
return defaultdict(rec_dd)
def update_taxonomy(taxonomy, tax_path, genome_id):
"""
    Update the taxonomy dictionary with the taxonomy path names and the IDs of the sequences that belong to each taxon.
:param taxonomy: current taxonomy
:param tax_path: taxonomy path
:param genome_id: genome_id
:return: updated taxonomy
"""
if not tax_path:
return taxonomy
tax = tax_path[0].lower()
if tax in taxonomy: # check if tax in taxonomy and update
# temp_taxonomy[tax]["data"].append(seq_record.annotations["gi"])
taxonomy[tax]["data"].append(genome_id)
# taxonomy[tax]["data"].append(get_gene(rec))
update_taxonomy(taxonomy[tax], tax_path[1:], genome_id)
else:
# temp_taxonomy[tax] = {"data": list({seq_record.annotations["gi"]})}
taxonomy[tax] = {"data": list({genome_id})}
# taxonomy[tax] = dict({"data": list({get_gene(rec)})})
temp = update_taxonomy(taxonomy[tax], tax_path[1:], genome_id)
if len(temp) > 1: # 1 = data, 2 = data + key
taxonomy = temp
return taxonomy
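# Small illustration of how update_taxonomy grows the nested defaultdict
# (the taxonomy paths and genome IDs below are made up for the example).
def example_build_small_taxonomy():
    taxonomy = rec_dd()
    update_taxonomy(taxonomy, ["Viruses", "Ortervirales", "Retroviridae"], "id_1")
    update_taxonomy(taxonomy, ["Viruses", "Ortervirales", "Caulimoviridae"], "id_2")
    # every node along a path stores the IDs of the genomes below it:
    # taxonomy["viruses"]["data"] -> ["id_1", "id_2"]
    return taxonomy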
def filter_classification(rec, to_filter):
"""
    Check whether any of the record's taxonomy terms appear in the filter list.
:param rec: record
:param to_filter: filter list
:return: bool
"""
in_to_filter = False
for temp_tax in rec.annotations["taxonomy"]:
temp_tax = temp_tax.lower().split()
for temp_tax_el in temp_tax:
if temp_tax_el in to_filter:
in_to_filter = True
print("filtered ", rec.annotations["taxonomy"])
return in_to_filter
def print_nice(taxonomy, level=0):
"""
Print taxonomy with tabs.
:param taxonomy: taxonomy
:param level: current level
:return:
"""
for i in sorted(taxonomy.keys()):
if i == "data":
if len(taxonomy) == 1:
return
else:
continue
else:
print(level * "\t", i.replace("->", "", 1), len(taxonomy[i]["data"]))
print_nice(taxonomy[i], level + 1)
def load_whole_taxonomy():
"""
Build taxonomy and get list ids and labels.
:return: data, label
"""
taxonomy = get_taxonomy(get_gids())
list_nodes = get_list_nodes_ids_labels(taxonomy)
data, labels = list(zip(*list_nodes))
for label in labels:
print(label)
    temp_l = []
    label_n = []
    for l in labels:
        if l not in temp_l:
            temp_l.append(l)
        # map every example to the integer id of its label
        label_n.append(temp_l.index(l))
    return data, label_n
def get_taxonomy(id_list, count=-1):
# call: python get_viral_sequence.py>log.out 2>log.err
# all virus sequences
# term = "Viruses[Organism] NOT srcdb_refseq[PROP] NOT cellular organisms[ORGN] AND
# nuccore genome samespecies[Filter] NOT nuccore genome[filter] NOT gbdiv syn[prop]"
# only reference (refSEQ) virues sequences
# see distinction between the two, here:
# http://www.ncbi.nlm.nih.gov/genomes/GenomesHome.cgi?taxid=10239&hopt=faq
"""
Build taxonomy from Entrez search.
:param id_list: list of genome ids we want to build taxonomy tree from
:param count: how many elements we want in taxonomy; -1 means whole taxonomy
:return: taxonomy
"""
taxonomy = rec_dd()
temp_count = 1
for genome_id in id_list:
try:
rec = get_rec(genome_id)
in_filter = filter_classification(rec, list({"bacteria", "unclassified", "unassigned"}))
if not in_filter:
update_taxonomy(taxonomy, rec.annotations["taxonomy"], genome_id)
if count != -1:
if temp_count == count:
break
temp_count += 1
except IOError as e:
# efetch - Raises an IOError exception if there's a network error.
# http://biopython.org/DIST/docs/api/Bio.Entrez-module.html
print("IOError raised...")
print(e)
except ValueError as v:
# http: // biopython.org / DIST / docs / api / Bio.SeqIO - module.html # read
print("problems with handling SeqIO...")
print(v)
except pickle.PicklingError as p:
# https://docs.python.org/2/library/pickle.html#pickle.PicklingError
print("problems with pickling object...")
print(p)
return taxonomy
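# Illustrative end-to-end call (added as an example): network access and an
# Entrez e-mail address are required, and the element count is limited so the
# run stays small. The address is a placeholder.
def example_print_taxonomy():
    Entrez.email = "your.name@example.com"  # placeholder address, set your own
    taxonomy = get_taxonomy(get_gids(), count=20)
    print_nice(taxonomy)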
def remove_small_nodes(taxonomy, threshold_size=100):
"""
Remove small nodes from dataset.
:param taxonomy: input taxonomy
    :param threshold_size: minimum number of genome IDs a node must contain to be kept
:return: output taxonomy
"""
if isinstance(taxonomy, (defaultdict, dict)):
taxonomy_keys = [x for x in list(taxonomy.keys()) if x != "data"]
for i in taxonomy_keys:
print(i, len(taxonomy[i]['data']))
if len(taxonomy[i]['data']) < threshold_size:
taxonomy.pop(i)
else:
remove_small_nodes(taxonomy[i])
else:
return taxonomy
# ************ LIST OPERATIONS ************ #
def remove_lists(taxonomy):
"""
Remove all list nodes from taxonomy.
:param taxonomy: taxonomy
:return: taxonomy
"""
# check for recurse exit
if isinstance(taxonomy, (defaultdict, dict)):
for i in [x for x in list(taxonomy.keys()) if x != "data"]:
if set(taxonomy[i]) == set(list({"data"})):
# if parent has only one list node, remove it
# if len([x for x in taxonomy.keys() if x != "data"]) == 1:
taxonomy.pop(i)
continue
else:
remove_lists(taxonomy[i])
else:
return taxonomy
def get_list_nodes_unique(taxonomy, parent=""):
"""
Get taxonomy and return unique list nodes.
:param taxonomy: taxonomy
:param parent: parent of current node
:return: unique list nodes
"""
# checked by hand and it works as expected
list_nodes = list()
keys = [x for x in list(taxonomy.keys()) if x != "data"]
for i in keys:
if set(taxonomy[i]) == set(list({"data"})):
list_nodes.append(i)
else:
list_nodes += get_list_nodes_unique(taxonomy[i], parent + "->" + i)
return list_nodes
def count_list_nodes(taxonomy):
"""
    Count leaf (list) nodes and return the total.
:param taxonomy: taxonomy
:return: int
"""
count = 0
keys = [x for x in list(taxonomy.keys()) if x != "data"]
for i in keys:
if set(taxonomy[i]) == set(list({"data"})):
if i == keys[-1]:
count += 1
return count
else:
count += 1
else:
count += count_list_nodes(taxonomy[i])
return count
def get_list_nodes_ids_labels(taxonomy, parent=""):
    """
    Get taxonomy and return tuples of all list nodes.
    :param taxonomy: taxonomy
    :param parent: label (class) of the parent node
    :return: list of tuples (id, class)
    """
    if len(list(taxonomy.keys())) > 1 or list(taxonomy.keys()) == ["viruses"]:
        temp = []
        for k in [x for x in list(taxonomy.keys()) if x != "data"]:
            temp += get_list_nodes_ids_labels(taxonomy[k], k)
        return temp
    else:
        return [(x, parent) for x in taxonomy["data"]]
# ************ ALL NODES OPERATIONS ************ #
def count_examples(taxonomy):
"""
Get taxonomy, count examples in every node and return sum.
:param taxonomy: taxonomy
:return: sum of examples
"""
count = 0
keys = [x for x in list(taxonomy.keys()) if x != "data"]
for i in keys:
if set(taxonomy[i]) == set(list({"data"})):
if i == keys[-1]:
count += len(taxonomy[i]["data"])
return count
else:
count += len(taxonomy[i]["data"])
else:
count += count_examples(taxonomy[i])
return count
def get_all_nodes(taxonomy, parent=""):
"""
Get taxonomy and return all nodes (including list nodes).
:param parent: parent of current node - default ""
:param taxonomy: taxonomy
:return: all nodes
"""
all_nodes = list()
keys = [x for x in list(taxonomy.keys()) if x != "data"]
for i in keys:
# if we want all non-list nodes, than this stays, otherwise comment this
# if len([x for x in taxonomy[i].keys() if x != "data"]) == 0:
# continue
if i == "rest":
all_nodes.append(parent + "->" + i)
else:
all_nodes.append(i)
all_nodes += get_all_nodes(taxonomy[i], i)
return all_nodes
# ************ OTHER ************ #
def load_seqs_from_ncbi(seq_len=100, skip_read=0, overlap=50, taxonomy_el_count=-1):
"""
Load sequences from NCBI database.
Prepare sequences, sliced to seq_len length. Skip every skip_read and overlap two reads with overlap nucleotides.
Overlap 50 means that half of the read is going to be overlapped with next read.
If seq_len is -1, load whole sequences (do not strip them) - usually using with fasta format as we slice sequences
later.
:param seq_len: read length
:param skip_read: number of skipped reads
:param overlap: overlapping nucleotides count
:param taxonomy_el_count: how many elements we want in taxonomy; -1 means whole taxonomy
:return: dictionary reads - each genome | |
local_port = port
public_port = None
new_pfs_set.add((public_port, local_port))
to_remove = old_pfs_set - new_pfs_set
to_create = new_pfs_set - old_pfs_set
for public_port, local_port in to_remove:
if local_port == 22:
continue
machine.portforward_delete(public_port)
for public_port, local_port in to_create:
machine.portforward_create(
publicport=public_port, localport=local_port, protocol='tcp'
)
# Get all created ports forwarding to save it in the model
all_ports = []
for port_forward in machine.portforwards:
port_added = "{public}:{local}".format(public=port_forward["publicPort"],
local=port_forward["localPort"])
all_ports.append(port_added)
setattr(service.model.data, key, all_ports)
if key == 'disk':
# Get machine data disks only
# machine_disks = {disk['name']: disk['id'] for disk in machine.disks if disk['type'] != 'B'}
old_disks_services = service.producers.get('disk', [])
# Check for removed disk services which aren't of type B(boot), and delete them
for old_disk_service in old_disks_services:
if old_disk_service.name not in value and old_disk_service.model.data.type != 'B':
service.model.producerRemove(old_disk_service)
# Check for the new disk services and add them
for disk_service_name in value:
disk_service = service.aysrepo.serviceGet('disk.ovc', disk_service_name)
if disk_service not in old_disks_services:
service.consume(disk_service)
_configure_disks(service, machine, service.executor.prefab)
setattr(service.model.data, key, value)
if key == 'uservdc':
# value is a list of (uservdc)
if not isinstance(value, list):
raise j.exceptions.Input(message="%s should be a list" % key)
if 'uservdc' in service.producers:
for s in service.producers['uservdc']:
if not any(v['name'] == s.name for v in value):
service.model.producerRemove(s)
for v in value:
accessRight = v.get('accesstype', '')
if v['name'] == s.name and accessRight != get_user_accessright(s.name, service):
user = s.name + '@' + s.model.data.provider if s.model.data.provider else s.name
try:
result = machine.client.api.cloudapi.machines.updateUser(
machineId=machine.id, userId=user, accesstype=accessRight
)
except ApiError as err:
service.logger.error(
"""
Failed to update access rights for user {} on machine {}.
Error: {}""".format(user, machine.name, err)
)
raise
if result is not True:
service.logger.error(
'Failed to update access rights for user {} on machine {}'.format(user, machine.name)
)
for v in value:
userservice = service.aysrepo.serviceGet('uservdc', v['name'])
if userservice not in service.producers.get('uservdc', []):
service.consume(userservice)
setattr(service.model.data, key, value)
authorization_user(machine, service)
if key in ['cloneName', 'snapshotEpoch']:
setattr(service.model.data, key, value)
space.save()
service.save()
def export(job):
service = job.service
vdc = service.parent
if 'g8client' not in vdc.producers:
raise j.exceptions.AYSNotFound("No producer g8client found. Cannot continue export of %s" % service)
g8client = vdc.producers["g8client"][0]
config_instance = "{}_{}".format(g8client.aysrepo.name, g8client.model.data.instance)
cl = j.clients.openvcloud.get(instance=config_instance, create=False, die=True, sshkey_path="/root/.ssh/ays_repos_key")
acc = cl.account_get(vdc.model.data.account)
space = acc.space_get(vdc.model.dbobj.name, vdc.model.data.location)
if service.name not in space.machines:
raise j.exceptions.NotFound("Can not find a machine with this name %s" % service.name)
cl.api.cloudapi.machines.exportOVF(
link=service.model.data.ovfLink,
username=service.model.data.ovfUsername,
passwd=service.model.data.ovfPassword,
path=service.model.data.ovfPath,
machineId=service.model.data.machineId,
callbackUrl=service.model.data.ovfCallbackUrl)
def import_(job):
service = job.service
space = _get_cloud_space(service)
size_id = space.size_find_id(service.model.data.memory)
service.client.api.cloudapi.machines.importOVF(
link=service.model.data.ovfLink,
username=service.model.data.ovfUsername,
passwd=service.model.data.ovfPassword,
path=service.model.data.ovfPath,
cloudspaceId=space.id,
name=service.name,
sizeId=size_id,
callbackUrl=service.model.data.ovfCallbackUrl
)
def init_actions_(service, args):
"""
    This needs to return a mapping of actions representing the dependencies between actions.
    Look at ACTION_DEPS in this module for an example of what is expected.
"""
# some default logic for simple actions
action_required = args.get('action_required')
if action_required in ['stop', 'uninstall']:
for action_name, action_model in service.model.actions.items():
if action_name in ['stop', 'uninstall']:
continue
if action_model.state == 'scheduled':
action_model.state = 'new'
if action_required in ['install']:
for action_name, action_model in service.model.actions.items():
if action_name in ['uninstall', 'stop'] and action_model.state == 'scheduled':
action_model.state = 'new'
    if action_required == 'stop':
        if service.model.actionsState['start'] == 'scheduled':
            service.model.actionsState['start'] = 'new'
    if action_required == 'start':
        if service.model.actionsState['stop'] == 'scheduled':
            service.model.actionsState['stop'] = 'new'
service.save()
return {
'init': [],
'install': ['init'],
'start': ['install'],
'export': ['install'],
'import_': ['init'],
'monitor': ['start'],
'stop': ['install'],
'clone': ['stop'],
'get_history': ['install'],
'attach_external_network': ['install'],
'detach_external_network': ['install'],
'uninstall': ['stop'],
'add_user': ['install'],
'update_user': ['install'],
'delete_user': ['install'],
'pause': ['install'],
'resume': ['install'],
'restart': ['install'],
'list_snapshots': ['install'],
'snapshot': ['install'],
'rollback_snapshot': ['stop'],
'delete_snapshot': ['install']
}
def add_disk(job):
service = job.service
repo = service.aysrepo
space = _get_cloud_space(service)
# find os
os = None
for child in service.children:
if child.model.role == 'os':
os = child
break
if os is None:
raise RuntimeError('no child os found')
if service.name in space.machines:
# machine already exists
machine = space.machines[service.name]
else:
raise RuntimeError('Machine {} was not found'.format(service.name))
args = job.model.args
prefix = args.get('prefix', 'added')
available_disks = service.producers.get('disk', [])
available_names = service.model.data.disk
device_names = list(map(lambda d: d.model.data.devicename, available_disks))
idx = 1
name = '%s-%d' % (prefix, idx)
while name in available_names:
idx += 1
name = '%s-%d' % (prefix, idx)
model = {
'size': args.get('size', 1000),
'description': args.get('description', 'disk'),
}
machine.add_disk(name=name, description=model['description'], size=model['size'], type='D')
code, out, err = os.executor.prefab.core.run("lsblk -J", die=False)
if code != 0:
raise RuntimeError('failed to list devices on node: %s' % err)
jsonout = j.data.serializer.json.loads(out)
# should be only 1
devices = [x for x in jsonout['blockdevices'] if x['mountpoint'] is None and x['type'] == 'disk']
for dv in devices:
if 'children' in dv or dv['name'] in device_names:
continue
model['devicename'] = dv['name']
disk_service = repo.actorGet('disk.ovc').serviceCreate(name, model)
disk_service.saveAll()
service.consume(disk_service)
disks = list(service.model.data.disk)
disks.append(name)
service.model.data.disk = disks
service.saveAll()
def open_port(job):
"""
Open port in the firewall by creating port forward
if public_port is None, auto select available port
Return the public port assigned
"""
requested_port = job.model.args['requested_port']
public_port = job.model.args.get('public_port', None)
service = job.service
space = _get_cloud_space(service)
if service.name in space.machines:
# machine already exists
machine = space.machines[service.name]
else:
raise RuntimeError('machine not found')
# check if already open, if yes return public port
spaceport = None
for pf in machine.portforwards:
if pf['localPort'] == requested_port:
spaceport = pf['publicPort']
break
ports = set(service.model.data.ports)
if spaceport is None:
if public_port is None:
# reach that point, the port is not forwarded yet
unavailable_ports = [int(portinfo['publicPort']) for portinfo in machine.space.portforwards]
spaceport = 2200
while True:
if spaceport not in unavailable_ports:
break
else:
spaceport += 1
else:
spaceport = public_port
machine.portforward_create(spaceport, requested_port)
ports.add("%s:%s" % (spaceport, requested_port))
service.model.data.ports = list(ports)
service.saveAll()
return spaceport
def uninstall(job):
service = job.service
space = _get_cloud_space(service)
if service.name not in space.machines:
service.logger.warning("Machine doesn't exist in the cloud space")
return
machine = space.machines[service.name]
machine.delete()
def start(job):
space = _get_cloud_space(job.service)
machine = space.machines[job.service.name]
_check_ssh_authorization(job.service, machine)
machine.start()
def stop(job):
space = _get_cloud_space(job.service)
machine = space.machines[job.service.name]
_check_ssh_authorization(job.service, machine)
machine.stop()
def restart(job):
space = _get_cloud_space(job.service)
machine = space.machines[job.service.name]
_check_ssh_authorization(job.service, machine)
machine.restart()
def pause(job):
space = _get_cloud_space(job.service)
machine = space.machines[job.service.name]
_check_ssh_authorization(job.service, machine)
machine.pause()
def resume(job):
space = _get_cloud_space(job.service)
machine = space.machines[job.service.name]
_check_ssh_authorization(job.service, machine)
machine.resume()
def reset(job):
space = _get_cloud_space(job.service)
machine = space.machines[job.service.name]
_check_ssh_authorization(job.service, machine)
machine.reset()
def clone(job):
"""
Action that creates a clone of a machine.
"""
service = job.service
vdc = service.parent
if 'g8client' not in vdc.producers:
raise j.exceptions.RuntimeError("No producer g8client found. Cannot continue clone of %s" % service)
g8client = vdc.producers["g8client"][0]
config_instance = "{}_{}".format(g8client.aysrepo.name, g8client.model.data.instance)
cl = j.clients.openvcloud.get(instance=config_instance, create=False, die=True, sshkey_path="/root/.ssh/ays_repos_key")
acc = cl.account_get(vdc.model.data.account)
space = acc.space_get(vdc.model.dbobj.name, vdc.model.data.location)
if service.name not in space.machines:
raise j.exceptions.RuntimeError("Machine with name %s doesn't exist in the cloud space" % service.name)
machine = space.machines[service.name]
clone_name = service.model.data.cloneName
machine.clone(clone_name)
machine.start()
def get_history(job):
import json
service = job.service
space = _get_cloud_space(service)
machine = space.machines[service.name]
_check_ssh_authorization(job.service, machine)
res = machine.getHistory(10)
service.model.data.vmHistory = json.dumps(res)
service.saveAll()
def attach_external_network(job):
"""
Action that attaches the machine to the external network.
"""
service = job.service
vdc = service.parent
if 'g8client' not in vdc.producers:
raise j.exceptions.RuntimeError("No producer g8client found. Cannot continue attaching external network to %s" % service)
g8client = vdc.producers["g8client"][0]
config_instance = "{}_{}".format(g8client.aysrepo.name, g8client.model.data.instance)
cl = j.clients.openvcloud.get(instance=config_instance, create=False, die=True, sshkey_path="/root/.ssh/ays_repos_key")
acc = cl.account_get(vdc.model.data.account)
space = acc.space_get(vdc.model.dbobj.name, vdc.model.data.location)
if service.name not in space.machines:
raise j.exceptions.RuntimeError("Machine with name %s doesn't exist in the cloud space" % service.name)
machine = space.machines[service.name]
machine.attach_external_network()
def detach_external_network(job):
"""
Action that detaches the machine from the external network.
"""
service = job.service
vdc = service.parent
if 'g8client' not in vdc.producers:
raise j.exceptions.RuntimeError("No producer g8client found. Cannot continue detaching external network from %s" % service)
g8client = vdc.producers["g8client"][0]
config_instance = "{}_{}".format(g8client.aysrepo.name, g8client.model.data.instance)
cl = j.clients.openvcloud.get(instance=config_instance, create=False, die=True, sshkey_path="/root/.ssh/ays_repos_key")
acc = cl.account_get(vdc.model.data.account)
space = acc.space_get(vdc.model.dbobj.name, vdc.model.data.location)
if service.name not in space.machines:
raise j.exceptions.RuntimeError("Machine with name %s doesn't exist in the cloud space" % service.name)
machine = space.machines[service.name]
machine.detach_external_network()
def mail(job):
print('hello world')
def list_snapshots(job):
"""
Action that lists the snapshots of the machine
"""
import json
service = job.service
vdc = service.parent
if 'g8client' not in vdc.producers:
raise j.exceptions.RuntimeError("No producer g8client found. Cannot continue creating snapshot of %s" % service)
g8client = vdc.producers["g8client"][0]
config_instance = "{}_{}".format(g8client.aysrepo.name, g8client.model.data.instance)
cl = j.clients.openvcloud.get(instance=config_instance, create=False, die=True, sshkey_path="/root/.ssh/ays_repos_key")
acc = cl.account_get(vdc.model.data.account)
space = acc.space_get(vdc.model.dbobj.name, vdc.model.data.location)
if service.name not in space.machines:
raise j.exceptions.RuntimeError("Machine with name %s doesn't exist | |
<gh_stars>1-10
# task 1 for a question, task 2, task 3
import nltk
import io
import os
import re
import requests
import json
from pprint import pprint
from nltk.parse import CoreNLPParser
from nltk.parse.corenlp import CoreNLPDependencyParser
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet as wn
from nltk.stem import PorterStemmer
from nltk.wsd import lesk
from elasticsearch import Elasticsearch
import string
import numpy as np
import ast
import dateparser
# POS tags for the question words: who -> WP, when -> WRB, where -> WRB
stop_words = stopwords.words('english') + list(string.punctuation)
r = requests.get('http://localhost:9200')
es = Elasticsearch([{'host': 'localhost', 'port': 9200}])
def question_pipeline(question):
lemmatizer = WordNetLemmatizer()
porter = PorterStemmer()
# stanford corenlp is expected to run at localhost:9000
dep_parser = CoreNLPDependencyParser(url='http://localhost:9000')
ner_tagger = CoreNLPParser(url='http://localhost:9000', tagtype='ner')
corpus_dict = {}
count = 0
sent_text = question
tokenized_text = nltk.word_tokenize(sent_text)
question_types = ['who', 'when', 'where', 'Who', 'When', 'Where']
type_of_question = [i for i in question_types if i in tokenized_text]
lemma = [lemmatizer.lemmatize(word) for word in tokenized_text]
stemmed = [porter.stem(word)
for word in tokenized_text] # Stemming the words
# POS tagging the words to extract POS features
tagged = nltk.pos_tag(tokenized_text)
parse, = dep_parser.raw_parse(question)
# Dependency parsing to parse tree based patters as features
dependency_parse = list(parse.triples())
# LESK to extract best sense of a word
best_sense = [lesk(question, word) for word in tokenized_text]
# tokenized_text_ner = nltk.word_tokenize(sent_text) #Tokenizing sentences into words
ner_tag = ner_tagger.tag(tokenized_text)
head_list = []
striped_sentence = sent_text.strip(" '\"")
if striped_sentence != "":
dependency_parser = dep_parser.raw_parse(striped_sentence)
parsetree = list(dependency_parser)[0]
head_word = ""
head_word = [k["word"]
for k in parsetree.nodes.values() if k["head"] == 0][0]
if head_word != "":
head_list.append([head_word])
else:
for i, pp in enumerate(tagged):
if pp.startswith("VB"):
head_list.append([tokenized_text[i]])
break
if head_word == "":
for i, pp in enumerate(tagged):
if pp.startswith("NN"):
head_list.append([tokenized_text[i]])
break
else:
head_list.append([""])
synonym_list = []
hypernym_list = []
hyponym_list = []
meronym_list = []
holonym_list = []
for t in tokenized_text:
best_sense = lesk(sent_text, t) # LESK to extract best sense of a word
if best_sense is not None:
this_synonym = t
if best_sense.lemmas()[0].name() != t:
this_synonym = best_sense.lemmas()[0].name()
synonym_list.append(this_synonym)
if best_sense.hypernyms() != []:
hypernym_list.append(best_sense.hypernyms()[
0].lemmas()[0].name())
if best_sense.hyponyms() != []:
hyponym_list.append(best_sense.hyponyms()[
0].lemmas()[0].name())
if best_sense.part_meronyms() != []:
meronym_list.append(best_sense.part_meronyms()[
0].lemmas()[0].name())
if best_sense.part_holonyms() != []:
holonym_list.append(best_sense.part_holonyms()[
0].lemmas()[0].name())
else:
synonym_list.append(t)
count = count + 1
corpus_dict[count] = {}
corpus_dict[count]["sentence"] = {}
corpus_dict[count]["sentence"] = sent_text
corpus_dict[count]["type_of_question"] = {}
corpus_dict[count]["type_of_question"] = type_of_question
corpus_dict[count]["tokenized_text"] = {}
corpus_dict[count]["tokenized_text"] = tokenized_text
corpus_dict[count]["lemma"] = {}
corpus_dict[count]["lemma"] = lemma
corpus_dict[count]["stemmed"] = {}
corpus_dict[count]["stemmed"] = stemmed
corpus_dict[count]["tagged"] = {}
corpus_dict[count]["tagged"] = tagged
corpus_dict[count]["dependency_parse"] = {}
corpus_dict[count]["dependency_parse"] = dependency_parse
corpus_dict[count]["synonyms"] = {}
corpus_dict[count]["synonyms"] = synonym_list
corpus_dict[count]["hypernyms"] = {}
corpus_dict[count]["hypernyms"] = hypernym_list
corpus_dict[count]["hyponyms"] = {}
corpus_dict[count]["hyponyms"] = hyponym_list
corpus_dict[count]["meronyms"] = {}
corpus_dict[count]["meronyms"] = meronym_list
corpus_dict[count]["holonyms"] = {}
corpus_dict[count]["holonyms"] = holonym_list
corpus_dict[count]["ner_tag"] = {}
corpus_dict[count]["ner_tag"] = dict(ner_tag)
corpus_dict[count]["head_word"] = {}
corpus_dict[count]["head_word"] = head_list[0]
return corpus_dict
def quesFeatures(piped):
keywords = ""
namedEntities = []
similarWords = []
stems = []
ques_type = piped[1]['type_of_question']
head_word = piped[1]['head_word'][0]
lemma = piped[1]['lemma']
ner = piped[1]['ner_tag']
stems = piped[1]['stemmed']
depparse = piped[1]['dependency_parse']
ner_tag = piped[1]['ner_tag']
dep_list = list(list(x) for x in depparse)
depElements = []
for i in dep_list:
if i[1] == 'nsubj':
depElements.append(i[0])
if i[1] == 'dobj':
depElements.append(i[0])
dep_list2 = list(list(x) for x in depElements)
# oth = stems
similarWords = piped[1]['synonyms'] + piped[1]['meronyms'] + piped[1]['hyponyms'] + piped[1]['hypernyms'] + piped[1]['holonyms']
for word, ent in ner.items():
namedEntities.append(ent)
#***************add more********************
if ent == 'ORGANIZATION':
keywords +=" "+word+" "
if ent == 'LOCATION':
keywords +=" "+word+" "
if ent == 'PERSON':
keywords +=" "+word+" "
keywords +=" "+head_word+" "
return keywords, similarWords, ques_type, lemma, stems, dep_list2
def query_match(theQuery, dep_list2):
querybody = {
"query": {
"dis_max": {
"queries": [
# { "match": { "lemma": {"query": spclQuery,"boost": 2} }},
{"multi_match": {'query': theQuery, "fields": [
# "lemma^2.0", "synonyms^0.5", "meronyms^0.1", "holonyms^0.1", "hypernyms^0.1", "hyponyms^0.1"]}},
"lemma^2", "ner_tag", "synonyms", "meronyms^0.5", "holonyms^0.5", "hypernyms^0.5", "hyponyms^0.5"]}},
]
}
}
}
ans2 = es.search(index="wikiarticles", body=querybody)
answers = ans2['hits']['hits']
depparses = []
sentenses = []
scores = []
articles = []
ners = []
for i in range(len(answers)):
sent = ans2['hits']['hits'][i]['_source']['sentence']
score = ans2['hits']['hits'][i]['_score']
depparse = ans2['hits']['hits'][i]['_source']['dependency_parse']
article = ans2['hits']['hits'][i]['_source']['file_name']
ner = ans2['hits']['hits'][i]['_source']['ner_tag']
sentenses.append(sent)
scores.append(score)
depparses.append(depparse)
articles.append(article)
ners.append(ner)
# print("Sentence: '{}' DepParse: '{}' Score:'{}'".format(sent, depparse, score))
# print("--------------------------------------------")
return sentenses,scores, depparses, articles, ners
def getHeuristics(keywords, similarWords, question, ques_type):
heuristics = ""
# domain knowledge for person, organization, location
utdFlag = 0
if 'Apple' in keywords:
heuristics += " Apple inc. computer apple Apple Inc. "
if 'UTD' in question:
utdFlag = 1
heuristics += ' UT Dallas the University of Texas at Dallas institution'
if 'headquarters' in question:
heuristics += ' world corporate headquarters American multinational conglomerate headquartered '
if 'Lincoln' in keywords:
heuristics += ' <NAME> '
if 'die' in keywords:
heuristics += ' assassination '
if 'born' in keywords:
heuristics += ' '
if 'AT&T' in question:
heuristics += ' AT&T Inc. expand '
if 'south' in question:
heuristics += ' latin '
if 'Exxon' in question:
heuristics += " Exxon Mobil Corporation ExxonMobil oil companies "
month = " january february march april April may june June july august september September october november december "
year = " 1969 1970 1971 1980 1981 1982 1983 1984 1975 1976 1977 1978 1979 2001 2002 2003 2004 2005 2006 2007 2010 2012 2013 2014 2015"
# where locations
if ques_type[0].lower() == 'who':
if 'found' in question and utdFlag == 1:
heuristics += ' <NAME>, <NAME> and <NAME> '
print("Who question")
if ques_type[0].lower() == 'when':
print("When question")
if ques_type[0].lower() == 'where':
if 'birth' in question:
heuristics += ' born '
heuristics += ' locate '
print("Where question")
return heuristics
def computeScore(ques_type, keywords, sentenses,scores, depparses, articles, ners, dep_list2):
# dependency parsing
# get answers, improve heuristics, expand synonyms
count = 0
anoList = []
for dep in depparses:
subList = []
for i in dep:
if i[1] == 'nsubj' or i[1] == 'dobj':
if i[0] in dep_list2:
anoList.append([count,i[0]])
subList.append(i[0])
count += 1
questionType = ques_type[0].lower()
answersList = []
# NATIONALITY
orgList = ['ORGANIZATION']
personList = ['PERSON']
locationList = ['LOCATION', 'PLACE', 'CITY', 'COUNTRY', 'STATE_OR_PROVINCE']
timeList = ['TIME', 'DATE', 'NUMBER']
dieList = ['die', 'died', 'assassination']
bornList = ['born', 'birth', 'life']
keywordlist = keywords.split()
keywordList = [item.lower() for item in keywordlist]
# print(keywordList)
# domain knowledge
poi = ['Jobs']
orgoi = ['apple']
#stop words for time
timeoi = ['year', 'now']
for ano in anoList:
sentIndex = ano[0]
scores[sentIndex] +=100
if questionType == 'who':
for ner in ners:
a = eval(ner)
answers = []
for key, value in a.items():
if key in poi and value == 'O':
answers.append(key)
if value in personList:
answers.append(key)
if value in orgList and key.lower() not in orgoi:
answers.append(key)
if answers != [] and key == ',':
answers.append(key)
if answers != [] and key == 'and':
answers.append(key)
answersList.append(' '.join(answers))
if questionType == 'when':
for ner in ners:
# evaluating stringified dict
a = eval(ner)
answers = []
for key, value in a.items():
if value in timeList and dateparser.parse(key) is not None and key.lower() not in timeoi:
answers.append(key)
answersList.append(' '.join(answers))
if questionType == 'where':
for ner in ners:
a = eval(ner)
answers = []
for key, value in a.items():
if value in locationList:
answers.append(key)
if answers != [] and key == ',':
answers.append(key)
if answers != [] and key == 'and':
answers.append(key)
answersList.append(' '.join(answers))
for naught in range(0,len(answersList)):
if len(answersList[naught]) < 3:
scores[naught] -= 100
dieconcept = 0
if questionType == 'when':
for keyw in range(0,len(sentenses)):
for j in dieList:
if j in sentenses[keyw]:
pattern = r"\((.*?)\)"
try:
matched = re.findall(pattern,sentenses[keyw])
splits = matched[0].split(' ')
splitjoin = ' '.join(splits[4:])
answersList[keyw] = splitjoin
dieconcept = 1
except:
pass
scores[keyw] += 50
if dieconcept == 0:
for keyw in range(0,len(sentenses)):
for j in bornList:
if j in sentenses[keyw]:
pattern = r"\((.*?)\)"
try:
matched = re.findall(pattern,sentenses[keyw])
splits = matched[0].split(' ')
splitjoin = ' '.join(splits)
answersList[keyw] = splitjoin
dieconcept = 1
except:
pass
scores[keyw] += 10
l = zip(answersList, sentenses,articles, scores)
n = sorted(l, key=lambda x: x[3])
return reversed(n)
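# Illustrative end-to-end sketch (not part of the original script): it assumes a CoreNLP server
# on localhost:9000 and a populated Elasticsearch index named "wikiarticles", and simply chains
# the functions defined above for a single who/when/where question. How the original wires the
# query string together is an assumption here.
def _answer_question_sketch(question):
    piped = question_pipeline(question)
    keywords, similarWords, ques_type, lemma, stems, dep_list2 = quesFeatures(piped)
    heuristics = getHeuristics(keywords, similarWords, question, ques_type)
    theQuery = keywords + " " + " ".join(similarWords) + heuristics
    sentenses, scores, depparses, articles, ners = query_match(theQuery, dep_list2)
    ranked = computeScore(ques_type, keywords, sentenses, scores,
                          depparses, articles, ners, dep_list2)
    # Each entry is (answer, sentence, article, score), best-scoring first.
    return list(ranked)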
def readQuestions(filename):
# take input | |
an intention for the main process to fail.
Args:
    processes (List[subprocess.Popen]):
        list of process handles opened for background benchmark runs.
    lenient (bool):
        If True, do not terminate the background processes.
Returns:
    bool:
        True when the caller should treat this as an early exit and stop as well;
        False when lenient mode leaves the background processes running.
"""
print("Early exiting, killing all the background processes...")
if not lenient:
for process in processes:
kill_process(process)
return True
return False
def get_background_benchmarks(GPU: str,
main_benchmark: str,
background_benchmark_input: str,
background_duration: str,
background_timeout: int) -> Tuple[List[str], int, str]:
"""
Returns information of background benchmarks, i.e. list of benchmarks selected
to run in the background, duration of those runs, and the action commandline argument
for those runs.
Main benchmark is removed from background benchmarks to avoid any possible interference.
Args:
GPU (str):
String representing system identification. See also get_mig_identification()
main_benchmark (str):
Name of the main benchmark to run, as in mlperf-inference.
background_benchmark_input (str):
Comma separated list of benchmarks to be run in background.
A category macro ('all', 'datacenter', 'edge') may also be given to select the background benchmarks from that category.
See also --background_benchmarks from commandline arguments.
background_duration (str):
Time to run the background benchmarks. Note that this is string type.
If 'automatic' is provided, algorithm determines the run duration.
If string with digits is provided, it is translated into time in milliseconds.
background_timeout (int):
Time to wait before auto-terminating background benchmarks, in milliseconds.
Returns:
Tuple[List[str], int, str]:
list of str:
background_benchmarks -- benchmark names to be used for background runs.
ex) ["DLRM", "ResNet50", "SSD-ResNet34"]
int:
background_benchmark_duration -- runtime in milliseconds for background runs.
str:
background_benchmark_action -- commandline argument to be used for background runs.
"""
# to return, default value
background_benchmarks = list()
background_benchmark_duration = 0
background_benchmark_action = "run_harness"
# Expand the list of background benchmarks if macro was used and
# then validate that the selected benchmarks are all supported.
if background_benchmark_input in {'all', 'datacenter', 'edge'}:
background_benchmarks = list(
{
'datacenter': BENCHMARKS.DATACENTER,
'edge': BENCHMARKS.EDGE,
'all': BENCHMARKS.ALL,
}[background_benchmark_input]
)
elif background_benchmark_input == 'none':
background_benchmarks = list()
else:
background_benchmarks = background_benchmark_input.split(',')
assert any(background_benchmarks), "Unexpected background benchmark(s): {}".format(background_benchmark_input)
# Just for determinism :)
background_benchmarks.sort()
for benchmark in background_benchmarks:
assert BENCHMARKS.alias(benchmark) in SUPPORT_MATRIX[GPU],\
"Specified background benchmark {} is not supported".format(benchmark)
# Set the duration for which to run the background benchmark, if user provided any
if background_duration.isdigit():
background_benchmark_duration = int(background_duration)
# If the background_benchmark_duration is automatic, let background benchmarks run until main benchmark ends.
# The timeout would be 60 minutes for now.
elif background_duration.lower() == 'automatic':
logging.info(("Setting the background benchmark to run for indefinitely long "
"until main benchmark finishes."))
background_benchmark_duration = background_timeout
else:
assert false, "Unknown setting for background_benchmark_duration: {}".format(background_duration)
# If we're running all the benchmarks at once, the main benchmark can't also be a background benchmark
if background_benchmark_input == 'all':
background_benchmarks.remove(main_benchmark)
return background_benchmarks, background_benchmark_duration, background_benchmark_action
def get_cmd_templates(main_benchmark_runargs: str,
verbose: bool, verbose_all: bool,
background_logdir: str) -> Tuple[str, str]:
"""
Returns command templates to run the main and background benchmarks.
These command templates are later filled in with the per-run values (device, action,
benchmark name, scenario, config version and minimum duration).
Args:
main_benchmark_runargs (str):
Run argument(s) to be added for main benchmark run.
verbose (bool):
Use verbose logging for main benchmark.
verbose_all (bool):
Use verbose logging for all the benchmarks, including main and background benchmarks.
background_logdir (str):
Points to the directory where background LoadGen logs are going to be kept.
Returns:
Tuple[str, str]:
str:
Commandline string used as template for main benchmark.
str:
Commandline string used as template for background benchmark(s).
"""
# Template command for running each of the benchmarks
cmd_template_prefix = \
('CUDA_VISIBLE_DEVICES={} make {} RUN_ARGS="--benchmarks={} '
'--scenarios={} --config_ver={} --min_duration={}')
cmd_template_suffix = '"'
# Always set the min_query_count to 1 for background benchmarks so that their duration
# is purely based on the min_duration setting
background_runargs = " --min_query_count=1"
background_extra = f" LOG_DIR={background_logdir}"
if verbose_all:
background_runargs += " --verbose --verbose_nvtx"
main_runargs = ""
if main_benchmark_runargs:
main_runargs += " " + main_benchmark_runargs
if verbose_all or verbose:
main_runargs += " --verbose --verbose_nvtx"
background_cmd_template = cmd_template_prefix + background_runargs + cmd_template_suffix + background_extra
main_cmd_template = cmd_template_prefix + main_runargs + cmd_template_suffix
return main_cmd_template, background_cmd_template
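# For orientation only, with made-up values: once launch_background_benchmarks() below formats
# the background template, the resulting shell command looks roughly like
#   CUDA_VISIBLE_DEVICES=MIG-xxxx make run_harness RUN_ARGS="--benchmarks=ResNet50
#       --scenarios=offline --config_ver=default --min_duration=600000 --min_query_count=1" LOG_DIR=/tmp/bg_logs
# The MIG UUID, config version and log directory above are hypothetical.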
def launch_background_benchmarks(background_benchmarks: List[str],
background_cmd_template: str,
background_benchmark_duration: int,
background_benchmark_action: str,
start_time_buffer: int,
main_benchmark_immediate_start: bool,
mig_uuids: List[str],
lenient: bool,
dryrun: bool) -> Tuple[List[subprocess.Popen], List[bool], str]:
"""
Launches background benchmarks and waits until they start inferences.
Args:
background_benchmarks (List[str]):
background_benchmarks -- benchmark names to be used for background runs
ex) ["DLRM", "ResNet50", "SSD-ResNet34"]
background_cmd_template (str):
Commandline string used as template for background benchmark(s)
background_benchmark_duration (int):
Time to run the background benchmarks, in milliseconds.
background_benchmark_action (str):
Commandline argument to be used for background runs.
start_time_buffer (int):
Gap between launching the background benchmark and main benchmark, in milliseconds.
main_benchmark_immediate_start (bool):
Start main benchmark immediately, as soon as background benchmarks start inferences.
See also --main_benchmark_immediate_start from commandline arguments.
mig_uuids (List[str]):
List containing MIG_UUIDs that will be used for benchmark runs.
First one is reserved for main benchmark run.
ex) [MIG-1e8534f8-5dd9-575e-bcee-82f3db1bebd1,
MIG-818abda7-8ede-58fa-bb02-ed3d388b6922]
lenient (bool):
Whether to terminate run on any background benchmark anomaly.
See also --lenient from commandline arguments.
dryrun (bool):
Just generate cmds, instead of really run benchmarks.
See also --dryrun from commandline arguments.
Returns:
List[subprocess.Popen], List[bool], str]:
List[subprocess.Popen]:
background_processes -- holds handles of background benchmark processes launched.
List[bool]:
completed_benchmarks -- holds flags of background benchmarks that are completed already.
str:
debug_msg -- holds string containing debug information
"""
# to return
background_processes = list()
completed_benchmarks = [False,] * len(background_benchmarks)
debug_msg = ""
if background_benchmarks:
# Set logging level if something is wrong
logging_wrong = logging.warning if lenient else logging.error
for i, background_benchmark in enumerate(background_benchmarks):
# scenario is fixed as Offline for background benchmarks
cmd = background_cmd_template.format(
mig_uuids[i+1],
background_benchmark_action,
background_benchmark,
'offline',
BENCHMARK_DEFAULT_CONFIG[BENCHMARKS.alias(background_benchmark)],
background_benchmark_duration
)
if dryrun:
debug_msg += cmd + '\n'
else:
logging.info('Launching background workload: {}'.format(cmd))
background_processes.append(subprocess.Popen(cmd, universal_newlines=True, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
if not dryrun:
# Checking explicitly to detect that benchmarks have started by monitoring their logs in real-time
# Using mechanism described on:
# https://stackoverflow.com/questions/375427/a-non-blocking-read-on-a-subprocess-pipe-in-python
# to poll stdout of each background benchmark
await_start_timeout = start_time_buffer / 1000
logging.info(("Waiting for background benchmarks to reach timed section "
"with a {} second timeout".format(await_start_timeout)))
background_launch_time = datetime.datetime.now()
background_queues = [queue.Queue() for _ in background_benchmarks]  # one independent queue per benchmark
assert len(background_queues) == len(background_processes), "Error in prep"
background_threads = [threading.Thread(target=enqueue_output,
args=(background_processes[i].stdout, myq))
for i, myq in enumerate(background_queues)]
for t in background_threads:
t.daemon = True
t.start()
unstarted_benchmarks = [True,] * (len(background_benchmarks))
termination_check_counter = 0
while ((datetime.datetime.now() - background_launch_time).seconds < await_start_timeout) and\
(unstarted_benchmarks.count(True) > 0):
found_lines = False
for i, benchmark in enumerate(background_benchmarks):
if unstarted_benchmarks[i]:
try:
line = background_queues[i].get_nowait()
except queue.Empty:
pass
else:
found_lines = True
line = line.strip()
logging.debug("[{} {}]:".format(i, benchmark).upper() + line)
if line.endswith("running actual test."):
logging.info("Detected that background benchmark {}, {} has started".format(
i, background_benchmarks[i]))
unstarted_benchmarks[i] = False
if not found_lines:
termination_check_counter += 1
if termination_check_counter == 30:
for i, benchmark in enumerate(background_benchmarks):
process_poll_result = background_processes[i].poll()
if not (process_poll_result is None):
unstarted_benchmarks[i] = False
completed_benchmarks[i] = True
logging_wrong(
"Background benchmark {}, {} exited earlier than expected with code {}".format(
i, background_benchmarks[i], process_poll_result)
)
to_early_exit = early_exit(background_processes, lenient)
if to_early_exit:
debug_msg += "CRITICAL: EARLY EXIT\n"
return [], [], debug_msg
time.sleep(.1)
unstarted = []
if unstarted_benchmarks.count(True) > 0:
    unstarted = ["({}, {})".format(i, background_benchmarks[i])
                 for i in range(len(background_benchmarks)) if unstarted_benchmarks[i]]
    logging_wrong(("Did not detect start of background benchmarks {} "
                   "after waiting for the specified delay.".format(unstarted)))
to_early_exit = early_exit(background_processes, lenient)
if to_early_exit:
debug_msg += "CRITICAL: EARLY EXIT\n"
return [], [], debug_msg
logging.info("All background benchmarks have started")
remaining_delay = await_start_timeout - (datetime.datetime.now() - background_launch_time).seconds
if not main_benchmark_immediate_start and remaining_delay > 0:
logging.info("Waiting for remaining delay of {} seconds.".format(remaining_delay))
time.sleep(remaining_delay)
return background_processes, completed_benchmarks, debug_msg
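# The enqueue_output() helper used above is defined elsewhere in the file and not shown in this
# excerpt. As a rough sketch (an assumption, following the non-blocking-read pattern from the
# Stack Overflow link cited above), it just drains a pipe line by line into a queue:
def _enqueue_output_sketch(out, q):
    # universal_newlines=True on the Popen call makes readline() return str, so '' marks EOF.
    for line in iter(out.readline, ''):
        q.put(line)
    out.close()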
def run_main_benchmark(main_benchmark: str,
main_scenario: str,
main_action: str,
main_benchmark_duration: int,
main_cmd_template: str,
background_check_auto: bool,
background_benchmarks: List[str],
background_processes: List[subprocess.Popen],
completed_benchmarks: List[bool],
mig_uuids: List[str],
lenient: bool,
dryrun: bool) -> str:
"""
Runs the main benchmark while the background benchmarks keep executing.
Args:
main_benchmark (str):
Benchmark name for main benchmark.
See also --main_benchmark from commandline arguments.
ex) "ResNet50"
main_scenario (str):
Scenario main benchmark to run for.
See also --main_scenario from commandline arguments.
ex) "Server"
| |
#!/usr/bin/python3
from __future__ import print_function
import collections
import numbers
import itertools
# import functools
import timeit
# Will probably limit support to 2.7 and 3.4, but for the moment 2.6
# and 3.3 should work.
# Single-dispatch generic functions are in, one of the CPython
# developers maintains a backport for Python 2.6.5 onwards to 3.3.
# These are really too useful for this project to avoid. The enhanced
# buffer protocol, which I'll also need, is in 2.6 on. Coroutines
# were added in Python 2.5. New-style string formatting, the abstract
# classes (both abc and collections.abc), and built-in sets were also
# added in 2.6. The u'' prefix for Unicode strings came back in 3.3
# to make writing compatible code easier. There's better iterable
# unpacking syntax in 3.0: http://legacy.python.org/dev/peps/pep-3132/
# .
# Big question: 'yield from' was added in Python 3.3. Experimentation
# suggests that as of 3.4, using "yield from" still hits the maximum
# recursion depth which means that "yield from" still adds a stack
# frame for each call.
# 2.7 support hacks
import six
if six.PY3:
import teststruct as _struct
three_bytes_available = True
else:
# Hack for having not hacked a 2.7 version of struct yet.
import struct as _struct
three_bytes_available = False
try:
from functools import singledispatch
except ImportError:
from singledispatch import singledispatch
# 3.x vs. 2.7 syntax difference with metaclass assignment here.
# metaclass=ABCMeta
# import functools
# import abc
# In Jensen-Gray's terminology, where I'm starting is an
# internal/embedded DSL that's an API, possibly with embedded data.
# Features struct needs to have and doesn't:
# Three-byte integers
# Arbitrary-length integers
# Bits
# A function that returns the length of the tuple returned by unpack
# or the arguments required by pack.
# The bird's-eye view of the trivial compiler design is that it should
# break up the problem into a set of struct module instances that
# cover all possible reads of the data that will be necessary during
# the execution of the packer/unpacker. Note that some format strings
# may be duplicated in multiple places in the AST of the declarative
# mini-language: for instance, in my DVI parser, there will be a
# struct string 'b' for an opcode, but the same struct string appears
# in one-parameter mixed-sign opcode quadruplets. There's no reason
# to make extra instances for these duplicates, so the right data
# structure for storing the struct instances is a dictionary keyed by
# the format strings. Unfortunately, format strings don't have unique
# semantics (it's possible to express the same format with different
# strings), so the trivial compiler will have to enforce this
# invariant itself. I *believe* that this can only happen with count
# values and cases where endianness gets broken up differently
# (['>hb', '<h'] and ['>h', '<bh'] would be stored differently in the
# dictionary but have identical packing/upacking properties in toto).
# One other piece of information has to be stored in this dictionary,
# the length of the tuple produced by the struct instance. The rules
# on which format characters I'm allowing prohibit struct strings with
# dynamic lengths, so every struct instance has an integer length that
# needs to be stored with it.
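# A minimal sketch (not the author's implementation) of the dictionary described above, written
# against the standard struct module: each distinct format string maps to a compiled Struct plus
# the length of the tuple it unpacks, so duplicate format strings in the AST share one instance.
import struct as _std_struct

_struct_cache = {}

def _cached_struct(fmt):
    if fmt not in _struct_cache:
        s = _std_struct.Struct(fmt)
        # The rules above forbid dynamic-length formats, so the tuple length is fixed;
        # unpacking a zero buffer of the right size is a cheap way to count the fields.
        tuple_len = len(s.unpack(b"\x00" * s.size))
        _struct_cache[fmt] = (s, tuple_len)
    return _struct_cache[fmt]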
# There are four cases that have to be handled during compilation:
# Atoms: atoms always fit together in a single string except for endianness.
# Static combinators: each instance of an entirely static combinator
# will carry a list of format strings created when *it's* compiled.
# To put the two lists together, the trivial compiler has to check if
# the beginning of the second and the end of the first can be stuck
# together---if they can, great, do so and concatenate the lists, if
# not, just concatenate the lists. Note that after this happens, the
# trivial compiler can delete some attributes on the child static
# combinator. This may be better implemented by a visitor pattern, I
# will have to examine that.
# Dynamic combinators: dynamic combinators have format strings, but
# these can never be merged with static format strings, so they have
# to be just added to the dictionary.
# Static-dynamic combinators: these are static combinators with dynamic
# children. The dynamic children need to be added to the list to say,
# "Don't call struct, call me," and the static parts of the lists need
# to be concatenated in the usual way.
# Some subtleties that I need to work out:
# For static combinators, it's possible to precalculate all the byte
# offsets in advance. This saves some additions in the inner loop
# while parsing. For dynamic combinators, I want to replace this with
# some kind of generator (coroutine?) that runs during parsing and
# spits out byte offsets. Actually, once the struct strings are
# built, this entire problem can be conceptualized as building a
# generator that keeps track of its own byte offset and spits out
# tuples for the data transformers (adapters) to consume. What I've
# been doing so far is just working with static structures, but I
# should be able to figure out how to turn the list comprehensions
# I've been using into generators.
# Key idea: moving from static to dynamic involves turning lists into
# generators!
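# A rough illustration of that idea (an assumption about the eventual shape, not the final
# design): a generator that walks a sequence of compiled Struct instances over a buffer,
# keeping its own byte offset and yielding one tuple per struct string.
def _unpack_stream_sketch(structs, buf):
    offset = 0
    for s in structs:
        yield s.unpack_from(buf, offset)
        offset += s.size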
# Less sure about: adapters as coroutines to do data transformation,
# using internal state to keep track of how many objects it's seen so
# it knows when to terminate.
# Very high-level overview of the architecture:
# On one end, the unpacker will be an iterator that produces a stream
# of tuples according to the rules embedded in the the declarative
# structure. Dynamic combinators affect the rules in this iterator.
# This is a non-trivial compilation problem.
# The rest of the program looks schematically like:
# Unpacker -> data transformers -> Struct/Sequence -> Python representation
# Python representation -> Struct/Sequence -> data transformers -> Packer
# The unpacker is the producer, taking the binary data and making it
# into python tuples. Everything else, including the packer, should
# be coroutines. That said, at some point there has to be a yield
# statement to produce the Python representation that's the output of
# the parser and another producer that feeds the Python representation
# into the coroutines leading to the packer. Theoretically, if the
# problem solution involves transforming binary data in some way or
# writing binary data back, the coroutine at the end of the unpacking
# process could skip the Struct/Sequence step altogether and send data
# straight into a packer, either the same packer to write back to the
# same binary data format with the data transformed in some way or a
# different packer to write to a different binary data format.
# Note that the order of the coroutines for the unpacking process goes
# from the innermost levels of the declarative structure outwards.
# However, I don't think this works for all cases---transforming bytes
# into bits is going to have to live outside the data-processers in
# the declarative syntax but at the innermost level in the procedural
# implementation. The pipeline may have to be set up on a
# case-by-case basis.
# This framework has some natural iterative structure. I need to set
# up the declarative syntax for deciding the final units for the
# Python representation.
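# A toy version of that pipeline shape (illustrative only, nothing here is the project's API):
# a decorator primes coroutines, a transformer forwards modified values, and a sink collects
# the final Python representation.
def _coroutine(func):
    def start(*args, **kwargs):
        gen = func(*args, **kwargs)
        next(gen)  # advance to the first yield so the coroutine can accept send()
        return gen
    return start

@_coroutine
def _collect_sketch(out):
    while True:
        out.append((yield))

@_coroutine
def _scale_sketch(factor, target):
    # Example "data transformer": scales each incoming value, then forwards it downstream.
    while True:
        target.send((yield) * factor)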
# To figure out how to set this up, let's try some examples from DVI
# files that aren't too hard. (No tunneling, yet.)
# VF preamble: 'B', 'BB', length-'s', 'qq'
# Note: have to be able to dynamically add struct format strings for
# things like Pascal strings to the dictionary at run time.
# Procedurally:
# 1: parser generates a one-tuple of an int.
# 2: byte -> command name in return object but not context, sends
# return object on to Struct.
# 3: parser reads context, figures out it needs to call 'BB' next,
# generates a two-tuple of ints.
# 4: first byte is checked, raise an error if it doesn't match, sends
# it nowhere.
# 5: second byte put into the context but not sent on.
# 6: parser reads the previous byte and generates a string of that length.
# 7: decode string and send it on to Struct.
# 8: parser generates a two-tuple of ints.
# 9: send first int on to | |
self.fig.canvas.mpl_connect( 'close_event', self.__exit__ )
self.fig.canvas.mpl_connect( 'resize_event', self.__draw__ )
plt.show( block=self.blocking )
# plt.ion()
def loadFromFile(self, filename, loadBoxes=True ):
self.titles = self.im
print( "Try to load MRC or DM4 files" )
file_front, file_ext = os.path.splitext( self.im )
if (file_ext.lower() == ".mrc" or file_ext.lower() == ".mrcs" or
file_ext.lower() == ".mrcz" or file_ext.lower() == ".mrcsz"):
self.im, self.header = mrcz.readMRC( self.im, pixelunits=u'nm' )
elif file_ext.lower() == ".dm4":
dm4struct = mrcz.readDM4( self.im )
self.im = dm4struct.im[1].imageData
self.header = dm4struct.im[1].imageInfo
del dm4struct
else:
print( "Filename has unknown/unimplemented file type: " + self.im )
return
# Check for boxes
# Star files don't contain box sizes so use the box files instead
box_name = file_front + "_automatch.box"
if bool(self.showBoxes) and os.path.isfile( box_name ):
self.loadBoxFile( box_name )
return
# Try the star file instead
box_name = file_front + "_automatch.star"
if bool(self.showBoxes) and os.path.isfile( box_name ):
self.loadStarFile( box_name )
return
def loadBoxFile(self, box_name ):
box_data = np.loadtxt( box_name, comments="_" )
# box_data columns = [x_center, y_center, ..., ..., FigureOfMerit]
self.boxLen = box_data[0,2]
# In boxfiles coordinates are at the edges.
self.boxYX = np.fliplr( box_data[:,:2] )
# DEBUG: The flipping of the y-coordinate system is annoying...
print( "boxYX.shape = " + str(self.boxYX.shape) + ", len = " + str(self.boxLen) )
self.boxYX[:,0] = self.im.shape[0] - self.boxYX[:,0]
self.boxYX[:,1] += int( self.boxLen / 2 )
self.boxYX[:,0] -= int( self.boxLen/2)
try:
self.boxFoM = box_data[:,4]
clim = zorro.zorro_util.ciClim( self.boxFoM, sigma=2.5 )
self.boxFoM = zorro.zorro_util.normalize( np.clip( self.boxFoM, clim[0], clim[1] ) )
except:
self.boxFoM = np.ones( self.boxYX.shape[0] )
self.boxColors = plt.cm.gnuplot( self.boxFoM )
def loadStarFile(self, box_name ):
box_data = np.loadtxt( box_name, comments="_", skiprows=5 )
# box_data columns = [x_center, y_center, ..., ..., FigureOfMerit]
# In star files coordinates are centered
self.boxYX = np.fliplr( box_data[:,:2] )
# DEBUG: The flipping of the y-coordinate system is annoying...
self.boxYX[:,0] = self.im.shape[0] - self.boxYX[:,0]
# There's no box size information in a star file so we have to use a guess
self.boxLen = 224
#self.boxYX[:,1] -= int( self.boxLen / 2 )
#self.boxYX[:,0] += int( self.boxLen / 2 )
try:
self.boxFoM = box_data[:,4]
clim = zorro.zorro_util.ciClim( self.boxFoM, sigma=2.5 )
self.boxFoM = zorro.zorro_util.normalize( np.clip( self.boxFoM, clim[0], clim[1] ) )
except:
self.boxFoM = np.ones( self.boxYX.shape[0] )
self.boxColors = plt.cm.gnuplot( self.boxFoM )
def __setaxes__(self):
self.ax.cla()
################
# definitions for the axes
widthProf = 0.1
left, width = 0.05, 0.75
bottomProf = 0.05
bottom, height = widthProf + bottomProf + 0.05, 0.75
leftProf = left + width + 0.05
rect_im = [left, bottom, width, height]
rect_X = [left, bottomProf, width, widthProf] # horizontal
rect_Y = [leftProf, bottom, widthProf, height] # vertical
# start with a rectangular Figure
self.ax = plt.axes(rect_im)
self.axX = plt.axes(rect_X)
self.axY = plt.axes(rect_Y)
nullfmt = plt.NullFormatter() # no labels
self.axX.xaxis.set_major_formatter(nullfmt)
self.axX.yaxis.set_major_formatter(nullfmt)
self.axY.xaxis.set_major_formatter(nullfmt)
self.axY.yaxis.set_major_formatter(nullfmt)
self.posProfHoriz = np.round(self.frameShape[0]/2)
self.posProfVert = np.round(self.frameShape[1]/2)
def __recompute__(self):
self.__currTitle = ""
if self.doTranspose:
self.doTranspose = False
self.im = np.transpose( self.im, axes=[2,0,1] )
print( "Tranposed axes shape: %s" % str(self.im.shape) )
self.__setaxes__()
if self.im.ndim == 2:
self.im2show = self.im
elif self.im.ndim == 3:
self.im2show = np.squeeze( self.im[self.index,...] )
self.__currTitle = 'frame %d/%d' % (self.index, self.im.shape[0]-1)
# projections
if self.projToggle:
if self.projType=='M':
self.im2show = self.im.max(axis=0)
self.__currTitle = 'max proj'
if self.projType=='S':
self.im2show = self.im.sum(axis=0)
self.__currTitle = 'sum proj'
if self.projType=='V':
self.im2show = np.var(self.im,axis=0)
self.__currTitle = 'var proj'
if self.complex:
self.__currTitle += ', cplx (0=abs,1=phase)'
if self.fftMode:
self.__currTitle += ", fft"
self.im2show = np.abs(np.fft.fftshift( np.fft.fft2( self.im2show ) ))
if self.polarMode:
self.__currTitle += ", polar"
self.im2show = zorro.zorro_util.img2polar( self.im2show )
if self.filterMode:
self.__currTitle += ", gauss%.2f" % self.__gaussSigma
self.im2show = ni.gaussian_filter( self.im2show, self.__gaussSigma )
if self.logMode:
# # TODO: this can be sent to matplotlib as an argument in imshow instead
self.__currTitle += ', log10'
if np.any(self.im <= 0.0):
# RAM: alternatively we could just add the minimum value to the whole matrix
self.im2show = np.log10( self.im2show - np.min( self.im2show ) + 1.0 )
else:
self.im2show = np.log10( self.im2show )
else:
self.__currTitle += ', lin'
# We need to compute image-wide statistics
if self.sigmaMode:
self.__meanList[self.index] = np.mean( self.im2show )
self.__stdList[self.index] = np.std( self.im2show )
else:
self.__minList[self.index] = np.min( self.im2show )
self.__maxList[self.index] = np.max( self.im2show )
self.__draw__()
def __draw__(self, info=None ):
# print( "Called ims.draw()" )
plt.cla()
tit = self.__currTitle + ""
if self.zoom > 1:
tit += ', zoom %g x'%(self.zoom)
center_y = int( self.frameShape[0]/2 )
center_x = int( self.frameShape[1]/2 )
halfWidth_y = int( 0.5* self.frameShape[0]/self.zoom )
halfWidth_x = int( 0.5* self.frameShape[1]/self.zoom )
im_range = [ np.maximum( 0, center_x-halfWidth_x),
np.minimum( self.frameShape[1], center_x+halfWidth_x ),
np.maximum( 0, center_y-halfWidth_y),
np.minimum( self.frameShape[0], center_y+halfWidth_y ) ]
if self.sigmaMode:
if np.isnan( self.__meanList[self.index] ):
self.__meanList[self.index] = np.mean( self.im2show )
self.__stdList[self.index] = np.std( self.im2show )
clim_min = self.__meanList[self.index] - self.__sigmaLevels[self.__sigmaIndex]*self.__stdList[self.index]
clim_max = self.__meanList[self.index] + self.__sigmaLevels[self.__sigmaIndex]*self.__stdList[self.index]
tit += ", $\sigma$%.2f clim[%.1f,%.1f]" % (self.__sigmaLevels[self.__sigmaIndex], clim_min, clim_max)
else:
if np.isnan( self.__minList[self.index] ):
self.__minList[self.index] = np.min( self.im2show )
self.__maxList[self.index] = np.max( self.im2show )
clim_min = self.__minList[self.index]
clim_max = self.__maxList[self.index]
tit += ", clim[%.1f,%.1f]" % (clim_min, clim_max)
# LogNorm really isn't very failsafe...
# if self.logMode:
# norm = col.LogNorm()
# else:
# norm = None
norm = None
self.ax.set_title( tit )
self.ax.imshow(self.im2show[ im_range[2]:im_range[3], im_range[0]:im_range[1] ],
vmin=clim_min, vmax=clim_max,
interpolation='none',
norm=norm,
extent=im_range,
cmap=self.cmap )
# plt.colorbar(self.ax)
# Printing particle box overlay
if bool(self.showBoxes) and self.boxYX is not None and np.any(self.boxYX) and self.boxLen > 0:
# Coordinate systems are upside-down in y-axis?
# box2 = int( self.boxLen/4 )
dpi = self.fig.get_dpi()
width = np.minimum( self.fig.get_figwidth(), self.fig.get_figheight() )
# Ok I'm not getting draw events from resizing...
markerSize = (self.boxLen*width/dpi)**2
print( "dpi = %d, width = %g, markerSize = %g" %(dpi,width, markerSize) )
#for J in np.arange( self.boxYX.shape[0] ):
# box = self.boxYX[J,:]
#boxCoord = np.array( [box+[-box2,-box2], box+[-box2,box2],
# box+[box2,box2],
# box+[box2,-box2], box+[-box2,-box2] ] )
# self.ax.scatter( self.boxYX[:,1], self.boxYX[:,0], s=markerSize, color=colors, alpha=0.3 )
self.ax.scatter( self.boxYX[:,1], self.boxYX[:,0],
s=markerSize, color=self.boxColors, alpha=0.2, marker='s' )
plt.xlim( [im_range[0], im_range[1] ] )
plt.ylim( [im_range[2], im_range[3] ] )
# RAM: This format_coord function is amazingly sensitive to minor changes and often breaks
# the whole class.
# DO NOT TOUCH format_coord!!!!
def format_coord(x, y):
x = np.int(x + 0.5)
y = np.int(y + 0.5)
try:
#return "%s @ [%4i, %4i]" % (round(im2show[y, x],2), x, y)
return "%.5G @ [%4i, %4i]" % (self.im2show[y, x], y, x) #first shown coordinate is vertical, second is horizontal
except IndexError:
return ""
self.ax.format_coord = format_coord
# DO NOT TOUCH format_coord!!!!
if isinstance(self.titles, (list,tuple)) and len(self.titles) > 0:
try:
self.fig.canvas.set_window_title(self.titles[self.index])
except:
self.fig.canvas.set_window_title(self.titles[0])
elif isinstance( self.titles, str ):
self.fig.canvas.set_window_title(self.titles)
if 'qt' in plt.matplotlib.get_backend().lower():
self.fig.canvas.manager.window.raise_() #this pops the window to the top
# TODO: X-Y profiles
# if self.showProfiles:
# posProf = self.posProfHoriz
# self.axX.cla()
# self.axX.plot(rx+1,self.im2show[posProf,rx])
## plt.xlim(rx[0],rx[-1])
# self.axX.set_xlim(rx[0],rx[-1])
plt.show( block=self.blocking )
def printStat(self, mode='all'):
if mode == 'all':
modePrint = 'all frames'
img = self.im
if self.complex:
modePrint = 'the modulus'
img = self.im[0,...]
elif mode == 'curr':
if self.im.ndim > 2:
img = self.im[self.index, ...]
modePrint = 'frame %d'%self.index
else:
img = self.im
modePrint = 'the current frame'
else:
print( "Unknown statistics mode: %s" % mode )
return
print( "===========================================" )
print( "Statistics of " + modePrint + " in figure %g:"%self.figNum)
print( "Shape: ", img.shape )
print( "Maximum: ", img.max(), "@", np.unravel_index(np.argmax(img),img.shape))
print( "Minimum: ", img.min(), "@", np.unravel_index(np.argmin(img),img.shape))
print( "Center of mass:", ni.measurements.center_of_mass(img))
print( "Mean: ", img.mean())
print( "Standard deviation: ", img.std())
print( "Variance: ", img.var() )
print( "Sum: ", img.sum())
print( "Data type:", self.dtype)
print( "===========================================" )
def __exit__(self, event):
print( "Exiting IMS" )
self.exiting = True
self.fig.close()
def __call__(self, event):
redraw = False
recompute = False
# print( "Received key press %s" % event.key )
if event.key=='n':#'up': #'right'
if self.im.ndim > 2:
self.index = np.minimum(self.im.shape[0]-1, self.index+1)
recompute = True
elif event.key | |
<gh_stars>10-100
import json
import logging
import os
import re
import shutil
import tempfile
import time
from typing import Tuple, List, Dict, Optional
from cloudfoundry_client.client import CloudFoundryClient
from cloudfoundry_client.operations.push.cf_ignore import CfIgnore
from cloudfoundry_client.operations.push.file_helper import FileHelper
from cloudfoundry_client.operations.push.validation.manifest import ManifestReader
from cloudfoundry_client.v2.entities import Entity
_logger = logging.getLogger(__name__)
class PushOperation(object):
UPLOAD_TIMEOUT = 15 * 60
SPLIT_ROUTE_PATTERN = re.compile(r"(?P<protocol>[a-z]+://)?(?P<domain>[^:/]+)(?P<port>:\d+)?(?P<path>/.*)?")
def __init__(self, client: CloudFoundryClient):
self.client = client
def push(self, space_id: str, manifest_path: str, restart: bool = True):
app_manifests = ManifestReader.load_application_manifests(manifest_path)
organization, space = self._retrieve_space_and_organization(space_id)
for app_manifest in app_manifests:
if "path" in app_manifest or "docker" in app_manifest:
self._push_application(organization, space, app_manifest, restart)
def _retrieve_space_and_organization(self, space_id: str) -> Tuple[Entity, Entity]:
space = self.client.v2.spaces.get(space_id)
organization = space.organization()
return organization, space
def _push_application(self, organization: Entity, space: Entity, app_manifest: dict, restart: bool):
app = self._init_application(space, app_manifest)
self._route_application(
organization,
space,
app,
app_manifest.get("no-route", False),
app_manifest.get("routes", []),
app_manifest.get("random-route", False),
)
if "path" in app_manifest:
self._upload_application(app, app_manifest["path"])
self._bind_services(space, app, app_manifest.get("services", []))
if restart:
PushOperation._restart_application(app)
def _init_application(self, space: Entity, app_manifest: dict) -> Entity:
app = self.client.v2.apps.get_first(name=app_manifest["name"], space_guid=space["metadata"]["guid"])
return self._update_application(app, app_manifest) if app is not None else self._create_application(space, app_manifest)
def _create_application(self, space: Entity, app_manifest: dict) -> Entity:
_logger.debug("Creating application %s", app_manifest["name"])
request = self._build_request_from_manifest(app_manifest)
request["environment_json"] = PushOperation._merge_environment(None, app_manifest)
request["space_guid"] = space["metadata"]["guid"]
if request.get("health-check-type") == "http" and request.get("health-check-http-endpoint") is None:
request["health-check-http-endpoint"] = "/"
return self.client.v2.apps.create(**request)
def _update_application(self, app: Entity, app_manifest: dict) -> Entity:
_logger.debug("Uploading application %s", app["entity"]["name"])
request = self._build_request_from_manifest(app_manifest)
request["environment_json"] = PushOperation._merge_environment(app, app_manifest)
if (
request.get("health-check-type") == "http"
and request.get("health-check-http-endpoint") is None
and app["entity"].get("health_check_http_endpoint") is None
):
request["health-check-http-endpoint"] = "/"
return self.client.v2.apps.update(app["metadata"]["guid"], **request)
def _build_request_from_manifest(self, app_manifest: dict) -> dict:
request = dict()
request.update(app_manifest)
stack = self.client.v2.stacks.get_first(name=app_manifest["stack"]) if "stack" in app_manifest else None
if stack is not None:
request["stack_guid"] = stack["metadata"]["guid"]
docker = request.pop("docker", None)
if docker is not None and "image" in docker:
request["docker_image"] = docker["image"]
request["diego"] = True
if "username" in docker and "password" in docker:
request["docker_credentials"] = dict(username=docker["username"], password=docker["password"])
buildpacks = request.pop("buildpacks", None)
if "buildpack" not in request and buildpacks is not None and len(buildpacks) > 0:
request["buildpack"] = buildpacks[0]
return request
@staticmethod
def _merge_environment(app: Optional[Entity], app_manifest: dict) -> dict:
environment = dict()
if app is not None and "environment_json" in app["entity"]:
environment.update(app["entity"]["environment_json"])
if "env" in app_manifest:
environment.update(app_manifest["env"])
return environment
def _route_application(
self, organization: Entity, space: Entity, app: Entity, no_route: bool, routes: List[str], random_route: bool
):
existing_routes = [route for route in app.routes()]
if no_route:
self._remove_all_routes(app, existing_routes)
elif len(routes) == 0 and len(existing_routes) == 0:
self._build_default_route(space, app, random_route)
else:
self._build_new_requested_routes(organization, space, app, existing_routes, routes)
def _remove_all_routes(self, app: Entity, routes: List[Entity]):
for route in routes:
self.client.v2.apps.remove_route(app["metadata"]["guid"], route["metadata"]["guid"])
def _build_default_route(self, space: Entity, app: Entity, random_route: bool):
shared_domain = None
for domain in self.client.v2.shared_domains.list():
if not domain["entity"].get("internal", False):
shared_domain = domain
break
if shared_domain is None:
raise AssertionError("No route specified and no no-route field or shared domain")
if shared_domain["entity"].get("router_group_type") == "tcp":
route = self.client.v2.routes.create_tcp_route(shared_domain["metadata"]["guid"], space["metadata"]["guid"])
elif random_route:
route = self.client.v2.routes.create_host_route(
shared_domain["metadata"]["guid"],
space["metadata"]["guid"],
self._to_host("%s-%d" % (app["entity"]["name"], int(time.time()))),
)
else:
route = self.client.v2.routes.create_host_route(
shared_domain["metadata"]["guid"], space["metadata"]["guid"], self._to_host(app["entity"]["name"])
)
self.client.v2.apps.associate_route(app["metadata"]["guid"], route["metadata"]["guid"])
def _build_new_requested_routes(
self, organization: Entity, space: Entity, app: Entity, existing_routes: List[Entity], requested_routes: List[str]
):
private_domains = {domain["entity"]["name"]: domain for domain in organization.private_domains()}
shared_domains = {domain["entity"]["name"]: domain for domain in self.client.v2.shared_domains.list()}
for requested_route in requested_routes:
route, port, path = PushOperation._split_route(requested_route)
if len(path) > 0 and port is not None:
_logger.error("Neither path nor port provided for route", requested_route)
raise AssertionError("Cannot set both port and path for route: %s" % requested_route)
host, domain_name, domain = PushOperation._resolve_domain(route, private_domains, shared_domains)
if port is not None and host is not None:
_logger.error("Host provided in route %s for tcp domain %s", requested_route, domain_name)
raise AssertionError(
"For route (%s) refers to domain %s that is a tcp one. It is hence routed by port and not by host"
% (requested_route, domain_name)
)
route_to_map = None
if port is not None and domain["entity"].get("router_group_type") != "tcp":
_logger.error("Port provided in route %s for non tcp domain %s", requested_route, domain_name)
raise AssertionError("Cannot set port on route(%s) for non tcp domain" % requested_route)
elif domain["entity"].get("router_group_type") == "tcp" and port is None:
_logger.error("No port provided in route %s for tcp domain %s", requested_route, domain_name)
raise AssertionError("Please specify a port on route (%s) for tcp domain" % requested_route)
elif domain["entity"].get("router_group_type") == "tcp":
if not any(
    route["entity"]["domain_guid"] == domain["metadata"]["guid"] and route["entity"]["port"] == port
    for route in existing_routes
):
route_to_map = self._resolve_new_tcp_route(space, domain, port)
else:
if not any(
    route["entity"]["domain_guid"] == domain["metadata"]["guid"] and route["entity"]["host"] == host
    for route in existing_routes
):
route_to_map = self._resolve_new_host_route(space, domain, host, path)
if route_to_map is not None:
_logger.debug("Associating route %s to application %s", requested_route, app["entity"]["name"])
self.client.v2.apps.associate_route(app["metadata"]["guid"], route_to_map["metadata"]["guid"])
def _resolve_new_host_route(self, space: Entity, domain: Entity, host: str, path: str) -> Entity:
existing_route = self.client.v2.routes.get_first(domain_guid=domain["metadata"]["guid"], host=host, path=path)
if existing_route is None:
_logger.debug("Creating host route %s on domain %s and path %s", host, domain["entity"]["name"], path)
existing_route = self.client.v2.routes.create_host_route(
domain["metadata"]["guid"], space["metadata"]["guid"], host, path
)
else:
_logger.debug(
"Host route %s on domain %s and path %s already exists with guid %s",
host,
domain["entity"]["name"],
path,
existing_route["metadata"]["guid"],
)
return existing_route
def _resolve_new_tcp_route(self, space: Entity, domain: Entity, port: int) -> Entity:
existing_route = self.client.v2.routes.get_first(domain_guid=domain["metadata"]["guid"], port=port)
if existing_route is None:
_logger.debug("Creating tcp route %d on domain %s", port, domain["entity"]["name"])
existing_route = self.client.v2.routes.create_tcp_route(domain["metadata"]["guid"], space["metadata"]["guid"], port)
else:
_logger.debug(
"TCP route %d on domain %s already exists with guid %s",
port,
domain["entity"]["name"],
existing_route["metadata"]["guid"],
)
return existing_route
@staticmethod
def _split_route(requested_route: Dict[str, str]) -> Tuple[str, int, str]:
route_splitted = PushOperation.SPLIT_ROUTE_PATTERN.match(requested_route["route"])
if route_splitted is None:
raise AssertionError("Invalid route: %s" % requested_route["route"])
domain = route_splitted.group("domain")
port = route_splitted.group("port")
path = route_splitted.group("path")
return domain, int(port[1:]) if port is not None else None, "" if path is None or path == "/" else path
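# For illustration (hypothetical values): _split_route({"route": "tcp.example.com:61001"})
# yields ("tcp.example.com", 61001, ""), while _split_route({"route": "myapp.example.com/shop"})
# yields ("myapp.example.com", None, "/shop").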
@staticmethod
def _resolve_domain(
route: str, private_domains: Dict[str, Entity], shared_domains: Dict[str, Entity]
) -> Tuple[str, str, Entity]:
for domains in [private_domains, shared_domains]:
if route in domains:
return "", route, domains[route]
else:
idx = route.find(".")
if 0 < idx < (len(route) - 2):
host = route[:idx]
domain = route[idx + 1 :]
if domain in domains:
return host, domain, domains[domain]
raise AssertionError("Cannot find domain for route %s" % route)
def _upload_application(self, app: Entity, application_path: str) -> Entity:
_logger.debug("Uploading application %s", app["entity"]["name"])
if os.path.isfile(application_path):
self._upload_application_zip(app, application_path)
elif os.path.isdir(application_path):
self._upload_application_directory(app, application_path)
else:
raise AssertionError("Path %s is neither a directory nor a file" % application_path)
def _upload_application_zip(self, app: Entity, path: str):
_logger.debug("Unzipping file %s", path)
tmp_dir = tempfile.mkdtemp()
try:
FileHelper.unzip(path, tmp_dir)
self._upload_application_directory(app, tmp_dir)
finally:
shutil.rmtree(tmp_dir)
def _upload_application_directory(self, app: Entity, application_path: str):
_logger.debug("Uploading application from directory %s", application_path)
_, temp_file = tempfile.mkstemp()
try:
resource_descriptions_by_path = PushOperation._load_all_resources(application_path)
def generate_key(item: dict):
return "%s-%d" % (item["sha1"], item["size"])
already_uploaded_entries = [
generate_key(item)
for item in self.client.v2.resources.match(
[dict(sha1=item["sha1"], size=item["size"]) for item in resource_descriptions_by_path.values()]
)
]
_logger.debug("Already uploaded %d / %d items", len(already_uploaded_entries), len(resource_descriptions_by_path))
FileHelper.zip(
temp_file,
application_path,
lambda item: item in resource_descriptions_by_path
and generate_key(resource_descriptions_by_path[item]) not in already_uploaded_entries,
)
_logger.debug("Diff zip file built: %s", temp_file)
resources = [
dict(
fn=resource_path,
sha1=resource_description["sha1"],
size=resource_description["size"],
mode=resource_description["mode"],
)
for resource_path, resource_description in resource_descriptions_by_path.items()
if generate_key(resource_description) in already_uploaded_entries
]
_logger.debug("Uploading bits of application")
job = self.client.v2.apps.upload(app["metadata"]["guid"], resources, temp_file, True)
self._poll_job(job)
finally:
_logger.debug("Skipping remove of zip file")
@staticmethod
def _load_all_resources(top_directory: str) -> dict:
application_items = {}
cf_ignore = CfIgnore(top_directory)
for directory, file_names in FileHelper.walk(top_directory):
for file_name in file_names:
relative_file_location = os.path.join(directory, file_name)
if not cf_ignore.is_entry_ignored(relative_file_location):
absolute_file_location = os.path.join(top_directory, relative_file_location)
application_items[relative_file_location] = dict(
sha1=FileHelper.sha1(absolute_file_location),
size=FileHelper.size(absolute_file_location),
mode=FileHelper.mode(absolute_file_location),
)
return application_items
def _bind_services(self, space: Entity, app: Entity, services: List[str]):
service_instances = [
service_instance for service_instance in space.service_instances(return_user_provided_service_instances="true")
]
service_name_to_instance_guid = {
service_instance["entity"]["name"]: service_instance["metadata"]["guid"] for service_instance in service_instances
}
existing_service_instance_guid = [
service_binding["entity"]["service_instance_guid"] for service_binding in app.service_bindings()
]
for service_name in services:
service_instance_guid = service_name_to_instance_guid.get(service_name)
if service_instance_guid is None:
raise AssertionError("No service found with name %s" % service_name)
elif service_instance_guid in existing_service_instance_guid:
_logger.debug("%s already bound to %s", app["entity"]["name"], service_name)
else:
_logger.debug("Binding %s to %s", app["entity"]["name"], service_name)
self.client.v2.service_bindings.create(app["metadata"]["guid"], service_instance_guid)
def _poll_job(self, job: Entity):
def job_not_ended(j):
return j["entity"]["status"] in ["queued", "running"]
job_guid = job["metadata"]["guid"]
_logger.debug("Waiting for upload of application to be complete. Polling job %s...", job_guid)
started_time = time.time()
elapsed_time = 0
while job_not_ended(job) and elapsed_time < PushOperation.UPLOAD_TIMEOUT:
_logger.debug("Getting job status %s..", job_guid)
job = self.client.v2.jobs.get(job_guid)
if job_not_ended(job):
time.sleep(5)
| |
[-2.247976724721672, -2.3230490248981575, 4.8350209920697722, -0.32527190181639298]]], [[[2.5335174897949022,
-0.20854461379276934, 3.2780648503024352, -1.6620646994478872], [-2.1379815500652954, -3.2990143123595739,
2.9742698157590199, -3.0724429738487213], [-1.941430518641484, -3.8959116863238554, -3.0686598489329029,
4.0511881765933175]], [[0.02000325485725174, 3.2082680619577548, -4.598681116192207, 4.4506904679653321],
[-1.3308670983956148, -1.4563013194069239, 1.9676557830927131, -1.443387189237999], [3.9560651184617885,
3.6345685490807202, -3.1473146440504864, 2.5626571964669047]]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[[-0.26692229001064205, 0.19885873770611004, -1.0465465029518624, 1.1181056258382696],
[-0.13830360552907706, 0.80330964898773283, -19.429643178861738, 0.099203093268639547], [2.986875938683629,
-1.1480465371192021, 0.51934518479270775, 0.49648401132503633]], [[1.2279008511818705, 0.28180912845077649,
-1.6730565421652779, -0.34733288039983173], [-0.70062421444068845, -0.11353892593910048, 9.2276510545379935,
0.66963678015000538], [-3.2816488989412327, -0.018558832023123775, 0.9807787578967162, -0.32796280554448065]]],
[[[0.41563304164901532, 4.8953633274033006, -1.0060304328401219, -1.1616260269333594], [-101.43397097149828,
1.490297970370789, -0.62428760628404523, -1.1850178719997067], [-0.053555636159778923, 1.0371300604863249,
-1.291058595851039, -0.18245067811287974]], [[-0.16407151694367492, 0.25326309579912565, -10.822193808552843,
3.3145404571095454], [-5.1863817870065798, -1.348733628879734, 1.3321515146271816, 3.3642001467839115],
[1.1845081664272488, -1.9102494451953862, -0.74401954478721888, -2.0039395631523571]]], [[[1.2190939122756466,
-14.998071124806016, 0.51155603954235385, -0.5906616210734561], [-1.9609158519571066, 0.14405209220302512,
0.71003058349945258, -0.71349209293425142], [-0.19634221905167631, 0.026507155728614104, 0.21445137531607825,
0.66314735446318107]], [[-157.61442618672615, -1.0100192970849919, 0.22541064348860901, 0.86434760274348943],
[1.3673457816808718, 1.0231862389635615, 0.09224972362744735, 1.9442455668211094], [-1.2144237177893482,
0.99364914197916654, -1.4532221807783379, -1.8215580986331779]]]])+(1.-msk_ref)*numpy.array([[[[9.0847562175307548,
1.778995292561067, -1.0232209271680595, -2.6987201009137105], [0.14972528615081884, 0.12133798041887074,
-131.42919529410358, -1.0919436530339175], [-1.3941761929913368, 1.164058005397852, -0.74681041285503469,
-0.80051890017845562]], [[-0.38249803887159334, 4.1689131086319398, 0.73141690092266454, -0.90446672481449997],
[0.76060602339657379, 0.2173604465747247, -9.879728652007838, -0.47244978099981061], [-20.909614121584703,
-0.52322153328871157, -1.0481592170429599, 1.1181278413803195]]], [[[0.29732648638247061, -1.1389455735288434,
0.20023948914673259, 0.14814687684718106], [-12.430756373157672, -1.5125118563266158, 0.064114862641108966,
2.0187332441924042], [2.3110877318397747, -0.85481475622463277, -0.79738959712203872, 1.107634200210057]],
[[0.5537448644793036, -0.92709881139841899, -10.271674824635689, -1.2580773672773902], [4.0101027383583956,
-0.15510131000245292, -0.29802903157125132, -5.402719975423115], [-0.0025642366860165779, -0.59028737896051597,
0.68976761182818369, -13.511219209980442]]], [[[-0.72307985519587537, 1.1578563845894034, 0.38289603651893378,
2.6914473541170971], [-0.8288972310274092, 0.5803287826743494, -0.37735470305926527, -0.67287427157387836],
[-0.97204427369426094, -0.32735223991262846, -1.3978494129477628, 0.82072493298010418]], [[59.459275890228867,
-0.95758774625624643, 0.87822569277673845, 0.61462320089170264], [-2.65978045034828, -2.3738902894641662,
-0.76043784039873807, -0.92389751694797129], [0.31008655924816314, 0.89999700786337411, 1.5387790324814787,
0.61943307038110118]]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
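# Note (added): the generated tests below all follow the same pattern: build a lazy
# symbolic expression with the overloaded ** operator, substitute a concrete numpy
# value for the Symbol, and compare the evaluated result against a precomputed
# reference within self.RES_TOL using the Lsup norm.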
def test_pow_overloaded_constData_rank0_Symbol_rank0(self):
arg0=Data(3.78107997963,self.functionspace)
arg1=Symbol(shape=())
res=arg0**arg1
s1=numpy.array(0.0800878500517)
sub=res.substitute({arg1:s1})
ref=Data(1.11239752186,self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_pow_overloaded_constData_rank0_Symbol_rank1(self):
arg0=Data(4.90258552017,self.functionspace)
arg1=Symbol(shape=(2,))
res=arg0**arg1
s1=numpy.array([1.2995894043345173, 0.78253444504138814])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([7.8934933252568413, 3.4696165734693301]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_pow_overloaded_constData_rank0_Symbol_rank2(self):
arg0=Data(3.92827709573,self.functionspace)
arg1=Symbol(shape=(4, 5))
res=arg0**arg1
s1=numpy.array([[4.1349425587832229, 0.91098413715014492, 2.7789836134951633, 0.12883166700263957,
3.6410211713020102], [4.0888877726009252, 4.0211549295632993, 4.6892515161560091, 3.4849861795634953,
4.0032210210582928], [4.299348637922626, 1.7371642461914261, 2.185930497819307, 1.7970091028967969,
4.5443361828998592], [3.1255490514768072, 2.9012687039592109, 0.33081920581130919, 1.5123642954465053,
4.3636469435748326]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[286.41226576143146, 3.477832814129735, 44.799992375326617, 1.1927572059187601,
145.71440254454342], [268.92164649005002, 245.12000850057427, 611.45312320484868, 117.70262752348444,
239.17864250542837], [358.65926391850468, 10.770271935452501, 19.901441052324959, 11.689248981538858,
501.48063851421955], [71.979339350865629, 52.959027255929435, 1.5724377868886246, 7.9186302143497267,
391.64108226882905]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_pow_overloaded_constData_rank0_Symbol_rank3(self):
arg0=Data(3.16126608038,self.functionspace)
arg1=Symbol(shape=(6, 2, 2))
res=arg0**arg1
s1=numpy.array([[[2.7962312793949295, 2.0684099882736957], [0.38062016949789762, 3.4476824424756334]],
[[2.041547872112385, 0.47599085293280569], [4.1441460833378203, 0.81184164478938559]], [[4.3084580838333535,
4.0366716734419619], [0.46138540150130164, 3.7240656768046549]], [[0.67126378380454188, 2.6436153849274531],
[3.6341579561428712, 0.79424640609140063]], [[2.8792963151587307, 0.15059024316863731], [1.4454736834885067,
0.20711228881254098]], [[2.5282275840574844, 1.1701013108471818], [4.7723602197014845, 4.287233089759706]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[24.98774742669173, 10.812287661988025], [1.5497341316804971, 52.888612151720686]],
[[10.48311297133106, 1.7295347337178362], [117.89549729799067, 2.5457046637850267]], [[142.43948278798089,
104.17774369773555], [1.7007034469541953, 72.69681517764694]], [[2.1654024349191139, 20.962369213688653],
[65.550199410739893, 2.4946685023586084]], [[27.494642910796053, 1.1892528402215572], [5.2788095694213668,
1.2691921182937207]], [[18.355342751374547, 3.8449267600833394], [242.95005261713331,
139.00193314904641]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_pow_overloaded_constData_rank0_Symbol_rank4(self):
arg0=Data(0.993829389255,self.functionspace)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0**arg1
s1=numpy.array([[[[0.70582890780189012, 4.459679941127856, 1.7698082654335738, 0.7425015240513263],
[0.17863451536630379, 0.23071258083531751, 1.1519692954618987, 1.546358793346283], [0.43213129061136418,
4.7802564121893596, 3.8019765535927128, 1.7733817493669464]], [[4.4818458367006624, 2.8755675026947229,
1.8170070828708857, 3.689568151497173], [3.2656926466727252, 0.84976244428097047, 0.086645785873186429,
4.3622867616943184], [2.510711276781572, 1.1909328518939111, 2.7614763003730736, 2.859226851647112]]],
[[[2.2999398221257867, 3.7396236508286136, 3.6474360642296735, 0.22271721684765292], [2.5248956632409825,
4.8262190185960856, 0.37713575312925274, 0.15680972420135947], [2.5933818932928228, 4.6152058845108996,
0.37698282218649021, 0.48371699902902621]], [[1.5772879034133864, 0.010480084060075445, 2.7768860672153597,
4.4433640193360082], [3.4675949148609839, 1.0817479487868906, 3.3899814682499194, 0.87646369237945143],
[0.49739388005553137, 1.3463814284307101, 1.0826007333159808, 0.79496282667929863]]], [[[2.4363105443735353,
3.4203323611137431, 1.8278252124219663, 3.6768660714690418], [3.8007621375663323, 3.2788349807887727,
4.723911572993079, 4.5936800906463686], [1.8071402427683898, 3.3902207884220079, 0.37997536953424454,
4.7038449248986476]], [[3.2488654149278418, 1.6048006810659028, 4.9317774433591417, 2.1007443740528178],
[3.054516223687195, 0.55422473313263965, 3.5194589681498765, 3.326550366831976], [4.3669194535152762,
0.047451200822730306, 0.70452802796481673, 0.1404011071625294]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[0.99564064100720095, 0.97277331018429902, 0.98910515232215201, 0.99541466269551271],
[0.99889491206162018, 0.99857297113553989, 0.99289498462376558, 0.99047412161372594], [0.99732879902719118,
0.97084496836204925, 0.97674154691389037, 0.98908327465377543]], [[0.97263985400820108, 0.98235848653943914,
0.98881622960918547, 0.9774213780802502], [0.97998918013383751, 0.99475401042354128, 0.99946382997432526,
0.97335991097124475], [0.98457951330340654, 0.99265555323835852, 0.98305246670030244, 0.98245785140758057]]],
[[[0.98586485161567172, 0.97711859061328932, 0.97767630927362992, 0.99862239086150251], [0.98449307348906423,
0.97056880612221985, 0.99766835491976258, 0.99902986140398409], [0.98407582438051522, 0.97183730768595933,
0.99766929931395654, 0.99701040129524177]], [[0.99028452065604722, 0.99993513323790106, 0.98295870540613939,
0.97287155660372138], [0.97876523296044249, 0.99332664123569459, 0.97923555070780133, 0.99458961757808051],
[0.99692600178713375, 0.99170089495067859, 0.99332139797090802, 0.99509148295090599]]], [[[0.98503303654727714,
0.97905160511359157, 0.98875001942994056, 0.97749822833139455], [0.97674888901436785, 0.97990946373697985,
0.9711836185354098, 0.9719668027506676], [0.98887662147928368, 0.97923410014112311, 0.99765081960374968,
0.97130425391482611]], [[0.98009125718466428, 0.99011589292747115, 0.96993486503795368, 0.98708113870220937],
[0.98127098560076487, 0.99657537729014167, 0.97845107590229674, 0.97962009487115176], [0.9733320001780138,
0.99970633311899726, 0.99564865802941016, 0.99913133289538214]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_pow_overloaded_constData_rank1_Symbol_rank0(self):
arg0=Data(numpy.array([2.866799909147558, 4.876125586700824]),self.functionspace)
arg1=Symbol(shape=())
res=arg0**arg1
s1=numpy.array(2.71414665156)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([17.435868015676721, 73.711650960189885]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_pow_overloaded_constData_rank1_Symbol_rank1(self):
arg0=Data(numpy.array([2.2084898163228521, 1.4985706335851434]),self.functionspace)
arg1=Symbol(shape=(2,))
res=arg0**arg1
s1=numpy.array([2.9685871945135585, 4.2199180780027188])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([10.506962956516261, 5.5124348187786136]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_pow_overloaded_constData_rank2_Symbol_rank0(self):
arg0=Data(numpy.array([[0.77922995940155448, 4.9091672288365764, 4.6023034116074584, 4.0585585948105418,
3.8131836319105985], [0.63921561638913638, 1.9638083205927952, 0.87156249802113606, 1.8813609152354294,
4.652603835129498], [4.4185672711880182, 4.342637431217093, 2.1485008678971482, 3.5999975099694348,
3.0608084623767837], [1.5079573236002384, 0.75251090029482415, 1.7564448317741483, 3.6923994643349598,
1.3401399799253775]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0**arg1
s1=numpy.array(1.88416909919)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[0.62499961485879429, 20.043593846903835, 17.748329103451528, 14.004750781266964,
12.452150471142952], [0.43033512176215821, 3.5665495273262708, 0.7718134533153036, 3.2896665757733499,
18.115582822820418], [16.436880911894658, 15.90873223635964, 4.2247353523330435, 11.17293869306093,
8.2299551314515949], [2.1682788679762832, 0.5852335625438071, 2.8902325719037805, 11.71940284996929,
1.7360906938610048]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_pow_overloaded_constData_rank2_Symbol_rank2(self):
arg0=Data(numpy.array([[1.3915653950314695, 0.72721703282696448, 3.1310378168895978, 2.6348429358346048,
2.0782491529918197], [1.6459194387429872, 2.562889776578305, 2.0761088251705457, 0.22461806677113369,
4.206926695670866], [3.2315466420058132, 0.54551438767992344, 2.3962603907765661, 3.5705685721719806,
1.576440920087347], [4.5957055774395901, 2.6996348087339399, 1.4523409725849665, 2.7420672940204591,
1.9685543649271546]]),self.functionspace)
arg1=Symbol(shape=(4, 5))
res=arg0**arg1
s1=numpy.array([[2.5855195439682124, 1.0827101090847635, 4.9957557839533946, 0.96832195337783145,
3.2516725650946836], [2.885435871411993, 3.9749315017293902, 4.653244839630724, 2.3530666812704477,
2.8630405667066632], [2.0653715617592563, 1.7593252053700164, 0.52661102213923405, 0.82097677205000363,
2.4546391327367552], [4.7379088202900395, 3.5407454959024718, 1.2463674187947462, 3.0822199358702487,
1.0765535893703142]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[2.349802789241195, 0.70830818466492951, 299.45926695476669, 2.5552067368444966,
10.790674928106091], [4.2114637167679394, 42.137956376461631, 29.939356280651772, 0.029778724884887482,
61.155701008278086], [11.275136610799086, 0.34431546555745196, 1.5844071459062889, 2.8430503327720711,
3.0565212302888405], [1374.5619596132401, 33.662107628675898, 1.5921977639828369, 22.400261085750593,
2.0733157810519325]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_pow_overloaded_constData_rank3_Symbol_rank0(self):
arg0=Data(numpy.array([[[3.0660567741514542, 4.3056273316538842], [3.0504750623805625, 3.7871768115665883]],
[[0.69274312119694637, 4.6538571932210537], [4.8355982528719377, 2.9815267638605385]], [[1.1230458665786869,
2.1830008754633661], [0.91820982431621256, 0.077119031117770812]], [[2.9614071331788696, 3.5610220293470181],
[2.6599569889924082, 3.6221415743920904]], [[1.1098062342497441, 2.676585421551215], [2.0821210491789137,
0.94398503882355267]], [[1.1824417768595235, 1.7231439179679477], [2.2607396485998188,
3.3720654338288014]]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0**arg1
s1=numpy.array(0.560759226052)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[1.8743657165500538, 2.2674712116369911], [1.8690182094624257, 2.1100648252921164]],
[[0.81395313946463899, 2.3685487640913623], [2.4199799687700909, 1.8452103631477881]], [[1.0672369661252965,
1.5492717722289933], [0.95327751240899017, 0.23766519192617833]], [[1.8382175994812311, 2.0384524232966164],
[1.7308218130238717, 2.0579983247753746]], [[1.0601632912957684, 1.7368809476716751], [1.5087078979960178,
0.96819192271565813]], [[1.0985300082097622, 1.356811420052328], [1.5799715425927656,
1.9770722875528537]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_pow_overloaded_constData_rank3_Symbol_rank3(self):
arg0=Data(numpy.array([[[3.3957720535524389, 2.4552077579471865], [4.7087905663535405, 1.2879146157483581]],
[[0.040773334485048905, 1.338962649449466], [0.15911448294230743, 0.69782664300100028]], [[1.1490297281690691,
3.6797714112654387], [1.6806152634816163, 2.8654713323448382]], [[3.7833659416351537, 4.1661154468039028],
[3.469956553077123, 3.9754846411912803]], [[1.8241168951232549, 4.4870775362426878], [1.3424534585235119,
1.3802296431342149]], [[3.3210730308450875, 2.5390685855193476], [1.1031930645119352,
2.4082267572037321]]]),self.functionspace)
arg1=Symbol(shape=(6, 2, 2))
res=arg0**arg1
s1=numpy.array([[[2.0527495636735842, 1.3882529561124286], [1.3707482038402852, 0.39288536207269026]],
[[2.3705399572424319, 4.3811434485491185], [3.6465442725413943, 0.81187293080800826]], [[1.5760458701127449,
2.4821521454352227], [2.2611045908095928, 1.222781185593224]], [[4.0828137815142842, 2.0166310563465872],
[3.0037225489944039, 3.8035215103515534]], [[1.8270206735321162, 2.0909380886426585], [1.700233947322449,
0.94341612832048682]], [[3.4512483540336021, 1.9153164559186842], [3.4796412330592381, 4.631860024508482]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[12.299397442261979, 3.4796948710857625], [8.3635241772012918, 1.1045185700581053]],
[[0.0005079738169757814, 3.592450715967165], [0.0012274367495971861, 0.74669434602170293]], [[1.2447577564334256,
25.37776750720391], [3.2345019416156076, 3.6228533120009838]], [[228.75452373327897, 17.773353840406003],
[41.974303066305048, 190.45529976112371]], [[2.9988077003642086, 23.078919356327741], [1.6499038138569069,
1.3552903973767674]], [[62.959505654838587, 5.9577132902226237], [1.4073820176182781,
58.609608978624692]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_pow_overloaded_constData_rank4_Symbol_rank0(self):
arg0=Data(numpy.array([[[[3.6942258331411808, 1.2537649796273209, 0.42770554254169135, 4.700673631145162],
[4.4515625216392314, 4.7809283564358287, 3.701290949078563, 2.0902986899469349], [0.95965882113153478,
2.5044950882367898, 1.5843001497355667, 1.9737450842678197]], [[3.4205167033036328, 1.4681649100887066,
1.1447801230251771, 3.5317125976738892], [2.925503328877014, 3.6994047548692786, 3.3897537710459931,
3.2244141062166558], [2.6498609343187218, 1.949698950181703, 2.3949386461700057, 1.6425500663306651]]],
[[[4.2780973886416191, 4.0618406835817691, 0.76281370649400426, 2.2204933921619303], [0.35215441411200815,
1.3818360860420937, 2.5239753242436236, 4.5811725609009564], [4.1716817147405427, 0.44398290268201318,
1.3895634892776816, 0.88264326772164403]], [[0.91724435354033873, 2.1538723799206236, 3.4971391199135713,
3.3554017029685319], [2.1064185289212674, 2.2856357366084303, 0.51918179267857123, 4.6850674663899552],
[4.7379107746617768, 1.3230622992714034, 4.6315184230420652, 1.8428337487384241]]], [[[2.4876080017868238,
2.6810602120023037, 4.5081405094604081, 2.4120441389715914], [4.8306123550480162, 4.2210604925186157,
4.4800528271783264, 2.9281844533079764], [0.63995448593153881, 1.0111923343576328, 3.3513915338885765,
3.1411718524458285]], [[1.4289737147392298, 0.45352921851220701, 2.0118969615009972, 3.5980186028034558],
[3.616620626102065, 2.7726087163304962, 2.3432614104421239, 2.8491353233149006], [0.5320613661271224,
4.4831794866209096, 1.2664788279071584, 0.28049422891374309]]]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0**arg1
s1=numpy.array(1.06920335544)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[4.0438772782241736, 1.2735412636471706, 0.40329131191155071, 5.232097558570211],
[4.9361882834372706, 5.32766320430086, 4.0521468468151465, 2.1997222137283998], [0.95692805287595428,
2.6687811343412777, 1.6355615272939708, 2.0688366138549434]], [[3.72436858199117, 1.5077044564832467,
1.1555422923156822, 3.8539651197510056], [3.1511076657220811, 4.0499389894595748, 3.6885660756269054,
3.4965304503075489], [2.8347289565408293, 2.0418991380778091, 2.5441507235354024, 1.6999385690588711]]],
[[[4.7308080931849492, 4.4755720332454247, 0.74865452317740788, 2.3465237379092794], [0.32761633574092269,
1.4131120282318979, 2.6909816922836431, 5.0900078595749889], [4.6050969731100313, 0.41972303754885415,
1.4215628284095434, 0.87505100482757892]], [[0.91177753244543847, 2.2713282731526636, 3.8136397625647156,
3.6486130365880771], [2.2178646706058562, 2.4202014010190394, 0.49615640785649234, 5.2135271144615709],
[5.2764248443312534, 1.3489444284266585, 5.1498394369137142, 1.9224660983894153]]], [[[2.6495455109781756,
2.8704290671798018, 5.0032967403347266, 2.5635842415978054], [5.3868816518123941, 4.6634019262660349,
4.9699739610882974, 3.1541954979564295], [0.62048881052075555, 1.011971499899829, 3.6439508643357619,
3.4001032908251019]], [[1.4647126684483562, 0.42937939938465319, 2.1116224485244599, 3.9313784965136467],
[3.9531144908142579, 2.9753493135007223, 2.4854989078477878, 3.063238048859795], [0.50932777150901942,
4.9736826666605927, 1.2873542029993255, 0.25687297390081976]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_pow_overloaded_constData_rank4_Symbol_rank4(self):
arg0=Data(numpy.array([[[[0.99270068488685292, 2.3472808551478828, 3.1312848033300495, 0.28480210095870712],
[4.1760257284106208, 1.6103974533434464, 4.5276876502334824, 0.85611009354413747], [3.2374935042795578,
4.9456771875167229, 4.9802453823156245, 1.2149127690087014]], [[1.8848010664604495, 4.1824957168331602,
2.4934752197401808, 0.28374659653088119], [4.6634122856431866, 4.948465351830853, 4.5396716570475313,
3.7492655830226167], [4.2660162764714533, 0.061690267720591635, 4.6305624131357668, 4.183404037793947]]],
[[[2.7660211396774219, 0.29841308804884997, 2.0624440665165937, 3.4380484119931474], [4.1607961864955918,
3.0023473648209906, 2.7294567940865302, 4.1979140776413049], [1.1126946263221209, 0.12554655331410369,
2.1673178358996532, 1.5768419503465658]], [[2.1884187117240974, 2.9636992993533875, 4.0944289364844799,
0.20431967084707772], [4.9645724723200422, 4.3471545530472664, 3.8082987236588131, 4.9248087663466533],
[0.94748379921640247, 4.1019196615549953, 2.7446431670111022, 3.2260449250615477]]], [[[4.815971417067975,
0.55080927709037153, 4.2444312341013362, 2.4110979642875288], [3.8869586759056411, 3.1718207539088712,
2.2815482728827998, 4.5248091593844997], [1.7555444263914652, 4.8358722724194543, 0.40104556755678317,
0.72504095284272219]], [[1.6815499163639582, 4.7423275338548976, 4.654236755712521, 4.137431652375871],
[2.4100531826283573, 2.054034553663679, 4.6472521248761733, 0.98452264700986813], [4.8377266538556594,
3.082865747001903, 2.5083476832499958, 3.2444587922972503]]]]),self.functionspace)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0**arg1
s1=numpy.array([[[[0.21753326670751072, 4.1682471517901032, 4.1014560095520087, 4.6700944718391098],
[3.4121094051365737, 1.1029489552818827, 2.0092241837599003, 0.33422517551710779], [1.1473193657482628,
2.4557624148843549, 3.4564859349197894, 1.8642853910874539]], [[2.7889107332297205, 0.14971366335904424,
3.0278221311653697, 3.2041631900025194], [2.9835640858496824, 3.958665469023726, 0.91439051931800353,
1.7058037073741574], [2.3938649784363508, 3.0154719728696082, 2.6948872188976947, 3.1007570939621973]]],
[[[2.6265063521385672, 4.5831638786700442, 3.219142414188263, 2.7281296715792824], [3.5791848830985131,
2.9499608289992416, 0.6944852503432879, 3.4643337801246901], [3.8686503402845167, 2.0190151391117173,
4.4489402687543533, 0.060998130418435118]], [[2.7466045097612528, 1.6726966829566545, 3.0165307005368542,
4.277931081219279], [4.5278288671598306, 1.9114000108412157, 2.2131466233201289, 1.7114320346159109],
[4.0455182963260539, 4.4214997321041718, 1.833260490213896, 1.6277957064140334]]], [[[3.1805485901595181,
4.6387720949831293, 4.6764390004305776, 0.92658048030955986], [3.6108344682454585, 2.5431872524441639,
0.36463412919482818, 1.0071871900401932], [2.8537457951315179, 3.6983860534861037, 3.23721845983441,
0.55328298453643388]], [[1.2875876430466009, 4.3485253099923922, 1.8266284021761314, 0.0290062123030045],
[3.4490819270809823, 0.22338889791366173, 3.0381024201828888, 1.9556163422370025], [0.44312399416135478,
0.1273534956738884, 1.2491109040783503, 4.9052871585448905]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[0.99840760191037914, 35.043440814220617, 107.94047254802666, 0.00283573184979817],
[131.25378495858195, 1.69136219457744, 20.78752777038369, 0.94940103216918048], [3.8492184440675246,
50.681976498619285, 257.06170337461504, 1.4375275893388262]], [[5.8572122227013041, 1.2389032321759992,
15.902123865064281, 0.01766446796707654], [98.882782586287178, 561.27606329168657, 3.9881999273730413,
9.5288296866662598], [32.224695168018663, 0.00022487036760118656, 62.202503506565009, 84.569345856197444]]],
[[[14.472256933722013, 0.0039174568156997774, 10.281133718280593, 29.048830143480959], [164.49282671202778,
25.614805564866785, 2.0083921055841238, 144.01116277295671], [1.5115151066807018, 0.01515211425419326,
31.224795639114653, 1.0281694802522898]], [[8.5941661198499677, 6.1551040178885987, 70.258681959622137,
0.0011208680343288049], [1415.2525647254004, 16.590718644897333, 19.286031315896192, 15.310076132459979],
[0.80393467182931233, 513.24239208001916, 6.3658859963948018, 6.7299449373066729]]], [[[148.35743563372318,
0.06288742447799564, 862.89828006183689, 2.2602309897996125], [134.58102586913591, 18.833096590582731,
1.3508986287641715, 4.5741688350871659], [4.9829808253152352, 339.97903268955292, 0.051933953881959871,
0.83702989264405847]], [[1.9526351491620346, 870.09434438616336, 16.592467915656908, 1.0420511218052597],
[20.779788947462336, 1.174446125966941, 106.41687348793162, 0.96995612336544568], [2.0108561274370049,
1.1541707905965375, 3.1541298870902299, 321.58780899817299]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_pow_overloaded_taggedData_rank0_Symbol_rank0(self):
arg0=Data(1.32457126708,self.functionspace)
arg0.setTaggedValue(1,2.5302736409)
arg1=Symbol(shape=())
res=arg0**arg1
s1=numpy.array(0.0639287997913)
sub=res.substitute({arg1:s1})
ref=Data(1.01813209787,self.functionspace)
ref.setTaggedValue(1,1.0611432451)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
# Source repository: 4kssoft/unilm
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.nn.utils import clip_grad_norm_
from collections import defaultdict
from torch._six import container_abcs
from copy import deepcopy
from itertools import chain
def warmup_cosine(x, warmup=0.002):
if x < warmup:
return x/warmup
return 0.5 * (1.0 + math.cos(math.pi * x))  # math.cos: x is a plain float here, not a tensor
def warmup_constant(x, warmup=0.002):
if x < warmup:
return x/warmup
return 1.0
def warmup_linear(x, warmup=0.002):
if x < warmup:
return x/warmup
return max((x-1.)/(warmup-1.), 0)
SCHEDULES = {
'warmup_cosine': warmup_cosine,
'warmup_constant': warmup_constant,
'warmup_linear': warmup_linear,
}
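# Example values (added for illustration), using the default warmup=0.002:
#   warmup_linear(0.001)   -> 0.5     (still in warmup: x/warmup)
#   warmup_linear(0.5)     -> ~0.501  (then decays linearly toward 0 at x=1.0)
#   warmup_linear(1.0)     -> 0.0
#   warmup_constant(0.5)   -> 1.0     (flat after warmup)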
class BertAdam(Optimizer):
"""Implements BERT version of Adam algorithm with weight decay fix.
Params:
lr: learning rate
warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
t_total: total number of training steps for the learning
rate schedule, -1 means constant learning rate. Default: -1
schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'
b1: Adam's b1 (first-moment decay rate). Default: 0.9
b2: Adam's b2 (second-moment decay rate). Default: 0.999
e: Adam's epsilon. Default: 1e-6
weight_decay: Weight decay. Default: 0.01
max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
"""
def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear', b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01, max_grad_norm=1.0):
if lr is not required and lr < 0.0:
raise ValueError(
"Invalid learning rate: {} - should be >= 0.0".format(lr))
if schedule not in SCHEDULES:
raise ValueError("Invalid schedule parameter: {}".format(schedule))
if not 0.0 <= warmup < 1.0 and not warmup == -1:
raise ValueError(
"Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
if not 0.0 <= b1 < 1.0:
raise ValueError(
"Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
if not 0.0 <= b2 < 1.0:
raise ValueError(
"Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
if not e >= 0.0:
raise ValueError(
"Invalid epsilon value: {} - should be >= 0.0".format(e))
defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
b1=b1, b2=b2, e=e, weight_decay=weight_decay,
max_grad_norm=max_grad_norm)
super(BertAdam, self).__init__(params, defaults)
def get_lr(self):
lr = []
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
if len(state) == 0:
return [0]
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
lr_scheduled = group['lr'] * schedule_fct(
state['step']/group['t_total'], group['warmup'])
else:
lr_scheduled = group['lr']
lr.append(lr_scheduled)
return lr
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError(
'Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['next_m'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['next_v'] = torch.zeros_like(p.data)
next_m, next_v = state['next_m'], state['next_v']
beta1, beta2 = group['b1'], group['b2']
# Add grad clipping
if group['max_grad_norm'] > 0:
clip_grad_norm_(p, group['max_grad_norm'])
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
next_m.mul_(beta1).add_(1 - beta1, grad)
next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad)
update = next_m / (next_v.sqrt() + group['e'])
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if group['weight_decay'] > 0.0:
update += group['weight_decay'] * p.data
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
lr_scheduled = group['lr'] * schedule_fct(
state['step']/group['t_total'], group['warmup'])
else:
lr_scheduled = group['lr']
update_with_lr = lr_scheduled * update
p.data.add_(-update_with_lr)
state['step'] += 1
# step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1
# No bias correction
# bias_correction1 = 1 - beta1 ** state['step']
# bias_correction2 = 1 - beta2 ** state['step']
return loss
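# Hedged usage sketch (not part of the original file): a typical way BertAdam is
# constructed, with two parameter groups so that bias/LayerNorm weights skip weight
# decay. `model` and `num_train_steps` are placeholders supplied by the caller.
def _example_build_bertadam(model, num_train_steps, lr=5e-5):
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    grouped_parameters = [
        {'params': [p for n, p in model.named_parameters()
                    if not any(nd in n for nd in no_decay)],
         'weight_decay': 0.01},
        {'params': [p for n, p in model.named_parameters()
                    if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0},
    ]
    # warm up over the first 10% of steps, then decay the learning rate linearly
    return BertAdam(grouped_parameters, lr=lr, warmup=0.1,
                    t_total=num_train_steps, schedule='warmup_linear')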
class BertAdamFineTune(BertAdam):
def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear', b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01, max_grad_norm=1.0):
self.init_param_group = []
super(BertAdamFineTune, self).__init__(params, lr, warmup,
t_total, schedule, b1, b2, e, weight_decay, max_grad_norm)
def save_init_param_group(self, param_groups, name_groups, missing_keys):
self.init_param_group = []
for group, name in zip(param_groups, name_groups):
if group['weight_decay'] > 0.0:
init_p_list = []
for p, n in zip(group['params'], name):
init_p = p.data.clone().detach()
if any(mk in n for mk in missing_keys):
print("[no finetuning weight decay]", n)
# should use the original weight decay
init_p.zero_()
init_p_list.append(init_p)
self.init_param_group.append(init_p_list)
else:
# placeholder
self.init_param_group.append([])
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for i_group, group in enumerate(self.param_groups):
for i_p, p in enumerate(group['params']):
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError(
'Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['next_m'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['next_v'] = torch.zeros_like(p.data)
next_m, next_v = state['next_m'], state['next_v']
beta1, beta2 = group['b1'], group['b2']
# Add grad clipping
if group['max_grad_norm'] > 0:
clip_grad_norm_(p, group['max_grad_norm'])
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
next_m.mul_(beta1).add_(1 - beta1, grad)
next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad)
update = next_m / (next_v.sqrt() + group['e'])
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
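# Added interpretation: when init_param_group is set, the decay term below is
# weight_decay * (2*p - p_init), i.e. the gradient of
# (weight_decay/2) * (||p||^2 + ||p - p_init||^2), so weights are pulled both
# toward zero and toward their pretrained values rather than toward zero only.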
if group['weight_decay'] > 0.0:
if self.init_param_group:
update += group['weight_decay'] * \
(2.0 * p.data -
self.init_param_group[i_group][i_p])
else:
update += group['weight_decay'] * p.data
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
lr_scheduled = group['lr'] * schedule_fct(
state['step']/group['t_total'], group['warmup'])
else:
lr_scheduled = group['lr']
update_with_lr = lr_scheduled * update
p.data.add_(-update_with_lr)
state['step'] += 1
# step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1
# No bias correction
# bias_correction1 = 1 - beta1 ** state['step']
# bias_correction2 = 1 - beta2 ** state['step']
return loss
def load_state_dict_subset_finetune(self, state_dict, num_load_group):
r"""Loads the optimizer state.
Arguments:
state_dict (dict): optimizer state. Should be an object returned
from a call to :meth:`state_dict`.
"""
# deepcopy, to be consistent with module API
state_dict = deepcopy(state_dict)
# Validate the state_dict
groups = self.param_groups
saved_groups = state_dict['param_groups']
if len(groups) < num_load_group or len(saved_groups) < num_load_group:
raise ValueError("loaded state dict has a different number of "
"parameter groups")
param_lens = (len(g['params']) for g in groups[:num_load_group])
saved_lens = (len(g['params']) for g in saved_groups[:num_load_group])
if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)):
raise ValueError("loaded state dict contains a parameter group "
"that doesn't match the size of optimizer's group")
# Update the state
id_map = {old_id: p for old_id, p in
zip(chain(*(g['params'] for g in saved_groups[:num_load_group])),
chain(*(g['params'] for g in groups[:num_load_group])))}
def cast(param, value):
r"""Make a deep copy of value, casting all tensors to device of param."""
if isinstance(value, torch.Tensor):
# Floating-point types are a bit special here.
self.SecurityGroupId = params.get("SecurityGroupId")
self.SecurityGroupName = params.get("SecurityGroupName")
self.SecurityGroupRemark = params.get("SecurityGroupRemark")
if params.get("InboundRule") is not None:
self.InboundRule = []
for item in params.get("InboundRule"):
obj = SecurityGroupsInboundAndOutbound()
obj._deserialize(item)
self.InboundRule.append(obj)
if params.get("OutboundRule") is not None:
self.OutboundRule = []
for item in params.get("OutboundRule"):
obj = SecurityGroupsInboundAndOutbound()
obj._deserialize(item)
self.OutboundRule.append(obj)
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
class SecurityGroupsInboundAndOutbound(AbstractModel):
"""安全组出入规则
"""
def __init__(self):
"""
:param Action: 执行动作
:type Action: str
:param Ip: IP地址
:type Ip: str
:param Port: 端口号
:type Port: str
:param Proto: 协议类型
:type Proto: str
"""
self.Action = None
self.Ip = None
self.Port = None
self.Proto = None
def _deserialize(self, params):
self.Action = params.get("Action")
self.Ip = params.get("Ip")
self.Port = params.get("Port")
self.Proto = params.get("Proto")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
class SourceCommand(AbstractModel):
"""访问命令
"""
def __init__(self):
"""
:param Cmd: 命令
:type Cmd: str
:param Count: 执行次数
:type Count: int
"""
self.Cmd = None
self.Count = None
def _deserialize(self, params):
self.Cmd = params.get("Cmd")
self.Count = params.get("Count")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
class SourceInfo(AbstractModel):
"""访问来源信息
"""
def __init__(self):
"""
:param Ip: 来源IP
:type Ip: str
:param Conn: 连接数
:type Conn: int
:param Cmd: 命令
:type Cmd: int
"""
self.Ip = None
self.Conn = None
self.Cmd = None
def _deserialize(self, params):
self.Ip = params.get("Ip")
self.Conn = params.get("Conn")
self.Cmd = params.get("Cmd")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
class StartupInstanceRequest(AbstractModel):
"""StartupInstance请求参数结构体
"""
def __init__(self):
"""
:param InstanceId: 实例id
:type InstanceId: str
"""
self.InstanceId = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
class StartupInstanceResponse(AbstractModel):
"""StartupInstance返回参数结构体
"""
def __init__(self):
"""
:param TaskId: 任务id
:type TaskId: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TaskId = None
self.RequestId = None
def _deserialize(self, params):
self.TaskId = params.get("TaskId")
self.RequestId = params.get("RequestId")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
class SwitchInstanceVipRequest(AbstractModel):
"""SwitchInstanceVip请求参数结构体
"""
def __init__(self):
"""
:param SrcInstanceId: 源实例ID
:type SrcInstanceId: str
:param DstInstanceId: 目标实例ID
:type DstInstanceId: str
:param TimeDelay: 单位为秒。源实例与目标实例间DTS已断开时间,如果DTS断开时间大于TimeDelay,则不切换VIP,建议尽量根据业务设置一个可接受的值。
:type TimeDelay: int
:param ForceSwitch: 在DTS断开的情况下是否强制切换。1:强制切换,0:不强制切换
:type ForceSwitch: int
:param SwitchTime: now: 立即切换,syncComplete:等待同步完成后切换
:type SwitchTime: str
"""
self.SrcInstanceId = None
self.DstInstanceId = None
self.TimeDelay = None
self.ForceSwitch = None
self.SwitchTime = None
def _deserialize(self, params):
self.SrcInstanceId = params.get("SrcInstanceId")
self.DstInstanceId = params.get("DstInstanceId")
self.TimeDelay = params.get("TimeDelay")
self.ForceSwitch = params.get("ForceSwitch")
self.SwitchTime = params.get("SwitchTime")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
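# Hedged usage sketch (not part of the original SDK module): how the request model
# above can be populated from a plain dict via _deserialize. The instance IDs and
# values are purely illustrative.
def _example_switch_instance_vip_request():
    req = SwitchInstanceVipRequest()
    req._deserialize({
        "SrcInstanceId": "crs-src00001",  # hypothetical source instance ID
        "DstInstanceId": "crs-dst00001",  # hypothetical destination instance ID
        "TimeDelay": 30,      # do not switch if DTS has been disconnected for over 30s
        "ForceSwitch": 0,     # 0: do not force the switch while DTS is disconnected
        "SwitchTime": "now",  # "now" switches immediately, "syncComplete" waits for sync
    })
    return req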
class SwitchInstanceVipResponse(AbstractModel):
"""SwitchInstanceVip返回参数结构体
"""
def __init__(self):
"""
:param TaskId: 任务ID
:type TaskId: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TaskId = None
self.RequestId = None
def _deserialize(self, params):
self.TaskId = params.get("TaskId")
self.RequestId = params.get("RequestId")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
class TaskInfoDetail(AbstractModel):
"""任务信息详情
"""
def __init__(self):
"""
:param TaskId: 任务Id
注意:此字段可能返回 null,表示取不到有效值。
:type TaskId: int
:param StartTime: 开始时间
注意:此字段可能返回 null,表示取不到有效值。
:type StartTime: str
:param TaskType: 任务类型
注意:此字段可能返回 null,表示取不到有效值。
:type TaskType: str
:param InstanceName: 实例名称
注意:此字段可能返回 null,表示取不到有效值。
:type InstanceName: str
:param InstanceId: 实例Id
注意:此字段可能返回 null,表示取不到有效值。
:type InstanceId: str
:param ProjectId: 项目Id
注意:此字段可能返回 null,表示取不到有效值。
:type ProjectId: int
:param Progress: 任务进度
注意:此字段可能返回 null,表示取不到有效值。
:type Progress: float
:param EndTime: 结束时间
注意:此字段可能返回 null,表示取不到有效值。
:type EndTime: str
:param Result: 任务状态
注意:此字段可能返回 null,表示取不到有效值。
:type Result: int
"""
self.TaskId = None
self.StartTime = None
self.TaskType = None
self.InstanceName = None
self.InstanceId = None
self.ProjectId = None
self.Progress = None
self.EndTime = None
self.Result = None
def _deserialize(self, params):
self.TaskId = params.get("TaskId")
self.StartTime = params.get("StartTime")
self.TaskType = params.get("TaskType")
self.InstanceName = params.get("InstanceName")
self.InstanceId = params.get("InstanceId")
self.ProjectId = params.get("ProjectId")
self.Progress = params.get("Progress")
self.EndTime = params.get("EndTime")
self.Result = params.get("Result")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
class TendisNodes(AbstractModel):
"""tendis节点信息
"""
def __init__(self):
"""
:param NodeId: 节点ID
:type NodeId: str
:param NodeRole: 节点角色
:type NodeRole: str
"""
self.NodeId = None
self.NodeRole = None
def _deserialize(self, params):
self.NodeId = params.get("NodeId")
self.NodeRole = params.get("NodeRole")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
class TendisSlowLogDetail(AbstractModel):
"""Tendis慢查询详情
"""
def __init__(self):
"""
:param ExecuteTime: 执行时间
:type ExecuteTime: str
:param Duration: 慢查询耗时(毫秒)
:type Duration: int
:param Command: 命令
:type Command: str
:param CommandLine: 详细命令行信息
:type CommandLine: str
:param Node: 节点ID
:type Node: str
"""
self.ExecuteTime = None
self.Duration = None
self.Command = None
self.CommandLine = None
self.Node = None
def _deserialize(self, params):
self.ExecuteTime = params.get("ExecuteTime")
self.Duration = params.get("Duration")
self.Command = params.get("Command")
self.CommandLine = params.get("CommandLine")
self.Node = params.get("Node")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
class TradeDealDetail(AbstractModel):
"""订单交易信息
"""
def __init__(self):
"""
:param DealId: 订单号ID,调用云API时使用此ID
:type DealId: str
:param DealName: 长订单ID,反馈订单问题给官方客服使用此ID
:type DealName: str
:param ZoneId: 可用区id
:type ZoneId: int
:param GoodsNum: 订单关联的实例数
:type GoodsNum: int
:param Creater: 创建用户uin
:type Creater: str
:param CreatTime: 订单创建时间
:type CreatTime: str
:param OverdueTime: 订单超时时间
:type OverdueTime: str
:param EndTime: 订单完成时间
:type EndTime: str
:param Status: 订单状态 1:未支付 2:已支付,未发货 3:发货中 4:发货成功 5:发货失败 6:已退款 7:已关闭订单 8:订单过期 9:订单已失效 10:产品已失效 11:代付拒绝 12:支付中
:type Status: int
:param Description: 订单状态描述
:type Description: str
:param Price: 订单实际总价,单位:分
:type Price: int
:param InstanceIds: 实例ID
:type InstanceIds: list of str
"""
self.DealId = None
self.DealName = None
self.ZoneId = None
self.GoodsNum = None
self.Creater = None
self.CreatTime = None
self.OverdueTime = None
self.EndTime = None
self.Status = None
self.Description = None
self.Price = None
self.InstanceIds = None
def _deserialize(self, params):
self.DealId = params.get("DealId")
self.DealName = params.get("DealName")
self.ZoneId = params.get("ZoneId")
self.GoodsNum = params.get("GoodsNum")
self.Creater = params.get("Creater")
self.CreatTime = params.get("CreatTime")
self.OverdueTime = params.get("OverdueTime")
self.EndTime = params.get("EndTime")
self.Status = params.get("Status")
self.Description = params.get("Description")
self.Price = params.get("Price")
self.InstanceIds = params.get("InstanceIds")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
class UpgradeInstanceRequest(AbstractModel):
"""UpgradeInstance请求参数结构体
"""
def __init__(self):
"""
:param InstanceId: 实例ID
:type InstanceId: str
:param MemSize: 分片大小 单位 MB
:type MemSize: int
:param RedisShardNum: 分片数量,Redis2.8主从版、CKV主从版和Redis2.8单机版不需要填写
:type RedisShardNum: int
:param RedisReplicasNum: 副本数量,Redis2.8主从版、CKV主从版和Redis2.8单机版不需要填写
:type RedisReplicasNum: int
:param NodeSet: 多AZ实例增加副本时的附带信息,非多AZ实例不需要传此参数。多AZ增加副本时此参数为必传参数,传入要增加的副本的信息,包括副本的可用区和副本的类型(NodeType为1)
:type NodeSet: list of RedisNodeInfo
"""
self.InstanceId = None
self.MemSize = None
self.RedisShardNum = None
self.RedisReplicasNum = None
self.NodeSet = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.MemSize = params.get("MemSize")
self.RedisShardNum = params.get("RedisShardNum")
self.RedisReplicasNum = params.get("RedisReplicasNum")
if params.get("NodeSet") is not None:
self.NodeSet = []
for item in params.get("NodeSet"):
obj = RedisNodeInfo()
obj._deserialize(item)
self.NodeSet.append(obj)
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
class UpgradeInstanceResponse(AbstractModel):
"""UpgradeInstance返回参数结构体
"""
def __init__(self):
"""
:param DealId: 订单ID
:type DealId: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.DealId = None
self.RequestId = None
def _deserialize(self, params):
self.DealId = params.get("DealId")
self.RequestId = params.get("RequestId")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
class UpgradeInstanceVersionRequest(AbstractModel):
"""UpgradeInstanceVersion请求参数结构体
"""
def __init__(self):
"""
:param TargetInstanceType: 目标实例类型,同 [CreateInstances](https://cloud.tencent.com/document/api/239/20026) 的Type,即实例要变更的目标类型
:type TargetInstanceType: str
:param SwitchOption: 切换模式:1-维护时间窗切换,2-立即切换
:type SwitchOption: int
:param InstanceId: 实例ID
:type InstanceId: str
"""
self.TargetInstanceType = None
self.SwitchOption = None
self.InstanceId = None
def _deserialize(self, params):
self.TargetInstanceType = params.get("TargetInstanceType")
self.SwitchOption = params.get("SwitchOption")
self.InstanceId = params.get("InstanceId")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
-57022,
557857506,
69916,
-1,
557911911,
69915,
558113094,
557862135,
-1,
-57019,
558054114,
69956,
-1,
558108519,
69923,
558309705,
558058179,
-1,
-57016,
558250722,
69896,
-1,
558305127,
69895,
558506316,
558254581,
-1,
-57013,
558447330,
69903,
-1,
558501735,
69902,
558702927,
558446505,
-1,
-57010,
558643938,
69898,
-1,
558698343,
69897,
558899538,
558636343,
-1,
-57007,
558840546,
69901,
-1,
558894951,
69900,
559096149,
558831650,
-1,
-57004,
559037154,
69918,
-1,
559091559,
69917,
-57002,
559059252,
69924,
-57001,
559253655,
69893,
-57000,
559315113,
69925,
-56999,
559378348,
69922,
-56998,
559437409,
69919,
-56997,
559497309,
69892,
-56996,
559561442,
69926,
-56995,
559623007,
69894,
-1,
559681383,
69891,
559882605,
556502581,
-1,
559948140,
559835263,
-1,
560013668,
559877905,
69927,
-56990,
559974551,
69935,
-56989,
560021597,
69933,
-1,
560074513,
69957,
560275814,
559974551,
69930,
-1,
560236695,
69931,
560406888,
560226405,
69934,
-1,
560349277,
69936,
560537962,
560349277,
69928,
-1,
560480349,
69929,
560726015,
560474975,
69932,
-1,
560611421,
69958,
-1,
559892308,
69939,
560865662,
559818079,
-1,
560931196,
560808681,
-1,
560996722,
560890190,
-1,
-56975,
560960726,
69944,
-1,
561004370,
69945,
561193333,
560953378,
-1,
-56972,
561138045,
69948,
-1,
561197622,
69949,
561389944,
561132326,
-1,
-56969,
561342218,
69946,
-1,
561400031,
69947,
-56967,
561355015,
69942,
-56966,
561537796,
69943,
-56965,
561601986,
69951,
-1,
561655296,
69950,
-56963,
560883344,
69954,
-1,
561779643,
69953,
561979780,
560822306,
-1,
562045315,
561922806,
-1,
-56959,
562008154,
69890,
-56958,
562044366,
69888,
-1,
562107529,
69889,
-1,
561982619,
69952,
-56955,
561934106,
69955,
-56954,
562323589,
69937,
-56953,
562386533,
69940,
-1,
562436009,
69938,
562692095,
556429132,
-1,
562700685,
562594254,
-1,
562766220,
562662597,
128200,
-1,
562692197,
128185,
-1,
562703054,
128201,
563019775,
562630736,
983058,
563085311,
562888704,
-1,
-56944,
562992067,
983139,
-1,
563051121,
983136,
563224982,
556351581,
-1,
563290516,
563171040,
129490,
-1,
563245637,
128696,
-56939,
563244044,
128063,
-1,
563355962,
128020,
563552665,
563175525,
-1,
-56936,
563506544,
129378,
-1,
563552734,
127851,
-1,
563508806,
127876,
563815046,
548954213,
-1,
563880505,
563762708,
-1,
563946038,
563812965,
-1,
564011489,
563885993,
-1,
564077008,
563957151,
-1,
564142504,
564041389,
-1,
564208037,
564079455,
-1,
564273572,
564162380,
-1,
-56925,
564234391,
122911,
-1,
564281437,
122912,
-1,
564232333,
122885,
-56922,
564168855,
122915,
-56921,
564486245,
122918,
-1,
564533033,
122913,
564732334,
564084829,
122891,
564797868,
564684302,
-1,
-56917,
564755881,
122919,
-1,
564795959,
122921,
-56915,
564762892,
122889,
-1,
564944416,
122890,
565125556,
564688930,
-1,
565191090,
565066439,
-1,
-56911,
565150166,
122907,
-1,
565186321,
122910,
-56909,
565139227,
122916,
-1,
565335264,
122900,
565518775,
565090424,
-1,
-56906,
565460618,
122886,
-1,
565522152,
122888,
565715386,
565477710,
-1,
-56903,
565678340,
122901,
-1,
565738498,
122908,
565911997,
565654310,
-1,
-56900,
565868121,
122903,
-1,
565921915,
122922,
566108608,
565847391,
-1,
-56897,
566059206,
122884,
-1,
566119011,
122892,
566305219,
566040610,
-1,
-56894,
566266549,
122881,
-1,
566313472,
122920,
-56892,
566267549,
122882,
-56891,
566463148,
122902,
-56890,
566523475,
122899,
-56889,
566586473,
122898,
-56888,
566649930,
122897,
-56887,
566713348,
122896,
-56886,
566778703,
122895,
-56885,
566842315,
122894,
-56884,
566905652,
122893,
-56883,
566967690,
122904,
-56882,
567032281,
122883,
-56881,
567091216,
122909,
-1,
567154718,
122880,
567410687,
564031584,
-1,
567419355,
567289849,
-1,
567484885,
567378254,
-1,
-56876,
567448790,
70504,
-1,
567492434,
70505,
567681496,
567423782,
-1,
-56873,
567633674,
70506,
-1,
567691487,
70507,
-56871,
567646471,
70502,
-56870,
567835863,
70508,
-1,
567894788,
70503,
568131583,
567366057,
-1,
-56867,
568037377,
70515,
-56866,
568092939,
70516,
-56865,
568155056,
70514,
-56864,
568216346,
70513,
-1,
568266513,
70512,
568467961,
563956983,
-1,
568533492,
568399687,
-1,
568599014,
568494289,
-1,
-56859,
568546072,
7664,
-1,
568600489,
7668,
568795625,
568549516,
-1,
-56856,
568742680,
7661,
-1,
568797097,
7667,
568992235,
568727586,
7656,
-1,
568930683,
7657,
569123310,
568921873,
-1,
-56851,
569071014,
7655,
-1,
569116182,
7666,
-56849,
569086233,
7665,
-56848,
569276766,
7658,
-56847,
569338065,
7662,
-56846,
569396526,
7660,
-56845,
569455398,
7659,
-1,
569520455,
7663,
569769983,
568476157,
-1,
569778680,
569644921,
-1,
-56841,
569734016,
65064,
-1,
569790427,
65063,
-1,
569720796,
6841,
570040843,
568401207,
-1,
570106378,
570005612,
-1,
570171911,
570053033,
-1,
570237439,
570136237,
-1,
-56834,
570179677,
42614,
-1,
570241320,
42617,
570434049,
570198167,
42615,
-1,
570379407,
42612,
570565123,
570376285,
42613,
-1,
570517042,
42655,
-56828,
570523111,
42618,
-56827,
570647170,
42619,
-56826,
570702937,
42616,
-1,
570764664,
42654,
571015167,
570131759,
-1,
-56823,
570913653,
65071,
-1,
570970065,
65070,
-1,
570057697,
65069,
571220503,
569976159,
-1,
571286037,
571170917,
-1,
571351571,
571246989,
-1,
571417106,
571277312,
-1,
-56816,
571369960,
6844,
-56815,
571434211,
6840,
-1,
571492027,
7676,
-1,
571352429,
6832,
-56812,
571315380,
6835,
-1,
571703635,
7672,
-56810,
571228403,
6833,
-1,
571813527,
7675,
572006943,
571170917,
-1,
572072478,
571952878,
-1,
-56806,
572037362,
66424,
-56805,
572095641,
66426,
-56804,
572153099,
66425,
-56803,
572205149,
66423,
-1,
572265262,
66422,
-1,
572025474,
6839,
572531236,
571965774,
-1,
572596771,
572474220,
-1,
-56798,
572552064,
65066,
-1,
572608475,
65065,
-1,
572552662,
6836,
572858920,
572484059,
-1,
-56794,
572811348,
6846,
-56793,
572856684,
6845,
-1,
572919751,
6843,
573121067,
572822337,
-1,
-56790,
573061379,
6838,
-1,
573122227,
7673,
573317678,
573068936,
-1,
-56787,
573272960,
65068,
-1,
573329371,
65067,
573514289,
573262758,
-1,
-56784,
573469515,
7670,
-1,
573525947,
7671,
-56782,
573478427,
6837,
-56781,
573672461,
7669,
-56780,
573734985,
6842,
-56779,
573785329,
6834,
-1,
573839961,
70459,
574095359,
563898577,
-1,
-56776,
573993614,
128476,
-1,
574035619,
129517,
574235243,
563833041,
-1,
574300778,
574194776,
-1,
574366312,
574238742,
-1,
574431834,
574316532,
-1,
574497349,
574390606,
-1,
574562881,
574460185,
-1,
-56768,
574513261,
66292,
-1,
574566371,
66283,
574759492,
574503623,
-1,
-56765,
574714310,
66293,
-1,
574769121,
66284,
-1,
574697198,
66282,
575021644,
574453794,
-1,
575087177,
574966141,
-1,
-56760,
575047803,
66287,
-1,
575078958,
66296,
575340543,
575025718,
-1,
-56757,
575244411,
66288,
-1,
575275566,
66297,
575480403,
574960422,
-1,
575545936,
575430757,
-1,
-56753,
575507687,
66294,
-1,
575567843,
66285,
575799295,
575488093,
-1,
-56750,
575704692,
66295,
-1,
575747988,
66286,
575939158,
575430082,
-1,
-56747,
575899771,
66290,
-1,
575930926,
66299,
576135769,
575876608,
-1,
-56744,
576100013,
66289,
-1,
576127534,
66298,
-1,
576086806,
66291,
576397927,
574367737,
-1,
576463454,
576356686,
-1,
-56739,
576427222,
66274,
-1,
576470866,
66275,
576660065,
576419874,
-1,
-56736,
576604541,
66278,
-1,
576664118,
66279,
576856676,
576598822,
-1,
-56733,
576808714,
66276,
-1,
576866527,
66277,
-56731,
576807684,
66273,
-56730,
577002946,
66281,
-1,
577056256,
66280,
-1,
576357287,
66272,
-56727,
574324021,
11507,
-1,
577248774,
11506,
-1,
574265332,
127279,
577512055,
574184279,
-1,
577577584,
577450790,
-1,
-56722,
577539481,
128533,
-56721,
577595124,
128534,
-1,
577647007,
127882,
577839731,
577537869,
-1,
-56718,
577796682,
983186,
-1,
577850471,
127899,
578036342,
577798274,
-1,
-56715,
578000292,
128119,
-1,
578059339,
128679,
-1,
577998563,
127978,
578298492,
577462717,
-1,
578364027,
578240605,
-1,
-56710,
578313552,
127859,
-1,
578366303,
127850,
-1,
578301156,
127834,
578626174,
578261273,
128004,
-1,
578552270,
128046,
578757249,
578586775,
-1,
-56704,
578710453,
128145,
-1,
578756400,
128715,
578953860,
578690359,
-1,
-56701,
578905166,
129381,
-1,
578964635,
127864,
-56699,
578901009,
128165,
-1,
579081935,
129509,
579281652,
563744529,
-1,
579347155,
579242475,
-1,
579412690,
579293609,
-1,
579478165,
579345719,
-1,
579543695,
579418823,
-1,
579609229,
579473169,
66873,
-1,
579567950,
66902,
-56690,
579561632,
66889,
-1,
579682397,
66890,
579871378,
579507976,
-1,
-56687,
579835565,
66891,
-1,
579899673,
66911,
580124671,
579800849,
-1,
-56684,
580032587,
66912,
-1,
580087628,
66882,
580264603,
579434530,
-1,
580330138,
580205255,
-1,
580395673,
580259601,
66877,
-1,
580340419,
66896,
-1,
580348064,
66909,
-1,
580268823,
66906,
580657825,
580199775,
-1,
580723359,
580622456,
-1,
-56674,
580687685,
66895,
-1,
580720634,
66901,
-56672,
580687639,
66881,
-1,
580851407,
66867,
581051046,
580622456,
-1,
581116581,
580991687,
-1,
-56668,
581059415,
66871,
-1,
581111569,
66875,
-1,
581047653,
66869,
581378730,
581009742,
-1,
-56664,
581343045,
66899,
-56663,
581388665,
66908,
-1,
581441109,
66872,
581640878,
581320797,
-1,
-56660,
581604494,
66910,
-56659,
581660861,
66876,
-1,
581722195,
66879,
581903025,
581605037,
-1,
-56656,
581855355,
66874,
-1,
581900294,
66913,
582099636,
581856815,
-1,
-56653,
582063789,
66887,
-1,
582119244,
66893,
582296247,
582052049,
-1,
-56650,
582240633,
66914,
-1,
582299374,
66903,
582492858,
582241527,
-1,
-56647,
582457328,
66885,
-1,
582488878,
66878,
582689469,
582437571,
-1,
-56644,
582633846,
66915,
-1,
582685936,
66884,
582886080,
582633973,
-1,
-56641,
582827854,
66883,
-1,
582883354,
66897,
583082691,
582825897,
-1,
-56638,
583025570,
66866,
-1,
583089569,
66904,
583279302,
583019359,
-1,
-56635,
583243851,
66870,
-1,
583276578,
66868,
583475913,
583208721,
-1,
-56632,
583427383,
66888,
-1,
583488945,
66864,
-56630,
583439933,
66880,
-56629,
583634936,
66907,
-56628,
583692392,
66905,
-56627,
583754406,
66898,
-56626,
583819060,
66894,
-56625,
583882545,
66892,
-56624,
583941542,
66886,
-56623,
584004489,
66900,
-1,
584063389,
66865,
-1,
579346663,
66927,
584327900,
579296087,
983098,
584393431,
584261360,
983097,
-56618,
584339125,
128473,
-1,
584384841,
983170,
584590042,
584328543,
-1,
-56615,
584554157,
127852,
-1,
584601380,
128367,
-56613,
584540458,
128758,
-1,
584735943,
129387,
584917734,
584281932,
-1,
584983265,
584852833,
-1,
585048800,
584926291,
128199,
-1,
584974740,
128450,
-1,
584987688,
128451,
585245412,
584937292,
-1,
-56605,
585197039,
129365,
-1,
585253121,
983073,
-56603,
585197800,
127887,
-1,
585394063,
127904,
585573099,
584876366,
128008,
585695231,
585499086,
128049,
585760767,
585565572,
-1,
-56598,
585668063,
128572,
-1,
585728669,
128569,
585900783,
585520660,
'''
Python:
accept epsg as argument
copy into new DB
create structure for 3nf
add geometry columns
Call Ruby:
Write responses into 3nf
write geometries into 3nf tables
call shapefile tool
TODO:
convert ruby calls into rbenv or system ruby calls
figure out how shell script wrapper needs to work for exporter
'''
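# Hedged invocation sketch (argument order taken from the sys.argv reads further
# down in this script; the script filename here is hypothetical):
#   python exporter.py /path/to/module_dir/ /path/to/output_dir/ options.json
# argv[1] is the module directory containing db.sqlite3 and module.settings,
# argv[2] is where the final export is written, argv[3] is a JSON options file.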
import unicodedata
import sqlite3
import csv, codecs, cStringIO
from xml.dom import minidom
import sys
import pprint
import glob
import json
import os
import shutil
import re
import zipfile
import subprocess
import tempfile
import errno
import imghdr
import bz2
import tarfile
import lsb_release
from collections import defaultdict
try:
import zlib
compression = zipfile.ZIP_DEFLATED
except ImportError:
compression = zipfile.ZIP_STORED
modes = { zipfile.ZIP_DEFLATED: 'deflated',
zipfile.ZIP_STORED: 'stored',
}
if lsb_release.get_lsb_information()['RELEASE'] == '16.04':
LIBSPATIALITE = 'mod_spatialite.so'
else:
LIBSPATIALITE = 'libspatialite.so.5'
print sys.argv
class UTF8Recoder:
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
"""
def __init__(self, f, encoding):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def next(self):
return self.reader.next().encode("utf-8")
class UnicodeReader:
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
f = UTF8Recoder(f, encoding)
self.reader = csv.reader(f, dialect=dialect, **kwds)
def next(self):
row = self.reader.next()
return [unicode(s, "utf-8") for s in row]
def __iter__(self):
return self
class UnicodeWriter:
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = cStringIO.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
def convertBuffer(self, obj):
#print type(obj)
if isinstance(obj, basestring):
#print obj.encode("utf-8", errors="replace")
return obj.encode("utf-8", errors="replace").replace('"',"''")
if isinstance(obj, buffer):
bufferCon = sqlite3.connect(':memory:')
bufferCon.enable_load_extension(True)
bufferCon.load_extension(LIBSPATIALITE)
foo = bufferCon.execute("select astext(?);", ([obj])).fetchone()
return foo[0]
if obj == None:
return ""
return obj
def writerow(self, row):
self.writer.writerow(['"%s"' % self.convertBuffer(s) for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data.replace('"""','"').replace('"None"',''))
# empty queue
self.queue.truncate(0)
self.stream.flush()
def writerows(self, rows):
for row in rows:
self.writerow(row)
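# --- Hedged usage sketch (not part of the original exporter) -----------------
# Shows how UnicodeWriter might be used to dump an arbitrary query to CSV; the
# function name and its arguments are hypothetical and nothing in this script
# calls it. convertBuffer() is applied inside writerow(), so spatialite geometry
# BLOBs come back as WKT and None values become empty strings.
def exampleDumpQueryToCsv(con, query, csvPath):
    with open(csvPath, 'wb') as f:
        writer = UnicodeWriter(f)
        for row in con.execute(query):
            writer.writerow(row)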
def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
def upper_repl(match):
if (match.group(1) == None):
return "_"
return "_"+match.group(1)
def clean(str):
out = re.sub(" ([a-z])|[^A-Za-z0-9]+", upper_repl, str)
return out
def cleanWithUnder(str):
out = re.sub("[^a-zA-Z0-9]+", "_", str)
return out
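# Hedged examples of the two cleaners (worked out by hand, not from the original source):
#   clean("spit depth")          -> "spit_depth"   (space + lowercase letter keeps the letter)
#   clean("depth (m)")           -> "depth_m_"     (runs of non-alphanumerics collapse to "_")
#   cleanWithUnder("depth (m)")  -> "depth_m_"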
def makeSurePathExists(path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
originalDir = sys.argv[1]
exportDir = tempfile.mkdtemp()+"/"
finalExportDir = sys.argv[2]+"/"
options = {"Rotation":"90"}
with open(sys.argv[3]) as json_file:
options = json.load(json_file)
importDB = originalDir+"db.sqlite3"
exportDB = exportDir+"shape.sqlite3"
jsondata = json.load(open(originalDir+'module.settings'))
srid = jsondata['srid']
arch16nFiles=[]
for file in glob.glob(originalDir+"*.properties"):
arch16nFiles.append(file)
arch16nFile = next((s for s in arch16nFiles if '.0.' in s), arch16nFiles[0])
# print jsondata
moduleName = clean(jsondata['name'])
fileNameType = "Identifier" #Original, Unchanged, Identifier
images = None
#try:
# foo= json.load(open(sys.argv[3],"r"))
# # print foo["Export Images and Files?"]
# if (foo["Export Images and Files?"] != []):
# images = True
# else:
# images = False
#except:
# sys.stderr.write("Json input failed")
# images = True
#print "Exporting Images %s" % (images)
def zipdir(path, zip):
for root, dirs, files in os.walk(path):
for file in files:
zip.write(os.path.join(root, file))
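# Hedged usage sketch (hypothetical helper, not called in the code shown here):
# bundle a directory into a zip archive using the compression mode detected above.
def exampleZipExport(dirPath, zipPath):
    zf = zipfile.ZipFile(zipPath, mode='w', compression=compression)
    try:
        zipdir(dirPath, zf)
    finally:
        zf.close()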
try:
os.remove(exportDB)
except OSError:
pass
importCon = sqlite3.connect(importDB)
importCon.enable_load_extension(True)
importCon.load_extension(LIBSPATIALITE)
exportCon = sqlite3.connect(exportDB)
exportCon.enable_load_extension(True)
exportCon.load_extension(LIBSPATIALITE)
exportCon.execute("select initSpatialMetaData(1)")
'''
for line in importCon.iterdump():
try:
exportCon.execute(line)
except sqlite3.Error:
pass
'''
exifCon = sqlite3.connect(exportDB)
exifCon.row_factory = dict_factory
exportCon.enable_load_extension(True)
exportCon.load_extension(LIBSPATIALITE)
exportCon.execute("create table keyval (key text, val text);")
f = open(arch16nFile, 'r')
for line in f:
if "=" in line:
keyval = line.replace("\n","").replace("\r","").decode("utf-8").split('=',1)
keyval[0] = '{'+keyval[0]+'}'
exportCon.execute("replace into keyval(key, val) VALUES(?, ?)", keyval)
f.close()
for aenttypeid, aenttypename in importCon.execute("select aenttypeid, aenttypename from aenttype"):
aenttypename = clean(aenttypename)
attributes = ['identifier', 'createdBy', 'createdAtGMT', 'modifiedBy', 'modifiedAtGMT']
for attr in importCon.execute("select distinct attributename from attributekey join idealaent using (attributeid) where aenttypeid = ? group by attributename order by aentcountorder", [aenttypeid]):
attrToInsert = clean(attr[0])
if attrToInsert not in attributes:
attributes.append(attrToInsert)
attribList = " TEXT, \n\t".join(attributes)
createStmt = "Create table if not exists %s (\n\tuuid TEXT PRIMARY KEY,\n\t%s TEXT);" % (aenttypename, attribList)
exportCon.execute(createStmt)
geometryColumns = []
for row in importCon.execute("select aenttypename, geometrytype(geometryn(geospatialcolumn,1)) as geomtype, count(distinct geometrytype(geometryn(geospatialcolumn,1))) from latestnondeletedarchent join aenttype using (aenttypeid) where geomtype is not null group by aenttypename having count(distinct geometrytype(geometryn(geospatialcolumn,1))) = 1"):
geometryColumns.append(row[0])
geocolumn = "select addGeometryColumn('%s', 'geospatialcolumn', %s, '%s', 'XY');" %(clean(row[0]),srid,row[1]);
exportCon.execute(geocolumn)
for aenttypename, uuid, createdAt, createdBy, modifiedAt, modifiedBy,geometry in importCon.execute("select aenttypename, uuid, createdAt || ' GMT', createdBy, datetime(modifiedAt) || ' GMT', modifiedBy, geometryn(transform(geospatialcolumn,casttointeger(%s)),1) from latestnondeletedarchent join aenttype using (aenttypeid) join createdModifiedAtBy using (uuid) order by createdAt" % (srid)):
if (aenttypename in geometryColumns):
insert = "insert into %s (uuid, createdAtGMT, createdBy, modifiedAtGMT, modifiedBy, geospatialcolumn) VALUES(?, ?, ?, ?, ?, ?)" % (clean(aenttypename))
exportCon.execute(insert, [str(uuid), createdAt, createdBy, modifiedAt, modifiedBy, geometry])
else:
insert = "insert into %s (uuid, createdAtGMT, createdBy, modifiedAtGMT, modifiedBy) VALUES(?, ?, ?, ?, ?)" % (clean(aenttypename))
exportCon.execute(insert, [str(uuid), createdAt, createdBy, modifiedAt, modifiedBy])
try:
os.remove(exportDir+'shape.out')
except OSError:
pass
subprocess.call(["bash", "./format.sh", originalDir, exportDir, exportDir])
updateArray = []
for line in codecs.open(exportDir+'shape.out', 'r', encoding='utf-8').readlines():
out = line.replace("\n","").replace("\\r","").split("\t")
#print "!!%s -- %s!!" %(line, out)
if (len(out) ==4):
update = "update %s set %s = ? where uuid = %s;" % (clean(out[1]), clean(out[2]), out[0])
data = (unicode(out[3].replace("\\n","\n").replace("'","''")),)
# print update, data
exportCon.execute(update, data)
exportCon.commit()
files = [exportDir+'shape.sqlite3']
for directory in importCon.execute("select distinct aenttypename, attributename from latestnondeletedaentvalue join attributekey using (attributeid) join latestnondeletedarchent using (uuid) join aenttype using (aenttypeid) where attributeisfile is not null and measure is not null"):
makeSurePathExists("%s/%s/%s" % (exportDir,clean(directory[0]), clean(directory[1])))
filehash = defaultdict(int)
exportPhotos = []
realExportList = {}
#print "* UUIDs exported:"
for filename in importCon.execute("""
select uuid, measure, freetext, certainty, attributename, aenttypename, substr(measure,48) as sortname
from latestnondeletedaentvalue
join attributekey using (attributeid)
join latestnondeletedarchent using (uuid)
join aenttype using (aenttypeid)
join idealaent using (aenttypeid, attributeid)
where attributeisfile is not null and measure is not null
order by sortname;
"""):
try:
oldPath = filename[1].split("/")
oldFilename = oldPath[2]
aenttypename = clean(filename[5])
attributename = clean(filename[4])
newFilename = "%s/%s/%s" % (aenttypename, attributename, oldFilename)
if os.path.isfile(originalDir+filename[1]):
if (fileNameType == "Identifier"):
#print filename[0],
filehash["%s%s" % (filename[0], attributename)] += 1
foo = exportCon.execute("select identifier from %s where uuid = %s" % (aenttypename, filename[0]))
identifier=cleanWithUnder(foo.fetchone()[0])
r= re.search("(\.[^.]*)$",oldFilename)
delimiter = ""
if filename[2]:
delimiter = "a"
newFilename = "%s/%s/%s/%s_%s%s%s" % (aenttypename, attributename, identifier, identifier, filehash["%s%s" % (filename[0], attributename)],delimiter, r.group(0))
makeSurePathExists("%s/%s/%s/%s" % (exportDir, clean(directory[0]), clean(directory[1]), identifier))
exifdata = exifCon.execute("select * from %s where uuid = %s" % (aenttypename, filename[0])).fetchone()
iddata = []
for id in importCon.execute("select coalesce(measure, vocabname, freetext) from latestnondeletedarchentidentifiers where uuid = %s union select aenttypename from latestnondeletedarchent join aenttype using (aenttypeid) where uuid = %s" % (filename[0], filename[0])):
iddata.append(id[0])
shutil.copyfile(originalDir+filename[1], exportDir+newFilename)
mergedata = exifdata.copy()
mergedata.update(jsondata)
mergedata.pop("geospatialcolumn", None)
exifjson = {"SourceFile":exportDir+newFilename,
"UserComment": [json.dumps(mergedata)],
"ImageDescription": exifdata['identifier'],
"XPSubject": "Annotation: %s" % (filename[2]),
"Keywords": iddata,
"Artist": exifdata['createdBy'],
"XPAuthor": exifdata['createdBy'],
"Software": "FAIMS Project",
"ImageID": exifdata['uuid'],
"Copyright": jsondata['name']
}
with open(exportDir+newFilename+".json", "w") as outfile:
json.dump(exifjson, outfile)
if imghdr.what(exportDir+newFilename):
subprocess.call(["exiftool", "-m", "-q", "-sep", "\"; \"", "-overwrite_original", "-j=%s" % (exportDir+newFilename+".json"), exportDir+newFilename])
#exportPhotos.append((clean(aenttypename), attributename, newFilename, filename[0]))
#print " * %s" % (newFilename)
#files.append(exportDir+newFilename+".json")
#files.append(exportDir+newFilename)
else:
print "<b>Unable to find file %s, from uuid: %s" % (originalDir+filename[1], filename[0])
except:
print "<b>Unable to find file (exception thrown) %s, from uuid: %s" % (originalDir+filename[1], filename[0])
exportAttributes = {}
for aenttypename, attributename, newFilename, uuid in exportPhotos:
if aenttypename not in realExportList:
realExportList[aenttypename] = {}
exportAttributes[aenttypename] = attributename
if uuid not in realExportList[aenttypename]:
realExportList[aenttypename][uuid] = []
#realExportList[aenttypename][uuid].append(newFilename)
#print " ",realExportList
for aenttypename in realExportList:
for uuid in realExportList[aenttypename]:
exportCon.execute("update %s set %s = ? where uuid = ?" % (aenttypename, exportAttributes[aenttypename]), (', '.join(realExportList[aenttypename][uuid]), uuid))
exportCon.commit()
# check input flag as to what filename to export
for row in importCon.execute("select aenttypename, geometrytype(geometryn(geospatialcolumn,1)) as geomtype, count(distinct geometrytype(geometryn(geospatialcolumn,1))) from latestnondeletedarchent join aenttype using (aenttypeid) where geomtype is not null group by aenttypename having count(distinct geometrytype(geometryn(geospatialcolumn,1))) = 1"):
cmd = ["spatialite_tool", "-e", "-shp", "%s" % (clean(row[0]).decode("ascii")), "-d", "%sshape.sqlite3" % (exportDir), "-t", "%s" % (clean(row[0])), "-c", "utf-8", "-g", "geospatialcolumn", "-s", | |
are up-to-date with library files (semantic categories, real-world size, etc.).
Also updates potentially out-dated functions?? (THIS NEEDS TO BE CLARIFIED / CHANGED)
Optionally, raises an error if the "name" (the unique string designating each object) has changed in the bvpLibrary
since scene list creation.
Parameters
----------
RaiseError : bool | False
Whether to raise an error if named scene elements are missing from the library.
Returns
-------
(Nothing - modifies SceneList in place)
"""
Lib = bvp.bvpLibrary()
Fail=False
for iS, S in enumerate(self.ScnList):
# Objects
for iO, O in enumerate(S.Obj):
try:
N = bvp.Object(O.name, Lib)
self.ScnList[iS].Obj[iO].semantic_category = N.semantic_category
self.ScnList[iS].Obj[iO].real_world_size = N.real_world_size
except:
Fail=True
print('Update failed because name has been changed for scene %d object %s!\nNeeds to be manually updated!'%(iS, O.name))
# Background
try:
N = bvp.Background(S.BG.name, Lib)
self.ScnList[iS].BG.semantic_category = N.semantic_category
self.ScnList[iS].BG.real_world_size = N.real_world_size
except:
Fail=True
print('Update failed because name has been changed for scene %d background %s! Needs to be manually updated!'%(iS, S.BG.name))
# Skies
try:
N = bvp.Sky(S.Sky.name, Lib)
self.ScnList[iS].Sky.semantic_category = N.semantic_category
self.ScnList[iS].Sky.real_world_size = N.real_world_size
except:
if S.Sky.name is not None:
Fail=True
print('Update failed because name has been changed for scene %d sky %s! Needs to be manually updated!'%(iS, S.Sky.name))
# Update render options
if hasattr(self.RenderOptions, 'file_format'):
# Assume both need changing:
self.RenderOptions.image_settings = {'file_format':self.RenderOptions.file_format, 'color_mode':self.RenderOptions.color_mode}
# Get rid of old ones
self.RenderOptions.__delattr__('file_format')
self.RenderOptions.__delattr__('color_mode')
if Fail and RaiseError:
raise Exception('One or more objects need manual updating!')
def PlotImagePos(self, ScnIdx=None):
"""Plots image positions of objects in all scenes
"""
if ScnIdx is None:
ScnIdx = range(self.nScenes)
for Ct, Scn in enumerate(self.ScnList):
if Ct in ScnIdx:
for O in Scn.Obj:
plt.plot(O.pos2D[0], O.pos2D[1], 'k.', alpha=.5)
# Assumes image size!
plt.xlim((0, 1))
plt.ylim((1, 0))
plt.show()
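# Hedged usage note (SL below stands for an instance of this class; the call is
# illustrative only, not taken from the original source):
#   SL.PlotImagePos(ScnIdx=range(3))   # plot object positions for the first three scenes only
#   SL.PlotImagePos()                  # defaults to all scenes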
# def Render(self, RenderType=('Image', ), RenderGroupSize=1, Is_Overwrite=False, memory=7700, nCPUs='2'):
# """
# Renders locally, calling this machine. Blender is closed and re-opened after RenderGroupSize scenes.
# This is an easy way to prevent Blender from
# WARNING: Can go for a LONG time with big render jobs! Only recommended for one scene at a time!
# This function writes three different kinds of temporary files associated with the render job:
# (1) a pickle (saved as .pik) file ("SLpickleFile" variable below)
# (2) a python script to read in and do the rendering ("BlenderPyFile" variable below). Two lines written into this file determine (a) the pickled scene list file to load, and (b) the portion or chunk of the scene list to render for each job
# (3) a shell script that calls blender with each chunk's python script
# """
# ### --- General set-up: --- ###
# Blender = bvp.Settings['Paths']['BlenderCmd']
# BlendFile = os.path.join(bvp.__path__[0], 'BlendFiles', 'Blank.blend')
# if isinstance(nCPUs, int):
# nCPUs = str(nCPUs)
# # This string should be used to specify (masks, zdepth, contours, etc)
# if 'LogFileAdd' in self.RenderOptions.BVPopts:
# LogAdd = self.RenderOptions.BVPopts['LogFileAdd']
# else:
# LogAdd = '' #self.RenderOptions.BVPopts['LogFileAdd']
# for x in ['Image', 'Clay', 'ObjectMasks', 'Zdepth', 'Contours', 'Normals']:
# self.RenderOptions.BVPopts[x] = x in RenderType
# if x in RenderType:
# LogAdd += '_'+x
# if 'Test' in RenderType:
# if len(RenderType)>2 and not "Image" in RenderType:
# raise Exception("I'm still too stupid to handle test renders of depth, normals, etc")
# elif len(RenderType)==1 and not "Image" in RenderType:
# RenderType = ("Test", "Image")
# # Keep original values to re-set later:
# resPctOrig = copy.copy(self.RenderOptions.resolution_percentage)
# # Set render options for test render
# self.RenderOptions.resolution_percentage = 50
# self.RenderOptions.BVPopts['BasePath'] = self.RenderOptions.BVPopts['BasePath'].replace('Scenes', 'Test')
# self.RenderOptions.BVPopts['Type'] = 'FirstAndLastFrame' #'FirstFrame'
# # Creation of temporary files
# # Pre: set up temp directory
# BaseDir = os.path.dirname(os.path.split(self.RenderOptions.BVPopts['BasePath'])[0])
# if not os.path.exists(BaseDir):
# os.mkdir(BaseDir)
# # -> (write slurm output here too???)
# # -> Make specific date??? No - there should only be one for each stim set, right...?
# if not os.path.exists(os.path.join(BaseDir, 'Log')):
# os.mkdir(os.path.join(BaseDir, 'Log'))
# # (1) Save scene list as a temporary pickle file to be loaded by the RenderFile
# # Save scene list as a temporary pickle file to be loaded by the RenderFile
# rName = 'ScnListRender_%s%s_%s'%(self.Name, LogAdd, time.strftime('%Y%m%d_%H%M%S'))
# SLpickleFile = os.path.join(BaseDir, 'Log', rName+'.pik')
# with open(SLpickleFile, 'wb') as fid:
# pickle.dump(self, fid, protocol=2)
# # (2, 3) Setup:
# BlenderPyFileBase = self.RenderOptions.BVPopts['RenderFile']
# # Get this file into a list:
# with open(BlenderPyFileBase, 'r') as fid:
# RenderScript = fid.readlines()
# # Set up first of two lines to print into temp file:
# FileToLoadLine = "TempFile = '%s'\n"%SLpickleFile #os.path.join(bvp.__path__[0], 'Scripts', 'CurrentRender.pik')
# for x in range(nChunks):
# ScnToRenderLine = 'ScnToRender = range(%d, %d)\n'%(x*RenderGroupSize, min([(x+1)*RenderGroupSize, self.nScenes]))
# InsertLine1 = RenderScript.index('### --- REPLACE 1 --- ###\n')+1
# InsertLine2 = RenderScript.index('### --- REPLACE 2 --- ###\n')+1
# RenderScript[InsertLine1] = FileToLoadLine
# RenderScript[InsertLine2] = ScnToRenderLine
# ChunkfNm = '%s_chunk%03d.py'%(rName, x+1)
# BlenderPyFile = os.path.join(BaseDir, 'Log', ChunkfNm) # Add datestr to "Log" ?
# with open(BlenderPyFile, 'w') as fid:
# fid.writelines(RenderScript)
# # Create & call slurm script for this chunk
# #TempScriptName = os.path.join(BaseDir, 'Log', 'BlenderRenderTmp_chunk%03d.sh'%(x+1))
# #BlenderCmd = Blender+' -b '+BlendFile+' -P '+BlenderPyFile+' --mem '+str(memory) # Specify output? stdout? File?
# BlenderCmd = [Blender, '-b', BlendFile, '-P', BlenderPyFile]
# subprocess.call(BlenderCmd)
def RenderSlurm(self, RenderType=('Image', ), RenderGroupSize=3, Is_Overwrite=False, memory=7700, nCPUs='2'):
"""Calls separate instances of Blender via Slurm queue to render the scene list.
DEPRECATED. Simply calls SceneList.Render(..., Is_Slurm=True). Please use that in the future.
"""
self.Render(RenderType=RenderType, Is_Overwrite=Is_Overwrite, Is_Slurm=True, nCPUs=nCPUs, RenderGroupSize=RenderGroupSize, memory=memory)
def Render(self, RenderType=('Image', ), Is_Overwrite=False, Is_Slurm=False, nCPUs='2', RenderGroupSize=3, memory=7700):
"""Renders the scene list.
Writes three different kinds of temporary files associated with the render job:
(1) a pickle (saved as .pik) file ("SLpickleFile" variable below)
(2) a python script to read in and do the rendering ("BlenderPyFile" variable below). Two lines written into this file determine (a) the pickled scene list file to load, and (b) the portion or chunk of the scene list to render for each job
(3) a shell script for use by sbatch that calls blender with each chunk's python script
Parameters
----------
RenderType : tuple | ('Image', )
A tuple of types of outputs to render for this SceneList. Can contain any of the following:
('Image', 'Clay'*, 'ObjectMasks', 'Zdepth', 'Contours'*, 'Normals')
* Not working yet!
Is_Overwrite : bool | False
Whether to over-write extant files (True) or skip files that are already rendered (False)
nCPUs : str | '2'
Number of cpus to use for render parallelization.
Is_Slurm : bool | False
Whether to submit render jobs to the Slurm queue (True) or render them serially on this machine (False).
RenderGroupSize : int
Number of scenes to render in a single job. A scene can be an arbitrary number of frames.
For Is_Slurm=False this is not especially useful, since all jobs are run serially. The parameter
still determines how often Blender closes and re-opens a new instance, which *SHOULD NOT*
have any effect so long as each scene is correctly cleared (but there may still be bugs here).
memory : int
Maximum memory required for the job (in MB). For Is_Slurm=True only.
This is difficult to estimate... Aim high!
TO DO:
Add gpu render option??
"""
"""
For error messages:
-e, --error=<filename pattern>
Instruct SLURM to connect the batch script's standard error directly to the file name specified in the "filename pattern". See the --input option for filename specification options.
"""
### --- General set-up: --- ###
Blender = config.get('path', 'blender') #bvp.Settings['Paths']['BlenderCmd']
BlendFile = os.path.join(bvp.__path__[0], 'BlendFiles', 'Blank.blend')
if isinstance(nCPUs, int):
nCPUs = str(nCPUs)
### --- Set type of render --- ###
# Keep original render options for re-set at end:
BVPoptOrig = copy.copy(self.RenderOptions.BVPopts)
if 'LogFileAdd' in self.RenderOptions.BVPopts:
LogAdd = self.RenderOptions.BVPopts['LogFileAdd']
else:
LogAdd = '' #self.RenderOptions.BVPopts['LogFileAdd']
# TO DO: set available render options in settings / config file
for x in ['Image', 'Clay', 'ObjectMasks', 'Zdepth', 'Contours', 'Normals', 'Voxels']:
self.RenderOptions.BVPopts[x] = x in RenderType
if x in
"styleData.iLevel={:d} styleData.istyBuiltIn={:#0{:d}x} ixfe={!s}".format(res['iLevel'].int(), res['istyBuiltIn'].int(), 2 + 2 * res['istyBuiltIn'].size(), flags.summary())
return "styleData={!r} ixfe={!s}".format(res.str(), flags.summary())
@BIFF8.define
class Style(pstruct.type):
type = 659
type = 0x293
class _ixfe(pbinary.flags):
_fields_ = R([
(12, 'ixfe'),
(3, 'unused'),
(1, 'fBuiltIn'),
])
_fields_ = [
(_ixfe, 'ixfe'),
(lambda self: BuiltInStyle if self['ixfe'].li['fBuiltIn'] else undefined, 'builtInData'),
(lambda self: XLUnicodeString if not self['ixfe'].li['fBuiltIn'] else undefined, 'user')
]
@BIFF5.define
@BIFF8.define
class StyleExt(pstruct.type):
type = 2194
type = 0x892
class _flags(pbinary.flags):
_fields_ = R([
(1, 'fBuiltIn'),
(1, 'fHidden'),
(1, 'fCustom'),
(5, 'reserved'),
])
class _iCategory(pint.enum, ubyte1):
_values_ = [
('custom', 0x00),
('good-bad-neutral', 0x01),
('data-model', 0x02),
('title-heading', 0x03),
('themed', 0x04),
('number-format', 0x05),
]
_fields_ = [
(FrtHeader, 'frtHeader'),
(_flags, 'flags'),
(_iCategory, 'iCategory'),
(BuiltInStyle,'builtInData'),
(LPWideString, 'stName'),
(XFProps, 'xfProps'),
]
class FullColorExt(pstruct.type):
class Unknown(uint4): pass
class IcvXF(uint4): pass
def __xclrValue(self):
t = self['xclrType'].li.int()
if t == 1:
return self.IcvXF
if t == 2:
return LongRGBA
if t == 3:
return ColorTheme
return self.Unknown
_fields_ = [
(XColorType, 'xclrType'),
(sint2, 'nTintShade'),
(__xclrValue, 'xclrValue'),
(pint.uint64_t, 'unused'),
]
class ColorTheme(pint.enum, uint4):
_values_ = [
('Dark 1', 0x00000000),
('Light 1', 0x00000001),
('Dark 2', 0x00000002),
('Light 2', 0x00000003),
('Accent 1', 0x00000004),
('Accent 2', 0x00000005),
('Accent 3', 0x00000006),
('Accent 4', 0x00000007),
('Accent 5', 0x00000008),
('Accent 6', 0x00000009),
('Hyperlink', 0x0000000A),
('Followed hyperlink', 0x0000000B),
]
class XFPropGradient(pstruct.type):
class type(pint.enum, uint4):
_values_ = [ ('linear', 0), ('rectangular', 1) ]
_fields_ = [
(type, 'type'),
(Xnum, 'numDegree'),
(Xnum, 'numFillToLeft'),
(Xnum, 'numFillToRight'),
(Xnum, 'numFillToTop'),
(Xnum, 'numFillToBottom'),
]
class GradStop(pstruct.type):
class Unknown(uint4): pass
class IcvXF(uint4): pass
def __xclrValue(self):
t = self['xclrType'].li.int()
if t == 1:
return self.IcvXF
if t == 2:
return LongRGBA
if t == 3:
return ColorTheme
return self.Unknown
_fields_ = [
(XColorType, 'xclrType'),
(__xclrValue, 'xclrValue'),
(Xnum, 'numPosition'),
]
class XFExtGradient(pstruct.type):
_fields_ = [
(XFPropGradient, 'gradient'),
(uint4, 'cGradStops'),
(lambda self: dyn.array(GradStop, self['cGradStops'].li.int()), 'rgGradStops'),
]
class ExtPropType(ptype.definition):
attribute, cache = 'extType', {}
class ET_Unknown(ptype.block):
def classname(self):
res = getattr(self, ExtPropType.attribute, None)
return self.typename() if res is None else "{:s}<{:04x}>".format(self.typename(), res)
default = ET_Unknown
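# How this dispatch works (a summary of the surrounding code, not new behavior):
# each ET_* class below registers itself for one extType value via
# @ExtPropType.define, and ExtProp.__extPropData later resolves the matching
# parser with ExtPropType.get(extType, ...), falling back to ET_Unknown when the
# value is unregistered.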
@ExtPropType.define
class ET_Foreground_Color(FullColorExt):
extType = 0x0004
@ExtPropType.define
class ET_Background_Color(FullColorExt):
extType = 0x0005
@ExtPropType.define
class ET_GradientFill(XFExtGradient):
extType = 0x0006
@ExtPropType.define
class ET_TopBorderColor(FullColorExt):
extType = 0x0007
@ExtPropType.define
class ET_BottomBorderColor(FullColorExt):
extType = 0x0008
@ExtPropType.define
class ET_LeftBorderColor(FullColorExt):
extType = 0x0009
@ExtPropType.define
class ET_RightBorderColor(FullColorExt):
extType = 0x000a
@ExtPropType.define
class ET_DiagonalBorderColor(FullColorExt):
extType = 0x000b
@ExtPropType.define
class ET_TextColor(FullColorExt):
extType = 0x000d
@ExtPropType.define
class ET_FontScheme(FontScheme):
extType = 0x000e
@ExtPropType.define
class ET_TextIndentation(uint2):
extType = 0x000f
class ExtProp(pstruct.type):
class _extType(pint.enum, uint2):
_values_ = [
('interior-fg-color', 0x0004),
('interior-bg-color', 0x0005),
('interior-igradient', 0x0006),
('top-border-color', 0x0007),
('bottom-border-color', 0x0008),
('left-border-color', 0x0009),
('right-border-color', 0x000a),
('diagonal-border-color', 0x000b),
('text-color', 0x000d),
('font-scheme', 0x000e),
('text-indent', 0x000f),
]
def __extPropData(self):
res = self['extType'].li.int()
cb = self['cb'].li.int() - (2 + 2)
# FIXME
return ExtPropType.get(res, blocksize=lambda _, size=cb: size)
_fields_ = [
(_extType, 'extType'),
(uint2, 'cb'),
(__extPropData, 'extPropData'),
]
@BIFF5.define
@BIFF8.define
class XFCFC(pstruct.type):
type = 0x87c
type = 2172
_fields_ = [
(FrtHeader, 'frtHeader'),
(uint2, 'reserved'),
(uint2, 'cxfs'),
(uint4, 'crc'),
]
@BIFF5.define
@BIFF8.define
class XFExt(pstruct.type):
type = 0x87d
type = 2173
_fields_ = [
(FrtHeader, 'frtHeader'),
(uint2, 'reserved1'),
(XFIndex, 'ixfe'),
(uint2, 'reserved2'),
(uint2, 'cexts'),
(lambda self: dyn.array(ExtProp, self['cexts'].li.int()), 'rgExt'),
]
@BIFF5.define
class Format(pstruct.type):
type = 0x41e
type = 1054
def __stFormat(self):
res = self['cch'].li
return dyn.clone(pstr.string, length=res.int())
_fields_ = [
(uint2, 'ifmt'),
(ubyte1, 'cch'),
(__stFormat, 'stFormat'),
]
def summary(self):
return "ifmt={:#0{:d}x} stFormat={:s}".format(self['ifmt'].int(), 2 + 2 * self['ifmt'].size(), self['stFormat'].summary())
@BIFF8.define
class Format(pstruct.type):
type = 0x41e
type = 1054
def __stFormat(self):
p = self.getparent(type=RecordGeneralBase)
length = p['header']['length'].int() - 2
return dyn.block(length)
_fields_ = [
(uint2, 'ifmt'),
#(XLUnicodeString, 'stFormat'), # FIXME: is the specification wrong here?
(__stFormat, 'stFormat'),
]
@BIFF5.define
@BIFF8.define
class SerAuxErrBar(pstruct.type):
type = 4187
type = 0x105b
class sertm(pint.enum, ubyte1):
_values_ = [
('horizontal+', 1),
('horizontal-', 2),
('vertical+', 3),
('vertical-', 4),
]
class ebsrc(pint.enum, ubyte1):
_values_ = [
('percentage', 1),
('fixed', 2),
('standard', 3),
('custom', 4),
('error', 5),
]
class fTeeTop(Boolean, ubyte1): pass
_fields_ = [
(sertm, 'sertm'),
(ebsrc, 'ebsrc'),
(fTeeTop, 'fTeeTop'),
(ubyte1, 'reserved'),
(Xnum, 'numValue'),
(uint2, 'cnum'),
]
class SharedFeatureType(pint.enum, uint2):
_values_ = [
('ISFPROTECTION', 0x2),
('ISFFEC2', 0x3),
('ISFFACTOID', 0x4),
('ISFLIST', 0x5),
]
class Ref8U(pstruct.type):
_fields_ = [
(RwU, 'rwFirst'),
(RwU, 'rwLast'),
(ColU, 'colFirst'),
(ColU, 'colLast'),
]
class SqRefU(pstruct.type):
_fields_ = [
(uint2, 'cref'),
(lambda self: dyn.array(Ref8U, self['cref'].li.int()), 'rgrefs'),
]
class SDContainer(pstruct.type):
_fields_ = [
(uint4, 'cbSD'), # GUARD: >20
(lambda self: dyn.block(self['cbSD'].li.int()), 'sd'),
]
class FeatProtection(pstruct.type):
_fields_ = [
(ubyte1, 'fSD'),
(uint4, 'wPassword'),
(XLUnicodeString, 'stTitle'),
(SDContainer, 'sdContainer'),
]
class FFErrorCheck(pbinary.flags):
_fields_ = R([
(1, 'ffecCalcError'),
(1, 'ffecEmptyCellRef'),
(1, 'ffecNumStoredAsText'),
(1, 'ffecInconsistRange'),
(1, 'ffecInconsistFmla'),
(1, 'ffecTextDateInsuff'),
(1, 'ffecUnprotFmla'),
(1, 'ffecDateValidation'),
(24, 'reserved'),
])
class FeatFormulaErr2(FFErrorCheck): pass
class Property(pstruct.type):
_fields_ = [(uint4,'keyIndex'),(uint4,'valueIndex')]
class PropertyBag(pstruct.type):
_fields_ = [
(uint2, 'id'),
(uint2, 'cProp'),
(uint2, 'cbUnknown'),
(lambda self: dyn.array(Property, self['cProp'].li.int()), 'properties'),
]
class FactoidData(pstruct.type):
class _flags(pbinary.flags):
_fields_ = R([(1,'fDelete'),(1,'fXMLBased'),(6,'reserved')])
_fields_ = [
(_flags, 'flags'),
(PropertyBag, 'propertyBag'),
]
class FeatSmartTag(pstruct.type):
_fields_ = [
(uint4, 'hashValue'),
(ubyte1, 'cSmartTags'),
(lambda self: dyn.array(FactoidData, self['cSmartTags'].li.int()), 'rgFactoid'),
]
@BIFF5.define
@BIFF8.define
class Feat(pstruct.type):
type = 0x868
type = 2152
def __rgbFeat(self):
isf = self['isf'].l
if isf['ISFPROTECTION']:
return FeatProtection
elif isf['ISFFEC2']:
return FeatFormulaErr2
elif isf['ISFFACTOID']:
return FeatSmartTag
return undefined
_fields_ =[
(FrtHeader, 'frtHeader'),
(SharedFeatureType, 'isf'),
(ubyte1, 'reserved1'),
(uint4, 'reserved2'),
(uint2, 'cref'),
(uint4, 'cbFeatData'),
(uint2, 'reserved3'),
(lambda self: dyn.array(Ref8U, self['cref'].li.int()), 'refs'),
(__rgbFeat, 'rgbFeat'),
]
class EnhancedProtection(pbinary.flags):
_fields_ = R([
(1, 'iprotObjects'),
(1, 'iprotScenarios'),
(1, 'iprotFormatCells'),
(1, 'iprotFormatColumns'),
(1, 'iprotFormatRows'),
(1, 'iprotInsertColumns'),
(1, 'iprotInsertRows'),
(1, 'iprotInsertHyperlinks'),
(1, 'iprotDeleteColumns'),
(1, 'iprotDeleteRows'),
(1, 'iprotSelLockedCells'),
(1, 'iprotSort'),
(1, 'iprotAutoFilter'),
(1, 'iprotPivotTables'),
(1, 'iprotSelUnlockedCells'),
(17, 'reserved'),
])
@BIFF5.define
@BIFF8.define
class FeatHdr(pstruct.type):
type = 2151
type = 0x867
def __rgbHdrData(self):
isf = self['isf'].l
if self['cbHdrData'].li.int() == 0:
return undefined
if isf['ISFPROTECTION']:
return EnhancedProtection
elif isf['ISFFEC2']:
return undefined
raise NotImplementedError(isf)
_fields_ = [
(FrtHeader, 'frtHeader'),
(SharedFeatureType, 'isf'),
(ubyte1, 'reserved'),
(uint4, 'cbHdrData'),
(__rgbHdrData, 'rgbHdrData'),
]
@BIFF5.define
@BIFF8.define
class FeatHdr11(pstruct.type):
type = 2161
type = 0x871
_fields_ = [
(FrtHeader, 'frtHeader'),
(SharedFeatureType, 'isf'), # GUARD: ISFLIST
(ubyte1, 'reserved1'),
(uint4, 'reserved2'),
(uint4, 'reserved3'),
(uint4, 'idListNext'),
(uint2, 'reserved4'),
]
class FrtRefHeaderU(pstruct.type):
_fields_ = [
(uint2, 'rt'),
(FrtFlags, 'grbitFrt'),
(Ref8U, 'ref8'),
]
@BIFF5.define
@BIFF8.define
class ContinueFrt(pstruct.type):
type = 0x812
type = 2066
def __rgb(self):
try:
p = self.getparent(RecordGeneralBase)
cb = p['header'].li.Length()
except (ptypes.error.ItemNotFoundError, ptypes.error.InitializationError):
return dyn.block(0)
return dyn.block(max(0, cb - self['frtHeaderOld'].li.size()))
_fields_ = [
(FrtHeaderOld, 'frtHeaderOld'),
(__rgb, 'rgb'),
]
class SourceType(pint.enum, uint4):
_values_ = [
('LTRANGE', 0),
('LTSHAREPOINT', 1),
('LTXML', 2),
('LTEXTERNALDATA', 3),
]
class LEMMode(pint.enum, uint4):
_values_ = [
('LEMNORMAL', 0x00000000),
('LEMREFRESHCOPY', 0x00000001),
('LEMREFRESHCACHE', 0x00000002),
('LEMREFRESHCACHEUNDO', 0x00000003),
('LEMREFRESHLOADED', 0x00000004),
('LEMREFRESHTEMPLATE', 0x00000005),
('LEMREFRESHREFRESH', 0x00000006),
('LEMNOINSROWSSPREQUIRED', 0x00000007),
('LEMNOINSROWSSPDOCLIB', 0x00000008),
('LEMREFRESHLOADDISCARDED', 0x00000009),
('LEMREFRESHLOADHASHVALIDATION', 0x0000000A),
('LEMNOEDITSPMODVIEW', 0x0000000B),
]
class XFExtNoFRT(pstruct.type):
_fields_ = [
(uint2, 'reserved1'),
(uint2, 'reserved2'),
(uint2, 'reserved3'),
(uint2, 'cexts'),
(lambda self: dyn.array(ExtProp, self['cexts'].li.int()), 'rgExt'),
]
@BIFF5.define
@BIFF8.define
class DXF(pstruct.type):
type = 2189
type = 0x88d
class _flags(pbinary.flags):
_fields_ = R([
(1, 'unused'),
(1, 'fNewBorder'),
(1, 'unused2'),
(13, 'reserved'),
])
_fields_ = [
(FrtHeader, 'frtHeader'),
(_flags, 'flags'),
(XFProps, 'xfprops'),
]
# DXFN
class DXFNumIfmt(IFmt): pass
class DXFNumUsr(pstruct.type):
_fields_ = [
(ubyte1, 'cb'),
(XLUnicodeString, 'fmt'), # FIXME: should this be bound by cb?
]
class DXFFntD(pstruct.type):
def __stFontName(self):
cch = self['cchFont'].li.int()
# FIXME
return dyn.clone(XLUnicodeStringNoCch, blocksize=lambda _, size=cch: size)
_fields_ = [
(ubyte1, 'cchFont'),
(__stFontName, 'stFontName'),
(lambda self: dyn.block(max(0, 63 - self['cchFont'].li.int())), 'unused1'),
(Stxp, 'stxp'),
(uint4, 'icvFore'),
(uint4, 'reserved'),
(Ts, 'tsNinch'),
(uint4, 'fSssNinch'),
(uint4, 'fUlsNinch'),
(uint4, 'fBlsNinch'),
(uint4, 'unused2'),
(sint4, 'ich'),
(uint4, 'cch'),
(uint2, 'iFnt'),
]
class DXFALC(pstruct.type):
class _flags(pbinary.struct):
_fields_ = R([
(HorizAlign, 'alc'),
(1, 'fWrap'),
(VertAlign, 'alcv'),
(1, 'fJustLast'),
(8, 'trot'),
(4, 'cIndent'),
(1, 'fShrinkToFit'),
(1, 'fMergeCell'),
(ReadingOrder, 'iReadingOrder'),
(8, 'unused'),
])
_fields_ = [
(_flags, 'flags'),
(sint2, 'iIndent'),
]
class DXFBdr(pbinary.struct):
_fields_ = R([
(BorderStyle, 'dgLeft'),
(BorderStyle, 'dgRight'),
(BorderStyle, 'dgTop'),
(BorderStyle, 'dgBottom'),
(7, 'icvLeft'),
(7, 'icvRight'),
(1, 'bitDiagDown'),
(1, 'bitDiagUp'),
(7, 'icvTop'),
(7, 'icvBottom'),
(7, 'icvDiag'),
(4, 'dgDiag'),
(7, 'unused'),
])
class DXFPat(pbinary.struct):
_fields_ = R([
(10, 'unused1'),
(FillPattern, 'fls'),
(7, 'icvForeground'),
(7, 'icvBackground'),
(2, 'unused2'),
])
class DXFProt(pbinary.struct):
_fields_ = R([
(1, 'fLocked'),
(1, 'fHidden'),
(14, 'reserved'),
])
class DXFN(pstruct.type):
class _flags(pbinary.flags):
_fields_ = R([
(1, 'alchNinch'), (1, 'alcvNinch'), (1, 'wrapNinch'),
(1, 'trotNinch'), (1, 'kintoNinch'), (1, 'cIndentNinch'),
(1, 'fShrinkNinch'), (1, 'fMergeCellNinch'), (1, 'lockedNinch'),
(1, 'hiddenNinch'), (1, 'glLeftNinch'), (1, 'glRightNinch'),
(1, 'glTopNinch'), (1, 'glBottomNinch'), (1, 'glDiagDownNinch'),
(1, 'glDiagUpNinch'), (1, 'flsNinch'), (1, 'icvFNinch'),
(1, 'icvBNinch'), (1, 'ifmtNinch'), (1, 'fIfntNinch'),
(1, 'unused1'), (1, 'reserved1'), (1, 'ibitAtrNum'),
(1, 'ibitAtrFnt'), (1, 'ibitAtrAlc'), (1, 'ibitAtrBdr'),
(1, 'ibitAtrPat'), (1, 'ibitAtrProt'), (1, 'iReadingOrderNinch'),
(1, 'fIfmtUser'), (1, 'unused2'), (1, 'fNewBorder'),
(1, 'fZeroInited'),
])
def __dxfnum(self):
f = self['flags'].li
if f['ibitAtrNum']:
return DXFNumUsr if f['fIfmtUser'] else DXFNumIfmt
return undefined
hasFlag = lambda t, field: lambda
#! /usr/bin/env python3
"""
WinCE Decompressor: Decompress compressed files from Windows CE ROMs.
"""
import argparse
import ctypes
import io
import os
class UnsupportedWindowSizeRange(Exception):
"""
Exception to deal with window sizes out of range.
"""
def __init__(self):
super().__init__()
class LZXConstants(object):
"""
A class to hold constants relating to LZX compression/decompression.
"""
PRETREE_NUM_ELEMENTS = 20
SECONDARY_NUM_ELEMENTS = 249
ALIGNED_NUM_ELEMENTS = 8
NUM_PRIMARY_LENGTHS = 7
NUM_CHARS = 256
MIN_MATCH = 2
MAX_MATCH = 257
NUM_REPEATED_OFFSETS = 3
MAX_GROWTH = 6144
E8_DISABLE_THRESHOLD = 32768
class BlockTypeEnum(object):
"""
An enum type for the different types of blocks in LZX.
"""
def __init__(self, value):
self.value = value
def __eq__(self, other):
if not isinstance(other, LZXConstants.BlockTypeEnum):
return False
return self.value == other.value
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.value)
BLOCKTYPE_INVALID = BlockTypeEnum(0)
BLOCKTYPE_VERBATIM = BlockTypeEnum(1)
BLOCKTYPE_ALIGNED = BlockTypeEnum(2)
BLOCKTYPE_UNCOMPRESSED = BlockTypeEnum(3)
PRETREE_MAXSYMBOLS = PRETREE_NUM_ELEMENTS
PRETREE_TABLEBITS = 6
PRETREE_MAX_CODEWORD = 16
MAINTREE_MAXSYMBOLS = NUM_CHARS + (51 << 3)
MAINTREE_TABLEBITS = 11
MAINTREE_MAX_CODEWORD = 16
LENTREE_MAXSYMBOLS = SECONDARY_NUM_ELEMENTS
LENTREE_TABLEBITS = 10
LENTREE_MAX_CODEWORD = 16
ALIGNTREE_MAXSYMBOLS = ALIGNED_NUM_ELEMENTS
ALIGNTREE_TABLEBITS = 7
ALIGNTREE_MAX_CODEWORD = 8
LENTABLE_SAFETY = 64
position_slots = [30, 32, 34, 36, 38, 42, 50, 66, 98, 162, 290]
extra_bits = \
[
0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17
]
position_base = \
[
0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192, 256, 384, 512,
768, 1024, 1536, 2048, 3072, 4096, 6144, 8192, 12288, 16384, 24576, 32768,
49152, 65536, 98304, 131072, 196608, 262144, 393216, 524288, 655360,
786432, 917504, 1048576, 1179648, 1310720, 1441792, 1572864, 1703936,
1835008, 1966080, 2097152
]
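# Note on the three tables above (standard LZX, added for clarity):
# position_slots[window - 15] gives the number of position slots for window
# sizes 15..25; a decoded slot index is then expanded to a match offset as
# position_base[slot] + read_bits(extra_bits[slot]).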
class LZXState(object):
"""
Holds the current state of LZX decompression.
"""
def __init__(self, window):
if window < 15 or window > 21:
raise UnsupportedWindowSizeRange()
self.R0 = 1
self.R1 = 1
self.R2 = 1
self.main_elements = LZXConstants.NUM_CHARS + (LZXConstants.position_slots[window - 15] << 3)
self.header_read = False
self.block_type = LZXConstants.BLOCKTYPE_INVALID
self.block_length = 0
self.block_remaining = 0
self.frames_read = 0
self.intel_filesize = 0
self.intel_curpos = 0
self.intel_started = False
self.pretree_table = [0] * ((1 << LZXConstants.PRETREE_TABLEBITS) + (LZXConstants.PRETREE_MAXSYMBOLS << 1))
self.pretree_len = [0] * (LZXConstants.PRETREE_MAXSYMBOLS + LZXConstants.LENTABLE_SAFETY)
self.maintree_table = [0] * ((1 << LZXConstants.MAINTREE_TABLEBITS) + (LZXConstants.MAINTREE_MAXSYMBOLS << 1))
self.maintree_len = [0] * (LZXConstants.MAINTREE_MAXSYMBOLS + LZXConstants.LENTABLE_SAFETY)
self.lentree_table = [0] * ((1 << LZXConstants.LENTREE_TABLEBITS) + (LZXConstants.LENTREE_MAXSYMBOLS << 1))
self.lentree_len = [0] * (LZXConstants.LENTREE_MAXSYMBOLS + LZXConstants.LENTABLE_SAFETY)
self.aligntree_table = [0] * (
(1 << LZXConstants.ALIGNTREE_TABLEBITS) + (LZXConstants.ALIGNTREE_MAXSYMBOLS << 1))
self.aligntree_len = [0] * (LZXConstants.ALIGNTREE_MAXSYMBOLS + LZXConstants.LENTABLE_SAFETY)
self.window_size = 1 << (window & 0x1f)
self.actual_size = self.window_size
self.window = bytearray(b'\xDC') * self.window_size
self.window_posn = 0
class LZXDecoder(object):
def __init__(self, window):
self.state = LZXState(window)
def decompress(self, in_f, in_len, out_f, out_len):
"""
Decompresses an input file.
:param in_f: Compressed input file
:param in_len: Length of compressed input file
:param out_f: Decompressed output file
:param out_len: Length of decompressed output file
:return: Status of function
"""
bit_buf = LZXDecoder.BitBuffer(in_f)
start_pos = in_f.tell()
end_pos = start_pos + in_len
togo = out_len
'''
The header consists of either a zero bit indicating no encoder preprocessing, or a one bit followed by a
file translation size, a value which is used in encoder preprocessing.
'''
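# Concretely (this matches the reads just below): 1 bit -> E8 translation flag;
# if set, two 16-bit reads give the high and low halves of intel_filesize.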
if not self.state.header_read:
intel = bit_buf.read_bits(1)
if intel == 1:
i = bit_buf.read_bits(16)
j = bit_buf.read_bits(16)
self.state.intel_filesize = (i << 16) | j
self.state.header_read = True
while togo > 0:
if self.state.block_remaining == 0:
if self.state.block_type == LZXConstants.BLOCKTYPE_UNCOMPRESSED:
if (self.state.block_length & 1) == 1:
in_f.seek(1, os.SEEK_CUR)
self.state.block_type = LZXConstants.BLOCKTYPE_INVALID
bit_buf.reset()
self.state.block_type = LZXConstants.BlockTypeEnum(bit_buf.read_bits(3))
self.state.block_length = bit_buf.read_bits(24)
self.state.block_remaining = self.state.block_length
if self.state.block_type == LZXConstants.BLOCKTYPE_ALIGNED:
for i in range(0, 8):
self.state.aligntree_len[i] = bit_buf.read_bits(3)
self.__make_decode_table(LZXConstants.ALIGNTREE_MAXSYMBOLS, LZXConstants.ALIGNTREE_TABLEBITS,
self.state.aligntree_len, self.state.aligntree_table)
if self.state.block_type == LZXConstants.BLOCKTYPE_VERBATIM or \
self.state.block_type == LZXConstants.BLOCKTYPE_ALIGNED:
self.__read_lengths(self.state.maintree_len, 0, 256, bit_buf)
self.__read_lengths(self.state.maintree_len, 256, self.state.main_elements, bit_buf)
LZXDecoder.__make_decode_table(LZXConstants.MAINTREE_MAXSYMBOLS, LZXConstants.MAINTREE_TABLEBITS,
self.state.maintree_len, self.state.maintree_table)
if self.state.maintree_len[0xE8] != 0:
self.state.intel_started = True
self.__read_lengths(self.state.lentree_len, 0, LZXConstants.SECONDARY_NUM_ELEMENTS, bit_buf)
LZXDecoder.__make_decode_table(LZXConstants.LENTREE_MAXSYMBOLS, LZXConstants.LENTREE_TABLEBITS,
self.state.lentree_len, self.state.lentree_table)
elif self.state.block_type == LZXConstants.BLOCKTYPE_UNCOMPRESSED:
if end_pos <= in_f.tell() + 4:
return -1
self.state.intel_started = True
bit_buf.ensure_bits(16)
if bit_buf.bits_left > 16:
in_f.seek(-2, os.SEEK_CUR)
self.state.R0 = int.from_bytes(in_f.read(4), byteorder='little')
self.state.R1 = int.from_bytes(in_f.read(4), byteorder='little')
self.state.R2 = int.from_bytes(in_f.read(4), byteorder='little')
else:
return -1
if in_f.tell() > start_pos + in_len:
if in_f.tell() > start_pos + in_len + 2 or bit_buf.bits_left < 16:
return -1
togo -= self.state.block_remaining if self.state.block_remaining > togo else togo
self.state.window_posn &= self.state.window_size - 1
if self.state.window_posn + self.state.block_remaining > self.state.window_size:
return -1
if self.state.block_type == LZXConstants.BLOCKTYPE_VERBATIM or \
self.state.block_type == LZXConstants.BLOCKTYPE_ALIGNED:
# Block Type: Verbatim or Aligned
self.__decompress_block(bit_buf)
elif self.state.block_type == LZXConstants.BLOCKTYPE_UNCOMPRESSED:
# Block Type: Uncompressed
if in_f.tell() >= end_pos:
return -1
self.__decompress_uncompress(in_f)
else:
return -1
if togo != 0:
return -1
start_window_pos = self.state.window_size if self.state.window_posn == 0 else self.state.window_posn
start_window_pos -= out_len
out_f.write(memoryview(self.state.window)[start_window_pos:start_window_pos + out_len])
'''
The encoder may optionally perform a preprocessing stage on all input blocks which improves compression on
Intel x86 code. The preprocessing translates x86 CALL instructions to use absolute offsets instead of
relative offsets. Therefore must be translated back to relative offsets after decompression.
'''
self.undo_e8_preprocessing(out_len, out_f)
return 0
def undo_e8_preprocessing(self, out_len, out_f):
"""
Translates x86 CALL instruction offsets from absolute to relative.
:param out_len: Output file length
:param out_f: Output file
:return: None
"""
if out_len >= 10 and self.state.intel_started:
out_f.seek(0)
i = 0
'''
E8 preprocessing does not appear to be disabled after the 32768th chunk of a XIP compressed file,
which is another difference from the LZX compression used in cabinet files.
'''
while i < out_len - 10:
byte = int.from_bytes(out_f.read(1), byteorder='little')
if byte == 0xE8:
absolute_offset = int.from_bytes(out_f.read(4), byteorder='little', signed=True)
'''Values in the range of -2^31 to i and intel_filesize to +2^31 are left unchanged.'''
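# Hypothetical worked example (added for clarity): with intel_filesize large
# enough, a CALL operand read at position i = 0x100 holding the absolute target
# 0x5000 is rewritten to the relative value 0x5000 - 0x100 = 0x4F00; a negative
# operand in range has intel_filesize added back instead.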
if -i <= absolute_offset < self.state.intel_filesize:
absolute_offset += -i if absolute_offset >= 0 else self.state.intel_filesize
out_f.seek(-4, os.SEEK_CUR)
out_f.write(absolute_offset.to_bytes(4, byteorder='little', signed=True))
i += 4
i += 1
def __read_lengths(self, lens, first, last, bit_buf):
"""
Reads the pretree from the input, then uses the pretree to decode lens length values from the input.
:param lens: Decode table length
:param first: first index of the given length table
:param last: last index of the given length table
:param bit_buf: Input bitstream
:return: None
"""
for x in range(0, 20):
self.state.pretree_len[x] = bit_buf.read_bits(4)
LZXDecoder.__make_decode_table(LZXConstants.PRETREE_MAXSYMBOLS, LZXConstants.PRETREE_TABLEBITS,
self.state.pretree_len, self.state.pretree_table)
x = first
while x < last:
z = self.__read_huff_sym_pretree(bit_buf)
if z == 17:
y = bit_buf.read_bits(4) + 4
for _ in range(y):
lens[x] = 0
x += 1
elif z == 18:
y = bit_buf.read_bits(5) + 20
for _ in range(y):
lens[x] = 0
x += 1
elif z == 19:
y = bit_buf.read_bits(1) + 4
z = self.__read_huff_sym_pretree(bit_buf)
z = (lens[x] + 17 - z) % 17
for _ in range(y):
lens[x] = z
x += 1
else:
z = (lens[x] + 17 - z) % 17
lens[x] = z
x += 1
@staticmethod
def __read_huff_sym(table, lengths, nsyms, nbits, bit_buf, codeword):
"""
Reads and returns the next Huffman-encoded symbol from a bitstream.
:param table: Decode table
:param lengths: Decode table length
:param nsyms: Decode table's max symbols
:param nbits: Decode table bit length
:param bit_buf: Input bitstream
:param codeword: Codeword length
:return: Huffman-encoded symbol
"""
bit_buf.ensure_bits(codeword)
i = table[bit_buf.peek_bits(nbits)]
if i >= nsyms:
j = 1 << (LZXDecoder.BitBuffer.buffer_num_bits - nbits)
while True:
j >>= 1
i <<= 1
i |= 1 if (bit_buf.buffer.value & j) != 0 else 0
if j == 0:
return 0
i = table[i]
if i < nsyms:
break
j = lengths[i]
bit_buf.remove_bits(j)
return i
def __read_huff_sym_pretree(self, bit_buf):
"""
Reads and returns the next Huffman-encoded symbol from a bitstream using the PreTree.
:param bit_buf: Input bitstream
:return: Huffman-encoded symbol
"""
return self.__read_huff_sym(self.state.pretree_table, self.state.pretree_len,
LZXConstants.PRETREE_MAXSYMBOLS, LZXConstants.PRETREE_TABLEBITS, bit_buf,
LZXConstants.PRETREE_MAX_CODEWORD)
def __read_huff_sym_maintree(self, bit_buf):
"""
Reads and returns the next Huffman-encoded
import numpy as np
import torch
import pysc2
from pysc2.agents import base_agent
from pysc2.env import sc2_env
from pysc2.lib import actions, features, units
from pysc2 import maps, lib
from s2clientprotocol import sc2api_pb2 as sc_pb
from sc2env.pysc2_util import register_map
from sc2env.utility import getOneHotState
from copy import copy, deepcopy
import os
import sys
import random
SCREEN_SIZE = 40
MAP_NAME = 'TugOfWar-2-Lane-Self-Play-No-FIFO'
UNIT_TYPES = {
'SCV': 45,
'Marine': 48,
'Baneling': 9,
'Immortal': 83
}
action_to_ability_id = {
0: 146, # Effect Marine T
1: 148, # Effect Baneling T
2: 150, # Effect Immortal T
3: 156, # Effect Marine B
4: 158, # Effect Baneling B
5: 160, # Effect Immortal B
6: 152, # Effect Pylon
'switch_player': 154,
}
action_to_name = {
0: "Effect Marine T",
1: "Effect Baneling T",
2: "Effect Immortal T",
3: "Effect Marine B",
4: "Effect Baneling B",
5: "Effect Immortal B",
6: "Effect Pylon",
7: "no_op",
}
unit_types_player1 = {
21 : 1, #'Barracks'
28 : 2, # 'Starport'
70 : 3, # 'RoboticsFacility'
60 : 7, # 'Pylon'
59 : 63, # 'Nexus'
48 : 15, # 'Marine'
9 : 16, # 'Baneling'
83 : 17 # 'Immortal'
}
unit_types_player2 = {
21 : 8, #'Barracks'
28 : 9, # 'Starport'
70 : 10, # 'RoboticsFacility'
60 : 14, # 'Pylon'
59 : 65, # 'Nexus'
48 : 39, # 'Marine'
9 : 40, # 'Baneling'
83 : 41 # 'Immortal'
}
reward_dict = {
1: "damge_to_player1_top_2",
2: "damge_to_player1_bottom_2",
101: "damge_to_player2_top_1",
102: "damge_to_player2_bottom_1",
3: "player1_wins_1",
103: "player2_wins_2"
}
maker_cost = {
'Marine T' : 50,
'Baneling T' : 75,
'Immortal T' : 200,
'Marine B' : 50,
'Baneling B' : 75,
'Immortal B' : 200,
}
action_component_names = {
0: 'Marine',
1: 'Baneling',
2: 'Immortal',
3: 'Pylon'
}
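# Note on the dictionaries above (a summary of what is visible here, not new
# behavior): action indices 0-6 line up across action_to_name and
# action_to_ability_id, indices 0-5 also match the six maker_cost entries,
# index 6 is the Pylon (priced via self.pylon_cost in TugOfWar.__init__), and
# index 7 is labelled "no_op".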
class TugOfWar():
def __init__(self, map_name = None, unit_type = [], generate_xai_replay = False, xai_replay_dimension = 256, verbose = False):
if map_name is None:
map_name = MAP_NAME
maps_dir = os.path.join(os.path.dirname(__file__), '..', 'maps')
print("map director: " + str(maps_dir))
register_map(maps_dir, map_name)
if generate_xai_replay:
aif=features.AgentInterfaceFormat(
feature_dimensions=features.Dimensions(screen=SCREEN_SIZE, minimap=SCREEN_SIZE),
rgb_dimensions=sc2_env.Dimensions(
screen=(1.5*xai_replay_dimension, xai_replay_dimension),
minimap=(64, 64),
),
action_space=actions.ActionSpace.FEATURES,
camera_width_world_units = 28,
#use_camera_position = True,
)
step_mul_value = 4
# step_mul_value = 16
else:
aif=features.AgentInterfaceFormat(
feature_dimensions = features.Dimensions(screen = SCREEN_SIZE, minimap = SCREEN_SIZE),
action_space = actions.ActionSpace.FEATURES,
camera_width_world_units = 100,
)
step_mul_value = 16
np.set_printoptions(threshold=sys.maxsize,linewidth=sys.maxsize, precision = 2)
self.sc2_env = sc2_env.SC2Env(
map_name = map_name,
agent_interface_format = aif,
step_mul = step_mul_value,
game_steps_per_episode = 0,
score_index = 0,
visualize = True,)
self.current_obs = None
self.decomposed_rewards = []
self.verbose = verbose
self.miner_index = 0
self.reset_steps = -1
self.mineral_limiation = 1500
self.norm_vector = np.array([1500, # Player 1 unspent minerals
30, 30, 10, # Player 1 top lane building
30, 30, 10, # Player 1 bottom lane building
3, # Player 1 pylons
30, 30, 10, # Player 2 top lane building
30, 30, 10, # Player 2 bottom lane building
3, # Player 2 pylons
30, 30, 10, # Player 1 units top lane grid 1
30, 30, 10, # Player 1 units top lane grid 2
30, 30, 10, # Player 1 units top lane grid 3
30, 30, 10, # Player 1 units top lane grid 4
30, 30, 10, # Player 1 units bottom lane grid 1
30, 30, 10, # Player 1 units bottom lane grid 2
30, 30, 10, # Player 1 units bottom lane grid 3
30, 30, 10, # Player 1 units bottom lane grid 4
30, 30, 10, # Player 2 units top lane grid 1
30, 30, 10, # Player 2 units top lane grid 2
30, 30, 10, # Player 2 units top lane grid 3
30, 30, 10, # Player 2 units top lane grid 4
30, 30, 10, # Player 2 units bottom lane grid 1
30, 30, 10, # Player 2 units bottom lane grid 2
30, 30, 10, # Player 2 units bottom lane grid 3
30, 30, 10, # Player 2 units bottom lane grid 4
2000, 2000, # Player 1 Nexus HP (top, bottom)
2000, 2000, # Player 2 Nexus HP (top, bottom)
40]) # Wave Number
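# Hedged note (the division below is not performed inside this class):
# norm_vector mirrors the feature layout documented in get_custom_state(), so a
# caller could scale a raw state to roughly [0, 1] with, e.g.
#   normalized = np.array(raw_state, dtype=np.float32) / env.norm_vector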
self.decision_point = 1
self.signal_of_end = False
self.end_state = None
self.maker_cost_np = np.zeros(len(maker_cost))
# Have to change the combine func if this changed
self.pylon_cost = 300
self.pylon_index = 7
for i, mc in enumerate(maker_cost.values()):
self.maker_cost_np[i] = mc
self.last_decomposed_reward_dict = {}
self.decomposed_reward_dict = {}
self.num_waves = 0
maps_dir = os.path.join(os.path.dirname(__file__), '..', 'maps')
action_dict_path = os.path.join(os.path.dirname(__file__), 'action_1500_tow_2L.pt')
print("actions path:" + action_dict_path)
self.a_dict = torch.load(action_dict_path)
self.action_space = self.a_dict['actions']
self.action_space_dict = self.a_dict['mineral']
# print(self.a_dict.keys())
# at the end of the reward type name:
# 1 means for player 1 is positive, for player 2 is negative
# 2 means for player 2 is positive, for player 1 is negative
self.reward_types = list(reward_dict.values())
# print(self.reward_types)
for rt in self.reward_types:
self.decomposed_reward_dict[rt] = 0
self.last_decomposed_reward_dict[rt] = 0
unit_type = [UNIT_TYPES['Marine'], UNIT_TYPES['Baneling'], UNIT_TYPES['Immortal']]
# self.input_screen_features = {
# "PLAYER_RELATIVE":[1, 4],
# "UNIT_TYPE": unit_type,
# 'HIT_POINT': 0,
# 'HIT_POINT_RATIO': 0,
# 'SHIELD': 0,
# 'SHIELD_RATIO': 0,
# 'UNIT_DENSITY': 0
# }
def reset(self):
# Move the camera in any direction
# This runs the ResetEpisode trigger built into the map
self.decomposed_rewards = []
action = actions.FUNCTIONS.move_camera([0, 0])
self.current_obs = self.sc2_env.step([action])[0]
if self.reset_steps >= 10:
self.sc2_env.reset()
self.reset_steps = 0
self.reset_steps += 1
self.end_state = None
self.decision_point = 1
self.num_waves = 0
data = self.sc2_env._controllers[0]._client.send(observation = sc_pb.RequestObservation())
actions_space = self.sc2_env._controllers[0]._client.send(action = sc_pb.RequestAction())
data = data.observation.raw_data.units
self.getRewards(data)
# Get channel states
# state = self.get_channel_state(self.current_obs)
# Get custom states
state_1 = self.get_custom_state(data, 1)
state_2 = self.get_custom_state(data, 2)
for rt in self.reward_types:
self.decomposed_reward_dict[rt] = 0
self.last_decomposed_reward_dict[rt] = 0
# self.use_custom_ability(action_to_ability_id['switch_player'])
return state_1, state_2
def step(self, action, player):
done = False
dp = False
data = self.sc2_env._controllers[0]._client.send(observation=sc_pb.RequestObservation())
data = data.observation.raw_data.units
# pretty_print_units(data)
#input("pausing at step")
## ACTION TAKING ###
if len(action) > 0:
current_player = self.get_current_player(data)
# print(current_player)
if current_player != player:
# print('switch')
self.use_custom_ability(action_to_ability_id['switch_player'])
for a_index, num_action in enumerate(action):
for _ in range(int(num_action)):
# print(a_index, num_action)
self.use_custom_ability(action_to_ability_id[a_index])
# self.use_custom_ability(action_to_ability_id[0])
action = actions.FUNCTIONS.no_op()
self.current_obs = self.sc2_env.step([action])[0]
else:
action = actions.FUNCTIONS.no_op()
self.current_obs = self.sc2_env.step([action])[0]
# Get reward from data
done, dp = self.getRewards(data)
if dp or done:
# Get channel states
# state = self.get_channel_state(self.current_obs)
# Get custom states
self.num_waves += 1
state_1 = self.get_custom_state(data, 1)
state_2 = self.get_custom_state(data, 2)
if done:
self.end_state_1 = state_1
self.end_state_2 = state_2
# print(self.decomposed_reward_dict)
self.decomposed_rewards = []
for rt in self.reward_types:
value_reward = self.decomposed_reward_dict[rt] - self.last_decomposed_reward_dict[rt]
self.decomposed_rewards.append(value_reward)
# TODO: consider merging these two loops
for rt in self.reward_types:
self.last_decomposed_reward_dict[rt] = self.decomposed_reward_dict[rt]
return state_1, state_2, done, dp
return None, None, done, dp
def register_map(self, map_dir, map_name):
map_filename = map_name + '.SC2Map'
class_definition = dict(prefix = map_dir, filename = map_filename, players = 1)
constructed_class = type(map_name, (pysc2.maps.lib.Map,), class_definition)
globals()[map_name] = constructed_class
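# Illustrative note (the map name below is hypothetical): defining the subclass makes
# the map discoverable through pysc2's map registry, so after e.g.
#   self.register_map(maps_dir, 'MyTugOfWarMap')
# pysc2.maps.get('MyTugOfWarMap') should resolve to the class constructed above.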
def use_custom_ability(self, ability_id, player_id=1):
# Sends a command directly to the SC2 protobuf API
# Can cause the pysc2 client to desync, unless step_sc2env() is called afterward
from s2clientprotocol import sc2api_pb2
from s2clientprotocol import common_pb2
from s2clientprotocol import spatial_pb2
def get_action_spatial(ability_id):
target_point = common_pb2.PointI()
target_point.x = 0
target_point.y = 0
action_spatial_unit_command = spatial_pb2.ActionSpatialUnitCommand(target_minimap_coord=target_point)
action_spatial_unit_command.ability_id = ability_id
action_spatial = spatial_pb2.ActionSpatial(unit_command=action_spatial_unit_command)
action = sc2api_pb2.Action(action_feature_layer=action_spatial)
return action
player_action = get_action_spatial(ability_id)
request_action = sc2api_pb2.RequestAction(actions=[player_action])
request = sc2api_pb2.Request(action=request_action)
# Bypass pysc2 and send the proto directly
client = self.sc2_env._controllers[player_id - 1]._client
if self.verbose:
print('Calling client.send_req for player_id {}'.format(player_id))
if self.sc2_env._state == 2:
print('Game is over, cannot send action')
return
client.send_req(request)
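# For example, self.use_custom_ability(action_to_ability_id['switch_player']) fires the
# map's switch-player trigger; step() above follows these calls with a no-op
# FUNCTIONS.no_op() step so the pysc2 client stays in sync (see the desync note at the
# top of this method).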
def get_channel_state(self, observation):
state = observation[3]['feature_screen']
state = getOneHotState(state, self.input_screen_features)
state = np.reshape(state, (1, -1))
return state
def get_custom_state(self, data, player):
"""
State of Player 1:
For the player 2, the position of player1 label and player2 label switch
Unspent Miner 0
Plyer1 : Number of Marines Maker 1 T
Plyer1 : Number of Banelings Maker 2 T
Plyer1 : Number of Immortal Maker 3 T
Plyer1 : Number of Marines Maker 4 B
Plyer1 : Number of Banelings Maker 5 B
Plyer1 : Number of Immortal Maker 6 B
Plyer1 : Number of Pylon 7
Plyer2 : Number of Marines Maker | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'eugene'
'''
MIT License
Copyright (c) 2015 <NAME> (email : <EMAIL>)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
'''
Step 1) Load template files to memory
Step 2) Search and replace these tags in memory (including filenames).
<<<NAMESPACE>>>
<<<STATEMACHINENAME>>> or <<<CLASSNAME>>>
<<<AUTHOR>>>
Step 3) Search for the following pairs of tags
<<<PER_STATE_BEGIN>>>
<<<PER_STATE_END>>>
<<<PER_EVENT_BEGIN>>>
<<<PER_EVENT_END>>>
<<<PER_ACTION_BEGIN>>>
<<<PER_ACTION_END>>>
<<<PER_ACTION_SIGNATURE_BEGIN>>>
<<<PER_ACTION_SIGNATURE_END>>>
<<<PER_GUARD_BEGIN>>>
<<<PER_GUARD_END>>>
and duplicate the following for each item, replacing each tag with the item name
<<<STATENAME>>>
<<<EVENTNAME>>>
<<<ACTIONNAME>>>
<<<GUARDNAME>>>
These need to be expanded for event structs
<<<EVENTSIGNATURE>>>
<<<EVENTMEMBERSINSTANTIATE>>>
<<<EVENTMEMBERSDECLARE>>>
When looping <<<ALPH>>> should increment from a through Z.
When looping <<<NUM>>> should increment from 1 through 10000.
When reading the transition table, first state name (top, left) should be set to the value for this tag : <<<STATE_0>>>
Then, the transition table needs to go here, following the rules.
<<<TTT_BEGIN>>>
<<<TTT_END>>>
or
<<<TTT_LITE_BEGIN>>>
<<<TTT_LITE_END>>>
or
<<<TTT_LITE_SML_BEGIN>>>
<<<TTT_LITE_SML_END>>>
# EMBEDDED SM SUPPORT.
Step 4) Inside each <<<PER_XXX_BEGIN>>>/<<<PER_XXX_END>>> pair there might be more expansion required. The following tags apply in this pass
<<<PER_EVENT_CURRENT_NEXT_STATE_BEGIN>>>
<<<PER_EVENT_NEXT_STATE_END>>>
and the following replacement tags will be correctly set
<<<EVENTSTATECURRENT>>>
<<<EVENTSTATENEXT>>>
Also, the original SM only allows a single state-based action to happen.
I want there to be several actions allowed in a State, based on several events valid in that state.
These tags provide for that.
<<<PER_STATE_ACTION_EVENT_BEGIN>>>
<<<PER_STATE_ACTION_EVENT_END>>>
and the following replacement tags will be correctly set
<<<PER_STATE_ACTION>>>
<<<PER_STATE_EVENT>>>
# END EMBEDDED SM SUPPORT.
'''
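# A minimal sketch (not part of the generator) of the expansion described above:
# simple tags are replaced in place, while the text between a PER_*_BEGIN/END pair is
# duplicated once per item with the item tag substituted each time. The tag names are
# the real ones defined below; the state names passed in are invented examples.
def _example_expand_states(template_lines, state_names):
    out, block, in_block = [], [], False
    for line in template_lines:
        if '<<<PER_STATE_BEGIN>>>' in line:
            in_block, block = True, []
        elif '<<<PER_STATE_END>>>' in line:
            in_block = False
            # Duplicate the captured block once per state, substituting the item tag.
            for state in state_names:
                out.extend(b.replace('<<<STATENAME>>>', state) for b in block)
        elif in_block:
            block.append(line)
        else:
            out.append(line.replace('<<<STATEMACHINENAME>>>', 'MyMachine'))
    return out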
__TAG_AUTHOR__ = '<<<AUTHOR>>>'
__TAG_GROUP__ = '<<<GROUP>>>'
__TAG_BRIEF__ = '<<<BRIEF>>>'
__TAG_NAMESPACE__ = '<<<NAMESPACE>>>'
__TAG_SM_NAME__ = '<<<STATEMACHINENAME>>>'
__TAG_SM_NAME_UPPER__ = '<<<STATEMACHINENAMEUPPER>>>'
__TAG_CLASS_NAME__ = '<<<CLASSNAME>>>'
__TAG_PyIFGen_NAME__ = '<<<PYIFGENNAME>>>'
__TAG_PS_BEGIN__ = "<<<PER_STATE_BEGIN>>>"
__TAG_PS_END__ = "<<<PER_STATE_END>>>"
__TAG_PE_BEGIN__ = "<<<PER_EVENT_BEGIN>>>"
__TAG_PE_END__ = "<<<PER_EVENT_END>>>"
__TAG_PA_BEGIN__ = "<<<PER_ACTION_BEGIN>>>"
__TAG_PA_END__ = "<<<PER_ACTION_END>>>"
__TAG_PASIG_BEGIN__ = "<<<PER_ACTION_SIGNATURE_BEGIN>>>"
__TAG_PASIG_END__ = "<<<PER_ACTION_SIGNATURE_END>>>"
__TAG_PG_BEGIN__ = "<<<PER_GUARD_BEGIN>>>"
__TAG_PG_END__ = "<<<PER_GUARD_END>>>"
__TAG_EVENT_SIGNATURE__ = "<<<EVENTSIGNATURE>>>"
__TAG_EVENT_MEMBERINST__ = "<<<EVENTMEMBERSINSTANTIATE>>>"
__TAG_LITE_EVENT_MEMBERINST__ = "<<<EVENTMEMBERSLITEINSTANTIATE>>>"
__TAG_EVENT_MEMBERDECL__ = "<<<EVENTMEMBERSDECLARE>>>"
__TAG_STATENAME__ = '<<<STATENAME>>>'
__TAG_EVENTNAME__ = '<<<EVENTNAME>>>'
__TAG_EVENTNAME_SMALL_CAMEL__ = '<<<EVENTNAMESMALLCAMEL>>>'
__TAG_ACTIONNAME__ = '<<<ACTIONNAME>>>'
__TAG_GUARDNAME__ = '<<<GUARDNAME>>>'
__TAG_ABC__ = '<<<ALPH>>>'
__TAG_123__ = '<<<NUM>>>'
__TAG_INIT_STATE__ = '<<<STATE_0>>>'
__TAG_TTT_BEGIN__ = '<<<TTT_BEGIN>>>'
__TAG_TTT_END___ = '<<<TTT_END>>>'
__TAG_TTT_LITE_BEGIN__ = '<<<TTT_LITE_BEGIN>>>'
__TAG_TTT_LITE_END__ = '<<<TTT_LITE_END>>>'
__TAG_TTT_LITE_SML_BEGIN__ = '<<<TTT_LITE_SML_BEGIN>>>'
__TAG_TTT_LITE_SML_END__ = '<<<TTT_LITE_SML_END>>>'
__TAG_DECLSPEC_DLL_EXPORT__ = "<<<DLL_EXPORT>>>"
# EMBEDDED SM SUPPORT.
__TAG_EVENT_CURNEX_ST_BEG__ = "<<<PER_EVENT_CURRENT_NEXT_STATE_BEGIN>>>"
__TAG_EVENT_CURNEX_ST_END__ = "<<<PER_EVENT_NEXT_STATE_END>>>"
__TAG_EVENT_ST_CUR__ = "<<<EVENTSTATECURRENT>>>"
__TAG_EVENT_ST_NXT__ = "<<<EVENTSTATENEXT>>>"
__TAG_PSAE_BEGIN__ = "<<<PER_STATE_ACTION_EVENT_BEGIN>>>"
__TAG_PSAE_END__ = "<<<PER_STATE_ACTION_EVENT_END>>>"
__TAG_PSAE_ACTION__ = "<<<PER_STATE_ACTION>>>"
__TAG_PSAE_EVENT__ = "<<<PER_STATE_EVENT>>>"
# END EMBEDDED SM SUPPORT.
# Python 2 -> 3 shenanigans... try to support both
try:
from interface_base import * # py2
except (ModuleNotFoundError, ImportError) as e:
from .interface_base import * # py3
try:
from .preservative import *
except (ModuleNotFoundError, ImportError) as e:
from preservative import *
try:
from .cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case
except (ModuleNotFoundError, ImportError) as e:
from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case
try:
from LanguageCPP import LanguageCPP
except (ModuleNotFoundError, ImportError) as e:
from .LanguageCPP import LanguageCPP
# Model that describes a state machine.
class CStateMachineModel:
def __init__(self):
self.statemachinename = ""
self.namespacename = ""
self.declspecdllexport = ""
self.pythoninterfacegeneratorfilename = ""
self.states = []
self.actions = []
self.events = []
self.guards = []
# EMBEDDED SM SUPPORT.
self.event_transitions_per_state = {} # ['event', ['next state,current state' , ...]]
self.actionevents_per_state = {} # ['state', [['event', 'action'], ...]]
# END EMBEDDED SM SUPPORT.
self.actionsignatures = OrderedDict()
# Transition Table Model uses State Machine Model to generate all code required for a working state machine.
class CTransitionTableModel(CStateMachineModel):
START_STATE = 0
EVENT = 1
NEXT_STATE = 2
ACTION = 3
GUARD = 4
def __init__(self, tt, nn, smn, dclspc = ""):
CStateMachineModel.__init__(self)
self.transition_table = tt
self.statemachinename = smn
self.namespacename = nn
self.declspecdllexport = dclspc
tstate = OrderedDict()
taction = OrderedDict()
tevent = OrderedDict()
tguard = OrderedDict()
# EMBEDDED SM SUPPORT. ['current state, event', 'next state']
tevent_transitions_tmp = {}
# END EMBEDDED SM SUPPORT.
# Filter
for tableline in self.transition_table:
if tableline[self.START_STATE] != "" and tableline[self.START_STATE].lower() != "none":
tstate[tableline[self.START_STATE]] = 0
if tableline[self.NEXT_STATE] != "" and tableline[self.NEXT_STATE].lower() != "none":
tstate[tableline[self.NEXT_STATE]] = 0
if tableline[self.EVENT] != "" and tableline[self.EVENT].lower() != "none":
tevent[tableline[self.EVENT]] = 0
# EMBEDDED SM SUPPORT. ['current state, event', 'next state']
'''
if tableline[self.NEXT_STATE] == "" or tableline[self.NEXT_STATE].lower() == "none":
raise Exception('Events that dont change state should re-enter the current state.\nPlease fix your transition table')
tevent_transitions_tmp[tableline[self.START_STATE] + ',' + tableline[self.EVENT]] = tableline[self.NEXT_STATE]
TODO: For the case below, how do we support a different 'action' on the in-state event? I.e. that event might have brought the machine
to this state with a particular action, but the user may have configured a different action for this event in-state.
'''
if tableline[self.NEXT_STATE] == "" or tableline[self.NEXT_STATE].lower() == "none":
tevent_transitions_tmp[tableline[self.START_STATE] + ',' + tableline[self.EVENT]] = tableline[self.START_STATE]
else:
tevent_transitions_tmp[tableline[self.START_STATE] + ',' + tableline[self.EVENT]] = tableline[self.NEXT_STATE]
# This is for in-state-actions based on events...
if tableline[self.ACTION] != "" and tableline[self.ACTION].lower() != "none":
if not (tableline[self.START_STATE] in self.actionevents_per_state):
self.actionevents_per_state[tableline[self.START_STATE]] = []
self.actionevents_per_state[tableline[self.START_STATE]].append([tableline[self.EVENT], tableline[self.ACTION]])
# END EMBEDDED SM SUPPORT.
if tableline[self.ACTION] != "" and tableline[self.ACTION].lower() != "none":
taction[tableline[self.ACTION]] = 0
if not ((tableline[self.ACTION] + tableline[self.EVENT]) in self.actionsignatures):
self.actionsignatures[tableline[self.ACTION] + tableline[self.EVENT]] = (tableline[self.ACTION], tableline[self.EVENT]) #, tableline[self.START_STATE],tableline[self.NEXT_STATE]))
if tableline[self.GUARD] != "" and tableline[self.GUARD].lower() != "none":
tguard[tableline[self.GUARD]] = 0
# Populate CStateMachineModel
for s in tstate:
self.states.append(s)
for e in tevent:
self.events.append(e)
for a in taction:
self.actions.append(a)
for g in tguard:
self.guards.append(g)
# EMBEDDED SM SUPPORT.
for e in tevent:
self.event_transitions_per_state[e] = []
for s in tstate:
key = s+','+e
if key in tevent_transitions_tmp:
self.event_transitions_per_state[e].append([tevent_transitions_tmp[key], s])
else:
self.event_transitions_per_state[e].append(['EVENT_IGNORED', s])
# END EMBEDDED SM SUPPORT.
def __getfirststate__(self):
if not self.transition_table:
return "NO TT PRESENT!"
return self.transition_table[0][0]
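# Illustrative only (not consumed by the generator): a transition table in the row
# format CTransitionTableModel expects, i.e. [start state, event, next state, action,
# guard]; all state/event/action/guard names below are invented.
_EXAMPLE_TRANSITION_TABLE = [
    ['StateIdle',    'EventStart', 'StateRunning', 'OnStart', 'None'],
    ['StateRunning', 'EventTick',  'None',         'OnTick',  'None'],   # in-state event: re-enters StateRunning
    ['StateRunning', 'EventStop',  'StateIdle',    'OnStop',  'IsSafeToStop'],
]
# e.g. CTransitionTableModel(_EXAMPLE_TRANSITION_TABLE, 'myns', 'MySM') would collect
# states ['StateIdle', 'StateRunning'], events ['EventStart', 'EventTick', 'EventStop'],
# actions ['OnStart', 'OnTick', 'OnStop'] and guards ['IsSafeToStop'].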
class CStateMachineGenerator(CBASEGenerator):
def __init__(self, inputfiledir, outputfiledir, events_interface=None, language=None, author='Anonymous', group='', brief=''):
CBASEGenerator.__init__(self,inputfiledir,outputfiledir,language, author, group, brief)
self.events_interface = events_interface
def __loadtemplates_firstfiltering__(self, smmodel):
"""
See baseclass implementation. This just prepares the dictionary of things to replace
for this type of codegeneration.
@param smmodel:
@return: cgen.CCodeModel, a dictionary -> {filename,[lines]}
"""
dict_to_replace_lines = {}
dict_to_replace_lines[__TAG_SM_NAME_UPPER__] = caps(smmodel.statemachinename)
dict_to_replace_lines[__TAG_SM_NAME__] = smmodel.statemachinename
dict_to_replace_lines[__TAG_CLASS_NAME__] = smmodel.statemachinename
dict_to_replace_lines[__TAG_PyIFGen_NAME__] = smmodel.pythoninterfacegeneratorfilename.replace('.py', '') # hack : for tcpgen simple templates,
if not dict_to_replace_lines[__TAG_PyIFGen_NAME__]:
dict_to_replace_lines[__TAG_PyIFGen_NAME__] = self.vpp_filename
dict_to_replace_lines[__TAG_NAMESPACE__] = smmodel.namespacename
dict_to_replace_lines[__TAG_AUTHOR__] = self.author
dict_to_replace_lines[__TAG_GROUP__] = self.group
dict_to_replace_lines[__TAG_BRIEF__] = self.brief
dict_to_replace_lines[__TAG_DECLSPEC_DLL_EXPORT__] = smmodel.declspecdllexport
dict_to_replace_filenames = {}
dict_to_replace_filenames["TEMPLATE_"] = smmodel.statemachinename
#dict_to_replace_filenames['.ty'] = '.py'
#dict_to_replace_filenames['.t#'] = '.cs'
#dict_to_replace_filenames['.t'] = '.h'
#dict_to_replace_filenames['.hpp'] = '.cpp' # there are no '.hpp' templates...but search and replace will apply '.t -> .h' first so '.tpp' becomes '.hpp'...grrr
return CBASEGenerator.__loadtemplates_firstfiltering__(self,dict_to_replace_lines,dict_to_replace_filenames)
def __get_event_signature__(self,name):
if self.events_interface is None or self.language is None:
return ""
for s in self.events_interface.Structs():
if s.Name == name:
return self.language.ParameterString(self.language.GetFactoryCreateParams(s, self.events_interface))
return ""
def __instantiate_event_struct_member(self, name, whitespace_cnt, is_ptr=True, instancename="data"):
if self.events_interface is None or self.language is None:
return ""
for s in self.events_interface.Structs():
if s.Name == name:
guts = self.language.InstantiateStructMembers(s, self.events_interface, '', instancename, self.language.Accessor(is_ptr))
result = ''
cnt = 0
for g in guts:
result = result + (whitespace_cnt*' ' if cnt > 0 else '') + g + '\n'
cnt = cnt + 1
return result
return ""
def __declare_event_struct_members(self, name, whitespace_cnt):
if self.events_interface is None or self.language is None:
return ""
for s in self.events_interface.Structs():
if s.Name == name:
guts = self.language.DeclareStructMembers(s, self.events_interface, '', False)
result = ''
cnt = 0
for g in guts:
result = result + ((whitespace_cnt+1)*' ' if cnt > 0 else ' ') + g + '\n'
cnt = cnt + 1
# remove last '\n'
result = result[:-1]
return result
return ""
def hasTag(self, line, tag):
return line.find(tag.replace("<<<", "").replace(">>>", "")) > 0
def hasMemberName(self, a):
return a.find("::") > 0
def extractMemberNameAndTag(self, a):
member = a[a.find("::"):a.find(">>>")].replace("::", "")
tag = a.strip()
return [tag, member]
def __innerexpand__secondfiltering__(self, names2x, lines2x, puthere):
global alpha
__resetalphabet__()
cnt = 0
for name in names2x:
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class Accessory(Model):
"""Accessory item and corresponding confidence level.
:param type: Type of an accessory. Possible values include: 'headWear',
'glasses', 'mask'
:type type: str or
~azure.cognitiveservices.vision.face.models.AccessoryType
:param confidence: Confidence level of an accessory
:type confidence: float
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'AccessoryType'},
'confidence': {'key': 'confidence', 'type': 'float'},
}
def __init__(self, **kwargs):
super(Accessory, self).__init__(**kwargs)
self.type = kwargs.get('type', None)
self.confidence = kwargs.get('confidence', None)
class APIError(Model):
"""Error information returned by the API.
:param error:
:type error: ~azure.cognitiveservices.vision.face.models.Error
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'Error'},
}
def __init__(self, **kwargs):
super(APIError, self).__init__(**kwargs)
self.error = kwargs.get('error', None)
class APIErrorException(HttpOperationError):
"""Server responsed with exception of type: 'APIError'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
super(APIErrorException, self).__init__(deserialize, response, 'APIError', *args)
class ApplySnapshotRequest(Model):
"""Request body for applying snapshot operation.
All required parameters must be populated in order to send to Azure.
:param object_id: Required. User specified target object id to be created
from the snapshot.
:type object_id: str
:param mode: Snapshot applying mode. Currently only CreateNew is
supported, which means the apply operation will fail if target
subscription already contains an object of same type and using the same
objectId. Users can specify the "objectId" in request body to avoid such
conflicts. Possible values include: 'CreateNew'. Default value:
"CreateNew" .
:type mode: str or
~azure.cognitiveservices.vision.face.models.SnapshotApplyMode
"""
_validation = {
'object_id': {'required': True, 'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'},
}
_attribute_map = {
'object_id': {'key': 'objectId', 'type': 'str'},
'mode': {'key': 'mode', 'type': 'SnapshotApplyMode'},
}
def __init__(self, **kwargs):
super(ApplySnapshotRequest, self).__init__(**kwargs)
self.object_id = kwargs.get('object_id', None)
self.mode = kwargs.get('mode', "CreateNew")
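# Illustrative usage only (not part of the generated SDK): models in this module are
# constructed via keyword arguments whose names follow the :param: lists above, e.g.:
def _example_apply_snapshot_request():
    # object_id must satisfy the _validation rules above (<= 64 chars, ^[a-z0-9-_]+$);
    # the value used here is a made-up example.
    return ApplySnapshotRequest(object_id='new-person-group-id', mode='CreateNew')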
class Blur(Model):
"""Properties describing any presence of blur within the image.
:param blur_level: An enum value indicating level of blurriness. Possible
values include: 'Low', 'Medium', 'High'
:type blur_level: str or
~azure.cognitiveservices.vision.face.models.BlurLevel
:param value: A number indicating level of blurriness ranging from 0 to 1.
:type value: float
"""
_attribute_map = {
'blur_level': {'key': 'blurLevel', 'type': 'BlurLevel'},
'value': {'key': 'value', 'type': 'float'},
}
def __init__(self, **kwargs):
super(Blur, self).__init__(**kwargs)
self.blur_level = kwargs.get('blur_level', None)
self.value = kwargs.get('value', None)
class Coordinate(Model):
"""Coordinates within an image.
All required parameters must be populated in order to send to Azure.
:param x: Required. The horizontal component, in pixels.
:type x: float
:param y: Required. The vertical component, in pixels.
:type y: float
"""
_validation = {
'x': {'required': True},
'y': {'required': True},
}
_attribute_map = {
'x': {'key': 'x', 'type': 'float'},
'y': {'key': 'y', 'type': 'float'},
}
def __init__(self, **kwargs):
super(Coordinate, self).__init__(**kwargs)
self.x = kwargs.get('x', None)
self.y = kwargs.get('y', None)
class DetectedFace(Model):
"""Detected Face object.
All required parameters must be populated in order to send to Azure.
:param face_id:
:type face_id: str
:param recognition_model: Possible values include: 'recognition_01',
'recognition_02', 'recognition_03'. Default value: "recognition_01" .
:type recognition_model: str or
~azure.cognitiveservices.vision.face.models.RecognitionModel
:param face_rectangle: Required.
:type face_rectangle:
~azure.cognitiveservices.vision.face.models.FaceRectangle
:param face_landmarks:
:type face_landmarks:
~azure.cognitiveservices.vision.face.models.FaceLandmarks
:param face_attributes:
:type face_attributes:
~azure.cognitiveservices.vision.face.models.FaceAttributes
"""
_validation = {
'face_rectangle': {'required': True},
}
_attribute_map = {
'face_id': {'key': 'faceId', 'type': 'str'},
'recognition_model': {'key': 'recognitionModel', 'type': 'str'},
'face_rectangle': {'key': 'faceRectangle', 'type': 'FaceRectangle'},
'face_landmarks': {'key': 'faceLandmarks', 'type': 'FaceLandmarks'},
'face_attributes': {'key': 'faceAttributes', 'type': 'FaceAttributes'},
}
def __init__(self, **kwargs):
super(DetectedFace, self).__init__(**kwargs)
self.face_id = kwargs.get('face_id', None)
self.recognition_model = kwargs.get('recognition_model', "recognition_01")
self.face_rectangle = kwargs.get('face_rectangle', None)
self.face_landmarks = kwargs.get('face_landmarks', None)
self.face_attributes = kwargs.get('face_attributes', None)
class Emotion(Model):
"""Properties describing facial emotion in form of confidence ranging from 0
to 1.
:param anger:
:type anger: float
:param contempt:
:type contempt: float
:param disgust:
:type disgust: float
:param fear:
:type fear: float
:param happiness:
:type happiness: float
:param neutral:
:type neutral: float
:param sadness:
:type sadness: float
:param surprise:
:type surprise: float
"""
_attribute_map = {
'anger': {'key': 'anger', 'type': 'float'},
'contempt': {'key': 'contempt', 'type': 'float'},
'disgust': {'key': 'disgust', 'type': 'float'},
'fear': {'key': 'fear', 'type': 'float'},
'happiness': {'key': 'happiness', 'type': 'float'},
'neutral': {'key': 'neutral', 'type': 'float'},
'sadness': {'key': 'sadness', 'type': 'float'},
'surprise': {'key': 'surprise', 'type': 'float'},
}
def __init__(self, **kwargs):
super(Emotion, self).__init__(**kwargs)
self.anger = kwargs.get('anger', None)
self.contempt = kwargs.get('contempt', None)
self.disgust = kwargs.get('disgust', None)
self.fear = kwargs.get('fear', None)
self.happiness = kwargs.get('happiness', None)
self.neutral = kwargs.get('neutral', None)
self.sadness = kwargs.get('sadness', None)
self.surprise = kwargs.get('surprise', None)
class Error(Model):
"""Error body.
:param code:
:type code: str
:param message:
:type message: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(self, **kwargs):
super(Error, self).__init__(**kwargs)
self.code = kwargs.get('code', None)
self.message = kwargs.get('message', None)
class Exposure(Model):
"""Properties describing exposure level of the image.
:param exposure_level: An enum value indicating level of exposure.
Possible values include: 'UnderExposure', 'GoodExposure', 'OverExposure'
:type exposure_level: str or
~azure.cognitiveservices.vision.face.models.ExposureLevel
:param value: A number indicating level of exposure level ranging from 0
to 1. [0, 0.25) is under exposure. [0.25, 0.75) is good exposure. [0.75,
1] is over exposure.
:type value: float
"""
_attribute_map = {
'exposure_level': {'key': 'exposureLevel', 'type': 'ExposureLevel'},
'value': {'key': 'value', 'type': 'float'},
}
def __init__(self, **kwargs):
super(Exposure, self).__init__(**kwargs)
self.exposure_level = kwargs.get('exposure_level', None)
self.value = kwargs.get('value', None)
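# Illustrative helper (not part of the generated SDK): the service already returns
# exposure_level, so this only restates the documented value ranges above.
def _example_exposure_level(value):
    if value < 0.25:
        return 'UnderExposure'
    if value < 0.75:
        return 'GoodExposure'
    return 'OverExposure'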
class FaceAttributes(Model):
"""Face Attributes.
:param age: Age in years
:type age: float
:param gender: Possible gender of the face. Possible values include:
'male', 'female'
:type gender: str or ~azure.cognitiveservices.vision.face.models.Gender
:param smile: Smile intensity, a number between [0,1]
:type smile: float
:param facial_hair: Properties describing facial hair attributes.
:type facial_hair: ~azure.cognitiveservices.vision.face.models.FacialHair
:param glasses: Glasses type if any of the face. Possible values include:
'noGlasses', 'readingGlasses', 'sunglasses', 'swimmingGoggles'
:type glasses: str or
~azure.cognitiveservices.vision.face.models.GlassesType
:param head_pose: Properties indicating head pose of the face.
:type head_pose: ~azure.cognitiveservices.vision.face.models.HeadPose
:param emotion: Properties describing facial emotion in form of confidence
ranging from 0 to 1.
:type emotion: ~azure.cognitiveservices.vision.face.models.Emotion
:param hair: Properties describing hair attributes.
:type hair: ~azure.cognitiveservices.vision.face.models.Hair
:param makeup: Properties describing present makeups on a given face.
:type makeup: ~azure.cognitiveservices.vision.face.models.Makeup
:param occlusion: Properties describing occlusions on a given face.
:type occlusion: ~azure.cognitiveservices.vision.face.models.Occlusion
:param accessories: Properties describing any accessories on a given face.
:type accessories:
list[~azure.cognitiveservices.vision.face.models.Accessory]
:param blur: Properties describing any presence of blur within the image.
:type blur: ~azure.cognitiveservices.vision.face.models.Blur
:param exposure: Properties describing exposure level of the image.
:type exposure: ~azure.cognitiveservices.vision.face.models.Exposure
:param noise: Properties describing noise level of the image.
:type noise: ~azure.cognitiveservices.vision.face.models.Noise
"""
_attribute_map = {
'age': {'key': 'age', 'type': 'float'},
'gender': {'key': 'gender', 'type': 'Gender'},
'smile': {'key': 'smile', 'type': 'float'},
'facial_hair': {'key': 'facialHair', 'type': 'FacialHair'},
'glasses': {'key': 'glasses', 'type': 'GlassesType'},
'head_pose': {'key': 'headPose', 'type': 'HeadPose'},
'emotion': {'key': 'emotion', 'type': 'Emotion'},
'hair': {'key': 'hair', 'type': 'Hair'},
'makeup': {'key': 'makeup', 'type': 'Makeup'},
'occlusion': {'key': 'occlusion', 'type': 'Occlusion'},
'accessories': {'key': 'accessories', 'type': '[Accessory]'},
'blur': {'key': 'blur', 'type': 'Blur'},
'exposure': {'key': 'exposure', 'type': 'Exposure'},
'noise': {'key': 'noise', 'type': 'Noise'},
}
def __init__(self, **kwargs):
super(FaceAttributes, self).__init__(**kwargs)
self.age = kwargs.get('age', None)
self.gender = kwargs.get('gender', None)
self.smile = kwargs.get('smile', None)
self.facial_hair = kwargs.get('facial_hair', None)
self.glasses = kwargs.get('glasses', None)
self.head_pose = kwargs.get('head_pose', None)
self.emotion = kwargs.get('emotion', None)
self.hair = kwargs.get('hair', None)
self.makeup = kwargs.get('makeup', None)
self.occlusion = kwargs.get('occlusion', None)
self.accessories = kwargs.get('accessories', None)
self.blur = kwargs.get('blur', None)
self.exposure = kwargs.get('exposure', None)
self.noise = kwargs.get('noise', None)
class FaceLandmarks(Model):
"""A collection of 27-point face landmarks pointing to the important positions
of face components.
:param pupil_left:
:type pupil_left: ~azure.cognitiveservices.vision.face.models.Coordinate
:param pupil_right:
:type pupil_right: ~azure.cognitiveservices.vision.face.models.Coordinate
:param nose_tip:
:type nose_tip: ~azure.cognitiveservices.vision.face.models.Coordinate
:param mouth_left:
:type mouth_left: ~azure.cognitiveservices.vision.face.models.Coordinate
:param mouth_right:
:type mouth_right: ~azure.cognitiveservices.vision.face.models.Coordinate
:param eyebrow_left_outer:
:type eyebrow_left_outer:
~azure.cognitiveservices.vision.face.models.Coordinate
:param eyebrow_left_inner:
:type eyebrow_left_inner:
~azure.cognitiveservices.vision.face.models.Coordinate
:param eye_left_outer:
:type eye_left_outer:
~azure.cognitiveservices.vision.face.models.Coordinate
:param eye_left_top:
:type eye_left_top: ~azure.cognitiveservices.vision.face.models.Coordinate
:param eye_left_bottom:
:type eye_left_bottom:
~azure.cognitiveservices.vision.face.models.Coordinate
:param eye_left_inner:
:type eye_left_inner:
~azure.cognitiveservices.vision.face.models.Coordinate
:param eyebrow_right_inner:
:type eyebrow_right_inner:
~azure.cognitiveservices.vision.face.models.Coordinate
:param eyebrow_right_outer:
:type eyebrow_right_outer:
~azure.cognitiveservices.vision.face.models.Coordinate
:param eye_right_inner:
:type eye_right_inner:
~azure.cognitiveservices.vision.face.models.Coordinate
:param eye_right_top:
:type eye_right_top:
~azure.cognitiveservices.vision.face.models.Coordinate
:param eye_right_bottom:
:type eye_right_bottom:
~azure.cognitiveservices.vision.face.models.Coordinate
:param eye_right_outer:
:type eye_right_outer:
~azure.cognitiveservices.vision.face.models.Coordinate
:param nose_root_left:
:type nose_root_left:
~azure.cognitiveservices.vision.face.models.Coordinate
:param nose_root_right:
:type nose_root_right:
~azure.cognitiveservices.vision.face.models.Coordinate
:param nose_left_alar_top:
:type nose_left_alar_top:
~azure.cognitiveservices.vision.face.models.Coordinate
:param nose_right_alar_top:
:type nose_right_alar_top:
from PyQt5.Qt import QColor, QPixmap, QIcon, QCursor
from PyQt5 import QtCore
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import *
from PaintBoard import PaintBoard
import random
from getline import do_image
from compare import compare_image
import data
class MainWidget(QMainWindow):
def __init__(self, parent=None):
super().__init__(parent)
self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
self.setWindowIcon(QIcon("./UI/icons/icon.png"))
self.setAttribute(QtCore.Qt.WA_TranslucentBackground) # make the window background transparent
# Initialize the UI
self.__see = 1
self.setWindowOpacity(self.__see) # set the window opacity
self.setFixedSize(1200, 740) # fix the window size
self.setWindowTitle("动漫头像搜索工具") # set the window title ("Anime Avatar Search Tool")
self.m_flag = False # whether the window is being dragged
self.m_Position = self.pos() # window (global) position
# Add the inner background widget
self.widget = QWidget(self)
self.widget.resize(1197, 736)
self.widget.setStyleSheet("border-image: url(./UI/background.png) 0 0 0 0 stretch stretch;")
self.widget.move(0, 0)
# Initialize member variables
self.__paintBoard = PaintBoard(self)
self.__paintBoard.move(800, 145)
# Get the list of color names (as strings)
self.__colorList = QColor.colorNames()
self.__color = "null" # current color
self.__is_searched = False
self.__search_files = []
self.__now_files = []
self.__pic_files = data.pic_files
self.__files = data.pic_files
self.__number = 0 # current page number
self.__select = [False, False, False, False, False, False, False, False, False, False, False, False]
# Images
self.__pic_1 = QPixmap("./pictures/bbxy 1.jpg")
self.__pic_2 = QPixmap("./pictures/bbxy 12.jpg")
self.__pic_3 = QPixmap("./pictures/bbxy 13.jpg")
self.__pic_4 = QPixmap("./pictures/bbxy 14.jpg")
self.__pic_5 = QPixmap("./pictures/bbxy 15.jpg")
self.__pic_6 = QPixmap("./pictures/bbxy 16.jpg")
self.__pic_7 = QPixmap("./pictures/bbxy 17.jpg")
self.__pic_8 = QPixmap("./pictures/bbxy 18.jpg")
self.__pic_9 = QPixmap("./pictures/bbxy 19.jpg")
self.__pic_10 = QPixmap("./pictures/bbxy 2.jpg")
self.__pic_11 = QPixmap("./pictures/bbxy 20.jpg")
self.__pic_12 = QPixmap("./pictures/bbxy 21.jpg")
# Picture labels and overlay buttons
self.__label_1 = QLabel(self)
self.__label_1.resize(150, 150)
self.__label_1.move(65, 150)
self.__label_1.setPixmap(self.__pic_1)
self.__label_1.setScaledContents(True)
self.__btn_1 = QPushButton()
self.__btn_1.setParent(self)
self.__btn_1.setStyleSheet("QPushButton{border-image: url(./UI/icons/nothing.png)}"
"QPushButton:hover{border-image: url(./UI/icons/tick_hover.png)}")
self.__btn_1.clicked.connect(self.on_btn_1_clicked)
self.__btn_1.resize(150, 150)
self.__btn_1.move(65, 150)
self.__label_2 = QLabel(self)
self.__label_2.resize(150, 150)
self.__label_2.move(231, 150)
self.__label_2.setPixmap(self.__pic_2)
self.__label_2.setScaledContents(True)
self.__btn_2 = QPushButton()
self.__btn_2.setParent(self)
self.__btn_2.setStyleSheet("QPushButton{border-image: url(./UI/icons/nothing.png)}"
"QPushButton:hover{border-image: url(./UI/icons/tick_hover.png)}")
self.__btn_2.clicked.connect(self.on_btn_2_clicked)
self.__btn_2.resize(150, 150)
self.__btn_2.move(231, 150)
self.__label_3 = QLabel(self)
self.__label_3.resize(150, 150)
self.__label_3.move(397, 150)
self.__label_3.setPixmap(self.__pic_3)
self.__label_3.setScaledContents(True)
self.__btn_3 = QPushButton()
self.__btn_3.setParent(self)
self.__btn_3.setStyleSheet("QPushButton{border-image: url(./UI/icons/nothing.png)}"
"QPushButton:hover{border-image: url(./UI/icons/tick_hover.png)}")
self.__btn_3.clicked.connect(self.on_btn_3_clicked)
self.__btn_3.resize(150, 150)
self.__btn_3.move(397, 150)
self.__label_4 = QLabel(self)
self.__label_4.resize(150, 150)
self.__label_4.move(563, 150)
self.__label_4.setPixmap(self.__pic_4)
self.__label_4.setScaledContents(True)
self.__btn_4 = QPushButton()
self.__btn_4.setParent(self)
self.__btn_4.setStyleSheet("QPushButton{border-image: url(./UI/icons/nothing.png)}"
"QPushButton:hover{border-image: url(./UI/icons/tick_hover.png)}")
self.__btn_4.clicked.connect(self.on_btn_4_clicked)
self.__btn_4.resize(150, 150)
self.__btn_4.move(563, 150)
self.__label_5 = QLabel(self)
self.__label_5.resize(150, 150)
self.__label_5.move(65, 316)
self.__label_5.setPixmap(self.__pic_5)
self.__label_5.setScaledContents(True)
self.__btn_5 = QPushButton()
self.__btn_5.setParent(self)
self.__btn_5.setStyleSheet("QPushButton{border-image: url(./UI/icons/nothing.png)}"
"QPushButton:hover{border-image: url(./UI/icons/tick_hover.png)}")
self.__btn_5.clicked.connect(self.on_btn_5_clicked)
self.__btn_5.resize(150, 150)
self.__btn_5.move(65, 316)
self.__label_6 = QLabel(self)
self.__label_6.resize(150, 150)
self.__label_6.move(231, 316)
self.__label_6.setPixmap(self.__pic_6)
self.__label_6.setScaledContents(True)
self.__btn_6 = QPushButton()
self.__btn_6.setParent(self)
self.__btn_6.setStyleSheet("QPushButton{border-image: url(./UI/icons/nothing.png)}"
"QPushButton:hover{border-image: url(./UI/icons/tick_hover.png)}")
self.__btn_6.clicked.connect(self.on_btn_6_clicked)
self.__btn_6.resize(150, 150)
self.__btn_6.move(231, 316)
self.__label_7 = QLabel(self)
self.__label_7.resize(150, 150)
self.__label_7.move(397, 316)
self.__label_7.setPixmap(self.__pic_7)
self.__label_7.setScaledContents(True)
self.__btn_7 = QPushButton()
self.__btn_7.setParent(self)
self.__btn_7.setStyleSheet("QPushButton{border-image: url(./UI/icons/nothing.png)}"
"QPushButton:hover{border-image: url(./UI/icons/tick_hover.png)}")
self.__btn_7.clicked.connect(self.on_btn_7_clicked)
self.__btn_7.resize(150, 150)
self.__btn_7.move(397, 316)
self.__label_8 = QLabel(self)
self.__label_8.resize(150, 150)
self.__label_8.move(563, 316)
self.__label_8.setPixmap(self.__pic_8)
self.__label_8.setScaledContents(True)
self.__btn_8 = QPushButton()
self.__btn_8.setParent(self)
self.__btn_8.setStyleSheet("QPushButton{border-image: url(./UI/icons/nothing.png)}"
"QPushButton:hover{border-image: url(./UI/icons/tick_hover.png)}")
self.__btn_8.clicked.connect(self.on_btn_8_clicked)
self.__btn_8.resize(150, 150)
self.__btn_8.move(563, 316)
self.__label_9 = QLabel(self)
self.__label_9.resize(150, 150)
self.__label_9.move(65, 482)
self.__label_9.setPixmap(self.__pic_9)
self.__label_9.setScaledContents(True)
self.__btn_9 = QPushButton()
self.__btn_9.setParent(self)
self.__btn_9.setStyleSheet("QPushButton{border-image: url(./UI/icons/nothing.png)}"
"QPushButton:hover{border-image: url(./UI/icons/tick_hover.png)}")
self.__btn_9.clicked.connect(self.on_btn_9_clicked)
self.__btn_9.resize(150, 150)
self.__btn_9.move(65, 482)
self.__label_10 = QLabel(self)
self.__label_10.resize(150, 150)
self.__label_10.move(231, 482)
self.__label_10.setPixmap(self.__pic_10)
self.__label_10.setScaledContents(True)
self.__btn_10 = QPushButton()
self.__btn_10.setParent(self)
self.__btn_10.setStyleSheet("QPushButton{border-image: url(./UI/icons/nothing.png)}"
"QPushButton:hover{border-image: url(./UI/icons/tick_hover.png)}")
self.__btn_10.clicked.connect(self.on_btn_10_clicked)
self.__btn_10.resize(150, 150)
self.__btn_10.move(231, 482)
self.__label_11 = QLabel(self)
self.__label_11.resize(150, 150)
self.__label_11.move(397, 482)
self.__label_11.setPixmap(self.__pic_11)
self.__label_11.setScaledContents(True)
self.__btn_11 = QPushButton()
self.__btn_11.setParent(self)
self.__btn_11.setStyleSheet("QPushButton{border-image: url(./UI/icons/nothing.png)}"
"QPushButton:hover{border-image: url(./UI/icons/tick_hover.png)}")
self.__btn_11.clicked.connect(self.on_btn_11_clicked)
self.__btn_11.resize(150, 150)
self.__btn_11.move(397, 482)
self.__label_12 = QLabel(self)
self.__label_12.resize(150, 150)
self.__label_12.move(563, 482)
self.__label_12.setPixmap(self.__pic_12)
self.__label_12.setScaledContents(True)
self.__btn_12 = QPushButton()
self.__btn_12.setParent(self)
self.__btn_12.setStyleSheet("QPushButton{border-image: url(./UI/icons/nothing.png)}"
"QPushButton:hover{border-image: url(./UI/icons/tick_hover.png)}")
self.__btn_12.clicked.connect(self.on_btn_12_clicked)
self.__btn_12.resize(150, 150)
self.__btn_12.move(563, 482)
# Buttons
self.__btn_see = QPushButton()
self.__btn_see.setParent(self) # set this window as the parent
self.__btn_see.clicked.connect(self.on_btn_see_clicked)
self.__btn_see.setStyleSheet("QPushButton{border-image: url(./UI/icons/see.png)}"
"QPushButton:hover{border-image: url(./UI/icons/see_hover.png)}"
"QPushButton:pressed{border-image: url(./UI/icons/see_pressed.png)}")
self.__btn_see.move(27, 58)
self.__btn_see.resize(89, 70)
self.__btn_see.setToolTip('透明度')
self.__btn_refresh = QPushButton()
self.__btn_refresh.setParent(self) # set this window as the parent
self.__btn_refresh.clicked.connect(self.refresh)
self.__btn_refresh.setStyleSheet("QPushButton{border-image: url(./UI/icons/refresh.png)}"
"QPushButton:hover{border-image: url(./UI/icons/refresh_hover.png)}"
"QPushButton:pressed{border-image: url(./UI/icons/refresh_pressed.png)}")
self.__btn_refresh.move(485, 50)
self.__btn_refresh.resize(35, 35)
self.__btn_refresh.setToolTip('随便看看')
self.__btn_min = QPushButton()
self.__btn_min.setParent(self) # set this window as the parent
self.__btn_min.clicked.connect(self.showMinimized)
self.__btn_min.setStyleSheet("QPushButton{border-image: url(./UI/icons/min.png)}"
"QPushButton:hover{border-image: url(./UI/icons/min_hover.png)}"
"QPushButton:pressed{border-image: url(./UI/icons/min_pressed.png)}")
self.__btn_min.move(1085, 57)
self.__btn_min.resize(45, 45)
self.__btn_min.setToolTip('最小化')
self.__btn_Quit = QPushButton()
self.__btn_Quit.setParent(self) # set this window as the parent
self.__btn_Quit.clicked.connect(self.close)
self.__btn_Quit.setStyleSheet("QPushButton{border-image: url(./UI/icons/close.png)}"
"QPushButton:hover{border-image: url(./UI/icons/close_hover.png)}"
"QPushButton:pressed{border-image: url(./UI/icons/close_pressed.png)}")
self.__btn_Quit.move(1135, 57)
self.__btn_Quit.resize(45, 45)
self.__btn_Quit.setToolTip('关闭')
self.__btn_eraser = QPushButton()
self.__btn_eraser.setParent(self)
self.__btn_eraser.clicked.connect(self.on_btn_eraser_clicked)
self.__btn_eraser.move(913, 522)
self.__btn_eraser.resize(39, 39)
self.__btn_eraser.setStyleSheet("QPushButton{border-image: url(./UI/icons/button_pen.png)}"
"QPushButton:hover{border-image: url(./UI/icons/button_pen_hover.png)}"
"QPushButton:pressed{border-image: url(./UI/icons/button_eraser_pressed.png)}")
self.__btn_eraser.setToolTip('切换橡皮')
self.__btn_undo = QPushButton()
self.__btn_undo.setParent(self) # set this window as the parent
self.__btn_undo.setStyleSheet("QPushButton{border-image: url(./UI/icons/button_undo.png)}"
"QPushButton:hover{border-image: url(./UI/icons/button_undo_hover.png)}"
"QPushButton:pressed{border-image: url(./UI/icons/button_undo_pressed.png)}")
self.__btn_undo.clicked.connect(self.__paintBoard.undo)
self.__btn_undo.move(955, 522)
self.__btn_undo.resize(39, 39)
self.__btn_undo.setToolTip('撤销')
self.__btn_redo = QPushButton()
self.__btn_redo.setParent(self) # set this window as the parent
self.__btn_redo.setStyleSheet("QPushButton{border-image: url(./UI/icons/button_redo.png)}"
"QPushButton:hover{border-image: url(./UI/icons/button_redo_hover.png)}"
"QPushButton:pressed{border-image: url(./UI/icons/button_redo_pressed.png)}")
self.__btn_redo.clicked.connect(self.__paintBoard.redo)
self.__btn_redo.move(997, 522)
self.__btn_redo.resize(39, 39)
self.__btn_redo.setToolTip('重做')
self.__btn_up = QPushButton()
self.__btn_up.setParent(self) # set this window as the parent
self.__btn_up.setStyleSheet("QPushButton{border-image: url(./UI/icons/button_up.png)}"
"QPushButton:hover{border-image: url(./UI/icons/button_up_hover.png)}"
"QPushButton:pressed{border-image: url(./UI/icons/button_up_pressed.png)}")
self.__btn_up.clicked.connect(self.on_btn_up_clicked)
self.__btn_up.move(1039, 522)
self.__btn_up.resize(39, 39)
self.__btn_up.setToolTip('上传文件')
self.__btn_Clear = QPushButton()
self.__btn_Clear.setParent(self) # set this window as the parent
self.__btn_Clear.setStyleSheet("QPushButton{border-image: url(./UI/icons/button_clear.png)}"
"QPushButton:hover{border-image: url(./UI/icons/button_clear_hover.png)}"
"QPushButton:pressed{border-image: url(./UI/icons/button_clear_pressed.png)}")
self.__btn_Clear.clicked.connect(self.__paintBoard.clear)
self.__btn_Clear.move(1081, 522)
self.__btn_Clear.resize(39, 39)
self.__btn_Clear.setToolTip('清空画板')
self.__btn_Save = QPushButton()
self.__btn_Save.setParent(self)
self.__btn_Save.clicked.connect(self.on_btn_save_clicked)
self.__btn_Save.setStyleSheet("QPushButton{border-image: url(./UI/icons/button_save.png)}"
"QPushButton:hover{border-image: url(./UI/icons/button_save_hover.png)}"
"QPushButton:pressed{border-image: url(./UI/icons/button_save_pressed.png)}")
self.__btn_Save.move(1123, 522)
self.__btn_Save.resize(39, 39)
self.__btn_Save.setToolTip('保存画板')
self.__btn_save_all = QPushButton()
self.__btn_save_all.setParent(self)
self.__btn_save_all.clicked.connect(self.on_btn_save_all_clicked)
self.__btn_save_all.setStyleSheet("QPushButton{border-image: url(./UI/icons/download.png)}"
"QPushButton:hover{border-image: url(./UI/icons/download_hover.png)}")
self.__btn_save_all.move(80, 670)
self.__btn_save_all.resize(42, 38)
self.__btn_save_all.setToolTip('保存选中图片')
self.__btn_eye = QPushButton()
self.__btn_eye.setParent(self) # set this window as the parent
self.__btn_eye.setStyleSheet("QPushButton{border-image: url(./UI/icons/eye.png)}"
"QPushButton:hover{border-image: url(./UI/icons/eye_hover.png)}"
"QPushButton:pressed{border-image: url(./UI/icons/eye_pressed.png)}")
self.__btn_eye.clicked.connect(self.on_btn_line_clicked)
self.__btn_eye.move(1125, 568)
self.__btn_eye.resize(35, 35)
self.__btn_eye.setToolTip('线条化图片')
self.__btn_search = QPushButton()
self.__btn_search.setParent(self) # set this window as the parent
self.__btn_search.setStyleSheet("QPushButton{border-image: url(./UI/icons/button_search.png)}"
"QPushButton:hover{border-image: url(./UI/icons/button_search_hover.png)}"
"QPushButton:pressed{border-image: url(./UI/icons/button_search_pressed.png)}")
self.__btn_search.clicked.connect(self.on_btn_search_clicked)
self.__btn_search.move(1123, 607)
self.__btn_search.resize(39, 39)
self.__btn_search.setToolTip('查找图片')
# Brush size buttons
self.__btn_small = QPushButton()
self.__btn_small.setParent(self)
self.__btn_small.clicked.connect(self.on_btn_small_clicked)
self.__btn_small.move(787, 528)
self.__btn_small.resize(30, 30)
self.__btn_small.setStyleSheet("QPushButton{border-image: url(./UI/icons/button_small.png)}"
"QPushButton:pressed{border-image: url(./UI/icons/button_small_pressed.png)}")
self.__btn_small.setToolTip('画笔粗细 - 细')
self.__btn_mid = QPushButton()
self.__btn_mid.setParent(self)
self.__btn_mid.clicked.connect(self.on_btn_mid_clicked)
self.__btn_mid.move(817, 528)
self.__btn_mid.resize(30, 30)
self.__btn_mid.setStyleSheet("QPushButton{border-image: url(./UI/icons/button_mid_selected.png)}"
"QPushButton:pressed{border-image: url(./UI/icons/button_mid_pressed.png)}")
self.__btn_mid.setToolTip('画笔粗细 - 中')
self.__btn_big = QPushButton()
self.__btn_big.setParent(self)
self.__btn_big.clicked.connect(self.on_btn_big_clicked)
self.__btn_big.move(852, 528)
self.__btn_big.resize(30, 30)
self.__btn_big.setStyleSheet("QPushButton{border-image: url(./UI/icons/button_big.png)}"
"QPushButton:pressed{border-image: url(./UI/icons/button_big_pressed.png)}")
self.__btn_big.setToolTip('画笔粗细 - 粗')
# Color swatch buttons
self.__btn_black = QPushButton()
self.__btn_black.setParent(self)
self.__btn_black.setStyleSheet("QPushButton{border-image: url(./UI/icons/nothing.png)}"
"QPushButton:hover{border-image: url(./UI/icons/border.png)}")
self.__btn_black.clicked.connect(self.on_btn_black_clicked)
self.__btn_black.move(809, 565)
self.__btn_black.resize(41, 41)
self.__btn_gray = QPushButton()
self.__btn_gray.setParent(self)
self.__btn_gray.setStyleSheet("QPushButton{border-image: url(./UI/icons/nothing.png)}"
"QPushButton:hover{border-image: url(./UI/icons/border.png)}")
self.__btn_gray.clicked.connect(self.on_btn_gray_clicked)
self.__btn_gray.move(850, 565)
self.__btn_gray.resize(41, 41)
self.__btn_brown = QPushButton()
self.__btn_brown.setParent(self)
self.__btn_brown.setStyleSheet("QPushButton{border-image: url(./UI/icons/nothing.png)}"
"QPushButton:hover{border-image: url(./UI/icons/border.png)}")
self.__btn_brown.clicked.connect(self.on_btn_brown_clicked)
self.__btn_brown.move(892, 565)
self.__btn_brown.resize(41, 41)
self.__btn_khaki = QPushButton()
self.__btn_khaki.setParent(self)
self.__btn_khaki.setStyleSheet("QPushButton{border-image: url(./UI/icons/nothing.png)}"
"QPushButton:hover{border-image: url(./UI/icons/border.png)}")
self.__btn_khaki.clicked.connect(self.on_btn_khaki_clicked)
self.__btn_khaki.move(932, 565)
self.__btn_khaki.resize(41, 41)
self.__btn_wheat = QPushButton()
self.__btn_wheat.setParent(self)
self.__btn_wheat.setStyleSheet("QPushButton{border-image: url(./UI/icons/nothing.png)}"
"QPushButton:hover{border-image: url(./UI/icons/border.png)}")
self.__btn_wheat.clicked.connect(self.on_btn_wheat_clicked)
self.__btn_wheat.move(974, 565)
self.__btn_wheat.resize(41, 41)
self.__btn_yellow = QPushButton()
self.__btn_yellow.setParent(self)
self.__btn_yellow.setStyleSheet("QPushButton{border-image: url(./UI/icons/nothing.png)}"
"QPushButton:hover{border-image: url(./UI/icons/border.png)}")
self.__btn_yellow.clicked.connect(self.on_btn_yellow_clicked)
self.__btn_yellow.move(1015, 565)
self.__btn_yellow.resize(41, 41)
self.__btn_pink = QPushButton()
self.__btn_pink.setParent(self)
self.__btn_pink.setStyleSheet("QPushButton{border-image: url(./UI/icons/nothing.png)}"
"QPushButton:hover{border-image: url(./UI/icons/border.png)}")
self.__btn_pink.clicked.connect(self.on_btn_pink_clicked)
self.__btn_pink.move(1056, 565)
self.__btn_pink.resize(41, 41)
self.__btn_white = QPushButton()
self.__btn_white.setParent(self)
self.__btn_white.setStyleSheet("QPushButton{border-image: url(./UI/icons/nothing.png)}"
"QPushButton:hover{border-image: url(./UI/icons/border.png)}")
self.__btn_white.clicked.connect(self.on_btn_white_clicked)
self.__btn_white.move(809, 606)
self.__btn_white.resize(41, 41)
self.__btn_red = QPushButton()
self.__btn_red.setParent(self)
self.__btn_red.setStyleSheet("QPushButton{border-image: url(./UI/icons/nothing.png)}"
"QPushButton:hover{border-image: url(./UI/icons/border.png)}")
self.__btn_red.clicked.connect(self.on_btn_red_clicked)
self.__btn_red.move(850, 606)
self.__btn_red.resize(41, 41)
self.__btn_purple = QPushButton()
self.__btn_purple.setParent(self)
self.__btn_purple.setStyleSheet("QPushButton{border-image: url(./UI/icons/nothing.png)}"
"QPushButton:hover{border-image: url(./UI/icons/border.png)}")
self.__btn_purple.clicked.connect(self.on_btn_purple_clicked)
self.__btn_purple.move(892, 606)
self.__btn_purple.resize(41, 41)
self.__btn_sapphire = QPushButton()
self.__btn_sapphire.setParent(self)
self.__btn_sapphire.setStyleSheet("QPushButton{border-image: url(./UI/icons/nothing.png)}"
"QPushButton:hover{border-image: url(./UI/icons/border.png)}")
self.__btn_sapphire.clicked.connect(self.on_btn_sapphire_clicked)
self.__btn_sapphire.move(932, 606)
self.__btn_sapphire.resize(41, 41)
self.__btn_blue = QPushButton()
self.__btn_blue.setParent(self)
self.__btn_blue.setStyleSheet("QPushButton{border-image: url(./UI/icons/nothing.png)}"
"QPushButton:hover{border-image: url(./UI/icons/border.png)}")
self.__btn_blue.clicked.connect(self.on_btn_blue_clicked)
self.__btn_blue.move(974, 606)
self.__btn_blue.resize(41, 41)
self.__btn_sky = QPushButton()
self.__btn_sky.setParent(self)
self.__btn_sky.setStyleSheet("QPushButton{border-image: url(./UI/icons/nothing.png)}"
"QPushButton:hover{border-image: url(./UI/icons/border.png)}")
self.__btn_sky.clicked.connect(self.on_btn_sky_clicked)
self.__btn_sky.move(1015, 606)
self.__btn_sky.resize(41, 41)
self.__btn_green = QPushButton()
self.__btn_green.setParent(self)
self.__btn_green.setStyleSheet("QPushButton{border-image: url(./UI/icons/nothing.png)}"
"QPushButton:hover{border-image: url(./UI/icons/border.png)}")
self.__btn_green.clicked.connect(self.on_btn_green_clicked)
self.__btn_green.move(1056, 606)
self.__btn_green.resize(41, 41)
self.__btn_left = QPushButton()
self.__btn_left.setParent(self)
self.__btn_left.setStyleSheet("QPushButton{border-image: url(./UI/icons/left.png)}"
"QPushButton:hover{border-image: url(./UI/icons/left_hover.png)}"
"QPushButton:pressed{border-image: url(./UI/icons/left_pressed.png)}")
self.__btn_left.clicked.connect(self.on_btn_left_clicked)
self.__btn_left.move(185, 655)
self.__btn_left.resize(65, 65)
self.__btn_left.setToolTip('上一页')
self.__more_1 = QLabel(self)
self.__more_1.setStyleSheet("QLabel{border-image: url(./UI/numbers/more.png)}")
self.__more_1.move(285, 685)
self.__more_1.resize(25, 4)
self.__more_1.setVisible(False)
self.__btn_one = QPushButton()
self.__btn_one.setParent(self)
self.__btn_one.setStyleSheet("QPushButton{border-image: url(./UI/icons/nothing.png)}")
self.__btn_one.clicked.connect(self.on_btn_left_clicked)
self.__btn_one.move(334, 678)
self.__btn_one.resize(18, 17)
self.__dot = QLabel(self)
self.__dot.setStyleSheet("QLabel{border-image: url(./UI/numbers/down.png)}")
self.__dot.move(379, 678)
self.__dot.resize(18, 17)
self.__btn_two = QPushButton()
self.__btn_two.setParent(self)
self.__btn_two.setStyleSheet("QPushButton{border-image: url(./UI/numbers/1.png)}")
self.__btn_two.move(379, 678)
self.__btn_two.resize(18, 18)
self.__btn_three = QPushButton()
self.__btn_three.setParent(self)
self.__btn_three.setStyleSheet("QPushButton{border-image: url(./UI/numbers/2.png)}")
self.__btn_three.clicked.connect(self.on_btn_right_clicked)
self.__btn_three.move(424, 678)
self.__btn_three.resize(18, 17)
self.__more_2 = QLabel(self)
self.__more_2.setStyleSheet("QLabel{border-image: url(./UI/numbers/more.png)}")
self.__more_2.move(465, 685)
self.__more_2.resize(25, 4)
self.__more_2.setVisible(True)
self.__btn_right = QPushButton()
self.__btn_right.setParent(self)
self.__btn_right.setStyleSheet("QPushButton{border-image: url(./UI/icons/right.png)}"
"QPushButton:hover{border-image: url(./UI/icons/right_hover.png)}"
"QPushButton:pressed{border-image: url(./UI/icons/right_pressed.png)}")
self.__btn_right.clicked.connect(self.on_btn_right_clicked)
self.__btn_right.move(525, 655)
self.__btn_right.resize(65, 65)
self.__btn_right.setToolTip('下一页')
self.refresh()
def on_btn_left_clicked(self):
if self.__number > 0:
self.__number -= 1
file_list = self.__files[0 + self.__number * 12: 12 + self.__number * 12]
self.load(file_list)
def on_btn_right_clicked(self):
length = len(self.__files)
if (self.__number + 2) * 12 < length:
self.__number += 1
file_list = self.__files[0 + self.__number * 12: 12 + self.__number * 12]
self.load(file_list)
elif (self.__number + 1) * 12 < length:
self.__number += 1
file_list = []
for i in range(length - self.__number * 12):
file_list.append(self.__files[self.__number * 12 + i])
self.load(file_list)
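# Pagination note: each page shows 12 thumbnails; the elif branch above builds a
# shorter file_list for the final, partially filled page before passing it to load().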
def refresh(self):
self.reset_select()
self.__files = self.__pic_files
self.__number = 0
self.__is_searched = False
random.shuffle(self.__files)
file_list = self.__files[0 + self.__number * 12: 12 + self.__number * 12]
self.load(file_list)
def load(self, file_list):
self.reset_select()
if self.__number == 0:
self.__btn_one.setStyleSheet("QPushButton{border-image: url(./UI/icons/nothing.png)}")
elif self.__number > 0:
self.__btn_one.setStyleSheet("QPushButton{border-image: url(./UI/numbers/" + str(self.__number) + ".png)}")
self.__btn_two.setStyleSheet("QPushButton{border-image: url(./UI/numbers/" + str(self.__number + 1) + ".png)}")
if self.__number <= 1:
self.__more_1.setVisible(False)
else:
self.__more_1.setVisible(True)
if (self.__number + 2) * 12 >= len(self.__files):
self.__more_2.setVisible(False)
else:
self.__more_2.setVisible(True)
if (self.__number + 1) * 12 < len(self.__files):
self.__btn_three.setStyleSheet("QPushButton"
"{border-image: url(./UI/numbers/" + str(self.__number + 2) + ".png)}")
elif self.__number * 12 < len(self.__files):
self.__btn_three.setStyleSheet("QPushButton{border-image: url(./UI/icons/nothing.png)}")
length = len(file_list)
self.__now_files = file_list
if length >= 12:
self.__pic_12.load("./pictures/" + file_list[11])
self.__label_12.setPixmap(self.__pic_12)
length = 11
else:
self.__pic_12.load("./UI/icons/nothing.png")
self.__label_12.setPixmap(self.__pic_12)
if length == 11:
self.__pic_11.load("./pictures/" + file_list[10])
self.__label_11.setPixmap(self.__pic_11)
length = 10
else:
self.__label_11.setPixmap(self.__pic_12)
if length == 10:
self.__pic_10.load("./pictures/" + file_list[9])
self.__label_10.setPixmap(self.__pic_10)
length = 9
else:
self.__label_10.setPixmap(self.__pic_12)
if length == 9:
self.__pic_9.load("./pictures/" + file_list[8])
self.__label_9.setPixmap(self.__pic_9)
length = 8
else:
self.__label_9.setPixmap(self.__pic_12)
if length == 8:
self.__pic_8.load("./pictures/" + file_list[7])
self.__label_8.setPixmap(self.__pic_8)
length = 7
else:
self.__label_8.setPixmap(self.__pic_12)
if length == 7:
self.__pic_7.load("./pictures/" + file_list[6])
self.__label_7.setPixmap(self.__pic_7)
length = 6
else:
self.__label_7.setPixmap(self.__pic_12)
if length == 6:
self.__pic_6.load("./pictures/" + file_list[5])
self.__label_6.setPixmap(self.__pic_6)
length = 5
else:
self.__label_6.setPixmap(self.__pic_12)
if length == 5:
"""Scalene: a scripting-language aware profiler for Python.
https://github.com/plasma-umass/scalene
See the paper "docs/scalene-paper.pdf" in this repository for technical
details on an earlier version of Scalene's design; note that a
number of these details have changed.
by <NAME>
https://emeryberger.com
usage: scalene test/testme.py
usage help: scalene --help
"""
import argparse
import atexit
import builtins
import dis
import functools
import gc
import inspect
import math
import mmap
import multiprocessing
import queue
import os
import random
import re
import signal
import stat
import sys
import tempfile
import threading
import time
import traceback
if sys.platform != "win32":
from scalene import get_line_atomic
from collections import defaultdict
from functools import lru_cache
from signal import Handlers, Signals
from types import CodeType, FrameType
from typing import (
Any,
Callable,
Dict,
Set,
FrozenSet,
List,
Optional,
Tuple,
Union,
cast,
)
from multiprocessing.process import BaseProcess
from scalene.scalene_arguments import ScaleneArguments
from scalene.scalene_statistics import *
from scalene.scalene_output import ScaleneOutput
from scalene.scalene_preload import ScalenePreload
from scalene.scalene_signals import ScaleneSignals
from scalene.scalene_gpu import ScaleneGPU
from scalene.scalene_parseargs import ScaleneParseArgs, StopJupyterExecution
from scalene.scalene_sigqueue import ScaleneSigQueue
assert (sys.version_info >= (3,7)), "Scalene requires Python version 3.7 or above."
# Scalene fully supports Unix-like operating systems; in
# particular, Linux, Mac OS X, and WSL 2 (Windows Subsystem for Linux 2 = Ubuntu).
# It also has partial support for Windows.
# Install our profile decorator.
def scalene_redirect_profile(func: Any) -> Any:
return Scalene.profile(func)
builtins.profile = scalene_redirect_profile # type: ignore
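# A minimal usage sketch (hypothetical user script, not part of Scalene itself):
# once builtins.profile has been installed above, a program being profiled can
# opt individual functions into Scalene's per-file reporting.
#
#     @profile
#     def hot_loop(n):
#         total = 0
#         for i in range(n):
#             total += i * i
#         return total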
class Scalene:
"""The Scalene profiler itself."""
# Debugging flag, for internal use only.
__debug: bool = False
# Whether the current profiler is a child
__is_child = -1
# the pid of the primary profiler
__parent_pid = -1
# Support for @profile
# decorated files
__files_to_profile: Dict[Filename, bool] = defaultdict(bool)
# decorated functions
__functions_to_profile: Dict[Filename, Dict[Any, bool]] = defaultdict(lambda: {})
# We use these in is_call_function to determine whether a
# particular bytecode is a function call. We use this to
# distinguish between Python and native code execution when
# running in threads.
__call_opcodes: FrozenSet[int] = frozenset(
{
dis.opmap[op_name]
for op_name in dis.opmap
if op_name.startswith("CALL_FUNCTION")
}
)
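# On CPython 3.7-3.10 this set typically contains the opcodes for CALL_FUNCTION,
# CALL_FUNCTION_KW, and CALL_FUNCTION_EX; CALL_METHOD does not start with
# "CALL_FUNCTION" and is therefore not included.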
# Cache the original thread join function, which we replace with our own version.
__original_thread_join = threading.Thread.join
# As above; we'll cache the original lock constructor and replace it with our own version.
__original_lock = threading.Lock
__args = ScaleneArguments()
__stats = ScaleneStatistics()
__output = ScaleneOutput()
__gpu = ScaleneGPU()
__output.gpu = __gpu.has_gpu()
@staticmethod
def get_original_lock() -> threading.Lock:
return Scalene.__original_lock()
# Likely names for the Python interpreter.
__all_python_names = [
os.path.basename(sys.executable),
os.path.basename(sys.executable) + str(sys.version_info.major),
os.path.basename(sys.executable)
+ str(sys.version_info.major)
+ "."
+ str(sys.version_info.minor),
]
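# For example, if sys.executable is "/usr/bin/python" on a 3.8 interpreter,
# this list is ["python", "python3", "python3.8"].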
# The most recent interval, in seconds, between interrupts for CPU sampling.
__last_cpu_sampling_rate: float = 0
# when did we last receive a signal?
__last_signal_time_virtual: float = 0
__last_signal_time_wallclock: float = 0
# path for the program being profiled
__program_path: str = ""
# temporary directory to hold aliases to Python
__python_alias_dir: Filename
## Profile output parameters
# when we output the next profile
__next_output_time: float = float("inf")
# pid for tracking child processes
__pid: int = 0
# Things that need to be in sync with the C++ side
# (see include/sampleheap.hpp, include/samplefile.hpp)
MAX_BUFSIZE = 256 # Must match SampleFile::MAX_BUFSIZE
__malloc_buf = bytearray(MAX_BUFSIZE)
__memcpy_buf = bytearray(MAX_BUFSIZE)
# file to communicate the number of malloc/free samples (+ PID)
__malloc_signal_filename = Filename(f"/tmp/scalene-malloc-signal{os.getpid()}")
__malloc_lock_filename = Filename(f"/tmp/scalene-malloc-lock{os.getpid()}")
__malloc_signal_position = 0
__malloc_lastpos = bytearray(8)
__malloc_signal_mmap = None
try:
__malloc_signal_fd = open(__malloc_signal_filename, "r")
os.unlink(__malloc_signal_fd.name)
__malloc_lock_fd = open(__malloc_lock_filename, "r+")
os.unlink(__malloc_lock_fd.name)
__malloc_signal_mmap = mmap.mmap(
__malloc_signal_fd.fileno(),
0,
mmap.MAP_SHARED,
mmap.PROT_READ,
)
__malloc_lock_mmap = mmap.mmap(
__malloc_lock_fd.fileno(),
0,
mmap.MAP_SHARED,
mmap.PROT_READ | mmap.PROT_WRITE,
)
except BaseException as exc:
# Ignore if we aren't profiling memory.
pass
# file to communicate the number of memcpy samples (+ PID)
__memcpy_signal_filename = Filename(f"/tmp/scalene-memcpy-signal{os.getpid()}")
__memcpy_lock_filename = Filename(f"/tmp/scalene-memcpy-lock{os.getpid()}")
try:
__memcpy_signal_fd = open(__memcpy_signal_filename, "r")
os.unlink(__memcpy_signal_fd.name)
__memcpy_lock_fd = open(__memcpy_lock_filename, "r+")
os.unlink(__memcpy_lock_fd.name)
__memcpy_signal_mmap = mmap.mmap(
__memcpy_signal_fd.fileno(),
0,
mmap.MAP_SHARED,
mmap.PROT_READ,
)
__memcpy_lock_mmap = mmap.mmap(
__memcpy_lock_fd.fileno(),
0,
mmap.MAP_SHARED,
mmap.PROT_READ | mmap.PROT_WRITE,
)
except BaseException:
pass
__memcpy_lastpos = bytearray(8)
# Program-specific information:
# the name of the program being profiled
__program_being_profiled = Filename("")
# Is the thread sleeping? (We use this to properly attribute CPU time.)
__is_thread_sleeping: Dict[int, bool] = defaultdict(bool) # False by default
__child_pids: Set[int] = set()
@classmethod
def clear_metrics(cls) -> None:
"""
Clears the various states so that each forked process
can start with a clean slate
"""
cls.__stats.clear()
cls.__child_pids.clear()
@classmethod
def add_child_pid(cls, pid: int) -> None:
cls.__child_pids.add(pid)
@classmethod
def remove_child_pid(cls, pid: int) -> None:
cls.__child_pids.remove(pid)
# Replacement @profile decorator function.
# We track which functions - in which files - have been decorated,
# and only report stats for those.
@staticmethod
def profile(func: Any) -> Any:
# Record the file and function name
Scalene.__files_to_profile[func.__code__.co_filename] = True
Scalene.__functions_to_profile[func.__code__.co_filename][func] = True
@functools.wraps(func)
def wrapper_profile(*args: Any, **kwargs: Any) -> Any:
value = func(*args, **kwargs)
return value
return wrapper_profile
@staticmethod
def shim(func: Callable[[Any], Any]) -> Any:
"""
Provides a decorator that, when used, calls the wrapped function with the Scalene type.
The wrapped function must be of type (s: Scalene) -> Any.
This decorator allows a function defined in a separate file to be marked as a drop-in replacement for an
existing library function. The intention is to replace a function that blocks indefinitely (which
interferes with Scalene) with a function that wakes up periodically so that signals can be delivered.
"""
func(Scalene)
# Return the function itself to the calling file so that no unusual errors
# are raised if someone attempts to call it directly.
@functools.wraps(func)
def wrapped(*args: Any, **kwargs: Any) -> Any:
return func(*args, **kwargs) # type: ignore
return wrapped
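# A hedged illustration of how a replacement module might use this shim
# (the function name below is hypothetical; the real replacements live in the
# scalene.replacement_* modules imported in __init__):
#
#     @Scalene.shim
#     def replacement_poll(scalene):
#         # Patch a blocking library call with a variant that wakes up
#         # periodically so Scalene's signals can still be delivered.
#         ...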
@staticmethod
def set_thread_sleeping(tid: int) -> None:
Scalene.__is_thread_sleeping[tid] = True
@staticmethod
def reset_thread_sleeping(tid: int) -> None:
Scalene.__is_thread_sleeping[tid] = False
@staticmethod
@lru_cache(maxsize=None)
def is_call_function(code: CodeType, bytei: ByteCodeIndex) -> bool:
"""Returns true iff the bytecode at the given index is a function call."""
for ins in dis.get_instructions(code):
if ins.offset == bytei and ins.opcode in Scalene.__call_opcodes:
return True
return False
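# A hedged sketch of the check above (illustration only, not part of Scalene):
#
#     def f(x):
#         return len(x)
#
#     call_offsets = [ins.offset for ins in dis.get_instructions(f.__code__)
#                     if ins.opname.startswith("CALL_FUNCTION")]
#     # is_call_function(f.__code__, off) is True for each off in call_offsets.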
timer_signals = True
@staticmethod
def windows_timer_loop() -> None:
"""For Windows, send periodic timer signals; launch as a background thread."""
Scalene.timer_signals = True
pid = os.getpid()
while Scalene.timer_signals:
time.sleep(Scalene.__args.cpu_sampling_rate)
os.kill(pid, ScaleneSignals.cpu_signal)
@staticmethod
def set_timer_signals() -> None:
"""Set up timer signals for CPU profiling."""
if sys.platform == "win32":
return
if Scalene.__args.use_virtual_time:
ScaleneSignals.cpu_timer_signal = signal.ITIMER_VIRTUAL
else:
ScaleneSignals.cpu_timer_signal = signal.ITIMER_REAL
# Now set the appropriate timer signal.
if ScaleneSignals.cpu_timer_signal == signal.ITIMER_REAL:
ScaleneSignals.cpu_signal = signal.SIGALRM
elif ScaleneSignals.cpu_timer_signal == signal.ITIMER_VIRTUAL:
ScaleneSignals.cpu_signal = signal.SIGVTALRM
elif ScaleneSignals.cpu_timer_signal == signal.ITIMER_PROF:
ScaleneSignals.cpu_signal = signal.SIGPROF
# NOT SUPPORTED
assert False, "ITIMER_PROF is not currently supported."
@staticmethod
def start_signal_queues() -> None:
"""Starts the signal processing queues (i.e., their threads)"""
Scalene.__cpu_sigq.start()
Scalene.__alloc_sigq.start()
Scalene.__memcpy_sigq.start()
@staticmethod
def stop_signal_queues() -> None:
"""Stops the signal processing queues (i.e., their threads)"""
Scalene.__cpu_sigq.stop()
Scalene.__alloc_sigq.stop()
Scalene.__memcpy_sigq.stop()
@staticmethod
def malloc_signal_handler(
signum: Union[Callable[[Signals, FrameType], None], int, Handlers, None],
this_frame: FrameType,
) -> None:
Scalene.__alloc_sigq.put((signum, this_frame))
@staticmethod
def free_signal_handler(
signum: Union[Callable[[Signals, FrameType], None], int, Handlers, None],
this_frame: FrameType,
) -> None:
Scalene.__alloc_sigq.put((signum, this_frame))
@staticmethod
def memcpy_signal_handler(
signum: Union[Callable[[Signals, FrameType], None], int, Handlers, None],
this_frame: FrameType,
) -> None:
Scalene.__memcpy_sigq.put((signum, this_frame))
@staticmethod
def enable_signals() -> None:
"""Set up the signal handlers to handle interrupts for profiling and start the
timer interrupts."""
if sys.platform == "win32":
Scalene.timer_signals = True
signal.signal(
ScaleneSignals.cpu_signal,
Scalene.cpu_signal_handler,
)
# On Windows, we simulate timer signals by running a background thread.
Scalene.timer_signals = True
t = threading.Thread(target=Scalene.windows_timer_loop)
t.start()
Scalene.start_signal_queues()
return
Scalene.start_signal_queues()
# Set signal handlers for memory allocation and memcpy events.
signal.signal(ScaleneSignals.malloc_signal, Scalene.malloc_signal_handler)
signal.signal(ScaleneSignals.free_signal, Scalene.free_signal_handler)
signal.signal(
ScaleneSignals.memcpy_signal,
Scalene.memcpy_signal_handler,
)
# Set every signal to restart interrupted system calls.
signal.siginterrupt(ScaleneSignals.cpu_signal, False)
signal.siginterrupt(ScaleneSignals.malloc_signal, False)
signal.siginterrupt(ScaleneSignals.free_signal, False)
signal.siginterrupt(ScaleneSignals.memcpy_signal, False)
# Turn on the CPU profiling timer to run at the sampling rate (exactly once).
signal.signal(
ScaleneSignals.cpu_signal,
Scalene.cpu_signal_handler,
)
signal.setitimer(
ScaleneSignals.cpu_timer_signal,
Scalene.__args.cpu_sampling_rate,
0,
)
def __init__(
self,
arguments: argparse.Namespace,
program_being_profiled: Optional[Filename] = None,
):
import scalene.replacement_pjoin
# Hijack lock, poll, thread_join, fork, and exit.
import scalene.replacement_lock
import scalene.replacement_thread_join
import scalene.replacement_exit
import scalene.replacement_mp_lock
if sys.platform != "win32":
import scalene.replacement_poll_selector
import scalene.replacement_fork
Scalene.__args = cast(ScaleneArguments, arguments)
Scalene.__cpu_sigq = ScaleneSigQueue(Scalene.cpu_sigqueue_processor)
Scalene.__alloc_sigq = ScaleneSigQueue(Scalene.alloc_sigqueue_processor)
Scalene.__memcpy_sigq = ScaleneSigQueue(Scalene.memcpy_sigqueue_processor)
Scalene.set_timer_signals()
if arguments.pid:
# Child process.
# We need to use the same directory as the parent.
# The parent always puts this directory as the first entry in the PATH.
# Extract the alias
# Repository: NLNZDigitalPreservation/nlnz-tools-scripts-ingestion
#!/usr/bin/env python
# SunOS:
#!/usr/bin/python2.7
# fairfax-pre-and-post-process-grouper.py
# Group source files by date and titleCode.
# Determines whether the source files have been processed by looking for a 'done' file.
# If processing has taken place, the files are moved to the post-process location, mimicking the file structure.
# If there is no 'done' file, determine whether the files have already been processed by matching filename
# and md5 hash against the post-process location. If there is no post-process match, the files go to the pre-process location.
# The pre-process output is used by readyForIngestion.
# Requires source_folder, target_pre_process_folder, target_post_process_folder, for_review_folder.
# Uses starting_date, ending_date.
# Exactly one of: do_pre_processing, do_post_processing, do_list_unique_files.
# Optional: create_targets, move_files, verbose, test.
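# Example invocation (illustrative only; folder paths and dates are placeholders):
#
#   python fairfax-pre-and-post-process-grouper.py \
#       --source_folder /data/source \
#       --target_pre_process_folder /data/pre-process \
#       --target_post_process_folder /data/post-process \
#       --for_review_folder /data/for-review \
#       --starting_date 20180101 --ending_date 20181231 \
#       --do_pre_processing --create_targets --move_files --verbose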
import argparse
import datetime
import os
import re
import platform
import subprocess
import sys
import time
move_or_copy_flags = ""
is_sun_os = False
unacceptable_parameters = False
source_folder = ""
target_pre_process_folder = ""
target_post_process_folder = ""
for_review_folder = ""
starting_date = None
ending_date = None
create_targets = False
move_files = False
DATE_PARSE_FORMAT = "%Y%m%d"
DATE_DISPLAY_FORMAT = "%Y-%m-%d"
DATE_TIME_DISPLAY_FORMAT = "%Y-%m-%d %H:%M:%S"
FILENAME_UNSAFE_CHARACTERS = " *$"
REPLACEMENT_FILENAME_SAFE_CHARACTER = "-"
FILE_PATH_SEPARATORS = "/\\"
REPLACEMENT_FILE_PATH_SEPARATOR = "_"
PDF_FILENAME_REGEX_PATTERN = "(.*)(\\.pdf)"
PDF_FILENAME_REGEX = re.compile(PDF_FILENAME_REGEX_PATTERN)
PDF_COMPONENT_REGEX_PATTERN = "(.*?)\\/(\\w{5,7})-([0-9]{8})(-\\w{3,4}.*?\\.[pP]{1}[dD]{1}[fF]{1})"
PDF_COMPONENT_REGEX = re.compile(PDF_COMPONENT_REGEX_PATTERN)
FAIRFAX_PDF_FILE_REGEX_PATTERN = "(?P<titleCode>[a-zA-Z0-9]{3,4})(?P<editionCode>[a-zA-Z0-9]{2,3})-(?P<date>\\d{8})-" +\
"(?P<qualifier>.*?)(?P<extension>\\.[pP]{1}[dD]{1}[fF]{1})"
FAIRFAX_PDF_FILE_REGEX = re.compile(FAIRFAX_PDF_FILE_REGEX_PATTERN)
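# For illustration, a hypothetical filename "DOMED-20180704-B012.pdf" matches with
# titleCode="DOM", editionCode="ED", date="20180704", qualifier="B012" and
# extension=".pdf"; FairfaxFile.__init__ below additionally re-splits a
# four-character titleCode that is paired with a two-character editionCode.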
FAIRFAX_PDF_FILE_FULL_REGEX_PATTERN = "(?P<titleCode>[a-zA-Z0-9]{3,4})(?P<editionCode>[a-zA-Z0-9]{2,3})" +\
"-(?P<date>\\d{8})-(?P<sequenceLetter>[A-Za-z]{0,2})(?P<sequenceNumber>\\d{1,4})" +\
"(?P<qualifier>.*?)\\.[pP]{1}[dD]{1}[fF]{1}"
EXISTS_IN_POST_PROCESSING_BUT_NOT_THE_SAME_FILE_FOLDER_NAME = "EXISTS-IN-POST-PROCESSING-BUT-NOT-THE-SAME-FILE"
print("Python version: " + platform.python_version() + ", complete: " + str(sys.version_info))
class FairfaxFile:
is_fairfax_pdf_file = False
is_done_file = False
is_mets_xml_file = False
is_other_file = False
def __init__(self, file_path):
self.file_name = os.path.basename(file_path)
self.dirname = os.path.dirname(file_path)
self.full_path = file_path
match = FAIRFAX_PDF_FILE_REGEX.search(self.file_name)
if match is None:
if self.file_name == "done":
self.is_done_file = True
elif self.file_name == "mets.xml":
self.is_mets_xml_file = True
else:
self.is_other_file = True
else:
self.is_fairfax_pdf_file = True
self.title_code = match.group("titleCode")
self.edition_code = match.group("editionCode")
self.file_date_string = match.group("date")
self.file_date = convert_string_to_date(self.file_date_string)
self.qualifier = match.group("qualifier")
self.extension = match.group("extension")
if len(self.title_code) == 4 and len(self.edition_code) == 2:
self.edition_code = self.title_code[3:4] + self.edition_code
self.title_code = self.title_code[0:3]
def __lt__(self, other):
if isinstance(other, FairfaxFile):
return self.full_path < other.full_path
else:
return self.full_path < other
def __hash__(self):
return hash(self.full_path)
def show_values(self):
print("FairfaxFile, file_name=" + self.file_name)
print(" dirname=" + self.dirname)
print(" is_fairfax_pdf_file=" + str(self.is_fairfax_pdf_file))
print(" is_done_file=" + str(self.is_done_file))
print(" is_mets_xml_file=" + str(self.is_mets_xml_file))
print(" is_other_file=" + str(self.is_other_file))
if self.is_fairfax_pdf_file:
print(" title_code=" + self.title_code)
print(" edition_code=" + self.edition_code)
print(" file_date=" + self.file_date.strftime(DATE_DISPLAY_FORMAT) +
", file_date_string=" + self.file_date_string)
print(" qualifier=" + self.qualifier)
print(" extension=" + self.extension)
class FileComparison:
def __init__(self, source_file, target_file, is_target_a_file, are_files_the_same):
self.source_file = source_file
self.target_file = target_file
self.is_target_a_file = is_target_a_file
self.are_files_the_same = are_files_the_same
def convert_string_to_date(date_string):
try:
return datetime.datetime.strptime(date_string, DATE_PARSE_FORMAT).date()
except ValueError:
raise argparse.ArgumentTypeError(date_string + " is not a proper date string in the format 'yyyyMMdd'")
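# For example, convert_string_to_date("20180704") returns datetime.date(2018, 7, 4),
# while a malformed string such as "2018-07-04" raises argparse.ArgumentTypeError.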
def parse_parameters():
parser = argparse.ArgumentParser(description="Process pre-and-post processed Fairfax files by grouping them by " +
"date and titleCode in appropriate pre-process and " +
"post-process folders.")
parser.add_argument('--source_folder', type=str, required=True,
help='The source-folder for the files for processing')
parser.add_argument('--target_pre_process_folder', type=str, required=True,
help='The target folder for pre-processed files')
parser.add_argument('--target_post_process_folder', type=str, required=True,
help='The target folder for post-processed files')
parser.add_argument('--for_review_folder', type=str, required=True,
help='The target folder for unrecognized files')
parser.add_argument('--starting_date', type=convert_string_to_date, default=datetime.date(2014, 1, 1),
help='The starting-date, format is yyyyMMdd')
parser.add_argument('--ending_date', type=convert_string_to_date, default=datetime.date(2019, 6, 30),
help='The ending date, format is yyyyMMdd')
parser.add_argument('--do_pre_processing', dest='do_pre_processing', action='store_true',
help="Do pre-processing. The source folder is unprocessed files " +
"(they will be checked against processed)")
parser.add_argument('--do_post_processing', dest='do_post_processing', action='store_true',
help="Do post-processing. The source folder contains processed files with a " +
"'done' file for each group")
parser.add_argument('--do_list_unique_files', dest='do_list_unique_files', action='store_true',
help='List all files with unique filenames. The source folder is unprocessed files')
parser.add_argument('--create_targets', dest='create_targets', action='store_true',
help='Indicates that the target folders will be created if they do not already exist')
parser.add_argument('--pre_process_include_non_pdf_files', dest='pre_process_include_non_pdf_files',
action='store_true',
help='Indicates that non-pdf files will be processed. By default only PDF files are processed.')
parser.add_argument('--move_files', dest='move_files', action='store_true',
help='Indicates that files will be moved to the target folder instead of copied')
parser.add_argument('--verbose', dest='verbose', action='store_true',
help='Indicates that operations will be done in a verbose manner')
parser.add_argument('--test', dest='test', action='store_true',
help='Indicates that only tests will be run')
parser.set_defaults(do_pre_processing=False, do_post_processing=False, do_list_unique_files=False,
create_targets=False, pre_process_include_non_pdf_files=False, move_files=False,
verbose=False, test=False)
args = parser.parse_args()
return args
def determine_if_sun_os():
global is_sun_os
output = subprocess.check_output(["uname"])
is_sun_os = "sunos" in str(output).lower()
print("is_sun_os=" + str(is_sun_os))
def is_directory(directory_path):
return os.path.exists(directory_path) and not os.path.isfile(directory_path)
def is_file(file_path):
return os.path.exists(file_path) and os.path.isfile(file_path)
def is_file_or_directory(file_path):
return os.path.exists(file_path)
def make_directory_path(directory_path):
if not is_directory(directory_path):
os.makedirs(directory_path)
def display_parameter_values():
print("")
print("Parameters as set:")
print(" source_folder=" + source_folder)
print(" target_pre_process_folder=" + target_pre_process_folder)
print(" target_post_process_folder=" + target_post_process_folder)
print(" for_review_folder=" + for_review_folder)
print(" starting_date=" + starting_date.strftime(DATE_DISPLAY_FORMAT))
print(" ending_date=" + ending_date.strftime(DATE_DISPLAY_FORMAT))
print(" do_pre_processing=" + str(do_pre_processing))
print(" do_post_processing=" + str(do_post_processing))
print(" do_list_unique_files=" + str(do_list_unique_files))
print(" create_targets=" + str(create_targets))
print(" pre_process_include_non_pdf_files=" + str(pre_process_include_non_pdf_files))
print(" move_files=" + str(move_files))
print(" verbose=" + str(verbose))
print(" test=" + str(test))
print("")
def display_processing_legend():
print("")
print("Processing legend:")
print(" . -- indicates a file has been processed (either moved or copied)")
print(" : -- indicates a folder has been processed (either moved or copied)")
print(" + -- indicates a duplicate pre-process file has been detected and is exactly the same as")
print(" the target file. If --move_files has been specified the source file is deleted.")
print(" # -- indicates a duplicate folder has been detected and will be copied or moved with the name of the")
print(" folder with a '-<number>' appended to it.")
print(" * -- indicates that a pre-process file already exists (and is the same) in the post-processing")
print(" target directory. In this case, the file is either not processed (if a copy) or deleted in the")
print(" source folder (if --move_files).")
print(" ? -- indicates that a pre-process file already exists (and is NOT the same) in the post-processing")
print(" target directory. In this case, the file is either copied or moved to the for_review_folder")
print(" - -- indicates that a source file has been deleted. This can happen when:")
print(" - When pre-processing and the file already exists and --move_files is specified.")
print(" = -- indicates that a source folder has been deleted. This can happen when:")
print(" - When post-processing with --move_files, the parent folder of the 'done' file is deleted.")
print("")
def process_parameters(parsed_arguments):
global source_folder
source_folder = parsed_arguments.source_folder
global target_pre_process_folder
target_pre_process_folder = parsed_arguments.target_pre_process_folder
global target_post_process_folder
target_post_process_folder = parsed_arguments.target_post_process_folder
global for_review_folder
for_review_folder = parsed_arguments.for_review_folder
global starting_date
starting_date = parsed_arguments.starting_date
global ending_date
ending_date = parsed_arguments.ending_date
global do_pre_processing
do_pre_processing = parsed_arguments.do_pre_processing
global do_post_processing
do_post_processing = parsed_arguments.do_post_processing
global do_list_unique_files
do_list_unique_files = parsed_arguments.do_list_unique_files
global create_targets
create_targets = parsed_arguments.create_targets
global pre_process_include_non_pdf_files
pre_process_include_non_pdf_files = parsed_arguments.pre_process_include_non_pdf_files
global move_files
move_files = parsed_arguments.move_files
global verbose
verbose = parsed_arguments.verbose
global test
test = parsed_arguments.test
global move_or_copy_flags
global unacceptable_parameters
display_parameter_values()
if starting_date > ending_date:
print("")
print(" ERROR starting_date=" + starting_date.strftime(DATE_DISPLAY_FORMAT) +
" must be BEFORE ending_date=" + ending_date.strftime(DATE_DISPLAY_FORMAT))
unacceptable_parameters = True
print("")
if verbose and not is_sun_os:
move_or_copy_flags = "-v"
else:
move_or_copy_flags = ""
if is_directory(source_folder):
print(" source_folder=" + source_folder + " exists and is directory, processing can take place.")
else:
print(" ERROR source_folder=" + source_folder + " does not exist or is not a directory. It must exist!")
unacceptable_parameters = True
print("")
if is_directory(target_pre_process_folder):
print(" target_pre_process_folder=" + target_pre_process_folder + " exists and is directory.")
else:
if create_targets:
print(" Creating " + target_pre_process_folder)
make_directory_path(target_pre_process_folder)
else:
print(" ERROR create_targets=" + str(create_targets) + ", therefore target_pre_process_folder=" +
target_pre_process_folder + " must exist!")
unacceptable_parameters = True
print("")
if is_directory(target_post_process_folder):
print(" target_post_process_folder=" + target_post_process_folder + " exists and is directory.")
else:
if create_targets:
print(" Creating " + target_post_process_folder)
make_directory_path(target_post_process_folder)
else:
print(" ERROR create_targets=" + str(create_targets) + ", therefore target_post_process_folder=" +
target_post_process_folder + " must exist!")
unacceptable_parameters = True
print("")
if is_directory(for_review_folder):
print(" for_review_folder=" + for_review_folder + " exists and is directory.")
else:
if create_targets:
print(" Creating " + for_review_folder)
make_directory_path(for_review_folder)
else:
print(" ERROR create_targets=" + str(create_targets) + ", therefore for_review_folder=" +
for_review_folder + " must exist!")
unacceptable_parameters = True
print("")
do_command_count = 0
if do_pre_processing:
do_command_count += 1
if do_post_processing:
do_command_count += 1
if do_list_unique_files:
do_command_count += 1
if not do_command_count == 1:
print(" Only ONE of do_pre_processing=" + str(do_pre_processing) + " AND do_post_processing=" +
str(do_post_processing) + " AND do_list_unique_files=" + str(do_list_unique_files) + " MUST be set.")
unacceptable_parameters = True
if unacceptable_parameters:
print("")
print("Parameters are incomplete or incorrect. Please try again.")
print("")
def timestamp_message(message_string):
current_time = datetime.datetime.now()
print(current_time.strftime(DATE_TIME_DISPLAY_FORMAT) + ": " + message_string)
sys.stdout.flush()
def convert_to_filename(file_path_string):
safe_filename = file_path_string
if safe_filename.startswith("/"):
safe_filename = safe_filename[1:]
for file_path_separator_character in FILE_PATH_SEPARATORS:
safe_filename = safe_filename.replace(file_path_separator_character, REPLACEMENT_FILE_PATH_SEPARATOR)
for unsafe_character in FILENAME_UNSAFE_CHARACTERS:
safe_filename = safe_filename.replace(unsafe_character, REPLACEMENT_FILENAME_SAFE_CHARACTER)
'19',
'-indexStep', '0',
'-valueStep', '172.16.58.3',
'-value', '172.16.58.3')
ixNet.commit()
# Adding overlay 20 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '20',
'-indexStep', '0',
'-valueStep', '172.16.58.3',
'-value', '172.16.58.3')
ixNet.commit()
# setting -symbolicPathName
symbolicPathNameMv = ixNet.getAttribute(pccInit2, '-symbolicPathName')
ixNet.add(symbolicPathNameMv, 'string')
ixNet.setMultiAttribute(symbolicPathNameMv + '/string',
'-pattern', 'IXIA LSP {Inc:1,1}')
ixNet.commit()
# setting -includeAssociation
includeAssociationMv = ixNet.getAttribute(pccInit2, '-includeAssociation')
ixNet.add(includeAssociationMv, 'singleValue')
ixNet.setMultiAttribute(includeAssociationMv + '/singleValue',
'-value', 'true')
# Adding overlay 1 for includeAssociation
ovrly = ixNet.add(includeAssociationMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '6',
'-indexStep', '0',
'-valueStep', 'false',
'-value', 'false')
ixNet.commit()
# Adding overlay 2 for includeAssociation
ovrly = ixNet.add(includeAssociationMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '7',
'-indexStep', '0',
'-valueStep', 'false',
'-value', 'false')
ixNet.commit()
# Adding overlay 3 for includeAssociation
ovrly = ixNet.add(includeAssociationMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '8',
'-indexStep', '0',
'-valueStep', 'false',
'-value', 'false')
ixNet.commit()
# Adding overlay 4 for includeAssociation
ovrly = ixNet.add(includeAssociationMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '9',
'-indexStep', '0',
'-valueStep', 'false',
'-value', 'false')
ixNet.commit()
# Adding overlay 5 for includeAssociation
ovrly = ixNet.add(includeAssociationMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '10',
'-indexStep', '0',
'-valueStep', 'false',
'-value', 'false')
ixNet.commit()
# Adding overlay 6 for includeAssociation
ovrly = ixNet.add(includeAssociationMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '16',
'-indexStep', '0',
'-valueStep', 'false',
'-value', 'false')
ixNet.commit()
# Adding overlay 7 for includeAssociation
ovrly = ixNet.add(includeAssociationMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '17',
'-indexStep', '0',
'-valueStep', 'false',
'-value', 'false')
ixNet.commit()
# Adding overlay 8 for includeAssociation
ovrly = ixNet.add(includeAssociationMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '18',
'-indexStep', '0',
'-valueStep', 'false',
'-value', 'false')
ixNet.commit()
# Adding overlay 9 for includeAssociation
ovrly = ixNet.add(includeAssociationMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '19',
'-indexStep', '0',
'-valueStep', 'false',
'-value', 'false')
ixNet.commit()
# Adding overlay 10 for includeAssociation
ovrly = ixNet.add(includeAssociationMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '20',
'-indexStep', '0',
'-valueStep', 'false',
'-value', 'false')
ixNet.commit()
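# The repeated blocks above and below all follow the same multivalue/overlay
# pattern. A small helper along these lines (a sketch, not part of the original
# script; it reuses only the ixNet calls already used here) could express them
# more compactly:
#
#     def add_overlays(multivalue, index_to_value):
#         """Add one single-count overlay per (index, value) pair and commit."""
#         for index, value in index_to_value.items():
#             ov = ixNet.add(multivalue, 'overlay')
#             ixNet.setMultiAttribute(ov,
#                                     '-count', '1',
#                                     '-index', str(index),
#                                     '-indexStep', '0',
#                                     '-valueStep', str(value),
#                                     '-value', str(value))
#             ixNet.commit()
#
#     add_overlays(includeAssociationMv,
#                  {i: 'false' for i in (6, 7, 8, 9, 10, 16, 17, 18, 19, 20)})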
# setting -standbyMode
standbyModeMv = ixNet.getAttribute(pccInit2, '-standbyMode')
ixNet.add(standbyModeMv, 'alternate')
# Adding overlay 1 for standbyMode
ovrly = ixNet.add(standbyModeMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '2',
'-indexStep', '0',
'-valueStep', 'false',
'-value', 'false')
ixNet.commit()
# Adding overlay 2 for standbyMode
ovrly = ixNet.add(standbyModeMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '4',
'-indexStep', '0',
'-valueStep', 'false',
'-value', 'false')
ixNet.commit()
# Adding overlay 3 for standbyMode
ovrly = ixNet.add(standbyModeMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '12',
'-indexStep', '0',
'-valueStep', 'false',
'-value', 'false')
ixNet.commit()
# Adding overlay 4 for standbyMode
ovrly = ixNet.add(standbyModeMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '14',
'-indexStep', '0',
'-valueStep', 'false',
'-value', 'false')
ixNet.commit()
# setting -protectionLsp
protectionLspMv = ixNet.getAttribute(pccInit2, '-protectionLsp')
ixNet.add(protectionLspMv, 'singleValue')
ixNet.setMultiAttribute(protectionLspMv + '/singleValue',
'-value', 'false')
# Adding overlay 1 for protectionLsp
ovrly = ixNet.add(protectionLspMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '1',
'-indexStep', '0',
'-valueStep', 'true',
'-value', 'true')
ixNet.commit()
# Adding overlay 2 for protectionLsp
ovrly = ixNet.add(protectionLspMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '2',
'-indexStep', '0',
'-valueStep', 'true',
'-value', 'true')
ixNet.commit()
# Adding overlay 3 for protectionLsp
ovrly = ixNet.add(protectionLspMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '3',
'-indexStep', '0',
'-valueStep', 'true',
'-value', 'true')
ixNet.commit()
# Adding overlay 4 for protectionLsp
ovrly = ixNet.add(protectionLspMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '4',
'-indexStep', '0',
'-valueStep', 'true',
'-value', 'true')
ixNet.commit()
# Adding overlay 5 for protectionLsp
ovrly = ixNet.add(protectionLspMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '5',
'-indexStep', '0',
'-valueStep', 'true',
'-value', 'true')
ixNet.commit()
# Adding overlay 6 for protectionLsp
ovrly = ixNet.add(protectionLspMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '11',
'-indexStep', '0',
'-valueStep', 'true',
'-value', 'true')
ixNet.commit()
# Adding overlay 7 for protectionLsp
ovrly = ixNet.add(protectionLspMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '12',
'-indexStep', '0',
'-valueStep', 'true',
'-value', 'true')
ixNet.commit()
# Adding overlay 8 for protectionLsp
ovrly = ixNet.add(protectionLspMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '13',
'-indexStep', '0',
'-valueStep', 'true',
'-value', 'true')
ixNet.commit()
# Adding overlay 9 for protectionLsp
ovrly = ixNet.add(protectionLspMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '14',
'-indexStep', '0',
'-valueStep', 'true',
'-value', 'true')
ixNet.commit()
# Adding overlay 10 for protectionLsp
ovrly = ixNet.add(protectionLspMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '15',
'-indexStep', '0',
'-valueStep', 'true',
'-value', 'true')
ixNet.commit()
# setting -associationId
associationIdMv = ixNet.getAttribute(pccInit2, '-associationId')
ixNet.add(associationIdMv, 'singleValue')
ixNet.setMultiAttribute(associationIdMv + '/singleValue',
'-value', '1')
# Adding overlay 1 for associationId
ovrly = ixNet.add(associationIdMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '1',
'-indexStep', '0',
'-valueStep', '11',
'-value', '11')
ixNet.commit()
# Adding overlay 2 for associationId
ovrly = ixNet.add(associationIdMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '2',
'-indexStep', '0',
'-valueStep', '11',
'-value', '11')
ixNet.commit()
# Adding overlay 3 for associationId
ovrly = ixNet.add(associationIdMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '3',
'-indexStep', '0',
'-valueStep', '12',
'-value', '12')
ixNet.commit()
# Adding overlay 4 for associationId
ovrly = ixNet.add(associationIdMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '4',
'-indexStep', '0',
'-valueStep', '12',
'-value', '12')
ixNet.commit()
# Adding overlay 5 for associationId
ovrly = ixNet.add(associationIdMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '5',
'-indexStep', '0',
'-valueStep', '13',
'-value', '13')
ixNet.commit()
# Adding overlay 6 for associationId
ovrly = ixNet.add(associationIdMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '6',
'-indexStep', '0',
'-valueStep', '13',
'-value', '13')
ixNet.commit()
# Adding overlay 7 for associationId
ovrly = ixNet.add(associationIdMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '11',
'-indexStep', '0',
'-valueStep', '111',
'-value', '111')
ixNet.commit()
# Adding overlay 8 for associationId
ovrly = ixNet.add(associationIdMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '12',
'-indexStep', '0',
'-valueStep', '111',
'-value', '111')
ixNet.commit()
# Adding overlay 9 for associationId
ovrly = ixNet.add(associationIdMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '13',
'-indexStep', '0',
'-valueStep', '112',
'-value', '112')
ixNet.commit()
# Adding overlay 10 for associationId
ovrly = ixNet.add(associationIdMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '14',
'-indexStep', '0',
'-valueStep', '112',
'-value', '112')
ixNet.commit()
# Adding overlay 11 for associationId
ovrly = ixNet.add(associationIdMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '15',
'-indexStep', '0',
'-valueStep', '113',
'-value', '113')
ixNet.commit()
# Adding overlay 12 for associationId
ovrly = ixNet.add(associationIdMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '16',
'-indexStep', '0',
'-valueStep', '113',
'-value', '113')
ixNet.commit()
pccInit2 = pccGroup2+'/pceInitiateLspParameters:10'
# setting -numberOfEroSubObjects
ixNet.setAttribute(pccInit2, '-numberOfEroSubObjects', '1')
ixNet.commit()
# setting -srcEndPointIpv4
srcEndPointIpv4Mv = ixNet.getAttribute(pccInit2, '-srcEndPointIpv4')
ixNet.add(srcEndPointIpv4Mv, 'singleValue')
ixNet.setMultiAttribute(srcEndPointIpv4Mv + '/singleValue',
'-value', '0.0.0.0')
# Adding overlay 1 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '1',
'-indexStep', '0',
'-valueStep', '1.0.0.11',
'-value', '1.0.0.11')
ixNet.commit()
# Adding overlay 2 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '2',
'-indexStep', '0',
'-valueStep', '1.0.0.11',
'-value', '1.0.0.11')
ixNet.commit()
# Adding overlay 3 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '3',
'-indexStep', '0',
'-valueStep', '1.0.0.12',
'-value', '1.0.0.12')
ixNet.commit()
# Adding overlay 4 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '4',
'-indexStep', '0',
'-valueStep', '1.0.0.12',
'-value', '1.0.0.12')
ixNet.commit()
# Adding overlay 5 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '5',
'-indexStep', '0',
'-valueStep', '1.0.0.13',
'-value', '1.0.0.13')
ixNet.commit()
# Adding overlay 6 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '6',
'-indexStep', '0',
'-valueStep', '1.0.0.13',
'-value', '1.0.0.13')
ixNet.commit()
# Adding overlay 7 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '7',
'-indexStep', '0',
'-valueStep', '1.0.0.14',
'-value', '1.0.0.14')
ixNet.commit()
# Adding overlay 8 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '8',
'-indexStep', '0',
'-valueStep', '1.0.0.14',
'-value', '1.0.0.14')
ixNet.commit()
# Adding overlay 9 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '9',
'-indexStep', '0',
'-valueStep', '1.0.0.15',
'-value', '1.0.0.15')
ixNet.commit()
# Adding overlay 10 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '10',
'-indexStep', '0',
'-valueStep', '1.0.0.15',
'-value', '1.0.0.15')
ixNet.commit()
# Adding overlay 11 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '11',
'-indexStep', '0',
'-valueStep', '192.168.127.12',
'-value', '192.168.127.12')
ixNet.commit()
# Adding overlay 12 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '12',
'-indexStep', '0',
'-valueStep', '192.168.127.12',
'-value', '192.168.127.12')
ixNet.commit()
# Adding overlay 13 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '13',
'-indexStep', '0',
'-valueStep', '192.168.3.11',
'-value', '192.168.3.11')
ixNet.commit()
# Adding overlay 14 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '14',
'-indexStep', '0',
'-valueStep', '192.168.3.11',
'-value', '192.168.3.11')
ixNet.commit()
# Adding overlay 15 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '15',
'-indexStep', '0',
'-valueStep', '172.16.17.32',
'-value', '172.16.17.32')
ixNet.commit()
# Adding overlay 16 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '16',
'-indexStep', '0',
'-valueStep', '172.16.17.32',
'-value', '172.16.17.32')
ixNet.commit()
# Adding overlay 17 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '17',
'-indexStep', '0',
'-valueStep', '192.168.127.12',
'-value', '192.168.127.12')
ixNet.commit()
# Adding overlay 18 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '18',
'-indexStep', '0',
'-valueStep', '192.168.127.12',
'-value', '192.168.127.12')
ixNet.commit()
# Adding overlay 19 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '19',
'-indexStep', '0',
'-valueStep', '192.168.127.12',
'-value', '192.168.127.12')
ixNet.commit()
# Adding overlay 20 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '20',
'-indexStep', '0',
'-valueStep', '192.168.127.12',
'-value', '192.168.127.12')
ixNet.commit()
# setting -destEndPointIpv4
destEndPointIpv4Mv = ixNet.getAttribute(pccInit2, '-destEndPointIpv4')
ixNet.add(destEndPointIpv4Mv, 'singleValue')
ixNet.setMultiAttribute(destEndPointIpv4Mv + '/singleValue',
'-value', '0.0.0.0')
# Adding overlay 1 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '1',
'-indexStep', '0',
'-valueStep', '2.0.0.11',
'-value', '2.0.0.11')
ixNet.commit()
# Adding overlay 2 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '2',
'-indexStep', '0',
'-valueStep', '2.0.0.11',
'-value', '2.0.0.11')
ixNet.commit()
# Adding overlay 3 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '3',
'-indexStep', '0',
'-valueStep', '2.0.0.12',
'-value', '2.0.0.12')
ixNet.commit()
# Adding overlay 4 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '4',
'-indexStep', '0',
'-valueStep', '2.0.0.12',
'-value', '2.0.0.12')
ixNet.commit()
# Adding overlay 5 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '5',
'-indexStep', '0',
'-valueStep', '2.0.0.13',
'-value', '2.0.0.13')
ixNet.commit()
# Adding overlay 6 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '6',
'-indexStep', '0',
'-valueStep', '2.0.0.13',
'-value', '2.0.0.13')
ixNet.commit()
# Adding overlay
# src/baseline/exnn/exnn/base.py
import os
import numpy as np
from itertools import *
import tensorflow as tf
from matplotlib import gridspec
from matplotlib import pyplot as plt
from matplotlib import lines as mlines
from abc import ABCMeta, abstractmethod
from sklearn.model_selection import train_test_split
from .layers import ProjectLayer, SubnetworkBlock, OutputLayer, CategNetBlock
class BaseNet(tf.keras.Model, metaclass=ABCMeta):
"""
Abstract Class.
"""
@abstractmethod
def __init__(self, meta_info,
subnet_num=10,
subnet_arch=[10, 6],
task_type="Regression",
proj_method="orthogonal",
activation_func=tf.tanh,
bn_flag=True,
lr_bp=0.001,
l1_proj=0.001,
l1_subnet=0.001,
l2_smooth=0.00001,
batch_size=1000,
training_epochs=2000,
tuning_epochs=500,
beta_threshold=0.05,
verbose=False,
val_ratio=0.2,
early_stop_thres=1000,
random_state=0):
super(BaseNet, self).__init__()
# Parameter initiation
self.subnet_num = subnet_num
self.subnet_arch = subnet_arch
self.task_type = task_type
self.proj_method = proj_method
self.activation_func = activation_func
self.bn_flag = bn_flag
self.lr_bp = lr_bp
self.l1_proj = l1_proj
self.l1_subnet = l1_subnet
self.l2_smooth = l2_smooth
self.batch_size = batch_size
self.beta_threshold = beta_threshold
self.tuning_epochs = tuning_epochs
self.training_epochs = training_epochs
self.verbose = verbose
self.val_ratio = val_ratio
self.early_stop_thres = early_stop_thres
self.random_state = random_state
np.random.seed(random_state)
tf.random.set_seed(random_state)
self.dummy_values_ = {}
self.cfeature_num_ = 0
self.nfeature_num_ = 0
self.cfeature_list_ = []
self.nfeature_list_ = []
self.cfeature_index_list_ = []
self.nfeature_index_list_ = []
self.feature_list_ = []
self.feature_type_list_ = []
for idx, (feature_name, feature_info) in enumerate(meta_info.items()):
if feature_info["type"] == "target":
continue
if feature_info["type"] == "categorical":
self.cfeature_num_ += 1
self.cfeature_list_.append(feature_name)
self.cfeature_index_list_.append(idx)
self.feature_type_list_.append("categorical")
self.dummy_values_.update({feature_name:meta_info[feature_name]["values"]})
else:
self.nfeature_num_ += 1
self.nfeature_list_.append(feature_name)
self.nfeature_index_list_.append(idx)
self.feature_type_list_.append("continuous")
self.feature_list_.append(feature_name)
# build
self.subnet_num = min(self.subnet_num, self.nfeature_num_)
self.proj_layer = ProjectLayer(index_list=self.nfeature_index_list_,
subnet_num=self.subnet_num,
l1_proj=self.l1_proj,
method=self.proj_method)
self.categ_blocks = CategNetBlock(feature_list=self.feature_list_,
cfeature_index_list=self.cfeature_index_list_,
dummy_values=self.dummy_values_,
bn_flag=self.bn_flag)
self.subnet_blocks = SubnetworkBlock(subnet_num=self.subnet_num,
subnet_arch=self.subnet_arch,
activation_func=self.activation_func,
l2_smooth=self.l2_smooth,
bn_flag=self.bn_flag)
self.output_layer = OutputLayer(subnet_num=self.subnet_num + self.cfeature_num_, l1_subnet=self.l1_subnet)
self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.lr_bp)
if self.task_type == "Regression":
self.loss_fn = tf.keras.losses.MeanSquaredError()
elif self.task_type == "Classification":
self.loss_fn = tf.keras.losses.BinaryCrossentropy()
else:
raise ValueError('The task type is not supported')
def call(self, inputs, training=False):
self.proj_outputs = self.proj_layer(inputs, training=training)
self.categ_outputs = self.categ_blocks(inputs, training=training)
self.subnet_outputs = self.subnet_blocks(self.proj_outputs, training=training)
concat_list = []
if self.nfeature_num_ > 0:
concat_list.append(self.subnet_outputs)
if self.cfeature_num_ > 0:
concat_list.append(self.categ_outputs)
if self.task_type == "Regression":
output = self.output_layer(tf.concat(concat_list, 1))
elif self.task_type == "Classification":
output = tf.nn.sigmoid(self.output_layer(tf.concat(concat_list, 1)))
else:
raise ValueError('The task type is not supported')
return output
@tf.function
def predict_graph(self, x):
return self.__call__(tf.cast(x, tf.float32), training=False)
def predict(self, x):
return self.predict_graph(x).numpy()
@tf.function
def evaluate_graph(self, x, y, training=False):
return self.loss_fn(y, self.__call__(tf.cast(x, tf.float32), training=training))
def evaluate(self, x, y, training=False):
return self.evaluate_graph(x, y, training=training).numpy()
@tf.function
def train_step_init(self, inputs, labels):
pass
@tf.function
def train_step_finetune(self, inputs, labels):
pass
@property
def projection_indices_(self):
"""Return the projection indices.
Returns
-------
projection_indices_ : ndarray of shape (d, )
"""
projection_indices = np.array([])
if self.nfeature_num_ > 0:
active_sim_subnets = [item["indice"] for key, item in self.active_subnets_.items()]
projection_indices = self.proj_layer.proj_weights.numpy()[:, active_sim_subnets]
return projection_indices
@property
def orthogonality_measure_(self):
"""Return the orthogonality measure (the lower, the better).
Returns
-------
orthogonality_measure_ : float scalar
"""
ortho_measure = np.nan
if self.nfeature_num_ > 0:
ortho_measure = np.linalg.norm(np.dot(self.projection_indices_.T,
self.projection_indices_) - np.eye(self.projection_indices_.shape[1]))
if self.projection_indices_.shape[1] > 1:
ortho_measure /= self.projection_indices_.shape[1]
return ortho_measure
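# In matrix terms the measure above is ||P^T P - I||_F, where the columns of P are
# the active projection indices, normalized by the number of active projections
# when there is more than one; it is exactly zero for orthogonal projections.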
@property
def importance_ratios_(self):
"""Return the estimator importance ratios (the higher, the more important the feature).
Returns
-------
importance_ratios_ : ndarray of shape (n_estimators,)
The estimator importances.
"""
importance_ratios_ = {**self.active_subnets_, **self.active_dummy_subnets_}
return importance_ratios_
@property
def active_subnets_(self):
"""
Return the information of sim subnetworks
"""
if self.bn_flag:
beta = self.output_layer.output_weights.numpy()
else:
subnet_norm = [self.subnet_blocks.subnets[i].moving_norm.numpy()[0] for i in range(self.subnet_num)]
categ_norm = [self.categ_blocks.categnets[i].moving_norm.numpy()[0] for i in range(self.cfeature_num_)]
beta = self.output_layer.output_weights.numpy() * np.hstack([subnet_norm, categ_norm]).reshape([-1, 1])
beta = beta * self.output_layer.output_switcher.numpy()
importance_ratio = (np.abs(beta) / np.sum(np.abs(beta))).reshape([-1])
sorted_index = np.argsort(importance_ratio)
active_index = sorted_index[importance_ratio[sorted_index].cumsum() > 0][::-1]
active_subnets = {"Subnet " + str(indice + 1):{"type":"sim_net",
"indice":indice,
"rank":idx,
"beta":self.output_layer.output_weights.numpy()[indice],
"ir":importance_ratio[indice]}
for idx, indice in enumerate(active_index) if indice in range(self.subnet_num)}
return active_subnets
@property
def active_dummy_subnets_(self):
"""
Return the information of active categorical features
"""
if self.bn_flag:
beta = self.output_layer.output_weights.numpy()
else:
subnet_norm = [self.subnet_blocks.subnets[i].moving_norm.numpy()[0] for i in range(self.subnet_num)]
categ_norm = [self.categ_blocks.categnets[i].moving_norm.numpy()[0] for i in range(self.cfeature_num_)]
beta = self.output_layer.output_weights.numpy() * np.hstack([subnet_norm, categ_norm]).reshape([-1, 1])
beta = beta * self.output_layer.output_switcher.numpy()
importance_ratio = (np.abs(beta) / np.sum(np.abs(beta))).reshape([-1])
sorted_index = np.argsort(importance_ratio)
active_index = sorted_index[importance_ratio[sorted_index].cumsum() > 0][::-1]
active_dummy_subnets = {self.cfeature_list_[indice - self.subnet_num]:{"type":"dummy_net",
"indice":indice,
"rank":idx,
"beta":self.output_layer.output_weights.numpy()[indice],
"ir":importance_ratio[indice]}
for idx, indice in enumerate(active_index) if indice in range(self.subnet_num, self.subnet_num + self.cfeature_num_)}
return active_dummy_subnets
def estimate_density(self, x):
density, bins = np.histogram(x, bins=10, density=True)
return density, bins
def get_active_subnets(self, beta_threshold=0):
if self.bn_flag:
beta = self.output_layer.output_weights.numpy()
else:
subnet_norm = [self.subnet_blocks.subnets[i].moving_norm.numpy()[0] for i in range(self.subnet_num)]
categ_norm = [self.categ_blocks.categnets[i].moving_norm.numpy()[0] for i in range(self.cfeature_num_)]
beta = self.output_layer.output_weights.numpy() * np.hstack([subnet_norm, categ_norm]).reshape([-1, 1])
beta = beta * self.output_layer.output_switcher.numpy()
subnets_scale = (np.abs(beta) / np.sum(np.abs(beta))).reshape([-1])
sorted_index = np.argsort(subnets_scale)
active_index = sorted_index[subnets_scale[sorted_index].cumsum()>beta_threshold][::-1]
active_me_index = []
active_categ_index = []
for i in active_index:
if i in range(self.subnet_num):
active_me_index.append(i)
elif i in range(self.subnet_num, self.subnet_num + self.cfeature_num_):
active_categ_index.append(i)
return active_me_index, active_categ_index, beta, subnets_scale
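# For illustration: the importance ratios are sorted in ascending order and the
# smallest subnetworks whose cumulative ratio stays at or below beta_threshold
# are dropped; the surviving sim and categorical subnetworks are returned in
# descending order of importance.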
def fit(self, train_x, train_y):
self.err_val = []
self.err_train = []
n_samples = train_x.shape[0]
indices = np.arange(n_samples)
if self.task_type == "Regression":
tr_x, val_x, tr_y, val_y, tr_idx, val_idx = train_test_split(train_x, train_y, indices, test_size=self.val_ratio,
random_state=self.random_state)
elif self.task_type == "Classification":
tr_x, val_x, tr_y, val_y, tr_idx, val_idx = train_test_split(train_x, train_y, indices, test_size=self.val_ratio,
stratify=train_y, random_state=self.random_state)
self.tr_idx = tr_idx
self.val_idx = val_idx
# 1. Training
if self.verbose:
print("Initial training.")
last_improvement = 0
best_validation = np.inf
train_size = tr_x.shape[0]
for epoch in range(self.training_epochs):
shuffle_index = np.arange(tr_x.shape[0])
np.random.shuffle(shuffle_index)
tr_x = tr_x[shuffle_index]
tr_y = tr_y[shuffle_index]
for iterations in range(train_size // self.batch_size):
offset = (iterations * self.batch_size) % train_size
batch_xx = tr_x[offset:(offset + self.batch_size), :]
batch_yy = tr_y[offset:(offset + self.batch_size)]
self.train_step_init(tf.cast(batch_xx, tf.float32), batch_yy)
self.err_train.append(self.evaluate(tr_x, tr_y, training=False))
self.err_val.append(self.evaluate(val_x, val_y, training=False))
if self.verbose & (epoch % 1 == 0):
print("Training epoch: %d, train loss: %0.5f, val loss: %0.5f" %
(epoch + 1, self.err_train[-1], self.err_val[-1]))
if self.err_val[-1] < best_validation:
best_validation = self.err_val[-1]
last_improvement = epoch
if epoch - last_improvement > self.early_stop_thres:
if self.verbose:
print("Early stop at epoch %d, With Testing Error: %0.5f" % (epoch + 1, self.err_val[-1]))
break
# 2. pruning
if self.verbose:
print("Subnetwork pruning.")
self.evaluate(tr_x, tr_y, training=True) # update the batch normalization using all the training data
active_me_index, active_categ_index, _, _ = self.get_active_subnets(self.beta_threshold)
scal_factor = np.zeros((self.subnet_num + self.cfeature_num_, 1))
scal_factor[active_me_index] = 1
scal_factor[active_categ_index] = 1
self.output_layer.output_switcher.assign(tf.constant(scal_factor, dtype=tf.float32))
# 3. fine tune
if self.verbose:
print("Fine tuning.")
last_improvement = 0
best_validation = np.inf
for epoch in range(self.tuning_epochs):
shuffle_index = np.arange(tr_x.shape[0])
np.random.shuffle(shuffle_index)
tr_x = tr_x[shuffle_index]
tr_y = tr_y[shuffle_index]
for iterations in range(train_size // self.batch_size):
offset = (iterations * self.batch_size) % train_size
batch_xx = tr_x[offset:(offset + self.batch_size), :]
batch_yy = tr_y[offset:(offset + self.batch_size)]
self.train_step_finetune(tf.cast(batch_xx, tf.float32), batch_yy)
self.err_train.append(self.evaluate(tr_x, tr_y, training=False))
self.err_val.append(self.evaluate(val_x, val_y, training=False))
if self.verbose & (epoch % 1 == 0):
print("Tuning epoch: %d, train loss: %0.5f, val loss: %0.5f" %
(epoch + 1, self.err_train[-1], self.err_val[-1]))
if self.err_val[-1] < best_validation:
best_validation = self.err_val[-1]
last_improvement = epoch
if epoch - last_improvement > self.early_stop_thres:
if self.verbose:
print("Early stop at epoch %d, With Testing Error: %0.5f" % (epoch + 1, self.err_val[-1]))
break
# record the key values in the network
self.subnet_input_min = []
self.subnet_input_max = []
self.dummy_density_ = {}
self.subnet_input_density = []
self.evaluate(tr_x, tr_y, training=True) # update the batch normalization using all the training data
for i in range(self.subnet_num):
xb = np.dot(tr_x[:,self.nfeature_index_list_], self.proj_layer.get_weights()[0])[:, i]
min_ = xb.min()
max_ = xb.max()
self.subnet_input_min.append(min_)
self.subnet_input_max.append(max_)
self.subnet_input_density.append(self.estimate_density(xb))
for idx in range(self.cfeature_num_):
feature_name = self.cfeature_list_[idx]
feature_indice = self.cfeature_index_list_[idx]
unique, counts = np.unique(tr_x[:, feature_indice], return_counts=True)
density = np.zeros((len(self.dummy_values_[feature_name])))
density[unique.astype(int)] = counts / tr_x.shape[0]
self.dummy_density_.update({feature_name:{"density":{"values":self.dummy_values_[feature_name],
"scores":density}}})
def visualize(self, folder="./results/", name="demo", save_png=False, save_eps=False):
input_size = self.nfeature_num_
coef_index = self.proj_layer.proj_weights.numpy()
active_index, active_categ_index, beta, subnets_scale = self.get_active_subnets()
max_ids = len(active_index) + len(active_categ_index)
fig = plt.figure(figsize=(12, int(max_ids * 4.5)))
for i, indice in enumerate(active_index):
subnet = self.subnet_blocks.subnets[indice]
min_ = self.subnet_input_min[indice]
max_ = self.subnet_input_max[indice]
subnets_inputs = np.linspace(min_, max_, 1000).reshape([-1, 1])
subnets_outputs = np.sign(beta[indice]) * subnet.__call__(tf.cast(tf.constant(subnets_inputs), tf.float32)).numpy()
if coef_index[np.argmax(np.abs(coef_index[:, indice])), indice] < 0:
coef_index[:, indice] = - coef_index[:, indice]
subnets_inputs = - subnets_inputs
ax1 = fig.add_subplot(np.int(max_ids), 2, i * 2 + 1)
ax1.plot(subnets_inputs, subnets_outputs)
xint = np.round(np.linspace(np.min(subnets_inputs), np.max(subnets_inputs), 5), 2)
ax1.set_xticks(xint)
ax1.set_xticklabels(["{0: .2f}".format(j) for j in xint], fontsize=14)
yint = np.round(np.linspace(np.min(subnets_outputs), np.max(subnets_outputs), 6), 2)
ax1.set_yticks(yint)
ax1.set_yticklabels(["{0: .2f}".format(j) for j in yint], fontsize=14)
ax1.set_ylim([np.min(subnets_outputs) - (np.max(subnets_outputs) - np.min(subnets_outputs))*0.1,
np.max(subnets_outputs) + (np.max(subnets_outputs) - np.min(subnets_outputs))*0.25])
ax1.text(0.25, 0.9,'IR: ' + str(np.round(100 * subnets_scale[indice], 1)) + "%",
fontsize=24, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)
ax2 = fig.add_subplot(np.int(max_ids), 2, i * 2 + 2)
ax2.bar(np.arange(input_size), coef_index.T[indice, :input_size])
ax2.set_xticks(np.arange(input_size))
ax2.set_xticklabels(["X" + str(j + 1) for j in range(input_size)])
yint = np.round(np.linspace(
0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.114157,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 1.9249,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0449842,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.238021,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.240951,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.106175,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.171256,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0864445,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.363876,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.084492,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.41377,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0455207,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00445346,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0491258,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0329361,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0946465,
'Execution Unit/Register Files/Runtime Dynamic': 0.0373895,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.114743,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.284427,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.32909,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000346191,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000346191,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000317628,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000131762,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000473129,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00148314,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00274417,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0316623,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.01399,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.074327,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.107539,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.33025,
'Instruction Fetch Unit/Runtime Dynamic': 0.217756,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0429333,
'L2/Runtime Dynamic': 0.0040241,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.45577,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.590351,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0394261,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.039426,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.64194,
'Load Store Unit/Runtime Dynamic': 0.824213,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0972181,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.194436,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.034503,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0351461,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.125223,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0121899,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.340602,
'Memory Management Unit/Runtime Dynamic': 0.0473359,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 15.359,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.119744,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00624758,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0521402,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
| |
sketch
A ConstrainedSketch object.
"""
pass
def assignThickness(self, faces: tuple[Face], thickness: float = None, topFaces: tuple[Face] = (),
bottomFaces: tuple[Face] = ()):
"""This method assigns thickness data to shell faces. The thickness can be used while
assigning shell and membrane sections to faces.
Parameters
----------
faces
A sequence of Face objects specifying the regions where thickness will be applied.
thickness
            A Float specifying the thickness along the given *faces*. Either *thickness*,
*topFaces*, or *bottomFaces* must be specified.
topFaces
A sequence of Face objects whose distance to *faces* argument is used to calculate the
thickness along the *faces*. The combination of *topFaces* and *bottomFaces* determines
the thickness and the offset of the elements. If *bottomFaces* is not specified then the
thickness is twice the distance to the *topFaces*. This argument will be ignored if
*thickness* is specified. Either *thickness*, *topFaces*, or *bottomFaces* must be
specified.
bottomFaces
A sequence of Face objects whose distance to *faces* is used to calculate the thickness
along the *faces*. The combination of *topFaces* and *bottomFaces* determines the
thickness and the offset of the elements. If *topFaces* is not specified then the
thickness is twice the distance to the *bottomFaces*. This argument will be ignored if
*thickness* is specified. Either *thickness*, *topFaces*, or *bottomFaces* must be
specified.
"""
pass
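    # Usage sketch (assumes an interactive Abaqus/CAE session; the model and
    # part names below are hypothetical, not from this file):
    #   part = mdb.models['Model-1'].parts['Plate']
    #   part.assignThickness(faces=part.faces, thickness=2.0)
    # Alternatively, topFaces/bottomFaces can be passed instead of thickness to
    # derive the thickness from the distance between face pairs, as described above.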
def backup(self):
"""This method makes a backup copy of the features in the part. Use the restore method to
retrieve the part's features from the backup.
"""
pass
def checkGeometry(self, detailed: Boolean = OFF, reportFacetErrors: Boolean = OFF, level: int = None):
"""This method checks the validity of the geometry of the part and prints a count of all
topological entities on the part (faces, edges, vertices, etc.).
Parameters
----------
detailed
A Boolean specifying whether detailed output will be printed to the replay file. The
default value is OFF.
reportFacetErrors
A Boolean specifying whether faces are checked for proper facetting. The default value
is OFF.
level
An Int specifying which level of checking is performed. Values can range from 20 to 70,
with higher values reporting less and less important errors. The default value is 20,
which reports all critical errors. When the default value is used, the stored validity
status is updated to agree with the result of this check.
"""
pass
def clearGeometryCache(self):
"""This method clears the geometry cache. Clearing the geometry cache reduces the amount of
memory being used to cache part features.
"""
pass
def deleteAllFeatures(self):
"""This method deletes all the features in the part.
"""
pass
def deleteFeatures(self, featureNames: tuple):
"""This method deletes the given features.
Parameters
----------
featureNames
A sequence of Strings specifying the feature names that will be deleted from the part.
"""
pass
def getAngle(self, plane1: str, plane2: str, line1: str, line2: str, commonVertex: str = ''):
"""This method returns the angle between the specified entities.
Parameters
----------
plane1
A Face, MeshFace, or a Datum object specifying the first plane. The Datum object must
represent a datum plane. The *plane1* and *line1* arguments are mutually exclusive. One
of them must be specified.
plane2
A Face, MeshFace, or a Datum object specifying the second plane. The Datum object must
represent a datum plane. The *plane2* and *line2* arguments are mutually exclusive. One
of them must be specified.
line1
An Edge, MeshEdge, or a Datum object specifying the first curve. The Datum object must
represent a datum axis. The *plane1* and *line1* arguments are mutually exclusive. One
of them must be specified.
line2
An Edge, MeshEdge, or a Datum object specifying the second curve. The Datum object must
represent a datum axis. The *plane2* and *line2* arguments are mutually exclusive. One
of them must be specified.
commonVertex
If the two selected Edge objects have more than one vertex in common, this ConstrainedSketchVertex object
specifies the vertex at which to evaluate the angle.
Returns
-------
angle: float
A Float specifying the angle between the specified entities. If you provide a plane as
an argument, Abaqus/CAE computes the angle using the normal to the plane.
"""
pass
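    # Usage sketch (hypothetical names; an Abaqus/CAE session is assumed):
    #   angle = part.getAngle(plane1=part.faces[0], plane2=part.faces[1],
    #                         line1=None, line2=None)
    # The stub signature above lists all four arguments without defaults, so the
    # unused pair is passed as None here; planes and lines are mutually exclusive.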
def getArea(self, faces: tuple[Face], relativeAccuracy: float = 0):
"""This method returns the total surface area of a given face or group of faces.
Parameters
----------
faces
A sequence of Face objects whose area the method will calculate.
relativeAccuracy
A Float specifying that the area computation should stop when the specified relative
accuracy has been achieved. The default value is 0.000001 (0.0001%).
Returns
-------
area: float
A Float specifying the sum of the calculated areas of the given faces.
"""
pass
def getAssociatedCADPaths(self):
"""This method returns the paths to the associated CAD part and root file. These are only
        available if the part was imported from one of the supported CAD software packages using the
Associative Import capability. The root file can be the assembly file or the part file,
        depending on which one was imported.
Returns
        -------
paths: tuple
A sequence containing the path to the associated CAD part and assembly file
"""
pass
def getCADParameters(self):
"""This method returns the names and values of the CAD parameters associated with the part.
These are only available if the part was imported from one of the supported CAD
        software packages using the Associative Import capability, and if the parameter names defined in
that CAD software are prefixed with the string ABQ.
Returns
-------
paras: dict
A dictionary object representing a map of the name of the parameter and its associated
value.
"""
pass
def getCentroid(self, faces: tuple[Face], cells: tuple[Face], relativeAccuracy: float = 0):
"""Depending on the arguments provided, this method returns the following:
- The location of the centroid of a given face or group of faces.
        - The location of the centroid of a given cell or group of cells.
        Parameters
        ----------
        faces
A sequence of Face objects whose centroid the method will calculate. The arguments
*faces* and *cells* are mutually exclusive.
cells
            A sequence of Cell objects whose centroid the method will calculate. The arguments
*faces* and *cells* are mutually exclusive.
relativeAccuracy
A Float specifying that the centroid computation should stop when the specified relative
accuracy has been achieved. The default value is 0.000001 (0.0001%).
Returns
-------
centroid: tuple[float]
A sequence of Floats specifying the *X*-, *Y*-, and *Z*-coordinates of the centroid.
"""
pass
def getCoordinates(self, entity: str):
"""This method returns the coordinates of specified point.
Parameters
----------
entity
A ConstrainedSketchVertex, Datum point, MeshNode, or ReferencePoint specifying the entity to query.
Returns
-------
A tuple of 3 Floats representing the coordinates of the specified point.
"""
pass
def getCurvature(self, edges: tuple[Edge], samplePoints: int = 100):
"""This method returns the maximum curvature of a given edge or group of edges. For an arc,
the curvature is constant over the entire edge, and equal to the inverse of the radius.
For a straight line, the curvature is constant and equal to 0. For a spline edge, the
        curvature varies over a range, and this method computes the maximum.
Parameters
----------
edges
A sequence of Edge objects whose curvature the method will calculate.
samplePoints
An Int specifying the number of points along each edge at which the curvature will be
computed. The higher the number of sample points, the better the accuracy of the
computation. The default value is 100.
Returns
-------
curvature: float
A Float specifying the maximum curvature.
"""
pass
def getDistance(self, entity1: str, entity2: str):
"""Depending on the arguments provided, this method returns one of the following:
- The | |
# Source: sometallgit/PdfToTrello, Python/Lib/pdfrw/pdfreader.py
# A part of pdfrw (https://github.com/pmaupin/pdfrw)
# Copyright (C) 2006-2015 <NAME>, Austin, Texas
# Copyright (C) 2012-2015 <NAME>
# MIT license -- See LICENSE.txt for details
'''
The PdfReader class reads an entire PDF file into memory and
parses the top-level container objects. (It does not parse
into streams.) The object subclasses PdfDict, and the
document pages are stored in a list in the pages attribute
of the object.
'''
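# Usage sketch (the file name is hypothetical): read a PDF and inspect it.
#   from pdfrw import PdfReader
#   reader = PdfReader('example.pdf')
#   print(len(reader.pages))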
import gc
import binascii
import collections
import itertools
from .errors import PdfParseError, log
from .tokens import PdfTokens
from .objects import PdfDict, PdfArray, PdfName, PdfObject, PdfIndirect
from .uncompress import uncompress
from .py23_diffs import convert_load, convert_store, iteritems
class PdfReader(PdfDict):
def findindirect(self, objnum, gennum, PdfIndirect=PdfIndirect, int=int):
''' Return a previously loaded indirect object, or create
a placeholder for it.
'''
key = int(objnum), int(gennum)
result = self.indirect_objects.get(key)
if result is None:
self.indirect_objects[key] = result = PdfIndirect(key)
self.deferred_objects.add(key)
result._loader = self.loadindirect
return result
def readarray(self, source, PdfArray=PdfArray):
''' Found a [ token. Parse the tokens after that.
'''
specialget = self.special.get
result = []
pop = result.pop
append = result.append
for value in source:
if value in ']R':
if value == ']':
break
generation = pop()
value = self.findindirect(pop(), generation)
else:
func = specialget(value)
if func is not None:
value = func(source)
append(value)
return PdfArray(result)
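    # Example of the indirect-reference handling above (illustrative): for the
    # token stream "[ 3 0 R 5 0 R ]", each 'R' pops the generation (0) and then
    # the object number (3, then 5) from the result list and appends a single
    # placeholder obtained from findindirect(3, 0) / findindirect(5, 0).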
def readdict(self, source, PdfDict=PdfDict):
''' Found a << token. Parse the tokens after that.
'''
specialget = self.special.get
result = PdfDict()
next = source.next
tok = next()
while tok != '>>':
if not tok.startswith('/'):
source.error('Expected PDF /name object')
tok = next()
continue
key = tok
value = next()
func = specialget(value)
if func is not None:
value = func(source)
tok = next()
else:
tok = next()
if value.isdigit() and tok.isdigit():
tok2 = next()
if tok2 != 'R':
source.error('Expected "R" following two integers')
tok = tok2
continue
value = self.findindirect(value, tok)
tok = next()
result[key] = value
return result
def empty_obj(self, source, PdfObject=PdfObject):
''' Some silly git put an empty object in the
file. Back up so the caller sees the endobj.
'''
source.floc = source.tokstart
def badtoken(self, source):
''' Didn't see that coming.
'''
source.exception('Unexpected delimiter')
def findstream(self, obj, tok, source, len=len):
''' Figure out if there is a content stream
following an object, and return the start
pointer to the content stream if so.
(We can't read it yet, because we might not
know how long it is, because Length might
be an indirect object.)
'''
fdata = source.fdata
startstream = source.tokstart + len(tok)
gotcr = fdata[startstream] == '\r'
startstream += gotcr
gotlf = fdata[startstream] == '\n'
startstream += gotlf
if not gotlf:
if not gotcr:
source.error(r'stream keyword not followed by \n')
else:
source.warning(r"stream keyword terminated "
r"by \r without \n")
return startstream
def readstream(self, obj, startstream, source, exact_required=False,
streamending='endstream endobj'.split(), int=int):
fdata = source.fdata
length = int(obj.Length)
source.floc = target_endstream = startstream + length
endit = source.multiple(2)
obj._stream = fdata[startstream:target_endstream]
if endit == streamending:
return
if exact_required:
source.exception('Expected endstream endobj')
# The length attribute does not match the distance between the
# stream and endstream keywords.
# TODO: Extract maxstream from dictionary of object offsets
# and use rfind instead of find.
maxstream = len(fdata) - 20
endstream = fdata.find('endstream', startstream, maxstream)
source.floc = startstream
room = endstream - startstream
if endstream < 0:
source.error('Could not find endstream')
return
if (length == room + 1 and
fdata[startstream - 2:startstream] == '\r\n'):
source.warning(r"stream keyword terminated by \r without \n")
obj._stream = fdata[startstream - 1:target_endstream - 1]
return
source.floc = endstream
if length > room:
source.error('stream /Length attribute (%d) appears to '
'be too big (size %d) -- adjusting',
length, room)
obj.stream = fdata[startstream:endstream]
return
if fdata[target_endstream:endstream].rstrip():
source.error('stream /Length attribute (%d) appears to '
'be too small (size %d) -- adjusting',
length, room)
obj.stream = fdata[startstream:endstream]
return
endobj = fdata.find('endobj', endstream, maxstream)
if endobj < 0:
source.error('Could not find endobj after endstream')
return
if fdata[endstream:endobj].rstrip() != 'endstream':
source.error('Unexpected data between endstream and endobj')
return
source.error('Illegal endstream/endobj combination')
def loadindirect(self, key, PdfDict=PdfDict,
isinstance=isinstance):
result = self.indirect_objects.get(key)
if not isinstance(result, PdfIndirect):
return result
source = self.source
offset = int(self.source.obj_offsets.get(key, '0'))
if not offset:
source.warning("Did not find PDF object %s", key)
return None
# Read the object header and validate it
objnum, gennum = key
source.floc = offset
objid = source.multiple(3)
ok = len(objid) == 3
ok = ok and objid[0].isdigit() and int(objid[0]) == objnum
ok = ok and objid[1].isdigit() and int(objid[1]) == gennum
ok = ok and objid[2] == 'obj'
if not ok:
source.floc = offset
source.next()
objheader = '%d %d obj' % (objnum, gennum)
fdata = source.fdata
offset2 = (fdata.find('\n' + objheader) + 1 or
fdata.find('\r' + objheader) + 1)
if (not offset2 or
fdata.find(fdata[offset2 - 1] + objheader, offset2) > 0):
source.warning("Expected indirect object '%s'", objheader)
return None
source.warning("Indirect object %s found at incorrect "
"offset %d (expected offset %d)",
objheader, offset2, offset)
source.floc = offset2 + len(objheader)
# Read the object, and call special code if it starts
# an array or dictionary
obj = source.next()
func = self.special.get(obj)
if func is not None:
obj = func(source)
self.indirect_objects[key] = obj
self.deferred_objects.remove(key)
# Mark the object as indirect, and
# just return it if it is a simple object.
obj.indirect = key
tok = source.next()
if tok == 'endobj':
return obj
# Should be a stream. Either that or it's broken.
isdict = isinstance(obj, PdfDict)
if isdict and tok == 'stream':
self.readstream(obj, self.findstream(obj, tok, source), source)
return obj
# Houston, we have a problem, but let's see if it
# is easily fixable. Leaving out a space before endobj
# is apparently an easy mistake to make on generation
# (Because it won't be noticed unless you are specifically
# generating an indirect object that doesn't end with any
# sort of delimiter.) It is so common that things like
# okular just handle it.
if isinstance(obj, PdfObject) and obj.endswith('endobj'):
source.error('No space or delimiter before endobj')
obj = PdfObject(obj[:-6])
else:
source.error("Expected 'endobj'%s token",
isdict and " or 'stream'" or '')
obj = PdfObject('')
obj.indirect = key
self.indirect_objects[key] = obj
return obj
def read_all(self):
deferred = self.deferred_objects
prev = set()
while 1:
new = deferred - prev
if not new:
break
prev |= deferred
for key in new:
self.loadindirect(key)
def uncompress(self):
self.read_all()
uncompress(self.indirect_objects.values())
def load_stream_objects(self, object_streams):
# read object streams
objs = []
for num in object_streams:
obj = self.findindirect(num, 0).real_value()
assert obj.Type == '/ObjStm'
objs.append(obj)
# read objects from stream
if objs:
uncompress(objs)
for obj in objs:
objsource = PdfTokens(obj.stream, 0, False)
next = objsource.next
offsets = []
firstoffset = int(obj.First)
while objsource.floc < firstoffset:
offsets.append((int(next()), firstoffset + int(next())))
for num, offset in offsets:
# Read the object, and call special code if it starts
# an array or dictionary
objsource.floc = offset
sobj = next()
func = self.special.get(sobj)
if func is not None:
sobj = func(objsource)
key = (num, 0)
self.indirect_objects[key] = sobj
if key in self.deferred_objects:
self.deferred_objects.remove(key)
# Mark the object as indirect, and
# add it to the list of streams if it starts a stream
sobj.indirect = key
def findxref(self, fdata):
''' Find the cross reference section at the end of a file
'''
startloc = fdata.rfind('startxref')
if startloc < 0:
raise PdfParseError('Did not find "startxref" at end of file')
source = PdfTokens(fdata, startloc, False, self.verbose)
tok = source.next()
assert tok == 'startxref' # (We just checked this...)
tableloc = source.next_default()
if not tableloc.isdigit():
source.exception('Expected table location')
if source.next_default().rstrip().lstrip('%') != 'EOF':
source.exception('Expected %%EOF')
return startloc, PdfTokens(fdata, int(tableloc), True, self.verbose)
def parse_xref_stream(self, source, int=int, range=range,
enumerate=enumerate, islice=itertools.islice,
defaultdict=collections.defaultdict,
hexlify=binascii.hexlify):
''' Parse (one of) the cross-reference file section(s)
'''
def readint(s, lengths):
offset = 0
for length in itertools.cycle(lengths):
next = offset + length
yield int(hexlify(s[offset:next]), 16) if length else None
offset = next
setdefault = source.obj_offsets.setdefault
next = source.next
# check for xref stream object
objid = source.multiple(3)
ok = len(objid) == 3
ok = ok and objid[0].isdigit()
ok = ok and objid[1] == 'obj'
ok | |
#!/usr/bin/env ambari-python-wrap
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
CERTIFICATE_HEADER = "-----BEGIN CERTIFICATE-----"
CERTIFICATE_FOOTER = "-----END CERTIFICATE-----"
def _get_from_dictionary(dictionary, key):
"""
Safely returns the value from a dictionary that has the given key.
if the dictionary is None or does not contain the specified key, None is returned
:return: a dictionary
"""
if dictionary and key in dictionary:
return dictionary[key]
else:
return None
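# Illustrative examples (not from the source):
#   _get_from_dictionary({"a": 1}, "a")  -> 1
#   _get_from_dictionary({"a": 1}, "b")  -> None
#   _get_from_dictionary(None, "a")      -> None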
class AmbariConfiguration(object):
"""
  AmbariConfiguration is a class that encapsulates the Ambari server configuration data.
The Ambari server configurations are split into categories, where each category contains 0 or more
properties. For example, the 'ldap-configuration' category contains the
"ambari.ldap.authentication.enabled"
property.
...
"ambari-server-configuration" : {
...
"ldap-configuration" : {
...
"ambari.ldap.authentication.enabled" : "true"
...
},
...
"sso-configuration" : {
...
"ambari.sso.enabled_services" : "ATLAS, AMBARI"
...
},
...
}
...
"""
def __init__(self, services):
self.services = services
def get_ambari_server_configuration(self):
"""
Safely returns the "ambari-server-configurations" dictionary from the services dictionary.
if the services dictionary is None or does not contain "ambari-server-configuration",
None is returned
:return: a dictionary
"""
return _get_from_dictionary(self.services, "ambari-server-configuration")
def get_ambari_server_configuration_category(self, category):
"""
Safely returns a dictionary of the properties for the requested category from the
"ambari-server-configurations" dictionary.
If the ambari-server-configurations dictionary is None or does not contain the
request category name, None is returned
:param category: the name of a category
:return: a dictionary
"""
return _get_from_dictionary(self.get_ambari_server_configuration(), category)
def get_ambari_sso_configuration(self):
"""
Safely gets a dictionary of properties for the "sso-configuration" category.
:return: a dictionary or None, if "sso-configuration" is not available
"""
return self.get_ambari_server_configuration_category("sso-configuration")
def get_ambari_sso_details(self):
"""
Gets a dictionary of properties that may be used to configure a service for SSO integration.
:return: a dictionary
"""
return AmbariSSODetails(self.get_ambari_sso_configuration())
def get_ambari_ldap_configuration(self):
"""
Safely gets a dictionary of properties for the "ldap-configuration" category.
:return: a dictionary or None, if "ldap-configuration" is not available
"""
return self.get_ambari_server_configuration_category("ldap-configuration")
def get_ambari_ldap_details(self):
"""
:return: instance of AmbariLDAPConfiguration that may be used to configure a service for LDAP integration
"""
return AmbariLDAPConfiguration(self.get_ambari_ldap_configuration())
class AmbariSSODetails(object):
"""
AmbariSSODetails encapsulates the SSO configuration data specified in the ambari-server-configuration data
"""
def __init__(self, sso_properties):
self.sso_properties = sso_properties
def is_managing_services(self):
"""
    Tests the configuration data to determine if Ambari should be configuring services to enable SSO integration.
The relevant property is "sso-configuration/ambari.sso.manage_services", which is expected
to be a "true" or "false".
:return: True, if Ambari should manage services' SSO configurations
"""
return "true" == _get_from_dictionary(self.sso_properties, "ambari.sso.manage_services")
def get_services_to_enable(self):
"""
Safely gets the list of services that Ambari should enabled for SSO.
The returned value is a list of the relevant service names converted to lowercase.
:return: a list of service names converted to lowercase
"""
sso_enabled_services = _get_from_dictionary(self.sso_properties, "ambari.sso.enabled_services")
return [x.strip().lower() for x in sso_enabled_services.strip().split(",")] \
if sso_enabled_services \
else []
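  # Illustrative example (not from the source): with
  # "ambari.sso.enabled_services" set to "ATLAS, AMBARI", this method returns
  # ["atlas", "ambari"]; if the property is missing or empty, it returns [].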
def should_enable_sso(self, service_name):
"""
Tests the configuration data to determine if the specified service should be configured by
Ambari to enable SSO integration.
The relevant property is "sso-configuration/ambari.sso.enabled_services", which is expected
to be a comma-delimited list of services to be enabled.
:param service_name: the name of the service to test
:return: True, if SSO should be enabled; False, otherwise
"""
if self.is_managing_services():
services_to_enable = self.get_services_to_enable()
return "*" in services_to_enable or service_name.lower() in services_to_enable
else:
return False
def should_disable_sso(self, service_name):
"""
Tests the configuration data to determine if the specified service should be configured by
Ambari to disable SSO integration.
The relevant property is "sso-configuration/ambari.sso.enabled_services", which is expected
to be a comma-delimited list of services to be enabled.
:param service_name: the name of the service to test
:return: true, if SSO should be disabled; false, otherwise
"""
if self.is_managing_services():
services_to_enable = self.get_services_to_enable()
return "*" not in services_to_enable and service_name.lower() not in services_to_enable
else:
return False
def get_jwt_audiences(self):
"""
Gets the configured JWT audiences list
The relevant property is "sso-configuration/ambari.sso.jwt.audiences", which is expected
to be a comma-delimited list of audience names.
    :return: the configured JWT audiences list
"""
return _get_from_dictionary(self.sso_properties, 'ambari.sso.jwt.audiences')
def get_jwt_cookie_name(self):
"""
Gets the configured JWT cookie name
The relevant property is "sso-configuration/ambari.sso.jwt.cookieName", which is expected
to be a string.
:return: the configured JWT cookie name
"""
return _get_from_dictionary(self.sso_properties, 'ambari.sso.jwt.cookieName')
def get_sso_provider_url(self):
"""
Gets the configured SSO provider URL
The relevant property is "sso-configuration/ambari.sso.provider.url", which is expected
to be a string.
:return: the configured SSO provider URL
"""
return _get_from_dictionary(self.sso_properties, 'ambari.sso.provider.url')
def get_sso_provider_original_parameter_name(self):
"""
Gets the configured SSO provider's original URL parameter name
The relevant property is "sso-configuration/ambari.sso.provider.originalUrlParamName", which is
expected to be a string.
:return: the configured SSO provider's original URL parameter name
"""
return _get_from_dictionary(self.sso_properties, 'ambari.sso.provider.originalUrlParamName')
def get_sso_provider_certificate(self, include_header_and_footer=False, remove_line_breaks=True):
"""
Retrieves, formats, and returns the PEM data from the stored 509 certificate.
The relevant property is "sso-configuration/ambari.sso.provider.certificate", which is expected
to be a PEM-encoded x509 certificate, including the header and footer.
    If the header and footer need to exist, and do not, they will be added. If they need to be removed,
they will be removed if they exist. Any line break characters will be left alone unless the
caller specifies them to be removed. Line break characters will not be added if missing.
:param include_header_and_footer: True, to include the standard header and footer; False to remove
the standard header and footer
    :param remove_line_breaks: True, to remove any line breaks from the PEM data; False to leave any existing line breaks as-is
    :return: the formatted PEM data from the x509 certificate
"""
public_cert = _get_from_dictionary(self.sso_properties, 'ambari.sso.provider.certificate')
if public_cert:
public_cert = public_cert.lstrip().rstrip()
if include_header_and_footer:
# Ensure the header and footer are in the string
if not public_cert.startswith(CERTIFICATE_HEADER):
public_cert = CERTIFICATE_HEADER + '\n' + public_cert
if not public_cert.endswith(CERTIFICATE_FOOTER):
public_cert = public_cert + '\n' + CERTIFICATE_FOOTER
else:
# Ensure the header and footer are not in the string
if public_cert.startswith(CERTIFICATE_HEADER):
public_cert = public_cert[len(CERTIFICATE_HEADER):]
if public_cert.endswith(CERTIFICATE_FOOTER):
public_cert = public_cert[:len(public_cert) - len(CERTIFICATE_FOOTER)]
# Remove any leading and ending line breaks
public_cert = public_cert.lstrip().rstrip()
if remove_line_breaks:
public_cert = public_cert.replace('\n', '')
return public_cert
class AmbariLDAPConfiguration:
"""
AmbariLDAPConfiguration encapsulates the LDAP configuration data specified in the ambari-server-configuration data.
  The public API of this class mirrors the following Java class's public API (except for trust-store related API and getLdapServerProperties, which we do not need on the Python side):
org.apache.ambari.server.ldap.domain.AmbariLdapConfiguration
"""
def __init__(self, ldap_properties):
self.ldap_properties = ldap_properties
def is_ldap_enabled(self):
return "true" == _get_from_dictionary(self.ldap_properties, 'ambari.ldap.authentication.enabled')
def get_server_host(self):
'''
:return: The LDAP URL host used for connecting to an LDAP server when authenticating users or None if ldap-configuration/ambari.ldap.connectivity.server.host is not specified
'''
return _get_from_dictionary(self.ldap_properties, 'ambari.ldap.connectivity.server.host')
def get_server_port(self):
'''
:return: The LDAP URL port (as an integer) used for connecting to an LDAP server when authenticating users or None if ldap-configuration/ambari.ldap.connectivity.server.port is not specified
'''
ldap_server_port = _get_from_dictionary(self.ldap_properties, 'ambari.ldap.connectivity.server.port')
return int(ldap_server_port) if ldap_server_port is not None else None
def get_server_url(self):
'''
:return: The LDAP URL (host:port) used for connecting to an LDAP server when authenticating users
'''
ldap_host = self.get_server_host()
ldap_port = self.get_server_port()
return None if ldap_host is None or ldap_port is None else '{}:{}'.format(ldap_host,ldap_port)
def get_secondary_server_host(self):
'''
:return: A second LDAP URL host to use as a backup when authenticating users or None if ldap-configuration/ambari.ldap.connectivity.secondary.server.host is not specified
'''
return _get_from_dictionary(self.ldap_properties, 'ambari.ldap.connectivity.secondary.server.host')
def get_secondary_server_port(self):
'''
:return: A second LDAP URL port (as an integer) to use as a backup when authenticating users or None if ldap-configuration/ambari.ldap.connectivity.secondary.server.port is not specified
'''
ldap_server_secondary_port = _get_from_dictionary(self.ldap_properties, 'ambari.ldap.connectivity.secondary.server.port')
return int(ldap_server_secondary_port) if ldap_server_secondary_port is not None else None
def get_secondary_server_url(self):
'''
:return: A second LDAP | |
calling Bio.SeqIO.parse() twice!
raise ValueError("Handle seems to be at SFF index block, not start")
if magic_number != _sff: # 779314790
raise ValueError("SFF file did not start '.sff', but %r" % magic_number)
if (ver0, ver1, ver2, ver3) != (0, 0, 0, 1):
raise ValueError(
"Unsupported SFF version in header, %i.%i.%i.%i" % (ver0, ver1, ver2, ver3)
)
if flowgram_format != 1:
raise ValueError("Flowgram format code %i not supported" % flowgram_format)
if (index_offset != 0) ^ (index_length != 0):
raise ValueError(
"Index offset %i but index length %i" % (index_offset, index_length)
)
flow_chars = handle.read(number_of_flows_per_read).decode()
key_sequence = handle.read(key_length).decode()
# According to the spec, the header_length field should be the total number
# of bytes required by this set of header fields, and should be equal to
# "31 + number_of_flows_per_read + key_length" rounded up to the next value
# divisible by 8.
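    # Worked example (illustrative): with 400 flows per read and a 4-base key,
    # 31 + 400 + 4 = 435, which rounds up to a header_length of 440, leaving
    # 5 bytes of null padding to be consumed below.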
assert header_length % 8 == 0
padding = header_length - number_of_flows_per_read - key_length - 31
assert 0 <= padding < 8, padding
if handle.read(padding).count(_null) != padding:
import warnings
from Bio import BiopythonParserWarning
warnings.warn(
"Your SFF file is invalid, post header %i byte "
"null padding region contained data." % padding,
BiopythonParserWarning,
)
return (
header_length,
index_offset,
index_length,
number_of_reads,
number_of_flows_per_read,
flow_chars,
key_sequence,
)
def _sff_do_slow_index(handle):
"""Generate an index by scanning though all the reads in an SFF file (PRIVATE).
This is a slow but generic approach if we can't parse the provided index
(if present).
Will use the handle seek/tell functions.
"""
handle.seek(0)
(
header_length,
index_offset,
index_length,
number_of_reads,
number_of_flows_per_read,
flow_chars,
key_sequence,
) = _sff_file_header(handle)
# Now on to the reads...
read_header_fmt = ">2HI4H"
read_header_size = struct.calcsize(read_header_fmt)
# NOTE - assuming flowgram_format==1, which means struct type H
read_flow_fmt = ">%iH" % number_of_flows_per_read
read_flow_size = struct.calcsize(read_flow_fmt)
assert 1 == struct.calcsize(">B")
assert 1 == struct.calcsize(">s")
assert 1 == struct.calcsize(">c")
assert read_header_size % 8 == 0 # Important for padding calc later!
for read in range(number_of_reads):
record_offset = handle.tell()
if record_offset == index_offset:
# Found index block within reads, ignore it:
offset = index_offset + index_length
if offset % 8:
offset += 8 - (offset % 8)
assert offset % 8 == 0
handle.seek(offset)
record_offset = offset
# assert record_offset%8 == 0 # Worth checking, but slow
# First the fixed header
data = handle.read(read_header_size)
(
read_header_length,
name_length,
seq_len,
clip_qual_left,
clip_qual_right,
clip_adapter_left,
clip_adapter_right,
) = struct.unpack(read_header_fmt, data)
if read_header_length < 10 or read_header_length % 8 != 0:
raise ValueError(
"Malformed read header, says length is %i:\n%r"
% (read_header_length, data)
)
# now the name and any padding (remainder of header)
name = handle.read(name_length).decode()
padding = read_header_length - read_header_size - name_length
if handle.read(padding).count(_null) != padding:
import warnings
from Bio import BiopythonParserWarning
warnings.warn(
"Your SFF file is invalid, post name %i byte "
"padding region contained data" % padding,
BiopythonParserWarning,
)
assert record_offset + read_header_length == handle.tell()
# now the flowgram values, flowgram index, bases and qualities
size = read_flow_size + 3 * seq_len
handle.seek(size, 1)
# now any padding...
padding = size % 8
if padding:
padding = 8 - padding
if handle.read(padding).count(_null) != padding:
import warnings
from Bio import BiopythonParserWarning
warnings.warn(
"Your SFF file is invalid, post quality %i "
"byte padding region contained data" % padding,
BiopythonParserWarning,
)
# print("%s %s %i" % (read, name, record_offset))
yield name, record_offset
if handle.tell() % 8 != 0:
raise ValueError("After scanning reads, did not end on a multiple of 8")
def _sff_find_roche_index(handle):
"""Locate any existing Roche style XML meta data and read index (PRIVATE).
Makes a number of hard coded assumptions based on reverse engineered SFF
files from Roche 454 machines.
Returns a tuple of read count, SFF "index" offset and size, XML offset
and size, and the actual read index offset and size.
Raises a ValueError for unsupported or non-Roche index blocks.
"""
handle.seek(0)
(
header_length,
index_offset,
index_length,
number_of_reads,
number_of_flows_per_read,
flow_chars,
key_sequence,
) = _sff_file_header(handle)
assert handle.tell() == header_length
    if not index_offset or not index_length:
raise ValueError("No index present in this SFF file")
# Now jump to the header...
handle.seek(index_offset)
fmt = ">4s4B"
fmt_size = struct.calcsize(fmt)
data = handle.read(fmt_size)
if not data:
raise ValueError(
"Premature end of file? Expected index of size %i at offest %i, found nothing"
% (index_length, index_offset)
)
if len(data) < fmt_size:
raise ValueError(
"Premature end of file? Expected index of size %i at offest %i, found %r"
% (index_length, index_offset, data)
)
magic_number, ver0, ver1, ver2, ver3 = struct.unpack(fmt, data)
if magic_number == _mft: # 778921588
# Roche 454 manifest index
# This is typical from raw Roche 454 SFF files (2009), and includes
# both an XML manifest and the sorted index.
if (ver0, ver1, ver2, ver3) != (49, 46, 48, 48):
# This is "1.00" as a string
raise ValueError(
"Unsupported version in .mft index header, %i.%i.%i.%i"
% (ver0, ver1, ver2, ver3)
)
fmt2 = ">LL"
fmt2_size = struct.calcsize(fmt2)
xml_size, data_size = struct.unpack(fmt2, handle.read(fmt2_size))
if index_length != fmt_size + fmt2_size + xml_size + data_size:
raise ValueError(
"Problem understanding .mft index header, %i != %i + %i + %i + %i"
% (index_length, fmt_size, fmt2_size, xml_size, data_size)
)
return (
number_of_reads,
header_length,
index_offset,
index_length,
index_offset + fmt_size + fmt2_size,
xml_size,
index_offset + fmt_size + fmt2_size + xml_size,
data_size,
)
elif magic_number == _srt: # 779317876
# Roche 454 sorted index
# I've had this from Roche tool sfffile when the read identifiers
# had nonstandard lengths and there was no XML manifest.
if (ver0, ver1, ver2, ver3) != (49, 46, 48, 48):
# This is "1.00" as a string
raise ValueError(
"Unsupported version in .srt index header, %i.%i.%i.%i"
% (ver0, ver1, ver2, ver3)
)
data = handle.read(4)
if data != _null * 4:
raise ValueError("Did not find expected null four bytes in .srt index")
return (
number_of_reads,
header_length,
index_offset,
index_length,
0,
0,
index_offset + fmt_size + 4,
index_length - fmt_size - 4,
)
elif magic_number == _hsh:
raise ValueError(
"Hash table style indexes (.hsh) in SFF files are not (yet) supported"
)
else:
raise ValueError(
"Unknown magic number %r in SFF index header:\n%r" % (magic_number, data)
)
def ReadRocheXmlManifest(handle):
"""Read any Roche style XML manifest data in the SFF "index".
The SFF file format allows for multiple different index blocks, and Roche
took advantage of this to define their own index block which also embeds
an XML manifest string. This is not a publicly documented extension to
the SFF file format, this was reverse engineered.
The handle should be to an SFF file opened in binary mode. This function
will use the handle seek/tell functions and leave the handle in an
arbitrary location.
Any XML manifest found is returned as a Python string, which you can then
parse as appropriate, or reuse when writing out SFF files with the
SffWriter class.
    Returns a string, or raises a ValueError if a Roche manifest could not be
found.
"""
(
number_of_reads,
header_length,
index_offset,
index_length,
xml_offset,
xml_size,
read_index_offset,
read_index_size,
) = _sff_find_roche_index(handle)
if not xml_offset or not xml_size:
raise ValueError("No XML manifest found")
handle.seek(xml_offset)
return handle.read(xml_size).decode()
# This is a generator function!
def _sff_read_roche_index(handle):
"""Read any existing Roche style read index provided in the SFF file (PRIVATE).
Will use the handle seek/tell functions.
This works on ".srt1.00" and ".mft1.00" style Roche SFF index blocks.
    Roche SFF indices use base 255 not 256, meaning we see bytes in the
    range 0 to 254 only. This appears to be so that byte 0xFF (character 255)
can be used as a marker character to separate entries (required if the
read name lengths vary).
Note that since only four bytes are used for the read offset, this is
limited to 255^4 bytes (nearly 4GB). If you try to use the Roche sfffile
    tool to combine SFF files beyond this limit, they issue a warning and
omit the index (and manifest).
"""
(
number_of_reads,
header_length,
index_offset,
index_length,
xml_offset,
xml_size,
read_index_offset,
read_index_size,
) = | |
doc[len(to_strip):].lstrip()
linedata.append( (cmdstr, doc) )
return linedata
def _help_preprocess_command_list(self, help, cmdname=None):
marker = "${command_list}"
indent, indent_width = _get_indent(marker, help)
suffix = _get_trailing_whitespace(marker, help)
linedata = self._get_cmds_data()
if linedata:
subindent = indent + ' '*4
lines = _format_linedata(linedata, subindent, indent_width+4)
block = indent + "Commands:\n" \
+ '\n'.join(lines) + "\n\n"
help = help.replace(indent+marker+suffix, block, 1)
return help
def _gen_names_and_attrs(self):
# Inheritance says we have to look in class and
# base classes; order is not important.
names = []
classes = [self.__class__]
while classes:
aclass = classes.pop(0)
if aclass.__bases__:
classes = classes + list(aclass.__bases__)
for name in dir(aclass):
yield (name, getattr(aclass, name))
def _get_help_names(self):
"""Return a mapping of help topic name to `.help_*()` method."""
# Determine the additional help topics, if any.
help_names = {}
token2cmdname = self._get_canonical_map()
for attrname, attr in self._gen_names_and_attrs():
if not attrname.startswith("help_"): continue
help_name = attrname[5:]
if help_name not in token2cmdname:
help_names[help_name] = attr
return help_names
def _help_preprocess_help_list(self, help, cmdname=None):
marker = "${help_list}"
indent, indent_width = _get_indent(marker, help)
suffix = _get_trailing_whitespace(marker, help)
help_names = self._get_help_names()
if help_names:
linedata = [(n, a.__doc__ or "") for n, a in help_names.items()]
linedata.sort()
subindent = indent + ' '*4
lines = _format_linedata(linedata, subindent, indent_width+4)
block = (indent
+ "Additional help topics (run `%s help TOPIC'):\n" % self.name
+ '\n'.join(lines)
+ "\n\n")
else:
block = ''
help = help.replace(indent+marker+suffix, block, 1)
return help
def _help_preprocess_cmd_name(self, help, cmdname=None):
marker = "${cmd_name}"
handler = self._get_cmd_handler(cmdname)
if not handler:
raise CmdlnError("cannot preprocess '%s' into help string: "
"could not find command handler for %r"
% (marker, cmdname))
s = cmdname
if hasattr(handler, "aliases"):
s += " (%s)" % (", ".join(handler.aliases))
help = help.replace(marker, s)
return help
#TODO: this only makes sense as part of the Cmdln class.
# Add hooks to add help preprocessing template vars and put
# this one on that class.
def _help_preprocess_cmd_usage(self, help, cmdname=None):
marker = "${cmd_usage}"
handler = self._get_cmd_handler(cmdname)
if not handler:
raise CmdlnError("cannot preprocess '%s' into help string: "
"could not find command handler for %r"
% (marker, cmdname))
indent, indent_width = _get_indent(marker, help)
suffix = _get_trailing_whitespace(marker, help)
# Extract the introspection bits we need.
func = handler.__func__
if func.__defaults__:
func_defaults = list(func.__defaults__)
else:
func_defaults = []
co_argcount = func.__code__.co_argcount
co_varnames = func.__code__.co_varnames
co_flags = func.__code__.co_flags
CO_FLAGS_ARGS = 4
CO_FLAGS_KWARGS = 8
# Adjust argcount for possible *args and **kwargs arguments.
argcount = co_argcount
if co_flags & CO_FLAGS_ARGS: argcount += 1
if co_flags & CO_FLAGS_KWARGS: argcount += 1
# Determine the usage string.
usage = "%s %s" % (self.name, cmdname)
if argcount <= 2: # handler ::= do_FOO(self, argv)
usage += " [ARGS...]"
elif argcount >= 3: # handler ::= do_FOO(self, subcmd, opts, ...)
argnames = list(co_varnames[3:argcount])
tail = ""
if co_flags & CO_FLAGS_KWARGS:
name = argnames.pop(-1)
import warnings
# There is no generally accepted mechanism for passing
# keyword arguments from the command line. Could
# *perhaps* consider: arg=value arg2=value2 ...
warnings.warn("argument '**%s' on '%s.%s' command "
"handler will never get values"
% (name, self.__class__.__name__,
func.__name__))
if co_flags & CO_FLAGS_ARGS:
name = argnames.pop(-1)
tail = "[%s...]" % name.upper()
while func_defaults:
func_defaults.pop(-1)
name = argnames.pop(-1)
tail = "[%s%s%s]" % (name.upper(), (tail and ' ' or ''), tail)
while argnames:
name = argnames.pop(-1)
tail = "%s %s" % (name.upper(), tail)
usage += ' ' + tail
block_lines = [
self.helpindent + "Usage:",
self.helpindent + ' '*4 + usage
]
block = '\n'.join(block_lines) + '\n\n'
help = help.replace(indent+marker+suffix, block, 1)
return help
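    # Worked example (illustrative, hypothetical handler): for a tool named
    # "mytool" with a handler
    #   def do_copy(self, subcmd, opts, src, dest, *paths): ...
    # the introspection above produces the usage line
    #   mytool copy SRC DEST [PATHS...]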
#TODO: this only makes sense as part of the Cmdln class.
# Add hooks to add help preprocessing template vars and put
# this one on that class.
def _help_preprocess_cmd_option_list(self, help, cmdname=None):
marker = "${cmd_option_list}"
handler = self._get_cmd_handler(cmdname)
if not handler:
raise CmdlnError("cannot preprocess '%s' into help string: "
"could not find command handler for %r"
% (marker, cmdname))
indent, indent_width = _get_indent(marker, help)
suffix = _get_trailing_whitespace(marker, help)
if hasattr(handler, "optparser"):
# Setup formatting options and format.
# - Indentation of 4 is better than optparse default of 2.
# C.f. <NAME>'s discussion of this in Perl Best
# Practices.
handler.optparser.formatter.indent_increment = 4
handler.optparser.formatter.current_indent = indent_width
block = handler.optparser.format_option_help() + '\n'
else:
block = ""
help = help.replace(indent+marker+suffix, block, 1)
return help
def _get_canonical_cmd_name(self, token):
map = self._get_canonical_map()
return map.get(token, None)
def _get_canonical_map(self):
"""Return a mapping of available command names and aliases to
their canonical command name.
"""
cacheattr = "_token2canonical"
if not hasattr(self, cacheattr):
# Get the list of commands and their aliases, if any.
token2canonical = {}
cmd2funcname = {} # use a dict to strip duplicates
for attr in self.get_names():
if attr.startswith("do_"): cmdname = attr[3:]
elif attr.startswith("_do_"): cmdname = attr[4:]
else:
continue
cmd2funcname[cmdname] = attr
token2canonical[cmdname] = cmdname
for cmdname, funcname in cmd2funcname.items(): # add aliases
func = getattr(self, funcname)
aliases = getattr(func, "aliases", [])
for alias in aliases:
if alias in cmd2funcname:
import warnings
warnings.warn("'%s' alias for '%s' command conflicts "
"with '%s' handler"
% (alias, cmdname, cmd2funcname[alias]))
continue
token2canonical[alias] = cmdname
setattr(self, cacheattr, token2canonical)
return getattr(self, cacheattr)
def _get_cmd_handler(self, cmdname):
handler = None
try:
handler = getattr(self, 'do_' + cmdname)
except AttributeError:
try:
# Private command handlers begin with "_do_".
handler = getattr(self, '_do_' + cmdname)
except AttributeError:
pass
return handler
def _do_EOF(self, argv):
# Default EOF handler
# TODO: A mechanism so "EOF" and "KeyboardInterrupt" work as handlers
# but are *not* real available commands.
self.stdout.write('\n')
self.stdout.flush()
self.stop = True
def _do_KeyboardInterrupt(self, argv):
# Default keyboard interrupt (i.e. <Ctrl+C>) handler.
# TODO: A mechanism so "EOF" and "KeyboardInterrupt" work as handlers
# but are *not* real available commands.
self.stdout.write('\n')
self.stdout.flush()
def emptyline(self):
# Different from cmd.Cmd: don't repeat the last command for an
# emptyline.
if self.cmdlooping:
pass
else:
return self.do_help(["help"])
#---- optparse.py extension to fix (IMO) some deficiencies
#
# See the class _OptionParserEx docstring for details.
#
class StopOptionProcessing(Exception):
"""Indicate that option *and argument* processing should stop
cleanly. This is not an error condition. It is similar in spirit to
StopIteration. This is raised by _OptionParserEx's default "help"
and "version" option actions and can be raised by custom option
callbacks too.
Hence the typical CmdlnOptionParser (a subclass of _OptionParserEx)
usage is:
parser = CmdlnOptionParser(mycmd)
parser.add_option("-f", "--force", dest="force")
...
try:
opts, args = parser.parse_args()
except StopOptionProcessing:
# normal termination, "--help" was probably given
sys.exit(0)
"""
class _OptionParserEx(optparse.OptionParser):
"""An optparse.OptionParser that uses exceptions instead of sys.exit.
This class is an extension of optparse.OptionParser that differs
as follows:
- Correct (IMO) the default OptionParser error handling to never
sys.exit(). Instead OptParseError exceptions are passed through.
- Add the StopOptionProcessing exception (a la StopIteration) to
indicate normal termination of option processing.
See StopOptionProcessing's docstring for details.
I'd also like to see the following in the core optparse.py, perhaps
as a RawOptionParser which would serve as a base class for the more
generally used OptionParser (that works as it currently does):
- Remove the implicit addition of the -h|--help and --version
options. They can get in the way (e.g. if you want '-?' and '-V' for
these as well) and it is not hard to do:
optparser.add_option("-h", "--help", action="help")
optparser.add_option("--version", action="version")
These are good practices, just not valid defaults if they can
get in the way.
"""
def error(self, msg):
raise optparse.OptParseError(msg)
def exit(self, status=0, msg=None):
if status == 0:
raise StopOptionProcessing(msg)
else:
#TODO: don't lose status info here
raise optparse.OptParseError(msg)
#---- optparse.py-based option processing support
class CmdlnOptionParser(_OptionParserEx):
"""An optparse.OptionParser class more appropriate for top-level
Cmdln options. For parsing of sub-command options, see
SubCmdOptionParser.
Changes:
- disable_interspersed_args() by default, because a Cmdln instance
has sub-commands which may themselves have options.
- Redirect print_help() to the Cmdln.do_help() which is better
equipped to handle the "help" action.
- error() will raise a CmdlnUserError: OptionParser.error() is meant
to be called for user errors. Raising a well-known error here can
make error handling clearer.
- Also see the changes in _OptionParserEx.
"""
def __init__(self, | |
-> CurveData:
"""An optional subroutine to perform data pre-processing.
Subclasses can override this method to apply pre-processing to the data values to fit.
Otherwise the analysis uses extracted data values as-is.
For example,
- Apply smoothing to y values to deal with noisy observed values
- Remove redundant data points (outliers)
- Apply a frequency filter function
etc...
.. note::
The data returned by this method should have the label "fit_ready".
Returns:
Formatted CurveData instance.
"""
return CurveData(
label="fit_ready",
x=data.x,
y=data.y,
y_err=data.y_err,
data_index=data.data_index,
metadata=data.metadata,
)
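# A minimal sketch (not from the original source) of how a subclass might
# override _format_data to smooth noisy y values before fitting. The class
# name, the window size, and the use of numpy.convolve are illustrative
# assumptions only:
#
#     class SmoothedCurveAnalysis(CurveAnalysis):  # hypothetical subclass
#         def _format_data(self, data: CurveData) -> CurveData:
#             window = 3  # hypothetical moving-average window
#             kernel = np.ones(window) / window
#             return CurveData(
#                 label="fit_ready",
#                 x=data.x,
#                 y=np.convolve(data.y, kernel, mode="same"),
#                 y_err=data.y_err,
#                 data_index=data.data_index,
#                 metadata=data.metadata,
#             )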
# pylint: disable=unused-argument
def _extra_database_entry(self, fit_data: FitData) -> List[AnalysisResultData]:
"""Calculate new quantity from the fit result.
Subclasses can override this method to do post analysis.
Args:
fit_data: Fit result.
Returns:
List of database entries created from the fit data.
"""
return []
# pylint: disable=unused-argument
def _evaluate_quality(self, fit_data: FitData) -> Union[str, None]:
"""Evaluate quality of the fit result.
Subclasses can override this method to do post analysis.
Args:
fit_data: Fit result.
Returns:
String that represents fit result quality. Usually "good" or "bad".
"""
return None
def _extract_curves(
self, experiment_data: ExperimentData, data_processor: Union[Callable, DataProcessor]
):
"""Extract curve data from experiment data.
This method internally populates two types of curve data.
- raw_data:
This is the data directly obtained from the experiment data.
You can access this data with ``self._data(label="raw_data")``.
- fit_ready:
This is the formatted data created by the pre-processing defined by
the `self._format_data()` method. This method is implemented by subclasses.
You can access this data with ``self._data(label="fit_ready")``.
If multiple series exist, you can optionally specify ``series_name`` in
the ``self._data`` method to filter data in the target series.
.. note::
The target metadata properties that define each curve entry are described by
the class attribute __series__ (see `filter_kwargs`).
Args:
experiment_data: ExperimentData object to fit parameters.
data_processor: A callable or DataProcessor instance to format data into numpy array.
This should take a list of dictionaries and return a two-tuple of float values
that represent a y value and its error.
Raises:
DataProcessorError: When `x_key` specified in the analysis option is not
defined in the circuit metadata.
AnalysisError: When the formatted data has a label other than fit_ready.
"""
self.__processed_data_set = list()
def _is_target_series(datum, **filters):
try:
return all(datum["metadata"][key] == val for key, val in filters.items())
except KeyError:
return False
# Extract X, Y, Y_sigma data
data = experiment_data.data()
x_key = self._get_option("x_key")
try:
x_values = [datum["metadata"][x_key] for datum in data]
except KeyError as ex:
raise DataProcessorError(
f"X value key {x_key} is not defined in circuit metadata."
) from ex
if isinstance(data_processor, DataProcessor):
y_values, y_sigmas = data_processor(data)
if y_sigmas is None:
y_sigmas = np.full(y_values.shape, np.nan)
else:
y_values, y_sigmas = zip(*map(data_processor, data))
# Store metadata
metadata = np.asarray([datum["metadata"] for datum in data], dtype=object)
# Format data
x_values = np.asarray(x_values, dtype=float)
y_values = np.asarray(y_values, dtype=float)
y_sigmas = np.asarray(y_sigmas, dtype=float)
# Find series (invalid data is labeled as -1)
data_index = np.full(x_values.size, -1, dtype=int)
for idx, series_def in enumerate(self.__series__):
data_matched = np.asarray(
[_is_target_series(datum, **series_def.filter_kwargs) for datum in data], dtype=bool
)
data_index[data_matched] = idx
# Store raw data
raw_data = CurveData(
label="raw_data",
x=x_values,
y=y_values,
y_err=y_sigmas,
data_index=data_index,
metadata=metadata,
)
self.__processed_data_set.append(raw_data)
# Format raw data
formatted_data = self._format_data(raw_data)
if formatted_data.label != "fit_ready":
raise AnalysisError(f"Not expected data label {formatted_data.label} != fit_ready.")
self.__processed_data_set.append(formatted_data)
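# Illustrative sketch (assumed names, not in the original source): if a subclass
# declared, for example,
#
#     __series__ = [
#         SeriesDef(name="curve1", filter_kwargs={"series": 0}),
#         SeriesDef(name="curve2", filter_kwargs={"series": 1}),
#     ]
#
# then a datum whose circuit metadata contains {"series": 1, ...} is assigned
# data_index == 1 by the loop above, while a datum matching no series keeps the
# sentinel value -1.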
def _format_fit_options(self, **fitter_options) -> Dict[str, Any]:
"""Format fitting option args to dictionary of parameter names.
Args:
fitter_options: Fit options generated by `self._setup_fitting`.
Returns:
Formatted fit options.
Raises:
AnalysisError:
- When fit functions have different signatures.
- When a fit option is a dictionary but its keys don't match the parameter names.
- When initial guesses are not provided.
- When a fit option is an array but its length doesn't match the number of parameters.
"""
fit_params = self._fit_params()
# Remove any fixed parameters so as not to give them to the fitter.
if self.__fixed_parameters__:
for pname in self.__fixed_parameters__:
fitter_options.pop(pname, None)
# Validate dictionary keys
def _check_keys(parameter_name, default_value=None):
named_values = fitter_options[parameter_name]
if not named_values.keys() == set(fit_params):
raise AnalysisError(
f"Fitting option `{parameter_name}` doesn't have the "
f"expected parameter names {','.join(fit_params)}."
)
for key in named_values:
if named_values[key] is None:
named_values[key] = default_value
# Convert array into dictionary
def _dictionarize(parameter_name):
parameter_array = fitter_options[parameter_name]
if len(parameter_array) != len(fit_params):
raise AnalysisError(
f"Value length of fitting option `{parameter_name}` doesn't "
"match with the length of expected parameters. "
f"{len(parameter_array)} != {len(fit_params)}."
)
return dict(zip(fit_params, parameter_array))
if fitter_options.get("p0", None):
if isinstance(fitter_options["p0"], dict):
_check_keys("p0")
else:
fitter_options["p0"] = _dictionarize("p0")
else:
# p0 should be defined
raise AnalysisError("Initial guess p0 is not provided to the fitting options.")
if fitter_options.get("bounds", None):
if isinstance(fitter_options["bounds"], dict):
_check_keys("bounds", default_value=(-np.inf, np.inf))
else:
fitter_options["bounds"] = _dictionarize("bounds")
else:
# bounds are optional
fitter_options["bounds"] = {par: (-np.inf, np.inf) for par in fit_params}
return fitter_options
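# Behaviour sketch (hypothetical parameter names): assuming the fit parameters
# are ("amp", "tau"), both of the following calls would be normalized to the
# same dictionary form by the method above:
#
#     self._format_fit_options(p0=[1.0, 0.5])
#     self._format_fit_options(p0={"amp": 1.0, "tau": 0.5})
#
# In either case the returned options contain p0 == {"amp": 1.0, "tau": 0.5},
# and since no bounds were supplied, bounds defaults to
# {"amp": (-np.inf, np.inf), "tau": (-np.inf, np.inf)}.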
@property
def _experiment_type(self) -> str:
"""Return type of experiment."""
try:
return self.__experiment_metadata["experiment_type"]
except (TypeError, KeyError):
# Experiment metadata is not set or the key is not found
return None
@property
def _num_qubits(self) -> int:
"""Getter for qubit number."""
try:
return self.__experiment_metadata["num_qubits"]
except (TypeError, KeyError):
# Experiment metadata is not set or the key is not found
return None
@property
def _physical_qubits(self) -> List[int]:
"""Getter for physical qubit indices."""
try:
return list(self.__experiment_metadata["physical_qubits"])
except (TypeError, KeyError):
# Experiment metadata is not set or the key is not found
return None
@property
def _backend(self) -> Backend:
"""Getter for backend object."""
return self.__backend
def _experiment_options(self, index: int = -1) -> Dict[str, Any]:
"""Return the experiment options of given job index.
Args:
index: Index of job metadata to extract. Default to -1 (latest).
Returns:
Experiment options. This option is used for circuit generation.
"""
try:
return self.__experiment_metadata["job_metadata"][index]["experiment_options"]
except (TypeError, KeyError, IndexError):
# Experiment metadata or job metadata is not set, or the key is not found
return None
def _analysis_options(self, index: int = -1) -> Dict[str, Any]:
"""Returns the analysis options of given job index.
Args:
index: Index of job metadata to extract. Default to -1 (latest).
Returns:
Analysis options. This option is used for analysis.
"""
try:
return self.__experiment_metadata["job_metadata"][index]["analysis_options"]
except (TypeError, KeyError, IndexError):
# Experiment metadata or job metadata is not set, or the key is not found
return None
def _run_options(self, index: int = -1) -> Dict[str, Any]:
"""Returns the run options of given job index.
Args:
index: Index of job metadata to extract. Default to -1 (latest).
Returns:
Run options. This option is used for backend execution.
"""
try:
return self.__experiment_metadata["job_metadata"][index]["run_options"]
except (TypeError, KeyError, IndexError):
# Experiment metadata or job metadata is not set, or the key is not found
return None
def _transpile_options(self, index: int = -1) -> Dict[str, Any]:
"""Returns the transpile options of given job index.
Args:
index: Index of job metadata to extract. Default to -1 (latest).
Returns:
Transpile options. This option is used for circuit optimization.
"""
try:
return self.__experiment_metadata["job_metadata"][index]["transpile_options"]
except (TypeError, KeyError, IndexError):
# Experiment metadata or job metadata is not set, or the key is not found
return None
def _data(
self,
series_name: Optional[str] = None,
label: Optional[str] = "fit_ready",
) -> CurveData:
"""Getter for experiment data set.
Args:
series_name: Series name to search for.
label: Label attached to data set. By default it returns "fit_ready" data.
Returns:
Filtered curve data set.
Raises:
AnalysisError: When requested series or label are not defined.
"""
# pylint: disable = undefined-loop-variable
for data in self.__processed_data_set:
if data.label == label:
break
else:
raise AnalysisError(f"Requested data with label {label} does not exist.")
if series_name is None:
return data
for idx, series_def in enumerate(self.__series__):
if series_def.name == series_name:
locs = data.data_index == idx
return CurveData(
label=label,
x=data.x[locs],
y=data.y[locs],
y_err=data.y_err[locs],
data_index=idx,
metadata=data.metadata[locs] if data.metadata is not None else None,
)
raise AnalysisError(f"Specified series {series_name} is not defined in this analysis.")
def _arg_parse(self, **options) -> Dict[str, Any]:
"""Parse input kwargs with predicted input.
Class attributes will be updated according to the ``options``.
For example, if ``options`` has a key ``p0``, and the class
has an attribute named ``__p0``, then the attribute ``__p0``
will be updated to ``options["p0"]``.
Options that don't have matching | |
-0.044891586589426921]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_taggedData_rank2_Symbol_rank2(self):
arg0=Data(numpy.array([[-4.6443072321046044, 0.38417837617679673, 3.4279882680004921, 0.40710770023483178,
2.8688000401894911], [-0.63862918185682371, -1.0613480573447367, 2.5252641176738813, -2.3743507769422569,
2.2710813572310951], [2.0244163796382626, -0.45931604591878106, -4.2337337609916501, -3.5735365306689681,
1.5100285975528873], [1.9953319494326704, -1.458603065344283, -2.6914617917833263, 2.8267445415419745,
-2.1176995723917171]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[2.558345522140046, 3.8739768438957185, -2.5523095986149191,
-1.1599373064608165, -1.9295185061392859], [2.6353186995223199, -3.9407961905622004, 3.7986447608802525,
3.1282445710581239, 0.37606431107885818], [4.6848458369965034, -2.5423454969446944, -0.10041273851269938,
0.39611652325263069, 2.5670890012870329], [2.034125316253621, -4.3475018844119351, -0.69889862294772787,
-4.8039063806121574, 3.7327807333050149]]))
arg1=Symbol(shape=(4, 5))
res=arg0/arg1
s1=numpy.array([[-1.6000167654125255, -0.94021602909627866, 2.5871609076265356, -4.2349329811309033,
4.7526174859280115], [-4.0174826369961654, 1.9310149690886282, 1.3522987503107187, 4.9900274807319445,
-3.1685192513299363], [-2.6079018141064014, 1.653494600107277, 1.6560499898502972, 4.7083848545021212,
-0.40099662839500461], [-2.6239863890404425, -1.1141605513112127, -0.20010931379470431, 3.50058742814422,
-0.89214598784888999]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[2.9026616048658607, -0.40860649498399121, 1.325000025276871, -0.096130848362591351,
0.60362527568938573], [0.15896252443652648, -0.54963222674843171, 1.8673862688208871, -0.47581917857373879,
-0.71676426023854656], [-0.77626249910483291, -0.27778502928826082, -2.5565253385704674, -0.75897290495529057,
-3.7656890123909541], [-0.76042008364316904, 1.3091498021786081, 13.449957629381233, 0.80750576855054512,
2.3737141692446966]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[-1.5989491969357201, -4.1203050405546966, -0.98652912970782747,
0.27389744102894042, -0.40599070130352011], [-0.65596268550216397, -2.0407900786093434, 2.8090277832523585,
0.62689926721591294, -0.11868771538030298], [-1.7964042249043672, -1.5375589958260218, -0.060633881300754963,
0.084130022394806395, -6.4017720337496291], [-0.77520421780749937, 3.9020425550837601, 3.4925841765903027,
-1.3723143555819919, -4.1840469879883226]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_taggedData_rank3_Symbol_rank0(self):
arg0=Data(numpy.array([[[2.6594060153757653, 3.0985169336341105], [3.3661217116389821, -0.59504905070450942]],
[[-3.9749579000522637, -4.7342067957921277], [1.1895841648424295, 0.56731685724203196]], [[0.20933515875849551,
-0.47657164361986748], [3.6005053961204521, -2.7248219369010487]], [[-4.5417601893012947, 4.6881992583085577],
[0.95219997618841479, 0.045100875229370452]], [[1.6875122232100468, 0.83285808001415162], [-0.98095523355696734,
1.3721074229557644]], [[1.7539561105027381, -0.13330491004776146], [-3.3240848531330958,
3.9526032719641933]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[4.6963315268174881, 1.5142976851258796], [4.5399300108016529,
4.2558918208130496]], [[-3.5130795594275552, -2.2965838545821149], [3.5486672192586823, -0.16210635982120003]],
[[1.2552491212091041, -0.47844908361541538], [4.446197400169801, 1.9447260435327358]], [[-4.5600746555661678,
-1.6955914419145826], [-2.0546718268066022, 4.4738577557187309]], [[1.0326084184938722, -1.7805867888057159],
[2.4947632536764397, 0.81887034400060177]], [[3.2736611846490149, 3.9295086162843287], [-0.83952910002973979,
-0.69339229981178008]]]))
arg1=Symbol(shape=())
res=arg0/arg1
s1=numpy.array(3.40074527236)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[0.78200682567782176, 0.9111287925088859], [0.98981883147769634, -0.17497607231601478]],
[[-1.1688490556353195, -1.3921086163884109], [0.34980101994451862, 0.16682133232772797]], [[0.061555671475896641,
-0.14013741267054175], [1.0587401018787967, -0.8012425861615583]], [[-1.3355190776025851, 1.3785799531693437],
[0.27999744171609176, 0.013262056289824579]], [[0.49621835452551266, 0.244904576295006], [-0.28845301691082309,
0.40347256647195434]], [[0.51575639162366171, -0.039198734210198434], [-0.97745775908348687,
1.1622756059065873]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[1.3809712726765795, 0.44528406682920152], [1.3349809077741897,
1.2514585715682693]], [[-1.0330322556006573, -0.67531781143652669], [1.0434969205434828, -0.047667892428998465]],
[[0.36911000991810128, -0.14068947989263214], [1.3074185344927436, 0.57185289922762761]], [[-1.3409045048539983,
-0.49859407456847055], [-0.6041828076649105, 1.3155521503132983]], [[0.30364180078027336, -0.52358722756407616],
[0.73359309618224211, 0.24079143788171639]], [[0.96263051845069647, 1.1554845487028675], [-0.24686621101955039,
-0.20389421855490472]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_taggedData_rank3_Symbol_rank3(self):
arg0=Data(numpy.array([[[0.37903407908351117, 4.7562512290710508], [-2.0567133710230481, -2.0597757209890579]],
[[3.1071466326239037, 3.7378759300098636], [-2.2870602196502565, -3.6754721739328113]], [[3.300935107148554,
2.1910214216355826], [-2.2941648800534375, -2.0181827356997148]], [[-3.5358995232538684, 0.077598647462692405],
[1.0254061925828246, 1.3424636637528886]], [[-2.5177374705422064, 3.3774425425435926], [3.7215367528674541,
-2.5394983441996635]], [[4.9947523199127613, 0.074465686539016751], [1.6903291082153435,
-1.548003996221603]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[-4.1180675764846963, -3.2588333429017147], [2.3910605684413362,
-2.5464559979058099]], [[-0.47366800112006935, -2.9943461140704954], [0.68865869503993338, 3.872833966837911]],
[[2.8476798086608648, -3.2531482133842138], [-2.5572122994418356, 2.4221777629945427]], [[4.3150298961939555,
3.6112269569743987], [0.73197953589044573, -3.7189842913799733]], [[-2.7850398483841654, -1.3377438559149857],
[-1.4556441129183781, 3.6956035182385243]], [[2.2019800882262039, 3.8700529933760066], [1.7561308799879001,
-3.6205253465229568]]]))
arg1=Symbol(shape=(6, 2, 2))
res=arg0/arg1
s1=numpy.array([[[-4.6296995406344124, -2.608733545501265], [-3.2421078749876298, -0.82737098161092959]],
[[-1.4709282084188935, -3.8180830267668897], [-3.8279718065351354, -2.1824375379963898]], [[3.32588617695232,
4.0812665637977101], [3.7989606569590251, 3.748273907432111]], [[-2.888051956795227, -2.7465995899159523],
[4.8196962245729438, 3.6666593093549196]], [[0.56914384990489708, -1.5780278215915988], [-0.33162965001135003,
3.2519356905168753]], [[1.400833424519293, 1.2066487950482028], [1.140741354057325, -0.80768829487616767]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-0.08187012477953845, -1.8232031543708858], [0.63437536637515357, 2.4895431031175148]],
[[-2.1123713685277594, -0.97899283588263286], [0.59746004809799647, 1.6841133411346645]], [[0.99249791830620315,
0.5368483992373162], [-0.60389277152711029, -0.53842989747842174]], [[1.2243199139594205, -0.028252624717338932],
[0.21275328253155254, 0.366127188399478]], [[-4.42372779915467, -2.1402934069547039], [-11.221966289021758,
-0.78091899283408828]], [[3.5655576405358471, 0.061712808933805818], [1.4817812137723205,
1.9165858983494843]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[0.8894891645431473, 1.2492013024946684], [-0.73750185392904588,
3.0777680804658418]], [[0.32201979566984912, 0.78425379780336379], [-0.17990171554143927, -1.7745451585264647]],
[[0.85621685684695914, -0.79709280502302848], [-0.67313471508515732, 0.64621151570375557]], [[-1.4940970456023919,
-1.3147992048906205], [0.15187254585849005, -1.0142704782774759]], [[-4.8933847723199335, 0.84773147698101881],
[4.3893666108219174, 1.1364319193074606]], [[1.5719071587557463, 3.207273739681153], [1.5394645541180672,
4.482577461492177]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_taggedData_rank4_Symbol_rank0(self):
arg0=Data(numpy.array([[[[-0.18987391835747758, 4.5085269431466148, 2.3332490022782091, 3.5140443207840626],
[-3.8071577551437374, 4.4720793176524474, 1.5612407831467943, -3.8104362786352852], [-4.0532150845521837,
-0.57280649274337314, -0.56128092769382665, 1.5156021557578434]], [[1.7447218212497937, -2.3056218481816462,
-3.1637924745560939, -0.30131377200161236], [-2.7752874159121497, 2.6013067905592049, -3.746096460635143,
-2.9734953480155388], [-3.6161482942867931, 1.9377292214084427, -4.7468533933334172, 3.2118802123097385]]],
[[[-4.4843124086103083, 2.3827156971613297, 2.1743800991624589, 4.4296694534725685], [-3.4871548142175457,
2.955544818380722, 3.0587638749887915, -0.51089762690949225], [3.650413980256328, -3.6682136669452814,
-0.46817705349461569, -0.82910345294884724]], [[1.588186425815735, 1.5765955566881908, -4.3427217875858659,
-2.0163319014518422], [-1.0220958643238132, 1.9727101787163654, -0.065010798608873266, 1.73315365618957],
[2.738465567903118, 1.9645854229284678, -2.7935410540423575, 3.0569496206524445]]], [[[-0.21196663843321328,
-3.1770386067640386, 3.7992231608336073, -4.5172564500621428], [0.98964989530832703, -2.4738974761647881,
0.0114065763112281, -3.1306195317462948], [-1.7022084431469064, -4.8835685460571892, 3.3149881663472325,
-1.8527394999613223]], [[-4.688839386407393, -2.2094627892488683, -1.6723044389697153, 3.4908352481955269],
[-2.4719833755226062, -2.3617449879099341, -4.2339862392091119, 3.802095592491435], [-4.4231212186316329,
-1.5324562390597976, 3.2464993080986027, 4.0812450056830585]]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[[0.34365977550907534, 3.4152342295544518, -1.2774419454539219,
-0.072154050581789697], [-3.2136192243029962, -4.3100480205097416, 0.92063187364718946, 0.65983261161884776],
[1.836360802359561, 4.7090663804834136, 3.6687227420314681, 0.14984304098941781]], [[1.3240710521065289,
4.8518181175906356, 4.2513462649612741, 2.7178860490952372], [3.6396852376664448, 3.5569808409457586,
3.2627252762282897, 2.4127574016450257], [4.0085015954616114, 0.70935755386425026, -1.9722472564280968,
-4.9450465085805737]]], [[[4.6143116204743464, -1.5718311397775753, -2.8294250621955639, 0.95996522458350686],
[2.3480451792784409, 2.218190909075064, -4.7734664792996711, 0.0048653152667395005], [4.094925294988629,
0.098020816683218825, 4.9363224824892118, -2.7422959820113979]], [[1.9859137713910258, -2.1627818319757597,
-4.0065126947434919, 1.1169402196268106], [0.60687417854650949, 4.7896845328736148, -1.0372629333665175,
-3.4736146138727317], [-0.77834611882985527, 3.7473488660222962, 4.4949031076591055, 1.1504392368842806]]],
[[[3.9105694358071688, 0.85300656236961014, -4.4195308596302096, -1.6714210722352862], [-1.7040870582658831,
4.4180235769979337, 1.0471725443882738, 1.0499359823818599], [-1.2249935138047685, 1.3155129039735147,
2.6341447922360262, -4.9309444752348908]], [[-4.8684443672971778, -2.2231549054199862, 2.0972135466603978,
2.2253917338571325], [4.4575262452243205, -1.0296343337965821, 2.3085756197272289, 3.1003426493793196],
[2.1929744355036931, -1.4027552428126278, -0.58004112631573967, 2.9911401359981511]]]]))
arg1=Symbol(shape=())
res=arg0/arg1
s1=numpy.array(-3.1632147744)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[0.060025616943284218, -1.4252990279490751, -0.73761953224333543, -1.1109091767094752],
[1.2035723233069942, -1.4137766913094203, -0.49356142231691552, 1.2046087763224773], [1.2813594313465078,
0.18108365495100007, 0.17744003102042699, -0.47913349672732097]], [[-0.55156603192733833, 0.72888564723490989,
1.0001826307086923, 0.095255552812962635], [0.87736294050361474, -0.82236173515993394, 1.1842687670000487,
0.94002322323480525], [1.1431877226776648, -0.61258224926441518, 1.500642141580049, -1.0153848035560178]]],
[[[1.4176439882937615, -0.75325764043739629, -0.6873956573419292, -1.4003694878140707], [1.1024084872260629,
-0.93434844902089775, -0.96697951076334454, 0.16151215246090378], [-1.1540202738679435, 1.1596473614855805,
0.14800672318666852, 0.26210785927619656]], [[-0.50207985833555446, -0.49841558956020954, 1.3728823672459796,
0.6374312353906596], [0.32311933814789817, -0.62364092210289068, 0.020552129161448576, -0.54790894068148144],
[-0.86572229937262646, -0.62107240988751689, 0.88313353764356695, -0.96640596313356297]]], [[[0.067009878731175476,
1.0043701845590174, -1.2010639276158939, 1.4280587226072361], [-0.31286206150703538, 0.78208330847024454,
-0.003606007534974082, 0.98969553287446743], [0.53812610415292728, 1.5438624609305427, -1.0479807419892684,
0.58571410166503546]], [[1.4823019367366943, 0.69848649137897711, 0.52867242923360025, -1.1035719978443639],
[0.78147819602019131, 0.74662808451185125, 1.3385073544404151, -1.2019720011621717], [1.3982993676015798,
0.48446164688593624, -1.0263290796352258, -1.2902206447417288]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[-0.10864256777323951, -1.0796719391910867, 0.40384293718917252,
0.022810354568944386], [1.0159345645167126, 1.3625530758742546, -0.29104311256316334, -0.20859557718270469],
[-0.58053623712851588, -1.4886963789477379, -1.1598082974708024, -0.047370492260626021]], [[-0.41858398703187571,
-1.5338250683628583, -1.3439954502512519, -0.85921641207896848], [-1.1506285526744569, -1.1244828741103539,
-1.0314586611802425, -0.76275484711676045], [-1.2672239735039237, -0.22425209935319021, 0.62349457659008678,
1.5632977401979911]]], [[[-1.4587411698434058, 0.49690939499221054, 0.89447769563233703, -0.30347772536740358],
[-0.74229710808168103, -0.70124574753092495, 1.5090554450906528, -0.001538091977223457], [-1.2945454504477103,
-0.030987720933937649, -1.5605397782151482, 0.86693322382179105]], [[-0.62781502775689713, 0.68372904978794213,
1.2665952142005037, -0.35310287137826013], [-0.19185361154036801, -1.5141825245748524, 0.32791416560174402,
1.0981279684151384], [0.24606173603165085, -1.1846646950278541, -1.4209920692190618, -0.36369305245877914]]],
[[[-1.2362642800784225, -0.26966444683838514, 1.3971643327533361, 0.52839316690162763], [0.53871999841966511,
-1.3966878293415304, -0.33104693138861208, -0.33192054832286222], [0.38726220037875464, -0.41587846472517831,
-0.83274294668644144, 1.5588396068268995]], [[1.5390811925568579, 0.70281503596024564, -0.66300068007811208,
-0.70352217366561076], [-1.4091759691118937, 0.32550250527704089, -0.72981943509196212, -0.98012397845078236],
[-0.6932739607981846, 0.44345874145667485, 0.18337076919658227, -0.94560134209205504]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_taggedData_rank4_Symbol_rank4(self):
arg0=Data(numpy.array([[[[3.1087373682977262, 1.7288468484290664, -4.7350025546766474, -1.2462687606986202],
[1.1147661257221184, 1.4226862562785945, -4.8699002989410065, -0.88238981212800027], [-2.5253953718962063,
-2.7446236822447503, -0.3879205662380496, -3.5863352294770778]], [[-3.8709898597864569, -4.5958808941745852,
-0.69941717063653019, -4.1924685416353791], [-3.532941685164098, 3.0050044088519066, -0.43457496025027353,
3.7774083718666578], [-3.4471087299766379, 1.6436932375732649, 2.6583331263073102, 1.6377907641557758]]],
[[[4.7414386788893221, -3.2160736850371494, 0.96698243106325243, -0.82828848269705979], [3.5200044289955326,
4.9118207264463223, -4.3585332820909706, -0.77115446561512169], [0.16582160103948596, -0.65994126459526647,
2.6619397454888905, -2.3437138040847803]], [[1.2215261759413565, -1.7584557798190015, -1.1089650453761957,
-1.5641210633931846], [2.7598302729800022, 1.8875202967455316, -3.630936106223853, 1.2007447478800914],
[2.6512287695864964, 1.7707580266874441, 0.1953947241778895, 4.503435455349651]]], [[[4.2589981026451031,
-3.9277495689426001, -2.8473369657351677, 1.4040000652368345], [0.26972497738921852, 0.60147023681782397,
-4.4745745609656007, 1.9306805685522557], [3.6376896663673826, 2.8396270545178259, -2.6836138158505385,
2.9279825131423012]], [[-0.3090571730995233, -0.57453652295428181, -1.6271798513695179, -4.4696813085601139],
[-2.6653810514006215, -2.3768146409366411, -1.3128180331345818, 4.581635378865748], [0.50247688944640778,
4.9532451154747754, -2.7018392025036428, 2.1300845231681196]]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[[-3.4505138582863983, 1.4248257487998686, -3.9524119923215526,
0.81504856520297952], [-4.3718698276484877, -0.94288708981653535, 4.8282582970579551, -4.6722816105251033],
[3.6647072271001377, 4.7114077690304565, 0.092514361474126616, -3.8124864196234185]], [[4.165582354632777,
0.34102300070869251, 0.98343269313930293, -2.6743082425014606], [4.1326181041099233, -0.23439200672333627,
-1.5849546480857146, -2.0736002379586758], [1.6002261433791851, -1.9446791096418647, -3.7570143378185583,
1.076047727904327]]], [[[1.2380177953892098, -3.1484579868063811, -0.76442488916600748, -2.1818057802025415],
[-1.5383818327876098, -3.6057035623329137, 2.163451126002772, 2.188392374984657], [2.3709893728585962,
2.3471840573448759, -4.4074156468519838, 4.7188550456319831]], [[1.9267506253146127, -1.0414071109895739,
1.0685570204986226, 4.0226454234187585], [-3.2914506386974205, 0.15234686305474998, 4.7028886509160337,
-0.054270133945801824], [-2.3311208759539328, 3.6661719497716891, -3.8474697136079583, 1.8341420305410185]]],
[[[3.3674152480608264, 2.7014266389725128, -1.3030301583873838, -3.2387622104831584], [-4.0951687925040563,
4.7004938357073254, 4.1950637273295168, 2.0382289901206372], [1.7427744024426053, 1.1790502862054941,
-2.2495124667079791, -3.0538048367909409]], [[-4.4962463676429856, 1.5822806162106859, -1.8235905534694785,
-4.4062861037909364], [0.87041939490070597, -4.0113094886293554, -4.6982054748353583, 4.2373051220310582],
[-0.84614829051970197, 3.3176111131043982, -2.6687460434287589, 4.6770431583761969]]]]))
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0/arg1
s1=numpy.array([[[[-2.3182995299525277, -1.6619336920343253, -2.9660511871402528, 0.93254822596563614],
[-4.2541426675441816, -0.30333038370249721, 3.4789554983748676, 0.5140033510602251], [-3.2905733659402281,
3.7139824585796646, 0.78960211543734715, 2.971514572460773]], [[-0.70540352219700697, -1.3136347308463381,
0.73924055860583593, -4.9656858822166203], [2.1473770309669362, -2.1549075593317712, 1.8563623226892654,
-1.3846952158399581], [-2.4903294101424525, -0.88004658956592507, -0.26617618913608698, 3.100112111326629]]],
[[[-3.3182633250231328, -4.3300560926251794, -0.56051213669826971, -0.70765176831018195], [4.8954497928460068,
1.0868590622929659, 4.1265642365266171, -1.2873984102989136], [2.4610703924835473, 2.7606204885978327,
0.74181190652475859, -0.76191236317201927]], [[0.24802432047997591, 4.825490575588006, -1.2970889077321623,
-1.1984463553405069], [-0.17745571227021895, -1.9770464473025151, 0.90483935921876402, -0.059094745500082979],
[-0.07302552360214154, -1.2720098622673035, -0.68873120844511426, -2.3715151078623045]]], [[[0.24502664280811803,
1.2652825479404042, 1.2097368744797734, 3.0828413949333608], [-1.7791129684882536, -3.7837459912887872,
-0.93771394776597372, -2.4396818013745336], [3.7724216979832228, 1.3354698832167236, 4.4778423096546298,
3.3178355959010588]], [[4.1570198977003194, -1.4482811362101113, 4.7418722485913811, -1.7134079314039576],
[-3.9415515512997623, 2.479810994419001, 3.53014195265167, 4.0686466788459725], [-0.0011085503845311706,
-1.9052407432771523, 4.9068043014278278, -1.9049925369925189]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[-1.3409558722385559, -1.0402622299045123, 1.5963994738883607, -1.3364121296870544],
[-0.2620424872505856, -4.6902200792187969, -1.3998167844388623, -1.7167005045938151], [0.76746362747472596,
-0.73899748123591691, -0.49128612835996144, -1.2069048096597954]], [[5.4876247962727875, 3.4985988009114131,
-0.94612932487848034, 0.84428790726567526], [-1.6452358548201755, -1.3944934184479576, -0.23410029116552836,
-2.7279709849905682], [1.3841978960443853, -1.8677343416375281, -9.9871184381116578, 0.52830049538270318]]],
[[[-1.4288916262714828, 0.74273256887241457, -1.7251766157273958, 1.1704746879597985], [0.7190359574598254,
4.5192802791594398, -1.0562136034406204, 0.59900218879101441], [0.067377837523838524, -0.23905541066619487,
3.5884295224641907, 3.0760936787104383]], [[4.925025794153826, -0.36440974285909289, 0.85496455853216458,
1.3051239685641005], [-15.552219974623853, -0.95471722443388563, -4.0127963811817313, -20.318976547219506],
[-36.305508523717684, -1.3920945734895036, -0.28370243976458448, -1.8989697516239186]]], [[[17.381775523816618,
-3.1042470121287091, -2.3536828758399353, 0.4554240343159735], [-0.15160643655945524, -0.1589615788698745,
4.7717905568386882, -0.79136572952443918], [0.96428500247258431, 2.126313060447357, -0.59930958490084085,
0.88249776955784298]], [[-0.074345848878542772, 0.39670234500032192, -0.34315134741406994, 2.6086498297563501],
[0.67622635825267541, -0.95846604692286597, -0.37188817071462443, 1.1260833737878855], [-453.27383983445719,
-2.5998001212984949, -0.55063113108412265, -1.1181589858251946]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[1.4883813820024607, -0.85733008219827367, 1.3325501628083192,
0.8740015181080979], [1.0276735336128884, 3.1084492041565728, 1.3878470993128227, -9.0899827810220994],
[-1.1136986839535203, 1.2685595103301153, 0.11716579738756713, -1.283011180546296]], [[-5.905247455610807,
-0.25960260695070153, 1.330328377807082, 0.53855767479752115], [1.9244958125723541, 0.10877125828823039,
-0.8537959582101573, 1.4975138313746732], [-0.64257609329187038, 2.2097456347181117, 14.114764923235574,
0.34709961745346513]]], [[[-0.37309208888073375, 0.72711713646590848, 1.363797211723011, 3.0831630441799445],
[-0.31424729041971439, -3.317544737333189, 0.52427419082751925, -1.6998563595216394], [0.96339762572412757,
0.85023786030692383, -5.9414193922821363, -6.1934354575719004]], [[7.7683939284098056, -0.2158137280918167,
-0.82381170182612518, -3.3565502581680677], [18.54801176355144, -0.07705780674126915, 5.1974846175750971,
0.91835802805387523], [31.922001527224523, -2.8821883057077038, 5.5863153381622421, -0.77340516383820268]]],
[[[13.743057528229171, 2.1350382516299056, -1.0771186576814313, -1.0505769825869253], [2.3018036881512924,
-1.2422857788364075, -4.4737136920314677, -0.83544870030685348], [0.46197762126495856, 0.8828729880194196,
-0.50236527129546937, -0.92042078292356966]], [[-1.0816032827098874, -1.0925231135380433, -0.38457184375036541,
2.5716503484260449], [-0.22083166579761651, -1.6175867828867223, -1.3308828760572351, 1.0414532045906044],
[763.29258672131175, -1.7413080865559627, -0.54388679056390776, -2.4551503838225086]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_expandedData_rank0_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(-4.16921840294)+(1.-msk_arg0)*(4.13078960119)
arg1=Symbol(shape=())
res=arg0/arg1
s1=numpy.array(-3.74029681078)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*(1.11467581688)+(1.-msk_ref)*(-1.10440155158)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_expandedData_rank0_Symbol_rank1(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(0.893893990136)+(1.-msk_arg0)*(0.567432073109)
arg1=Symbol(shape=(2,))
res=arg0/arg1
s1=numpy.array([-0.38687332266391294, 1.9399003858649468])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([-2.3105599113962496, 0.4607937586121677])+(1.-msk_ref)*numpy.array([-1.4667128485410923,
0.29250577877272954])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_expandedData_rank0_Symbol_rank2(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(4.73905345325)+(1.-msk_arg0)*(-4.15656795792)
arg1=Symbol(shape=(4, 5))
res=arg0/arg1
s1=numpy.array([[4.4998386312884939, 0.78446231481643913, -1.6813079285045305, -3.1798436625479907,
-2.0944868186665833], [4.4953342126454849, 1.6650442515799515, -0.91898434961724451, -1.1670746485185877,
4.5503544844109296], [1.9273082092830283, 1.6079265632411186, 2.6574428427115047, -3.0448375461618085,
3.5797303717654643], [-0.075958261118915793, -1.5548980284162739, 3.7360078828652803, -1.3318521748477563,
2.4342937381441239]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[1.0531607556532867, 6.0411486488736692, -2.8186707341949422, -1.4903416507741973,
-2.2626322643857333], [1.0542160446968702, 2.8462027052727157, -5.15683804106071, -4.0606258213740682,
1.0414690700430431], [2.4588975600371681, 2.9473071479664283, 1.783313408317817, -1.5564224302276781,
1.3238576543708847], [-62.39023094310253, -3.0478226653052989, 1.2684805819015268, -3.5582428311065093,
1.9467878419876474]])+(1.-msk_ref)*numpy.array([[-0.9237148925791, -5.2986203153641043, 2.472222897099563,
1.3071611057108705, 1.9845281053476089], [-0.92464047416795137, -2.4963708646060252, 4.5230018984057123,
3.5615270738680049, -0.91346025285808652], [-2.1566700841637609, -2.5850483802851589, -1.5641231830532274,
1.3651197789397753, -1.1611399536423701], [54.721736605010705, 2.6732093564730848, -1.1125693757198694,
3.1208928711619754, -1.7075046831007554]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_expandedData_rank0_Symbol_rank3(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(4.78787757499)+(1.-msk_arg0)*(-3.28976848057)
arg1=Symbol(shape=(6, 2, 2))
res=arg0/arg1
s1=numpy.array([[[1.740883116607046, -2.2503321522792152], [-1.5061945225102802, 2.4982952052110088]],
[[-2.7983305802801484, 2.1687543854796365], [1.643320759290833, -1.4653409727441757]], [[3.2175020834207952,
3.7650334271118187], [1.8014810554950147, 0.12236435291525538]], [[-3.1233989028943911, 0.7696710585002986],
[-4.2753667310345724, 0.88632267436679513]], [[-4.5134534247108533, 2.7836276968220837], [-1.1959116036265316,
-0.39370816860750679]], [[1.5027698132905005, -2.504876271197424], [-4.7995705146222427, 0.81056999509517258]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[2.7502579175572066, -2.1276315010390197], [-3.1787909884378931, 1.9164578969705082]],
[[-1.7109763974029355, 2.2076624292018749], [2.9135380587879744, -3.2674153415800777]], [[1.4880728748121901,
1.2716693404400943], [2.6577451705005974, 39.128042284553942]], [[-1.532906210139654, 6.2206802790783859],
[-1.1198752940269061, 5.4019576768818869]], [[-1.0608013696955167, 1.7200136284231737], [-4.003538021097242,
-12.160981043203339]], [[3.1860352348351633, -1.9114227836496733], [-0.9975637529240533,
5.9068033654860717]]])+(1.-msk_ref)*numpy.array([[[-1.889712439158538, 1.4619035137730338], [2.1841591052200662,
-1.3168053453857391]], [[1.1756182431612747, -1.5168930620264134], [-2.0019028311874436, 2.2450532277225537]],
[[-1.0224604041517171, -0.87376873121071352], [-1.8261465867424562, -26.885023311092727]], [[1.0532655555218566,
-4.2742525449554005], [0.76947047763019516, -3.7117052014083791]], [[0.72888056461647699, -1.1818277581909278],
[2.7508458573337604, 8.3558552828834873]], [[-2.1891366538499089, 1.3133457003051041], | |
"23.62.225.", # Plano
"24.173.213.", # Plano
"47.185.248.", # Plano
"50.84.81.", # Plano
"50.84.110.", # Plano
"65.42.136.", # Plano
"65.69.239.", # Plano
"65.71.223.", # Plano
"66.138.79.", # Plano
"66.140.20.", # Plano
"66.140.197.", # Plano
"66.141.151.", # Plano
"66.143.7.", # Plano
"67.65.12.", # Plano
"67.66.13.", # Plano
"68.20.41.", # Plano
"68.20.53.", # Plano
"68.22.119.", # Plano
"68.72.56.", # Plano
"68.88.24.", # Plano
"68.88.169.", # Plano
"68.90.204.", # Plano
"68.93.19.", # Plano
"68.93.208.", # Plano
"23.113.179.", # Richardson
"23.123.121.", # Richardson
"23.126.17.", # Richardson
"24.27.103.", # Richardson
"45.23.148.", # Richardson
"47.186.44.", # Richardson
"47.186.233.", # Richardson
"50.84.237.", # Richardson
"63.199.94.", # Richardson
"63.201.89.", # Richardson
"63.203.212.", # Richardson
"63.203.213.", # Richardson
"63.204.90.", # Richardson
"63.204.168.", # Richardson
"63.207.220.", # Richardson
"64.109.192.", # Richardson
"64.123.188.", # Richardson
"64.148.35.", # Richardson
"64.149.192.", # Richardson
"64.217.8.", # Richardson
"64.218.64.", # Richardson
"64.252.212.", # Richardson
"64.252.213.", # Richardson
"64.252.214.", # Richardson
"64.252.215.", # Richardson
"64.252.216.", # Richardson
"64.252.217.", # Richardson
"64.252.218.", # Richardson
"64.252.219.", # Richardson
"64.252.220.", # Richardson
"64.252.221.", # Richardson
"64.252.222.", # Richardson
"64.252.223.", # Richardson
"64.252.224.", # Richardson
"64.252.225.", # Richardson
"64.252.226.", # Richardson
"64.252.227.", # Richardson
"64.252.228.", # Richardson
"64.252.229.", # Richardson
"64.252.230.", # Richardson
"64.252.231.", # Richardson
"64.252.232.", # Richardson
"64.252.233.", # Richardson
"64.252.234.", # Richardson
"64.252.235.", # Richardson
"64.252.236.", # Richardson
"64.252.237.", # Richardson
"64.252.238.", # Richardson
"65.64.221.", # Richardson
"65.64.222.", # Richardson
"65.64.223.", # Richardson
"65.65.49.", # Richardson
"65.65.133.", # Richardson
"65.68.3.", # Richardson
"65.68.4.", # Richardson
"65.69.103.", # Richardson
"65.70.92.", # Richardson
"65.70.203.", # Richardson
"66.73.64.", # Richardson
"66.73.74.", # Richardson
"66.136.184.", # Richardson
"66.136.185.", # Richardson
"66.136.186.", # Richardson
"66.136.187.", # Richardson
"66.137.185.", # Richardson
"66.138.90.", # Richardson
"66.138.5.", # Richardson
"66.142.202.", # Richardson
"66.226.197.", # Richardson
"67.38.82.", # Richardson
"67.39.101.", # Richardson
"67.64.87.", # Richardson
"67.67.134.", # Richardson
"67.115.107.", # Richardson
"67.117.108.", # Richardson
"67.121.40.", # Richardson
"67.122.104.", # Richardson
"67.123.146.", # Richardson
"67.127.68.", # Richardson
"68.23.31.", # Richardson
"68.72.0.", # Richardson
"68.72.114.", # Richardson
"68.72.157.", # Richardson
"68.72.158.", # Richardson
"68.89.77.", # Richardson
"68.91.19.", # Richardson
"68.94.48.", # Richardson
"68.95.210.", # Richardson
"68.122.157.", # Richardson
"23.139.64.", # Rio Grande City
"63.174.141.", # Rocksprings
"66.235.81.", # Rosenberg
"52.144.99.", # Round Rock
"47.184.162.", # Sachse
"8.9.196.", # San Antonio
"12.7.34.", # San Antonio
"12.7.35.", # San Antonio
"12.27.88.", # San Antonio
"12.190.120.", # San Antonio
"12.207.43.", # San Antonio
"12.211.20.", # San Antonio
"15.105.28.", # San Antonio
"15.105.182.", # San Antonio
"15.109.33.", # San Antonio
"15.109.99.", # San Antonio
"15.110.110.", # San Antonio
"15.114.210.", # San Antonio
"15.115.59.", # San Antonio
"15.116.44.", # San Antonio
"15.117.166.", # San Antonio
"15.118.122.", # San Antonio
"15.118.179.", # San Antonio
"15.118.251.", # San Antonio
"15.120.12.", # San Antonio
"15.120.71.", # San Antonio
"15.120.150.", # San Antonio
"15.120.172.", # San Antonio
"15.121.102.", # San Antonio
"15.122.12.", # San Antonio
"15.122.23.", # San Antonio
"15.126.8.", # San Antonio
"15.127.180.", # San Antonio
"15.128.234.", # San Antonio
"15.128.235.", # San Antonio
"15.128.254.", # San Antonio
"15.129.7.", # San Antonio
"15.129.118.", # San Antonio
"15.131.196.", # San Antonio
"15.131.197.", # San Antonio
"15.131.198.", # San Antonio
"15.131.199.", # San Antonio
"15.131.200.", # San Antonio
"15.132.18.", # San Antonio
"15.132.71.", # San Antonio
"15.132.72.", # San Antonio
"15.133.222.", # San Antonio
"15.134.233.", # San Antonio
"15.134.234.", # San Antonio
"15.135.133.", # San Antonio
"15.135.219.", # San Antonio
"15.136.111.", # San Antonio
"15.137.122.", # San Antonio
"15.137.172.", # San Antonio
"15.138.0.", # San Antonio
"15.138.1.", # San Antonio
"15.140.41.", # San Antonio
"15.141.27.", # San Antonio
"15.142.164.", # San Antonio
"15.143.78.", # San Antonio
"15.143.175.", # San Antonio
"15.145.145.", # San Antonio
"15.145.242.", # San Antonio
"15.146.97.", # San Antonio
"15.149.7.", # San Antonio
"15.149.217.", # San Antonio
"15.149.233.", # San Antonio
"15.150.12.", # San Antonio
"15.150.168.", # San Antonio
"15.150.169.", # San Antonio
"15.152.9.", # San Antonio
"15.153.121.", # San Antonio
"15.153.133.", # San Antonio
"15.154.136.", # San Antonio
"15.154.137.", # San Antonio
"15.155.5.", # San Antonio
"15.155.249.", # San Antonio
"15.156.247.", # San Antonio
"15.156.248.", # San Antonio
"15.157.163.", # San Antonio
"15.158.33.", # San Antonio
"15.158.179.", # San Antonio
"15.159.219.", # San Antonio
"15.160.97.", # San Antonio
"15.160.98.", # San Antonio
"15.160.99.", # San Antonio
"15.160.200.", # San Antonio
"15.160.201.", # San Antonio
"15.160.202.", # San Antonio
"15.161.146.", # San Antonio
"15.161.233.", # San Antonio
"15.162.156.", # San Antonio
"15.162.246.", # San Antonio
"15.162.247.", # San Antonio
"15.162.248.", # San Antonio
"15.162.249.", # San Antonio
"15.162.231.", # San Antonio
"15.165.19.", # San Antonio
"15.165.122.", # San Antonio
"15.167.62.", # San Antonio
"15.168.166.", # San Antonio
"15.169.34.", # San Antonio
"15.169.145.", # San Antonio
"15.169.231.", # San Antonio
"15.170.39.", # San Antonio
"15.170.117.", # San Antonio
"15.173.25.", # San Antonio
"15.173.118.", # San Antonio
"15.173.231.", # San Antonio
"15.174.40.", # San Antonio
"15.176.53.", # San Antonio
"15.176.79.", # San Antonio
"15.176.80.", # San Antonio
"15.176.81.", # San Antonio
"15.176.129.", # San Antonio
"15.177.123.", # San Antonio
"15.176.164.", # San Antonio
"15.177.176.", # San Antonio
"15.177.254.", # San Antonio
"15.178.149.", # San Antonio
"15.180.1.", # San Antonio
"15.180.224.", # San Antonio
"15.181.151.", # San Antonio
"15.181.152.", # San Antonio
"15.181.177.", # San Antonio
"15.183.87.", # San Antonio
"15.183.211.", # San Antonio
"15.184.201.", # San Antonio
"15.188.81.", # San Antonio
"15.188.237.", # San Antonio
"15.189.87.", # San Antonio
"15.189.88.", # San Antonio
"15.190.90.", # San Antonio
"15.190.132.", # San Antonio
"15.191.51.", # San Antonio
"15.191.124.", # San Antonio
"15.193.69.", # San Antonio
"15.193.70.", # San Antonio
"15.193.183.", # San Antonio
"15.193.203.", # San Antonio
"15.204.130.", # San Antonio
"15.204.186.", # San Antonio
"15.208.102.", # San Antonio
"15.209.138.", # San Antonio
"15.211.169.", # San Antonio
"15.213.214.", # San Antonio
"15.213.241.", # San Antonio
"15.214.133.", # San Antonio
"15.214.237.", # San Antonio
"15.216.72.", # San Antonio
"15.216.199.", # San Antonio
"15.219.34.", # San Antonio
"12.219.40.", # San Antonio
"15.221.80.", # San Antonio
"15.224.59.", # San Antonio
"15.224.247.", # San Antonio
"15.225.148.", # San Antonio
"15.226.90.", # San Antonio
"15.227.75.", # San Antonio
"15.227.214.", # San Antonio
"15.234.104.", # San Antonio
"15.235.202.", # San Antonio
"15.235.203.", # San Antonio
"15.236.92.", # San Antonio
"15.237.79.", # San Antonio
"15.239.64.", # San Antonio
"15.243.228.", # San Antonio
"15.243.229.", # San Antonio
"15.243.241.", # San Antonio
"15.244.168.", # San Antonio
"15.248.37.", # San Antonio
"15.248.238.", # San Antonio
"15.250.151.", # San Antonio
"15.251.2.", # San Antonio
"15.251.230.", # San Antonio
"15.252.43.", # San Antonio
"15.252.185.", # San Antonio
"15.255.94.", # San Antonio
"15.255.200.", # San Antonio
"15.255.204.", # San Antonio
"40.141.126.", # San Antonio
"50.84.228.", # San Antonio
"50.95.50.", # San Antonio
"52.239.178.", # San Antonio
"64.129.98.", # San Antonio
"64.215.241.", # San Antonio
"67.65.14.", # San Antonio
"67.155.93.", # San Antonio
"68.98.252.", # San Antonio
"24.155.227.", # San Marcos
"45.21.35." # Schertz
"64.134.224.", # San Marcos
"66.90.132.", # San Marcos
"38.65.97.", # Schertz
"45.21.35.", # Schertz
"67.11.166.", # Schertz
"67.78.77.", # Seguin
"67.179.27.", # Seguin
"47.182.60.", # Sherman
"64.22.112.", # Spring
"65.174.248.", # Stafford
"67.21.188.", # Stephenville
"12.205.32.", # Sugar Land
"50.162.51.", # Sugar Land
"50.171.38.", # Sugar Land
"64.61.53.", # Sugar Land
"24.162.122.", # Temple
"24.119.145.", # Texarkana
"23.125.229.", # Tyler
"66.76.117.", # Tyler
"66.76.230.", # Tyler
"67.216.244.", # Tyler
"68.69.62.", # Tyler
"24.32.200.", # Vernon
"66.76.84.", # Victoria
"23.123.184.", # Waco
"65.65.52.", # Waco
"12.94.58.", # Weatherford
"66.69.161.", # Wichita Falls
"50.56.36.", # Windcrest
]
# random element from each list
def sign_up_page():
raise NotImplementedError()
def set_random_seed(seed, n_gpu):
np.random.seed(seed)
torch.manual_seed(seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(seed)
def adjust_seq_length_to_model(length, max_sequence_length):
if length < 0 and max_sequence_length > 0:
length = max_sequence_length
elif 0 < max_sequence_length < length:
length = max_sequence_length # No generation bigger than model size
elif length < 0:
length = MAX_LENGTH # avoid infinite loop
return length
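# Behaviour sketch (illustrative, not in the original file):
#   adjust_seq_length_to_model(-1, 1024)   -> 1024        # no length requested: use model size
#   adjust_seq_length_to_model(2000, 1024) -> 1024        # request capped at model size
#   adjust_seq_length_to_model(200, 1024)  -> 200         # request fits within the model
#   adjust_seq_length_to_model(-1, -1)     -> MAX_LENGTH  # neither limit known: fall back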
def generate_text(prompt_text: str, k=50, p=0.9, seq_length=150, seed=None, temperature=1.0, num_return_sequences=1):
""" Create a synthetic text sequence using a pretrained model. """
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu | |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Author: <NAME>
# Creation date: 2018-02-17 (year-month-day)
"""
Python implementations of common optimization test functions, as well as dictionaries storing their properties.
"""
import numpy as np
class ObjectiveFunction:
def __init__(self, function_token):
self.bounding_box_key = "bounding_box"
self._function_token = function_token.lower()
if self.function_token not in function_info.keys():
raise ValueError("Function not found in the dictionary containing function information; provided value: %s")
def __call__(self, x):
# globals()["myfunction"]() Maybe need this
locals()[self.function_token](x)
@property
def function_token(self):
return self._function_token
@property
def bounding_box(self):
return function_info.get(self.function_token).get(self.bounding_box_key)
def gramacy_lee(x):
"""
A standard 1-dimensional global optimization test function with many local minima.
f(x) = sin(10 pi x) / (2 x) + (x - 1) ^ 4
There is a global minimum at approximately x = 0.55.
:param x: a float in [0.5, 2.5]
:return: f(x)
"""
try:
x = x.A.flatten()
except AttributeError:
x = x.flatten()
if not all([0.5 <= xx <= 2.5 for xx in x]):
raise ValueError("Provided values of x not in bounds for this objective function. See documentation.")
y = np.sin(10.0 * np.pi * x)
y /= 2 * x
y += np.power(x - 1.0, 4.0)
return y
def modified_branin(x):
fx = branin(x)
try:
x = x.A
except AttributeError:
pass
x1 = x[:, 0]
fx += 5 * x1 + 16.64099
return fx
def branin(x):
"""
A standard 2-dimensional global optimization test function with a long, shallow valley.
f(x) = (x2 - (5.1 / 4) (x1 / pi)^2 + (5 / pi) x1 - 6)^2 + 10 (1 - (8 pi)^-1) cos(x1) + 10
(The general form doesn't specify coefficients; these are the standard values.)
There are global minima at [-pi, 12.275], [pi, 2.275], and [9.42478, 2.475]
:param x: tuple such that
x[0] in [-5, 10]
x[1] in [0, 15]
:return: f(x)
"""
try:
x = x.A
except AttributeError:
pass
x1 = x[:, 0]
x2 = x[:, 1]
x1_test = np.logical_or(x1 < -5.0, x1 > 10.0).any()
x2_test = np.logical_or(x2 < 0.0, x2 > 15.0).any()
if x1_test or x2_test:
raise ValueError("Provided values of x not in bounds for this objective function. See documentation.")
fx = 10
fx += np.power(x2 - 5.1 / 4 * np.power(x1 / np.pi, 2.0) + 5 / np.pi * x1 - 6, 2.0)
fx += 10 * (1 - np.reciprocal(8 * np.pi)) * np.cos(x1)
return fx
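# Quick sanity check (illustrative, not part of the original module): evaluating
# branin at the three known minimizers should give values close to 0.397887.
#
#     x_star = np.array([[-np.pi, 12.275], [np.pi, 2.275], [9.42478, 2.475]])
#     print(branin(x_star))  # each entry should be approximately 0.397887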
def goldstein_price(x):
"""
A standard 2-dimensional global optimization test function with a large, nearly-flat region.
There is a global minimum at (0, -1).
:param x:
:return:
"""
try:
x = x.A
except AttributeError:
pass
test = np.logical_and(x <= 2.0, -2.0 <= x)
if not test.all():
raise ValueError("Provided value of x not in bounds for this objective function. See documentation.")
x1 = x[:, 0]
x2 = x[:, 1]
factor1a = (x1 + x2 + 1) ** 2
factor1b = 19 - 14 * x1 + 3 * x1 ** 2 - 14 * x2 + 6 * x1 * x2 + 3 * x2 ** 2
factor1 = 1 + factor1a * factor1b
factor2a = np.power(2 * x1 - 3 * x2, 2.0)
factor2b = 18 - 32 * x1 + 12 * x1 ** 2 + 48 * x2 - 36 * x1 * x2 + 27 * x2 ** 2
factor2 = 30 + factor2a * factor2b
out = factor1 * factor2
return np.array(out).reshape((-1,))
def bukin_6(x):
try:
x = x.A
except AttributeError:
pass
x1 = x[:, 0]
x2 = x[:, 1]
x1_test = np.logical_or(x1 < -15.0, x1 > -5.0).any()
x2_test = np.logical_or(x2 < -3.0, x2 > 3.0).any()
if x1_test or x2_test:
raise ValueError("Provided values of x not in bounds for this objective function. See documentation.")
summand = 100 * np.sqrt(np.abs(x2 - 0.01 * x1 ** 2))
summand += 0.01 * np.abs(x1 - 10)
return summand
def sphere(x):
try:
x = x.A
except AttributeError:
pass
x_test = np.logical_or(x < -5.12, x > 5.12)
if x_test.any():
raise ValueError("Provided values of x not in bounds for this objective function. See documentation.")
z = np.square(x)
fx = z.sum(axis=1)
return fx
def camel_hump_6(x):
try:
x = x.A
except AttributeError:
pass
x1 = x[:, 0]
x2 = x[:, 1]
term1 = (4.0 - 2.1 * x1 ** 2 + x1 ** 4 / 3.0) * x1 ** 2  # (4 - 2.1*x1^2 + x1^4/3) * x1^2 term of the six-hump camel function
term2 = x1 * x2
term3 = (-4.0 + 4.0 * x2 ** 2) * x2 ** 2
return term1 + term2 + term3
def hartmann_6(x):
test = np.logical_or(x < 0.0, x > 1.0)
if test.any():
raise ValueError("Provided values of x not in bounds for this objective function. See documentation.")
alpha = np.array([1.0, 1.2, 3.0, 3.2]).reshape((4,))
A = np.array([[10.0, 3, 17, 3.5, 1.7, 8],
[0.05, 10, 17, 0.1, 8, 14],
[3, 3.5, 1.7, 10, 17, 8],
[17, 8, 0.05, 10, 0.1, 14]])
P = 1e-4 * np.array([[1312, 1696, 5569, 124, 8283, 5886],
[2329, 4135, 8307, 3736, 1004, 9991],
[2348, 1451, 3522, 2883, 3047, 6650],
[4047, 8828, 8732, 5743, 1091, 381]])
out = []
for i in range(len(x)):
row_mat = np.array([x[i, :] for _ in range(4)]).reshape((4, 6))
inner = (A * (row_mat - P) ** 2.0).sum(axis=1)
# print(inner)
outer = alpha * np.exp(-inner)
# print(outer)
y = - outer.sum()
out.append((float(y)))
return np.array(out)
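def _example_hartmann_6_minimum_check():
    """Hedged sketch (not part of the original module): evaluate hartmann_6 at
    the minimizer commonly quoted in the test-function literature (these
    coordinates are an external reference value, not taken from this file);
    the result should be close to -3.32237."""
    x_star = np.array([[0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 0.6573]])
    return hartmann_6(x_star)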
def michalewicz_10(x):
m = 10
out = []
for i in range(len(x)):
prod1 = np.sin(x[i, :])
z = np.arange(1, m + 1)  # Michalewicz indices run 1..d (here d == m == 10)
prod2a = np.sin(z * x[i, :] ** 2 / np.pi)
prod2 = np.power(prod2a, 2 * m)
y = -sum(prod1 * prod2)
out.append(y)
return np.array(out)
def forrester(x):
try:
x = x.A
except AttributeError:
pass
if np.logical_or(0.0 > x, x > 1.0).any():
raise ValueError("Provided values of x not in bounds for this objective function. See documentation.")
y = np.square(6 * x - 2) * np.sin(12 * x - 4)
return y.flatten()
def colville(x):
try:
x = x.A
except AttributeError:
pass
if np.logical_or(x < -10, x > 10).any():
raise ValueError("Provided values of x not in bounds for this objective function. See documentation.")
out = []
for i in range(len(x)):
x1 = x[i, 0]
x2 = x[i, 1]
x3 = x[i, 2]
x4 = x[i, 3]
y = 100 * (x1 ** 2 - x2) ** 2
y += (x1 - 1.0) ** 2
y += (x3 - 1.0) ** 2
y += 90 * (x3 ** 2 - x4) ** 2
y += 10.1 * ((x2 - 1.0) ** 2 + (x4 - 1.0) ** 2)
y += 19.8 * (x2 - 1.0) * (x4 - 1.0)
out.append(float(y))
return np.array(out)
def holder_table(x):
try:
x = x.A
except AttributeError:
pass
if np.logical_or(x < -10, x > 10).any():
raise ValueError("Provided values of x not in bounds for this objective function. See documentation.")
x1 = x[:, 0]
x2 = x[:, 1]
inner = np.sin(x1)
inner *= np.cos(x2)
inner *= np.exp(np.abs(1 - np.sqrt(x1 ** 2 + x2 ** 2) / np.pi))
out = - np.abs(inner)
return out
def rosenbrock(x):
try:
x = x.A
except AttributeError:
pass
if np.logical_or(x < -10, x > 10).any():
raise ValueError("Provided values of x not in bounds for this objective function. See documentation.")
x1 = x[:, 0]
x2 = x[:, 1]
out = np.square(1 - x1)
out += 100 * np.square(x2 - np.square(x1))
return out.flatten()
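def _example_random_search(name, n_samples=10000, seed=0):
    """Hedged sketch (not part of the original module): a naive random-search
    baseline driven by the bounding boxes stored in function_info below.
    Returns the best sampled point and its objective value; useful as a quick
    sanity check that each entry's 'fmin' is approachable from within its box."""
    rng = np.random.RandomState(seed)
    info = function_info[name]
    box = np.array(info["bounding_box"], dtype=float)   # shape (d, 2)
    xs = rng.uniform(box[:, 0], box[:, 1], size=(n_samples, box.shape[0]))
    ys = info["objective_function"](xs)
    best = int(np.argmin(ys))
    return xs[best], ys[best]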
function_info = {
"goldstein_price": {
"objective_function": goldstein_price,
"bounding_box": [[-2.0, 2.0], [-2.0, 2.0]],
"argmin": np.array([[0.0, -1.0]]),
"fmin": 3.0,
"percentile_5": 100.153513
},
"branin": {
"objective_function": branin,
"bounding_box": [[-5, 10], [0, 15]],
"argmin": np.array([[-np.pi, 12.275], [np.pi, 2.275], [9.42478, 2.475]]),
"fmin": 0.397887,
"percentile_5": 3.064375
},
"gramacy_lee": {
"objective_function": gramacy_lee,
"bounding_box": [[0.5, 2.5]],
"argmin": np.array([0.548563444114526]),
"fmin": -0.869011134989500,
"percentile_5": -0.526385
},
"modified_branin": {
"objective_function": modified_branin,
"bounding_box": [[-5, 10], [0, 15]],
"argmin": np.array([[-3.68928444, 13.62998588]]),
"fmin": 0.0,
"percentile_5": 11.254351
},
"bukin_6": {
"objective_function": bukin_6,
"bounding_box": [[-15, -5], [-3, 3]],
"argmin": np.array([-10, 1]),
"fmin": 0.0,
"percentile_5": 39.130709
},
"sphere": {
# The sphere function can have arbitrary dimension; this is for dimension 6
"objective_function": sphere,
"bounding_box": [[-5.12, 5.12] for _ in range(6)],
"argmin": [[0.0] for _ in range(6)],
"fmin": 0.0,
"percentile_5": 22.401199
},
"camel_hump_6": {
"objective_function": camel_hump_6,
"bounding_box": [[-3, 3], [-2, 2]],
"argmin": [[0.0898, -0.7126], [-0.0898, 0.7126]],
"fmin": -1.0316,
"percentile_5": -0.395632
},
"hartmann_6": {
"objective_function": hartmann_6,
"bounding_box": [[0, 1] for _ in | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This file contains the experiments in the active scenario of [Haddenhorst2021].
To rerun these experiments, UNCOMMENT the corresponding lines in "Part 4" below
and then execute this file.
[Haddenhorst2021]: <NAME>, <NAME>, <NAME> and E.Hüllermeier, Testification of Condorcet Winners in Dueling Bandits, Proceedings of UAI, 2021
"""
import ReciprocalRelations as rr
import TestEnvironment as tenv
import TestingComponent as tc
import numpy as np
import DeterministicTestingComponent as dtc
import SELECT as select
import math
import matplotlib.pyplot as plt
nr_items, decimal_precision = 5,3
np.set_printoptions(precision=3)
###############################################################################
# PART 1: Define 'find_CW_with_symm_test', 'SELECT_then_verify'
# and 'buf_SPRT'
###############################################################################
def find_CW_with_symm_test(TE, h, gamma):
"""
This is an implementation of the Algorithm NTS from the paper.
"""
SymmTC = tc.Symmetric_TestingComponent(TE.N, TE.R, h=float(h), gamma=gamma)
sampling_strategy = dtc.Optimal_Deterministic_CW_Tester(len(TE.N))
for t in range(500000):
[i,j] = sampling_strategy.getQuery()
while not SymmTC.G.has_edge(i,j) and not SymmTC.G.has_edge(j,i):
feedback = TE.pullArmPair(i,j)
SymmTC.update(i,j,feedback)
if SymmTC.TC():
SymmTC.DC()
return SymmTC.find_CW(), SymmTC.time
sampling_strategy.giveFeedback(feedback)
def SELECT_then_verify(TE,h,gamma, variant = "Hoeffding"):
"""
This is the implementation of SELECT-then-verify from the paper.
The internal hypothesis test for verifying the output of SELECT can either
be the non-sequential Hoeffding-bound test (with 'variant="Hoeffding"')
or the corresponding SPRT (with 'variant="SPRT"').
"""
assert variant=="Hoeffding" or variant=="SPRT", "'variant' has to be 'Hoeffding' or 'SPRT'"
m = TE.P.m
epsilon = -np.log(0.5*gamma)/np.log(np.log2(m))
m_h = math.floor((1+epsilon)*math.log(2)/2*math.log(math.log(m,2),2)/(h*h))+1
CW_M, itera = select.select(list(np.arange(m)), m_h, TE.pullArmPair)
CW_M = int(CW_M)
# print("Output of SELECT:",CW_M,"(after ",itera," iterations)")
# TE.show()
if variant=="Hoeffding":
t0 = np.ceil(2/(h**2) * np.log(2*(m-1)/gamma))
w = np.zeros(m)
for j in range(0,m):
if CW_M != j:
for t in range(0,int(t0)):
buf = TE.pullArmPair(CW_M,j)
w[j] += buf #Increases by 1 if CW_M has won
if w[j]/t0 < 0.5:
# print("w",w)
return(False)
return(CW_M)
if variant=="SPRT":
for j in range(0,m):
if CW_M != j:
winner = buf_SPRT(TE,h,gamma/(2*(m-1)),CW_M,j)
if winner != CW_M:
return(False)
return(CW_M)
def buf_SPRT(TE,h,gamma,i,j):
"""
This function conducts a SPRT (with parameters h,gamma) in order to decide
whether the (i,j)-entry of TE.P is >1/2 or <1/2.
"""
N = 1
C = (1/(2*N)) * np.ceil(np.log( (1-gamma) / gamma ) / np.log( (0.5+h) / (0.5-h) ))
w = TE.pullArmPair(i,j)
while 0.5-C < w/N and w/N< 0.5+C:
w += TE.pullArmPair(i,j)
C = (1/(2*N)) * np.ceil(np.log( (1-gamma) / gamma ) / np.log( (0.5+h) / (0.5-h) ))
N = N+1
if w/N >= 0.5+C:
return(i)
else:
return(j)
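# Hedged sketch (not part of the original experiments): a minimal stand-in
# environment with Bernoulli feedback, used only to illustrate how buf_SPRT
# decides a single pairwise comparison. The class below is hypothetical and
# only mimics the interface buf_SPRT relies on, namely a pullArmPair(i, j)
# method that returns 1 if arm i wins the duel and 0 otherwise.
class _ToyPairEnvironment:
    def __init__(self, p_win=0.65, seed=0):
        self.p_win = p_win                       # probability that arm 0 beats arm 1
        self.rng = np.random.RandomState(seed)
    def pullArmPair(self, i, j):
        arm0_wins = self.rng.rand() < self.p_win
        return int(arm0_wins) if (i, j) == (0, 1) else int(not arm0_wins)
def _example_buf_SPRT():
    toy = _ToyPairEnvironment(p_win=0.65)
    # With h=0.1 and gamma=0.05, the SPRT should return 0 with high probability.
    return buf_SPRT(toy, 0.1, 0.05, 0, 1)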
###############################################################################
# PART 3: Define the evaluation functions 'experiments_one', 'experiment_two'
# and 'experiment_three', as well as 'generate_main_figure'.
###############################################################################
def experiments_one(m,h,gamma,nr_iterations=100, real_h = 0.05, has_CW = "No",verify_variant="SPRT"):
"""
This function compares NTS with SELECT-then-verify. It is required for the
function "experiment_two".
"""
assert has_CW=="No" or has_CW=="Yes" or has_CW=="Both", "'has_CW' has to be 'Yes','No' or 'Both'."
results = dict()
results["NTS_output"] =list()
results["NTS_time"] = list()
results["S_t_verify_output"] = list()
results["S_t_verify_time"] = list()
results["Truth"] = list()
for iteration in range(0,nr_iterations):
# Step 1: Sample a reciprocal relation, create a TE and a dictionary to save results.
if has_CW == "No":
P, buf = rr.sampleCW_boundedFromOneHalf(m,real_h,decimal_precision=3)
elif has_CW == "Yes":
P = rr.sampleNotCW_boundedFromOneHalf(m,real_h,max_tries=10000,decimal_precision=3)
else:
P = rr.sampleReciprocal(m,decimal_precision=3)
P = rr.__EnforceBoundedFromOneHalf__(P,real_h)
results["Truth"].append(rr.get_CW(P))
# Step 2: Run and log our NTS
TE = tenv.TestEnvironment(P)
current_output = find_CW_with_symm_test(TE, h, gamma)
results["NTS_output"].append(current_output[0])
results["NTS_time"].append(TE.time)
# Step 3: Run and log SELECT_then_verify
TE = tenv.TestEnvironment(P)
current_output = SELECT_then_verify(TE, h, gamma,variant=verify_variant)
results["S_t_verify_output"].append(current_output)
results["S_t_verify_time"].append(TE.time)
# Step 4: Calculate the accuracy of both algorithms and return the results
nr_correct_NTS ,nr_correct_S_t_verify = 0,0
for i in range(0,nr_iterations):
if results["Truth"][i] == results["NTS_output"][i]:
nr_correct_NTS += 1
if results["Truth"][i] == results["S_t_verify_output"][i]:
nr_correct_S_t_verify += 1
results["Acc_NTS"] = nr_correct_NTS / nr_iterations
results["Acc_S_t_verify"] = nr_correct_S_t_verify / nr_iterations
results["NTS_mean_time"] = np.mean(results["NTS_time"])
results["S_t_verify_mean_time"] = np.mean(results["S_t_verify_time"])
# print(results)
# print("NTS: mean",np.mean(results["NTS_time"]),"\t std:",np.std(results["NTS_time"]), "\t Accuracy:", results["Acc_NTS"])
# print("SELECT_then_verify: mean",np.mean(results["S_t_verify_time"]),"\t std:",np.std(results["S_t_verify_time"]), "\t Accuracy:", results["Acc_S_t_verify"])
return(results)
def experiment_two(m=5, real_h=0.1, h=0.3, file_name ="plot",nr_iterations =25000,has_CW = "No",verify_variant="SPRT"):
"""
This function compares NTS with SELECT-then-verify for the given parameters,
saves the observed accuracies and averaged termination times and plots the
results.
"""
gammas = [0.001,0.005,0.01,0.015,0.02,0.03,0.05,0.075,0.1,0.125,0.15,0.2,0.25,0.35,0.45,0.5,0.55,0.6,0.65,0.7,0.75,0.7,0.95,0.99]
x_NTS = np.zeros(len(gammas))
y_NTS = np.zeros(len(gammas))
x_StV = np.zeros(len(gammas))
y_StV = np.zeros(len(gammas))
print("Progress for "+str(file_name)+" (.. out of "+str(len(gammas))+"): ")
for i in range(0,len(gammas)):
print(i,end=",")
buf = experiments_one(m,h,gammas[i],nr_iterations,real_h=real_h,has_CW = has_CW)
x_NTS[i] = buf["NTS_mean_time"]
x_StV[i] = buf["S_t_verify_mean_time"]
y_NTS[i] = buf["Acc_NTS"]
y_StV[i] = buf["Acc_S_t_verify"]
plt.plot(x_NTS,y_NTS, marker = "^", label="NTS")
plt.plot(x_StV, y_StV, marker = "o", label="SELECT-then-verify")
plt.xlabel("Iterations")
plt.ylabel("Success Rate")
plt.legend()
plt.title("h="+str(h))
plt.savefig(str(file_name)+"_plot.png",dpi=300)
plt.show()
np.savetxt(str(file_name)+"_results.csv",np.asarray([x_NTS,y_NTS,x_StV,y_StV]),delimiter=",")
# print(x_NTS,y_NTS)
print("Done.")
def experiment_three_help(m,h,gamma,nr_iterations=100, real_h = 0.05, has_CW = "No",verify_variant="SPRT"):
"""
This is an appropriate modification of 'experiments_one' from above.
Each line of code that has been modified is indicated with a comment '[CHANGE]',
together with the original code.
"""
assert has_CW=="No" or has_CW=="Yes" or has_CW=="Both", "'has_CW' has to be 'Yes','No' or 'Both'."
results = dict()
results["NTS_output"] =list()
results["NTS_time"] = list()
results["S_t_verify_output"] = list()
results["S_t_verify_time"] = list()
results["Truth"] = list()
for iteration in range(0,nr_iterations):
# Step 1: Sample a reciprocal relation, create a TE and a dictionary to save results.
if has_CW == "No":
#P, buf = rr.sampleCW_boundedFromOneHalf(m,real_h,decimal_precision=3) [CHANGE]
P, buf = rr.sampleCW_exactly_h(m,real_h,decimal_precision=3)
elif has_CW == "Yes":
# P = rr.sampleNotCW_boundedFromOneHalf(m,real_h,max_tries=10000,decimal_precision=3) [CHANGE]
P = rr.sampleNotCW_exactly_h(m, real_h,max_tries = 100000,decimal_precision = 3)
else:
# P = rr.sampleReciprocal(m,decimal_precision=3) [CHANGE]
# P = rr.__EnforceBoundedFromOneHalf__(P,real_h) [CHANGE]
P = rr.sampleRecRel_exactly_h(m,real_h,decimal_precision = 3)
results["Truth"].append(rr.get_CW(P))
# Step 2: Run and log our NTS
TE = tenv.TestEnvironment(P)
current_output = find_CW_with_symm_test(TE, h, gamma)
results["NTS_output"].append(current_output[0])
results["NTS_time"].append(TE.time)
# Step 3: Run and log SELECT_then_verify
TE = tenv.TestEnvironment(P)
current_output = SELECT_then_verify(TE, h, gamma,variant=verify_variant)
results["S_t_verify_output"].append(current_output)
results["S_t_verify_time"].append(TE.time)
# Step 4: Calculate the accuracy of both algorithms and return the results
nr_correct_NTS ,nr_correct_S_t_verify = 0,0
for i in range(0,nr_iterations):
if results["Truth"][i] == results["NTS_output"][i]:
nr_correct_NTS += 1
if results["Truth"][i] == results["S_t_verify_output"][i]:
nr_correct_S_t_verify += 1
results["Acc_NTS"] = nr_correct_NTS / nr_iterations
results["Acc_S_t_verify"] = nr_correct_S_t_verify / nr_iterations
results["NTS_mean_time"] = np.mean(results["NTS_time"])
results["S_t_verify_mean_time"] = np.mean(results["S_t_verify_time"])
return(results)
def experiment_three(m=5, gamma = 0.05, real_hs=[0.1,0.2,0.3,0.4], h=0.1, file_name ="plot",nr_iterations =25000,has_CW = "No",verify_variant="SPRT"):
"""
This is a simple modification of experiment_two.
-- instead of modifying gamma, we modify the value of real_h.
"""
# gammas = [0.001,0.005,0.01,0.015,0.02,0.03,0.05,0.075,0.1,0.125,0.15,0.2,0.25,0.35,0.45,0.5,0.55,0.6,0.65,0.7,0.75,0.7,0.95,0.99]
len_real_hs = len(real_hs)
x_NTS = np.zeros(len_real_hs)
y_NTS = np.zeros(len_real_hs)
x_StV = np.zeros(len_real_hs)
y_StV = np.zeros(len_real_hs)
print("Progress for "+str(file_name)+" (.. out of "+str(len_real_hs)+"): ")
for i in range(0,len_real_hs):
print(i,end=",")
buf = experiment_three_help(m,h,gamma,nr_iterations,real_h=real_hs[i],has_CW = has_CW)
x_NTS[i] = buf["NTS_mean_time"]
x_StV[i] = buf["S_t_verify_mean_time"]
y_NTS[i] = buf["Acc_NTS"]
y_StV[i] = buf["Acc_S_t_verify"]
plt.plot(real_hs,x_NTS, marker = "^", label="NTS")
plt.plot(real_hs, x_StV, marker = "o", label="SELECT-then-verify")
plt.xlabel("real_h")
plt.ylabel("Iterations")
plt.legend()
plt.title("h="+str(h))
plt.savefig(str(file_name)+"_plot.png",dpi=300)
plt.show()
np.savetxt(str(file_name)+"_results.csv",np.asarray([real_hs,x_NTS,y_NTS,x_StV,y_StV]),delimiter=",")
print("Done.")
# OUTPUT THE RESULTS AS A TABLE:
print("The results in form of [h, T A^NTS, Acc. A^NTS, T^StV, Acc StV] are:")
for i in range(0,len_real_hs):
print(real_hs[i],x_NTS[i],y_NTS[i],x_StV[i],y_StV[i])
##############################################################################
# The following function helps to create Figure 1 of our paper.
##############################################################################
def generate_main_figure():
a = np.loadtxt("MAIN_h02_results.csv",delimiter=",")
b = np.loadtxt("MAIN_h03_results.csv",delimiter=",")
plt.figure(figsize=(12,5))
plt.rcParams.update({'font.size': 14})
plt.subplot(1,2,1)
plt.subplots_adjust(left=0.07,
bottom=0.1,
right=0.99,
top=0.92,
wspace=0.1,
hspace=0.1)
plt.plot(a[0], a[1], marker = "^", label="NTS")
plt.plot(a[2], a[3], marker = "o", label="SELECT-then-verify")
plt.ylabel("Success Rate")
plt.xlabel("Iterations")
plt.title("h=0.2")
plt.legend(loc="lower right")
plt.subplot(1,2,2)
plt.xlim(left=0.5*min(min(b[0]),min(b[2])),right=2*min(max(b[0]),max(b[2])))
plt.plot(b[0],b[1], marker = "^", label="NTS")
plt.plot(b[2],b[3], marker = "o", label="SELECT-then-verify")
plt.xlabel("Iterations")
plt.title("h=0.3")
plt.legend(loc="lower right")
plt.savefig("MAIN_figure.png",dpi=600)
# fig.tight_layout()
plt.show()
plt.rcParams.update(plt.rcParamsDefault)
###############################################################################
# PART 4: Reconstruct the results from our paper.
###############################################################################
###############################################################################
# PART 4.1: Reconstruct the results from Section 7.1
###############################################################################
NR_it = 25000
np.random.seed(1)
experiment_two(m=5, real_h=0.05, h=0.2, file_name ="MAIN_h02",nr_iterations =NR_it,has_CW = "Both",verify_variant="SPRT")
np.random.seed(2)
experiment_two(m=5, real_h=0.05, h=0.3, file_name ="MAIN_h03",nr_iterations =NR_it,has_CW = "Both",verify_variant="SPRT")
generate_main_figure()
###############################################################################
# PART 4.2.1: Reconstruct the results from Section I.1, Figure 3
# (Similar to those in Sec. 7.1, but with larger number of arms)
###############################################################################
NR_it = 100000
np.random.seed(3)
experiment_two(m=10, real_h=0.05, h=0.3, file_name ="SUPPL_LARGE10_h03",nr_iterations =NR_it,has_CW = "Both",verify_variant="SPRT")
np.random.seed(101)
experiment_two(m=8, real_h=0.05, h=0.3, file_name ="SUPPL_LARGE08_h03",nr_iterations =NR_it,has_CW = "Both",verify_variant="SPRT")
###############################################################################
# PART 4.2.2: Reconstruct the results from Section I.1, Table 2
# (Comparison of SELECT-then-Verify and NTS on \hat{Q}_{m}^{h})
###############################################################################
NR_it = 100
real_hs = [0.02,0.05,0.1,0.15,0.2,0.25,0.3,0.35,0.4,0.45]
np.random.seed(3)
experiment_three(m=20, gamma = 0.05, real_hs=real_hs, h=0.05, file_name ="Exp3_m20_h05",nr_iterations =NR_it,has_CW = "Yes",verify_variant="SPRT")
###############################################################################
# PART 4.2.3: Reconstruct the results from Section I.2, Figures 4 & 5
# (Similar to those in Sec. 7.1, but with restriction to CW/Non-CW instances)
###############################################################################
NR_it = 25000
np.random.seed(5)
experiment_two(m=5, real_h=0.05, h=0.1, file_name ="SUPPL_CW_h01",nr_iterations =NR_it,has_CW = "Yes",verify_variant="SPRT")
np.random.seed(6)
experiment_two(m=5, real_h=0.05, h=0.2, file_name ="SUPPL_CW_h02",nr_iterations =NR_it,has_CW = "Yes",verify_variant="SPRT")
np.random.seed(7)
experiment_two(m=5, real_h=0.05, h=0.3, file_name ="SUPPL_CW_h03",nr_iterations =NR_it,has_CW = "Yes",verify_variant="SPRT")
np.random.seed(8)
experiment_two(m=5, real_h=0.05, h=0.1, file_name ="SUPPL_noCW_h01",nr_iterations =NR_it,has_CW = "No",verify_variant="SPRT")
np.random.seed(9)
experiment_two(m=5, real_h=0.05, h=0.2, file_name ="SUPPL_noCW_h02",nr_iterations =NR_it,has_CW = "No",verify_variant="SPRT")
np.random.seed(10)
experiment_two(m=5, real_h=0.05, h=0.3, file_name ="SUPPL_noCW_h03",nr_iterations | |
<filename>src/models/rankae.py<gh_stars>10-100
import copy
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_
from torch.nn.utils.rnn import pad_sequence
from models.decoder import TransformerDecoder
from models.encoder import Bert, TransformerEncoder, PositionalEncoding
from models.generator import Generator
from others.utils import tile
class RankAE(nn.Module):
def __init__(self, args, device, vocab, checkpoint=None):
super(RankAE, self).__init__()
self.args = args
self.device = device
self.vocab = vocab
self.vocab_size = len(vocab)
self.beam_size = args.beam_size
self.max_length = args.max_length
self.min_length = args.min_length
self.start_token = vocab['[unused1]']
self.end_token = vocab['[unused2]']
self.pad_token = vocab['[PAD]']
self.mask_token = vocab['[MASK]']
self.seg_token = vocab['[unused3]']
self.cls_token = vocab['[CLS]']
self.hidden_size = args.enc_hidden_size
self.embeddings = nn.Embedding(self.vocab_size, self.hidden_size, padding_idx=0)
if args.encoder == 'bert':
self.encoder = Bert(args.bert_dir, args.finetune_bert)
if(args.max_pos > 512):
my_pos_embeddings = nn.Embedding(args.max_pos, self.encoder.model.config.hidden_size)
my_pos_embeddings.weight.data[:512] = self.encoder.model.embeddings.position_embeddings.weight.data
my_pos_embeddings.weight.data[512:] = self.encoder.model.embeddings.position_embeddings.weight.data[-1][None, :].repeat(args.max_pos-512, 1)
self.encoder.model.embeddings.position_embeddings = my_pos_embeddings
tgt_embeddings = nn.Embedding(self.vocab_size, self.encoder.model.config.hidden_size, padding_idx=0)
else:
self.encoder = TransformerEncoder(self.hidden_size, args.enc_ff_size, args.enc_heads,
args.enc_dropout, args.enc_layers)
tgt_embeddings = nn.Embedding(self.vocab_size, self.hidden_size, padding_idx=0)
self.hier_encoder = TransformerEncoder(self.hidden_size, args.hier_ff_size, args.hier_heads,
args.hier_dropout, args.hier_layers)
self.cup_bilinear = nn.Bilinear(self.hidden_size, self.hidden_size, 1)
self.pos_emb = PositionalEncoding(0., self.hidden_size)
self.decoder = TransformerDecoder(
self.args.dec_layers,
self.args.dec_hidden_size, heads=self.args.dec_heads,
d_ff=self.args.dec_ff_size, dropout=self.args.dec_dropout,
embeddings=tgt_embeddings)
self.generator = Generator(self.vocab_size, self.args.dec_hidden_size, self.pad_token)
self.generator.linear.weight = self.decoder.embeddings.weight
if checkpoint is not None:
self.load_state_dict(checkpoint['model'], strict=True)
else:
if args.encoder == "transformer":
for module in self.encoder.modules():
self._set_parameter_tf(module)
xavier_uniform_(self.embeddings.weight)
for module in self.decoder.modules():
self._set_parameter_tf(module)
for module in self.hier_encoder.modules():
self._set_parameter_tf(module)
for p in self.generator.parameters():
self._set_parameter_linear(p)
for p in self.cup_bilinear.parameters():
self._set_parameter_linear(p)
if args.share_emb:
if args.encoder == 'bert':
self.embeddings = self.encoder.model.embeddings.word_embeddings
tgt_embeddings = nn.Embedding(self.vocab_size, self.encoder.model.config.hidden_size, padding_idx=0)
tgt_embeddings.weight = copy.deepcopy(self.encoder.model.embeddings.word_embeddings.weight)
else:
tgt_embeddings = self.embeddings
self.decoder.embeddings = tgt_embeddings
self.generator.linear.weight = self.decoder.embeddings.weight
self.to(device)
def _set_parameter_tf(self, module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def _set_parameter_linear(self, p):
if p.dim() > 1:
xavier_uniform_(p)
else:
p.data.zero_()
def _rebuild_tgt(self, origin, index, sep_token=None):
tgt_list = [torch.tensor([self.start_token], device=self.device)]
selected = origin.index_select(0, index)
for sent in selected:
filted_sent = sent[sent != self.pad_token][1:]
if sep_token is not None:
filted_sent[-1] = sep_token
else:
filted_sent = filted_sent[:-1]
tgt_list.append(filted_sent)
new_tgt = torch.cat(tgt_list, 0)
if sep_token is not None:
new_tgt[-1] = self.end_token
else:
new_tgt = torch.cat([new_tgt, torch.tensor([self.end_token], device=self.device)], 0)
return new_tgt
def _build_memory_window(self, ex_segs, keep_clss, replace_clss=None, mask=None, samples=None):
keep_cls_list = torch.split(keep_clss, ex_segs)
window_list = []
for ex in keep_cls_list:
ex_pad = F.pad(ex, (0, 0, self.args.win_size, self.args.win_size)).unsqueeze(1)
ex_context = torch.cat([ex_pad[:ex.size(0)], ex.unsqueeze(1),
ex_pad[self.args.win_size*2:]], 1)
window_list.append(ex_context)
memory = torch.cat(window_list, 0)
if replace_clss is not None:
replace_cls_list = torch.split(replace_clss, ex_segs)
window_list = []
for ex in replace_cls_list:
ex_pad = F.pad(ex, (0, 0, self.args.win_size, self.args.win_size)).unsqueeze(1)
ex_context = torch.cat([ex_pad[:ex.size(0)], ex.unsqueeze(1),
ex_pad[self.args.win_size*2:]], 1)
window_list.append(ex_context)
origin_memory = torch.cat(window_list, 0)
sample_list = torch.split(samples, ex_segs)
sample_tensor_list = []
for i in range(len(ex_segs)):
sample_index_ = torch.randint(0, samples.size(-1), [mask.size(-1)], device=self.device)
sample_index = torch.index_select(sample_list[i], 1, sample_index_)
sample_tensor = replace_cls_list[i][sample_index]
sample_tensor_list.append(sample_tensor)
sample_memory = torch.cat(sample_tensor_list, 0)
memory = memory * (mask == 2).unsqueeze(-1).float() + \
sample_memory * (mask == 0).unsqueeze(-1).float() + \
origin_memory * (mask == 1).unsqueeze(-1).float()
return memory
def _src_add_noise(self, sent, sampled_sent, expand_ratio=0.):
role_emb = sent[1:2]
filted_sent = sent[sent != self.pad_token][2:]
# filted_sent = sent[sent != self.pad_token][1:]
rand_size = sampled_sent.size(0)
length = max(int(filted_sent.size(0)*(1+expand_ratio)), filted_sent.size(0)+1)
while filted_sent.size(0) < length:
target_length = length - filted_sent.size(0)
rand_sent = sampled_sent[random.randint(0, rand_size-1)]
rand_sent = rand_sent[rand_sent != self.pad_token][2:] # remove cls and role embedding
# rand_sent = rand_sent[rand_sent != self.pad_token][1:] # no role embedding
start_point = random.randint(0, rand_sent.size(0)-1)
end_point = random.randint(start_point, rand_sent.size(0))
rand_segment = rand_sent[start_point:min(end_point, start_point+10, start_point+target_length)]
insert_point = random.randint(0, filted_sent.size(0)-1)
filted_sent = torch.cat([filted_sent[:insert_point],
rand_segment,
filted_sent[insert_point:]], 0)
# return filted_sent
return torch.cat([role_emb, filted_sent], 0)
def _build_noised_src(self, src, ex_segs, samples, expand_ratio=0.):
src_list = torch.split(src, ex_segs)
new_src_list = []
sample_list = torch.split(samples, ex_segs)
for i, ex in enumerate(src_list):
for j, sent in enumerate(ex):
sampled_sent = ex.index_select(0, sample_list[i][j])
expanded_sent = self._src_add_noise(sent, sampled_sent, expand_ratio)
new_src = torch.cat([torch.tensor([self.cls_token], device=self.device), expanded_sent], 0)
new_src_list.append(new_src)
new_src = pad_sequence(new_src_list, batch_first=True, padding_value=self.pad_token)
new_mask = new_src.data.ne(self.pad_token)
new_segs = torch.zeros_like(new_src)
return new_src, new_mask, new_segs
def _build_context_tgt(self, tgt, ex_segs, win_size=1, modify=False, mask=None):
tgt_list = torch.split(tgt, ex_segs)
new_tgt_list = []
if modify and mask is not None:
# 1 means keeping the sentence
mask_list = torch.split(mask, ex_segs)
for i in range(len(tgt_list)):
sent_num = tgt_list[i].size(0)
for j in range(sent_num):
if modify:
low = j-win_size
up = j+win_size+1
index = torch.arange(low, up, device=self.device)
index = index[mask_list[i][j] > 0]
else:
low = max(0, j-win_size)
up = min(sent_num, j+win_size+1)
index = torch.arange(low, up, device=self.device)
new_tgt_list.append(self._rebuild_tgt(tgt_list[i], index, self.seg_token))
new_tgt = pad_sequence(new_tgt_list, batch_first=True, padding_value=self.pad_token)
return new_tgt
def _build_doc_tgt(self, tgt, vec, ex_segs, win_size=1, max_k=6, sigma=1.0):
vec_list = torch.split(vec, ex_segs)
tgt_list = torch.split(tgt, ex_segs)
new_tgt_list = []
index_list = []
shift_list = []
accum_index = 0
for idx in range(len(ex_segs)):
ex_vec = vec_list[idx]
sent_num = ex_segs[idx]
ex_tgt = tgt_list[idx]
tgt_length = ex_tgt[:, 1:].ne(self.pad_token).sum(dim=1).float()
topk_ids = self._centrality_rank(ex_vec, sent_num, tgt_length, win_size, max_k, sigma)
new_tgt_list.append(self._rebuild_tgt(ex_tgt, topk_ids, self.seg_token))
shift_list.append(topk_ids)
index_list.append(topk_ids + accum_index)
accum_index += sent_num
new_tgt = pad_sequence(new_tgt_list, batch_first=True, padding_value=self.pad_token)
return new_tgt, index_list, shift_list
def _centrality_rank(self, vec, sent_num, tgt_length, win_size, max_k, sigma, eta=0.5, min_length=5):
assert vec.size(0) == sent_num
sim = torch.sigmoid(self.cup_bilinear(vec.unsqueeze(1).expand(sent_num, sent_num, -1).contiguous(),
vec.unsqueeze(0).expand(sent_num, sent_num, -1).contiguous())
).squeeze().detach()
# sim = torch.sigmoid(torch.mm(vec, vec.transpose(0, 1)))
# sim = torch.cosine_similarity(
# vec.unsqueeze(1).expand(sent_num, sent_num, -1).contiguous().view(sent_num * sent_num, -1),
# vec.unsqueeze(0).expand(sent_num, sent_num, -1).contiguous().view(sent_num * sent_num, -1)
# ).view(sent_num, sent_num).detach()
# calculate sim weight
k = min(max(sent_num // (win_size*2+1), 1), max_k)
var = sent_num / k * 1.
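# Positional proximity prior: weight_ij is proportional to
# exp(-(j - i)^2 / (2 * var^2)) with var = sent_num / k, so sentences close to
# each other in the dialogue contribute more to each other's centrality, and
# the diagonal is zeroed so a sentence never scores itself.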
x = torch.arange(sent_num, device=self.device, dtype=torch.float).unsqueeze(0).expand_as(sim)
u = torch.arange(sent_num, device=self.device, dtype=torch.float).unsqueeze(1)
weight = torch.exp(-(x-u)**2 / (2. * var**2)) * (1. - torch.eye(sent_num, device=self.device))
# weight = 1. - torch.eye(sent_num, device=self.device)
sim[tgt_length < min_length, :] = -1e20
# Calculate centrality and select top k sentence.
topk_ids = torch.empty(0, dtype=torch.long, device=self.device)
mask = torch.zeros([sent_num, sent_num], dtype=torch.float, device=self.device)
for _ in range(k):
mean_score = torch.sum(sim * weight, dim=1) / max(sent_num-1, 1)
max_v, _ = torch.max(sim * weight * mask, dim=1)
centrality = eta*mean_score - (1-eta)*max_v
_, top_id = torch.topk(centrality, 1, dim=0, sorted=False)
topk_ids = torch.cat([topk_ids, top_id], 0)
sim[topk_ids, :] = -1e20
mask[:, topk_ids] = 1.
topk_ids, _ = torch.sort(topk_ids)
"""
centrality = torch.sum(sim * weight, dim=1)
_, topk_ids = torch.topk(centrality, k, dim=0, sorted=False)
topk_ids, _ = torch.sort(topk_ids)
"""
return topk_ids
def _add_mask(self, src, mask_src):
pm_index = torch.empty_like(mask_src).float().uniform_().le(self.args.mask_token_prob)
ps_index = torch.empty_like(mask_src[:, 0]).float().uniform_().gt(self.args.select_sent_prob)
pm_index[ps_index] = 0
# Avoid mask [PAD]
pm_index[(1-mask_src).byte()] = 0
# Avoid mask [CLS]
pm_index[:, 0] = 0
# Avoid mask [SEG]
pm_index[src == self.seg_token] = 0
src[pm_index] = self.mask_token
return src
def _build_cup(self, bsz, ex_segs, win_size=1, negative_num=2):
cup = torch.split(torch.arange(0, bsz, dtype=torch.long, device=self.device), ex_segs)
tgt = torch.split(torch.ones(bsz), ex_segs)
cup_list = []
cup_origin_list = []
tgt_list = []
negative_list = []
for i in range(len(ex_segs)):
sent_num = ex_segs[i]
cup_low = cup[i][0].item()
cup_up = cup[i][sent_num-1].item()
cup_index = cup[i].repeat(win_size*2*(negative_num+1))
tgt_index = tgt[i].repeat(win_size*2*(negative_num+1))
cup_origin_list.append(cup[i].repeat(win_size*2*(negative_num+1)))
tgt_index[sent_num*win_size*2:] = 0
for j in range(cup_index.size(0)):
if tgt_index[j] == 1:
cup_temp = cup_index[j]
window_list = [t for t in range(max(cup_index[j]-win_size, cup_low),
min(cup_index[j]+win_size, cup_up)+1)
if t != cup_index[j]]
cup_temp = window_list[(j // sent_num) % len(window_list)]
else:
cand_list = [t for t in range(cup_low, max(cup_index[j]-win_size, cup_low))] + \
[t for t in range(min(cup_index[j]+win_size, cup_up), cup_up)]
cup_temp = cand_list[random.randint(0, len(cand_list)-1)]
cup_index[j] = cup_temp
negative_list.append((cup_index[sent_num*win_size*2:]-cup_low).
view(negative_num*win_size*2, -1).transpose(0, 1))
cup_list.append(cup_index)
tgt_list.append(tgt_index)
tgt = torch.cat(tgt_list, dim=0).float().to(self.device)
cup_origin = torch.cat(cup_origin_list, dim=0)
cup = torch.cat(cup_list, dim=0)
negative_sample = torch.cat(negative_list, dim=0)
return cup, cup_origin, tgt[cup != -1], negative_sample
def _build_option_window(self, bsz, ex_segs, win_size=1, keep_ratio=0.1, replace_ratio=0.2):
assert keep_ratio + replace_ratio <= 1.
noise_ratio = 1 - keep_ratio - replace_ratio
window_size = 2*win_size+1
index = torch.split(torch.arange(1, bsz+1, dtype=torch.long, device=self.device), ex_segs)
# 2 means noise addition, 1 means keep the memory, 0 means replacement
tgt = torch.zeros([bsz, window_size], device=self.device, dtype=torch.int)
prob = torch.empty([bsz, window_size], device=self.device).uniform_()
tgt.masked_fill_(prob.lt(noise_ratio), 2)
tgt.masked_fill_(prob.ge(1-keep_ratio), 1)
tgt = torch.split(tgt, ex_segs)
for i in range(len(ex_segs)):
sent_num = ex_segs[i]
index_pad = F.pad(index[i], (self.args.win_size, self.args.win_size))
for j in range(sent_num):
window = index_pad[j:j+window_size]
# Avoid the case where every in-range position in the window is 0 (replacement)
if torch.sum(tgt[i][j].byte()*(window > 0)) == 0:
tgt[i][j][win_size] = 2
tgt[i][j][window == 0] = -1
tgt = torch.cat(tgt, 0)
return tgt
def _fast_translate_batch(self, batch, memory_bank, max_length, init_tokens=None, memory_mask=None,
min_length=2, beam_size=3, ignore_mem_attn=False):
batch_size = memory_bank.size(0)
dec_states = self.decoder.init_decoder_state(batch.src, memory_bank, with_cache=True)
# Tile states and memory beam_size times.
dec_states.map_batch_fn(
lambda state, dim: tile(state, beam_size, dim=dim))
memory_bank = tile(memory_bank, beam_size, dim=0)
init_tokens = tile(init_tokens, beam_size, dim=0)
memory_mask = tile(memory_mask, beam_size, dim=0)
batch_offset = torch.arange(
batch_size, | |
encountered.
"""
if should_log:
self._LogCopyOperation(src_uri, dst_uri, headers)
(cb, num_cb, res_download_handler) = self._GetTransferHandlers(
dst_uri, src_key.size, False)
file_name = dst_uri.object_name
dir_name = os.path.dirname(file_name)
if dir_name and not os.path.exists(dir_name):
# Do dir creation in try block so can ignore case where dir already
# exists. This is needed to avoid a race condition when running gsutil
# -m cp.
try:
os.makedirs(dir_name)
except OSError, e:
if e.errno != errno.EEXIST:
raise
# For gzipped objects, download to a temp file and unzip.
if (hasattr(src_key, 'content_encoding')
and src_key.content_encoding == 'gzip'):
# We can't use tempfile.mkstemp() here because we need a predictable
# filename for resumable downloads.
download_file_name = '%s_.gztmp' % file_name
need_to_unzip = True
else:
download_file_name = file_name
need_to_unzip = False
hash_algs = self._GetHashAlgs(src_key)
# Add accept encoding for download operation.
AddAcceptEncoding(headers)
fp = None
try:
if res_download_handler:
fp = open(download_file_name, 'ab')
else:
fp = open(download_file_name, 'wb')
start_time = time.time()
# Use our hash_algs if get_contents_to_file() will accept them, else the
# default (md5-only) will suffice.
try:
src_key.get_contents_to_file(fp, headers, cb=cb, num_cb=num_cb,
res_download_handler=res_download_handler,
hash_algs=hash_algs)
except TypeError:
src_key.get_contents_to_file(fp, headers, cb=cb, num_cb=num_cb,
res_download_handler=res_download_handler)
# If a custom test method is defined, call it here. For the copy command,
# test methods are expected to take one argument: an open file pointer,
# and are used to perturb the open file during download to exercise
# download error detection.
if self.test_method:
self.test_method(fp)
end_time = time.time()
finally:
if fp:
fp.close()
if (not need_to_unzip and
hasattr(src_key, 'content_encoding')
and src_key.content_encoding == 'gzip'):
# TODO: HEAD requests are currently not returning proper Content-Encoding
# headers when an object is gzip-encoded on-the-fly. Remove this once
# it's fixed.
renamed_file_name = '%s_.gztmp' % file_name
os.rename(download_file_name, renamed_file_name)
download_file_name = renamed_file_name
need_to_unzip = True
# Discard all hashes if we are resuming a partial download.
if res_download_handler and res_download_handler.download_start_point:
src_key.local_hashes = {}
# Verify downloaded file checksum matched source object's checksum.
digest_verified = True
computed_hashes = None
try:
self._CheckHashes(src_key, download_file_name, hash_algs)
except CommandException, e:
# If the digest doesn't match, we'll try checking it again after
# unzipping.
if (not need_to_unzip or
'doesn\'t match cloud-supplied digest' not in str(e)):
os.unlink(download_file_name)
raise
digest_verified = False
computed_hashes = dict(
(alg, digester())
for alg, digester in self._GetHashAlgs(src_key).iteritems())
if res_download_handler:
bytes_transferred = (
src_key.size - res_download_handler.download_start_point)
else:
bytes_transferred = src_key.size
if need_to_unzip:
# Log that we're uncompressing if the file is big enough that
# decompressing would make it look like the transfer "stalled" at the end.
if bytes_transferred > 10 * 1024 * 1024:
self.logger.info('Uncompressing downloaded tmp file to %s...',
file_name)
# Downloaded gzipped file to a filename w/o .gz extension, so unzip.
f_in = gzip.open(download_file_name, 'rb')
with open(file_name, 'wb') as f_out:
data = f_in.read(self.GUNZIP_CHUNK_SIZE)
while data:
f_out.write(data)
if computed_hashes:
# Compute digests again on the uncompressed data.
for alg in computed_hashes.itervalues():
alg.update(data)
data = f_in.read(self.GUNZIP_CHUNK_SIZE)
f_in.close()
os.unlink(download_file_name)
if not digest_verified:
computed_hashes = dict((alg, digester.digest())
for alg, digester in computed_hashes.iteritems())
try:
self._CheckHashes(
src_key, file_name, hash_algs, computed_hashes=computed_hashes)
except CommandException, e:
os.unlink(file_name)
raise
return (end_time - start_time, bytes_transferred, dst_uri)
def _PerformDownloadToStream(self, src_key, src_uri, str_fp, headers):
(cb, num_cb, res_download_handler) = self._GetTransferHandlers(
src_uri, src_key.size, False)
start_time = time.time()
src_key.get_contents_to_file(str_fp, headers, cb=cb, num_cb=num_cb)
end_time = time.time()
bytes_transferred = src_key.size
return (end_time - start_time, bytes_transferred)
def _CopyFileToFile(self, src_key, src_uri, dst_uri, headers):
"""Copies a local file to a local file.
Args:
src_key: Source StorageUri. Must be a file URI.
src_uri: Source StorageUri.
dst_uri: Destination StorageUri.
headers: The headers dictionary.
Returns:
(elapsed_time, bytes_transferred, dst_uri), excluding
overhead like initial HEAD.
Raises:
CommandException: if errors encountered.
"""
self._LogCopyOperation(src_uri, dst_uri, headers)
dst_key = dst_uri.new_key(False, headers)
start_time = time.time()
dst_key.set_contents_from_file(src_key.fp, headers)
end_time = time.time()
return (end_time - start_time, os.path.getsize(src_key.fp.name), dst_uri)
def _CopyObjToObjDaisyChainMode(self, src_key, src_uri, dst_uri, headers):
"""Copies from src_uri to dst_uri in "daisy chain" mode.
See -D OPTION documentation about what daisy chain mode is.
Args:
src_key: Source Key.
src_uri: Source StorageUri.
dst_uri: Destination StorageUri.
headers: A copy of the top-level headers dictionary.
Returns:
(elapsed_time, bytes_transferred, version-specific dst_uri) excluding
overhead like initial HEAD.
Raises:
CommandException: if errors encountered.
"""
# Start with copy of input headers, so we'll include any headers that need
# to be set from higher up in call stack (like x-goog-if-generation-match).
headers = headers.copy()
# Now merge headers from src_key so we'll preserve metadata.
# Unfortunately boto separates headers into ones it puts in the metadata
# dict and ones it pulls out into specific key fields, so we need to walk
# through the latter list to find the headers that we copy over to the dest
# object.
for header_name, field_name in (
('cache-control', 'cache_control'),
('content-type', 'content_type'),
('content-language', 'content_language'),
('content-encoding', 'content_encoding'),
('content-disposition', 'content_disposition')):
value = getattr(src_key, field_name, None)
if value:
headers[header_name] = value
# Boto represents x-goog-meta-* headers in metadata dict with the
# x-goog-meta- or x-amx-meta- prefix stripped. Turn these back into headers
# for the destination object.
for name, value in src_key.metadata.items():
header_name = '%smeta-%s' % (dst_uri.get_provider().header_prefix, name)
headers[header_name] = value
# Set content type if specified in '-h Content-Type' option.
self._SetContentTypeHeader(src_uri, headers)
self._LogCopyOperation(src_uri, dst_uri, headers)
(preserve_acl, canned_acl, headers) = (
self._ProcessCopyObjectToObjectOptions(dst_uri, headers))
if preserve_acl:
if src_uri.get_provider() != dst_uri.get_provider():
# We don't attempt to preserve ACLs across providers because
# GCS and S3 support different ACLs and disjoint principals.
raise NotImplementedError('Cross-provider cp -p not supported')
# We need to read and write the ACL manually because the
# Key.set_contents_from_file() API doesn't provide a preserve_acl
# parameter (unlike the Bucket.copy_key() API used
# by _CopyObjToObjInTheCloud).
acl = src_uri.get_acl(headers=headers)
fp = KeyFile(src_key)
result = self._PerformResumableUploadIfApplies(fp, src_uri,
dst_uri, canned_acl, headers,
self._GetFileSize(fp))
if preserve_acl:
# If user specified noclobber flag, we need to remove the
# x-goog-if-generation-match:0 header that was set when uploading the
# object, because that precondition would fail when updating the ACL on
# the now-existing object.
if self.no_clobber:
del headers['x-goog-if-generation-match']
# Remove the owner field from the ACL in case we're copying from an object
# that is owned by a different user. If we left that other user in the
# ACL, attempting to set the ACL would result in a 400 (Bad Request).
if hasattr(acl, 'owner'):
del acl.owner
dst_uri.set_acl(acl, dst_uri.object_name, headers=headers)
return result
def _PerformCopy(self, src_uri, dst_uri, allow_splitting=True):
"""Performs copy from src_uri to dst_uri, handling various special cases.
Args:
src_uri: Source StorageUri.
dst_uri: Destination StorageUri.
allow_splitting: Whether to allow the file to be split into component
pieces for a parallel composite upload.
Returns:
(elapsed_time, bytes_transferred, version-specific dst_uri) excluding
overhead like initial HEAD.
Raises:
CommandException: if errors encountered.
"""
# Make a copy of the input headers each time so we can set a different
# content type for each object.
headers = self.headers.copy() if self.headers else {}
download_headers = headers.copy()
# Add accept encoding for download operation.
AddAcceptEncoding(download_headers)
src_key = src_uri.get_key(False, download_headers)
if not src_key:
raise CommandException('"%s" does not exist.' % src_uri)
if self.use_manifest:
# Set the source size in the manifest.
self.manifest.Set(src_uri, 'size', getattr(src_key, 'size', None))
# On Windows, stdin is opened as text mode instead of binary which causes
# problems when piping a binary file, so this switches it to binary mode.
if IS_WINDOWS and src_uri.is_file_uri() and src_key.is_stream():
import msvcrt
msvcrt.setmode(src_key.fp.fileno(), os.O_BINARY)
if self.no_clobber:
# There are two checks to prevent clobbering:
# 1) The first check is to see if the item
# already exists at the destination and prevent the upload/download
# from happening. This is done by the exists() call.
# 2) The second check is only relevant if we are writing to gs. We can
# enforce that the server only writes the object if it doesn't exist
# by specifying the header below. This check only happens at the
# server after the complete file has been uploaded. We specify this
# header to prevent a race condition where a destination file may
# be created after the first check and before the file is fully
# uploaded.
# In order to save
kwargs): If True the user cannot continue until they provide a value.
valid_values (list, kwargs): A list of valid values
Returns:
str: The users selected choice.
"""
# collect input value from user and set default if required
input_value: str = self._input_value('Choice', kwargs.get('option_text')) or kwargs.get(
'default'
)
# ensure input value is provided when input is required
if input_value is None and kwargs.get('required') is True:
self.print_required()
return self.collect_choice(**kwargs)
# if input value is None then there is not need to continue
if input_value is None:
return input_value
# set valid values
valid_values: list = kwargs.get('valid_values', [])
# convert to int or recollect input
try:
input_value = int(input_value)
except ValueError:
self.print_invalid_index(f'0-{len(valid_values)}')
return self.collect_choice(**kwargs)
# ensure input value is valid
valid_index_values = [i for i, _ in enumerate(valid_values)]
# valid_index_values = list(range(0, len(valid_values) - 1))
if input_value not in valid_index_values:
self.print_invalid_index(f'0-{len(valid_values)}')
return self.collect_choice(**kwargs)
# using index value provided by user, set value to valid value
input_value = valid_values[input_value]
if input_value == self._no_selection_text:
# special case for when user select no selection
input_value = None
# print user feedback
if kwargs.get('feedback', True):
self.print_feedback(input_value)
return input_value
def collect_exit_code(self, **kwargs) -> int:
"""Collect exit codes.
Args:
option_text (str, kwargs): The text shown to the user.
Returns:
int: The users provided exit code.
"""
input_value = self._input_value('Code', kwargs.get('option_text'))
if input_value != '':
try:
input_value = int(input_value)
except ValueError:
self.print_invalid_exit_code()
return self.collect_exit_code(**kwargs)
if input_value not in [0, 1, 3]:
self.print_invalid_exit_code()
return self.collect_exit_code(**kwargs)
return input_value
def collect_exit_codes(self, **kwargs) -> list:
"""Collect exit codes.
Returns:
list: The users provided exit codes.
"""
input_values = []
while True:
input_value = self.collect_exit_code(**kwargs)
if input_value == '':
break
input_values.append(input_value)
if not input_values:
# return None to ensure data doesn't get added to inputs
input_values = [0]
# print user feedback
self.print_feedback(input_values)
return input_values
def collect_key_value(self, **kwargs) -> dict:
"""Collect key value data.
Args:
option_text (str, kwargs): The text shown to the user.
required (bool, kwargs): If True the user cannot continue until they provide a value.
Returns:
dict: The users provided key value input.
"""
input_value = None
key = self._input_value('Key', option_text=kwargs.get('option_text'))
# ensure input value is provided when input is required
if key == '' and kwargs.get('required') is True:
self.print_required()
return self.collect_key_value(**kwargs)
if key != '':
value = self._input_value('Value')
input_value = {'key': key, 'value': value}
else:
input_value = kwargs.get('default')
# print user feedback
if kwargs.get('feedback', True):
self.print_feedback(input_value)
return input_value
def collect_key_value_array(self, **kwargs) -> list:
"""Collect key value array data
Args:
default (str, kwargs): The default value if no value provided by user.
option_text (str, kwargs): The text shown to the user.
required (bool, kwargs): If True the user cannot continue until they provide a value.
Returns:
list: The users provided list of key value inputs.
"""
input_values = []
required: bool = kwargs.get('required')
while True:
input_value = self.collect_key_value(
default=kwargs.get('default'),
feedback=False,
option_text=kwargs.get('option_text'),
required=required,
)
if not input_value:
break
input_values.append(input_value)
required = False
if not input_values:
# return None to ensure data doesn't get added to inputs
input_values = None
# print user feedback
self.print_feedback(input_values)
return input_values
def collect_multichoice(self, **kwargs) -> list:
"""Collect multichoice data
Args:
required (bool, kwargs): If True the user cannot continue until they provide a value.
valid_values (list, kwargs): A list of valid values
Returns:
list: The users provided list of choice inputs.
"""
input_values = []
required = kwargs.get('required', False)
while True:
input_value = self.collect_choice(
feedback=False,
# option_text=kwargs.get('option_text'),
required=required,
valid_values=kwargs.get('valid_values'),
)
if not input_value:
break
input_values.append(input_value)
required = False
input_values = list(set(input_values))
if input_values:
# format multichoice value as pipe delimited string
input_values = '|'.join(input_values)
else:
# return None to ensure data doesn't get added to inputs
input_values = None
# print user feedback
self.print_feedback(input_values)
return input_values
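# Example (hedged, not from the original file): if the user selects the
# entries "high" and "medium" from valid_values and then submits an empty
# line, the collected value is returned as the pipe-delimited string
# "high|medium"; the ordering may vary because duplicates are removed via set().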
def collect_string(self, **kwargs) -> str:
"""Collect string data
Args:
option_text (str, kwargs): The text shown to the user.
default (str, kwargs): The default value if no value provided by user.
Returns:
str: The user provided input.
"""
input_value = self._input_value('Input', kwargs.get('option_text', ''))
if not input_value:
input_value = kwargs.get('default')
if input_value is None and kwargs.get('required', False) is True:
self.print_required()
return self.collect_string(**kwargs)
# print user feedback
if kwargs.get('feedback', True):
self.print_feedback(input_value)
# APP-622 - handle null/None values
if input_value == 'null':
input_value = None
elif input_value in ['"null"', "'null'"]:
input_value = 'null'
return input_value
def collect_string_array(self, **kwargs) -> list:
"""Collect string data
Args:
required (bool, kwargs): If True the user cannot continue until they provide a value.
Returns:
str: The user provided input.
"""
input_values = []
required = kwargs.get('required', False)
while True:
input_value = self.collect_string(feedback=False, required=required)
if not input_value:
break
input_values.append(input_value)
required = False
if not input_values:
# return None to ensure data doesn't get added to inputs
input_values = None
# print user feedback
self.print_feedback(input_values)
return input_values
def collect_tcentity(self, **kwargs) -> dict:
"""Collect tcentity data
Args:
required (bool, kwargs): If True the user cannot continue until they provide a value.
Returns:
str: The user provided input.
"""
input_value = None
id_ = self._input_value('ID')
if id_:
value = self._input_value('Value')
type_ = self._input_value('Type')
input_value = {'id': id_, 'value': value, 'type': type_}
if input_value is None and kwargs.get('required', False) is True:
self.print_required()
return self.collect_tcentity(**kwargs)
# print user feedback
if kwargs.get('feedback', True):
self.print_feedback(input_value)
return input_value
def collect_tcentity_array(self, **kwargs) -> list:
"""Collect tcentity array data
Args:
required (bool, kwargs): If True the user cannot continue until they provide a value.
Returns:
list: The user provided inputs.
"""
input_values = []
required = kwargs.get('required', False)
while True:
input_value = self.collect_tcentity(feedback=False, required=required)
if not input_value:
break
input_values.append(input_value)
required = False
if not input_values:
# return None to ensure data doesn't get added to inputs
input_values = None
# print user feedback
self.print_feedback(input_values)
return input_values
@property
def inputs(self) -> dict:
"""Return inputs dict."""
return self._inputs
def present(self) -> None:
"""Present interactive menu to build profile."""
def params_data() -> tuple:
# handle non-layout and layout based App appropriately
if self.profile.lj.has_layout:
# using inputs from layout.json since they are required to be in order
# (display field can only use inputs previously defined)
for name in self.profile.lj.params_dict:
# get data from install.json based on name
data = self.profile.ij.params_dict.get(name)
yield name, data
# hidden fields will not be in layout.json so they need to be include manually
for name, data in self.profile.ij.filter_params_dict(hidden=True).items():
yield name, data
else:
for name, data in self.profile.ij.params_dict.items():
yield name, data
inputs = {}
for name, data in params_data():
if data.get('serviceConfig'):
# inputs that are serviceConfig are not applicable for profiles
continue
if not data.get('hidden'):
# each input will be checked for permutations if the App has layout and not hidden
if not self.profile.permutations.validate_input_variable(name, inputs):
continue
# present the input
value: str = self.input_type_map.get(data.get('type').lower())(name, data)
# update inputs
inputs[name] = value
self.present_exit_code()
def present_boolean(self, name: str, data) -> bool:
"""Build a question for boolean input.
Args:
name: The name of the input field.
data: The install.json input param object.
Returns:
bool: The user provided input.
"""
# print header information
self.print_header(data)
default = self._default(data)
valid_values = ['true', 'false']
option_default = 'false'
option_text = ''
options = []
for v in valid_values:
if v.lower() == default.lower():
option_default = v
v = f'[{v}]'
options.append(v)
option_text = f'''({'/'.join(options)})'''
value = self.collect_boolean(default=option_default, option_text=option_text)
# add input
self.add_input(name, data, value)
return value
def present_editchoice(self, name: str, data: dict) -> str:
"""Build a question for editchoice input.
Args:
name: The name of the input field.
data: The install.json input param object.
Returns:
str: The user provided input.
"""
# print header information
self.print_header(data)
default = self._default(data)
option_index = 0
valid_values = self._expand_valid_values(data.get('validValues', []))
if data.get('required', False) is False:
# add option to invalidate defaults
valid_values.insert(0, self._no_selection_text)
# default value needs to be converted to index
if default:
try:
option_index = valid_values.index(default)
except ValueError:
# if "magic" variable (e.g., ${GROUP_TYPES}) was not expanded then use index 0.
# there is no way to tell if the default value would be part of the expansion.
if any(re.match(r'^\${.*}$', v)
import numpy as np
import mediapipe_utils as mpu
import cv2
from pathlib import Path
from FPS import FPS, now
from math import sin, cos
import depthai as dai
import time, sys
SCRIPT_DIR = Path(__file__).resolve().parent
POSE_DETECTION_MODEL = str(SCRIPT_DIR / "models/pose_detection_sh4.blob")
LANDMARK_MODEL_FULL = str(SCRIPT_DIR / "models/pose_landmark_full_sh4.blob")
LANDMARK_MODEL_HEAVY = str(SCRIPT_DIR / "models/pose_landmark_heavy_sh4.blob")
LANDMARK_MODEL_LITE = str(SCRIPT_DIR / "models/pose_landmark_lite_sh4.blob")
def to_planar(arr: np.ndarray, shape: tuple) -> np.ndarray:
return cv2.resize(arr, shape).transpose(2,0,1).flatten()
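def _example_to_planar():
    """Hedged sketch (not part of the original file): to_planar resizes an
    HxWx3 BGR frame to the requested (width, height) and returns the pixels
    flattened in planar CHW order, the layout typically expected when sending
    frames to an on-device neural network input queue."""
    dummy_frame = np.zeros((1080, 1920, 3), dtype=np.uint8)
    planar = to_planar(dummy_frame, (224, 224))
    assert planar.shape == (3 * 224 * 224,)
    return planar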
class BlazeposeDepthai:
"""
Blazepose body pose detector
Arguments:
- input_src: frame source,
- "rgb" or None: OAK* internal color camera,
- "rgb_laconic": same as "rgb" but without sending the frames to the host,
- a file path of an image or a video,
- an integer (eg 0) for a webcam id,
- pd_model: Blazepose detection model blob file (if None, takes the default value POSE_DETECTION_MODEL),
- pd_score_thresh: confidence score to determine whether a detection is reliable (a float between 0 and 1).
- lm_model: Blazepose landmark model blob file
- None or "full": the default blob file LANDMARK_MODEL_FULL,
- "lite": the default blob file LANDMARK_MODEL_LITE,
- "heavy": default blob file LANDMARK_MODEL_HEAVY,
- a path of a blob file.
- lm_score_thresh : confidence score to determine whether landmarks prediction is reliable (a float between 0 and 1).
- xyz: boolean, when True get the (x, y, z) coords of the reference point (center of the hips) (if the device supports depth measures).
- crop : boolean which indicates if square cropping is done or not
- smoothing: boolean which indicates if smoothing filtering is applied
- filter_window_size and filter_velocity_scale:
The filter keeps track (on a window of specified size) of
value changes over time, which as result gives velocity of how value
changes over time. With higher velocity it weights new values higher.
- higher filter_window_size adds to lag and to stability
- lower filter_velocity_scale adds to lag and to stability
- internal_fps : when using the internal color camera as input source, set its FPS to this value (calling setFps()).
- resolution : sensor resolution "full" (1920x1080) or "ultra" (3840x2160),
- internal_frame_height : when using the internal color camera, set the frame height (calling setIspScale()).
The width is calculated accordingly to height and depends on value of 'crop'
- stats : boolean, when True, display some statistics when exiting.
- trace: boolean, when True print some debug messages
- force_detection: boolean, force person detection on every frame (never use landmarks from previous frame to determine ROI)
"""
def __init__(self, input_src="rgb",
pd_model=None,
pd_score_thresh=0.5,
lm_model=None,
lm_score_thresh=0.7,
xyz=False,
crop=False,
smoothing=True,
internal_fps=None,
resolution="full",
internal_frame_height=1080,
stats=False,
trace=False,
force_detection=False
):
self.pd_model = pd_model if pd_model else POSE_DETECTION_MODEL
print(f"Pose detection blob file : {self.pd_model}")
self.rect_transf_scale = 1.25
if lm_model is None or lm_model == "full":
self.lm_model = LANDMARK_MODEL_FULL
elif lm_model == "lite":
self.lm_model = LANDMARK_MODEL_LITE
elif lm_model == "heavy":
self.lm_model = LANDMARK_MODEL_HEAVY
else:
self.lm_model = lm_model
print(f"Landmarks using blob file : {self.lm_model}")
self.pd_score_thresh = pd_score_thresh
self.lm_score_thresh = lm_score_thresh
self.smoothing = smoothing
self.crop = crop
self.internal_fps = internal_fps
self.stats = stats
self.force_detection = force_detection
self.presence_threshold = 0.5
self.visibility_threshold = 0.5
self.device = dai.Device()
self.xyz = False
if input_src == None or input_src == "rgb" or input_src == "rgb_laconic":
# Note that here (in Host mode), specifying "rgb_laconic" has no effect
# Color camera frame is systematically transferred to the host
self.input_type = "rgb" # OAK* internal color camera
if internal_fps is None:
if "heavy" in str(lm_model):
self.internal_fps = 10
elif "full" in str(lm_model):
self.internal_fps = 8
else: # lite
self.internal_fps = 13
else:
self.internal_fps = internal_fps
print(f"Internal camera FPS set to: {self.internal_fps}")
if resolution == "full":
self.resolution = (1920, 1080)
elif resolution == "ultra":
self.resolution = (3840, 2160)
else:
print(f"Error: {resolution} is not a valid resolution !")
sys.exit()
print("Sensor resolution:", self.resolution)
self.video_fps = self.internal_fps # Used when saving the output in a video file. Should be close to the real fps
if xyz:
# Check if the device supports stereo
cameras = self.device.getConnectedCameras()
if dai.CameraBoardSocket.LEFT in cameras and dai.CameraBoardSocket.RIGHT in cameras:
self.xyz = True
else:
print("Warning: depth unavailable on this device, 'xyz' argument is ignored")
if self.crop:
self.frame_size, self.scale_nd = mpu.find_isp_scale_params(internal_frame_height)
self.img_h = self.img_w = self.frame_size
self.pad_w = self.pad_h = 0
self.crop_w = (int(round(self.resolution[0] * self.scale_nd[0] / self.scale_nd[1])) - self.img_w) // 2
else:
width, self.scale_nd = mpu.find_isp_scale_params(internal_frame_height * 1920 / 1080, is_height=False)
self.img_h = int(round(self.resolution[1] * self.scale_nd[0] / self.scale_nd[1]))
self.img_w = int(round(self.resolution[0] * self.scale_nd[0] / self.scale_nd[1]))
self.pad_h = (self.img_w - self.img_h) // 2
self.pad_w = 0
self.frame_size = self.img_w
self.crop_w = 0
print(f"Internal camera image size: {self.img_w} x {self.img_h} - crop_w:{self.crop_w} pad_h: {self.pad_h}")
elif input_src.endswith('.jpg') or input_src.endswith('.png'):
self.input_type = "image"
self.img = cv2.imread(input_src)
self.video_fps = 25
self.img_h, self.img_w = self.img.shape[:2]
else:
self.input_type = "video"
if input_src.isdigit():
input_type = "webcam"
input_src = int(input_src)
self.cap = cv2.VideoCapture(input_src)
self.video_fps = int(self.cap.get(cv2.CAP_PROP_FPS))
self.img_w = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
self.img_h = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
print("Video FPS:", self.video_fps)
if self.input_type != "rgb":
print(f"Original frame size: {self.img_w}x{self.img_h}")
if self.crop:
self.frame_size = min(self.img_w, self.img_h) # // 16 * 16
else:
self.frame_size = max(self.img_w, self.img_h) #// 16 * 16
self.crop_w = max((self.img_w - self.frame_size) // 2, 0)
if self.crop_w: print("Cropping on width :", self.crop_w)
self.crop_h = max((self.img_h - self.frame_size) // 2, 0)
if self.crop_h: print("Cropping on height :", self.crop_h)
self.pad_w = max((self.frame_size - self.img_w) // 2, 0)
if self.pad_w: print("Padding on width :", self.pad_w)
self.pad_h = max((self.frame_size - self.img_h) // 2, 0)
if self.pad_h: print("Padding on height :", self.pad_h)
print(f"Frame working size: {self.img_w}x{self.img_h}")
self.nb_kps = 33 # Number of "viewable" keypoints
if self.smoothing:
self.filter_landmarks = mpu.LandmarksSmoothingFilter(
frequency=self.video_fps,
min_cutoff=0.05,
beta=80,
derivate_cutoff=1
)
# landmarks_aux corresponds to the 2 landmarks used to compute the ROI in next frame
self.filter_landmarks_aux = mpu.LandmarksSmoothingFilter(
frequency=self.video_fps,
min_cutoff=0.01,
beta=10,
derivate_cutoff=1
)
self.filter_landmarks_world = mpu.LandmarksSmoothingFilter(
frequency=self.video_fps,
min_cutoff=0.1,
beta=40,
derivate_cutoff=1,
disable_value_scaling=True
)
if self.xyz:
self.filter_xyz = mpu.LowPassFilter(alpha=0.25)
# Create SSD anchors
self.anchors = mpu.generate_blazepose_anchors()
self.nb_anchors = self.anchors.shape[0]
print(f"{self.nb_anchors} anchors have been created")
# Define and start pipeline
self.pd_input_length = 224
self.lm_input_length = 256
usb_speed = self.device.getUsbSpeed()
self.device.startPipeline(self.create_pipeline())
print(f"Pipeline started - USB speed: {str(usb_speed).split('.')[-1]}")
# Define data queues
if self.input_type == "rgb":
self.q_video = self.device.getOutputQueue(name="cam_out", maxSize=1, blocking=False)
self.q_pre_pd_manip_cfg = self.device.getInputQueue(name="pre_pd_manip_cfg")
if self.xyz:
self.q_spatial_data = self.device.getOutputQueue(name="spatial_data_out", maxSize=1, blocking=False)
self.q_spatial_config = self.device.getInputQueue("spatial_calc_config_in")
else:
self.q_pd_in = self.device.getInputQueue(name="pd_in")
self.q_pd_out = self.device.getOutputQueue(name="pd_out", maxSize=4, blocking=True)
self.q_lm_in = self.device.getInputQueue(name="lm_in")
self.q_lm_out = self.device.getOutputQueue(name="lm_out", maxSize=4, blocking=True)
self.fps = FPS()
self.nb_frames = 0
self.nb_pd_inferences = 0
self.nb_lm_inferences = 0
self.nb_lm_inferences_after_landmarks_ROI = 0
self.nb_frames_no_body = 0
self.glob_pd_rtrip_time = 0
self.glob_lm_rtrip_time = 0
self.use_previous_landmarks = False
self.cfg_pre_pd = dai.ImageManipConfig()
self.cfg_pre_pd.setResizeThumbnail(self.pd_input_length, self.pd_input_length)
def create_pipeline(self):
print("Creating pipeline...")
# Start defining a pipeline
pipeline = dai.Pipeline()
# pipeline.setOpenVINOVersion(version = dai.OpenVINO.Version.VERSION_2021_4)
if self.input_type == "rgb":
# ColorCamera
print("Creating Color Camera...")
cam = pipeline.createColorCamera()
if self.resolution[0] == 1920:
cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
else:
cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K)
cam.setInterleaved(False)
cam.setIspScale(self.scale_nd[0], self.scale_nd[1])
cam.setFps(self.internal_fps)
cam.setBoardSocket(dai.CameraBoardSocket.RGB)
if self.crop:
cam.setVideoSize(self.frame_size, self.frame_size)
cam.setPreviewSize(self.frame_size, self.frame_size)
else:
cam.setVideoSize(self.img_w, self.img_h)
cam.setPreviewSize(self.img_w, self.img_h)
cam_out = pipeline.createXLinkOut()
cam_out.setStreamName("cam_out")
cam_out.input.setQueueSize(1)
cam_out.input.setBlocking(False)
cam.video.link(cam_out.input)
# Define pose detection pre processing (resize preview to (self.pd_input_length, self.pd_input_length))
print("Creating Pose Detection pre processing image manip...")
pre_pd_manip = pipeline.create(dai.node.ImageManip)
pre_pd_manip.setMaxOutputFrameSize(self.pd_input_length*self.pd_input_length*3)
pre_pd_manip.setWaitForConfigInput(True)
pre_pd_manip.inputImage.setQueueSize(1)
pre_pd_manip.inputImage.setBlocking(False)
cam.preview.link(pre_pd_manip.inputImage)
pre_pd_manip_cfg_in = pipeline.create(dai.node.XLinkIn)
pre_pd_manip_cfg_in.setStreamName("pre_pd_manip_cfg")
pre_pd_manip_cfg_in.out.link(pre_pd_manip.inputConfig)
if self.xyz:
# For now, RGB needs fixed focus to properly align with depth.
# This value was used during calibration
cam.initialControl.setManualFocus(130)
mono_resolution = dai.MonoCameraProperties.SensorResolution.THE_400_P
left = pipeline.createMonoCamera()
left.setBoardSocket(dai.CameraBoardSocket.LEFT)
left.setResolution(mono_resolution)
left.setFps(self.internal_fps)
right = pipeline.createMonoCamera()
right.setBoardSocket(dai.CameraBoardSocket.RIGHT)
right.setResolution(mono_resolution)
right.setFps(self.internal_fps)
stereo = pipeline.createStereoDepth()
stereo.setConfidenceThreshold(230)
# LR-check is required for depth alignment
stereo.setLeftRightCheck(True)
stereo.setDepthAlign(dai.CameraBoardSocket.RGB)
stereo.setSubpixel(False) # subpixel True -> latency
spatial_location_calculator = pipeline.createSpatialLocationCalculator()
spatial_location_calculator.setWaitForConfigInput(True)
spatial_location_calculator.inputDepth.setBlocking(False)
spatial_location_calculator.inputDepth.setQueueSize(1)
spatial_data_out = pipeline.createXLinkOut()
spatial_data_out.setStreamName("spatial_data_out")
spatial_data_out.input.setQueueSize(1)
spatial_data_out.input.setBlocking(False)
spatial_calc_config_in = pipeline.createXLinkIn()
spatial_calc_config_in.setStreamName("spatial_calc_config_in")
left.out.link(stereo.left)
right.out.link(stereo.right)
stereo.depth.link(spatial_location_calculator.inputDepth)
spatial_location_calculator.out.link(spatial_data_out.input)
spatial_calc_config_in.out.link(spatial_location_calculator.inputConfig)
# Define pose detection model
print("Creating Pose Detection Neural Network...")
pd_nn = pipeline.createNeuralNetwork()
pd_nn.setBlobPath(str(Path(self.pd_model).resolve().absolute()))
# Increase threads for detection
# pd_nn.setNumInferenceThreads(2)
# Specify that network takes latest arriving frame in non-blocking manner
# Pose detection input
if self.input_type == "rgb":
pre_pd_manip.out.link(pd_nn.input)
else:
pd_in = pipeline.createXLinkIn()
pd_in.setStreamName("pd_in")
pd_in.out.link(pd_nn.input)
# Pose detection output
pd_out = pipeline.createXLinkOut()
pd_out.setStreamName("pd_out")
pd_nn.out.link(pd_out.input)
# Define landmark model
#
# Return the CASTRO directory on the current machine, based on your environment variables.
#
def get_castro_dir():
"""Return the location of the CASTRO directory."""
import os
CASTRO_HOME = os.getenv('CASTRO_HOME')
return CASTRO_HOME
# Return the name of the current inputs file in a directory.
def get_inputs_filename(directory):
"""Return the name of the inputs file in a directory."""
import os
# At present we have no reason to look for anything other than inputs.
if os.path.isfile(directory + '/inputs'):
return 'inputs'
elif os.path.isfile(directory + '/inputs_2d'):
return 'inputs_2d'
elif os.path.isfile(directory + '/inputs_3d'):
return 'inputs_3d'
else:
print("Error: no inputs file found in " + directory + ".")
exit()
# Get a CASTRO variable value from an inputs file.
def get_inputs_var(inputs, var):
"""Retrieve a CASTRO variable value from an inputs file."""
import numpy as np
# Read in all the inputs file lines and search for the one
# that starts with the desired variable name.
inputs_file = open(inputs, 'r')
lines = inputs_file.readlines()
lines = list(filter(lambda s: s.split() != [], lines))
line = list(filter(lambda s: s.split()[0] == var, lines))
# The variable value is whatever follows the '=' sign, up to any trailing comment.
# This should work correctly even if there is no comment.
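# Example with a hypothetical inputs line "castro.cfl = 0.5   # CFL number":
# split('=')[1] gives " 0.5   # CFL number", split('#')[0] gives " 0.5",
# and strip() leaves "0.5", which the numpy conversion below turns into 0.5.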
var = (line[0].split('=')[1]).split('#')[0]
var = var.strip()
# Now, convert it into a list if it has multiple entries.
if (var.split() != []):
var = var.split()
# Convert this to a floating point array, if possible.
# If this fails, we'll just leave it as a string.
try:
var = np.array(var,dtype='float')
# Now convert this to an integer array, if possible.
if (var[0].is_integer()):
var = np.array(var,dtype='int')
except:
pass
inputs_file.close()
return var
#
# Given a plotfile directory, return the git commit hashes.
#
def get_git_commits_from_plotfile(plotfile):
"""Retrieve git commit hashes from a plotfile."""
job_info = open(plotfile + "/job_info", 'r')
lines = job_info.readlines()
lines = [line.split() for line in lines]
castro_hash = ""
amrex_hash = ""
microphysics_hash = ""
for line in lines:
if (len(line) == 4):
if (line[0] == "Castro" and line[1] == "git" and line[2] == "hash:"):
castro_hash = line[3]
elif (line[0] == "AMReX" and line[1] == "git" and line[2] == "hash:"):
amrex_hash = line[3]
elif (line[0] == "Microphysics" and line[1] == "git" and line[2] == "hash:"):
microphysics_hash = line[3]
job_info.close()
return [castro_hash, amrex_hash, microphysics_hash]
#
# Given a diagnostic output file, return the git commit hashes.
#
def get_git_commits_from_diagfile(diagfile):
"""Retrieve git commit hashes from a diagnostic file."""
diagfile = open(diagfile, 'r')
castro_hash = ""
amrex_hash = ""
microphysics_hash = ""
line = diagfile.readline().split()
if (line[1] == "Castro" and line[2] == "git" and line[3] == "hash:"):
castro_hash = line[4]
line = diagfile.readline().split()
if (line[1] == "AMReX" and line[2] == "git" and line[3] == "hash:"):
amrex_hash = line[4]
line = diagfile.readline().split()
if (line[1] == "Microphysics" and line[2] == "git" and line[3] == "hash:"):
microphysics_hash = line[4]
diagfile.close()
return [castro_hash, amrex_hash, microphysics_hash]
#
# Given the stdout from a Castro run, return the git commit hashes.
#
def get_git_commits_from_infofile(infofile):
"""Retrieve git commit hashes from a stdout file."""
infofile = open(infofile, 'r')
lines = infofile.readlines()
lines = [line.split() for line in lines]
castro_hash = ""
amrex_hash = ""
microphysics_hash = ""
for line in lines:
if (len(line) == 4):
if (line[0] == "Castro" and line[1] == "git" and line[2] == "hash:"):
castro_hash = line[3]
elif (line[0] == "AMReX" and line[1] == "git" and line[2] == "hash:"):
amrex_hash = line[3]
elif (line[0] == "Microphysics" and line[1] == "git" and line[2] == "hash:"):
microphysics_hash = line[3]
infofile.close()
return [castro_hash, amrex_hash, microphysics_hash]
#
# Given CASTRO and AMReX hashes that were used to create the plot for a given plotfile,
# insert these and the current Microphysics and wdmerger hashes into an EPS file.
# Credit: http://stackoverflow.com/questions/1325905/inserting-line-at-specified-position-of-a-text-file-in-python
#
def insert_commits_into_eps(eps_file, data_file, data_file_type):
"""Insert git commit hashes into an EPS file."""
import fileinput
if (data_file_type == 'plot'):
[castro_hash, amrex_hash, microphysics_hash] = get_git_commits_from_plotfile(data_file)
elif (data_file_type == 'diag'):
[castro_hash, amrex_hash, microphysics_hash] = get_git_commits_from_diagfile(data_file)
elif (data_file_type == 'info'):
[castro_hash, amrex_hash, microphysics_hash] = get_git_commits_from_infofile(data_file)
else:
print("Error: Data file type not recognized.")
input = fileinput.input(eps_file, inplace=True)
for line in input:
print(line, end="") # No additional newline
if line.startswith('%%CreationDate:'):
print("%%CASTRO git hash: " + castro_hash + "\n" + \
"%%AMReX git hash: " + amrex_hash + "\n" + \
"%%Microphysics git hash: " + microphysics_hash)
#
# Given CASTRO and AMReX hashes that were used to create the plot for a given plotfile,
# insert these and the current Microphysics and wdmerger hashes into a text file.
# We will append these to the end of the file, so that the code calling this should wait
# until just before it wants the commit hashes to appear to call it, and should probably
# not have the file actively open at the time.
#
# The default comment character will be chosen for use in LaTeX but can be changed if
# another comment character is needed.
#
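# For example, with the default '%' comment character the appended block would look like
# (hash values shown here are placeholders):
#   % CASTRO git hash: 1a2b3c4
#   % AMReX git hash: 5d6e7f8
#   % Microphysics git hash: 9a0b1c2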
def insert_commits_into_txt(txt_file, data_file, data_file_type, comment_char='%'):
"""Insert git commit hashes into a text file."""
if (data_file_type == 'plot'):
[castro_hash, amrex_hash, microphysics_hash] = get_git_commits_from_plotfile(data_file)
elif (data_file_type == 'diag'):
[castro_hash, amrex_hash, microphysics_hash] = get_git_commits_from_diagfile(data_file)
elif (data_file_type == 'info'):
[castro_hash, amrex_hash, microphysics_hash] = get_git_commits_from_infofile(data_file)
else:
print("Error: Data file type not recognized.")
string = comment_char + " CASTRO git hash: " + castro_hash + "\n" + \
comment_char + " AMReX git hash: " + amrex_hash + "\n" + \
comment_char + " Microphysics git hash: " + microphysics_hash + "\n"
in_file = open(txt_file, 'a')
in_file.write(string)
in_file.close()
#
# Return the name of the latest wdmerger output file in a directory.
#
def get_last_output(directory):
"""Obtain the name of the the last wdmerger output file in a directory."""
import os
# Open up the standard output for analysis. It will be the numerically last file
# starting with the designated output string.
files = os.listdir(directory + '/output')
files = sorted(filter(lambda s: s[0:9] == "wdmerger.", files))
if (len(files) == 0):
exit()
return directory + "/output/" + files[-1]
#
# Return the name of the latest checkpoint in a directory.
#
def get_last_checkpoint(directory):
"""Obtain the name of the last checkpoint in a directory."""
import os
checkpoint = ""
if (not os.path.isdir(directory)):
return checkpoint
# Doing a search this way will first consider any checkpoint files
# with seven digits, then fall back to ones with six and then five digits.
# We want to be smart about this and list the ones in the current directory first,
# before checking any output directories where the data is archived, because
# the former are the most likely to be recently created checkpoints.
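# For example (hypothetical layout): '*chk???????' matches a seven-digit checkpoint such as
# ./chk0012345 directly under 'directory', while '*/*chk???????' matches an archived copy
# such as ./output/chk0012345; the shorter patterns below then cover six- and five-digit names.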
checkpointList=[]
checkpointNums=[]
dirList = os.listdir(directory)
from glob import glob
def add_to_list(chkList, chkNums, chk_string):
tempList = glob(chk_string)
chkList += [temp_dir for temp_dir in tempList]
chkNums += [temp_dir.split('/')[-1] for temp_dir in tempList]
add_to_list(checkpointList, checkpointNums, directory + '/*chk???????')
add_to_list(checkpointList, checkpointNums, directory + '/*/*chk???????')
add_to_list(checkpointList, checkpointNums, directory + '/*chk??????')
add_to_list(checkpointList, checkpointNums, directory + '/*/*chk??????')
add_to_list(checkpointList, checkpointNums, directory + '/*chk?????')
add_to_list(checkpointList, checkpointNums, directory + '/*/*chk?????')
if not checkpointList or not checkpointNums:
return checkpoint
# Match up the last checkpoint number with the actual file path location.
for chkNum in checkpointNums:
for chkFile in checkpointList:
currBaseName = chkFile.split('/')[-1]
if currBaseName == chkNum:
# The Header is the last thing written -- check if it's there, otherwise,
# we can skip this iteration, because it means the latest checkpoint file
# is still being written.
if os.path.isfile(chkFile + '/Header'):
checkpoint = chkFile
break
if checkpoint:
break
if not checkpoint:
print("Error: no completed checkpoint found in directory " + directory)
return
# Strip the directory path from the result, keeping only the checkpoint name.
checkpoint = checkpoint.split('/')[-1]
return checkpoint
#
# Get all of the data columns from a diagnostic output file.
#
def get_column_data(diag_filename):
"""Get all columns of data from a diagnostic file."""
import numpy as np
data = None
# Open up the file for reading. Get the names of the columns, as well as a 2D list
# with all the data.
diag_file = open(diag_filename, 'r')
vc_line = 'git'
# Skip the first few lines, they store the version control information
line = diag_file.readline()
while (line.split()[2] == vc_line):
arm must not deviate too much from the value of the left
if maxRightArmVal > (1 + hsVul) * maxLeftArmVal or maxRightArmVal < (1 - hsVul) * maxLeftArmVal:
return 0, [0, 0, 0, 0]
# volume over the formation should be decreasing, or at least not increasing
volTrend = optimizedTrend(leftArmVol + headVol + rightArmVol)
if (volTrend > 0):
return 0, [0, 0, 0, 0]
result = (1.0 * maxHeadVal / maxVal + 1.0 * maxLeftArmVol / maxVol) / 2.0
if volTrend > -1:
result = result * 0.8
# draw the neckline
leftArmVal = list(leftArmVal)
rightArmVal = list(rightArmVal)
minLeftArmVal = min(leftArmVal[leftArmVal.index(maxLeftArmVal):]) # min to the right of the left arm's max
rightArmPeek = rightArmVal.index(maxRightArmVal)
if rightArmPeek == 0:
return 0, [0, 0, 0, 0]
minRightArmVal = min(rightArmVal[0:rightArmPeek]) # min to the left of the right arm's max
maxRightArmVol = max(rightArmVol[rightArmPeek:]) # max volume to the right of the arm's max value
# check whether the neckline was broken on higher volume
rightArmValMin = min(rightArmVal[rightArmPeek:])
rightArmMaxVol = max(rightArmVol[0:rightArmPeek])
# print 'E'
if rightArmValMin > minRightArmVal:
return 0, [0, 0, 0, 0]
diff = len(leftArmVal) + len(headVal)
a, b = linearFun(leftArmVal.index(minLeftArmVal), minLeftArmVal,
rightArmVal.index(minRightArmVal) + diff, minRightArmVal)
if (trend(a) == 1):
return 0, [0, 0, 0, 0]
# print "Czy przelamano linie szyi?"
if (rightArmValMin >= evaluateFun(a, b, diff + rightArmVal.index(rightArmValMin)) and rightArmMaxVol < maxRightArmVol):
return 0, [0, 0, 0, 0]
if maxHeadVol > maxLeftArmVol or maxHeadVol > maxRightArmVol:
result = result * 0.5
return result, [leftArmVal.index(minLeftArmVal), minLeftArmVal,
len(rightArmVal) + diff, evaluateFun(a, b, len(rightArmVal) + diff)]
def smartLookForHeadAndShoulders(values, volumine):
"""Szukamy formacji glowy i ramion w sposob brutalny, szukajac wszystkich 3 elementowych podzrbiorow - nieuzywac"""
# print "Szukamy formacji glowy i ramion"
values = asarray(values)
volumine = asarray(volumine)
maxVal = max(values)
maxVol = max(volumine)
for i in reversed(range(5, div + 1)):
val = asarray(list(combinations(divideArray(values, i), 3)))
vol = asarray(list(combinations(divideArray(volumine, i), 3)))
z = map(lambda x, y: convertValuesToHeadAndShoulders(x, y, maxVal, maxVol), val, vol)
# print "z = ", z
if max(z) > 0:
return val[z.index(max(z))], vol[z.index(max(z))]
# print "nie znaleziono"
return [0], [0]
def lookForHeadAndShoulders(values, volumine, analyze=0):
"""Szukamy formacji glowy i ramion"""
# print "Szukamy formacji glowy i ramion"
if (len(values) < 15):
if analyze == 0:
return [0, 0, 0, 0]
else:
return [0]
values = asarray(values)
volumine = asarray(volumine)
maxVal = max(values)
maxVol = max(volumine)
for j in reversed(range(hsDiv - 4, min(2 * hsDiv, len(values)))):
val = list(divideArray(values, j))
vol = list(divideArray(volumine, j))
size = len(val[0])
for k in range(1, 6):
if k == 1:
shift = 0
else:
shift = size / k
val = list(divideArray(values[shift:], j))
vol = list(divideArray(volumine[shift:], j))
z = [0 for i in (range(len(val) - 1))]
neckLine = [[0, 0, 0, 0] for i in (range(len(val) - 1))]
for i in range(len(val) - 3):
leftArmVal = val[i]
leftArmVol = vol[i]
headVal = val[i + 1]
headVol = vol[i + 1]
rightArmVal = val[i + 2]
rightArmVol = vol[i + 2]
maxLeftArmVal = max(leftArmVal)
maxLeftArmVol = max(leftArmVol)
maxHeadVal = max(headVal)
maxHeadVol = max(headVol)
maxRightArmVal = max(rightArmVal)
maxRightArmVol = max(rightArmVol)
prev = []
if (i > 0):
prev = val[i - 1]
z[i], neckLine[i] = headAndShoulders(leftArmVal, headVal, rightArmVal, leftArmVol,
headVol, rightArmVol, maxLeftArmVal, maxLeftArmVol,
maxHeadVal, maxHeadVol, maxRightArmVal, maxRightArmVol, maxVal, maxVol, prev)
if max(z) > 0:
# print "znaleziono glowe i ramiona", z
index = z.index(max(z))
diff = sum(map(lambda x: len(x), val[0:index]))
neckLine[index][0] += diff + shift
neckLine[index][2] += diff + shift
if analyze == 0:
return neckLine[index]
else:
return [z[index], neckLine[index]]
# print "nie znaleziono"
if analyze == 0:
return [0, 0, 0, 0]
else:
return [0]
def reversedHeadAndShoulders(leftArmVal, headVal, rightArmVal, leftArmVol, headVol, rightArmVol, minLeftArmVal, maxLeftArmVol, minHeadVal,
maxHeadVol, minRightArmVal, maxRightArmVol, minVal, maxVol, prev=[]):
"""funkcja probuje znalezc odwrocona formacje glowy i ramion, wytlumaczenie argumentow:
leftArmVal - tablica z wartosciami, ktora podejrzewamy o bycie lewym ramieniem formacji
headVal - tablica z wartosciami, ktora podejrzewamy o bycie glowa formacji
rightArmVal - tablica z wartosciami, ktora podejrzewamy o bycie prawym ramieniem formacji
leftArmVol - tablica z wolumenem, ktora podejrzewamy o bycie lewym ramieniem formacji
headVol - tablica z wolumenem, ktora podejrzewamy o bycie glowa formacji
rightArmVol - tablica z wolumenem, ktora podejrzewamy o bycie prawym ramieniem formacji
minLeftArmVal - minimalna wartosc w lewym ramieniu
maxLeftArmVol - minimalny wolumen w lewym ramieniu
minHeadVal - minimalna wartosc w glowie (pik)
maxHeadVol - minimalny wolumen w glowie
minRightArmVal - minimalna wartosc w prawym ramieniu
maxRightArmVol - minimalny wolumen w prawym ramieniu
minVal - globalne minimum wartosci, im bliższa jest wartosc minHeadVal do minVal tym wieksze szanse ze dobrze wykrylismy
maxVol - globalne maksimum wolumenu, im blizsza jest wartosc maxRightArmVol do maxVol - || -
prev - tablica z wartosciami poprzedzajacymi formacje, sluzy do okreslenia trendu przed formacja"""
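# Illustrative call (mirrors the scanning loop in lookForHeadAndShoulders above; the exact
# arguments used in lookForReversedHeadAndShoulders below may differ slightly):
#   score, neck = reversedHeadAndShoulders(val[i], val[i + 1], val[i + 2],
#                                          vol[i], vol[i + 1], vol[i + 2],
#                                          min(val[i]), max(vol[i]),
#                                          min(val[i + 1]), max(vol[i + 1]),
#                                          min(val[i + 2]), max(vol[i + 2]),
#                                          minVal, maxVol, prev)
#   # score == 0 means no formation; otherwise neck holds the neckline end points.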
if len(prev):
if optimizedTrend(prev) == 1:
return 0, [0, 0, 0, 0]
# the left arm's value must be > the head's, and the head's volume should be the smallest
if minLeftArmVal < (1 + hsDiff) * minHeadVal or minRightArmVal < (1 + hsDiff) * minHeadVal:
return 0, [0, 0, 0, 0]
# the right arm's value must not deviate too much from the left arm's
if minRightArmVal > (1 + hsVul) * minLeftArmVal or minRightArmVal < (1 - hsVul) * minLeftArmVal:
return 0, [0, 0, 0, 0]
# volume over the formation should be non-decreasing
volTrend = optimizedTrend(leftArmVol + headVol + rightArmVol)
if (volTrend > 0):
return 0, [0, 0, 0, 0]
result = (1.0 * minHeadVal / minVal + 1.0 * maxLeftArmVol / maxVol) / 2
if volTrend < 0:
result = result * 0.8
# draw the neckline
leftArmVal = list(leftArmVal)
rightArmVal = list(rightArmVal)
maxLeftArmVal = max(leftArmVal[leftArmVal.index(minLeftArmVal):]) # max to the right of the left arm's min
rightArmPeek = rightArmVal.index(minRightArmVal)
if rightArmPeek == 0:
return 0, [0, 0, 0, 0]
maxRightArmVal = max(rightArmVal[0:rightArmPeek]) # max to the left of the right arm's min
maxRightArmVol = max(rightArmVol[rightArmPeek:]) # max volume to the right of the arm's min value
# check whether the neckline was broken on higher volume
rightArmValMax = max(rightArmVal[rightArmPeek:])
rightArmMaxVol = max(rightArmVol[0:rightArmPeek])
# print 'E'
if maxRightArmVal > rightArmValMax:
return 0, [0, 0, 0, 0]
diff = len(leftArmVal) + len(headVal)
a, b = linearFun(leftArmVal.index(maxLeftArmVal), maxLeftArmVal,
rightArmVal.index(maxRightArmVal) + diff, maxRightArmVal)
if (trend(a) == -1):
return 0, [0, 0, 0, 0]
# print "Czy przelamano linie szyi?"
if (rightArmValMax <= evaluateFun(a, b, rightArmVal.index(rightArmValMax) + diff) and rightArmMaxVol < maxRightArmVol):
return 0, [0, 0, 0, 0]
if (maxLeftArmVol < maxHeadVol or maxRightArmVol < maxHeadVol):
result = result * 0.5
return result, [leftArmVal.index(maxLeftArmVal), maxLeftArmVal,
len(rightArmVal) + diff, evaluateFun(a, b, len(rightArmVal) + diff)]
def smartLookForReversedHeadAndShoulders(values, volumine):
"""Szukamy odwroconej formacji glowy i ramion wyszukujac wszystkie 3 elementowe podzbiory - nieuzywac"""
# print "Szukamy odwroconej formacji glowy i ramion"
values = asarray(values)
volumine = asarray(volumine)
minVal = min(values)
maxVol = max(volumine)
for i in reversed(range(4, div + 1)):
val = asarray(list(combinations(divideArray(values, i), 3)))
vol = asarray(list(combinations(divideArray(volumine, i), 3)))
z = map(lambda x, y: reversedHeadAndShoulders(x, y, minVal, maxVol), val, vol)
# print "z = ", z
if max(z) > 0:
return val[z.index(max(z))], vol[z.index(max(z))]
# print "nie znaleziono"
return [0], [0]
def lookForReversedHeadAndShoulders(values, volumine, analyze=0):
"""Szukamy odwroconej formacji glowy i ramion"""
# print "Szukamy odwroconej formacji glowy i ramion"
if (len(values) < 15):
if analyze == 0:
return [0, 0, 0, 0]
else:
return [0]
values = asarray(values)
volumine = asarray(volumine)
minVal = min(values)
maxVol = max(volumine)
for j in reversed(range(hsDiv - 4, min(2 * hsDiv, len(values)))):
val = list(divideArray(values, j))
vol = list(divideArray(volumine, j))
size = len(val[0])
for k in range(1, 6):
if k == 1:
shift = 0
else:
shift = size / k
val = list(divideArray(values[shift:], j))
vol = list(divideArray(volumine[shift:], j))
z = [0 for i in (range(len(val) - 1))]
neckLine = [[0, 0, 0, 0] for i in (range(len(val) - 1))]
# print "\nsprawdzamy ", j
for i in range(len(val) - 3):
leftArmVal = val[i]
leftArmVol = vol[i]
headVal = val[i + 1]
headVol = vol[i + 1]
rightArmVal = val[i + 2]
rightArmVol = vol[i + 2]
minLeftArmVal | |
def getNodeList_Lower_Step(self):
lower_CB_list = self.low_CB_set.centralbase_list
length = len(lower_CB_list)
index = length-1
if length<2:
return
pre_base = lower_CB_list[-2]
base = lower_CB_list[-1]
if self.freq=='30MIN' and abs(base.up-4.35)<0.001:
a=1
if self.freq=='30MIN':
a=1
if (length==2) and len(self.node_list)==0:
self.seek_max = M_TO_UP
if 1==self.__get_CB_pos(pre_base, base):
self.seek_max = M_TO_DOWN
else:
self.seek_max = M_TO_UP
# create a new temporary node
self.__Make_New_Temp_Node_Lower(self.seek_max, base.start, base.end, index)
return
if self.cur_cut_low_id != index:
self.cur_cut_low_id = index
self.cur_cut_start_node_id = base.start_node_id
cur_base_start_node_id = self.cur_cut_start_node_id
cur_base_end_node_id = base.end_node_id
'''
# central-base (zhongshu) upgrade logic
if (cur_base_end_node_id - cur_base_start_node_id)==9:
if self.freq=='D':
a=1
self.node_list.pop()
self.__Make_New_Temp_Node_Lower_WithID(self.seek_max, cur_base_start_node_id, cur_base_start_node_id+3, index)
self.node_list[-1].isformal = M_FORMAL
cur_base_start_node_id = cur_base_start_node_id+3
self.seek_max=self.__reverse_direct(self.seek_max)
self.__Make_New_Temp_Node_Lower_WithID(self.seek_max, cur_base_start_node_id, cur_base_start_node_id+3, index)
self.node_list[-1].isformal = M_FORMAL
cur_base_start_node_id = cur_base_start_node_id+3
# perform the central-base calculation
self.get_Centralbase_Step()
self.update_max_min_value()
self.seek_max=self.__reverse_direct(self.seek_max)
self.__Make_New_Temp_Node_Lower_WithID(self.seek_max, cur_base_start_node_id, cur_base_start_node_id+3, index)
cur_base_start_node_id = cur_base_start_node_id+3
self.cur_cut_start_node_id = cur_base_start_node_id
return
'''
if self.node_list[-1].isformal == M_FORMAL and (base.start<=self.node_list[-1].datetime and base.end>=self.node_list[-1].datetime):
return
if self.seek_max==M_TO_UP: # upward
# the current central base is below or overlaps the previous one; the current trend ends
if((0<self.__get_CB_pos(pre_base, base)) and (index>self.node_list[-1].low_id)):
# update the formal node's info
#self.__Update_Last_Node_Lower_WithID(self.seek_max, pre_base.start, pre_base.end, isformal=M_FORMAL)
self.node_list[-1].isformal = M_FORMAL
# create a new temporary node
self.seek_max = M_TO_DOWN
self.__Make_New_Temp_Node_Lower_WithID(self.seek_max, cur_base_start_node_id, base.end_node_id, index)
else: # trend continues
low_node_time, low_node_value = self.__share_same_beichi_with_low_judge()
if low_node_time!=None and low_node_value!=None and False:
self.node_list[-1].isformal = M_FORMAL
self.node_list[-1].datetime = low_node_time
self.node_list[-1].value = low_node_value
self.node_list[-1].low_id = index
else:
self.__Update_Last_Node_Lower_WithID(self.seek_max, cur_base_start_node_id, base.end_node_id, isformal=M_TEMP,low_id=index)
else:
#当前中枢在前一中枢上或相交,当前趋势结束
if((0>self.__get_CB_pos(pre_base, base)) and (index>self.node_list[-1].low_id)):
#更新正式节点信息
#self.__Update_Last_Node_Lower(self.seek_max, pre_base.start, pre_base.end, isformal=M_FORMAL)
self.node_list[-1].isformal = M_FORMAL
# create a new temporary node
self.seek_max = M_TO_UP
self.__Make_New_Temp_Node_Lower_WithID(self.seek_max, cur_base_start_node_id, base.end_node_id, index)
else: # trend continues
low_node_time, low_node_value = self.__share_same_beichi_with_low_judge()
if low_node_time!=None and low_node_value!=None and False:
self.node_list[-1].isformal = M_FORMAL
self.node_list[-1].datetime = low_node_time
self.node_list[-1].value = low_node_value
self.node_list[-1].low_id = index
else:
self.__Update_Last_Node_Lower_WithID(self.seek_max, cur_base_start_node_id, base.end_node_id, isformal=M_TEMP,low_id=index)
def __Make_New_Temp_Node_Lower(self, seek_max, start_time, end_time, low_id=None):
'''
Create a new temporary node.
seek_max: the relation of this temporary node to the previous node.
'''
lower_data = self.low_CB_set.data
if seek_max==M_TO_UP:
time,value = self.__getMaxIndex_Val(lower_data, start_time, end_time)
top_bottom = M_TOP
else:
time,value = self.__getMinIndex_Val(lower_data, start_time, end_time)
top_bottom = M_BOTTOM
if time==None:
time_seg = self.data.ix[self.data.index>end_time, 'close']
time = time_seg.index[0]
value = self.data.ix[0, 'close']
self.node_list.append(Node(time, value, top_bottom, low_id=low_id, isformal=M_TEMP))
def __Make_New_Temp_Node_Lower_WithID(self, seek_max, start_node_id, end_node_id, low_id=None):
'''
Create a new temporary node.
seek_max: the relation of this temporary node to the previous node.
'''
lower_node_list = self.low_CB_set.node_list
if seek_max==M_TO_UP:
node_id,value = self.__getMaxLowerNode_Val( start_node_id, end_node_id)
top_bottom = M_TOP
else:
node_id,value = self.__getMinLowerNode_Val( start_node_id, end_node_id)
top_bottom = M_BOTTOM
self.node_list.append(Node(lower_node_list[node_id].datetime, value, top_bottom, low_id=low_id, isformal=M_TEMP))
def __Update_Last_Node_Lower(self, seek_max, start_time, end_time, isformal=None, low_id = None) :
'''
Update the last node's info.
seek_max: the relation of this temporary node to the previous node.
'''
lower_data = self.low_CB_set.data
if seek_max==M_TO_UP:
time,value = self.__getMaxIndex_Val(lower_data, start_time, end_time)
else:
time,value = self.__getMinIndex_Val(lower_data, start_time, end_time)
if time==None:
time_seg = self.data.ix[self.data.index>end_time, 'close']
time = time_seg.index[0]
value = self.data.ix[0, 'close']
if ((seek_max==M_TO_UP) and (value>self.node_list[-1].value))\
or ((seek_max==M_TO_DOWN) and (value<self.node_list[-1].value)):
self.node_list[-1].datetime = time
self.node_list[-1].value = value
if low_id!=None:
self.node_list[-1].low_id = low_id
if isformal!=None:
self.node_list[-1].isformal = isformal
def __Update_Last_Node_Lower_WithID(self, seek_max, start_node_id, end_node_id, isformal=None, low_id = None) :
'''
Update the last node's info.
seek_max: the relation of this temporary node to the previous node.
'''
lower_node_list = self.low_CB_set.node_list
if seek_max==M_TO_UP:
node_id,value = self.__getMaxLowerNode_Val( start_node_id, end_node_id)
else:
node_id,value = self.__getMinLowerNode_Val( start_node_id, end_node_id)
if ((seek_max==M_TO_UP) and (value>self.node_list[-1].value))\
or ((seek_max==M_TO_DOWN) and (value<self.node_list[-1].value)):
self.node_list[-1].datetime = lower_node_list[node_id].datetime
self.node_list[-1].value = value
if low_id!=None:
self.node_list[-1].low_id = low_id
if isformal!=None:
self.node_list[-1].isformal = isformal
def __reverse_direct(self, seek_max):
if seek_max == M_TO_UP:
return M_TO_DOWN
else:
return M_TO_UP
def __get_lowest_current_time(self, freq):
low_cb_set = self.low_CB_set
while(low_cb_set!=None):
if low_cb_set.freq == freq:
return low_cb_set.cur_time_index
else:
low_cb_set = low_cb_set.low_CB_set
return self.cur_time_index
def get_lower_beichi(self):
if self.low_CB_set!=None:
low_beichi_list = self.low_CB_set.beichi_list
low_node_list = self.low_CB_set.node_list
if len(low_beichi_list)<=0 \
or len(low_node_list)<2 \
or len(self.centralbase_list)<=0:
return
if self.freq=='30MIN' :
if abs(low_node_list[-2].value-36)<0.001:
a=1
if (low_beichi_list[-1].time == low_node_list[-2].datetime) and (self.cur_low_beichi_time != low_node_list[-2].datetime):
self.cur_low_beichi_time = low_node_list[-2].datetime
base = self.centralbase_list[-1]
if(base.ctype<=-2):
if low_node_list[-2].value <= self.cur_min_value: # new low
pre_macd = self.__getMACD_Sum(self.node_list[self.cur_min_node_id-1].datetime, self.node_list[self.cur_min_node_id].datetime, seekMax=False)
cur_macd = self.__getMACD_Sum(low_node_list[-3].datetime, low_node_list[-2].datetime, seekMax=False)
pre_vol = self.__getVolumn_Sum(self.node_list[self.cur_min_node_id-1].datetime, self.node_list[self.cur_min_node_id].datetime, seekMax=False)
cur_vol = self.__getVolumn_Sum(low_node_list[-3].datetime, low_node_list[-2].datetime, seekMax=False)
if (abs(cur_macd) < abs(pre_macd)) or (abs(cur_vol)<abs(pre_vol)):
self.beichi_list.append(BeichiTime(low_node_list[-2].datetime, base.ctype, len(self.node_list)-1,\
real_time=self.__get_lowest_current_time("5MIN")))
self.first_buy_point_list.append(BuyPoint(low_node_list[-2].datetime, len(self.node_list)-1,\
real_time=self.__get_lowest_current_time("5MIN")))
elif (base.ctype>=2):
if low_node_list[-2].value >= self.cur_max_value: # new high
pre_macd = self.__getMACD_Sum(self.node_list[self.cur_max_node_id-1].datetime, self.node_list[self.cur_max_node_id].datetime, seekMax=True)
cur_macd = self.__getMACD_Sum(low_node_list[-3].datetime, low_node_list[-2].datetime, seekMax=True)
if abs(cur_macd) < abs(pre_macd):
self.beichi_list.append(BeichiTime(low_node_list[-2].datetime, base.ctype, len(self.node_list)-1,\
real_time=self.__get_lowest_current_time("5MIN")))
self.first_sell_point_list.append(SellPoint(low_node_list[-2].datetime, len(self.node_list)-1,\
real_time=self.__get_lowest_current_time("5MIN")))
def update_max_min_value(self):
'''
Update the max and min values from the formal nodes' values.
'''
if(len(self.centralbase_list)<2):
return
pre_base = self.centralbase_list[-2]
base = self.centralbase_list[-1]
if (self.cur_min_node_id == len(self.node_list)-2) \
or (self.cur_max_node_id == len(self.node_list)-2):
return
if base.ctype==0 or pre_base.ctype*base.ctype<0:
self.cur_max_node_id,self.cur_max_value = self.__getMaxNode_Val(base.start_node_id, base.end_node_id)
self.cur_min_node_id,self.cur_min_value = self.__getMinNode_Val(base.start_node_id, base.end_node_id)
else:
if self.node_list[-2].value <= self.cur_min_value: # new low
self.cur_min_node_id = len(self.node_list)-2
self.cur_min_value = self.node_list[-2].value
if self.node_list[-2].value >= self.cur_max_value: # new high
self.cur_max_node_id = len(self.node_list)-2
self.cur_max_value = self.node_list[-2].value
def getBeichi_LastTwo_Step(self):
'''
Step-wise detection of divergence (beichi) nodes.
Returns whether the node newly added to the current central base is a divergence point.
Call timing:
a new formal node has been added to the central base, but the base's extreme values have not yet been updated.
'''
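# Numerical illustration (hypothetical values): suppose the previous down-leg had a mean
# MACD of -0.8 and the current down-leg, which makes a lower price low, has a mean MACD
# of -0.3; then abs(-0.3) < abs(-0.8), so the new node is recorded as a bottom divergence
# (beichi) and a first buy point is appended.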
if(len(self.centralbase_list)<2):
return False
if len(self.beichi_list)>0 and self.beichi_list[-1].time == self.node_list[-2].datetime:
return False
pre_base = self.centralbase_list[-2]
base = self.centralbase_list[-1]
cur_macd = 0
pre_macd = 0
cur_macd_lower = 0
pre_macd_lower = 0
if(base.ctype<=-2):
if self.node_list[-2].value < self.cur_min_value: # new low
pre_macd = self.__getMACD_Sum(self.node_list[self.cur_min_node_id-1].datetime, self.node_list[self.cur_min_node_id ].datetime, seekMax=False)
cur_macd = self.__getMACD_Sum(self.node_list[-3].datetime, self.node_list[-2].datetime, seekMax=False)
pre_macd_lower = self.__getMACD_Sum_Lower(self.node_list[self.cur_min_node_id-1].datetime, self.node_list[self.cur_min_node_id ].datetime, seekMax=False)
cur_macd_lower = self.__getMACD_Sum_Lower(self.node_list[-3].datetime, self.node_list[-2].datetime, seekMax=False)
if abs(cur_macd) < abs(pre_macd) or abs(cur_macd_lower) < abs(pre_macd_lower) :
if self.freq=="D":
a=1
self.beichi_list.append(BeichiTime(self.node_list[-2].datetime,base.ctype, len(self.node_list)-2,\
real_time=self.__get_lowest_current_time("5MIN")))
self.first_buy_point_list.append(BuyPoint(self.node_list[-2].datetime,len(self.node_list)-2,\
real_time=self.__get_lowest_current_time("5MIN")))
return True
elif (base.ctype>=2):
if self.node_list[-2].value > self.cur_max_value: # new high
pre_macd = self.__getMACD_Sum(self.node_list[self.cur_max_node_id-1].datetime, self.node_list[self.cur_max_node_id].datetime, seekMax=True)
cur_macd = self.__getMACD_Sum(self.node_list[-3].datetime, self.node_list[-2].datetime, seekMax=True)
pre_macd_lower = self.__getMACD_Sum_Lower(self.node_list[self.cur_max_node_id-1].datetime, self.node_list[self.cur_max_node_id].datetime, seekMax=True)
cur_macd_lower = self.__getMACD_Sum_Lower(self.node_list[-3].datetime, self.node_list[-2].datetime, seekMax=True)
if abs(cur_macd) < abs(pre_macd) or abs(cur_macd_lower) < abs(pre_macd_lower) :
self.beichi_list.append(BeichiTime(self.node_list[-2].datetime,base.ctype, len(self.node_list)-2,\
real_time=self.__get_lowest_current_time("5MIN")))
self.first_sell_point_list.append(SellPoint(self.node_list[-2].datetime, len(self.node_list)-2,\
real_time=self.__get_lowest_current_time("5MIN")))
return True
else:
return self.beichi_processing
return self.beichi_processing
def get_panzheng_beichi_step(self):
'''
Consolidation-range (panzheng) divergence.
'''
if(len(self.centralbase_list)<=1) or len(self.node_list)<1:
return False
base = self.centralbase_list[-1]
start_node_id = base.start_node_id
end_node_id = base.end_node_id
if len(self.beichi_pc_list)>0 and self.node_list[end_node_id].datetime ==self.beichi_pc_list[-1].time:
return False
if end_node_id-start_node_id >=2:
if self.node_list[end_node_id].value<base.min_val: # new low
min_node_id = base.min_node_id
pre_macd = self.__getMACD_Sum(self.node_list[min_node_id-1].datetime, self.node_list[min_node_id].datetime, seekMax=False)
cur_macd = self.__getMACD_Sum(self.node_list[end_node_id-1].datetime, self.node_list[end_node_id].datetime, seekMax=False)
pre_macd_lower = self.__getMACD_Sum_Lower(self.node_list[min_node_id-1].datetime, self.node_list[min_node_id].datetime, seekMax=False)
cur_macd_lower = self.__getMACD_Sum_Lower(self.node_list[end_node_id-1].datetime, self.node_list[end_node_id].datetime, seekMax=False)
if abs(cur_macd) < abs(pre_macd) or abs(cur_macd_lower) < abs(pre_macd_lower) :
self.beichi_pc_list.append(BeichiTime(self.node_list[end_node_id].datetime,-2, end_node_id,\
real_time=self.__get_lowest_current_time("5MIN")))
return True
elif self.node_list[end_node_id].value>base.max_val: # new high
max_node_id = base.max_node_id
pre_macd = self.__getMACD_Sum(self.node_list[max_node_id-1].datetime, self.node_list[max_node_id].datetime, seekMax=True)
cur_macd = self.__getMACD_Sum(self.node_list[end_node_id-1].datetime, self.node_list[end_node_id].datetime, seekMax=True)
pre_macd_lower = self.__getMACD_Sum_Lower(self.node_list[max_node_id-1].datetime, self.node_list[max_node_id].datetime, seekMax=True)
cur_macd_lower = self.__getMACD_Sum_Lower(self.node_list[end_node_id-1].datetime, self.node_list[end_node_id].datetime, seekMax=True)
if abs(cur_macd) < abs(pre_macd) or abs(cur_macd_lower) < abs(pre_macd_lower) :
if self.freq=='30MIN':
a=1
self.beichi_pc_list.append(BeichiTime(self.node_list[end_node_id].datetime,2, end_node_id,\
real_time=self.__get_lowest_current_time("5MIN")))
return True
return False
def getBeichi_LastOne_Step(self):
'''
Step-wise detection of divergence (beichi) nodes.
Returns whether the node newly added to the current central base is a divergence point.
'''
if(len(self.centralbase_list)<2):
return False
if self.node_list[-1].isformal != M_FORMAL:
return False
if len(self.beichi_list)>0 and self.beichi_list[-1].time == self.node_list[-1].datetime:
return False
if self.freq=='30MIN' :
a=1
pre_base = self.centralbase_list[-2]
base = self.centralbase_list[-1]
cur_macd = 0
pre_macd = 0
if(base.ctype<=-2):
if self.node_list[-1].value <= self.cur_min_value: # new low
pre_macd = self.__getMACD_Sum(self.node_list[self.cur_min_node_id-1].datetime, self.node_list[self.cur_min_node_id].datetime, seekMax=False)
cur_macd = self.__getMACD_Sum(self.node_list[-2].datetime, self.node_list[-1].datetime, seekMax=False)
pre_vol = self.__getVolumn_Sum(self.node_list[self.cur_min_node_id-1].datetime, self.node_list[self.cur_min_node_id].datetime, seekMax=False)
cur_vol = self.__getVolumn_Sum(self.node_list[-2].datetime, self.node_list[-1].datetime, seekMax=False)
if (abs(cur_macd) < abs(pre_macd)) or (abs(cur_vol)<abs(pre_vol)):
self.beichi_list.append(BeichiTime(self.node_list[-1].datetime, base.ctype, len(self.node_list)-1,\
real_time=self.__get_lowest_current_time("5MIN")))
self.first_buy_point_list.append(BuyPoint(self.node_list[-1].datetime, len(self.node_list)-1,\
real_time=self.__get_lowest_current_time("5MIN")))
return True
elif (base.ctype>=2):
if self.node_list[-1].value >= self.cur_max_value: # new high
pre_macd = self.__getMACD_Sum(self.node_list[self.cur_max_node_id-1].datetime, self.node_list[self.cur_max_node_id].datetime, seekMax=True)
cur_macd = self.__getMACD_Sum(self.node_list[-2].datetime, self.node_list[-1].datetime, seekMax=True)
if abs(cur_macd) < abs(pre_macd):
self.beichi_list.append(BeichiTime(self.node_list[-1].datetime, base.ctype, len(self.node_list)-1,\
real_time=self.__get_lowest_current_time("5MIN")))
self.first_sell_point_list.append(SellPoint(self.node_list[-1].datetime, len(self.node_list)-1,\
real_time=self.__get_lowest_current_time("5MIN")))
return True
else:
return self.beichi_processing
return self.beichi_processing
def getBeichi_Share_With_LowBeichi_Step(self):
if self.low_CB_set==None:
return
if len(self.centralbase_list)<1:
return
if len(self.share_beichi_list)>0 and self.share_beichi_list[-1].time == self.low_CB_set.node_list[-2].datetime:
return
base = self.centralbase_list[-1]
low_node_time, low_node_value = self.__share_same_beichi_with_low_judge()
if low_node_time!=None and low_node_value!=None :
self.share_beichi_list.append(BeichiTime(low_node_time, base.ctype, len(self.node_list)-1,\
real_time=self.__get_lowest_current_time("5MIN")))
def beichi_judge_step(self):
for beichi in self.beichi_list:
if beichi.real_beichi == M_NODECIDE:
if beichi.node_id + 4 == len(self.node_list):
if beichi.btype >0 and self.node_list[beichi.node_id].value>=self.node_list[-2].value: # top divergence check
beichi.real_beichi = M_TRUE
elif beichi.btype <0 and self.node_list[beichi.node_id].value<=self.node_list[-2].value: # bottom divergence check
beichi.real_beichi = M_TRUE
else:
beichi.real_beichi = M_FALSE
def trade_strategy_step(self, high_cb_set):
for sec_buy_point in self.sec_buy_point_list:
if sec_buy_point.real_buy==M_NODECIDE:
if len(high_cb_set.centralbase_list)>0 and high_cb_set.centralbase_list[-1].ctype>=-2:
sec_buy_point.real_buy = M_TRUE
else:
sec_buy_point.real_buy = M_FALSE
def sell_point_judge(self):
if len(self.node_list)<3:
return
if len(self.first_buy_point_list)>0:
if len(self.node_list)-2 == (self.first_buy_point_list[-1].node_id+1):
self.all_sell_point_list.append(SellPoint(self.node_list[-2].datetime, len(self.node_list)-2,\
real_time=self.__get_lowest_current_time("5MIN")))
if len(self.sec_buy_point_list)>0:
if len(self.node_list)-2 == (self.sec_buy_point_list[-1].node_id+1):
self.all_sell_point_list.append(SellPoint(self.node_list[-2].datetime, len(self.node_list)-2,\
real_time=self.__get_lowest_current_time("5MIN")))
if len(self.third_buy_point_list)>0:
if len(self.node_list)-2 == (self.third_buy_point_list[-1].node_id+1):
self.all_sell_point_list.append(SellPoint(self.node_list[-2].datetime, len(self.node_list)-2,\
real_time=self.__get_lowest_current_time("5MIN")))
def __getMACD_Sum(self, start_time, end_time, seekMax=True):
data_seg = self.data.ix[(self.data.index>=start_time) & (self.data.index<=end_time) & (self.data.index <= self.cur_time_index), 'MACD']
if seekMax:
data_seg = data_seg[data_seg>0]
else:
data_seg = data_seg[data_seg<0]
#return data_seg.sum()
if data_seg.empty:
return 0
else:
return data_seg.mean()
def __getMACD_Sum_Lower(self, start_time, end_time, seekMax=True):
if self.low_CB_set!= None:
data_seg = self.low_CB_set.data.ix[(self.low_CB_set.data.index>=start_time) & (self.low_CB_set.data.index<=end_time) & (self.low_CB_set.data.index <= self.low_CB_set.cur_time_index), 'MACD']
else:
data_seg = self.data.ix[(self.data.index>=start_time) & (self.data.index<=end_time) & (self.data.index <= self.cur_time_index), 'MACD']
= tmpl
images = filter(lambda x: os.path.isfile(x), dataset.template_to_filenames(*ds))
if len(images) > 1:
h = XIO.Image(images[0]).header
h_next = XIO.Image(images[1]).header
h_last = XIO.Image(images[-1]).header
if h_next.get("PhiStart", 0) == h.get("PhiStart", 0):
print "This job may be scan?:", tmpl
continue
job.wavelength = h.get("Wavelength", 0)
job.osc_end, job.osc_start = h_last.get("PhiEnd", 0), h.get("PhiStart", 0)
job.osc_step = h.get("PhiWidth", 0)
job.status = "finished"
job.exp_time = h.get("ExposureTime", 0)
job.distance = h.get("Distance", 0)
job.attenuator = None, 0
job.detector = "?"
if job.osc_step == 0 or job.osc_end - job.osc_start == 0:
print "This job don't look like osc data set:", tmpl
continue
self.jobs[(prefix, nr)] = job
self.jobs_prefix_lookup.setdefault(prefix, set()).add(nr)
# Dump jobs
pickle.dump(self.jobs, open(os.path.join(config.params.workdir, "jobs.pkl"), "wb"), 2)
# update_jobs_from_files()
def process_data(self, key):
if key not in self.jobs:
mylog.error("Unknown job: %s" % key)
return
if config.params.engine == "xds":
self.process_data_xds(key)
elif config.params.engine == "dials":
self.process_data_dials(key)
else:
raise "Never reaches here"
# process_data()
def process_data_xds(self, key):
job = self.jobs[key]
prefix, nr = key
workdir = self.get_xds_workdir(key)
if not os.path.exists(workdir): os.makedirs(workdir)
# Prepare XDS.INP
img_files = find_existing_files_in_template(job.filename, nr[0], nr[1],
datadir=os.path.dirname(prefix), check_compressed=True)
if len(img_files) == 0:
mylog.error("No files found for %s %s" % (job.filename, nr))
return
overrides = read_override_config(os.path.dirname(job.filename))
# XXX need to update self.jobs (display on GUI)
xdsinp_str = xds_inp.generate_xds_inp(img_files=img_files,
inp_dir=os.path.abspath(workdir),
reverse_phi=config.params.reverse_phi, anomalous=True,
spot_range="all", minimum=False,
integrate_nimages=None, minpk=config.params.xds.minpk,
exclude_resolution_range=config.params.xds.exclude_resolution_range,
orgx=overrides.get("orgx",None),
orgy=overrides.get("orgy",None),
distance=overrides.get("distance",None),
wavelength=overrides.get("wavelength",None),
osc_range=overrides.get("osc_range",None),
rotation_axis=overrides.get("rotation_axis",None))
open(os.path.join(workdir, "XDS.INP"), "w").write(xdsinp_str)
opts = ["multiproc=false", "topdir=.", "nproc=%d"%config.params.batch.nproc_each, "tryhard=true",
"make_report=true"]
if config.params.small_wedges: opts.append("no_scaling=true")
if None not in (config.params.known.space_group, config.params.known.unit_cell):
opts.append("cell_prior.cell=%s" % ",".join(map(lambda x: "%.3f"%x, config.params.known.unit_cell)))
opts.append("cell_prior.sgnum=%d" % sgtbx.space_group_info(config.params.known.space_group).group().type().number())
# Start batch job
job = batchjob.Job(workdir, "xds_auto.sh", nproc=config.params.batch.nproc_each)
job_str = """\
"%(exe)s" - <<+
from yamtbx.dataproc.auto.command_line.run_all_xds_simple import run_from_args
run_from_args([%(args)s])
for i in xrange(%(repeat)d-1):
run_from_args([%(args)s, "mode=recycle"])
+
""" % dict(exe=sys.executable, args=",".join(map(lambda x: '"%s"'%x, opts)),
repeat=config.params.xds.repeat)
job.write_script(job_str+"\n")
batchjobs.submit(job)
self.procjobs[key] = job
# process_data_xds()
def process_data_dials(self, key):
job = self.jobs[key]
prefix, nr = key
workdir = self.get_xds_workdir(key)
if not os.path.exists(workdir): os.makedirs(workdir)
# Prepare
img_files = find_existing_files_in_template(job.filename, nr[0], nr[1],
datadir=os.path.dirname(prefix), check_compressed=True)
if len(img_files) == 0:
mylog.error("No files found for %s %s" % (job.filename, nr))
return
nproc_str = "nproc=%d"%config.params.batch.nproc_each
job_str = ""
job_str += "dials.import template=%s image_range=%d,%d\n" % (job.filename.replace("?","#"),
nr[0], nr[1])
job_str += "dials.find_spots datablock.json global_threshold=200 %s\n" % nproc_str
job_str += "dials.index datablock.json strong.pickle indexing.method=fft3d index_assignment.method=local "
if None not in (config.params.known.space_group, config.params.known.unit_cell):
job_str += "unit_cell=%s space_group=%d\n" % (",".join(map(lambda x: "%.3f"%x, config.params.known.unit_cell)),
sgtbx.space_group_info(config.params.known.space_group).group().type().number())
else:
job_str += "\n"
job_str += "dials.integrate experiments.json indexed.pickle min_spots=30 %s\n" % nproc_str
job_str += "dials.export_mtz integrated_experiments.json integrated.pickle\n"
job_str += "echo tolerance 10 | pointless hklout.mtz hklout pointless.mtz > pointless.log"
job_str += "touch dials_job_finished"
# TODO config.params.xds.exclude_resolution_range config.params.reverse_phi
# Start batch job
job = batchjob.Job(workdir, "dials_auto.sh", nproc=config.params.batch.nproc_each)
job.write_script(job_str)
batchjobs.submit(job)
self.procjobs[key] = job
# process_data_dials()
def _save_chache(self, key, filename, obj):
self._chaches[(key,filename)] = (os.path.getmtime(filename), obj)
# _save_chache()
def _load_if_chached(self, key, filename):
if (key, filename) not in self._chaches: return None
if not os.path.isfile(filename): return None
last_mtime, obj = self._chaches[(key, filename)]
if last_mtime == os.path.getmtime(filename):
return obj
return None
# _load_if_chached()
def get_process_status(self, key):
prefix, nr = key
workdir = self.get_xds_workdir(key)
spot_xds = os.path.join(workdir, "SPOT.XDS")
xparm_xds = os.path.join(workdir, "XPARM.XDS")
correct_lp = os.path.join(workdir, "CORRECT.LP")
state = None
cmpl, sg, resn = None, None, None
if config.params.engine == "xds":
if key not in self.procjobs:
if os.path.exists(os.path.join(workdir, "decision.log")):
state = batchjob.STATE_FINISHED
else:
job = self.procjobs[key]
batchjobs.update_state(job)
state = job.state
if state == batchjob.STATE_FINISHED:
if os.path.isfile(correct_lp):
lp = self._load_if_chached("correctlp", correct_lp)
if lp is None:
lp = correctlp.CorrectLp(correct_lp)
self._save_chache("correctlp", correct_lp, lp)
ISa = lp.get_ISa() if lp.is_ISa_valid() else float("nan")
if config.params.small_wedges:
resn = self._load_if_chached("resn", spot_xds)
if resn is None:
resn = estimate_resolution_by_spotxds.run(spot_xds, xparm_xds)
self._save_chache("resn", spot_xds, resn)
else:
resn = lp.resolution_based_on_ios_of_error_table(min_ios=1.)
self._save_chache("resn", correct_lp, resn) # for html report
sg = lp.space_group_str()
cmpl = float(lp.table["all"]["cmpl"][-1]) if "all" in lp.table else float("nan")
if not os.path.isfile(os.path.join(workdir, "XDS_ASCII.HKL")):
state = "giveup"
elif config.params.engine == "dials":
if key not in self.procjobs:
if os.path.exists(os.path.join(workdir, "dials_job_finished")):
state = batchjob.STATE_FINISHED
else:
job = self.procjobs[key]
batchjobs.update_state(job)
state = job.state
if state == batchjob.STATE_FINISHED:
resn = float("nan")
sg = "?"
cmpl = float("nan")
if not os.path.isfile(os.path.join(workdir, "hklout.mtz")):
state = "giveup"
return state, (cmpl, sg, resn)
# get_process_status()
def get_process_result(self, key):
prefix, nr = key
workdir = self.get_xds_workdir(key)
ret = {}
ret["workdir"] = workdir
correct_lp = os.path.join(workdir, "CORRECT.LP")
gxparm_xds = os.path.join(workdir, "GXPARM.XDS")
stats_pkl = os.path.join(workdir, "merging_stats.pkl")
if os.path.isfile(correct_lp):
lp = correctlp.CorrectLp(correct_lp)
ret["ISa"] = lp.get_ISa() if lp.is_ISa_valid() else float("nan")
ret["resn"] = lp.resolution_based_on_ios_of_error_table(min_ios=1.)
ret["sg"] = lp.space_group_str()
ret["cmpl"] = float(lp.table["all"]["cmpl"][-1]) if "all" in lp.table else float("nan")
if lp.unit_cell is not None:
ret["cell"] = lp.unit_cell
elif os.path.isfile(gxparm_xds):
xp = xparm.XPARM(gxparm_xds)
ret["cell"] = list(xp.unit_cell)
ret["sg"] = xp.space_group_str()
if os.path.isfile(stats_pkl):
sio = StringIO.StringIO()
pickle.load(open(stats_pkl))["stats"].show(out=sio, header=False)
lines = sio.getvalue().replace("<","&lt;").replace(">","&gt;").splitlines()
i_table_begin = filter(lambda x: "Statistics by resolution bin:" in x[1], enumerate(lines))
if len(i_table_begin) == 1:
ret["table_html"] = "\n".join(lines[i_table_begin[0][0]+1:])
return ret
# get_process_result()
# class BssJobs
# Singleton objects
bssjobs = BssJobs()
batchjobs = None # initialized in __main__
mainFrame = None
class WatchLogThread:
def __init__(self, parent):
self.parent = parent
self.interval = 10
self.thread = None
def start(self, interval=None):
self.stop()
self.keep_going = True
self.running = True
if interval is not None:
self.interval = interval
self.thread = threading.Thread(None, self.run)
self.thread.daemon = True
self.thread.start()
def stop(self):
if self.is_running():
mylog.info("Stopping WatchLogThread.. Wait.")
self.keep_going = False
self.thread.join()
else:
mylog.info("WatchLogThread already stopped.")
def is_running(self):
return self.thread is not None and self.thread.is_alive()
def run(self):
mylog.info("WatchLogThread loop STARTED")
counter = 0
while self.keep_going:
counter += 1
if config.params.date == "today": date = datetime.datetime.today()
else: date = datetime.datetime.strptime(config.params.date, "%Y-%m-%d")
if not (config.params.logwatch_once and counter > 1):
# check bsslog
if config.params.jobspkl is not None:
bssjobs.jobs = pickle.load(open(config.params.jobspkl))
for prefix, nr in bssjobs.jobs:
bssjobs.jobs_prefix_lookup.setdefault(prefix, set()).add(nr)
else:
if config.params.blconfig is not None:
#joblogs, prev_job_finished, job_is_running = bssjobs.check_bss_log(date, -config.params.checklog_daybefore)
bssjobs.update_jobs(date, -config.params.checklog_daybefore) #joblogs, prev_job_finished, job_is_running)
else:
bssjobs.update_jobs_from_files(config.params.topdir,
config.params.include_dir, config.params.exclude_dir)
# start jobs
if config.params.auto_mode:
for key in bssjobs.keys():
status = bssjobs.get_process_status(key)[0]
if bssjobs.get_job(key).status == "finished" and status is None:
mylog.info("Automatically starting processing %s" % str(key))
bssjobs.process_data(key)
ev = EventLogsUpdated()
wx.PostEvent(self.parent, ev)
# Make html report
html_report.make_kamo_report(bssjobs,
topdir=config.params.topdir,
htmlout=os.path.join(config.params.workdir, "report.html"))
#print
#print "Done. Open?"
#print "firefox %s" % os.path.join(config.params.workdir, "report.html")
if self.interval == 0: # Run only once
self.keep_going = False
continue
if self.interval < 1:
time.sleep(self.interval)
else:
for i in xrange(int(self.interval/.5)):
if self.keep_going:
time.sleep(.5)
mylog.info("WatchLogThread loop FINISHED")
self.running = False
#wx.PostEvent(self.parent, EventDirWatcherStopped()) # Ensure the checkbox unchecked when accidentally exited.
# run()
# class WatchLogThread
class MyCheckListCtrl(wx.ListCtrl, CheckListCtrlMixin, ListCtrlAutoWidthMixin):
"""
http://zetcode.com/wxpython/advanced/
"""
def __init__(self, parent):
wx.ListCtrl.__init__(self, parent, wx.ID_ANY, style=wx.LC_REPORT|wx.LC_SINGLE_SEL|wx.LC_VIRTUAL)
CheckListCtrlMixin.__init__(self)
ListCtrlAutoWidthMixin.__init__(self)
self.SetFont(wx.Font(12, wx.SWISS, wx.NORMAL, wx.NORMAL))
self.InsertColumn(0, "Path", wx.LIST_FORMAT_LEFT, width=400) # with checkbox
self.InsertColumn(1, "Sample ID", wx.LIST_FORMAT_LEFT, width=90)
self.InsertColumn(2, "Wavelen", wx.LIST_FORMAT_LEFT, width=80)
self.InsertColumn(3, "TotalPhi", wx.LIST_FORMAT_LEFT, width=80)
self.InsertColumn(4, "DeltaPhi", wx.LIST_FORMAT_LEFT, width=80)
self.InsertColumn(5, "Cstatus", wx.LIST_FORMAT_LEFT, width=70)
self.InsertColumn(6, "Pstatus", wx.LIST_FORMAT_LEFT, width=70)
self.InsertColumn(7, "Cmpl.", wx.LIST_FORMAT_LEFT, width=50)
self.InsertColumn(8, "SG", wx.LIST_FORMAT_LEFT, width=100)
self.InsertColumn(9, "Resn.", wx.LIST_FORMAT_LEFT, width=50)
self.items = []
self.images = []
self._items_lookup = {} # {key: idx in self.items}
self._sort_acend = True
self._sort_prevcol = None
self.Bind(wx.EVT_LIST_COL_CLICK, self.item_col_click)
# __init__()
def key_at(self, line): return self.items[line][0]
def OnGetItemText(self, line, col): return self.items[line][col+2] # [0] has key, [1] has checked state
def OnGetItemImage(self, line): return self.items[line][1]
def SetItemImage(self, line, im): # checked state
self.items[line][1] = im
self.Refresh()
# SetItemImage()
def get_item(self, key):
if key not in self._items_lookup: return None
return self.items[self._items_lookup[key]][2:]
# get_item()
def update_item(self, key, item):
if key not in self._items_lookup:
self.items.append([key, 0]+item)
self._items_lookup[key] = len(self.items)-1
else:
for i in xrange(len(item)):
self.items[self._items_lookup[key]][i+2] = item[i]
# update_item()
def item_col_click(self, ev):
col = ev.GetColumn()
if col != self._sort_prevcol:
self._sort_acend = True
else:
self._sort_acend = not self._sort_acend
perm = range(len(self.items))
def trans_func(idx):
# 0:lab, 1:sample, 2:wavelen, 3:phirange, 4:deltaphi, 5,6:status, 7:cmpl, 8:sg, 9:resn
if idx in (2, 3, 4, 7, 9): return safe_float
return lambda x: x
# trans_func()
perm.sort(key=lambda x: trans_func(col)(self.items[x][col+2]),
reverse=not self._sort_acend)
perm_table = dict(map(lambda x:(perm[x], x), xrange(len(perm)))) # old idx -> new idx
for k in self._items_lookup: self._items_lookup[k] = perm_table[self._items_lookup[k]]
self.items = map(lambda x: self.items[x], perm)
self._sort_prevcol = col
#self.DeleteAllItems()
self.SetItemCount(len(self.items))
# listctrl_item_col_click()
# class MyCheckListCtrl
class MultiPrepDialog(wx.Dialog):
def __init__(self, parent=None, cm=None):
wx.Dialog.__init__(self, parent=parent, id=wx.ID_ANY, title="Prep multi merge",
size=(1200,600), style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER|wx.MAXIMIZE_BOX)
mpanel = wx.Panel(self)
vbox = wx.BoxSizer(wx.VERTICAL)
mpanel.SetSizer(vbox)
self.txtCM = wx.TextCtrl(mpanel, wx.ID_ANY, size=(450,25), style=wx.TE_MULTILINE)
self.txtCM.SetFont(wx.Font(10, wx.FONTFAMILY_MODERN, wx.NORMAL, wx.NORMAL))
self.txtCM.SetEditable(False)
vbox.Add(self.txtCM, 1, flag=wx.EXPAND|wx.RIGHT)
        hbox1
    H2O model.  Each entry in the list is a dict containing fields
    of interest like name, type, gridable, default values, ....
:return: three lists: gridable_params, gridable_types and gridable_defaults containing the names of the parameter,
its associated type like int, float, unicode, bool and default parameter values
"""
    # grab all gridable parameters and their types
gridable_parameters = []
gridable_types = []
gridable_defaults = []
for each_param in params_in_json:
if each_param['gridable']:
gridable_parameters.append(str(each_param["name"]))
gridable_types.append(each_param["type"])
if type(each_param["default_value"]) == 'unicode': # hyper-parameters cannot be unicode
gridable_defaults.append(str(each_param["default_value"]))
else:
gridable_defaults.append(each_param["default_value"])
return gridable_parameters, gridable_types, gridable_defaults
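# A minimal sketch (hypothetical field values, not taken from H2O's schema output) of the
# params_in_json structure the loop above expects, and the three parallel lists it yields.
def _example_gridable_param_extraction():
    params_in_json = [
        {"name": "ntrees", "type": "int", "gridable": True, "default_value": 50},
        {"name": "learn_rate", "type": "double", "gridable": True, "default_value": 0.1},
        {"name": "model_id", "type": "string", "gridable": False, "default_value": None},
    ]
    # Feeding this in would yield:
    #   gridable_parameters = ['ntrees', 'learn_rate']
    #   gridable_types      = ['int', 'double']
    #   gridable_defaults   = [50, 0.1]
    return params_in_json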
def add_fold_weights_offset_columns(h2o_frame, nfold_max_weight_offset, column_names, column_type='fold_assignment'):
"""
    Add fold_columns to the H2O training frame specified in h2o_frame according to nfold.  The newly added
    columns use the names given in column_names.  Returns an h2o_frame with the newly added fold_columns.
Copied from Eric's code.
:param h2o_frame: H2O frame containing training data
:param nfold_max_weight_offset: integer, number of fold in the cross-validation or maximum weight scale or offset
:param column_names: list of strings denoting the column names for the new fold columns
:param column_type: optional string denoting whether we are trying to generate fold_assignment or
weights_column or offset_column
:return: H2O frame with added fold column assignments
"""
number_row = h2o_frame.nrow
# copied this part from Eric's code
for index in range(len(column_names)):
if 'fold_assignment' in column_type:
temp_a = np.random.random_integers(0, nfold_max_weight_offset - 1, [number_row, 1]) # inclusive
elif 'weights_column' in column_type:
temp_a = np.random.uniform(0, nfold_max_weight_offset, [number_row, 1])
elif 'offset_column' in column_type:
temp_a = random.uniform(0, nfold_max_weight_offset)*np.asmatrix(np.ones(number_row)).transpose()
else:
assert False, "column_type must be either 'fold_assignment' or 'weights_column'!"
fold_assignments = h2o.H2OFrame(temp_a)
fold_assignments.set_names([column_names[index]])
h2o_frame = h2o_frame.cbind(fold_assignments)
return h2o_frame
def gen_grid_search(model_params, hyper_params, exclude_parameters, gridable_parameters, gridable_types,
gridable_defaults, max_int_number, max_int_val, min_int_val, max_real_number, max_real_val,
min_real_val, quantize_level='1.00000000'):
"""
This function is written to randomly generate griddable parameters for a gridsearch. For parameters already
found in hyper_params, no random list will be generated. In addition, we will check to make sure that the
griddable parameters are actually used by the model before adding them to the hyper_params dict.
:param model_params: list of string containing names of argument to the model
:param hyper_params: dict structure containing a list of gridable parameters names with their list
:param exclude_parameters: list containing parameter names not to be added to hyper_params
:param gridable_parameters: list of gridable parameter names
:param gridable_types: list of gridable parameter types
:param gridable_defaults: list of gridable parameter default values
:param max_int_number: integer, size of integer gridable parameter list
:param max_int_val: integer, maximum integer value for integer gridable parameter
:param min_int_val: integer, minimum integer value for integer gridable parameter
:param max_real_number: integer, size of real gridable parameter list
:param max_real_val: float, maximum real value for real gridable parameter
:param min_real_val: float, minimum real value for real gridable parameter
:param quantize_level: string representing the quantization level of floating point values generated randomly.
:return: a tuple of hyper_params: dict of hyper parameters for gridsearch, true_gridable_parameters:
    a list of string containing names of truly gridable parameters, true_gridable_types: a list of string
denoting parameter types and true_gridable_defaults: default values of those truly gridable parameters
"""
count_index = 0
true_gridable_parameters = []
true_gridable_types = []
true_gridable_defaults = []
for para_name in gridable_parameters:
        # parameter must be accepted by the model and must not be in the exclusion list
if (para_name in model_params) and (para_name not in exclude_parameters):
true_gridable_parameters.append(para_name)
true_gridable_types.append(gridable_types[count_index])
true_gridable_defaults.append(gridable_defaults[count_index])
if para_name not in hyper_params.keys(): # add default value to user defined parameter list
# gridable parameter not seen before. Randomly generate values for it
if ('int' in gridable_types[count_index]) or ('long' in gridable_types[count_index]):
# make sure integer values are not duplicated, using set action to remove duplicates
hyper_params[para_name] = list(set([random.randint(min_int_val, max_int_val) for p in
range(0, max_int_number)]))
elif ('double' in gridable_types[count_index]) or ('float' in gridable_types[count_index]):
hyper_params[para_name] = fix_float_precision(list(np.random.uniform(min_real_val, max_real_val,
max_real_number)), quantize_level=quantize_level)
count_index += 1
return hyper_params, true_gridable_parameters, true_gridable_types, true_gridable_defaults
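# A minimal usage sketch of gen_grid_search with hypothetical GBM-like parameter names:
# 'seed' is excluded, the user-supplied 'ntrees' grid is kept untouched, and random grids
# are generated for the remaining gridable parameters.
def _example_gen_grid_search():
    hyper, names, types, defaults = gen_grid_search(
        model_params=["ntrees", "max_depth", "learn_rate", "seed"],
        hyper_params={"ntrees": [10, 50, 100]},
        exclude_parameters=["seed"],
        gridable_parameters=["ntrees", "max_depth", "learn_rate", "seed"],
        gridable_types=["int", "int", "double", "long"],
        gridable_defaults=[50, 5, 0.1, -1],
        max_int_number=3, max_int_val=20, min_int_val=1,
        max_real_number=3, max_real_val=1.0, min_real_val=0.0)
    # hyper now holds 'ntrees' (unchanged) plus random 'max_depth' (ints) and
    # 'learn_rate' (quantized floats) lists; 'seed' is absent from names/types/defaults.
    return hyper, names, types, defaults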
def fix_float_precision(float_list, quantize_level='1.00000000'):
"""
    This function takes in a tuple/list of floating point numbers and attempts to round each one to the
    fixed precision given by quantize_level.
:param float_list: tuple/list of floating point numbers
:param quantize_level: string, optional, represent the number of fix points we care
:return: tuple of floats to the exact precision specified in quantize_level
"""
fixed_float = []
for num in float_list:
fixed_float.append(float(Decimal(num).quantize(Decimal(quantize_level))))
return list(set(fixed_float))
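# A quick sketch of fix_float_precision: both inputs quantize to 0.123 and the set()
# above removes the duplicate, so the output can be shorter than the input and unordered.
def _example_fix_float_precision():
    return fix_float_precision([0.123456789, 0.123456781], quantize_level='1.000')  # -> [0.123]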
def extract_used_params_xval(a_grid_model, model_param_names, params_dict, algo="GBM"):
"""
This function performs similar functions to function extract_used_params. However, for max_runtime_secs,
    we need to go into each cross-validation model and grab the max_runtime_secs and add them up in order to
    get the correct value.  In addition, we put the algo-specific model parameters into params_dict.
:param a_grid_model: list of models generated by gridsearch
:param model_param_names: hyper-parameter names that are specified for the gridsearch.
:param params_dict: dict containing name/value pairs specified to an algo.
:param algo: string, optional, denoting the algo we are looking at.
:return: params_used: a dict structure containing parameters that take on values as name/value pairs which
will be used to build a model by hand using the same parameter setting as the model built by gridsearch.
"""
params_used = dict()
    # need to extract the max_runtime_secs from ONE cross-validation model or the base model
if a_grid_model._is_xvalidated:
xv_keys = a_grid_model._xval_keys
for id in xv_keys: # only need to get info from one model
each_xv_model = h2o.get_model(id) # get each model
params_used = extract_used_params(model_param_names, each_xv_model.params, params_dict, algo)
break
else:
params_used = extract_used_params(model_param_names, a_grid_model.params, params_dict, algo)
return params_used
def extract_used_params(model_param_names, grid_model_params, params_dict, algo="GLM"):
"""
This function is used to build a dict out of parameters used by our gridsearch to build a H2O model given
the dict structure that describes the parameters and their values used by gridsearch to build that
    particular model.
:param model_param_names: list contains parameter names that we are interested in extracting
:param grid_model_params: dict contains key as names of parameter and values as list of two values: default and
actual.
:param params_dict: dict containing extra parameters to add to params_used like family, e.g. 'gaussian',
'binomial', ...
:return: params_used: a dict structure containing parameters that take on values as name/value pairs which
will be used to build a model by hand using the same parameter setting as the model built by gridsearch.
"""
params_used = dict()
grid_model_params_keys = grid_model_params.keys()
for each_parameter in model_param_names:
parameter_name = str(each_parameter)
if parameter_name in grid_model_params_keys:
params_used[parameter_name] = grid_model_params[each_parameter]['actual']
if params_dict:
for key, value in params_dict.items():
params_used[key] = value # add distribution family to parameters used list
# only for GLM, change lambda to Lambda
if algo =="GLM":
if 'lambda' in params_used.keys():
params_used['Lambda'] = params_used['lambda']
del params_used['lambda']
return params_used
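# A minimal sketch of extract_used_params with made-up grid output, showing the
# GLM-specific lambda -> Lambda rename and the merge of extra params_dict entries.
def _example_extract_used_params():
    grid_model_params = {"alpha": {"default": [0.5], "actual": [0.25]},
                         "lambda": {"default": [0.0], "actual": [0.001]}}
    # -> {'alpha': [0.25], 'family': 'binomial', 'Lambda': [0.001]}
    return extract_used_params(["alpha", "lambda"], grid_model_params,
                               {"family": "binomial"}, algo="GLM")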
def insert_error_grid_search(hyper_params, gridable_parameters, gridable_types, error_number):
"""
This function will randomly introduce errors into a copy of hyper_params. Depending on the random number
error_number generated, the following errors can be introduced:
error_number = 0: randomly alter the name of a hyper-parameter name;
error_number = 1: randomly choose a hyper-parameter and remove all elements in its list
error_number = 2: add randomly generated new hyper-parameter names with random list
error_number other: randomly choose a hyper-parameter and insert an illegal type into it
:param hyper_params: dict containing all legal hyper-parameters for our grid search
:param gridable_parameters: name of griddable parameters (some may not be griddable)
:param gridable_types: type of griddable parameters
:param error_number: integer representing which errors to introduce into the gridsearch hyper-parameters
:return: new dict with errors in either parameter names or parameter values
"""
error_hyper_params = copy.deepcopy(hyper_params)
# error_hyper_params = {k : v for k, v in hyper_params.items()}
param_index = random.randint(0, len(hyper_params)-1)
param_name = list(hyper_params)[param_index]
param_type = gridable_types[gridable_parameters.index(param_name)]
if error_number == 0: # grab a hyper-param randomly and copy its name twice
new_name = param_name+param_name
error_hyper_params[new_name] = error_hyper_params[param_name]
del error_hyper_params[param_name]
elif error_number == 1:
error_hyper_params[param_name] = []
elif error_number == 2:
new_param = generate_random_words(random.randint(20,100))
error_hyper_params[new_param] = error_hyper_params[param_name]
else:
error_hyper_params = insert_bad_value(error_hyper_params, param_name, param_type)
return error_hyper_params
def insert_bad_value(error_hyper_params, param_name, param_type):
"""
    This function inserts a value whose type differs from that of the other elements into one of the
    hyper-parameter lists.
:param error_hyper_params: dict containing all hyper-parameters for a grid search
    :param param_name: string denoting the hyper-parameter we want to
joint_attr in f['joints']:
create_joint(joint_attr.get('name'), joint_attr.get('x'), joint_attr.get('y'), active_truss)
for bar_attr in f['bars']:
create_bar(bar_attr.get('name'), *bar_attr.get('connected_joint_names'),
bar_attr.get('bar_params'), active_truss, bar_attr.get('var_name'))
for load_attr in f['loads']:
create_load(load_attr.get('name'), load_attr.get('joint_name'), load_attr.get('x'),
load_attr.get('y'), active_truss, load_attr.get('var_name'))
for supp_attr in f['supports']:
create_support(supp_attr['name'], supp_attr['joint_name'], supp_attr['support_type'],
supp_attr['roller_normal'], supp_attr['pin_rotation'], active_truss,
supp_attr['var_name'])
if show_if_results and (res := f['results']) is not None:
bar_names = active_truss.get_all_bars(str_names_only=True)
support_names = active_truss.get_all_supports(str_names_only=True)
truss_results = active_truss.Result(active_truss, sig_figs=3, solution_method=None,
_override_res=(
{bn: res['internal_forces'][bn] for bn in bar_names},
{sn: res['reaction_forces'][sn] for sn in support_names},
{bn: res['stresses'][bn] for bn in bar_names},
{bn: res['strains'][bn] for bn in bar_names},
{bn: res['buckling_ratios'][bn] for bn in bar_names}
)
)
print(truss_results)
plot_diagram(active_truss, truss_results, show_reactions=True,
_delete_truss_after=_delete_truss_after)
return get_active_truss() if set_as_active_truss else None
# HELPER AND UTILITY FUNCTIONS
def validate_var_name(var_name: str, allow_existing_vars: bool = True, raise_error: bool = True) -> bool:
"""
Checks if a var_name, which is used internally to instantiate the
subclass objects (Joint, Bars, Load, Support) is as valid as if it
were declared explicitly i.e. var_name = Class(...). They are set using
globals() where the key is var_name and the object reference is the value.
"""
import keyword
if var_name in globals() and not allow_existing_vars:
if raise_error:
raise NameError(f'A global variable {var_name} (with the value {globals()[var_name]}) is already '
f'in use, possibly because it is a builtin. \nIt cannot be used in the truss.')
else:
return False
elif not var_name.isidentifier() or keyword.iskeyword(var_name) or var_name.startswith('__'):
if raise_error:
raise NameError(f'{var_name} is not a valid variable name. \n'
'It can only contain alphanumerics and underscores \n'
'and cannot start with double underscore (__).')
else:
return False
else:
return True
def convert_to_valid_var_name(name: str, allow_existing_vars=True) -> str:
"""
Given a user-defined name, converts it to a similar looking valid variable name.
e.g. `convert_to_valid_var_name("My First Truss")` -> "my_first_truss"
If this already exists and `allow_existing_vars = False`, a number is appended to the name
to make it distinct, e.g. "my_first_truss_2", "my_first_truss_3", etc.
"""
import re
# remove trailing whitespace, convert to lowercase and replace spaces with underscores
new_name = name.strip().lower().replace(' ', '_')
# remove non-alphanumeric characters except underscores
pattern = re.compile(r'[\W]+', re.UNICODE)
new_name = pattern.sub('', new_name)
# add a number at the end of the name if it already exists and is needed
    if not allow_existing_vars and new_name in globals().keys():
        suffix = 2
        while not validate_var_name(new_name + '_' + str(suffix), allow_existing_vars=False, raise_error=False):
            suffix += 1
        new_name = new_name + '_' + str(suffix)
# double-check the new name is valid
if validate_var_name(new_name):
return new_name
else:
        raise SyntaxError(f'Unable to convert the name {name} to a suitable internal variable name '
                          f'(attempt was {new_name}). Please change to a simpler name and try again.')
def get_constants(cls: type) -> dict[str, Hashable]:
"""
Used to get a dict of constants {const_name: const_value}
from the utility classes.
"""
# get a list of the names of the constants
names = list(filter(
lambda a: not callable(getattr(cls(), a)) and not a.startswith('_') and a == a.upper(), dir(cls())))
# get a list of the values of these constants
vals = [getattr(cls(), a) for a in names]
# return in dict {'name': value} form
return dict(zip(names, vals))
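# A minimal sketch of get_constants with a throwaway utility class: only uppercase,
# non-callable, non-underscore attributes are collected.
def _example_get_constants():
    class _Units:
        NEWTONS = 'N'
        METRES = 'm'
        def describe(self):
            return 'unit constants'
    return get_constants(_Units)  # -> {'METRES': 'm', 'NEWTONS': 'N'}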
def set_active_truss(var_name: str) -> None:
"""
Sets which truss is currently being built on.
"""
global active_truss
active_truss = globals()[var_name]
def get_active_truss() -> Optional[Truss]:
"""
Gets the truss which is currently being built on, or None if there is none.
NOTE: active_truss is a global var.
"""
return active_truss if has_active_truss() else None
def is_active_truss(var_name: str) -> bool:
"""
Determines whether the given truss variable name is being built on.
"""
return globals()[var_name] is active_truss
def has_active_truss() -> bool:
"""
Determines whether an active truss has been set yet, returning True or False.
"""
return 'active_truss' in globals().keys()
def set_matplotlib_fullscreen() -> None:
"""
Automatically set the matplotlib output to fullscreen.
"""
import os
from matplotlib import pyplot as plt
backend = str(plt.get_backend())
mgr = plt.get_current_fig_manager()
if backend == 'TkAgg':
if os.name == 'nt':
mgr.window.state('zoomed')
else:
mgr.resize(*mgr.window.maxsize())
elif backend == 'wxAgg':
mgr.frame.Maximize(True)
elif backend in ['Qt4Agg', 'Qt5Agg']:
mgr.window.showMaximized()
else:
raise RuntimeWarning(f'The backend in use, {backend}, is not supported in fullscreen mode.')
def find_free_space_around_joint(joint: Truss.Joint, results: Truss.Result = None,
truss: Optional[Truss] = None, show_reactions: bool = True,
as_degrees: bool = False) -> float:
"""
Helper function to find a place to label text around a joint. Finds a location
at a fixed small distance from the joint, such that the surrounding bars, loads
and supports/reaction arrows are as far away as possible.
"""
truss = active_truss if truss is None else truss
support = truss.get_support_by_joint(joint)
# find the angles occupied due to bars being there
used_angles = [bar.get_direction(origin_joint=joint)
for bar in truss.get_all_bars_connected_to_joint(joint)]
# find the angles occupied due to load arrows being there
used_angles += [load.direction for load in truss.get_all_loads_at_joint(joint)]
# find the angles occupied due to support icons and/or reaction arrows being there
# TODO: don't add if the reaction force is zero
if support is not None:
if show_reactions:
if support.support_type == 'roller':
used_angles.append(math.pi + support.reaction_direction)
used_angles.append(math.atan2(*reversed(results.reactions[support.name])))
else:
if support.support_type == 'pin':
used_angles.append(math.pi / 2 - support.pin_rotation)
# sort ascending from 0 to 360 (through 2 * pi)
used_angles = sorted([i % (2 * math.pi) for i in used_angles])
# find the angular sizes of the gaps
differences = [(used_angles[i] - used_angles[i - 1]) % (2 * math.pi) for i in range(len(used_angles))]
# determine at what angle is the most free
max_i = differences.index(max(differences))
most_free_angle = np.average([used_angles[max_i], used_angles[max_i - 1]])
if used_angles[max_i] < used_angles[max_i - 1]:
most_free_angle -= math.pi
return math.degrees(most_free_angle) if as_degrees else most_free_angle
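# A small sketch of the gap-finding idea above, isolated from the Truss classes:
# with items occupying 0, 90 and 180 degrees (hypothetical values), the widest free
# sector is the half-plane below the joint, so the midpoint angle comes out at 270 degrees.
def _example_widest_angular_gap():
    used_angles = sorted(a % (2 * math.pi) for a in [0.0, math.pi / 2, math.pi])
    differences = [(used_angles[i] - used_angles[i - 1]) % (2 * math.pi)
                   for i in range(len(used_angles))]
    max_i = differences.index(max(differences))
    most_free_angle = np.average([used_angles[max_i], used_angles[max_i - 1]])
    if used_angles[max_i] < used_angles[max_i - 1]:
        most_free_angle -= math.pi
    return math.degrees(most_free_angle) % 360  # -> 270.0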
def draw_support(x: float, y: float, size: float, support_type: str = 'pin', pin_rotation: float = 0,
roller_normal: np.array = None) -> None:
"""
Draw a particular type of support, using the standard conventional symbols, on
the matplotlib truss diagram. If roller is chosen, its direction is
shown by rotating the drawing. Optional pin rotation in clockwise degrees from vertical.
"""
# Helper function to rotate the drawing
if (pin_rotation != 0) ^ (roller_normal is not None): # either but not both: cannot be encastre
if support_type == 'roller':
a = math.pi / 2 - math.atan2(*reversed(roller_normal))
elif support_type == 'pin':
a = math.radians(pin_rotation)
else:
            raise TypeError(
                f'The combination of supplied information: support type ({support_type}), pin rotation angle '
                f'({pin_rotation}) and roller direction ({roller_normal}) is invalid.')
# function for rotating a given coordinate tuple _p = (_x, _y) by a radians clockwise about (x, y)
rot = lambda _p: (x + (_p[0] - x) * math.cos(a) + (_p[1] - y) * math.sin(a), # noqa
y - (_p[0] - x) * math.sin(a) + (_p[1] - y) * math.cos(a))
if support_type == 'encastre':
# Encastre symbol: solid line and hashed lines representing ground
plt.plot((x - size / 2, x + size / 2), (y, y), # horizontal line
linewidth=1, color='black', zorder=0)
for x_pos in np.linspace(x - 0.3 * size, x + 0.5 * size, 5):
plt.plot((x_pos, x_pos - size / 5), (y, y - size / 5), # hashed lines
linewidth=1, color='black', zorder=0)
if (support_type == 'pin' and pin_rotation != 0) or support_type == 'roller':
# NOTE: element indices are
# 0: triangle top left, 1: triangle bottom left, 2: triangle bottom right, 3: triangle top right
# 4,5,6,7,8: ground top right diagonal points, 9,10,11,12,13: ground bottom left diagonal points
# 14: ground left point, 15: ground right point
_old_pts = [
(x - size / 20, y - math.sqrt(3) * size / 20),
(x - (1 / (3 * math.sqrt(3))) * size, y - size / 3),
(x + (1 / (3 * math.sqrt(3))) * size, y - size / 3),
(x + size / 20, y - math.sqrt(3) * size / 20)
] + [(x_pos, y - (size / 3 if support_type == 'pin' else 8 / 15 * size))
for x_pos, y_pos in zip(list(np.linspace(x - 0.3 * size, x + 0.5 * size, 5)), [y] * 5)
] + [(x_pos - size / 5, y - (8/15 * size if support_type == 'pin' else 11/15 * size)) # noqa \
             for x_pos, y_pos in zip(list(np.linspace(x - 0.3 *
<gh_stars>1-10
'''
Credit:
This code is largely based on the work by __<NAME>__ `<https://alexis-jacq.github.io>`, and is the implementation of the paper: _Image Style Transfer Using Convolutional Neural Networks_ `<https://arxiv.org/abs/1508.06576>` developed by __<NAME>__, __<NAME>__, and __<NAME>__.
'''
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from PIL import Image
import torchvision.transforms as transforms
import torchvision.models as models
import torchvision.utils as util
from torch.utils.data import DataLoader
import copy
import argparse
import os
import time
import logging
from logging.handlers import RotatingFileHandler
import sys
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#===================================#
# Define content dataset #
#===================================#
class ContentDataset(torch.utils.data.Dataset):
def __init__(self, root_dir, files=None, transform=None):
'''
Args:
root_dir (string): dir with all the images
files ([string], optional): array of all the images to use in root_dir
if not specified, use all images in root_dir
transform (callable, optional): Optional transform to be
applied on a sample
'''
self.root_dir = root_dir
self.files = [f for f in (os.listdir(root_dir) if files is None else files) \
if os.path.isfile(os.path.join(root_dir, f))]
self.transform = transform
def __len__(self):
return len(self.files)
def __getitem__(self, idx):
img_name = self.files[idx]
img = Image.open(os.path.join(self.root_dir, img_name))
if self.transform:
img = self.transform(img)
return img, img_name
#===================================#
# Define classes for Style transfer #
#===================================#
class StyleLoss(nn.Module):
def __init__(self, target_feature):
super(StyleLoss, self).__init__()
self.target = self._gram_matrix(target_feature).detach()
def forward(self, input):
G = self._gram_matrix(input)
self.loss = F.mse_loss(G, self.target)
return input
def _gram_matrix(self, input):
a, b, c, d = input.size()
# a=batch size(=1)
# b=number of feature maps
# (c,d)=dimensions of a f. map (N=c*d)
        features = input.view(a * b, c * d)  # resize F_XL into \hat F_XL
G = torch.mm(features, features.t()) # compute the gram product
# we 'normalize' the values of the gram matrix
        # by dividing by the number of elements in each feature map.
return G.div(a * b * c * d)
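# A small sanity sketch (not used by the training loop): for a hypothetical feature
# map of shape (1, 64, 32, 32), the Gram matrix stored by StyleLoss is 64 x 64 and
# every entry is divided by 1 * 64 * 32 * 32 = 65536.
def _example_gram_matrix_shape():
    features = torch.randn(1, 64, 32, 32)     # (batch, channels, height, width)
    return StyleLoss(features).target.shape   # -> torch.Size([64, 64])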
class ContentLoss(nn.Module):
def __init__(self, target,):
super(ContentLoss, self).__init__()
# we 'detach' the target content from the tree used
# to dynamically compute the gradient: this is a stated value,
# not a variable. Otherwise the forward method of the criterion
# will throw an error.
self.target = target.detach()
def forward(self, input):
self.loss = F.mse_loss(input, self.target)
return input
# create a module to normalize input image so we can easily put it in a
# nn.Sequential
class Normalization(nn.Module):
def __init__(self, mean, std):
super(Normalization, self).__init__()
# .view the mean and std to make them [C x 1 x 1] so that they can
# directly work with image Tensor of shape [B x C x H x W].
# B is batch size. C is number of channels. H is height and W is width.
self.mean = torch.tensor(mean).view(-1, 1, 1)
self.std = torch.tensor(std).view(-1, 1, 1)
def forward(self, img):
# normalize img
return (img - self.mean) / self.std
#===================================#
# Define Helper Functions #
#===================================#
# desired depth layers to compute style/content losses:
def get_style_model_and_losses(cnn, normalization_mean, normalization_std,
style_img, content_img,
content_layers=['conv_4'],
style_layers=['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']):
cnn = copy.deepcopy(cnn)
# normalization module
normalization = Normalization(normalization_mean, normalization_std).to(device)
    # just in order to have an iterable access to or list of content/style
# losses
content_losses = []
style_losses = []
# assuming that cnn is a nn.Sequential, so we make a new nn.Sequential
# to put in modules that are supposed to be activated sequentially
model = nn.Sequential(normalization)
i = 0 # increment every time we see a conv
for layer in cnn.children():
if isinstance(layer, nn.Conv2d):
i += 1
name = 'conv_{}'.format(i)
elif isinstance(layer, nn.ReLU):
name = 'relu_{}'.format(i)
# The in-place version doesn't play very nicely with the ContentLoss
# and StyleLoss we insert below. So we replace with out-of-place
# ones here.
layer = nn.ReLU(inplace=False)
elif isinstance(layer, nn.MaxPool2d):
name = 'pool_{}'.format(i)
elif isinstance(layer, nn.BatchNorm2d):
name = 'bn_{}'.format(i)
else:
raise RuntimeError('Unrecognized layer: {}'.format(layer.__class__.__name__))
model.add_module(name, layer)
if name in content_layers:
# add content loss:
target = model(content_img).detach()
content_loss = ContentLoss(target)
model.add_module("content_loss_{}".format(i), content_loss)
content_losses.append(content_loss)
if name in style_layers:
# add style loss:
target_feature = model(style_img).detach()
style_loss = StyleLoss(target_feature)
model.add_module("style_loss_{}".format(i), style_loss)
style_losses.append(style_loss)
# now we trim off the layers after the last content and style losses
for i in range(len(model) - 1, -1, -1):
if isinstance(model[i], ContentLoss) or isinstance(model[i], StyleLoss):
break
model = model[:(i + 1)]
return model, style_losses, content_losses
def get_input_optimizer(input_img):
# this line to show that input is a parameter that requires a gradient
optimizer = optim.LBFGS([input_img.requires_grad_()])
return optimizer
def run_style_transfer(cnn, normalization_mean, normalization_std,
content_img, style_img, input_img, num_steps=300,
style_weight=10000000, content_weight=1,
tmp_dir=False):
"""
Run the style transfer.
This function takes a tmp_dir parameter. If this parameter is specified,
then intermediate results (at every 50 steps) will be saved to the
location specified in the parameter. If the parameter is set to
False, then no intermediate results will be saved to disk.
"""
logger = logging.getLogger('root')
model, style_losses, content_losses = get_style_model_and_losses(cnn,
normalization_mean, normalization_std, style_img, content_img)
optimizer = get_input_optimizer(input_img)
run = [0]
while run[0] <= num_steps:
def closure():
# correct the values of updated input image
input_img.data.clamp_(0, 1)
optimizer.zero_grad()
model(input_img)
style_score = 0
content_score = 0
for sl in style_losses:
style_score += sl.loss
for cl in content_losses:
content_score += cl.loss
style_score *= style_weight
content_score *= content_weight
loss = style_score + content_score
loss.backward()
run[0] += 1
if run[0] % 50 == 0:
logger.debug('run #{:_>4} - Style Loss : {:4f} Content Loss: {:4f}'.format(
run[0], style_score.item(), content_score.item()))
# save tmp folder
if tmp_dir:
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
util.save_image(input_img, '{0}/tmp_{1:0>4}.jpg'.format(tmp_dir, run[0]))
return style_score + content_score
optimizer.step(closure)
# a last correction...
input_img.data.clamp_(0, 1)
return input_img
#===================================#
# Main Process: Load content/style images & apply style transfer
#===================================#
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Style Transfer script for Pytorch')
parser.add_argument(
'--style-image',
dest='style_image',
help='The path of the style image',
default='images/style_images/sample_vangogh.jpg'
)
parser.add_argument(
'--content-image-dir',
dest='content_image_dir',
help='The path of the directory of the content images.',
default='./images/content_images'
)
parser.add_argument(
'--content-image-list',
dest='content_image_list',
help='A comma separated list of images to use in the content_image_dir',
default=None
)
parser.add_argument(
'--output-image-dir',
dest='output_image_dir',
help='The path where the output images would be stored.',
default='./images/output_images'
)
parser.add_argument(
'--style-weight',
dest='style_weight',
type=int,
help='The weight to use when optimizing the style loss.',
default=10**8
)
parser.add_argument(
'--content-weight',
dest='content_weight',
type=int,
help='The weight to use when optimizing the content loss.',
default=1
)
parser.add_argument(
'--num-steps',
dest='num_steps',
type=int,
help='The number of steps to use when optimizing the style transfer loss function.',
default=300
)
parser.add_argument(
'--image-size',
dest='image_size',
type=int,
help='The pixel dimension of the output image (W=H)'
)
parser.add_argument(
'--log-path',
dest='log_path',
help='The path of the log file to create.',
default='.'
)
parser.add_argument(
'--log-file',
dest='log_file',
help='The name of the file to log to.',
default=None
)
args = parser.parse_args()
style_image = args.style_image
content_image_dir = args.content_image_dir
content_image_list = args.content_image_list
output_image_dir = args.output_image_dir
style_weight = args.style_weight
content_weight = args.content_weight
num_steps = args.num_steps
image_size = args.image_size
log_path = args.log_path
log_file = args.log_file
# check that all the paths and image references are good
assert os.path.exists(style_image)
assert os.path.exists(content_image_dir)
assert os.path.exists(output_image_dir)
assert os.path.isdir(content_image_dir)
assert os.path.isdir(output_image_dir)
if content_image_list is not None:
for image_file in content_image_list.split(','):
assert os.path.exists(os.path.join(content_image_dir, image_file))
assert os.path.isdir(log_path)
# set up logger
handler_format = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(handler_format)
file_handler = RotatingFileHandler(
os.path.join(
log_path,
'{}.log'.format(log_file) if log_file else 'style_transfer_script.log'
),
maxBytes=20000
)
file_handler.setFormatter(handler_format)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(console_handler)
logger.addHandler(file_handler)
logger.propagate = False
    # log script parameters
num_images = len(content_image_list.split(',')) \
if content_image_list is not None \
else len(os.listdir(content_image_dir))
logger.debug("Images to process: %i" % num_images)
# Setup image transformations
if not image_size:
image_size = 512 if torch.cuda.is_available() else 128 # use small size if no gpu
logger.debug("GPU detected: %s, image size: %s" % (str(torch.cuda.is_available()), image_size))
# setup loader
loader = transforms.Compose([
transforms.Resize(image_size), # scale imported image
transforms.CenterCrop(image_size), # crop on center
transforms.ToTensor()]) # transform it into a torch tensor
# Setup content image loader
content_img_set = ContentDataset(
root_dir=content_image_dir,
files=content_image_list.split(','),
transform=loader)
content_img_loader = DataLoader(content_img_set, batch_size=1, shuffle=False, num_workers=1)
# Load style image
t0 = time.time()
style_img = loader(Image.open(style_image)).unsqueeze(0).to(device, torch.float)
t1 = time.time()
style_img_time = t1 - t0
logger.debug("Time (in seconds) to load style image: %f" % style_img_time)
# load vgg19
t0 = time.time()
cnn = models.vgg19(pretrained=True).features.to(device).eval()
t1 = time.time()
load_cnn_time = t1 - t0
logger.debug("Time (in seconds) to load | |
0x2a, 0x45, 0x9c,
0x44, 0x1d, 0x21, 0x25, 0x01, 0x45, 0x62, 0x62,
0x47, 0x61, 0x2b, 0x14, 0x89, 0x51, 0x18, 0x1a,
0x85, 0xd5, 0x94, 0x21, 0x61, 0xd2, 0xd0, 0x42,
0xd2, 0x82, 0x41, 0x9c, 0x22, 0x28, 0x5a, 0x91,
0xa4, 0x4a, 0xa1, 0x51, 0x49, 0x02, 0xa8, 0x8a,
0x91, 0x98, 0x86, 0x98, 0x98, 0x2c, 0x93, 0x82,
0x5a, 0xe1, 0xb4, 0x9b, 0xc4, 0x47, 0x57, 0xa0,
0x26, 0x8f, 0x18, 0xf1, 0x21, 0x21, 0x46, 0x38,
0x21, 0x6d, 0x18, 0x7d, 0x18, 0x78, 0x8c, 0xa1,
0x24, 0x2c, 0xf4, 0x28, 0x48, 0x43, 0xf2, 0x42,
0x48, 0x4a, 0x51, 0xc6, 0xa1, 0x5e, 0x28, 0x4b,
0x21, 0x4e, 0x72, 0x4d, 0x18, 0x22, 0x44, 0x6f,
0x22, 0x54, 0xe4, 0xad, 0x18, 0x2b, 0x44, 0x81,
0x29, 0x02, 0x12, 0x22, 0x4a, 0xf2, 0x51, 0x21,
0x28, 0x2a, 0x01, 0x2d, 0x28, 0x17, 0x26, 0x8b,
0x21, 0xa3, 0x22, 0x82, 0xf1, 0x28, 0x48, 0x83,
0xec, 0x41, 0x24, 0x66, 0x41, 0x1a, 0xa1, 0x54,
0x5a, 0xa6, 0x22, 0x83, 0x01, 0xa0, 0x28, 0xaa,
0x28, 0x13, 0xf2, 0xf2, 0xcf, 0x10, 0xd4, 0x41,
0x16, 0xf4, 0x28, 0x44, 0x4c, 0xd4, 0x29, 0xa2,
0x62, 0x46, 0xa4, 0x62, 0x85, 0x15, 0x0a, 0x29,
0xf4, 0x14, 0x12, 0x6d, 0x68, 0x8f, 0x21, 0x37,
0x34, 0x6f, 0x42, 0xc5, 0x47, 0x63, 0x71, 0x31,
0x12, 0x68, 0x24, 0x56, 0xb4, 0x21, 0xd2, 0x24,
0x02, 0x6b, 0x64, 0x18, 0x3a, 0xa4, 0x22, 0x1e,
0x38, 0x26, 0x04, 0x12, 0x24, 0xad, 0x42, 0x26,
0xa2, 0x51, 0x24, 0x24, 0x81, 0x26, 0x84, 0xa2,
0x53, 0xaa, 0x8a, 0x82, 0x48, 0x21, 0xa2, 0x82,
0x27, 0x88, 0x49, 0x69, 0x28, 0x2f, 0xca, 0x28,
0xd1, 0x8e, 0xa2, 0x48, 0x96, 0xf8, 0x91, 0x78,
0x00, 0x43, 0x42, 0x58, 0x14, 0x40, 0x03, 0x22,
0x50, 0x14, 0x84, 0x22, 0x40, 0x04, 0x24, 0x84,
0x24, 0xc0, 0x26, 0x10, 0x08, 0x10, 0x43, 0x04,
0x00, 0x28, 0x00, 0x20, 0x04, 0x00, 0x10, 0x02,
0x26, 0x02, 0x80, 0x04, 0x00, 0x00, 0x12, 0x28,
0x16, 0x08, 0x12, 0x85, 0x02, 0x00, 0x83, 0xf4,
0xb6, 0x5b, 0x80, 0xf2, 0x11, 0x11, 0x9f, 0x32,
0xf2, 0x2e, 0x24, 0xcc, 0xfa, 0x3c, 0x1d, 0xaf,
0xc1, 0xf1, 0x64, 0x14, 0x18, 0x1d, 0x22, 0xb5,
0xb8, 0x28, 0x32, 0x42, 0x46, 0x88, 0xf4, 0x34,
0x54, 0xcb, 0x46, 0x8f, 0x62, 0xf5, 0x15, 0x31,
0x2b, 0x32, 0x2a, 0xf2, 0x24, 0x24, 0x41, 0x4f,
0x76, 0xd2, 0x37, 0xf2, 0x28, 0x2a, 0x6a, 0xf6,
0x64, 0x64, 0x4b, 0x47, 0x3a, 0x73, 0x38, 0xe8,
0x83, 0xf3, 0x14, 0x14, 0x3a, 0xa2, 0x11, 0x36,
0x72, 0x26, 0x46, 0x52, 0x77, 0x50, 0x6c, 0xa5,
0xf8, 0x14, 0x14, 0x1a, 0xd1, 0x88, 0xac, 0xa2,
0x18, 0x8a, 0x09, 0x8a, 0xcc, 0x12, 0x29, 0xf8,
0x14, 0x14, 0x19, 0xf1, 0x13, 0x3a, 0x8f, 0x85,
0xf5, 0x7c, 0x7c, 0x3a, 0xfd, 0x54, 0x44, 0x3f,
0xde, 0x03, 0x41, 0x5f, 0x91, 0xf5, 0x39, 0x13,
0xe5, 0xdd, 0xf5, 0xf2, 0x31, 0x1e, 0xdf, 0x85,
0xf5, 0x7c, 0x34, 0x6e, 0x72, 0xff, 0xe6, 0x56,
0xce, 0x8f, 0x26, 0xf2, 0x62, 0x24, 0x4f, 0xe4,
0xf1, 0x42, 0x42, 0xcf, 0x63, 0xf7, 0x2e, 0x7c,
0xef, 0x43, 0xf7, 0x57, 0x75, 0x4f, 0x65, 0xf5,
0x3a, 0x12, 0xef, 0x82, 0x52, 0x44, 0x5f, 0x77,
0xf3, 0x2c, 0x2f, 0xef, 0xca, 0xba, 0x22, 0xf2,
0x66, 0xe2, 0x6f, 0x66, 0xa4, 0x13, 0x8f, 0x83,
0xe1, 0x93, 0xf3, 0x74, 0x74, 0x5e, 0x11, 0x3e,
0x28, 0x36, 0xf2, 0x26, 0x4e, 0x27, 0x22, 0x77,
0x52, 0xaf, 0x82, 0xf2, 0x24, 0x24, 0xa5, 0xf8,
0x14, 0x14, 0x8f, 0x85, 0xd7, 0x88, 0xeb, 0x42,
0xaa, 0x17, 0x9a, 0x8b, 0xa1, 0xdd, 0xfe, 0xf2,
0xaf, 0x81, 0xfb, 0x84, 0x9c, 0x8e, 0x93, 0x2f,
0xa8, 0xfa, 0xca, 0x4a, 0xef, 0xce, 0xaf, 0xb3,
0xcf, 0xd5, 0xfd, 0x74, 0x87, 0x80, 0xf2, 0x35,
0x75, 0x9f, 0xf3, 0xf3, 0x2f, 0x25, 0x4f, 0x82,
0xf2, 0x35, 0x3d, 0xff, 0xc5, 0xf6, 0x68, 0x64,
0x45, 0xf4, 0x21, 0x22, 0xbf, 0xf2, 0xb2, 0x2a,
0xd2, 0x22, 0xf2, 0x42, 0x18, 0x4d, 0x44, 0x6f,
0x47, 0xf7, 0x7e, 0x7c, 0xef, 0x63, 0xf5, 0x17,
0x35, 0x2f, 0x23, 0xf1, 0x16, 0x1c, 0x6f, 0xc2,
0xe2, 0x44, 0xf4, 0x65, 0x76, 0xf5, 0xdd, 0xaa,
0xf8, 0x44, 0x44, 0x4f, 0x46, 0xe6, 0x65, 0xb6,
0x72, 0x77, 0x38, 0xf8, 0x38, 0x39, 0x4f, 0x43,
0xa1, 0x77, 0x1e, 0x18, 0x2e, 0x22, 0x6f, 0x62,
0xf6, 0x22, 0x22, 0x5f, 0x71, 0xf1, 0x3a, 0x28,
0xcf, 0x62, 0x72, 0x2a, 0xf8, 0x14, 0x14, 0x8f,
0x82, 0xd2, 0x88, 0xcc, 0x84, 0x5a, 0xa1, 0xb9,
0x18, 0xda, 0xed, 0x2e, 0xbf, 0x9a, 0xfb, 0x1c,
0x14, 0x8f, 0xa1, 0xf1, 0x12, 0x3b, 0x2b, 0x31,
0xef, 0xca, 0xfb, 0xb8, 0xf8, 0x4f, 0x44, 0xf5,
0xf7, 0x89, 0xf0, 0x24, 0x24, 0xcf, 0x53, 0xf3,
0x1d, 0x3f, 0xe5, 0xfd, 0xa5, 0x2f, 0xff, 0xa3,
0xf3, 0x5f, 0x4c, 0xcb, 0x77, 0x4f, 0x66, 0xf2,
0x67, 0x66, 0xaf, 0xd2, 0xf2, 0x68, 0x6a, 0x6f,
0x24, 0xf2, 0x46, 0x1e, 0xef, 0x64, 0xf4, 0x7c,
0x76, 0xef, 0xc6, 0xf7, 0x7a, 0x54, 0x7f, 0x17,
0xf3, 0x74, 0x76, 0xef, 0x43, 0xf3, 0x28, 0x2c,
0x45, 0xf4, 0x65, 0x35, 0xff, 0xe2, 0xd2, 0xee,
0xd8, 0x66, 0xf4, 0xa2, 0x66, 0x6f, 0x65, 0xa7,
0x77, 0x8f, 0x83, 0xf2, 0x38, 0x38, 0x4f, 0x47,
0xa7, 0x15, 0x2a, 0xe2, 0x22, 0xf2, 0x26, 0x26,
0x2b, 0x22, 0x77, 0x72, 0xaf, 0xa2, 0xf2, 0x26,
0x24, 0xaf, 0x82, 0x52, 0x44, 0x8f, 0x86, 0xf6,
0x48, 0x68, 0x2a, 0xa8, 0x17, 0x9a, 0x89, 0xa1,
0xdd, 0xfe, 0xf2, 0xaf, 0x89, 0xfb, 0x84, 0x9c,
0x8f, 0x81, 0xf9, 0x92, 0xba, 0x9a, 0xfb, 0xac,
0xbc, 0x8f, 0x8f, 0xff, 0xcc, 0xdc, 0xbf, 0xdb,
0x0d, 0x00, 0x2a, 0x01, 0x24, 0x84, 0x49, 0x61,
0x81, 0x22, 0x20, 0x02, 0x60, 0x81, 0x24, 0x1e,
0x48, 0x80, 0x44, 0x26, 0x11, 0x08, 0x46, 0x04,
0x4c, 0x12, 0x02, 0x21, 0x44, 0x29, 0x11, 0x28,
0x22, 0x04, 0xa0, 0x12, 0x00, 0x70, 0x22, 0x02,
0x00, 0x22, 0x12, 0x22, 0x1a, 0x04, 0x00, 0x82,
0x28, 0x00, 0x12, 0x28, 0x92, 0x00, 0xaf, 0xd3,
0x06, 0x00, 0x14, 0x40, 0xc1, 0x24, 0x14, 0x48,
0xc0, 0x2c, 0x25, 0x81, 0x54, 0x22, 0x41, 0x21,
0x22, 0x44, 0x90, 0x22, 0x43, 0x02, 0x00, 0x11,
0x50, 0x84, 0xa0, 0x42, 0x80, 0x04, 0x18, 0x20,
0x02, 0x00, 0x23, 0x22, 0x01, 0x84, 0x22, 0x30,
0x48, 0x00, 0x00, 0x00, 0x48, 0x81, 0x00, 0x84,
0x21, 0x00, 0x86, 0xf8, 0xd2, 0x98, 0xc0, 0x42,
0x1f, 0x41, 0x52, 0x28, 0x1f, 0x41, 0x72, 0x48,
0xf3, 0x11, 0x64, 0xa7, 0x14, 0x1d, 0x24, 0xab,
0x14, 0x1d, 0x24, 0xaf, 0x16, 0x99, 0x21, 0xab,
0x94, 0x51, 0xaf, 0x24, 0x39, 0x24, 0xaf, 0x34,
0x39, 0x24, 0x2f, 0x34, 0x79, 0x25, 0xd8, 0x1a,
0xfb, 0x24, 0x48, 0x1c, 0xf9, 0x25, 0x4a, 0x1c,
0xf8, 0x26, 0x4a, 0x87, 0x11, 0x4f, 0xa2, 0x64,
0x19, 0x2e, 0x4a, 0x96, 0xc5, 0x4a, 0xde, 0x24,
0x4d, 0x4a, 0x1f, 0x49, 0xc2, 0x43, 0x1f, 0x49,
0x52, 0x38, 0x1f, 0xc9, 0x72, 0x6a, 0xf2, 0x91,
0x24, 0xa3, 0xf4, 0x81, 0x24, 0xeb, 0x14, 0x1d,
0x64, 0xab, 0x94, 0x19, 0xb2, 0x4a, 0x19, 0xf5,
0x4a, 0x91, 0x43, 0xb2, 0x4a, 0x7b, 0x24, 0xf8,
0x42, 0x92, 0x47, 0x82, 0x2d, 0x94, 0x4f, 0x82,
0x84, 0xf9, 0x24, 0x4a, 0x17, 0x68, 0xe0, 0x22,
0xf4, 0x11, 0x68, 0xb0, 0x11, 0x36, 0x48, 0x1f,
0x41, 0x12, 0xd8, 0x41, 0x92, 0x1a, 0x19, 0xf2,
0x42, 0x94, 0x11, 0xad, 0xb4, 0x11, 0xab, 0xb4,
0x43, 0xe2, 0x14, 0x39, 0x24, 0x2d, 0x81, 0x26,
0xd8, 0x12, 0xbb, 0x26, 0xc4, 0xb1, 0x6f, 0xa2,
0x85, 0xc8, 0xda, 0x1e, 0x81, 0x4f, 0xa2, 0x64,
0x19, 0x2e, 0x4a, 0x96, 0xc5, 0x4b, 0x8e, 0x24,
0xbc, 0xf4, 0x91, 0x2c, 0x2c, 0xf4, 0x91, 0x68,
0x85, 0xf2, 0x91, 0x64, 0x83, 0x74, 0x91, 0x34,
0x48, 0x1b, 0x29, 0xa9, 0x91, 0x21, 0xaf, 0x44,
0x91, 0x21, 0xaf, 0x44, 0x1b, 0xb5, 0x4a, 0x1a,
0x24, 0x24, 0x32, 0x42, 0x22, 0x29, 0x31, 0x22,
0x18, 0x2f, 0x22, 0xf4, 0xce, 0x1c, 0xc0, 0x48,
0x82, 0x00, 0x00, 0x00, 0x00, 0x40, 0x04, 0x80,
0x01, 0x00, 0x00, 0x00, 0x80, 0x82, 0x01, 0x88,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x11, 0x00, 0x80, 0x01, 0x00, 0x00, 0x00,
0x14, 0xd0, 0x4e, 0x01, 0x1e, 0x42, 0x4c, 0x03,
0x10, 0x13, 0x19, 0xca, 0x24, 0x2d, 0x11, 0x4a,
0x22, 0x22, 0x41, 0x02, 0x00, 0x36, 0x84, 0x92,
0x22, 0x84, 0x29, 0xf4, 0x28, 0x14, 0x52, 0x00,
0x21, 0x10, 0x08, 0x21, 0xb0, 0x24, 0x11, 0xa2,
0x12, 0x2b, 0x42, 0x45, 0xc2, 0x31, 0x22, 0x2c,
0x41, 0x02, 0x31, 0x40, 0x88, 0x84, 0x25, 0x81,
0xc4, 0x11, 0x30, 0x28, 0x84, 0x40, 0x82, 0x08,
0xa0, 0x28, 0xd0, 0x4b, 0x0b, 0x49, 0x32, 0x81,
0x99, 0x72, 0x11, 0x04, 0x42, 0x10, 0x82, 0x92,
0x14, 0x48, 0x27, 0x12, 0x48, 0x49, 0x11, 0x4a,
0x94, 0x43, 0x85, 0x64, 0x81, 0x27, 0x42, 0x51,
0x00, 0xcc, 0x12, 0x12, 0x28, 0x42, 0x02, 0x84,
0x23, 0x25, 0x04, 0x29, 0x04,
<reponame>Kaushal28/FOTS-PyTorch
import warnings
import numpy as np
import cv2
import torch
from scipy.spatial import ConvexHull
from shapely.geometry import Polygon
# Reference: https://bitbucket.org/william_rusnack/minimumboundingbox/src/master/
# This helps to generate the rotated rectangle with minimum area that covers the
# quadrangle bbox ground. It uses convex hull under the hoods to solve this problem.
# The entire concept is well explained here: https://stackoverflow.com/q/13542855/5353128
def minimum_bounding_rectangle(points):
"""
Find the smallest bounding rectangle for a set of points.
Returns a set of points representing the corners of the bounding box.
:param points: an n * 2 matrix of coordinates
:rval: an n * 2 matrix of coordinates
"""
from scipy.ndimage.interpolation import rotate
pi2 = np.pi/2.
# get the convex hull for the points
hull_points = points[ConvexHull(points).vertices]
# calculate edge angles
edges = hull_points[1:] - hull_points[:-1]
angles = np.arctan2(edges[:, 1], edges[:, 0])
angles = np.abs(np.mod(angles, pi2))
angles = np.unique(angles)
# find rotation matrices
# XXX both work
rotations = np.vstack([
np.cos(angles),
np.cos(angles-pi2),
np.cos(angles+pi2),
np.cos(angles)]).T
rotations = rotations.reshape((-1, 2, 2))
# apply rotations to the hull
rot_points = np.dot(rotations, hull_points.T)
# find the bounding points
min_x = np.nanmin(rot_points[:, 0], axis=1)
max_x = np.nanmax(rot_points[:, 0], axis=1)
min_y = np.nanmin(rot_points[:, 1], axis=1)
max_y = np.nanmax(rot_points[:, 1], axis=1)
# find the box with the best area
areas = (max_x - min_x) * (max_y - min_y)
best_idx = np.argmin(areas)
# return the best box
x1 = max_x[best_idx]
x2 = min_x[best_idx]
y1 = max_y[best_idx]
y2 = min_y[best_idx]
r = rotations[best_idx]
rval = np.zeros((4, 2))
rval[0] = np.dot([x1, y2], r)
rval[1] = np.dot([x2, y2], r)
rval[2] = np.dot([x2, y1], r)
rval[3] = np.dot([x1, y1], r)
return rval
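# A minimal sketch of minimum_bounding_rectangle: for axis-aligned points the result
# is simply their axis-aligned bounding box (corner ordering may differ from the input).
def _example_minimum_bounding_rectangle():
    pts = np.array([[0.0, 0.0], [2.0, 0.0], [2.0, 1.0], [0.0, 1.0], [1.0, 0.5]])
    return minimum_bounding_rectangle(pts)  # four corners of the 2 x 1 rectangle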
def icdar_collate(batch):
"""
    Collate function for the ICDAR dataset. It receives a batch of ground truths
    and formats it into the required format.
"""
image_paths, img, boxes, training_mask, transcripts, score_map, geo_map = zip(*batch)
batch_size = len(score_map)
images, score_maps, geo_maps, training_masks = [], [], [], []
# convert all numpy arrays to tensors
for idx in range(batch_size):
if img[idx] is not None:
images.append(torch.from_numpy(img[idx]).permute(2, 0, 1))
score_maps.append(torch.from_numpy(score_map[idx]).permute(2, 0, 1))
geo_maps.append(torch.from_numpy(geo_map[idx]).permute(2, 0, 1))
training_masks.append(torch.from_numpy(training_mask[idx]).permute(2, 0, 1))
images = torch.stack(images, 0)
score_maps = torch.stack(score_maps, 0)
geo_maps = torch.stack(geo_maps, 0)
training_masks = torch.stack(training_masks, 0)
texts, bboxs, mapping = [], [], []
for idx, (text, bbox) in enumerate(zip(transcripts, boxes)):
for txt, box in zip(text, bbox):
mapping.append(idx)
texts.append(txt)
bboxs.append(box)
mapping = np.array(mapping)
texts = np.array(texts)
bboxs = np.stack(bboxs, axis=0)
bboxs = np.concatenate([bboxs, np.ones((len(bboxs), 1))], axis = 1).astype(np.float32)
return image_paths, images, bboxs, training_masks, texts, score_maps, geo_maps, mapping
def synth800k_collate(batch):
"""
    Collate function for the Synth800k dataset. It receives a batch of ground truths
    and formats it into the required format.
"""
image_paths, img, boxes, training_mask, transcripts, score_map, geo_map = zip(*batch)
batch_size = len(score_map)
images, score_maps, geo_maps, training_masks = [], [], [], []
# convert all numpy arrays to tensors
for idx in range(batch_size):
if img[idx] is not None:
images.append(torch.from_numpy(img[idx]).permute(2, 0, 1))
score_maps.append(torch.from_numpy(score_map[idx]).permute(2, 0, 1))
geo_maps.append(torch.from_numpy(geo_map[idx]).permute(2, 0, 1))
training_masks.append(torch.from_numpy(training_mask[idx]).permute(2, 0, 1))
images = torch.stack(images, 0)
score_maps = torch.stack(score_maps, 0)
geo_maps = torch.stack(geo_maps, 0)
training_masks = torch.stack(training_masks, 0)
return image_paths, images, score_maps, geo_maps, training_masks
def l2_norm(p1, p2=np.array([0, 0])):
"""
Calculates the L2 norm (euclidean distance) between given two points.
point (pi) should be in format (x, y)
"""
return np.linalg.norm(p1 - p2)
def shrink_bbox(bbox, reference_lens, shrink_ratio=0.3):
"""
Shrink the given bbox by given ratio.
It first shrinks the two longer edges of a quadrangle, and then the
two shorter ones. For each pair of two opposing edges, it determines
the “longer” pair by comparing the mean of their lengths.
For each edge (pi, p(i mod 4)+1),
it shrinks it by moving its two endpoints inward along the
edge by shrink_ratio*reference_lens[i] and
shrink_ratio*reference_lens[(i mod 4)+1] respectively.
bbox shape: (4, 2) (clock wise from top left).
"""
reference_lens = shrink_ratio * reference_lens
# First find the "longer" edge pair
if (
# top horizontal edge + bottom horizontal edge
l2_norm(bbox[0] - bbox[1]) + l2_norm(bbox[2] - bbox[3]) >
# left vertical edge + right vertical edge
l2_norm(bbox[0] - bbox[3]) + l2_norm(bbox[1] - bbox[2])
):
        # This means the pair of horizontal edges is "longer" than the pair
# of vertical edges. So first shrink (p0, p1) and (p2, p3)
# then shrink (p1, p2) and (p3, p0)
# angle of edge between p0 and p1. Which is tan-1((y2-y1)/(x2-x1))
theta = np.arctan2((bbox[1][1] - bbox[0][1]), (bbox[1][0] - bbox[0][0]))
bbox[0][0] += reference_lens[0] * np.cos(theta)
bbox[0][1] += reference_lens[0] * np.sin(theta)
bbox[1][0] -= reference_lens[1] * np.cos(theta)
bbox[1][1] -= reference_lens[1] * np.sin(theta)
# shrink p2 and p3
theta = np.arctan2((bbox[2][1] - bbox[3][1]), (bbox[2][0] - bbox[3][0]))
bbox[2][0] -= reference_lens[2] * np.cos(theta)
bbox[2][1] -= reference_lens[2] * np.sin(theta)
bbox[3][0] += reference_lens[3] * np.cos(theta)
bbox[3][1] += reference_lens[3] * np.sin(theta)
# Then shrink p0 and p3
theta = np.arctan2((bbox[3][0] - bbox[0][0]), (bbox[3][1] - bbox[0][1]))
bbox[0][0] += reference_lens[0] * np.sin(theta)
bbox[0][1] += reference_lens[0] * np.cos(theta)
bbox[3][0] -= reference_lens[3] * np.sin(theta)
bbox[3][1] -= reference_lens[3] * np.cos(theta)
# shrink p1 and p2
theta = np.arctan2((bbox[2][0] - bbox[1][0]), (bbox[2][1] - bbox[1][1]))
bbox[1][0] += reference_lens[1] * np.sin(theta)
bbox[1][1] += reference_lens[1] * np.cos(theta)
bbox[2][0] -= reference_lens[2] * np.sin(theta)
bbox[2][1] -= reference_lens[2] * np.cos(theta)
else:
        # This means the pair of vertical edges is "longer" than the pair
# of horizontal edges. So first shrink (p1, p2) and (p3, p0)
# then shrink (p0, p1) and (p2, p3)
theta = np.arctan2((bbox[3][0] - bbox[0][0]), (bbox[3][1] - bbox[0][1]))
bbox[0][0] += reference_lens[0] * np.sin(theta)
bbox[0][1] += reference_lens[0] * np.cos(theta)
bbox[3][0] -= reference_lens[3] * np.sin(theta)
bbox[3][1] -= reference_lens[3] * np.cos(theta)
# shrink p1, p2
theta = np.arctan2((bbox[2][0] - bbox[1][0]), (bbox[2][1] - bbox[1][1]))
bbox[1][0] += reference_lens[1] * np.sin(theta)
bbox[1][1] += reference_lens[1] * np.cos(theta)
bbox[2][0] -= reference_lens[2] * np.sin(theta)
bbox[2][1] -= reference_lens[2] * np.cos(theta)
# shrink p0, p1
theta = np.arctan2((bbox[1][1] - bbox[0][1]), (bbox[1][0] - bbox[0][0]))
bbox[0][0] += reference_lens[0] * np.cos(theta)
bbox[0][1] += reference_lens[0] * np.sin(theta)
bbox[1][0] -= reference_lens[1] * np.cos(theta)
bbox[1][1] -= reference_lens[1] * np.sin(theta)
# shrink p2, p3
theta = np.arctan2((bbox[2][1] - bbox[3][1]), (bbox[2][0] - bbox[3][0]))
bbox[3][0] += reference_lens[3] * np.cos(theta)
bbox[3][1] += reference_lens[3] * np.sin(theta)
bbox[2][0] -= reference_lens[2] * np.cos(theta)
bbox[2][1] -= reference_lens[2] * np.sin(theta)
return bbox
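# A worked sketch of shrink_bbox on an axis-aligned 10 x 4 box. reference_lens is assumed
# (as in the EAST paper) to hold, per vertex, the length of its shorter adjacent edge;
# that convention is defined outside this excerpt.
def _example_shrink_bbox():
    bbox = np.array([[0.0, 0.0], [10.0, 0.0], [10.0, 4.0], [0.0, 4.0]])
    reference_lens = np.array([4.0, 4.0, 4.0, 4.0])
    # Every vertex moves inward by 0.3 * 4 = 1.2 in each of the two shrink passes:
    # -> [[1.2, 1.2], [8.8, 1.2], [8.8, 2.8], [1.2, 2.8]]
    return shrink_bbox(bbox, reference_lens, shrink_ratio=0.3)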
def _point_to_line_dist(p1, p2, p3):
"""
Find perpendicular distance from point p3 to line passing through
p1 and p2.
Reference: https://stackoverflow.com/a/39840218/5353128
"""
return np.linalg.norm(np.cross(p2 - p1, p1 - p3)) / np.linalg.norm(p2 - p1)
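# A one-line sanity sketch: the distance from (0, 1) to the x-axis (the line through
# (0, 0) and (1, 0)) is 1.
def _example_point_to_line_dist():
    return _point_to_line_dist(np.array([0.0, 0.0]), np.array([1.0, 0.0]),
                               np.array([0.0, 1.0]))  # -> 1.0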
def _align_vertices(bbox):
"""
Align (sort) the vertices of the given bbox (rectangle) in such a way
that the base of the rectangle forms minimum angle with horizontal axis.
This is required because a single rectangle can be written in many
ways (just by rotating the vertices in the list notation) such that the
    base of the rectangle changes between notations and forms an angle that is
    a multiple of the original minimum angle.
Reference: EAST implementation for ICDAR-2015 dataset:
https://github.com/argman/EAST/blob/dca414de39a3a4915a019c9a02c1832a31cdd0ca/icdar.py#L352
"""
p_lowest = np.argmax(bbox[:, 1])
if np.count_nonzero(bbox[:, 1] == bbox[p_lowest, 1]) == 2:
        # This means there are two points on the horizontal axis (because there are two lowest points).
# That means 0 angle.
# The bottom edge is parallel to the X-axis, then p0 is the upper left corner.
p0_index = np.argmin(np.sum(bbox, axis=1))
p1_index = (p0_index + 1) % 4
p2_index = (p0_index + 2) % 4
p3_index = (p0_index + 3) % 4
return bbox[[p0_index, p1_index, p2_index, p3_index]], 0.0
else:
# Find the point to the right of the lowest point.
p_lowest_right = (p_lowest - 1) % 4
angle = np.arctan(
-(bbox[p_lowest][1] - bbox[p_lowest_right][1]) / (bbox[p_lowest][0] - bbox[p_lowest_right][0])
)
if angle / np.pi * 180 > 45:
# Lowest point is p2
p2_index = p_lowest
p1_index = (p2_index - 1) % 4
p0_index = (p2_index - 2) % 4
p3_index = (p2_index + 1) % 4
return bbox[[p0_index, p1_index, p2_index, p3_index]], -(np.pi/2 - angle)
else:
# Lowest point is p3
p3_index = p_lowest
p0_index = (p3_index + 1) % 4
p1_index = (p3_index + 2) % 4
p2_index = (p3_index + 3) % 4
return bbox[[p0_index, p1_index, p2_index, p3_index]], angle
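# A minimal sketch of _align_vertices: an axis-aligned box (image coordinates, y grows
# downward) is already aligned, so p0 comes back as the top-left corner and the angle is 0.
def _example_align_vertices():
    box = np.array([[10.0, 2.0], [50.0, 2.0], [50.0, 20.0], [10.0, 20.0]])
    aligned, angle = _align_vertices(box)
    return aligned, angle  # aligned[0] == [10., 2.]; angle == 0.0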
def generate_rbbox(image, bboxes, transcripts):
"""
Generate RBOX (Rotated bbox) as per | |
import logging
from tool_shed.base.twilltestcase import common, ShedTwillTestCase
log = logging.getLogger( __name__ )
category_name = 'Test 0460 Automatic repository revision completion'
category_description = 'Test 0460 Automatic repository revision completion'
datatypes_repository_name = 'emboss_datatypes_0460'
datatypes_repository_description = "Galaxy applicable data formats used by Emboss tools."
datatypes_repository_long_description = "Galaxy applicable data formats used by Emboss tools. This repository contains no tools."
bwa_repository_name = 'package_bwa_0_5_9_0460'
bwa_repository_description = "Contains a tool dependency definition that downloads and compiles version 0.5.9 of the BWA package"
bwa_repository_long_description = "bwa (alignment via Burrows-Wheeler transformation) 0.5.9-r16 by <NAME>"
'''
For all steps, verify that the generated dependency points to the tip of the specified repository.
1) Create and populate emboss_datatypes_0460.
2) Create and populate package_bwa_0_5_9_0460
3) Create complex_dependency_test_1_0460, complex_dependency_test_2_0460, complex_dependency_test_3_0460,
complex_dependency_test_4_0460, complex_dependency_test_5_0460.
4) Upload an uncompressed tool_dependencies.xml to complex_dependency_test_1_0460 that specifies a complex
repository dependency on package_bwa_0_5_9_0460 without a specified changeset revision or tool shed url.
5) Upload a tarball to complex_dependency_test_2_0460 with a tool_dependencies.xml in the root of the tarball.
6) Upload a tarball to complex_dependency_test_3_0460 with a tool_dependencies.xml in a subfolder within the tarball.
7) Create hg_tool_dependency_0460 and hg_subfolder_tool_dependency_0460 and populate with tool dependencies.
8) Upload to complex_dependency_test_4_0460 using the url hg://<tool shed url>/repos/user1/hg_tool_dependency_0460.
9) Upload to complex_dependency_test_5_0460 using the url hg://<tool shed url>/repos/user1/hg_subfolder_tool_dependency_0460.
10) Create repository_dependency_test_1_0460, repository_dependency_test_2_0460, repository_dependency_test_3_0460,
repository_dependency_test_4_0460, repository_dependency_test_5_0460.
11) Upload an uncompressed repository_dependencies.xml to repository_dependency_test_1_0460 that specifies a
repository dependency on emboss_datatypes_0460 without a specified changeset revision or tool shed url.
12) Upload a tarball to repository_dependency_test_2_0460 with a repository_dependencies.xml in the root of the tarball.
13) Upload a tarball to repository_dependency_test_3_0460 with a repository_dependencies.xml in a subfolder within the tarball.
14) Create hg_repository_dependency_0460 and populate with repository_dependencies.xml.
15) Upload to repository_dependency_test_4_0460 using the url hg://<tool shed url>/repos/user1/hg_repository_dependency_0460.
16) Upload to repository_dependency_test_5_0460 using the url hg://<tool shed url>/repos/user1/hg_repository_dependency_0460.
'''
class TestAutomaticDependencyRevision( ShedTwillTestCase ):
'''Test defining repository dependencies without specifying the changeset revision.'''
def test_0000_initiate_users( self ):
"""Create necessary user accounts and login as an admin user."""
self.logout()
self.login( email=common.test_user_1_email, username=common.test_user_1_name )
test_user_1 = self.test_db_util.get_user( common.test_user_1_email )
assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email
self.test_db_util.get_private_role( test_user_1 )
self.logout()
self.login( email=common.test_user_2_email, username=common.test_user_2_name )
test_user_2 = self.test_db_util.get_user( common.test_user_2_email )
assert test_user_2 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_2_email
self.test_db_util.get_private_role( test_user_2 )
self.logout()
self.login( email=common.admin_email, username=common.admin_username )
admin_user = self.test_db_util.get_user( common.admin_email )
assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email
self.test_db_util.get_private_role( admin_user )
def test_0005_create_datatypes_repository( self ):
'''Create and populate the emboss_datatypes_0460 repository'''
'''
This is step 1 - Create and populate emboss_datatypes_0460.
'''
self.create_category( name=category_name, description=category_description )
self.logout()
self.login( email=common.test_user_1_email, username=common.test_user_1_name )
category = self.test_db_util.get_category_by_name( category_name )
repository = self.get_or_create_repository( name=datatypes_repository_name,
description=datatypes_repository_description,
long_description=datatypes_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
self.upload_file( repository,
filename='emboss/datatypes/datatypes_conf.xml',
filepath=None,
valid_tools_only=True,
uncompress_file=False,
remove_repo_files_not_in_tar=False,
commit_message='Populate emboss_datatypes_0460 with datatype definitions.',
strings_displayed=[],
strings_not_displayed=[] )
def test_0010_create_bwa_package_repository( self ):
'''Create and populate the package_bwa_0_5_9_0460 repository.'''
'''
This is step 2 - Create and populate package_bwa_0_5_9_0460.
'''
category = self.test_db_util.get_category_by_name( category_name )
repository = self.get_or_create_repository( name=bwa_repository_name,
description=bwa_repository_description,
long_description=bwa_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
self.upload_file( repository,
filename='bwa/complex/tool_dependencies.xml',
filepath=None,
valid_tools_only=True,
uncompress_file=False,
remove_repo_files_not_in_tar=False,
commit_message='Populate package_bwa_0_5_9_0460 with a tool dependency definition.',
strings_displayed=[],
strings_not_displayed=[] )
def test_0015_create_tool_dependency_repositories( self ):
'''Create repositories for testing complex dependency generation.'''
'''
This is step 3 - Create complex_dependency_test_1_0460, complex_dependency_test_2_0460, complex_dependency_test_3_0460,
complex_dependency_test_4_0460, complex_dependency_test_5_0460. Each of these repositories will be populated in a way
that tests a different way to achieve the same resulting dependency structure using complex tool dependencies.
The different methods being tested are:
- Upload an uncompressed tool_dependencies.xml to the root of the repository.
- Upload a tool_dependencies.xml in a tarball, not in a subfolder.
- Upload a tool_dependencies.xml in a subfolder within a tarball.
- Upload via url, with the tool_dependencies.xml in the root of another repository.
- Upload via url, with the tool_dependencies.xml in a subfolder within another repository.
'''
category = self.test_db_util.get_category_by_name( category_name )
repository_base_name = 'complex_dependency_test_%d_0460'
repository_base_description = 'Test #%d for complex repository dependency definitions.'
repository_base_long_description = 'Test #%d for complex repository dependency definitions.'
for number in range( 1, 6 ):
self.get_or_create_repository( name=repository_base_name % number,
description=repository_base_description % number,
long_description=repository_base_long_description % number,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
def test_0020_populate_complex_dependency_test_1_0460( self ):
'''Populate complex_dependency_test_1_0460.'''
'''
This is step 4 - Upload an uncompressed tool_dependencies.xml to complex_dependency_test_1_0460 that specifies
a complex repository dependency on package_bwa_0_5_9_0460 without a specified changeset revision or tool shed url.
'''
repository = self.test_db_util.get_repository_by_name_and_owner( 'complex_dependency_test_1_0460', common.test_user_1_name )
package_repository = self.test_db_util.get_repository_by_name_and_owner( 'package_bwa_0_5_9_0460', common.test_user_1_name )
self.upload_file( repository,
filename='0460_files/tool_dependencies.xml',
filepath=None,
valid_tools_only=True,
uncompress_file=False,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded complex repository dependency definition.',
strings_displayed=[],
strings_not_displayed=[] )
changeset_revision = self.get_repository_tip( package_repository )
strings_displayed = [ 'package_bwa_0_5_9_0460', 'bwa', '0.5.9', 'package', changeset_revision ]
self.display_manage_repository_page( repository, strings_displayed=strings_displayed )
self.display_repository_file_contents( repository, filename='tool_dependencies.xml', strings_displayed=[ changeset_revision ] )
def test_0025_populate_complex_dependency_test_2_0460( self ):
'''Populate complex_dependency_test_2_0460.'''
'''
        This is step 5 - Upload a tarball with a tool_dependencies.xml to complex_dependency_test_2_0460 that specifies
a complex repository dependency on package_bwa_0_5_9_0460 without a specified changeset revision or tool shed url.
'''
repository = self.test_db_util.get_repository_by_name_and_owner( 'complex_dependency_test_2_0460', common.test_user_1_name )
package_repository = self.test_db_util.get_repository_by_name_and_owner( 'package_bwa_0_5_9_0460', common.test_user_1_name )
self.upload_file( repository,
filename='0460_files/tool_dependencies_in_root.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=False,
remove_repo_files_not_in_tar=True,
commit_message='Uploaded complex repository dependency definition.',
strings_displayed=[],
strings_not_displayed=[] )
changeset_revision = self.get_repository_tip( package_repository )
strings_displayed = [ 'package_bwa_0_5_9_0460', 'bwa', '0.5.9', 'package', changeset_revision ]
self.display_manage_repository_page( repository, strings_displayed=strings_displayed )
self.display_repository_file_contents( repository, filename='tool_dependencies.xml', strings_displayed=[ changeset_revision ] )
def test_0030_populate_complex_dependency_test_3_0460( self ):
'''Populate complex_dependency_test_3_0460.'''
'''
        This is step 6 - Upload a tarball with a tool_dependencies.xml in a subfolder to complex_dependency_test_3_0460 that
specifies a complex repository dependency on package_bwa_0_5_9_0460 without a specified changeset revision or tool shed url.
'''
repository = self.test_db_util.get_repository_by_name_and_owner( 'complex_dependency_test_3_0460', common.test_user_1_name )
package_repository = self.test_db_util.get_repository_by_name_and_owner( 'package_bwa_0_5_9_0460', common.test_user_1_name )
self.upload_file( repository,
filename='0460_files/tool_dependencies_in_subfolder.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=False,
remove_repo_files_not_in_tar=True,
commit_message='Uploaded complex repository dependency definition.',
strings_displayed=[],
strings_not_displayed=[] )
changeset_revision = self.get_repository_tip( package_repository )
strings_displayed = [ 'package_bwa_0_5_9_0460', 'bwa', '0.5.9', 'package', changeset_revision ]
self.display_manage_repository_page( repository, strings_displayed=strings_displayed )
self.display_repository_file_contents( repository,
filename='tool_dependencies.xml',
filepath='subfolder',
strings_displayed=[ changeset_revision ] )
def test_0035_create_repositories_for_url_upload( self ):
'''Create and populate hg_tool_dependency_0460 and hg_subfolder_tool_dependency_0460.'''
'''
This is step 7 - Create hg_tool_dependency_0460 and hg_subfolder_tool_dependency_0460 and populate with tool dependencies.
'''
category = self.test_db_util.get_category_by_name( category_name )
repository = self.get_or_create_repository( name='hg_tool_dependency_0460',
description=bwa_repository_description,
long_description=bwa_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
self.upload_file( repository,
filename='0460_files/tool_dependencies.xml',
filepath=None,
valid_tools_only=True,
uncompress_file=False,
remove_repo_files_not_in_tar=False,
commit_message='Populate hg_tool_dependency_0460 with a tool dependency definition.',
strings_displayed=[],
strings_not_displayed=[] )
repository = self.get_or_create_repository( name='hg_subfolder_tool_dependency_0460',
description=bwa_repository_description,
long_description=bwa_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
self.upload_file( repository,
filename='0460_files/tool_dependencies_in_subfolder.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=False,
remove_repo_files_not_in_tar=False,
commit_message='Populate hg_subfolder_tool_dependency_0460 with a tool dependency definition.',
strings_displayed=[],
strings_not_displayed=[] )
def test_0040_url_upload_to_complex_test( self ):
'''Populate complex_dependency_test_4_0460.'''
'''
This is step 8 - Upload to complex_dependency_test_4_0460 using the url hg://<tool shed url>/repos/user1/hg_tool_dependency_0460.
'''
url = 'hg://%s:%s/repos/user1/hg_tool_dependency_0460' % ( self.host, self.port )
repository = self.test_db_util.get_repository_by_name_and_owner( 'complex_dependency_test_4_0460', common.test_user_1_name )
package_repository = self.test_db_util.get_repository_by_name_and_owner( 'package_bwa_0_5_9_0460', common.test_user_1_name )
self.upload_url( repository,
url=url,
filepath=None,
valid_tools_only=True,
uncompress_file=False,
remove_repo_files_not_in_tar=True,
commit_message='Uploaded complex repository dependency definition.',
strings_displayed=[],
strings_not_displayed=[] )
changeset_revision = self.get_repository_tip( package_repository )
strings_displayed = [ 'package_bwa_0_5_9_0460', 'bwa', '0.5.9', 'package', changeset_revision ]
self.display_manage_repository_page( repository, strings_displayed=strings_displayed )
self.display_repository_file_contents( repository,
filename='tool_dependencies.xml',
strings_displayed=[ changeset_revision ] )
def test_0045_url_upload_to_complex_test( self ):
        '''Populate complex_dependency_test_5_0460.'''
'''
This is step 9 - Upload to complex_dependency_test_5_0460 using the url hg://<tool shed url>/repos/user1/hg_subfolder_tool_dependency_0460.
'''
url = 'hg://%s:%s/repos/user1/hg_subfolder_tool_dependency_0460' % ( self.host, self.port )
repository = self.test_db_util.get_repository_by_name_and_owner( 'complex_dependency_test_5_0460', common.test_user_1_name )
package_repository = self.test_db_util.get_repository_by_name_and_owner( 'package_bwa_0_5_9_0460', common.test_user_1_name )
self.upload_url( repository,
url=url,
filepath=None,
valid_tools_only=True,
uncompress_file=False,
remove_repo_files_not_in_tar=True,
commit_message='Uploaded complex repository dependency definition.',
strings_displayed=[],
strings_not_displayed=[] )
changeset_revision = self.get_repository_tip( package_repository )
strings_displayed = [ 'package_bwa_0_5_9_0460', 'bwa', '0.5.9', 'package', changeset_revision ]
self.display_manage_repository_page( repository, strings_displayed=strings_displayed )
self.display_repository_file_contents( repository,
filename='tool_dependencies.xml',
filepath='subfolder',
strings_displayed=[ changeset_revision ] )
def test_0050_create_repositories_for_simple_dependencies( self ):
'''Create repositories for testing simple dependency generation.'''
'''
This is step 10 - Create repository_dependency_test_1_0460, repository_dependency_test_2_0460, repository_dependency_test_3_0460,
        repository_dependency_test_4_0460, repository_dependency_test_5_0460. Each of these repositories will be populated in a way
        that tests a different way to achieve the same resulting dependency structure using simple repository dependencies.
The different methods being tested are:
- Upload an uncompressed repository_dependencies.xml to the root of the repository.
- Upload a repository_dependencies.xml in a tarball, not in a subfolder.
- Upload a repository_dependencies.xml in a subfolder within a tarball.
- Upload via url, with the repository_dependencies.xml in the root of another repository.
<filename>fangSpider/spiders/fangIndex.py
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from fangSpider.items import FangspiderLoupanItem
from fangSpider.items import NewhouseIndexItem
import time
import sys
import re
from bs4 import BeautifulSoup
from fangSpider.items import NewhouseDetailItem
from fangSpider.items import NewhouseKaipanDetail
from fangSpider.items import NewhouseKaipanPostDetail
from fangSpider.items import NewhouseDeliveryTimeDetailIndex
from fangSpider import mylogger
pageCouner = 0
loupan_index = []
# Site domain. Left out here to avoid disputes; fill in the pinyin of "房" (fang) yourself.
host = 'fang'
site = host+'.com'
# Whether to crawl only a single city
isSoloCityOnly = True
# City code obtained from the corresponding site, e.g. Zhengzhou is 'zz'
citycode = 'zz'
start_url = citycode + '.' + site
# Get the logger
ml = mylogger.myLogger()
logger = ml.getLogger()
if isSoloCityOnly:
startList = ['https://'+start_url]
ruleList =(
Rule(LinkExtractor(allow=r'http://'+citycode+'\.'+host+'\.com/$'), callback='dircet_to_family', follow=True),
)
else:
startList = ['https://www.'+site+'/SoufunFamily.htm']
ruleList = (
Rule(LinkExtractor(allow=r'http://.+\.'+host+'\.com/$'), callback='dircet_to_family', follow=False),
)
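# A minimal sketch of how this configuration is meant to be used (the exact run
# command and settings are assumptions, not taken from this file): with
# isSoloCityOnly = True and citycode = 'zz' the spider starts from that single
# city's index page; with it set to False it walks the SoufunFamily.htm city
# directory instead. In a standard Scrapy project layout it would typically be
# launched with something like:
#
#     scrapy crawl fangIndex -s DOWNLOAD_DELAY=1
#
# (the DOWNLOAD_DELAY value is only an illustrative assumption).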
class FangindexSpider(CrawlSpider):
name = 'fangIndex'
allowed_domains = [site]
start_urls = startList
rules = ruleList
def parse_item(self, response):
item = {}
return item
def dircet_to_family(self, response):
"""处理城市首页的跳转
"""
# 提取链接
newhouse_family = None
esf_family = None
zu_family = None
newhouse_family_list = response.xpath('//a[contains(text(),"新房")]/@href').re('.*new.*')
if len(newhouse_family_list)>0:
newhouse_family = newhouse_family_list[0]
esf_family_list = response.xpath('//a[contains(text(),"二手房")]/@href').re('.*esf.*')
if len(esf_family_list)>0:
esf_family = esf_family_list[0]
zu_family_list = response.xpath('//a[contains(text(),"找租房")]/@href').re('.*zu.*')
if len(zu_family_list)>0:
zu_family = zu_family_list[0]
        # If a link is not empty, hand the page to the corresponding parse method
if newhouse_family is None:
pass
else:
yield scrapy.Request(newhouse_family, callback=self.parse_family)
        # ☑️ TODO: second-hand housing (esf)
if esf_family is None:
pass
else:
pass
        # ☑️ TODO: rental listings (zu)
if zu_family is None:
pass
else:
pass
pass
def parse_family(self, response):
"""处理新房首页的跳转和抓取
"""
ml.l3(response.url)
global pageCouner
loupans = response.xpath('//div[@id="newhouse_loupai_list"]/ul//li')
# city = response.xpath('//div[@class="s4Box"]/a/text()').get()
loupans = loupans.xpath('.//div[@class="nlcd_name"]/a/@href').getall()
# urlList = []
# for i in loupans:
# name = i.xpath('.//div[@class="nlcd_name"]/a/text()').get()
# if name is None:
# continue
# name = name.replace('\t','').strip()
# url = i.xpath('.//div[@class="nlcd_name"]/a/@href').get()
# url = response.urljoin(url)
# huxin_ = i.xpath('.//div[@class="house_type clearfix"]//text()').getall()
# huxin = "".join(huxin_).strip().replace('\t','').replace('/','').replace('\n','').replace('-','')
# area_ = i.xpath('.//div[@class="house_type clearfix"]//text()').getall()
# area = "".join(area_).replace('\t','').strip().replace('\n','')
# address = i.xpath('.//div[@class="address"]/a/@title').get()
# tag_ = i.xpath('.//div[@class="fangyuan"]//text()').getall()
# tag = " ".join(tag_).split()
# phone_plat_ = i.xpath('.//div[@class="tel"]/p/text()').getall()
# phone_plat = "".join(phone_plat_).strip()
# unit_price_ = i.xpath('.//div[@class="nhouse_price"]//text()').getall()
# unit_price = "".join(unit_price_).strip()
# kanes = i.xpath('.//div[@class="kanesf"]/text()').get()
# urlList.append(url)
# if not kanes is None:
# kanesf = i.xpath('.//div[@class="kanesf"]/p/a/@href').get()
# unit_price = kanes+' '+kanesf
        #                 # When the new development is sold out, kick off scraping of the resale (esf) page here
# else:
# unit_price = kanes
# item = FangspiderLoupanItem(name=name,url=url,huxin=huxin,
# area=area,address=address,tag=tag,
# phone_plat=phone_plat,unit_price=unit_price,city=city)
# print(item)
# yield item
# next_page
# nextpage = response.xpath('//a[@class="next"]/@href').get()
# if not nextpage is None:
# nextpage = response.urljoin(nextpage)
# yield scrapy.Request(nextpage)
for i in loupans:
if 'newhouse.fang.com' in i:
continue
else:
u = response.urljoin(i)
ml.l3(str(len(loupan_index))+ '---'+u)
loupan_index.append(u)
                ml.l3('Kicking off estate page scrape: '+u)
yield scrapy.Request(u, callback=self.parse_loupanindex, meta={'midtag': False})
next_page_url = response.xpath('//a[contains(text(),"下一页")]/@href').get()
if next_page_url is None:
lis = response.xpath('//li[@class="fr"]/a')
for index,li in enumerate(lis):
if index == 0:
continue
if len(li.re('last'))>0:
break
if len(li.re('active'))>0:
if index+1>=len(lis):
break
url = response.urljoin(lis[index+1].xpath('./@href').get())
yield scrapy.Request(url ,callback=self.parse_family)
break
#print(UnicodeTranslateError)
            # Handle the case of the last four pages
        else:
            pageCouner += 1
            next_page_url = response.urljoin(next_page_url)
            yield scrapy.Request(next_page_url, callback=self.parse_family)
def parse_loupanindex(self, response):
        # print('Detail-page parse started: ' + response.url)
ml.l4(response.url)
url = self.format_url(response.url)
script = response.xpath('//script')
part_dis = script.re('district.*')
if len(part_dis) > 0:
part = part_dis[0].split('"')[1]
else:
part_dis = script.re('address.*')
if len(part_dis) > 0:
part = part_dis[0].split('"')[1]
            else:
                # Ensure `part` is always defined even when neither pattern matches.
                part = ''
compart = response.xpath('//script[@type="text/javascript"]').re('ub_com.*')[0].split('"')[1]
name = response.xpath('//div[@class="tit"]/h1//text()').get()
        other_name = response.xpath('//div[@class="title"]/span/@title').get()
tag_ = response.xpath('//div[@class="biaoqian1"]//text()').getall()
        tag = ' '.join(tag_).strip()
tag = self.format_text(tag)
unit_price_ = response.xpath('//div[@class="inf_left fl mr30"]//text()').getall()
unit_price_ = "".join(unit_price_).strip()
unit_price__ = response.xpath('//div[@class="inf_left fl "]//text()').getall()
if len(unit_price__):
unit_price__ = ''.join(unit_price__).strip()
unit_price = unit_price_ + unit_price__
else:
unit_price = unit_price_
unit_price = self.format_text(unit_price)
huxin_main_ = response.xpath('//div[@class="fl zlhx"]//text()').getall()
huxin_main = "".join(huxin_main_).strip()
huxin_main = self.format_text(huxin_main)
# louaddress_ = response.xpath('//div[@id="xfptxq_B04_12"]//text()').getall()
# louaddress = "".join(louaddress_).strip()
# louaddress = self.format_text(louaddress)
        # Handle projects where this field has no value
louaddress_ = response.xpath('//div[contains(string(),"项目地址")]').re('项目地址.*')[0].split('"')
louaddress_len = len(louaddress_)
louaddress = louaddress_[louaddress_len - 2]
# sale_time = response.xpath('//a[@class="kaipan"]/text()').get()
sale_time = ''
deliver_table = response.xpath('//table[@class="tf"]/tbody//tr')
delivery_time = ''
for tr in deliver_table:
delivery_time += "".join(tr.xpath('.//text()').getall()).strip()
delivery_time = self.format_text(delivery_time)
city = response.xpath('//div[@class="s4Box"]/a/text()').get()
item = NewhouseIndexItem(url=url, name=name, unit_price=unit_price, tag=tag, louaddress=louaddress,
sale_time=sale_time, delivery_time=delivery_time,
huxin_main=huxin_main, other_name=other_name, part=part, compart=compart, city=city)
        ml.l4('Development index item scraped: '+str(item))
#print(item)
yield item
detail_url = response.xpath('//a[contains(text(),"楼盘详情")]/@href').get()
#print('0' * 100 + detail_url)
detail_url = response.urljoin(detail_url)
yield scrapy.Request(detail_url, callback=self.parse_loupanDetail)
        # Opening (sale launch) dates
sale_time_url = response.xpath('//a[@class="kaipan"]/@href').get()
sale_time_url = response.urljoin(sale_time_url)
yield scrapy.Request(sale_time_url, callback=self.parse_sale_time)
        # Handle post (news) info
post_history_url = response.xpath('//a[@id="xfptxq_B03_12"]/@href').get()
post_history_url = response.urljoin(post_history_url)
yield scrapy.Request(post_history_url, callback=self.parse_history_post_index)
        # Scrape delivery (handover) dates
delivery_time_url = response.xpath('//a[contains(text(),"更多交房详情>>")]/@href').get()
delivery_time_url = response.urljoin(delivery_time_url)
yield scrapy.Request(delivery_time_url, callback=self.parse_delivery_time_index)
def parse_delivery_time_index(self, response):
"""抓取交房时间详情页所有的信息
"""
url = self.format_url(response.url)
delivery_list = []
trs = response.xpath('//div[@class="kpjjlu"]//tr')
if len(trs) > 1:
for index, tr in enumerate(trs):
if index == 0:
continue
else:
try:
tds = tr.xpath('.//td/text()').getall()
date = tds[0]
note = tds[1]
except Exception as e:
date = ''
if tds is None:
pass
else:
                            note = 'An error occurred; raw content follows: ' + str(tds)
delivery_list.append({'date': date, 'note': note})
item = NewhouseDeliveryTimeDetailIndex(url=url, delivery_time=delivery_list)
#print(item)
ml.l5("交房详情页:"+str(item))
#print('-'*100)
yield item
def parse_history_post_index(self, response):
url = self.format_url(response.url)
short_post = True
if short_post:
short_post_list = []
lis = response.xpath('//div[@id="gushi_all"]//li')
for li in lis:
tex = li.xpath('.//text()').getall()
text = "".join(tex)
text = self.format_text(text)
short_post_list.append(self.format_red(text))
post = {'url': response.url, 'short_data': short_post_list}
item = NewhouseKaipanPostDetail(url=url, post_list=post)
ml.l5("开盘时间:"+str(item))
#print(item)
yield item
return
else:
if 'item' in response.meta.keys():
item = response.meta['item']
else:
item = NewhouseKaipanPostDetail(url=url, post_list=[])
lis = response.xpath('//div[@id="gushi_all"]//li//a/@href').getall()
for li in lis:
if ',' in li:
url_ = response.urljoin(li)
yield scrapy.Request(url_, meta={'item': item}, callback=self.parse_history_post_index)
                    # Handle issuing the next-page request
else:
url_ = response.urljoin(li)
yield scrapy.Request(url_, meta={'item': item}, callback=self.parse_history_post_detail)
def parse_history_post_detail(self, response):
url = self.format_url(response.url)
if 'item' in response.meta.keys():
item = response.meta['item']
else:
ml.error('&' * 80 + '程序出错 这里应该能获得item对象' + response.url)
return
title = response.xpath('//h1[@class="atc-tit"]/text()').get()
source_ = response.xpath('//h2[@class="atc-source"]//text()').getall()
source = self.format_text("".join(source_).strip().replace('\r', '').replace(' ', ''))
content_ = response.xpath('//div[@class="leftboxcom"]//text()').getall()
content = self.format_text("".join(content_).strip())
item['post_list'].append({'title': title, 'source': source, 'content': content})
syp = response.xpath('//a[@class="syp"]/@href').get()
        if syp is None or 'javascript' in syp:
            ml.l5("News post: "+str(item))
return item
def request_family(self, request):
print(request)
pass
def parse_sale_time(self, response):
trs = response.xpath('//div[@class="kpjjlu"]').xpath('.//tr')
url = self.format_url(response.url)
td_l = []
for index, tr in enumerate(trs):
if index == 0:
continue
td = tr.xpath('td//text()').getall()
if len(td) > 1:
td_l.append({'data': td[0], 'note': td[1]})
elif len(td) == 1:
td_l.append({'data': td[0]})
else:
                print('^' * 20 + 'Possibly not enough data ' + response.url)
return
# for td in tr.xpath('td//text()').getall():
# dic['kaipan'] = td_l
item = NewhouseKaipanDetail(kaipan=td_l, url=url)
#print(item)
ml.l5("开盘详情:"+str(item))
yield item
pass
def parse_loupanDetail(self, response):
        ml.l5('Scraping detail page: '+response.url)
url = self.format_url(response.url)
contents = response.xpath('//div[@class="main-info-price"]/../..').xpath('.//li')[1].get()
contents = self.format_text(contents)
bs = BeautifulSoup(contents, 'lxml')
lis = bs.find_all('li')
poi_tag = 0
poi = ''
        (buiding_type, alright, location, property_, status, marker_address, phone_plat,
         floor_area, gross_area, gross_area_ratio, parking, counter_buidings,
         counter_households, wuye_corp, wuye_cost, wuye_note, status_buidings,
         sale_time) = (None,) * 18
for i in lis:
t = i.get_text().replace(' ', '').replace('\n', '')
if '建筑类别' in t:
buiding_type = t.replace('建筑类别:', '')
elif '装修状况' in t:
pass
            # Not handled
elif '产权年限' in t:
alright = t.replace('产权年限:', '')
elif '环线位置' in t:
location = t.replace('环线位置:', '')
elif '开发商' in t:
property_ = t.replace('开发商:', '')
elif '销售状态' in t:
status = t.replace('销售状态:', '')
elif '开盘时间' in t:
sale_time = t.replace('开盘时间:', '')
elif '售楼地址' in t:
marker_address = t.replace('售楼地址:', '')
elif '主力户型' in t:
huxin_main = t.replace('主力户型:', '')
huxin_main = huxin_main
elif '预售许可证' in t:
pass
            # Only matched here; not processed in this item
elif '咨询电话' in t:
phone_plat = t.replace('咨询电话:', '')
poi_tag = 1
elif '占地面积' in t:
poi_tag = 0
floor_area = t.replace('占地面积:', '')
elif poi_tag:
poi += (t + ' ')
elif '建筑面积' in t:
gross_area = t.replace('建筑面积:', '')
elif '容积率' in t:
gross_area_ratio = t.replace('容积率:', '')
elif '绿化率' in t:
greening_ratio = t.replace('绿化率:', '')
elif '停车位' in t:
parking = t.replace('停车位:', '')
elif '楼栋总数' in t:
counter_buidings = t.replace('楼栋总数:', '')
elif '总户数' in t:
counter_households = t.replace('总户数:', '')
elif '物业公司' in t:
wuye_corp = t.replace('物业公司:', '')
elif '物业费:' in t:
wuye_cost = t.replace('物业费:', '')
elif '物业费描述' in t:
wuye_note = t.replace('物业费描述:', '')
elif '楼层状况' in t:
status_buidings = t.replace('楼层状况:', '')
else:
pass
profile_ = response.xpath('//div[@class="main-item"]/p[@class="intro"]').get()
profile = self.format_text(profile_).replace('<p class="intro">', '').replace('<br>', '').replace('</p>', '')
tbs = response.xpath('//div[@class="main-table"]//table')
        # Handle the case of no presale permit, or other situations
presale_list = []
price_history_list = []
t_presale, t_price_history = None, None
for tb in tbs:
if len(tb.re('绑定楼栋')) > 0:
if t_presale is None:
t_presale = tb.get()
else:
if len(tb.get()) > len(t_presale):
t_presale = tb.get()
if len(tb.re('价格描述')) > 0:
if t_price_history is None:
t_price_history = tb.get()
else:
if len(tb.get()) > len(t_price_history):
t_price_history = tb.get()
if not t_presale is None:
bs_presale = BeautifulSoup(t_presale, 'lxml')
trs = bs_presale.find_all('tr')
for index, tr in enumerate(trs):
if index == 0:
continue
tds = tr.find_all('td')
                td_dict
<reponame>kevinbro96/perceptual-advex
import torch
import torchvision.models as torchvision_models
from torchvision.models.utils import load_state_dict_from_url
import math
from torch import nn
from torch.nn import functional as F
from .vae import CVAE_s2
import pdb
from . import utilities
from .utilities import MarginLoss
CIFAR_MEAN = [0.4914, 0.4822, 0.4465]
CIFAR_STD = [0.2470, 0.2435, 0.2616]
def get_eps_params(base_eps, resol):
eps_list = []
max_list = []
min_list = []
for i in range(3):
eps_list.append(torch.full((resol, resol), base_eps, device='cuda'))
min_list.append(torch.full((resol, resol), 0., device='cuda'))
max_list.append(torch.full((resol, resol), 255., device='cuda'))
eps_t = torch.unsqueeze(torch.stack(eps_list), 0)
max_t = torch.unsqueeze(torch.stack(max_list), 0)
min_t = torch.unsqueeze(torch.stack(min_list), 0)
return eps_t, max_t, min_t
def get_cifar_params(resol):
mean_list = []
std_list = []
for i in range(3):
mean_list.append(torch.full((resol, resol), CIFAR_MEAN[i], device='cuda'))
std_list.append(torch.full((resol, resol), CIFAR_STD[i], device='cuda'))
return torch.unsqueeze(torch.stack(mean_list), 0), torch.unsqueeze(torch.stack(std_list), 0)
class CIFARNORMALIZE(nn.Module):
def __init__(self, resol):
super().__init__()
self.mean, self.std = get_cifar_params(resol)
def forward(self, x):
'''
Parameters:
x: input image with pixels normalized to ([0, 1] - IMAGENET_MEAN) / IMAGENET_STD
'''
x = x.sub(self.mean)
x = x.div(self.std)
return x
class CIFARINNORMALIZE(nn.Module):
def __init__(self, resol):
super().__init__()
self.mean, self.std = get_cifar_params(resol)
def forward(self, x):
'''
Parameters:
x: input image with pixels normalized to ([0, 1] - IMAGENET_MEAN) / IMAGENET_STD
'''
x = x.mul(self.std)
x = x.add(self.mean)
return x
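# A minimal usage sketch for the two modules above (the tensor shape and device are
# assumptions for illustration): CIFARNORMALIZE maps [0, 1] pixels into CIFAR
# mean/std space and CIFARINNORMALIZE inverts it, so chaining the two is
# (numerically) the identity.
#
#     normalize = CIFARNORMALIZE(32)
#     denormalize = CIFARINNORMALIZE(32)
#     x = torch.rand(1, 3, 32, 32, device='cuda')   # hypothetical single-image batch
#     assert torch.allclose(denormalize(normalize(x)), x, atol=1e-5)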
class NoProjection(nn.Module):
def __init__(self, bound, lpips_model):
super().__init__()
def forward(self, inputs, adv_inputs, input_features=None):
return adv_inputs
class NewtonsPerceptualProjection(nn.Module):
def __init__(self, bound, vae, projection_overshoot=1e-1,
max_iterations=100):
super().__init__()
self.bound = bound
self.vae = vae
self.projection_overshoot = projection_overshoot
self.max_iterations = max_iterations
self.cifarnormalize =CIFARNORMALIZE(32)
self.cifarinnormalize = CIFARINNORMALIZE(32)
def forward(self, adv_inputs, xd, input_features):
needs_projection = torch.ones_like(adv_inputs[:, 0]) \
.bool()
needs_projection.requires_grad = False
iteration = 0
while needs_projection.sum() > 0 and iteration < self.max_iterations:
adv_inputs.requires_grad = True
adv_features = self.cifarinnormalize(self.vae(adv_inputs[needs_projection], 'hi-xi')+xd[needs_projection]).\
view(needs_projection.sum(), -1)
adv_l2 = (input_features[needs_projection] -
adv_features).norm(dim=1)
#print("projection iteratoin: {:.0f}, l2 norm = {:.2f}".format(iteration, torch.mean(adv_l2)))
adv_l2.sum().backward()
projection_step_size = (adv_l2- self.bound) \
.clamp(min=0)
projection_step_size[projection_step_size > 0] += \
self.projection_overshoot
grad_norm = adv_inputs.grad.data[needs_projection] \
.view(needs_projection.sum(), -1).norm(dim=1)
inverse_grad = adv_inputs.grad.data[needs_projection] / \
grad_norm[:, None] ** 2
adv_inputs.data[needs_projection] = (
adv_inputs.data[needs_projection] -
projection_step_size[:, None] *
(1 + self.projection_overshoot) *
inverse_grad
).detach()
needs_projection[needs_projection] = \
projection_step_size > 0
iteration += 1
return adv_inputs.detach()
PROJECTIONS = {
'none': NoProjection,
'newtons': NewtonsPerceptualProjection,
}
class FirstOrderStepPerceptualAttack(nn.Module):
def __init__(self, model, bound=0.5, num_iterations=5,
h=1e-3, kappa=1, vae_model=None,
targeted=False, randomize=False
):
"""
Perceptual attack using conjugate gradient to solve the constrained
optimization problem.
bound is the (approximate) bound on the LPIPS distance.
num_iterations is the number of CG iterations to take.
h is the step size to use for finite-difference calculation.
"""
super().__init__()
assert randomize is False
self.model = model
self.bound = bound
self.num_iterations = num_iterations
self.h = h
self.cifarnormalize =CIFARNORMALIZE(32)
self.cifarinnormalize = CIFARINNORMALIZE(32)
self.vae = vae_model
self.loss = MarginLoss(kappa=kappa, targeted=targeted)
def _multiply_matrix(self, v):
"""
If (D phi) is the Jacobian of the features function for the model
at inputs, then approximately calculates
(D phi)T (D phi) v
"""
self.inputs.grad.data.zero_()
with torch.no_grad():
v_features = self.cifarinnormalize(self.vae(self.inputs.detach() + self.h * v, 'hi-xi') + self.xd)
D_phi_v = (
v_features.view(v_features.size(0), -1) -
self.input_features
) / self.h
torch.sum(self.input_features * D_phi_v).backward(retain_graph=True)
return self.inputs.grad.data.clone()
def forward(self, inputs, labels, xd):
self.inputs = inputs
self.xd = xd
inputs.requires_grad = True
x = self.cifarinnormalize(self.vae(inputs, 'hi-xi') + xd)
orig_logits = self.model(x)
self.input_features = x.view(x.size(0), -1)
loss = self.loss(orig_logits, labels)
loss.sum().backward(retain_graph=True)
inputs_grad = inputs.grad.data.clone()
if inputs_grad.abs().max() < 1e-4:
return inputs
# Variable names are from
# https://en.wikipedia.org/wiki/Conjugate_gradient_method#The_resulting_algorithm
x = torch.zeros_like(inputs)
r = inputs_grad - self._multiply_matrix(x)
p = r
for cg_iter in range(self.num_iterations):
r_last = r
p_last = p
x_last = x
del r, p, x
r_T_r = (r_last ** 2).sum(dim=[1])
if r_T_r.max() < 1e-1 and cg_iter > 0:
# If the residual is small enough, just stop the algorithm.
x = x_last
break
A_p_last = self._multiply_matrix(p_last)
# print('|r|^2 =', ' '.join(f'{z:.2f}' for z in r_T_r))
alpha = (
r_T_r /
(p_last * A_p_last).sum(dim=[1])
)[:, None]
x = x_last + alpha * p_last
# These calculations aren't necessary on the last iteration.
if cg_iter < self.num_iterations - 1:
r = r_last - alpha * A_p_last
beta = (
(r ** 2).sum(dim=[1]) /
r_T_r
)[:, None]
p = r + beta * p_last
x_features = self.cifarinnormalize(self.vae(self.inputs.detach() + self.h * x, 'hi-xi') + xd)
D_phi_x = (
x_features.view(x_features.size(0), -1 ) -
self.input_features
) / self.h
lam = (self.bound / D_phi_x.norm(dim=1))[:, None]
inputs_grad_norm = inputs_grad.reshape(
inputs_grad.size()[0], -1).norm(dim=1)
# If the grad is basically 0, don't perturb that input. It's likely
# already misclassified, and trying to perturb it further leads to
# numerical instability.
lam[inputs_grad_norm < 1e-4] = 0
x[inputs_grad_norm < 1e-4] = 0
# print('LPIPS', self.lpips_distance(
# inputs,
# inputs + lam * x,
# ))
return (inputs + lam * x).detach()
class HiddenAttack(nn.Module):
def __init__(self, model, vae, resol=32, bound=1.0, step=None, num_iterations=20,
cg_iterations=5, h=1e-3,
decay_step_size=False, kappa=10,
projection='newtons', randomize=False,
random_targets=False, num_classes=None,):
"""
Iterated version of the conjugate gradient attack.
step_size is the step size in LPIPS distance.
num_iterations is the number of steps to take.
cg_iterations is the conjugate gradient iterations per step.
h is the step size to use for finite-difference calculation.
project is whether or not to project the perturbation into the LPIPS
ball after each step.
"""
super().__init__()
assert randomize is False
self.model = model
self.bound = bound
self.num_iterations = num_iterations
self.decay_step_size = decay_step_size
self.step = step
self.random_targets = random_targets
self.num_classes = num_classes
self.cifarnormalize =CIFARNORMALIZE(resol)
self.cifarinnormalize = CIFARINNORMALIZE(32)
if self.step is None:
if self.decay_step_size:
self.step = self.bound
else:
self.step = 2 * self.bound / self.num_iterations
self.vae = vae
self.first_order_step = FirstOrderStepPerceptualAttack(
model, bound=self.step, num_iterations=cg_iterations, h=h,
kappa=kappa, vae_model=self.vae,
targeted=self.random_targets)
self.projection = PROJECTIONS[projection](self.bound, self.vae)
def _attack(self, inputs, labels):
#pdb.set_trace()
with torch.no_grad():
hi = self.vae(self.cifarnormalize(inputs),'x-hi')
xi = self.vae(hi,'hi-xi')
xd = self.cifarnormalize(inputs) - xi
x_features = inputs.view(inputs.size(0), -1) #3x32x32
start_perturbations = torch.zeros_like(hi)
start_perturbations.normal_(0, 0.1)
adv_hi = hi + start_perturbations
for attack_iter in range(self.num_iterations):
if self.decay_step_size:
step_size = self.step * \
0.1 ** (attack_iter / self.num_iterations)
self.first_order_step.bound = step_size
adv_hi = self.first_order_step(adv_hi, labels, xd.detach())
print("attack iteratoin: %d" %(attack_iter))
adv_hi = self.projection( adv_hi, xd.detach, x_features)
adv_inputs = self.cifarinnormalize(self.vae(adv_hi, 'hi-xi') + xd)
return adv_inputs
def forward(self, inputs, labels):
if self.random_targets:
return utilities.run_attack_with_random_targets(
self._attack,
self.model,
inputs,
labels,
self.num_classes,
)
else:
return self._attack(inputs, labels)
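# A minimal sketch of how HiddenAttack is intended to be invoked (the classifier,
# VAE instance, and input shapes are assumptions, not taken from this file):
#
#     attack = HiddenAttack(model, vae, bound=1.0, num_iterations=20)
#     adv_inputs = attack(inputs, labels)      # inputs: [N, 3, 32, 32] in [0, 1]
#     adv_logits = model(adv_inputs)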
class LagrangeHiddenAttack(nn.Module):
def __init__(self, model, vae,
bound=1.0, step=None, num_iterations=20,
binary_steps=5, h=0.1, kappa=1,
projection='newtons', decay_step_size=True,
num_classes=None,
randomize=False, random_targets=False):
"""
Perceptual attack using a Lagrangian relaxation of the
LPIPS-constrainted optimization problem.
bound is the (soft) bound on the LPIPS distance.
step is the LPIPS step size.
num_iterations is the number of steps to take.
lam is the lambda value multiplied by the regularization term.
h is the step size to use for finite-difference calculation.
lpips_model is the model to use to calculate LPIPS or 'self' or
'alexnet'
"""
super().__init__()
assert randomize is False
self.model = model
self.bound = bound
self.decay_step_size = decay_step_size
self.num_iterations = num_iterations
if step is None:
if self.decay_step_size:
self.step = self.bound
else:
self.step = self.bound * 2 / self.num_iterations
else:
self.step = step
self.binary_steps = binary_steps
self.h = h
self.random_targets = random_targets
self.num_classes = num_classes
self.vae = vae
self.cifarnormalize = CIFARNORMALIZE(32)
self.cifarinnormalize = CIFARINNORMALIZE(32)
self.loss = MarginLoss(kappa=kappa, targeted=self.random_targets)
self.projection = PROJECTIONS[projection](self.bound, self.vae)
def threat_model_contains(self, inputs, adv_inputs):
"""
Returns a boolean tensor which indicates if each of the given
adversarial examples given is within this attack's threat model for
the given natural input.
"""
return self.lpips_distance(inputs, adv_inputs) <= self.bound
def _attack(self, imgs, labels):
with torch.no_grad():
hi = self.vae(self.cifarnormalize(imgs), 'x-hi')
xi = self.vae(hi, 'hi-xi')
xd = self.cifarnormalize(imgs) - xi
perturbations = torch.zeros_like(hi)
perturbations.normal_(0, 0.1)
perturbations.requires_grad = True
batch_size = imgs.shape[0]
step_size = self.step
lam = 0.01 * torch.ones(batch_size, device=imgs.device)
input_features = imgs.view(batch_size, -1).detach()
live = torch.ones(batch_size, device=imgs.device, dtype=torch.bool)
for binary_iter in range(self.binary_steps):
for attack_iter in range(self.num_iterations):
if self.decay_step_size:
step_size = self.step * \
(0.1 ** (attack_iter / self.num_iterations))
else:
step_size = self.step
if perturbations.grad is not None:
perturbations.grad.data.zero_()
adv_hi = (hi + perturbations)[live]
adv_features = self.cifarinnormalize(self.vae(adv_hi, 'hi-xi') + xd[live])
adv_logits = self.model(adv_features)
adv_labels = adv_logits.argmax(1)
adv_loss = self.loss(adv_logits, labels[live])
adv_features = adv_features.view(live.sum(), -1)
l2_dists = (adv_features - input_features[live]).norm(dim=1)
all_l2_dists = torch.zeros(batch_size, device=imgs.device)
all_l2_dists[live] = l2_dists
loss = -adv_loss + lam[live] * F.relu(l2_dists - self.bound)
loss.sum().backward()
grad = perturbations.grad.data[live]
grad_normed = grad / \
(grad.reshape(grad.size()[0], -1).norm(dim=1)
[:, None] + 1e-8)
dist_grads = (
input_directories(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['InputDirectoryArgs']]]]):
pulumi.set(self, "input_directories", value)
@property
@pulumi.getter(name="jobName")
def job_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the job within the specified resource group. Job names can only contain a combination of alphanumeric characters along with dash (-) and underscore (_). The name must be from 1 through 64 characters long.
"""
return pulumi.get(self, "job_name")
@job_name.setter
def job_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "job_name", value)
@property
@pulumi.getter(name="jobPreparation")
def job_preparation(self) -> Optional[pulumi.Input['JobPreparationArgs']]:
"""
A command line to be executed on each node allocated for the job before tool kit is launched.
"""
return pulumi.get(self, "job_preparation")
@job_preparation.setter
def job_preparation(self, value: Optional[pulumi.Input['JobPreparationArgs']]):
pulumi.set(self, "job_preparation", value)
@property
@pulumi.getter(name="mountVolumes")
def mount_volumes(self) -> Optional[pulumi.Input['MountVolumesArgs']]:
"""
Information on mount volumes to be used by the job. These volumes will be mounted before the job execution and will be unmounted after the job completion. The volumes will be mounted at location specified by $AZ_BATCHAI_JOB_MOUNT_ROOT environment variable.
"""
return pulumi.get(self, "mount_volumes")
@mount_volumes.setter
def mount_volumes(self, value: Optional[pulumi.Input['MountVolumesArgs']]):
pulumi.set(self, "mount_volumes", value)
@property
@pulumi.getter(name="outputDirectories")
def output_directories(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OutputDirectoryArgs']]]]:
"""
A list of output directories for the job.
"""
return pulumi.get(self, "output_directories")
@output_directories.setter
def output_directories(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['OutputDirectoryArgs']]]]):
pulumi.set(self, "output_directories", value)
@property
@pulumi.getter(name="pyTorchSettings")
def py_torch_settings(self) -> Optional[pulumi.Input['PyTorchSettingsArgs']]:
"""
Settings for pyTorch job.
"""
return pulumi.get(self, "py_torch_settings")
@py_torch_settings.setter
def py_torch_settings(self, value: Optional[pulumi.Input['PyTorchSettingsArgs']]):
pulumi.set(self, "py_torch_settings", value)
@property
@pulumi.getter(name="schedulingPriority")
def scheduling_priority(self) -> Optional[pulumi.Input[Union[str, 'JobPriority']]]:
"""
Scheduling priority associated with the job. Possible values: low, normal, high.
"""
return pulumi.get(self, "scheduling_priority")
@scheduling_priority.setter
def scheduling_priority(self, value: Optional[pulumi.Input[Union[str, 'JobPriority']]]):
pulumi.set(self, "scheduling_priority", value)
@property
@pulumi.getter
def secrets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentVariableWithSecretValueArgs']]]]:
"""
A list of user defined environment variables with secret values which will be setup for the job. Server will never report values of these variables back.
"""
return pulumi.get(self, "secrets")
@secrets.setter
def secrets(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentVariableWithSecretValueArgs']]]]):
pulumi.set(self, "secrets", value)
@property
@pulumi.getter(name="tensorFlowSettings")
def tensor_flow_settings(self) -> Optional[pulumi.Input['TensorFlowSettingsArgs']]:
"""
Settings for Tensor Flow job.
"""
return pulumi.get(self, "tensor_flow_settings")
@tensor_flow_settings.setter
def tensor_flow_settings(self, value: Optional[pulumi.Input['TensorFlowSettingsArgs']]):
pulumi.set(self, "tensor_flow_settings", value)
class Job(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
caffe2_settings: Optional[pulumi.Input[pulumi.InputType['Caffe2SettingsArgs']]] = None,
caffe_settings: Optional[pulumi.Input[pulumi.InputType['CaffeSettingsArgs']]] = None,
chainer_settings: Optional[pulumi.Input[pulumi.InputType['ChainerSettingsArgs']]] = None,
cluster: Optional[pulumi.Input[pulumi.InputType['ResourceIdArgs']]] = None,
cntk_settings: Optional[pulumi.Input[pulumi.InputType['CNTKsettingsArgs']]] = None,
constraints: Optional[pulumi.Input[pulumi.InputType['JobBasePropertiesConstraintsArgs']]] = None,
container_settings: Optional[pulumi.Input[pulumi.InputType['ContainerSettingsArgs']]] = None,
custom_mpi_settings: Optional[pulumi.Input[pulumi.InputType['CustomMpiSettingsArgs']]] = None,
custom_toolkit_settings: Optional[pulumi.Input[pulumi.InputType['CustomToolkitSettingsArgs']]] = None,
environment_variables: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EnvironmentVariableArgs']]]]] = None,
experiment_name: Optional[pulumi.Input[str]] = None,
horovod_settings: Optional[pulumi.Input[pulumi.InputType['HorovodSettingsArgs']]] = None,
input_directories: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InputDirectoryArgs']]]]] = None,
job_name: Optional[pulumi.Input[str]] = None,
job_preparation: Optional[pulumi.Input[pulumi.InputType['JobPreparationArgs']]] = None,
mount_volumes: Optional[pulumi.Input[pulumi.InputType['MountVolumesArgs']]] = None,
node_count: Optional[pulumi.Input[int]] = None,
output_directories: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OutputDirectoryArgs']]]]] = None,
py_torch_settings: Optional[pulumi.Input[pulumi.InputType['PyTorchSettingsArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
scheduling_priority: Optional[pulumi.Input[Union[str, 'JobPriority']]] = None,
secrets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EnvironmentVariableWithSecretValueArgs']]]]] = None,
std_out_err_path_prefix: Optional[pulumi.Input[str]] = None,
tensor_flow_settings: Optional[pulumi.Input[pulumi.InputType['TensorFlowSettingsArgs']]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Information about a Job.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['Caffe2SettingsArgs']] caffe2_settings: Settings for Caffe2 job.
:param pulumi.Input[pulumi.InputType['CaffeSettingsArgs']] caffe_settings: Settings for Caffe job.
:param pulumi.Input[pulumi.InputType['ChainerSettingsArgs']] chainer_settings: Settings for Chainer job.
:param pulumi.Input[pulumi.InputType['ResourceIdArgs']] cluster: Resource ID of the cluster on which this job will run.
:param pulumi.Input[pulumi.InputType['CNTKsettingsArgs']] cntk_settings: Settings for CNTK (aka Microsoft Cognitive Toolkit) job.
:param pulumi.Input[pulumi.InputType['JobBasePropertiesConstraintsArgs']] constraints: Constraints associated with the Job.
:param pulumi.Input[pulumi.InputType['ContainerSettingsArgs']] container_settings: Docker container settings for the job. If not provided, the job will run directly on the node.
:param pulumi.Input[pulumi.InputType['CustomMpiSettingsArgs']] custom_mpi_settings: Settings for custom MPI job.
:param pulumi.Input[pulumi.InputType['CustomToolkitSettingsArgs']] custom_toolkit_settings: Settings for custom tool kit job.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EnvironmentVariableArgs']]]] environment_variables: A list of user defined environment variables which will be setup for the job.
:param pulumi.Input[str] experiment_name: The name of the experiment. Experiment names can only contain a combination of alphanumeric characters along with dash (-) and underscore (_). The name must be from 1 through 64 characters long.
:param pulumi.Input[pulumi.InputType['HorovodSettingsArgs']] horovod_settings: Settings for Horovod job.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InputDirectoryArgs']]]] input_directories: A list of input directories for the job.
:param pulumi.Input[str] job_name: The name of the job within the specified resource group. Job names can only contain a combination of alphanumeric characters along with dash (-) and underscore (_). The name must be from 1 through 64 characters long.
:param pulumi.Input[pulumi.InputType['JobPreparationArgs']] job_preparation: A command line to be executed on each node allocated for the job before tool kit is launched.
:param pulumi.Input[pulumi.InputType['MountVolumesArgs']] mount_volumes: Information on mount volumes to be used by the job. These volumes will be mounted before the job execution and will be unmounted after the job completion. The volumes will be mounted at location specified by $AZ_BATCHAI_JOB_MOUNT_ROOT environment variable.
:param pulumi.Input[int] node_count: Number of compute nodes to run the job on. The job will be gang scheduled on that many compute nodes.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OutputDirectoryArgs']]]] output_directories: A list of output directories for the job.
:param pulumi.Input[pulumi.InputType['PyTorchSettingsArgs']] py_torch_settings: Settings for pyTorch job.
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
:param pulumi.Input[Union[str, 'JobPriority']] scheduling_priority: Scheduling priority associated with the job. Possible values: low, normal, high.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EnvironmentVariableWithSecretValueArgs']]]] secrets: A list of user defined environment variables with secret values which will be setup for the job. Server will never report values of these variables back.
:param pulumi.Input[str] std_out_err_path_prefix: The path where the Batch AI service will store stdout, stderror and execution log of the job.
:param pulumi.Input[pulumi.InputType['TensorFlowSettingsArgs']] tensor_flow_settings: Settings for Tensor Flow job.
:param pulumi.Input[str] workspace_name: The name of the workspace. Workspace names can only contain a combination of alphanumeric characters along with dash (-) and underscore (_). The name must be from 1 through 64 characters long.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: JobArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Information about a Job.
:param str resource_name: The name of the resource.
:param JobArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(JobArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
caffe2_settings: Optional[pulumi.Input[pulumi.InputType['Caffe2SettingsArgs']]] = None,
caffe_settings: Optional[pulumi.Input[pulumi.InputType['CaffeSettingsArgs']]] = None,
chainer_settings: Optional[pulumi.Input[pulumi.InputType['ChainerSettingsArgs']]] = None,
cluster: Optional[pulumi.Input[pulumi.InputType['ResourceIdArgs']]] = None,
cntk_settings: Optional[pulumi.Input[pulumi.InputType['CNTKsettingsArgs']]] = None,
constraints: Optional[pulumi.Input[pulumi.InputType['JobBasePropertiesConstraintsArgs']]] = None,
container_settings: Optional[pulumi.Input[pulumi.InputType['ContainerSettingsArgs']]] = None,
custom_mpi_settings: Optional[pulumi.Input[pulumi.InputType['CustomMpiSettingsArgs']]] = None,
custom_toolkit_settings: Optional[pulumi.Input[pulumi.InputType['CustomToolkitSettingsArgs']]] = None,
environment_variables: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EnvironmentVariableArgs']]]]] = None,
experiment_name: Optional[pulumi.Input[str]] = None,
horovod_settings: Optional[pulumi.Input[pulumi.InputType['HorovodSettingsArgs']]] = None,
input_directories: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InputDirectoryArgs']]]]] = None,
job_name: Optional[pulumi.Input[str]] = None,
job_preparation: Optional[pulumi.Input[pulumi.InputType['JobPreparationArgs']]] = None,
mount_volumes: Optional[pulumi.Input[pulumi.InputType['MountVolumesArgs']]] = None,
node_count: Optional[pulumi.Input[int]] = None,
output_directories: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OutputDirectoryArgs']]]]] = None,
py_torch_settings: Optional[pulumi.Input[pulumi.InputType['PyTorchSettingsArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
scheduling_priority: Optional[pulumi.Input[Union[str, 'JobPriority']]] = None,
secrets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EnvironmentVariableWithSecretValueArgs']]]]] = None,
std_out_err_path_prefix: Optional[pulumi.Input[str]] = None,
tensor_flow_settings: Optional[pulumi.Input[pulumi.InputType['TensorFlowSettingsArgs']]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = JobArgs.__new__(JobArgs)
__props__.__dict__["caffe2_settings"] = caffe2_settings
__props__.__dict__["caffe_settings"] = caffe_settings
__props__.__dict__["chainer_settings"] = chainer_settings
if cluster is None and not opts.urn:
raise TypeError("Missing required property 'cluster'")
__props__.__dict__["cluster"] = cluster
__props__.__dict__["cntk_settings"] = cntk_settings
__props__.__dict__["constraints"] = constraints
__props__.__dict__["container_settings"] = container_settings
__props__.__dict__["custom_mpi_settings"] = custom_mpi_settings
__props__.__dict__["custom_toolkit_settings"] = custom_toolkit_settings
__props__.__dict__["environment_variables"] = environment_variables
if experiment_name is None and not opts.urn:
raise TypeError("Missing required property 'experiment_name'")
__props__.__dict__["experiment_name"] = experiment_name
__props__.__dict__["horovod_settings"] = horovod_settings
__props__.__dict__["input_directories"] = input_directories
__props__.__dict__["job_name"] = job_name
__props__.__dict__["job_preparation"] = job_preparation
__props__.__dict__["mount_volumes"] = mount_volumes
if node_count is None and not opts.urn:
raise TypeError("Missing required property 'node_count'")
__props__.__dict__["node_count"] = node_count
__props__.__dict__["output_directories"] = output_directories
__props__.__dict__["py_torch_settings"] = py_torch_settings
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if scheduling_priority is None:
scheduling_priority = 'normal'
__props__.__dict__["scheduling_priority"] = scheduling_priority
__props__.__dict__["secrets"] = secrets
if std_out_err_path_prefix is None and not opts.urn:
raise TypeError("Missing required property 'std_out_err_path_prefix'")
__props__.__dict__["std_out_err_path_prefix"] = std_out_err_path_prefix
__props__.__dict__["tensor_flow_settings"] = tensor_flow_settings
if workspace_name is None and not opts.urn:
raise TypeError("Missing required property 'workspace_name'")
__props__.__dict__["workspace_name"] = workspace_name
__props__.__dict__["creation_time"] = None
__props__.__dict__["execution_info"] = None
__props__.__dict__["execution_state"] = None
__props__.__dict__["execution_state_transition_time"] = None
__props__.__dict__["job_output_directory_path_segment"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["provisioning_state_transition_time"] | |
<reponame>ryanp543/autotuner
#!/usr/bin/env python
"""
Python program that calculates the necessary constants needed to find bounds on alpha and PID gains. Running this file
generates a .csv file containing all these constants as well as the bounds on alpha.
Date 2/2/2021
"""
import sys
import csv
import numpy as np
import scipy.optimize
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
from matplotlib import colors
from sympy import symbols, Eq, solve
import klampt
from SuspensionMatrices import Suspension_8legs
# NOTE: Edit for specific robot
FILEPATH = './robot_sim.xml'
FILEPATH_CSV = './GenerateGainsConstants.csv'
# Function: Optimization Callback
# After ever iteration, the cost value is added to a list for future plotting of cost over iteration
def fminCallback(states_current_iter):
global cost, iteration, iter_count, CURRENT_FUNCTION, robot, sus
iter_count = iter_count + 1
iteration.append(iter_count)
cost.append(CURRENT_FUNCTION(states_current_iter, robot, sus))
# Function: Plot Cost
# Plots the cost value with respect to optimizer iteration
def costPlot():
global cost, iteration
plt.plot(iteration, cost)
plt.ylabel("Cost")
plt.xlabel("Iteration")
plt.show()
# Function: Max Eigenvalue H
# Returns the maximum eigenvalue of the H matrix for a given state. Used with minimizer to find maximum possible
# eigenvalue of the H matrix
def FindMaxEigH(var_init, robot, sus):
state = [0, 0, var_init[0], 0]
state.extend(var_init[1:])
robot.setConfig(state)
H_array = np.asarray(robot.getMassMatrix())
H = np.delete(H_array, [0,1,3], 0)
H = np.delete(H, [0,1,3], 1)
w, v = np.linalg.eig(H)
return -max(w)
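# A minimal sketch of how these eigenvalue objectives are meant to be driven (the
# optimizer call, initial guess, and state dimension are assumptions, not taken from
# this file): each Find* function returns the negated quantity, so minimizing it
# yields the maximum, and fminCallback records the cost per iteration through the
# CURRENT_FUNCTION global.
#
#     CURRENT_FUNCTION = FindMaxEigH
#     var_init = np.zeros(10)                      # hypothetical starting state
#     result = scipy.optimize.minimize(FindMaxEigH, var_init, args=(robot, sus),
#                                      callback=fminCallback)
#     max_eig_H = -result.fun                      # undo the sign flip
#     costPlot()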
# Function: Min Eigenvalue H
# Returns the minimum eigenvalue of the H matrix for a given state. Used with minimizer to find smallest possible
# eigenvalue of the H matrix
def FindMinEigH(var_init, robot, sus):
state = [0, 0, var_init[0], 0]
state.extend(var_init[1:])
robot.setConfig(state)
H_array = np.asarray(robot.getMassMatrix())
H = np.delete(H_array, [0,1,3], 0)
H = np.delete(H, [0,1,3], 1)
w, v = np.linalg.eig(H)
return min(w)
# Function: Max Eigenvalue K
# Returns the maximum eigenvalue of the stiffness matrix. Used with minimizer to find largest possible eigenvalue of
# the stiffness matrix.
def FindMaxEigK(var_init, robot, sus):
K = sus.GetStiffnessMatrix(qPitch=var_init[1], qRoll=var_init[0])
w, v = np.linalg.eig(K)
return -max(w)
# Function: Max Eigenvalue B
# Returns the maximum eigenvalue of the damping matrix. Used with minimizer to find largest possible eigenvalue of
# the damping matrix.
def FindMaxEigB(var_init, robot, sus):
B = sus.GetDampingMatrix(qPitch=var_init[1], qRoll=var_init[0])
w, v = np.linalg.eig(B)
return -max(w)
# Function: Maximum Gravity Magnitude
# Calculates and returns the magnitude of the gravity vector for a given robot state. Used with the minimizer to find
# the largest possible magnitude.
def FindMaxG(var_init, robot, sus):
gravity = (0, 0, -9.81)
x = [0, 0, var_init[0], 0] + list(var_init[1:])
robot.setConfig(x)
G = robot.getGravityForces(gravity)
return -np.linalg.norm(G)
# Function: Find Constant kG
# Calculates the constant kG for determining alpha.
def FindKg(var_init, robot, sus):
gravity = (0, 0, -9.81)
list_dGdx = []
list_max = []
x = [0, 0, 0, 0] + list(var_init[0:len(var_init)]) # roll pitch 4dof
dx = 0.001 # [0, 0, 0, 0] + list(var_init[len(var_init)/2:len(var_init)])
x_original = x[:]
# For each individual DOF derivative (with the other derivatives held at 0), dGdx is calculated
for j in range(4, len(x)):
x = x_original[:]
robot.setConfig(x)
G1 = robot.getGravityForces(gravity)
x[j] = x[j] + dx
robot.setConfig(x)
G2 = robot.getGravityForces(gravity)
for i in range(4, len(G1)): # range(4, len(G1)):
list_dGdx.append(abs((G2[i]-G1[i])/dx))
# Find the maximum dGdx of dx[j]
list_max.append(max(list_dGdx))
list_dGdx = []
# Returns the maximum dGdx overall
return -max(list_max)
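# Worked note (added for clarity; not part of the original script): each value appended above is a one-sided
# finite-difference estimate of a gravity-torque derivative, dG_i/dx_j ~= (G_i(x + dx*e_j) - G_i(x)) / dx with
# dx = 0.001, and kG is taken as the largest such magnitude over the perturbed joints.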
# Function: Find Constant kK
# Calculates the kK constant used for determining alpha
def FindKk(var_init, robot, sus):
list_dKdx = []
list_max = []
x_p = [var_init[0], var_init[1]] # only pitch and roll are considered
dx_p = 0.001 #var_init[2:4]
# print dx_p
x_p_original = x_p[:]
# For each relevant passive state derivative
for k in range(len(x_p)):
x_p = x_p_original[:]
K1 = sus.GetStiffnessMatrix(qPitch=x_p[1], qRoll=x_p[0])
x_p[k] = x_p[k] + dx_p # [k]
K2 = sus.GetStiffnessMatrix(qPitch=x_p[1], qRoll=x_p[0])
for i in range(len(K1)):
for j in range(len(K1[0])):
dK = K2[i][j]-K1[i][j]
list_dKdx.append(abs(dK/dx_p)) # 1D array of dK/dx for x[elem]
list_max.append(max(list_dKdx))
list_dKdx = []
return -(len(var_init)/2)*max(list_max)
# Function: Find Constant kB
# Calculates the kB constant used for determining alpha
def FindKb(var_init, robot, sus):
list_dBdx = []
list_max = []
x_p = [var_init[0], var_init[1]]
dx_p = 0.001 # var_init[2:4]
x_p_original = x_p[:]
# For each relevant passive state variable
for elem in range(len(x_p)):
x_p = x_p_original[:]
B1 = sus.GetDampingMatrix(qPitch=x_p[1], qRoll=x_p[0])
x_p[elem] = x_p[elem] + dx_p
B2 = sus.GetDampingMatrix(qPitch=x_p[1], qRoll=x_p[0])
for i in range(len(B1)):
for j in range(len(B1[0])):
dB = B2[i][j]-B1[i][j]
list_dBdx.append(abs(dB/dx_p)) # 1D array of dB/dx for x[elem]
list_max.append(max(list_dBdx))
list_dBdx = []
return -(len(var_init)/2)*max(list_max) # scaled by the number of passive states (len(var_init)/2)
# Function: Find Constant kC
# Calculates the kC constant used for plotting stability regions
def FindKc(var_init, robot, sus):
# This sub-function calculates the coriolis constant kC using scipy's fmin() function for a specific state vector
# and provided velocity
def GetMaxKc(x0, dx, robot):
x = [0, 0, x0[0], 0] + list(x0[1:])
robot.setConfig(x)
C_v = np.asarray(robot.getCoriolisForces())
kC = np.linalg.norm(C_v) / (np.linalg.norm(dx) ** 2)
return -kC
dx = [0, 0, var_init[0], 0] + list(var_init[1:])
robot.setVelocity(dx)
x0 = [0] * (robot.numLinks()-3) # z, p, r, 4 dof (the -3 is ignoring x y and yaw)
kC_states = scipy.optimize.fmin(GetMaxKc, x0, args=(dx, robot), disp=False)
return GetMaxKc(kC_states, dx, robot)
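# Illustrative note (added; not part of the original script): FindKc returns the negated Coriolis constant,
# mirroring the other Find* helpers, so a hypothetical caller recovers it as kC = -FindKc(dq_seed, robot, sus)
# and then uses it as the bound ||C(q, dq) @ dq|| <= kC * ||dq||**2 for any velocity dq.
#
#   dq_seed = [0.1] * (robot.numLinks() - 3)   # assumed seed: z, roll, pitch + 4 DOF velocities
#   kC = -FindKc(dq_seed, robot, sus)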
# Function: Solve Passive States
# This function returns the squared norm of the static equilibrium residual (gravity plus suspension stiffness
# forces) for a candidate set of passive states. The squared norm is returned so that a minimizing optimizer can
# be used to solve for the passive states.
def SolveXp(x_p, sus, robot, var_init, gravity):
x = [0, 0, x_p[0], 0, x_p[1], x_p[2]] + list(var_init)
robot.setConfig(x)
G = robot.getGravityForces(gravity)
K = np.asarray(sus.GetStiffnessMatrix(qPitch=x_p[2], qRoll=x_p[1]))
G_p = np.array([G[2], G[4], G[5]])
v = G_p + np.dot(K, x_p)
return np.dot(v, v)
# Function: Find Constant kX
# Calculates the magnitude of the passive states by solving the SolveXp() function. Used with optimizer to find the kX
# constant
def FindKx(var_init, robot, sus):
gravity = (0, 0, -9.81)
x_p = [0, 0, 0]
x_p_optimized = scipy.optimize.fmin(SolveXp, x_p, args=(sus, robot, var_init, gravity), xtol=0.000001, disp=False)
return -np.linalg.norm(x_p_optimized)
# Function: Find Minimum K Eigenvalue (Passive States)
# Finds the final passive states using the SolveXp() function and then uses this "optimized" passive state to calculate
# the eigenvalues of the stiffness matrix.
def FindMinEigK_passive(var_init, robot, sus):
gravity = (0, 0, -9.81)
x_p = [0, 0, 0]
x_p_optimized = scipy.optimize.fmin(SolveXp, x_p, args=(sus, robot, var_init, gravity), xtol=0.000001, disp=False)
K_p = sus.GetStiffnessMatrix(qPitch=x_p_optimized[2], qRoll=x_p_optimized[1])
w, v = np.linalg.eig(K_p)
return min(w)
# Function: Find Minimum B Eigenvalue (Passive States)
# Finds the final passive states using the SolveXp() function and then uses this "optimized" passive state to calculate
# the eigenvalues of the damping matrix.
def FindMinEigB_passive(var_init, robot, sus):
gravity = (0, 0, -9.81)
x_p = [0, 0, 0]
x_p_optimized = scipy.optimize.fmin(SolveXp, x_p, args=(sus, robot, var_init, gravity), xtol=0.000001, disp=False)
B_p = sus.GetDampingMatrix(qPitch=x_p_optimized[2], qRoll=x_p_optimized[1])
w, v = np.linalg.eig(B_p)
return min(w)
# Function: Calculates All Constants and Eigenvalues
# This calculates all constants and eigenvalues necessary for finding the bounds on alpha and PID gains.
def GetConstants(robot, sus):
# For plotting cost function only
global CURRENT_FUNCTION, iter_count, iteration, cost
iter_count = 0
iteration = []
cost = []
# Prints out data about the robot
print("Number of DOFs: ", robot.numLinks())
print("Number of Links: ", robot.numDrivers())
total_mass = 0
for index in range(robot.numLinks()):
link = robot.link(index)
# print link.getName()
total_mass = total_mass + link.getMass().mass
# Initializes alphas and constants list
alphas = []
constants = []
# Initializing state vectors
x_init = [0] * robot.numLinks()
dx_init = [0.1] * robot.numLinks()
states = x_init + dx_init
# CALCULATING MAX EIGEN VALUE OF H MATRIX (z, qroll, qpitch, 4 DOF)
print "Calculating Max Eig H..."
CURRENT_FUNCTION = FindMaxEigH
stateMaxEigH = scipy.optimize.fmin(FindMaxEigH, states[2:3]+states[4:robot.numLinks()], args=(robot,sus), maxiter=1500) # callback=fminCallback)
maxEigH = -FindMaxEigH(stateMaxEigH, robot, sus)
# CALCULATING MIN EIGENVALUE OF H MATRIX (z, qroll, qpitch, 4 DOF)
print "Calculating Min Eig H..."
CURRENT_FUNCTION = FindMinEigH
stateMinEigH = scipy.optimize.fmin(FindMinEigH, states[2:3]+states[4:robot.numLinks()], args=(robot,sus), maxiter=1500) # callback=fminCallback)
minEigH = FindMinEigH(stateMinEigH, robot, sus)
# CALCULATING MAX MAGNITUDE G VECTOR (z, qroll, qpitch, 4 DOF)
print "Calculating Max G magnitude..."
CURRENT_FUNCTION = FindMaxG
stateMaxG = scipy.optimize.fmin(FindMaxG, states[2:3]+states[4:robot.numLinks()], args=(robot,sus), maxiter=1500) # callback=fminCallback)
maxG = -FindMaxG(stateMaxG, robot, sus)
# CALCULATING MAX K_G OF dg_i/dx_j MATRIX (qroll, qpitch, 4 DOF + velocities)
print "Calculating Kg..."
CURRENT_FUNCTION = FindKg
stateMaxKg = scipy.optimize.fmin(FindKg, states[4:robot.numLinks()], args=(robot,sus), maxiter=4000) # callback=fminCallback) +states[14:20]
kG = -FindKg(stateMaxKg, robot, sus)
# CALCULATING MAX K_K OF dK/dx MATRIX (qpitch, qroll, | |
<filename>vim/vim.symlink/plugin/py/lib/evernote/edam/notestore/ttypes.py<gh_stars>0
#
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
from thrift.Thrift import *
import evernote.edam.userstore.ttypes
import evernote.edam.type.ttypes
import evernote.edam.error.ttypes
import evernote.edam.limits.ttypes
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class SyncState(object):
"""
This structure encapsulates the information about the state of the
user's account for the purpose of "state based" synchronization.
<dl>
<dt>currentTime</dt>
<dd>
The server's current date and time.
</dd>
<dt>fullSyncBefore</dt>
<dd>
The cutoff date and time for client caches to be
updated via incremental synchronization. Any clients that were last
synched with the server before this date/time must do a full resync of all
objects. This cutoff point will change over time as archival data is
deleted or special circumstances on the service require resynchronization.
</dd>
<dt>updateCount</dt>
<dd>
Indicates the total number of transactions that have
been committed within the account. This reflects (for example) the
number of discrete additions or modifications that have been made to
the data in this account (tags, notes, resources, etc.).
This number is the "high water mark" for Update Sequence Numbers (USN)
within the account.
</dd>
<dt>uploaded</dt>
<dd>
The total number of bytes that have been uploaded to
this account in the current monthly period. This can be compared against
Accounting.uploadLimit (from the UserStore) to determine how close the user
is to their monthly upload limit.
This value may not be present if the SyncState has been retrieved by
a caller that only has read access to the account.
</dd>
</dl>
Attributes:
- currentTime
- fullSyncBefore
- updateCount
- uploaded
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'currentTime', None, None, ), # 1
(2, TType.I64, 'fullSyncBefore', None, None, ), # 2
(3, TType.I32, 'updateCount', None, None, ), # 3
(4, TType.I64, 'uploaded', None, None, ), # 4
)
def __init__(self, currentTime=None, fullSyncBefore=None, updateCount=None, uploaded=None,):
self.currentTime = currentTime
self.fullSyncBefore = fullSyncBefore
self.updateCount = updateCount
self.uploaded = uploaded
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.currentTime = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.fullSyncBefore = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.updateCount = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.uploaded = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('SyncState')
if self.currentTime != None:
oprot.writeFieldBegin('currentTime', TType.I64, 1)
oprot.writeI64(self.currentTime)
oprot.writeFieldEnd()
if self.fullSyncBefore != None:
oprot.writeFieldBegin('fullSyncBefore', TType.I64, 2)
oprot.writeI64(self.fullSyncBefore)
oprot.writeFieldEnd()
if self.updateCount != None:
oprot.writeFieldBegin('updateCount', TType.I32, 3)
oprot.writeI32(self.updateCount)
oprot.writeFieldEnd()
if self.uploaded != None:
oprot.writeFieldBegin('uploaded', TType.I64, 4)
oprot.writeI64(self.uploaded)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class SyncChunk(object):
"""
This structure is given out by the NoteStore when a client asks to
receive the current state of an account. The client asks for the server's
state one chunk at a time in order to allow clients to retrieve the state
of a large account without needing to transfer the entire account in
a single message.
The server always gives SyncChunks using an ascending series of Update
Sequence Numbers (USNs).
<dl>
<dt>currentTime</dt>
<dd>
The server's current date and time.
</dd>
<dt>chunkHighUSN</dt>
<dd>
The highest USN for any of the data objects represented
in this sync chunk. If there are no objects in the chunk, this will not be
set.
</dd>
<dt>updateCount</dt>
<dd>
The total number of updates that have been performed in
the service for this account. This is equal to the highest USN within the
account at the point that this SyncChunk was generated. If updateCount
and chunkHighUSN are identical, that means that this is the last chunk
in the account ... there is no more recent information.
</dd>
<dt>notes</dt>
<dd>
If present, this is a list of non-expunged notes that
have a USN in this chunk. This will include notes that are "deleted"
but not expunged (i.e. in the trash). The notes will include their list
of tags and resources, but the resource content and recognition data
will not be supplied.
</dd>
<dt>notebooks</dt>
<dd>
If present, this is a list of non-expunged notebooks that
have a USN in this chunk. This will include notebooks that are "deleted"
but not expunged (i.e. in the trash).
</dd>
<dt>tags</dt>
<dd>
If present, this is a list of the non-expunged tags that have a
USN in this chunk.
</dd>
<dt>searches</dt>
<dd>
If present, this is a list of non-expunged searches that
have a USN in this chunk.
</dd>
<dt>resources</dt>
<dd>
If present, this is a list of the non-expunged resources
that have a USN in this chunk. This will include the metadata for each
resource, but not its binary contents or recognition data, which must be
retrieved separately.
</dd>
<dt>expungedNotes</dt>
<dd>
If present, the GUIDs of all of the notes that were
permanently expunged in this chunk.
</dd>
<dt>expungedNotebooks</dt>
<dd>
If present, the GUIDs of all of the notebooks that
were permanently expunged in this chunk. When a notebook is expunged,
this implies that all of its child notes (and their resources) were
also expunged.
</dd>
<dt>expungedTags</dt>
<dd>
If present, the GUIDs of all of the tags that were
permanently expunged in this chunk.
</dd>
<dt>expungedSearches</dt>
<dd>
If present, the GUIDs of all of the saved searches
that were permanently expunged in this chunk.
</dd>
<dt>linkedNotebooks</dt>
<dd>
If present, this is a list of non-expunged LinkedNotebooks that
have a USN in this chunk.
</dd>
<dt>expungedLinkedNotebooks</dt>
<dd>
If present, the GUIDs of all of the LinkedNotebooks
that were permanently expunged in this chunk.
</dd>
</dl>
Attributes:
- currentTime
- chunkHighUSN
- updateCount
- notes
- notebooks
- tags
- searches
- resources
- expungedNotes
- expungedNotebooks
- expungedTags
- expungedSearches
- linkedNotebooks
- expungedLinkedNotebooks
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'currentTime', None, None, ), # 1
(2, TType.I32, 'chunkHighUSN', None, None, ), # 2
(3, TType.I32, 'updateCount', None, None, ), # 3
(4, TType.LIST, 'notes', (TType.STRUCT,(evernote.edam.type.ttypes.Note, evernote.edam.type.ttypes.Note.thrift_spec)), None, ), # 4
(5, TType.LIST, 'notebooks', (TType.STRUCT,(evernote.edam.type.ttypes.Notebook, evernote.edam.type.ttypes.Notebook.thrift_spec)), None, ), # 5
(6, TType.LIST, 'tags', (TType.STRUCT,(evernote.edam.type.ttypes.Tag, evernote.edam.type.ttypes.Tag.thrift_spec)), None, ), # 6
(7, TType.LIST, 'searches', (TType.STRUCT,(evernote.edam.type.ttypes.SavedSearch, evernote.edam.type.ttypes.SavedSearch.thrift_spec)), None, ), # 7
(8, TType.LIST, 'resources', (TType.STRUCT,(evernote.edam.type.ttypes.Resource, evernote.edam.type.ttypes.Resource.thrift_spec)), None, ), # 8
(9, TType.LIST, 'expungedNotes', (TType.STRING,None), None, ), # 9
(10, TType.LIST, 'expungedNotebooks', (TType.STRING,None), None, ), # 10
(11, TType.LIST, 'expungedTags', (TType.STRING,None), None, ), # 11
(12, TType.LIST, 'expungedSearches', (TType.STRING,None), None, ), # 12
(13, TType.LIST, 'linkedNotebooks', (TType.STRUCT,(evernote.edam.type.ttypes.LinkedNotebook, evernote.edam.type.ttypes.LinkedNotebook.thrift_spec)), None, ), # 13
(14, TType.LIST, 'expungedLinkedNotebooks', (TType.STRING,None), None, ), # 14
)
def __init__(self, currentTime=None, chunkHighUSN=None, updateCount=None, notes=None, notebooks=None, tags=None, searches=None, resources=None, expungedNotes=None, expungedNotebooks=None, expungedTags=None, expungedSearches=None, linkedNotebooks=None, expungedLinkedNotebooks=None,):
self.currentTime = currentTime
self.chunkHighUSN = chunkHighUSN
self.updateCount = updateCount
self.notes = notes
self.notebooks = notebooks
self.tags = tags
self.searches = searches
self.resources = resources
self.expungedNotes = expungedNotes
self.expungedNotebooks = expungedNotebooks
self.expungedTags = expungedTags
self.expungedSearches = expungedSearches
self.linkedNotebooks = linkedNotebooks
self.expungedLinkedNotebooks = expungedLinkedNotebooks
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.currentTime = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.chunkHighUSN = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 3:
| |
<filename>tests/test_models.py<gh_stars>0
import datetime
import pytest
from pytvdb.models import SeriesData, SeriesSearchData, SeriesActorsData, BasicEpisode, SeriesEpisodes, BaseModel, \
SeriesEpisodesSummary, Episode
class TestModels:
@pytest.mark.unit
def test_jsonify(self):
data = {
"aliases": [
"Doctor Who (1963)",
"Dr Who"
],
"banner": "graphical/76107-g14.jpg",
"firstAired": "1963-11-23",
"id": 76107,
"network": "BBC One",
"overview": "Doctor Who is the longest-running science fiction TV series in history, airing initially from "
"1963 to 1989. Doctor Who is about ideas. It pioneered sophisticated mixed-level storytelling. "
"Its format was the key to its longevity: the Doctor, a mysterious traveller in space and time,"
" travels in his ship, the TARDIS. The TARDIS can take him and his companions anywhere in time"
" and space. Inevitably he finds evil at work wherever he goes...",
"seriesName": "Doctor Who",
"status": "Ended"
}
model = SeriesSearchData(**data)
json = model.json()
assert isinstance(json, str)
@pytest.mark.unit
def test_search_series_data(self):
data = {
"aliases": [
"Doctor Who (1963)",
"Dr Who"
],
"banner": "graphical/76107-g14.jpg",
"firstAired": "1963-11-23",
"id": 76107,
"network": "BBC One",
"overview": "Doctor Who is the longest-running science fiction TV series in history, airing initially from "
"1963 to 1989. Doctor Who is about ideas. It pioneered sophisticated mixed-level storytelling. "
"Its format was the key to its longevity: the Doctor, a mysterious traveller in space and time,"
" travels in his ship, the TARDIS. The TARDIS can take him and his companions anywhere in time"
" and space. Inevitably he finds evil at work wherever he goes...",
"seriesName": "<NAME>",
"status": "Ended"
}
s = SeriesSearchData(**data)
assert len(s.aliases) == 2
assert s.banner == data['banner']
assert s.first_aired == datetime.date(1963, 11, 23)
assert s.id == data['id']
assert s.network == data['network']
assert s.overview == data['overview']
assert s.series_name == data['seriesName']
assert s.status == data['status']
@pytest.mark.unit
def test_series_data(self):
data = {
"id": 76107,
"seriesName": "<NAME>",
"aliases": [
"Doctor Who (1963)",
"<NAME>"
],
"banner": "graphical/76107-g14.jpg",
"seriesId": "355",
"status": "Ended",
"firstAired": "1963-11-23",
"network": "BBC One",
"networkId": "GJ8dPeHC",
"runtime": "25",
"genre": [
"Action",
"Adventure",
"Science-Fiction"
],
"overview": "Doctor Who is the longest-running science fiction TV series in history, airing initially from"
" 1963 to 1989. Doctor Who is about ideas. It pioneered sophisticated mixed-level"
" storytelling. Its format was the key to its longevity: the Doctor, a mysterious traveller in"
" space and time, travels in his ship, the TARDIS. The TARDIS can take him and his companions"
" anywhere in time and space. Inevitably he finds evil at work wherever he goes...",
"lastUpdated": 1500845512,
"airsDayOfWeek": "Saturday",
"airsTime": "5:15 PM",
"rating": "TV-PG",
"imdbId": "tt0056751",
"zap2itId": "EP001301",
"added": "2014-05-13 07:21:40",
"siteRating": 9.4,
"siteRatingCount": 177
}
s = SeriesData(**data)
assert isinstance(s, SeriesSearchData)
assert s.added == datetime.datetime(2014, 5, 13, 7, 21, 40)
assert s.airs_day_of_week == data['airsDayOfWeek']
assert s.airs_time == data['airsTime']
assert len(s.genre) == len(data['genre'])
assert s.imdb_id == data['imdbId']
assert int(s.last_updated.timestamp()) == data['lastUpdated']
assert s.network_id == data['networkId']
assert s.rating == data['rating']
assert s.runtime == data['runtime']
assert s.series_id == int(data['seriesId'])
assert s.site_rating == data['siteRating']
assert s.site_rating_count == data['siteRatingCount']
assert s.zap2it_id == data['zap2itId']
@pytest.mark.unit
def test_series_actor_data(self):
data = {
"id": 43198,
"seriesId": 76107,
"name": "<NAME>",
"role": "Nyssa",
"sortOrder": 3,
"image": "actors/43198.jpg",
"imageAuthor": 7570,
"imageAdded": "2009-01-12 17:45:32",
"lastUpdated": "2009-01-12 17:45:32"
}
a = SeriesActorsData(**data)
assert a.id == data['id']
assert a.series_id == data['seriesId']
assert a.name == data['name']
assert a.role == data['role']
assert a.sort_order == data['sortOrder']
assert a.image == data['image']
assert a.image_author == data['imageAuthor']
assert a.image_added.strftime('%Y-%m-%d %H:%M:%S') == data['imageAdded']
assert a.last_updated.strftime('%Y-%m-%d %H:%M:%S') == data['lastUpdated']
@pytest.mark.unit
def test_basic_episode(self):
data = {
"absoluteNumber": None,
"airedEpisodeNumber": 1,
"airedSeason": 1,
"airedSeasonID": 9666,
"dvdEpisodeNumber": None,
"dvdSeason": None,
"episodeName": "An Unearthly Child (1)",
"firstAired": "1963-11-23",
"id": 183204,
"lastUpdated": 1462622449,
"overview": "London, 1963. Schoolteachers <NAME> and <NAME> are perplexed by the behaviour"
" of one of their pupils, <NAME>. Her knowledge of science and history exceeds theirs,"
" yet she seems totally ignorant of many common aspects of everyday life. They follow her to"
" her home address, a junkyard with a police telephone box standing in it, and encounter her"
" grandfather, the enigmatic Doctor. When they force their way past him into the police box,"
" Susan's secret is revealed: she and the Doctor are aliens, and the police box is a time"
" machine, the TARDIS, capable of visiting any point in the universe at any moment in time…"
}
e = BasicEpisode(**data)
assert e.absolute_number == data['absoluteNumber']
assert e.aired_episode_number == data['airedEpisodeNumber']
assert e.aired_season == data['airedSeason']
assert e.dvd_episode_number == data['dvdEpisodeNumber']
assert e.dvd_season == data['dvdSeason']
assert e.episode_name == data['episodeName']
assert e.first_aired.strftime('%Y-%m-%d') == data['firstAired']
assert e.id == data['id']
assert int(e.last_updated.timestamp()) == data['lastUpdated']
assert e.overview == data['overview']
@pytest.mark.unit
def test_series_episodes(self):
data = [1,2,3,4,5]
e = SeriesEpisodes(data)
assert len(e) == 5
for i in range(0, 5):
assert e[i] == data[i]
assert 3 in e
for i, ep in zip(data, e):
assert i == ep
for i, ep in zip(reversed(data), reversed(e)):
assert i == ep
@pytest.mark.unit
def test_series_episodes_summary(self):
data = {
"airedSeasons": [
"21",
"20",
"19",
"0",
"18",
"26",
"25",
"24",
"22",
"23",
"14",
"13",
"12",
"11",
"17",
"16",
"15",
"9",
"3",
"5",
"6",
"10",
"8",
"2",
"1",
"7",
"4"
],
"airedEpisodes": "809",
"dvdSeasons": [],
"dvdEpisodes": "0"
}
s = SeriesEpisodesSummary(**data)
assert len(s.aired_seasons) == 27
assert s.aired_episodes == 809
assert s.dvd_seasons == []
assert s.dvd_episodes == 0
@pytest.mark.unit
def test_episode(self):
data = {
"id": 183284,
"airedSeason": 13,
"airedSeasonID": 9670,
"airedEpisodeNumber": 2,
"episodeName": "Terror of the Zygons (2)",
"firstAired": "1975-09-06",
"guestStars": [
"<NAME>",
" <NAME>",
" <NAME>",
" <NAME>",
" <NAME>"
],
"director": "<NAME>",
"directors": [
"<NAME>"
],
"writers": [
"<NAME>"
],
"overview": "Scotland, the near future. Something is smashing oil rigs off the Scottish coast, and UNIT"
" have been called in to investigate. The aliens responsible, the Zygons, try to kill the"
" Doctor and Sarah Jane, then attempt to recover their signal device using their ability"
" to mimic human beings.\r\n",
"language": {
"episodeName": "en",
"overview": "en"
},
"productionCode": "4H",
"showUrl": "http://www.tv.com/episode/441716/summary.html",
"lastUpdated": 1237793481,
"dvdDiscid": "",
"dvdSeason": None,
"dvdEpisodeNumber": None,
"dvdChapter": None,
"absoluteNumber": None,
"filename": "episodes/76107/183284.jpg",
"seriesId": 76107,
"lastUpdatedBy": 593,
"airsAfterSeason": None,
"airsBeforeSeason": None,
"airsBeforeEpisode": None,
"thumbAuthor": 6222,
"thumbAdded": "",
"thumbWidth": "400",
"thumbHeight": "300",
"imdbId": "",
"siteRating": 7,
"siteRatingCount": 2
}
e = Episode(**data)
assert isinstance(e, BasicEpisode)
assert e.airs_after_season == data['airsAfterSeason']
assert e.airs_before_episode == data['airsBeforeEpisode']
assert e.airs_before_season == data['airsBeforeSeason']
assert e.director == data['director']
assert e.directors == data['directors']
assert e.dvd_chapter == data['dvdChapter']
assert e.dvd_disc_id == data['dvdDiscid']
assert e.guest_stars == data['guestStars']
assert e.last_updated_by == data['lastUpdatedBy']
assert e.production_code == data['productionCode']
assert e.series_id == data['seriesId']
assert e.show_url == data['showUrl']
assert e.thumb_added == data['thumbAdded']
assert e.thumb_author == data['thumbAuthor']
assert e.thumb_height == data['thumbHeight']
assert e.thumb_width == data['thumbWidth']
assert e.writers == data['writers']
class TestBaseModel:
@pytest.mark.unit
@pytest.mark.parametrize("input,func, expected", [
([], lambda l: l + [1], []),
([1], lambda l: l + [1], [1, 1]),
('', lambda l: l + '1', ''),
('1', lambda l: l + '1', '11'),
('0', lambda l: l + '1', '01'),
(0, lambda l: l + 1, 1),
(1, lambda l: l + 1, 2)
])
def test_apply_func_or_none(self, input, func, expected):
res = BaseModel._apply_func_or_none(func, input)
assert res == expected
class TestSeriesEpisodeQuery:
@pytest.fixture(scope='session')
def data(self):
data = [
{
"absoluteNumber": 2,
"airedEpisodeNumber": 1,
"airedSeason": 1,
"airedSeasonID": 15791,
"dvdEpisodeNumber": 2,
"dvdSeason": 1,
"episodeName": "The Train Job",
"firstAired": "2002-09-20",
"id": 297989,
"language": {
"episodeName": "en",
"overview": "en"
},
"lastUpdated": 1458047554,
},{
"absoluteNumber": 15,
"airedEpisodeNumber": 1,
"airedSeason": 0,
"airedSeasonID": 26328,
"dvdEpisodeNumber": None,
"dvdSeason": None,
"episodeName": "Serenity",
"firstAired": "2005-09-30",
"id": 415679,
"language": {
"episodeName": "en",
"overview": "en"
},
"lastUpdated": 1358950216,
},{
"absoluteNumber": 3,
"airedEpisodeNumber": 2,
"airedSeason": 1,
"airedSeasonID": 15791,
"dvdEpisodeNumber": 3,
"dvdSeason": 1,
"episodeName": "Bushwhacked",
"firstAired": "2002-09-27",
"id": 297990,
"language": {
"episodeName": "en",
"overview": "en"
},
"lastUpdated": 1458047574,
},{
"absoluteNumber": None,
"airedEpisodeNumber": 2,
"airedSeason": 0,
"airedSeasonID": 26328,
"dvdEpisodeNumber": None,
"dvdSeason": None,
"episodeName": "Here’s How It Was: The Making of “Firefly”",
"firstAired": "2003-12-09",
"id": 1000141,
"language": {
"episodeName": "en",
"overview": "en"
},
"lastUpdated": 1465738872,
},{
"absoluteNumber": 6,
"airedEpisodeNumber": 3,
"airedSeason": 1,
"airedSeasonID": 15791,
"dvdEpisodeNumber": 6,
"dvdSeason": 1,
"episodeName": "<NAME>",
"firstAired": "2002-10-04",
"id": 297991,
"language": {
"episodeName": "en",
"overview": "en"
},
"lastUpdated": 1458047585,
},{
"absoluteNumber": None,
| |
To be used in combination with either the first or last timestamp.
:param first_timestamp: Limit the results to resource actions that started later
than the value of this parameter (exclusive)
:param last_timestamp: Limit the results to resource actions that started earlier
than the value of this parameter (exclusive).
Only the first_timestamp or last_timestamp parameter should be supplied
:return: the list of matching Resource Actions in a descending order according to the 'started' timestamp.
If a limit was specified, also return the links to the next and previous pages.
The "next" page always refers to the actions that started earlier,
while the "prev" page refers to actions that started later.
:raises BadRequest: When the supplied parameters are not valid.
"""
@typedmethod(
path="/resource/<rvid>/deploy/done",
operation="POST",
agent_server=True,
arg_options={**methods.ENV_OPTS, **methods.RVID_OPTS},
client_types=[ClientType.agent],
api_version=2,
)
def resource_deploy_done(
tid: uuid.UUID,
rvid: model.ResourceVersionIdStr,
action_id: uuid.UUID,
status: ResourceState,
messages: List[model.LogLine] = [],
changes: Dict[str, model.AttributeStateChange] = {},
change: Optional[Change] = None,
) -> None:
"""
Report to the server that an agent has finished the deployment of a certain resource.
:param tid: The id of the environment the resource belongs to
:param rvid: The resource version id of the resource for which the deployment is finished.
:param action_id: A unique ID associated with this resource deployment action. This should be the same ID that was
passed to the `/resource/<resource_id>/deploy/start` API call.
:param status: The current status of the resource (if known)
:param messages: A list of log entries produced by the deployment action.
:param changes: A dict of changes to this resource. The key of this dict indicates the attributes/fields that
have been changed. The value contains the new value and/or the original value.
:param change: The type of change that was done the given resource.
"""
@typedmethod(
path="/resource/<rvid>/deploy/start",
operation="POST",
agent_server=True,
arg_options={**methods.ENV_OPTS, **methods.RVID_OPTS},
client_types=[ClientType.agent],
api_version=2,
)
def resource_deploy_start(
tid: uuid.UUID,
rvid: model.ResourceVersionIdStr,
action_id: uuid.UUID,
) -> Dict[model.ResourceVersionIdStr, ResourceState]:
"""
Report to the server that the agent will start the deployment of the given resource.
:param tid: The id of the environment the resource belongs to
:param rvid: The resource version id of the resource for which the deployment will start
:param action_id: A unique id used to track the action of this deployment
:return: A dict mapping the resource version id of each dependency of resource_id to
the last deployment status of that resource.
"""
# No pagination support is provided for this endpoint because there is no elegant way to page the output of this endpoint.
@typedmethod(
path="/resource/<rvid>/events",
operation="GET",
arg_options={**methods.ENV_OPTS, **methods.RVID_OPTS},
agent_server=True,
client_types=[ClientType.agent],
api_version=2,
)
def get_resource_events(
tid: uuid.UUID,
rvid: model.ResourceVersionIdStr,
) -> Dict[model.ResourceIdStr, List[model.ResourceAction]]:
"""
Return relevant events for a resource, i.e. all deploy actions for each of its dependencies since this resource's last
deploy, or all deploy actions if this resource hasn't been deployed before. The resource actions are sorted in descending
order according to their started timestamp.
:param tid: The id of the environment this resource belongs to
:param rvid: The id of the resource to get events for.
:raises BadRequest: When this endpoint is called while the resource with the given resource version is not
in the deploying state.
"""
@typedmethod(
path="/resource/<rvid>/did_dependency_change",
operation="GET",
arg_options={**methods.ENV_OPTS, **methods.RVID_OPTS},
agent_server=True,
client_types=[ClientType.agent],
api_version=2,
)
def resource_did_dependency_change(
tid: uuid.UUID,
rvid: model.ResourceVersionIdStr,
) -> bool:
"""
Returns True iff this resource's events indicate a change in its dependencies since the resource's last deployment.
:param tid: The id of the environment this resource belongs to
:param rvid: The id of the resource.
:raises BadRequest: When this endpoint is called while the resource with the given resource version is not
in the deploying state.
"""
@typedmethod(path="/resource", operation="GET", arg_options=methods.ENV_OPTS, client_types=[ClientType.api], api_version=2)
def resource_list(
tid: uuid.UUID,
limit: Optional[int] = None,
first_id: Optional[model.ResourceVersionIdStr] = None,
last_id: Optional[model.ResourceVersionIdStr] = None,
start: Optional[str] = None,
end: Optional[str] = None,
filter: Optional[Dict[str, List[str]]] = None,
sort: str = "resource_type.desc",
) -> List[model.LatestReleasedResource]:
"""
:param tid: The id of the environment this resource belongs to
:param limit: Limit the number of instances that are returned
:param first_id: The resource_version_id to use as a continuation token for paging, in combination with the 'start' value,
because the order by column might contain non-unique values
:param last_id: The resource_version_id to use as a continuation token for paging, in combination with the 'end' value,
because the order by column might contain non-unique values
:param start: The lower limit for the order by column (exclusive).
Only one of 'start' and 'end' should be specified at the same time.
:param end: The upper limit for the order by column (exclusive).
Only one of 'start' and 'end' should be specified at the same time.
:param filter: Filter the list of returned resources.
Filters should be specified with the syntax `?filter.<filter_key>=value`, for example `?filter.status=deployed`
It's also possible to provide multiple values for the same filter, in this case resources are returned,
if they match any of these filter values.
For example: `?filter.status=deployed&filter.status=available` returns instances with either of the statuses
deployed or available.
Multiple different filters narrow the results however (they are treated as an 'AND' operator).
For example `filter.status=deployed&filter.agent=internal` returns resources
with 'deployed' status, where the 'agent' is set to 'internal_agent'.
The following options are available:
agent: filter by the agent of the resource
resource_type: filter by the type of the resource
resource_id_value: filter by the attribute values of the resource
status: filter by the current status of the resource
The values for the 'agent', 'resource_type' and 'value' filters are matched partially.
:param sort: Return the results sorted according to the parameter value.
It should follow the pattern `<attribute_to_sort_by>.<order>`, for example `resource_type.desc`
(case insensitive).
The following sorting attributes are supported: 'resource_type', 'agent', 'resource_id_value', 'status'.
The following orders are supported: 'asc', 'desc'
:return: A list of all matching released resources
:raise NotFound: This exception is raised when the referenced environment is not found
:raise BadRequest: When the parameters used for filtering, sorting or paging are not valid
"""
@typedmethod(
path="/resource/<rid>", operation="GET", arg_options=methods.ENV_OPTS, client_types=[ClientType.api], api_version=2
)
def resource_details(tid: uuid.UUID, rid: model.ResourceIdStr) -> model.ResourceDetails:
"""
:return: The details of the latest released version of a resource
:raise NotFound: This exception is raised when the referenced environment or resource is not found
"""
@typedmethod(
path="/resource/<rid>/history", operation="GET", arg_options=methods.ENV_OPTS, client_types=[ClientType.api], api_version=2
)
def resource_history(
tid: uuid.UUID,
rid: model.ResourceIdStr,
limit: Optional[int] = None,
first_id: Optional[str] = None,
last_id: Optional[str] = None,
start: Optional[datetime.datetime] = None,
end: Optional[datetime.datetime] = None,
sort: str = "date.desc",
) -> List[model.ResourceHistory]:
"""
:param tid: The id of the environment this resource belongs to
:param rid: The id of the resource
:param limit: Limit the number of instances that are returned
:param first_id: The attribute_hash to use as a continuation token for paging, in combination with the 'start' value,
because the order by column might contain non-unique values
:param last_id: The attribute_hash to use as a continuation token for paging, in combination with the 'end' value,
because the order by column might contain non-unique values
:param start: The lower limit for the order by column (exclusive).
Only one of 'start' and 'end' should be specified at the same time.
:param end: The upper limit for the order by column (exclusive).
Only one of 'start' and 'end' should be specified at the same time.
:param sort: Return the results sorted according to the parameter value.
It should follow the pattern `<attribute_to_sort_by>.<order>`, for example `date.desc`
(case insensitive).
Sorting by `date` is supported.
The following orders are supported: 'asc', 'desc'
:return: The history of a resource, according to its attributes
:raise NotFound: This exception is raised when the referenced environment is not found
:raise BadRequest: When the parameters used for sorting or paging are not valid
"""
@typedmethod(
path="/resource/<rid>/logs", operation="GET", arg_options=methods.ENV_OPTS, client_types=[ClientType.api], api_version=2
)
def resource_logs(
tid: uuid.UUID,
rid: model.ResourceIdStr,
limit: Optional[int] = None,
start: Optional[datetime.datetime] = None,
end: Optional[datetime.datetime] = None,
filter: Optional[Dict[str, List[str]]] = None,
sort: str = "timestamp.desc",
) -> List[model.ResourceLog]:
"""
Get the logs of a specific resource
:param tid: The id of the environment this resource belongs to
:param rid: The | |
<reponame>ahansenlab/connect_the_dots
## Particle tracking functions for 4D time series data.
# Written by <NAME> (<EMAIL>)
# (c) 2021, <NAME>
## Import required modules
import matplotlib.pyplot as plt # for plotting
import matplotlib # for plotting
import numpy as np # for manipulating arrays
import pickle # for saving python objects and other data
from scipy.optimize import curve_fit # for making fits to the PSF
from scipy.ndimage import gaussian_laplace, gaussian_filter # for dot localization (image filtering)
from skimage import measure # for segmenting images
from skimage.morphology import remove_small_objects, closing, disk, dilation # for morphological filtering of images
from skimage.segmentation import clear_border # for filtering images
from skimage.filters import threshold_otsu
import pandas as pd # for creating and manipulating tabulated data
from collections.abc import Iterable  # Iterable lives in collections.abc on Python 3 (removed from collections in 3.10)
import itertools
from itertools import product, groupby
import copy
import scipy
import trackpy # library containing tracking algorithms
import re # regex
import warnings
from pathlib import Path
import connect_the_dots
from connect_the_dots.filtering import get_image_objects, create_mask, create_mask_symmetric
from functools import reduce
# settings for making nice pdfs
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
plt.rcParams['font.sans-serif'] = "DejaVu Sans"
plt.rcParams['font.family'] = "sans-serif"
trackpy.quiet()
def get_localizations_iterative(filtered_zstack_timeseries,
zstack_timeseries,
frames,
channel,
percentile_threshold=99.95,
max_dot_size = 10000,
min_dot_size = 20,
search_range=(5,15,15),
min_track_length = 15,
max_iterations=5,
min_dot_size_increment=5,
percentile_threshold_increment=0.01,
current_iteration=0,
verbose=True):
"""
Use trackpy to create trajectories from dot localizations.
Parameters
----------
filtered_zstack_timeseries : numpy.ndarray
Post-filtering 4-D zstack of the image series data used to make a binary mask.
zstack_timeseries : numpy.ndarray
Raw 4-D zstack of the image series data.
frames : range or list of int
Frames to process.
channel : int
Channel index; recorded alongside each localization in the output tables.
percentile_threshold : float
Percentile from 0 to 100 by which to threshold the `filtered_zstack_timeseries`.
max_dot_size : int
Maximum size of the dots in numbers of pixels
min_dot_size : int
Minimum size of the dots in numbers of pixels
search_range : 3-tuple of ints
Maximum frame-to-frame displacement (Z,Y,X) of dots
memory : int
Number of frames a dot may "disappear" and still be called the same trajectory.
min_track_length : int
Minimum length of trajectories kept
max_iterations : int
Maximum number of iterations of parameter updates for trajectory creation
min_dot_size_increment : int
Increment added to `min_dot_size` if trajectory creation fails.
percentile_threshold_increment : float
Increment added to `percentile_threshold` if trajectory creation fails by `dot_size_increment`.
current_iteration : int
Keeps track of the number of iterations.
Returns
-------
loc_df_dict : dict
Dictionary of particle localizations.
linked_df_dict : dict
Dictionary of particle trajectories (i.e. localizations, linked across time)
"""
if verbose == True:
print(f"Starting iteration: {current_iteration}")
ch = channel
loc_df = []
for frame in frames:
if verbose==True:
print(f"Getting objects channel: {ch}, frame: {frame}")
df = get_image_objects(filtered_zstack_timeseries[:,:,:,frame],
zstack_timeseries[:,:,:,frame],
percentile_threshold=percentile_threshold,
min_size_pixels=min_dot_size,
max_size_pixels=max_dot_size)
df['frame'] = [frame]*len(df)
df['channel'] = [channel]*len(df)
loc_df.append(df)
loc_df = pd.concat(loc_df,ignore_index=True)
if verbose==True:
print(f"Linking trajectories, channel: {ch}")
linked_df = None
count = 0
while (linked_df is None) and (count <= max_iterations):
#while (linked_df is None):
try:
linked_df = create_trajectories(loc_df,
max_dot_size = max_dot_size,
min_dot_size = min_dot_size+min_dot_size_increment*count,
search_range=search_range,
min_track_length = min_track_length)
except:
# increase the threshold on the minimum dot size
count += 1
if verbose==True:
print(f"Trajectory linking failed. Incrementing the minimum dot size to {min_dot_size+min_dot_size_increment*count}.")
linked_df = None
# if increasing the minimum dot size threshold does not help
if linked_df is None:
if verbose==True:
print(f"Trajectory linking failed. Re-processing image series with higher percentile threshold.")
# increase the percentile threshold
percentile_threshold += percentile_threshold_increment
loc_df, linked_df = get_localizations_iterative(filtered_zstack_timeseries,
zstack_timeseries,
frames,
channel,
percentile_threshold=percentile_threshold,
max_dot_size = max_dot_size,
min_dot_size = min_dot_size,
search_range=search_range,
min_track_length = min_track_length,
max_iterations=max_iterations,
current_iteration=current_iteration+1,
verbose=verbose)
warnings.warn("One iteration failed")
return loc_df, linked_df
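# Hypothetical usage sketch (added; array names and parameter values are assumptions chosen only to illustrate
# the call signature documented above):
#
#   locs_df, tracks_df = get_localizations_iterative(
#       filtered_stack,                      # filtered 4-D (Z, Y, X, T) array used for masking
#       raw_stack,                           # raw 4-D (Z, Y, X, T) array
#       frames=range(raw_stack.shape[-1]),
#       channel=0,
#       percentile_threshold=99.95,
#       min_track_length=15)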
def create_trajectories(loc_df,
max_dot_size = np.inf,
min_dot_size = 0,
search_range=(5,15,15),
memory=5,
min_track_length = 15):
"""
Use trackpy to create trajectories from dot localizations.
Parameters
----------
loc_df : Pandas DataFrame
DataFrame of object localizations in 3D and time.
max_dot_size : int
Maximum size of the dots in numbers of pixels
min_dot_size : int
Minimum size of the dots in numbers of pixels
search_range : 3-tuple of ints
Maximum frame-to-frame displacement (Z,Y,X) of dots
memory : int
Number of frames a dot may "disappear" and still be called the same trajectory.
min_track_length : int
Minimum length of trajectories kept
Returns
-------
dict : dictionary of particle trajectories
"""
# filter the trajectories based on dot size
tmp_df = loc_df.copy()
tmp_df = tmp_df[(tmp_df.dot_size_in_pixels<max_dot_size) & ((tmp_df.dot_size_in_pixels>min_dot_size))]
# create trajectories
tmp_df = trackpy.link_df(tmp_df, \
search_range=tuple(search_range), \
memory=memory)
# filter trajectories based on minimum length
tmp_df = tmp_df.groupby('particle').filter(lambda x: len(x) > min_track_length)
return tmp_df
def link_trajectories_across_channels(linked_df,
min_overlap_length=10,
corrcoeff_min =0.3,
distance_max = 35):
"""
Links trajectories together across channels
Parameters
----------
linked_df : pandas.DataFrame
DataFrame of particle trajectories (connected separately for each channel) in 3D and time.
min_overlap_length : int
Minimum number of frames of overlap between channels necessary to connect two trajectories
corrcoeff_min : float
Minimum correlation coefficient between dot pairs (for each dimension separately) in order
for trajectories to be linked together.
distance_max : int
Maximum distance in pixels between dots in connected trajectories
Returns
-------
joined_df : pandas.DataFrame
DataFrame of particle trajectories (connected separately for each channel) in 3D and time.
This DataFrame may contain some trajectories that do not meet the likely pair criteria. To
obtain a clean list, use `linked_trajectory_ids`.
linked_trajectory_ids : pandas.DataFrame
List of trajectory IDs in `joined_df` meeting the criteria for likely pairs.
"""
fake_linked_df = linked_df.copy()
# get 4D tracks for Channel 0
particles0 = list(set(fake_linked_df[fake_linked_df.channel==0].particle))
tracks_df0 = {}
for p in particles0:
X = fake_linked_df[fake_linked_df.particle==p][['frame','x','y','z']].values
tracks_df0[p] = X
# get 4D tracks for Channel 1
particles1 = list(set(fake_linked_df[fake_linked_df.channel==1].particle))
tracks_df1 = {}
for p in particles1:
X = fake_linked_df[fake_linked_df.particle==p][['frame','x','y','z']].values
tracks_df1[p] = X
min_id_value = np.min(particles1)
## Find likely pairs of trajectories
likely_pairs = []
for p0i in tracks_df0:
p0 = tracks_df0[p0i]
for p1i in tracks_df1:
p1 = tracks_df1[p1i]
times, comm0, comm1 = np.intersect1d(p0[:,0],p1[:,0],return_indices=True)
if len(times) > min_overlap_length:
corrcoeffX = np.corrcoef(p0[comm0,1],p1[comm1,1])[0,1]
corrcoeffY = np.corrcoef(p0[comm0,2],p1[comm1,2])[0,1]
corrcoeffZ = np.corrcoef(p0[comm0,3],p1[comm1,3])[0,1]
# residualsX = np.mean(np.abs(p0[comm0,1]-p1[comm1,1]))
# residualsY = np.mean(np.abs(p0[comm0,2]-p1[comm1,2]))
# residualsZ = np.mean(np.abs(p0[comm0,3]-p1[comm1,3]))
residualsX = (np.abs(p0[comm0,1]-p1[comm1,1]))
residualsY = (np.abs(p0[comm0,2]-p1[comm1,2]))
residualsZ = (np.abs(p0[comm0,3]-p1[comm1,3]))
distance = np.sqrt(residualsX**2+residualsY**2+residualsZ**2)
# link trajectories if they satisfy a minimum correlation coefficient
# and are within an expected maximum distance (in nm) of residuals_max
corrcoeff_good = all([corrcoeff_min < c for c in [corrcoeffX,corrcoeffY,corrcoeffZ]])
residuals_good = all(distance < distance_max)
if corrcoeff_good and residuals_good:
likely_pairs.append((int(p0i),int(p1i)))
################## REFINE THE PAIRING###################
# cluster trajectories together
# find conflicts or overlaps
clusters = find_clusters(likely_pairs)
likelier_pairs = []
# generate all possible orderings within the cluster
# for each ordering:
# check for overlaps within a channel
# -- if overlap exists, discard this; continue
# check for continuous overlap across channels for all segments
# -- if overlap is non-contiguous discard; continue
# if overlaps are contiguous in time and non-overlapping within a channel
# -- score the total length of contig, score average correlation, score average distance; store
# for each stored/scored ordering
for ci, c in enumerate(clusters):
if len(c) < 2:
continue
elif len(c)==2:
likelier_pairs.append(tuple(sorted(c)))
else:
# generate all possible orderings within the cluster
orderings = partition_select(c,min_sz=1)
# for each ordering
good_ordering_metrics = {}
for si, seq in enumerate(orderings):
# assert the ordering is at least of size 2
if len(seq) <= 1:
#bad_orderings.append(si)
continue
# split the channels and get time points for each group
group0 = [tracks_df0[s][:,0] for s in seq if s < min_id_value]
group1 = [tracks_df1[s][:,0] for s in seq if s >= min_id_value]
# allow for an overlap equal to the number of groups collected -1
max_overlap0 = 0#len(group0)-1
max_overlap1 = 0#len(group1)-1
# CHECK THE FRAME RANGES FOR OVERLAPS
if len(group0) == 0:
continue
elif len(group0)> 1:
# check that the start and end times are non-overlapping
starts = [l[0] for l in group0]
ends = [l[-1] for l in group0]  # last frame of each segment
time_range_overlap | |
# Copyright (c) <NAME>. All Rights Reserved.
r"""
STB dataset
A Hand joint Tracking Benchmark from Stereo Matching, ICIP 2017
"""
import math
import os
import pickle
import PIL
import cv2
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import torch.utils.data
from PIL import Image
from termcolor import colored
from tqdm import tqdm
import config as cfg
import utils.handutils as handutils
CACHE_HOME = os.path.expanduser(cfg.DEFAULT_CACHE_DIR)
# some globals, ugly but work
sk_fx_color = 607.92271
sk_fy_color = 607.88192
sk_tx_color = 314.78337
sk_ty_color = 236.42484
bb_fx = 822.79041
bb_fy = 822.79041
bb_tx = 318.47345
bb_ty = 250.31296
sk_rot_vec = [0.00531, -0.01196, 0.00301]
sk_trans_vec = [-24.0381, -0.4563, -1.2326] # mm
snap_joint_name2id = {w: i for i, w in enumerate(cfg.snap_joint_names)}
stb_joint_name2id = {w: i for i, w in enumerate(cfg.stb_joints)}
stb_to_snap_id = [snap_joint_name2id[joint_name] for joint_name in cfg.stb_joints]
def sk_rot_mx(rot_vec):
"""
use Rodrigues' rotation formula to transform the rotation vector into rotation matrix
:param rot_vec:
:return:
"""
theta = np.linalg.norm(rot_vec)
vector = np.array(rot_vec) * math.sin(theta / 2.0) / theta
a = math.cos(theta / 2.0)
b = -vector[0]
c = -vector[1]
d = -vector[2]
return np.array(
[
[
a * a + b * b - c * c - d * d,
2 * (b * c + a * d),
2 * (b * d - a * c)
],
[
2 * (b * c - a * d),
a * a + c * c - b * b - d * d,
2 * (c * d + a * b)
],
[
2 * (b * d + a * c),
2 * (c * d - a * b),
a * a + d * d - b * b - c * c
]
]
)
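# Optional cross-check sketch (added; assumes a recent scipy is available): scipy provides an equivalent
# rotation-vector-to-matrix conversion, so the hand-rolled formula above can be sanity-checked numerically:
#
#   from scipy.spatial.transform import Rotation
#   assert np.allclose(sk_rot_mx(sk_rot_vec),
#                      Rotation.from_rotvec(sk_rot_vec).as_matrix(), atol=1e-8)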
def sk_xyz_depth2color(depth_xyz, trans_vec, rot_mx):
"""
in the STB dataset: 'rotation and translation vector can transform the coordinates
relative to color camera to those relative to depth camera'.
however here we want depth_xyz -> color_xyz
a inverse transformation happen:
T = [rot_mx | trans_vec | 0 1], Tinv = T.inv, then output Tinv * depth_xyz
:param depth_xyz: N x 21 x 3, trans_vec: 3, rot_mx: 3 x 3
:return: color_xyz: N x 21 x 3
"""
color_xyz = depth_xyz - np.tile(trans_vec, [depth_xyz.shape[0], depth_xyz.shape[1], 1])
return color_xyz.dot(rot_mx)
def stb_palm2wrist(joint_xyz):
root = snap_joint_name2id['loc_bn_palm_L'] # 0
index = snap_joint_name2id['loc_bn_index_L_01'] # 5
mid = snap_joint_name2id['loc_bn_mid_L_01'] # 9
ring = snap_joint_name2id['loc_bn_ring_L_01'] # 13
pinky = snap_joint_name2id['loc_bn_pinky_L_01'] # 17
def _new_root(joint_xyz, id, root_id):
return joint_xyz[:, id, :] + \
2.25 * (joint_xyz[:, root_id, :] - joint_xyz[:, id, :]) # N x K x 3
joint_xyz[:, root, :] = \
_new_root(joint_xyz, index, root) + \
_new_root(joint_xyz, mid, root) + \
_new_root(joint_xyz, ring, root) + \
_new_root(joint_xyz, pinky, root)
joint_xyz[:, root, :] = joint_xyz[:, root, :] / 4.0
return joint_xyz
def _stb_palm2wrist(joint_xyz):
root_id = snap_joint_name2id['loc_bn_palm_L'] # 0
mid_root_id = snap_joint_name2id['loc_bn_mid_L_01'] # 9
joint_xyz[:, root_id, :] = \
joint_xyz[:, mid_root_id, :] + \
2.2 * (joint_xyz[:, root_id, :] - joint_xyz[:, mid_root_id, :]) # N x K x 3
return joint_xyz
def ge_palm2wrist(pose_xyz):
root_id = snap_joint_name2id['loc_bn_palm_L']
ring_root_id = snap_joint_name2id['loc_bn_ring_L_01']
pose_xyz[:, root_id, :] = pose_xyz[:, ring_root_id, :] + \
2.0 * (pose_xyz[:, root_id, :] - pose_xyz[:, ring_root_id, :]) # N x K x 3
return pose_xyz
class STBDataset(torch.utils.data.Dataset):
def __init__(
self,
data_root,
data_split='train',
hand_side='right',
njoints=21,
use_cache=True,
visual=False
):
if not os.path.exists(data_root):
raise ValueError("data_root: %s not exist" % data_root)
self.name = 'stb'
self.data_split = data_split
self.hand_side = hand_side
self.img_paths = []
self.dep_paths = []
self.joints = []
self.kp2ds = []
self.centers = []
self.my_scales = []
self.njoints = njoints # total 21 hand parts
self.visual = visual
self.root_id = snap_joint_name2id['loc_bn_palm_L']
self.mid_mcp_id = snap_joint_name2id['loc_bn_mid_L_01']
ann_base = os.path.join(data_root, "labels")
img_base = os.path.join(data_root, "images")
sk_rot = sk_rot_mx(sk_rot_vec)
self.sk_intr = np.array([
[sk_fx_color, 0.0, sk_tx_color],
[0.0, sk_fy_color, sk_ty_color],
[0.0, 0.0, 1.0],
], dtype=np.float32) # (3,3)
self.sequence = []
if data_split == 'train':
self.sequence = [
"B2Counting",
"B2Random",
"B3Counting",
"B3Random",
"B4Counting",
"B4Random",
"B5Counting",
"B5Random",
"B6Counting",
"B6Random"
]
elif data_split == 'test':
self.sequence = [
"B1Counting",
"B1Random"
]
elif data_split == 'val':
self.sequence = [
"B2Counting",
"B2Random"
]
elif data_split == "train_val":
self.sequence = [
"B3Counting",
"B3Random",
"B4Counting",
"B4Random",
"B5Counting",
"B5Random",
"B6Counting",
"B6Random"
]
elif data_split == "all":
self.sequence = [
"B1Counting",
"B1Random",
"B2Counting",
"B2Random",
"B3Counting",
"B3Random",
"B4Counting",
"B4Random",
"B5Counting",
"B5Random",
"B6Counting",
"B6Random"
]
else:
raise ValueError("split {} not in [train|test|val|train_val|all]")
self.cache_folder = os.path.join(CACHE_HOME, "my-{}".format(data_split), "stb")
os.makedirs(self.cache_folder, exist_ok=True)
cache_path = os.path.join(
self.cache_folder, "{}.pkl".format(self.data_split)
)
if os.path.exists(cache_path) and use_cache:
with open(cache_path, "rb") as fid:
annotations = pickle.load(fid)
self.img_paths = annotations["img_paths"]
self.dep_paths = annotations["dep_paths"]
self.joints = annotations["joints"]
self.kp2ds = annotations["kp2ds"]
self.centers = annotations["centers"]
self.my_scales = annotations["my_scales"]
print("stb {} gt loaded from {}".format(self.data_split, cache_path))
return
self.imgpath_list = [
os.path.join(img_base, seq) for seq in self.sequence
]
imgsk_prefix = "SK_color"
depsk_prefix = "SK_depth_seg"
annsk_list = [
os.path.join(
ann_base,
"{}_{}.mat".format(seq, imgsk_prefix[:2])
) for seq in self.sequence
]
self.ann_list = annsk_list
for imgpath, ann in zip(self.imgpath_list, self.ann_list):
''' we only use SK image '''
assert "SK" in ann
''' 1. load joint '''
rawmat = sio.loadmat(ann)
rawjoint = rawmat["handPara"].transpose((2, 1, 0)) # N x K x 3
num = rawjoint.shape[0] # N
rawjoint = sk_xyz_depth2color(rawjoint, sk_trans_vec, sk_rot)
# reorder idx
joint = rawjoint[:, stb_to_snap_id, :]
# scale to meter
joint = joint / 1000.0
# root from palm to wrist
# joint = _stb_palm2wrist(joint) # N x K x 3 # yang lixin
joint = ge_palm2wrist(joint) # N x K x 3 #liu hao ge // vae//
self.joints.append(joint)
''' 4. load images pth '''
for idx in range(joint.shape[0]):
self.img_paths.append(os.path.join(
imgpath, "{}_{}.png".format(imgsk_prefix, idx)
))
self.dep_paths.append(os.path.join(
imgpath, "{}_{}.png".format(depsk_prefix, idx)
))
self.joints = np.concatenate(self.joints, axis=0).astype(np.float32) ##(30000, 21, 3)
for i in tqdm(range(len(self.img_paths))):
joint = self.joints[i]
kp2d_homo = self.sk_intr.dot(joint.T).T
kp2d = kp2d_homo / kp2d_homo[:, 2:3]
kp2d = kp2d[:, :2]
center = handutils.get_annot_center(kp2d)
# caculate my_scale
dep = Image.open(self.dep_paths[i]).convert("RGB")
rel_dep = self.real_dep_img(dep)
mask_rel_dep = np.argwhere(rel_dep > 1e-6)
# my_scale = handutils.get_ori_crop_scale(mask_rel_dep, side=0, kp2d=kp2d) # ori
my_scale = handutils.get_ori_crop_scale(mask_rel_dep, side=0, kp2d=kp2d,
mask_flag=False) # get bbox only from kp2d; differs little from the mask-based original, marginally better
my_scale = (np.atleast_1d(my_scale))[np.newaxis, :]
self.my_scales.append(my_scale)
self.kp2ds.append(kp2d[np.newaxis, :, :])
self.centers.append(center[np.newaxis, :])
# self.scales.append((np.atleast_1d(scale))[np.newaxis, :])
self.kp2ds = np.concatenate(self.kp2ds, axis=0).astype(np.float32) # (N, 21, 2)
self.centers = np.concatenate(self.centers, axis=0).astype(np.float32) # (N, 2)
# self.scales = np.concatenate(self.scales, axis=0).astype(np.float32) # (N, 1)
self.my_scales = np.concatenate(self.my_scales, axis=0).astype(np.float32) # (N, 1)
if use_cache:
full_info = {
"img_paths": self.img_paths,
"dep_paths": self.dep_paths,
"joints": self.joints,
"kp2ds": self.kp2ds,
"centers": self.centers,
# "scales": self.scales,
"my_scales": self.my_scales,
}
with open(cache_path, "wb") as fid:
pickle.dump(full_info, fid)
print("Wrote cache for dataset stb {} to {}".format(
self.data_split, cache_path
))
return
def __len__(self):
"""for STB dataset total (1,500 * 2) * 2 * 6 = 36,000 samples
:return - if is train: 30,000 samples
:return - if is eval: 6,000 samples
"""
return len(self.img_paths)
def __str__(self):
info = "STB {} set. lenth {}".format(
self.data_split, len(self.img_paths)
)
return colored(info, 'blue', attrs=['bold'])
def _is_valid(self, clr, index):
valid_data = isinstance(clr, (np.ndarray, PIL.Image.Image))
if not valid_data:
raise Exception("Encountered error processing stb[{}]".format(index))
return valid_data
def get_sample(self, index): # replace __getitem__
flip = True if self.hand_side != 'left' else False
intr = self.sk_intr
# prepare color image
clr = Image.open(self.img_paths[index]).convert("RGB")
self._is_valid(clr, index)
# prepare joint
joint = self.joints[index].copy() # (21, 3)
# prepare kp2d
kp2d = self.kp2ds[index].copy()
center = self.centers[index].copy()
# scale = self.scales[index].copy()
my_scale = self.my_scales[index].copy()
if self.dep_paths[index]:
dep = Image.open(self.dep_paths[index]).convert("RGB")
### depth values are encoded as |mod|div|0| across the RGB channels
self._is_valid(dep, index)
valid_dep = True
else:
dep = None
valid_dep = False
if flip:
clr = clr.transpose(Image.FLIP_LEFT_RIGHT)
center[0] = clr.size[0] - center[0]
kp2d[:, 0] = clr.size[0] - kp2d[:, 0]
joint[:, 0] = -joint[:, 0]
if valid_dep:
dep = dep.transpose(Image.FLIP_LEFT_RIGHT)
# visualization
if self.visual:
clr_ = np.array(clr)
dep_ = np.array(dep)
fig = plt.figure(figsize=(20, 20))
plt.subplot(2, 4, 1)
clr1 = clr_.copy()
rel_dep = self.real_dep_img(dep_)
mask_rel_dep = np.argwhere(rel_dep > 1e-6)
rmin, cmin = mask_rel_dep.min(0)
rmax, cmax = mask_rel_dep.max(0)
cv2.rectangle(clr1, (cmin, rmin), (cmax, rmax), (255), thickness=3)
plt.imshow(clr1)
plt.title('Color+BoundingBox')
plt.subplot(2, 4, 2)
dep1 = dep_.copy()
plt.imshow(dep1)
plt.title('Depth')
clr_dep = clr_ + dep_
plt.subplot(2, 4, 3)
plt.imshow(clr_dep)
plt.title('Color+Depth')
rel_dep_ = rel_dep.copy()
plt.subplot(2, 4, 4)
plt.imshow(rel_dep_)
plt.title('real_Depth')
plt.subplot(2, 4, 5)
plt.imshow(clr.copy())
plt.title('Color')
plt.subplot(2, 4, 6)
plt.imshow(clr.copy())
plt.plot(kp2d[:, :1], kp2d[:, 1:], 'ro')
plt.title('Color+2D annotations')
| |
s, g = check("Hún skrifar fyrir Dv og Rúv.")
assert "DV" in s
assert "RÚV" in s
assert g[4].error_code == "Z006" # DV
assert g[6].error_code == "Z006" # RÚV
s, g = check("Guðrún lék hlutverk Ms. Abercrombie í þáttunum")
assert "Ms." in s
assert g[4].error_code != "Z006"
s, g = check("Sigurður lék hlutverk Mr. Smith í leikritinu")
assert "Mr." in s
assert g[4].error_code != "Z006"
s, g = check("Hr. Hnetusmjör hélt tónleika í Kópavogi í kvöld")
assert "Hr." in s
assert g[1].error_code != "Z006"
s, g = check("Hann er Suðurkóreskur og er suður-kóreumaður frá suður-kóreu.")
assert "suðurkóreskur" in s
assert "Suður-Kóreumaður" in s
assert "Suður-Kóreu" in s
s, g = check("Hann er Norðurkóreskur og er norður-kóreumaður frá norður-kóreu.")
assert "norðurkóreskur" in s
assert "Norður-Kóreumaður" in s
assert "Norður-Kóreu" in s
s, g = check("Hann er Nýsjálenskur og er nýsjálendingur frá nýja-sjálandi.")
assert "nýsjálenskur" in s
assert "Nýsjálendingur" in s
assert "Nýja-Sjálandi" in s
s, g = check("Hann er Suðurafrískur og er suður-afríkumaður frá suður-afríku.")
assert "suðurafrískur" in s
assert "Suður-Afríkumaður" in s
assert "Suður-Afríku" in s
s, g = check("Þau heimsóttu norðurland og hittu norðlendinga í Meistaradeild.")
assert "Norðurland" in s
assert "Norðlendinga" in s
# assert "meistaradeild" in s
s, g = check("Haldið er upp á Páskadag, Verslunarmannahelgina, Jólin og Bóndadag.")
assert "páskadag" in s
assert "verslunarmannahelgina" in s
assert "jólin" in s
assert "bóndadag" in s
assert g[5].error_code == "Z001" # páskadag
assert g[7].error_code == "Z001" # verslunarmannahelgina
assert g[9].error_code == "Z001" # jólin
assert g[11].error_code == "Z001" # bóndadag
s, g = check(
"Talað var við Dómsmálaráðherra, Ríkissaksóknara, Biskupinn og Doktorinn "
"á Mánudögum og Þriðjudögum."
)
assert "dómsmálaráðherra" in s
# assert "ríkissaksóknara" in s
# assert "biskupinn" in s
assert "doktorinn" in s
assert "mánudögum" in s
assert "þriðjudögum" in s
assert g[4].error_code == "Z001" # dómsmálaráðherra
# assert g[6].error_code == "Z001" #ríkissaksóknara
# assert g[8].error_code == "Z001" # biskupinn
assert g[10].error_code == "Z001" # doktorinn
assert g[12].error_code == "Z001" # mánudögum
assert g[14].error_code == "Z001" # þriðjudögum
s, g = check(
"Þau læra Íslensku og Landafræði með Allsherjarþinginu og Öryggisráðinu en líka um Indóevrópsk mál og Óðinshana."
)
assert "íslensku" in s
assert "landafræði" in s
assert "allsherjarþinginu" in s
assert "öryggisráðinu" in s
assert "indóevrópsk" in s
assert g[3].error_code == "Z001" # íslensku
assert g[5].error_code == "Z001" # landafræði
assert g[7].error_code == "Z001" # allsherjarþinginu
assert g[9].error_code == "Z001" # öryggisráðinu
assert g[13].error_code == "Z001" # indóevrópsk
assert g[16].error_code == "Z001" # óðinshana
s, g = check(
"Í Vín má kaupa Vínartertu og Rínarvín en Póstmódernismi og Maríutásur eru vinsælar."
)
assert "Vín" in s
assert "vínartertu" in s
assert "rínarvín" in s
assert "póstmódernismi" in s
assert "maríutásur" in s
assert g[5].error_code == "Z001" # vínartertu
assert g[7].error_code == "Z001" # rínarvín
assert g[9].error_code == "Z001" # póstmódernismi
assert g[11].error_code == "Z001" # maríutásur
def test_inflectional_errors(verbose=False):
# inflection errors
# same as test_error_forms in test_tokenizer.py
s, g = check("Tréið gekk til rekstar rúmmsins.")
assert "Tréð" in s
assert "rekstrar" in s
assert "rúmsins" in s
assert g[1].error_code == "S002" # tréð
assert g[4].error_code == "S002" or g[4].error_code == "ASLSTAFVANTAR" # rekstrar
assert g[5].error_code == "S002" # rúmsins
s, g = check("Þér finndist víðfermt í árverkni.")
assert "fyndist" in s
assert "víðfeðmt" in s # TODO greinir sem so. víð-ferma!
assert "árvekni" in s
assert g[2].error_code in frozenset(["S001", "I4Y"]) # fyndist
assert g[3].error_code == "S004" # víðfeðmt
assert g[5].error_code in frozenset(["S001", "EKKIORD"]) # árvekni
s, g = check("Ein kúin kom aldrei til baka vegna eldingunnar.")
assert "kýrin" in s
assert "eldingarinnar" in s
assert g[2].error_code == "BEYGVILLA" # kýrin
assert g[7].error_code == "S002" # eldingarinnar
s, g = check("Lítum til áttunda áratugsins til upplýsingu.")
assert "áratugarins" in s
assert "upplýsingar" in s
assert g[4].error_code == "S001" # áratugarins
# assert g[6].error_code == "P_NT_FsMeðFallstjórn" # TODO Picked up in sentence-level annotation, not available in the tokens
s, g = check("Nánar tiltekið árins 1978, fjóru og hálfu ári fyrir byltinguna.")
assert "ársins" in s
assert "fjórum" in s
assert g[2].error_code == "S001" # ársins
assert g[5].error_code in frozenset(["S001", "ASLSTAFVANTAR"]) # fjórum
s, g = check(
"Frumkvöðullinn aldist upp í litlu sjávarþorpi án föðurs og ýmsra þæginda."
)
assert "ólst upp" in s
assert "föður" in s
assert "ýmissa" in s
assert g[2].error_code in frozenset(["S002", "S001"]) # ólst
assert g[8].error_code == "BEYGVILLA" # föður
assert g[10].error_code == "S002" # ýmissa
s, g = check(
"Friðsælari leið hefði verið að hundruðir leituðu í geiminum að kílómeter "
"af féinu vegna ástandins."
)
assert "Friðsælli" in s
assert "hundruð" in s
assert "geimnum" in s
# assert "kílómetra" in s # TODO Parsing chooses 'kílómetri'
# assert "fénu" in s # TODO cut out of sentence for now
assert "ástandsins" in s
assert g[1].error_code == "S002" # friðsælli
assert g[6].error_code in frozenset(["S001", "BEYGVILLA"]) # hundruð
assert g[9].error_code in frozenset(["S001", "I40-ÞGF"]) # geimnum
assert g[11].error_code == "S001" # kílómetra
assert g[13].error_code == "S002" # fénu
assert g[15].error_code == "S002" # ástandsins
s, g = check("Loks gekk hann til Selfosss tuttugusta dag samningins.")
assert "Selfoss" in s
assert "tuttugasta" in s
assert "samningsins" in s
assert g[5].error_code == "S002" # Selfoss
assert g[6].error_code in frozenset(["S001", "BEYGVILLA"]) # tuttugasta
assert g[8].error_code in frozenset(["S001", "S-EFGR"]) # samningsins
def test_wrong_first_parts(verbose=False):
# C004: Wrong first part in a compound (heyrna-laus, náms-skrá)
s, g = check("Kvenngormar eru feyknaskemmtilegir en ekki fyrnauppteknir.")
assert "Kvengormar" in s
assert "Kvenngormar" not in s
# assert "feiknaskemmtilegir" in s # TODO virkar ekki eins og er
# assert "feyknaskemmtilegir" not in s
assert "firnauppteknir" in s
assert "fyrnauppteknir" not in s
# assert g[1].error_code == "C004" # Fæ C002; eftir að útfæra villukóða
# assert g[3].error_code == "C004" # Virkar ekki
# assert g[6].error_code == "C004" # TODO fæ C002
s, g = check("Ég fékk heyrnatól hjá eyrnarlækninum.")
assert "heyrnartól" in s
assert "heyrnatól" not in s
# assert "eyrnalækninum" in s # TODO Ætti að virka þegar geri ný orðanet
# assert "eyrnarlækninum" not in s
assert g[3].error_code == "C006"
# assert g[5].error_code == "C006" # TODO virkar ekki
s, g = check("Lundúnarloftið er næringaríkt í ár.")
# assert "Lundúnaloftið" in s
# assert "Lundúnarloftið" not in s
assert "næringarríkt" in s
assert "næringaríkt" not in s
# assert g[1].error_code == "C006"
assert g[3].error_code == "C006"
s, g = check("Öldungardeildarþingmaðurinn keyrði díselbíl á hringveginum.")
# assert "Öldungadeildarþingmaðurinn" in s # TODO Ætti að virka þegar ný orðanet
# assert "Öldungardeildarþingmaðurinn" not in s
# assert "dísilbíl" in s # TODO Ætti að virka þegar ný orðanet
# assert "díselbíl" not in s
# assert g[1].error_code == "C006"
# assert g[3].error_code == "C006"
def test_single_first_parts(verbose=False):
# C003: Single first parts in a sentence that we are fully confident about (all kaldur, )
# C005: Possibly single first parts, but not certain; suggestion to merge
s, g = check("Hann var all kaldur þegar hann fannst enda var hann hálf ber.")
# assert "allkaldur" in s # Sameina ekki, því 'kaldur' gæti verið no.
# assert "all kaldur" not in s
assert "hálfber" in s
assert "hálf ber" not in s
assert g[3].error_code in frozenset(["C005/w", "S001"]) # all
assert g[11].error_code == "C003" # hálfber
s, g = check("Hún setti honum afar kosti í for vinnunni.")
assert "afarkosti" in s
assert "afar kosti" not in s
assert "forvinnunni" in s
assert "for vinnunni" not in s
assert g[4].error_code == "C003"
assert g[6].error_code == "C003"
s, g = check("Hér er afbragðs matur fyrir allsherjar gesti í langtíma gistingu.")
assert "afbragðsmatur" in s
assert "allsherjargesti" in s
assert "langtímagistingu" in s
assert g[3].error_code == "C003"
assert g[5].error_code == "C003"
assert g[7].error_code == "C003"
def test_single_last_parts(verbose=False):
# M003: Single latter parts in a sentence (græn keri, arf beri, barn dómur)
s, g = check(
"Hann gekk í barn dóm þegar hann komst að því að | |
sequence of tuples specifying the uniform volume fractions to be assigned. This
argument is valid only when *useFields*=FALSE. Each tuple contains two entries: a Region
object, and a tuple of Floats specifying the uniform volume fraction values. The length of
the tuple must match the number of material instance names specified in the Eulerian
section assigned to part instances specified by *instanceList*.
fieldList
A sequence of tuples specifying the discrete volume fractions to be assigned. This
argument is valid only when *useFields*=TRUE. Each tuple contains two entries: a Region
object, and a tuple of Strings specifying Discrete Field names. The length of the tuple must
match the number of material instance names specified in the Eulerian section assigned
to part instances specified by *instanceList*.
colorList
A sequence of three Ints specifying colors used to display the material instance
assignments. This is a sequence of R,G,B colors, where the values are represented by
integers between 0 and 255. The default value is an empty sequence.
Returns
-------
A MaterialAssignment object.
"""
self.predefinedFields[name] = predefinedField = MaterialAssignment(name, instanceList, useFields,
assignmentList, fieldList, colorList)
return predefinedField
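# A minimal usage sketch (not part of the original module). The model, part
# instance and region names below are hypothetical placeholders; with
# useFields=False each assignmentList entry pairs a Region with a tuple of
# uniform volume fractions, as described in the docstring above.
#
#   inst = mdb.models['Model-1'].rootAssembly.instances['EulerianPart-1']
#   mdb.models['Model-1'].MaterialAssignment(
#       name='MatAssign-1', instanceList=(inst,), useFields=False,
#       assignmentList=((region, (1.0,)),))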
def Stress(self, name: str, region: Region, distributionType: SymbolicConstant = UNIFORM,
sigma11: float = None, sigma22: float = None, sigma33: float = None,
sigma12: float = None, sigma13: float = None, sigma23: float = None) -> Stress:
"""This method creates a Stress predefined field object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].Stress
Parameters
----------
name
A String specifying the repository key.
region
A Region object specifying the region to which the predefined field is applied. Region
is ignored if the predefined field has *distributionType*=FROM_FILE.
distributionType
A SymbolicConstant specifying whether the load is uniform. Possible values are UNIFORM
and FROM_FILE. The default value is UNIFORM.
sigma11
A Float specifying the first principal component of the stress.
sigma22
A Float specifying the second principal component of the stress.
sigma33
A Float specifying the third principal component of the stress.
sigma12
A Float specifying the first shear component of the stress.
sigma13
A Float specifying the second shear component of the stress.
sigma23
A Float specifying the third shear component of the stress.
Returns
-------
A Stress object.
"""
self.predefinedFields[name] = predefinedField = Stress(name, region, distributionType, sigma11, sigma22,
sigma33, sigma12, sigma13, sigma23)
return predefinedField
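# A minimal usage sketch (not part of the original module). 'Model-1' and the
# Region object `region` are assumed to exist already; the stress magnitudes
# are arbitrary example values.
#
#   from abaqusConstants import UNIFORM
#   mdb.models['Model-1'].Stress(
#       name='InitialStress', region=region, distributionType=UNIFORM,
#       sigma11=-5.0e6, sigma22=-5.0e6, sigma33=-5.0e6,
#       sigma12=0.0, sigma13=0.0, sigma23=0.0)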
def Temperature(self, name: str, createStepName: str, region: Region,
distributionType: SymbolicConstant = UNIFORM,
crossSectionDistribution: SymbolicConstant = CONSTANT_THROUGH_THICKNESS,
field: str = '', amplitude: str = UNSET, fileName: str = '',
beginStep: SymbolicConstant = None, beginIncrement: SymbolicConstant = None,
endStep: SymbolicConstant = None, endIncrement: SymbolicConstant = None,
interpolate: SymbolicConstant = OFF, magnitudes: str = '',
absoluteExteriorTolerance: float = 0, exteriorTolerance: float = 0) -> Temperature:
"""This method creates a Temperature object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].Temperature
Parameters
----------
name
A String specifying the repository key.
createStepName
A String specifying the name of the step in which the predefined field is created.
region
A Region object specifying the region to which the predefined field is applied. *Region*
is ignored if the predefined field has a *distributionType* member available, and
*distributionType*=FROM_FILE.
distributionType
A SymbolicConstant specifying how the predefined field varies spatially. Possible values
are UNIFORM, USER_DEFINED, FROM_FILE, FIELD, FROM_FILE_AND_USER_DEFINED, and
DISCRETE_FIELD. The default value is UNIFORM.
crossSectionDistribution
A SymbolicConstant specifying how the predefined field is distributed over the cross
section of the region. Possible values are
- CONSTANT_THROUGH_THICKNESS
- GRADIENTS_THROUGH_SHELL_CS
- GRADIENTS_THROUGH_BEAM_CS
- POINTS_THROUGH_SECTION
The default value is CONSTANT_THROUGH_THICKNESS.
field
A String specifying the name of the AnalyticalField or DiscreteField object associated
with this predefined field. The *field* argument applies only when
*distributionType*=FIELD or *distributionType*=DISCRETE_FIELD. The default value is an
empty string.
amplitude
A String or the SymbolicConstant UNSET specifying the name of the amplitude reference.
UNSET should be used if the predefined field has no amplitude reference. The default
value is UNSET. Note: *amplitude* should be given only if it is valid for the specified
step.
fileName
A String specifying the name of the file from which the temperature values are to be
read when *distributionType*=FROM_FILE or *distributionType*=FROM_FILE_AND_USER_DEFINED.
beginStep
An Int specifying the first step from which temperature values are to be read or the
SymbolicConstant FIRST_STEP or LAST_STEP. This argument is valid only when
*distributionType*=FROM_FILE or *distributionType*=FROM_FILE_AND_USER_DEFINED. The
default value is None.
beginIncrement
An Int specifying the first increment of the step set in *beginStep* or the
SymbolicConstants STEP_START or STEP_END. This argument is valid only when
*distributionType*=FROM_FILE or *distributionType*=FROM_FILE_AND_USER_DEFINED. The
default value is None.
endStep
An Int specifying the last step from which temperature values are to be read or the
SymbolicConstants FIRST_STEP and LAST_STEP. This argument is valid only when
*distributionType*=FROM_FILE or *distributionType*=FROM_FILE_AND_USER_DEFINED. The
default value is None.
endIncrement
An Int specifying the last increment of the step set in *endStep* or the
SymbolicConstants STEP_START and STEP_END. This argument is valid only when
*distributionType*=FROM_FILE or *distributionType*=FROM_FILE_AND_USER_DEFINED. The
default value is None.
interpolate
A SymbolicConstant specifying whether to interpolate a field read from an output
database or results file. Possible values are OFF, ON or MIDSIDE_ONLY. The default value
is OFF.
magnitudes
A Sequence of Doubles specifying the temperature values when *distributionType*=UNIFORM
or FIELD. The value of the *magnitudes* argument is a function of the
*crossSectionDistribution* argument, as shown in the following list:
- If *crossSectionDistribution*=CONSTANT_THROUGH_THICKNESS then *magnitudes* is a Double
specifying the temperature.
- If *crossSectionDistribution*=GRADIENTS_THROUGH_SHELL_CS then *magnitudes* is a
sequence of Doubles specifying the mean value and the gradient in the thickness
direction.
- If *crossSectionDistribution*=GRADIENTS_THROUGH_BEAM_CS then *magnitudes* is a
sequence of Doubles specifying the mean value, the gradient in the N1 direction, and the
gradient in the N2 direction.
- If *crossSectionDistribution*=POINTS_THROUGH_SECTION then *magnitudes* is a sequence
of Doubles specifying the temperature at each point.
absoluteExteriorTolerance
A Float specifying the absolute value by which a driven node of the field can lie
outside the region of the elements of the global model. The default value is 0.0. This
argument cannot be used with *midside*.
exteriorTolerance
A Float specifying the fraction of the average element size in the global model by which
a driven node of the field can lie outside the region of the elements of the global
model. The default value is 0.0. This argument cannot be used with *midside*.
Returns
-------
A Temperature object.
"""
self.predefinedFields[name] = predefinedField = Temperature(name, createStepName, region, distributionType,
crossSectionDistribution, field, amplitude,
fileName, beginStep, beginIncrement, endStep,
endIncrement, interpolate, magnitudes,
absoluteExteriorTolerance, exteriorTolerance)
return predefinedField
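# A minimal usage sketch (not part of the original module). The model name,
# step name and Region are assumed; with
# crossSectionDistribution=CONSTANT_THROUGH_THICKNESS the magnitudes argument
# reduces to a single temperature value per the docstring above.
#
#   from abaqusConstants import UNIFORM, CONSTANT_THROUGH_THICKNESS
#   mdb.models['Model-1'].Temperature(
#       name='InitialTemp', createStepName='Initial', region=region,
#       distributionType=UNIFORM,
#       crossSectionDistribution=CONSTANT_THROUGH_THICKNESS,
#       magnitudes=(20.0,))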
def Velocity(self, name: str, region: Region, velocity1: float, velocity2: float, velocity3: float,
omega: float, axisBegin: tuple, axisEnd: tuple, field: str = '',
distributionType: SymbolicConstant = MAGNITUDE) -> Velocity:
"""This method creates a Velocity predefined field object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].Velocity
Parameters
----------
name
A String specifying the repository key.
region
A Region object specifying the region to which the predefined field is applied.
velocity1
A Float specifying the first component of the velocity.
velocity2
A Float specifying the second component of the velocity.
velocity3
A Float specifying the third component of the velocity.
omega
A Float specifying the angular velocity.
axisBegin
A sequence of Floats specifying the *X-*, *Y-*, and *Z*- coordinates of the starting
point of the axis about which *omega* is defined.
axisEnd
A sequence of Floats specifying the *X-*, *Y-*, and *Z*- coordinates of the end point of
the axis about which *omega* is defined.
field
A String specifying the name of the AnalyticalField object associated with this
predefined field. The *field* argument applies only when
*distributionType*=FIELD_ANALYTICAL. The default value is an empty string.
distributionType
A SymbolicConstant specifying whether the load is uniform. Possible values are MAGNITUDE
and FIELD_ANALYTICAL. The default value is MAGNITUDE.
Returns
-------
A Velocity object.
"""
self.predefinedFields[name] = predefinedField = Velocity(name, region, velocity1, velocity2, velocity3, omega,
axisBegin, axisEnd, field, | |
of training items.
Parameters
----------
fold : int > 0 [scalar]
Fold id, if zero all meta data is returned.
(Default value=0)
scene_label : str
Scene label
Default value "None"
Returns
-------
list : list of dicts
List containing all meta data assigned to training set for given fold.
"""
if fold not in self.crossvalidation_data_train:
self.crossvalidation_data_train[fold] = {}
for scene_label_ in self.scene_labels:
if scene_label_ not in self.crossvalidation_data_train[fold]:
self.crossvalidation_data_train[fold][scene_label_] = MetaDataContainer()
if fold > 0:
self.crossvalidation_data_train[fold][scene_label_] = MetaDataContainer(
filename=self._get_evaluation_setup_filename(setup_part='train', fold=fold, scene_label=scene_label_)).load()
else:
self.crossvalidation_data_train[0][scene_label_] = self.meta_container.filter(
scene_label=scene_label_
)
for item in self.crossvalidation_data_train[fold][scene_label_]:
item['file'] = self.relative_to_absolute_path(item['file'])
if scene_label:
return self.crossvalidation_data_train[fold][scene_label]
else:
data = MetaDataContainer()
for scene_label_ in self.scene_labels:
data += self.crossvalidation_data_train[fold][scene_label_]
return data
def test(self, fold=0, scene_label=None):
"""List of testing items.
Parameters
----------
fold : int > 0 [scalar]
Fold id, if zero all meta data is returned.
(Default value=0)
scene_label : str
Scene label
Default value "None"
Returns
-------
list : list of dicts
List containing all meta data assigned to testing set for given fold.
"""
if fold not in self.crossvalidation_data_test:
self.crossvalidation_data_test[fold] = {}
for scene_label_ in self.scene_labels:
if scene_label_ not in self.crossvalidation_data_test[fold]:
self.crossvalidation_data_test[fold][scene_label_] = MetaDataContainer()
if fold > 0:
self.crossvalidation_data_test[fold][scene_label_] = MetaDataContainer(
filename=self._get_evaluation_setup_filename(
setup_part='test', fold=fold, scene_label=scene_label_)
).load()
else:
self.crossvalidation_data_test[0][scene_label_] = self.meta_container.filter(
scene_label=scene_label_
)
for item in self.crossvalidation_data_test[fold][scene_label_]:
item['file'] = self.relative_to_absolute_path(item['file'])
if scene_label:
return self.crossvalidation_data_test[fold][scene_label]
else:
data = MetaDataContainer()
for scene_label_ in self.scene_labels:
data += self.crossvalidation_data_test[fold][scene_label_]
return data
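# A minimal usage sketch (not part of the original module): `db` is assumed to
# be an initialized instance of the enclosing dataset class and 'home' a valid
# scene label for it. Fold 0 returns all meta data, fold > 0 the corresponding
# cross-validation split.
#
#   train_items = db.train(fold=1, scene_label='home')
#   test_items = db.test(fold=1, scene_label='home')
#   all_items = db.train(fold=0)  # every annotated item, across all scenes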
class SyntheticSoundEventDataset(SoundEventDataset):
def __init__(self, *args, **kwargs):
super(SyntheticSoundEventDataset, self).__init__(*args, **kwargs)
self.dataset_group = 'base class'
def initialize(self):
# Create the dataset path if does not exist
if not os.path.isdir(self.local_path):
os.makedirs(self.local_path)
if not self.check_filelist():
self.download_packages()
self.extract()
self._save_filelist_hash()
self.synthesize()
return self
@before_and_after_function_wrapper
def synthesize(self):
pass
class AudioTaggingDataset(Dataset):
def __init__(self, *args, **kwargs):
super(AudioTaggingDataset, self).__init__(*args, **kwargs)
self.dataset_group = 'base class'
# =====================================================
# DCASE 2017
# =====================================================
class TUTAcousticScenes_2017_DevelopmentSet(AcousticSceneDataset):
"""TUT Acoustic scenes 2017 development dataset
This dataset is used in DCASE2017 - Task 1, Acoustic scene classification
"""
def __init__(self, *args, **kwargs):
kwargs['storage_name'] = kwargs.get('storage_name', 'TUT-acoustic-scenes-2017-development')
super(TUTAcousticScenes_2017_DevelopmentSet, self).__init__(*args, **kwargs)
self.dataset_group = 'acoustic scene'
self.dataset_meta = {
'authors': '<NAME>, <NAME>, and <NAME>',
'name_remote': 'TUT Acoustic Scenes 2017, development dataset',
'url': None,
'audio_source': 'Field recording',
'audio_type': 'Natural',
'recording_device_model': 'Roland Edirol R-09',
'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
}
self.crossvalidation_folds = 4
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/400515/files/TUT-acoustic-scenes-2017-development.doc.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2017-development.doc.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/400515/files/TUT-acoustic-scenes-2017-development.meta.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2017-development.meta.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/400515/files/TUT-acoustic-scenes-2017-development.error.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2017-development.error.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/400515/files/TUT-acoustic-scenes-2017-development.audio.1.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2017-development.audio.1.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/400515/files/TUT-acoustic-scenes-2017-development.audio.2.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2017-development.audio.2.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/400515/files/TUT-acoustic-scenes-2017-development.audio.3.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2017-development.audio.3.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/400515/files/TUT-acoustic-scenes-2017-development.audio.4.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2017-development.audio.4.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/400515/files/TUT-acoustic-scenes-2017-development.audio.5.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2017-development.audio.5.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/400515/files/TUT-acoustic-scenes-2017-development.audio.6.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2017-development.audio.6.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/400515/files/TUT-acoustic-scenes-2017-development.audio.7.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2017-development.audio.7.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/400515/files/TUT-acoustic-scenes-2017-development.audio.8.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2017-development.audio.8.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/400515/files/TUT-acoustic-scenes-2017-development.audio.9.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2017-development.audio.9.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/400515/files/TUT-acoustic-scenes-2017-development.audio.10.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2017-development.audio.10.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
}
]
def _after_extract(self, to_return=None):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not self.meta_container.exists():
meta_data = collections.OrderedDict()
for fold in range(1, self.crossvalidation_folds):
# Read train files in
fold_data = MetaDataContainer(
filename=os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')).load()
fold_data += MetaDataContainer(
filename=os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt')).load()
for item in fold_data:
if item['file'] not in meta_data:
raw_path, raw_filename = os.path.split(item['file'])
relative_path = self.absolute_to_relative(raw_path)
location_id = raw_filename.split('_')[0]
item['file'] = os.path.join(relative_path, raw_filename)
item['identifier'] = location_id
meta_data[item['file']] = item
self.meta_container.update(meta_data.values())
self.meta_container.save()
else:
self.meta_container.load()
def train(self, fold=0):
"""List of training items.
Parameters
----------
fold : int > 0 [scalar]
Fold id, if zero all meta data is returned.
(Default value=0)
Returns
-------
list : list of dicts
List containing all meta data assigned to training set for given fold.
"""
if fold not in self.crossvalidation_data_train:
self.crossvalidation_data_train[fold] = []
if fold > 0:
self.crossvalidation_data_train[fold] = MetaDataContainer(
filename=os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')).load()
for item in self.crossvalidation_data_train[fold]:
item['file'] = self.relative_to_absolute_path(item['file'])
raw_path, raw_filename = os.path.split(item['file'])
location_id = raw_filename.split('_')[0]
item['identifier'] = location_id
else:
self.crossvalidation_data_train[0] = self.meta_container
return self.crossvalidation_data_train[fold]
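# A minimal usage sketch (not part of the original module). The constructor
# keyword for the storage location (shown here as data_path) comes from the
# base Dataset class and is an assumption, as is the initialize() call.
#
#   db = TUTAcousticScenes_2017_DevelopmentSet(data_path='data')
#   db.initialize()                 # download, extract and build meta data
#   train_items = db.train(fold=1)  # items for cross-validation fold 1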
class TUTAcousticScenes_2017_EvaluationSet(AcousticSceneDataset):
"""TUT Acoustic scenes 2017 evaluation dataset
This dataset is used in DCASE2017 - Task 1, Acoustic scene classification
"""
def __init__(self, *args, **kwargs):
kwargs['storage_name'] = kwargs.get('storage_name', 'TUT-acoustic-scenes-2017-evaluation')
super(TUTAcousticScenes_2017_EvaluationSet, self).__init__(*args, **kwargs)
self.reference_data_present = False
self.dataset_group = 'acoustic scene'
self.dataset_meta = {
'authors': '<NAME>, <NAME>, and <NAME>',
'name_remote': 'TUT Acoustic Scenes 2017, evaluation dataset',
'url': None,
'audio_source': 'Field recording',
'audio_type': 'Natural',
'recording_device_model': 'Roland Edirol R-09',
'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
}
self.crossvalidation_folds = 1
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
}
]
def _after_extract(self, to_return=None):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not self.meta_container.exists():
meta_data = collections.OrderedDict()
for fold in range(1, self.crossvalidation_folds):
# Read train files in
fold_data = MetaDataContainer(
filename=os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')).load()
for item in fold_data:
if item['file'] not in meta_data:
raw_path, raw_filename = os.path.split(item['file'])
relative_path = self.absolute_to_relative(raw_path)
location_id = raw_filename.split('_')[0]
item['file'] = os.path.join(relative_path, raw_filename)
meta_data[item['file']] = item
self.meta_container.update(meta_data.values())
self.meta_container.save()
else:
self.meta_container.load()
def train(self, fold=0):
return []
def test(self, fold=0):
return []
class TUTRareSoundEvents_2017_DevelopmentSet(SyntheticSoundEventDataset):
"""TUT Acoustic scenes 2017 development dataset
This dataset is used in DCASE2017 - Task 1, Acoustic scene classification
"""
def __init__(self, *args, **kwargs):
kwargs['storage_name'] = kwargs.get('storage_name', 'TUT-rare-sound-events-2017-development')
kwargs['filelisthash_exclude_dirs'] = kwargs.get('filelisthash_exclude_dirs', ['generated_data'])
self.synth_parameters = DottedDict({
'train': {
'seed': 42,
'mixture': {
'fs': 44100,
'bitdepth': 24,
'length_seconds': 30.0,
'anticlipping_factor': 0.2,
},
'event_presence_prob': 0.5,
'mixtures_per_class': 500,
'ebr_list': [-6, 0, 6],
},
'test': {
'seed': 42,
'mixture': {
'fs': 44100,
'bitdepth': 24,
'length_seconds': 30.0,
'anticlipping_factor': 0.2,
},
'event_presence_prob': 0.5,
'mixtures_per_class': 500,
'ebr_list': [-6, 0, 6],
}
})
# Override synth parameters
if kwargs.get('synth_parameters'):
self.synth_parameters.merge(kwargs.get('synth_parameters'))
# Meta filename depends on synth parameters
meta_filename = 'meta_'+self.synth_parameters.get_hash_for_path()+'.txt'
kwargs['meta_filename'] = kwargs.get('meta_filename', os.path.join('generated_data', meta_filename))
# Initialize baseclass
super(TUTRareSoundEvents_2017_DevelopmentSet, self).__init__(*args, **kwargs)
self.dataset_group = 'sound event'
self.dataset_meta = {
'authors': '<NAME>, <NAME>, <NAME>, and <NAME>',
'name_remote': 'TUT Rare Sound Events 2017, development dataset',
'url': None,
'audio_source': 'Synthetic',
'audio_type': 'Natural',
'recording_device_model': 'Unknown',
'microphone_model': 'Unknown',
}
self.crossvalidation_folds = 1
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'http://www.cs.tut.fi/sgn/arg/dcase2017/data/TUT-rare-sound-events-2017-development/TUT-rare-sound-events-2017-development.doc.zip',
'local_package': os.path.join(self.local_path, 'TUT-rare-sound-events-2017-development.doc.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'http://www.cs.tut.fi/sgn/arg/dcase2017/data/TUT-rare-sound-events-2017-development/TUT-rare-sound-events-2017-development.code.zip',
'local_package': os.path.join(self.local_path, 'TUT-rare-sound-events-2017-development.code.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'http://www.cs.tut.fi/sgn/arg/dcase2017/data/TUT-rare-sound-events-2017-development/TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.1.zip',
'local_package': os.path.join(self.local_path, 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.1.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'http://www.cs.tut.fi/sgn/arg/dcase2017/data/TUT-rare-sound-events-2017-development/TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.2.zip',
'local_package': os.path.join(self.local_path, 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.2.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'http://www.cs.tut.fi/sgn/arg/dcase2017/data/TUT-rare-sound-events-2017-development/TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.3.zip',
'local_package': os.path.join(self.local_path, 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.3.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'http://www.cs.tut.fi/sgn/arg/dcase2017/data/TUT-rare-sound-events-2017-development/TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.4.zip',
'local_package': os.path.join(self.local_path, 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.4.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'http://www.cs.tut.fi/sgn/arg/dcase2017/data/TUT-rare-sound-events-2017-development/TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.5.zip',
'local_package': os.path.join(self.local_path, 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.5.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'http://www.cs.tut.fi/sgn/arg/dcase2017/data/TUT-rare-sound-events-2017-development/TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.6.zip',
'local_package': os.path.join(self.local_path, 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.6.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'http://www.cs.tut.fi/sgn/arg/dcase2017/data/TUT-rare-sound-events-2017-development/TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.7.zip',
'local_package': os.path.join(self.local_path, 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.7.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'http://www.cs.tut.fi/sgn/arg/dcase2017/data/TUT-rare-sound-events-2017-development/TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.8.zip',
'local_package': os.path.join(self.local_path, 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.8.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'http://www.cs.tut.fi/sgn/arg/dcase2017/data/TUT-rare-sound-events-2017-development/TUT-rare-sound-events-2017-development.source_data_events.zip',
'local_package': os.path.join(self.local_path, 'TUT-rare-sound-events-2017-development.source_data_events.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
}
]
@property
def event_labels(self, scene_label=None):
"""List of unique event labels in the meta data.
Parameters
----------
Returns
-------
labels : list
List of event labels in alphabetical order.
"""
labels = ['babycry', 'glassbreak', 'gunshot']
labels.sort()
return labels
def train(self, fold=0, event_label=None):
"""List of training items.
Parameters
----------
fold : int > 0 [scalar]
Fold id, if zero all meta data is returned.
(Default value=0)
event_label : str
Event label
Default value "None"
Returns
-------
list : list of dicts
List containing all meta data assigned to training set for given fold.
"""
if fold not in self.crossvalidation_data_train:
self.crossvalidation_data_train[fold] = {}
for event_label_ in self.event_labels:
if event_label_ not in self.crossvalidation_data_train[fold]:
self.crossvalidation_data_train[fold][event_label_] = MetaDataContainer()
if fold == 1:
params_hash = self.synth_parameters.get_hash_for_path('train')
mixture_meta_path = os.path.join(
self.local_path,
'generated_data',
'mixtures_devtrain_' + params_hash,
'meta'
)
event_list_filename = os.path.join(
mixture_meta_path,
'event_list_devtrain_' + event_label_ + '.csv'
)
self.crossvalidation_data_train[fold][event_label_] = MetaDataContainer(
filename=event_list_filename).load()
elif fold == 0:
params_hash = self.synth_parameters.get_hash_for_path('train')
mixture_meta_path = os.path.join(
self.local_path,
'generated_data',
'mixtures_devtrain_' + params_hash,
'meta'
)
event_list_filename | |
self.ff_height > (bfh_chk + self.joist_space):
logger.warning('basement is above grade!')
#get the minimum value
if self.boh_max_val is None: #calculate and set
'this means we are non dynamic'
s_raw = self.model.bsmt_opn_ht_code
s = re.sub(r'\)', '', s_raw[5:])
self.boh_max_val = float(s) #pull the number out of the brackets
max_val = self.boh_max_val
# get the basement anchor el
B_f_height = float(self.geo_dxcol.loc['height',('B','t')]) #pull from frame
bsmt_anchor_el = self.anchor_el - B_f_height - self.joist_space #basement curve
#get the distance to grade
bsmt_to_dem = self.dem_el - bsmt_anchor_el
if bsmt_to_dem <0: #floating basements
bsmt_opn_ht = 0
else:
#take the min of all three
bsmt_opn_ht = min(B_f_height, bsmt_to_dem, max_val)
#===================================================================
# wrap
#===================================================================
if self.db_f:
#check basement anchor elevation logic
if bsmt_anchor_el > self.anchor_el:
raise Error('%s basement anchor el (%.2f) is above the main anchor el (%.2f)'
%(self.name, bsmt_anchor_el, self.anchor_el))
"""letting this happen for now"""
if bsmt_to_dem < 0:
logger.debug('\n dem_el=%.2f, bsmt_anchor_el=%.2f, B_f_height=%.2f, anchor_el=%.2f'
%(self.dem_el, bsmt_anchor_el, B_f_height, self.anchor_el))
logger.warning('%s basement is above grade! bsmt_anchor_el (%.2f) > dem_el (%.2f)'
%(self.name, bsmt_anchor_el, self.dem_el))
#detailed output
boolar = np.array([B_f_height, bsmt_to_dem, max_val, 0]) == bsmt_opn_ht #identify which one you pulled from
selected = np.array(['B_f_height', 'bsmt_to_dem', 'max_val', 'zero'])[boolar]
logger.debug('got bsmt_opn_ht = %.2f from \'%s\''%(bsmt_opn_ht, selected[0]))
else:
logger.debug('got bsmt_opn_ht = %.2f' % bsmt_opn_ht)
#=======================================================================
# from user provided float
#=======================================================================
else:
bsmt_opn_ht = float(self.model.bsmt_opn_ht_code)
#=======================================================================
# post checks
#=======================================================================
if self.db_f:
if not bsmt_opn_ht >= 0:
logger.error('\n dem_el=%.2f, bsmt_anchor_el=%.2f, B_f_height=%.2f, anchor_el=%.2f'
%(self.dem_el, bsmt_anchor_el, B_f_height, self.anchor_el))
raise Error('%s got a negative bsmt_opn_ht (%.2f)'%(self.name, bsmt_opn_ht))
#=======================================================================
# wrap up
#=======================================================================
self.handle_upd('bsmt_opn_ht', bsmt_opn_ht, proxy(self), call_func = 'set_bsmt_opn_ht')
return True
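# Worked example (not part of the original model), with assumed numbers:
# B_f_height = 2.4, dem_el = 101.0, bsmt_anchor_el = 100.2, coded max_val = 0.6
#   bsmt_to_dem = 101.0 - 100.2 = 0.8
#   bsmt_opn_ht = min(2.4, 0.8, 0.6) = 0.6
# i.e. the opening height is capped by the user-coded maximum; if the basement
# anchor sat above grade (bsmt_to_dem < 0) the opening height would be 0.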
def set_damp_spill_ht(self):
damp_spill_ht = self.bsmt_opn_ht / 2.0
self.handle_upd('damp_spill_ht', damp_spill_ht, proxy(self), call_func = 'set_damp_spill_ht')
return True
def set_bsmt_egrd(self): #calculate the basement exposure grade
"""
bkflowv_f sumpump_f genorat_f
There is also a global flag to indicate whether bsmt_egrd should be considered or not
for the implementation of the bsmt_egrd in determining damages, see Dfunc.get_dmg_wsl()
#=======================================================================
# CALLS
#=======================================================================
this is now called during every get_dmgs_wsls()... as gpwr_f is a function of the Flood object
consider only calling w
"""
#=======================================================================
# shortcuts
#=======================================================================
if self.is_frozen('bsmt_egrd'):
return 'frozen'
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('set_bsmt_egrd')
if self.bsmt_f:
#=======================================================================
# from plpms
#=======================================================================
if self.model.bsmt_egrd_code == 'plpm':
#store the plpm status into the cond string
if self.db_f:
cond = 'plpm.'
for tag, flag in {'s':self.sumpump_f, 'g':self.genorat_f, 'b':self.bkflowv_f}.items():
if flag:
cond = '%s%s'%(cond, tag)
else:
cond = 'plpm'
#=======================================================================
# get the grid power state
#=======================================================================
if self.session.state == 'init':
gpwr_f = self.model.gpwr_f
cond = cond + '.init'
else:
gpwr_f = self.floodo.gpwr_f
cond = '%s.%s'%(cond, self.floodo.ari)
#=======================================================================
# grid power is on
#=======================================================================
if gpwr_f:
cond = cond + '.on'
if self.bkflowv_f and self.sumpump_f:
bsmt_egrd = 'dry'
elif self.bkflowv_f or self.sumpump_f:
bsmt_egrd = 'damp'
else:
bsmt_egrd = 'wet'
#=======================================================================
# grid power is off
#=======================================================================
else:
cond = cond + '.off'
if self.bkflowv_f and self.sumpump_f and self.genorat_f:
bsmt_egrd = 'dry'
elif self.bkflowv_f or (self.sumpump_f and self.genorat_f):
bsmt_egrd = 'damp'
else: bsmt_egrd = 'wet'
logger.debug('set bsmt_egrd = %s (from \'%s\') with grid_power_f = %s'%(bsmt_egrd,self.bsmt_egrd, gpwr_f))
#=======================================================================
# ignore bsmt_egrd
#=======================================================================
elif self.model.bsmt_egrd_code == 'none':
cond = 'none'
bsmt_egrd = 'wet'
gpwr_f = self.model.gpwr_f
#=======================================================================
# allow the user to override all
#=======================================================================
elif self.model.bsmt_egrd_code in ['wet', 'damp', 'dry']:
cond = 'global'
bsmt_egrd = self.model.bsmt_egrd_code
gpwr_f = self.model.gpwr_f
else:
raise IOError
else:
gpwr_f = self.model.gpwr_f
cond = 'nobsmt'
bsmt_egrd = 'nobsmt'
#=======================================================================
# wrap up
#=======================================================================
self.bsmt_egrd = bsmt_egrd
self.gpwr_f = gpwr_f #set this
"""report/collect on the flood
self.parent.childmeta_df.loc[self.dfloc,'bsmt_egrd'] = bsmt_egrd"""
return cond
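# A condensed sketch (not part of the original model) of the decision table
# implemented above, mapping the protection flags to an exposure grade:
#
#   def _egrd_sketch(gpwr_f, sumpump_f, genorat_f, bkflowv_f):
#       if gpwr_f:
#           if bkflowv_f and sumpump_f: return 'dry'
#           if bkflowv_f or sumpump_f:  return 'damp'
#           return 'wet'
#       if bkflowv_f and sumpump_f and genorat_f:  return 'dry'
#       if bkflowv_f or (sumpump_f and genorat_f): return 'damp'
#       return 'wet'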
def set_geo_dxcol(self): #calculate the geometry of each floor based on the geo_build_code
"""
builds a dxcol with all the geometry attributes of this house
called by load_data when self.session.wdfeats_f = True
#=======================================================================
# KEY VARS
#=======================================================================
geo_build_code: code to indicate what geometry to use for the house. see the dfunc tab
'defaults': see House.get_default_geo()
'from_self': expect all geo atts from the binv.
'any': take what you can from the binv, everything else use defaults.
'legacy': use gis area for everything
gbc_override: used to override the geo_build_code
geo_dxcol: house geometry
#=======================================================================
# UPDATES
#=======================================================================
when a specific geometry attribute of the house is updated (e.g. B_f_height)
this dxcol needs to be rebuilt
and all the dfuncs need to run build_dd_ar()
#=======================================================================
# TODO
#=======================================================================
add some isolated updating?
for when we only change one floor
need to add some kwargs to the dynp_handles
"""
logger = self.logger.getChild('set_geo_dxcol')
if self.is_frozen('geo_dxcol', logger=logger):
return True
pars_dxcol = self.session.pars_df_d['hse_geo'] #pull the pars frame
#=======================================================================
# get default geometry for this house
#=======================================================================
self.defa = self.gis_area #default area
if self.defa <=0:
logger.error('got negative area = %.2f'%self.defa)
raise IOError
self.defp = 4*math.sqrt(self.defa)
#=======================================================================
# setup the geo_dxcol
#=======================================================================
dxcol = self.model.geo_dxcol_blank.copy() #get a copy of the blank one
'I need to place the reference here so that geometry attributes have access to each other'
#self.geo_dxcol = dxcol
place_codes = dxcol.columns.get_level_values(0).unique().tolist()
#finish_codes = dxcol.columns.get_level_values(1).unique().tolist()
#geo_codes = dxcol.index
logger.debug("from geo_dxcol_blank %s filling:"%(str(dxcol.shape)))
#=======================================================================
# #loop through each place code and compile the appropriate geometry
#=======================================================================
for place_code in place_codes:
geo_df = dxcol[place_code] #geometry for just this place
pars_df = pars_dxcol[place_code]
#logger.debug('filling geo_df for place_code: \'%s\' '%(place_code))
#===================================================================
# #loop through and build the geometry by each geocode
#===================================================================
for geo_code, row in geo_df.iterrows():
for finish_code, value in row.items():
#===========================================================
# total column
#===========================================================
if finish_code == 't':
uval = dxcol.loc[geo_code, (place_code, 'u')]
fval = dxcol.loc[geo_code, (place_code, 'f')]
if self.db_f:
if np.any(pd.isnull([uval, fval])):
raise IOError
if geo_code == 'height': #for height, take the maximum
att_val = max(uval, fval)
else: #for other geometry, take the total
att_val = uval + fval
#===========================================================
# finish/unfinished
#===========================================================
else:
#get the user passed par for this
gbc = pars_df.loc[geo_code, finish_code]
try:gbc = float(gbc)
except: pass
#===========================================================
# #assemble per the geo_build_code
#===========================================================
#user specified code
if isinstance(gbc, str):
gbc = str(gbc)
if gbc == '*binv':
att_name = place_code +'_'+finish_code+'_'+ geo_code #get the att name for this
att_val = getattr(self, att_name) #get this attribute from self
""""
mostly using this key for the B_f_height
"""
elif gbc == '*geo':
att_val = self.calc_secondary_geo(place_code, finish_code, geo_code, dxcol=dxcol) #calculate the default value
elif gbc.startswith('*tab'):
#get the pars
tabn = re.sub(r'\)', "", gbc[5:]) #remove the closing parenthesis
df = self.session.pars_df_d[tabn]
att_name = place_code +'_'+finish_code+'_'+ geo_code #get the att name for this
att_val = self.get_geo_from_other(df, att_name)
else:
att_val = getattr(self, gbc)
#user speciifed value
elif isinstance(gbc, float): #just use the default value provided in the pars
att_val = gbc
else: raise IOError
logger.debug('set %s.%s.%s = %.2f with gbc \'%s\''%(place_code,finish_code,geo_code, att_val, gbc))
#===========================================================
# value checks
#===========================================================
if self.db_f:
att_name = place_code +'_'+finish_code+'_'+ geo_code
if not 'float' in type(att_val).__name__:
raise Error('got unexpected type for \"%s\': %s'%(att_name, type(att_val)))
if pd.isnull(att_val):
raise IOError
if att_val < 0:
raise IOError
#===========================================================
# set the value
#===========================================================
dxcol.loc[geo_code, (place_code, finish_code)] = att_val
#row[finish_code] = att_val #update the ser
#logger.debug('set \'%s\' as \'%s\''%(att_name, att_val))
#=======================================================================
# rounding
#=======================================================================
dxcol = dxcol.round(decimals=2)
#=======================================================================
# special attribute setting
#=======================================================================
'need this as an attribute for reporting'
B_f_height = float(dxcol.loc['height', ('B', 'f')]) #to set the type
#===============================================================
# POST
#===============================================================
"""todo:
add some checking that we are not changing any geometry attributes with a dynp
that would be overwritten here
"""
#logger.debug('built house_geo_dxcol %s'%str(dxcol.shape))
self.handle_upd('geo_dxcol', dxcol, weakref.proxy(self), call_func = 'set_geo_dxcol')
self.handle_upd('B_f_height', B_f_height, weakref.proxy(self), call_func | |
<reponame>rhasspy/rhasspy-remote-http-hermes<filename>rhasspyremote_http_hermes/__init__.py
"""Hermes MQTT server for Rhasspy remote server"""
import asyncio
import gzip
import io
import json
import logging
import ssl
import subprocess
import time
import typing
import wave
from dataclasses import dataclass
from uuid import uuid4
import aiohttp
import networkx as nx
import rhasspynlu
from paho.mqtt.matcher import MQTTMatcher
from rhasspyhermes.asr import (
AsrAudioCaptured,
AsrError,
AsrStartListening,
AsrStopListening,
AsrTextCaptured,
AsrToggleOff,
AsrToggleOn,
AsrToggleReason,
AsrTrain,
AsrTrainSuccess,
)
from rhasspyhermes.audioserver import AudioFrame, AudioPlayBytes, AudioSessionFrame
from rhasspyhermes.base import Message
from rhasspyhermes.client import GeneratorType, HermesClient, TopicArgs
from rhasspyhermes.handle import HandleToggleOff, HandleToggleOn
from rhasspyhermes.intent import Intent, Slot, SlotRange
from rhasspyhermes.nlu import (
NluError,
NluIntent,
NluIntentNotRecognized,
NluIntentParsed,
NluQuery,
NluTrain,
NluTrainSuccess,
)
from rhasspyhermes.tts import TtsError, TtsSay, TtsSayFinished
from rhasspyhermes.wake import (
HotwordDetected,
HotwordToggleOff,
HotwordToggleOn,
HotwordToggleReason,
)
from rhasspysilence import (
SilenceMethod,
VoiceCommandRecorder,
VoiceCommandResult,
WebRtcVadRecorder,
)
_LOGGER = logging.getLogger("rhasspyremote_http_hermes")
# -----------------------------------------------------------------------------
@dataclass
class AsrSession:
"""WAV buffer for an ASR session"""
start_listening: AsrStartListening
recorder: VoiceCommandRecorder
sample_rate: typing.Optional[int] = None
sample_width: typing.Optional[int] = None
channels: typing.Optional[int] = None
audio_data: bytes = bytes()
# -----------------------------------------------------------------------------
class RemoteHermesMqtt(HermesClient):
"""Hermes MQTT server for Rhasspy remote server."""
def __init__(
self,
client,
asr_url: typing.Optional[str] = None,
asr_command: typing.Optional[typing.List[str]] = None,
asr_train_url: typing.Optional[str] = None,
asr_train_command: typing.Optional[typing.List[str]] = None,
nlu_url: typing.Optional[str] = None,
nlu_command: typing.Optional[typing.List[str]] = None,
nlu_train_url: typing.Optional[str] = None,
nlu_train_command: typing.Optional[typing.List[str]] = None,
tts_url: typing.Optional[str] = None,
wake_command: typing.Optional[typing.List[str]] = None,
wake_sample_rate: int = 16000,
wake_sample_width: int = 2,
wake_channels: int = 1,
handle_url: typing.Optional[str] = None,
handle_command: typing.Optional[typing.List[str]] = None,
word_transform: typing.Optional[typing.Callable[[str], str]] = None,
certfile: typing.Optional[str] = None,
keyfile: typing.Optional[str] = None,
make_recorder: typing.Callable[[], VoiceCommandRecorder] = None,
recorder_sample_rate: int = 16000,
recorder_sample_width: int = 2,
recorder_channels: int = 1,
webhooks: typing.Optional[typing.Dict[str, typing.List[str]]] = None,
skip_seconds: float = 0.0,
min_seconds: float = 1.0,
max_seconds: typing.Optional[float] = None,
speech_seconds: float = 0.3,
silence_seconds: float = 0.5,
before_seconds: float = 0.5,
vad_mode: int = 3,
max_energy: typing.Optional[float] = None,
max_current_energy_ratio_threshold: typing.Optional[float] = None,
current_energy_threshold: typing.Optional[float] = None,
silence_method: SilenceMethod = SilenceMethod.VAD_ONLY,
site_ids: typing.Optional[typing.List[str]] = None,
lang: typing.Optional[str] = None,
):
super().__init__("rhasspyremote_http_hermes", client, site_ids=site_ids)
# Speech to text
self.asr_url = asr_url
self.asr_command = asr_command
self.asr_train_url = asr_train_url
self.asr_train_command = asr_train_command
self.asr_enabled = True
self.asr_used = self.asr_url or self.asr_command
self.asr_train_used = self.asr_train_url or self.asr_train_command
self.asr_disabled_reasons: typing.Set[str] = set()
# Intent recognition
self.nlu_url = nlu_url
self.nlu_command = nlu_command
self.nlu_train_url = nlu_train_url
self.nlu_train_command = nlu_train_command
self.nlu_used = self.nlu_url or self.nlu_command
self.nlu_train_used = self.nlu_train_url or self.nlu_train_command
# Text to speech
self.tts_url = tts_url
self.tts_used = self.tts_url
# Wake word detection
self.wake_command = wake_command
self.wake_enabled = True
self.wake_proc: typing.Optional[subprocess.Popen] = None
self.wake_sample_rate = wake_sample_rate
self.wake_sample_width = wake_sample_width
self.wake_channels = wake_channels
self.wake_used = self.wake_command
self.wake_disabled_reasons: typing.Set[str] = set()
# Intent handling
self.handle_url = handle_url
self.handle_command = handle_command
self.handle_enabled = True
self.handle_used = self.handle_url or self.handle_command
self.word_transform = word_transform
# SSL
self.ssl_context = ssl.SSLContext()
if certfile:
_LOGGER.debug("Using SSL with certfile=%s, keyfile=%s", certfile, keyfile)
self.ssl_context.load_cert_chain(certfile, keyfile)
# Async HTTP
self._http_session: typing.Optional[aiohttp.ClientSession] = None
def default_recorder():
return WebRtcVadRecorder(
max_seconds=max_seconds,
vad_mode=vad_mode,
skip_seconds=skip_seconds,
min_seconds=min_seconds,
speech_seconds=speech_seconds,
silence_seconds=silence_seconds,
before_seconds=before_seconds,
silence_method=silence_method,
current_energy_threshold=current_energy_threshold,
max_energy=max_energy,
max_current_ratio_threshold=max_current_energy_ratio_threshold,
)
self.make_recorder = make_recorder or default_recorder
self.recorder_sample_rate = recorder_sample_rate
self.recorder_sample_width = recorder_sample_width
self.recorder_channels = recorder_channels
# Webhooks
self.webhook_matcher: typing.Optional[MQTTMatcher] = None
self.webhook_topics: typing.List[str] = []
if webhooks:
self.webhook_matcher = MQTTMatcher()
self.webhook_topics = list(webhooks.keys())
for topic, urls in webhooks.items():
for url in urls:
self.webhook_matcher[topic] = url
# session_id -> AsrSession
self.asr_sessions: typing.Dict[typing.Optional[str], AsrSession] = {}
self.first_audio: bool = True
self.lang = lang
# Start up
if self.wake_command:
self.start_wake_command()
# Webhooks
self.subscribe_topics(*self.webhook_topics)
# Wake
if self.wake_used:
self.subscribe(HotwordToggleOn, HotwordToggleOff)
# ASR
if self.asr_used:
self.subscribe(
AsrStartListening,
AsrStopListening,
AsrToggleOn,
AsrToggleOff,
AudioFrame,
AudioSessionFrame,
)
if self.asr_train_used:
self.subscribe(AsrTrain)
# NLU
if self.nlu_used:
self.subscribe(NluQuery)
if self.nlu_train_used:
self.subscribe(NluTrain)
# TTS
if self.tts_used:
self.subscribe(TtsSay)
# Intent Handling
if self.handle_used:
self.subscribe(NluIntent, HandleToggleOn, HandleToggleOff)
@property
def http_session(self):
"""Get or create async HTTP session"""
if self._http_session is None:
self._http_session = aiohttp.ClientSession()
return self._http_session
# -------------------------------------------------------------------------
async def handle_query(
self, query: NluQuery
) -> typing.AsyncIterable[
typing.Union[
typing.Tuple[NluIntent, TopicArgs],
NluIntentParsed,
NluIntentNotRecognized,
NluError,
]
]:
"""Do intent recognition."""
try:
input_text = query.input
# Fix casing
if self.word_transform:
input_text = self.word_transform(input_text)
if self.nlu_url:
# Use remote server
params = {"siteId": query.site_id}
# Add intent filter
if query.intent_filter:
params["intentFilter"] = ",".join(query.intent_filter)
_LOGGER.debug("%s params=%s", self.nlu_url, params)
async with self.http_session.post(
self.nlu_url, data=input_text, params=params, ssl=self.ssl_context
) as response:
response.raise_for_status()
intent_dict = await response.json()
elif self.nlu_command:
# Run external command
_LOGGER.debug(self.nlu_command)
proc = await asyncio.create_subprocess_exec(
*self.nlu_command,
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
)
input_bytes = (input_text.strip() + "\n").encode()
output, error = await proc.communicate(input_bytes)
if error:
_LOGGER.debug(error.decode())
intent_dict = json.loads(output)
else:
_LOGGER.warning("Not handling NLU query (no URL or command)")
return
intent_name = intent_dict["intent"].get("name", "")
if intent_name:
# Recognized
tokens = query.input.split()
slots = [
Slot(
entity=e["entity"],
slot_name=e["entity"],
confidence=1,
value=e.get("value_details", {"value": e["value"]}),
raw_value=e.get("raw_value", e["value"]),
range=SlotRange(
start=e.get("start", 0),
end=e.get("end", 1),
raw_start=e.get("raw_start"),
raw_end=e.get("raw_end"),
),
)
for e in intent_dict.get("entities", [])
]
yield NluIntentParsed(
input=query.input,
id=query.id,
site_id=query.site_id,
session_id=query.session_id,
intent=Intent(
intent_name=intent_name,
confidence_score=intent_dict["intent"].get("confidence", 1.0),
),
slots=slots,
)
yield (
NluIntent(
input=query.input,
id=query.id,
site_id=query.site_id,
session_id=query.session_id,
intent=Intent(
intent_name=intent_name,
confidence_score=intent_dict["intent"].get(
"confidence", 1.0
),
),
slots=slots,
asr_tokens=[NluIntent.make_asr_tokens(tokens)],
raw_input=query.input,
wakeword_id=query.wakeword_id,
lang=(query.lang or self.lang),
custom_data=query.custom_data,
),
{"intent_name": intent_name},
)
else:
# Not recognized
yield NluIntentNotRecognized(
input=query.input,
id=query.id,
site_id=query.site_id,
session_id=query.session_id,
custom_data=query.custom_data,
)
except Exception as e:
_LOGGER.exception("handle_query")
yield NluError(
error=repr(e),
context=repr(query),
site_id=query.site_id,
session_id=query.session_id,
)
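# Illustrative sketch (the payload below is made up): handle_query expects the
# remote NLU endpoint or external command to return JSON with an "intent" block
# plus an "entities" list, which is converted into Hermes slots above.
#
#   {
#     "intent": {"name": "GetTemperature", "confidence": 0.92},
#     "entities": [
#       {"entity": "location", "value": "kitchen", "start": 23, "end": 30}
#     ]
#   }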
# -------------------------------------------------------------------------
async def handle_say(
self, say: TtsSay
) -> typing.AsyncIterable[
typing.Union[typing.Tuple[AudioPlayBytes, TopicArgs], TtsSayFinished, TtsError]
]:
"""Do text to speech."""
try:
if self.tts_url:
# Remote text to speech server
_LOGGER.debug(self.tts_url)
params = {"play": "false", "siteId": say.site_id}
if say.lang:
# Add ?language=<lang> query parameter
params["language"] = say.lang
async with self.http_session.post(
self.tts_url, data=say.text, params=params, ssl=self.ssl_context
) as response:
response.raise_for_status()
content_type = response.headers["Content-Type"]
if content_type != "audio/wav":
_LOGGER.warning(
"Expected audio/wav content type, got %s", content_type
)
wav_bytes = await response.read()
if wav_bytes:
yield (
AudioPlayBytes(wav_bytes=wav_bytes),
{"site_id": say.site_id, "request_id": say.id},
)
else:
_LOGGER.error("Received empty response")
except Exception as e:
_LOGGER.exception("handle_say")
yield TtsError(
error=str(e),
context=say.id,
site_id=say.site_id,
session_id=say.session_id,
)
finally:
yield TtsSayFinished(
id=say.id, site_id=say.site_id, session_id=say.session_id
)
# -------------------------------------------------------------------------
async def handle_start_listening(
self, start_listening: AsrStartListening
) -> typing.AsyncIterable[AsrError]:
"""Start ASR session."""
_LOGGER.debug("<- %s", start_listening)
try:
session = AsrSession(
start_listening=start_listening, recorder=self.make_recorder()
)
self.asr_sessions[start_listening.session_id] = session
session.recorder.start()
except Exception as e:
_LOGGER.exception("handle_start_listening")
yield AsrError(
error=str(e),
context="",
site_id=start_listening.site_id,
session_id=start_listening.session_id,
)
# -------------------------------------------------------------------------
async def handle_audio_frame(
self,
wav_bytes: bytes,
site_id: str = "default",
session_id: typing.Optional[str] = None,
) -> typing.AsyncIterable[
typing.Union[
typing.Tuple[HotwordDetected, TopicArgs],
AsrTextCaptured,
typing.Tuple[AsrAudioCaptured, TopicArgs],
AsrError,
]
]:
"""Add audio frame to open sessions."""
try:
if self.asr_enabled:
if session_id is None:
# Add to every open session
target_sessions = list(self.asr_sessions.items())
else:
# Add to single session
target_sessions = [(session_id, self.asr_sessions[session_id])]
with io.BytesIO(wav_bytes) as in_io:
with wave.open(in_io) as in_wav:
# Get WAV details from first frame
sample_rate = in_wav.getframerate()
sample_width = in_wav.getsampwidth()
channels = in_wav.getnchannels()
audio_data = in_wav.readframes(in_wav.getnframes())
# Add to target ASR sessions
for target_id, session in target_sessions:
# Skip non-matching site_id
if session.start_listening.site_id != site_id:
continue
session.sample_rate = sample_rate
session.sample_width = sample_width
session.channels = channels
session.audio_data += audio_data
if session.start_listening.stop_on_silence:
# Detect silence (end of command)
audio_data = self.maybe_convert_wav(
wav_bytes,
self.recorder_sample_rate,
self.recorder_sample_width,
self.recorder_channels,
)
command = session.recorder.process_chunk(audio_data)
if command and (command.result == VoiceCommandResult.SUCCESS):
# Complete session
stop_listening = AsrStopListening(
site_id=site_id, session_id=target_id
)
async for message in self.handle_stop_listening(
stop_listening
):
yield message
if self.wake_enabled and (session_id is None) and self.wake_proc:
# Convert and send to wake command
audio_bytes = self.maybe_convert_wav(
wav_bytes,
self.wake_sample_rate,
self.wake_sample_width,
self.wake_channels,
)
assert self.wake_proc.stdin
self.wake_proc.stdin.write(audio_bytes)
if self.wake_proc.poll():
stdout, stderr = self.wake_proc.communicate()
if stderr:
_LOGGER.debug(stderr.decode())
wakeword_id = stdout.decode().strip()
_LOGGER.debug("Detected wake word %s", wakeword_id)
yield (
HotwordDetected(
model_id=wakeword_id,
model_version="",
model_type="personal",
current_sensitivity=1.0,
site_id=site_id,
lang=self.lang,
),
{"wakeword_id": wakeword_id},
)
# Restart wake process
self.start_wake_command()
except Exception:
_LOGGER.exception("handle_audio_frame")
# -------------------------------------------------------------------------
async def handle_stop_listening(
self, stop_listening: AsrStopListening
) -> typing.AsyncIterable[
typing.Union[
AsrTextCaptured, typing.Tuple[AsrAudioCaptured, TopicArgs], AsrError
]
]:
"""Stop ASR session."""
_LOGGER.debug("<- %s", stop_listening)
try:
session = self.asr_sessions.pop(stop_listening.session_id, None)
if session is None:
_LOGGER.warning("Session not found for %s", stop_listening.session_id)
return
assert session.sample_rate is not None, "No sample rate"
assert session.sample_width is not None, "No sample width"
assert session.channels is not None, "No channels"
if session.start_listening.stop_on_silence:
# Use recorded voice command
audio_data = session.recorder.stop()
else:
# Use entire audio
audio_data = session.audio_data
# Process entire WAV file
wav_bytes = self.to_wav_bytes(
audio_data, session.sample_rate, session.sample_width, session.channels
)
_LOGGER.debug("Received %s byte(s) of WAV data", len(wav_bytes))
if self.asr_url:
params = {"siteId": stop_listening.site_id}
_LOGGER.debug("%s params=%s", self.asr_url, params)
# Remote ASR server
# lib/encoding_tree.py
import math
import heapq
import numba as nb
import numpy as np
import copy
def get_id():
i = 0
while True:
yield i
i += 1
def graph_parse(adj_matrix):
g_num_nodes = adj_matrix.shape[0]
adj_table = {}
VOL = 0
node_vol = []
for i in range(g_num_nodes):
n_v = 0
adj = set()
for j in range(g_num_nodes):
if adj_matrix[i,j] != 0:
n_v += adj_matrix[i,j]
VOL += adj_matrix[i,j]
adj.add(j)
adj_table[i] = adj
node_vol.append(n_v)
return g_num_nodes,VOL,node_vol,adj_table
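# Illustrative sketch (the adjacency matrix below is made up): graph_parse on a
# tiny two-node graph.
def _graph_parse_example():  # pragma: no cover
    adj = np.array([[0, 1], [1, 0]])
    # Expected result: 2 nodes, total volume 2, per-node volumes [1, 1]
    # and adjacency table {0: {1}, 1: {0}}.
    return graph_parse(adj)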
@nb.jit(nopython=True)
def cut_volume(adj_matrix,p1,p2):
c12 = 0
for i in range(len(p1)):
for j in range(len(p2)):
c = adj_matrix[p1[i],p2[j]]
if c != 0:
c12 += c
return c12
def LayerFirst(node_dict,start_id):
stack = [start_id]
while len(stack) != 0:
node_id = stack.pop(0)
yield node_id
if node_dict[node_id].children:
for c_id in node_dict[node_id].children:
stack.append(c_id)
def merge(new_ID, id1, id2, cut_v, node_dict):
new_partition = node_dict[id1].partition + node_dict[id2].partition
v = node_dict[id1].vol + node_dict[id2].vol
g = node_dict[id1].g + node_dict[id2].g - 2 * cut_v
child_h = max(node_dict[id1].child_h,node_dict[id2].child_h) + 1
new_node = PartitionTreeNode(ID=new_ID,partition=new_partition,children={id1,id2},
g=g, vol=v,child_h= child_h,child_cut = cut_v)
node_dict[id1].parent = new_ID
node_dict[id2].parent = new_ID
node_dict[new_ID] = new_node
def compressNode(node_dict, node_id, parent_id):
p_child_h = node_dict[parent_id].child_h
node_children = node_dict[node_id].children
node_dict[parent_id].child_cut += node_dict[node_id].child_cut
node_dict[parent_id].children.remove(node_id)
node_dict[parent_id].children = node_dict[parent_id].children.union(node_children)
for c in node_children:
node_dict[c].parent = parent_id
com_node_child_h = node_dict[node_id].child_h
node_dict.pop(node_id)
if (p_child_h - com_node_child_h) == 1:
while True:
max_child_h = max([node_dict[f_c].child_h for f_c in node_dict[parent_id].children])
if node_dict[parent_id].child_h == (max_child_h + 1):
break
node_dict[parent_id].child_h = max_child_h + 1
parent_id = node_dict[parent_id].parent
if parent_id is None:
break
def child_tree_deepth(node_dict,nid):
node = node_dict[nid]
deepth = 0
while node.parent is not None:
node = node_dict[node.parent]
deepth+=1
deepth += node_dict[nid].child_h
return deepth
def CompressDelta(node1,p_node):
a = node1.child_cut
v1 = node1.vol
v2 = p_node.vol
return a * math.log(v2 / v1)
def CombineDelta(node1, node2, cut_v, g_vol):
v1 = node1.vol
v2 = node2.vol
g1 = node1.g
g2 = node2.g
v12 = v1 + v2
return ((v1 - g1) * math.log(v12 / v1,2) + (v2 - g2) * math.log(v12 / v2,2) - 2 * cut_v * math.log(g_vol / v12,2)) / g_vol
class PartitionTreeNode():
def __init__(self, ID, partition, vol, g, children:set = None,parent = None,child_h = 0, child_cut = 0):
self.ID = ID
self.partition = partition
self.parent = parent
self.children = children
self.vol = vol
self.g = g
self.merged = False
self.child_h = child_h  # height of the subtree below this node (the node itself is not counted)
self.child_cut = child_cut
def __str__(self):
return "{" + "{}:{}".format(self.__class__.__name__, self.gatherAttrs()) + "}"
def gatherAttrs(self):
return ",".join("{}={}"
.format(k, getattr(self, k))
for k in self.__dict__.keys())
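# Illustrative sketch (all numbers are made up): a quick numeric check of
# CombineDelta on two hypothetical single-vertex leaves. With v1 = v2 = g1 =
# g2 = 2, cut_v = 1 and g_vol = 8 the first two terms vanish, leaving
# -2 * 1 * log2(8 / 4) / 8 = -0.25, i.e. merging the pair lowers the entropy.
def _combine_delta_example():  # pragma: no cover
    n1 = PartitionTreeNode(ID=0, partition=[0], vol=2, g=2)
    n2 = PartitionTreeNode(ID=1, partition=[1], vol=2, g=2)
    return CombineDelta(n1, n2, cut_v=1, g_vol=8)  # expected: -0.25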
class PartitionTree():
def __init__(self,adj_matrix):
self.adj_matrix = adj_matrix
self.tree_node = {}
self.g_num_nodes, self.VOL, self.node_vol, self.adj_table = graph_parse(adj_matrix)
self.id_g = get_id()
self.leaves = []
self.build_leaves()
def build_leaves(self):
for vertex in range(self.g_num_nodes):
ID = next(self.id_g)
v = self.node_vol[vertex]
leaf_node = PartitionTreeNode(ID=ID, partition=[vertex], g = v, vol=v)
self.tree_node[ID] = leaf_node
self.leaves.append(ID)
def build_sub_leaves(self,node_list,p_vol):
subgraph_node_dict = {}
ori_ent = 0
for vertex in node_list:
ori_ent += -(self.tree_node[vertex].g / self.VOL)\
* math.log2(self.tree_node[vertex].vol / p_vol)
sub_n = set()
vol = 0
for vertex_n in node_list:
c = self.adj_matrix[vertex,vertex_n]
if c != 0:
vol += c
sub_n.add(vertex_n)
sub_leaf = PartitionTreeNode(ID=vertex,partition=[vertex],g=vol,vol=vol)
subgraph_node_dict[vertex] = sub_leaf
self.adj_table[vertex] = sub_n
return subgraph_node_dict,ori_ent
def build_root_down(self):
root_child = self.tree_node[self.root_id].children
subgraph_node_dict = {}
ori_en = 0
g_vol = self.tree_node[self.root_id].vol
for node_id in root_child:
node = self.tree_node[node_id]
ori_en += -(node.g / g_vol) * math.log2(node.vol / g_vol)
new_n = set()
for nei in self.adj_table[node_id]:
if nei in root_child:
new_n.add(nei)
self.adj_table[node_id] = new_n
new_node = PartitionTreeNode(ID=node_id,partition=node.partition,vol=node.vol,g = node.g,children=node.children)
subgraph_node_dict[node_id] = new_node
return subgraph_node_dict, ori_en
def entropy(self,node_dict = None):
if node_dict is None:
node_dict = self.tree_node
ent = 0
for node_id,node in node_dict.items():
if node.parent is not None:
node_p = node_dict[node.parent]
node_vol = node.vol
node_g = node.g
node_p_vol = node_p.vol
ent += - (node_g / self.VOL) * math.log2(node_vol / node_p_vol)
return ent
def __build_k_tree(self,g_vol,nodes_dict:dict,k = None,):
min_heap = []
cmp_heap = []
nodes_ids = nodes_dict.keys()
new_id = None
for i in nodes_ids:
for j in self.adj_table[i]:
if j > i:
n1 = nodes_dict[i]
n2 = nodes_dict[j]
if len(n1.partition) == 1 and len(n2.partition) == 1:
cut_v = self.adj_matrix[n1.partition[0],n2.partition[0]]
else:
cut_v = cut_volume(self.adj_matrix,p1 = np.array(n1.partition),p2=np.array(n2.partition))
diff = CombineDelta(nodes_dict[i], nodes_dict[j], cut_v, g_vol)
heapq.heappush(min_heap, (diff, i, j, cut_v))
unmerged_count = len(nodes_ids)
while unmerged_count > 1:
if len(min_heap) == 0:
break
diff, id1, id2, cut_v = heapq.heappop(min_heap)
if nodes_dict[id1].merged or nodes_dict[id2].merged:
continue
nodes_dict[id1].merged = True
nodes_dict[id2].merged = True
new_id = next(self.id_g)
merge(new_id, id1, id2, cut_v, nodes_dict)
self.adj_table[new_id] = self.adj_table[id1].union(self.adj_table[id2])
for i in self.adj_table[new_id]:
self.adj_table[i].add(new_id)
#compress delta
if nodes_dict[id1].child_h > 0:
heapq.heappush(cmp_heap,[CompressDelta(nodes_dict[id1],nodes_dict[new_id]),id1,new_id])
if nodes_dict[id2].child_h > 0:
heapq.heappush(cmp_heap,[CompressDelta(nodes_dict[id2],nodes_dict[new_id]),id2,new_id])
unmerged_count -= 1
for ID in self.adj_table[new_id]:
if not nodes_dict[ID].merged:
n1 = nodes_dict[ID]
n2 = nodes_dict[new_id]
cut_v = cut_volume(self.adj_matrix,np.array(n1.partition), np.array(n2.partition))
new_diff = CombineDelta(nodes_dict[ID], nodes_dict[new_id], cut_v, g_vol)
heapq.heappush(min_heap, (new_diff, ID, new_id, cut_v))
root = new_id
if unmerged_count > 1:
#combine solitary node
# print('processing solitary node')
assert len(min_heap) == 0
unmerged_nodes = {i for i, j in nodes_dict.items() if not j.merged}
new_child_h = max([nodes_dict[i].child_h for i in unmerged_nodes]) + 1
new_id = next(self.id_g)
new_node = PartitionTreeNode(ID=new_id,partition=list(nodes_ids),children=unmerged_nodes,
vol=g_vol,g = 0,child_h=new_child_h)
nodes_dict[new_id] = new_node
for i in unmerged_nodes:
nodes_dict[i].merged = True
nodes_dict[i].parent = new_id
if nodes_dict[i].child_h > 0:
heapq.heappush(cmp_heap, [CompressDelta(nodes_dict[i], nodes_dict[new_id]), i, new_id])
root = new_id
if k is not None:
while nodes_dict[root].child_h > k:
diff, node_id, p_id = heapq.heappop(cmp_heap)
if child_tree_deepth(nodes_dict, node_id) <= k:
continue
children = nodes_dict[node_id].children
compressNode(nodes_dict, node_id, p_id)
if nodes_dict[root].child_h == k:
break
for e in cmp_heap:
if e[1] == p_id:
if child_tree_deepth(nodes_dict, p_id) > k:
e[0] = CompressDelta(nodes_dict[e[1]], nodes_dict[e[2]])
if e[1] in children:
if nodes_dict[e[1]].child_h == 0:
continue
if child_tree_deepth(nodes_dict, e[1]) > k:
e[2] = p_id
e[0] = CompressDelta(nodes_dict[e[1]], nodes_dict[p_id])
heapq.heapify(cmp_heap)
return root
def check_balance(self,node_dict,root_id):
root_c = copy.deepcopy(node_dict[root_id].children)
for c in root_c:
if node_dict[c].child_h == 0:
self.single_up(node_dict,c)
def single_up(self,node_dict,node_id):
new_id = next(self.id_g)
p_id = node_dict[node_id].parent
grow_node = PartitionTreeNode(ID=new_id, partition=node_dict[node_id].partition, parent=p_id,
children={node_id}, vol=node_dict[node_id].vol, g=node_dict[node_id].g)
node_dict[node_id].parent = new_id
node_dict[p_id].children.remove(node_id)
node_dict[p_id].children.add(new_id)
node_dict[new_id] = grow_node
node_dict[new_id].child_h = node_dict[node_id].child_h + 1
self.adj_table[new_id] = self.adj_table[node_id]
for i in self.adj_table[node_id]:
self.adj_table[i].add(new_id)
def root_down_delta(self):
if len(self.tree_node[self.root_id].children) < 3:
return 0 , None , None
subgraph_node_dict, ori_entropy = self.build_root_down()
g_vol = self.tree_node[self.root_id].vol
new_root = self.__build_k_tree(g_vol=g_vol,nodes_dict=subgraph_node_dict,k=2)
self.check_balance(subgraph_node_dict,new_root)
new_entropy = self.entropy(subgraph_node_dict)
delta = (ori_entropy - new_entropy) / len(self.tree_node[self.root_id].children)
return delta, new_root, subgraph_node_dict
def leaf_up_entropy(self,sub_node_dict,sub_root_id,node_id):
ent = 0
for sub_node_id in LayerFirst(sub_node_dict,sub_root_id):
if sub_node_id == sub_root_id:
sub_node_dict[sub_root_id].vol = self.tree_node[node_id].vol
sub_node_dict[sub_root_id].g = self.tree_node[node_id].g
elif sub_node_dict[sub_node_id].child_h == 1:
node = sub_node_dict[sub_node_id]
inner_vol = node.vol - node.g
partition = node.partition
ori_vol = sum(self.tree_node[i].vol for i in partition)
ori_g = ori_vol - inner_vol
node.vol = ori_vol
node.g = ori_g
node_p = sub_node_dict[node.parent]
ent += -(node.g / self.VOL) * math.log2(node.vol / node_p.vol)
else:
node = sub_node_dict[sub_node_id]
node.g = self.tree_node[sub_node_id].g
node.vol = self.tree_node[sub_node_id].vol
node_p = sub_node_dict[node.parent]
ent += -(node.g / self.VOL) * math.log2(node.vol / node_p.vol)
return ent
def leaf_up(self):
h1_id = set()
h1_new_child_tree = {}
id_mapping = {}
for l in self.leaves:
p = self.tree_node[l].parent
h1_id.add(p)
delta = 0
for node_id in h1_id:
candidate_node = self.tree_node[node_id]
sub_nodes = candidate_node.partition
if len(sub_nodes) == 1:
id_mapping[node_id] = None
if len(sub_nodes) == 2:
id_mapping[node_id] = None
if len(sub_nodes) >= 3:
sub_g_vol = candidate_node.vol - candidate_node.g
subgraph_node_dict,ori_ent = self.build_sub_leaves(sub_nodes,candidate_node.vol)
sub_root = self.__build_k_tree(g_vol=sub_g_vol,nodes_dict=subgraph_node_dict,k = 2)
self.check_balance(subgraph_node_dict,sub_root)
new_ent = self.leaf_up_entropy(subgraph_node_dict,sub_root,node_id)
delta += (ori_ent - new_ent)
h1_new_child_tree[node_id] = subgraph_node_dict
id_mapping[node_id] = sub_root
delta = delta / self.g_num_nodes
return delta,id_mapping,h1_new_child_tree
def leaf_up_update(self,id_mapping,leaf_up_dict):
for node_id,h1_root in id_mapping.items():
if h1_root is None:
children = copy.deepcopy(self.tree_node[node_id].children)
for i in children:
self.single_up(self.tree_node,i)
else:
h1_dict = leaf_up_dict[node_id]
self.tree_node[node_id].children = h1_dict[h1_root].children
for h1_c in h1_dict[h1_root].children:
assert h1_c not in self.tree_node
h1_dict[h1_c].parent = node_id
h1_dict.pop(h1_root)
self.tree_node.update(h1_dict)
self.tree_node[self.root_id].child_h += 1
def root_down_update(self, new_id , root_down_dict):
self.tree_node[self.root_id].children = root_down_dict[new_id].children
for node_id in root_down_dict[new_id].children:
assert node_id not in self.tree_node
root_down_dict[node_id].parent = self.root_id
root_down_dict.pop(new_id)
self.tree_node.update(root_down_dict)
self.tree_node[self.root_id].child_h += 1
def build_encoding_tree(self, k=2, mode='v2'):
if k == 1:
return
if mode == 'v1' or k is None:
self.root_id = self.__build_k_tree(self.VOL, self.tree_node, k = k)
elif mode == 'v2':
self.root_id = self.__build_k_tree(self.VOL, self.tree_node, k = 2)
self.check_balance(self.tree_node,self.root_id)
if self.tree_node[self.root_id].child_h < 2:
self.tree_node[self.root_id].child_h = 2
flag = 0
while self.tree_node[self.root_id].child_h < k:
if flag == 0:
leaf_up_delta,id_mapping,leaf_up_dict = self.leaf_up()
root_down_delta, new_id , root_down_dict = self.root_down_delta()
elif flag == 1:
leaf_up_delta, id_mapping, leaf_up_dict = self.leaf_up()
elif flag == 2:
root_down_delta, new_id , root_down_dict = self.root_down_delta()
else:
raise ValueError
if leaf_up_delta < root_down_delta:
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# Set Title
if set_title is None:
set_title = ' '.join(
[dsname, 'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0])])
self.axes[subplot_index].set_title(set_title)
# Set YTitle
if 'ytitle' in locals():
self.axes[subplot_index].set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
time_rng = [x_times[-1], x_times[0]]
self.set_xrng(time_rng, subplot_index)
# Set Y Limit
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if not np.all(self.yrng[subplot_index] == 0):
self.set_yrng(self.yrng[subplot_index], subplot_index)
else:
if ydata is None:
our_data = data.values
else:
our_data = ydata
if np.isfinite(our_data).any():
if invert_y_axis is False:
yrng = [np.nanmin(our_data), np.nanmax(our_data)]
else:
yrng = [np.nanmax(our_data), np.nanmin(our_data)]
else:
yrng = [0, 1]
self.set_yrng(yrng, subplot_index)
# Set X Format
if len(subplot_index) == 1:
days = (self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0])
else:
days = (self.xrng[subplot_index[0], subplot_index[1], 1] -
self.xrng[subplot_index[0], subplot_index[1], 0])
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
if ydata is not None:
if cbar_label is None:
self.add_colorbar(mesh, title=units, subplot_index=subplot_index)
else:
self.add_colorbar(mesh, title=cbar_label, subplot_index=subplot_index)
myFmt = common.get_date_format(days)
self.axes[subplot_index].xaxis.set_major_formatter(myFmt)
return self.axes[subplot_index]
def time_height_scatter(
self, data_field=None, dsname=None, cmap='rainbow',
alt_label=None, alt_field='alt', cb_label=None, **kwargs):
"""
Create a time series plot of altitude and data variable with
color also indicating value with a color bar. The color bar is
positioned to serve both as the indicator of color intensity
and as the second y-axis.
Parameters
----------
data_field : str
    Name of data field in the object to plot on the second y-axis. The
    altitude field given by `alt_field` is plotted on the first y-axis.
dsname : str or None
The name of the datastream to plot.
cmap : str
Colorbar color map to use.
alt_label : str
Altitude first y-axis label to use. If None, will try to use
long_name and units.
alt_field : str
Name of the altitude field in the object to plot on the first y-axis.
cb_label : str
Colorbar label to use. If not set will try to use
long_name and units.
**kwargs : keyword arguments
Any other keyword arguments that will be passed
into TimeSeriesDisplay.plot module when the figure
is made.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(("You must choose a datastream when there are 2 "
"or more datasets in the TimeSeriesDisplay "
"object."))
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get data and dimensions
data = self._obj[dsname][data_field]
altitude = self._obj[dsname][alt_field]
dim = list(self._obj[dsname][data_field].dims)
xdata = self._obj[dsname][dim[0]]
if alt_label is None:
try:
alt_label = (altitude.attrs['long_name'] +
''.join([' (', altitude.attrs['units'], ')']))
except KeyError:
alt_label = alt_field
if cb_label is None:
try:
cb_label = (data.attrs['long_name'] +
''.join([' (', data.attrs['units'], ')']))
except KeyError:
cb_label = data_field
colorbar_map = plt.cm.get_cmap(cmap)
self.fig.subplots_adjust(left=0.1, right=0.86,
bottom=0.16, top=0.91)
ax1 = self.plot(alt_field, color='black', **kwargs)
ax1.set_ylabel(alt_label)
ax2 = ax1.twinx()
sc = ax2.scatter(xdata.values, data.values, c=data.values,
marker='.', cmap=colorbar_map)
cbaxes = self.fig.add_axes(
[self.fig.subplotpars.right + 0.02, self.fig.subplotpars.bottom,
0.02, self.fig.subplotpars.top - self.fig.subplotpars.bottom])
cbar = plt.colorbar(sc, cax=cbaxes)
ax2.set_ylim(cbar.mappable.get_clim())
cbar.ax.set_ylabel(cb_label)
ax2.set_yticklabels([])
return self.axes[0]
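# Illustrative usage sketch; the datastream and variable names below are
# hypothetical, and the call follows the signature documented above.
#
#   display = TimeSeriesDisplay({'sgpsondewnpnC1.b1': ds})
#   display.time_height_scatter('tdry', dsname='sgpsondewnpnC1.b1',
#                               alt_field='alt', cmap='rainbow')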
def qc_flag_block_plot(
self, data_field=None, dsname=None,
subplot_index=(0, ), time_rng=None, assessment_color=None,
edgecolor='face', set_shading='auto', **kwargs):
"""
Create a time series plot of embedded quality control values
using broken barh plotting.
Parameters
----------
data_field : str
Name of data field in the object to plot corresponding quality
control.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to plot the quality control data on.
time_rng : tuple or list
List or tuple with (min, max) values to set the x-axis range limits.
assessment_color : dict
Dictionary lookup to override the default assessment-to-color mapping. Make
sure each assessment name is spelled with the correct case.
set_shading : string
Option to set the matplotlib.pcolormesh shading parameter.
Defaults to 'auto'.
**kwargs : keyword arguments
The keyword arguments for :func:`plt.broken_barh`.
"""
# Color to plot associated with assessment.
color_lookup = {'Bad': 'red',
'Incorrect': 'red',
'Indeterminate': 'orange',
'Suspect': 'orange',
'Missing': 'darkgray',
'Not Failing': 'green',
'Acceptable': 'green'}
if assessment_color is not None:
for asses, color in assessment_color.items():
color_lookup[asses] = color
if asses == 'Incorrect':
color_lookup['Bad'] = color
if asses == 'Suspect':
color_lookup['Indeterminate'] = color
# Set up list of test names to use for missing values
missing_val_long_names = ['Value equal to missing_value*',
'Value set to missing_value*',
'Value is equal to missing_value*',
'Value is set to missing_value*']
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(("You must choose a datastream when there are 2 "
"or more datasets in the TimeSeriesDisplay "
"object."))
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Set up or get current plot figure
if self.fig is None:
self.fig = plt.figure()
# Set up or get current axes
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
ax = self.axes[subplot_index]
# Set X Limit - We want the same time axes for all subplots
data = self._obj[dsname][data_field]
dim = list(self._obj[dsname][data_field].dims)
xdata = self._obj[dsname][dim[0]]
# Get data and attributes
qc_data_field = self._obj[dsname].qcfilter.check_for_ancillary_qc(data_field,
add_if_missing=False,
cleanup=False)
if qc_data_field is None:
raise ValueError(f"No quality control ancillary variable in Dataset for {data_field}")
flag_masks = self._obj[dsname][qc_data_field].attrs['flag_masks']
flag_meanings = self._obj[dsname][qc_data_field].attrs['flag_meanings']
flag_assessments = self._obj[dsname][qc_data_field].attrs['flag_assessments']
# Get time ranges for green blocks
time_delta = determine_time_delta(xdata.values)
barh_list_green = reduce_time_ranges(xdata.values, time_delta=time_delta,
broken_barh=True)
# Set background to gray indicating not available data
ax.set_facecolor('dimgray')
# Check if plotting 2D data vs 1D data. 2D data will be summarized by
# assessment category instead of showing each test.
data_shape = self._obj[dsname][qc_data_field].shape
if len(data_shape) > 1:
cur_assessments = list(set(flag_assessments))
cur_assessments.sort()
cur_assessments.reverse()
qc_data = np.full(data_shape, -1, dtype=np.int16)
plot_colors = []
tick_names = []
index = self._obj[dsname][qc_data_field].values == 0
if index.any():
qc_data[index] = 0
plot_colors.append(color_lookup['Not Failing'])
tick_names.append('Not Failing')
for ii, assess in enumerate(cur_assessments):
if assess not in color_lookup:
color_lookup[assess] = list(mplcolors.CSS4_COLORS.keys())[ii]
ii += 1
assess_data = self._obj[dsname].qcfilter.get_masked_data(data_field,
rm_assessments=assess)
if assess_data.mask.any():
qc_data[assess_data.mask] = ii
plot_colors.append(color_lookup[assess])
tick_names.append(assess)
# Overwrite missing data. Not sure if we want to do this because VAPs set
# the value to missing but the test is set to Bad. This tries to overcome that
# by looking for correct test description that would only indicate the values
# are missing not that they are set to missing by a test... most likely.
missing_test_nums = []
for ii, flag_meaning in enumerate(flag_meanings):
# Check if the bit set is indicating missing data.
for val in missing_val_long_names:
if re_search(val, flag_meaning):
test_num = parse_bit(flag_masks[ii])[0]
missing_test_nums.append(test_num)
assess_data = self._obj[dsname].qcfilter.get_masked_data(data_field,
rm_tests=missing_test_nums)
if assess_data.mask.any():
qc_data[assess_data.mask] = -1
plot_colors.append(color_lookup['Missing'])
tick_names.append('Missing')
# Create a masked array to allow not plotting where values are missing
qc_data = np.ma.masked_equal(qc_data, -1)
dims = self._obj[dsname][qc_data_field].dims
xvalues = self._obj[dsname][dims[0]].values
yvalues = self._obj[dsname][dims[1]].values
cMap = mplcolors.ListedColormap(plot_colors)
mesh = ax.pcolormesh(xvalues, yvalues, np.transpose(qc_data),
cmap=cMap, vmin=0, shading=set_shading)
divider = make_axes_locatable(ax)
# Determine correct placement of words on colorbar
tick_nums = ((np.arange(0, len(tick_names) * 2 + 1) /
(len(tick_names) * 2) * np.nanmax(qc_data))[1::2])
cax = divider.append_axes('bottom', size='5%', pad=0.3)
cbar = self.fig.colorbar(mesh, cax=cax, orientation='horizontal', spacing='uniform',
ticks=tick_nums, shrink=0.5)
cbar.ax.set_xticklabels(tick_names)
# Set YTitle
dim_name = list(set(self._obj[dsname][qc_data_field].dims) - set(['time']))
try:
ytitle = f"{dim_name[0]} ({self._obj[dsname][dim_name[0]].attrs['units']})"
ax.set_ylabel(ytitle)
except KeyError:
pass
# Add which tests were set as text to the plot
unique_values = []
for ii in np.unique(self._obj[dsname][qc_data_field].values):
unique_values.extend(parse_bit(ii))
if len(unique_values) > 0:
unique_values = list(set(unique_values))
unique_values.sort()
unique_values = [str(ii) for ii in unique_values]
self.fig.text(0.5, -0.35, f"QC Tests Tripped: {', '.join(unique_values)}",
transform=ax.transAxes, horizontalalignment='center',
verticalalignment='center', fontweight='bold')
else:
test_nums = []
for ii, assess in enumerate(flag_assessments):
if assess not in color_lookup:
color_lookup[assess] = list(mplcolors.CSS4_COLORS.keys())[ii]
# Plot green data first.
ax.broken_barh(barh_list_green, (ii, ii + 1), facecolors=color_lookup['Not Failing'],
edgecolor=edgecolor, **kwargs)
# Get test number from flag_mask bitpacked number
test_nums.append(parse_bit(flag_masks[ii]))
# Get masked array data to use mask for finding if/where test is set
data = self._obj[dsname].qcfilter.get_masked_data(
data_field, rm_tests=test_nums[-1])
if np.any(data.mask):
# Get time ranges from time and masked data
barh_list = reduce_time_ranges(xdata.values[data.mask],
time_delta=time_delta,
broken_barh=True)
customer['phone'] = service['sonata_usr']['phone']
u = monitoring_users.objects.all().filter(
Q(email=customer['email']) & Q(mobile=customer['phone']) & Q(type='cst'))
if len(u) == 0:
usr = monitoring_users(mobile=customer['phone'], email=customer['email'], type='cst')
usr.save()
else:
usr = u[0]
dev = None
if 'sonata_dev' in service:
developer = {}
developer['email'] = None
developer['phone'] = None
if 'email' in service['sonata_dev']:
developer['email'] = service['sonata_dev']['email']
if 'phone' in service['sonata_dev']:
developer['phone'] = service['sonata_dev']['phone']
u = monitoring_users.objects.all().filter(
Q(email=developer['email']) & Q(mobile=developer['phone']) & Q(type='dev'))
if len(u) == 0:
dev = monitoring_users(mobile=developer['phone'], email=developer['email'], type='dev')
dev.save()
else:
dev = u[0]
srv_pop_id = ''
srv_host_id = ''
if service['pop_id']:
srv_pop_id = service['pop_id']
pop = monitoring_pops.objects.all().filter(sonata_pop_id=srv_pop_id)
if pop.count() == 0:
pop = monitoring_pops(sonata_pop_id=srv_pop_id, sonata_sp_id="undefined", name="undefined",
prom_url="undefined", type="undefined")
pop.save()
if service['host_id']:
srv_host_id = service['host_id']
srv = monitoring_services.objects.all().filter(sonata_srv_id=service['sonata_srv_id'])
if srv.count() > 0:
old_vnf = monitoring_functions.objects.all().filter(service_id=srv.values('id'))
if old_vnf.count() > 0:
old_vnf.delete()
srv=srv[0]
else:
srv = monitoring_services(sonata_srv_id=service['sonata_srv_id'], name=service['name'],
description=service['description'], host_id=srv_host_id, pop_id=srv_pop_id)
srv.save()
if isinstance(usr, monitoring_users):
srv.user.add(usr)
if isinstance(dev, monitoring_users):
srv.user.add(dev)
srv.save()
oids_status = 0
metrics_status = 0
for f in functions:
fnc_pop_id = f['pop_id']
pop = monitoring_pops.objects.all().filter(sonata_pop_id=fnc_pop_id)
functions_status = len(functions)
pop_type = 'undefined'
sch_key = 'resource_id'
if 'host_id' in f:
vdu = f['host_id']
sch_key = 'resource_id'
pop_type = 'openstack'
if 'cnt_name' in f:
vdu = f['cnt_name'][0]
sch_key = 'container_name'
pop_type = 'k8s'
if pop.count() == 0:
pop = monitoring_pops(sonata_pop_id=fnc_pop_id, sonata_sp_id="undefined",
name="undefined",prom_url="undefined",type=pop_type)
pop.save()
func = monitoring_functions(service=srv, host_id=vdu, name=f['name'], host_type=sch_key,
sonata_func_id=f['sonata_func_id'], description=f['description'],
pop_id=f['pop_id'])
func.save()
for m in f['metrics']:
metrics_status += 1
metric = monitoring_metrics(function=func, name=m['name'], cmd=m['cmd'], threshold=m['threshold'],
interval=m['interval'], description=m['description'])
metric.save()
old_snmps = monitoring_snmp_entities.objects.all().filter(entity_id=vdu)
if old_snmps.count() > 0:
old_snmps.update(status='DELETED')
if 'snmp' in f:
if len(f['snmp']) > 0:
snmp = f['snmp']
if 'port' in snmp:
port = snmp['port']
else:
port = 161
ent = monitoring_snmp_entities(entity_id=vdu, version=snmp['version'],
auth_protocol=snmp['auth_protocol'],
security_level=snmp['security_level'],
ip=snmp['ip'], port=port, username=snmp['username'],
password='<PASSWORD>', interval=snmp['interval'],
entity_type='vnf')
ent.save()
for o in snmp['oids']:
oid = monitoring_snmp_oids(snmp_entity=ent, oid=o['oid'], metric_name=o['metric_name'],
metric_type=o['metric_type'], unit=o['unit'], mib_name=o['mib_name'])
oid.save()
oids_status += 1
rls = {}
rls['service'] = service['sonata_srv_id']
rls['vnf'] = "To be found..."
rls['rules'] = []
for r in rules:
nt = monitoring_notif_types.objects.all().filter(id=r['notification_type'])
if nt.count() == 0:
LOG.info('Alert notification type is not supported. Action aborted')
srv.delete()
return Response({'error': 'Alert notification type is not supported. Action aborted'},
status=status.HTTP_400_BAD_REQUEST)
else:
rules_status = len(rules)
rule = monitoring_rules(service=srv, summary=r['summary'], notification_type=nt[0], name=r['name'],
condition=r['condition'], duration=r['duration'], description=r['description'])
rule.save()
rl = {}
rl['name'] = r['name']
rl['description'] = r['description']
rl['summary'] = r['summary']
rl['duration'] = r['duration']
rl['notification_type'] = r['notification_type']
rl['condition'] = r['condition']
rl['labels'] = ["serviceID=\"" + rls['service'] + "\", tp=\"DEV\""]
rls['rules'].append(rl)
if len(rules) > 0:
cl = Http()
rsp = cl.POST('http://prometheus:9089/prometheus/rules', [], json.dumps(rls))
if rsp == 200:
LOG.info('New NS addition succeeded')
return Response(
{'status': "success", "vnfs": functions_status, "metrics": metrics_status, "rules": rules_status,
"snmp_oids": oids_status})
else:
srv.delete()
LOG.info('Service update failed')
return Response({'error': 'Service update failed: ' + str(rsp)},
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
else:
LOG.info('New NS addition succeeded')
return Response(
{'status': "success", "vnfs": functions_status, "metrics": metrics_status, "rules": rules_status,
"snmp_oids": oids_status})
def getVnfId(funct_, host_):
for fn in funct_:
if fn['host_id'] == host_:
return fn['sonata_func_id']
else:
return 'Undefined'
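# Illustrative sketch (the data below is made up): getVnfId returns the
# sonata_func_id of the parsed VNF whose host_id matches, falling back to
# 'Undefined' when the loop finishes without a match.
def _get_vnf_id_example():  # pragma: no cover
    functions = [{'host_id': 'vdu-1', 'sonata_func_id': 'func-1'}]
    return getVnfId(functions, 'vdu-1')  # -> 'func-1'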
class SntMetricsDetail(generics.DestroyAPIView):
queryset = monitoring_metrics.objects.all()
serializer_class = SntMetricsSerializer
class SntRulesList(generics.ListAPIView):
serializer_class = SntRulesSerializer
def get_queryset(self):
queryset = monitoring_rules.objects.all()
return queryset.filter(consumer='DEV')
class SntRulesPerServiceList(generics.ListAPIView):
# queryset = monitoring_functions.objects.all()
serializer_class = SntRulesPerSrvSerializer
def get_queryset(self):
queryset = monitoring_rules.objects.all()
srvid = self.kwargs['srvID']
return queryset.filter(service__sonata_srv_id=srvid, consumer='DEV')
class SntRulesDetail(generics.DestroyAPIView):
# queryset = monitoring_rules.objects.all()
serializer_class = SntRulesSerializer
def delete(self, request, *args, **kwargs):
queryset = monitoring_rules.objects.all()
srvid = self.kwargs['sonata_srv_id']
fq = queryset.filter(service__sonata_srv_id=srvid)
if fq.count() > 0:
fq.delete()
cl = Http()
rsp = cl.DELETE('http://prometheus:9089/prometheus/rules/' + str(srvid), [])
rsp = cl.DELETE('http://prometheus:9089/prometheus/rules/' + str('plc-' + srvid), [])
rsp = cl.DELETE('http://prometheus:9089/prometheus/rules/' + str('sla-' + srvid), [])
LOG.info("Service's rules removed (inl. SLA, POLICY)")
return Response({'status': "Service's rules removed (inl. SLA, POLICY)"}, status=status.HTTP_204_NO_CONTENT)
else:
LOG.info("Service's rules not found")
return Response({'status': "rules not found"}, status=status.HTTP_404_NOT_FOUND)
class SntPromMetricList(generics.RetrieveAPIView):
serializer_class = promMetricsListSerializer
def get(self, request, *args, **kwargs):
mt = ProData('prometheus', 9090)
data = mt.getMetrics()
response = {}
if 'data' in data:
response['metrics'] = data['data']
else:
response = data
print (response)
return Response(response)
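# Illustrative sketch (the payload below is made up): on success this view
# relays the metric-name listing it receives from Prometheus, so a response
# looks roughly like:
#
#   {"metrics": ["up", "vm_cpu_perc", "vm_mem_perc"]}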
class SntPromNSMetricListVnf(generics.RetrieveAPIView):
serializer_class = promMetricsListSerializer
def get(self, request, *args, **kwargs):
time_window = request.GET.get('tm',None)
mt = ProData('prometheus', 9090)
queryset = monitoring_functions.objects.all()
srvid = self.kwargs['srv_id']
vnfs = queryset.filter(service__sonata_srv_id=srvid)
response = {}
if vnfs.count() == 0:
response['status'] = "Fail (VNF not found)"
return Response(response)
response['vnfs'] = []
response['status'] = 'Success'
for vnf in vnfs:
f = {}
f['vnf_id'] = vnf.sonata_func_id
f['vdus'] = []
vdu={}
vdu['vdu_id'] = vnf.host_id
data = mt.getMetricsResId(vnf.host_type,vnf.host_id,time_window)
if 'data' in data:
vdu['metrics'] = data['data']
else:
vdu['metrics'] = []
f['vdus'].append(vdu)
response['vnfs'].append(f)
return Response(response)
class SntPromMetricListVnf(generics.RetrieveAPIView):
serializer_class = promMetricsListSerializer
def get(self, request, *args, **kwargs):
time_window = request.GET.get('tm', None)
mt = ProData('prometheus', 9090)
vnfid = self.kwargs['vnf_id']
queryset = monitoring_functions.objects.all()
vnf = queryset.filter(sonata_func_id=vnfid)
response = {}
if vnf.count() == 0:
response['status'] = "Fail (VNF not found)"
return Response(response)
vdus = []
vdus.append(vnf[0].host_id)
response['status'] = 'Success'
response['vdus'] = []
for vdu in vdus:
dt = {}
dt['vdu_id'] = vdu
data = mt.getMetricsResId(vnf[0].host_type,vdu,time_window)
if 'data' in data:
dt['metrics'] = data['data']
else:
dt['metrics'] = []
response['vdus'].append(dt)
return Response(response)
class SntPromMetricListVnfVdu(generics.RetrieveAPIView):
serializer_class = promMetricsListSerializer
def get(self, request, *args, **kwargs):
time_window = request.GET.get('tm', None)
mt = ProData('prometheus', 9090)
vnfid = self.kwargs['vnf_id']
vduid = self.kwargs['vdu_id']
queryset = monitoring_functions.objects.all()
vnf = queryset.filter(sonata_func_id=vnfid)
response = {}
if vnf.count() == 0:
response['status'] = "Fail (VNF: " + vnfid + " not found)"
return Response(response)
vdus = []
vdus.append(vnf[0].host_id)
if vduid not in vdus:
response['status'] = "Fail (VDU: " + vduid + " doesn't belong in VNF:" + vnfid + ")"
return Response(response)
response['status'] = 'Success'
response['vdus'] = []
for vdu in vdus:
dt = {}
dt['vdu_id'] = vdu
data = mt.getMetricsResId(vnf[0].host_type,vdu,time_window)
if 'data' in data:
dt['metrics'] = data['data']
else:
dt['metrics'] = []
response['vdus'].append(dt)
return Response(response)
class SntPromVnfMetricDetail(generics.ListAPIView):
serializer_class = promMetricsListSerializer
def get(self, request, *args, **kwargs):
metric_name = self.kwargs['metricName']
vnfid = self.kwargs['vnf_id']
mt = ProData('prometheus', 9090)
queryset = monitoring_functions.objects.all()
vnf = queryset.filter(sonata_func_id=vnfid)
response = {}
if vnf.count() == 0:
response['status'] = "Fail (VNF: " + vnfid + " not found)"
return Response(response)
vdus = []
vdus.append(vnf[0].host_id)
response['status'] = 'Success'
response['vdus'] = []
for vdu in vdus:
dt = {}
dt['vdu_id'] = vdu
data = mt.getMetricDetail(vdu, metric_name)
if 'data' in data:
dt['metrics'] = data['data']['result']
else:
dt['metrics'] = []
response['vdus'].append(dt)
return Response(response)
class SntWSreq(generics.CreateAPIView):
serializer_class = SntWSreqSerializer
def post(self, request, *args, **kwargs):
filters = []
psw = socket.gethostbyname('pushgateway')
if 'filters' in request.data.keys():
filters = request.data['filters']
metric = request.data['metric']
url = "http://" + psw + ":8002/new/?metric=" + metric + "¶ms=" + json.dumps(filters).replace(" ", "")
cl = Http()
rsp = cl.GET(url, [])
response = {}
try:
if 'name_space' in rsp:
response['status'] = "SUCCESS"
response['metric'] = request.data['metric']
response['ws_url'] = "ws://" + psw + ":8002/ws/" + str(rsp['name_space'])
else:
response['status'] = "FAIL"
response['ws_url'] = None
except KeyError:
response = request.data
pass
return Response(response)
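# Illustrative sketch (the values below are made up): a POST body such as
# {"metric": "vm_cpu_perc", "filters": []} is expected to come back roughly as
#
#   {"status": "SUCCESS", "metric": "vm_cpu_perc",
#    "ws_url": "ws://10.0.0.5:8002/ws/42"}
#
# with ws_url pointing at the pushgateway's websocket namespace.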
class SntWSreqPerPOP(generics.CreateAPIView):
serializer_class = SntWSreqSerializer
def post(self, request, *args, **kwargs):
filters = []
if 'filters' in request.data.keys():
filters = request.data['filters']
metric = request.data['metric']
pop_id = self.kwargs['popID']
prom_url = getPromIP(pop_id)
if prom_url['status'] == 'failed':
return Response({'status': prom_url['msg']}, status=status.HTTP_404_NOT_FOUND)
ip = socket.gethostbyname(prom_url['addr'])
url = "http://" + ip + ":8002/new/?metric=" + metric + "¶ms=" + json.dumps(filters).replace(" ", "")
cl = Http()
rsp = cl.GET(url, [])
response = {}
try:
if 'name_space' in rsp:
response['status'] = "SUCCESS"
response['metric'] = request.data['metric']
response['ws_url'] = "ws://" + ip + ":8002/ws/" + str(rsp['name_space'])
else:
response['status'] = "FAIL"
response['ws_url'] = None
except KeyError:
response = request.data
pass
return Response(response)
class SntRuleconf(generics.CreateAPIView):
serializer_class = SntRulesConfSerializer
def post(self, request, *args, **kwargs):
srvid = self.kwargs['srvID']
if 'rules' in request.data.keys():
rules = request.data['rules']
else:
return Response({'error': 'Undefined rules'}, status=status.HTTP_400_BAD_REQUEST)
# Check if service exists
srv = monitoring_services.objects.all().filter(sonata_srv_id=srvid)
if srv.count() == 0:
if srvid != 'generic':
return Response({'error': 'Requested Service not found'}, status=status.HTTP_404_NOT_FOUND)
else:
srvid = 'alerts'
# Delete old rule from DB
rules_db = monitoring_rules.objects.all().filter(service__sonata_srv_id=srvid, consumer='DEV')
rules_db.delete()
# Create prometheus configuration file
rls = {}
rls['service'] = srvid
rls['vnf'] = "To be found..."
rls['rules'] = []
rules_status = len(rules)
for r in rules:
nt = monitoring_notif_types.objects.all().filter(id=r['notification_type'])
if nt.count() == 0:
return Response({'error': 'Alert notification type is not supported. Action aborted'},
status=status.HTTP_400_BAD_REQUEST)
else:
if srvid != "alerts":
rule = monitoring_rules(service=srv[0], summary=r['summary'], notification_type=nt[0],
name=r['name'], condition=r['condition'], duration=r['duration'],
description=r['description'])
rule.save()
rl = {}
rl['name'] = r['name']
rl['description'] = r['description']
rl['summary'] = r['summary']
rl['duration'] = r['duration']
rl['notification_type'] = r['notification_type']
from render import Render
from random import uniform, randint
from math import floor
ren = Render()
####### Item 1
def random_point():
ren.glCreateWindow(600, 400)
ren.glClearColor(0, 0, 0)
ren.glViewPort(0, 0, 600, 400)
ren.glVertex(uniform(-1, 1), uniform(-1, 1))
ren.glColor(1, 1, 1)
ren.display("item1")
###### --------
# ##### Item 2
def white_point_each_corner():
ren.glCreateWindow(500, 500)
ren.glClearColor(0, 0, 0)
ren.glViewPort(0, 0, 500, 500)
ren.glColor(1, 1, 1)
ren.glVertex(-1, 1)
ren.glVertex(1, 1)
ren.glVertex(1, -1)
ren.glVertex(-1, -1)
ren.display('item2')
# ##### --------
###### Item 3
###### ------
###### Item 4
def square():
width = 500
height = 500
ren.glCreateWindow(width, height)
ren.glClearColor(0, 0, 0)
xVP = 0
yVP = 0
ren.glColor(1, 1, 1)
ren.glViewPort(5, 5, 490, 490)
i = 2/486
cont = -1
while (cont <= 1):
ren.glVertex(1, cont)
ren.glVertex(cont, 1)
ren.glVertex(-1, cont)
ren.glVertex(cont, -1)
cont += i
ren.display('item4')
###### ------
###### Item 5
def diagonal():
width = 500
height = 500
ren.glCreateWindow(width, height)
ren.glClearColor(0, 0, 0)
ren.glColor(1, 1, 1)
ren.glViewPort(0, 0, 500, 500)
i = 2/500
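# Stepping by 2/500 advances the normalized coordinate by one pixel of the
# 500-px viewport per iteration, so roughly one vertex lands on each pixel
# along the diagonal.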
cont = -1
while (cont <= 1):
ren.glVertex(cont, cont)
cont += i
ren.display('item5')
###### ------
###### Item 6
def static():
width = 500
height = 500
ren.glCreateWindow(width, height)
ren.glClearColor(0, 0, 0)
ren.glColor(1, 1, 1)
ren.glViewPort(0, 0, 500, 500)
s = 2/500
i = -1
while (i <= 1):
j = -1
while (j <= 1):
if randint(0,1) == 0:
ren.glVertex(i, j)
j += s
i += s
ren.display('item6')
###### ------
###### Item 7
def random_colors():
width = 500
height = 500
ren.glCreateWindow(width, height)
ren.glViewPort(0, 0, width, height)
s = 2/500
i = -1
while (i <= 1):
j = -1
while (j <= 1):
r = uniform(0, 1)
g = uniform(0, 1)
b = uniform(0, 1)
# print(r)
# print(g)
# print(b)
ren.glColor(r, g, b)
ren.glVertex(i, j)
j += s
i += s
ren.display('item7')
###### ------
###### Item 8
def stars():
width = 500
height = 500
ren.glCreateWindow(width, height)
ren.glClearColor(0, 0, 0)
ren.glColor(1, 1, 1)
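# The star field is generated by scanning the window in 5-pixel steps; each
# cell has about a 1% chance of receiving a star, drawn in one of three sizes
# by pointing a small viewport (5x5, 3x3 or 1x1) at that cell.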
for i in range(0, width, 5):
for j in range(0, height, 5):
num = uniform(0, 1)
if (num < 0.01):
type = randint(1, 3)
if (type == 1):
ren.glViewPort(i, j, 5, 5)
ren.glVertex(0, 0)
ren.glVertex(0, 0.5)
ren.glVertex(0, 1)
ren.glVertex(0, -0.5)
ren.glVertex(0, -1)
ren.glVertex(0.5, 0)
ren.glVertex(1, 0)
ren.glVertex(-0.5, 0)
ren.glVertex(-1, 0)
ren.glVertex(0.5, 0.5)
ren.glVertex(-0.5, 0.5)
ren.glVertex(0.5, -0.5)
ren.glVertex(-0.5, -0.5)
elif (type == 2):
ren.glViewPort(i,j,3,3)
ren.glVertex(0, 0)
ren.glVertex(0, 1)
ren.glVertex(0, -1)
ren.glVertex(1, 0)
ren.glVertex(-1, 0)
elif (type == 3):
ren.glViewPort(i,j,1,1)
ren.glVertex(0,0)
ren.display('item8')
###### ------
###### Item 9
def atari():
width = 192
height = 160
ren.glCreateWindow(width, height)
ren.glClearColor(0, 0, 0)
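# The scene is assembled from solid rectangles: each block below picks a color,
# points the viewport at one rectangle of the frame, and fills it by stepping
# vertices across the normalized range in increments of 2/width and 2/height.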
vpWidth = 12
vpHeight = 100
#909090
#144, 144, 144
#
ren.glColor(0.5625, 0.5625, 0.5625)
ren.glViewPort(0, 20, vpWidth, vpHeight)
sx = 2 / vpWidth
sy = 2 / vpHeight
i = -1
while (i <= 1):
j = -1
while(j <= 1):
ren.glVertex(i, j)
j += sy
i += sx
vpWidth = 192 - vpWidth * 2 + 2
vpHeight = 12
ren.glViewPort(11, 20 + 100 - 12 - 1, vpWidth, vpHeight)
sx = 2 / vpWidth
sy = 2 / vpHeight
i = -1
while (i <= 1):
j = -1
while(j <= 1):
ren.glVertex(i, j)
j += sy
i += sx
vpWidth = 12
vpHeight = 100
ren.glViewPort(width - 12, 20, vpWidth, vpHeight)
sx = 2 / vpWidth
sy = 2 / vpHeight
i = -1
while (i <= 1):
j = -1
while(j <= 1):
ren.glVertex(i, j)
j += sy
i += sx
vpWidth = 170
vpHeight = 5
#909090
#176, 60, 60
ren.glColor(0.6875, 0.234375, 0.234375)
ren.glViewPort(11, 90, vpWidth, vpHeight)
sx = 2 / vpWidth
sy = 2 / vpHeight
i = -1
while (i <= 1):
j = -1
while(j <= 1):
ren.glVertex(i, j)
j += sy
i += sx
vpWidth = 170
vpHeight = 5
#909090
#208, 128, 92
ren.glColor(0.8125, 0.5, 0.359375)
ren.glViewPort(11, 85, vpWidth, vpHeight)
sx = 2 / vpWidth
sy = 2 / vpHeight
i = -1
while (i <= 1):
j = -1
while(j <= 1):
ren.glVertex(i, j)
j += sy
i += sx
vpWidth = 170
vpHeight = 5
#909090
#224, 148, 112
ren.glColor(0.875, 0.578125, 0.4375)
ren.glViewPort(11, 80, vpWidth, vpHeight)
sx = 2 / vpWidth
sy = 2 / vpHeight
i = -1
while (i <= 1):
j = -1
while(j <= 1):
ren.glVertex(i, j)
j += sy
i += sx
vpWidth = 170
vpHeight = 5
#909090
#232, 232, 92
ren.glColor(0.90625, 0.90625, 0.359375)
ren.glViewPort(11, 75, vpWidth, vpHeight)
sx = 2 / vpWidth
sy = 2 / vpHeight
i = -1
while (i <= 1):
j = -1
while(j <= 1):
ren.glVertex(i, j)
j += sy
i += sx
vpWidth = 170
vpHeight = 5
#909090
#64, 124, 64
ren.glColor(0.25, 0.5, 0.25)
ren.glViewPort(11, 70, vpWidth, vpHeight)
sx = 2 / vpWidth
sy = 2 / vpHeight
i = -1
while (i <= 1):
j = -1
while(j <= 1):
ren.glVertex(i, j)
j += sy
i += sx
vpWidth = 170
vpHeight = 5
#909090
#56, 64, 176
ren.glColor(0.21875, 0.25, 0.6875)
ren.glViewPort(11, 65, vpWidth, vpHeight)
sx = 2 / vpWidth
sy = 2 / vpHeight
i = -1
while (i <= 1):
j = -1
while(j <= 1):
ren.glVertex(i, j)
j += sy
i += sx
vpWidth = 20
vpHeight = 3
#909090
#192, 88, 88
ren.glColor(0.75, 0.34375, 0.34375)
ren.glViewPort(100, 20, vpWidth, vpHeight)
sx = 2 / vpWidth
sy = 2 / vpHeight
i = -1
while (i <= 1):
j = -1
while(j <= 1):
ren.glVertex(i, j)
j += sy
i += sx
vpWidth = 3
vpHeight = 3
#909090
#192, 88, 88
ren.glColor(0.75, 0.34375, 0.34375)
ren.glViewPort(125, 40, vpWidth, vpHeight)
sx = 2 / vpWidth
sy = 2 / vpHeight
i = -1
while (i <= 1):
j = -1
while(j <= 1):
ren.glVertex(i, j)
j += sy
i += sx
vpWidth = 12
vpHeight = 5
#909090
#112, 208, 172
ren.glColor(0.4375, 0.8125, 0.671875)
ren.glViewPort(0, 20, vpWidth, vpHeight)
sx = 2 / vpWidth
sy = 2 / vpHeight
i = -1
while (i <= 1):
j = -1
while(j <= 1):
ren.glVertex(i, j)
j += sy
i += sx
vpWidth = 12
vpHeight = 5
#909090
#156, 32, 32
ren.glColor(0.609375, 0.125, 0.125)
ren.glViewPort(180, 20, vpWidth, vpHeight)
sx = 2 / vpWidth
sy = 2 / vpHeight
i = -1
while (i <= 1):
j = -1
while(j <= 1):
ren.glVertex(i, j)
j += sy
i += sx
vpWidth = 16
vpHeight = 5
#909090
#156, 32, 32
ren.glColor(0,0,0)
ren.glViewPort(150, 65, vpWidth, vpHeight)
sx = 2 / vpWidth
sy = 2 / vpHeight
i = -1
while (i <= 1):
j = -1
while(j <= 1):
ren.glVertex(i, j)
j += sy
i += sx
## 00000000000000000000000000
vpWidth = 3
vpHeight = 9
ren.glColor(0.5625,0.5625,0.5625)
ren.glViewPort(40, 120, vpWidth, vpHeight)
sx = 2 / vpWidth
sy = 2 / vpHeight
i = -1
while (i <= 1):
j = -1
while(j <= 1):
ren.glVertex(i, j)
j += sy
i += sx
vpWidth = 9
vpHeight = 3
ren.glColor(0.5625,0.5625,0.5625)
ren.glViewPort(40, 120, vpWidth, vpHeight)
sx = 2 / vpWidth
sy = 2 / vpHeight
i = -1
while (i <= 1):
j = -1
while(j <= 1):
ren.glVertex(i, j)
j += sy
i += sx
vpWidth = 9
vpHeight = 3
ren.glColor(0.5625,0.5625,0.5625)
ren.glViewPort(40, 129, vpWidth, vpHeight)
sx = 2 / vpWidth
sy = 2 / vpHeight
i = -1
while (i <= 1):
j = -1
while(j <= 1):
ren.glVertex(i, j)
j += sy
i += sx
vpWidth = 3
vpHeight = 9
ren.glColor(0.5625,0.5625,0.5625)
ren.glViewPort(46, 120, vpWidth, vpHeight)
sx = 2 / vpWidth
sy = 2 / vpHeight
i = -1
while (i <= 1):
j = -1
while(j <= 1):
ren.glVertex(i, j)
j += sy
i += sx
## 0000000000000000000000000
## 00000000000000000000000000
vpWidth = 3
vpHeight = 9
ren.glColor(0.5625,0.5625,0.5625)
ren.glViewPort(55, 120, vpWidth, vpHeight)
sx = 2 / vpWidth
sy = 2 / vpHeight
i = -1
while (i <= 1):
j = -1
while(j <= 1):
ren.glVertex(i, j)
j += sy
i += sx
vpWidth = 9
vpHeight = 3
ren.glColor(0.5625,0.5625,0.5625)
ren.glViewPort(55, 120, vpWidth, vpHeight)
sx = 2 / vpWidth
# intellstartup/django-caravaggio-rest-api
"""
Django settings for Caravaggio REST API project.
# Copyright (c) 2019 BuildGroup Data Services Inc.
# All rights reserved.
"""
import os
import sys
try:
from dse import ConsistencyLevel
except ImportError:
from cassandra import ConsistencyLevel
from configurations import Configuration
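# Illustrative note: with django-configurations the concrete settings class is
# chosen at runtime through environment variables, e.g. (values below are
# hypothetical):
#
#   DJANGO_SETTINGS_MODULE=caravaggio_rest_api.settings \
#   DJANGO_CONFIGURATION=Common python manage.py runserver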
class Common(Configuration):
CARAVAGGIO_API_TITLE = "Caravaggio API"
CARAVAGGIO_API_VERSION = "v1"
CARAVAGGIO_API_DESCRIPTION = "Example API for Caravaggio RESTful applications"
CARAVAGGIO_API_TERMS_URL = "https://www.google.com/policies/terms/"
CARAVAGGIO_API_CONTACT = "<EMAIL>"
CARAVAGGIO_API_LICENSE = "BSD License"
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv("SECRET_KEY", "2w=es4^%3i4n2cya(0)ws&bq+@h)m1nepzkvd&pi+wvgsue%ms")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.getenv("DEBUG", "False") == "True"
ADMINS = (
# ('<NAME>', '<EMAIL>'),
)
MANAGERS = ADMINS
EMAIL_HOST_USER = os.getenv("EMAIL_HOST_USER", "<EMAIL>")
EMAIL_HOST_PASSWORD = os.getenv("EMAIL_HOST_PASSWORD", "***")
DSE_SUPPORT = os.getenv("DSE_SUPPORT", "True") == "True"
# SECURITY WARNING: App Engine's security features ensure that it is
# safe to have ALLOWED_HOSTS = ['*'] when the app is deployed. If you
# deploy a Django app not on App Engine, make sure to set an appropriate
# host here.
# See https://docs.djangoproject.com/en/1.10/ref/settings/
ALLOWED_HOSTS = ["*"]
INTERNAL_IPS = []
# Application definition
INSTALLED_APPS = [
"django_cassandra_engine",
"django_cassandra_engine.sessions",
"django.contrib.auth",
"django.contrib.contenttypes",
# 'django.contrib.sessions',
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
# Comment the next line to disable the admin:
"django.contrib.admin",
# Comment the next line to disable admin documentation:
"django.contrib.admindocs",
"rest_framework",
"rest_framework_filters",
"rest_framework.authtoken",
"rest_framework_cache",
"drf_yasg",
"haystack",
"caravaggio_rest_api",
"caravaggio_rest_api.logging",
"caravaggio_rest_api.users",
"caravaggio_rest_api.example.company",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "caravaggio_rest_api.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "caravaggio_rest_api/templates"),],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.template.context_processors.request",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "caravaggio_rest_api.wsgi.application"
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING_FILE = "/data/caravaggio_rest_api/" "log/caravaggio_rest_api-debug.log"
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"filters": {"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"}},
"formatters": {
"verbose": {"format": "%(levelname)s %(asctime)s %(module)s %(process)d " "%(thread)d %(message)s"},
"simple": {"format": "%(levelname)s %(message)s"},
},
"handlers": {
"console": {"level": "DEBUG", "class": "logging.StreamHandler", "formatter": "simple"},
"mail_admins": {
"level": "ERROR",
"filters": ["require_debug_false"],
"class": "django.utils.log.AdminEmailHandler",
},
"debug_log": {
"level": "DEBUG",
"class": "logging.handlers.RotatingFileHandler",
"filename": LOGGING_FILE,
"maxBytes": 1024 * 1024 * 100,
"backupCount": 1,
"formatter": "verbose",
},
},
"loggers": {
"django.request": {"handlers": ["mail_admins"], "level": "ERROR", "propagate": True,},
"django_cassandra_engine": {
"handlers": ["console", "debug_log", "mail_admins"],
"level": "DEBUG",
"propagate": True,
},
"caravaggio_rest_api": {"handlers": ["console", "mail_admins"], "level": "DEBUG", "propagate": True,},
},
}
# Database
# Check to see if MySQLdb is available; if not, have pymysql masquerade as
# MySQLdb. This is a convenience feature for developers who cannot install
# MySQLdb locally; when running in production on Google App Engine Standard
# Environment, MySQLdb will be used.
# try:
# import MySQLdb # noqa: F401
# except ImportError:
# import pymysql
# pymysql.install_as_MySQLdb()
# [START db_setup]
DB_HOST = os.getenv("DB_HOST", "127.0.0.1")
DB_PORT = os.getenv("DB_PORT", "6543")
DB_NAME = os.getenv("DB_NAME", "caravaggio")
DB_USER = os.getenv("DB_USER", "caravaggio")
DB_PASSWORD = os.getenv("DB_PASSWORD", "<PASSWORD>")
CASSANDRA_DB_HOST = os.getenv("CASSANDRA_DB_HOST", "127.0.0.1,127.0.0.2,127.0.0.3")
CASSANDRA_DB_NAME = os.getenv("CASSANDRA_DB_NAME", "caravaggio")
CASSANDRA_DB_USER = os.getenv("CASSANDRA_DB_USER", "caravaggio")
CASSANDRA_DB_PASSWORD = os.getenv("CASSANDRA_DB_PASSWORD", "<PASSWORD>")
CASSANDRA_DB_REPLICATION = os.getenv("CASSANDRA_DB_REPLICATION", 1)
try:
from dse.cqlengine import models
except ImportError:
from cassandra.cqlengine import models
models.DEFAULT_KEYSPACE = CASSANDRA_DB_NAME
# Running on production App Engine, so connect to Google Cloud SQL using
# the unix socket at /cloudsql/<your-cloudsql-connection string>
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"HOST": DB_HOST,
"PORT": DB_PORT,
"NAME": DB_NAME,
"USER": DB_USER,
"PASSWORD": DB_PASSWORD,
},
"cassandra": {
"ENGINE": "django_cassandra_engine",
"NAME": CASSANDRA_DB_NAME,
"TEST": {"NAME": "test_{}".format(CASSANDRA_DB_NAME)},
"HOST": CASSANDRA_DB_HOST,
"USER": CASSANDRA_DB_USER,
"PASSWORD": <PASSWORD>,
"OPTIONS": {
"replication": {"strategy_class": "SimpleStrategy", "replication_factor": CASSANDRA_DB_REPLICATION},
"connection": {
"consistency": ConsistencyLevel.LOCAL_ONE,
"retry_connect": True
# + All connection options for cassandra.cluster.Cluster()
},
"session": {
"default_timeout": 10,
"default_fetch_size": 10000
# + All options for cassandra.cluster.Session()
},
},
},
}
# [END db_setup]
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#
# auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{"NAME": "django.contrib.auth.password_validation." "UserAttributeSimilarityValidator",},
{"NAME": "django.contrib.auth.password_validation." "MinimumLengthValidator",},
{"NAME": "django.contrib.auth.password_validation." "CommonPasswordValidator",},
{"NAME": "django.contrib.auth.password_validation." "NumericPasswordValidator",},
]
# Internationalization
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = "UTC"
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en-us"
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded
# files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ""
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ""
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR + "/caravaggio_rest_api/static")
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
# STATIC_URL = '/static/'
STATIC_URL = os.getenv("STATIC_URL", "/static/")
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
# os.path.join(BASE_DIR + '/caravaggio_rest_api/static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
REST_FRAMEWORK = {
"PAGE_SIZE": 10,
"DEFAULT_PAGINATION_CLASS": "caravaggio_rest_api.pagination.CustomPageNumberPagination",
"DEFAULT_THROTTLE_CLASSES": (
"rest_framework.throttling.AnonRateThrottle",
"rest_framework.throttling.UserRateThrottle",
"rest_framework.throttling.ScopedRateThrottle",
),
"DEFAULT_THROTTLE_RATES": {"anon": "100/day", "user": "60/minute"},
# The name of the alternative query string parameter we can use to
# authenticate users in each request
# Ex. http://mydomain.com/users/user/?auth_token=<token_key>
"QUERY_STRING_AUTH_TOKEN": "auth_token",
# Do we want to log any access made to the API?
"LOG_ACCESSES": True,
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework.authentication.SessionAuthentication",
"caravaggio_rest_api.drf.authentication." "TokenAuthSupportQueryString",
),
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
"DEFAULT_PERMISSION_CLASSES": [
# 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
"rest_framework.permissions.IsAuthenticated"
],
# REST framework also includes support for generic filtering backends
# that allow you to easily construct complex searches and filters
"DEFAULT_FILTER_BACKENDS": (
"drf_haystack.filters.HaystackFilter",
"drf_haystack.filters.HaystackBoostFilter",
"drf_haystack.filters.HaystackOrderingFilter",
),
"TEST_REQUEST_DEFAULT_FORMAT": "json",
"ORDERING_PARAM": "order_by",
# https://www.django-rest-framework.org/api-guide/fields/#decimalfield
# To use decimal as representation by default
"COERCE_DECIMAL_TO_STRING": False,
"EXCEPTION_HANDLER": "caravaggio_rest_api.drf.exceptions.caravaggio_exception_handler",
}
ACCOUNT_USER_MODEL_USERNAME_FIELD = None
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_UNIQUE_EMAIL = True
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_USER_EMAIL_FIELD = "email"
ACCOUNT_LOGOUT_ON_GET = True
AUTH_USER_MODEL = "users.CaravaggioUser"
REST_AUTH_SERIALIZERS = {
"USER_DETAILS_SERIALIZER": "caravaggio_rest_api.users.serializers." "CaravaggioUserDetailsSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
"REGISTER_SERIALIZER": "caravaggio_rest_api.users.serializers." "CaravaggioUserRegisterSerializer",
}
SESSION_ENGINE = "django_cassandra_engine.sessions.backends.db"
CASSANDRA_FALLBACK_ORDER_BY_PYTHON = True
# Enable/Disable throttling
THROTTLE_ENABLED = os.getenv("THROTTLE_ENABLED", "False") == "True"
GET_THROTTLE_RATE = "6000/minute"
LIST_THROTTLE_RATE = "200/minute"
POST_THROTTLE_RATE = "100/minute"
PUT_THROTTLE_RATE = "100/minute"
DELETE_THROTTLE_RATE = "60/minute"
VALIDATE_THROTTLE_RATE = "60/minute"
PATCH_THROTTLE_RATE = "100/minute"
METADATA_THROTTLE_RATE = "6000/minute"
FACETS_THROTTLE_RATE = "6000/minute"
THROTTLE_OPERATIONS = {
"retrieve": GET_THROTTLE_RATE,
"highlight": GET_THROTTLE_RATE,
"list": LIST_THROTTLE_RATE,
"create": POST_THROTTLE_RATE,
"update": PUT_THROTTLE_RATE,
"destroy": DELETE_THROTTLE_RATE,
"validate": VALIDATE_THROTTLE_RATE,
"partial_update": PATCH_THROTTLE_RATE,
"metadata": METADATA_THROTTLE_RATE,
"facets": FACETS_THROTTLE_RATE,
}
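# THROTTLE_OPERATIONS maps DRF viewset action names to the per-action
# rates defined above; a custom throttle class could look up the rate
# for the current action, for example (hypothetical snippet):
#   rate = settings.THROTTLE_OPERATIONS.get(view.action)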
HAYSTACK_DJANGO_ID_FIELD = "id"
HAYSTACK_KEYSPACE = CASSANDRA_DB_NAME
if "test" in sys.argv:
HAYSTACK_KEYSPACE = "test_{}".format(HAYSTACK_KEYSPACE)
HAYSTACK_URL = os.getenv("HAYSTACK_URL", "http://127.0.0.1:8983/solr")
HAYSTACK_ADMIN_URL = os.getenv("HAYSTACK_ADMIN_URL", "http://127.0.0.1:8983/solr/admin/cores")
HAYSTACK_CONNECTIONS = {
"default": {
"ENGINE": "caravaggio_rest_api.haystack.backends." "solr_backend.CassandraSolrEngine",
"URL": HAYSTACK_URL,
"KEYSPACE": HAYSTACK_KEYSPACE,
"ADMIN_URL": HAYSTACK_ADMIN_URL,
"BATCH_SIZE": 100,
"INCLUDE_SPELLING": True,
"DISTANCE_AVAILABLE": True,
},
}
# Caching: Redis backend for caching
REDIS_HOST_PRIMARY = os.getenv("REDIS_HOST_PRIMARY", "127.0.0.1")
REDIS_PORT_PRIMARY = os.getenv("REDIS_PORT_PRIMARY", "6379")
REDIS_PASS_PRIMARY = os.getenv("REDIS_PASS_PRIMARY", "")
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://{0}{1}:{2}/1".format(
":{0}@".format(REDIS_PASS_PRIMARY) if REDIS_PASS_PRIMARY else "", REDIS_HOST_PRIMARY, REDIS_PORT_PRIMARY
),
"OPTIONS": {"CLIENT_CLASS": "django_redis.client.DefaultClient",},
"KEY_PREFIX": "caravaggio_rest_api",
},
"disk_cache": {
"BACKEND": "django.core.cache.backends.filebased.FileBasedCache",
"LOCATION": "/var/tmp/caravaggio_rest_api_cache",
"TIMEOUT": 300,
"OPTIONS": {"MAX_ENTRIES": 10000},
"KEY_PREFIX": "caravaggio_rest_api",
},
"mem_cache": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "caravaggio_rest_api_cache",
"KEY_PREFIX": "caravaggio_rest_api",
},
}
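# For reference, with hypothetical values REDIS_PASS_PRIMARY="s3cret",
# REDIS_HOST_PRIMARY="10.0.0.5" and REDIS_PORT_PRIMARY="6379", the
# "default" LOCATION above resolves to "redis://:s3cret@10.0.0.5:6379/1";
# with an empty password it is simply "redis://10.0.0.5:6379/1".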
# DRF Caching
REST_FRAMEWORK_CACHE = {
# The caching system must be always a distributed cache if we plan
# to start multiple instances | |
<gh_stars>0
# coding: utf-8
"""
Tts API
Description # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from telestream_cloud_tts.configuration import Configuration
class Project(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'str',
'name': 'str',
'description': 'str',
'status': 'str',
'language': 'str',
'sample_rate': 'int',
'profanity_filter': 'bool',
'generate_proxy': 'bool',
'custom_words': 'str',
'capabilities': 'list[str]',
'created_at': 'str',
'updated_at': 'str'
}
attribute_map = {
'id': 'id',
'name': 'name',
'description': 'description',
'status': 'status',
'language': 'language',
'sample_rate': 'sample_rate',
'profanity_filter': 'profanity_filter',
'generate_proxy': 'generate_proxy',
'custom_words': 'custom_words',
'capabilities': 'capabilities',
'created_at': 'created_at',
'updated_at': 'updated_at'
}
def __init__(self, id=None, name=None, description=None, status=None, language=None, sample_rate=None, profanity_filter=None, generate_proxy=None, custom_words=None, capabilities=None, created_at=None, updated_at=None, local_vars_configuration=None): # noqa: E501
"""Project - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._id = None
self._name = None
self._description = None
self._status = None
self._language = None
self._sample_rate = None
self._profanity_filter = None
self._generate_proxy = None
self._custom_words = None
self._capabilities = None
self._created_at = None
self._updated_at = None
self.discriminator = None
if id is not None:
self.id = id
if name is not None:
self.name = name
if description is not None:
self.description = description
if status is not None:
self.status = status
if language is not None:
self.language = language
if sample_rate is not None:
self.sample_rate = sample_rate
if profanity_filter is not None:
self.profanity_filter = profanity_filter
if generate_proxy is not None:
self.generate_proxy = generate_proxy
if custom_words is not None:
self.custom_words = custom_words
if capabilities is not None:
self.capabilities = capabilities
if created_at is not None:
self.created_at = created_at
if updated_at is not None:
self.updated_at = updated_at
@property
def id(self):
"""Gets the id of this Project. # noqa: E501
The ID of the Project. # noqa: E501
:return: The id of this Project. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Project.
The ID of the Project. # noqa: E501
:param id: The id of this Project. # noqa: E501
:type: str
"""
self._id = id
@property
def name(self):
"""Gets the name of this Project. # noqa: E501
The name of the Project. # noqa: E501
:return: The name of this Project. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Project.
The name of the Project. # noqa: E501
:param name: The name of this Project. # noqa: E501
:type: str
"""
self._name = name
@property
def description(self):
"""Gets the description of this Project. # noqa: E501
The description of the Project. # noqa: E501
:return: The description of this Project. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this Project.
The description of the Project. # noqa: E501
:param description: The description of this Project. # noqa: E501
:type: str
"""
self._description = description
@property
def status(self):
"""Gets the status of this Project. # noqa: E501
Determines a stage of training. # noqa: E501
:return: The status of this Project. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this Project.
Determines a stage of training. # noqa: E501
:param status: The status of this Project. # noqa: E501
:type: str
"""
allowed_values = ["available", "modified", "training", "failed"] # noqa: E501
if self.local_vars_configuration.client_side_validation and status not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `status` ({0}), must be one of {1}" # noqa: E501
.format(status, allowed_values)
)
self._status = status
@property
def language(self):
"""Gets the language of this Project. # noqa: E501
The language code of model. # noqa: E501
:return: The language of this Project. # noqa: E501
:rtype: str
"""
return self._language
@language.setter
def language(self, language):
"""Sets the language of this Project.
The language code of model. # noqa: E501
:param language: The language of this Project. # noqa: E501
:type: str
"""
self._language = language
@property
def sample_rate(self):
"""Gets the sample_rate of this Project. # noqa: E501
The sample rate of model. # noqa: E501
:return: The sample_rate of this Project. # noqa: E501
:rtype: int
"""
return self._sample_rate
@sample_rate.setter
def sample_rate(self, sample_rate):
"""Sets the sample_rate of this Project.
The sample rate of model. # noqa: E501
:param sample_rate: The sample_rate of this Project. # noqa: E501
:type: int
"""
self._sample_rate = sample_rate
@property
def profanity_filter(self):
"""Gets the profanity_filter of this Project. # noqa: E501
If true, the service replaces profanity from output with asterisks. # noqa: E501
:return: The profanity_filter of this Project. # noqa: E501
:rtype: bool
"""
return self._profanity_filter
@profanity_filter.setter
def profanity_filter(self, profanity_filter):
"""Sets the profanity_filter of this Project.
If true, the service replaces profanity from output with asterisks. # noqa: E501
:param profanity_filter: The profanity_filter of this Project. # noqa: E501
:type: bool
"""
self._profanity_filter = profanity_filter
@property
def generate_proxy(self):
"""Gets the generate_proxy of this Project. # noqa: E501
Indicates whether video preview should be generated. # noqa: E501
:return: The generate_proxy of this Project. # noqa: E501
:rtype: bool
"""
return self._generate_proxy
@generate_proxy.setter
def generate_proxy(self, generate_proxy):
"""Sets the generate_proxy of this Project.
Indicates whether video preview should be generated. # noqa: E501
:param generate_proxy: The generate_proxy of this Project. # noqa: E501
:type: bool
"""
self._generate_proxy = generate_proxy
@property
def custom_words(self):
"""Gets the custom_words of this Project. # noqa: E501
Words used for model training, separated by space. # noqa: E501
:return: The custom_words of this Project. # noqa: E501
:rtype: str
"""
return self._custom_words
@custom_words.setter
def custom_words(self, custom_words):
"""Sets the custom_words of this Project.
Words used for model training, separated by space. # noqa: E501
:param custom_words: The custom_words of this Project. # noqa: E501
:type: str
"""
self._custom_words = custom_words
@property
def capabilities(self):
"""Gets the capabilities of this Project. # noqa: E501
:return: The capabilities of this Project. # noqa: E501
:rtype: list[str]
"""
return self._capabilities
@capabilities.setter
def capabilities(self, capabilities):
"""Sets the capabilities of this Project.
:param capabilities: The capabilities of this Project. # noqa: E501
:type: list[str]
"""
self._capabilities = capabilities
@property
def created_at(self):
"""Gets the created_at of this Project. # noqa: E501
A date and time when the project was created # noqa: E501
:return: The created_at of this Project. # noqa: E501
:rtype: str
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this Project.
A date and time when the project was created # noqa: E501
:param created_at: The created_at of this Project. # noqa: E501
:type: str
"""
self._created_at = created_at
@property
def updated_at(self):
"""Gets the updated_at of this Project. # noqa: E501
A date and time when the project was updated # noqa: E501
:return: The updated_at of this Project. # noqa: E501
:rtype: str
"""
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
"""Sets the updated_at of this Project.
A date and time when the project was updated # noqa: E501
:param updated_at: The updated_at of this Project. # noqa: E501
:type: str
"""
self._updated_at = updated_at
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
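# Note: the loop above recurses into nested models (anything exposing its
# own to_dict) and into lists of them, so the returned dict is plain data
# that can be serialized directly.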
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, | |
<reponame>peckhams/stochastic_conflict_model
#
# Copyright (c) 2020, <NAME>
#
# Note: This file contains a set of functions for visualizing the
# contents of output files in netCDF format
# (e.g. TopoFlow or Stochastic Conflict Model)
#
# May 2020. Moved all routines from Jupyter notebook called
# TopoFlow_Visualization.ipynb to here.
# Tested all of them in a new Jupyter notebook called
# TopoFlow_Visualization2.ipynb.
#
#--------------------------------------------------------------------
#
# Define some stretch functions for 2D color images:
# normalize_grid()
# histogram_equalize()
# power_stretch0()
# power_stretch1()
# power_stretch2()
# power_stretch3()
# log_stretch()
# linear_stretch()
# stretch_grid()
#
# Define functions to show grids as color images:
# read_grid_from_nc_file()
# read_and_show_rtg()
# show_grid_as_image()
# save_grid_stack_as_images()
# save_rts_as_images()
#
# Create movies from set of images:
# (works for grid images, profile images, etc.)
# create_movie_from_images()
#
# plot_data()
# create_visualization_files()
# delete_png_files()
#--------------------------------------------------------------------
# import os.path
# import shutil
import glob, os, time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import ListedColormap
import imageio
import imageio_ffmpeg as imff # for adding opacity
from conflict.utils import ncgs_files
from conflict.utils import rtg_files
from conflict.utils import rts_files
#--------------------------------------------------------------------
def normalize_grid( grid ):
gmin = grid.min()
gmax = grid.max()
if (gmin != gmax):
norm = (grid - gmin) / (gmax - gmin)
else:
# Avoid divide by zero
norm = np.zeros( grid.shape, dtype=grid.dtype )
return norm
# normalize_grid()
#--------------------------------------------------------------------
def histogram_equalize( grid, PLOT_NCS=False):
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html
(hist, bin_edges) = np.histogram( grid, bins=256)
# hmin = hist.min()
# hmax = hist.max()
cs = hist.cumsum()
ncs = (cs - cs.min()) / (cs.max() - cs.min())
ncs.astype('uint8');
if (PLOT_NCS):
plt.plot( ncs )
flat = grid.flatten()
if (flat.max() != flat.min()):
flat2 = np.uint8( 255 * (flat - flat.min()) / (flat.max() - flat.min()) )
grid2 = ncs[ flat2 ].reshape( grid.shape )
else:
flat2 = np.zeros( flat.size, dtype='uint8' )
grid2 = ncs[ flat2 ].reshape( grid.shape )
return grid2
# histogram_equalize()
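#--------------------------------------------------------------------
# A short usage sketch (values are hypothetical): the normalized
# cumulative histogram built above maps any input grid onto roughly
# uniform values in [0, 1].
def demo_histogram_equalize():
    grid = np.random.gamma( 2.0, 10.0, size=(50, 50) )
    eq   = histogram_equalize( grid )
    print( 'min, max after equalization:', eq.min(), eq.max() )
# demo_histogram_equalize()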
#--------------------------------------------------------------------
def power_stretch0( grid, p ):
norm = normalize_grid( grid )
return norm**p
# power_stretch0()
#--------------------------------------------------------------------
def power_stretch1( grid, p ):
return grid**p
# power_stretch1()
#--------------------------------------------------------------------
def power_stretch2( grid, a=1000, b=0.5):
# Note: Try a=1000 and b=0.5
norm = normalize_grid( grid )
return (1 - (1 + a * norm)**(-b))
# power_stretch2()
#--------------------------------------------------------------------
def power_stretch3( grid, a=1, b=2):
# Note: Try a=1, b=2 (shape of a quarter circle)
norm = normalize_grid( grid )
return (1 - (1 - norm**a)**b)**(1/b)
# power_stretch3()
#--------------------------------------------------------------------
def log_stretch( grid, a=1 ):
return np.log( (a * grid) + 1 )
# log_stretch()
#--------------------------------------------------------------------
def linear_stretch( grid ):
norm = normalize_grid( grid )
return norm
# linear_stretch()
#--------------------------------------------------------------------
def stretch_grid( grid, stretch, a=1, b=2, p=0.5 ):
name = stretch
if (name == 'hist_equal'):
grid2 = histogram_equalize( grid, PLOT_NCS=False)
elif (name == 'linear'):
grid2 = linear_stretch(grid)
elif (name == 'log'):
grid2 = log_stretch( grid, a=a )
elif (name == 'power'):
grid2 = power_stretch0( grid, p=p )
elif (name == 'power1'):
# Try: p = 0.3
grid2 = power_stretch1( grid, p)
elif (name == 'power2'):
# Try: a=1000, b=0.5.
grid2 = power_stretch2( grid, a=a, b=b )
elif (name == 'power3'):
# Try: a=1, b=2.
grid2 = power_stretch3( grid, a=a, b=b)
else:
print('### SORRY, Unknown stretch =', name)
return grid
return grid2
# stretch_grid()
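#--------------------------------------------------------------------
# A short usage sketch (values are hypothetical) comparing two of the
# stretches that stretch_grid() dispatches to.
def demo_stretch_grid():
    grid  = np.linspace( 0.0, 500.0, 10000 ).reshape( 100, 100 )
    g_log = stretch_grid( grid, 'log',    a=1 )
    g_pow = stretch_grid( grid, 'power3', a=1, b=2 )
    print( 'log stretch range:   ', g_log.min(), g_log.max() )
    print( 'power3 stretch range:', g_pow.min(), g_pow.max() )
# demo_stretch_grid()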
#--------------------------------------------------------------------
#--------------------------------------------------------------------
def read_grid_from_nc_file( nc_file, time_index=1, REPORT=True ):
# Typical 2D nc files
# nc_file = case_prefix + '_2D-Q.nc'
# nc_file = case_prefix + '_2D-d-flood.nc'
## if ('_2D' not in nc_file):
if ('2D' not in nc_file):
print('ERROR: This function is only for "2D" netCDF files.')
print(' Filename must contain "2D" for grid stacks.')
return
ncgs = ncgs_files.ncgs_file()
ncgs.open_file( nc_file )
var_name_list = ncgs.get_var_names()
if (REPORT):
print('var_names in netCDF file =' )
print( var_name_list )
#----------------------------
# Determine valid var_index
#-----------------------------------------
# Old: 0=time, 1=X, 2=Y, 3=V
# New: 0=time, 1=datetime, 2=X, 3=Y, 4=V
#-----------------------------------------
var_index = 1
other_vars = ['time','datetime','X','Y','Z']
while (True):
var_name = var_name_list[ var_index ]
if (var_name not in other_vars):
break
var_index += 1
### var_index = 3 # 0=time, 1=X, 2=Y, 3=V ###############
### var_name = var_name_list[ var_index ]
long_name = ncgs.get_var_long_name( var_name )
var_units = ncgs.get_var_units( var_name )
n_grids = ncgs.ncgs_unit.variables[ var_name ].n_grids
if (REPORT):
print('long_name =', long_name)
print('var_name =', var_name)
print('var_units =', var_units)
print('n_grids =', n_grids)
#--------------------------------------------
# Use these to set "extent" in plt.imshow()
#--------------------------------------------
minlon = ncgs.ncgs_unit.variables['X'].geospatial_lon_min
maxlon = ncgs.ncgs_unit.variables['X'].geospatial_lon_max
minlat = ncgs.ncgs_unit.variables['Y'].geospatial_lat_min
maxlat = ncgs.ncgs_unit.variables['Y'].geospatial_lat_max
extent = [minlon, maxlon, minlat, maxlat]
#----------------------------------------------
# Read grid from nc_file for given time_index
#----------------------------------------------
grid = ncgs.get_grid( var_name, time_index )
if (REPORT):
print( 'extent = ')
print( extent )
print( 'grid shape =', grid.shape )
print( 'min(grid) =', grid.min() )
print( 'max(grid) =', grid.max() )
ncgs.close_file()
return (grid, long_name, extent)
# read_grid_from_nc_file()
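#--------------------------------------------------------------------
# A short usage sketch (the file name and time index are hypothetical):
# read one time slice from a "2D" grid-stack netCDF file and display it
# with one of the stretches defined above.
def demo_read_and_show_nc_grid():
    nc_file = 'Test1_2D-Q.nc'
    (grid, long_name, extent) = read_grid_from_nc_file( nc_file, time_index=5 )
    show_grid_as_image( grid, long_name, extent=extent, cmap='rainbow',
                        stretch='power3', a=1, b=2 )
# demo_read_and_show_nc_grid()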
#--------------------------------------------------------------------
def read_and_show_rtg( rtg_filename, long_name, VERBOSE=True,
cmap='jet', BLACK_ZERO=False,
stretch='hist_equal',
a=1, b=2, p=0.5, im_file=None,
xsize=8, ysize=8, dpi=None ):
rtg = rtg_files.rtg_file()
OK = rtg.open_file( rtg_filename )
if not(OK):
print('Sorry, Could not open RTG file:')
print( rtg_filename )
return
grid = rtg.read_grid( VERBOSE=VERBOSE )
extent = rtg.get_bounds()
rtg.close_file()
if (VERBOSE):
print('Byte swap needed =', rtg.byte_swap_needed())
print('Reading grid from RTG file...')
print('extent =', extent)
print('min(grid), max(grid) =', grid.min(), grid.max())
print('Finished.')
print()
show_grid_as_image( grid, long_name, extent=extent, cmap=cmap,
BLACK_ZERO=BLACK_ZERO, stretch=stretch,
a=a, b=b, p=p, im_file=im_file,
xsize=xsize, ysize=ysize, dpi=dpi)
# read_and_show_rtg()
#--------------------------------------------------------------------
def show_grid_as_image( grid, long_name, extent=None,
cmap='rainbow', BLACK_ZERO=False,
LAND_SEA_BACKDROP=False,
LAND_SEA_RED_BACKDROP=False,
stretch='power3',
a=1, b=2, p=0.5,
NO_SHOW=False, im_file=None,
xsize=8, ysize=8, dpi=None):
# Note: extent = [minlon, maxlon, minlat, maxlat]
#-------------------------
# Other color map names
#--------------------------------------------
# hsv, jet, gist_rainbow (reverse rainbow),
# gist_ncar, gist_stern
#--------------------------------------------
#--------------------------
# Other stretch functions
#--------------------------
grid2 = stretch_grid( grid, stretch, a=a, b=b, p=p )
# if (stretch == 'power_stretch3'):
# grid2 = power_stretch3( grid, a=0.5 )
# elif (stretch == 'power_stretch1a'):
# grid2 = power_stretch1( grid, 0.5)
# elif (stretch == 'power_stretch1b'):
# grid2 = power_stretch1( grid, 0.2)
# elif (stretch == 'power_stretch2'):
# grid2 = power_stretch2( grid )
# elif (stretch == 'log_stretch'):
# grid2 = log_stretch( grid )
# elif (stretch == 'hist_equal'):
# grid2 = histogram_equalize( grid, PLOT_NCS=True)
# else:
# print('SORRY, Unknown stretch =', stretch)
# return
#---------------------------------------
# Modify the colormap (0 = black) ?
# cmap is name of colormap, a string
#--------------------------------------------------------
# cmap arg to imshow can be name (as str) or cmap array
# 4th entry is opacity, or alpha channel (I think)
#--------------------------------------------------------
# See: "Creating listed colormaps" section at:
# https://matplotlib.org/3.1.0/tutorials/colors/
# colormap-manipulation.html
#--------------------------------------------------------
# "Land green" = #c6e5bc = (198, 229, 188)
# "Sea blue" = #aad3df = (170, 211, 223)
#--------------------------------------------------------
if (BLACK_ZERO):
n_colors = 256
color_map = cm.get_cmap( cmap, n_colors )
new_colors = color_map( np.linspace(0, 1, n_colors) )
black = np.array([0.0, 0.0, 0.0, 1.0])
new_colors[0,:] = black
new_cmap = ListedColormap( new_colors )
elif (LAND_SEA_BACKDROP):
n_colors = 256
color_map = cm.get_cmap( cmap, n_colors )
new_colors = color_map( np.linspace(0, 1, n_colors) )
land_green = np.array([198, 229, 188, 256]) / 256.0
sea_blue = np.array([170, 211, 223, 256]) / 256.0
new_colors[0,:] = land_green
new_colors[255,:] = sea_blue
new_cmap = ListedColormap( new_colors )
elif (LAND_SEA_RED_BACKDROP):
n_colors = 3
color_map = cm.get_cmap( cmap, n_colors )
new_colors = color_map( np.linspace(0, 1, n_colors) )
land_green = np.array([198, 229, 188, 256]) / 256.0
sea_blue = np.array([170, 211, 223, 256]) / 256.0
red = np.array([256, 0, 0, 256]) / 256.0
new_colors[0,:] = land_green
new_colors[1,:] = red
new_colors[2,:] = sea_blue
new_cmap = ListedColormap( new_colors )
else:
new_cmap = cmap
#----------------------------
# Set up and show the image
#----------------------------
# figure = plt.figure(1, figsize=(xsize, ysize))
fig, ax = plt.subplots( figsize=(xsize, ysize), dpi=dpi)
im_title = long_name.replace('_', ' ').title()
ax.set_title( im_title )
ax.set_xlabel('Longitude [deg]')
ax.set_ylabel('Latitude [deg]')
gmin = grid2.min()
gmax = grid2.max()
im = ax.imshow(grid2, interpolation='nearest', cmap=new_cmap,
vmin=gmin, vmax=gmax, extent=extent)
#--------------------------------------------------------
# NOTE! Must save before "showing" or get blank image.
# File format is inferred from extension.
# e.g. TMP_Image.png, TMP_Image.jpg.
#--------------------------------------------------------
if (im_file is not None):
plt.savefig( im_file )
else:
plt.show() # Ignore NO_SHOW arg for now.
#-----------------------------------------------
# if (im_file is not None):
# plt.savefig( im_file )
# if not(NO_SHOW):
# plt.show()
plt.close()
# Information on matplotlib color maps
# https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html
#
# Information on matplotlib.pyplot.imshow
# https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.imshow.html
#
# Information on matplotlib.pyplot.savefig
# https://matplotlib.org/3.1.0/api/_as_gen/matplotlib.pyplot.savefig.html
#
# plt.savefig(fname, dpi=None, facecolor='w', edgecolor='w',
# orientation='portrait', papertype=None, format=None,
# transparent=False, bbox_inches=None, pad_inches=0.1,
# frameon=None, metadata=None)
# show_grid_as_image()
#--------------------------------------------------------------------
def save_grid_stack_as_images( nc_file, png_dir, extent=None,
stretch='power3', a=1, b=2, p=0.5,
cmap='rainbow', REPORT=True,
BLACK_ZERO=False,
LAND_SEA_BACKDROP=False,
LAND_SEA_RED_BACKDROP=False,
xsize=6, ysize=6, dpi=192 ):
# Example nc_files:
# nc_file | |
incoming_action_set = set(action.keys())
if no_l2_classifier_set.issubset(incoming_classifier_set) and \
no_l2_action_set.issubset(incoming_action_set) and \
len(incoming_action_set) == 1:
return True
return False
def divide_and_add_flow(self, intf_id, onu_id, uni_id, port_no, classifier,
action, flow, tp_id, us_meter_id, ds_meter_id):
self.log.debug('sorting flow', intf_id=intf_id, onu_id=onu_id,
uni_id=uni_id, port_no=port_no,
classifier=classifier, action=action,
tp_id=tp_id, us_meter=us_meter_id,
ds_meter=ds_meter_id)
tp_instance = self.get_tech_profile_instance(intf_id, onu_id, uni_id, tp_id)
if tp_instance is None:
self.log.error("flow-not-added--tp-instance-unavailable")
return
pon_intf_onu_id = (intf_id, onu_id, uni_id)
alloc_id = \
self.resource_mgr.get_current_alloc_ids_for_onu(pon_intf_onu_id)
gem_ports = \
self.resource_mgr.get_current_gemport_ids_for_onu(pon_intf_onu_id)
if alloc_id is None or gem_ports is None:
self.log.error("alloc-id-or-gem-ports-unavailable",
alloc_id=alloc_id, gem_ports=gem_ports)
return
self.create_us_scheduler_queues(intf_id, onu_id, uni_id, tp_instance, us_meter_id)
self.create_ds_scheduler_queues(intf_id, onu_id, uni_id, tp_instance, ds_meter_id)
self.log.debug('Generated required alloc and gemport ids',
alloc_id=alloc_id, gemports=gem_ports)
ds_gem_port_attr_list = tp_instance.downstream_gem_port_attribute_list
us_gem_port_attr_list = tp_instance.upstream_gem_port_attribute_list
kwargs = dict()
kwargs['intf_id'] = intf_id
kwargs['onu_id'] = onu_id
kwargs['uni_id'] = uni_id
kwargs['port_no'] = port_no
kwargs['classifier'] = classifier
kwargs['action'] = action
kwargs['logical_flow'] = flow
kwargs['alloc_id'] = alloc_id
if IP_PROTO in classifier:
if classifier[IP_PROTO] == 17:
self.log.debug('dhcp flow add')
if VLAN_PCP in classifier:
gemport_id = self._get_gem_port_for_pcp(
classifier[VLAN_PCP], us_gem_port_attr_list
)
self.add_dhcp_trap_uni(intf_id, onu_id, uni_id, port_no,
classifier, action, flow, alloc_id,
gemport_id)
else:
self._install_flow_on_all_gemports(self.add_dhcp_trap_uni,
kwargs,
us_gem_port_attr_list)
elif classifier[IP_PROTO] == 2:
self.log.warn('igmp flow add ignored, not implemented yet')
return
else:
self.log.warn("Invalid-Classifier-to-handle",
classifier=classifier,
action=action)
return
elif ETH_TYPE in classifier:
if classifier[ETH_TYPE] == EAP_ETH_TYPE:
self.log.debug('eapol flow add')
vlan_id = classifier[VLAN_VID]
if vlan_id is None:
vlan_id = DEFAULT_MGMT_VLAN
if VLAN_PCP in classifier:
gemport_id = self._get_gem_port_for_pcp(
classifier[VLAN_PCP], us_gem_port_attr_list
)
self.add_eapol_flow(
intf_id, onu_id, uni_id, port_no, flow, alloc_id, gemport_id,
vlan_id=vlan_id)
else:
kwargs['vlan_id'] = vlan_id
self._install_flow_on_all_gemports(self.add_eapol_flow,
kwargs,
us_gem_port_attr_list)
elif PUSH_VLAN in action:
if VLAN_PCP in classifier:
gemport_id = self._get_gem_port_for_pcp(
classifier[VLAN_PCP], us_gem_port_attr_list
)
self.add_upstream_data_flow(intf_id, onu_id, uni_id, port_no, classifier,
action, flow, alloc_id, gemport_id)
else:
self._install_flow_on_all_gemports(self.add_upstream_data_flow,
kwargs, us_gem_port_attr_list
)
elif POP_VLAN in action:
if VLAN_PCP in classifier:
gemport_id = self._get_gem_port_for_pcp(
classifier[VLAN_PCP], ds_gem_port_attr_list
)
self.add_downstream_data_flow(intf_id, onu_id, uni_id, port_no, classifier,
action, flow, alloc_id, gemport_id)
else:
self._install_flow_on_all_gemports(self.add_downstream_data_flow,
kwargs, ds_gem_port_attr_list
)
elif self.is_no_l2_modification_flow(classifier, action) and \
self._is_upstream_flow(classifier[IN_PORT]):
kwargs['is_l2_mod_flow'] = False
if VLAN_PCP in classifier:
kwargs['gemport_id'] = self._get_gem_port_for_pcp(
classifier[VLAN_PCP], us_gem_port_attr_list
)
self.add_upstream_data_flow(**kwargs)
else:
self._install_flow_on_all_gemports(self.add_upstream_data_flow,
kwargs, us_gem_port_attr_list
)
elif self.is_no_l2_modification_flow(classifier, action) and \
self._is_downstream_flow(classifier[IN_PORT]):
kwargs['is_l2_mod_flow'] = False
if VLAN_PCP in classifier:
kwargs['gemport_id'] = self._get_gem_port_for_pcp(
classifier[VLAN_PCP], ds_gem_port_attr_list
)
self.add_downstream_data_flow(**kwargs)
else:
self._install_flow_on_all_gemports(self.add_downstream_data_flow,
kwargs, ds_gem_port_attr_list
)
else:
self.log.debug('Invalid-flow-type-to-handle',
classifier=classifier,
action=action, flow=flow)
return
# Download tech-profile to ONU
self.download_tech_profile(intf_id, onu_id, uni_id)
def download_tech_profile(self, intf_id, onu_id, uni_id):
(ofp_port_name, ofp_port_no) = \
self.data_model.get_ofp_port_name(intf_id, onu_id, uni_id)
if ofp_port_name is None:
self.log.error("port-name-not-found")
return
tp_id = self.resource_mgr.get_tech_profile_id_for_onu(intf_id, onu_id, uni_id)
tp_path = self.get_tp_path(intf_id, ofp_port_name, tp_id)
self.log.debug('Load-tech-profile-request-to-brcm-handler',
tp_path=tp_path)
self.data_model.onu_download_tech_profile(
intf_id, onu_id, uni_id, tp_path)
def get_scheduler(self, tech_profile_instance, direction, meter_id):
if direction == Direction.UPSTREAM:
scheduler = tech_profile_instance.us_scheduler
elif direction == Direction.DOWNSTREAM:
scheduler = tech_profile_instance.ds_scheduler
else:
raise Exception("invalid-direction")
meter_band = self.data_model.meter_band(meter_id)
traffic_shaping_info = None
if meter_band is not None:
cir = meter_band.bands[0].rate
cbs = meter_band.bands[0].burst_size
eir = meter_band.bands[1].rate
ebs = meter_band.bands[1].burst_size
pir = cir + eir
pbs = cbs + ebs
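# Band 0 of the meter carries the committed rate/burst and band 1 the
# excess rate/burst; the peak values handed to the traffic scheduler
# are their sums.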
traffic_shaping_info = tech_profile_pb2.TrafficShapingInfo(
cir=cir,
cbs=cbs,
pir=pir,
pbs=pbs
)
scheduler_config = tech_profile_pb2.SchedulerConfig(
direction=TechProfile.get_parameter(
'direction', scheduler.direction),
additional_bw=TechProfile.get_parameter(
'additional_bw', scheduler.additional_bw),
priority=scheduler.priority,
weight=scheduler.weight,
sched_policy=TechProfile.get_parameter(
'q_sched_policy', scheduler.q_sched_policy)
)
traffic_scheduler = tech_profile_pb2.TrafficScheduler(
direction=scheduler.direction,
scheduler=scheduler_config,
alloc_id=scheduler.alloc_id,
traffic_shaping_info=traffic_shaping_info
)
return traffic_scheduler
@staticmethod
def get_traffic_queues(tech_profile_instance, direction):
if direction == Direction.UPSTREAM:
gemport_attribute_list = tech_profile_instance. \
upstream_gem_port_attribute_list
tp_scheduler_direction = tech_profile_instance.us_scheduler.direction
elif direction == Direction.DOWNSTREAM:
gemport_attribute_list = tech_profile_instance. \
downstream_gem_port_attribute_list
tp_scheduler_direction = tech_profile_instance.ds_scheduler.direction
else:
raise Exception("invalid-direction")
traffic_queues = list()
for i in range(len(gemport_attribute_list)):
traffic_queues.append(tech_profile_pb2.TrafficQueue(
direction=TechProfile.get_parameter('direction',
tp_scheduler_direction),
gemport_id=gemport_attribute_list[i].gemport_id,
pbit_map=gemport_attribute_list[i].pbit_map,
aes_encryption=ast.literal_eval(gemport_attribute_list[i].
aes_encryption),
sched_policy=TechProfile.get_parameter(
'sched_policy', gemport_attribute_list[i].
scheduling_policy),
priority=gemport_attribute_list[i].priority_q,
weight=gemport_attribute_list[i].weight,
discard_policy=TechProfile.get_parameter(
'discard_policy', gemport_attribute_list[i].
discard_policy)))
return traffic_queues
def create_us_scheduler_queues(self, intf_id, onu_id, uni_id, tp_instance, us_meter_id):
if us_meter_id is None:
self.log.debug("us-meter-unavailable--no-action")
return
kv_store_meter_id = self.resource_mgr.get_meter_id_for_onu(UPSTREAM,
intf_id,
onu_id, uni_id)
# Let's make a simple assumption that if the meter-id is present on the KV store,
# then the scheduler and queues configuration is applied on the OLT device
# in the given direction.
if kv_store_meter_id is not None:
# TODO: Dynamic meter update not supported for now
# TODO: The subscriber has to be un-provisioned and re-provisioned for meter update
assert kv_store_meter_id == us_meter_id
self.log.debug("scheduler-already-created-in-us")
return
traffic_sched = self.get_scheduler(tp_instance, Direction.UPSTREAM, us_meter_id)
try:
ofp_port_no = self.platform.mk_uni_port_num(intf_id,
onu_id, uni_id)
self.stub.CreateTrafficSchedulers(
tech_profile_pb2.TrafficSchedulers(
intf_id=intf_id,
onu_id=onu_id,
uni_id=uni_id,
port_no=ofp_port_no,
traffic_scheds=[traffic_sched]
))
except grpc.RpcError as grpc_e:
if grpc_e.code() == grpc.StatusCode.ALREADY_EXISTS:
self.log.warn("us-scheduler-already-exists")
else:
self.log.error("failure-to-create-us-scheduler")
return
# On receiving the CreateTrafficQueues request, the driver should create corresponding
# upstream queues.
try:
self.stub.CreateTrafficQueues(
tech_profile_pb2.TrafficQueues(
intf_id=intf_id,
onu_id=onu_id,
uni_id=uni_id,
port_no=ofp_port_no,
traffic_queues=
OpenOltFlowMgr.get_traffic_queues(tp_instance, Direction.UPSTREAM)
))
except grpc.RpcError as grpc_e:
if grpc_e.code() == grpc.StatusCode.ALREADY_EXISTS:
self.log.warn("ds-queues-already-exists")
else:
self.log.error("failure-to-create-ds-queues")
return
# After we successfully applied the scheduler configuration on the OLT device,
# store the meter id on the KV store, for further reference
self.resource_mgr.update_meter_id_for_onu(UPSTREAM, intf_id, onu_id, uni_id, us_meter_id)
def create_ds_scheduler_queues(self, intf_id, onu_id, uni_id, tp_instance, ds_meter_id):
if ds_meter_id is None:
self.log.debug("ds-meter-unavailable--no-action")
return
kv_store_meter_id = self.resource_mgr.get_meter_id_for_onu(DOWNSTREAM,
intf_id,
onu_id, uni_id)
# Let's make a simple assumption that if the meter-id is present on the KV store,
# then the scheduler and queues configuration is applied on the OLT device
if kv_store_meter_id is not None:
# TODO: Dynamic meter update not supported for now
# TODO: The subscriber has to be un-provisioned and re-provisioned for meter update
assert kv_store_meter_id == ds_meter_id
self.log.debug("scheduler-already-created-in-ds")
return
traffic_sched = self.get_scheduler(tp_instance, Direction.DOWNSTREAM, ds_meter_id)
_, ofp_port_no = self.data_model.get_ofp_port_name(intf_id, onu_id, uni_id)
try:
self.stub.CreateTrafficSchedulers(
tech_profile_pb2.TrafficSchedulers(
intf_id=intf_id,
onu_id=onu_id,
uni_id=uni_id,
port_no=ofp_port_no,
traffic_scheds=[traffic_sched]
))
except grpc.RpcError as grpc_e:
if grpc_e.code() == grpc.StatusCode.ALREADY_EXISTS:
self.log.warn("ds-scheduler-already-exists")
else:
self.log.error("failure-to-create-ds-scheduler")
return
# On receiving the CreateTrafficQueues request, the driver should create corresponding
# downstream queues.
try:
self.stub.CreateTrafficQueues(
tech_profile_pb2.TrafficQueues(
intf_id=intf_id,
onu_id=onu_id,
uni_id=uni_id,
port_no=ofp_port_no,
traffic_queues=
OpenOltFlowMgr.get_traffic_queues(tp_instance, Direction.DOWNSTREAM)
))
except grpc.RpcError as grpc_e:
if grpc_e.code() == grpc.StatusCode.ALREADY_EXISTS:
self.log.warn("ds-queues-already-exists")
else:
self.log.error("failure-to-create-ds-queues")
return
# After we successfully applied the scheduler configuration on the OLT device,
# store the meter id on the KV store, for further reference
self.resource_mgr.update_meter_id_for_onu(DOWNSTREAM, intf_id, onu_id, uni_id, ds_meter_id)
def remove_us_scheduler_queues(self, intf_id, onu_id, uni_id, tp_instance):
us_meter_id = self.resource_mgr.get_meter_id_for_onu(UPSTREAM,
intf_id,
onu_id, uni_id)
traffic_sched = self.get_scheduler(tp_instance, Direction.UPSTREAM, us_meter_id)
_, ofp_port_no = self.data_model.get_ofp_port_name(intf_id, onu_id, uni_id)
try:
self.stub.RemoveTrafficQueues(
tech_profile_pb2.TrafficQueues(
intf_id=intf_id,
onu_id=onu_id,
uni_id=uni_id,
port_no=ofp_port_no,
traffic_queues=
OpenOltFlowMgr.get_traffic_queues(tp_instance, Direction.UPSTREAM)
))
self.log.debug("removed-upstream-Queues")
except grpc.RpcError as e:
self.log.error("failure-to-remove-us-queues", e=e)
try:
self.stub.RemoveTrafficSchedulers(
tech_profile_pb2.TrafficSchedulers(
intf_id=intf_id,
onu_id=onu_id,
uni_id=uni_id,
port_no=ofp_port_no,
traffic_scheds=[traffic_sched]
))
self.log.debug("removed-upstream-Schedulers")
except grpc.RpcError as e:
self.log.error("failure-to-remove-us-scheduler", e=e)
self.resource_mgr.remove_meter_id_for_onu(UPSTREAM, intf_id, onu_id, uni_id)
def remove_ds_scheduler_queues(self, intf_id, onu_id, uni_id, tp_instance):
ds_meter_id = self.resource_mgr.get_meter_id_for_onu(DOWNSTREAM,
intf_id,
onu_id, uni_id)
traffic_sched = self.get_scheduler(tp_instance, Direction.DOWNSTREAM, ds_meter_id)
_, ofp_port_no = self.data_model.get_ofp_port_name(intf_id, onu_id, uni_id)
try:
self.stub.RemoveTrafficQueues(
tech_profile_pb2.TrafficQueues(
intf_id=intf_id,
onu_id=onu_id,
uni_id=uni_id,
port_no=ofp_port_no,
traffic_queues=
OpenOltFlowMgr.get_traffic_queues(tp_instance, Direction.DOWNSTREAM)
))
self.log.debug("removed-downstream-Queues")
except grpc.RpcError as grpc_e:
self.log.error("failure-to-remove-ds-queues")
try:
self.stub.RemoveTrafficSchedulers(
tech_profile_pb2.TrafficSchedulers(
intf_id=intf_id,
onu_id=onu_id,
uni_id=uni_id,
port_no=ofp_port_no,
traffic_scheds=[traffic_sched]
))
self.log.debug("removed-downstream-Schedulers")
except grpc.RpcError as grpc_e:
self.log.error("failure-to-remove-ds-scheduler")
self.resource_mgr.remove_meter_id_for_onu(DOWNSTREAM, intf_id, onu_id, uni_id)
def get_tech_profile_instance(self, intf_id, onu_id, uni_id, tp_id):
(ofp_port_name, ofp_port_no) \
= self.data_model.get_ofp_port_name(intf_id, onu_id, uni_id)
if ofp_port_name is None:
self.log.error("port-name-not-found")
return None
# Check tech profile instance already exists for derived port name
tech_profile_instance = self.tech_profile[intf_id]. \
get_tech_profile_instance(tp_id, ofp_port_name)
if tech_profile_instance is None:
# create tech profile instance
tech_profile_instance = self.tech_profile[intf_id]. \
create_tech_profile_instance(tp_id, ofp_port_name,
intf_id)
if tech_profile_instance is None:
raise Exception('Tech-profile-instance-creation-failed')
self.resource_mgr.update_tech_profile_id_for_onu(intf_id, onu_id,
uni_id, tp_id)
# Fetch alloc id and gemports from tech profile instance
alloc_id = tech_profile_instance.us_scheduler.alloc_id
gem_port_ids = []
for i in range(len(
tech_profile_instance.upstream_gem_port_attribute_list)):
gem_port_ids.append(
tech_profile_instance.upstream_gem_port_attribute_list[i].
gemport_id)
# Update the allocated alloc_id and gem_port_id for the ONU/UNI to KV
# store
pon_intf_onu_id = (intf_id, onu_id, uni_id)
self.resource_mgr.resource_mgrs[intf_id].update_alloc_ids_for_onu(
pon_intf_onu_id,
list([alloc_id])
)
self.resource_mgr.resource_mgrs[intf_id].update_gemport_ids_for_onu(
pon_intf_onu_id,
gem_port_ids
)
self.resource_mgr.update_gemports_ponport_to_onu_map_on_kv_store(
gem_port_ids, intf_id, onu_id, uni_id
)
for gemport_id in gem_port_ids:
self.data_model.gemport_id_add(intf_id, onu_id, gemport_id)
else:
self.log.debug(
'Tech-profile-instance-already-exist-for-given port-name',
ofp_port_name=ofp_port_name)
return tech_profile_instance
def get_alloc_id_gem_port(self, intf_id, onu_id):
pon_intf_onu_id = (intf_id, onu_id)
# If we already have allocated alloc_id and gem_ports earlier, return them
alloc_id = \
self.resource_mgr.get_current_alloc_ids_for_onu(pon_intf_onu_id)
gem_port_ids = \
self.resource_mgr.get_current_gemport_ids_for_onu(pon_intf_onu_id)
return alloc_id, gem_port_ids
def add_upstream_data_flow(self, intf_id, onu_id, uni_id, port_no, classifier,
action, logical_flow, alloc_id, gemport_id, is_l2_mod_flow=True):
if is_l2_mod_flow:
classifier[PACKET_TAG_TYPE] = SINGLE_TAG
else:
classifier[PACKET_TAG_TYPE] = DOUBLE_TAG
self.add_hsia_flow(intf_id, onu_id, uni_id, port_no, classifier,
action, UPSTREAM,
logical_flow, alloc_id, gemport_id)
def add_downstream_data_flow(self, intf_id, onu_id, uni_id, port_no, classifier,
action, logical_flow, alloc_id, gemport_id, is_l2_mod_flow=True):
if is_l2_mod_flow:
classifier[PACKET_TAG_TYPE] = DOUBLE_TAG
classifier[POP_VLAN] = True
action[VLAN_VID] = classifier[VLAN_VID]
else:
classifier[PACKET_TAG_TYPE] = DOUBLE_TAG
self.add_hsia_flow(intf_id, onu_id, uni_id, port_no, classifier,
action, DOWNSTREAM,
logical_flow, alloc_id, gemport_id)
def add_hsia_flow(self, intf_id, onu_id, uni_id, port_no, classifier,
action, direction, logical_flow, alloc_id, gemport_id):
flow_store_cookie = self._get_flow_store_cookie(classifier,
gemport_id)
if self.resource_mgr.is_flow_cookie_on_kv_store(intf_id, onu_id,
uni_id,
flow_store_cookie):
self.log.debug('flow-exists--not-re-adding')
else:
# One of the OLT platforms (Broadcom BAL) requires that symmetric
# flows require the same | |
#!/usr/bin/env python
import argparse, sys, copy, gzip, os
import math, time, re
import numpy as np
from scipy import stats
from collections import Counter
from argparse import RawTextHelpFormatter
from operator import itemgetter
__author__ = "<NAME> (<EMAIL>)"
__version__ = "$Revision: 0.0.2 $"
__date__ = "$Date: 2014-04-28 14:31 $"
# --------------------------------------
# define functions
def get_args():
parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter, description="\
sv_classifier.py\n\
author: " + __author__ + "\n\
version: " + __version__ + "\n\
description: classify structural variants")
parser.add_argument('-i', '--input', metavar='VCF', dest='vcf_in', type=argparse.FileType('r'), default=None, help='VCF input [stdin]')
parser.add_argument('-g', '--gender', metavar='FILE', dest='gender', type=argparse.FileType('r'), required=True, default=None, help='tab delimited file of sample genders (male=1, female=2)\nex: SAMPLE_A\t2')
parser.add_argument('-e', '--exclude', metavar='FILE', dest='exclude', type=argparse.FileType('r'), required=False, default=None, help='list of samples to exclude from classification algorithms')
parser.add_argument('-a', '--annotation', metavar='BED', dest='ae_path', type=str, default=None, help='BED file of annotated elements')
parser.add_argument('-f', '--fraction', metavar='FLOAT', dest='f_overlap', type=float, default=0.9, help='fraction of reciprocal overlap to apply annotation to variant [0.9]')
parser.add_argument('-s', '--slope_threshold', metavar='FLOAT', dest='slope_threshold', type=float, default=1.0, help='minimum slope absolute value of regression line to classify as DEL or DUP[1.0]')
parser.add_argument('-r', '--rsquared_threshold', metavar='FLOAT', dest='rsquared_threshold', type=float, default=0.2, help='minimum R^2 correlation value of regression line to classify as DEL or DUP [0.2]')
# parse the arguments
args = parser.parse_args()
# if no input, check if part of pipe and if so, read stdin.
if args.vcf_in == None:
if sys.stdin.isatty():
parser.print_help()
exit(1)
else:
args.vcf_in = sys.stdin
# send back the user input
return args
class Vcf(object):
def __init__(self):
self.file_format = 'VCFv4.2'
# self.fasta = fasta
self.reference = ''
self.sample_list = []
self.info_list = []
self.format_list = []
self.alt_list = []
self.add_format('GT', 1, 'String', 'Genotype')
def add_header(self, header):
for line in header:
if line.split('=')[0] == '##fileformat':
self.file_format = line.rstrip().split('=')[1]
elif line.split('=')[0] == '##reference':
self.reference = line.rstrip().split('=')[1]
elif line.split('=')[0] == '##INFO':
a = line[line.find('<')+1:line.find('>')]
r = re.compile(r'(?:[^,\"]|\"[^\"]*\")+')
self.add_info(*[b.split('=')[1] for b in r.findall(a)])
elif line.split('=')[0] == '##ALT':
a = line[line.find('<')+1:line.find('>')]
r = re.compile(r'(?:[^,\"]|\"[^\"]*\")+')
self.add_alt(*[b.split('=')[1] for b in r.findall(a)])
elif line.split('=')[0] == '##FORMAT':
a = line[line.find('<')+1:line.find('>')]
r = re.compile(r'(?:[^,\"]|\"[^\"]*\")+')
self.add_format(*[b.split('=')[1] for b in r.findall(a)])
elif line[0] == '#' and line[1] != '#':
self.sample_list = line.rstrip().split('\t')[9:]
# return the VCF header
def get_header(self):
header = '\n'.join(['##fileformat=' + self.file_format,
'##fileDate=' + time.strftime('%Y%m%d'),
'##reference=' + self.reference] + \
[i.hstring for i in self.info_list] + \
[a.hstring for a in self.alt_list] + \
[f.hstring for f in self.format_list] + \
['\t'.join([
'#CHROM',
'POS',
'ID',
'REF',
'ALT',
'QUAL',
'FILTER',
'INFO',
'FORMAT'] + \
self.sample_list
)])
return header
def add_info(self, id, number, type, desc):
if id not in [i.id for i in self.info_list]:
inf = self.Info(id, number, type, desc)
self.info_list.append(inf)
def add_alt(self, id, desc):
if id not in [a.id for a in self.alt_list]:
alt = self.Alt(id, desc)
self.alt_list.append(alt)
def add_format(self, id, number, type, desc):
if id not in [f.id for f in self.format_list]:
fmt = self.Format(id, number, type, desc)
self.format_list.append(fmt)
def add_sample(self, name):
self.sample_list.append(name)
# get the VCF column index of a sample
# NOTE: this is zero-based, like python arrays
def sample_to_col(self, sample):
return self.sample_list.index(sample) + 9
class Info(object):
def __init__(self, id, number, type, desc):
self.id = str(id)
self.number = str(number)
self.type = str(type)
self.desc = str(desc)
# strip the double quotes around the string if present
if self.desc.startswith('"') and self.desc.endswith('"'):
self.desc = self.desc[1:-1]
self.hstring = '##INFO=<ID=' + self.id + ',Number=' + self.number + ',Type=' + self.type + ',Description=\"' + self.desc + '\">'
class Alt(object):
def __init__(self, id, desc):
self.id = str(id)
self.desc = str(desc)
# strip the double quotes around the string if present
if self.desc.startswith('"') and self.desc.endswith('"'):
self.desc = self.desc[1:-1]
self.hstring = '##ALT=<ID=' + self.id + ',Description=\"' + self.desc + '\">'
class Format(object):
def __init__(self, id, number, type, desc):
self.id = str(id)
self.number = str(number)
self.type = str(type)
self.desc = str(desc)
# strip the double quotes around the string if present
if self.desc.startswith('"') and self.desc.endswith('"'):
self.desc = self.desc[1:-1]
self.hstring = '##FORMAT=<ID=' + self.id + ',Number=' + self.number + ',Type=' + self.type + ',Description=\"' + self.desc + '\">'
class Variant(object):
def __init__(self, var_list, vcf):
self.chrom = var_list[0]
self.pos = int(var_list[1])
self.var_id = var_list[2]
self.ref = var_list[3]
self.alt = var_list[4]
if var_list[5] == '.':
self.qual = 0
else:
self.qual = float(var_list[5])
self.filter = var_list[6]
self.sample_list = vcf.sample_list
self.info_list = vcf.info_list
self.info = dict()
self.format_list = vcf.format_list
self.active_formats = list()
self.gts = dict()
# fill in empty sample genotypes
if len(var_list) < 8:
sys.stderr.write('\nError: VCF file must have at least 8 columns\n')
exit(1)
if len(var_list) < 9:
var_list.append("GT")
# make a genotype for each sample at variant
for s in self.sample_list:
try:
s_gt = var_list[vcf.sample_to_col(s)].split(':')[0]
self.gts[s] = Genotype(self, s, s_gt)
# import the existing fmt fields
for j in zip(var_list[8].split(':'), var_list[vcf.sample_to_col(s)].split(':')):
self.gts[s].set_format(j[0], j[1])
except IndexError:
self.gts[s] = Genotype(self, s, './.')
self.info = dict()
i_split = [a.split('=') for a in var_list[7].split(';')] # temp list of split info column
for i in i_split:
if len(i) == 1:
i.append(True)
self.info[i[0]] = i[1]
def set_info(self, field, value):
if field in [i.id for i in self.info_list]:
self.info[field] = value
else:
sys.stderr.write('\nError: invalid INFO field, \"' + field + '\"\n')
exit(1)
def get_info(self, field):
return self.info[field]
def get_info_string(self):
i_list = list()
for info_field in self.info_list:
if info_field.id in self.info.keys():
if info_field.type == 'Flag':
i_list.append(info_field.id)
else:
i_list.append('%s=%s' % (info_field.id, self.info[info_field.id]))
return ';'.join(i_list)
def get_format_string(self):
f_list = list()
for f in self.format_list:
if f.id in self.active_formats:
f_list.append(f.id)
return ':'.join(f_list)
def genotype(self, sample_name):
if sample_name in self.sample_list:
return self.gts[sample_name]
else:
sys.stderr.write('\nError: invalid sample name, \"' + sample_name + '\"\n')
def get_var_string(self):
s = '\t'.join(map(str,[
self.chrom,
self.pos,
self.var_id,
self.ref,
self.alt,
'%0.2f' % self.qual,
self.filter,
self.get_info_string(),
self.get_format_string(),
'\t'.join(self.genotype(s).get_gt_string() for s in self.sample_list)
]))
return s
class Genotype(object):
def __init__(self, variant, sample_name, gt):
self.format = dict()
self.variant = variant
self.set_format('GT', gt)
def set_format(self, field, value):
if field in [i.id for i in self.variant.format_list]:
self.format[field] = value
if field not in self.variant.active_formats:
self.variant.active_formats.append(field)
# sort it to be in the same order as the format_list in header
self.variant.active_formats.sort(key=lambda x: [f.id for f in self.variant.format_list].index(x))
else:
sys.stderr.write('\nError: invalid FORMAT field, \"' + field + '\"\n')
exit(1)
def get_format(self, field):
return self.format[field]
def get_gt_string(self):
g_list = list()
for f in self.variant.active_formats:
if f in self.format:
if type(self.format[f]) == float:
g_list.append('%0.2f' % self.format[f])
else:
g_list.append(self.format[f])
else:
g_list.append('.')
return ':'.join(map(str,g_list))
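# Illustrative round trip through the classes above (hypothetical sample name and record,
# not part of the original pipeline), assuming the module is imported as-is:
#   vcf = Vcf()
#   vcf.add_sample('NA12878')
#   vcf.add_info('SVTYPE', 1, 'String', 'Type of structural variant')
#   var = Variant('1 100 . A <DEL> 5 PASS SVTYPE=DEL GT 0/1'.split(), vcf)
#   var.get_var_string()
#   # -> '1\t100\t.\tA\t<DEL>\t5.00\tPASS\tSVTYPE=DEL\tGT\t0/1'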
# http://stackoverflow.com/questions/8930370/where-can-i-find-mad-mean-absolute-deviation-in-scipy
def mad(arr):
""" Median Absolute Deviation: a "Robust" version of standard deviation.
Indicates variability of the sample.
https://en.wikipedia.org/wiki/Median_absolute_deviation
"""
arr = np.ma.array(arr).compressed() # should be faster to not use masked arrays.
med = np.median(arr)
return np.median(np.abs(arr - med))
# test whether variant has read depth support by regression
def has_high_freq_depth_support(var, gender, exclude, slope_threshold, rsquared_threshold, writedir=None):
# slope_threshold = 0.1
# rsquared_threshold = 0.1
if 'CN' in var.active_formats:
# allele balance list
ab_list = []
for s in var.sample_list:
# if s in exclude:
# continue
ab_str = var.genotype(s).get_format('AB')
if ab_str == '.':
ab_list.append(-1)
continue
ab_list.append(float(ab_str))
# populate read-depth list, accounting for sample gender
rd_list = []
for s in var.sample_list:
# if s in exclude:
# continue
if (var.chrom == 'X' or var.chrom == 'Y') and gender[s] == 1:
rd_list.append(float(var.genotype(s).get_format('CN')) * 2)
else:
rd_list.append(float(var.genotype(s).get_format('CN')))
rd = np.array([ab_list, rd_list])
# remove missing genotypes
rd = rd[:, rd[0]!=-1]
# ensure non-uniformity in genotype and read depth
if len(np.unique(rd[0,:])) > 1 and len(np.unique(rd[1,:])) > 1:
# calculate regression
(slope, intercept, r_value, p_value, std_err) = stats.linregress(rd)
# print slope, intercept, r_value, var.info['SVTYPE'], var.var_id
# write the scatterplot to a file
if writedir is not None:
try:
os.makedirs(writedir)
except OSError as exc: # Python >2.5
if os.path.isdir(writedir):
pass
else: raise
f = open('%s/reg_%s_%s_%sbp.txt' % (writedir, var.info['SVTYPE'], var.var_id, var.info['SVLEN']), 'w')
np.savetxt(f, np.transpose(rd), delimiter='\t')
f.close()
if r_value ** 2 < rsquared_threshold:
return False
if var.info['SVTYPE'] == 'DEL':
slope = -slope
if slope < slope_threshold:
return False
return True
return False
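# Sketch of the regression test above on made-up numbers (illustrative only): for a
# duplication-like signal where copy number rises with allele balance,
#   ab = [0.0, 0.5, 1.0]; cn = [2.0, 3.0, 4.0]
#   slope, intercept, r_value, p_value, std_err = stats.linregress(ab, cn)  # slope=2.0, r_value**2=1.0
# the variant passes when r_value**2 >= rsquared_threshold and the (sign-adjusted for DEL)
# slope >= slope_threshold.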
# test for read depth support of low frequency variants
def has_low_freq_depth_support(var, gender, exclude, writedir=None):
mad_threshold = 2
mad_quorum = 0.5 # this fraction of the pos. genotyped results must meet the mad_threshold
absolute_cn_diff = 0.5
hom_ref_cn = []
het_cn = []
hom_alt_cn = []
# determine whether majority of
# if on the sex chromosomes, only compare against the majority sex
if (var.chrom ==
from __future__ import division
import os
import time
from shutil import copyfile
from glob import glob
import tensorflow as tf
import numpy as np
# import config
from collections import namedtuple
# from module import *
# from utils import *
# from ops import *
# from metrics import *
import tensorflow_addons as tfa
import tensorflow.keras.backend as kb
from tensorflow.keras.optimizers import Adam
import matplotlib.pyplot as plt
import pandas as pd
# os.environ["CUDA_VISIBLE_DEVICES"] = os.environ['SGE_GPU']
from PIL import Image
from tensorflow import keras as keras
class Generator(object):
def __init__(self,input_dim):
self.input_dim = input_dim
self.output_channel = self.input_dim[2]
self.model = self.build_model()
def build_model(self,num_res_net_blocks = 10):
def residue_block(input_data, filters, conv_size,strides=1):
p = int((conv_size - 1) / 2)
x = tf.pad(input_data, [[0, 0], [p, p], [p, p], [0, 0]], "REFLECT")
x = keras.layers.Conv2D(filters=filters, kernel_size=conv_size, strides=strides,activation='relu', padding='VALID')(x)
x = tfa.layers.InstanceNormalization()(x)
x = tf.pad(x, [[0, 0], [p, p], [p, p], [0, 0]], "REFLECT")
x = keras.layers.Conv2D(filters, conv_size, activation='relu', padding='VALID')(x)
x = tfa.layers.InstanceNormalization()(x)
x = keras.layers.Add()([x, input_data])
x = keras.layers.Activation('relu')(x)
return x
# 3 conv
input = keras.Input(shape=(self.input_dim))
c0 = tf.pad(input, [[0, 0], [3, 3], [3, 3], [0, 0]], "REFLECT")
# c0 is (# of images * 262 * 262 * 3)
c1 = keras.layers.Conv2D(filters=64, kernel_size=7, strides=1,activation='relu', padding='VALID')(c0)
c1 = tfa.layers.InstanceNormalization()(c1)
# c1 is (# of images * 256 * 256 * 64)
c2 = keras.layers.Conv2D(filters=128, kernel_size=3, strides=2, activation='relu', padding ='SAME')(c1)
c2 = tfa.layers.InstanceNormalization()(c2)
# c2 is (# of images * 128 * 128 * 128)
c3 = keras.layers.Conv2D(filters=256, kernel_size=3, strides=2, activation='relu',padding ='SAME')(c2)
c3 = tfa.layers.InstanceNormalization()(c3)
# c3 is (# of images * 64 * 64 * 256)
# 10 resnet blocks, chained so each block feeds the next
res = c3
for i in range(num_res_net_blocks):
res = residue_block(res, 256, 3)
#3 deconv
d1 = keras.layers.Conv2DTranspose(filters=128, kernel_size=3, strides=2,activation='relu',padding = 'SAME',name = 'g_d1')(res)
d1 = tfa.layers.InstanceNormalization()(d1)
d2 = keras.layers.Conv2DTranspose(filters=64, kernel_size=3, strides=2, activation='relu',padding = 'SAME',name = 'g_d2')(d1)
d2 = tfa.layers.InstanceNormalization()(d2)
d3 = tf.pad(d2, [[0, 0], [3, 3], [3, 3], [0, 0]], "REFLECT",name = 'g_d3')
output = keras.layers.Conv2D(filters=self.output_channel, kernel_size=7, strides=1, activation='sigmoid')(d3)
model = keras.Model(input,output)
model.summary()
return model
def trainable(self,trainable = True):
self.model.trainable = trainable
def save(self,path):
self.model.save(path)
def load(self,path):
self.model = keras.models.load_model(path)
def train_on_batch(self, X, y):
print("generator trainable weight",self.model.trainable_weights)
return self.model.train_on_batch(X, y)
def translate_domain(self,input_data):
return self.model.predict(input_data)
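# Minimal usage sketch for the generator (assumed 256x256 RGB inputs; the pad/conv/deconv
# stack above returns an output of the same spatial size as the input):
#   gen = Generator((256, 256, 3))
#   fake = gen.translate_domain(np.zeros((1, 256, 256, 3), dtype=np.float32))
#   fake.shape  # -> (1, 256, 256, 3), values in [0, 1] from the final sigmoid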
class Discriminator(object):
def __init__(self,input_dim):
self.input_dim = input_dim
self.output_channel = self.input_dim[2]
self.model = self.build_model()
def build_model(self):
model = keras.Sequential()
model.add(keras.layers.Conv2D(filters=64, kernel_size=4, strides=2,padding = 'SAME',input_shape=self.input_dim))
model.add(keras.layers.LeakyReLU(alpha=0.3))
model.add(keras.layers.Conv2D(filters=256, kernel_size=4, strides=2,padding = 'SAME'))
model.add(keras.layers.LeakyReLU(alpha=0.3))
model.add(tfa.layers.InstanceNormalization())
model.add(keras.layers.Conv2D(filters=self.output_channel, kernel_size=1, strides=1))
model.compile(loss='mse', optimizer=Adam(lr=0.0002, beta_1=0.5), loss_weights=[0.5])
model.summary()
return model
# def train(self):
#
def trainable(self,trainable = True):
self.model.trainable = trainable
def save(self,path):
self.model.save(path)
def load(self,path):
self.model = keras.models.load_model(path)
def predict(self,input_data):
return self.model.predict(input_data)
def train_on_batch(self,X,y):
# print("discriminator trainable weight",len(self.model.trainable_weights))
return self.model.train_on_batch(X,y)
class CycleGan(object):
def __init__(self, sess, batch_size, crop_size,generatorAtoB,generatorBtoA, discriminatorA,discriminatorB, discriminatorAandM = None,discriminatorBandM = None,
generator_weight_factor = 10,discriminator_weight_factor = 10, use_D_M = False, gamma = 0.9,lamda = 0.9):
self.sess = sess
self.batch_size = batch_size
self.image_size = crop_size # cropped size
# self.input_c_dim = input_channels # number of input image channels
# self.output_c_dim = output_channels # number of output image channels
self.generatorAToB = generatorAtoB
self.generatorBToA = generatorBtoA
self.discriminatorA = discriminatorA
self.discriminatorB = discriminatorB
self.use_D_M = use_D_M
self.discriminatorAandM = discriminatorAandM
self.discriminatorBandM = discriminatorBandM
self.gamma = gamma
self.lamda = lamda
self.composite_model_A = self.build_composite_generator_network(self.generatorAToB,self.generatorBToA,self.discriminatorA)
self.composite_model_B = self.build_composite_generator_network(self.generatorBToA,self.generatorAToB,self.discriminatorB)
def build_composite_generator_network(self,generatorAtoB,generatorBtoA,discriminatorB):
# ensure the model we're updating is trainable
generatorAtoB.trainable()
# mark discriminator as not trainable
generatorBtoA.trainable(False)
# mark other generator model as not trainable
discriminatorB.trainable(False)
input_real_A = keras.Input(shape=self.image_size)
input_real_B = keras.Input(shape=self.image_size)
# discriminator element
fake_B = generatorAtoB.model(input_real_A)
dis_B = discriminatorB.model(fake_B)
#compare A vs Fake A through A->B->A
fake_A_ = generatorBtoA.model(fake_B)
# compare B vs Fake B through B->A->B
fake_A = generatorBtoA.model(input_real_B)
fake_B_ = generatorAtoB.model(fake_A)
# define model graph
model = keras.Model([input_real_A, input_real_B], [dis_B, fake_A_, fake_B_])
# define optimization algorithm configuration
opt = Adam(lr=0.0002, beta_1=0.5)
# compile model with weighting of least squares loss and L1 loss
model.compile(loss=['mse', 'mae', 'mae'], loss_weights=[1, 10, 10], optimizer=opt)
return model
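# Note on the composite model compiled above (standard CycleGAN recipe, stated here as an
# assumption rather than documented in this file): the 'mse' term is the least-squares
# adversarial loss on dis_B, and the two 'mae' terms are the forward (A->B->A) and
# backward (B->A->B) cycle-consistency losses, weighted 10x as in the CycleGAN paper.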
# def build_composite_discriminator_network(self, generatorAtoB, generatorBtoA, discriminatorB,discriminatorBandM):
def generate_real_samples_batch(self,dataset,batch_size,output_shape,channel_shape):
# choose random instances
ix = np.random.randint(0, dataset.shape[0], batch_size)
# retrieve selected images
X = dataset[ix]
# generate 'real' class labels (1)
y = np.ones((batch_size, output_shape, output_shape, channel_shape))
return X, y
# generate a batch of images, returns images and targets
def generate_fake_samples_batch(self,generator_model, dataset, output_shape,channel_shape):
# generate fake instance
X = generator_model.translate_domain(dataset)
# create 'fake' class labels (0)
y = np.zeros((len(X), output_shape, output_shape, channel_shape))
return X, y
def generate_mix_sample_batch(self,datasetA,datasetB,batch_size,output_shape,channel_shape):
ix_A = np.random.randint(0, datasetA.shape[0], int(batch_size/2))
X_A = datasetA[ix_A]
ix_B = np.random.randint(0, datasetB.shape[0], int(batch_size /2))
X_B = datasetB[ix_B]
y = np.ones((batch_size, output_shape, output_shape, channel_shape))
print ( np.concatenate((X_A,X_B),axis=0).shape)
return [np.concatenate((X_A,X_B),axis=0),y]
# save the generator models to file
def save_models(self,epoch, generator_model_AtoB, generator_model_BtoA,path):
# save the first generator model
folder = os.path.join(path,str(epoch))
if not os.path.isdir(folder):
os.makedirs(folder)
filename1 = 'g_model_AtoB_.h5'
filename1 = os.path.join(folder,filename1)
generator_model_AtoB.save(filename1)
# save the second generator model
filename2 = 'g_model_BtoA_.h5'
filename2 = os.path.join(folder, filename2)
generator_model_BtoA.save(filename2)
print('>Saved: %s and %s' % (filename1, filename2))
def load_models(self,path,filename1,filename2):
filename1 = os.path.join(path, filename1)
self.generatorAToB.load(filename1)
filename2 = os.path.join(path, filename2)
self.generatorBToA.load(filename2)
# generate samples and save as a plot and save the model
def summarize_performance(self,epochs, g_model, trainX, name, n_samples=5):
# select a sample of input images
X_in, _ = self.generate_real_samples_batch(trainX, n_samples, 0, 0)
# generate translated images
X_out, _ = self.generate_fake_samples_batch(g_model, X_in, 0, 0)
# scale all pixels from [-1,1] to [0,1]
X_in = (X_in + 1) / 2.0
X_out = (X_out + 1) / 2.0
# plot real images
for i in range(n_samples):
plt.subplot(2, n_samples, 1 + i)
plt.axis('off')
plt.imshow(X_in[i])
# plot translated image
for i in range(n_samples):
plt.subplot(2, n_samples, 1 + n_samples + i)
plt.axis('off')
plt.imshow(X_out[i])
# save plot to file
filename1 = '%s_generated_plot_%06d.png' % (name, (epochs + 1))
plt.savefig(filename1)
plt.close()
def update_image_pool(self,pool, images, max_size=50):
selected = list()
for image in images:
if len(pool) < max_size:
# stock the pool
pool.append(image)
selected.append(image)
elif np.random.rand() < 0.5:
# use image, but don't add it to the pool
selected.append(image)
else:
# replace an existing image and use replaced image
ix = np.random.randint(0, len(pool))
selected.append(pool[ix])
pool[ix] = image
return np.asarray(selected)
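# Illustrative use of the pool above (mirrors the commented-out calls in train() below):
# keep a rolling 50-image history of generated samples and train the discriminators on a
# mix of fresh and buffered fakes, which tends to stabilize adversarial training, e.g.
#   X_fakeA = self.update_image_pool(data_pool_A, X_fakeA)
#   X_fakeB = self.update_image_pool(data_pool_B, X_fakeB)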
def summarize(self, tag_value_pairs, step):
with self.writer.as_default():
for tag, value in tag_value_pairs:
tf.summary.scalar(tag, value, step=step)
self.writer.flush()
def train(self,epochs,batches,datasetA,datasetB,save_path = "",summary_folder = "logs/scalars/",history_path = "history_log",model_save_frequenly = 10):
# summary_folder = "logs/scalars/"
self.writer = tf.summary.create_file_writer(summary_folder)
#discriminator output square shape
d_output_shape = self.discriminatorA.model.output_shape[1]
channel_num = self.discriminatorA.model.output_shape[3]
data_pool_A,data_pool_B = list(),list()
# batch_per_epoch = int(len(datasetA)/batches)
# calculate the number of training iterations
n_steps = int(len(datasetA)/batches)
# manually enumerate
list_epoch_g_loss = []
list_epoch_d_loss = []
list_epoch_g_A_to_B_loss = []
list_epoch_g_B_to_A_loss = []
list_epoch_d_A__loss = []
list_epoch_d_B__loss = []
list_epoch_d_A_M_loss = []
list_epoch_d_B_M_loss = []
for e in range(epochs):
epoch_g_loss = 0
epoch_d_loss = 0
epoch_g_A_to_B_loss = 0
epoch_g_B_to_A_loss = 0
epoch_d_A__loss = 0
epoch_d_B__loss = 0
epoch_d_A_M_loss = 0
epoch_d_B_M_loss = 0
for i in range(n_steps):
# select a batch of real samples
X_realA, y_realA = self.generate_real_samples_batch(datasetA, batches, d_output_shape,channel_num)
X_realB, y_realB = self.generate_real_samples_batch(datasetB, batches, d_output_shape,channel_num)
# generate a batch of fake samples
X_fakeA, y_fakeA = self.generate_fake_samples_batch(self.generatorBToA, X_realB, d_output_shape,channel_num)
X_fakeB, y_fakeB = self.generate_fake_samples_batch(self.generatorAToB, X_realA, d_output_shape,channel_num)
# update fakes from pool
# X_fakeA = self.update_image_pool(data_pool_A, X_fakeA)
# X_fakeB = self.update_image_pool(data_pool_B, X_fakeB)
# update generator B->A via adversarial and cycle loss
# print("step : ",i)
# print("A->B trainable weight : ",len(self.generatorAToB.model.trainable_weights))
# print("B->A trainable weight : ",len(self.generatorBToA.model.trainable_weights))
g_loss2, cycle_loss_2, _,_ = self.composite_model_B.train_on_batch([X_realB, X_realA], [y_realA, X_realB, X_realA])
self.generatorBToA.trainable(False)
self.generatorAToB.trainable(True)
# print("A->B trainable weight : ", len(self.generatorAToB.model.trainable_weights))
# print("B->A trainable weight : ", len(self.generatorBToA.model.trainable_weights))
# update discriminator for A -> [real/fake]
self.discriminatorA.trainable(True)
dA_loss1 = self.discriminatorA.train_on_batch(X_realA, y_realA)
dA_loss2 = self.discriminatorA.train_on_batch(X_fakeA, y_fakeA)
self.discriminatorA.trainable(False)
dA_loss = (dA_loss1+dA_loss2)/2
# update generator A->B via adversarial and cycle loss
# print("A->B trainable weight : ", len(self.generatorAToB.model.trainable_weights))
# print("B->A trainable weight : ", len(self.generatorBToA.model.trainable_weights))
g_loss1, cycle_loss_1, _, _ = self.composite_model_A.train_on_batch([X_realA, X_realB], [y_realB, X_realA, X_realB])
self.generatorBToA.trainable(True)
self.generatorAToB.trainable(False)
# print("A->B trainable weight : ", len(self.generatorAToB.model.trainable_weights))
# print("B->A trainable weight : ", len(self.generatorBToA.model.trainable_weights))
# update discriminator for B -> [real/fake]
self.discriminatorB.trainable(True)
dB_loss1 = self.discriminatorB.train_on_batch(X_realB, y_realB)
dB_loss2 = self.discriminatorB.train_on_batch(X_fakeB, y_fakeB)
self.discriminatorB.trainable(False)
dB_loss = (dB_loss1+dB_loss2)/2
epoch_g_A_to_B_loss += g_loss1
epoch_g_B_to_A_loss += g_loss2
epoch_d_A__loss += dA_loss
epoch_d_B__loss += dB_loss
epoch_g_loss = epoch_g_loss+g_loss1+g_loss2+self.lamda*(cycle_loss_1+cycle_loss_2)
epoch_d_loss = epoch_d_loss+dA_loss+dB_loss
if self.use_D_M:
# print("fake shahep :",X_fakeA.shape)
X_real_mix_M,y_real_mix_M = self.generate_mix_sample_batch(datasetA,datasetB,batches,d_output_shape,channel_num)
dAM_loss1 = self.discriminatorAandM.train_on_batch(X_real_mix_M,
have to make sure self.attr==None
if attr == None:
if len(operands) == 1:
rej_types = [[]]
outs = []
temp =TypeObject("type", 0)
temp.elementtype = operands[0].types
outs.append(temp)
return rej_types, outs
elif len(operands) == 3:
rej_types = [[], [], []]
outs = []
for t in operands[0].types:
if not TypeObject.existSame(t, ["str"]):
rej_types[0].append(t)
for t in operands[1].types:
if not TypeObject.existSame(t, ["tuple"]):
rej_types[1].append(t)
for t in operands[2].types:
if not TypeObject.existSame(t, ["dict"]):
rej_types[2].append(t)
temp = TypeObject("type", 0)
outs.append(temp)
return rej_types, outs
else:
rej_types = []
outs = []
for i in range(0, len(operands)):
rej_types.append([])
return rej_types, outs
else:
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
elif func == "next":
if attr == None:
if len(operands) == 1:
rej_types = []
outs = []
elementtype = []
for i in range(0, len(operands)):
rej_types.append([])
for t in operands[0].types:
if not TypeObject.existSame(t, ["Generator"]):
rej_types[0].append(t)
else:
elementtype = t.elementtype
outs += elementtype
return rej_types, outs
else:
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
else:
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
elif func == "max" or func=="min":
# the input can be iterable or some argument
# if only one iterable e.g b = max([1,2,4]) / b = max({'a':1,'b':2})
if attr==None:
ltypes = operands[0].types
rej_target_types = []
rej_ltypes = []
outs = []
if len(operands)==1:
temp = []
for t in ltypes:
if not TypeObject.existSame(t, special_types["@iterable@"]):
rej_target_types.append(t)
elif TypeObject.existSame(t, ["Dict"]):
if isinstance(t.keytype, list):
outs += t.keytype
else:
outs.append(t.keytype)
elif TypeObject.existSame(t, ["Text", "bytes", "bytearray"]):
outs.append(t)
else:
if t.elementtype!=[]:
if t.elementtype[0]!= None:
outs.append(t.elementtype[0]) # for list/dict is also okay as it returns keytype
rej_ltypes.append(rej_target_types)
for i in range(1, len(operands)):
rej_ltypes.append([])
# outs.append(temp)
return rej_ltypes, outs
# with many arguments e.g b = max(1,2,3.1) b = max([1,2],[0,4])
else:
ifsimple = False
first = operands[0]
simplelevel_up =simplelevel_down = 0
isinitial = True
for indexop in operands:
for ftypes in indexop.types:
if not TypeObject.existSame(ftypes, special_types["@iterable@"]):
ifsimple = True
if isinitial:
isinitial = False
if TypeObject.existSame(ftypes, special_types["@number@"]):
simplelevel_up = simplelevel_down = special_types["@number@"].index(ftypes.type)
else:
simplelevel_up = simplelevel_down = 1
else:
if TypeObject.existSame(ftypes, special_types["@number@"]):
simplelevel_up = max(special_types["@number@"].index(ftypes.type),simplelevel_up)
simplelevel_down = min(special_types["@number@"].index(ftypes.type), simplelevel_down)
else:
simplelevel_up = max(1,simplelevel_up)
simplelevel_down = max(1,simplelevel_down)
# if it's like b = max([1,2],[0,4])
elif TypeObject.existSame(ftypes, ["Dict"]):
outs += ftypes.keytype
for i in range(0, len(operands)):
rej_ltypes.append([])
return rej_ltypes, outs
else:
if len(outs)==0:
outs.append(ftypes)
elif not TypeObject.existSame(ftypes,outs):
outs.append(ftypes)
# add all the possible types
if ifsimple:
for i in range(simplelevel_down,simplelevel_up+1):
temp = TypeObject(special_types["@number@"][i],"0")
outs.append(temp)
for i in range(0, len(operands)):
rej_ltypes.append([])
return rej_ltypes, outs
else:
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
elif func == "get":
if attr!=None:
# a.get('Sex', "Never")
rej_types = []
target = operands[0]
rej_target_types = []
outs = []
for t in target.types:
if not TypeObject.existSame(t, ["Dict"]):
rej_target_types.append(t)
else:
for item in t.valuetype:
if item!=None:
outs.append(item)
rej_types.append(rej_target_types)
# the second one has to be int(for list),if there is
for i in range(1, len(operands)):
rej_types.append([])
return rej_types, outs
else:
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
#====================================================================
# case 3: inter-procedural Analysis
#====================================================================
elif curnode.tg != None and curnode.tg.globaltg != None:
rej_types = []
for i in range(0, len(operands)):
rej_types.append([])
outs = []
for t in curnode.tg.globaltg.tgs:
if t.classname == curnode.tg.classname and t.name == func and func != curnode.tg.name:
returntype = t.getReturnType()
if len(returntype) == 0:
return rej_types, outs
else:
outs += returntype
return rej_types, outs
return rej_types, outs
#====================================================================
# case 4: unrecognized functions
#====================================================================
else:
rej_types = []
for i in range(0, len(operands)):
rej_types.append([])
outs = []
return rej_types, outs
def List_Read(self, operands):
# here we do not consider about userdefine first. e.g list1 = [1,'2',user-define instance]
# mention that the depth here is 2 e.g List = [1,1,'2',[1,2,3,4],[1,2,'hello']] then the result out will
# only be Typeobject(list) with elementtype [int, int, text, list, list]
# we would not merge two same types together because it could help in the List_write inference e.g a= [1,2,'3'] [x,y,z] = a
outs = []
temp = TypeObject("List",0)
for i in range(len(operands)):
# add the type not exists in the elementtype
if isinstance(operands[i].types, list):
temp.elementtype += operands[i].types
else:
temp.elementtype.append(operands[i].types)
outs.append(temp)
rej_types = []
for i in range(0, len(operands)):
rej_types.append([])
return rej_types,outs
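# Illustrative expectation for List_Read (assuming each operand carries its inferred
# types): for the literal [1, '2', [3, 4]] the output is a single TypeObject("List")
# whose elementtype collects the operands' types, roughly [int, text, list], without
# merging duplicates (see the comment above about why duplicates help List_Write).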
def List_Write(self,operands):
# input: operands output:[[]], [outs_types]
# in this func, we first find the possible types from the input, then we add all the possible types into them
# e.g. c= [1,'2',3] [a,a1,a2]= c
# due to the reason that type pass after this node spreads, we will infer that out = [int,text]
rej_types = []
outs = []
inputtypes = []
if len(operands) != 1:
logger.error("The length of input nodes for ListWrite should be 1, we get {} here.".format(len(operands)))
raise ValueError("The length of input nodes for ListWrite should be 1")
'''
elif len(operands[0].outs) != 1:
logger.error("The operand's out length should be 1, we get {} here.".format(len(operands[0].outs)))
raise ValueError("The operand's out length should be 1")
'''
for insnode in operands[0].ins:
if isinstance(insnode, hityper.tdg.SymbolNode):
if isinstance(insnode.types, list):
# here we add all the elementtype rather than types
for eacheletype in insnode.types:
if not isinstance(eacheletype.elementtype, list):
inputtypes.append(eacheletype.elementtype)
elif isinstance(eacheletype.elementtype, list):
inputtypes += eacheletype.elementtype
outs = TypeObject.removeRedundantTypes(inputtypes)
return [rej_types], outs
def Tuple_Read(self,operands):
# similiar to List Read
outs = []
temp = TypeObject("Tuple", 0)
for i in range(len(operands)):
if isinstance(operands[i].types, list):
temp.elementtype += operands[i].types
else:
temp.elementtype.append(operands[i].types)
outs.append(temp)
rej_types = []
for i in range(0, len(operands)):
rej_types.append([])
return rej_types,outs
def Tuple_Write(self,operands):
# input: operands output:[[]], [outs_types]
# in this func, we first find the possible types from the input, then we add all the possible types into them
# e.g. c= (1,'2',3) (a,a1,a2)= c => we will infer out=[int, text].
rej_types = []
outs = []
inputtypes = []
if len(operands) != 1:
logger.error("The length of input nodes for TupleWrite should be 1, we get {} here.".format(len(operands)))
raise ValueError("The length of input nodes for TupleWrite should be 1")
# here we do not constrain the out length because it can be like below:
# for i, (setting_value, setting_type) in enumerate(zip(all_values, all_types)):
# elif len(operands[0].outs) != 1:
# raise ValueError("The operand's out length should be 1")
if operands[0].name != "forin":
for insnode in operands[0].ins:
if isinstance(insnode, hityper.tdg.SymbolNode):
if isinstance(insnode.types, list):
# here we add all the elementtype rather than types
for eacheletype in insnode.types:
if not isinstance(eacheletype.elementtype, list):
inputtypes.append(eacheletype.elementtype)
elif isinstance(eacheletype.elementtype, list):
inputtypes += eacheletype.elementtype
elif isinstance(insnode, hityper.tdg.TypeGenNode): # like forin node
if isinstance(insnode.types, list):
# here we add all the elementtype rather than types
for eacheletype in insnode.types:
if not isinstance(eacheletype.elementtype, list):
inputtypes.append(eacheletype.elementtype)
elif isinstance(eacheletype.elementtype, list):
inputtypes += eacheletype.elementtype
outs = TypeObject.removeRedundantTypes(inputtypes)
return [rej_types], outs
# if it's realized by forin
else:
for insnode in operands[0].types:
if isinstance(insnode, hityper.tdg.TypeObject):
if not isinstance(insnode.elementtype, list):
# here we add all the elementtype rather than types
inputtypes.append(insnode.elementtype)
elif isinstance(insnode.elementtype, list):
inputtypes += insnode.elementtype
outs = TypeObject.removeRedundantTypes(inputtypes)
return [rej_types], outs
def Set_Read(self,operands):
# similiar to List Read
rej_types = []
outs = []
temp = TypeObject("Set", 0)
for i in range(len(operands)):
if isinstance(operands[i].types, list):
temp.elementtype += operands[i].types
else:
temp.elementtype.append(operands[i].types)
outs.append(temp)
rej_types = []
for i in range(0, len(operands)):
rej_types.append([])
return rej_types,outs
def Dict_Read(self,operands):
# similiar to List Read,but add keytype and valuetype
rej_types = []
outs = []
temp = TypeObject("Dict", 0)
# according to the rules, the first half are keytypes and the left half are valuetypes
if(len(operands)%2!=0):
logger.warning('len(operands) is odd. case a: lambda case b: {**kw}' )
for i in range(int(len(operands)/2)):
if isinstance(operands[i].types, list):
temp.elementtype += operands[i].types
else:
temp.elementtype.append(operands[i].types)
temp.keytype = temp.elementtype
for i in range(int(len(operands)/2),len(operands)):
if isinstance(operands[i].types, list):
temp.valuetype += operands[i].types
else:
temp.valuetype.append(operands[i].types)
outs.append(temp)
rej_types = []
for i in range(0, len(operands)):
rej_types.append([])
return rej_types,outs
def JoinedStr(self,operands):
rej_types = []
outs = []
for i in range(0, len(operands)):
| |
f.is_array and f.base_type == 'uint8':
if not f.array_len:
s.write('(write-long (length %s) s)'%slot)
s.write('(princ %s s)'%slot)
elif f.is_array and is_string(f.base_type):
s.write('(write-long (length %s) s)'%slot)
s.write('(dolist (elem %s)'%slot)
var = 'elem'
elif f.is_array:
if not f.array_len:
write_serialize_length(s, slot, True)
if f.is_builtin and f.array_len:
s.write('(dotimes (i %s)'%f.array_len)
elif f.is_builtin and not f.array_len:
s.write('(dotimes (i (length %s))'%var)
else:
s.write('(dolist (elem %s)'%slot)
slot = 'elem'
var = '(elt %s i)'%var
s.block_next_indent()
s.write('')
if f.is_array and f.base_type == 'uint8':
pass
elif f.is_builtin:
with Indent(s):
write_serialize_builtin(s, f, var)
else:
with Indent(s):
s.write('(send %s :serialize s)'%slot)
if f.is_array and f.base_type != 'uint8':
s.write(' )')
def write_serialize(s, spec):
"""
Write the serialize method
"""
with Indent(s):
s.write('(:serialize')
with Indent(s,inc=1):
s.write('(&optional strm)')
s.write('(let ((s (if strm strm')
s.write(' (make-string-output-stream (send self :serialization-length)))))')
with Indent(s):
for f in spec.parsed_fields():
write_serialize_field(s, f)
s.write(';;')
s.write('(if (null strm) (get-output-stream-string s))))')
def write_deserialize_length(s, f, v, is_array=False):
if is_array:
s.write('(let (n)') ## TODO should not be here
with Indent(s):
s.write('(setq n (sys::peek buf ptr- :integer)) (incf ptr- 4)')
s.write('(setq %s (let (r) (dotimes (i n) (push (instance %s :init) r)) r))'%(v,field_type(f)))
else:
set = 'setf' if v[0] == '(' else 'setq'
s.write('(let (n) (setq n (sys::peek buf ptr- :integer)) (incf ptr- 4) (%s %s (subseq buf ptr- (+ ptr- n))) (incf ptr- n))'%(set, v))
def write_deserialize_bits(s, v, num_bytes):
if num_bytes == 1:
type = ':char'
elif num_bytes == 2:
type = ':short'
elif num_bytes == 4:
type = ':integer'
elif num_bytes == 8:
type = ':long'
s.write('')
return write_deserialize_bits_signed(s,v,num_bytes)
else:
raise ValueError('Unknown size: %s', num_bytes)
set = 'setf' if v[0] == '(' else 'setq'
s.write('(%s %s (sys::peek buf ptr- %s)) (incf ptr- %s)'%(set,v,type,num_bytes))
def write_deserialize_bits_signed(s, v, num_bytes):
if num_bytes in [1,2,4]:
write_deserialize_bits(s, v, num_bytes)
else:
s.write('\n', indent=False)
s.write('#+(or :alpha :irix6 :x86_64)', indent=False)
s.write(' (setf %s (prog1 (sys::peek buf ptr- :long) (incf ptr- 8)))\n'%v)
s.write('#-(or :alpha :irix6 :x86_64)', indent=False)
s.write(' (setf %s (let ((b0 (prog1 (sys::peek buf ptr- :integer) (incf ptr- 4)))'%v)
s.write(' (b1 (prog1 (sys::peek buf ptr- :integer) (incf ptr- 4))))')
s.write(' (cond ((= b1 -1) b0)')
s.write(' ((and (= b1 0)')
s.write(' (<= lisp::most-negative-fixnum b0 lisp::most-positive-fixnum))')
s.write(' b0)')
s.write(' ((= b1 0) (make-instance bignum :size 1 :bv (integer-vector b0)))')
s.write(' (t (make-instance bignum :size 2 :bv (integer-vector b0 (ash b1 1)))))))')
def write_deserialize_builtin(s, f, v):
set = 'setf' if v[0] == '(' else 'setq'
if f.base_type == 'string':
write_deserialize_length(s,f,v)
elif f.base_type == 'float32':
s.write('(%s %s (sys::peek buf ptr- :float)) (incf ptr- 4)'%(set, v))
elif f.base_type == 'float64':
s.write('(%s %s (sys::peek buf ptr- :double)) (incf ptr- 8)'%(set, v))
elif f.base_type == 'bool':
s.write('(%s %s (not (= 0 (sys::peek buf ptr- :char)))) (incf ptr- 1)'%(set, v))
elif f.base_type in ['duration', 'time']:
s.write('(send %s :sec (sys::peek buf ptr- :integer)) (incf ptr- 4) (send %s :nsec (sys::peek buf ptr- :integer)) (incf ptr- 4)'%(v,v))
elif is_signed_int(f.base_type):
write_deserialize_bits_signed(s, v, NUM_BYTES[f.base_type])
if NUM_BYTES[f.base_type] == 1: # if signed byte, we have to convert to -128-127
s.write('(if (> %s 127) (%s %s (- %s 256)))'%(v,set,v,v))
elif is_unsigned_int(f.base_type):
write_deserialize_bits(s, v, NUM_BYTES[f.base_type])
else:
raise ValueError('%s unknown'%f.base_type)
def write_deserialize_field(s, f, pkg):
var = '_%s'%f.name
s.write(';; %s %s'%(f.type, var))
if f.is_array:
if f.is_builtin:
if f.base_type == 'uint8':
if f.array_len:
s.write('(setq %s (make-array %d :element-type :char))'%(var,f.array_len))
s.write('(replace %s buf :start2 ptr-) (incf ptr- %d)'%(var,f.array_len))
else:
s.write('(let ((n (sys::peek buf ptr- :integer))) (incf ptr- 4)')
s.write(' (setq %s (make-array n :element-type :char))'%var)
s.write(' (replace %s buf :start2 ptr-) (incf ptr- n))'%(var))
elif f.array_len:
s.write('(dotimes (i (length %s))'%var)
var = '(elt %s i)'%var
else:
if is_float(f.base_type) or is_integer(f.base_type) or is_string(f.base_type) or is_bool(f.base_type):
s.write('(let (n)')
with Indent(s):
s.write('(setq n (sys::peek buf ptr- :integer)) (incf ptr- 4)')
if is_string(f.base_type) or is_bool(f.base_type):
s.write('(setq %s (make-list n))'%var)
else:
s.write('(setq %s (instantiate %s-vector n))'%(var, lisp_type(f.base_type, f.is_array)))
s.write('(dotimes (i n)')
var = '(elt %s i)'%var
else:
write_deserialize_length(s, f, var, True)
var = 'elem-'
with Indent(s):
s.write('(dolist (%s _%s)'%(var, f.name))
else: # array but not builtin
if f.array_len:
s.write('(dotimes (i %s)'%f.array_len)
var = '(elt _%s i)'%f.name
else:
write_deserialize_length(s, f, var, True)
var = 'elem-'
with Indent(s):
s.write('(dolist (%s _%s)'%(var, f.name))
if f.is_array and f.base_type == 'uint8':
pass
elif f.is_builtin:
with Indent(s):
write_deserialize_builtin(s, f, var)
else:
with Indent(s):
s.write('(send %s :deserialize buf ptr-) (incf ptr- (send %s :serialization-length))'%(var, var))
if f.is_array and not f.base_type == 'uint8':
with Indent(s):
if f.array_len:
s.write(')')
else:
s.write('))')
def write_deserialize(s, spec):
"""
Write the deserialize method
"""
with Indent(s):
s.write('(:deserialize')
with Indent(s,inc=1):
s.write('(buf &optional (ptr- 0))')
for f in spec.parsed_fields():
write_deserialize_field(s, f, spec.package)
s.write(';;')
s.write('self)')
s.write(')')
s.newline()
def write_md5sum(s, msg_context, spec, parent=None):
md5sum = genmsg.compute_md5(msg_context, parent or spec)
s.write('(setf (get %s::%s :md5sum-) "%s")'%(spec.package, spec.actual_name, md5sum))
def write_ros_datatype(s, spec):
s.write('(setf (get %s::%s :datatype-) "%s/%s")'%(spec.package, spec.actual_name, spec.package, spec.actual_name))
def write_message_definition(s, msg_context, spec):
s.write('(setf (get %s::%s :definition-)'%(spec.package, spec.actual_name))
with Indent(s,6):
s.write('"')
definition = genmsg.compute_full_text(msg_context, spec)
lines = definition.split('\n')
for line in lines:
l = line.replace('\\', '\\\\')
l = l.replace('"', '\\"')
s.write('%s\n'%l, indent=False, newline=False)
s.write('")', newline=False)
s.write('\n\n')
def write_service_definition(s, msg_context, spec, parent):
s.write('(setf (get %s::%s :definition-)'%(parent.package, parent.actual_name))
with Indent(s,6):
s.write('"')
for spec_service in [spec.request, spec.response]:
definition = genmsg.compute_full_text(msg_context, spec_service)
lines = definition.split('\n')
for line in lines[:-1]:
l = line.replace('\\', '\\\\')
l = l.replace('"', '\\"')
s.write('%s\n'%l, indent=False, newline=False)
if spec_service == spec.request:
s.write('---\n', indent=False, newline=False)
s.write('")', newline=False)
def write_builtin_length(s, f, var='msg'):
if f.base_type in ['int8', 'uint8']:
s.write('1')
elif f.base_type in ['int16', 'uint16']:
s.write('2')
elif f.base_type in ['int32', 'uint32', 'float32']:
s.write('4')
elif f.base_type in ['int64', 'uint64', 'float64', 'duration', 'time']:
s.write('8')
elif f.base_type == 'string':
s.write('4 (length _%s)'%f.name)
elif f.base_type in ['bool', 'byte', 'char']:
s.write('1')
else:
raise ValueError('Unknown: %s', f.base_type)
def write_serialization_length(s, spec):
with Indent(s):
s.write('(:serialization-length')
with Indent(s, inc=1):
s.write('()')
s.write('(+')
with Indent(s, 1):
if not spec.parsed_fields():
s.write('0')
for field in spec.parsed_fields():
s.write(';; %s _%s'%(field.type, field.name))
if field.is_array:
if field.is_builtin and not is_string(field.base_type):
s.write('(* ')
else:
s.write('(apply #\'+ ')
s.block_next_indent()
if field.is_builtin:
if not field.array_len:
if is_string(field.base_type):
s.write('(mapcar #\'(lambda (x) (+ 4 (length x))) _%s)) 4'%(field.name))
else:
write_builtin_length(s, field)
s.write('(length _%s)) 4'%field.name, newline=False)
else:
write_builtin_length(s, field)
s.write('%s)'%field.array_len, newline=False)
else:
if field.array_len:
s.write('(send-all _%s :serialization-length))'%field.name)
else:
s.write('(send-all _%s :serialization-length)) 4'%field.name)
else:
if field.is_builtin:
write_builtin_length(s, field)
else:
s.write('(send _%s :serialization-length)'%field.name)
s.write('))')
def write_provide(s, msg_context, spec):
md5sum = genmsg.compute_md5(msg_context, spec)
s.write('(provide :%s/%s "%s")'%(spec.package, spec.actual_name,md5sum))
s.write('\n')
def write_constants(s, spec):
if spec.constants:
for c in spec.constants:
s.write('(intern "*%s*" (find-package "%s::%s"))'%(c.name.upper(), spec.package.upper(), spec.actual_name.upper()))
s.write('(shadow \'*%s* (find-package "%s::%s"))'%(c.name.upper(), spec.package.upper(), spec.actual_name.upper()))
if c.type == 'string':
s.write('(defconstant %s::%s::*%s* "%s")'%(spec.package, spec.actual_name, c.name.upper(), c.val.replace('"', '\\"')))
elif c.type == 'bool':
s.write('(defconstant %s::%s::*%s* %s)'%(spec.package, spec.actual_name, c.name.upper(), "t" if c.val == "True" else "nil"))
else:
s.write('(defconstant %s::%s::*%s* %s)'%(spec.package, spec.actual_name, c.name.upper(), c.val))
def write_srv_component(s, spec, context, parent):
spec.component_type='service'
write_constants(s, spec)
write_defclass(s, spec)
write_defmethod(s, spec)
write_accessors(s, spec)
write_serialization_length(s, spec)
write_serialize(s, spec)
write_deserialize(s, spec)
def write_service_specific_methods(s, context, spec):
### this should probably be moved to the previous definition section
s.write('(defclass %s::%s'%(spec.package, spec.actual_name))
with Indent(s):
s.write(':super ros::object')
s.write(':slots ())')
s.newline()
write_md5sum(s, context, spec, parent=spec)
write_ros_datatype(s, spec)
s.write('(setf (get %s::%s :request) %s::%s)'%(spec.package, spec.actual_name, spec.request.package, spec.request.actual_name))
s.write('(setf (get %s::%s :response) %s::%s)'%(spec.package, spec.actual_name, spec.response.package, spec.response.actual_name))
s.newline()
s.write('(defmethod %s::%s'%(spec.request.package, spec.request.actual_name))
s.write(' (:response () (instance %s::%s :init)))'%(spec.response.package, spec.response.actual_name))
s.newline()
for spec_service in [spec.request, spec.response]:
write_md5sum(s, context, spec_service, parent=spec)
write_ros_datatype(s, spec_service)
write_service_definition(s, context, spec, spec_service)
s.newline()
s.write('\n')
write_provide(s, context, spec)
s.write('\n', newline=False)
def generate_msg(pkg, files, out_dir, search_path):
"""
Generate euslisp code for all messages in a package
"""
msg_context = MsgContext.create_default()
for f in files:
f = os.path.abspath(f)
infile = os.path.basename(f)
full_type = genmsg.gentools.compute_full_type_name(pkg, infile)
spec = genmsg.msg_loader.load_msg_from_file(msg_context, f, full_type)
generate_msg_from_spec(msg_context, spec, search_path, out_dir, pkg)
def generate_srv(pkg, files, out_dir, search_path):
"""
Generate euslisp code for all services in a package
"""
msg_context = MsgContext.create_default()
for f in files:
f = os.path.abspath(f)
infile = os.path.basename(f)
full_type = genmsg.gentools.compute_full_type_name(pkg, infile)
spec = genmsg.msg_loader.load_srv_from_file(msg_context, f, full_type)
generate_srv_from_spec(msg_context, spec, search_path, out_dir, pkg, f)
def msg_list(pkg, search_path, ext):
dir_list = search_path[pkg]
files = []
for d in dir_list:
files.extend([f for f in os.listdir(d) if f.endswith(ext)])
return [f[:-len(ext)] for f in files]
def generate_msg_from_spec(msg_context, spec, search_path, output_dir, package):
"""
Generate a message
@param msg_path: The path to the .msg file
@type msg_path: str
"""
genmsg.msg_loader.load_depends(msg_context, spec, search_path)
spec.actual_name=spec.short_name
spec.component_type='message'
msgs = msg_list(package, search_path, '.msg')
for m in msgs:
genmsg.load_msg_by_type(msg_context, '%s/%s'%(package, | |
# boykovkolmogorov.py - Boykov Kolmogorov algorithm for maximum flow problems.
#
# Copyright 2016-2017 NetworkX developers.
#
# This file is part of NetworkX.
#
# NetworkX is distributed under a BSD license; see LICENSE.txt for more
# information.
#
# Author: <NAME> <<EMAIL>>
"""
Boykov-Kolmogorov algorithm for maximum flow problems.
"""
from collections import deque
from operator import itemgetter
import networkx as nx
from networkx.algorithms.flow.utils import build_residual_network
__all__ = ['boykov_kolmogorov']
def boykov_kolmogorov(G, s, t, capacity='capacity', residual=None,
value_only=False, cutoff=None):
r"""Find a maximum single-commodity flow using Boykov-Kolmogorov algorithm.
This function returns the residual network resulting after computing
the maximum flow. See below for details about the conventions
NetworkX uses for defining residual networks.
This algorithm has worse case complexity `O(n^2 m |C|)` for `n` nodes, `m`
edges, and `|C|` the cost of the minimum cut [1]_. This implementation
uses the marking heuristic defined in [2]_ which improves its running
time in many practical problems.
Parameters
----------
G : NetworkX graph
Edges of the graph are expected to have an attribute called
'capacity'. If this attribute is not present, the edge is
considered to have infinite capacity.
s : node
Source node for the flow.
t : node
Sink node for the flow.
capacity : string
Edges of the graph G are expected to have an attribute capacity
that indicates how much flow the edge can support. If this
attribute is not present, the edge is considered to have
infinite capacity. Default value: 'capacity'.
residual : NetworkX graph
Residual network on which the algorithm is to be executed. If None, a
new residual network is created. Default value: None.
value_only : bool
If True compute only the value of the maximum flow. This parameter
will be ignored by this algorithm because it is not applicable.
cutoff : integer, float
If specified, the algorithm will terminate when the flow value reaches
or exceeds the cutoff. In this case, it may be unable to immediately
determine a minimum cut. Default value: None.
Returns
-------
R : NetworkX DiGraph
Residual network after computing the maximum flow.
Raises
------
NetworkXError
The algorithm does not support MultiGraph and MultiDiGraph. If
the input graph is an instance of one of these two classes, a
NetworkXError is raised.
NetworkXUnbounded
If the graph has a path of infinite capacity, the value of a
feasible flow on the graph is unbounded above and the function
raises a NetworkXUnbounded.
See also
--------
:meth:`maximum_flow`
:meth:`minimum_cut`
:meth:`preflow_push`
:meth:`shortest_augmenting_path`
Notes
-----
The residual network :samp:`R` from an input graph :samp:`G` has the
same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair
of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a
self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists
in :samp:`G`.
For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']`
is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists
in :samp:`G` or zero otherwise. If the capacity is infinite,
:samp:`R[u][v]['capacity']` will have a high arbitrary finite value
that does not affect the solution of the problem. This value is stored in
:samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`,
:samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and
satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`.
The flow value, defined as the total flow into :samp:`t`, the sink, is
stored in :samp:`R.graph['flow_value']`. If :samp:`cutoff` is not
specified, reachability to :samp:`t` using only edges :samp:`(u, v)` such
that :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum
:samp:`s`-:samp:`t` cut.
Examples
--------
>>> import networkx as nx
>>> from networkx.algorithms.flow import boykov_kolmogorov
The functions that implement flow algorithms and output a residual
network, such as this one, are not imported to the base NetworkX
namespace, so you have to explicitly import them from the flow package.
>>> G = nx.DiGraph()
>>> G.add_edge('x','a', capacity=3.0)
>>> G.add_edge('x','b', capacity=1.0)
>>> G.add_edge('a','c', capacity=3.0)
>>> G.add_edge('b','c', capacity=5.0)
>>> G.add_edge('b','d', capacity=4.0)
>>> G.add_edge('d','e', capacity=2.0)
>>> G.add_edge('c','y', capacity=2.0)
>>> G.add_edge('e','y', capacity=3.0)
>>> R = boykov_kolmogorov(G, 'x', 'y')
>>> flow_value = nx.maximum_flow_value(G, 'x', 'y')
>>> flow_value
3.0
>>> flow_value == R.graph['flow_value']
True
A nice feature of the Boykov-Kolmogorov algorithm is that a partition
of the nodes that defines a minimum cut can be easily computed based
on the search trees used during the algorithm. These trees are stored
in the graph attribute `trees` of the residual network.
>>> source_tree, target_tree = R.graph['trees']
>>> partition = (set(source_tree), set(G) - set(source_tree))
Or equivalently:
>>> partition = (set(G) - set(target_tree), set(target_tree))
References
----------
.. [1] <NAME>., & <NAME>. (2004). An experimental comparison
of min-cut/max-flow algorithms for energy minimization in vision.
Pattern Analysis and Machine Intelligence, IEEE Transactions on,
26(9), 1124-1137.
http://www.csd.uwo.ca/~yuri/Papers/pami04.pdf
.. [2] <NAME>. Graph-based Algorithms for Multi-camera
Reconstruction Problem. PhD thesis, Cornell University, CS Department,
2003. pp. 109-114.
https://pub.ist.ac.at/~vnk/papers/thesis.pdf
"""
R = boykov_kolmogorov_impl(G, s, t, capacity, residual, cutoff)
R.graph['algorithm'] = 'boykov_kolmogorov'
return R
def boykov_kolmogorov_impl(G, s, t, capacity, residual, cutoff):
if s not in G:
raise nx.NetworkXError('node %s not in graph' % str(s))
if t not in G:
raise nx.NetworkXError('node %s not in graph' % str(t))
if s == t:
raise nx.NetworkXError('source and sink are the same node')
if residual is None:
R = build_residual_network(G, capacity)
else:
R = residual
# Initialize/reset the residual network.
# This is way too slow
#nx.set_edge_attributes(R, 0, 'flow')
for u in R:
for e in R[u].values():
e['flow'] = 0
# Use an arbitrary high value as infinite. It is computed
# when building the residual network.
INF = R.graph['inf']
if cutoff is None:
cutoff = INF
R_succ = R.succ
R_pred = R.pred
def grow():
"""Bidirectional breadth-first search for the growth stage.
Returns a connecting edge, that is and edge that connects
a node from the source search tree with a node from the
target search tree.
The first node in the connecting edge is always from the
source tree and the last node from the target tree.
"""
while active:
u = active[0]
if u in source_tree:
this_tree = source_tree
other_tree = target_tree
neighbors = R_succ
else:
this_tree = target_tree
other_tree = source_tree
neighbors = R_pred
for v, attr in neighbors[u].items():
if attr['capacity'] - attr['flow'] > 0:
if v not in this_tree:
if v in other_tree:
return (u, v) if this_tree is source_tree else (v, u)
this_tree[v] = u
dist[v] = dist[u] + 1
timestamp[v] = timestamp[u]
active.append(v)
elif v in this_tree and _is_closer(u, v):
this_tree[v] = u
dist[v] = dist[u] + 1
timestamp[v] = timestamp[u]
_ = active.popleft()
return None, None
def augment(u, v):
"""Augmentation stage.
Reconstruct path and determine its residual capacity.
We start from a connecting edge, which links a node
from the source tree to a node from the target tree.
The connecting edge is the output of the grow function
and the input of this function.
"""
attr = R_succ[u][v]
flow = min(INF, attr['capacity'] - attr['flow'])
path = [u]
# Trace a path from u to s in source_tree.
w = u
while w != s:
n = w
w = source_tree[n]
attr = R_pred[n][w]
flow = min(flow, attr['capacity'] - attr['flow'])
path.append(w)
path.reverse()
# Trace a path from v to t in target_tree.
path.append(v)
w = v
while w != t:
n = w
w = target_tree[n]
attr = R_succ[n][w]
flow = min(flow, attr['capacity'] - attr['flow'])
path.append(w)
# Augment flow along the path and check for saturated edges.
it = iter(path)
u = next(it)
these_orphans = []
for v in it:
R_succ[u][v]['flow'] += flow
R_succ[v][u]['flow'] -= flow
if R_succ[u][v]['flow'] == R_succ[u][v]['capacity']:
if v in source_tree:
source_tree[v] = None
these_orphans.append(v)
if u in target_tree:
target_tree[u] = None
these_orphans.append(u)
u = v
orphans.extend(sorted(these_orphans, key=dist.get))
return flow
def adopt():
"""Adoption stage.
Reconstruct search trees by adopting or discarding orphans.
During augmentation stage some edges got saturated and thus
the source and target search trees broke down to forests, with
orphans as roots of some of its trees. We have to reconstruct
the search trees rooted to source and target before we can | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 20 21:29:32 2021
@author: qcao
Analysis code for example_topop_tb_v3.py
Parses and cleans load-driven phantoms. Computes Radiomic signatures. Compares with BvTv.
Compare with ROIs
"""
# FEA and BoneBox Imports
import os
import sys
sys.path.append('../') # use bonebox from source without having to install/build
from bonebox.phantoms.TrabeculaeVoronoi import *
from bonebox.FEA.fea import *
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
import vtk
from pyvistaqt import BackgroundPlotter
from skimage.morphology import ball, closing, binary_dilation, binary_closing
import pyvista as pv
pv.set_plot_theme("document")
# For PyRadiomics
import logging
import six
import SimpleITK as sitk
import radiomics
from radiomics import featureextractor
from radiomics import firstorder, getTestCase, glcm, glrlm, glszm, imageoperations, shape
volumeShape = (100,100,100)
def ind2dir(ss,uu):
# converts ss and uu to output directories
saveNameAppend = "_phantom_ss_"+str(ss)+"_uu_"+str(uu)
return "/data/BoneBox-out/topopt/lazy_v3_sweep/randstate_"+str(ss)+saveNameAppend+"/"
def getBVFandE(ss,uu):
# Parse output directory given series and Ul index
BVF = np.nan
elasticModulus = np.nan
out_dir = ind2dir(ss,uu)
if os.path.exists(out_dir):
if os.path.exists(out_dir+"bvf7.npy"):
BVF = np.load(out_dir+"bvf7.npy")
if os.path.exists(out_dir+"elasticModulus7.npy"):
elasticModulus = np.load(out_dir+"elasticModulus7.npy")
return BVF, elasticModulus
def getVolume(ss,uu):
# Parse output directory and get volume
volume = np.zeros(volumeShape)
volume[:] = np.nan
out_dir = ind2dir(ss,uu)
if os.path.exists(out_dir):
if os.path.exists(out_dir+"volume_8.npy"):
volume = np.load(out_dir+"volume_8.npy")
return volume
def computeWaveletFeatures(image, mask, featureFunc=glcm.RadiomicsGLCM):
"""
featureFunc:
firstorder.RadiomicsFirstOrder
glcm.RadiomicsGLCM
"""
featureNames = []
featureVals = []
for decompositionImage, decompositionName, inputKwargs in imageoperations.getWaveletImage(image, mask):
waveletFeatures = featureFunc(decompositionImage, mask, **inputKwargs)
waveletFeatures.enableAllFeatures()
results = waveletFeatures.execute()
print('Calculated features with wavelet ', decompositionName)
for (key, val) in six.iteritems(results):
waveletFeatureName = '%s_%s' % (str(decompositionName), key)
print(' ', waveletFeatureName, ':', val)
featureNames.append(waveletFeatureName)
featureVals.append(val)
return featureNames, np.array(featureVals)
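# Rough accounting for the helper above (assumption, not asserted by the original code):
# pyradiomics emits 8 wavelet decompositions for a 3-D image, so with the default GLCM
# feature class this yields 8 x 24 = 192 values, which matches the wvltFeatures arrays
# preallocated with 192 columns further below.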
def calculate_fid(act1, act2):
from scipy.linalg import sqrtm  # local import; only needed by this helper
# calculate mean and covariance statistics
mu1, sigma1 = act1.mean(axis=0), np.cov(act1, rowvar=False)
mu2, sigma2 = act2.mean(axis=0), np.cov(act2, rowvar=False)
# calculate sum squared difference between means
ssdiff = np.sum((mu1 - mu2)**2.0)
# calculate sqrt of product between cov
covmean = sqrtm(sigma1.dot(sigma2))
# check and correct imaginary numbers from sqrt
if np.iscomplexobj(covmean):
covmean = covmean.real
# calculate score
fid = ssdiff + np.trace(sigma1 + sigma2 - 2.0 * covmean)
return fid
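# Sanity check for the FID helper (illustrative): identical activation matrices give a
# score of ~0, since the means coincide and sqrtm(sigma.dot(sigma)) equals sigma.
#   act = np.random.rand(64, 16)
#   calculate_fid(act, act)  # -> approximately 0.0 (up to numerical error)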
if __name__ == "__main__":
save_dir = "/data/BoneBox-out/topopt/lazy_v3_sweep/"
# Generate N phantom series, 3 resorption intensities per series
Nseries = 400
Nresorption = 3
# Create array of BVFs and ElasticModuli
bvfs = np.zeros((Nseries, Nresorption))
Es = np.zeros((Nseries, Nresorption))
# Array of random Uls (between 0.1 and 0.25), should be same as in example script.
randStateUls = 3012
Ulmin = 0.1
Ulmax = 0.25
Uls = sampleUniformZeroOne(((Nseries,Nresorption)), randState=randStateUls)*(Ulmax-Ulmin) + Ulmin
# Retrieve BVF and ElasticModulus
for ss in range(Nseries):
for uu in range(Nresorption):
bvfs[ss,uu], Es[ss,uu] = getBVFandE(ss,uu)
inds = np.invert(np.isnan(bvfs))
inds_nz = np.nonzero(inds)
# Correlation Coefficients
def linearFit(xx, yy):
# linear least-squares fit helper
# returns fit x, fit y, and r^2 (rs)
mfit, bfit = np.polyfit(xx, yy, 1)
rs = np.corrcoef(xx, yy)[0,1]**2
mi, ma = np.min(xx), np.max(yy)
xxx = np.array([mi, ma])
yyy = mfit*xxx + bfit
return xxx, yyy, rs
def reject_outliers(data, m=2):
ind = abs(data - np.mean(data)) < m * np.std(data)
return ind, data[ind]
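# Illustrative behaviour of reject_outliers (hypothetical data): with enough points the
# extreme value is dropped, e.g.
#   ind, kept = reject_outliers(np.array([1.0] * 9 + [100.0]))
#   kept  # -> the nine 1.0 values; 100.0 lies more than 2 standard deviations from the mean
# Note that with very few samples a single outlier inflates the std and may survive.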
# Correlation Coefficients
def linearFitRejectOutliers(xx, yy):
# linear fit with outlier rejection
# returns fit x, fit y, and r^2 (rs)
ind, yy = reject_outliers(yy, m=2)
xx = xx[ind]
mfit, bfit = np.polyfit(xx, yy, 1)
rs = np.corrcoef(xx, yy)[0,1]**2
mi, ma = np.min(xx), np.max(yy)
xxx = np.array([mi, ma])
yyy = mfit*xxx + bfit
return xxx, yyy, rs
# Correlation Coefficients
def polyFitRejectOutliers(xx, yy, order = 2):
# polynomial fit with outlier rejection
# returns sorted x, fitted y, and r^2 (rs)
ind, yy = reject_outliers(yy, m=2)
xx = xx[ind]
p = np.polyfit(xx, yy, order)
yyf = np.polyval(p,xx)
rs = np.corrcoef(yy, yyf)[0,1]**2
mi, ma = np.min(xx), np.max(yy)
xxx = np.array([mi, ma])
return np.sort(xx), np.sort(yyf), rs
# Plot BVF and Elastic Modulus vs Uls
fig, ax1 = plt.subplots()
xx, yy, rs1 = linearFitRejectOutliers(Uls[inds].flatten(), bvfs[inds].flatten())
ax1.plot(Uls[inds].flatten(), bvfs[inds].flatten(),'ko')
ax1.plot(xx, yy, 'k-')
ax1.set_ylim(0.16,0.28)
ax1.set_xlabel("Resorption Threshold $U_l$")
ax1.set_ylabel("BVF")
ax1.grid("major")
ax1.set_xlim(0.1,0.25)
xx, yy, rs2 = linearFitRejectOutliers(Uls[inds].flatten(), Es[inds].flatten())
ax2 = ax1.twinx()
ax2.plot(Uls[inds].flatten(), Es[inds].flatten(),'rv')
ax2.plot(xx, yy, 'r--')
ax2.set_ylabel("Elastic Modulus $E$",color='r')
ax2.set_ylim(0,10e7)
ax2.tick_params(axis ='y', labelcolor = 'r')
plt.savefig(save_dir+"BVF_Es_vs_Ul.png")
print("BVF vs Ul: r2="+str(rs1))
print("Es vs Ul: r2="+str(rs2))
# np.corrcoef(bvfs[inds], Es[inds])
# np.corrcoef(bvfs[inds], Uls[inds])
# # np.corrcoef(Es[inds], Uls[inds])
# Plot Es vs BVF
fig, ax1 = plt.subplots()
xx, yy, rs3 = polyFitRejectOutliers(bvfs[inds].flatten(), Es[inds].flatten())
ax1.plot(bvfs[inds].flatten(), Es[inds].flatten(),'ko')
ax1.plot(xx, yy, 'k-')
ax1.set_ylim(0,3e7)
ax1.set_xlim(0.16,0.28)
ax1.set_xlabel("BVF")
ax1.set_ylabel("Elastic Modulus $E$")
ax1.grid("major")
plt.savefig(save_dir+"Es_vs_BVF.png")
print("Es vs BVF: r2="+str(rs3))
#% Look at radiomics features
# Initialize array of features
features = np.zeros((Nseries, Nresorption, 93))
features[:] = np.nan
# Define settings for signature calculation
# These are currently set equal to the respective default values
settings = {}
settings['binWidth'] = 25
settings['resampledPixelSpacing'] = None # [3,3,3] is an example for defining resampling (voxels with size 3x3x3mm)
settings['interpolator'] = sitk.sitkBSpline
settings['imageType'] = ['original','wavelet']
# Initialize feature extractor
extractor = featureextractor.RadiomicsFeatureExtractor(**settings)
extractor.enableImageTypeByName("Wavelet")
# extractor.disableAllImageTypes()
# extractor.enableImageTypeByName(imageType="Original")
# extractor.enableImageTypeByName(imageType="Wavelet")
# extractor.enableFeatureClassByName("glcm")
# Test extraction pipeline on one volume
ss = 0; uu = 0
volume = getVolume(ss,uu).astype(int)*255
volumeSITK = sitk.GetImageFromArray(volume)
maskSITK = sitk.GetImageFromArray(np.ones(volume.shape).astype(int))
wvltFeatureNames, wvltFeatures = computeWaveletFeatures(volumeSITK, maskSITK)
featureVectorOriginal = extractor.computeFeatures(volumeSITK, maskSITK, imageTypeName="original")
volumeSITKWavelets = radiomics.imageoperations.getWaveletImage(volumeSITK, maskSITK)
featureVectorWavelet = extractor.computeFeatures(volumeSITK, maskSITK, imageTypeName="wavelet")
featureVector = extractor.computeFeatures(volumeSITK, maskSITK, imageTypeName="original")
#%
computeFeatures = False
if computeFeatures:
wvltFeatures = np.zeros((Nseries, Nresorption, 192))
wvltFeatures[:] = np.nan
# Extract volume and compute features
for ss in range(Nseries):
for uu in range(Nresorption):
if inds[ss,uu]:
volume = getVolume(ss,uu).astype(int)*255
volumeSITK = sitk.GetImageFromArray(volume)
maskSITK = sitk.GetImageFromArray(np.ones(volume.shape).astype(int))
featureVector = extractor.computeFeatures(volumeSITK, maskSITK, imageTypeName="original")
featureVectorArray = np.array([featureVector[featureName].item() for featureName in featureVector.keys()])
features[ss,uu,:] = featureVectorArray
wvltFeatureNames, wvltFeatures[ss,uu,:] = computeWaveletFeatures(volumeSITK, maskSITK)
# Reshape feature matrices
featuresReshaped = features.reshape((-1,93), order='F')
wvltFeaturesReshaped = wvltFeatures.reshape((-1,192), order='F')
indsReshaped = inds.reshape((-1,), order='F')
featuresReshaped = featuresReshaped[indsReshaped,:]
wvltFeaturesReshaped = wvltFeaturesReshaped[indsReshaped,:]
# Save feature vectors
np.save(save_dir+"features",features)
np.save(save_dir+"featuresReshaped",featuresReshaped)
np.save(save_dir+"wvltFeaturesReshaped",wvltFeaturesReshaped)
#%% Radiomic Features of ROIs
plt.close('all')
import nrrd
import glob
def readROI(filename):
roiBone, header = nrrd.read(filename)
        roiBone[roiBone==255] = 1 # binarize the mask: foreground value 255 -> 1
return roiBone
roi_dir = "/data/BoneBox/data/rois/"
Nrois = len(glob.glob(roi_dir+"isodata_*_roi_*.nrrd"))
featuresROI = np.zeros((Nrois,93))
for ind in range(Nrois):
print(ind)
fn = glob.glob(roi_dir+"isodata_*_roi_"+str(ind)+".nrrd")[0]
roiBone = readROI(fn)
volume = roiBone.astype(int)*255
# Take ROI center
volume = volume[50:150,50:150,50:150]
volumeSITK = sitk.GetImageFromArray(volume)
maskSITK = sitk.GetImageFromArray(np.ones(volume.shape).astype(int))
featureVector = extractor.computeFeatures(volumeSITK, maskSITK, imageTypeName="original")
featureVectorArray = np.array([featureVector[featureName].item() for featureName in featureVector.keys()])
featuresROI[ind,:] = featureVectorArray
# wvltFeatureNames, wvltFeatures[ss,uu,:] = computeWaveletFeatures(volumeSITK, maskSITK)
np.save(save_dir+"featuresROI",featuresROI)
#%%
featureNames = list(featureVector.keys())
import seaborn as sns
import pandas as pd
sns.set_theme(style="whitegrid")
featuresReshaped = np.load(save_dir+"featuresReshaped.npy")
featuresROI = np.load(save_dir+"featuresROI.npy")
featuresAll = np.vstack((featuresReshaped,featuresROI))
sourceList = []
for ii in range(200):
sourceList.append("Phantom")
for ii in range(208):
sourceList.append("L1 Spine")
df = pd.DataFrame(data = featuresAll,
columns = featureNames)
df["source"] = sourceList
df["all"] = ""
fig_dir = save_dir+"comparison_with_rois/"
if not os.path.exists(fig_dir):
os.mkdir(fig_dir)
# Draw a nested violinplot and split the violins for easier comparison
for ind in range(93):
fig, ax = plt.subplots(figsize=(5,10))
sns.violinplot(data=df, x="all", y=featureNames[ind], hue="source",
split=True, inner="quart", linewidth=1)
sns.despine(left=True)
plt.savefig(fig_dir+"fig_"+str(ind)+"_"+featureNames[ind])
plt.close("all")
    #%% Kolmogorov-Smirnov test
from scipy.stats import ks_2samp
kss = np.zeros(93)
ps = np.zeros(93)
for ind in range(93):
        kss[ind], ps[ind] = ks_2samp(featuresReshaped[:,ind], featuresROI[:,ind])
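    # Illustrative follow-up (assumption, not in the original script): flag features whose
    # two-sample KS test rejects the null at alpha = 0.05, i.e. features that differ
    # between the phantom and L1 spine ROI distributions.
    #   alpha = 0.05
    #   differing = [featureNames[i] for i in range(93) if ps[i] < alpha]
    #   print(len(differing), "of 93 features differ significantly")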
#%% Prep data for regressor
# # Extract Feature Names
# featureNames = list(featureVector.keys())
# indsReshaped = inds.reshape((-1,), order='F')
# features = np.load(save_dir+"features.npy")
# featuresReshaped = np.load(save_dir+"featuresReshaped.npy")
# wvltFeaturesReshaped = np.load(save_dir+"wvltFeaturesReshaped.npy")
# EsReshaped = Es.reshape((-1,), order='F')[indsReshaped]
# bvfsReshaped = bvfs.reshape((-1,), order='F')[indsReshaped]
# # combine BVF with wavelet GLCM features
# # features_norm = np.concatenate((bvfsReshaped[:,None],wvltFeaturesReshaped),axis=1) # featuresReshaped # Feature Vector
# features_norm = np.concatenate((bvfsReshaped[:,None],featuresReshaped),axis=1) # featuresReshaped # Feature Vector
# features_norm -= np.mean(features_norm,axis=0) # center on mean
# features_norm /= np.std(features_norm,axis=0) # scale to standard deviation
# features_norm[np.isnan(features_norm)] = 0
# # features_norm_names = ["BVF"]+wvltFeatureNames
# features_norm_names = ["BVF"]+featureNames
# roi_vm_mean = EsReshaped # Label
# # Reject pathologic outliers in the dataset
# ii, roi_vm_mean = reject_outliers(roi_vm_mean, m=1)
# features_norm = features_norm[ii,:]
# bvfsReshaped = bvfsReshaped[ii]
# Ntrain = 110 # | |
must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["suffix"]
@suffix.setter
def suffix(self, val):
self["suffix"] = val
# valueformat
# -----------
@property
def valueformat(self):
"""
Sets the value formatting rule using d3 formatting mini-
language which is similar to those of Python. See
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
The 'valueformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["valueformat"]
@valueformat.setter
def valueformat(self, val):
self["valueformat"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "indicator"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
font
Set the font used to display main number
prefix
Sets a prefix appearing before the number.
suffix
Sets a suffix appearing next to the number.
valueformat
Sets the value formatting rule using d3 formatting
mini-language which is similar to those of Python. See
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
"""
def __init__(
self, arg=None, font=None, prefix=None, suffix=None, valueformat=None, **kwargs
):
"""
Construct a new Number object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.indicator.Number`
font
Set the font used to display main number
prefix
Sets a prefix appearing before the number.
suffix
Sets a suffix appearing next to the number.
valueformat
Sets the value formatting rule using d3 formatting
mini-language which is similar to those of Python. See
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
Returns
-------
Number
"""
super(Number, self).__init__("number")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.indicator.Number
constructor must be a dict or
an instance of :class:`plotly.graph_objs.indicator.Number`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.indicator import number as v_number
# Initialize validators
# ---------------------
self._validators["font"] = v_number.FontValidator()
self._validators["prefix"] = v_number.PrefixValidator()
self._validators["suffix"] = v_number.SuffixValidator()
self._validators["valueformat"] = v_number.ValueformatValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("font", None)
self["font"] = font if font is not None else _v
_v = arg.pop("prefix", None)
self["prefix"] = prefix if prefix is not None else _v
_v = arg.pop("suffix", None)
self["suffix"] = suffix if suffix is not None else _v
_v = arg.pop("valueformat", None)
self["valueformat"] = valueformat if valueformat is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
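# Illustrative usage (not part of the generated module): Number is normally reached
# through an Indicator trace, e.g.
#   import plotly.graph_objects as go
#   fig = go.Figure(go.Indicator(mode="number", value=4500,
#                                number={"prefix": "$", "valueformat": ",.0f"}))
#   fig.show()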
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Gauge(_BaseTraceHierarchyType):
# axis
# ----
@property
def axis(self):
"""
The 'axis' property is an instance of Axis
that may be specified as:
- An instance of :class:`plotly.graph_objs.indicator.gauge.Axis`
- A dict of string/value properties that will be passed
to the Axis constructor
Supported dict properties:
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
range
Sets the range of this axis.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
And for dates see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format
We add one item to d3's date formatter: "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.indicat
or.gauge.axis.Tickformatstop` instances or
dicts with compatible properties
tickformatstopdefaults
When used in a template (as layout.template.dat
a.indicator.gauge.axis.tickformatstopdefaults),
sets the default property values to use for
elements of
indicator.gauge.axis.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on plot.ly for
ticktext .
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to | |
it.
raise e
# In case the query will be executed as a "CREATE TABLE <name> AS ..." or
# "CREATE VIEW <name> AS ...", this will be the value of "<name>".
self._table_or_view_name = None
def set_impala_query_options(self, cursor):
opts = """
SET MEM_LIMIT={mem_limit};
SET BATCH_SIZE={batch_size};
SET DISABLE_CODEGEN={disable_codegen};
SET DISABLE_OUTERMOST_TOPN={disable_outermost_topn};
SET DISABLE_ROW_RUNTIME_FILTERING={disable_row_runtime_filtering};
SET DISABLE_STREAMING_PREAGGREGATIONS={disable_streaming_preaggregations};
SET DISABLE_UNSAFE_SPILLS={disable_unsafe_spills};
SET EXEC_SINGLE_NODE_ROWS_THRESHOLD={exec_single_node_rows_threshold};
SET BUFFER_POOL_LIMIT={buffer_pool_limit};
SET MAX_IO_BUFFERS={max_io_buffers};
SET MAX_SCAN_RANGE_LENGTH={max_scan_range_length};
SET NUM_NODES={num_nodes};
SET NUM_SCANNER_THREADS={num_scanner_threads};
SET OPTIMIZE_PARTITION_KEY_SCANS={optimize_partition_key_scans};
SET RUNTIME_BLOOM_FILTER_SIZE={runtime_bloom_filter_size};
SET RUNTIME_FILTER_MODE={runtime_filter_mode};
SET RUNTIME_FILTER_WAIT_TIME_MS={runtime_filter_wait_time_ms};
SET SCAN_NODE_CODEGEN_THRESHOLD={scan_node_codegen_threshold}""".format(
mem_limit=randint(1024 ** 3, 10 * 1024 ** 3),
batch_size=randint(1, 4096),
disable_codegen=choice((0, 1)),
disable_outermost_topn=choice((0, 1)),
disable_row_runtime_filtering=choice((0, 1)),
disable_streaming_preaggregations=choice((0, 1)),
disable_unsafe_spills=choice((0, 1)),
exec_single_node_rows_threshold=randint(1, 100000000),
buffer_pool_limit=randint(1, 100000000),
max_io_buffers=randint(1, 100000000),
max_scan_range_length=randint(1, 100000000),
num_nodes=randint(3, 3),
num_scanner_threads=randint(1, 100),
optimize_partition_key_scans=choice((0, 1)),
random_replica=choice((0, 1)),
replica_preference=choice(("CACHE_LOCAL", "DISK_LOCAL", "REMOTE")),
runtime_bloom_filter_size=randint(4096, 16777216),
runtime_filter_mode=choice(("OFF", "LOCAL", "GLOBAL")),
runtime_filter_wait_time_ms=randint(1, 100000000),
scan_node_codegen_threshold=randint(1, 100000000))
LOG.debug(opts)
for opt in opts.strip().split(";"):
cursor.execute(opt)
def fetch_query_results(self, query):
'''Concurrently execute the query using each cursor and return a list of tuples
containing the result information for each cursor. The tuple format is
(<exception or None>, <data set or None>).
If query_timeout_seconds is reached and the connection is killable then the
query will be cancelled and the connection reset. Otherwise the query will
continue to run in the background.
"query" should be an instance of query.Query.
'''
if query.execution in (StatementExecutionMode.CREATE_TABLE_AS,
StatementExecutionMode.CREATE_VIEW_AS):
self._table_or_view_name = self._create_random_table_name()
elif isinstance(query, (InsertStatement,)):
self._table_or_view_name = query.dml_table.name
query_threads = list()
for sql_writer, cursor, log_file in izip(
self.sql_writers, self.cursors, self.query_logs
):
if self.ENABLE_RANDOM_QUERY_OPTIONS and cursor.db_type == IMPALA:
self.set_impala_query_options(cursor)
query_thread = Thread(
target=self._fetch_sql_results,
args=[query, cursor, sql_writer, log_file],
name='{db_type}-exec-{id_}'.format(
db_type=cursor.db_type, id_=id(query)))
query_thread.daemon = True
query_thread.sql = ''
query_thread.data_set = None
query_thread.cursor_description = None
query_thread.exception = None
query_thread.start()
query_threads.append(query_thread)
end_time = time() + self.query_timeout_seconds
for query_thread, cursor in izip(query_threads, self.cursors):
join_time = end_time - time()
if join_time > 0:
query_thread.join(join_time)
if query_thread.is_alive():
# Kill connection and reconnect to return cursor to initial state.
if cursor.conn.supports_kill:
LOG.debug('Attempting to kill connection')
cursor.conn.kill()
          LOG.debug('Killed connection')
try:
# TODO: Sometimes this takes a very long time causing the program to appear to
# hang. Maybe this should be done in another thread so a timeout can be
# applied?
cursor.close()
except Exception as e:
LOG.info('Error closing cursor: %s', e)
cursor.reconnect()
query_thread.exception = QueryTimeout(
'Query timed out after %s seconds' % self.query_timeout_seconds)
if (query.execution in (StatementExecutionMode.CREATE_TABLE_AS,
StatementExecutionMode.DML_TEST)):
cursor.drop_table(self._table_or_view_name)
elif query.execution == StatementExecutionMode.CREATE_VIEW_AS:
cursor.drop_view(self._table_or_view_name)
return [(query_thread.sql, query_thread.exception, query_thread.data_set,
query_thread.cursor_description) for query_thread in query_threads]
def _fetch_sql_results(self, query, cursor, sql_writer, log_file):
'''Execute the query using the cursor and set the result or exception on the local
thread.
'''
try:
log_file.write('/***** Start Query *****/\n')
if sql_writer.DIALECT == self.flatten_dialect:
# Converts the query model for the flattened version of the data. This is for
# testing of Impala nested types support.
query = deepcopy(query)
QueryFlattener().flatten(query)
if query.execution == StatementExecutionMode.CREATE_TABLE_AS:
setup_sql = sql_writer.write_create_table_as(query, self._table_or_view_name)
query_sql = 'SELECT * FROM ' + self._table_or_view_name
elif query.execution == StatementExecutionMode.CREATE_VIEW_AS:
setup_sql = sql_writer.write_create_view(query, self._table_or_view_name)
query_sql = 'SELECT * FROM ' + self._table_or_view_name
elif isinstance(query, (InsertStatement,)):
setup_sql = sql_writer.write_query(query)
# TODO: improve validation (IMPALA-4599). This is good enough for looking for
# crashes on DML statements
query_sql = 'SELECT COUNT(*) FROM ' + self._table_or_view_name
else:
setup_sql = None
query_sql = sql_writer.write_query(query)
if setup_sql:
LOG.debug("Executing on %s:\n%s", cursor.db_type, setup_sql)
current_thread().sql = setup_sql + ';\n'
log_file.write(setup_sql + ';\n')
log_file.flush()
cursor.execute(setup_sql)
LOG.debug("Executing on %s:\n%s", cursor.db_type, query_sql)
current_thread().sql += query_sql
log_file.write(query_sql + ';\n')
log_file.write('/***** End Query *****/\n')
log_file.flush()
cursor.execute(query_sql)
col_count = len(cursor.description)
batch_size = max(10000 / col_count, 1)
row_limit = self.TOO_MUCH_DATA / col_count
data_set = list()
current_thread().data_set = data_set
current_thread().cursor_description = cursor.description
LOG.debug("Fetching results from %s", cursor.db_type)
while True:
batch = cursor.fetchmany(batch_size)
data_set.extend(batch)
if len(batch) < batch_size:
if cursor.db_type == IMPALA:
impala_log = cursor.get_log()
if 'Expression overflowed, returning NULL' in impala_log:
raise TypeOverflow('Numeric overflow; data may not match')
break
if len(data_set) > row_limit:
raise DataLimitExceeded('Too much data')
if isinstance(query, (InsertStatement,)):
LOG.debug('Total row count for {0}: {1}'.format(
cursor.db_type, str(data_set)))
except Exception as e:
current_thread().exception = e
def _create_random_table_name(self):
char_choices = ascii_lowercase
chars = list()
for idx in xrange(4): # will result in ~1M combinations
if idx == 1:
char_choices += '_' + digits
chars.append(choice(char_choices))
return 'qgen_' + ''.join(chars)
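  # Illustrative output (assumption): generated names look like "qgen_a7_k", i.e. a fixed
  # "qgen_" prefix plus four characters where the first is always a lowercase letter and
  # the remaining three may also be digits or underscores.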
class ComparisonResult(object):
  '''Represents the result of comparing one query's output between the test and reference databases.'''
def __init__(self, query, test_db_type, ref_db_type):
self.query = query
self.test_db_type = test_db_type
self.ref_db_type = ref_db_type
self.ref_sql = None
self.test_sql = None
self.query_resulted_in_data = False
self.ref_row_count = None
self.test_row_count = None
self.mismatch_at_row_number = None
self.mismatch_at_col_number = None
    self.ref_row = None  # The reference row where the mismatch happened
    self.test_row = None  # The test row where the mismatch happened
self.exception = None
self.modified_rows_count = None
self._error_message = None
@property
def error(self):
if not self._error_message:
if self.exception:
self._error_message = str(self.exception)
elif (self.ref_row_count or self.test_row_count) and \
self.ref_row_count != self.test_row_count:
self._error_message = 'Row counts do not match: %s %s rows vs %s %s rows' \
% (self.test_row_count,
self.test_db_type,
self.ref_db_type,
self.ref_row_count)
elif self.mismatch_at_row_number is not None:
# Write a row like "[a, b, <<c>>, d]" where c is a bad value
test_row = '[' + ', '.join(
'<<' + str(val) + '>>' if idx == self.mismatch_at_col_number - 1 else str(val)
for idx, val in enumerate(self.test_row)
) + ']'
ref_row = '[' + ', '.join(
'<<' + str(val) + '>>' if idx == self.mismatch_at_col_number - 1 else str(val)
for idx, val in enumerate(self.ref_row)
) + ']'
self._error_message = \
'Column %s in row %s does not match: %s %s row vs %s %s row' \
% (self.mismatch_at_col_number,
self.mismatch_at_row_number,
test_row,
self.test_db_type,
ref_row,
self.ref_db_type)
return self._error_message
@property
def is_known_error(self):
return isinstance(self.exception, KnownError)
@property
def query_timed_out(self):
return isinstance(self.exception, QueryTimeout)
QueryTimeout = type('QueryTimeout', (Exception, ), {})
TypeOverflow = type('TypeOverflow', (Exception, ), {})
DataLimitExceeded = type('DataLimitExceeded', (Exception, ), {})
class KnownError(Exception):
def __init__(self, jira_url):
Exception.__init__(self, 'Known issue: ' + jira_url)
self.jira_url = jira_url
class FrontendExceptionSearcher(object):
def __init__(self, query_profile, ref_conn, test_conn):
'''query_profile should be an instance of one of the profiles in query_profile.py'''
self.query_profile = query_profile
self.ref_conn = ref_conn
self.test_conn = test_conn
self.ref_sql_writer = SqlWriter.create(dialect=ref_conn.db_type)
self.test_sql_writer = SqlWriter.create(dialect=test_conn.db_type)
with ref_conn.cursor() as ref_cursor:
with test_conn.cursor() as test_cursor:
self.common_tables = DbCursor.describe_common_tables([ref_cursor, test_cursor])
if not self.common_tables:
raise Exception("Unable to find a common set of tables in both databases")
def search(self, number_of_test_queries):
def on_ref_db_error(e, sql):
LOG.warn("Error generating explain plan for reference db:\n%s\n%s" % (e, sql))
def on_test_db_error(e, sql):
LOG.error("Error generating explain plan for test db:\n%s" % sql)
raise e
for idx in xrange(number_of_test_queries):
LOG.info("Explaining query #%s" % (idx + 1))
statement_type = self.query_profile.choose_statement()
statement_generator = get_generator(statement_type)(self.query_profile)
if issubclass(statement_type, (InsertStatement,)):
dml_table = self.query_profile.choose_table(self.common_tables)
else:
dml_table = None
query = statement_generator.generate_statement(
self.common_tables, dml_table=dml_table)
if not self._explain_query(self.ref_conn, self.ref_sql_writer, query,
on_ref_db_error):
continue
self._explain_query(self.test_conn, self.test_sql_writer, query,
on_test_db_error)
def _explain_query(self, conn, writer, query, exception_handler):
sql = writer.write_query(query)
try:
with conn.cursor() as cursor:
cursor.execute("EXPLAIN %s" % sql)
return True
except Exception as e:
exception_handler(e, sql)
return False
class QueryResultDiffSearcher(object):
'''This class uses the query generator (query_generator.py) along with the
query profile (query_profile.py) to randomly generate queries then executes the
queries on the reference and test databases, then compares the results.
'''
# Sometimes things get into a bad state and the same error loops forever
ABORT_ON_REPEAT_ERROR_COUNT = 2
COPY_TABLE_SUFFIX = '__qgen_copy'
def __init__(self, query_profile, ref_conn, test_conn):
'''query_profile should be an instance of one of the profiles in query_profile.py'''
self.query_profile = query_profile
self.ref_conn = ref_conn
self.test_conn = test_conn
with ref_conn.cursor() as ref_cursor:
with test_conn.cursor() as test_cursor:
self.common_tables = DbCursor.describe_common_tables([ref_cursor, test_cursor])
if not self.common_tables:
raise Exception("Unable to find a common set of tables in both databases")
def _concurrently_copy_table(self, src_table):
"""
Given a Table object, create another Table with the same schema and return the new
Table object. The schema will be created in both the test and reference databases.
The data is then copied in both the ref and test databases using threads.
"""
    with self.test_conn.cursor() as test_cursor:
test_cursor.execute('SHOW CREATE TABLE {0}'.format(src_table.name))
(create_table_sql,) = test_cursor.fetchall()[0]
new_table_name = src_table.name + self.COPY_TABLE_SUFFIX
create_table_sql = create_table_sql.replace(src_table.name, new_table_name, 1)
test_cursor.drop_table(new_table_name)
test_cursor.execute(create_table_sql)
new_table = test_cursor.describe_table(new_table_name)
    with self.ref_conn.cursor() as ref_cursor:
ref_cursor.drop_table(new_table_name)
ref_cursor.create_table(new_table)
copy_select_query = Query()
copy_select_query.select_clause = SelectClause(
[SelectItem(col) for col in src_table.cols])
copy_select_query.from_clause = FromClause(src_table)
if new_table.primary_keys:
conflict_action = InsertClause.CONFLICT_ACTION_IGNORE
else:
conflict_action | |
#!/usr/bin/env python3
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Full DrQA pipeline."""
import heapq
import logging
import math
import time
from multiprocessing import Pool as ProcessPool
from multiprocessing.util import Finalize
import numpy as np
import regex
import torch
from . import DEFAULTS
from .. import reader
from .. import tokenizers
from ..reader.data import ReaderDataset, SortedBatchSampler
from ..reader.vector import batchify
logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
# Multiprocessing functions to fetch and tokenize text
# ------------------------------------------------------------------------------
PROCESS_TOK = None
PROCESS_CANDS = None
# DOC_MEAN = 8.5142
# DOC_STD = 2.8324
def init(tokenizer_class, tokenizer_opts, candidates=None):
global PROCESS_TOK, PROCESS_CANDS
PROCESS_TOK = tokenizer_class(**tokenizer_opts)
Finalize(PROCESS_TOK, PROCESS_TOK.shutdown, exitpriority=100)
PROCESS_CANDS = candidates
def tokenize_text(text):
global PROCESS_TOK
return PROCESS_TOK.tokenize(text)
# ------------------------------------------------------------------------------
# Main DrQA pipeline
# ------------------------------------------------------------------------------
class DrQA(object):
# Target size for squashing short paragraphs together.
# 0 = read every paragraph independently
# infty = read all paragraphs together
GROUP_LENGTH = 0
def __init__(
self,
reader_model=None,
normalize=False,
embedding_file=None,
tokenizer=None,
fixed_candidates=None,
batch_size=128,
cuda=True,
data_parallel=False,
max_loaders=5,
num_workers=None,
ranker=None,
et_model=None,
et_threshold=None
):
"""Initialize the pipeline.
Args:
reader_model: model file from which to load the DocReader.
embedding_file: if given, will expand DocReader dictionary to use
all available pretrained embeddings.
tokenizer: string option to specify tokenizer used on docs.
            fixed_candidates: if given, all predictions will be constrained to
the set of candidates contained in the file. One entry per line.
batch_size: batch size when processing paragraphs.
cuda: whether to use the gpu.
            data_parallel: whether to use multiple GPUs.
max_loaders: max number of async data loading workers when reading.
(default is fine).
num_workers: number of parallel CPU processes to use for tokenizing
                and post-processing results.
"""
self.batch_size = batch_size
self.max_loaders = max_loaders
self.fixed_candidates = fixed_candidates is not None
self.cuda = cuda
logger.info('Initializing document ranker...')
self.ranker = ranker
logger.info('Initializing document reader...')
t0 = time.time()
reader_model = reader_model or DEFAULTS['reader_model']
self.reader = reader.DocReader.load(reader_model, normalize=normalize)
t1 = time.time()
logger.info('document reader model load [time]: %.4f s' % (t1 - t0))
if embedding_file:
logger.info('embedding_file')
logger.info('Expanding dictionary...')
words = reader.utils.index_embedding_words(embedding_file)
added = self.reader.expand_dictionary(words)
self.reader.load_embeddings(added, embedding_file)
if cuda:
logger.info('cuda')
self.reader.cuda()
t2 = time.time()
logger.info('cuda initialized [time]: %.4f s' % (t2 - t1))
if data_parallel:
logger.info('data_parallel')
self.reader.parallelize()
annotators = tokenizers.get_annotators_for_model(self.reader)
tok_opts = {'annotators': annotators}
logger.debug('tokenizer')
if not tokenizer:
tok_class = DEFAULTS['tokenizer']
else:
tok_class = tokenizers.get_class(tokenizer)
logger.debug('annotators')
self.num_workers = num_workers
self.processes = ProcessPool(num_workers,
initializer=init,
initargs=(tok_class, tok_opts, fixed_candidates))
if et_model:
            self.et_threshold = et_threshold if et_threshold and 0 < et_threshold < 1 else 0.5
logger.info('Initializing early stopping model...')
import treelite.runtime
self.et_model = treelite.runtime.Predictor(et_model, verbose=True)
logger.info('early stopping model (et threshold: %s) loaded.' % self.et_threshold)
else:
self.et_threshold = None
def _split_doc(self, doc):
"""Given a doc, split it into chunks (by paragraph)."""
curr = []
curr_len = 0
for split in regex.split(r'\n+', doc):
split = split.strip()
if len(split) == 0:
continue
# Maybe group paragraphs together until we hit a length limit
if len(curr) > 0 and curr_len + len(split) > self.GROUP_LENGTH:
yield ' '.join(curr)
curr = []
curr_len = 0
curr.append(split)
curr_len += len(split)
if len(curr) > 0:
yield ' '.join(curr)
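    # Illustrative behavior (sketch): with the default GROUP_LENGTH = 0 each non-empty
    # paragraph is yielded on its own, e.g.
    #   list(self._split_doc("Para one.\n\nPara two."))  ->  ["Para one.", "Para two."]
    # whereas a very large GROUP_LENGTH squashes both into a single chunk.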
def _get_loader(self, data, num_loaders):
"""Return a pytorch data iterator for provided examples."""
dataset = ReaderDataset(data, self.reader)
sampler = SortedBatchSampler(
dataset.lengths(),
self.batch_size,
shuffle=False
)
loader = torch.utils.data.DataLoader(
dataset,
batch_size=self.batch_size,
sampler=sampler,
num_workers=num_loaders,
collate_fn=batchify,
pin_memory=self.cuda,
)
return loader
def process_single(self, query, top_n=1, n_docs=5,
return_context=False):
"""Run a single query."""
predictions = self.process_batch(
[query],
top_n, n_docs, return_context
)
return predictions[0]
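    # Illustrative usage (hypothetical ranker and model paths, not defined here):
    #   pipeline = DrQA(reader_model="/path/to/reader.mdl", ranker=my_ranker, cuda=False)
    #   preds = pipeline.process_single("Who wrote Hamlet?", top_n=1, n_docs=5)
    #   print(preds[0]["span"], preds[0]["span_score"])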
def process(self, query, top_n=1, n_docs=5):
if self.et_threshold:
predictions = self.process_batch_et(query, n_docs)
else:
predictions = self.process_batch(query, top_n=top_n, n_docs=n_docs)
return predictions
def process_batch(self, queries, top_n=1, n_docs=5,
return_context=False):
"""Run a batch of queries (more efficient)."""
t3 = time.time()
logger.info('Processing %d queries...' % len(queries))
logger.info('Retrieving top %d docs...' % n_docs)
# Rank documents for queries.
if len(queries) == 1:
ranked = [self.ranker.closest_docs(queries[0], k=n_docs)]
else:
ranked = self.ranker.batch_closest_docs(queries, k=n_docs, num_workers=self.num_workers)
t4 = time.time()
logger.info('docs retrieved [time]: %.4f s' % (t4 - t3))
all_docids, all_doc_scores, all_doc_texts = zip(*ranked)
# Flatten document ids and retrieve text from database.
# We remove duplicates for processing efficiency.
flat_docids, flat_doc_texts = zip(*{(d, t) for doc_ids, doc_texts in zip(all_docids, all_doc_texts)
for d, t in zip(doc_ids, doc_texts)})
# flat_docids = list({d for docids in all_docids for d in docids})
did2didx = {did: didx for didx, did in enumerate(flat_docids)}
# flat_doc_texts = list({t for doc_texts in all_doc_texts for t in doc_texts})
# logger.info('doc_texts for top %d docs extracted' % n_docs)
# Split and flatten documents. Maintain a mapping from doc (index in
# flat list) to split (index in flat list).
flat_splits = []
didx2sidx = []
for text in flat_doc_texts:
splits = self._split_doc(text)
didx2sidx.append([len(flat_splits), -1])
for split in splits:
flat_splits.append(split)
didx2sidx[-1][1] = len(flat_splits)
t5 = time.time()
# logger.debug('doc_texts flattened')
# Push through the tokenizers as fast as possible.
q_tokens = self.processes.map_async(tokenize_text, queries)
s_tokens = self.processes.map_async(tokenize_text, flat_splits)
q_tokens = q_tokens.get()
s_tokens = s_tokens.get()
# logger.info('q_tokens: %s' % q_tokens)
# logger.info('s_tokens: %s' % s_tokens)
t6 = time.time()
logger.info('doc texts tokenized [time]: %.4f s' % (t6 - t5))
# Group into structured example inputs. Examples' ids represent
# mappings to their question, document, and split ids.
examples = []
for qidx in range(len(queries)):
q_text = q_tokens[qidx].words()
para_lens = []
for rel_didx, did in enumerate(all_docids[qidx]):
start, end = didx2sidx[did2didx[did]]
for sidx in range(start, end):
para_text = s_tokens[sidx].words()
if len(q_text) > 0 and len(para_text) > 0:
examples.append({
'id': (qidx, rel_didx, sidx),
'question': q_text,
# 'qlemma': q_tokens[qidx].lemmas(),
'document': para_text,
'document_char': s_tokens[sidx].chars(),
'question_char': q_tokens[qidx].chars(),
# 'lemma': s_tokens[sidx].lemmas(),
# 'pos': s_tokens[sidx].pos(),
# 'ner': s_tokens[sidx].entities(),
'doc_score': float(all_doc_scores[qidx][rel_didx])
})
# r = {'w': para_text}
# f = open('/tmp/data.json', 'w')
# f.write(json.dumps(r))
# f.close()
# exit(0)
para_lens.append(len(s_tokens[sidx].words()))
# logger.debug('question_p: %s paragraphs: %s' % (queries[qidx], para_lens))
t7 = time.time()
logger.info('paragraphs prepared [time]: %.4f s' % (t7 - t6))
result_handles = []
num_loaders = min(self.max_loaders, int(math.floor(len(examples) / 1e3)))
for batch in self._get_loader(examples, num_loaders):
handle = self.reader.predict(batch, async_pool=self.processes)
result_handles.append((handle, batch[-1], batch[0].size(0)))
t8 = time.time()
logger.info('paragraphs predicted [time]: %.4f s' % (t8 - t7))
# Iterate through the predictions, and maintain priority queues for
# top scored answers for each question in the batch.
queues = [[] for _ in range(len(queries))]
for result, ex_ids, batch_size in result_handles:
s, e, score = result.get()
for i in range(batch_size):
# We take the top prediction per split.
if len(score[i]) > 0:
item = (score[i][0], ex_ids[i], s[i][0], e[i][0])
queue = queues[ex_ids[i][0]]
if len(queue) < top_n:
heapq.heappush(queue, item)
else:
heapq.heappushpop(queue, item)
logger.info('answers processed...')
# Arrange final top prediction data.
all_predictions = []
for queue in queues:
predictions = []
while len(queue) > 0:
score, (qidx, rel_didx, sidx), s, e = heapq.heappop(queue)
prediction = {
'doc_id': all_docids[qidx][rel_didx],
'start': int(s),
'end': int(e),
'span': s_tokens[sidx].slice(s, e + 1).untokenize(),
'doc_score': float(all_doc_scores[qidx][rel_didx]),
'span_score': float(score)
}
if return_context:
prediction['context'] = {
'text': s_tokens[sidx].untokenize(),
'start': s_tokens[sidx].offsets()[s][0],
'end': s_tokens[sidx].offsets()[e][1],
}
predictions.append(prediction)
all_predictions.append(predictions[-1::-1])
logger.info('%d queries processed [time]: %.4f s' %
(len(queries), time.time() - t3))
return all_predictions
def process_batch_et(self, queries, n_docs):
"""Run a batch of queries (more efficient)."""
t3 = time.time()
logger.info('ET Processing %d queries...' % len(queries))
logger.info('ET Retrieving top %d docs...' % n_docs)
# Rank documents for queries.
if len(queries) == 1:
ranked = [self.ranker.closest_docs(queries[0], k=n_docs)]
else:
ranked = self.ranker.batch_closest_docs(queries, k=n_docs, num_workers=self.num_workers)
t4 = time.time()
logger.info('ET docs retrieved [time]: %.4f s' % (t4 - t3))
all_docids, all_doc_scores, all_doc_texts = zip(*ranked)
# Flatten document ids and retrieve text from database.
# We remove duplicates for processing efficiency.
flat_docids, flat_doc_texts = zip(*{(d, t) for doc_ids, doc_texts in zip(all_docids, all_doc_texts)
for d, t in zip(doc_ids, doc_texts)})
# flat_docids = list({d for docids in all_docids for d in docids})
did2didx = {did: didx for didx, did in enumerate(flat_docids)}
# flat_doc_texts = list({t for doc_texts in all_doc_texts for t in doc_texts})
# logger.info('doc_texts for top %d docs extracted' % n_docs)
# Split and flatten documents. Maintain a mapping from doc (index in
# flat list) to split (index in flat list).
flat_splits = []
didx2sidx = []
for text in flat_doc_texts:
splits = self._split_doc(text)
didx2sidx.append([len(flat_splits), -1])
for split in splits:
flat_splits.append(split)
didx2sidx[-1][1] = len(flat_splits)
t5 = time.time()
logger.debug('ET doc_texts flattened')
# Push through the tokenizers | |
"""A list of usages.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: Array of results.
:vartype value: list[~azure.mgmt.sql.models.Usage]
:ivar next_link: Link to retrieve next page of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Usage]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(UsageListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class UserIdentity(msrest.serialization.Model):
"""Azure Active Directory identity configuration for a resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar principal_id: The Azure Active Directory principal id.
:vartype principal_id: str
:ivar client_id: The Azure Active Directory client id.
:vartype client_id: str
"""
_validation = {
'principal_id': {'readonly': True},
'client_id': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'client_id': {'key': 'clientId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(UserIdentity, self).__init__(**kwargs)
self.principal_id = None
self.client_id = None
class VirtualCluster(TrackedResource):
"""An Azure SQL virtual cluster.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:ivar subnet_id: Subnet resource ID for the virtual cluster.
:vartype subnet_id: str
:param family: If the service has different generations of hardware, for the same SKU, then
that can be captured here.
:type family: str
:ivar child_resources: List of resources in this virtual cluster.
:vartype child_resources: list[str]
:param maintenance_configuration_id: Specifies maintenance configuration id to apply to this
virtual cluster.
:type maintenance_configuration_id: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'subnet_id': {'readonly': True},
'child_resources': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'subnet_id': {'key': 'properties.subnetId', 'type': 'str'},
'family': {'key': 'properties.family', 'type': 'str'},
'child_resources': {'key': 'properties.childResources', 'type': '[str]'},
'maintenance_configuration_id': {'key': 'properties.maintenanceConfigurationId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VirtualCluster, self).__init__(**kwargs)
self.subnet_id = None
self.family = kwargs.get('family', None)
self.child_resources = None
self.maintenance_configuration_id = kwargs.get('maintenance_configuration_id', None)
class VirtualClusterListResult(msrest.serialization.Model):
"""A list of virtual clusters.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: Array of results.
:vartype value: list[~azure.mgmt.sql.models.VirtualCluster]
:ivar next_link: Link to retrieve next page of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[VirtualCluster]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VirtualClusterListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class VirtualClusterUpdate(msrest.serialization.Model):
"""An update request for an Azure SQL Database virtual cluster.
Variables are only populated by the server, and will be ignored when sending a request.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:ivar subnet_id: Subnet resource ID for the virtual cluster.
:vartype subnet_id: str
:param family: If the service has different generations of hardware, for the same SKU, then
that can be captured here.
:type family: str
:ivar child_resources: List of resources in this virtual cluster.
:vartype child_resources: list[str]
:param maintenance_configuration_id: Specifies maintenance configuration id to apply to this
virtual cluster.
:type maintenance_configuration_id: str
"""
_validation = {
'subnet_id': {'readonly': True},
'child_resources': {'readonly': True},
}
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'subnet_id': {'key': 'properties.subnetId', 'type': 'str'},
'family': {'key': 'properties.family', 'type': 'str'},
'child_resources': {'key': 'properties.childResources', 'type': '[str]'},
'maintenance_configuration_id': {'key': 'properties.maintenanceConfigurationId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VirtualClusterUpdate, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.subnet_id = None
self.family = kwargs.get('family', None)
self.child_resources = None
self.maintenance_configuration_id = kwargs.get('maintenance_configuration_id', None)
class VirtualNetworkRule(ProxyResource):
"""A virtual network rule.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param virtual_network_subnet_id: The ARM resource id of the virtual network subnet.
:type virtual_network_subnet_id: str
:param ignore_missing_vnet_service_endpoint: Create firewall rule before the virtual network
has vnet service endpoint enabled.
:type ignore_missing_vnet_service_endpoint: bool
:ivar state: Virtual Network Rule State. Possible values include: "Initializing", "InProgress",
"Ready", "Failed", "Deleting", "Unknown".
:vartype state: str or ~azure.mgmt.sql.models.VirtualNetworkRuleState
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'virtual_network_subnet_id': {'key': 'properties.virtualNetworkSubnetId', 'type': 'str'},
'ignore_missing_vnet_service_endpoint': {'key': 'properties.ignoreMissingVnetServiceEndpoint', 'type': 'bool'},
'state': {'key': 'properties.state', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VirtualNetworkRule, self).__init__(**kwargs)
self.virtual_network_subnet_id = kwargs.get('virtual_network_subnet_id', None)
self.ignore_missing_vnet_service_endpoint = kwargs.get('ignore_missing_vnet_service_endpoint', None)
self.state = None
class VirtualNetworkRuleListResult(msrest.serialization.Model):
"""A list of virtual network rules.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: Array of results.
:vartype value: list[~azure.mgmt.sql.models.VirtualNetworkRule]
:ivar next_link: Link to retrieve next page of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[VirtualNetworkRule]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VirtualNetworkRuleListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class VulnerabilityAssessmentRecurringScansProperties(msrest.serialization.Model):
"""Properties of a Vulnerability Assessment recurring scans.
:param is_enabled: Recurring scans state.
:type is_enabled: bool
    :param email_subscription_admins: Specifies that the scheduled scan notification is sent
     to the subscription administrators.
:type email_subscription_admins: bool
:param emails: Specifies an array of e-mail addresses to which the scan notification is sent.
:type emails: list[str]
"""
_attribute_map = {
'is_enabled': {'key': 'isEnabled', 'type': 'bool'},
'email_subscription_admins': {'key': 'emailSubscriptionAdmins', 'type': 'bool'},
'emails': {'key': 'emails', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(VulnerabilityAssessmentRecurringScansProperties, self).__init__(**kwargs)
self.is_enabled = kwargs.get('is_enabled', None)
self.email_subscription_admins = kwargs.get('email_subscription_admins', True)
self.emails = kwargs.get('emails', None)
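# Illustrative construction (placeholder values): the model is keyword-driven, e.g.
#   props = VulnerabilityAssessmentRecurringScansProperties(
#       is_enabled=True, email_subscription_admins=False, emails=["secops@example.com"])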
class VulnerabilityAssessmentScanError(msrest.serialization.Model):
"""Properties of a vulnerability assessment scan error.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VulnerabilityAssessmentScanError, self).__init__(**kwargs)
self.code = None
self.message = None
class VulnerabilityAssessmentScanRecord(ProxyResource):
"""A vulnerability assessment scan record.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar scan_id: The scan ID.
:vartype scan_id: str
:ivar trigger_type: The scan trigger type. Possible values include: "OnDemand", "Recurring".
:vartype trigger_type: str or ~azure.mgmt.sql.models.VulnerabilityAssessmentScanTriggerType
:ivar state: The scan status. Possible values include: "Passed", "Failed", "FailedToRun",
"InProgress".
:vartype state: str or ~azure.mgmt.sql.models.VulnerabilityAssessmentScanState
:ivar start_time: The scan start time (UTC).
:vartype start_time: ~datetime.datetime
:ivar end_time: The scan end time (UTC).
:vartype end_time: ~datetime.datetime
:ivar errors: The scan errors.
:vartype errors: list[~azure.mgmt.sql.models.VulnerabilityAssessmentScanError]
:ivar storage_container_path: The scan results storage container path.
:vartype storage_container_path: str
:ivar number_of_failed_security_checks: The number of failed security checks.
:vartype number_of_failed_security_checks: int
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'scan_id': {'readonly': True},
'trigger_type': {'readonly': True},
'state': {'readonly': True},
'start_time': {'readonly': True},
'end_time': {'readonly': True},
'errors': {'readonly': True},
'storage_container_path': {'readonly': True},
'number_of_failed_security_checks': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'scan_id': {'key': 'properties.scanId', 'type': 'str'},
'trigger_type': {'key': 'properties.triggerType', 'type': 'str'},
'state': {'key': 'properties.state', 'type': 'str'},
'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'},
'end_time': {'key': 'properties.endTime', 'type': 'iso-8601'},
'errors': {'key': 'properties.errors', 'type': '[VulnerabilityAssessmentScanError]'},
'storage_container_path': {'key': 'properties.storageContainerPath', 'type': 'str'},
'number_of_failed_security_checks': {'key': 'properties.numberOfFailedSecurityChecks', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(VulnerabilityAssessmentScanRecord, | |
import builtins
import math
import numpy as np
import matplotlib.animation as animation
import matplotlib.patches as patches
import matplotlib.lines as mlines
import matplotlib.transforms as transforms
import random as rand
def to_plt_color(args):
max_value = 255.0
color = None
if len(args) == 1:
r = args[0]
g = args[0]
b = args[0]
color = (r / max_value, g / max_value, b / max_value)
elif len(args) == 2:
r = args[0]
g = args[0]
b = args[0]
a = args[1]
color = (r / max_value, g / max_value, b / max_value, a / max_value)
elif len(args) == 3:
r = args[0]
g = args[1]
b = args[2]
color = (r / max_value, g / max_value, b / max_value)
elif len(args) == 4:
r = args[0]
g = args[1]
b = args[2]
a = args[3]
color = (r / max_value, g / max_value, b / max_value, a / max_value)
else:
assert False
return color
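# Illustrative conversions (Processing-style 0-255 components to matplotlib 0-1 tuples):
#   to_plt_color((255,))           -> (1.0, 1.0, 1.0)            # grayscale
#   to_plt_color((255, 128))       -> (1.0, 1.0, 1.0, ~0.5)      # grayscale + alpha
#   to_plt_color((255, 0, 0, 255)) -> (1.0, 0.0, 0.0, 1.0)       # RGBA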
# from https://github.com/Abdur-rahmaanJ/ppython
def random(*args):
if len(args) == 0:
return rand.random()
elif len(args) == 1:
endnum = args[0]
return rand.random() * endnum
elif len(args) == 2:
s = args[0]
e = args[1]
return rand.random() * (e - s) + s
def dist(x1, y1, x2, y2):
squared_delta_x = (x2 - x1) ** 2
squared_delta_y = (y2 - y1) ** 2
    return math.sqrt(squared_delta_x + squared_delta_y)
def lerp(start, stop, amt):
return (stop - start) * amt + start
def cos(angle):
return math.cos(angle)
def sin(angle):
return math.sin(angle)
def radians(degrees):
return degrees / 180.0 * math.pi
class PFont:
def __init__(self, name):
self.name = name
class PatchCache:
def __init__(self, ax):
self.ax = ax
self.patches = []
self.updates = []
self.cache = None
self._background = None
self._buffer = None
def begin(self):
self.updates = []
def flush(self):
self.flush_buffer()
if self.cache is None:
return
remainps = [cp for _, cp in self.cache]
[rp.remove() for rp in reversed(remainps)]
self.cache = None
def clear(self):
self.cache = self.patches
self.patches = []
self.updates = []
self._buffer = None
def flush_buffer(self):
self._add_buffer_as_patch(self._buffer)
self._buffer = None
def add_rect(self, x, y, w, h, kwargs):
if not self._should_push_buffer(x, y, w, h, kwargs):
self.flush_buffer()
if self._buffer is None:
self._buffer = (x, y, w, h, kwargs)
return
x0, y0, w0, h0, kwargs0 = self._buffer
if x0 == x and w0 == w:
self._buffer = (x, min(y, y0), w, max(y + h, y0 + h0) - min(y, y0), kwargs)
if y0 == y and h0 == h:
self._buffer = (min(x, x0), y, max(x + w, x0 + w0) - min(x, x0), h, kwargs)
def _should_push_buffer(self, x, y, w, h, kwargs):
if self._buffer is None:
return True
x0, y0, w0, h0, kwargs0 = self._buffer
if kwargs0 != kwargs:
return False
if 'linewidth' in kwargs and kwargs['linewidth'] is not None and kwargs['linewidth'] > 0:
return False
if x0 == x and w0 == w and (y + h == y0 or y0 + h0 == y):
return True
if y0 == y and h0 == h and (x + w == x0 or x0 + w0 == x):
return True
return False
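    # Illustrative merge behavior (sketch): two stroke-less rectangles that share an edge
    # and use identical kwargs are coalesced in the buffer before becoming a patch, e.g.
    #   add_rect(0, 0, 10, 5, kw) followed by add_rect(0, 5, 10, 5, kw)
    # buffers a single (0, 0, 10, 10) rectangle instead of creating two patches.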
def _add_buffer_as_patch(self, buffer):
if buffer is None:
return
x, y, w, h, kwargs = buffer
self._add_rect(x, y, w, h, kwargs)
def _is_rect(self, points):
if len(points) != 4:
return False
p = points + points[:1]
for p1, p2 in zip(p, p[1:]):
if p1[0] != p2[0] and p1[1] != p2[1]:
return False
return True
def _add_rect(self, x, y, w, h, kwargs):
cached = self._get_cache('rect')
if cached is not None:
changed = False
oldx, oldy = cached.get_xy()
if oldx != x or oldy != y:
cached.set_xy((x, y))
changed = True
if cached.get_width() != w:
cached.set_width(w)
changed = True
if cached.get_height() != h:
cached.set_height(h)
changed = True
if self._set_kwargs(cached, kwargs):
changed = True
p = cached
if changed:
self.updates.append(p)
else:
rect = patches.Rectangle((x, y), w, h, **kwargs)
p = self.ax.add_patch(rect)
self.updates.append(p)
self.patches.append(('rect', p))
def add_ellipse(self, x, y, w, h, kwargs):
self.flush_buffer()
cached = self._get_cache('ellipse{},{}'.format(w, h))
if cached is not None:
cached.set_center((x, y))
self._set_kwargs(cached, kwargs)
p = cached
else:
p = patches.Ellipse((x, y), w, h, **kwargs)
p = self.ax.add_patch(p)
self.patches.append(('ellipse{},{}'.format(w, h), p))
self.updates.append(p)
def add_polygon(self, xy, closed, kwargs):
self.flush_buffer()
self._add_polygon(xy, closed, kwargs)
def _add_polygon(self, xy, closed, kwargs):
cached = self._get_cache('polygon')
if cached is not None:
cached.set_xy(xy)
cached.set_closed(closed)
self._set_kwargs(cached, kwargs)
p = cached
else:
p = patches.Polygon(xy,
closed=closed,
**kwargs)
p = self.ax.add_patch(p)
self.patches.append(('polygon', p))
self.updates.append(p)
def _get_cache(self, typename):
if self.cache is None or len(self.cache) == 0:
return None
cachetype, p = self.cache.pop(0)
if cachetype == typename:
return p
# Remove all cached objects when any requested objects not found
remainps = [cp for _, cp in self.cache] + [p]
[rp.remove() for rp in reversed(remainps)]
self.cache = None
return None
def _set_kwargs(self, patch, kwargs):
changed = False
for k, v in kwargs.items():
if getattr(patch, 'get_{}'.format(k))() == v:
continue
getattr(patch, 'set_{}'.format(k))(v)
changed = True
return changed
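# Illustrative usage sketch (added for clarity; not part of the original module).
# It shows how PatchCache buffers axis-aligned rectangles and merges two
# vertically adjacent ones into a single matplotlib Rectangle. It assumes
# matplotlib is importable, as the module already relies on it.
def _patch_cache_example():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    cache = PatchCache(ax)
    cache.begin()
    style = {'facecolor': (1.0, 0.0, 0.0)}
    cache.add_rect(0, 0, 10, 10, style)   # buffered, nothing drawn yet
    cache.add_rect(0, 10, 10, 10, style)  # merged: buffer now covers (0, 0, 10, 20)
    cache.flush_buffer()                  # a single Rectangle((0, 0), 10, 20) is added to ax
    return ax.patches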
class DrawingContextBase:
def __init__(self):
self.CLOSE = 1
self.P2D = 1
self.P3D = 2
self.LEFT = 1
self.CENTER = 2
self.RIGHT = 3
self.TOP = 4
self.BOTTOM = 5
self.BASELINE = 6
class DrawingContext(DrawingContextBase):
def __init__(self, fig, ax, figsize):
super().__init__()
self.fig = fig
self.ax = ax
self.width, self.height = figsize
self.patches = PatchCache(ax)
self.texts = []
self._antialiased = True
self._fill = (1.0, 1.0, 1.0)
self._stroke = (0.0, 0.0, 0.0)
self._strokeSize = 1
self._points = []
self._transforms = []
self._currentTransform = None
self._textAlignX = 'left'
self._textAlignY = 'baseline'
def clear(self):
self._transforms = []
self._currentTransform = None
self.patches.begin()
def flush(self):
self.patches.flush()
def _to_patch_args(self, line=False):
args = {'antialiased': self._antialiased}
if not line:
if self._fill is not None:
args['facecolor'] = self._fill
args['fill'] = True
else:
args['fill'] = False
if self._stroke is not None:
args['linewidth'] = self._strokeSize
args['edgecolor' if not line else 'color'] = self._stroke
else:
args['linewidth'] = 0
if self._currentTransform is not None:
trans, rotate = self._currentTransform
args['transform'] = rotate + trans + self.ax.transData
else:
args['transform'] = self.ax.transData
return args
def background(self, *args):
for t in self.texts:
t.remove()
self.texts = []
self.ax.lines.clear()
self.patches.clear()
self.patches.add_rect(0, 0, self.width, self.height, {
'facecolor': to_plt_color(args)
})
def fill(self, *args):
color = to_plt_color(args)
self._fill = color
def stroke(self, *args):
color = to_plt_color(args)
self._stroke = color
def strokeSize(self, thickness):
self._strokeSize = thickness
def noStroke(self):
self._stroke = None
def noFill(self):
self._fill = None
def noSmooth(self):
self._antialiased = False
def rect(self, x, y, w, h):
self.patches.add_rect(x, y, w, h, self._to_patch_args())
def ellipse(self, x, y, w, h):
self.patches.add_ellipse(x, y, w, h, self._to_patch_args())
def line(self, x1, y1, x2, y2):
self.patches.flush_buffer()
line = mlines.Line2D([x1, x2], [y1, y2], **self._to_patch_args(line=True))
self.ax.add_line(line)
def beginShape(self):
self._points = []
def vertex(self, x, y):
self._points.append((x, y))
def endShape(self, mode=None):
self.patches.add_polygon([[x, y] for x, y in self._points],
closed=mode == self.CLOSE,
kwargs=self._to_patch_args())
def size(self, width, height, mode=None):
if mode is None:
mode = self.P2D
assert mode != self.P3D, 'Not supported'
self.ax.set_xlim(0, width)
self.ax.set_ylim(0, height)
self.ax.set_aspect(1)
builtins.width = width
self.width = width
builtins.height = height
self.height = height
def pushMatrix(self):
self.patches.flush_buffer()
self._transforms.append(self._currentTransform)
if self._currentTransform is not None:
trans, rotate = self._currentTransform
self._currentTransform = (transforms.Affine2D(trans.get_matrix()), transforms.Affine2D(rotate.get_matrix()))
def popMatrix(self):
self.patches.flush_buffer()
self._currentTransform = self._transforms.pop()
def translate(self, dx, dy):
self.patches.flush_buffer()
trans, rotate = self._getTransform()
trans.translate(dx, dy)
def rotate(self, theta):
self.patches.flush_buffer()
trans, rotate = self._getTransform()
rotate.rotate(theta)
def _getTransform(self):
if self._currentTransform is None:
self._currentTransform = (transforms.Affine2D(), transforms.Affine2D())
return self._currentTransform
def createFont(self, name, size=None, smooth=None, charset=None):
pass
def textAlign(self, alignX, alignY):
halign = {self.LEFT: 'left', self.CENTER: 'center', self.RIGHT: 'right'}
valign = {self.TOP: 'top', self.CENTER: 'center', self.BOTTOM: 'bottom', self.BASELINE: 'baseline'}
self._textAlignX = halign[alignX]
self._textAlignY = 'baseline' if alignY is None else valign[alignY]
def text(self, *args):
self.patches.flush_buffer()
if len(args) == 3:
t = self.ax.text(args[1], args[2], args[0],
color=self._fill,
horizontalalignment=self._textAlignX,
verticalalignment=self._textAlignY)
self.texts.append(t)
else:
raise NotImplementedError('text() currently supports only (string, x, y) arguments')
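# Minimal sketch (illustrative only) of driving DrawingContext with
# Processing-style calls. It assumes a matplotlib figure/axes pair, that
# to_plt_color() (defined elsewhere in this module) accepts both grayscale and
# RGB argument forms, and a matplotlib version where Axes.lines.clear() is
# still permitted, since background() calls it.
def _drawing_context_example():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    dc = DrawingContext(fig, ax, (100, 100))
    dc.clear()
    dc.background(255)        # paint a full-canvas background rectangle
    dc.fill(255, 0, 0)        # subsequent shapes are filled red
    dc.noStroke()
    dc.rect(10, 10, 30, 20)
    dc.ellipse(60, 60, 20, 20)
    dc.flush()                # flush buffered patches and drop any unused cache
    return fig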
class DrawingContextProxy(DrawingContextBase):
def __init__(self):
super().__init__()
self.base = None
def clear(self):
if self.base is None:
return
self.base.clear()
def flush(self):
if self.base is None:
return
self.base.flush()
def background(self, *args):
if self.base is None:
return
self.base.background(*args)
def fill(self, *args):
if self.base is None:
return
self.base.fill(*args)
def stroke(self, *args):
if self.base is None:
return
self.base.stroke(*args)
def strokeSize(self, thickness):
if self.base is None:
return
self.base.strokeSize(thickness)
def noStroke(self):
if self.base is None:
return
self.base.noStroke()
def noFill(self):
if self.base is None:
return
self.base.noFill()
def noSmooth(self):
if self.base is None:
return
self.base.noSmooth()
def | |
WHERE Price >= (?);'.format(table)
try:
result_sum = conn.execute(sql_a1 + sql_b, (base_value,))
candidates = conn.execute(sql_a2 + sql_b, (base_value,))
if candidates is None:
return None
# The number of items.
total = result_sum.fetchone()[0]
# Roll a random number in the total range.
roll = random.randrange(total)
# Go through the candidates until an item is found.
accum = 0
for c in candidates:
count = c['Count']
accum += count
if roll < accum:
return DatabaseItem(c['Subtype'], c['Item'], c['Price'])
#return '{0}: {1}; {2}'.format(c['Subtype'], c['Item'],
# str(Price(c['Price'])) )
return None
except Exception:
# On a database error, treat it as "no matching item".
return None
def generate_treasure_item(conn, expression, roller, listener):
results = []
m = RE_TREASURE_COINS.match(expression)
if m:
# 1 is the dice expression
dice = m.group(1)
# 2 is the entire multiplier expression
# 3 is the multiplier symbol
# 4 is the coin amount
# 5 is the coin type
coefficient = rollers.rollDice(dice)
multiplier = m.group(4)
if multiplier is None:
multiplier = 1
else:
multiplier = int(multiplier.replace(",",""))
coinage = m.group(5)
results.append(expression + ': ' + str(coefficient * multiplier) + ' ' + coinage)
return results
m = RE_TREASURE_PRETTIES.match(expression)
if m:
# 1 is number as word
# 2 is grade number
# 3 is object type
count = m.group(1)
if count is None: count = 'one'
count = MAP_NUMBER_WORD_DECIMAL[count]
grade = 'grade ' + m.group(2)
kind = m.group(3)
for i in range(count):
x = generate_specific_item(conn, grade, kind, roller, listener)
results.append(unicode(x))
return results
m = RE_TREASURE_MAGIC.match(expression)
if m:
count = m.group(1)
if count is None: count = 'one'
count = MAP_NUMBER_WORD_DECIMAL[count]
degree = m.group(2)
strength = m.group(3)
kind = m.group(4)
for i in range(count):
x = generate_specific_item(conn, degree + ' ' + strength, kind, roller, listener)
results.append(unicode(x))
return results
m = RE_TREASURE_MASTERWORK.match(expression)
if m:
kind = m.group(1).lower()
table = None
where = ''
where_vars = None
masterwork_fee = 0
# TODO fix 'masterwork shield'
if kind in 'light armor or shield':
table = TABLE_RANDOM_ARMOR_OR_SHIELD
# Bind only the values; column names cannot be passed as SQL parameters.
where = 'WHERE (Subtype = ?) OR (Subtype = ?)'
where_vars = ('light armor', 'shield')
masterwork_fee = 150
elif kind == 'shield':
table = TABLE_RANDOM_ARMOR_OR_SHIELD
where = 'WHERE (Subtype = ?)'
where_vars = (kind,)
masterwork_fee = 150
elif kind == 'medium armor':
table = TABLE_RANDOM_ARMOR_OR_SHIELD
where = 'WHERE (Subtype = ?)'
where_vars = (kind,)
masterwork_fee = 150
elif kind == 'heavy armor':
table = TABLE_RANDOM_ARMOR_OR_SHIELD
where = 'WHERE (Subtype = ?)'
where_vars = (kind,)
masterwork_fee = 150
elif kind == 'weapon':
table = TABLE_RANDOM_WEAPON
where = ''
where_vars = None
masterwork_fee = 300
else:
#results.append('*** ' + kind + ' ***')
return
result = table.find_flat_custom(conn, where, where_vars)
if result:
item = result['Result']
price = Price(result['Price'])
price.add(masterwork_fee)
results.append('Masterwork ' + item + '; ' + str(price))
return results
if len(results) == 0:
# For regular usage.
results.append('Failed to generate for: ' + expression)
# For debugging:
#results.append("unknown: ("+expression+")[" + ':'.join([hex(ord(a)) for a in expression]) + "]")
return results
def create_item(kind):
# Look up the kind of item for its official name.
(main_kind, subtype) = ITEM_SUBTYPE_MAP[kind.lower()]
# Create the appropriate Item subclass.
subclass = ITEM_SUBCLASSES[main_kind]
result = subclass.__new__(subclass)
result.__init__()
# Set the subtype (applicable only sometimes)
result.subtype = subtype
return result
def get_item_type(conn, strength, roll):
cursor = conn.cursor()
columns = None
if strength == 'minor':
columns = ('Minor_low', 'Minor_high')
elif strength == 'medium':
columns = ('Medium_low', 'Medium_high')
elif strength == 'major':
columns = ('Major_low', 'Major_high')
else:
# TODO an exception would be nice
return ''
# Search the database.
cursor.execute('''SELECT Item from Item_Types WHERE (? >= {0}) AND
(? <= {1});'''.format(*columns), (roll, roll) )
one = cursor.fetchone()
if one is not None:
return one['Item']
return None
def item_str(x):
# Convert the item to a string (it has a __str__ method).
s = str(x)
# Some characters cause problems in Windows' command prompt (sigh).
# Replace problem characters with their equivalents.
s = s.replace('\u2019', "'")
return s
def print_item(x):
'''Prints an item to standard out, converting it to a string first.
Unicode-related exceptions are caught; these occur when standard out
happens to be the Windows console, which is not Unicode-aware.'''
s = item_str(x)
try:
print(s)
except UnicodeError:
print('Error: Unable to print item ({0}).'.format(x.kind()))
def rolls_str(x):
return '[' + ','.join([str(t[1]) for t in x.rolls]) + ']'
def print_rolls(x):
'''Prints an item's roll history.'''
print(rolls_str(x), sep='', end='')
SPECIAL_FAMILIES = [
['Designating, lesser', 'Designating, greater'],
['Energy resistance', 'Energy resistance, improved', 'Energy resistance, greater'],
['Fortification (light)', 'Fortification (moderate)', 'Fortification (heavy)'],
['Lucky', 'Lucky, greater'],
['Reliable', 'Reliable, greater'],
['Shadow', 'Shadow, improved', 'Shadow, greater'],
['Slick', 'Slick, improved', 'Slick, greater'],
['Spell resistance (13)', 'Spell resistance (15)', 'Spell resistance (17)', 'Spell resistance (19)']
]
# Note on overlapping possibilities:
# The only time a weapon can do this is with:
# This is in greater major.
# A +4 and a +1: ammo-no, melee-no, ranged-LUCKY,RELIABLE,DESIGNATING
# A +3 and a +2: ammo-no, melee-no, ranged-
# and armor:
# Can't happen
def filter_specials(specials):
'''Removes weaker versions of special abilities.'''
del_keys = set()
# Iterate over the keys.
for special in specials.keys():
# Search families of specials.
for family in SPECIAL_FAMILIES:
# If we got the right family,
if special in family:
# Look for "overlaps".
special_i = family.index(special)
for i in range(len(family)):
test = family[i]
# See if this is in the specials dict.
if test in specials.keys() and i > special_i:
# There is a better entry
del_keys.add(special)
#print ('Eliminating redundant', special, 'since', test, 'is also present in', specials)
# Now, delete all the keys we have picked out.
for k in del_keys:
del specials[k]
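# Illustrative example (not in the original file): filter_specials drops the
# weaker members of a family whenever a stronger member is also present.
def _filter_specials_example():
    specials = {'Slick': True, 'Slick, greater': True, 'Lucky': True}
    filter_specials(specials)
    # 'Slick' is removed because 'Slick, greater' is present;
    # 'Lucky' stays because 'Lucky, greater' is absent.
    return sorted(specials)   # ['Lucky', 'Slick, greater']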
#
# Classes
#
class BadPrice(Exception):
def __init__(self, message):
Exception.__init__(self, message)
class Price(object):
def __init__(self, initial_value, enhancement_type=''):
self.enhancement_type = enhancement_type
self.gold = 0.0
self.enhancement = 0
self.piece_expr = re.compile(r'(((\d{1,3},)*\d+) *(pp|gp|sp|cp)?[, ]*)', re.I)
# Initialize with the provided string
self.add(initial_value)
def __lt__(self, other):
return self.gold < other.gold
def __le__(self, other):
return self.gold <= other.gold
def __str__(self):
f = self.as_float()
if math.isnan(f):
return '<error> gp'
return locale.format_string('%.2f', f, grouping=True) + ' gp'
def as_float(self):
cost = self.gold
if self.enhancement > 0:
temp = (self.enhancement ** 2) * 1000
if self.enhancement_type == 'weapon':
temp *= 2
cost += temp
return cost
def add(self, price_piece):
# If the value provided is int or float, add it directly.
if isinstance(price_piece, (int, float)):
self.gold += float(price_piece)
# None or the empty string is a non-value; check this before calling
# string methods on the argument.
elif price_piece is None or price_piece == '':
self.gold = float('nan')
# If it's a bonus, the price depends on the sum, so it defers.
elif price_piece.endswith(' bonus'):
self.add_enhancement(price_piece)
# Otherwise, it might be a pp/gp/sp/cp string.
else:
self.add_expression(price_piece)
def multiply(self, factor):
self.gold *= (float(factor))
def add_expression(self, expr):
for piece in self.piece_expr.finditer(expr):
# Group 2 is the count, group 4 is the type.
scale = 0.0
count = float(piece.group(2).replace(',',''))
coin_type = piece.group(4)
if coin_type == 'pp': scale = 10.00
elif coin_type == 'gp': scale = 1.00
elif coin_type == 'sp': scale = 0.10
elif coin_type == 'cp': scale = 0.01
elif coin_type is None: scale = 1.00
self.gold += (count * scale)
def add_enhancement(self, price_str):
if isinstance(price_str, int):
self.enhancement += price_str
return
match = re.match(r'\+(\d+) bonus', price_str)
if match:
self.enhancement += int(match.group(1))
else:
raise BadPrice('cannot extract enhancement bonus from ' +
price_str)
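# Worked example (illustrative; not part of the original file). Price folds
# mixed coin expressions into gold pieces (1 pp = 10 gp, 1 sp = 0.1 gp,
# 1 cp = 0.01 gp) and defers "+N bonus" enhancements, which add N^2 * 1000 gp
# (doubled for weapons) when the total is read back.
def _price_example():
    p = Price('2 pp, 305 gp, 50 sp', enhancement_type='weapon')
    p.add('+1 bonus')
    # 20 + 305 + 5 = 330 gp, plus 1^2 * 1000 * 2 = 2000 gp of enhancement.
    return p.as_float()   # 2330.0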
class Table(object):
def __init__(self, table):
self.table = table
self.cache = {}
self.cache_style = CACHE_TYPE
self.query_nostrength = '''SELECT * FROM {0} WHERE (? >= Roll_low) AND
(? <= Roll_high);'''.format(self.table)
self.query_strength = '''SELECT * FROM {0} WHERE (? >= Roll_low) AND
(? <= Roll_high) AND (? = Strength);'''.format(self.table)
def find_roll(self, conn, roll, strength, purpose, listener):
# If caching is enabled, go for it
if ENABLE_CACHE:
if self.cache_style == 1:
if strength not in self.cache:
self.cache[strength] = []
for line in self.cache[strength]:
if roll >= line['low'] and roll <= line['high']:
return line['result']
elif self.cache_style == 2:
if strength not in self.cache:
self.cache[strength] = {}
if strength in self.cache:
a = self.cache[strength]
if roll in a:
return a[roll]
cursor = conn.cursor()
if strength is None:
cursor.execute(self.query_nostrength, (roll, roll))
else:
cursor.execute(self.query_strength, (roll, roll, strength))
result | |
= tf.abs(new-curr_height)//2 if height else tf.abs(new-curr_width)//2
offset_x = 0 if height else abs_diff
offset_y = abs_diff if height else 0
# We process height first, so always pad/crop to new height.
target_height = new
# We process height first, so pad/crop to new width only if not doing height.
target_width = curr_width if height else new
if crop:
image = tf.image.crop_to_bounding_box(
image, offset_y, offset_x, target_height, target_width)
else:
image = tf.image.pad_to_bounding_box(
image, offset_y, offset_x, target_height, target_width)
return image
def get_central_bbox(min_side, new_size):
"""Gets the central bounding box for an image.
If image is square, returns bounding box [0,0,1,1].
Otherwise, returns the bounding box containing the central
smallest side x smallest side square.
Args:
min_side: Int, size of smallest side in pixels.
new_size: Int, resize image to a square of new_size x new_size pixels.
Returns:
bbox: A 4-D Int `Tensor`, holding the coordinates of the central bounding
box.
"""
max_shape = tf.cast(new_size, tf.float32)
min_shape = tf.cast(min_side, tf.float32)
top_xy = ((max_shape-min_shape)/2)/max_shape
bottom_xy = (min_shape+(max_shape-min_shape)/2)/max_shape
# Create a bbox for the center region of interest.
bbox = tf.stack([[[top_xy, top_xy, bottom_xy, bottom_xy]]])
bbox.set_shape([1, 1, 4])
return bbox
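# Worked example (illustrative; assumes the TF 1.x graph/session API already
# used throughout this file). For a 1920x1080 frame padded to max_scale=1.5 of
# the central square, the new side is int(sqrt(1.5) * 1080) = 1322 px, and the
# original 1080x1080 center maps to normalized coordinates ~[0.0915, 0.9085].
def _central_bbox_example():
    bbox = get_central_bbox(min_side=1080, new_size=1322)
    with tf.Session() as sess:
        return sess.run(bbox)  # ~[[[0.0915, 0.0915, 0.9085, 0.9085]]]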
def pad_to_max(image, max_scale):
"""Pads an image to max_scale times the current center crop size.
E.g.: For an image with dimensions 1920x1080 and a max_scale of 1.5,
returns a square image whose area is 1.5 * (1080 x 1080) pixels.
Args:
image: 3-D float32 `Tensor` image.
max_scale: Float, maximum scale of the image, as a multiplier on the
central bounding box.
Returns:
image: 3-D float32 `Tensor` image.
"""
orig_shape = tf.shape(image)
orig_height = orig_shape[0]
orig_width = orig_shape[1]
# Find the smallest side and corresponding new size.
min_side = tf.cast(tf.minimum(orig_height, orig_width), tf.float32)
new_shape = tf.cast(tf.sqrt(max_scale*min_side*min_side), tf.int32)
# Crop or pad height.
# pylint: disable=g-long-lambda
image = tf.cond(
orig_height >= new_shape,
lambda: crop_or_pad(
image, orig_height, orig_width, new_shape, height=True, crop=True),
lambda: crop_or_pad(
image, orig_height, orig_width, new_shape, height=True, crop=False))
# Crop or pad width.
image = tf.cond(
orig_width >= new_shape,
lambda: crop_or_pad(
image, orig_height, orig_width, new_shape, height=False, crop=True),
lambda: crop_or_pad(
image, orig_height, orig_width, new_shape, height=False, crop=False))
# Get the bounding box of the original centered box in the new resized image.
original_bounding_box = get_central_bbox(min_side, new_shape)
return image, original_bounding_box
def scale_up_augmentation(image, max_scale):
"""Scales an image randomly >100% up to some max scale."""
# Pad to max size.
image, original_central_bbox = pad_to_max(image, max_scale)
# Determine area range of the augmented crop, as a percentage of the
# new max area.
# aug_max == 100% of new max area.
aug_max = 1.0
# aug_min == original_area/new_area == original_area/(max_scale*original_area)
# == 1/max_scale.
aug_min = 1.0/max_scale
area_range = (aug_min, aug_max)
# Since we're doing >100% scale, always have the full original crop in frame.
min_object_covered = 1.0
# Get a random scaled, cropped image.
image = scale_augment_crop(image, original_central_bbox, area_range,
min_object_covered)
return image
def scale_down_augmentation(image, min_scale):
"""Scales an image randomly <100% down to some min scale."""
# Crop the center, and consider the whole image the bounding box ROI.
image = crop_center(image)
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
# Determine area range of the augmented crop, as a percentage of the
# original crop center area.
# aug_max == 100% of original area.
area_range = (min_scale, 1.0)
# Get a random scaled, cropped image.
image = scale_augment_crop(image, bbox, area_range, min_scale)
return image
def augment_image_scale(image, min_scale, max_scale, p_scale_up):
"""Training time scale augmentation.
Args:
image: 3-d float tensor representing image.
min_scale: minimum scale augmentation allowed, as a fraction of the
central min_side * min_side area of the original image.
max_scale: maximum scale augmentation allowed, as a fraction of the
central min_side * min_side area of the original image.
p_scale_up: Fraction of images scaled up.
Returns:
image: The scale-augmented image.
"""
assert max_scale >= 1.0
assert min_scale <= 1.0
if min_scale == max_scale == 1.0:
tf.logging.info('Min and max scale are 1.0; skipping scale augmentation.')
# Do no augmentation, just crop the center.
return crop_center(image)
elif (max_scale == 1.0) and (min_scale < 1.0):
tf.logging.info('Max scale is 1.0, only scale down augment.')
# Always do <100% augmentation.
return scale_down_augmentation(image, min_scale)
elif (min_scale == 1.0) and (max_scale > 1.0):
tf.logging.info('Min scale is 1.0, only scale up augment.')
# Always do >100% augmentation.
return scale_up_augmentation(image, max_scale)
else:
tf.logging.info('Sample both augmentations.')
# Choose to scale image up or down.
rn = tf.random_uniform([], minval=0., maxval=1., dtype=tf.float32)
image = tf.cond(rn >= p_scale_up,
lambda: scale_up_augmentation(image, max_scale),
lambda: scale_down_augmentation(image, min_scale))
return image
def decode_image(image_str):
"""Decodes a jpeg-encoded image string into a image in range [0,1]."""
# Decode jpeg string into np.uint8 tensor.
image = tf.image.decode_jpeg(image_str, channels=3)
# Convert the image to range [0,1].
if image.dtype != tf.float32:
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
return image
def decode_images(image_strs):
"""Decodes a tensor of image strings."""
return tf.map_fn(decode_image, image_strs, dtype=tf.float32)
def preprocess_training_images(images, height, width, min_scale, max_scale,
p_scale_up, aug_color=True, fast_mode=True):
"""Preprocesses a batch of images for training.
This applies training-time scale and color augmentation, crops/resizes,
and scales images to the [-1,1] range expected by pre-trained Inception nets.
Args:
images: A 4-D float32 `Tensor` holding raw images to be preprocessed.
height: Int, height in pixels to resize image to.
width: Int, width in pixels to resize image to.
min_scale: Float, minimum scale augmentation allowed, as a fraction of the
central min_side * min_side area of the original image.
max_scale: Float, maximum scale augmentation allowed, as a fraction of the
central min_side * min_side area of the original image.
p_scale_up: Float, fraction of images scaled up.
aug_color: Whether or not to do color augmentation.
fast_mode: Boolean, avoids slower ops (random_hue and random_contrast).
Returns:
preprocessed_images: A 4-D float32 `Tensor` holding preprocessed images.
"""
def _prepro_train(im):
"""Map this preprocessing function over each image in the batch."""
return preprocess_training_image(
im, height, width, min_scale, max_scale, p_scale_up,
aug_color=aug_color, fast_mode=fast_mode)
return tf.map_fn(_prepro_train, images)
def preprocess_training_image(
image, height, width, min_scale, max_scale, p_scale_up,
aug_color=True, fast_mode=True):
"""Preprocesses an image for training.
Args:
image: A 3-d float tensor representing the image.
height: Target image height.
width: Target image width.
min_scale: Minimum scale of bounding box (as a percentage of full
bounding box) used to crop image during scale augmentation.
max_scale: Maximum scale of bounding box (as a percentage of full
bounding box) used to crop image during scale augmentation.
p_scale_up: Fraction of images to scale >100%.
aug_color: Whether or not to do color augmentation.
fast_mode: Avoids slower ops (random_hue and random_contrast).
Returns:
scaled_image: A scaled image tensor in the range [-1,1].
"""
# Get a random scaled, cropped image.
image = augment_image_scale(image, min_scale, max_scale, p_scale_up)
# Resize image to desired height, width.
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [height, width], align_corners=False)
image = tf.squeeze(image, [0])
# Optionally augment the color.
# pylint: disable=g-long-lambda
if aug_color:
image = apply_with_random_selector(
image,
lambda x, ordering: distort_color(
x, ordering, fast_mode=fast_mode), num_cases=4)
# Scale to [-1,1] range as expected by inception.
scaled_image = scale_to_inception_range(image)
return scaled_image
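# Minimal usage sketch (illustrative; assumes the TF 1.x APIs already used in
# this file, and that all images in the batch share the same spatial size so
# tf.map_fn can stack them). It decodes a batch of JPEG-encoded strings and
# applies the training-time preprocessing above; output values lie in [-1, 1].
def _example_training_pipeline(jpeg_strings):
    images = decode_images(jpeg_strings)  # [batch, H, W, 3] floats in [0, 1]
    return preprocess_training_images(
        images, height=299, width=299,
        min_scale=0.8, max_scale=1.2, p_scale_up=0.5,
        aug_color=True, fast_mode=True)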
def preprocess_test_image(image, height, width, crop_strategy):
"""Preprocesses an image for test/inference.
Args:
image: A 3-d float tensor representing the image.
height: Target image height.
width: Target image width.
crop_strategy: String, name of the strategy used to crop test-time images.
Can be: 'crop_center', 'pad', 'pad_200', 'pad_crop_central'.
Returns:
scaled_image: A scaled image tensor in the range [-1,1].
"""
image = crop_image_by_strategy(image, crop_strategy)
# Resize.
image = resize_image(image, height, width)
# Scale the input range to [-1,1] as expected by inception.
image = scale_to_inception_range(image)
return image
def preprocess_test_images(images, height, width, crop_strategy):
"""Apply test-time preprocessing to a batch of images.
This crops images (given a named strategy for doing so), resizes them,
and scales them to the [-1,1] range expected by pre-trained Inception nets.
Args:
images: A 4-D float32 `Tensor` holding raw images to be preprocessed.
height: Int, height in pixels to resize image to.
width: Int, width in pixels to resize image to.
crop_strategy: String, name of the strategy used to crop test-time images.
Can be: 'crop_center', 'pad', 'pad_200', 'pad_crop_central'.
Returns:
preprocessed_images: A 4-D float32 `Tensor` holding preprocessed images.
"""
def _prepro_test(im):
"""Map this preprocessing function over each image in the batch."""
return preprocess_test_image(im, height, | |
'SEG'],)),
'ORD_O02_RESPONSE': ('sequence',
(['ORD_O02_PATIENT', None, (0, 1), 'GRP'],
['ORD_O02_ORDER_DIET', None, (1, -1), 'GRP'],
['ORD_O02_ORDER_TRAY', None, (0, -1), 'GRP'],)),
'ORF_R04_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'ORF_R04_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (0, 1), 'SEG'],
['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['ORF_R04_OBSERVATION', None, (1, -1), 'GRP'],
['CTI', SEGMENTS['CTI'], (0, -1), 'SEG'],)),
'ORF_R04_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'ORF_R04_QUERY_RESPONSE': ('sequence',
(['ORF_R04_PATIENT', None, (0, 1), 'GRP'],
['ORF_R04_ORDER', None, (1, -1), 'GRP'],)),
'ORM_O01_CHOICE': ('choice',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['RQD', SEGMENTS['RQD'], (1, 1), 'SEG'],
['RQ1', SEGMENTS['RQ1'], (1, 1), 'SEG'],
['ODS', SEGMENTS['ODS'], (1, 1), 'SEG'],
['ODT', SEGMENTS['ODT'], (1, 1), 'SEG'],
['RXO', SEGMENTS['RXO'], (1, 1), 'SEG'],)),
'ORM_O01_INSURANCE': ('sequence',
(['IN1', SEGMENTS['IN1'], (1, 1), 'SEG'],
['IN2', SEGMENTS['IN2'], (0, 1), 'SEG'],
['IN3', SEGMENTS['IN3'], (0, -1), 'SEG'],)),
'ORM_O01_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'ORM_O01_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['ORM_O01_ORDER_DETAIL', None, (0, 1), 'GRP'],
['CTI', SEGMENTS['CTI'], (0, -1), 'SEG'],
['BLG', SEGMENTS['BLG'], (0, 1), 'SEG'],)),
'ORM_O01_ORDER_DETAIL': ('sequence',
(['ORM_O01_CHOICE', None, (1, 1), 'GRP'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['DG1', SEGMENTS['DG1'], (0, -1), 'SEG'],
['ORM_O01_OBSERVATION', None, (0, -1), 'GRP'],)),
'ORM_O01_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['ORM_O01_PATIENT_VISIT', None, (0, 1), 'GRP'],
['ORM_O01_INSURANCE', None, (0, -1), 'GRP'],
['GT1', SEGMENTS['GT1'], (0, 1), 'SEG'],
['AL1', SEGMENTS['AL1'], (0, -1), 'SEG'],)),
'ORM_O01_PATIENT_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'ORN_O02_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['RQD', SEGMENTS['RQD'], (1, 1), 'SEG'],
['RQ1', SEGMENTS['RQ1'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'ORN_O02_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'ORN_O02_RESPONSE': ('sequence',
(['ORN_O02_PATIENT', None, (0, 1), 'GRP'],
['ORN_O02_ORDER', None, (1, -1), 'GRP'],)),
'ORR_O02_CHOICE': ('choice',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['RQD', SEGMENTS['RQD'], (1, 1), 'SEG'],
['RQ1', SEGMENTS['RQ1'], (1, 1), 'SEG'],
['RXO', SEGMENTS['RXO'], (1, 1), 'SEG'],
['ODS', SEGMENTS['ODS'], (1, 1), 'SEG'],
['ODT', SEGMENTS['ODT'], (1, 1), 'SEG'],)),
'ORR_O02_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['ORR_O02_CHOICE', None, (1, 1), 'GRP'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['CTI', SEGMENTS['CTI'], (0, -1), 'SEG'],)),
'ORR_O02_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'ORR_O02_RESPONSE': ('sequence',
(['ORR_O02_PATIENT', None, (0, 1), 'GRP'],
['ORR_O02_ORDER', None, (1, -1), 'GRP'],)),
'ORS_O02_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['RQD', SEGMENTS['RQD'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'ORS_O02_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'ORS_O02_RESPONSE': ('sequence',
(['ORS_O02_PATIENT', None, (0, 1), 'GRP'],
['ORS_O02_ORDER', None, (1, -1), 'GRP'],)),
'ORU_R01_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'ORU_R01_ORDER_OBSERVATION': ('sequence',
(['ORC', SEGMENTS['ORC'], (0, 1), 'SEG'],
['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['ORU_R01_OBSERVATION', None, (1, -1), 'GRP'],
['CTI', SEGMENTS['CTI'], (0, -1), 'SEG'],)),
'ORU_R01_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['NK1', SEGMENTS['NK1'], (0, -1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['ORU_R01_VISIT', None, (0, 1), 'GRP'],)),
'ORU_R01_PATIENT_RESULT': ('sequence',
(['ORU_R01_PATIENT', None, (0, 1), 'GRP'],
['ORU_R01_ORDER_OBSERVATION', None, (1, -1), 'GRP'],)),
'ORU_R01_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'OSR_Q06_CHOICE': ('choice',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['RXO', SEGMENTS['RXO'], (1, 1), 'SEG'],
['RQD', SEGMENTS['RQD'], (1, 1), 'SEG'],
['RQ1', SEGMENTS['RQ1'], (1, 1), 'SEG'],
['ODS', SEGMENTS['ODS'], (1, 1), 'SEG'],
['ODT', SEGMENTS['ODT'], (1, 1), 'SEG'],)),
'OSR_Q06_OBSERVATION': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['OSR_Q06_CHOICE', None, (1, 1), 'GRP'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['CTI', SEGMENTS['CTI'], (0, -1), 'SEG'],)),
'OSR_Q06_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'OSR_Q06_RESPONSE': ('sequence',
(['OSR_Q06_PATIENT', None, (0, 1), 'GRP'],
['OSR_Q06_OBSERVATION', None, (1, -1), 'GRP'],)),
'PEX_P07_ASSOCIATED_PERSON': ('sequence',
(['NK1', SEGMENTS['NK1'], (1, 1), 'SEG'],
['PEX_P07_ASSOCIATED_RX_ORDER', None, (0, 1), 'GRP'],
['PEX_P07_ASSOCIATED_RX_ADMIN', None, (0, -1), 'GRP'],
['PRB', SEGMENTS['PRB'], (0, -1), 'SEG'],
['OBX', SEGMENTS['OBX'], (0, -1), 'SEG'],)),
'PEX_P07_ASSOCIATED_RX_ADMIN': ('sequence',
(['RXA', SEGMENTS['RXA'], (1, 1), 'SEG'],
['RXR', SEGMENTS['RXR'], (0, 1), 'SEG'],)),
'PEX_P07_ASSOCIATED_RX_ORDER': ('sequence',
(['RXE', SEGMENTS['RXE'], (1, 1), 'SEG'],
['RXR', SEGMENTS['RXR'], (0, -1), 'SEG'],)),
'PEX_P07_EXPERIENCE': ('sequence',
(['PES', SEGMENTS['PES'], (1, 1), 'SEG'],
['PEX_P07_PEX_OBSERVATION', None, (1, -1), 'GRP'],)),
'PEX_P07_PEX_CAUSE': ('sequence',
(['PCR', SEGMENTS['PCR'], (1, 1), 'SEG'],
['PEX_P07_RX_ORDER', None, (0, 1), 'GRP'],
['PEX_P07_RX_ADMINISTRATION', None, (0, -1), 'GRP'],
['PRB', SEGMENTS['PRB'], (0, -1), 'SEG'],
['OBX', SEGMENTS['OBX'], (0, -1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['PEX_P07_ASSOCIATED_PERSON', None, (0, 1), 'GRP'],
['PEX_P07_STUDY', None, (0, -1), 'GRP'],)),
'PEX_P07_PEX_OBSERVATION': ('sequence',
(['PEO', SEGMENTS['PEO'], (1, 1), 'SEG'],
['PEX_P07_PEX_CAUSE', None, (1, -1), 'GRP'],)),
'PEX_P07_RX_ADMINISTRATION': ('sequence',
(['RXA', SEGMENTS['RXA'], (1, 1), 'SEG'],
['RXR', SEGMENTS['RXR'], (0, 1), 'SEG'],)),
'PEX_P07_RX_ORDER': ('sequence',
(['RXE', SEGMENTS['RXE'], (1, 1), 'SEG'],
['RXR', SEGMENTS['RXR'], (0, -1), 'SEG'],)),
'PEX_P07_STUDY': ('sequence',
(['CSR', SEGMENTS['CSR'], (1, 1), 'SEG'],
['CSP', SEGMENTS['CSP'], (0, -1), 'SEG'],)),
'PEX_P07_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'PGL_PC6_CHOICE': ('choice',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['RXO', SEGMENTS['RXO'], (1, 1), 'SEG'],)),
'PGL_PC6_GOAL': ('sequence',
(['GOL', SEGMENTS['GOL'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PGL_PC6_GOAL_ROLE', None, (0, -1), 'GRP'],
['PGL_PC6_PATHWAY', None, (0, -1), 'GRP'],
['PGL_PC6_OBSERVATION', None, (0, -1), 'GRP'],
['PGL_PC6_PROBLEM', None, (0, -1), 'GRP'],
['PGL_PC6_ORDER', None, (0, -1), 'GRP'],)),
'PGL_PC6_GOAL_ROLE': ('sequence',
(['ROL', SEGMENTS['ROL'], (1, 1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PGL_PC6_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'PGL_PC6_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['PGL_PC6_ORDER_DETAIL', None, (0, 1), 'GRP'],)),
'PGL_PC6_ORDER_DETAIL': ('sequence',
(['PGL_PC6_CHOICE', None, (1, 1), 'GRP'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PGL_PC6_ORDER_OBSERVATION', None, (0, -1), 'GRP'],)),
'PGL_PC6_ORDER_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PGL_PC6_PATHWAY': ('sequence',
(['PTH', SEGMENTS['PTH'], (1, 1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PGL_PC6_PATIENT_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'PGL_PC6_PROBLEM': ('sequence',
(['PRB', SEGMENTS['PRB'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PGL_PC6_PROBLEM_ROLE', None, (0, -1), 'GRP'],
['PGL_PC6_PROBLEM_OBSERVATION', None, (0, -1), 'GRP'],)),
'PGL_PC6_PROBLEM_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'PGL_PC6_PROBLEM_ROLE': ('sequence',
(['ROL', SEGMENTS['ROL'], (1, 1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PIN_I07_GUARANTOR_INSURANCE': ('sequence',
(['GT1', SEGMENTS['GT1'], (0, -1), 'SEG'],
['PIN_I07_INSURANCE', None, (1, -1), 'GRP'],)),
'PIN_I07_INSURANCE': ('sequence',
(['IN1', SEGMENTS['IN1'], (1, 1), 'SEG'],
['IN2', SEGMENTS['IN2'], (0, 1), 'SEG'],
['IN3', SEGMENTS['IN3'], (0, 1), 'SEG'],)),
'PIN_I07_PROVIDER': ('sequence',
(['PRD', SEGMENTS['PRD'], (1, 1), 'SEG'],
['CTD', SEGMENTS['CTD'], (0, -1), 'SEG'],)),
'PPG_PCG_CHOICE': ('choice',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['RXO', SEGMENTS['RXO'], (1, 1), 'SEG'],)),
'PPG_PCG_GOAL': ('sequence',
(['GOL', SEGMENTS['GOL'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PPG_PCG_GOAL_ROLE', None, (0, -1), 'GRP'],
['PPG_PCG_GOAL_OBSERVATION', None, (0, -1), 'GRP'],
['PPG_PCG_PROBLEM', None, (0, -1), 'GRP'],
['PPG_PCG_ORDER', None, (0, -1), 'GRP'],)),
'PPG_PCG_GOAL_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'PPG_PCG_GOAL_ROLE': ('sequence',
(['ROL', SEGMENTS['ROL'], (1, 1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PPG_PCG_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['PPG_PCG_ORDER_DETAIL', None, (0, 1), 'GRP'],)),
'PPG_PCG_ORDER_DETAIL': ('sequence',
(['PPG_PCG_CHOICE', None, (1, 1), 'GRP'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PPG_PCG_ORDER_OBSERVATION', None, (0, -1), 'GRP'],)),
'PPG_PCG_ORDER_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PPG_PCG_PATHWAY': ('sequence',
(['PTH', SEGMENTS['PTH'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PPG_PCG_PATHWAY_ROLE', None, (0, -1), 'GRP'],
['PPG_PCG_GOAL', None, (0, -1), 'GRP'],)),
'PPG_PCG_PATHWAY_ROLE': ('sequence',
(['ROL', SEGMENTS['ROL'], (1, 1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PPG_PCG_PATIENT_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'PPG_PCG_PROBLEM': ('sequence',
(['PRB', SEGMENTS['PRB'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PPG_PCG_PROBLEM_ROLE', None, (0, -1), 'GRP'],
['PPG_PCG_PROBLEM_OBSERVATION', None, (0, -1), 'GRP'],)),
'PPG_PCG_PROBLEM_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'PPG_PCG_PROBLEM_ROLE': ('sequence',
(['ROL', SEGMENTS['ROL'], (1, 1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PPP_PCB_CHOICE': ('choice',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['RXO', SEGMENTS['RXO'], (1, 1), 'SEG'],)),
'PPP_PCB_GOAL': ('sequence',
(['GOL', SEGMENTS['GOL'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PPP_PCB_GOAL_ROLE', None, (0, -1), 'GRP'],
['PPP_PCB_GOAL_OBSERVATION', None, (0, -1), 'GRP'],)),
'PPP_PCB_GOAL_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'PPP_PCB_GOAL_ROLE': ('sequence',
(['ROL', SEGMENTS['ROL'], (1, 1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PPP_PCB_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['PPP_PCB_ORDER_DETAIL', None, (0, 1), 'GRP'],)),
'PPP_PCB_ORDER_DETAIL': | |
# -*- coding: iso-8859-15 -*-
import os, re, sys
import numpy as np, scipy.sparse as sp, scipy.stats as stats
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import GridSearchCV, ParameterGrid, StratifiedKFold
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC, SVC
from sklearn.externals import joblib
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
BASE_PATH = '/'.join(CURRENT_PATH.split('/')[:-1])
DATA_PATH = BASE_PATH + '/datasets/data'
def _write_in_file(fname, content, mode='w', makedirs_recursive=True):
dir_ = '/'.join(fname.split('/')[:-1])
if not os.path.isdir(dir_) and makedirs_recursive:
os.makedirs(dir_)
with open(fname, mode) as f:
f.write(content)
def report_model_selection_results(negation_id, lexicon_id, analyzer,
word_ngram_range, char_ngram_range,
lowercase, max_df, min_df, binary,
algo, C, cv_score,
corpus):
line = '{negation_id}\t{lexicon_id}\t{analyzer}\t'.\
format(negation_id=negation_id, lexicon_id=lexicon_id,
analyzer=analyzer)
line += '({min_w},{max_w})\t({min_c},{max_c})\t'.\
format(min_w=word_ngram_range[0], max_w=word_ngram_range[1],
min_c=char_ngram_range[0], max_c=char_ngram_range[1])
line += '%s\t' % ('True' if lowercase else 'False')
line += '%.2f\t' % max_df
line += '%i\t' % min_df
line += '%s\t' % ('True' if binary else 'False')
line += '%s\t' % algo
line += '%.10f\t' % C
line += '%.4f\n' % cv_score
fname = CURRENT_PATH + '/%s-model-selection-results.tsv' % corpus
with open(fname, 'a') as f:
f.write(line)
def vectorize_tweet_collection(fname, analyzer, ngram_range, lowercase,
max_df, min_df, binary, split_underscore=True,
return_vectorizer=False):
"""Vectoriza una colección de tweets utilizando el esquema Tf-Idf.
Retorna la matriz documentos-términos calculada utilizando el esquema Tf-Idf.
La matriz retornada es dispersa, de tipo csr (scipy.sparse.csr_matrix).
paráms:
fname: str
Nombre de archivo que contiene la colección de tweets.
split_underscore: bool
Divide una palabra que tiene el prefijo NEG_. Es decir, separa la
palabra removiendo el guion bajo.
NOTA: este parámetro es válido si analyzer == 'char'
"""
vectorizer = TfidfVectorizer(analyzer=analyzer,
ngram_range=ngram_range,
lowercase=lowercase,
max_df=max_df,
min_df=min_df, binary=binary)
tweets = []
with open(fname) as f:
for tweet in f:
t = tweet.rstrip('\n').decode('utf-8')
if analyzer == 'char' and split_underscore:
t = t.replace(u'_', u' ').strip()
tweets.append(t)
if not return_vectorizer:
return vectorizer.fit_transform(tweets)
else:
return vectorizer.fit_transform(tweets), vectorizer
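# Usage sketch (illustrative; not part of the original script). It builds a
# word-level Tf-Idf matrix for a tweet collection stored one tweet per line;
# the file path below is hypothetical and only shows the expected layout.
def _vectorize_example():
    X, vectorizer = vectorize_tweet_collection(
        fname=DATA_PATH + '/train/intertass/none/tweets.txt',
        analyzer='word', ngram_range=(1, 2), lowercase=True,
        max_df=0.9, min_df=2, binary=False, return_vectorizer=True)
    return X.shape, len(vectorizer.vocabulary_)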
def perform_grid_search(estimator, features, target_labels,
param_grid='default', n_jobs=4):
# The following probabilities were computed from the results
# recorded in 'intertass-model-selection-results.tsv'.
C_values = np.random.choice(np.power(2., np.arange(-5, 10, dtype=float)),
size=6,
replace=False,
p=[0.02, 0.016, 0.104, 0.146, 0.081, 0.119, 0.214,
0.147, 0.059, 0.027, 0.019, 0.012, 0.014,
0.011, 0.011])
C_values = np.sort(C_values)
if isinstance(param_grid, str) and param_grid == 'default':
param_grid = {'C': C_values}
clf = GridSearchCV(estimator=estimator,
param_grid=param_grid,
scoring='accuracy',
n_jobs=n_jobs,
cv=5,
refit=False)
clf.fit(features, target_labels)
return clf.best_params_, clf.best_score_
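# Usage sketch (illustrative): perform_grid_search returns the best C value and
# the mean 5-fold cross-validation accuracy for the given estimator on a
# (typically sparse) feature matrix.
def _grid_search_example(features, target_labels):
    best_params, best_score = perform_grid_search(
        estimator=LogisticRegression(),
        features=features,
        target_labels=target_labels,
        n_jobs=1)
    return best_params['C'], best_score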
def build_vectorization_based_classifiers(corpus):
"""Método principal para construir clasificadores basados en vectorización.
paráms:
corpus: str
"""
corpus = corpus.lower()
##################
# ngram settings #
##################
word_ngram_range = [(1, i) for i in xrange(1, 5)]
char_ngram_range = [(i, j)
for i in xrange(2, 6) for j in xrange(2, 6) if i < j]
ngram_params = ParameterGrid({'analyzer': ['word', 'char', 'both'],
'word_ngram_idx': range(len(word_ngram_range)),
'char_ngram_idx': range(len(char_ngram_range))})
ngram_settings = []
for params in ngram_params:
if params['analyzer'] == 'word' and params['char_ngram_idx'] == 0:
ngram_settings.append('analyzer:word-word_idx:%i-char_idx:%i' %
(params['word_ngram_idx'], -1))
elif params['analyzer'] == 'char' and params['word_ngram_idx'] == 0:
ngram_settings.append('analyzer:char-word_idx:%i-char_idx:%i' %
(-1, params['char_ngram_idx']))
elif params['analyzer'] == 'both':
ngram_settings.append('analyzer:both-word_idx:%i-char_idx:%i' %
(params['word_ngram_idx'], params['char_ngram_idx']))
ngram_params = None
###################
# global settings #
###################
model_selection = ParameterGrid({'ngram_settings': ngram_settings,
'lowercase': [True, False],
'max_df': [.85, .9],
'min_df': [1, 2, 4],
'binary': [True, False]})
corpus_path = DATA_PATH + '/train/' + corpus
for negation_id in os.listdir(corpus_path):
negation_path = corpus_path + '/' + negation_id
if not os.path.isdir(negation_path):
continue
fname = negation_path + '/tweets.txt'
target_labels = np.loadtxt(negation_path + '/target-labels.dat',
dtype=int)
lexicons = []
for metaftures_fname in os.listdir(negation_path):
if re.match(r'metafeatures-lexicon-(?:[0-9]+)\.tsv$', metaftures_fname):
lexicons.append(
'-'.join(metaftures_fname.rstrip('.tsv').split('-')[1:3]))
for lexicon_id in lexicons:
metaftures_fname = negation_path + '/metafeatures-%s.tsv' % lexicon_id
metafeatures = np.loadtxt(metaftures_fname, dtype=float, delimiter='\t')
metafeatures = sp.csr_matrix(metafeatures)
random_idx = np.random.choice(len(model_selection),
size=41, replace=False)
for idx in random_idx:
params = model_selection[idx]
m = re.match('analyzer:([a-z]+)-word_idx:(-?[0-9]+)-char_idx:(-?[0-9]+)',
params['ngram_settings'])
analyzer = m.group(1)
w_idx = int(m.group(2))
c_idx = int(m.group(3))
ngram_range = None
ngrams_features = None
analyzers = ['word', 'char'] if analyzer == 'both' else [analyzer,]
for analyzer in analyzers:
if analyzer == 'word':
ngram_range = word_ngram_range[w_idx]
else:
ngram_range = char_ngram_range[c_idx]
features_ = vectorize_tweet_collection(fname=fname,
analyzer=analyzer,
ngram_range=ngram_range,
lowercase=params['lowercase'],
max_df=params['max_df'],
min_df=params['min_df'],
binary=params['binary'])
if ngrams_features is None:
ngrams_features = features_
else:
ngrams_features = sp.hstack([ngrams_features, features_],
format='csr')
features = sp.hstack([metafeatures, ngrams_features], format='csr')
algorithms = ['LinearSVC', 'LogisticRegression']
algo = np.random.choice(algorithms, p=[.37, .63])
estimator = LinearSVC() if algo == 'LinearSVC' else LogisticRegression()
best_params, best_score = perform_grid_search(
estimator=estimator,
features=features,
target_labels=target_labels)
report_model_selection_results(
negation_id=negation_id,
lexicon_id=lexicon_id,
analyzer=m.group(1),
word_ngram_range=word_ngram_range[w_idx] if w_idx != -1 else (-1, -1),
char_ngram_range=char_ngram_range[c_idx] if c_idx != -1 else (-1, -1),
lowercase=params['lowercase'],
max_df=params['max_df'],
min_df=params['min_df'],
binary=params['binary'],
algo=algo,
C=best_params['C'],
cv_score=best_score,
corpus=corpus)
def prepare_level_one_data(corpus, n_classifiers=100):
"""Prepara los datos de nivel 'uno' que utilizarán los 'ensembles'.
Los datos de nivel 'cero' corresponden a los datos originales provistos para
entrenar modelos de clasificación supervisada. Entonces, las predicciones que
se realizan durante la respectiva validación cruzada, se utilizan para entrenar
los 'ensembles'; es a esto a que llamamos datos de nivel 'uno'.
Referencias:
[1] http://docs.h2o.ai/h2o/latest-stable/h2o-docs/data-science/stacked-ensembles.html
[2] https://www.kaggle.com/general/18793 ("Strategy A")
paráms:
corpus: str
n_classifiers: int
Utilizar las predicciones de los mejores 'n' clasificadores para
preparar los datos de nivel uno.
Esta función, además de preparar los datos de nivel uno, realiza la persisten-
cia tanto de los clasificadores como de los 'vectorizadores'.
"""
corpus = corpus.lower()
corpus_path = DATA_PATH + '/train/' + corpus
# Load the model selection results.
model_selection_results = np.loadtxt(
CURRENT_PATH + '/%s-model-selection-results.tsv' % corpus,
dtype=str, delimiter='\t')
# The results are then sorted in descending order,
# keeping the corresponding indexes.
indexes = np.argsort(np.array(model_selection_results[:,-1], dtype=float))[::-1]
indexes = indexes[:n_classifiers]
persistence_path = BASE_PATH + '/model_persistence/%s' % corpus
if not os.path.isdir(persistence_path):
os.makedirs(persistence_path)
level_one_data_path = CURRENT_PATH + '/level-one-data/%s' % corpus
if not os.path.isdir(level_one_data_path):
os.makedirs(level_one_data_path)
for idx in indexes:
# Read the stored parameters.
tmp = model_selection_results[idx,:]
negation_id = tmp[0]
lexicon_id = tmp[1]
analyzer = tmp[2]
word_ngram_range =\
tuple([int(i) for i in re.sub(r'[()]', '', tmp[3]).split(',')])
char_ngram_range =\
tuple([int(i) for i in re.sub(r'[()]', '', tmp[4]).split(',')])
lowercase = True if tmp[5] == 'True' else False
max_df = float(tmp[6])
min_df = int(tmp[7])
binary = True if tmp[8] == 'True' else False
algo = tmp[9]
C = float(tmp[10])
temp = None
# Load the document collection, the "ground truth" and the "metafeatures".
negation_path = corpus_path + '/' + negation_id
if not os.path.isdir(negation_path):
continue
fname = negation_path + '/tweets.txt'
target_labels = np.loadtxt(negation_path + '/target-labels.dat',
dtype=int)
metaftures_fname = negation_path + '/metafeatures-%s.tsv' % lexicon_id
metafeatures = np.loadtxt(metaftures_fname, dtype=float, delimiter='\t')
metafeatures = sp.csr_matrix(metafeatures)
# Vectorize the document collection.
ngram_range = None
ngrams_features = None
analyzers = ['word', 'char'] if analyzer == 'both' else [analyzer,]
for analyzer in analyzers:
ngram_range = word_ngram_range if analyzer == 'word' else char_ngram_range
features_, vectorizer =\
vectorize_tweet_collection(fname=fname,
analyzer=analyzer,
ngram_range=ngram_range,
lowercase=lowercase,
max_df=max_df,
min_df=min_df,
binary=binary,
return_vectorizer=True)
if ngrams_features is None:
ngrams_features = features_
else:
ngrams_features = sp.hstack([ngrams_features, features_],
format='csr')
vectorizer_fname = '%s-%s-%i_%i-%s-%.2f-%i-%s.pkl' %\
(negation_id, analyzer,
ngram_range[0], ngram_range[1],
tmp[5], max_df, min_df, tmp[8])
vectorizer_fname = persistence_path + '/vectorizers/' + vectorizer_fname
# Persist the 'vectorizer'.
if not os.path.isfile(vectorizer_fname):
joblib.dump(vectorizer, vectorizer_fname)
features = sp.hstack([metafeatures, ngrams_features], format='csr')
skf = list(StratifiedKFold(n_splits=5, shuffle=False, random_state=None).\
split(np.zeros(features.shape[0], dtype=float), target_labels))
class_label_prediction = np.zeros(features.shape[0], dtype=int)
class_proba_prediction = np.zeros((features.shape[0],
np.unique(target_labels).shape[0]),
dtype=float)
for train_index, test_index in skf:
X_train = features[train_index]
y_train = target_labels[train_index]
clf = LinearSVC(C=C) if algo == 'LinearSVC' else LogisticRegression(C=C)
clf.fit(X_train, y_train)
X_test = features[test_index]
y_test = target_labels[test_index]
class_label_prediction[test_index] = clf.predict(X_test)
if algo == 'LogisticRegression':
class_proba_prediction[test_index] = clf.predict_proba(X_test)
class_label_fname = level_one_data_path + '/clf_%i-label.tsv' % idx
class_proba_fname = level_one_data_path + '/clf_%i-proba.tsv' % idx
np.savetxt(fname=class_label_fname, X=class_label_prediction, fmt='%i',
delimiter='\t')
if algo == 'LogisticRegression':
np.savetxt(fname=class_proba_fname, X=class_proba_prediction,
fmt='%.4f', delimiter='\t')
# Persist the classifier.
clf_fname = persistence_path + '/classifiers/' + 'clf_%i.pkl' % idx
if not os.path.isfile(clf_fname):
clf = LinearSVC(C=C) if algo == 'LinearSVC' else LogisticRegression(C=C)
clf.fit(features, target_labels)
joblib.dump(clf, clf_fname)
_write_in_file(
fname=CURRENT_PATH + '/%s-model-selection-filtered-results.tsv' % corpus,
content='\t'.join(['%i' % idx,] + model_selection_results[idx,:].tolist()) + '\n',
mode='a')
def find_low_correlated_combinations(corpus, n_classifiers=50):
"""Encuentra las combinaciones de más baja correlación.
paráms:
corpus: str
n_classifiers: int
Límite de clasificadores que pueden constituir una combinación.
Nota: los datos de nivel uno deben haber sido generados; esto es, debió
haberse ejecutado el método 'prepare_level_one_data'.
"""
corpus = corpus.lower()
level_one_data_path = CURRENT_PATH + '/level-one-data/%s' % corpus
filtered_results = np.loadtxt(
CURRENT_PATH + '/%s-model-selection-filtered-results.tsv' % corpus,
dtype=str, delimiter='\t', usecols=(0, 10))
logit_results =\
filtered_results[np.where(filtered_results[:,1] == 'LogisticRegression')]
low_correlated_combinations = {
1: {'filtered_results': [[i] for i in xrange(filtered_results.shape[0])],
'logit_results': [[i] for i in xrange(logit_results.shape[0])]}}
output_fname = CURRENT_PATH +\
'/%s-model-selection-low-correlated-combinations.tsv' % corpus
for i in xrange(2, n_classifiers + 1):
for which_results_to_use in low_correlated_combinations[i-1].iterkeys():
results = filtered_results
all_clf_ids = range(filtered_results.shape[0])
if which_results_to_use | |
"""
<NAME>
Pygame Menu System
Last Edit: 1 January 2021
"""
# Imports
import pygame
import string
# Initialize pygame
pygame.init()
# Settings
menu_manager_settings = {
"element_colorkey" : [0, 0, 0],
"menu_background_color" : [0, 0, 0],
"menu_fps" : 60
}
class Action:
"""
Holds function and argument data for buttons.
Attributes:
function: The function to execute.
arguments: Arguments for the function.
keyword_arguments: Keyword arguments for the function.
"""
def __init__ (self, function, args, kwargs):
"""
Instantiate an Action object.
Arguments:
function: The function to execute.
args: Tuple of positional arguments for the function.
kwargs: Dict of keyword arguments for the function.
"""
self.function = function
self.arguments = args
self.keyword_arguments = kwargs
def execute (self):
"""
Calls the function, passing it the args and kwargs.
"""
self.function(*self.arguments, **self.keyword_arguments)
class ButtonPicture(pygame.sprite.Sprite):
"""
Button object for menu manager.
Attributes:
image (pygame.image): Image for button.
rect (pygame.image.rect): Position, height, width values for image.
actions (list): Action objects executed when the button is clicked.
"""
def __init__ (self, image, pos = [0,0]):
"""
Instantiate a button object.
Arguments:
image (string): Path of image file to be used for button.
pos (tuple): XY position for the button.
"""
super(ButtonPicture, self).__init__()
self.image = pygame.image.load(image)
self.image.set_colorkey(menu_manager_settings["element_colorkey"])
self.rect = self.image.get_rect()
self.rect.x = pos[0]
self.rect.y = pos[1]
self.actions = []
def get_dimensions (self):
"""
Get the width and height of the ButtonPicture.
Returns:
list: Width and Height of the ButtonPicture. Uses width and height
from self.rect.
Format: [width, height]
"""
dimensions = [self.rect.width, self.rect.height]
return dimensions
def get_pos (self):
"""
Get the position of this picture button element.
Returns:
list: XY position of the picture button.
"""
position = [self.rect.x, self.rect.y]
return position
def set_pos (self, pos):
"""
Set position of the button.
Arguments:
list: XY position to set the button to.
Format: [x, y]
"""
self.rect.x = pos[0]
self.rect.y = pos[1]
def add_action (self, function, *args, **kwargs):
"""
Adds an action to the list of actions for this button.
Arguments:
function: The function to execute.
*args: Arguments for the function.
**kwargs: Keyword arguments for the function.
"""
new_action = Action(function, args, kwargs)
self.actions.append(new_action)
def execute_actions (self):
"""
Execute function linked to this button.
"""
for action in self.actions:
action.execute()
def is_clicked (self, mouse_pos):
"""
Returns true if the mouse cursor position is on this sprite.
Arguments:
mouse_pos (tuple): XY position of the cursor.
"""
# Check x axis
within_x = mouse_pos[0] >= self.rect.x and mouse_pos[0] <= self.rect.x + self.rect.width
# Check y axis
within_y = mouse_pos[1] >= self.rect.y and mouse_pos[1] <= self.rect.y + self.rect.height
# True if within x and y area
return within_x and within_y
class ButtonText (pygame.sprite.Sprite):
"""
Text Button object for menu manager.
Attributes:
image (pygame.image): Pygame image for the text button.
rect (pygame.image.rect): Position, height, width values for image.
actions (list): Action objects executed when the button is clicked.
"""
def __init__ (self, text, font, pos = [0,0], color = [255, 255, 255], \
antialias = True, background_color = None):
"""
Instantiate a button object.
Arguments:
text (string): Text to make the button from.
font (pygame.font.SysFont): Font to render the text in.
pos (tuple): XY position for the button.
color (List): Color of the text.
antialias (Boolean): Adds antialias to text.
background_color (List): Background color of the text.
"""
super(ButtonText, self).__init__()
self.text = text
self.font = font
self.antialias = antialias
self.color = color
self.background_color = background_color
self.image = font.render(str(text), antialias, color, background_color)
self.image.set_colorkey(menu_manager_settings["element_colorkey"])
self.rect = self.image.get_rect()
self.rect.x = pos[0]
self.rect.y = pos[1]
self.actions = []
def get_pos (self):
"""
Get the position of this text button element.
Returns:
pos (list): XY position of the text button.
"""
position = [self.rect.x, self.rect.y]
return position
def get_text(self):
"""
Get the text of the button.
Returns:
self.text (String): Text of the button.
"""
return self.text
def set_pos (self, pos):
"""
Set position of the text.
Arguments:
pos (list): XY position to set the text button to.
"""
self.rect.x = pos[0]
self.rect.y = pos[1]
def set_text (self, new_text):
"""
Changes the text of the button.
Arguments:
new_text (String): New text of the button.
"""
self.text = new_text
self.image = self.font.render(str(new_text), self.antialias, self.color, self.background_color)
def add_action (self, function, *args, **kwargs):
"""
Adds an action to the list of actions for this button.
Arguments:
function: The function to execute.
*args: Arguments for the function.
**kwargs: Keyword arguments for the function.
"""
new_action = Action(function, args, kwargs)
self.actions.append(new_action)
def execute_actions (self):
"""
Execute function linked to this button.
"""
for action in self.actions:
action.execute()
def is_clicked (self, mouse_pos):
"""
Returns true if the mouse cursor position is on this sprite.
Arguments:
mouse_pos (tuple): XY position of the cursor.
"""
# Check x area
within_x = mouse_pos[0] >= self.rect.x and mouse_pos[0] <= self.rect.x + self.rect.width
# Check y area
within_y = mouse_pos[1] >= self.rect.y and mouse_pos[1] <= self.rect.y + self.rect.height
# True if within x and y area
return within_x and within_y
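# Usage sketch (illustrative; not part of the original module). It wires a
# callback to a ButtonText and dispatches it when the mouse position falls
# inside the button's rect; pygame.init() has already been called above.
def _button_text_example():
    font = pygame.font.SysFont(None, 32)
    button = ButtonText("Start", font, pos=[20, 20])
    button.add_action(print, "Start pressed")  # any callable plus its arguments
    mouse_pos = (25, 30)
    if button.is_clicked(mouse_pos):
        button.execute_actions()
    return button.get_text()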
class Picture (pygame.sprite.Sprite):
"""
Picture object for menu manager.
Attributes:
image (pygame.image): Image for picture.
rect (pygame.image.rect): Position, height, width values for picture.
"""
def __init__ (self, image, pos = [0,0]):
"""
Instantiate a picture object.
Arguments:
image (string): Path of image file to be used for picture.
pos (tuple): XY position for the picture.
"""
super(Picture, self).__init__()
self.image = pygame.image.load(image)
self.image.set_colorkey(menu_manager_settings["element_colorkey"])
self.rect = self.image.get_rect()
self.rect.x = pos[0]
self.rect.y = pos[1]
def get_pos (self):
"""
Get the position of this picture element.
Returns:
pos (list): XY position of the picture.
"""
position = [self.rect.x, self.rect.y]
return position
def set_pos (self, pos):
"""
Set position of the picture.
Arguments:
pos (tuple): XY position to set the picture to.
"""
self.rect.x = pos[0]
self.rect.y = pos[1]
def set_image (self, new_image):
"""
Set a new picture for this instance of Picture. Preserves x and y
position of the old picture.
Arguments:
new_image (String): File name of the new picture.
"""
# Store the current position
temp_old_pos = self.get_pos()
# Load the new image
self.image = pygame.image.load(new_image)
self.image.set_colorkey(menu_manager_settings["element_colorkey"])
self.rect = self.image.get_rect()
# Set the x and y position using the old position
self.rect.x = temp_old_pos[0]
self.rect.y = temp_old_pos[1]
class Text (pygame.sprite.Sprite):
"""
Text object for MenuManager.
Attributes:
text (String): Text to be rendered.
font (pygame.font): Font used to render the text.
pos (tuple): Position of the text.
color (List): Color of the text.
antialias (Boolean): Adds antialias to text.
background_color (List): Background color of the text.
image (pygame.image): Rendered text.
rect (pygame.image.rect): Position, height, width values for Text.
"""
def __init__ (self, text, font, pos = [0,0], color = [255, 255, 255], \
antialias = True, background_color = None):
"""
Instantiates a new Text object.
Arguments:
text (String): Text to be rendered.
font (pygame.font): Font used to render the text.
pos (tuple): Position of the text.
color (List): Color of the text.
antialias (Boolean): Adds antialias to text.
background_color (List): Background color of the text.
"""
super(Text, self).__init__()
self.text = text
self.font = font
self.pos = pos
self.color = color
self.antialias = antialias
self.background_color = background_color
self.image = font.render(str(text), antialias, color, background_color)
self.image.set_colorkey(menu_manager_settings["element_colorkey"])
self.rect = self.image.get_rect()
self.rect.x = pos[0]
self.rect.y = pos[1]
def get_dimensions (self):
"""
Get the width and height of the Text.
Returns:
list: Width and Height of the Text. Uses width and height from
self.rect.
Format: [width, height]
"""
dimensions = [self.rect.width, self.rect.height]
return dimensions
def get_pos (self):
"""
Get the position of this text element.
Returns:
pos (list): XY position of the text.
"""
position = [self.rect.x, self.rect.y]
return position
def set_pos (self, pos):
"""
Set position of the text.
Arguments:
pos (list): XY position to set the text to.
"""
self.rect.x = pos[0]
self.rect.y = pos[1]
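# Illustrative usage sketch: centring a Text element on an assumed display
# surface 'screen' by combining get_dimensions() and set_pos().
#
#     font = pygame.font.SysFont("arial", 32)
#     title = Text("Main Menu", font)
#     width, height = title.get_dimensions()
#     title.set_pos([(screen.get_width() - width) // 2,
#                    (screen.get_height() - height) // 2])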
class MenuManager:
"""
Menu manager for pygame.
Attributes:
pages (List): List of pages in the menu manager.
current_page (Page): Page currently being displayed.
screen (pygame.display): Surface to blit | |
Schedule.objects.update(start_date=THREE_YEARS_AGO)
# ensure that the user has indefinite access
self.client.login(username=user.username, password=self.TEST_PASSWORD)
response = self.client.get(url)
assert response.status_code == 200, 'Should not expire access for user'
@mock.patch.dict(settings.FEATURES, {'DISABLE_START_DATES': False})
@ddt.data(
InstructorFactory,
StaffFactory,
BetaTesterFactory,
OrgStaffFactory,
OrgInstructorFactory,
)
def test_course_does_not_expire_for_course_staff(self, role_factory):
"""
There are a number of different roles/users that should not lose access after the expiration date.
Ensure that users who should not lose access get a 200 (ok) response
when attempting to visit the course after their would-be expiration date.
"""
course = CourseFactory.create(start=THREE_YEARS_AGO)
url = course_home_url(course)
user = role_factory.create(password=self.TEST_PASSWORD, course_key=course.id)
CourseEnrollment.enroll(user, self.course.id, mode=CourseMode.AUDIT)
Schedule.objects.update(start_date=THREE_YEARS_AGO)
# ensure that the user has indefinite access
self.client.login(username=user.username, password=self.TEST_PASSWORD)
response = self.client.get(url)
assert response.status_code == 200, 'Should not expire access for user'
@ddt.data(
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_GROUP_MODERATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_ADMINISTRATOR
)
def test_course_does_not_expire_for_user_with_course_role(self, role_name):
"""
Test that users with the above roles for a course do not lose access
"""
course = CourseFactory.create(start=THREE_YEARS_AGO)
url = course_home_url(course)
user = UserFactory.create()
role = RoleFactory(name=role_name, course_id=course.id)
role.users.add(user)
# ensure the user has indefinite access
self.client.login(username=user.username, password=self.TEST_PASSWORD)
response = self.client.get(url)
assert response.status_code == 200, 'Should not expire access for user'
@mock.patch.dict(settings.FEATURES, {'DISABLE_START_DATES': False})
@ddt.data(
GlobalStaffFactory,
)
def test_course_does_not_expire_for_global_users(self, role_factory):
"""
There are a number of different roles/users that should not lose access after the expiration date.
Ensure that users who should not lose access get a 200 (ok) response
when attempting to visit the course after their would-be expiration date.
"""
course = CourseFactory.create(start=THREE_YEARS_AGO)
url = course_home_url(course)
user = role_factory.create(password=self.TEST_PASSWORD)
CourseEnrollment.enroll(user, self.course.id, mode=CourseMode.AUDIT)
Schedule.objects.update(start_date=THREE_YEARS_AGO)
# ensure that the user has indefinite access
self.client.login(username=user.username, password=self.TEST_PASSWORD)
response = self.client.get(url)
assert response.status_code == 200, 'Should not expire access for user'
@mock.patch.dict(settings.FEATURES, {'DISABLE_START_DATES': False})
def test_expired_course(self):
"""
Ensure that a user accessing an expired course sees a redirect to
the student dashboard, not a 404.
"""
CourseDurationLimitConfig.objects.create(enabled=True, enabled_as_of=datetime(2010, 1, 1, tzinfo=UTC))
course = CourseFactory.create(start=THREE_YEARS_AGO)
url = course_home_url(course)
for mode in [CourseMode.AUDIT, CourseMode.VERIFIED]:
CourseModeFactory.create(course_id=course.id, mode_slug=mode)
# assert that if an expired audit user tries to access the course they are redirected to the dashboard
audit_user = UserFactory(password=self.TEST_PASSWORD)
self.client.login(username=audit_user.username, password=self.TEST_PASSWORD)
audit_enrollment = CourseEnrollment.enroll(audit_user, course.id, mode=CourseMode.AUDIT)
audit_enrollment.created = THREE_YEARS_AGO + timedelta(days=1)
audit_enrollment.save()
response = self.client.get(url)
expiration_date = strftime_localized(course.start + timedelta(weeks=4) + timedelta(days=1), 'SHORT_DATE')
expected_params = QueryDict(mutable=True)
course_name = CourseOverview.get_from_id(course.id).display_name_with_default
expected_params['access_response_error'] = 'Access to {run} expired on {expiration_date}'.format(
run=course_name,
expiration_date=expiration_date
)
expected_url = '{url}?{params}'.format(
url=reverse('dashboard'),
params=expected_params.urlencode()
)
self.assertRedirects(response, expected_url)
@mock.patch.dict(settings.FEATURES, {'DISABLE_START_DATES': False})
def test_expiration_banner_with_expired_upgrade_deadline(self):
"""
Ensure that a user accessing a course with an expired upgrade deadline
will still see the course expiration banner without the upgrade related text.
"""
past = datetime(2010, 1, 1, tzinfo=UTC)
CourseDurationLimitConfig.objects.create(enabled=True, enabled_as_of=past)
course = CourseFactory.create(start=now() - timedelta(days=10))
CourseModeFactory.create(course_id=course.id, mode_slug=CourseMode.AUDIT)
CourseModeFactory.create(course_id=course.id, mode_slug=CourseMode.VERIFIED, expiration_datetime=past)
user = UserFactory(password=self.TEST_PASSWORD)
self.client.login(username=user.username, password=self.TEST_PASSWORD)
CourseEnrollment.enroll(user, course.id, mode=CourseMode.AUDIT)
url = course_home_url(course)
response = self.client.get(url)
bannerText = get_expiration_banner_text(user, course)
self.assertContains(response, bannerText, html=True)
self.assertContains(response, TEST_BANNER_CLASS)
def test_audit_only_not_expired(self):
"""
Verify that enrolled users are NOT shown the course expiration banner and can
access the course home page if course audit only
"""
CourseDurationLimitConfig.objects.create(enabled=True, enabled_as_of=datetime(2010, 1, 1, tzinfo=UTC))
audit_only_course = CourseFactory.create()
self.create_user_for_course(audit_only_course, CourseUserType.ENROLLED)
response = self.client.get(course_home_url(audit_only_course))
assert response.status_code == 200
self.assertContains(response, TEST_COURSE_TOOLS)
self.assertNotContains(response, TEST_BANNER_CLASS)
@mock.patch.dict(settings.FEATURES, {'DISABLE_START_DATES': False})
def test_expired_course_in_holdback(self):
"""
Ensure that a user accessing an expired course that is in the holdback
does not get redirected to the student dashboard and can still access the course.
"""
CourseDurationLimitConfig.objects.create(enabled=True, enabled_as_of=datetime(2010, 1, 1, tzinfo=UTC))
course = CourseFactory.create(start=THREE_YEARS_AGO)
url = course_home_url(course)
for mode in [CourseMode.AUDIT, CourseMode.VERIFIED]:
CourseModeFactory.create(course_id=course.id, mode_slug=mode)
# assert that if an expired audit user in the holdback tries to access the course
# they are not redirected to the dashboard
audit_user = UserFactory(password=self.TEST_PASSWORD)
self.client.login(username=audit_user.username, password=self.TEST_PASSWORD)
audit_enrollment = CourseEnrollment.enroll(audit_user, course.id, mode=CourseMode.AUDIT)
Schedule.objects.update(start_date=THREE_YEARS_AGO)
FBEEnrollmentExclusion.objects.create(
enrollment=audit_enrollment
)
response = self.client.get(url)
assert response.status_code == 200
@mock.patch.dict(settings.FEATURES, {'DISABLE_START_DATES': False})
@mock.patch("common.djangoapps.util.date_utils.strftime_localized")
def test_non_live_course_other_language(self, mock_strftime_localized):
"""
Ensure that a user accessing a non-live course sees a redirect to
the student dashboard, not a 404, even if the localized date is unicode
"""
future_course = self.create_future_course()
self.create_user_for_course(future_course, CourseUserType.ENROLLED)
fake_unicode_start_time = "üñîçø∂é_ßtå®t_tîµé"
mock_strftime_localized.return_value = fake_unicode_start_time
url = course_home_url(future_course)
response = self.client.get(url)
expected_params = QueryDict(mutable=True)
expected_params['notlive'] = fake_unicode_start_time
expected_url = '{url}?{params}'.format(
url=reverse('dashboard'),
params=expected_params.urlencode()
)
self.assertRedirects(response, expected_url)
def test_nonexistent_course(self):
"""
Ensure a non-existent course results in a 404.
"""
self.create_user_for_course(self.course, CourseUserType.ANONYMOUS)
url = course_home_url_from_string('not/a/course')
response = self.client.get(url)
assert response.status_code == 404
@override_waffle_flag(COURSE_HOME_USE_LEGACY_FRONTEND, active=True)
@override_waffle_flag(COURSE_PRE_START_ACCESS_FLAG, active=True)
def test_masters_course_message(self):
enroll_button_html = "<button class=\"enroll-btn btn-link\">Enroll now</button>"
# Verify that unenrolled users visiting a course with a Master's track
# that is not the only track are shown an enroll call to action message
add_course_mode(self.course, CourseMode.MASTERS, 'Master\'s Mode', upgrade_deadline_expired=False)
remove_course_mode(self.course, CourseMode.AUDIT)
self.create_user_for_course(self.course, CourseUserType.UNENROLLED)
url = course_home_url(self.course)
response = self.client.get(url)
self.assertContains(response, TEST_COURSE_HOME_MESSAGE)
self.assertContains(response, TEST_COURSE_HOME_MESSAGE_UNENROLLED)
self.assertContains(response, enroll_button_html)
# Verify that unenrolled users visiting a course that contains only a Master's track
# are not shown an enroll call to action message
remove_course_mode(self.course, CourseMode.VERIFIED)
response = self.client.get(url)
expected_message = ('You must be enrolled in the course to see course content. '
'Please contact your degree administrator or edX Support if you have questions.')
self.assertContains(response, TEST_COURSE_HOME_MESSAGE)
self.assertContains(response, expected_message)
self.assertNotContains(response, enroll_button_html)
@override_waffle_flag(COURSE_HOME_USE_LEGACY_FRONTEND, active=True)
@override_waffle_flag(COURSE_PRE_START_ACCESS_FLAG, active=True)
def test_course_messaging(self):
"""
Ensure that the following five use cases work as expected
1) Anonymous users are shown a course message linking them to the login page
2) Unenrolled users are shown a course message allowing them to enroll
3) Enrolled users who show up on the course page after the course has begun
are not shown a course message.
4) Enrolled users who show up on the course page after the course has begun will
see the course expiration banner if course duration limits are on for the course.
5) Enrolled users who show up on the course page before the course begins
are shown a message explaining when the course starts as well as a call to
action button that allows them to add a calendar event.
"""
# Verify that anonymous users are shown a login link in the course message
url = course_home_url(self.course)
response = self.client.get(url)
self.assertContains(response, TEST_COURSE_HOME_MESSAGE)
self.assertContains(response, TEST_COURSE_HOME_MESSAGE_ANONYMOUS)
# Verify that unenrolled users are shown an enroll call to action message
user = self.create_user_for_course(self.course, CourseUserType.UNENROLLED)
url = course_home_url(self.course)
response = self.client.get(url)
self.assertContains(response, TEST_COURSE_HOME_MESSAGE)
self.assertContains(response, TEST_COURSE_HOME_MESSAGE_UNENROLLED)
# Verify that enrolled users are not shown any state warning message when enrolled and course has begun.
CourseEnrollment.enroll(user, self.course.id)
url = course_home_url(self.course)
response = self.client.get(url)
self.assertNotContains(response, TEST_COURSE_HOME_MESSAGE_ANONYMOUS)
self.assertNotContains(response, TEST_COURSE_HOME_MESSAGE_UNENROLLED)
self.assertNotContains(response, TEST_COURSE_HOME_MESSAGE_PRE_START)
# Verify that enrolled users are shown the course expiration banner if content gating is enabled
# We use .save() explicitly here (rather than .objects.create) in order to force the
# cache to refresh.
config = CourseDurationLimitConfig(
course=CourseOverview.get_from_id(self.course.id),
enabled=True,
enabled_as_of=datetime(2018, 1, 1, tzinfo=UTC)
)
config.save()
url = course_home_url(self.course)
response = self.client.get(url)
bannerText = get_expiration_banner_text(user, self.course)
self.assertContains(response, bannerText, html=True)
# Verify that enrolled users are not shown the course expiration banner if content gating is disabled
config.enabled = False
config.save()
url = course_home_url(self.course)
response = self.client.get(url)
bannerText = get_expiration_banner_text(user, self.course)
self.assertNotContains(response, bannerText, html=True)
# Verify that enrolled users are shown 'days until start' message before start date
future_course = self.create_future_course()
CourseEnrollment.enroll(user, future_course.id)
url = course_home_url(future_course)
response = self.client.get(url)
self.assertContains(response, TEST_COURSE_HOME_MESSAGE)
self.assertContains(response, TEST_COURSE_HOME_MESSAGE_PRE_START)
@override_waffle_flag(COURSE_HOME_USE_LEGACY_FRONTEND, active=True)
def test_course_messaging_for_staff(self):
"""
Staff users will not see the expiration banner when course duration limits
are on for the course.
"""
config = CourseDurationLimitConfig(
course=CourseOverview.get_from_id(self.course.id),
enabled=True,
enabled_as_of=datetime(2018, 1, 1, tzinfo=UTC)
)
config.save()
url = course_home_url(self.course)
CourseEnrollment.enroll(self.staff_user, self.course.id)
response = self.client.get(url)
bannerText = get_expiration_banner_text(self.staff_user, self.course)
self.assertNotContains(response, bannerText, html=True)
@override_waffle_flag(COURSE_PRE_START_ACCESS_FLAG, active=True)
@override_waffle_flag(ENABLE_COURSE_GOALS, active=True)
def test_course_goals(self):
"""
Ensure that the following five use cases work as expected.
1) Unenrolled users are not shown the set course goal message.
2) Enrolled users are shown the set course goal message if they have not yet set a course goal.
3) Enrolled users are not shown the set course goal message if they have set a course goal.
4) Enrolled and verified users are not shown the set course goal message.
5) Enrolled users are not shown the set | |
from __future__ import absolute_import, unicode_literals
import fakeredis
import pytest
import redis
import unittest
import socket
from array import array
from case import ANY, ContextMock, Mock, call, mock, skip, patch
from collections import defaultdict
from contextlib import contextmanager
from itertools import count
from kombu import Connection, Exchange, Queue, Consumer, Producer
from kombu.exceptions import InconsistencyError, VersionMismatch
from kombu.five import Empty, Queue as _Queue
from kombu.transport import virtual
from kombu.utils import eventio # patch poll
from kombu.utils.json import dumps
_fake_redis_client = None
def _get_fake_redis_client():
global _fake_redis_client
if _fake_redis_client is None:
_fake_redis_client = FakeRedisClient()
return _fake_redis_client
class _poll(eventio._select):
def register(self, fd, flags):
if flags & eventio.READ:
self._rfd.add(fd)
def poll(self, timeout):
events = []
for fd in self._rfd:
if fd.data:
events.append((fd.fileno(), eventio.READ))
return events
eventio.poll = _poll
# must import after poller patch, pep8 complains
from kombu.transport import redis as kombu_redis # noqa
class ResponseError(Exception):
pass
class DummyParser(object):
def __init__(self, *args, **kwargs):
self.socket_read_size = 1
self.encoder = None
self._sock = None
self._buffer = None
def on_disconnect(self):
self.socket_read_size = 1
self.encoder = None
self._sock = None
self._buffer = None
def on_connect(self, connection):
pass
class FakeRedisSocket(fakeredis._server.FakeSocket):
blocking = True
filenos = count(30)
def __init__(self, server):
super(FakeRedisSocket, self).__init__(server)
self._server = server
self._fileno = next(self.filenos)
self.data = []
self.connection = None
self.channel = None
self.transport_options = {}
self.hostname = None
self.port = None
self.password = <PASSWORD>
self.virtual_host = '/'
self.max_connections = 10
self.ssl = None
class FakeRedisConnection(fakeredis.FakeConnection):
disconnected = False
default_port = 6379
channel_max = 65535
def __init__(self, client, server, **kwargs):
kwargs['parser_class'] = DummyParser
super(fakeredis.FakeConnection, self).__init__(**kwargs)
if client is None:
client = _get_fake_redis_client()
self.client = client
if server is None:
server = fakeredis.FakeServer()
self._server = server
self._sock = FakeRedisSocket(server=server)
try:
self.on_connect()
except redis.exceptions.RedisError:
# clean up after any error in on_connect
self.disconnect()
raise
self._parser = ()
self._avail_channel_ids = array(
virtual.base.ARRAY_TYPE_H, range(self.channel_max, 0, -1),
)
self.cycle = kombu_redis.MultiChannelPoller()
conn_errs, channel_errs = kombu_redis.get_redis_error_classes()
self.connection_errors, self.channel_errors = conn_errs, channel_errs
def disconnect(self):
self.disconnected = True
class FakeRedisConnectionPool(redis.ConnectionPool):
def __init__(self, connection_class, max_connections=None,
**connection_kwargs):
connection_class = FakeRedisConnection
connection_kwargs['client'] = None
connection_kwargs['server'] = None
self._connections = []
super(FakeRedisConnectionPool, self).__init__(
connection_class=connection_class,
max_connections=max_connections,
**connection_kwargs
)
def get_connection(self, *args, **kwargs):
connection = self.connection_class(**self.connection_kwargs)
self._connections.append(connection)
return connection
def release(self, connection):
pass
class FakeRedisClient(fakeredis.FakeStrictRedis):
queues = {}
shard_hint = None
def __init__(self, db=None, port=None, connection_pool=None, **kwargs):
self._called = []
self._connection = None
self.bgsave_raises_ResponseError = False
self.server = server = fakeredis.FakeServer()
connection_pool = FakeRedisConnectionPool(FakeRedisConnection)
self.connection_pool = connection_pool
super(FakeRedisClient, self).__init__(
db=db, port=port, connection_pool=connection_pool, server=server)
self.connection = FakeRedisConnection(self, server)
self.response_callbacks = dict()
def __del__(self, key=None):
if key:
self.delete(key)
def ping(self, *args, **kwargs):
return True
def pipeline(self):
return FakePipeline(self.server, self.connection_pool, [], '1234', '')
def set_response_callback(self, command, callback):
pass
def _new_queue(self, queue, auto_delete=False, **kwargs):
self.queues[queue] = _Queue()
if auto_delete:
self.auto_delete_queues.add(queue)
def rpop(self, key):
try:
return self.queues[key].get_nowait()
except (KeyError, Empty):
pass
def llen(self, key):
try:
return self.queues[key].qsize()
except KeyError:
return 0
def lpush(self, key, value):
self.queues[key].put_nowait(value)
def pubsub(self, *args, **kwargs):
self.connection_pool = FakeRedisConnectionPool(FakeRedisConnection)
return self
def delete(self, key):
self.queues.pop(key, None)
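# Illustrative usage sketch: FakeRedisClient keeps an in-memory Queue per
# key, so list operations can be exercised without a Redis server. Note
# that _new_queue() must be called before lpush()/rpop() for a given key.
#
#     client = _get_fake_redis_client()
#     client._new_queue('celery')
#     client.lpush('celery', '{"task": "demo"}')
#     assert client.llen('celery') == 1
#     assert client.rpop('celery') == '{"task": "demo"}'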
class FakeRedisClientLite(object):
"""The original FakeRedis client from Kombu to support the
Producer/Consumer TestCases, preferred to use FakeRedisClient."""
queues = {}
sets = defaultdict(set)
hashes = defaultdict(dict)
shard_hint = None
def __init__(self, db=None, port=None, connection_pool=None, **kwargs):
self._called = []
self._connection = None
self.bgsave_raises_ResponseError = False
self.connection = self._sconnection(self)
def exists(self, key):
return key in self.queues or key in self.sets
def hset(self, key, k, v):
self.hashes[key][k] = v
def hget(self, key, k):
return self.hashes[key].get(k)
def hdel(self, key, k):
self.hashes[key].pop(k, None)
def sadd(self, key, member, *args):
self.sets[key].add(member)
def zadd(self, key, *args):
(mapping,) = args
for item in mapping:
self.sets[key].add(item)
def smembers(self, key):
return self.sets.get(key, set())
def sismember(self, name, value):
return value in self.sets.get(name, set())
def scard(self, key):
return len(self.sets.get(key, set()))
def ping(self, *args, **kwargs):
return True
def srem(self, key, *args):
self.sets.pop(key, None)
zrem = srem
def llen(self, key):
try:
return self.queues[key].qsize()
except KeyError:
return 0
def lpush(self, key, value):
self.queues[key].put_nowait(value)
def parse_response(self, connection, type, **options):
cmd, queues = self.connection._sock.data.pop()
queues = list(queues)
assert cmd == type
self.connection._sock.data = []
if type == 'BRPOP':
timeout = queues.pop()
item = self.brpop(queues, timeout)
if item:
return item
raise Empty()
def brpop(self, keys, timeout=None):
for key in keys:
try:
item = self.queues[key].get_nowait()
except Empty:
pass
else:
return key, item
def rpop(self, key):
try:
return self.queues[key].get_nowait()
except (KeyError, Empty):
pass
def __contains__(self, k):
return k in self._called
def pipeline(self):
return FakePipelineLite(self)
def encode(self, value):
return str(value)
def _new_queue(self, key):
self.queues[key] = _Queue()
class _sconnection(object):
disconnected = False
class _socket(object):
blocking = True
filenos = count(30)
def __init__(self, *args):
self._fileno = next(self.filenos)
self.data = []
def fileno(self):
return self._fileno
def setblocking(self, blocking):
self.blocking = blocking
def __init__(self, client):
self.client = client
self._sock = self._socket()
def disconnect(self):
self.disconnected = True
def send_command(self, cmd, *args):
self._sock.data.append((cmd, args))
class FakePipelineLite(object):
def __init__(self, client):
self.client = client
self.stack = []
def __enter__(self):
return self
def __exit__(self, *exc_info):
pass
def __getattr__(self, key):
if key not in self.__dict__:
def _add(*args, **kwargs):
self.stack.append((getattr(self.client, key), args, kwargs))
return self
return _add
return self.__dict__[key]
def execute(self):
stack = list(self.stack)
self.stack[:] = []
return [fun(*args, **kwargs) for fun, args, kwargs in stack]
class FakePipeline(redis.client.Pipeline):
def __init__(self, server, connection_pool,
response_callbacks, transaction, shard_hint):
if not server:
server = fakeredis.FakeServer()
self._server = server
correct_pool_instance = isinstance(
connection_pool, FakeRedisConnectionPool)
if connection_pool is not None and not correct_pool_instance:
connection_pool = FakeRedisConnectionPool(FakeRedisConnection)
self.connection_pool = connection_pool
self.connection = FakeRedisConnection(self, server)
self.client = connection_pool.get_connection().client
self.response_callbacks = response_callbacks
self.transaction = transaction
self.shard_hint = shard_hint
self.watching = False
self.reset()
def __enter__(self):
return self
def __exit__(self, *exc_info):
pass
def __getattr__(self, key):
if key not in self.__dict__:
def _add(*args, **kwargs):
self.command_stack.append(
(getattr(self.connection.client, key), args, kwargs))
return self
return _add
return self.__dict__[key]
def reset(self):
# Do nothing with the real connection
self.command_stack = []
self.scripts = set()
def execute(self):
stack = list(self.command_stack)
all_cmds = self.connection.pack_commands(
[args for args, _ in self.command_stack])
self.connection.send_packed_command(all_cmds)
response = []
for cmd in all_cmds:
try:
response.append(
self.parse_response(self.connection, cmd))
except ResponseError:
import sys
response.append(sys.exc_info()[1])
self.raise_first_error(self.command_stack, response)
results = []
for t, kwargs in stack:
redis_func_name = t[0]
redis_func_name = redis_func_name.lower()
if redis_func_name == 'del':
redis_func_name = 'delete'
args = t[1:]
fun = getattr(self.client, redis_func_name)
r = fun(*args, **kwargs)
results.append(r)
self.command_stack[:] = []
self.reset()
return results
class FakeRedisKombuChannelLite(kombu_redis.Channel):
def _get_client(self):
return FakeRedisClientLite
def _get_pool(self, asynchronous=False):
return Mock()
def _get_response_error(self):
return ResponseError
def _new_queue(self, queue, **kwargs):
for pri in self.priority_steps:
self.client._new_queue(self._queue_for_priority(queue, pri))
def pipeline(self):
return FakePipelineLite(FakeRedisClientLite())
class FakeRedisKombuChannel(kombu_redis.Channel):
_fanout_queues = {}
def __init__(self, *args, **kwargs):
super(FakeRedisKombuChannel, self).__init__(*args, **kwargs)
def _get_client(self):
return FakeRedisClient
def _create_client(self, asynchronous=False):
global _fake_redis_client
if _fake_redis_client is None:
_fake_redis_client = self._get_client()()
return _fake_redis_client
@contextmanager
def conn_or_acquire(self, client=None):
if client:
yield client
else:
yield self._create_client()
def _get_pool(self, asynchronous=False):
params = self._connparams(asynchronous=asynchronous)
self.keyprefix_fanout = self.keyprefix_fanout.format(db=params['db'])
return FakeRedisConnectionPool(**params)
def _get_response_error(self):
return ResponseError
def _new_queue(self, queue, **kwargs):
for pri in self.priority_steps:
self.client._new_queue(self._queue_for_priority(queue, pri))
def pipeline(self):
yield _get_fake_redis_client().pipeline()
def basic_publish(self, message, exchange='', routing_key='', **kwargs):
self._inplace_augment_message(message, exchange, routing_key)
# anon exchange: routing_key is the destination queue
return self._put(routing_key, message, **kwargs)
class FakeRedisKombuTransportLite(kombu_redis.Transport):
Channel = FakeRedisKombuChannelLite
def __init__(self, *args, **kwargs):
super(FakeRedisKombuTransportLite, self).__init__(*args, **kwargs)
def _get_errors(self):
return ((KeyError,), (IndexError,))
class FakeRedisKombuTransport(FakeRedisKombuTransportLite):
Channel = FakeRedisKombuChannel
@skip.unless_module('redis')
class TestRedisChannel(unittest.TestCase):
def setUp(self):
self.connection = self.create_connection()
self.channel = self.connection.default_channel
def tearDown(self):
self.connection = None
self.channel = None
global _fake_redis_client
_fake_redis_client = None
def create_connection(self, **kwargs):
kwargs.setdefault('transport_options', {'fanout_patterns': True})
return Connection(transport=FakeRedisKombuTransport, **kwargs)
def _get_one_delivery_tag(self, n='test_uniq_tag'):
chan = self.connection.default_channel
chan.exchange_declare(n)
chan.queue_declare(n)
chan.queue_bind(n, n, n)
msg = chan.prepare_message('quick brown fox')
chan.basic_publish(msg, n, n)
payload = chan._get(n)
assert payload
pymsg = chan.message_to_python(payload)
return pymsg.delivery_tag
def test_delivery_tag_is_uuid(self):
seen = set()
for i in range(100):
tag = self._get_one_delivery_tag()
assert tag not in seen
seen.add(tag)
with pytest.raises(ValueError):
int(tag)
assert len(tag) == 36
def test_disable_ack_emulation(self):
conn = Connection(
transport=FakeRedisKombuTransport,
transport_options={'ack_emulation': False}
)
chan = conn.channel()
assert not chan.ack_emulation
assert chan.QoS == virtual.QoS
def test_redis_ping_raises(self):
pool = Mock(name='pool')
pool_at_init = [pool]
client = Mock(name='client')
class XChannel(FakeRedisKombuChannel):
def __init__(self, *args, **kwargs):
self._pool = pool_at_init[0]
super(XChannel, self).__init__(*args, **kwargs)
def _get_client(self):
return lambda *_, **__: client
def _create_client(self, asynchronous=False):
if asynchronous:
return self.Client(connection_pool=self.async_pool)
return self.Client(connection_pool=self.pool)
class XTransport(FakeRedisKombuTransport):
Channel = XChannel
conn = Connection(transport=XTransport)
client.ping.side_effect = RuntimeError()
with pytest.raises(RuntimeError):
conn.channel()
pool.disconnect.assert_called_with()
pool.disconnect.reset_mock()
pool_at_init = [None]
with pytest.raises(RuntimeError):
conn.channel()
pool.disconnect.assert_not_called()
def test_after_fork(self):
self.channel._pool = None
self.channel._after_fork()
pool = self.channel._pool = Mock(name='pool')
self.channel._after_fork()
pool.disconnect.assert_called_with()
def test_next_delivery_tag(self):
assert (self.channel._next_delivery_tag() | |
pass
def wait_until_response(self, draw_stim=True):
"""
Waits until a response key is pressed.
Returns last key pressed, timestamped.
:Kwargs:
draw_stim (bool, default: True)
Controls if stimuli should be drawn or have already
been drawn (useful if you only want to redefine
the drawing bit of this function).
:Returns:
A list of tuples with a key name (str) and a response time (float).
"""
if draw_stim:
self.before_event()
event_keys = []
event.clearEvents() # key presses might be stored from before
while len(event_keys) == 0: # if the participant did not respond earlier
if 'autort' in self.this_trial:
if self.trial_clock.getTime() > self.this_trial['autort']:
event_keys = [(self.this_trial['autoresp'], self.this_trial['autort'])]
else:
event_keys = self.last_keypress(
keyList=self.computer.valid_responses.keys(),
timeStamped=self.trial_clock)
return event_keys
def idle_event(self, draw_stim=True):
"""
Default idle function for an event.
Sits idle catching default keys (exit and trigger).
:Kwargs:
draw_stim (bool, default: True)
Controls if stimuli should be drawn or have already
been drawn (useful if you only want to redefine
the drawing bit of this function).
:Returns:
A list of tuples with a key name (str) and a response time (float).
"""
if draw_stim:
self.before_event()
event_keys = None
event.clearEvents() # key presses might be stored from before
if self.this_event.dur == 0 or self.this_event.dur == np.inf:
event_keys = self.last_keypress()
else:
event_keys = self.wait()
return event_keys
def feedback(self):
"""
Gives feedback by changing fixation color.
- Correct: fixation change to green
- Wrong: fixation change to red
"""
this_resp = self.all_keys[-1]
if hasattr(self, 'respmap'):
subj_resp = this_resp[2]
else:
subj_resp = self.computer.valid_responses[this_resp[0]]
#subj_resp = this_resp[2] #self.computer.valid_responses[this_resp[0]]
# find which stimulus is fixation
fix = None  # make sure 'fix' is defined even if no fixation stimulus is found
if isinstance(self.this_event.display, (list, tuple)):
for stim in self.this_event.display:
if stim.name in ['fixation', 'fix']:
fix = stim
break
else:
if self.this_event.display.name in ['fixation', 'fix']:
fix = self.this_event.display
if fix is not None:
orig_color = fix.color # store original color
if self.this_trial['corr_resp'] == subj_resp:
fix.setFillColor('DarkGreen') # correct response
else:
fix.setFillColor('DarkRed') # incorrect response
for stim in self.this_event.display:
stim.draw()
self.win.flip()
# sit idle
self.wait()
# reset fixation color
fix.setFillColor(orig_color)
def wait(self):
"""
Wait until the event is over, register key presses.
:Returns:
A list of tuples with a key name (str) and a response time (float).
"""
all_keys = []
while self.check_continue():
keys = self.last_keypress()
if keys is not None:
all_keys += keys
return all_keys
def check_continue(self):
"""
Check if the event is not over yet.
Uses ``event_clock``, ``trial_clock``, and, if
``self.global_timing`` is True, ``glob_clock`` to check whether
the current event is not over yet. The event cannot last longer
than event and trial durations and also fall out of sync from
global clock.
:Returns:
A list of tuples with a key name (str) and a response time (float).
"""
event_on = self.event_clock.getTime() < self.this_event.dur
if self.global_timing:
trial_on = self.trial_clock.getTime() < self.this_trial['dur']
time_on = self.glob_clock.getTime() < self.cumtime + self.this_trial['dur']
else:
trial_on = True
time_on = True
return (event_on and trial_on and time_on)
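# Worked example of the timing logic above: for an event with dur=1.5 s
# inside a trial with dur=4 s, the event keeps running while
# event_clock < 1.5 and, when global_timing is on, trial_clock < 4 and
# glob_clock < cumtime + 4; whichever limit is reached first ends it.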
def set_autorun(self, exp_plan):
"""
Automatically runs experiment by simulating key responses.
This is just the absolute minimum for autorunning. Best practice would
be to extend this function to simulate responses according to your
hypothesis.
:Args:
exp_plan (list of dict)
A list of trial definitions.
:Returns:
exp_plan with ``autoresp`` and ``autort`` columns included.
"""
def rt(mean):
add = np.random.normal(mean,scale=.2)/self.rp['autorun']
return self.trial[0].dur + add
inverse_resp = invert_dict(self.computer.valid_responses)
for trial in exp_plan:
# here you could do if/else to assign different values to
# different conditions according to your hypothesis
trial['autoresp'] = random.choice(inverse_resp.values())
trial['autort'] = rt(.5)
return exp_plan
def set_TrialHandler(self, trial_list, trialmap=None):
"""
Converts a list of trials into a `~psychopy.data.TrialHandler`,
finalizing the experimental setup procedure.
"""
if len(self.blocks) > 1:
self.set_seed()
TrialHandler.__init__(self,
trial_list,
nReps=self.nReps,
method=self.method,
extraInfo=self.info,
name=self.name,
seed=self.seed)
if trialmap is None:
self.trialmap = range(len(trial_list))
else:
self.trialmap = trialmap
def get_blocks(self):
"""
Finds blocks in the given column of ``self.exp_plan``.
The relevant column is stored in ``self.blockcol`` which is
given by the user when initializing the experiment class.
Produces a list of trial lists and trial mapping for each block.
Trial mapping indicates where each trial is in the original
`exp_plan` list.
The output is stored in ``self.blocks``.
"""
if self.blockcol is not None:
blocknos = np.array([trial[self.blockcol] for trial in self.exp_plan])
_, idx = np.unique(blocknos, return_index=True)
blocknos = blocknos[idx].tolist()
blocks = [None] * len(blocknos)
for trialno, trial in enumerate(self.exp_plan):
blockno = blocknos.index(trial[self.blockcol])
if blocks[blockno] is None:
blocks[blockno] = [[trial], [trialno]]
else:
blocks[blockno][0].append(trial)
blocks[blockno][1].append(trialno)
else:
blocks = [[self.exp_plan, range(len(self.exp_plan))]]
self.blocks = blocks
def before_task(self, text=None, wait=.5, wait_stim=None, **kwargs):
"""Shows text from docstring explaining the task.
:Kwargs:
- text (str, default: None)
Text to show.
- wait (float, default: .5)
How long to wait after the end of showing instructions,
in seconds.
- wait_stim (stimulus or a list of stimuli, default: None)
During this waiting, which stimuli should be shown.
Usually, it would be a fixation spot.
- \*\*kwargs
Other parameters for :func:`~psychopy_ext.exp.Task.show_text()`
"""
if len(self.parent.tasks) > 1:
# if there are no blocks, try to show fixation
if wait_stim is None:
if len(self.blocks) <= 1:
try:
wait_stim = self.s['fix']
except:
wait = 0
else:
wait = 0
if text is None:
self.show_text(text=self.__doc__, wait=wait,
wait_stim=wait_stim, **kwargs)
else:
self.show_text(text=text, wait=wait,
wait_stim=wait_stim, **kwargs)
def run_task(self):
"""Sets up the task and runs it.
If ``self.blockcol`` is defined, then runs block-by-block.
"""
self.setup_task()
self.before_task()
self.datafile.open()
for blockno, (block, trialmap) in enumerate(self.blocks):
self.this_blockn = blockno
# set TrialHandler only to the current block
self.set_TrialHandler(block, trialmap=trialmap)
self.run_block()
self.datafile.close()
self.after_task()
def after_task(self, text=None, auto=1, **kwargs):
"""Useful for showing feedback after a task is done.
For example, you could display accuracy.
:Kwargs:
- text (str, default: None)
Text to show. If None, this is skipped.
- auto (float, default: 1)
Duration of time-out of the instructions screen,
in seconds.
- \*\*kwargs
Other parameters for :func:`~psychopy_ext.exp.Task.show_text()`
"""
if text is not None:
self.show_text(text, auto=auto, **kwargs)
def before_block(self, text=None, auto=1, wait=.5, wait_stim=None):
"""Show text before the block starts.
Will not show anything if there's only one block.
:Kwargs:
- text (str, default: None)
Text to show. If None, defaults to showing block number.
- wait (float, default: .5)
How long to wait after the end of showing instructions,
in seconds.
- wait_stim (stimulus or a list of stimuli, default: None)
During this waiting, which stimuli should be shown.
Usually, it would be a fixation spot. If None, this
fixation spot will be attempted to be drawn.
- auto (float, default: 1)
Duration of time-out of the instructions screen,
in seconds.
"""
if len(self.blocks) > 1:
if wait_stim is None:
try:
wait_stim = self.s['fix']
except:
pass
if text is None:
self.show_text(text='Block %d' % (self.this_blockn+1),
auto=auto, wait=wait, wait_stim=wait_stim)
else:
self.show_text(text=text, auto=auto, wait=wait, wait_stim=wait_stim)
def run_block(self):
"""Run a block in a task.
"""
self.before_block()
# set up clocks
self.glob_clock = core.Clock()
self.trial_clock = core.Clock()
self.event_clock = core.Clock()
self.cumtime = 0
# go over the trial sequence
for this_trial in self:
self.this_trial = this_trial
self.run_trial()
self.after_block()
def after_block(self, text=None, **kwargs):
"""Show text at the end of a block.
Will not show this text after the last block in the task.
:Kwargs:
- text (str, default: None)
Text to show. If None, will default to
'Pause. Hit ``trigger`` to continue.'
- \*\*kwargs
Other parameters for :func:`~psychopy_ext.exp.Task.show_text()`
"""
# clear trial counting in the terminal
sys.stdout.write('\r' + ' '*70)
sys.stdout.write('\r')
sys.stdout.flush()
if text is None:
text = ('Pause. Hit %s to continue.' %
self.computer.default_keys['trigger'])
# don't show this after the last block
if self.this_blockn+1 < len(self.blocks):
self.show_text(text=text, **kwargs)
def before_trial(self):
"""What to do before trial -- nothing by default.
"""
pass
def run_trial(self):
"""Presents a trial.
"""
self.before_trial()
self.trial_clock.reset()
self.this_trial['onset'] = self.glob_clock.getTime()
sys.stdout.write('\rtrial %s' % (self.thisTrialN+1))
sys.stdout.flush()
self.this_trial['dur'] = 0
for ev in self.trial:
if ev.durcol is not None:
ev.dur = self.this_trial[ev.durcol]
self.this_trial['dur'] += ev.dur
self.all_keys = []
self.rectimes = []
for event_no, this_event in enumerate(self.trial):
self.this_event = this_event
self.event_no = event_no
self.run_event()
# if autorun and responses were not set yet, get them now
if len(self.all_keys) == 0 and self.rp['autorun'] > 0:
| |
that is used for subsequent
actions such as plotting or saving.
These modes can be produced by various processing steps within the
class and include the following
'input' - just the spectrum as it was read in
'smooth' - the smoothed spectrum
'atmcorr' - the spectrum after applying an atmospheric absorption
correction
'respcorr' - the spectrum after applying a response correction
Inputs:
mode - one of the values listed above ('input', 'smooth', etc.)
"""
if mode == 'input':
spec = self
elif mode == 'smooth':
spec = self.smospec
elif mode == 'atmcorr':
spec = self.atmcorr
elif mode == 'respcorr':
spec = self.respcorr
else:
print('')
errstr = 'Invalid mode (%s) for select_mode. See help for ' % mode
errstr += 'allowed values\n\n'
raise ValueError(errstr)
return spec
# -----------------------------------------------------------------------
def __add__(self, other):
"""
Do a variance-weighted sum of this spectrum with another
"""
""" Initialize """
nx = self['wav'].size
mask = self['var'] != 0
wtsum = np.zeros(nx)
wtsum[mask] = 1.0 / self['var'][mask]
wtflux = wtsum * self['flux']
if self.sky:
skysum = self['sky']
else:
skysum = np.zeros(nx)
""" Create the weighted sum """
wt = np.zeros(nx)
mask = other['var'] != 0
wt[mask] = 1.0 / other['var'][mask]
wtflux += wt * other['flux']
if other.sky:
skysum += other['sky']
wtsum += wt
del wt
"""
Normalize the flux, and calculate the variance of the coadded
spectrum.
Note that the equation below for the variance only works for the case
of inverse variance weighting.
"""
wtflux[wtsum == 0] = 0
wtsum[wtsum == 0] = 1
outflux = wtflux / wtsum
outvar = 1.0 / wtsum
if self.sky is None:
outsky = None
else:
outsky = skysum / 2.
""" Return the coadded spectrum as a Spec1d object """
return Spec1d(wav=self['wav'], flux=outflux, var=outvar,
sky=outsky)
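# Worked example of the inverse-variance weighting implemented above:
#   flux_out = sum_i(flux_i / var_i) / sum_i(1 / var_i)
#   var_out  = 1 / sum_i(1 / var_i)
# so combining a pixel with flux=10, var=4 and one with flux=12, var=1 gives
#   flux_out = (10/4 + 12/1) / (1/4 + 1/1) = 14.5 / 1.25 = 11.6
#   var_out  = 1 / 1.25 = 0.8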
# -----------------------------------------------------------------------
def __radd__(self, other):
"""
This is the "reverse add" method that is needed in order to sum
spectra with the sum function
NOTE: This doesn't seem to be working yet
"""
if isinstance(other, (int, float)):
return self
else:
return self.__add__(other)
# -----------------------------------------------------------------------
def make_atm_trans(self, fwhm=15., modfile='default'):
"""
Creates an extension to the class that contains the
transmission of the Earth's atmosphere as a function of wavelength.
For now this is just for the near-infrared (NIR) part of the spectrum,
which is what the default gives, but there is some functionality for
a different transmission spectrum to be provided.
The transmission spectrum is stored as self.atm_trans
Inputs:
fwhm - smoothing parameter for the output spectrum
modfile - the full path+name of the file containing the atmospheric
transmission data. The default location is in the Data
subdirectory contained within the directory in which this
code is found.
"""
""" Read in the atmospheric transmission data"""
if modfile != 'default':
infile = modfile
else:
moddir = '%s' % (os.path.split(__file__)[0])
infile = '%s/Data/atm_trans_maunakea.fits' % moddir
print('Loading atmospheric data from %s' % infile)
try:
atm0 = self.read_file(infile, informat='fitstab')
except IOError:
print('ERROR: Cannot read atmospheric transmission data file')
raise IOError
atm0['wav'] *= 1.0e4
"""
Only use the relevant part of the atmospheric transmission spectrum
"""
w0 = atm0['wav']
w = self['wav']
mask = (w0 >= w.min()) & (w0 <= w.max())
if mask.sum() == 0:
print('')
print('Warning: %s only has data outside the requested wavelength '
'range' % infile)
print(' %8.2f - %8.2f' % (w.min(), w.max()))
self.atm_trans = None
del atm0
raise ValueError
else:
watm = atm0['wav'][mask]
trans = atm0['flux'][mask]
""" Smooth the spectrum """
trans = ndimage.gaussian_filter(trans, fwhm)
""" Resample the smoothed spectrum """
tmpspec = Spec1d(wav=watm, flux=trans)
tmpspec.resample(w)
""" Store result in the atm_trans holder """
self.atm_trans = tmpspec.rsflux
""" Clean up """
del atm0, watm, trans, tmpspec
# -----------------------------------------------------------------------
def plot_atm_trans(self, scale=1., offset=0., ls='-', color='g',
fwhm=15., modfile='default', label='default',
title=None, xlabel=None, ylabel=None,
mode='input'):
"""
Plots the atmospheric transmission for the wavelength range
corresponding to the spectrum contained in this Spec1d instance.
If the transmission spectrum does not yet exist, then the
make_atm_trans method gets called first.
"""
"""
Make the atmospheric transmission spectrum if it doesn't already
exist
"""
if self.atm_trans is None:
try:
self.make_atm_trans(fwhm=fwhm, modfile=modfile)
except IOError:
return
except ValueError:
return
"""
Set the flux array to be used for the scaling based on the passed mode
variable.
The default is to just use the unmodified input spectrum
(mode='input')
"""
spec = self.select_mode(mode)
""" Set some plotting parameters """
if label == 'default':
plabel = 'atmTrans'
else:
plabel = label
""" Now do the plotting """
tmp = self.atm_trans.copy()
tmp *= spec['flux'].max() * scale
tmp += offset
if plabel is not None:
self.ax1.plot(self['wav'], tmp, color, linestyle=ls,
drawstyle='steps', label=plabel)
else:
self.ax1.plot(self['wav'], tmp, color, linestyle=ls,
drawstyle='steps')
""" Label things if requested """
if xlabel:
self.ax1.set_xlabel(xlabel)
if ylabel:
self.ax1.set_ylabel(ylabel)
if title:
self.ax1.set_title(title)
""" Clean up """
del tmp
# -----------------------------------------------------------------------
def atm_corr(self, mode='input', airmass=1.0, atm='model', fwhm=15.,
model='default', airmass_std=1.0):
"""
Does a correction for atmospheric transmission.
For now this is done via the model spectrum
"""
""" Make sure that there is an atmospheric spectrum to use """
if self.atm_trans is None:
if atm == 'model':
""" Make a model spectrum if one doesn't exist"""
self.make_atm_trans(fwhm=fwhm, modfile=model)
""" Scale the atmospheric transmission for airmass(???) """
atmflux = self.atm_trans**airmass
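# Note: raising the transmission to the airmass power assumes atm_trans is
# the transmission at unit airmass; in the plane-parallel approximation
# T(X) = T(1)**X, which is what this line implements.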
atmvar = 0.
# try:
# self.make_atm_trans(fwhm=fwhm, modfile=modfile)
# except IOError:
# raise IOError
# except ValueError:
# raise ValueError
elif atm == 'telluric':
atmflux = model['flux']
atmvar = model['var']
atmflux[atmflux == 0.] = 1.
mask = (atmflux == 0.) | (atmvar <= 0.)
atmvar[mask] = 5. * atmvar.max()
else:
print('')
print('Warning: No atmospheric correction applied')
print('Right now "model" and "telluric" are the only options')
print(' for the atm parameter')
print('')
return
""" Divide the input spectrum by the transmission """
atmcorr = self['flux'] / atmflux
atmcvar = atmcorr**2 * (self['var'] / self['flux']**2 +
atmvar / atmflux**2)
""" Save the output in a Data1d container """
self.atmcorr = df.Data1d(self['wav'], atmcorr, atmcvar,
names=self.names0)
# -----------------------------------------------------------------------
def resp_corr(self, response, mode='input', action='multiply'):
"""
Given a response curve, corrects the spectrum by either multiplying
(the default) or dividing the spectrum by the response curve. The
version of the spectrum to correct is set by the mode parameter.
The result is stored in the respcorr version of the spectrum.
"""
"""
Set the arrays to use based on the passed mode variable.
The default is to just use the unmodified input spectrum
(mode='input')
"""
spec = self.select_mode(mode)
""" Correct the spectrum """
if action == 'divide':
spec['flux'] /= response
spec['var'] /= response**2
else:
spec['flux'] *= response
spec['var'] *= response**2
""" Save the result """
self.respcorr = df.Data1d(self['wav'], spec['flux'], spec['var'],
names=self.names0)
# -----------------------------------------------------------------------
def plot(self, mode='input',
xlabel='Wavelength (Angstroms)', ylabel='Relative Flux',
title='default', docolor=True, color='b', linestyle='solid',
showzero=True, model=None, modcolor='g',
label=None, fontsize=12, rmscolor='r', rmsoffset=0, rmsls=None,
add_atm_trans=False, atmscale=1.05, atmfwhm=15., atmoffset=0.,
atmls='-', atmmodfile='default', usesmooth=False, verbose=True,
fig=None, ax=None, **kwargs):
"""
Plots the spectrum
Inputs:
model - If not None, then plot a model on top of the spectrum.
NOTE: this model must be in the form of an
astropy.modeling model
modcolor - Color to use for the model plot
atmscale
usesmooth
"""
"""
Set the flux and var arrays to use based on the passed mode variable.
The default is to just use the unmodified input spectrum
(mode='input')
"""
if fig is None:
self.fig = plt.figure()
else:
self.fig = fig
if ax is not None:
self.ax1 = ax
else:
self.ax1 = self.fig.add_subplot(111)
if usesmooth:
mode = 'smooth'
spec = self.select_mode(mode)
""" Set the arrays to be plotted """
wav = spec['wav']
flux = spec['flux']
try:
var = spec['var']
except KeyError:
var = None
""" Set the title """
if title == 'default':
if self.infile is None:
title = 'Extracted Spectrum'
else:
title = 'Spectrum for %s' % self.infile
""" Override color assignments if docolor is False"""
if not docolor:
color = 'k'
rmscolor = 'k'
""" Draw the flux=0 line"""
if showzero:
self.ax1.axhline(color='k')
""" Set the label """
if label is not | |
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import copy
import pathlib
import tarfile
from collections import Counter
from urllib.request import Request, urlopen
import numpy as np
"""
Downloads and pre-processes the 20 bAbI tasks. It also augments task 16 as described in the paper.
"""
DEFAULT_DATA_FOLDER = "data_babi"
LONGEST_SAMPLE_LENGTH = 1920
bAbI_URL = 'http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2.tar.gz'
class bAbI():
def __init__(self, config, word_dict=None, re_word_dict=None):
self.seed = config['seed']
self.rng = np.random.RandomState(seed=self.seed)
self.set_type = config['set_type']
if config['task_selection'][0] == 'all':
self.task_selection = [i + 1 for i in range(20)]
else:
self.task_selection = config['task_selection']
self.valid_ratio = config['valid_ratio']
if 'max_len' in config:
self.max_len = config['max_len']
else:
self.max_len = LONGEST_SAMPLE_LENGTH + 1
if 'augment16' in config:
self.augment16 = config['augment16']
else:
self.augment16 = False
if 'data_dir' in config:
data_dir = pathlib.Path(config['data_dir'])
else:
data_dir = pathlib.Path(DEFAULT_DATA_FOLDER)
self.data_dir = self.download_data(data_dir)
self.samples, self.vocabulary = self.process_samples()
def one_hot(i, size):
one_hot = np.zeros(size)
one_hot[i] = 1
return one_hot
if word_dict == None:
v_size = self.vocabulary.__len__()
self.word_dict = {k: one_hot(v, v_size) for v, k in enumerate(sorted(self.vocabulary.keys()))}
self.re_word_dict = {v: k for v, k in enumerate(sorted(self.vocabulary.keys()))}
else:
self.word_dict = word_dict
self.re_word_dict = re_word_dict
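# Illustrative note on the encoding built above: one_hot(i, size) returns a
# length-`size` vector with a single 1 at index i, e.g. one_hot(2, 5) gives
# [0., 0., 1., 0., 0.]. word_dict therefore maps each vocabulary word to such
# a vector and re_word_dict maps the index back to the word, which is what
# decode_output() relies on.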
@staticmethod
def download_data(data_dir):
folder_name = 'tasks_1-20_v1-2'
if (data_dir / folder_name).exists():
data_dir = data_dir / folder_name
if not data_dir.name == folder_name:
data_dir.mkdir(parents=True, exist_ok=True)
print("Download bAbI data")
req = Request(bAbI_URL, headers={'User-Agent': 'Mozilla/5.0'})
with urlopen(req) as files:
with tarfile.open(fileobj=files, mode="r|gz") as tar:
tar.extractall(path=DEFAULT_DATA_FOLDER)
data_dir = data_dir / folder_name
return data_dir
def load_data(self, task_selection):
text_train = []
text_test = []
for task_no in task_selection:
for subset in self.set_type:
if (self.data_dir / subset).exists():
for file_name in (self.data_dir / subset).iterdir():
file_name = file_name.name
task, task_name, set = file_name.split("_")
if task == "qa" + str(task_no):
if set == 'test.txt':
test_set_location = self.data_dir / subset / file_name
if not test_set_location.exists():
raise UserWarning("File not found: {}".format(test_set_location))
with open(str(test_set_location), mode='r') as f:
complete_text = f.readlines()
complete_text = [str(task_no) + ' ' + f for f in complete_text]
text_test += complete_text
elif set == "train.txt":
train_set_location = self.data_dir / subset / file_name
if not train_set_location.exists():
raise UserWarning("File not found: {}".format(train_set_location))
with open(str(train_set_location), mode='r') as f:
complete_text = f.readlines()
complete_text = [str(task_no) + ' ' + f for f in complete_text]
text_train += complete_text
else:
raise UserWarning("Inconsistent bAbI data.")
else:
raise UserWarning("Folder of set type not found, incomplete bAbI data folder or wrong set type")
return text_train, text_test
def process_samples(self):
train_text_list, test_text_list = self.load_data(self.task_selection)
train_samples, train_word_list = self.build_samples(train_text_list, augment16=self.augment16)
test_samples, test_word_list = self.build_samples(test_text_list)
vocabulary = Counter(train_word_list + test_word_list)
train_samples = self.add_mask(train_samples)
test_samples = self.add_mask(test_samples)
valid_amount = int(train_samples.__len__() * self.valid_ratio)
train_samples = self.rng.permutation(train_samples)
samples = {}
samples['train'] = train_samples[valid_amount:]
samples['valid'] = train_samples[:valid_amount]
samples['test'] = test_samples
return samples, vocabulary
def build_samples(self, text_list, augment16=False):
word_list = []
samples = []
tmp_sample_x = []
tmp_sample_y = []
numb_ = 0
sen_x = ''
task = 0
for s in text_list:
# get sentence number
numb = int(s.split()[1])
task = int(s.split()[0])
if numb < numb_:
word_list += tmp_sample_x + tmp_sample_y
if tmp_sample_x.__len__() < self.max_len:
if augment16 and int(task) == 16:
tmp_sample_x, tmp_sample_y = self.augment_task_16(tmp_sample_x, tmp_sample_y)
samples.append({'x': tmp_sample_x, 'y': tmp_sample_y, 'task': int(task)})
tmp_sample_x = []
tmp_sample_y = []
numb_ = numb
else:
numb_ = numb
# remove numbers
sen = ''.join(i for i in s if not i.isdigit())
# remove \n
sen = sen.strip()
sen = sen.replace('.', ' .')
sen = sen.replace('?', ' ?')
sen = sen.lower()
if '\t' in sen:
# question
quest = sen.split("\t")
sen_x_pre = quest[0].split() + ['-' for i in quest[1].split(",")]
sen_y = ['-' for i in range(quest[0].split().__len__())] + quest[1].split(",")
else:
sen_x_pre = sen.split()
sen_y = ['-' for i in range(sen_x_pre.__len__())]
if not 'sen_x' in locals():
sen_x = sen_x_pre
elif sen_x_pre == sen_x:
pass
else:
sen_x = sen_x_pre
tmp_sample_x += sen_x_pre
tmp_sample_y += sen_y
word_list += tmp_sample_x + tmp_sample_y
if augment16 and int(task) == 16:
tmp_sample_x, tmp_sample_y = self.augment_task_16(tmp_sample_x, tmp_sample_y)
samples.append({'x': tmp_sample_x, 'y': tmp_sample_y, 'task': int(task)})
return samples, word_list
@staticmethod
def add_mask(samples):
for sample in samples:
y = sample['y']
m = [np.where(_y == '-', False, True) for _y in y]
sample['m'] = np.asarray(m)
return samples
def augment_task_16(self, sample_x, sample_y):
colores = ['white', 'green', 'gray', 'yellow']
animals = ['lion', 'rhino', 'swan', 'frog']
first_numb, second_numb = -1, -1
first_word, second_word, third_word = '', '', ''
x = sample_x
x = ' '.join(x)
x = x.split('.')
sa = np.asarray(x)
if sa.shape[0] == 10: # some samples are longer but even distributed
sa = np.reshape(sa, (10, 1))
s_list = []
for i in range(10):
if i != 9:
s_list.append([x for x in sa[i, 0].split(' ') if x != ''] + ['.'])
else:
s_list.append([x for x in sa[i, 0].split(' ') if x != ''])
vec = np.zeros((10)).astype(int)
quest_word = s_list[-1][3]
for i in range(9):
if quest_word in s_list[i]:
first_word = s_list[i][3]
vec[i] = 1
first_numb = i
for i in range(9):
if first_word == s_list[i][3] and i != first_numb:
second_word = s_list[i][0]
vec[i] = 2
second_numb = i
for i in range(9):
if second_word == s_list[i][0] and i != second_numb:
third_word = s_list[i][2]
vec[i] = 3
used_animal_list = []
used_animal_list.append(first_word)
for i in range(9):
word = s_list[i][3]
if vec[i] == 0 and word in animals:
if word in used_animal_list:
available_animals = [x for x in animals if x not in used_animal_list]
sub_animal = self.rng.choice(available_animals)
s_list[i][3] = sub_animal
used_animal_list.append(sub_animal)
else:
used_animal_list.append(word)
# replace double color
used_color_list = []
used_color_list.append(third_word)
for i in range(9):
word = s_list[i][2]
if vec[i] == 0 and word in colors:
if word in used_color_list:
available_colors = [x for x in colors if x not in used_color_list]
sub_color = self.rng.choice(available_colors)
s_list[i][2] = sub_color
used_color_list.append(sub_color)
else:
used_color_list.append(word)
sample_x = [item for sublist in s_list for item in sublist]
return sample_x, sample_y
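# Sketch of the intent of augment_task_16 (an interpretation, not from the
# original comments): task 16 stories chain "X is a <animal>", "Y is a <animal>",
# "Y is <color>" facts, and the answer is reached through the animal and color
# that repeat along that chain. The loops above mark the chain's sentences in
# 'vec' and then replace any *other* repeated animal or color in distractor
# sentences (vec[i] == 0) with an unused one, e.g. in
#     "greg is a swan . lily is a swan . lily is white . bernhard is a swan ."
# bernhard's "swan" would be swapped for an unused animal such as "frog".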
def get_sample(self, set, number):
sample = copy.deepcopy(self.samples[set][number])
sample['x_word'] = copy.deepcopy(sample['x'])
new_x = []
new_y = []
for x_word, y_word in zip(sample['x'], sample['y']):
new_x.append(self.word_dict[x_word])
new_y.append(self.word_dict[y_word])
sample['x'] = np.stack(new_x, axis=0)
sample['y'] = np.stack(new_y, axis=0)
return sample
def patch_batch(self, list_of_samples):
batch = {'x': [], 'y': [], 'm': [], 'x_word': []}
lengths = []
for sample in list_of_samples:
lengths.append(sample['x'].shape[0])
batch['x_word'].append(sample['x_word'])
max_len = np.max(lengths)
for sample in list_of_samples:
cur_len = sample['x'].shape[0]
if cur_len < max_len:
add_len = max_len - cur_len
x_add = np.zeros([add_len, self.x_size])
batch['x'].append(np.concatenate([sample['x'], x_add], axis=0))
y_add = np.zeros([add_len, self.y_size])
batch['y'].append(np.concatenate([sample['y'], y_add], axis=0))
m_add = np.zeros([add_len])
batch['m'].append(np.concatenate([sample['m'], m_add], axis=0))
else:
for key in ['x', 'y', 'm']:
batch[key].append(sample[key])
for key in ['x', 'y', 'm']:
batch[key] = np.stack(batch[key], axis=0)
batch['x'] = np.transpose(batch['x'], axes=(1, 0, 2))
batch['y'] = np.transpose(batch['y'], axes=(1, 0, 2))
batch['m'] = np.transpose(batch['m'], axes=(1, 0))
return batch
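# Illustration (not part of the original code): patch_batch zero-pads every
# sample to the longest sequence in the batch, stacks them, and transposes to
# time-major layout, so for B samples with maximum length T the shapes are
#     x: (T, B, x_size),  y: (T, B, y_size),  m: (T, B)
# with padded positions carrying m == 0 and therefore ignored downstream.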
def decode_output(self, sample, prediction):
if len(prediction.shape) == 3:
prediction_decode_list = []
target_decode_list = []
for b in range(prediction.shape[1]):
target_decode_list.append(
[self.re_word_dict[np.argmax(sample['y'][i, b, :])] for i in range(sample['y'].shape[0])])
prediction_decode_list.append(
[self.re_word_dict[np.argmax(prediction[i, b, :])] for i in range(prediction.shape[0])])
return target_decode_list, prediction_decode_list
else:
target_decode = [self.re_word_dict[np.argmax(sample['y'][i, :])] for i in range(sample['y'].shape[0])]
prediction_decode = [self.re_word_dict[np.argmax(prediction[i, :])] for i in range(prediction.shape[0])]
return target_decode, prediction_decode
@property
def vocabulary_size(self):
return len(self.word_dict)
@property
def x_size(self):
return self.vocabulary_size
@property
def y_size(self):
return self.vocabulary_size
def sample_amount(self, set, max_len=False):
if max_len != False:
lengths = [len(sample['x']) for sample in self.samples[set]]
return sum(np.asarray(lengths) <= max_len)
else:
return len(self.samples[set])
if __name__ == '__main__':
config = {'set_type': ['en-10k'], 'task_selection': ['all'], 'valid_ratio': 0, 'seed': 211}
print(" 20 bAbI Tasks - Statistics")
print(
"________________________________________________________________________________________________________________")
total_len = []
total_sum = 0
for s in range(20):
config['task_selection'] = [s + 1]
sd = bAbI(config)
samples = [sd.get_sample('train', i) for i in range(sd.sample_amount('train'))]
len = [sample['x'].__len__() for sample in samples]
len = np.asarray(len)
quest_per_sample = [sample['m'].sum() for sample in samples]
quest_per_sample = np.mean(quest_per_sample)
vocab_size = sd.vocabulary_size
print("\033[96m task: {:3}\033[0m, samples: {:5}, quest_per_sample {:5.3f}, vocab_size: {:3}, min len: {:3.0f},"
"
ball_radius)",
leaf_found_op)
+ TRAVERSAL_PREAMBLE_MAKO_DEFS
+ GUIDING_BOX_FINDER_MACRO
+ AREA_QUERY_WALKER_BODY,
name=name,
preamble=preamble)
def generate(self, context,
dimensions, coord_dtype, box_id_dtype,
peer_list_idx_dtype, max_levels,
extra_var_values=(), extra_type_aliases=(),
extra_preamble=""):
from pyopencl.tools import dtype_to_ctype
from boxtree import box_flags_enum
from boxtree.traversal import TRAVERSAL_PREAMBLE_TYPEDEFS_AND_DEFINES
from boxtree.tree_build import TreeBuilder
render_vars = (
("np", np),
("dimensions", dimensions),
("dtype_to_ctype", dtype_to_ctype),
("box_id_dtype", box_id_dtype),
("particle_id_dtype", None),
("coord_dtype", coord_dtype),
("vec_types", tuple(cl.cltypes.vec_types.items())),
("max_levels", max_levels),
("AXIS_NAMES", AXIS_NAMES),
("box_flags_enum", box_flags_enum),
("peer_list_idx_dtype", peer_list_idx_dtype),
("debug", False),
("root_extent_stretch_factor", TreeBuilder.ROOT_EXTENT_STRETCH_FACTOR),
)
preamble = Template(
# HACK: box_flags_t and coord_t are defined here and
# in the template below, so disable typedef redefinition warnings.
"""
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wtypedef-redefinition"
"""
+ TRAVERSAL_PREAMBLE_TYPEDEFS_AND_DEFINES
+ """
#pragma clang diagnostic pop
""",
strict_undefined=True).render(**dict(render_vars))
return self.elwise_template.build(context,
type_aliases=(
("coord_t", coord_dtype),
("box_id_t", box_id_dtype),
("peer_list_idx_t", peer_list_idx_dtype),
("box_level_t", np.uint8),
("box_flags_t", box_flags_enum.dtype),
) + extra_type_aliases,
var_values=render_vars + extra_var_values,
more_preamble=preamble + extra_preamble)
SPACE_INVADER_QUERY_TEMPLATE = AreaQueryElementwiseTemplate(
extra_args="""
coord_t *ball_radii,
float *outer_space_invader_dists,
%for ax in AXIS_NAMES[:dimensions]:
coord_t *ball_${ax},
%endfor
""",
ball_center_and_radius_expr=r"""
${ball_radius} = ball_radii[${i}];
%for ax in AXIS_NAMES[:dimensions]:
${ball_center}.${ax} = ball_${ax}[${i}];
%endfor
""",
leaf_found_op=r"""
{
${load_center("leaf_center", leaf_box_id)}
coord_t max_dist = 0;
%for i in range(dimensions):
max_dist = fmax(max_dist,
distance(${ball_center}.s${i}, leaf_center.s${i}));
%endfor
// The atomic max operation supports only integer types.
// However, max_dist is of a floating point type.
// For comparison purposes we reinterpret the bits of max_dist
// as an integer. The comparison result is the same as for positive
// IEEE floating point numbers, so long as the float/int endianness
// matches (fingers crossed).
atomic_max(
(volatile __global int *)
&outer_space_invader_dists[${leaf_box_id}],
as_int((float) max_dist));
}""",
name="space_invader_query")
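# Illustration (not part of boxtree): the atomic_max trick in the kernel above
# relies on the fact that, for non-negative IEEE-754 floats, the bit pattern
# reinterpreted as a signed int orders the same way as the float itself. A
# hypothetical host-side helper showing the same reinterpretation:
def _float_bits_as_int(x):
    """Reinterpret a 32-bit float's bits as a signed int (illustration only)."""
    import struct
    return struct.unpack("<i", struct.pack("<f", float(x)))[0]
# e.g. _float_bits_as_int(0.5) < _float_bits_as_int(2.0) < _float_bits_as_int(1e9)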
# }}}
# {{{ area query build
class AreaQueryBuilder(object):
r"""Given a set of :math:`l^\infty` "balls", this class helps build a
look-up table from ball to leaf boxes that intersect with the ball.
.. versionadded:: 2016.1
.. automethod:: __call__
"""
def __init__(self, context):
self.context = context
self.peer_list_finder = PeerListFinder(self.context)
# {{{ Kernel generation
@memoize_method
def get_area_query_kernel(self, dimensions, coord_dtype, box_id_dtype,
ball_id_dtype, peer_list_idx_dtype, max_levels):
from pyopencl.tools import dtype_to_ctype
from boxtree import box_flags_enum
logger.debug("start building area query kernel")
from boxtree.traversal import TRAVERSAL_PREAMBLE_TEMPLATE
from boxtree.tree_build import TreeBuilder
template = Template(
TRAVERSAL_PREAMBLE_TEMPLATE
+ AREA_QUERY_TEMPLATE,
strict_undefined=True)
render_vars = dict(
np=np,
dimensions=dimensions,
dtype_to_ctype=dtype_to_ctype,
box_id_dtype=box_id_dtype,
particle_id_dtype=None,
coord_dtype=coord_dtype,
vec_types=cl.cltypes.vec_types,
max_levels=max_levels,
AXIS_NAMES=AXIS_NAMES,
box_flags_enum=box_flags_enum,
peer_list_idx_dtype=peer_list_idx_dtype,
ball_id_dtype=ball_id_dtype,
debug=False,
root_extent_stretch_factor=TreeBuilder.ROOT_EXTENT_STRETCH_FACTOR)
from boxtree.tools import VectorArg, ScalarArg
arg_decls = [
VectorArg(coord_dtype, "box_centers", with_offset=False),
ScalarArg(coord_dtype, "root_extent"),
VectorArg(np.uint8, "box_levels"),
ScalarArg(box_id_dtype, "aligned_nboxes"),
VectorArg(box_id_dtype, "box_child_ids", with_offset=False),
VectorArg(box_flags_enum.dtype, "box_flags"),
VectorArg(peer_list_idx_dtype, "peer_list_starts"),
VectorArg(box_id_dtype, "peer_lists"),
VectorArg(coord_dtype, "ball_radii"),
] + [
ScalarArg(coord_dtype, "bbox_min_"+ax)
for ax in AXIS_NAMES[:dimensions]
] + [
VectorArg(coord_dtype, "ball_"+ax)
for ax in AXIS_NAMES[:dimensions]]
from pyopencl.algorithm import ListOfListsBuilder
area_query_kernel = ListOfListsBuilder(
self.context,
[("leaves", box_id_dtype)],
str(template.render(**render_vars)),
arg_decls=arg_decls,
name_prefix="area_query",
count_sharing={},
complex_kernel=True)
logger.debug("done building area query kernel")
return area_query_kernel
# }}}
def __call__(self, queue, tree, ball_centers, ball_radii, peer_lists=None,
wait_for=None):
"""
:arg queue: a :class:`pyopencl.CommandQueue`
:arg tree: a :class:`boxtree.Tree`.
:arg ball_centers: an object array of coordinate
:class:`pyopencl.array.Array` instances.
Their *dtype* must match *tree*'s
:attr:`boxtree.Tree.coord_dtype`.
:arg ball_radii: a
:class:`pyopencl.array.Array`
of positive numbers.
Its *dtype* must match *tree*'s
:attr:`boxtree.Tree.coord_dtype`.
:arg peer_lists: may either be *None* or an instance of
:class:`PeerListLookup` associated with `tree`.
:arg wait_for: may either be *None* or a list of :class:`pyopencl.Event`
instances for whose completion this command waits before starting
execution.
:returns: a tuple *(aq, event)*, where *aq* is an instance of
:class:`AreaQueryResult`, and *event* is a :class:`pyopencl.Event`
for dependency management.
"""
from pytools import single_valued
if single_valued(bc.dtype for bc in ball_centers) != tree.coord_dtype:
raise TypeError("ball_centers dtype must match tree.coord_dtype")
if ball_radii.dtype != tree.coord_dtype:
raise TypeError("ball_radii dtype must match tree.coord_dtype")
ball_id_dtype = tree.particle_id_dtype # ?
from pytools import div_ceil
# Avoid generating too many kernels.
max_levels = div_ceil(tree.nlevels, 10) * 10
if peer_lists is None:
peer_lists, evt = self.peer_list_finder(queue, tree, wait_for=wait_for)
wait_for = [evt]
if len(peer_lists.peer_list_starts) != tree.nboxes + 1:
raise ValueError("size of peer lists must match with number of boxes")
area_query_kernel = self.get_area_query_kernel(tree.dimensions,
tree.coord_dtype, tree.box_id_dtype, ball_id_dtype,
peer_lists.peer_list_starts.dtype, max_levels)
aq_plog = ProcessLogger(logger, "area query")
result, evt = area_query_kernel(
queue, len(ball_radii),
tree.box_centers.data, tree.root_extent,
tree.box_levels, tree.aligned_nboxes,
tree.box_child_ids.data, tree.box_flags,
peer_lists.peer_list_starts,
peer_lists.peer_lists, ball_radii,
*(tuple(tree.bounding_box[0])
+ tuple(bc for bc in ball_centers)),
wait_for=wait_for)
aq_plog.done()
return AreaQueryResult(
tree=tree,
leaves_near_ball_starts=result["leaves"].starts,
leaves_near_ball_lists=result["leaves"].lists).with_queue(None), evt
# }}}
# {{{ area query transpose (leaves-to-balls) lookup build
class LeavesToBallsLookupBuilder(object):
r"""Given a set of :math:`l^\infty` "balls", this class helps build a
look-up table from leaf boxes to balls that overlap with each leaf box.
.. automethod:: __call__
"""
def __init__(self, context):
self.context = context
from pyopencl.algorithm import KeyValueSorter
self.key_value_sorter = KeyValueSorter(context)
self.area_query_builder = AreaQueryBuilder(context)
@memoize_method
def get_starts_expander_kernel(self, idx_dtype):
"""
Expands a "starts" array into a length starts[-1] array of increasing
indices:
Eg: [0 2 5 6] => [0 0 1 1 1 2]
"""
return STARTS_EXPANDER_TEMPLATE.build(
self.context,
type_aliases=(("idx_t", idx_dtype),))
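# Illustration (not part of boxtree): a CPU equivalent of the expansion the
# kernel built above performs, assuming numpy is available:
#     starts = np.array([0, 2, 5, 6])
#     expanded = np.repeat(np.arange(len(starts) - 1), np.diff(starts))
#     # expanded -> array([0, 0, 1, 1, 1, 2])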
def __call__(self, queue, tree, ball_centers, ball_radii, peer_lists=None,
wait_for=None):
"""
:arg queue: a :class:`pyopencl.CommandQueue`
:arg tree: a :class:`boxtree.Tree`.
:arg ball_centers: an object array of coordinate
:class:`pyopencl.array.Array` instances.
Their *dtype* must match *tree*'s
:attr:`boxtree.Tree.coord_dtype`.
:arg ball_radii: a
:class:`pyopencl.array.Array`
of positive numbers.
Its *dtype* must match *tree*'s
:attr:`boxtree.Tree.coord_dtype`.
:arg peer_lists: may either be *None* or an instance of
:class:`PeerListLookup` associated with `tree`.
:arg wait_for: may either be *None* or a list of :class:`pyopencl.Event`
instances for whose completion this command waits before starting
execution.
:returns: a tuple *(lbl, event)*, where *lbl* is an instance of
:class:`LeavesToBallsLookup`, and *event* is a :class:`pyopencl.Event`
for dependency management.
"""
from pytools import single_valued
if single_valued(bc.dtype for bc in ball_centers) != tree.coord_dtype:
raise TypeError("ball_centers dtype must match tree.coord_dtype")
if ball_radii.dtype != tree.coord_dtype:
raise TypeError("ball_radii dtype must match tree.coord_dtype")
ltb_plog = ProcessLogger(logger, "leaves-to-balls lookup: run area query")
area_query, evt = self.area_query_builder(
queue, tree, ball_centers, ball_radii, peer_lists, wait_for)
wait_for = [evt]
logger.debug("leaves-to-balls lookup: expand starts")
nkeys = tree.nboxes
nballs_p_1 = len(area_query.leaves_near_ball_starts)
assert nballs_p_1 == len(ball_radii) + 1
# We invert the area query in two steps:
#
# 1. Turn the area query result into (ball number, box number) pairs.
# This is done in the "starts expander kernel."
#
# 2. Key-value sort the (ball number, box number) pairs by box number.
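# For example (illustration, made-up numbers): with
# leaves_near_ball_starts = [0, 2, 3] and leaves_near_ball_lists = [7, 9, 4],
# ball 0 touches leaf boxes 7 and 9 and ball 1 touches leaf box 4; step 1
# expands the starts into ball numbers [0, 0, 1], and step 2 key-value sorts
# the pairs (7, 0), (9, 0), (4, 1) by box number, giving each box its list of
# intersecting balls.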
starts_expander_knl = self.get_starts_expander_kernel(tree.box_id_dtype)
expanded_starts = cl.array.empty(
queue, len(area_query.leaves_near_ball_lists), tree.box_id_dtype)
evt = starts_expander_knl(
expanded_starts,
area_query.leaves_near_ball_starts.with_queue(queue),
nballs_p_1)
wait_for = [evt]
logger.debug("leaves-to-balls lookup: key-value sort")
balls_near_box_starts, balls_near_box_lists, evt \
= self.key_value_sorter(
queue,
# keys
area_query.leaves_near_ball_lists.with_queue(queue),
# values
expanded_starts,
nkeys, starts_dtype=tree.box_id_dtype,
wait_for=wait_for)
ltb_plog.done()
return LeavesToBallsLookup(
tree=tree,
balls_near_box_starts=balls_near_box_starts,
balls_near_box_lists=balls_near_box_lists).with_queue(None), evt
# }}}
# {{{ space invader query build
class SpaceInvaderQueryBuilder(object):
r"""
Given a set of :math:`l^\infty` "balls", this class helps build a look-up
table which maps leaf boxes to the *outer space invader distance*.
This is defined below but roughly, from the point of view
of a leaf box, it is the farthest "leaf center to ball center" distance among
all balls that intersect the leaf box.
Formally, given a leaf box :math:`b`, the *outer space invader distance* is
defined by the following expression (here :math:`d_\infty` is the
:math:`\infty` norm):
.. math::
\max \left( \{ d_{\infty}(\text{center}(b), \text{center}(b^*))
: b^* \text{ is a ball}, b^* \cap b \neq \varnothing \}
\cup \{ 0 \} \right)
.. automethod:: __call__
"""
def __init__(self, context):
self.context = context
self.peer_list_finder = PeerListFinder(self.context)
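# Brute-force sketch of the quantity defined above (illustration only; assumes
# hypothetical dense numpy arrays leaf_centers, leaf_radii, ball_centers,
# ball_radii, with "radii" meaning l^infty half-widths):
#     dist = np.max(np.abs(leaf_centers[:, None, :] - ball_centers[None, :, :]), axis=-1)
#     intersects = dist <= leaf_radii[:, None] + ball_radii[None, :]
#     sqi = np.max(np.where(intersects, dist, 0.0), axis=1)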
# {{{ Kernel generation
@memoize_method
def get_space_invader_query_kernel(self, dimensions, coord_dtype,
box_id_dtype, peer_list_idx_dtype, max_levels):
return SPACE_INVADER_QUERY_TEMPLATE.generate(
self.context,
dimensions,
coord_dtype,
box_id_dtype,
peer_list_idx_dtype,
max_levels)
# }}}
def __call__(self, queue, tree, ball_centers, ball_radii, peer_lists=None,
wait_for=None):
"""
:arg queue: a :class:`pyopencl.CommandQueue`
:arg tree: a :class:`boxtree.Tree`.
:arg ball_centers: an object array of coordinate
:class:`pyopencl.array.Array` instances.
Their *dtype* must match *tree*'s
:attr:`boxtree.Tree.coord_dtype`.
:arg ball_radii: a
:class:`pyopencl.array.Array`
of positive numbers.
Its *dtype* must match *tree*'s
:attr:`boxtree.Tree.coord_dtype`.
:arg peer_lists: may either be *None* or an instance of
:class:`PeerListLookup` associated with `tree`.
:arg wait_for: may either be *None* or a list of :class:`pyopencl.Event`
instances for whose completion this command waits before starting
execution.
:returns: a tuple *(sqi, event)*, where *sqi* is an instance of
:class:`pyopencl.array.Array`, and *event* is a :class:`pyopencl.Event`
for dependency management. The *dtype* of *sqi* is
*tree*'s :attr:`boxtree.Tree.coord_dtype` and its shape is
*(tree.nboxes,)* (see :attr:`boxtree.Tree.nboxes`).
The entries of *sqi* are indexed by the global box index and are
as follows:
* if *i* is not the index of a leaf box, *sqi[i] = 0*.
* if *i* is the index of a leaf box, *sqi[i]* is the outer space invader distance of leaf box *i*.
#scrub angle
rom.write_bytes(0xECAE90, [0x27, 0x18, 0xFD, 0x04]) #skip straight to giving item
rom.write_bytes(0xECB618, [0x25, 0x6B, 0x00, 0xD4]) #skip straight to digging back in
rom.write_bytes(0xECAE70, [0x00, 0x00, 0x00, 0x00]) #never initialize cs camera
rom.write_bytes(0xE5972C, [0x24, 0x08, 0x00, 0x01]) #timer set to 1 frame for giving item
# Remove remaining owls
rom.write_bytes(0x1FE30CE, [0x01, 0x4B])
rom.write_bytes(0x1FE30DE, [0x01, 0x4B])
rom.write_bytes(0x1FE30EE, [0x01, 0x4B])
rom.write_bytes(0x205909E, [0x00, 0x3F])
rom.write_byte(0x2059094, 0x80)
# Darunia won't dance
rom.write_bytes(0x22769E4, [0xFF, 0xFF, 0xFF, 0xFF])
# Zora moves quickly
rom.write_bytes(0xE56924, [0x00, 0x00, 0x00, 0x00])
# Speed Jabu Jabu swallowing Link
rom.write_bytes(0xCA0784, [0x00, 0x18, 0x00, 0x01, 0x00, 0x02, 0x00, 0x02])
# Ruto no longer points to Zora Sapphire
rom.write_bytes(0xD03BAC, [0xFF, 0xFF, 0xFF, 0xFF])
# Ruto never disappears from Jabu Jabu's Belly
rom.write_byte(0xD01EA3, 0x00)
#Shift octorock in jabu forward
rom.write_bytes(0x275906E, [0xFF, 0xB3, 0xFB, 0x20, 0xF9, 0x56])
#Move fire/forest temple switches down 1 unit to make them easier to press
rom.write_bytes(0x24860A8, [0xFC, 0xF4]) #forest basement 1
rom.write_bytes(0x24860C8, [0xFC, 0xF4]) #forest basement 2
rom.write_bytes(0x24860E8, [0xFC, 0xF4]) #forest basement 3
rom.write_bytes(0x236C148, [0x11, 0x93]) #fire hammer room
# Speed up Epona race start
rom.write_bytes(0x29BE984, [0x00, 0x00, 0x00, 0x02])
rom.write_bytes(0x29BE9CA, [0x00, 0x01, 0x00, 0x02])
# Speed start of Horseback Archery
#rom.write_bytes(0x21B2064, [0x00, 0x00, 0x00, 0x02])
#rom.write_bytes(0x21B20AA, [0x00, 0x01, 0x00, 0x02])
# Speed up Epona escape
rom.write_bytes(0x1FC8B36, [0x00, 0x2A])
# Speed up draining the well
rom.write_bytes(0xE0A010, [0x00, 0x2A, 0x00, 0x01, 0x00, 0x02, 0x00, 0x02])
rom.write_bytes(0x2001110, [0x00, 0x2B, 0x00, 0xB7, 0x00, 0xB8, 0x00, 0xB8])
# Speed up opening the royal tomb for both child and adult
rom.write_bytes(0x2025026, [0x00, 0x01])
rom.write_bytes(0x2023C86, [0x00, 0x01])
rom.write_byte(0x2025159, 0x02)
rom.write_byte(0x2023E19, 0x02)
#Speed opening of Door of Time
rom.write_bytes(0xE0A176, [0x00, 0x02])
rom.write_bytes(0xE0A35A, [0x00, 0x01, 0x00, 0x02])
# Speed up Lake Hylia Owl Flight
rom.write_bytes(0x20E60D2, [0x00, 0x01])
# Speed up Death Mountain Trail Owl Flight
rom.write_bytes(0x223B6B2, [0x00, 0x01])
# Poacher's Saw no longer messes up Forest Stage
rom.write_bytes(0xAE72CC, [0x00, 0x00, 0x00, 0x00])
# Change Prelude CS to check for medallion
rom.write_bytes(0x00C805E6, [0x00, 0xA6])
rom.write_bytes(0x00C805F2, [0x00, 0x01])
# Change Nocturne CS to check for medallions
rom.write_bytes(0x00ACCD8E, [0x00, 0xA6])
rom.write_bytes(0x00ACCD92, [0x00, 0x01])
rom.write_bytes(0x00ACCD9A, [0x00, 0x02])
rom.write_bytes(0x00ACCDA2, [0x00, 0x04])
# Change King Zora to move even if Zora Sapphire is in inventory
rom.write_bytes(0x00E55BB0, [0x85, 0xCE, 0x8C, 0x3C])
rom.write_bytes(0x00E55BB4, [0x84, 0x4F, 0x0E, 0xDA])
# Remove extra Forest Temple medallions
rom.write_bytes(0x00D4D37C, [0x00, 0x00, 0x00, 0x00])
# Remove extra Fire Temple medallions
rom.write_bytes(0x00AC9754, [0x00, 0x00, 0x00, 0x00])
rom.write_bytes(0x00D0DB8C, [0x00, 0x00, 0x00, 0x00])
# Remove extra Water Temple medallions
rom.write_bytes(0x00D57F94, [0x00, 0x00, 0x00, 0x00])
# Remove extra Spirit Temple medallions
rom.write_bytes(0x00D370C4, [0x00, 0x00, 0x00, 0x00])
rom.write_bytes(0x00D379C4, [0x00, 0x00, 0x00, 0x00])
# Remove extra Shadow Temple medallions
rom.write_bytes(0x00D116E0, [0x00, 0x00, 0x00, 0x00])
# Change Mido, Saria, and Kokiri to check for Deku Tree complete flag
# bitwise pointer for 0x80
kokiriAddresses = [0xE52836, 0xE53A56, 0xE51D4E, 0xE51F3E, 0xE51D96, 0xE51E1E, 0xE51E7E, 0xE51EDE, 0xE51FC6, 0xE51F96, 0xE293B6, 0xE29B8E, 0xE62EDA, 0xE630D6, 0xE633AA, 0xE6369E]
for kokiri in kokiriAddresses:
rom.write_bytes(kokiri, [0x8C, 0x0C])
# Kokiri
rom.write_bytes(0xE52838, [0x94, 0x48, 0x0E, 0xD4])
rom.write_bytes(0xE53A58, [0x94, 0x49, 0x0E, 0xD4])
rom.write_bytes(0xE51D50, [0x94, 0x58, 0x0E, 0xD4])
rom.write_bytes(0xE51F40, [0x94, 0x4B, 0x0E, 0xD4])
rom.write_bytes(0xE51D98, [0x94, 0x4B, 0x0E, 0xD4])
rom.write_bytes(0xE51E20, [0x94, 0x4A, 0x0E, 0xD4])
rom.write_bytes(0xE51E80, [0x94, 0x59, 0x0E, 0xD4])
rom.write_bytes(0xE51EE0, [0x94, 0x4E, 0x0E, 0xD4])
rom.write_bytes(0xE51FC8, [0x94, 0x49, 0x0E, 0xD4])
rom.write_bytes(0xE51F98, [0x94, 0x58, 0x0E, 0xD4])
# Saria
rom.write_bytes(0xE293B8, [0x94, 0x78, 0x0E, 0xD4])
rom.write_bytes(0xE29B90, [0x94, 0x68, 0x0E, 0xD4])
# Mido
rom.write_bytes(0xE62EDC, [0x94, 0x6F, 0x0E, 0xD4])
rom.write_bytes(0xE630D8, [0x94, 0x4F, 0x0E, 0xD4])
rom.write_bytes(0xE633AC, [0x94, 0x68, 0x0E, 0xD4])
rom.write_bytes(0xE636A0, [0x94, 0x48, 0x0E, 0xD4])
# Change adult Kokiri Forest to check for Forest Temple complete flag
rom.write_bytes(0xE5369E, [0xB4, 0xAC])
rom.write_bytes(0xD5A83C, [0x80, 0x49, 0x0E, 0xDC])
# Change adult Goron City to check for Fire Temple complete flag
rom.write_bytes(0xED59DC, [0x80, 0xC9, 0x0E, 0xDC])
# Change Pokey to check DT complete flag
rom.write_bytes(0xE5400A, [0x8C, 0x4C])
rom.write_bytes(0xE5400E, [0xB4, 0xA4])
if world.settings.open_forest != 'closed':
rom.write_bytes(0xE5401C, [0x14, 0x0B])
# Fix Shadow Temple to check for different rewards for scene
rom.write_bytes(0xCA3F32, [0x00, 0x00, 0x25, 0x4A, 0x00, 0x10])
# Fix Spirit Temple to check for different rewards for scene
rom.write_bytes(0xCA3EA2, [0x00, 0x00, 0x25, 0x4A, 0x00, 0x08])
# Fix Biggoron to check a different flag.
rom.write_byte(0xED329B, 0x72)
rom.write_byte(0xED43E7, 0x72)
rom.write_bytes(0xED3370, [0x3C, 0x0D, 0x80, 0x12])
rom.write_bytes(0xED3378, [0x91, 0xB8, 0xA6, 0x42, 0xA1, 0xA8, 0xA6, 0x42])
rom.write_bytes(0xED6574, [0x00, 0x00, 0x00, 0x00])
# Remove the check on the number of days that passed for claim check.
rom.write_bytes(0xED4470, [0x00, 0x00, 0x00, 0x00])
rom.write_bytes(0xED4498, [0x00, 0x00, 0x00, 0x00])
# Fixed reward order for Bombchu Bowling
rom.write_bytes(0xE2E698, [0x80, 0xAA, 0xE2, 0x64])
rom.write_bytes(0xE2E6A0, [0x80, 0xAA, 0xE2, 0x4C])
rom.write_bytes(0xE2D440, [0x24, 0x19, 0x00, 0x00])
# Offset kakariko carpenter starting position
rom.write_bytes(0x1FF93A4, [0x01, 0x8D, 0x00, 0x11, 0x01, 0x6C, 0xFF, 0x92, 0x00, 0x00, 0x01, 0x78, 0xFF, 0x2E, 0x00, 0x00, 0x00, 0x03, 0xFD, 0x2B, 0x00, 0xC8, 0xFF, 0xF9, 0xFD, 0x03, 0x00, 0xC8, 0xFF, 0xA9, 0xFD, 0x5D, 0x00, 0xC8, 0xFE, 0x5F]) # re order the carpenter's path
rom.write_byte(0x1FF93D0, 0x06) # set the path points to 6
rom.write_bytes(0x20160B6, [0x01, 0x8D, 0x00, 0x11, 0x01, 0x6C]) # set the carpenter's start position
# Give hp after first ocarina minigame round
rom.write_bytes(0xDF2204, [0x24, 0x03, 0x00, 0x02])
# Allow owl to always carry the kid down Death Mountain
rom.write_bytes(0xE304F0, [0x24, 0x0E, 0x00, 0x01])
# Fix Vanilla Dodongo's Cavern Gossip Stone to not use a permanent flag for the fairy
if not world.dungeon_mq['Dodongos Cavern']:
rom.write_byte(0x1F281FE, 0x38)
# Fix "...???" textbox outside Child Colossus Fairy to use the right flag and disappear once the wall is destroyed
rom.write_byte(0x21A026F, 0xDD)
# Remove the "...???" textbox outside the Crater Fairy (change it to an actor that does nothing)
rom.write_int16s(0x225E7DC, [0x00B5, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF])
# Forbid Sun's Song from a bunch of cutscenes
Suns_scenes = [0x2016FC9, 0x2017219, 0x20173D9, 0x20174C9, 0x2017679, 0x20C1539, 0x20C15D9, 0x21A0719, 0x21A07F9, 0x2E90129, 0x2E901B9, 0x2E90249, 0x225E829, 0x225E939, 0x306D009]
for address in Suns_scenes:
rom.write_byte(address,0x01)
# Allow Warp Songs in additional places
rom.write_byte(0xB6D3D2, 0x00) # Gerudo Training Ground
rom.write_byte(0xB6D42A, 0x00) # Inside Ganon's Castle
#Tell Sheik at Ice Cavern we are always an Adult
rom.write_int32(0xC7B9C0, 0x00000000)
rom.write_int32(0xC7BAEC, 0x00000000)
rom.write_int32(0xC7BCA4, 0x00000000)
# Allow Farore's Wind in dungeons where it's normally forbidden
rom.write_byte(0xB6D3D3, 0x00) # Gerudo Training Ground
rom.write_byte(0xB6D42B, 0x00) # Inside Ganon's Castle
# Remove disruptive text from Gerudo Training Ground and early Shadow Temple (vanilla)
Wonder_text = [0x27C00BC, 0x27C00CC, 0x27C00DC, 0x27C00EC, 0x27C00FC, 0x27C010C, 0x27C011C, 0x27C012C, 0x27CE080,
0x27CE090, 0x2887070, 0x2887080, 0x2887090, 0x2897070, 0x28C7134, 0x28D91BC, 0x28A60F4, 0x28AE084,
0x28B9174, 0x28BF168, 0x28BF178, 0x28BF188, 0x28A1144, 0x28A6104, 0x28D0094]
for address in Wonder_text:
rom.write_byte(address, 0xFB)
# Speed dig text for Dampe
rom.write_bytes(0x9532F8, [0x08, 0x08, 0x08, 0x59])
# Make item descriptions into a single box
Short_item_descriptions = [0x92EC84, 0x92F9E3, 0x92F2B4, 0x92F37A, 0x92F513, 0x92F5C6, 0x92E93B, 0x92EA12]
for address in Short_item_descriptions:
rom.write_byte(address,0x02)
et_original = rom.read_bytes(0xB6FBF0, 4 * 0x0614)
exit_updates = []
def copy_entrance_record(source_index, destination_index, count=4):
ti = source_index * 4
rom.write_bytes(0xB6FBF0 + destination_index * 4, et_original[ti:ti+(4 * count)])
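# Illustration (made-up indices): entrance table records are 4 bytes each, so
# copy_entrance_record(0x05, 0x10, count=1) would copy the single record at
# source index 0x05 from the vanilla snapshot (et_original) over index 0x10;
# the default count=4 copies four consecutive records at once.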
def generate_exit_lookup_table():
# Assumes that the last exit on a scene's exit list cannot be 0000
exit_table = {
0x0028: [0xAC95C2] #Jabu with the fish is entered from a cutscene hardcode
}
def add_scene_exits(scene_start, offset = 0):
current = scene_start + offset
exit_list_start_off = 0
exit_list_end_off = 0
command = 0
while command != 0x14:
command = rom.read_byte(current)
if command == 0x18: # Alternate header list
header_list = scene_start + (rom.read_int32(current + 4) & 0x00FFFFFF)
for alt_id in range(0,3):
header_offset = rom.read_int32(header_list) & 0x00FFFFFF
if header_offset != 0:
add_scene_exits(scene_start, header_offset)
header_list += 4
if command == 0x13: # Exit List
exit_list_start_off = rom.read_int32(current + 4) & 0x00FFFFFF
if command == 0x0F: # Lighting list, follows exit list
exit_list_end_off = rom.read_int32(current + 4) & 0x00FFFFFF
current += 8
if exit_list_start_off == 0 or exit_list_end_off == 0:
return
# calculate the exit list length
list_length = (exit_list_end_off - exit_list_start_off) // 2
last_id = rom.read_int16(scene_start + exit_list_end_off - 2)
if last_id == 0:
list_length -= 1
# update
addr = scene_start + exit_list_start_off
for _ in range(0, list_length):
index = rom.read_int16(addr)
if index not in exit_table:
exit_table[index] = []
exit_table[index].append(addr)
addr += 2
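# Result (for orientation, not from the original comments): exit_table maps a
# 16-bit entrance index to every ROM address in a scene exit list that stores
# that index (plus the hardcoded Jabu cutscene address seeded above), so a
# later pass can retarget an entrance by rewriting all its occurrences at once.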
scene_table