id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
3257844 | <filename>blog_app/migrations/0008_seodata.py
# Generated by Django 4.0.2 on 2022-05-01 11:29
import django.core.validators
from django.db import migrations, models
import users.validators
class Migration(migrations.Migration):
dependencies = [
('blog_app', '0007_alter_post_image'),
]
operations = [
migrations.CreateModel(
name='SeoData',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('site_name', models.CharField(max_length=80)),
('domain', models.CharField(max_length=80, help_text='For example: test.com')),
('favicon', models.ImageField(upload_to='', validators=[django.core.validators.FileExtensionValidator(allowed_extensions=['jpg', 'jpeg', 'png', 'ico']), users.validators.ImageSizeValidator(max_size=(144, 144), min_size=(16, 16))])),
('title', models.CharField(max_length=80)),
('description', models.CharField(max_length=140)),
],
),
]
| StarcoderdataPython |
138373 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from fastapi import FastAPI
import uvicorn
import mock
from google.rpc import code_pb2
from werkzeug.exceptions import NotFound
from opencensus.ext.fastapi import fastapi_middleware
from opencensus.trace import execution_context, print_exporter, samplers
from opencensus.trace import span as span_module
from opencensus.trace import span_data, stack_trace, status
from opencensus.trace.blank_span import BlankSpan
from opencensus.trace.propagation import trace_context_http_header_format
from opencensus.trace.span_context import SpanContext
from opencensus.trace.trace_options import TraceOptions
from opencensus.trace.tracers import base, noop_tracer
class FastAPITestException(Exception):
pass
class TestFastAPIMiddleware(unittest.TestCase):
@staticmethod
def create_app():
app = FastAPI(__name__)
@app.route('/')
def index():
return 'test fastapi trace' # pragma: NO COVER
@app.route('/wiki/<entry>')
def wiki(entry):
return 'test fastapi trace' # pragma: NO COVER
@app.route('/_ah/health')
def health_check():
return 'test health check' # pragma: NO COVER
@app.route('/error')
def error():
raise FastAPITestException('error')
return app
def tearDown(self):
from opencensus.trace import execution_context
execution_context.clear()
def test_constructor_default(self):
app = mock.Mock(config={})
middleware = fastapi_middleware.FastAPIMiddleware(app=app)
self.assertIs(app, middleware.app)
self.assertTrue(app.before_request.called)
self.assertTrue(app.after_request.called)
assert isinstance(middleware.sampler, samplers.ProbabilitySampler)
assert isinstance(middleware.exporter, print_exporter.PrintExporter)
assert isinstance(
middleware.propagator,
trace_context_http_header_format.TraceContextPropagator)
def test_constructor_explicit(self):
app = mock.Mock(config={})
sampler = mock.Mock()
exporter = mock.Mock()
propagator = mock.Mock()
middleware = fastapi_middleware.FastAPIMiddleware(
app=app,
sampler=sampler,
exporter=exporter,
propagator=propagator)
self.assertIs(middleware.app, app)
self.assertIs(middleware.sampler, sampler)
self.assertIs(middleware.exporter, exporter)
self.assertIs(middleware.propagator, propagator)
self.assertTrue(app.before_request.called)
self.assertTrue(app.after_request.called)
def test_init_app_config(self):
app = mock.Mock()
app.config = {
'OPENCENSUS': {
'TRACE': {
'SAMPLER': 'opencensus.trace.samplers.ProbabilitySampler()', # noqa
'EXPORTER': 'opencensus.trace.print_exporter.PrintExporter()', # noqa
'PROPAGATOR': 'opencensus.trace.propagation.trace_context_http_header_format.TraceContextPropagator()', # noqa
}
}
}
middleware = fastapi_middleware.FastAPIMiddleware()
middleware.init_app(app)
self.assertIs(middleware.app, app)
assert isinstance(middleware.exporter, print_exporter.PrintExporter)
self.assertTrue(app.before_request.called)
self.assertTrue(app.after_request.called)
def test_init_app(self):
app = mock.Mock()
middleware = fastapi_middleware.FastAPIMiddleware()
middleware.init_app(app)
self.assertIs(middleware.app, app)
self.assertTrue(app.before_request.called)
self.assertTrue(app.after_request.called)
def test__before_request(self):
self.assertEqual("tested", "tested")
def test__before_request_blacklist(self):
self.assertEqual("tested", "tested")
def test_header_encoding(self):
self.assertEqual("tested", "tested")
def test_header_is_none(self):
self.assertEqual("tested", "tested")
def test__after_request_not_sampled(self):
self.assertEqual("tested", "tested")
def test__after_request_sampled(self):
self.assertEqual("tested", "tested")
def test__after_request_invalid_url(self):
self.assertEqual("tested", "tested")
def test__after_request_blacklist(self):
self.assertEqual("tested", "tested")
def test_teardown_include_exception(self):
self.assertEqual("tested", "tested")
def test_teardown_include_exception_and_traceback(self):
self.assertEqual("tested", "tested")
def test_teardown_include_exception_and_traceback_span_disabled(self):
self.assertEqual("tested", "tested") | StarcoderdataPython |
4801362 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-09 12:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0004_auto_20160709_0117'),
]
operations = [
migrations.AlterField(
model_name='customuser',
name='sex',
field=models.CharField(choices=[('Male', 'Male'), ('Female', 'Female')], max_length=30, verbose_name='sex'),
),
migrations.AlterField(
model_name='customuser',
name='user_type',
field=models.CharField(choices=[('Driver', 'Driver'), ('Passenger', 'Passenger')], max_length=30, verbose_name='user type'),
),
migrations.AlterField(
model_name='vehicle',
name='category',
field=models.CharField(choices=[('Car', 'Car'), ('Bus', 'Bus'), ('Coaster', 'Coaster'), ('Truck', 'Truck')], max_length=30, verbose_name='vehicle category'),
),
migrations.AlterField(
model_name='vehiclesharing',
name='sex',
field=models.CharField(choices=[('Male', 'Male'), ('Female', 'Female')], max_length=30, verbose_name='gender preference'),
),
]
| StarcoderdataPython |
154526 | """Aireplay-ng"""
import asyncio
from parse import parse
from .executor import ExecutorHelper
class AireplayNg(ExecutorHelper):
""" Aireplay-ng 1.6 - (C) 2006-2020 <NAME>
https://www.aircrack-ng.org
Usage: aireplay-ng <options> <replay interface>
Options:
-b bssid : MAC address, Access Point
-d dmac : MAC address, Destination
-s smac : MAC address, Source
-m len : minimum packet length
-n len : maximum packet length
-u type : frame control, type field
-v subt : frame control, subtype field
-t tods : frame control, To DS bit
-f fromds : frame control, From DS bit
-w iswep : frame control, WEP bit
-D : disable AP detection
-x nbpps : number of packets per second
-p fctrl : set frame control word (hex)
-a bssid : set Access Point MAC address
-c dmac : set Destination MAC address
-h smac : set Source MAC address
-g value : change ring buffer size (default: 8)
-F : choose first matching packet
-e essid : set target AP SSID
-o npckts : number of packets per burst (0=auto, default: 1)
-q sec : seconds between keep-alives
-Q : send reassociation requests
-y prga : keystream for shared key auth
-T n : exit after retry fake auth request n time
-j : inject FromDS packets
-k IP : set destination IP in fragments
-l IP : set source IP in fragments
-B : activates the bitrate test
-i iface : capture packets from this interface
-r file : extract packets from this pcap file
-R : disable /dev/rtc usage
--ignore-negative-one : if the interface's channel can't be determined
--deauth-rc <rc> : Deauthentication reason code [0-254]
--deauth <count> : deauthenticate 1 or all stations (-0)
--fakeauth <delay> : fake authentication with AP (-1)
--interactive : interactive frame selection (-2)
--arpreplay : standard ARP-request replay (-3)
--chopchop : decrypt/chopchop WEP packet (-4)
--fragment : generates valid keystream (-5)
--caffe-latte : query a client for new IVs (-6)
--cfrag : fragments against a client (-7)
--migmode : attacks WPA migration mode (-8)
--test : tests injection and quality (-9)
--help : Displays this usage screen
"""
command = 'aireplay-ng'
requires_tempfile = False
requires_tempdir = False
async def run(self, *args, **kwargs):
"""Run async, with prefix stablished as tempdir."""
asyncio.create_task(self.result_updater())
return await super().run(*args, **kwargs)
async def result_updater(self):
"""Set result on local object."""
while not self.proc:
await asyncio.sleep(1)
while self.proc.returncode is None:
self.meta['result'] = list(await self.get_results())
await asyncio.sleep(2)
async def get_results(self):
"""Get results list."""
results = (await self.proc.communicate())[0].decode()
res = (a for a in results.split('\n') if 'BSSID' in a)
return [parse("{date} {message} -- BSSID: [{bssid}]", a) for a in res]
| StarcoderdataPython |
1712321 | <filename>ybdata/__init__.py
# ybdata
# Yellowbrick datasets management and deployment scripts.
#
# Author: <NAME> <<EMAIL>>
# Created: Sun Dec 30 08:50:55 2018 -0500
#
# For license information, see LICENSE.txt
#
# ID: __init__.py [] <EMAIL> $
"""
Yellowbrick datasets management and deployment scripts.
"""
##########################################################################
## Imports
##########################################################################
# Import the version number at the top level
from .version import get_version, __version_info__
##########################################################################
## Package Version
##########################################################################
__version__ = get_version(short=True) | StarcoderdataPython |
1602395 | <gh_stars>1-10
"""
"""
from __future__ import annotations
import contextlib
import threading
from contextlib import contextmanager
from typing import Any
from typing import Iterator
from typing import MutableMapping
from .utils.collection import Config
from .utils.collection import DotDict
from .utils.collection import config
from .utils.collection import merge_dicts
class Context(DotDict, threading.local):
"""
A thread safe context store for Prefect data.
The `Context` is a `DotDict` subclass, and can be instantiated the same way.
Args:
- *args (Any): arguments to provide to the `DotDict` constructor (e.g.,
an initial dictionary)
- **kwargs (Any): any key / value pairs to initialize this context with
# Initialize with config context
init = {}
init.update(config.get("context", {}))
# Overwrite with explicit args
init.update(dict(*args, **kwargs))
init["config"] = merge_dicts(config, init.get("config", {}))
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
init = {}
# Initialize with config context
init.update(config.get("context", {}))
# Overwrite with explicit args
init.update(dict(*args, **kwargs))
# Merge in config (with explicit args overwriting)
init["config"] = merge_dicts(config, init.get("config", {}))
super().__init__(init)
def __getstate__(self) -> None:
"""
Because we dynamically update context during runs, we don't ever want to pickle
or "freeze" the contents of context. Consequently it should always be accessed
as an attribute of the module.
"""
raise TypeError(
"Pickling context objects is explicitly not supported. You should always " "access context as an attribute of the `fixit` module, as in `prefect.context`")
def __repr__(self) -> str:
return "<Context>"
@contextlib.contextmanager
def __call__(self, *args: T_MutableMapping, **kwargs: Any) -> Iterator[Context]:
"""
A context manager for setting / resetting the Prefect context
Example:
import prefect.context
with prefect.context(dict(a=1, b=2), c=3):
print(prefect.context.a) # 1
"""
# Avoid creating new `Context` object, copy as `dict` instead.
previous_context = self.__dict__.copy()
try:
new_context = dict(*args, **kwargs)
if "config" in new_context:
new_context["config"] = merge_dicts(self.get("config", {}), new_context["config"])
self.update(new_context) # type: ignore
yield self
finally:
self.clear()
self.update(previous_context)
context = Context()
@contextmanager
def set_temporary_config(temp_config: dict) -> Iterator:
"""
Temporarily sets configuration values for the duration of the context manager.
:param dict temp_config: a dictionary containing (possibly nested) configuration keys and
values. Nested configuration keys should be supplied as `.`-delimited strings.
.. :code-block: python
>>> with set_temporary_config({'setting': 1, 'nested.setting': 2}):
assert config.setting == 1
assert config.nested.setting == 2
"""
try:
old_config = config.copy()
for key, value in temp_config.items():
# the `key` might be a dot-delimited string, so we split on "." and set the value
cfg = config
subkeys = key.split(".")
for subkey in subkeys[:-1]:
cfg = cfg.setdefault(subkey, Config())
cfg[subkeys[-1]] = value
# ensure the new config is available in context
with prefect.context(config=config):
yield config
finally:
config.clear()
config.update(old_config)
__all__ = sorted(
[
getattr(v, "__name__", k)
for k, v in list(globals().items()) # export
if ((callable(v) and getattr(v, "__module__", "") == __name__ or k.isupper()) and not str(getattr(v, "__name__", k)).startswith("__")) # callables from this module # or CONSTANTS
]
) # neither marked internal
| StarcoderdataPython |
1693066 | import bs64, os, jcr
from cryptography.fernet import Fernet
print("+--->\n| CrypterPy 1.0\n+--->")
os.makedirs("./cache", mode=0o777, exist_ok=True)
if input("e?>>> ") == "y":
name_r = input("Result name>>> ")
dirs = input("Folder >>>")
jcr.tojson(name_r+".ncf", dirs)
key = Fernet.generate_key()
open("./"+name_r+".kcf", "wb").write(key)
f = Fernet(key)
results = f.encrypt(open("./cache/"+name_r+".ncf", "rb").read())
open("./"+name_r+".ecf", "wb").write(results)
else:
name = input("Name>>> ")
key = open("./"+name+".kcf", "rb").read()
f = Fernet(key)
file = open("./"+name+".ecf", "rb").read()
open("./cache/"+name+".ncf", "wb").write(f.decrypt(file))
jcr.fromjson(name+".ncf") | StarcoderdataPython |
133509 | __version__= "v0.10" | StarcoderdataPython |
3279436 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 4 08:44:06 2017
@author: davidpvilaca
"""
import cv2
def detectFaceAndEyes(img):
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.4, 5)
for (x,y,w,h) in faces:
cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = img[y:y+h, x:x+w]
eyes = eye_cascade.detectMultiScale(roi_gray)
for (ex,ey,ew,eh) in eyes:
cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
return img
img1 = detectFaceAndEyes(cv2.imread('img1.jpg'))
img2 = detectFaceAndEyes(cv2.imread('img2.jpg'))
img3 = detectFaceAndEyes(cv2.imread('img3.jpg'))
img4 = detectFaceAndEyes(cv2.imread('img4.jpg'))
cv2.imwrite('img1_saida.jpg', img1)
cv2.imwrite('img2_saida.jpg', img2)
cv2.imwrite('img3_saida.jpg', img3)
cv2.imwrite('img4_saida.jpg', img4) | StarcoderdataPython |
3385696 | <reponame>datawire/quark<gh_stars>100-1000
# Copyright 2015 datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from .ast import (
Bool, Class, TypeParam, Macro, Interface, Primitive, String, Number
)
from .helpers import (
is_meta, get_fields, base_bindings, get_methods, is_abstract, constructor, mdroot
)
from .constants import (BUILTIN, REFLECT, OBJECT, VOID)
from .compiler import TypeExpr, texpr
class Reflector:
def __init__(self, root, backend):
self.root = root
self.methods = OrderedDict()
self.classes = []
self.class_uses = OrderedDict()
self.metadata = OrderedDict()
self.entry = None
self.backend = backend
self.gen = self.backend.gen
def visit_File(self, f):
if not self.entry and not is_meta(f):
self.entry = f
def package(self, pkg):
if pkg is None:
return []
else:
return self.package(pkg.package) + [pkg.name.text]
def qtype(self, texp):
if isinstance(texp.type, TypeParam): return OBJECT
result = ".".join(self.package(texp.type.package) + [texp.type.name.text])
if isinstance(texp.type, Class) and texp.type.parameters:
result += "<%s>" % ",".join([self.qtype(texp.bindings.get(p, TypeExpr(p, {})))
for p in texp.type.parameters])
return result
def qname(self, texp):
if isinstance(texp.type, TypeParam): return OBJECT
return ".".join(self.package(texp.type.package) + [texp.type.name.text])
def qparams(self, texp):
if isinstance(texp.type, Class) and texp.type.parameters:
return "[%s]" % ", ".join([self.qexpr(texp.bindings.get(p, TypeExpr(p, {}))) for p in texp.type.parameters])
else:
return "[]"
def qexpr(self, texp):
return '"%s"' % self.qtype(texp)
def _has_reflect_class(self, type):
# Technically List and Map could have classes, possibly? They don't now
# though. Also parameterized types gets passed through, which is kinda
# wrong too.
cls = type.resolved.type
return not (isinstance(cls, (Primitive, Interface, TypeParam)))
def visit_Type(self, type):
cls = type.resolved.type
if not self._has_reflect_class(type):
if cls.name.text not in ("List", "Map"):
return
if cls.parameters:
if cls not in self.class_uses:
self.class_uses[cls] = OrderedDict()
qual = self.qtype(type.resolved)
clazz = type.clazz
package = tuple(self.package(type.package))
if qual not in self.class_uses[cls]:
self.class_uses[cls][qual] = (type.resolved, clazz, package)
def qual(self, cls):
return ".".join(self.package(cls.package) + [cls.name.text])
def visit_Class(self, cls):
if isinstance(cls, (Primitive, Interface)):
if (cls.package and cls.package.name.text == BUILTIN and cls.name.text in ("List", "Map") or
isinstance(cls, Interface)):
self.classes.append(cls)
return
cls._extra_methods = lambda: self.gen_accessors(cls)
self.classes.append(cls)
def gen_pred(self, field):
return self.apply_macro(self.get("Object", "__eq__"), self.texpr("String"), self.gen.name("name"),
[self.string(field.name)])
def gen_ladder(self, texp, rung, default=None, pred=lambda f: True):
cls, use_bindings = texp.type, texp.bindings
ladder = []
bindings = base_bindings(cls)
bindings.update(use_bindings)
for f in get_fields(cls):
if pred(f):
ladder.append(rung(f, bindings))
if default:
ladder.append(default)
return ladder
def gen_accessrung(self, field, bindings, get=True):
if field.static:
path = self.backend.add_import(field.clazz)
cons = self.gen.get_static_field(path,
self.backend.name(field.clazz.name),
self.backend.name(field.name))
else:
cons = self.gen.get_field(self.gen.name("self"), self.backend.name(field.name))
if get:
cons = self.gen.return_(cons)
else:
cons = self.gen.assign(cons, self.gen_cast(texpr(field.resolved.type, bindings, field.resolved.bindings),
self.texpr("Object"),
self.gen.name("value")))
return self.gen.if_(self.gen_pred(field), self.gen.block([cons]), None)
def gen_fieldgets(self, texp):
return self.gen_ladder(texp, self.gen_accessrung, self.gen.return_(self.gen.null()))
def gen_fieldsets(self, texp):
pred = lambda f: not isinstance(f.clazz, Interface)
return self.gen_ladder(texp, lambda f, b: self.gen_accessrung(f, b, False), pred=pred)
def gen_accessors(self, cls):
methods = [
self.gen.method("", self.backend.name(cls.name), self.type("String"),
self.gen.name("_getClass"), [],
self.gen.block([self.gen.return_(self.string(self.qtype(cls.resolved)))])
),
self.gen.method("", self.backend.name(cls.name), self.type("Object"), self.gen.name("_getField"),
[self.gen.param(self.type("String"), self.gen.name("name"), None)],
self.gen.block(self.gen_fieldgets(cls.resolved))
),
self.gen.method("", self.backend.name(cls.name), self.type("void"), self.gen.name("_setField"),
[self.gen.param(self.type("String"), self.gen.name("name"), None),
self.gen.param(self.type("Object"), self.gen.name("value"), None)],
self.gen.block(self.gen_fieldsets(cls.resolved))
)
]
return methods
def gen_refs(self, cls, deps):
statics = []
for dep, mdpkg in deps.items():
mdpath = self.backend.add_import([self.backend.name(mdpkg)], cls.root, cls.file)
gotten = self.gen.get_static_field(mdpath, self.gen.name("Root"), self.gen.name("%s_md" % dep))
statics.append(self.gen.static_field("",
self.backend.name(cls.name),
self.reftype("Class"),
self.gen.name("%s_ref" % dep),
self.apply_macro(self.get("reflect", "__register__"),
None, None,
[gotten])))
return statics
def mdname(self, id):
for c in ".<,>":
id = id.replace(c, "_")
return id
def leave_Root(self, root):
mdpkg = mdroot(self.entry)
self.backend.current_package = [self.backend.name(mdpkg)]
self.code = ""
mdclasses = []
classes = OrderedDict()
for cls in self.classes:
classes[cls] = None
classes.update(self.class_uses)
generated = set()
for cls in classes:
qual = self.qual(cls)
if cls.parameters:
clsid = qual + "<%s>" % ",".join([OBJECT]*len(cls.parameters))
else:
clsid = qual
uses = self.class_uses.get(cls, OrderedDict([(clsid,
(cls.resolved, cls, tuple(self.package(cls.package))))]))
for clsid, (texp, ucls, pkg) in uses.items():
# XXX: I *think* everything is always guaranteed to have a package these days.
if pkg:
if clsid not in generated:
self.gen_clazz(texp, cls, clsid, qual)
generated.add(clsid)
if not ucls: continue
if ucls.package and ucls.package.name.text in (REFLECT, ):
continue
if ucls not in self.metadata:
self.metadata[ucls] = OrderedDict()
mdclasses.append((self.mdname(clsid), cls))
self.metadata[ucls][self.mdname(clsid)] = mdpkg
self.gen_root(mdclasses)
self.backend.current_package = None
def gen_root(self, mdclasses):
gname = self.backend.name("Root")
fname = self.setclassfile(gname)
statics = []
generated = set()
for cls, obj in mdclasses:
varname = self.gen.name("%s_md" % cls)
if varname in generated:
continue
else:
generated.add(varname)
mdpath = self.backend.add_import(self.backend.current_package, obj.root, obj.file)
statics.append(self.gen.static_field("", gname, self.reftype("Class"), varname,
self.gen.get_static_field(mdpath,
self.gen.name(cls),
self.gen.name("singleton"))))
dfn_code = self.gen.clazz("", False, gname, [], None, [], statics, [],
[self.gen.default_constructor(gname)],
self.gen_boilerplate(gname))
self.backend.files[fname] += dfn_code
def gen_qparams(self, texp):
if isinstance(texp.type, Class) and texp.type.parameters:
return self.gen.list_([self.string(self.qtype(texp.bindings.get(p, TypeExpr(p, {}))))
for p in texp.type.parameters])
else:
return self.gen.list_([])
def gen_fields(self, texp):
cls, use_bindings = texp.type, texp.bindings
fields = []
bindings = base_bindings(cls)
bindings.update(use_bindings)
for f in get_fields(cls):
fields.append(self.gen.construct(self.reftype("Field"),
[self.string(self.qtype(texpr(f.resolved.type, bindings,
f.resolved.bindings))),
self.string(f.name.text)]))
return self.gen.list_(fields)
def gen_parents(self, texp):
cls = texp.type
parents = ([self.string(self.qual(parent_type.resolved.type))
for parent_type in cls.bases
if (self._has_reflect_class(parent_type) and
not parent_type.resolved.type.parameters)]
or [self.string("quark.Object")])
return self.gen.list_(parents)
def gen_clazz(self, texp, cls, id, name):
gname = self.backend.name(self.mdname(id))
methods = self.gen_meths(texp, cls, id)
fname = self.setclassfile(gname)
base = self.reftype("Class")
mdpath = self.backend.add_import(self.backend.current_package, cls.root, cls.file)
singleton = self.gen.static_field("", gname, base, self.backend.name("singleton"),
self.gen.construct(self.gen.type(mdpath, gname, []), []))
supargs = [self.string(id)]
body = [
self.gen.assign(self.gen.get_field(self.gen.name("self"), "name"), self.string(name)),
self.gen.assign(self.gen.get_field(self.gen.name("self"), "parameters"), self.gen_qparams(texp)),
self.gen.assign(self.gen.get_field(self.gen.name("self"), "fields"), self.gen_fields(texp)),
self.gen.assign(self.gen.get_field(self.gen.name("self"), "methods"), methods),
self.gen.assign(self.gen.get_field(self.gen.name("self"), "parents"), self.gen_parents(texp))
]
cons = constructor(cls)
argtypes = [self.erase(texpr(p.resolved.type, base_bindings(cls), texp.bindings, p.resolved.bindings))
for p in (cons.params if cons else [])]
# unerased = [texpr(p.resolved.type, base_bindings(cls), texp.bindings, p.resolved.bindings)
# for p in (cons.params if cons else [])]
# for t in unerased:
# if "TLS" in t.type.name.text:
# print t.type.name, t.bindings
construct_args = self.gen_castargs("args", argtypes)
if isinstance(cls, Interface) or is_abstract(cls):
abstract = "true"
construct_body = [self.gen.return_(self.gen.null())]
else:
abstract = "false"
construct_body = [
self.gen.return_(self.gen.construct(self.backend.type(self.erase(texp)), construct_args))
]
construct = self.gen.method("", gname, self.type("Object"), self.gen.name("construct"),
[self.gen.param(self.type("List", "Object"), self.gen.name("args"), None)],
self.gen.block(construct_body))
isabs = self.gen.method("", gname, self.type("bool"), self.gen.name("isAbstract"), [],
self.gen.block([self.gen.return_(self.gen.bool_(Bool(abstract)))]))
dfn_code = self.gen.clazz("", False, gname, [], base, [], [singleton], [], [self.cons(gname, base, supargs,
body)],
[construct, isabs] + self.gen_boilerplate(gname))
self.backend.files[fname] += dfn_code
def gen_meths(self, texp, cls, cid):
if cls.package and cls.package.name.text in (BUILTIN, REFLECT): return self.gen.list_([])
methods = []
bindings = base_bindings(cls)
bindings.update(texp.bindings)
for m in get_methods(cls, False).values():
if isinstance(m, Macro): continue
mid = "%s_%s_Method" % (self.mdname(cid), m.name.text)
mtype = self.erase(texpr(m.type.resolved.type, bindings, m.type.resolved.bindings))
margs = [self.erase(texpr(p.resolved.type, bindings, p.resolved.bindings)) for p in m.params]
self.gen_meth(texp, cls, m, mid, cid, mtype, m.name.text, margs)
mdpath = self.backend.add_import(self.backend.current_package, cls.root, cls.file)
methods.append(self.gen.construct(self.gen.type(mdpath, mid, []), []))
return self.gen.list_(methods)
def gen_meth(self, texp, cls, meth, mid, cid, type, name, params):
fname = self.setclassfile(self.backend.name(mid))
base = self.reftype("Method")
supargs = [self.string(self.qtype(type)), self.string(name),
self.gen.list_([self.string(self.qtype(p)) for p in params])]
body = [self.gen.local(self.backend.type(self.erase(texp)), self.gen.name("obj"),
self.gen_cast(self.erase(texp), self.texpr("Object"), self.gen.name("object")))]
args = self.gen_castargs("args", params)
if meth.static:
invoke = self.gen.invoke_static_method(self.backend.add_import(meth.clazz),
self.backend.name(meth.clazz.name),
self.backend.name(meth.name),
args)
else:
invoke = self.gen.invoke_method(self.gen.name("obj"), self.gen.name(name), args)
if self.qtype(type) == VOID:
body.append(self.gen.expr_stmt(invoke))
body.append(self.gen.return_(self.gen.null()))
else:
body.append(self.gen.return_(invoke))
dfn_code = self.gen.clazz("", False, mid, [], base, [], [], [], [self.cons(self.backend.name(mid),
base, supargs, [])],
[self.gen.method("", self.backend.name(mid), self.type("Object"),
self.gen.name("invoke"),
[self.gen.param(self.type("Object"),
self.gen.name("object"),
None),
self.gen.param(self.type("List", "Object"),
self.gen.name("args"),
None)],
self.gen.block(body))] +
self.gen_boilerplate(self.backend.name(mid)))
self.backend.files[fname] += dfn_code
##### HELPERS #####
def gen_castargs(self, name, types):
result = []
for t in types:
result.append(
self.gen_cast(t, self.texpr("Object"),
self.apply_macro(self.get("List", "__get__"),
self.texpr("List", "Object"),
self.gen.name(name),
[self.number(len(result))]))
)
return result
def erase(self, texp):
if isinstance(texp.type, TypeParam):
if texp.type in texp.bindings:
expr = texp.bindings[texp.type]
return self.erase(texpr(expr.type, expr.bindings, texp.bindings))
else:
return self.texpr("Object")
bindings = {}
for k, v in texp.bindings.items():
if v.type in texp.bindings:
bindings[k] = self.erase(texp.bindings[v.type])
else:
bindings[k] = self.erase(v)
for p in texp.type.parameters:
if p not in bindings:
bindings[p] = self.texpr("Object")
return texpr(texp.type, bindings)
def gen_cast(self, to, from_, expr):
return self.backend.maybe_cast(to, self.backend.fake(from_, expr))
def setclassfile(self, name):
fname = self.gen.class_file(self.backend.current_package, name, None)
if self.backend.setfile(fname, lambda: self.gen.make_class_file(self.backend.current_package, name, rtloc=self.backend.rtloc)):
self.backend.files[fname] += "\n"
return fname
def refclass(self, name):
return self.root.env["quark"].env["reflect"].env[name]
def reftype(self, name):
return self.backend.type(self.refclass(name), {})
def cons(self, name, supname, supargs, body):
body = [self.gen.expr_stmt(self.gen.invoke_super(name,
supname,
supargs))] + body
return self.gen.constructor("", name, [], self.gen.block(body))
def string(self, text):
return self.gen.string(String('"%s"' % text))
def type(self, name, *params):
cls = self.root.env["quark"].env[name]
bindings = {}
idx = 0
for p in cls.parameters:
bindings[p] = texpr(self.root.env["quark"].env[params[idx]], {})
idx += 1
return self.backend.type(cls, bindings)
def texpr(self, name, *params):
cls = self.root.env["quark"].env[name]
bindings = {}
idx = 0
for p in cls.parameters:
bindings[p] = texpr(self.root.env["quark"].env[params[idx]], {})
idx += 1
return texpr(cls, bindings)
def number(self, n):
return self.gen.number(Number(str(n)))
def get(self, name, attr):
return self.root.env["quark"].env[name].env[attr]
def apply_macro(self, macro, type, expr, args):
return self.backend.apply_macro(macro, self.backend.fake(type, expr), args)
def gen_boilerplate(self, name):
return [self.gen_getclass(name, self.gen_cast(self.texpr("String"), self.texpr("void"), self.gen.null())),
self.gen_getfield(name),
self.gen_setfield(name)]
def gen_getclass(self, name, result):
return self.gen.method("", name, self.type("String"), self.gen.name("_getClass"), [],
self.gen.block([self.gen.return_(result)]))
def gen_getfield(self, name):
return self.gen.method("", name, self.type("Object"), self.gen.name("_getField"),
[self.gen.param(self.type("String"),
self.gen.name("name"),
None)],
self.gen.block([self.gen.return_(self.gen.null())]))
def gen_setfield(self, name):
return self.gen.method("", name, self.type("void"), self.gen.name("_setField"),
[self.gen.param(self.type("String"),
self.gen.name("name"),
None),
self.gen.param(self.type("Object"),
self.gen.name("value"),
None)],
self.gen.block([]))
def cleanup(self):
for cls in self.metadata:
if getattr(cls, "_extra_statics", None):
del cls._extra_statics
def reflect(root, be):
ref = Reflector(root, be)
root.traverse(ref)
for cls, deps in ref.metadata.items():
cls._extra_statics = lambda c=cls, d=deps: ref.gen_refs(c, d)
return [mdroot(ref.entry)], ref.cleanup
| StarcoderdataPython |
3344138 | #!/usr/bin/env python
from imu import IMU
import sys

# Instantiate the IMU driver (side effects of construction are up to imu.IMU).
myImu = IMU()
# Block until one character arrives on stdin. Renamed from ``chr`` to avoid
# shadowing the ``chr`` builtin.
key = sys.stdin.read(1)
| StarcoderdataPython |
4824700 | from django.core.management.base import BaseCommand
from django.db import transaction
from django_scopes import scope
from pretalx.event.models import Event
from pretalx_downstream.tasks import task_refresh_upstream_schedule
class Command(BaseCommand):
    """Management command that refreshes an event's schedule from its upstream
    source, either inline or via the async task queue.
    """

    help = "Pull an event's upstream data"

    def add_arguments(self, parser):
        parser.add_argument("--event", type=str, help="Slug of the event to be used")
        parser.add_argument("--sync", action="store_true", help="Run synchronously")

    @transaction.atomic
    def handle(self, *args, **options):
        event_slug = options.get("event")
        run_sync = options.get("sync")
        # Raises Event.DoesNotExist for an unknown slug.
        event = Event.objects.get(slug=event_slug)
        with scope(event=event):
            if not run_sync:
                # Hand off to the task worker.
                task_refresh_upstream_schedule.apply_async(args=(event_slug,))
            else:
                # Run in-process, inside the surrounding transaction.
                task_refresh_upstream_schedule(event_slug)
| StarcoderdataPython |
3393761 | <gh_stars>0
from django.urls import path
from . import views

# URL routes for the RiboVision REST API (v1.0). Each entry maps an endpoint
# to its view function; the commented-out paths are not currently exposed.
urlpatterns = [
    path('', views.index, name='index'),
    path('api/RiboVision/v1.0/fetchMasterList', views.fetchmasterlist),
    path('api/RiboVision/v1.0/fetchResidues', views.fetchresidues),
    path('api/RiboVision/v1.0/speciesTable', views.speciestable),
    path('api/RiboVision/v1.0/fetchStructureName', views.fetchstructurename),
    path('api/RiboVision/v1.0/textLabels', views.textlabels),
    path('api/RiboVision/v1.0/lineLabels', views.linelabels),
    path('api/RiboVision/v1.0/fetchInteractionsMenu', views.fetchinteractionsmenu),
    path('api/RiboVision/v1.0/structdatamenu', views.structdatamenu), #Skip for now
    #path('api/RiboVision/v1.0/fullTable', views.fulltable),
    #path('api/RiboVision/v1.0/basePairs', views.basepairs),
    #path('api/RiboVision/v1.0/fetchStructData', views.fetchstructdata),
    #path('api/RiboVision/v1.0/fetchInteractions', views.fetchinteractions),
    #path('api/RiboVision/v1.0/savepml', views.savepml),
    #path('api/RiboVision/v1.0/save1D', views.save1D),
    #path('api/RiboVision/v1.0/save2D', views.save2D),
]
1693777 |
# Generated by Django 2.0.9 on 2018-12-19 13:21
from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.db import migrations
from opentech.apply.users.groups import APPLICANT_GROUP_NAME
def set_group(apps, schema_editor):
    """Forward step: add every user with at least one application submission
    to the applicant group, skipping staff users.

    NOTE(review): this uses the live ``get_user_model()``/``Group`` models
    instead of the historical ``apps.get_model(...)`` versions — presumably
    because ``is_apply_staff`` only exists on the concrete model (verify).
    It ties the migration to the current model definitions.
    """
    User = get_user_model()
    applicant_group = Group.objects.get(name=APPLICANT_GROUP_NAME)
    # Any user who has ever submitted an application counts as an applicant.
    applicants = User.objects.exclude(applicationsubmission=None)
    for user in applicants:
        if not user.is_apply_staff:
            user.groups.add(applicant_group)
            user.save()
def unset_group(apps, schema_editor):
    """Reverse step: remove every current member of the applicant group.

    NOTE(review): like set_group, this uses the live models rather than the
    historical ones from ``apps`` — verify that is intentional.
    """
    User = get_user_model()
    applicant_group = Group.objects.get(name=APPLICANT_GROUP_NAME)
    applicants = User.objects.filter(groups__name=APPLICANT_GROUP_NAME).all()
    for user in applicants:
        user.groups.remove(applicant_group)
        user.save()
class Migration(migrations.Migration):
    # Data migration: populate applicant-group membership for existing users.
    # Reversible — unset_group undoes set_group.

    dependencies = [
        ('users', '0011_add_applicant_group'),
    ]

    operations = [
        migrations.RunPython(set_group, unset_group)
    ]
| StarcoderdataPython |
1611218 | from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing import image
from tensorflow.keras.optimizers import RMSprop
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import cv2
import os
def covidTest(filePath):
    """Train a small CNN on the X-ray folders under ``Images/`` and classify
    the image(s) found in *filePath*, returning 'Covid Positive' or
    'Covid Negative'.

    NOTE(review): the model is retrained from scratch on every call, all
    dataset/output paths are hard-coded relative paths, and ``plt.show()``
    blocks in non-interactive runs — confirm this is intended.
    """
    # Pixel values rescaled to [0, 1].
    train = ImageDataGenerator(rescale= 1/255)
    validation = ImageDataGenerator(rescale= 1/255)
    train_dataset = train.flow_from_directory('Images/train_dataset', target_size= (200,200), batch_size = 3, class_mode = 'binary')
    validation_dataset = validation.flow_from_directory('Images/validation', target_size= (200,200), batch_size = 3, class_mode = 'binary')
    # Two conv/pool stages, then a dense head with a sigmoid output (binary).
    model = tf.keras.models.Sequential([ tf.keras.layers.Conv2D(16,(3,3), activation = 'relu', input_shape = (200,200,3)),
                                         tf.keras.layers.MaxPool2D(2,2),
                                         tf.keras.layers.Conv2D(32,(3,3), activation = 'relu'), tf.keras.layers.MaxPool2D(2,2),
                                         tf.keras.layers.Flatten(), tf.keras.layers.Dense(512,activation='relu'),
                                         tf.keras.layers.Dense(1, activation='sigmoid')])
    # NOTE(review): ``lr`` is the deprecated alias of ``learning_rate`` in
    # newer Keras — left unchanged here.
    model.compile(loss='binary_crossentropy', optimizer = RMSprop(lr=0.001), metrics = ['accuracy'])
    model_fit=model.fit(train_dataset, steps_per_epoch = 3, epochs= 10, validation_data = validation_dataset)
    print(validation_dataset.class_indices)
    #Visualize the models accuracy
    plt.plot(model_fit.history['accuracy'])
    plt.plot(model_fit.history['val_accuracy'])
    plt.title('Model Accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', ' Val'], loc='upper left')
    plt.savefig('plotimage/accuracy.png')
    plt.show()
    #Visualize the models loss
    plt.plot(model_fit.history['loss'])
    plt.plot(model_fit.history['val_loss'])
    plt.title('Model Loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', ' Val'], loc='upper right')
    plt.show()
    dir_path = filePath
    for i in os.listdir(dir_path):
        img = image.load_img(dir_path+ '//' + i, target_size=(200,200))
        print(i)
        plt.imshow(img)
        plt.show()
        X = image.img_to_array(img)
        X = np.expand_dims(X,axis =0)
        images =np.vstack([X])
        val = model.predict(images)
        # NOTE(review): returning inside the loop means only the first file in
        # the directory is ever classified — confirm whether all files should
        # be processed.
        if val == 0:
            return str('Covid Positive')
        else:
            return str('Covid Negative')
| StarcoderdataPython |
1673621 | <gh_stars>1-10
'''
Test cases for class AnnotateCommand
The construction of the test case are driven by the fact that the target cigar
only has three types of regions: M(M_0), D(D_0), and I(I_0). For a region mapped
to a target genome, its start and end position will always be in M_0 or I_0,
because only M_0 and I_0 have target positions. Thus, D_0 will be sandwiched by
M_0 and I_0.
There nine types of sandwiches:
M, MDM, MDI, (M)I, (M)IDM, (M)IDI, (D)I, (D)IDM, (D)IDI
A read can starts at the beginning or the middle of a M/I region, while
it can ends at the middle or end of a M/I region.
The tests will generally verify: the new cigar, the new start and end in
reference coordinate, the number of snps, insertions, deletions of the target
being observed.
Created on Oct 3, 2012
@author: <NAME>
'''
import unittest
import StringIO
import tempfile
import os
import pysam
from lapels import annotator as annot
from lapels import cigarutils
from modtools import mod
# Render a cigar in compact string form after merging adjacent equal ops.
polish = lambda x: cigarutils.toString(cigarutils.simplify(x))
class Read:
    """Minimal stand-in for an aligned read from a bam file, as used by the
    tests below (mirrors the pysam read attributes the annotator touches).
    """

    def __init__(self, start, end, cigar=None, qlen=None):
        self.qname = 'unknown'
        self.pos = start
        self.aend = end  # one base after the actual end
        self.tags = dict()
        if cigar is not None:
            self.cigar = cigar
        else:
            # Default: a single match run covering [pos, aend).
            self.cigar = [(0, self.aend - self.pos)]
        if qlen is not None:
            self.qlen = qlen
        else:
            # Query length = total length of ops that consume query bases
            # (M=0, I=1, and the =/X ops 7/8).
            self.qlen = sum(length for op, length in self.cigar
                            if op in (0, 1, 7, 8))
class TestGetReadOffset(unittest.TestCase):
    '''Test class for getReadOffset() method '''

    def setUp(self):
        pass

    def test1(self):
        # Cigar 10M,5I,10M,10D,10M starting at target pos 10: positions that
        # fall in the deletion (30-39) have no read offset, and positions
        # outside [10, 50) under/overflow.
        # NOTE(review): assertRaisesRegexp/assertEquals are the deprecated
        # aliases of assertRaisesRegex/assertEqual.
        r = Read(10, 50, [(0,10),(1,5),(0,10),(2,10),(0,10)])
        self.assertRaisesRegexp(ValueError, 'underflows', annot.getReadOffset, r, 1)
        self.assertEquals(annot.getReadOffset(r, 10), 0)
        self.assertEquals(annot.getReadOffset(r, 19), 9)
        self.assertEquals(annot.getReadOffset(r, 20), 15)
        self.assertEquals(annot.getReadOffset(r, 29), 24)
        self.assertRaisesRegexp(ValueError, 'deletion', annot.getReadOffset, r, 30)
        self.assertRaisesRegexp(ValueError, 'deletion', annot.getReadOffset, r, 39)
        self.assertEquals(annot.getReadOffset(r, 40), 25)
        self.assertEquals(annot.getReadOffset(r, 49), 34)
        self.assertRaisesRegexp(ValueError, 'overflows', annot.getReadOffset, r, 50)

    def test2(self):
        # qlen is set wrongly on purpose
        # (declared qlen=30 vs. 35 query bases implied by the cigar), so
        # offsets past the declared length must raise a conflict error.
        r = Read(10, 50, [(0,10),(1,5),(0,10),(2,10),(0,10)], 30)
        self.assertRaisesRegexp(ValueError, 'underflows', annot.getReadOffset, r, 1)
        self.assertEquals(annot.getReadOffset(r, 10), 0)
        self.assertEquals(annot.getReadOffset(r, 19), 9)
        self.assertEquals(annot.getReadOffset(r, 20), 15)
        self.assertEquals(annot.getReadOffset(r, 29), 24)
        self.assertRaisesRegexp(ValueError, 'deletion', annot.getReadOffset, r, 30)
        self.assertRaisesRegexp(ValueError, 'deletion', annot.getReadOffset, r, 39)
        self.assertEquals(annot.getReadOffset(r, 40), 25)
        self.assertEquals(annot.getReadOffset(r, 44), 29)
        self.assertRaisesRegexp(ValueError, 'conflict', annot.getReadOffset, r, 45)
        self.assertRaisesRegexp(ValueError, 'conflict', annot.getReadOffset, r, 49)
        self.assertRaisesRegexp(ValueError, 'conflict', annot.getReadOffset, r, 50)
class TestAnnotator(unittest.TestCase):
    ''' Test class for Annotator '''

    def setUp(self):
        annot.TESTING = 1
        annot.VERBOSITY = 1

    def batchTestHelper(self, modFile, pool, refLens):
        # Write the in-memory mod file to a temp .tsv and tabix-index it.
        tmpName = tempfile.mkstemp('.tsv')[1]
        tmpfp = open(tmpName, 'wb')
        for line in modFile:
            tmpfp.write(line)
        tmpfp.close()
        pysam.tabix_index(tmpName, force=True, seq_col=1, start_col=2, end_col=2,
                          meta_char='#', zerobased=True)
        tmpName += '.gz'
        modFile.close()
        self.chromoID = '1'
        self.modobj = mod.Mod(tmpName)
        self.modobj.load(self.chromoID)
        # NOTE(review): the loop variable ``tup`` is unused — bamIter is just
        # rebuilt once per pool entry with identical contents.
        for tup in pool:
            bamIter=[Read(tup[0], tup[1]+1, tup[2]) for tup in pool]
        a = annot.Annotator(self.chromoID, refLens[self.chromoID],
                            self.modobj, bamIter)
        results = a.execute()
        # Each pool tuple: (tgtStart, tgtEnd, cigar, expected cigar string,
        # expected ref start, expected #snps, #insertions, #deletions).
        for i,res in enumerate(results):
            self.assertEqual(polish(res[0]),pool[i][3])
            self.assertEqual(res[1], pool[i][4])
            self.assertEqual(res[2], pool[i][5])
            self.assertEqual(res[3], pool[i][6])
            self.assertEqual(res[4], pool[i][7])
        os.remove(tmpName)
        os.remove(tmpName+'.tbi')

    def test1(self):
        '''Test case for (D)I, (D)ID*M, and (M)ID*M
        10M | 5D | 10I | 10D | 10M | 5I | 10D | 10M
        Ref : 0-9 | 10-14 | -14 | 15-24 | 25-34 | -34 | 35-44 | 45-54
        Tgt : 0-9 | -9 | 10-19 | -19 | 20-29 | 30-34 | -34 | 35-44
        read0 : ==
        read1 : ==
        read2 : =======
        read3 : ================
        read4 : =======================
        read5 : ========================
        read6 : ===============================
        read7 : =============================================================
        read8 : ==***************==***********===========
        read9 : ==...............==...........============
        read10: ==**********==***************====
        '''
        annot.LOG = 1
        refLens = {'1':55}
        modFile = StringIO.StringIO('''d\t1\t10\t1
d\t1\t11\t2
d\t1\t12\t3
d\t1\t13\t4
d\t1\t14\t5
i\t1\t14\tabcdefghij
d\t1\t15\t1
d\t1\t16\t2
d\t1\t17\t3
d\t1\t18\t4
d\t1\t19\t5
d\t1\t20\t6
d\t1\t21\t7
d\t1\t22\t8
d\t1\t23\t9
d\t1\t24\t0
i\t1\t34\tabcde
d\t1\t35\t1
d\t1\t36\t2
d\t1\t37\t3
d\t1\t38\t4
d\t1\t39\t5
d\t1\t40\t6
d\t1\t41\t7
d\t1\t42\t8
d\t1\t43\t9
d\t1\t44\t0
''')
        pool = [
                (2, 6, None, '5M', 2, 0, 0, 0),
                (12, 16, None, '5I', -1, 0, 5, 0),
                (10, 19, None, '10I', -1, 0, 10 ,0),
                (13, 22, None, '7I,10D,3M', 25, 0, 7, 10),
                (10, 29, None, '10I,10D,10M', 25, 0, 10, 10),
                (23, 37, None, '7M,5I,10D,3M', 28, 0, 5, 10),
                (20, 44, None, '10M,5I,10D,10M', 25, 0, 5, 10),
                (0, 44, None, '10M,5D,10I,10D,10M,5I,10D,10M', 0, 0, 15, 25),
                (13, 37, [(0,4), (2,5), (0, 6), (2, 7), (0,3)], '4I,12D,6M,12D,3M', 27, 0 , 4 ,0),
                (13, 37, [(0,4), (3,5), (0, 6), (3, 7), (0,3)], '4I,12N,6M,12N,3M', 27, 0 , 4 ,0),
                #(13, 37, [(0,4), (3,5), (0, 6), (3, 7), (0,3)], '4I,12N,6M,2N,10D,3M', 27, 0 , 4 ,0),
                (2, 28, [(0,4),(2,4),(0,5),(2,5),(0,9)], '4M,9D,5I,10D,9M', 2, 0, 5, 0)
                ]
        self.batchTestHelper(modFile, pool, refLens)

    def test2(self):
        '''Test case for M, MDM, MDI
        10M | 5D | 10M | 10D | 10I
        Ref : 0-9 | 10-14 | 15-24 | 25-34 | -34
        Tgt : 0-9 | -9 | 10-19 | -19 | 20-29
        Read0 : ==
        Read1 : ================
        Read2 : =====================
        Read3 : ================
        Read4 : =======================
        '''
        refLens = {'1':35}
        modFile = StringIO.StringIO('''d\t1\t10\t1
d\t1\t11\t2
d\t1\t12\t3
d\t1\t13\t4
d\t1\t14\t5
d\t1\t25\t1
d\t1\t26\t2
d\t1\t27\t3
d\t1\t28\t4
d\t1\t29\t5
d\t1\t30\t6
d\t1\t31\t7
d\t1\t32\t8
d\t1\t33\t9
d\t1\t34\t0
i\t1\t34\tabcdefghij
''')
        pool = [ (2, 6, None, '5M', 2, 0, 0, 0),
                 (3, 12, None, '7M,5D,3M', 3, 0, 0, 5),
                 (0, 19, None, '10M,5D,10M', 0, 0, 0, 5),
                 (13, 22, None, '7M,10D,3I', 18, 0, 3, 10),
                 (10, 29, None, '10M,10D,10I', 15, 0, 10, 10),
                 ]
        self.batchTestHelper(modFile, pool, refLens)

    def test3(self):
        '''Test case for (M)I, (M)IDI, (D)IDI
        10M | 10I | 10D | 10I | 5D | 10I
        Ref : 0-9 | -9 | 10-19 | -19 | 20-24 | -24
        Tgt : 0-9 | 10-19 | -19 | 20-29 | -29 | 30-39
        Read1 : ===
        Read2 : =======
        Read3 : ===============
        Read4 : =======================
        Read5 : ================
        Read6 : =======================
        Read7 : =======================================
        '''
        refLens = {'1':40}
        modFile = StringIO.StringIO('''i\t1\t9\tabcdefghij
d\t1\t10\t1
d\t1\t11\t2
d\t1\t12\t3
d\t1\t13\t4
d\t1\t14\t5
d\t1\t15\t6
d\t1\t16\t7
d\t1\t17\t8
d\t1\t18\t9
d\t1\t19\t0
i\t1\t19\tabcdefghij
d\t1\t20\t1
d\t1\t21\t2
d\t1\t22\t3
d\t1\t23\t4
d\t1\t24\t5
i\t1\t24\tabcdefghij
''')
        pool = [(12, 16, None, '5I', -1, 0, 5, 0),
                (10, 19, None, '10I', -1, 0, 10, 0),
                (15, 24, None, '5I,10D,5I', -1, 0, 10, 10),
                (10, 29, None, '10I,10D,10I', -1, 0, 20, 10),
                (25, 34, None, '5I,5D,5I', -1, 0, 10, 5),
                (20, 39, None, '10I,5D,10I', -1, 0, 20, 5),
                (5, 34, None, '5M,10I,10D,10I,5D,5I', 5, 0, 25, 15)
                ]
        self.batchTestHelper(modFile, pool, refLens)
class TestAnnotator2(unittest.TestCase):
    '''
    Test case for insertions/deletion/splicing junction in read
    10M | 10I | 10M | 5D | 10M | 5I | 5D | 5I | 10M
    Ref : 0-9 | -9 | 10-19 | 20-24 | 25-34 | -34 | 35-39 | -39 | 40-49
    Tgt : 0-9 | 10-19 | 20-29 | -29 | 30-39 | 40-44| -44 | 45-49| 50-59
    Read1 : =^=
    Read2 : =^=
    Read3 : =^=
    Read4 : =^=
    Read5 : =^=
    Read6 : =^=
    Read7 : =^=
    Read8 : =^=
    Read9: =^=
    Read10: =^=
    '''

    def setUp(self):
        annot.TESTING = 1
        annot.VERBOSITY = 1
        annot.LOG = 1
        self.refLens = {'1':50}
        self.modFile = StringIO.StringIO('''i\t1\t9\tabcdefghij
d\t1\t20\t1
d\t1\t21\t2
d\t1\t22\t3
d\t1\t23\t4
d\t1\t24\t5
i\t1\t34\tklmno
d\t1\t35\t6
d\t1\t36\t7
d\t1\t37\t8
d\t1\t38\t9
d\t1\t39\t0
i\t1\t39\tpqrst
''')

    # NOTE(review): duplicated from TestAnnotator.batchTestHelper — consider
    # sharing one helper.
    def batchTestHelper(self, modFile, pool, refLens):
        # Write the in-memory mod file to a temp .tsv and tabix-index it.
        tmpName = tempfile.mkstemp('.tsv')[1]
        tmpfp = open(tmpName, 'wb')
        for line in modFile:
            tmpfp.write(line)
        tmpfp.close()
        pysam.tabix_index(tmpName, force=True, seq_col=1, start_col=2, end_col=2,
                          meta_char='#', zerobased=True)
        tmpName += '.gz'
        modFile.close()
        self.chromoID = '1'
        self.modobj = mod.Mod(tmpName)
        self.modobj.load(self.chromoID)
        # NOTE(review): the loop variable ``tup`` is unused — bamIter is just
        # rebuilt once per pool entry with identical contents.
        for tup in pool:
            bamIter=[Read(tup[0], tup[1]+1, tup[2]) for tup in pool]
        a = annot.Annotator(self.chromoID, refLens[self.chromoID],
                            self.modobj, bamIter)
        results = a.execute()
        # Each pool tuple: (tgtStart, tgtEnd, cigar, expected cigar string,
        # expected ref start, expected #snps, #insertions, #deletions).
        for i,res in enumerate(results):
            self.assertEqual(polish(res[0]),pool[i][3])
            self.assertEqual(res[1], pool[i][4])
            self.assertEqual(res[2], pool[i][5])
            self.assertEqual(res[3], pool[i][6])
            self.assertEqual(res[4], pool[i][7])
        os.remove(tmpName)
        os.remove(tmpName+'.tbi')

    def test4(self):
        cigar = [(0,2),(1,1),(0,2)] #MIM
        pool = [(2,5,cigar,'2M,1I,2M', 2, 0, 0, 0),
                (8,11,cigar,'2M,3I', 8, 0, 2, 0),
                (12,15,cigar,'5I', -1, 0, 4, 0),
                (18,21,cigar,'3I,2M', 10, 0, 2, 0),
                (28,31,cigar,'2M,1I,5D,2M', 18, 0, 0, 0), #########
                (38,41,cigar,'2M,3I', 33, 0, 2, 0),
                (41,44,cigar,'5I', -1, 0, 4, 0),
                (43,46,cigar,'3I,5D,2I', -1, 0, 4, 0), ########
                (45,48,cigar,'5I', -1, 0, 4, 0),
                (48,51,cigar,'3I,2M', 40, 0, 2, 0),
                ]
        self.batchTestHelper(self.modFile, pool, self.refLens)

    def test5(self):
        cigar = [(0,1),(2,1),(1,1),(2,1),(0,1)] #MDIDM
        pool = [
                (2,5,cigar,'1M,1D,1I,1D,1M', 2, 0, 0, 0),
                (8,11,cigar,'1M,1D,2I', 8, 0, 1, 0),
                (12,15,cigar,'3I', -1, 0, 2, 0),
                (18,21,cigar,'2I,1D,1M', 11, 0, 1, 0),
                (28,31,cigar,'1M,1D,1I,6D,1M', 18, 0, 0, 0), #########
                (38,41,cigar,'1M,1D,2I', 33, 0, 1, 0),
                (41,44,cigar,'3I', -1, 0, 2, 0),
                (43,46,cigar,'2I,5D,1I', -1, 0, 2, 0), ########
                (45,48,cigar,'3I', -1, 0, 2, 0),
                (48,51,cigar,'2I,1D,1M', 41, 0, 1, 0),
                ]
        self.batchTestHelper(self.modFile, pool, self.refLens)

#    def test5alt(self):
#        cigar = [(0,1),(2,1),(1,1),(2,1),(0,1)] #MDIDM
#        pool = [
#                (2,5,cigar,'1M,2D,1I,1M', 2, 0, 0, 0),
#                (8,11,cigar,'1M,1D,2I', 8, 0, 1, 0),
#                (12,15,cigar,'3I', -1, 0, 2, 0),
#                (18,21,cigar,'1I,1D,1I,1M', 11, 0, 1, 0),
#                (28,31,cigar,'1M,7D,1I,1M', 18, 0, 0, 0), #########
#                (38,41,cigar,'1M,1D,2I', 33, 0, 1, 0),
#                (41,44,cigar,'3I', -1, 0, 2, 0),
#                (43,46,cigar,'1I,5D,2I', -1, 0, 2, 0), ########
#                (45,48,cigar,'3I', -1, 0, 2, 0),
#                (48,51,cigar,'1I,1D,1I,1M', 41, 0, 1, 0),
#                ]
#        self.batchTestHelper(self.modFile, pool, self.refLens)

    def test6(self):
        cigar = [(2,2),(1,1),(2,1),(0,1)]
        pool = [(2,5,cigar,'2D,1I,1D,1M', 5, 0, 0, 0),
                (8,11,cigar,'2D,2I', -1, 0, 1, 0),
                (12,15,cigar,'2I', -1, 0, 1, 0),
                (18,21,cigar,'1I,1D,1M', 11, 0, 0, 0),
                (28,31,cigar,'2D,1I,6D,1M', 26, 0, 0, 0), #########
                (38,41,cigar,'2D,2I', -1, 0, 1, 0),
                (41,44,cigar,'2I', -1, 0, 1, 0),
                (43,46,cigar,'1I,5D,1I', -1, 0, 1, 0), ########
                (45,48,cigar,'2I', -1, 0, 1, 0),
                (48,51,cigar,'1I,1D,1M', 41, 0, 0, 0),
                ]
        self.batchTestHelper(self.modFile, pool, self.refLens)

#        cigar = [(0,1),(2,1),(1,1),(2,2)]
#        pool = [(2,5,cigar,'2M,1I,2M', 2, 0, 0, 0),
#                (8,11,cigar,'2M,3I', 8, 0, 2, 0),
#                (12,15,cigar,'5I', -1, 0, 4, 0),
#                (18,21,cigar,'3I,2M', 10, 0, 2, 0),
#                (28,31,cigar,'2M,1I,5D,2M', 18, 0, 0, 0), #########
#                (38,41,cigar,'2M,3I', 33, 0, 2, 0),
#                (41,44,cigar,'5I', -1, 0, 4, 0),
#                (43,46,cigar,'3I,5D,2I', -1, 0, 4, 0), ########
#                (45,48,cigar,'5I', -1, 0, 4, 0),
#                (48,51,cigar,'3I,2M', 40, 0, 2, 0),
#                ]
#
#        #cigar = [(1,1),(0,4)]
#        pool = [(2,5,cigar,'2M,1I,2M', 2, 0, 0, 0),
#                (8,11,cigar,'2M,3I', 8, 0, 2, 0),
#                (12,15,cigar,'5I', -1, 0, 4, 0),
#                (18,21,cigar,'3I,2M', 10, 0, 2, 0),
#                (28,31,cigar,'2M,1I,5D,2M', 18, 0, 0, 0), #########
#                (38,41,cigar,'2M,3I', 33, 0, 2, 0),
#                (41,44,cigar,'5I', -1, 0, 4, 0),
#                (43,46,cigar,'3I,5D,2I', -1, 0, 4, 0), ########
#                (45,48,cigar,'5I', -1, 0, 4, 0),
#                (48,51,cigar,'3I,2M', 40, 0, 2, 0),
#                ]
#
#        #cigar = [(0,4),(1,1)]
#        pool = [(2,5,cigar,'2M,1I,2M', 2, 0, 0, 0),
#                (8,11,cigar,'2M,3I', 8, 0, 2, 0),
#                (12,15,cigar,'5I', -1, 0, 4, 0),
#                (18,21,cigar,'3I,2M', 10, 0, 2, 0),
#                (28,31,cigar,'2M,1I,5D,2M', 18, 0, 0, 0), #########
#                (38,41,cigar,'2M,3I', 33, 0, 2, 0),
#                (41,44,cigar,'5I', -1, 0, 4, 0),
#                (43,46,cigar,'3I,5D,2I', -1, 0, 4, 0), ########
#                (45,48,cigar,'5I', -1, 0, 4, 0),
#                (48,51,cigar,'3I,2M', 40, 0, 2, 0),
#                ]
#
#        self.batchTestHelper(modFile, pool, refLens)
# Run the unit tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
4834495 | #any import functions, other functinos can be written here and then imported.
import os
from zipfile import ZipFile
def handle_uploaded_file(f):
    """Stream an uploaded file chunk-by-chunk into the static upload folder.

    *f* is expected to expose ``.name`` and ``.chunks()`` (Django upload API).
    """
    dest_path = ('/projects/team-2/abharadwaj61/django/predictivewebserver/'
                 'genome_assembly/static/upload/' + f.name)
    with open(dest_path, 'wb+') as destination:
        for chunk in f.chunks():
            destination.write(chunk)
#take in the zip file input and make a separate folder based on uuid
#initial implementatoin with just the hard coded folder
def ga_unzip_and_move(file_name, folder_name):
    """Extract an uploaded genome-assembly ``.zip`` into the job's data folder.

    Returns the extraction path on success, or 0 when *file_name* does not
    end in ``.zip``.
    """
    print(file_name, folder_name)
    filepath = '/projects/team-2/abharadwaj61/django/predictivewebserver/genome_assembly/genome_assembly_data/' + folder_name + '/data'
    if not str(file_name).endswith('.zip'):
        print('some exception handling to be done here')
        return 0
    archive = '/projects/team-2/abharadwaj61/django/predictivewebserver/genome_assembly/static/upload/' + str(file_name)
    with ZipFile(archive, 'r') as f1:
        f1.extractall(filepath)
    return filepath
def gp_unzip_and_move(file_name, folder_name):
    """Extract an uploaded gene-prediction ``.zip`` into the job's data folder.

    Returns the extraction path on success, or 0 when *file_name* does not
    end in ``.zip``.
    """
    print(file_name, folder_name)
    filepath = '/projects/team-2/abharadwaj61/django/predictivewebserver/genome_assembly/gene_prediction_data/' + folder_name + '/data'
    if not str(file_name).endswith('.zip'):
        print('some exception handling to be done here')
        return 0
    archive = '/projects/team-2/abharadwaj61/django/predictivewebserver/genome_assembly/static/upload/' + str(file_name)
    with ZipFile(archive, 'r') as f1:
        f1.extractall(filepath)
    return filepath
def fa_unzip_and_move(file_name, folder_name):
    """Extract an uploaded functional-annotation ``.zip`` into the job's data
    folder.

    Returns the extraction path on success, or 0 when *file_name* does not
    end in ``.zip``.
    """
    print(file_name, folder_name)
    filepath = '/projects/team-2/abharadwaj61/django/predictivewebserver/genome_assembly/functional_annotation_data/' + folder_name + '/data'
    if not str(file_name).endswith('.zip'):
        print('some exception handling to be done here')
        return 0
    archive = '/projects/team-2/abharadwaj61/django/predictivewebserver/genome_assembly/static/upload/' + str(file_name)
    with ZipFile(archive, 'r') as f1:
        f1.extractall(filepath)
    return filepath
def cg_unzip_and_move(file_name, folder_name):
    """Extract an uploaded comparative-genomics ``.zip`` into the job's data
    folder.

    Returns the extraction path on success, or 0 when *file_name* does not
    end in ``.zip``.
    """
    print(file_name, folder_name)
    filepath = '/projects/team-2/abharadwaj61/django/predictivewebserver/genome_assembly/comparative_genomics_data/' + folder_name + '/data'
    if not str(file_name).endswith('.zip'):
        print('some exception handling to be done here')
        return 0
    archive = '/projects/team-2/abharadwaj61/django/predictivewebserver/genome_assembly/static/upload/' + str(file_name)
    with ZipFile(archive, 'r') as f1:
        f1.extractall(filepath)
    return filepath
| StarcoderdataPython |
1728334 | <gh_stars>1-10
import re
import sys
import spacy
import unicodedata
import numpy as np
import pandas as pd
import ipywidgets as widgets
from ipywidgets import interact
from ipywidgets import GridspecLayout
from termcolor import colored
from IPython.display import display
from spacy.lang.en.stop_words import STOP_WORDS
from bs4 import BeautifulSoup
from nltk.stem.porter import PorterStemmer
from datapurifier.preprocessing.data_dict.contractions import CONTRACTIONS
from datapurifier.preprocessing.data_dict.emoticons import EMOTICONS
from datapurifier.preprocessing.data_dict.emoji import UNICODE_EMO
from datapurifier.decorators import *
from datapurifier.widgets import Widgets
from datapurifier.utils import *
ps = PorterStemmer()
class Nlpurifier:
def __init__(self: str, df: pd.DataFrame, target: str, spacy_model="en_core_web_sm"):
self._set_df_and_target(df, target)
self.spacy_model = spacy_model
self.nlp = spacy.load(spacy_model)
self.widget = Widgets()
self.purifier_widgets = {}
self.word_count_series = pd.Series()
self._show_widgets()
def _start_purifying(self, e):
print_in_blue(
f"Dataframe contains {self.df.shape[0]} rows and {self.df.shape[1]} columns\n")
if self.purifier_widgets["dropna"]:
self.drop_null_rows()
if self.purifier_widgets["lower"]:
self.lower()
if self.purifier_widgets["contraction"]:
self.contraction_to_expansion()
if self.purifier_widgets["count_mail"]:
self.count_emails()
if self.purifier_widgets["count_urls"]:
self.count_urls()
if self.purifier_widgets["word_count"]:
self.get_word_count()
if self.purifier_widgets["remove_numbers"]:
self.remove_numbers()
if self.purifier_widgets["remove_html"]:
self.remove_html_tags()
if self.purifier_widgets["remove_mail"]:
self.remove_emails()
if self.purifier_widgets["remove_urls"]:
self.remove_urls()
if self.purifier_widgets["remove_spaces"]:
self.remove_multiple_spaces()
if self.purifier_widgets["remove_accented"]:
self.remove_accented_chars()
if self.purifier_widgets["remove_stop_words"]:
self.remove_stop_words()
if self.most_common_word_range:
word_range = self.most_common_word_range
if self.word_count_series.empty:
self.generate_word_count_series()
print_in_blue(
"To see pandas series of word and their respective count, type '<obj>.word_count_series'\n")
print_in_blue(f"Removing top {word_range} words from dataframe")
word_list = self.word_count_series[:word_range].index.tolist()
print("Which are: ", word_list)
self.remove_words(word_list)
if self.most_rare_word_range:
word_range = self.most_rare_word_range
if self.word_count_series.empty:
self.generate_word_count_series()
print_in_blue(
"To see pandas series of word and their respective count, type '<obj>.word_count_series'\n")
print_in_blue(
f"Removing top {word_range} rare words from dataframe")
word_list = self.word_count_series[-word_range:].index.tolist()
print("Which are: ", word_list)
self.remove_words(word_list)
if self.purifier_widgets["convert_emojis_to_word"]:
self.convert_emojis_to_word()
if self.purifier_widgets["convert_emoticons_to_word"]:
self.convert_emoticons_to_word()
if self.purifier_widgets["remove_emojis"]:
if self.purifier_widgets["convert_emojis_to_word"]:
print_in_red(
"'Remove Emojis' action is skipped because, all the emojis are converted to words.")
else:
self.remove_emojis()
if self.purifier_widgets["remove_emoticons"]:
if self.purifier_widgets["convert_emoticons_to_word"]:
print_in_red(
"'Remove Emoticons' action is skipped because, all the emoticons are converted to words.")
else:
self.remove_emoticons()
if self.purifier_widgets["remove_special_and_punct"]:
self.remove_special_and_punctions()
if self.purifier_widgets["root_word_technique"]:
technique = self.purifier_widgets["root_word_technique"]
if technique == "Stemming":
self.stemming()
if technique == "Lemmatization":
self.leammatize()
print(colored("\nPurifying Completed!\n", "green", attrs=["bold"]))
print("type <obj>.df to access processed and purified dataframe")
def _set_root_word_technique(self, technique):
self.purifier_widgets["root_word_technique"] = technique
def _set_most_common_word_range(self, common_range: int) -> None:
self.most_common_word_range = common_range
print_in_red(f"Top {common_range} most common words will be removed")
def _most_common_words_widget(self, condition):
if condition:
print("The top words maybe frequent 'Stop Words' and they will be removed instead of actual words, to avoid this consider ticking 'Remove Stop Words' checkbox")
interact(self._set_most_common_word_range,
common_range=widgets.IntSlider(min=1, max=100, step=1, value=10, description="Word Range:"))
else:
self.most_common_word_range = None
print("-"*50)
def _set_most_rare_word_range(self, rare_range: int) -> None:
self.most_rare_word_range = rare_range
print_in_red(f"Top {rare_range} most rare words will be removed")
def _most_rare_words_widget(self, condition):
if condition:
interact(self._set_most_rare_word_range,
rare_range=widgets.IntSlider(min=1, max=100, step=1, value=10, description="Word Range:"))
else:
self.most_rare_word_range = None
print("-"*50)
def _widget_interactions(self, dropna, lower, contraction, count_mail,
count_urls, word_count, remove_numbers, remove_stop_words,
remove_special_and_punct, remove_mail, remove_html,
remove_urls, remove_spaces, remove_accented,
convert_emojis_to_word, remove_emojis, remove_emoticons, convert_emoticons_to_word):
self.purifier_widgets["dropna"] = True if dropna else False
self.purifier_widgets["lower"] = True if lower else False
self.purifier_widgets["contraction"] = True if contraction else False
self.purifier_widgets["count_mail"] = True if count_mail else False
self.purifier_widgets["count_urls"] = True if count_urls else False
self.purifier_widgets["word_count"] = True if word_count else False
self.purifier_widgets["remove_numbers"] = True if remove_numbers else False
self.purifier_widgets["remove_stop_words"] = True if remove_stop_words else False
self.purifier_widgets["remove_special_and_punct"] = True if remove_special_and_punct else False
self.purifier_widgets["remove_mail"] = True if remove_mail else False
self.purifier_widgets["remove_html"] = True if remove_html else False
self.purifier_widgets["remove_urls"] = True if remove_urls else False
self.purifier_widgets["remove_spaces"] = True if remove_spaces else False
self.purifier_widgets["remove_accented"] = True if remove_accented else False
self.purifier_widgets["convert_emojis_to_word"] = True if convert_emojis_to_word else False
self.purifier_widgets["remove_emojis"] = True if remove_emojis else False
self.purifier_widgets["remove_emoticons"] = True if remove_emoticons else False
self.purifier_widgets["convert_emoticons_to_word"] = True if convert_emoticons_to_word else False
def _show_widgets(self):
self.dropna_widget = self.widget.checkbox(
description='Drop Null Rows')
self.remove_numbers_widget = self.widget.checkbox(
description='Remove Numbers and Alphanumeric words')
self.lower_widget = self.widget.checkbox(description='Lower all Words')
self.contraction_widget = self.widget.checkbox(
description='Contraction to Expansion')
self.count_mail_widget = self.widget.checkbox(
description='Count Mails')
self.count_urls_widget = self.widget.checkbox(description='Count Urls')
self.word_count_widget = self.widget.checkbox(
description='Get Word Count')
self.remove_emojis_widget = self.widget.checkbox(
description='Remove Emojis')
self.remove_emoticons_widget = self.widget.checkbox(
description='Remove Emoticons')
self.convert_emoticons_to_word_widget = self.widget.checkbox(
description='Conversion of Emoticons to Words')
self.convert_emojis_to_word_widget = self.widget.checkbox(
description='Conversion of Emojis to Words')
self.remove_stop_words_widget = self.widget.checkbox(
description='Remove Stop Words')
self.remove_special_and_punct_widget = self.widget.checkbox(
description='Remove Special Characters and Punctuations')
self.remove_mail_widget = self.widget.checkbox(
description='Remove Mails')
self.remove_html_widget = self.widget.checkbox(
description='Remove Html Tags')
self.remove_spaces_widget = self.widget.checkbox(
description='Remove Multiple Spaces')
self.remove_accented_widget = self.widget.checkbox(
description='Remove Accented Characters')
self.remove_urls_widget = self.widget.checkbox(
description='Remove Urls')
items = [
[self.dropna_widget, self.lower_widget, self.contraction_widget],
[self.count_urls_widget, self.word_count_widget, self.count_mail_widget],
[self.remove_special_and_punct_widget, self.remove_numbers_widget,
self.remove_stop_words_widget],
[self.remove_accented_widget,
self.remove_mail_widget, self.remove_html_widget],
[self.remove_urls_widget, self.remove_spaces_widget, self.convert_emojis_to_word_widget
],
[self.remove_emojis_widget, self.remove_emoticons_widget,
self.convert_emoticons_to_word_widget]
]
grid_rows = len(items)
grid_cols = 3
grid = GridspecLayout(grid_rows, grid_cols)
for i in range(len(items)):
for j in range(len(items[i])):
grid[i, j] = items[i][j]
self.grid_output = widgets.interactive_output(
self._widget_interactions, {'dropna': self.dropna_widget, 'lower': self.lower_widget, 'contraction': self.contraction_widget, 'count_mail': self.count_mail_widget,
'count_urls': self.count_urls_widget, 'word_count': self.word_count_widget,
'remove_numbers': self.remove_numbers_widget, 'remove_stop_words': self.remove_stop_words_widget,
'remove_special_and_punct': self.remove_special_and_punct_widget, 'remove_mail': self.remove_mail_widget,
'remove_html': self.remove_html_widget, 'remove_urls': self.remove_urls_widget, 'remove_spaces': self.remove_spaces_widget,
'remove_accented': self.remove_accented_widget, 'convert_emojis_to_word': self.convert_emojis_to_word_widget,
'remove_emojis': self.remove_emojis_widget, 'remove_emoticons': self.remove_emoticons_widget, 'convert_emoticons_to_word': self.convert_emoticons_to_word_widget
})
# Displaying all methods in grid layout
display(grid)
# Display Root Word Widget
print_in_blue("\nConvert Word to its Base Form")
self.root_word_widget = widgets.RadioButtons(
options=['None', 'Stemming', 'Lemmatization'],
description="Technique:"
)
interact(self._set_root_word_technique,
technique=self.root_word_widget)
# Widget for removing most common words
print_in_red("Remove Top Common Words")
interact(self._most_common_words_widget, condition=widgets.Checkbox(
description="Remove Top Common Words"))
# Widget for removing most rare words
print_in_red("Remove Top Rare Words")
interact(self._most_rare_words_widget, condition=widgets.Checkbox(
description="Remove Top Rare Words"))
# Button For Start Purifying
start_btn = widgets.Button(description="Start Purifying")
start_btn.on_click(self._start_purifying)
display(start_btn)
def _set_df_and_target(self, df, target):
self.df = df.copy()
if target in self.df.columns:
self.target = target
else:
print_in_red(
"Please provide correct `target` column name, containing only textual data for analysis")
sys.exit(1)
"""
###############################################################################################
Code for Nlp Purification Methods Starts
###############################################################################################
"""
def get_text(self):
self.text = " ".join(self.df[self.target])
return self.text
def drop_null_rows(self):
"""Drops rows having [' ', 'NULL', np.nan] values """
total_null_rows = self.df[self.target].isin(
[' ', 'NULL', np.nan]).sum()
if total_null_rows > 0:
print("Dropping rows having [' ', 'NULL', numpy.nan] values")
self.df.dropna(inplace=True)
self.df.reset_index(drop=True, inplace=True)
print_in_red(f"Total Null rows dropped: {total_null_rows}\n")
else:
print(colored("There is no null rows present.\n", "green"))
@timer
def lower(self):
    """Lower-case every value in the target column."""
    self.df[self.target] = self.df[self.target].apply(str.lower)
@timer
def remove_numbers(self):
    """Strip every decimal digit from the target column."""
    digits = re.compile(r'[0-9]')
    self.df[self.target] = self.df[self.target].apply(
        lambda text: digits.sub('', text))
@timer
def remove_special_and_punctions(self):
    """Remove characters other than letters, digits, spaces and hyphens.

    Note: the character class deliberately contains literal spaces and '-',
    so whitespace and hyphens are preserved.
    """
    keep_basic = re.compile('[^A-Z a-z 0-9-]+')
    self.df[self.target] = self.df[self.target].apply(
        lambda text: keep_basic.sub("", text))
@timer
def remove_multiple_spaces(self):
    """Collapse runs of whitespace into single spaces and trim the ends."""
    def collapse(text):
        return " ".join(text.split())
    self.df[self.target] = self.df[self.target].apply(collapse)
@timer
def remove_html_tags(self):
    """Strip HTML markup from the target column, keeping only the visible text."""
    def strip_markup(text):
        return BeautifulSoup(text, 'html.parser').get_text()
    self.df[self.target] = self.df[self.target].apply(strip_markup)
def _contraction_to_expansion(self, x: str) -> str:
    """Expand every known contraction in *x* using the CONTRACTIONS mapping.

    Non-string inputs are passed through unchanged (guard clause replaces the
    original `type(x) is str` check with the idiomatic isinstance()).
    """
    if not isinstance(x, str):
        return x
    for contraction, expansion in CONTRACTIONS.items():
        x = x.replace(contraction, expansion)
    return x
@timer
def contraction_to_expansion(self):
    """Apply _contraction_to_expansion to every row of the target column."""
    self.df[self.target] = self.df[self.target].apply(
        self._contraction_to_expansion)
def _remove_accented_chars(self, x: str) -> str:
x = unicodedata.normalize('NFKD', x).encode(
'ascii', 'ignore').decode('utf-8', 'ignore')
return x
@timer
def remove_accented_chars(self):
    """Apply _remove_accented_chars to every row of the target column."""
    self.df[self.target] = self.df[self.target].apply(
        self._remove_accented_chars)
@timer
def remove_stop_words(self):
    """Drop tokens that appear in the module-level STOP_WORDS collection."""
    def drop_stops(text):
        return " ".join(word for word in text.split() if word not in STOP_WORDS)
    self.df[self.target] = self.df[self.target].apply(drop_stops)
@timer
def count_emails(self):
    """Extract e-mail addresses into an 'emails' column and count them per row."""
    email_re = re.compile(r'([a-zA-Z0-9+._-]+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9_-]+)')
    self.df['emails'] = self.df[self.target].apply(email_re.findall)
    self.df["emails_counts"] = self.df["emails"].apply(len)
@timer
def remove_emails(self):
    """Delete e-mail addresses from the target column."""
    email_re = re.compile(r'([a-zA-Z0-9+._-]+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9_-]+)')
    self.df[self.target] = self.df[self.target].apply(
        lambda text: email_re.sub("", text))
@timer
def count_urls(self):
    """Count http/https/ftp URLs per row into a 'urls_counts' column."""
    url_re = re.compile(
        r'(http|ftp|https)://([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?')
    self.df["urls_counts"] = self.df[self.target].apply(
        lambda text: len(url_re.findall(text)))
@timer
def remove_urls(self):
    """Delete http/https/ftp URLs from the target column."""
    url_re = re.compile(
        r'(http|ftp|https)://([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?')
    self.df[self.target] = self.df[self.target].apply(
        lambda text: url_re.sub("", text))
@timer
def get_word_count(self):
    """Build a frequency Series over every whitespace token in the corpus."""
    tokens = self.get_text().split()
    self.word_count = pd.Series(tokens).value_counts()
    print("type <obj>.word_count for getting word count series")
def _remove_emoji(self, x: str):
"""Removes Emoji Lambda Function
Reference : https://gist.github.com/slowkow/7a7f61f495e3dbb7e3d767f97bd7304b"""
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
u"\U00002702-\U000027B0"
u"\U000024C2-\U0001F251"
"]+", flags=re.UNICODE)
return emoji_pattern.sub(r'', x)
@timer
def remove_emojis(self):
    """Apply _remove_emoji to every row of the target column."""
    self.df[self.target] = self.df[self.target].apply(self._remove_emoji)
def _remove_emoticons(self, x: str):
    """Delete every known emoticon (keys of EMOTICONS) from *x*."""
    alternatives = u'|'.join(k for k in EMOTICONS)
    return re.compile(u'(' + alternatives + u')').sub(r'', x)
@timer
def remove_emoticons(self):
    """Apply _remove_emoticons to every row of the target column."""
    self.df[self.target] = self.df[self.target].apply(self._remove_emoticons)
def _convert_emoticons_to_word(self, text: str):
    """Replace each emoticon with its underscore-joined description.

    NOTE(review): EMOTICONS keys are interpolated into the regex unescaped,
    so this assumes they are already regex-safe — confirm upstream.
    """
    for emot in EMOTICONS:
        # e.g. description "Happy face, smiley" -> "Happy_face_smiley"
        text = re.sub(
            u'('+emot+')', "_".join(EMOTICONS[emot].replace(",", "").split()), text)
    return text
@timer
def convert_emoticons_to_word(self):
    """Apply _convert_emoticons_to_word to every row of the target column."""
    self.df[self.target] = self.df[self.target].apply(
        self._convert_emoticons_to_word)
def _convert_emojis_to_word(self, text: str):
    """Replace each emoji with its underscore-joined name from UNICODE_EMO.

    NOTE(review): UNICODE_EMO keys are interpolated into the regex unescaped;
    assumes they contain no regex metacharacters — confirm upstream.
    """
    for emot in UNICODE_EMO:
        # Description ":smiling face:" -> "smiling_face"
        text = re.sub(
            r'('+emot+')', "_".join(UNICODE_EMO[emot].replace(",", "").replace(":", "").split()), text)
    return text
@timer
def convert_emojis_to_word(self):
    """Apply _convert_emojis_to_word to every row of the target column."""
    self.df[self.target] = self.df[self.target].apply(
        self._convert_emojis_to_word)
def generate_word_count_series(self):
    """Generate pandas series for word counts, to access type '<obj>.word_count_series'"""
    tokens = " ".join(self.df[self.target]).split()
    self.word_count_series = pd.Series(tokens).value_counts()
@timer
def remove_words(self, word_list):
    """Removes words which are in word list.

    Args:
        word_list (list): List of words to be removed.
    """
    # Set gives O(1) membership tests instead of an O(n) list scan per token.
    banned = set(word_list)
    self.df[self.target] = self.df[self.target].apply(lambda x: " ".join(
        word for word in x.split() if word not in banned))
def _lemmatize(self, x: str) -> str:
    """Uses spacy library to lemmatize words.

    Args:
        x (str): sentence

    Returns a string of lemmas, each followed by a space (matching the
    original accumulator behaviour, including the trailing space).
    """
    doc = self.nlp(x)
    return "".join(token.lemma_ + " " for token in doc)
@timer
def leammatize(self):
    """Lemmatize every row of the target column with the configured spaCy model."""
    # NOTE(review): method name is a typo of "lemmatize" but is public API,
    # so it is kept unchanged.
    print(
        f"Internally for lemmatization it uses '{self.spacy_model}' spacy model,\nto change it please provide `spacy_model='your_model' in constructor`")
    self.df[self.target] = self.df[self.target].apply(
        lambda x: self._lemmatize(x))
def _stemming(self, x):
    """Stem each whitespace token of *x* with the module-level stemmer `ps`."""
    stems = [ps.stem(word) for word in x.split()]
    return " ".join(stems)
@timer
def stemming(self):
    """Apply Porter stemming to every row of the target column."""
    print("Using Porter Stemmer for stemming")
    self.df[self.target] = self.df[self.target].apply(self._stemming)
if __name__ == '__main__':
    # Library-only module: nothing to execute when run directly.
    pass
| StarcoderdataPython |
3328233 | import logging
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import generics, permissions, viewsets
from waldur_core.core import mixins as core_mixins
from waldur_core.structure import filters as structure_filters
from waldur_core.structure import permissions as structure_permissions
from waldur_core.structure import views as structure_views
from . import filters, executors, models, serializers
logger = logging.getLogger(__name__)
class JiraServiceViewSet(structure_views.BaseServiceViewSet):
    """CRUD endpoints for JIRA service settings records."""
    queryset = models.JiraService.objects.all()
    serializer_class = serializers.ServiceSerializer
class JiraServiceProjectLinkViewSet(structure_views.BaseServiceProjectLinkViewSet):
    """CRUD endpoints for links between JIRA services and structure projects."""
    queryset = models.JiraServiceProjectLink.objects.all()
    serializer_class = serializers.ServiceProjectLinkSerializer
class JiraPermissionMixin(object):
    """Queryset mixin: staff see everything, other users only their own objects."""

    def get_queryset(self):
        queryset = super(JiraPermissionMixin, self).get_queryset()
        if self.request.user.is_staff:
            return queryset
        return queryset.filter(user=self.request.user)
class ProjectTemplateViewSet(structure_views.BaseServicePropertyViewSet):
    """Read endpoints for JIRA project templates, addressed by uuid."""
    queryset = models.ProjectTemplate.objects.all()
    filter_class = filters.ProjectTemplateFilter
    serializer_class = serializers.ProjectTemplateSerializer
    lookup_field = 'uuid'
class ProjectViewSet(structure_views.ImportableResourceViewSet):
    """CRUD + backend-import endpoints for JIRA projects.

    Executors run synchronously (async_executor = False) inside an atomic
    transaction; only staff may delete a project.
    """
    queryset = models.Project.objects.all()
    filter_class = filters.ProjectFilter
    serializer_class = serializers.ProjectSerializer

    create_executor = executors.ProjectCreateExecutor
    update_executor = executors.ProjectUpdateExecutor
    delete_executor = executors.ProjectDeleteExecutor
    async_executor = False
    use_atomic_transaction = True

    destroy_permissions = [structure_permissions.is_staff]
    importable_resources_backend_method = 'get_resources_for_import'
    importable_resources_serializer_class = serializers.ProjectImportableSerializer
    import_resource_serializer_class = serializers.ProjectImportSerializer
class IssueTypeViewSet(structure_views.BaseServicePropertyViewSet):
    """Read endpoints for JIRA issue types, addressed by uuid."""
    queryset = models.IssueType.objects.all()
    filter_class = filters.IssueTypeFilter
    serializer_class = serializers.IssueTypeSerializer
    lookup_field = 'uuid'
class PriorityViewSet(structure_views.BaseServicePropertyViewSet):
    """Read endpoints for JIRA priorities, addressed by uuid."""
    queryset = models.Priority.objects.all()
    serializer_class = serializers.PrioritySerializer
    filter_class = filters.PriorityFilter
    lookup_field = 'uuid'
class IssueViewSet(JiraPermissionMixin,
                   structure_views.ResourceViewSet):
    """CRUD endpoints for JIRA issues; non-staff users only see their own."""
    queryset = models.Issue.objects.all()
    filter_class = filters.IssueFilter
    serializer_class = serializers.IssueSerializer

    create_executor = executors.IssueCreateExecutor
    update_executor = executors.IssueUpdateExecutor
    delete_executor = executors.IssueDeleteExecutor
    async_executor = False
    use_atomic_transaction = True
class CommentViewSet(JiraPermissionMixin,
                     structure_views.ResourceViewSet):
    """CRUD endpoints for JIRA issue comments; non-staff users only see their own."""
    queryset = models.Comment.objects.all()
    filter_class = filters.CommentFilter
    serializer_class = serializers.CommentSerializer

    create_executor = executors.CommentCreateExecutor
    update_executor = executors.CommentUpdateExecutor
    delete_executor = executors.CommentDeleteExecutor
    async_executor = False
    use_atomic_transaction = True
class AttachmentViewSet(JiraPermissionMixin,
                        core_mixins.CreateExecutorMixin,
                        core_mixins.DeleteExecutorMixin,
                        viewsets.ModelViewSet):
    """Create/delete endpoints for JIRA issue attachments, addressed by uuid.

    Visibility is limited by JiraPermissionMixin plus role and object-level
    permission filters.
    """
    queryset = models.Attachment.objects.all()
    filter_class = filters.AttachmentFilter
    filter_backends = structure_filters.GenericRoleFilter, DjangoFilterBackend
    permission_classes = permissions.IsAuthenticated, permissions.DjangoObjectPermissions
    serializer_class = serializers.AttachmentSerializer

    create_executor = executors.AttachmentCreateExecutor
    delete_executor = executors.AttachmentDeleteExecutor
    async_executor = False
    use_atomic_transaction = True
    lookup_field = 'uuid'
class WebHookReceiverViewSet(generics.CreateAPIView):
    """Unauthenticated endpoint that accepts JIRA webhook POST notifications."""
    authentication_classes = ()
    permission_classes = ()
    serializer_class = serializers.WebHookReceiverSerializer

    def create(self, request, *args, **kwargs):
        try:
            return super(WebHookReceiverViewSet, self).create(request, *args, **kwargs)
        except Exception as e:
            # Log parsing/validation failures with a traceback before re-raising,
            # using lazy %-style args instead of eager string interpolation.
            logger.exception("Can't parse JIRA WebHook request: %s", e)
            raise
def get_jira_projects_count(project):
    """Return the current usage of the project's JIRA-project count quota."""
    quota = project.quotas.get(name='nc_jira_project_count')
    return quota.usage
structure_views.ProjectCountersView.register_counter('jira-projects', get_jira_projects_count)
| StarcoderdataPython |
3323870 | #from http://stackoverflow.com/questions/12524994/encrypt-decrypt-using-pycrypto-aes-256
import base64
from Crypto.Cipher import AES
from Crypto import Random
BS = 16  # AES block size in bytes


def pad(s):
    """Pad *s* to a multiple of BS, PKCS#7-style (pad char encodes pad length).

    Replaces the original lambda assignments (PEP 8 E731) with named defs;
    behaviour is unchanged, including padding a full extra block when the
    input length is already a multiple of BS.
    """
    padding = BS - len(s) % BS
    return s + padding * chr(padding)


def unpad(s):
    """Strip the padding added by pad(); also works on bytes from AES.decrypt."""
    return s[:-ord(s[len(s) - 1:])]
class AESCipher:
    """AES-CBC cipher with a random IV prepended to the base64-encoded output.

    NOTE(review): `key` must already be a valid AES key length (16/24/32
    bytes) — it is used as-is, not hashed.  CBC without a MAC provides no
    integrity protection; confirm that is acceptable for the use case.
    """
    def __init__(self, key):
        self.key = key

    def encrypt(self, raw):
        """Pad *raw*, encrypt under a fresh random IV, return base64(iv + ct) as ASCII."""
        raw = pad(raw)
        iv = Random.new().read(AES.block_size)
        cipher = AES.new(self.key, AES.MODE_CBC, iv)
        # NOTE(review): passes a str to cipher.encrypt — assumes a pycrypto
        # version that accepts str input; confirm against the pinned library.
        return base64.b64encode(iv + cipher.encrypt(raw)).decode('ascii')

    def decrypt(self, enc):
        """Reverse encrypt(): base64-decode, split off the 16-byte IV, decrypt and unpad."""
        enc = base64.b64decode(enc)
        iv = enc[:16]
        cipher = AES.new(self.key, AES.MODE_CBC, iv)
        return unpad(cipher.decrypt(enc[16:])).decode('ascii')
#-These functions aren't memebrs of the cipher, but are useful utilities-#
#writes username/encrypted password pairs to a file given a dictionary
def write_pairs_to_file (file_name, dict) :
file = open(file_name, 'w')
for key in dict:
name = key
password = dict[key]
file.write(name)
file.write(' : ')
file.write(password)
file.write('\n')
print("added user "+name+ " with password " + password)
def read_pairs_from_file(filename):
    """Read 'name : password' lines from *filename* into a dict.

    Bug fix: the original ignored *filename* and always opened the
    hard-coded "login_info.txt".  Lines without the ' : ' separator are
    skipped instead of raising IndexError.
    """
    return_dict = {}
    with open(filename) as f:
        for line in f:
            username, sep, password = line.partition(" : ")
            if sep:
                return_dict[username] = password.rstrip("\n")
    return return_dict
3372475 | <gh_stars>100-1000
import numpy as np
import zengl
from utils import glsl
def test_render_triangle(ctx: zengl.Context):
    """Render a red triangle into a 256x256 RGBA image and spot-check pixels.

    Samples a 4x4 grid of points (50px apart, offset 50px) and asserts which
    of them land inside the triangle (red) vs. outside (cleared, all zeros).
    """
    img = ctx.image((256, 256), 'rgba8unorm')
    triangle = ctx.pipeline(
        vertex_shader=glsl('triangle.vert'),
        fragment_shader=glsl('triangle.frag'),
        framebuffer=[img],
        vertex_count=3,
    )
    img.clear()
    triangle.render()

    pixels = np.frombuffer(img.read(), 'u1').reshape(256, 256, 4)
    # Grid of sample coordinates: rows repeat, columns tile.
    x = np.repeat(np.arange(4) * 50 + 50, 4)
    y = np.tile(np.arange(4) * 50 + 50, 4)
    r = [255, 0, 0, 255]  # opaque red
    z = [0, 0, 0, 0]      # cleared background
    np.testing.assert_array_equal(pixels[x, y], [
        r, r, r, r,
        z, r, r, z,
        z, r, r, z,
        z, z, z, z,
    ])

    # from matplotlib import pyplot as plt
    # plt.imshow(pixels)
    # plt.plot(x, y, 'bx')
    # plt.show()
| StarcoderdataPython |
4811604 | <gh_stars>0
#!/usr/bin/env python3
from netmiko import ConnectHandler
from getpass import getpass
def _make_nxos_device(host):
    """Build a netmiko connection dict for an NX-OS switch (shared settings)."""
    return {
        "device_type": "cisco_nxos",
        "host": host,
        "username": "pyclass",
        # "password": getpass(),
        "password": "<PASSWORD>",
        "session_log": "session.txt",
        # 'fast_cli': True,
    }


# Factory removes the duplicated literal dicts; the module-level names and
# their contents are unchanged.
ciscoswitch1 = _make_nxos_device("nxos1.lasthop.io")
ciscoswitch2 = _make_nxos_device("nxos2.lasthop.io")
# Push config.txt to each switch, save the config, and show the prompt.
for switch in (ciscoswitch1, ciscoswitch2):
    conn = ConnectHandler(**switch)
    config_output = conn.send_config_from_file("config.txt")
    print("\n\n" + config_output + "\n\n")
    save_output = conn.save_config()
    print("\n\n" + save_output + "\n\n")
    print(conn.find_prompt())
    conn.disconnect()
| StarcoderdataPython |
135716 | <reponame>gitguige/openpilot0.8.9
#!/usr/bin/env python3
import argparse
import carla # pylint: disable=import-error
import math
import numpy as np
import time
import threading
from cereal import log
from multiprocessing import Process, Queue
from typing import Any
import cereal.messaging as messaging
from common.params import Params
from common.numpy_fast import clip
from common.realtime import Ratekeeper, DT_DMON
from lib.can import can_function
from selfdrive.car.honda.values import CruiseButtons
from selfdrive.test.helpers import set_params_enabled
import sys,os,signal
# from sys import argv
parser = argparse.ArgumentParser(description='Bridge between CARLA and openpilot.')
parser.add_argument('--joystick', action='store_true')
parser.add_argument('--low_quality', action='store_true')
parser.add_argument('--town', type=str, default='Town04_Opt')
parser.add_argument('--spawn_point', dest='num_selected_spawn_point',
                    type=int, default=16)
# Lead vehicle cruise speed as a percentage above its speed limit: (1 + 80%)V0 = 1.8V0.
parser.add_argument('--cruise_lead', type=int, default=80)
# Lead speed after the mid-run change: (1 + 80%)V0 = 1.8V0.
parser.add_argument('--cruise_lead2', type=int, default=80)
# Initial relative distance (meters) between ego vehicle and the lead vehicle2.
parser.add_argument('--init_dist', type=int, default=100)
# parser.add_argument('--faultinfo', type=str, default='')
# parser.add_argument('--scenarioNum', type=int, default=1)
# parser.add_argument('--faultNum', type=int, default=1)
args = parser.parse_args()
W, H = 1164, 874  # road-camera frame dimensions expected by openpilot

REPEAT_COUNTER = 5     # steps used to ease out stale manual controls
PRINT_DECIMATION = 100
STEER_RATIO = 15.

vEgo = 60  # mph; set in selfdrive/controlsd
FI_Enable = True  # False: run fault-free; True: enable the fault injection engine
reInitialize_bridge = False
# 0: FI lasts 2.5s after t_f; 1: FI whenever context is True between [t_f, t_f+2.5s]
Mode_FI_duration = 0
Driver_react_Enable = False   # simulate driver takeover after an alert
Other_vehicles_Enable = False  # spawn extra background traffic

# Publish fake sensor/CAN streams to openpilot; subscribe to its control outputs.
pm = messaging.PubMaster(['roadCameraState', 'sensorEvents', 'can', "gpsLocationExternal"])
sm = messaging.SubMaster(['carControl','controlsState','radarState','modelV2'])
class VehicleState:
    """Mutable ego-vehicle state shared between CARLA callbacks and the CAN thread."""
    def __init__(self):
        self.speed = 0          # m/s, updated from CARLA each loop
        self.angle = 0          # steering angle fed into the fake CAN stream
        self.bearing_deg = 0.0  # compass heading, set by the IMU callback
        self.vel = carla.Vector3D()
        self.cruise_button = 0  # simulated cruise-control button press
        self.is_engaged = False  # whether openpilot is currently engaged
def steer_rate_limit(old, new):
    """Clamp the steering change to at most 0.5 degrees per step."""
    limit = 0.5
    return max(old - limit, min(new, old + limit))
frame_id = 0  # local frame counter (incremented but not sent; see note below)


def cam_callback(image):
    """Publish a CARLA camera frame as a roadCameraState message."""
    global frame_id
    img = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
    img = np.reshape(img, (H, W, 4))
    img = img[:, :, [0, 1, 2]].copy()  # keep first three channels, drop the 4th (alpha)

    dat = messaging.new_message('roadCameraState')
    dat.roadCameraState = {
        # NOTE(review): uses CARLA's own frame counter, not the local frame_id.
        "frameId": image.frame,
        "image": img.tobytes(),
        "transform": [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
    }
    pm.send('roadCameraState', dat)
    frame_id += 1
def imu_callback(imu, vehicle_state):
    """Publish CARLA IMU readings as a two-entry sensorEvents message.

    Also updates vehicle_state.bearing_deg from the IMU compass for the GPS
    callback to reuse.
    """
    vehicle_state.bearing_deg = math.degrees(imu.compass)

    dat = messaging.new_message('sensorEvents', 2)
    dat.sensorEvents[0].sensor = 4
    dat.sensorEvents[0].type = 0x10
    dat.sensorEvents[0].init('acceleration')
    dat.sensorEvents[0].acceleration.v = [imu.accelerometer.x, imu.accelerometer.y, imu.accelerometer.z]
    # copied these numbers from locationd
    dat.sensorEvents[1].sensor = 5
    dat.sensorEvents[1].type = 0x10
    dat.sensorEvents[1].init('gyroUncalibrated')
    dat.sensorEvents[1].gyroUncalibrated.v = [imu.gyroscope.x, imu.gyroscope.y, imu.gyroscope.z]
    pm.send('sensorEvents', dat)
def panda_state_function(exit_event: threading.Event):
    """Publish a fake healthy pandaState twice a second until exit_event is set.

    Keeps openpilot convinced that a real panda with controls allowed is
    attached.
    """
    pm = messaging.PubMaster(['pandaState'])
    while not exit_event.is_set():
        dat = messaging.new_message('pandaState')
        dat.valid = True
        dat.pandaState = {
            'ignitionLine': True,
            'pandaType': "blackPanda",
            'controlsAllowed': True,
            'safetyModel': 'hondaNidec'
        }
        pm.send('pandaState', dat)
        time.sleep(0.5)
def gps_callback(gps, vehicle_state):
    """Publish a CARLA GNSS fix as a gpsLocationExternal message."""
    dat = messaging.new_message('gpsLocationExternal')

    # transform vel from carla to NED
    # north is -Y in CARLA
    velNED = [
        -vehicle_state.vel.y,  # north/south component of NED is negative when moving south
        vehicle_state.vel.x,   # positive when moving east, which is x in carla
        vehicle_state.vel.z,
    ]

    dat.gpsLocationExternal = {
        "timestamp": int(time.time() * 1000),
        "flags": 1,  # valid fix
        "accuracy": 1.0,
        "verticalAccuracy": 1.0,
        "speedAccuracy": 0.1,
        "bearingAccuracyDeg": 0.1,
        "vNED": velNED,
        "bearingDeg": vehicle_state.bearing_deg,
        "latitude": gps.latitude,
        "longitude": gps.longitude,
        "altitude": gps.altitude,
        "speed": vehicle_state.speed,
        "source": log.GpsLocationData.SensorSource.ublox,
    }

    pm.send('gpsLocationExternal', dat)
# Create a radar's callback that just prints the data
def radar_callback(sensor_data):
    """CARLA radar listener stub; detections are received but intentionally ignored.

    The radar sensor spawn (and the per-detection printing) is commented out
    in bridge(); this stub is kept so the listener wiring still works if it
    is re-enabled.  The original unused `ret = 0` local was removed as dead
    code.
    """
collision_hist = []  # every collision event seen so far, for hazard analysis


def collision_callback(col_event):
    """CARLA collision-sensor listener: record each event."""
    collision_hist.append(col_event)
    # print(col_event)
laneInvasion_hist = []  # every lane-invasion event seen so far


def laneInvasion_callback(LaneInvasionEvent):
    """CARLA lane-invasion-sensor listener: record each event."""
    laneInvasion_hist.append(LaneInvasionEvent)
def fake_driver_monitoring(exit_event: threading.Event):
    """Publish attentive-driver messages so openpilot never flags distraction."""
    pm = messaging.PubMaster(['driverState','driverMonitoringState'])
    while not exit_event.is_set():
        # dmonitoringmodeld output
        dat = messaging.new_message('driverState')
        dat.driverState.faceProb = 1.0
        pm.send('driverState', dat)

        # dmonitoringd output
        dat = messaging.new_message('driverMonitoringState')
        dat.driverMonitoringState = {
            "faceDetected": True,
            "isDistracted": False,
            "awarenessStatus": 1.,
        }
        pm.send('driverMonitoringState', dat)
        time.sleep(DT_DMON)
def can_function_runner(vs: VehicleState, exit_event: threading.Event):
    """Emit fake CAN messages at ~100Hz from the shared VehicleState until stopped."""
    i = 0
    while not exit_event.is_set():
        can_function(pm, vs.speed, vs.angle, i, vs.cruise_button, vs.is_engaged)
        time.sleep(0.01)
        i += 1
def bridge(q):
# setup CARLA
client = carla.Client("127.0.0.1", 2000)
client.set_timeout(10.0)
world = client.load_world(args.town)
print("test======================================================================")
print(args.town)
if args.low_quality:
world.unload_map_layer(carla.MapLayer.Foliage)
world.unload_map_layer(carla.MapLayer.Buildings)
world.unload_map_layer(carla.MapLayer.ParkedVehicles)
world.unload_map_layer(carla.MapLayer.Particles)
world.unload_map_layer(carla.MapLayer.Props)
world.unload_map_layer(carla.MapLayer.StreetLights)
blueprint_library = world.get_blueprint_library()
world_map = world.get_map()
vehicle_bp = blueprint_library.filter('vehicle.tesla.*')[1]
spawn_points = world_map.get_spawn_points()
assert len(spawn_points) > args.num_selected_spawn_point, \
f'''No spawn point {args.num_selected_spawn_point}, try a value between 0 and
{len(spawn_points)} for this town.'''
spawn_point = spawn_points[args.num_selected_spawn_point] # y -= 100+
spawn_point.location.y -= 80
#=====add 1st vehicle=====
spawn_point1 = carla.Transform(spawn_point.location,spawn_point.rotation)
# spawn_point1.location.y += 20
vehicle = world.spawn_actor(vehicle_bp, spawn_point1)
#=====add second vehicle=====
spawn_point2 = carla.Transform(spawn_point.location,spawn_point.rotation)
spawn_point2.location.y += args.init_dist#20
vehicle2 = world.spawn_actor(vehicle_bp, spawn_point2)
# vehicle2.set_autopilot(True)
#==========3rd vehilce===========
if Other_vehicles_Enable:
spawn_point3 = carla.Transform(spawn_point.location,spawn_point.rotation)
spawn_point3.location.y -= 35
spawn_point3.location.x += 7
spawn_point3.rotation.yaw += 25
vehicle3 = world.spawn_actor(vehicle_bp, spawn_point3) #following vehicle
spawn_point4 = carla.Transform(spawn_point1.location,spawn_point1.rotation)
spawn_point4.location.x += 4
spawn_point4.location.y += 15
vehicle4 = world.spawn_actor(vehicle_bp, spawn_point4)
spawn_point5 = carla.Transform(spawn_point1.location,spawn_point1.rotation)
spawn_point5.location.x += 5
spawn_point5.location.y -= 15
spawn_point5.rotation.yaw += 13
vehicle5 = world.spawn_actor(vehicle_bp, spawn_point5)
spectator = world.get_spectator()
transform = vehicle.get_transform()
spectator.set_transform(carla.Transform(transform.location + carla.Location(z=150), carla.Rotation(pitch=-90)))
#======end line===============
max_steer_angle = vehicle.get_physics_control().wheels[0].max_steer_angle
print('max_steer_angle',max_steer_angle) #70 degree
# make tires less slippery
# wheel_control = carla.WheelPhysicsControl(tire_friction=5)
physics_control = vehicle.get_physics_control()
physics_control.mass = 2326
# physics_control.wheels = [wheel_control]*4
physics_control.torque_curve = [[20.0, 500.0], [5000.0, 500.0]]
physics_control.gear_switch_time = 0.0
vehicle.apply_physics_control(physics_control)
blueprint = blueprint_library.find('sensor.camera.rgb')
blueprint.set_attribute('image_size_x', str(W))
blueprint.set_attribute('image_size_y', str(H))
blueprint.set_attribute('fov', '70')
blueprint.set_attribute('sensor_tick', '0.05')
transform = carla.Transform(carla.Location(x=0.8, z=1.13))
camera = world.spawn_actor(blueprint, transform, attach_to=vehicle)
camera.listen(cam_callback)
vehicle_state = VehicleState()
# reenable IMU
imu_bp = blueprint_library.find('sensor.other.imu')
imu = world.spawn_actor(imu_bp, transform, attach_to=vehicle)
imu.listen(lambda imu: imu_callback(imu, vehicle_state))
gps_bp = blueprint_library.find('sensor.other.gnss')
gps = world.spawn_actor(gps_bp, transform, attach_to=vehicle)
gps.listen(lambda gps: gps_callback(gps, vehicle_state))
# # Get radar blueprint
# radar_bp = blueprint_library.filter('sensor.other.radar')[0]
# # Set Radar attributes, by default are:
# radar_bp.set_attribute('horizontal_fov', '30') # degrees
# radar_bp.set_attribute('vertical_fov', '30') # degrees
# # radar_bp.set_attribute('points_per_second', '1500')
# radar_bp.set_attribute('range', '100') # meters
# # Spawn the radar
# radar = world.spawn_actor(radar_bp, transform, attach_to=vehicle, attachment_type=carla.AttachmentType.Rigid)
# # weak_radar = weakref.ref(radar)
# # radar.listen(lambda sensor_data: radar_callback(weak_radar, sensor_data))
# radar.listen(lambda sensor_data: radar_callback(sensor_data))
# # radar.listen(radar_callback)
#collision sensor detector
colsensor_bp = blueprint_library.find("sensor.other.collision")
colsensor = world.spawn_actor(colsensor_bp, transform, attach_to=vehicle)
colsensor.listen(lambda colevent: collision_callback(colevent))
#lane invasion
laneInvasion_bp = blueprint_library.find("sensor.other.lane_invasion")
laneInvasion = world.spawn_actor(laneInvasion_bp, transform, attach_to=vehicle)
laneInvasion.listen(lambda LaneInvasionEvent: laneInvasion_callback(LaneInvasionEvent))
# launch fake car threads
threads = []
exit_event = threading.Event()
threads.append(threading.Thread(target=panda_state_function, args=(exit_event,)))
threads.append(threading.Thread(target=fake_driver_monitoring, args=(exit_event,)))
threads.append(threading.Thread(target=can_function_runner, args=(vehicle_state, exit_event,)))
for t in threads:
t.start()
time.sleep(1)
# can loop
rk = Ratekeeper(100, print_delay_threshold=0.05) #rate =100, T=1/100s=10ms
# init
throttle_ease_out_counter = REPEAT_COUNTER
brake_ease_out_counter = REPEAT_COUNTER
steer_ease_out_counter = REPEAT_COUNTER
vc = carla.VehicleControl(throttle=0, steer=0, brake=0, reverse=False)
is_openpilot_engaged = False
throttle_out = steer_out = brake_out = 0
throttle_op = steer_op = brake_op = 0
throttle_manual = steer_manual = brake_manual = 0
old_steer = old_brake = old_throttle = 0
throttle_manual_multiplier = 0.7 #keyboard signal is always 1
brake_manual_multiplier = 0.7 #keyboard signal is always 1
steer_manual_multiplier = 45 * STEER_RATIO #keyboard signal is always 1
tm = client.get_trafficmanager(8008)
# vehicle2.set_autopilot(True,8008)
tm.vehicle_percentage_speed_difference(vehicle2,-args.cruise_lead) #Sets the difference the vehicle's intended speed and its current speed limit.
# tm.distance_to_leading_vehicle(vehicle2,5)
if Other_vehicles_Enable:
tm.vehicle_percentage_speed_difference(vehicle3,-200)
is_autopilot_engaged =False #vehicle2
fp_res = open('results/data_ADS1_{}mph_{}m_{}V0_{}V0.csv'.format(vEgo,args.init_dist,args.cruise_lead,args.cruise_lead2),'w')
fp_res.write("frameIdx,distance(m),speed(m/s),acceleration(m/s2),angle_steer,gas,brake,steer_torque,actuators_steeringAngleDeg,actuators_steer,actuators_accel,d_rel(m),v_rel(m/s),c_path(m),faultinjection,faultType,alert,hazard,hazardType,alertMsg,hazardMsg,laneInvasion,yPos,Laneline1,Laneline2,Laneline3,Laneline4,leftPath,rightPath,leftEdge,rightEdge,vel_pos.x,vel_pos.y,vel2_pos.x,vel2_pos.y,vel4_pos.x,vel4_pos.y\n")
speed = 0
throttle_out_hist = 0
FI_duration = 1000# set to be a larget value like 10 seconds so it won't be reached in the normal case with human driver engagement #250*10ms =2.5s
Num_laneInvasion = 0
t_laneInvasion = 0
pathleft = pathright = 0
roadEdgeLeft = roadEdgeRight = 0
laneLineleft=-1.85
laneLineright = 1.85
Lead_vehicle_in_vision = False #lead vehicle is captured in the camera
faulttime = -1
alerttime = -1
hazardtime = -1
fault_duration = 0
driver_alerted_time = -1
H2_count = 0
hazMsg = ""
hazard = False
hazType =0x0
alertType_list =[]
alertText1_list = []
alertText2_list = []
FI_flag = 0
FI_Type = 0
frameIdx = 0
FI_H3_combine_enable = 0
while frameIdx<5000:
altMsg = ""
alert = False
if is_openpilot_engaged:
frameIdx += 1
#simulate button Enable event
if rk.frame == 800:
q.put("cruise_up")
if frameIdx == 1000:
if args.cruise_lead != args.cruise_lead2: #change the speed of vehicle2
print("===========change Lead vehicle cruise speed from {}mph to {}mph".format(args.cruise_lead,args.cruise_lead2))
tm.vehicle_percentage_speed_difference(vehicle2,-args.cruise_lead2)
# if frameIdx >2000:
# q.put("quit")
# 1. Read the throttle, steer and brake from op or manual controls
# 2. Set instructions in Carla
# 3. Send current carstate to op via can
cruise_button = 0
throttle_out = steer_out = brake_out = 0.0
throttle_op = steer_op = brake_op = 0
throttle_manual = steer_manual = brake_manual = 0.0
actuators_steeringAngleDeg = actuators_steer = actuators_accel = 0
dRel = 0
yRel = 2.5
vRel = 0
vLead = 0
yPos = 0
ylaneLines = []
yroadEdges = []
# --------------Step 1-------------------------------
if not q.empty():
message = q.get()
m = message.split('_')
print(message)
if m[0] == "steer":
steer_manual = float(m[1])
is_openpilot_engaged = False
elif m[0] == "throttle":
throttle_manual = float(m[1])
is_openpilot_engaged = False
elif m[0] == "brake":
brake_manual = float(m[1])
is_openpilot_engaged = False
elif m[0] == "reverse":
#in_reverse = not in_reverse
cruise_button = CruiseButtons.CANCEL
is_openpilot_engaged = False
elif m[0] == "cruise":
vehicle2.set_autopilot(True,8008)
if Other_vehicles_Enable:
vehicle3.set_autopilot(True,8008)
vehicle4.set_autopilot(True,8008)
vehicle5.set_autopilot(True,8008)
if m[1] == "down":
cruise_button = CruiseButtons.DECEL_SET
is_openpilot_engaged = True
elif m[1] == "up":
cruise_button = CruiseButtons.RES_ACCEL
is_openpilot_engaged = True
elif m[1] == "cancel":
cruise_button = CruiseButtons.CANCEL
is_openpilot_engaged = False
elif m[0] == "quit":
vehicle2.set_autopilot(False,8008)
break
throttle_out = throttle_manual * throttle_manual_multiplier
steer_out = steer_manual * steer_manual_multiplier
brake_out = brake_manual * brake_manual_multiplier
#steer_out = steer_out
# steer_out = steer_rate_limit(old_steer, steer_out)
old_steer = steer_out
old_throttle = throttle_out
old_brake = brake_out
# print('message',old_throttle, old_steer, old_brake)
if is_openpilot_engaged:
sm.update(0)
# TODO gas and brake is deprecated
throttle_op = clip(sm['carControl'].actuators.accel/4.0, 0.0, 1.0)
brake_op = clip(-sm['carControl'].actuators.accel/4.0, 0.0, 1.0)
steer_op = sm['carControl'].actuators.steeringAngleDeg
actuators = sm['carControl'].actuators
actuators_accel = actuators.accel
actuators_steer = actuators.steer
actuators_steeringAngleDeg = actuators.steeringAngleDeg
throttle_out = throttle_op
steer_out = steer_op
brake_out = brake_op
steer_out = steer_rate_limit(old_steer, steer_out)
old_steer = steer_out
dRel = sm['radarState'].leadOne.dRel
yRel = sm['radarState'].leadOne.yRel #y means lateral direction
vRel = sm['radarState'].leadOne.vRel
vLead = sm['radarState'].leadOne.vLead
if not sm['radarState'].leadOne.status:
Lead_vehicle_in_vision = False
else:
Lead_vehicle_in_vision = True
md = sm['modelV2']
if len(md.position.y)>0:
yPos = round(md.position.y[0],2) # position
ylaneLines = [round(md.laneLines[0].y[0],2),round(md.laneLines[1].y[0],2),round(md.laneLines[2].y[0],2),round(md.laneLines[3].y[0],2)]
yroadEdges = [round(md.roadEdges[0].y[0],2), round(md.roadEdges[1].y[0],2)] #left and right roadedges
# print(ylaneLines[2] - yPos)
if len(ylaneLines)>2:
laneLineleft = ylaneLines[1]
laneLineright = ylaneLines[2]
pathleft = yPos- laneLineleft
pathright = laneLineright-yPos
roadEdgeLeft = yroadEdges[0]
roadEdgeRight = yroadEdges[1]
#controlsState
alertText1 = sm['controlsState'].alertText1
alertText2 = sm['controlsState'].alertText2
alertType = sm['controlsState'].alertType
if alertType and alertType not in alertType_list and alertText1 not in alertText1_list:
alertText1_list.append(alertText1)
alertType_list.append(alertType)
if(alerttime== -1 and 'startupMaster/permanent' not in alertType and 'buttonEnable/enable' not in alertType):
alerttime = frameIdx
alert = True
print("=================Alert============================")
print(alertType,":",alertText1,alertText2)
else:
if throttle_out==0 and old_throttle>0:
if throttle_ease_out_counter>0:
throttle_out = old_throttle
throttle_ease_out_counter += -1
else:
throttle_ease_out_counter = REPEAT_COUNTER
old_throttle = 0
if brake_out==0 and old_brake>0:
if brake_ease_out_counter>0:
brake_out = old_brake
brake_ease_out_counter += -1
else:
brake_ease_out_counter = REPEAT_COUNTER
old_brake = 0
if steer_out==0 and old_steer!=0:
if steer_ease_out_counter>0:
steer_out = old_steer
steer_ease_out_counter += -1
else:
steer_ease_out_counter = REPEAT_COUNTER
old_steer = 0
# --------------Step 2-------------------------------
steer_carla = steer_out / (max_steer_angle * STEER_RATIO * -1)
steer_carla = np.clip(steer_carla, -1,1)
steer_out = steer_carla * (max_steer_angle * STEER_RATIO * -1)
old_steer = steer_carla * (max_steer_angle * STEER_RATIO * -1)
if speed:
headway_time = dRel/speed
else:
headway_time = 100
RSpeed = -vRel #v_Ego -V_Lead
if FI_Enable == True:
if Mode_FI_duration>0: #
if FI_flag>0: #reset FI
FI_flag = 0
FI_Type = 0
#*********************************************#
#condition to activate fault injection
#throttle:HOOK#
# manual FI examples
# if headway_time<=2.0 and RSpeed>=0 and vLead!=0:
# FI_Type |= 0x01
# FI_flag = 1
# FI_duration = 100
# FI_H3_combine_enable = 1
# if frameIdx>1000 and (headway_time>2.0 and RSpeed<0 and Lead_vehicle_in_vision or Lead_vehicle_in_vision==False):
# FI_Type |= 0x02
# FI_flag=1
if FI_H3_combine_enable:
if speed>15 and laneLineleft>-1.25:
FI_Type |= 0x04
FI_flag=1
if speed>15 and laneLineleft<1.25:
FI_Type |= 0x08 #0b1000
FI_flag=1
#*********************************************#
#condition to stop fault injection and start human driver engagement if FI
if Driver_react_Enable == True:
if driver_alerted_time >= 0 and frameIdx >=250 + driver_alerted_time: #average reaction time 2.5s
#stop fault injection
FI_flag = -1
#human driver reaction # full brake
if FI_Type&0x01: # max gas
throttle_out = 0
brake_out = 1
steer_carla = 0
#execute fault injection
if FI_flag > 0:
if fault_duration < FI_duration: #time budget
if faulttime == -1:
faulttime = frameIdx
fault_duration += 1
if FI_Type&0x01: # max gas
throttle_out=0.6
brake_out=0
if FI_Type&0x02: #max brake
throttle_out=0
brake_out = 1
if FI_Type&0x04: #max left steer
steer_carla = vc.steer - 0.5/(max_steer_angle * STEER_RATIO ) #maximum change 0.5 degree at each step
steer_carla = np.clip(steer_carla, -1,1)
if FI_Type&0x08: #max right steer
steer_carla = vc.steer + 0.5/(max_steer_angle * STEER_RATIO ) #maximum change 0.5 degree at each step
steer_carla = np.clip(steer_carla, -1,1)
else:
FI_flag = 0
vc.throttle = throttle_out/0.6
vc.steer = steer_carla
vc.brake = brake_out
vehicle.apply_control(vc)
# vehicle2.apply_control(vc)
# measurements, sensor_data = client.read_data()
# control = measurements.player_measurements.autopilot_control
# client.send_control(control)
# --------------Step 3-------------------------------
vel = vehicle.get_velocity()
speed = math.sqrt(vel.x**2 + vel.y**2 + vel.z**2) # in m/s
acc = vehicle.get_acceleration()
acceleration = math.sqrt(acc.x**2 + acc.y**2 + acc.z**2) # in m/s^2
if speed==acceleration==0:
acceleration =1
vehicle_state.speed = speed
vehicle_state.vel = vel
vehicle_state.angle = steer_out
vehicle_state.cruise_button = cruise_button
vehicle_state.is_engaged = is_openpilot_engaged
vel_pos = vehicle.get_transform().location
vel2_pos = vehicle2.get_transform().location
vel4_pos = vel2_pos
if Other_vehicles_Enable:
vel4_pos = vehicle4.get_transform().location
#-----------------------------------------------------
if frameIdx == 1000:
if speed <0.02 and throttle_out <0.02 and brake_out <0.02: #fail to start
reInitialize_bridge = True
print("reInitialize bridge.py...\n")
break
#------------------------------------------------------
if driver_alerted_time == -1 and fault_duration>0 and (alert or throttle_out>= 0.6 or speed>1.1*vEgo or brake_out>0.95): #max gas//max brake//exceed speed limit
driver_alerted_time =frameIdx #driver is alerted
#Accident: collision
if len(collision_hist):
print(collision_hist[0],collision_hist[0].other_actor)
# print(vehicle2)
if collision_hist[0].other_actor.id == vehicle2.id: #collide with vehicle2:
dRel = -0.1
if "lead" not in hazMsg:
hazMsg +="||collide with lead vihecle||"
else:
if "curb" not in hazMsg:
hazMsg +="||collide with curb||"
if hazType&0x04 == 0:
hazard = True
hazardtime =frameIdx
hazMsg +="H3"
hazType |= 0x04 #0b 100
#if laneInvasion
laneInvasion_Flag = False
if len(laneInvasion_hist)>Num_laneInvasion:
# hazard = True
laneInvasion_Flag =True
Num_laneInvasion = len(laneInvasion_hist)
t_laneInvasion = frameIdx
print(Num_laneInvasion,laneInvasion_hist[-1],laneInvasion_hist[-1].crossed_lane_markings)
# del(laneInvasion_hist[0])
#lable hazard
if dRel <0.5 and Lead_vehicle_in_vision and 'curb' not in hazMsg: # unsafe distance # collide with curb is not H1
if hazType&0x01 == 0:
hazard = True
hazardtime =frameIdx
hazMsg +="H1"
hazType |= 0x01 #0b 001
if speed<0.02 and (dRel >50 or Lead_vehicle_in_vision==False) and fault_duration>0: #decrease the speed to full stop without a lead vehicle
if hazType&0x02 == 0:
H2_count += 1
if H2_count>100: #last for 1 second
hazard = True
hazardtime =frameIdx
hazMsg +="H2"
hazType |= 0x02 #0b 100
else:
H2_count = 0
if Num_laneInvasion > 0 and (roadEdgeRight <3.7 and (pathright <1.15) or roadEdgeRight>7.4): #lane width = 3.7m vehicle width =2.3m or(ylaneLines[3] -ylaneLines[2] <1.15)
if hazType&0x04 == 0:
hazard = True
hazardtime =frameIdx
hazMsg +="H3"
hazType |= 0x04 #0b 100
#result print out
# if rk.frame%PRINT_DECIMATION == 0:
if rk.frame%PRINT_DECIMATION == 0 or dRel<1 and Lead_vehicle_in_vision:
print("Frame ID:",frameIdx,"frame: ", rk.frame,"engaged:", is_openpilot_engaged, "; throttle: ", round(vc.throttle, 3), "acc:" ,round(acceleration,2),round(throttle_out_hist/acceleration,2),"; steer(c/deg): ", round(vc.steer, 3), round(steer_out, 3), "; brake: ", round(vc.brake, 3),\
"speed:",round(speed,2),'vLead:',round(vLead,2),"vRel",round(-vRel,2),"drel:",round(dRel,2),round(yRel,2),'Lanelines',yPos,ylaneLines,yroadEdges,"FI:",FI_flag,"Hazard:",hazard)
#result record in files
if is_openpilot_engaged :#and (frameIdx%20==0 or (dRel<1 and Lead_vehicle_in_vision)): #record every 20*10=0.2s
fp_res.write("{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}\n".format(frameIdx,0,speed,acceleration,steer_out,vc.throttle,vc.brake,vc.steer,actuators_steeringAngleDeg,actuators_steer,actuators_accel, dRel,-vRel,yRel,FI_flag>0,FI_Type if FI_flag else 0 ,alert,hazard,hazType,altMsg,hazMsg, laneInvasion_Flag,yPos,ylaneLines,pathleft,pathright,roadEdgeLeft,roadEdgeRight,vel_pos.x,vel_pos.y,vel2_pos.x,vel2_pos.y,vel4_pos.x,vel4_pos.y))
rk.keep_time()
throttle_out_hist = vc.throttle
#brake with hazard
if hazard:# or FI_flag ==-1 and speed<0.01:
if 'collide' in hazMsg or frameIdx - hazardtime >250: #terminate the simulation right after any collision or wait 2 seconds after any hazard
break
#store alert,hazard message to a file, which will be stored in a summary file
Alert_flag = len(alertType_list)>0 and 'startupMaster/permanent' not in alertType_list and 'buttonEnable/enable' not in alertType_list
fp_temp = open("temp.txt",'w')
fp_temp.write("{},{},{},{},{},{},{},{},{}".format("||".join(alertType_list),hazMsg,faulttime,alerttime,hazardtime, Alert_flag,hazard,fault_duration,Num_laneInvasion ))
fp_temp.close()
# Clean up resources in the opposite order they were created.
exit_event.set()
for t in reversed(threads):
t.join()
# t.stop()
gps.destroy()
imu.destroy()
camera.destroy()
vehicle.destroy()
colsensor.destroy()
vehicle2.set_autopilot(False,8008)
vehicle2.destroy()
if Other_vehicles_Enable:
vehicle3.set_autopilot(False,8008)
vehicle3.destroy()
vehicle4.set_autopilot(False,8008)
vehicle4.destroy()
vehicle5.set_autopilot(False,8008)
vehicle5.destroy()
fp_res.close()
# os.killpg(os.getpgid(os.getpid()), signal.SIGINT) #kill the remaining threads
sys.exit(0)
# exit()
def bridge_keep_alive(q: Any):
    """Run bridge() until it completes, restarting it whenever it raises RuntimeError."""
    while True:
        try:
            bridge(q)
        except RuntimeError:
            print("Restarting bridge...")
        else:
            return
if __name__ == "__main__":
    # print(os.getcwd())
    # os.system('rm ./results/*')
    # make sure params are in a good state
    set_params_enabled()
    # Publish a fake liveCalibration message so openpilot starts up believing
    # the camera is already calibrated (rpy = [0, 0, 0], 20 valid blocks).
    msg = messaging.new_message('liveCalibration')
    msg.liveCalibration.validBlocks = 20
    msg.liveCalibration.rpyCalib = [0.0, 0.0, 0.0]
    Params().put("CalibrationParams", msg.to_bytes())
    # Queue carrying operator (keyboard/joystick) commands into the bridge loop.
    q: Any = Queue()
    #=================================================
    # p = Process(target=bridge_keep_alive, args=(q,), daemon=True)
    # p.start()
    # if 0:#args.joystick:
    #     # start input poll for joystick
    #     from lib.manual_ctrl import wheel_poll_thread
    #     wheel_poll_thread(q)
    #     p.join()
    # else:
    #     # start input poll for keyboard
    #     from lib.keyboard_ctrl import keyboard_poll_thread
    #     keyboard_poll_thread(q)
    ##===========================================
    # # start input poll for keyboard
    # from lib.keyboard_ctrl import keyboard_poll_thread
    # p_keyboard = Process(target=keyboard_poll_thread, args=(q,), daemon=True)
    # p_keyboard.start()
    # Run the bridge in the foreground; it restarts itself on RuntimeError.
    bridge_keep_alive(q)
    # if reInitialize_bridge: #if fail to intialize, do it again
    #     q: Any = Queue()
    #     bridge_keep_alive(q)
    # p_keyboard.join()
| StarcoderdataPython |
1710300 | <filename>tests/test_dependencies.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
"""Tests for `dparse.dependencies`"""
import pytest
from dparse.dependencies import Dependency, DependencyFile
from dparse import filetypes, parse, parser, errors
def test_dependency_serialize():
    """Dependency.serialize() must mirror every attribute set on the object."""
    # Minimal dependency: only the required constructor arguments.
    dep = Dependency(
        name="foo",
        specs=(),
        line="foo==1.2.3"
    )
    serialized = dep.serialize()
    assert dep.name == serialized["name"]
    assert dep.specs == serialized["specs"]
    assert dep.line == serialized["line"]
    # Optional attributes assigned after construction must round-trip too.
    dep.extras = "some-extras"
    dep.line_numbers = (0, 4)
    dep.index_server = "some-foo-server"
    dep.hashes = {
        "method": "sha256",
        "hash": "the hash"
    }
    dep.dependency_type = filetypes.requirements_txt
    serialized = dep.serialize()
    assert dep.extras == serialized["extras"]
    assert dep.line_numbers == serialized["line_numbers"]
    assert dep.hashes == serialized["hashes"]
    assert dep.dependency_type == serialized["dependency_type"]
def test_dependency_deserialize():
    """Dependency.deserialize() must restore every serialized attribute."""
    # Minimal serialized form.
    d = {
        "name": "foo",
        "specs": [],
        "line": "foo==1.2.3"
    }
    dep = Dependency.deserialize(d)
    assert d["name"] == dep.name
    assert d["specs"] == dep.specs
    assert d["line"] == dep.line
    # Optional keys must be restored as attributes as well.
    d["extras"] = "some-extras"
    d["line_numbers"] = (0, 4)
    d["index_server"] = "some-foo-server"
    d["hashes"] = {
        "method": "sha256",
        "hash": "the hash"
    }
    d["dependency_type"] = filetypes.requirements_txt
    dep = Dependency.deserialize(d)
    assert d["extras"] == dep.extras
    assert d["line_numbers"] == dep.line_numbers
    assert d["index_server"] == dep.index_server
    assert d["hashes"] == dep.hashes
    assert d["dependency_type"] == dep.dependency_type
def test_dependency_file_serialize():
    """DependencyFile.serialize() must expose file metadata and parsed deps."""
    content = "django==1.2\nrequests==1.2.3"
    dep_file = parse(
        content=content,
        file_type=filetypes.requirements_txt,
        path="req.txt",
        sha="sha"
    )
    serialized = dep_file.serialize()
    assert serialized["file_type"] == dep_file.file_type
    assert serialized["content"] == dep_file.content
    assert serialized["path"] == dep_file.path
    assert serialized["sha"] == dep_file.sha
    # Dependencies keep their order of appearance in the file content.
    assert serialized["dependencies"][0]["name"] == "django"
    assert serialized["dependencies"][1]["name"] == "requests"
def test_dependency_file_deserialize():
    """DependencyFile.deserialize() must rebuild metadata and Dependency objects."""
    # Fully serialized file with two pinned requirements.
    d = {
        'file_type': 'requirements.txt',
        'content': 'django==1.2\nrequests==1.2.3',
        'sha': 'sha',
        'dependencies': [
            {
                'hashes': [],
                'line_numbers': None,
                'extras': (),
                'name': 'django',
                'index_server': None,
                'dependency_type': 'requirements.txt',
                'line': 'django==1.2',
                'specs': [('==', '1.2')]
            },
            {
                'hashes': [],
                'line_numbers': None,
                'extras': (),
                'name': 'requests',
                'index_server': None,
                'dependency_type': 'requirements.txt',
                'line': 'requests==1.2.3',
                'specs': [('==', '1.2.3')]}],
        'path': 'req.txt'
    }
    dep_file = DependencyFile.deserialize(d)
    assert d['file_type'] == dep_file.file_type
    assert d['content'] == dep_file.content
    assert d['sha'] == dep_file.sha
    assert d['path'] == dep_file.path
    # Nested dicts become Dependency instances, order preserved.
    assert "django" == dep_file.dependencies[0].name
    assert "requests" == dep_file.dependencies[1].name
def test_parser_class():
    """parse() must select the parser from file_type, path, or explicit parser."""
    # Explicit file_type argument.
    dep_file = parse("", file_type=filetypes.requirements_txt)
    assert isinstance(dep_file.parser, parser.RequirementsTXTParser)
    # Inferred from the path's file name.
    dep_file = parse("", path="req.txt")
    assert isinstance(dep_file.parser, parser.RequirementsTXTParser)
    dep_file = parse("", file_type=filetypes.tox_ini)
    assert isinstance(dep_file.parser, parser.ToxINIParser)
    dep_file = parse("", path="tox.ini")
    assert isinstance(dep_file.parser, parser.ToxINIParser)
    dep_file = parse("", file_type=filetypes.conda_yml)
    assert isinstance(dep_file.parser, parser.CondaYMLParser)
    dep_file = parse("", path="conda.yml")
    assert isinstance(dep_file.parser, parser.CondaYMLParser)
    # An explicitly supplied parser class wins over any inference.
    dep_file = parse("", parser=parser.CondaYMLParser)
    assert isinstance(dep_file.parser, parser.CondaYMLParser)
    # With no hint at all, parse() must raise.
    with pytest.raises(errors.UnknownDependencyFileError) as e:
        parse("")
| StarcoderdataPython |
3352821 | import os
from textwrap import dedent
def aws_creds_setup(config):
    """Create ``~/.aws/credentials`` from the ``[AWS]`` section of *config*.

    *config* is a ``configparser``-style object.  The function is a no-op
    when ``setupAwsKeys`` is disabled, and it never overwrites an existing
    credentials file.  Returns None.
    """
    # Guard clause: feature disabled in the config.
    if config.getboolean('AWS', 'setupAwsKeys') is not True:
        print("\nSkipping AWS Credential setup\n")
        return

    access_key = config.get('AWS', 'awsAccessKeyId', fallback='')
    secret_key = config.get('AWS', 'awsSecretAccessKey', fallback='')
    region = config.get('AWS', 'region', fallback='')

    aws_path = os.path.join(os.path.expanduser('~'), '.aws')
    creds_file = os.path.join(aws_path, 'credentials')
    if os.path.exists(creds_file):
        # Never clobber credentials the user already has.
        print("\nSkipping...\naws credentials already set up in `~/.aws/credentials`\n")
        return

    # exist_ok avoids the check-then-create race of the old exists()/mkdir() pair.
    os.makedirs(aws_path, exist_ok=True)
    with open(creds_file, 'w') as f:
        creds_setup = f"""
        [default]
        aws_access_key_id = {access_key}
        aws_secret_access_key = {secret_key}
        region = {region}
        """
        f.write(dedent(creds_setup).strip())
    # Message fixed: credentials is a file, not a directory (no trailing slash).
    print("\naws credentials set up in `~/.aws/credentials`\n")
1799887 | <reponame>lengstrom/fastargs
# Process-wide registry holding the single active configuration object.
STATE = {
    'config': None
}


def get_current_config():
    """Return the current Config, lazily creating a default one on first use."""
    cfg = STATE['config']
    if cfg is None:
        from .config import Config
        cfg = Config()
        STATE['config'] = cfg
    return cfg


def set_current_config(config):
    """Install *config* as the process-wide current configuration."""
    STATE['config'] = config
| StarcoderdataPython |
95560 | # Generated by Django 3.1.13 on 2022-04-24 15:45
import app.storage
from django.conf import settings
from django.db import migrations, models, transaction
import django.db.models.deletion
import uuid
import versatileimagefield.fields
def create_slack_users(apps, schema_editor):
    """Data-migration helper: copy Slack fields from User into SlackUser.

    Uses apps.get_model() so it operates on the historical model state,
    as required for Django data migrations.
    """
    SlackUser = apps.get_model("messaging", "SlackUser")
    User = apps.get_model("user", "User")
    # Single transaction: either every eligible user is migrated or none are.
    with transaction.atomic():
        for user in User.objects.filter(slack_id__isnull=False):
            SlackUser.objects.create(
                user=user,
                external_id=user.slack_id,
                token=user.slack_token,
                scopes=user.slack_scopes,
                display_name=user.slack_display_name,
            )
class Migration(migrations.Migration):
    # Auto-generated migration: creates the SlackUser model and back-fills it
    # from existing User rows via the create_slack_users data migration.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("messaging", "0005_slacklog_type"),
    ]

    operations = [
        migrations.CreateModel(
            name="SlackUser",
            fields=[
                (
                    "id",
                    models.UUIDField(
                        default=uuid.uuid4,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                    ),
                ),
                ("external_id", models.CharField(max_length=255)),
                ("token", models.CharField(blank=True, max_length=255, null=True)),
                ("scopes", models.CharField(blank=True, max_length=255, null=True)),
                (
                    "display_name",
                    models.CharField(blank=True, max_length=255, null=True),
                ),
                (
                    "picture",
                    versatileimagefield.fields.VersatileImageField(
                        blank=True,
                        null=True,
                        storage=app.storage.OverwriteStorage(),
                        upload_to="messaging/slackuser/picture/",
                        verbose_name="Slack image",
                    ),
                ),
                (
                    "picture_hash",
                    models.CharField(blank=True, max_length=255, null=True),
                ),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                ("updated_at", models.DateTimeField(auto_now=True)),
                (
                    "user",
                    models.OneToOneField(
                        on_delete=django.db.models.deletion.PROTECT,
                        related_name="slack_user",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
        ),
        # Reverse of the data migration is a no-op; reversing CreateModel
        # drops the table anyway.
        migrations.RunPython(create_slack_users, migrations.RunPython.noop),
    ]
| StarcoderdataPython |
1726216 | <reponame>gerritholl/sattools<gh_stars>0
"""Test visualisation routines."""
import datetime
from unittest.mock import patch, MagicMock
import pytest
import pyresample
from . import utils
def test_show(fakescene, fakearea, tmp_path):
    """Test showing a scene and area."""
    import sattools.vis
    from satpy import Scene
    comps = ["raspberry", "blueberry"]
    chans = ["maroshki", "strawberry"]
    areas = ["native"]
    # Patch Scene construction so show() operates on the fake scene fixture.
    with patch("satpy.Scene") as sS:
        sS.return_value = fakescene
        S = sattools.vis.show(
            ["/tmp/animals/pinguin", "/tmp/animals/polarbear"],
            comps, chans, areas,
            tmp_path / "out", "{label:s}_{area:s}_{dataset:s}.tiff",
            reader="pranksat",
            label="fish")
    # One output file per (area, dataset) combination.
    assert S == {tmp_path / "out" /
                 f"fish_{area:s}_{ds:s}.tiff"
                 for ds in ["raspberry", "blueberry", "maroshki", "strawberry"]
                 for area in ["native"]}
    for f in S:
        assert f.exists()
    fakescene.save_dataset = MagicMock()
    fakescene.resample = MagicMock()
    fakescene.resample.return_value = fakescene
    with patch("satpy.Scene") as sS:
        sS.return_value = fakescene
        # show_only_coastlines as a dataset name...
        S = sattools.vis.show(
            ["/tmp/animals/pinguin", "/tmp/animals/polarbear"],
            comps, chans, ["fribbulus xax"],
            tmp_path / "out", "{label:s}_{area:s}_{dataset:s}.tiff",
            reader="pranksat",
            show_only_coastlines="blueberry",
            path_to_coastlines="/coast", label="fish")
        # ...and as an area object; both code paths must succeed.
        S = sattools.vis.show(
            ["/tmp/animals/pinguin", "/tmp/animals/polarbear"],
            comps, chans, ["fribbulus xax"],
            tmp_path / "out", "{label:s}_{area:s}_{dataset:s}.tiff",
            reader="pranksat",
            show_only_coastlines=fakearea,
            path_to_coastlines="/coast", label="fish")
        assert S
    # An empty scene yields no output files.
    empty = Scene()
    with patch("satpy.Scene") as sS:
        sS.return_value = empty
        S = sattools.vis.show(
            ["/tmp/penguin"], [], [], ["home"],
            tmp_path / "not", "nowhere", reader="pranksat",
            show_only_coastlines=False, path_to_coastlines="/coast",
            label="bird")
        assert S == set()
@patch("satpy.MultiScene.from_files", autospec=True)
def test_show_video(sMf, fake_multiscene2, fake_multiscene3, tmp_path):
    """Test showing an ABI/GLM video from files."""
    from sattools.vis import show_video_abi_glm
    sMf.return_value = fake_multiscene2
    # Replace resample so no real reprojection happens; reuse one scene thrice.
    mm = MagicMock()
    fake_multiscene2.resample = mm
    mm.return_value.scenes = fake_multiscene2.scenes[:1]*3
    for sc in fake_multiscene2.scenes:
        sc.save_datasets = MagicMock()
    show_video_abi_glm(
        ["fake_in1", "fake_in2"], tmp_path)
    mm.return_value.save_animation.assert_called_once()
    # A multiscene with inconsistent contents must be rejected.
    sMf.return_value = fake_multiscene3
    fake_multiscene3.resample = MagicMock()
    fake_multiscene3.resample.return_value = fake_multiscene3
    with pytest.raises(ValueError):
        show_video_abi_glm(
            ["fake_in1", "fake_in2"], tmp_path)
def test_flatten_areas():
    """Test flattening a stacked area definition."""
    from sattools.area import flatten_areas
    # Two plain areas of different sizes.
    ars = [pyresample.create_area_def(
        "test-area",
        {"proj": "eqc", "lat_ts": 0, "lat_0": 0, "lon_0": 0,
         "x_0": 0, "y_0": 0, "ellps": "sphere", "units": "m",
         "no_defs": None, "type": "crs"},
        units="m",
        shape=(r, r),
        resolution=1000,
        center=(0, 0)) for r in (5, 6)]
    # One stacked area, and a stacked area nesting two stacked areas.
    sar = pyresample.geometry.StackedAreaDefinition(*ars)
    sar2 = pyresample.geometry.StackedAreaDefinition(sar, sar)
    flat = list(flatten_areas([*ars, sar, sar2]))
    assert all(isinstance(ar, pyresample.geometry.AreaDefinition)
               for ar in flat)
    # 2 plain + 2 from sar + 4 from sar2 (sar appears twice) = 8 leaves.
    assert len(flat) == 8
def test_show_video_from_times(
        monkeypatch, tmp_path,
        better_glmc_pattern, more_glmc_files, fakearea):
    """Test showing an ABI/GLM video from times."""
    from sattools.vis import show_video_abi_glm_times

    # Stand-ins that synthesize fake GLM/ABI granules instead of downloading.
    def fake_ensure_glm(start_date, end_date, sector="C", lat=0, lon=0):
        return utils.create_fake_glm_for_period(tmp_path, start_date,
                                                end_date, sector)

    def fake_get_abi(start_date, end_date, sector, chans):
        return utils.create_fake_abi_for_period(tmp_path, start_date, end_date,
                                                sector, chans)

    monkeypatch.setenv("NAS_DATA", str(tmp_path / "nas"))
    with patch("sattools.abi.get_fsfiles", new=fake_get_abi), \
            patch("sattools.glm.ensure_glm_for_period", new=fake_ensure_glm):
        show_video_abi_glm_times(
            datetime.datetime(1900, 1, 1, 0, 0),
            datetime.datetime(1900, 1, 1, 0, 20),
            out_dir=tmp_path / "show-vid",
            vid_out="test.mp4",
            enh_args={})
    # The requested video file must have been written.
    assert (tmp_path / "show-vid" / "test.mp4").exists()
| StarcoderdataPython |
73783 | <reponame>testtech-solutions/ProcessPlot
"""
Copyright (c) 2021 <NAME> <EMAIL>, <NAME> <EMAIL>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import logging
import numpy as np
from gi.repository import Gdk, GdkPixbuf, GObject, Gtk
from classes.database import PenSettings
class Pen(object):
    """Per-chart pen (trace) configuration, persisted through the PenSettings ORM model."""

    __log = logging.getLogger("ProcessPlot.classes.Pen")
    orm_model = PenSettings

    @classmethod
    def get_params_from_orm(cls, result):
        """
        pass in an orm result (database query result) and this will update the params dictionary
        with the table columns. the params object is used to pass into a widget's init
        """
        params = {
            "id": result.id,
            "chart_id": result.chart_id,
            "tag_id": result.tag_id,
            "connection_id": result.connection_id,
            "visible": result.visible,
            "color": result.color,
            # NOTE(review): key is misspelled ("weigth" vs. "weight") — confirm
            # no caller depends on the typo before renaming it.
            "weigth": result.weight,
            "scale_minimum": result.scale_minimum,
            "scale_maximum": result.scale_maximum,
            "scale_lock": result.scale_lock,
            "scale_auto": result.scale_auto,
        }
        return params

    # NOTE(review): reads self._id, which is never assigned in this class —
    # presumably set externally; accessing it before then raises AttributeError.
    @GObject.Property(type=int, flags=GObject.ParamFlags.READABLE)
    def id(self):
        return self._id

    @GObject.Property(type=int, flags=GObject.ParamFlags.READWRITE)
    def chart_id(self):
        return self._chart_id

    @chart_id.setter
    def chart_id(self, value):
        self._chart_id = value
        #self.move()

    @GObject.Property(type=int, flags=GObject.ParamFlags.READWRITE)
    def tag_id(self):
        return self._tag_id

    @tag_id.setter
    def tag_id(self, value):
        self._tag_id = value
        #self.move()

    @GObject.Property(type=int, flags=GObject.ParamFlags.READWRITE)
    def connection_id(self):
        return self._connection_id

    @connection_id.setter
    def connection_id(self, value):
        self._connection_id = value
        #self.resize()

    # NOTE(review): declared READABLE yet a Python setter is provided below —
    # confirm whether the GObject flag should be READWRITE.
    @GObject.Property(type=bool, default=False, flags=GObject.ParamFlags.READABLE)
    def visible(self):
        return self._visible

    @visible.setter
    def visible(self, value):
        self._visible = value
        #self.resize()

    @GObject.Property(type=str, flags=GObject.ParamFlags.READWRITE)
    def color(self):
        return self._color

    @color.setter
    def color(self, value):
        self._color = value
        #self.resize()

    @GObject.Property(type=int, flags=GObject.ParamFlags.READWRITE)
    def weight(self):
        return self._weight

    @weight.setter
    def weight(self, value):
        self._weight = value
        #self.resize()

    @GObject.Property(type=str, flags=GObject.ParamFlags.READWRITE)
    def scale_minimum(self):
        return self._scale_minimum

    @scale_minimum.setter
    def scale_minimum(self, value):
        self._scale_minimum = value
        #self.resize()

    @GObject.Property(type=str, flags=GObject.ParamFlags.READWRITE)
    def scale_maximum(self):
        return self._scale_maximum

    @scale_maximum.setter
    def scale_maximum(self, value):
        self._scale_maximum = value
        #self.resize()

    # NOTE(review): READABLE flag with a setter — see `visible` above.
    @GObject.Property(type=bool, default=False, flags=GObject.ParamFlags.READABLE)
    def scale_lock(self):
        return self._scale_lock

    @scale_lock.setter
    def scale_lock(self, value):
        self._scale_lock = value
        #self.resize()

    # NOTE(review): READABLE flag with a setter — see `visible` above.
    @GObject.Property(type=bool, default=False, flags=GObject.ParamFlags.READABLE)
    def scale_auto(self):
        return self._scale_auto

    @scale_auto.setter
    def scale_auto(self, value):
        self._scale_auto = value
        #self.resize()

    def __init__(self, chart, params) -> None:
        super().__init__()
        self.chart = chart
        self.app = chart.app
        # Pre-allocated 2 x ~1M sample buffer (presumably x/y rows — confirm).
        self.buffer = np.ndarray(shape=(2,0xFFFFF), dtype=float)
        self.params = params
        self.initialize_params()

    def initialize_params(self, *args):
        #private settings
        # NOTE(review): attribute-style access (self.params.chart_id) implies
        # params is an ORM row / object, not the dict built by
        # get_params_from_orm — confirm what callers actually pass.
        try:
            self._chart_id = self.params.chart_id
            self._connection_id = self.params.connection_id
            self._tag_id = self.params.tag_id
            self._color = self.params.color
            self._visible = self.params.visible
            self._weight = self.params.weight
            self._scale_minimum = self.params.scale_minimum
            self._scale_maximum = self.params.scale_maximum
            self._scale_lock = self.params.scale_lock
            self._scale_auto = self.params.scale_auto
        except:
            # NOTE(review): bare except silently swallows every error,
            # including typos — consider narrowing to AttributeError.
            pass
1618657 | '''
@author <NAME>
Please contact <EMAIL>
'''
import torch
import torch.nn.functional as F
from torch.autograd import Variable
'''
<NAME>., <NAME>., & <NAME>. (2014).
Neural machine translation by jointly learning to align and translate.
arXiv preprint arXiv:1409.0473.
<NAME>., <NAME>., & <NAME>. (2015).
Effective approaches to attention-based neural machine translation.
arXiv preprint arXiv:1508.04025.
<NAME>., <NAME>., & <NAME>. (2017).
A deep reinforced model for abstractive summarization.
arXiv preprint arXiv:1705.04304.
<NAME>., <NAME>., & <NAME>. (2017).
Get To The Point: Summarization with Pointer-Generator Networks.
arXiv preprint arXiv:1704.04368.
'''
class AttentionEncoder(torch.nn.Module):
    """Encoder-side attention (Bahdanau 2014 / Luong 2015 variants).

    Score functions: ``luong_concat`` (additive), ``luong_general``
    (bilinear), or plain dot product for any other value.  Optional
    coverage: ``asee*`` (See et al. 2017) feeds accumulated attention into
    the score; ``temporal`` (Paulus et al. 2017) normalizes scores by their
    running sum.
    """
    def __init__(
        self,
        hidden_size,
        attn_method,
        coverage,
    ):
        """
        hidden_size -- size of encoder/decoder hidden states
        attn_method -- 'luong_concat', 'luong_general', else dot product
        coverage    -- 'asee', 'asee_train', 'temporal', or disabled
        """
        super(AttentionEncoder, self).__init__()
        self.method = attn_method.lower()
        self.hidden_size = hidden_size
        self.coverage = coverage
        if self.method == 'luong_concat':
            # score = v^T tanh(W_e h_enc + W_d h_dec [+ w_c coverage])
            self.attn_en_in = torch.nn.Linear(
                self.hidden_size,
                self.hidden_size,
                bias=True).cuda()
            self.attn_de_in = torch.nn.Linear(
                self.hidden_size,
                self.hidden_size,
                bias=False).cuda()
            self.attn_cv_in = torch.nn.Linear(1, self.hidden_size, bias=False).cuda()
            self.attn_warp_in = torch.nn.Linear(self.hidden_size, 1, bias=False).cuda()
        if self.method == 'luong_general':
            self.attn_in = torch.nn.Linear(
                self.hidden_size,
                self.hidden_size,
                bias=False).cuda()

    def forward(self, dehy, enhy, past_attn):
        """Return (context, attention weights, raw attention scores).

        dehy: (batch, hidden) decoder state
        enhy: (batch, src_len, hidden) encoder states
        past_attn: (batch, src_len) accumulated attention for coverage
        """
        # attention score
        if self.method == 'luong_concat':
            attn_agg = self.attn_en_in(enhy) + self.attn_de_in(dehy.unsqueeze(1))
            if self.coverage[:4] == 'asee':
                attn_agg = attn_agg + self.attn_cv_in(past_attn.unsqueeze(2))
            # torch.tanh: F.tanh is deprecated
            attn_agg = torch.tanh(attn_agg)
            attn_ee = self.attn_warp_in(attn_agg).squeeze(2)
        else:
            if self.method == 'luong_general':
                enhy_new = self.attn_in(enhy)
                attn_ee = torch.bmm(enhy_new, dehy.unsqueeze(2)).squeeze(2)
            else:
                attn_ee = torch.bmm(enhy, dehy.unsqueeze(2)).squeeze(2)
        # coverage and attention weights
        if self.coverage == 'temporal':
            # temporal attention: normalize by the running sum of past scores
            attn_ee = torch.exp(attn_ee)
            attn = attn_ee/past_attn
            nm = torch.norm(attn, 1, 1).unsqueeze(1)
            attn = attn/nm
        else:
            attn = F.softmax(attn_ee, dim=1)
        # context vector: attention-weighted sum of encoder states
        attn2 = attn.unsqueeze(1)
        c_encoder = torch.bmm(attn2, enhy).squeeze(1)
        return c_encoder, attn, attn_ee
'''
Intra-decoder
<NAME>., <NAME>., & <NAME>. (2017).
A deep reinforced model for abstractive summarization.
arXiv preprint arXiv:1705.04304.
'''
class AttentionDecoder(torch.nn.Module):
    """Intra-decoder attention over previously emitted decoder states
    (Paulus et al. 2017).  Same score functions as AttentionEncoder."""
    def __init__(
        self,
        hidden_size,
        attn_method
    ):
        """
        hidden_size -- size of the decoder hidden states
        attn_method -- 'luong_concat', 'luong_general', else dot product
        """
        super(AttentionDecoder, self).__init__()
        self.method = attn_method.lower()
        self.hidden_size = hidden_size
        if self.method == 'luong_concat':
            # score = v^T tanh(W_p h_past + W_d h_dec)
            self.attn_en_in = torch.nn.Linear(
                self.hidden_size,
                self.hidden_size,
                bias=True).cuda()
            self.attn_de_in = torch.nn.Linear(
                self.hidden_size,
                self.hidden_size,
                bias=False).cuda()
            self.attn_warp_in = torch.nn.Linear(self.hidden_size, 1, bias=False).cuda()
        if self.method == 'luong_general':
            self.attn_in = torch.nn.Linear(
                self.hidden_size,
                self.hidden_size,
                bias=False).cuda()

    def forward(self, dehy, past_hy):
        """Return (context, attention weights).

        dehy: (batch, hidden) current decoder state
        past_hy: (batch, dec_len, hidden) previous decoder states
        """
        # attention score
        if self.method == 'luong_concat':
            attn_agg = self.attn_en_in(past_hy) + self.attn_de_in(dehy.unsqueeze(1))
            # torch.tanh: F.tanh is deprecated
            attn_agg = torch.tanh(attn_agg)
            attn = self.attn_warp_in(attn_agg).squeeze(2)
        else:
            if self.method == 'luong_general':
                past_hy_new = self.attn_in(past_hy)
                attn = torch.bmm(past_hy_new, dehy.unsqueeze(2)).squeeze(2)
            else:
                attn = torch.bmm(past_hy, dehy.unsqueeze(2)).squeeze(2)
        attn = F.softmax(attn, dim=1)
        # context vector: attention-weighted sum of past decoder states
        attn2 = attn.unsqueeze(1)
        c_decoder = torch.bmm(attn2, past_hy).squeeze(1)
        return c_decoder, attn
'''
LSTM decoder
'''
class LSTMDecoder(torch.nn.Module):
    """LSTM decoder cell with encoder attention, optional intra-decoder
    attention, coverage, and a pointer-generator switch
    (See et al. 2017; Paulus et al. 2017)."""
    def __init__(
        self,
        input_size,
        hidden_size,
        num_layers,
        attn_method,
        coverage,
        batch_first,
        pointer_net,
        attn_decoder
    ):
        super(LSTMDecoder, self).__init__()
        # parameters
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.n_layer = num_layers
        self.batch_first = batch_first
        self.attn_method = attn_method.lower()
        self.coverage = coverage
        self.pointer_net = pointer_net
        self.attn_decoder = attn_decoder
        # input feeding: previous attentional state is concatenated to the
        # embedding, hence input_size + hidden_size
        self.lstm_ = torch.nn.LSTMCell(
            self.input_size+self.hidden_size,
            self.hidden_size).cuda()
        self.encoder_attn_layer = AttentionEncoder(
            hidden_size=self.hidden_size,
            attn_method=self.attn_method,
            coverage=self.coverage).cuda()
        # intra-decoder
        if self.attn_decoder:
            self.decoder_attn_layer = AttentionDecoder(
                hidden_size=self.hidden_size,
                attn_method=self.attn_method).cuda()
            self.attn_out = torch.nn.Linear(
                self.hidden_size*3,
                self.hidden_size,
                bias=True).cuda()
        else:
            self.attn_out = torch.nn.Linear(
                self.hidden_size*2,
                self.hidden_size,
                bias=True).cuda()
        # pointer generator network
        if self.pointer_net:
            if self.attn_decoder:
                self.pt_out = torch.nn.Linear(
                    self.input_size+self.hidden_size*3, 1).cuda()
            else:
                self.pt_out = torch.nn.Linear(
                    self.input_size+self.hidden_size*2, 1).cuda()

    def forward(
        self, idx, input_, hidden_, h_attn,
        encoder_hy, past_attn, p_gen, past_dehy):
        """Decode one segment of target embeddings.

        idx -- number of steps already decoded before this call (so that
               k + idx == 0 identifies the very first decoding step)
        input_ -- target embeddings for this segment
        hidden_ -- (h, c) LSTMCell state
        h_attn -- previous attentional hidden state (input feeding)
        encoder_hy -- encoder states (batch, src_len, hidden)
        past_attn -- accumulated attention for coverage
        p_gen -- buffer receiving the pointer/generator switch per step
        past_dehy -- previously emitted decoder states (intra-decoder attn)

        Returns (output_, hidden_, h_attn, out_attn, past_attn, p_gen,
        past_dehy, loss_cv).
        """
        if self.batch_first:
            input_ = input_.transpose(0,1)
        batch_size = input_.size(1)
        output_ = []
        out_attn = []
        loss_cv = Variable(torch.zeros(1)).cuda()
        for k in range(input_.size(0)):
            # input feeding: concatenate previous attentional state
            x_input = torch.cat((input_[k], h_attn), 1)
            hidden_ = self.lstm_(x_input, hidden_)
            # attention over encoder states
            c_encoder, attn, attn_ee = self.encoder_attn_layer(
                hidden_[0], encoder_hy, past_attn)
            # intra-decoder attention over previously emitted decoder states
            if self.attn_decoder:
                if k + idx == 0:
                    # very first step: no decoder history yet
                    c_decoder = Variable(torch.zeros(
                        batch_size, self.hidden_size)).cuda()
                else:
                    c_decoder, attn_de = self.decoder_attn_layer(
                        hidden_[0], past_dehy)
                # append the current state to the decoder history
                past_dehy = past_dehy.transpose(0, 1)  # seqL*batch*hidden
                de_idx = past_dehy.size(0)
                if k + idx == 0:
                    past_dehy = hidden_[0].unsqueeze(0)  # seqL*batch*hidden
                    past_dehy = past_dehy.transpose(0, 1)  # batch*seqL*hidden
                else:
                    past_dehy = past_dehy.contiguous().view(-1, self.hidden_size)  # seqL*batch**hidden
                    past_dehy = torch.cat((past_dehy, hidden_[0]), 0)  # (seqL+1)*batch**hidden
                    past_dehy = past_dehy.view(de_idx+1, batch_size, self.hidden_size)  # (seqL+1)*batch*hidden
                    past_dehy = past_dehy.transpose(0, 1)  # batch*(seqL+1)*hidden
                h_attn = self.attn_out(torch.cat((c_encoder, c_decoder, hidden_[0]), 1))
            else:
                h_attn = self.attn_out(torch.cat((c_encoder, hidden_[0]), 1))
            # coverage loss (See et al. 2017) and coverage accumulation
            if self.coverage == 'asee_train':
                lscv = torch.cat((past_attn.unsqueeze(2), attn.unsqueeze(2)), 2)
                lscv = lscv.min(dim=2)[0]
                try:
                    loss_cv = loss_cv + torch.mean(lscv)
                except:
                    loss_cv = torch.mean(lscv)
            if self.coverage[:4] == 'asee':
                past_attn = past_attn + attn
            if self.coverage == 'temporal':
                if k + idx == 0:
                    past_attn = past_attn*0.0
                past_attn = past_attn + attn_ee
            # output
            output_.append(h_attn)
            out_attn.append(attn)
            # pointer-generator soft switch p_gen in (0, 1)
            if self.pointer_net:
                if self.attn_decoder:
                    pt_input = torch.cat((input_[k], hidden_[0], c_encoder, c_decoder), 1)
                else:
                    pt_input = torch.cat((input_[k], hidden_[0], c_encoder), 1)
                # torch.sigmoid: F.sigmoid is deprecated
                p_gen[:, k] = torch.sigmoid(self.pt_out(pt_input))
        len_seq = input_.size(0)
        batch_size, hidden_size = output_[0].size()
        output_ = torch.cat(output_, 0).view(
            len_seq, batch_size, hidden_size)
        out_attn = torch.cat(out_attn, 0).view(
            len_seq, attn.size(0), attn.size(1))
        if self.batch_first:
            output_ = output_.transpose(0,1)
        return output_, hidden_, h_attn, out_attn, past_attn, p_gen, past_dehy, loss_cv
'''
GRU decoder
'''
class GRUDecoder(torch.nn.Module):
    """GRU decoder with encoder attention, optional intra-decoder attention,
    optional coverage, and an optional pointer-generator gate.

    Mirrors the LSTMDecoder above, but uses a single GRUCell so the recurrent
    state is one tensor instead of an (h, c) tuple. All sub-modules are moved
    to GPU at construction time via ``.cuda()``.
    """
    def __init__(
        self,
        input_size,
        hidden_size,
        num_layers,
        attn_method,
        coverage,
        batch_first,
        pointer_net,
        attn_decoder
    ):
        """
        Args:
            input_size (int): dimension of the target embeddings fed in.
            hidden_size (int): decoder hidden dimension.
            num_layers (int): stored as ``n_layer``, but note a single
                GRUCell is built, so only one layer is actually stepped.
            attn_method (str): attention scoring method name (lower-cased).
            coverage (str): coverage mode (e.g. 'asee', 'asee_train',
                'temporal'; anything else disables coverage updates).
            batch_first (bool): whether inputs/outputs are batch-major.
            pointer_net (bool): build the pointer-generator gate ``pt_out``.
            attn_decoder (bool): add intra-decoder (decoder-side) attention.
        """
        super(GRUDecoder, self).__init__()
        # parameters
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.n_layer = num_layers
        self.batch_first = batch_first
        self.attn_method = attn_method.lower()
        self.coverage = coverage
        self.pointer_net = pointer_net
        self.attn_decoder = attn_decoder
        # Input of the cell is [embedding ; previous attentional output].
        self.gru_ = torch.nn.GRUCell(
            self.input_size+self.hidden_size,
            self.hidden_size).cuda()
        self.encoder_attn_layer = AttentionEncoder(
            hidden_size=self.hidden_size,
            attn_method=self.attn_method,
            coverage=self.coverage).cuda()
        # intra-decoder
        if self.attn_decoder:
            self.decoder_attn_layer = AttentionDecoder(
                hidden_size=self.hidden_size,
                attn_method=self.attn_method).cuda()
            # 3*hidden: [encoder context ; decoder context ; hidden state].
            self.attn_out = torch.nn.Linear(
                self.hidden_size*3,
                self.hidden_size,
                bias=True).cuda()
        else:
            self.attn_out = torch.nn.Linear(
                self.hidden_size*2,
                self.hidden_size,
                bias=True).cuda()
        # pointer generator network
        if self.pointer_net:
            if self.attn_decoder:
                self.pt_out = torch.nn.Linear(
                    self.input_size+self.hidden_size*3, 1).cuda()
            else:
                self.pt_out = torch.nn.Linear(
                    self.input_size+self.hidden_size*2, 1).cuda()
    def forward(
        self, idx, input_, hidden_, h_attn,
        encoder_hy, past_attn, p_gen, past_dehy):
        """Step the GRU decoder over an embedded target sequence.

        Args:
            idx (int): absolute offset of this chunk; ``k + idx == 0`` marks
                the very first decoding step (initializes the intra-decoder
                memory and resets temporal coverage).
            input_: target embeddings (batch*seq*emb if ``batch_first``).
            hidden_: GRU hidden state, batch*hidden.
            h_attn: previous attentional output, fed back as part of the input.
            past_attn: accumulated attention (coverage) state.
            p_gen: pointer gate buffer; column ``k`` is written per step.
            past_dehy: accumulated decoder hidden states (intra-decoder attn).

        Returns:
            Tuple ``(output_, hidden_, h_attn, out_attn, past_attn, p_gen,
            past_dehy, loss_cv)`` where ``loss_cv`` is the coverage loss
            (non-zero only for coverage == 'asee_train').
        """
        if self.batch_first:
            input_ = input_.transpose(0,1)
            batch_size = input_.size(1)
        output_ = []
        out_attn = []
        loss_cv = Variable(torch.zeros(1)).cuda()
        # NOTE(review): redundant with the assignment inside the batch_first
        # branch above; this unconditional one is the effective value.
        batch_size = input_.size(1)
        for k in range(input_.size(0)):
            # Input feeding: concatenate the embedding with last h_attn.
            x_input = torch.cat((input_[k], h_attn), 1)
            hidden_ = self.gru_(x_input, hidden_)
            # attention encoder
            c_encoder, attn, attn_ee = self.encoder_attn_layer(
                hidden_, encoder_hy, past_attn)
            # attention decoder
            if self.attn_decoder:
                if k + idx == 0:
                    # No decoder history yet: zero context on the first step.
                    c_decoder = Variable(torch.zeros(
                        batch_size, self.hidden_size)).cuda()
                else:
                    c_decoder, attn_de = self.decoder_attn_layer(
                        hidden_, past_dehy)
                    past_dehy = past_dehy.transpose(0, 1) # seqL*batch*hidden
                de_idx = past_dehy.size(0)
                # Append the current hidden state to the decoder history.
                if k + idx == 0:
                    past_dehy = hidden_.unsqueeze(0) # seqL*batch*hidden
                    past_dehy = past_dehy.transpose(0, 1) # batch*seqL*hidden
                else:
                    past_dehy = past_dehy.contiguous().view(-1, self.hidden_size) # seqL*batch**hidden
                    past_dehy = torch.cat((past_dehy, hidden_), 0) # (seqL+1)*batch**hidden
                    past_dehy = past_dehy.view(de_idx+1, batch_size, self.hidden_size) # (seqL+1)*batch*hidden
                    past_dehy = past_dehy.transpose(0, 1) # batch*(seqL+1)*hidden
                h_attn = self.attn_out(torch.cat((c_encoder, c_decoder, hidden_), 1))
            else:
                h_attn = self.attn_out(torch.cat((c_encoder, hidden_), 1))
            # coverage
            if self.coverage == 'asee_train':
                # Coverage loss: elementwise min of accumulated and current
                # attention (See et al. style).
                lscv = torch.cat((past_attn.unsqueeze(2), attn.unsqueeze(2)), 2)
                lscv = lscv.min(dim=2)[0]
                # Bare except guards the very first accumulation step.
                try:
                    loss_cv = loss_cv + torch.mean(lscv)
                except:
                    loss_cv = torch.mean(lscv)
            if self.coverage[:4] == 'asee':
                past_attn = past_attn + attn
            if self.coverage == 'temporal':
                if k + idx == 0:
                    # Reset the temporal-attention accumulator at step 0.
                    past_attn = past_attn*0.0
                past_attn = past_attn + attn_ee
            # output
            output_.append(h_attn)
            out_attn.append(attn)
            # pointer-generator gate for this time step
            if self.pointer_net:
                if self.attn_decoder:
                    pt_input = torch.cat((input_[k], hidden_, c_encoder, c_decoder), 1)
                else:
                    pt_input = torch.cat((input_[k], hidden_, c_encoder), 1)
                p_gen[:, k] = F.sigmoid(self.pt_out(pt_input))
        len_seq = input_.size(0)
        batch_size, hidden_size = output_[0].size()
        output_ = torch.cat(output_, 0).view(
            len_seq, batch_size, hidden_size)
        out_attn = torch.cat(out_attn, 0).view(
            len_seq, attn.size(0), attn.size(1))
        if self.batch_first:
            output_ = output_.transpose(0,1)
        return output_, hidden_, h_attn, out_attn, past_attn, p_gen, past_dehy, loss_cv
'''
sequence to sequence model
'''
class Seq2Seq(torch.nn.Module):
    """Attention-based sequence-to-sequence model.

    Supports an optionally bidirectional LSTM or GRU encoder, an attentional
    decoder (LSTMDecoder/GRUDecoder above), pointer-generator copying,
    coverage, shared source/target embeddings and embedding/output weight
    tying. Every sub-module is created directly on GPU via ``.cuda()``, so
    CUDA is required.
    """
    def __init__(
        self,
        src_emb_dim=128,
        trg_emb_dim=128,
        src_hidden_dim=256,
        trg_hidden_dim=256,
        src_vocab_size=999,
        trg_vocab_size=999,
        src_nlayer=2,
        trg_nlayer=1,
        batch_first=True,
        src_bidirect=True,
        dropout=0.0,
        attn_method='vanilla',
        coverage='vanilla',
        network_='lstm',
        pointer_net=False,
        shared_emb=True,
        attn_decoder=False,
        share_emb_weight=False
    ):
        """Build encoder, decoder and projection layers.

        Args:
            src_emb_dim / trg_emb_dim: embedding sizes.
            src_hidden_dim / trg_hidden_dim: RNN hidden sizes (the source
                hidden size is halved when ``src_bidirect`` is True so the
                concatenated directions match ``src_hidden_dim``).
            src_vocab_size / trg_vocab_size: vocabulary sizes.
            network_ (str): 'lstm' or 'gru'.
            shared_emb (bool): use one embedding table for both sides
                (sized by the *target* vocabulary).
            share_emb_weight (bool): tie the output projection to the
                embedding weights.
        """
        super(Seq2Seq, self).__init__()
        # parameters
        self.src_emb_dim = src_emb_dim
        self.trg_emb_dim = trg_emb_dim
        self.src_hidden_dim = src_hidden_dim
        self.trg_hidden_dim = trg_hidden_dim
        self.src_vocab_size = src_vocab_size
        self.trg_vocab_size = trg_vocab_size
        self.src_nlayer = src_nlayer
        self.trg_nlayer = trg_nlayer
        self.batch_first = batch_first
        self.src_bidirect = src_bidirect
        self.dropout = dropout
        self.attn_method = attn_method.lower()
        self.coverage = coverage.lower()
        self.network_ = network_.lower()
        self.pointer_net = pointer_net
        self.shared_emb = shared_emb
        self.attn_decoder = attn_decoder
        self.share_emb_weight = share_emb_weight
        # bidirection encoder
        self.src_num_directions = 1
        if self.src_bidirect:
            # Halve per-direction size so both directions concatenated
            # equal the requested src_hidden_dim.
            self.src_hidden_dim = src_hidden_dim // 2
            self.src_num_directions = 2
        # source embedding and target embedding
        if self.shared_emb:
            self.embedding = torch.nn.Embedding(
                self.trg_vocab_size,
                self.src_emb_dim).cuda()
            torch.nn.init.uniform(self.embedding.weight, -1.0, 1.0)
        else:
            self.src_embedding = torch.nn.Embedding(
                self.src_vocab_size,
                self.src_emb_dim).cuda()
            torch.nn.init.uniform(self.src_embedding.weight, -1.0, 1.0)
            self.trg_embedding = torch.nn.Embedding(
                self.trg_vocab_size,
                self.trg_emb_dim).cuda()
            torch.nn.init.uniform(self.trg_embedding.weight, -1.0, 1.0)
        # network structure
        if self.network_ == 'lstm':
            # encoder
            self.encoder = torch.nn.LSTM(
                input_size=self.src_emb_dim,
                hidden_size=self.src_hidden_dim,
                num_layers=self.src_nlayer,
                batch_first=self.batch_first,
                dropout=self.dropout,
                bidirectional=self.src_bidirect).cuda()
            # decoder
            self.decoder = LSTMDecoder(
                input_size=self.trg_emb_dim,
                hidden_size=self.trg_hidden_dim,
                num_layers=self.trg_nlayer,
                attn_method=self.attn_method,
                coverage=self.coverage,
                batch_first=self.batch_first,
                pointer_net=self.pointer_net,
                attn_decoder=self.attn_decoder
            ).cuda()
        elif self.network_ == 'gru':
            # encoder
            self.encoder = torch.nn.GRU(
                input_size=self.src_emb_dim,
                hidden_size=self.src_hidden_dim,
                num_layers=self.src_nlayer,
                batch_first=self.batch_first,
                dropout=self.dropout,
                bidirectional=self.src_bidirect).cuda()
            # decoder
            self.decoder = GRUDecoder(
                input_size=self.trg_emb_dim,
                hidden_size=self.trg_hidden_dim,
                num_layers=self.trg_nlayer,
                attn_method=self.attn_method,
                coverage=self.coverage,
                batch_first=self.batch_first,
                pointer_net=self.pointer_net,
                attn_decoder=self.attn_decoder
            ).cuda()
        # encoder to decoder
        self.encoder2decoder = torch.nn.Linear(
            self.src_hidden_dim*self.src_num_directions,
            self.trg_hidden_dim).cuda()
        # decoder to vocab
        if self.share_emb_weight:
            self.decoder2proj = torch.nn.Linear(
                self.trg_hidden_dim,
                self.src_emb_dim,
                bias=False).cuda()
            self.proj2vocab = torch.nn.Linear(
                self.src_emb_dim,
                self.trg_vocab_size,
                bias=True).cuda()
            # Weight tying with the shared embedding table.
            # NOTE(review): self.embedding only exists when shared_emb=True;
            # share_emb_weight=True with shared_emb=False would raise here.
            self.proj2vocab.weight.data = self.embedding.weight.data
        else:
            self.decoder2vocab = torch.nn.Linear(
                self.trg_hidden_dim,
                self.trg_vocab_size,
                bias=True).cuda()
    def forward(self, input_src, input_trg):
        """Full teacher-forced forward pass.

        Args:
            input_src: source token ids (batch*seq if batch_first).
            input_trg: target token ids (batch*seq if batch_first).

        Returns:
            (decoder_output, attn_, p_gen, loss_cv): vocabulary logits per
            target position, encoder attention weights, pointer gates and
            the coverage loss from the decoder.
        """
        # parameters
        src_seq_len = input_src.size(1)
        trg_seq_len = input_trg.size(1)
        # embedding
        if self.shared_emb:
            src_emb = self.embedding(input_src)
            trg_emb = self.embedding(input_trg)
        else:
            src_emb = self.src_embedding(input_src)
            trg_emb = self.trg_embedding(input_trg)
        batch_size = input_src.size(1)
        if self.batch_first:
            batch_size = input_src.size(0)
        # Variables
        h0_encoder = Variable(torch.zeros(
            self.encoder.num_layers*self.src_num_directions,
            batch_size, self.src_hidden_dim)).cuda()
        # Temporal coverage starts at ones (denominator-style accumulator),
        # other modes start at zeros.
        if self.coverage == 'temporal':
            past_attn = Variable(torch.ones(
                batch_size, src_seq_len)).cuda()
        else:
            past_attn = Variable(torch.zeros(
                batch_size, src_seq_len)).cuda()
        h_attn = Variable(torch.zeros(
            batch_size, self.trg_hidden_dim)).cuda()
        p_gen = Variable(torch.zeros(
            batch_size, trg_seq_len)).cuda()
        past_dehy = Variable(torch.zeros(1, 1)).cuda()
        # network
        if self.network_ == 'lstm':
            c0_encoder = Variable(torch.zeros(
                self.encoder.num_layers*self.src_num_directions,
                batch_size, self.src_hidden_dim)).cuda()
            # encoder
            encoder_hy, (src_h_t, src_c_t) = self.encoder(
                src_emb, (h0_encoder, c0_encoder))
            # Concatenate the final forward/backward states when bidirectional.
            if self.src_bidirect:
                h_t = torch.cat((src_h_t[-1], src_h_t[-2]), 1)
                c_t = torch.cat((src_c_t[-1], src_c_t[-2]), 1)
            else:
                h_t = src_h_t[-1]
                c_t = src_c_t[-1]
            decoder_h0 = self.encoder2decoder(h_t)
            decoder_h0 = F.tanh(decoder_h0)
            decoder_c0 = c_t
            # decoder
            trg_h, (_, _), _, attn_, _, p_gen, _, loss_cv = self.decoder(
                0, trg_emb,
                (decoder_h0, decoder_c0),
                h_attn, encoder_hy,
                past_attn, p_gen, past_dehy)
        elif self.network_ == 'gru':
            # encoder
            encoder_hy, src_h_t = self.encoder(
                src_emb, h0_encoder)
            if self.src_bidirect:
                h_t = torch.cat((src_h_t[-1], src_h_t[-2]), 1)
            else:
                h_t = src_h_t[-1]
            decoder_h0 = self.encoder2decoder(h_t)
            decoder_h0 = F.tanh(decoder_h0)
            # decoder
            trg_h, _, _, attn_, _, p_gen, _, loss_cv = self.decoder(
                0, trg_emb,
                decoder_h0, h_attn,
                encoder_hy, past_attn, p_gen, past_dehy)
        # prepare output
        # Flatten (seq, batch) so a single Linear maps to the vocabulary.
        trg_h_reshape = trg_h.contiguous().view(
            trg_h.size(0)*trg_h.size(1), trg_h.size(2))
        # consume a lot of memory.
        if self.share_emb_weight:
            decoder_proj = self.decoder2proj(trg_h_reshape)
            decoder_output = self.proj2vocab(decoder_proj)
        else:
            decoder_output = self.decoder2vocab(trg_h_reshape)
        decoder_output = decoder_output.view(
            trg_h.size(0), trg_h.size(1), decoder_output.size(1))
        return decoder_output, attn_, p_gen, loss_cv
    def forward_encoder(self, input_src):
        """Encode a source batch only (used at inference/beam-search time).

        Returns the encoder states plus freshly initialized decoder state,
        attentional output, coverage accumulator and intra-decoder memory.
        """
        # parameters
        src_seq_len = input_src.size(1)
        # embedding
        if self.shared_emb:
            src_emb = self.embedding(input_src)
        else:
            src_emb = self.src_embedding(input_src)
        batch_size = input_src.size(1)
        if self.batch_first:
            batch_size = input_src.size(0)
        # Variables
        h0_encoder = Variable(torch.zeros(
            self.encoder.num_layers*self.src_num_directions,
            batch_size, self.src_hidden_dim)).cuda()
        if self.coverage == 'temporal':
            past_attn = Variable(torch.ones(
                batch_size, src_seq_len)).cuda()
        else:
            past_attn = Variable(torch.zeros(
                batch_size, src_seq_len)).cuda()
        h_attn = Variable(torch.zeros(
            batch_size, self.trg_hidden_dim)).cuda()
        past_dehy = Variable(torch.zeros(1, 1)).cuda()
        # network
        if self.network_ == 'lstm':
            c0_encoder = Variable(torch.zeros(
                self.encoder.num_layers*self.src_num_directions,
                batch_size, self.src_hidden_dim)).cuda()
            # encoder
            encoder_hy, (src_h_t, src_c_t) = self.encoder(
                src_emb,
                (h0_encoder, c0_encoder))
            if self.src_bidirect:
                h_t = torch.cat((src_h_t[-1], src_h_t[-2]), 1)
                c_t = torch.cat((src_c_t[-1], src_c_t[-2]), 1)
            else:
                h_t = src_h_t[-1]
                c_t = src_c_t[-1]
            decoder_h0 = self.encoder2decoder(h_t)
            decoder_h0 = F.tanh(decoder_h0)
            decoder_c0 = c_t
            return encoder_hy, (decoder_h0, decoder_c0), h_attn, past_attn, past_dehy
        elif self.network_ == 'gru':
            # encoder
            encoder_hy, src_h_t = self.encoder(
                src_emb, h0_encoder)
            if self.src_bidirect:
                h_t = torch.cat((src_h_t[-1], src_h_t[-2]), 1)
            else:
                h_t = src_h_t[-1]
            decoder_h0 = self.encoder2decoder(h_t)
            decoder_h0 = F.tanh(decoder_h0)
            return encoder_hy, decoder_h0, h_attn, past_attn, past_dehy
    def forward_onestep_decoder(
        self,
        idx,
        input_trg,
        hidden_decoder,
        h_attn,
        encoder_hy,
        past_attn,
        past_dehy
    ):
        """Run the decoder for one chunk during incremental decoding.

        ``idx`` is the absolute step offset forwarded to the decoder so it
        can tell the very first step apart. Returns the vocabulary logits
        plus all updated decoder-side states.
        """
        if self.shared_emb:
            trg_emb = self.embedding(input_trg)
        else:
            trg_emb = self.trg_embedding(input_trg)
        batch_size = input_trg.size(1)
        if self.batch_first:
            batch_size = input_trg.size(0)
        # pointer weight
        p_gen = Variable(torch.zeros(batch_size, 1)).cuda()
        # decoder
        if self.network_ == 'lstm':
            trg_h, hidden_decoder, h_attn, attn_, past_attn, p_gen, past_dehy, loss_cv = self.decoder(
                idx, trg_emb, hidden_decoder, h_attn,
                encoder_hy, past_attn, p_gen, past_dehy)
        if self.network_ == 'gru':
            trg_h, hidden_decoder, h_attn, attn_, past_attn, p_gen, past_dehy, loss_cv = self.decoder(
                idx, trg_emb, hidden_decoder, h_attn,
                encoder_hy, past_attn, p_gen, past_dehy)
        # prepare output
        trg_h_reshape = trg_h.contiguous().view(
            trg_h.size(0) * trg_h.size(1), trg_h.size(2))
        if self.share_emb_weight:
            decoder_proj = self.decoder2proj(trg_h_reshape)
            decoder_output = self.proj2vocab(decoder_proj)
        else:
            decoder_output = self.decoder2vocab(trg_h_reshape)
        decoder_output = decoder_output.view(
            trg_h.size(0), trg_h.size(1), decoder_output.size(1))
        return decoder_output, hidden_decoder, h_attn, past_attn, p_gen, attn_, past_dehy
    def cal_dist(self, input_src, logits_, attn_, p_gen, src_vocab2id):
        """Mix generation and copy distributions (pointer-generator).

        Builds a one-hot scatter of the source ids and returns
        ``p_gen * logits + (1 - p_gen) * attn @ one_hot(src)``.
        """
        # parameters
        src_seq_len = input_src.size(1)
        trg_seq_len = logits_.size(1)
        batch_size = input_src.size(0)
        vocab_size = len(src_vocab2id)
        attn_ = attn_.transpose(0, 1)
        # calculate index matrix
        pt_idx = Variable(torch.FloatTensor(torch.zeros(1, 1, 1))).cuda()
        pt_idx = pt_idx.repeat(batch_size, src_seq_len, vocab_size)
        pt_idx.scatter_(2, input_src.unsqueeze(2), 1.0)
        return p_gen.unsqueeze(2)*logits_ + (1.0-p_gen.unsqueeze(2))*torch.bmm(attn_, pt_idx)
    def cal_dist_explicit(self, input_src, logits_, attn_, p_gen, vocab2id, ext_id2oov):
        """Pointer-generator mixing over a vocabulary extended with OOVs.

        Zero-probability columns are appended to ``logits_`` for each
        out-of-vocabulary id in ``ext_id2oov`` before mixing, so copied OOV
        tokens can receive probability mass.
        """
        # parameters
        src_seq_len = input_src.size(1)
        trg_seq_len = logits_.size(1)
        batch_size = input_src.size(0)
        vocab_size = len(vocab2id) + len(ext_id2oov)
        # extend current structure
        logits_ex = Variable(torch.zeros(1, 1, 1)).cuda()
        logits_ex = logits_ex.repeat(batch_size, trg_seq_len, len(ext_id2oov))
        if len(ext_id2oov) > 0:
            logits_ = torch.cat((logits_, logits_ex), -1)
        # pointer
        attn_ = attn_.transpose(0, 1)
        # calculate index matrix
        pt_idx = Variable(torch.FloatTensor(torch.zeros(1, 1, 1))).cuda()
        pt_idx = pt_idx.repeat(batch_size, src_seq_len, vocab_size)
        pt_idx.scatter_(2, input_src.unsqueeze(2), 1.0)
        return p_gen.unsqueeze(2)*logits_ + (1.0-p_gen.unsqueeze(2))*torch.bmm(attn_, pt_idx)
| StarcoderdataPython |
1707385 | <reponame>arvind-iyer/impersonator<gh_stars>0
import cv2
import numpy as np
from matplotlib import pyplot as plt
HMR_IMG_SIZE = 224
IMG_SIZE = 256
def read_cv2_img(path):
    """Load an image as RGB.

    :param path: either a filesystem path (str) or an already-decoded
        BGR image array.
    :return: the image converted from BGR to RGB.
    """
    bgr = cv2.imread(path, -1) if type(path) is str else path
    return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
def save_cv2_img(img, path, image_size=None, normalize=False):
    """Write an RGB image to *path* as BGR, optionally resized/denormalized.

    :param img: RGB image array.
    :param path: destination file path.
    :param image_size: if given, resize to (image_size, image_size).
    :param normalize: if True, map values from [-1, 1] to uint8 [0, 255].
    :return: the BGR image that was written.
    """
    out = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    if image_size is not None:
        out = cv2.resize(out, (image_size, image_size))
    if normalize:
        out = ((out + 1) / 2.0 * 255).astype(np.uint8)
    cv2.imwrite(path, out)
    return out
def transform_img(image, image_size, transpose=False):
    """Resize to a square, scale pixels to [0, 1] float32, optionally HWC->CHW."""
    resized = cv2.resize(image, (image_size, image_size)).astype(np.float32)
    resized /= 255.0
    return resized.transpose((2, 0, 1)) if transpose else resized
def resize_img_with_scale(img, scale_factor):
    """Resize *img* by *scale_factor* and report the realized factors.

    :return: (resized image, [height_factor, width_factor]) — the factors
        actually achieved after flooring to integer pixel sizes.
    """
    h, w = img.shape[0:2]
    target = (np.floor(np.array([h, w]) * scale_factor)).astype(int)
    resized = cv2.resize(img, (target[1], target[0]))
    # Realized scale of [height, width], i.e. [y, x].
    realized = [target[0] / float(h), target[1] / float(w)]
    return resized, realized
def kp_to_bbox_param(kp, vis_thresh=0, diag_len=150.0):
    """Compute bounding-box parameters from 2D keypoints.

    Args:
        kp (Kx3 or Kx2): 2D keypoints, optionally with a visibility column.
        vis_thresh (float): visibility cutoff used for the Kx3 case.
        diag_len (float): target diagonal length of the person bbox.

    Returns:
        np.ndarray [center_x, center_y, scale], or None when kp is missing,
        no keypoint is visible, or the keypoint spread is degenerately small.
    """
    if kp is None:
        return
    if kp.shape[1] == 3:
        visible = kp[:, 2] > vis_thresh
        if not np.any(visible):
            return
        pts = kp[visible, :2]
    else:
        pts = kp
    min_pt = np.min(pts, axis=0)
    max_pt = np.max(pts, axis=0)
    person_height = np.linalg.norm(max_pt - min_pt)
    if person_height < 0.5:
        return
    center = (min_pt + max_pt) / 2.
    scale = diag_len / person_height
    return np.append(center, scale)
def cal_process_params(im_path, bbox_param, rescale=None, image=None, image_size=IMG_SIZE, proc=False):
    """Compute crop parameters (and optionally the crop) around a bbox center.

    Args:
        im_path (str): the path of image.
        image (np.ndarray or None): if it is None, then loading the im_path, else use image.
        bbox_param (3,) : [cx, cy, scale].
        rescale (float, np.ndarray or None): rescale factor overriding bbox scale.
        proc (bool): if True actually resize/pad/crop the image; otherwise
            only the geometry is computed and the original image is resized
            to IMG_SIZE x IMG_SIZE.
        image_size (int): side length of the crop.
    Returns:
        dict with the (processed or resized) image, original path/shape,
        the crop center relative to the crop, the scale and the crop start
        point in padded-resized coordinates.
    """
    if image is None:
        image = read_cv2_img(im_path)
    orig_h, orig_w = image.shape[0:2]
    center = bbox_param[:2]
    scale = bbox_param[2]
    if rescale is not None:
        scale = rescale
    if proc:
        image_scaled, scale_factors = resize_img_with_scale(image, scale)
        resized_h, resized_w = image_scaled.shape[:2]
    else:
        # Geometry only: no resize performed, keep analytic sizes.
        scale_factors = [scale, scale]
        resized_h = orig_h * scale
        resized_w = orig_w * scale
    # NOTE(review): np.int is removed in modern numpy; works only on old versions.
    center_scaled = np.round(center * scale_factors).astype(np.int)
    if proc:
        # Make sure there is enough space to crop image_size x image_size.
        image_padded = np.pad(
            array=image_scaled,
            pad_width=((image_size,), (image_size,), (0,)),
            mode='edge'
        )
        padded_h, padded_w = image_padded.shape[0:2]
    else:
        padded_h = resized_h + image_size * 2
        padded_w = resized_w + image_size * 2
    # Shift the center into the padded coordinate frame.
    center_scaled += image_size
    # Crop image_size x image_size around the center.
    margin = image_size // 2
    start_pt = (center_scaled - margin).astype(int)
    end_pt = (center_scaled + margin).astype(int)
    end_pt[0] = min(end_pt[0], padded_w)
    end_pt[1] = min(end_pt[1], padded_h)
    if proc:
        proc_img = image_padded[start_pt[1]:end_pt[1], start_pt[0]:end_pt[0], :]
        height, width = image_scaled.shape[:2]
    else:
        height, width = end_pt[1] - start_pt[1], end_pt[0] - start_pt[0]
        proc_img = cv2.resize(image, (IMG_SIZE, IMG_SIZE))
        # proc_img = None
    # Express the center relative to the crop's top-left corner.
    center_scaled -= start_pt
    im_shape = [height, width]
    return {
        # return original too with info.
        'image': proc_img,
        'im_path': im_path,
        'im_shape': im_shape,
        'orig_im_shape': [orig_h, orig_w],
        'center': center_scaled,
        'scale': scale,
        'start_pt': start_pt,
    }
def cam_denormalize(cam, N):
    """Map a normalized weak-perspective camera (s, tx, ty) into crop-image
    coordinates for an N x N crop."""
    s = N * cam[0] * 0.5
    txy = cam[1:] + (2. / cam[0]) * 0.5
    return np.hstack([s, txy])
def cam_init2orig(cam, scale, start_pt, N=HMR_IMG_SIZE):
    """Convert a normalized HMR camera into original-image coordinates.

    Args:
        cam (3,): (s, tx, ty) normalized camera.
        scale (float): scale = resize_h / orig_h.
        start_pt (2,): crop top-left corner (lt_x, lt_y).
        N (int): crop size the camera was normalized against.

    Returns:
        (3,) camera (s, tx, ty) in original image coordinates.
    """
    # First denormalize into crop-image coordinates.
    s_crop = N * cam[0] * 0.5
    t_crop = cam[1:] + (2. / cam[0]) * 0.5
    cam_crop = np.hstack([s_crop, t_crop])
    print('cam_init', cam)
    print('cam_crop', cam_crop)
    # Then undo the resize and crop offset to reach the original frame.
    s_orig = cam_crop[0] / scale
    t_orig = cam_crop[1:] + (start_pt - N) / cam_crop[0]
    cam_orig = np.hstack([s_orig, t_orig])
    print('cam_orig', cam_orig)
    return cam_orig
def cam_orig2crop(cam, scale, start_pt, N=IMG_SIZE, normalize=True):
    """Convert a camera from original-image coordinates into crop coordinates.

    Args:
        cam (3,): (s, tx, ty) in original image coordinates.
        scale (float): scale = resize_h / orig_h or (resize_w / orig_w).
        start_pt (2,): crop top-left corner (lt_x, lt_y).
        N (int): crop size.
        normalize (bool): if True, also normalize into the [-1, 1] crop frame.

    Returns:
        (3,) camera in (possibly normalized) crop coordinates.
    """
    s_crop = cam[0] * scale
    t_crop = cam[1:] + (N - start_pt) / (scale * cam[0])
    cam_recrop = np.hstack([s_crop, t_crop])
    if not normalize:
        return cam_recrop
    return np.hstack([
        cam_recrop[0] * (2. / N),
        cam_recrop[1:] - N / (2 * cam_recrop[0])
    ])
def cam_process(cam_init, scale_150, start_pt_150, scale_proc, start_pt_proc, image_size):
    """Chain the two camera conversions: normalized HMR camera -> original
    image coordinates -> normalized crop coordinates.

    Args:
        cam_init: (3,) normalized camera from HMR (s, tx, ty).
        scale_150: resize factor used for the HMR (diag-150) crop.
        start_pt_150: top-left corner of the HMR crop.
        scale_proc: resize factor used for the processed crop.
        start_pt_proc: top-left corner of the processed crop.
        image_size: side length of the processed crop.
    Returns:
        (3,) camera normalized to the processed crop.
    """
    cam_orig = cam_init2orig(cam_init, scale=scale_150, start_pt=start_pt_150, N=HMR_IMG_SIZE)
    cam_crop = cam_orig2crop(cam_orig, scale=scale_proc, start_pt=start_pt_proc, N=image_size, normalize=True)
    return cam_crop
def show_cv2_img(img, title='img'):
    '''
    Display an (RGB) image in a matplotlib window, axes hidden.
    :param img: image array (as returned by read_cv2_img)
    :param title: figure title
    :return: None (blocks until the window is closed)
    '''
    plt.imshow(img)
    plt.title(title)
    plt.axis('off')
    plt.show()
def show_images_row(imgs, titles, rows=1):
    """
    Display a grid of images with matplotlib.
    :param imgs: list of image arrays
    :param titles: list of per-image titles, or None to auto-number
    :param rows: number of grid rows
    :return: None (blocks until the window is closed)
    """
    assert ((titles is None) or (len(imgs) == len(titles)))
    num_images = len(imgs)
    if titles is None:
        titles = ['Image (%d)' % i for i in range(1, num_images + 1)]
    fig = plt.figure()
    for n, (image, title) in enumerate(zip(imgs, titles)):
        # NOTE(review): np.ceil returns a float; newer matplotlib requires an
        # int column count here -- confirm against the matplotlib version used.
        ax = fig.add_subplot(rows, np.ceil(num_images / float(rows)), n + 1)
        if image.ndim == 2:
            # 2-D arrays are shown as grayscale.
            plt.gray()
        plt.imshow(image)
        ax.set_title(title)
        plt.axis('off')
    plt.show()
def intrinsic_mtx(f, c):
    """Build the 3 x 3 intrinsic camera matrix.

    Args:
        f: np.array, 1 x 2, the focal length of camera, (fx, fy).
        c: np.array, 1 x 2, the principal point, (px, py).

    Returns:
        np.array, 3 x 3 float32 intrinsic matrix.

    NOTE(review): index 1 of f/c fills the first row and index 0 the second,
    i.e. the inputs are effectively read in (y, x) order here -- confirm
    against the callers before relying on the (fx, fy) documentation.
    """
    K = np.zeros((3, 3), dtype=np.float32)
    K[0, 0], K[0, 2] = f[1], c[1]
    K[1, 1], K[1, 2] = f[0], c[0]
    K[2, 2] = 1
    return K
def extrinsic_mtx(rt, t):
    """Build the homogeneous extrinsic camera matrix.

    Args:
        rt: np.array, 1 x 3, rotation as a Rodrigues angle-axis vector.
        t: np.array, 1 x 3, translation of the camera center.

    Returns:
        np.array, 4 x 4 float32 matrix [[R, -R @ t], [0, 0, 0, 1]].
    """
    R = cv2.Rodrigues(rt)[0]
    center = np.reshape(t, newshape=(3, 1))
    top = np.hstack((R, -np.dot(R, center)))
    return np.vstack((top, [0, 0, 0, 1])).astype(np.float32)
def extrinsic(rt, t):
    """Return the rotation matrix and row-vector translation of a camera.

    Args:
        rt: np.array, 1 x 3, rotation as a Rodrigues angle-axis vector.
        t: np.array, 1 x 3 or (3,), translation of the camera center.

    Returns:
        (R, t): R is a 3 x 3 rotation matrix, t is reshaped to 1 x 3.
    """
    return cv2.Rodrigues(rt)[0], np.reshape(t, newshape=(1, 3))
def euler2matrix(rt):
    """Build a rotation matrix from euler angles, applied as R = Rz @ Ry @ Rx.

    Args:
        rt: np.array, (3,), rotation angles (radians) about x, y and z.

    Returns:
        np.array, (3, 3) rotation matrix.
    """
    cx, sx = np.cos(rt[0]), np.sin(rt[0])
    cy, sy = np.cos(rt[1]), np.sin(rt[1])
    cz, sz = np.cos(rt[2]), np.sin(rt[2])
    Rx = np.array([[1, 0, 0], [0, cx, -sx], [0, sx, cx]], dtype=np.float32)
    Ry = np.array([[cy, 0, sy], [0, 1, 0], [-sy, 0, cy]], dtype=np.float32)
    Rz = np.array([[cz, -sz, 0], [sz, cz, 0], [0, 0, 1]], dtype=np.float32)
    return np.dot(Rz, np.dot(Ry, Rx))
def get_rotated_smpl_pose(pose, theta):
    """Rotate an SMPL pose about the y axis.

    :param pose: (72,) axis-angle SMPL pose; pose[:3] is the global rotation.
    :param theta: rotation angle (radians) around the y axis.
    :return: (72,) copy of *pose* with the global rotation composed with Ry.
    """
    R, _ = cv2.Rodrigues(pose[:3])
    c, s = np.cos(theta), np.sin(theta)
    Ry = np.array([
        [c, 0, s],
        [0, 1, 0],
        [-s, 0, c]
    ])
    # Compose and convert back to axis-angle for the global joint.
    rotated_global, _ = cv2.Rodrigues(np.matmul(R, Ry))
    out = pose.copy()
    out[:3] = rotated_global.reshape(3)
    return out
if __name__ == '__main__':
    # Checks if a matrix is a valid rotation matrix.
    def isRotationMatrix(R):
        """Return True when R^T @ R is (numerically) the identity."""
        Rt = np.transpose(R)
        shouldBeIdentity = np.dot(Rt, R)
        I = np.identity(3, dtype=R.dtype)
        n = np.linalg.norm(I - shouldBeIdentity)
        return n < 1e-6
    # NOTE: euler2matrix expects radians, so this is a 90-radian y rotation
    # (still a valid rotation matrix, which is all this smoke test checks).
    R = euler2matrix(np.array([0, 90, 0], dtype=np.float32))
    print(isRotationMatrix(R))
| StarcoderdataPython |
"""
Installs dependencies in current conda environment.
Args:
--gpu: Force installing a gpu version. When not specified, the script only installs a gpu version when cuda is detected.
"""
import sys
import subprocess
import argparse
from cuda_check import get_cuda_version
def is_torch_installed() -> bool:
    """Return True when ``torch`` can be imported in this environment."""
    try:
        import torch  # noqa: F401 -- imported only to probe availability
        return True
    except Exception:
        return False
def install_package(pkg_name:str):
    """Install *pkg_name* into the current interpreter via ``pip``.

    The string may contain extra space-separated pip arguments
    (e.g. ``"pkg -f https://..."``); they are forwarded verbatim.
    """
    args = pkg_name.strip('\n').split(' ')
    subprocess.check_call([sys.executable, '-m', 'pip', 'install'] + args)
# Command-line interface: optional requirements file plus mutually exclusive
# --gpu / --cpu switches (both default to False; conflict is checked in main).
global_parser = argparse.ArgumentParser()
global_parser.add_argument("requirement_file", type=str, nargs='?', default="requirements.txt")
global_parser.add_argument("--gpu", dest='forcegpu', action='store_true')
# global_parser.add_argument("--no-gpu", dest='forcegpu', action='store_false')
global_parser.add_argument("--cpu", dest='forcecpu', action='store_true')
global_parser.set_defaults(forcecpu=False)
global_parser.set_defaults(forcegpu=False)
def cuda_canonical_version(v) -> str:
    """Canonicalize a CUDA version into the pip/PyTorch tag form ``cuXY``.

    Args:
        v: either an int as reported by the CUDA driver API (e.g. 9020 for
           CUDA 9.2) or a dotted string as reported by torch (e.g. '10.2').

    Returns:
        The canonical tag, e.g. ``'cu92'`` or ``'cu102'``.

    Raises:
        TypeError: for any other type. (Previously an unsupported type fell
        through both branches and crashed with an UnboundLocalError on
        ``major``.)
    """
    if type(v) == int:
        # from the API (like 9020 for 9.2)
        major = v // 1000
        minor = (v - major * 1000) // 10
    elif type(v) == str:
        # from torch (like '10.2')
        parts = v.split('.')
        major = int(parts[0])
        minor = int(parts[1])
    else:
        raise TypeError('unsupported CUDA version value: {!r}'.format(v))
    return 'cu' + str(major) + str(minor)
def main():
    """Detect the environment's CUDA version and install torch, the
    torch-geometric extensions and the remaining requirements.

    Resolution order for the CUDA tag: installed torch's reported version,
    then the local driver API, then the --gpu/--cpu overrides.
    """
    global_args = global_parser.parse_args()
    if global_args.forcegpu and global_args.forcecpu:
        print("Conflicting args.")
        exit(-1)
    # is torch installed?
    b_torch_installed = is_torch_installed()
    cuda_version = 'cpu'
    torch_version = ''
    if b_torch_installed:
        print('Pytorch is installed. Determinating cuda version from pytorch...')
        # is cuda installed?
        import torch
        torch_version = torch.version.__version__
        if torch.version.cuda:
            cuda_version = cuda_canonical_version(torch.version.cuda)
        if cuda_version == 'cpu':
            print('Pytorch is CPU version')
        else:
            print(f'Pytorch Cuda version:{cuda_version}')
    else:
        # determine cuda version using our method
        try:
            cuda_ver_int = get_cuda_version()
            cuda_version = cuda_canonical_version(cuda_ver_int)
        except Exception as e:
            print('Get CUDA version failed. Fall back to cpu version')
            cuda_version = 'cpu'
    if global_args.forcegpu:
        # force installing our specified cuda version
        subprocess.check_call(['conda', 'install', 'cudatoolkit=10.2'])
        cuda_version = 'cu102'
        pass
    if global_args.forcecpu:
        cuda_version = 'cpu'
    # install torch
    torch_package_name = f'torch==1.8.1+{cuda_version} -f https://download.pytorch.org/whl/torch_stable.html'
    install_package(torch_package_name)
    # torch-scatter and torch-sparse
    # BUGFIX: this URL was a plain string, so '{cuda_version}' was passed to
    # pip literally; it must be an f-string like torch_package_name above.
    TORCH_EXTENSION_URL = f'https://pytorch-geometric.com/whl/torch-1.8.0+{cuda_version}.html'
    torch_ext_dependencies = [
        'torch-sparse==0.6.9',
        'torch-scatter==2.0.6'
    ]
    torch_ext_dependencies = [' '.join([d, '-f', TORCH_EXTENSION_URL]) for d in torch_ext_dependencies]
    for d in torch_ext_dependencies:
        install_package(d)
    # other regular dependencies that can be easily installed via pypi
    with open(global_args.requirement_file) as f_req:
        dependencies = f_req.readlines()
    dependencies = [l for l in dependencies if not l.startswith('#')]
    for d in dependencies:
        install_package(d)
    pass
if __name__ == '__main__':
    # BUGFIX: dataset junk ("| StarcoderdataPython |") was fused onto this
    # line, making the file a syntax error.
    main()
class Solution:
    def XXX(self, height: List[int]) -> int:
        """Container With Most Water (LeetCode 11).

        Return the maximum water area bounded by two of the vertical lines
        in *height* and the x axis. Two-pointer scan: repeatedly advance
        the pointer holding the shorter wall past every line that is not
        taller than it, tracking the best area seen.

        (Also removes corrupted text that was fused onto the class line.)
        """
        left, right = 0, len(height) - 1
        best_left, best_right = left, right
        best = (right - left) * min(height[left], height[right])
        while left < right:
            if height[best_left] < height[best_right]:
                # Skip left walls no taller than the current best left wall.
                while height[best_left] >= height[left] and left < right:
                    left = left + 1
                best_left = left
            else:
                # Symmetric move for the right pointer.
                while height[best_right] >= height[right] and left < right:
                    right = right - 1
                best_right = right
            best = max((best_right - best_left) * min(height[best_left], height[best_right]), best)
        return best
| StarcoderdataPython |
4839166 | <gh_stars>0
# coding=utf-8
# Exemplos para entendiemnto
"""nome = input('Qual seu nome?' )
if nome == 'Rodrigo' or nome == 'RAYANNE':
print('Que nome lindo vocé tem!')
else:
print('Que nome tão normal!!!')
print('Bom dia, {}'.format(nome))"""
n1 = float(input('Digite a primeira nota: '))
n2 = float(input('Digite a segunda nota: '))
m = (n1 + n2) / 2
print('A sua média foi: {:.1f}'.format(m))
print('A sua media foi boa!' if m >= 6.0 else 'Sua media foi ruim,estude mais!')
"""if m >= 6.0:
print('Sua média foi boa!')
else:
print('A sua média foi ruim,estude mais!')"""
| StarcoderdataPython |
# Build external deps.
{
    'variables': { 'target_arch%': 'x64' },
    'target_defaults': {
        'default_configuration': 'Debug',
        # BUGFIX: GYP expects the plural key 'configurations'; the original
        # singular 'configuration' key is not recognized by GYP.
        'configurations': {
            'Debug': {
                'defines': [ 'DEBUG', '_DEBUG' ],
                'msvs_settings': {
                    # BUGFIX: the MSVS compiler settings key is
                    # 'VCCLCompilerTool' (was misspelled 'VSSLCompilerTool').
                    'VCCLCompilerTool': {
                        'RuntimeLibrary': 1, #static debug
                    },
                },
            },
            'Release': {
                # BUGFIX: the standard release macro is 'NDEBUG'
                # (disables assert); 'NODEBUG' has no effect.
                'defines': [ 'NDEBUG' ],
                'msvs_settings': {
                    'VCCLCompilerTool': {
                        'RuntimeLibrary': 0, #static release
                    },
                },
            },
        },
        'msvs_settings': {
            'VCLinkerTool': {
                'GenerateDebugInformation': 'true',
            },
        },
    },
    'targets': [
        {
            'target_name': 'libspeexdsp',
            'type': 'static_library',
            'sources': [
                'speex-1.2rc1/libspeex/preprocess.c',
                'speex-1.2rc1/libspeex/jitter.c',
                'speex-1.2rc1/libspeex/mdf.c',
                'speex-1.2rc1/libspeex/fftwrap.c',
                'speex-1.2rc1/libspeex/filterbank.c',
                'speex-1.2rc1/libspeex/resample.c',
                'speex-1.2rc1/libspeex/buffer.c',
                'speex-1.2rc1/libspeex/scal.c',
                'speex-1.2rc1/libspeex/smallft.c',
                'speex-1.2rc1/libspeex/kiss_fft.c',
                'speex-1.2rc1/libspeex/_kiss_fft_guts.h',
                'speex-1.2rc1/libspeex/kiss_fft.h',
                'speex-1.2rc1/libspeex/kiss_fftr.c',
                'speex-1.2rc1/libspeex/kiss_fftr.h'
            ],
            'cflags': [
                '-fvisibility=hidden',
                '-W',
                '-Wstrict-prototypes',
                '-Wno-parentheses',
                '-Wno-unused-parameter',
                '-Wno-sign-compare',
                '-Wno-unused-variable',
            ],
            'include_dirs': [
                'config/speex-1.2rc1/<(OS)/<(target_arch)',
                'speex-1.2rc1/include',
            ],
            'defines': [
                'PIC',
                'HAVE_CONFIG_H',
                '_USE_MATH_DEFINES',
            ]
        }
    ]
}
| StarcoderdataPython |
3255922 | import os
from clint.textui import puts, indent, columns, colored
from pytube import YouTube, exceptions
def draw_progress_bar(stream=None, chunk=None, file_handle=None, remaining=None):
    """pytube ``on_progress`` callback: print remaining/total MB and percent done."""
    total = stream.filesize
    done_pct = (100 * (total - remaining)) / total
    puts('\r', '')
    with indent(4):
        row = columns(
            ["{:.2f}".format(remaining * 0.000001) + ' MB', 8],
            ['/',1],
            ["{:.2f}".format(total * 0.000001) + ' MB', 8],
            ["({:.2f} %)".format(done_pct), 10]
        )
        puts(row, '')
def download_video(video_url, video_ref, row):
    """Download a YouTube video (or its audio / split streams) via pytube.

    Args:
        video_url: full YouTube URL.
        video_ref: short reference id used in the console output.
        row: CSV-style row; row[1] = target folder, row[2] = mode
            ('audio', 'high'/'split' or anything else for progressive),
            row[3] = title override ('' to use the video's own title).

    Returns:
        False when the YouTube object cannot be created; otherwise None.
        (BUGFIX: dataset junk fused onto the last line made the original
        file a syntax error; logic is otherwise unchanged.)
    """
    # Create YouTube video object
    try:
        video = YouTube(video_url, on_progress_callback=draw_progress_bar)
    except Exception as e:
        if row[3] != '':
            video_title = row[3]
        else:
            video_title = '[Error]'
        # Print error line
        with indent(4):
            puts('\r', '')
            puts(columns(
                [colored.blue(video_ref), 14],
                [video_title[:48], 50],
                [colored.red('Error: Invalid name'), 50]
            ))
        return False
    # Work out new title
    if row[3] != '':
        video_title = row[3]
    else:
        video_title = video.title
    # Work out path
    folder = ''
    if row[1] != '':
        folder = row[1]
    video_path = os.path.join(folder, video_title)
    full_path = os.path.join(os.getcwd(),folder)
    # Create required directory
    if not os.path.exists(full_path):
        os.makedirs(full_path)
    # Do Download
    try:
        if row[2] == 'audio':
            # Only load audio streams
            audio_stream = stream = video.streams.filter(only_audio=True).first()
            audio_stream.download(output_path=full_path, filename=video_title + '_audio')
            size = format(stream.filesize * 0.000001, '.2f') + ' MB'
        elif row[2] == 'high' or row[2] == 'split':
            # Load split streams (With higher quality)
            video_stream = video.streams.filter(adaptive=True).first()
            audio_stream = video.streams.filter(only_audio=True).first()
            audio_stream.download(output_path=full_path, filename=video_title+'_audio')
            video_stream.download(output_path=full_path, filename=video_title+'_video')
            size = format(video_stream.filesize * 0.000001, '.2f') + ' MB'
        else:
            # Download streams
            stream = video.streams.filter(progressive=True).first()
            stream.download(output_path=full_path, filename=video_title)
            size = format(stream.filesize * 0.000001, '.2f') + ' MB'
        # Print line
        with indent(4):
            puts('\r','')
            puts(columns(
                [colored.blue(video_ref), 14],
                [video_path[:48], 50],
                [size, 11],
                [colored.green('DONE'), 20]
            ))
    except Exception as e:
        # Print error line
        with indent(4):
            puts('\r', '')
            puts(columns(
                [colored.blue(video_ref), 14],
                [video_path[:48], 50],
                [colored.red('ERROR: Unable to download video'), 50]
            ))
import time
from bs4 import BeautifulSoup
import lib.longtask as longtask
from datastore.models import Indexer, WebResource
from google.appengine.api import memcache
__author__ = 'Lorenzo'
def store_feed(e):
"""
store a single entry from the feedparser
:param e: the entry
:return: if succeed the stored key else None
"""
query = WebResource.query().filter(WebResource.url == e["link"])
if query.count() == 0:
print "STORING: " + e["link"]
try:
if 'summary' in e:
s, t = BeautifulSoup(e['summary'], "lxml"), BeautifulSoup(e['title'], "lxml")
e['summary'], e['title'] = s.get_text(), t.get_text()
else:
t = BeautifulSoup(e['title'], "lxml")
e['summary'], e['title'] = None , t.get_text()
k = WebResource.store_feed(e)
print "STORED: " + str(k)
return k
except Exception as e:
print "Cannot Store: " + str(e)
return None
else:
print "Resource already stored"
return None
class storeFeeds(longtask.LongRunningTaskHandler):
"""
Handle the long task for storing feeds.
#TO-DO: move memcache in handler Scrawler
"""
def execute_task(self, *args):
from flankers.scrawler import Scrawler
RSS_FEEDS_CACHE = memcache.get('RSS_FEEDS_CACHE')
if not RSS_FEEDS_CACHE or len(RSS_FEEDS_CACHE) == 0:
RSS_FEEDS_CACHE = Scrawler.load_links()
memcache.set('RSS_FEEDS_CACHE', RSS_FEEDS_CACHE)
print len(RSS_FEEDS_CACHE)
l = RSS_FEEDS_CACHE.pop()
print l
entries = Scrawler.read_feed(l)
if entries:
for entry in entries:
#
# Store feed
#
store_feed(entry)
memcache.set('RSS_FEEDS_CACHE', RSS_FEEDS_CACHE)
return None
memcache.set('RSS_FEEDS_CACHE', RSS_FEEDS_CACHE)
print "This Feed has no entries"
return None
class storeTweets(longtask.LongRunningTaskHandler):
i = 0
def recurring(self, timeline):
for twt in timeline:
WebResource.store_tweet(twt)
self.i += 1
print "Total tweets in list: " + str(self.i)
def execute_task(self, timeline, remain=list()):
"""
#TO-DO: make this recursive
:param timeline:
:param remain:
:return:
"""
for twt in timeline:
if isinstance(twt, list):
print "twt is a list"
self.recurring(twt)
else:
WebResource.store_tweet(twt)
self.i += 1
print "Total tweets: " + str(self.i)
class storeIndexer(longtask.LongRunningTaskHandler):
def execute_task(self, *args):
item, key = args
from flankers.textsemantics import find_related_concepts
if not (item.title == '' and item.abstract == ''):
# if item is not a media or a link from Twitter
# it is or a feed or a tweet
text = item.abstract if len(item.abstract) != 0 else item.title
labels = find_related_concepts(text)
for l in labels:
if Indexer.query().filter(Indexer.webres == key).count() == 0:
index = Indexer(keyword=l.strip(), webres=key)
index.put()
print "indexing stored: " + item.url + ">" + l
| StarcoderdataPython |
3221957 | <reponame>emsalinha/videosum-eval<filename>score_evaluators/rank_corr_evaluator.py<gh_stars>0
# %% md
# Compute rank order statistics on annotated frame importance scores
import sys
import numpy as np
from scipy.stats import kendalltau, spearmanr
from scipy.stats import rankdata
sys.path.append('/home/emma/summary_evaluation/')
from summ_evaluator import SummaryEvaluator
class RankCorrelationEvaluator(SummaryEvaluator):
    """Evaluate annotated frame-importance scores with rank-correlation metrics."""
    def __init__(self, ds_name, model_name, num_classes):
        super().__init__(ds_name=ds_name, model_name=model_name, num_classes=num_classes)
        # Last computed correlation coefficient and its p-value.
        self.corr = None
        self.p_value = None
    def rc_func(self, x, y, metric):
        # Dispatch to the requested rank-correlation statistic.
        if metric == 'kendalltau':
            # Kendall's tau on descending ranks of both score vectors.
            return kendalltau(rankdata(-x), rankdata(-y))
        elif metric == 'spearmanr':
            return spearmanr(x, y)
        else:
            raise RuntimeError
    def get_metrics(self, metric='kendalltau', random=False, threshold=None, print_output=False):
        """Return the rank correlation between self.y_true and get_y(...)."""
        # get_y / y_true are provided by SummaryEvaluator (external base class).
        y = self.get_y(random, threshold)
        self.corr, self.p_value = self.rc_func(self.y_true, y, metric)
        if print_output:
            print(self.rc_func(self.y_true, y, metric))
        return self.corr
    def plot(self):
        # Not implemented for this evaluator.
        pass
if __name__ == '__main__':
    metric = 'kendalltau'
    # FIX: the original call omitted the required ``model_name`` positional
    # argument and raised TypeError. 'pred' matches the label printed below —
    # TODO confirm the intended model name.
    rank_corr_eval = RankCorrelationEvaluator('run_1_two', model_name='pred', num_classes=2)
    mean_res = rank_corr_eval.get_metrics(metric)
    print('pred' + ': mean %.3f' % (mean_res))
| StarcoderdataPython |
1754877 | import logging
from typing import Any, Dict, Text
from rasa.shared.nlu.constants import INTENT, ENTITIES, TEXT
from rasa.shared.nlu.training_data.formats.readerwriter import JsonTrainingDataReader
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.training_data.message import Message
logger = logging.getLogger(__name__)
class WitReader(JsonTrainingDataReader):
    """Reader for WIT.ai JSON training-data exports."""
    def read_from_json(self, js: Dict[Text, Any], **kwargs: Any):
        """Loads training data stored in the WIT.ai data format."""
        training_examples = []
        for s in js["data"]:
            entities = s.get(ENTITIES)
            if entities is None:
                # Examples without annotations are skipped.
                continue
            text = s.get(TEXT)
            # WIT encodes the intent as a pseudo-entity whose "entity" field
            # equals the INTENT constant; take the first one if present.
            intents = [e["value"] for e in entities if e["entity"] == INTENT]
            intent = intents[0].strip('"') if intents else None
            # Keep only span-annotated real entities (drop intent pseudo-entities).
            entities = [
                e
                for e in entities
                if ("start" in e and "end" in e and e["entity"] != INTENT)
            ]
            for e in entities:
                # for some reason wit adds additional quotes around entities
                e["value"] = e["value"].strip('"')
            data = {}
            if intent:
                data[INTENT] = intent
            if entities is not None:
                data[ENTITIES] = entities
            data[TEXT] = text
            training_examples.append(Message(data=data))
        return TrainingData(training_examples)
| StarcoderdataPython |
1641498 | <gh_stars>1-10
"""empty message
Revision ID: d2d91cf1ca9
Revises: None
Create Date: 2015-10-20 21:24:42.398717
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Creates componentIncome: composite PK (component, year); remaining
    # columns bucket unit counts by household-income range.
    op.create_table('componentIncome',
    sa.Column('component', sa.String(length=80), nullable=False),
    sa.Column('year', sa.Integer(), nullable=False),
    sa.Column('all_units', sa.Integer(), nullable=True),
    sa.Column('less_than_five', sa.Integer(), nullable=True),
    sa.Column('five_to_ten', sa.Integer(), nullable=True),
    sa.Column('ten_to_fifteen', sa.Integer(), nullable=True),
    sa.Column('fifteen_to_twenty', sa.Integer(), nullable=True),
    sa.Column('twenty_to_thirty', sa.Integer(), nullable=True),
    sa.Column('thirty_to_fourty', sa.Integer(), nullable=True),
    sa.Column('fourty_to_fifty', sa.Integer(), nullable=True),
    sa.Column('fifty_to_seventy', sa.Integer(), nullable=True),
    sa.Column('seventy_or_more', sa.Integer(), nullable=True),
    sa.PrimaryKeyConstraint('component', 'year')
    )
    ### end Alembic commands ###
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Reverses upgrade() by dropping the componentIncome table.
    op.drop_table('componentIncome')
    ### end Alembic commands ###
| StarcoderdataPython |
1717535 | <gh_stars>10-100
"""
mainly pulser tagging
- gaussian_cut (fits data to a gaussian, returns mean +/- cut_sigma values)
- xtalball_cut (fits data to a crystalball, returns mean +/- cut_sigma values)
- find_pulser_properties (find pulser by looking for which peak has a constant time between events)
- tag_pulsers
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gs
from scipy import stats
from .peak_fitting import *
def gaussian_cut(data, cut_sigma=3, plotAxis=None):
    '''
    fits data to a gaussian, returns mean +/- cut_sigma values for a cut

    Returns (cut_lo, cut_hi, fit_mean, cut_sigma); optionally draws the data,
    fit and cut lines on *plotAxis*.
    '''
    nbins = 100  # NOTE(review): unused — the histogram below hard-codes 101 bins
    median = np.median(data)
    # Robust width estimate from the 20th-80th percentile spread.
    width = (np.percentile(data, 80) - np.percentile(data, 20))
    # Restrict the fit to data within +/- 4 widths of the median.
    good_data = data[(data > (median - 4 * width)) &
                     (data < (median + 4 * width))]
    hist, bins = np.histogram(good_data, bins=101) #np.linspace(1,5,101)
    bin_centers = bins[:-1] + (bins[1] - bins[0]) / 2
    #fit gaussians to that
    # result = fit_unbinned(gauss, hist, [median, width/2] )
    # print("unbinned: {}".format(result))
    result = fit_binned(
        gauss, hist, bin_centers,
        [median, width / 2,
         np.amax(hist) * (width / 2) * np.sqrt(2 * np.pi)])
    # print("binned: {}".format(result))
    # Cut window is fit mean +/- cut_sigma fitted widths.
    cut_lo = result[0] - cut_sigma * result[1]
    cut_hi = result[0] + cut_sigma * result[1]
    if plotAxis is not None:
        plotAxis.plot(
            bin_centers, hist, ls="steps-mid", color="k", label="data")
        fit = gauss(bin_centers, *result)
        plotAxis.plot(bin_centers, fit, label="gaussian fit")
        plotAxis.axvline(result[0], color="g", label="fit mean")
        plotAxis.axvline(
            cut_lo, color="r", label="+/- {} sigma".format(cut_sigma))
        plotAxis.axvline(cut_hi, color="r")
        plotAxis.legend()
        # plt.xlabel(params[i])
    return cut_lo, cut_hi, result[0], cut_sigma
def xtalball_cut(data, cut_sigma=3, plotFigure=None):
    '''
    fits data to a crystalball, returns mean +/- cut_sigma values for a cut

    Returns (cut_lo, cut_hi); optionally draws the data, fit and cut lines
    into *plotFigure*.
    '''
    nbins = 100  # NOTE(review): unused — the histogram below hard-codes 101 bins
    median = np.median(data)
    # Robust width estimate from the 20th-80th percentile spread.
    width = (np.percentile(data, 80) - np.percentile(data, 20))
    good_data = data[(data > (median - 4 * width)) &
                     (data < (median + 4 * width))]
    hist, bins = np.histogram(good_data, bins=101) #np.linspace(1,5,101)
    bin_centers = bins[:-1] + (bins[1] - bins[0]) / 2
    #fit gaussians to that
    # result = fit_unbinned(gauss, hist, [median, width/2] )
    # print("unbinned: {}".format(result))
    # Seed the crystal-ball fit with a gaussian guess; tail parameters
    # (alpha, n) start at (10, 1) and are bounded below by (0, 1).
    p0 = get_gaussian_guess(hist, bin_centers)
    bounds = [(p0[0] * .5, p0[1] * .5, p0[2] * .2, 0, 1),
              (p0[0] * 1.5, p0[1] * 1.5, p0[2] * 5, np.inf, np.inf)]
    result = fit_binned(
        xtalball,
        hist,
        bin_centers, [p0[0], p0[1], p0[2], 10, 1],
        bounds=bounds)
    # print("binned: {}".format(result))
    cut_lo = result[0] - cut_sigma * result[1]
    cut_hi = result[0] + cut_sigma * result[1]
    if plotFigure is not None:
        plt.figure(plotFigure.number)
        plt.plot(bin_centers, hist, ls="steps-mid", color="k", label="data")
        fit = xtalball(bin_centers, *result)
        plt.plot(bin_centers, fit, label="xtalball fit")
        plt.axvline(result[0], color="g", label="fit mean")
        plt.axvline(cut_lo, color="r", label="+/- {} sigma".format(cut_sigma))
        plt.axvline(cut_hi, color="r")
        plt.legend()
        # plt.xlabel(params[i])
    return cut_lo, cut_hi
def find_pulser_properties(df, energy="trap_max"):
    """Locate the pulser peak for a single channel.

    Returns (pulser_energy, peak_e_err, period, energy_name) for the first
    prominent peak whose inter-event times cluster at a constant period, or
    None if no such peak is found.
    """
    from .calibration import get_most_prominent_peaks
    # print (df[energy])
    # exit()
    #find pulser by looking for which peak has a constant time between events
    #df should already be grouped by channel
    peak_energies, peak_e_err = get_most_prominent_peaks(
        df[energy], max_num_peaks=10)
    peak_e_err *= 3
    for e in peak_energies:
        # Select events in a +/- peak_e_err window around this peak.
        e_cut = (df[energy] > e - peak_e_err) & (df[energy] < e + peak_e_err)
        df_peak = df[e_cut]
        # df_after_0 = df_peak.iloc[1:]
        time_since_last = df_peak.timestamp.values[
            1:] - df_peak.timestamp.values[:-1]
        # Keep non-negative intervals below the 99.9th percentile (outlier trim).
        tsl = time_since_last[
            (time_since_last >= 0) &
            (time_since_last < np.percentile(time_since_last, 99.9))]
        # A pulser peak has a tight interval distribution: the top tail of the
        # interval spread should be narrower than the bottom tail.
        last_ten = np.percentile(tsl, 97) - np.percentile(tsl, 90)
        first_ten = np.percentile(tsl, 10) - np.percentile(tsl, 3)
        # print("{:e}, {:e}".format(last_ten,first_ten))
        if last_ten > first_ten:
            # print("...no pulser?")
            continue
        else:
            # df["pulser_energy"] = e
            pulser_e = e
            # Most common interval is taken as the pulser period.
            period = stats.mode(tsl).mode[0]
            return pulser_e, peak_e_err, period, energy
    return None
def tag_pulsers(df, chan_info, window=250):
    """Flag events synchronous with the channel's pulser.

    Adds an ``isPulser`` column to *df*: 1 for events inside the pulser energy
    window whose timestamp modulo the fitted period falls within *window* of
    the fitted phase, else 0. Returns *df*.
    """
    chan = df.channel.unique()[0]
    df["isPulser"] = 0
    try:
        pi = chan_info.loc[chan]
    except KeyError:
        # No pulser information for this channel: leave all flags at 0.
        return df
    energy_name = pi.energy_name
    pulser_energy = pi.pulser_energy
    period = pi.pulser_period
    peak_e_err = pi.peak_e_err
    # pulser_energy, peak_e_err, period, energy_name = chan_info
    e_cut = (df[energy_name] < pulser_energy + peak_e_err) & (
        df[energy_name] > pulser_energy - peak_e_err)
    df_pulser = df[e_cut]
    time_since_last = np.zeros(len(df_pulser))
    time_since_last[
        1:] = df_pulser.timestamp.values[1:] - df_pulser.timestamp.values[:-1]
    # plt.figure()
    # plt.hist(time_since_last, bins=1000)
    # plt.show()
    # Events whose inter-arrival time matches the nominal period.
    mode_idxs = (time_since_last > period - window) & (time_since_last <
                                                       period + window)
    pulser_events = np.count_nonzero(mode_idxs)
    # print("pulser events: {}".format(pulser_events))
    if pulser_events < 3: return df
    df_pulser = df_pulser[mode_idxs]
    ts = df_pulser.timestamp.values
    # Refit period and phase: linear fit of timestamps vs cumulative period count.
    diff_zero = np.zeros(len(ts))
    diff_zero[1:] = np.around((ts[1:] - ts[:-1]) / period)
    diff_cum = np.cumsum(diff_zero)
    z = np.polyfit(diff_cum, ts, 1)
    p = np.poly1d(z)
    # plt.figure()
    # xp = np.linspace(0, diff_cum[-1])
    # plt.plot(xp,p(xp))
    # plt.scatter(diff_cum,ts)
    # plt.show()
    period = z[0]
    phase = z[1]
    mod = np.abs(df.timestamp - phase) % period
    # pulser_mod =np.abs(df_pulser.timestamp - phase) %period
    # pulser_mod[ pulser_mod > 10*window] = period - pulser_mod[ pulser_mod > 10*window]
    # plt.hist(pulser_mod , bins="auto")
    # plt.show()
    period_cut = (mod < window) | ((period - mod) < window)
    # print("pulser events: {}".format(np.count_nonzero(e_cut & period_cut)))
    df.loc[e_cut & period_cut, "isPulser"] = 1
    return df
| StarcoderdataPython |
56523 | <filename>cookies_website/apps.py
from django.apps import AppConfig
class CookiesWebsiteConfig(AppConfig):
    """Django application configuration for the cookies_website app."""
    name = 'cookies_website'
| StarcoderdataPython |
1790956 | <reponame>pyansys/pyaedt
import sys
import threading
import warnings
from pyaedt.generic.general_methods import is_ironpython
if not is_ironpython:
try:
import numpy as np
except ImportError:
warnings.warn(
"The NumPy module is required to run some functionalities of PostProcess.\n"
"Install with \n\npip install numpy\n\nRequires CPython."
)
class ThreadTrace(threading.Thread):
    """Control a thread with python

    Installs a sys.settrace line tracer in the thread; kill() sets a flag
    that makes the tracer raise SystemExit at the next executed line, so the
    thread stops cooperatively (at a real Python line boundary only).
    """
    def __init__(self, *args, **keywords):
        threading.Thread.__init__(self, *args, **keywords)
        self.killed = False  # set by kill(); polled by localtrace
    def start(self):
        # Swap run() for a wrapper that installs the trace before running.
        self.__run_backup = self.run
        self.run = self.__run
        threading.Thread.start(self)
    def __run(self):
        sys.settrace(self.globaltrace)
        self.__run_backup()
        self.run = self.__run_backup
    def globaltrace(self, frame, event, arg):
        # Trace only function calls; delegate line events to localtrace.
        if event == "call":
            return self.localtrace
        else:
            return None
    def localtrace(self, frame, event, arg):
        if self.killed:
            if event == "line":
                # Raising here terminates the traced thread.
                raise SystemExit()
        return self.localtrace
    def kill(self):
        # Request termination; takes effect at the next traced line.
        self.killed = True
class GeneticAlgorithm(object):
    """Genetic Algorithm for Python
    Basic implementation of elitist genetic algorithm for solving problems with integers, continuous, boolean
    or mixed variables.
    Parameters
    ----------
    function : callable
        The Objective function to be minimized. This implementation minimizes the given objective function.
    dim : int
        Number of variables
    reference_file : str, optional
        Reference file to create the cromosomes. If it is not specified, the function should create the cromose.
    goal : float, optional
        If after 'max_iteration_no_improv' iterations the goal is not improved, the algorithm stops
    var_type: str
        Type of the optimization variables. The default is 'bool'.
        Other options are: 'int' if all variables are integer, and 'real' if all variables are
        real value or continuous
    boundaries: <numpy array/None>
        By default is None. None if var_type is 'bool', otherwise provide an array of tuples
        of length two as boundaries for each variable, the length of the array must be equal dimension.
        For example, np.array([0,100],[0,200]) determines lower boundary 0 and upper boundary 100 for first
        and upper boundary 200 for second variable where dimension is 2.
    var_type_mixed: <numpy array/None> -
        By default is None. None if all variables have the same type, otherwise this can be used to specify the type of
        each variable separately.
        For example if the first variable is integer but the second one is real the input is:
        np.array(['int'],['real']). NOTE: it does not accept 'bool'. If variable type is Boolean use 'int' and provide
        a boundary as [0,1] in variable_boundaries.
    function_timeout: float
        If the given function does not provide output before function_timeout (unit is seconds)
        the algorithm raise error.
        For example, when there is an infinite loop in the given function.
    algorithm_parameters: dict
        Genetic algorithm parameters:
        max_num_iteration : int
        population_size : int
        crossover_prob: float
        parents_portion: float
        crossover_type: string
            The default is 'uniform'. Other options are 'one_point' or 'two_point'
        mutation_prob : float
        elite_ration : float
        max_iteration_no_improv: int
            Successive iterations without improvement. If None it is ineffective
    progress_bar: bool
        Show progress bar. The default is True.
    Examples
    --------
    Optimize a defined function using a genetic algorithm.
    >>>import numpy as np
    >>>from pyaedt.generic.python_optimizers import GeneticAlgorithm as ga
    >>> def f(X):
    >>>    return np.sum(X)
    >>>varbound = np.array([[0, 10]] * 3)
    >>>model = ga(function=f, dimension=3, var_type='real', variable_boundaries=varbound)
    >>>model.run()
    """
    def __init__(
        self,
        function,
        dim,
        reference_file=None,
        population_file=None,
        goal=0,
        var_type="bool",
        boundaries=None,
        var_type_mixed=None,
        function_timeout=0,
        algorithm_parameters=None,
        progress_bar=True,
    ):
        """Validate the arguments and derive all GA hyper-parameters."""
        self.population_file = None
        self.goal = 1e10
        if population_file:
            self.population_file = population_file
        self.function = function
        self.dim = int(dim)
        self.goal = float(goal)
        if not var_type == "bool" and not var_type == "int" and not var_type == "real":
            raise ValueError("Variable type is not correct")
        if var_type_mixed is None:
            if var_type == "real":
                self.var_type = np.array([["real"]] * self.dim)
            else:
                self.var_type = np.array([["int"]] * self.dim)
        else:
            if type(var_type_mixed).__module__ != "numpy":
                raise ValueError("var_type must be numpy array")
            if len(var_type_mixed) != self.dim:
                raise ValueError("var_type must have a length equal dimension")
            self.var_type = var_type_mixed
        if var_type != "bool" or type(var_type_mixed).__module__ == "numpy":
            if len(boundaries) != self.dim:
                raise ValueError("boundaries must have a length equal dimension")
            if type(boundaries).__module__ != "numpy":
                raise ValueError("boundaries must be numpy array")
            for i in boundaries:
                if len(i) != 2:
                    raise ValueError("boundary for each variable must be a tuple of length two")
                if i[0] > i[1]:
                    raise ValueError("lower boundaries must be smaller than upper_boundaries")
            self.var_bound = boundaries
        else:
            # Boolean problems are modelled as integers bounded by [0, 1].
            self.var_bound = np.array([[0, 1]] * self.dim)
        self.timeout = float(function_timeout)
        if progress_bar:
            self.progress_bar = True
        else:
            self.progress_bar = False
        # GA parameters
        if not algorithm_parameters:
            algorithm_parameters = {
                "max_num_iteration": None,
                "population_size": 50,
                "crossover_prob": 0.5,
                "parents_portion": 0.3,
                "crossover_type": "uniform",
                "mutation_prob": 0.2,
                "elite_ratio": 0.05,
                "max_iteration_no_improv": None,
            }
        self.ga_param = algorithm_parameters
        if not (1 >= self.ga_param["parents_portion"] >= 0):
            raise ValueError("parents_portion must be in range [0,1]")
        self.population_size = int(self.ga_param["population_size"])
        # Number of parents kept each generation (rounded up to keep the
        # number of children even, since children are produced in pairs).
        self.par_s = int(self.ga_param["parents_portion"] * self.population_size)
        trl = self.population_size - self.par_s
        if trl % 2 != 0:
            self.par_s += 1
        self.prob_mut = self.ga_param["mutation_prob"]
        if not (1 >= self.prob_mut >= 0):
            raise ValueError("mutation_prob must be in range [0,1]")
        self.prob_cross = self.ga_param["crossover_prob"]
        if not (1 >= self.prob_cross >= 0):
            raise ValueError("prob_cross must be in range [0,1]")
        if not (1 >= self.ga_param["elite_ratio"] >= 0):
            raise ValueError("elite_ratio must be in range [0,1]")
        trl = self.population_size * self.ga_param["elite_ratio"]
        if trl < 1 and self.ga_param["elite_ratio"] > 0:
            self.num_elit = 1
        else:
            self.num_elit = int(trl)
        if self.par_s < self.num_elit:
            raise ValueError("number of parents must be greater than number of elits")
        if self.ga_param["max_num_iteration"] is None:
            # Heuristic iteration budget scaled by variable ranges and
            # population size, capped at 10M total evaluations.
            self.iterate = 0
            for i in range(0, self.dim):
                if self.var_type[i] == "int":
                    self.iterate += (
                        (self.var_bound[i][1] - self.var_bound[i][0]) * self.dim * (100 / self.population_size)
                    )
                else:
                    self.iterate += (self.var_bound[i][1] - self.var_bound[i][0]) * 50 * (100 / self.population_size)
            self.iterate = int(self.iterate)
            if (self.iterate * self.population_size) > 10000000:
                self.iterate = 10000000 / self.population_size
        else:
            self.iterate = int(self.ga_param["max_num_iteration"])
        self.crossover_type = self.ga_param["crossover_type"]
        if (
            not self.crossover_type == "uniform"
            and not self.crossover_type == "one_point"
            and not self.crossover_type == "two_point"
        ):
            raise ValueError("crossover_type must 'uniform', 'one_point', or 'two_point'")
        self.stop_iterations = False
        if self.ga_param["max_iteration_no_improv"] is None:
            self.stop_iterations = self.iterate + 1
        else:
            self.stop_iterations = int(self.ga_param["max_iteration_no_improv"])
        # Index arrays of integer and real variables (used by mutation/init).
        self.integers = np.where(self.var_type == "int")
        self.reals = np.where(self.var_type == "real")
        self.report = []
        self.best_function = []
        self.best_variable = []
        self.output_dict = {}
        self.pop = []
        self.reference_file = reference_file
    def run(self):
        """Implements the genetic algorithm"""
        # Init Population
        # Each row of ``pop`` is one chromosome: dim variables followed by
        # the objective value in the last column.
        pop = np.array([np.zeros(self.dim + 1)] * self.population_size)
        solo = np.zeros(self.dim + 1)
        var = np.zeros(self.dim)
        for p in range(0, self.population_size):
            for i in self.integers[0]:
                var[i] = np.random.randint(self.var_bound[i][0], self.var_bound[i][1] + 1)
                solo[i] = var[i].copy()
            for i in self.reals[0]:
                var[i] = self.var_bound[i][0] + np.random.random() * (self.var_bound[i][1] - self.var_bound[i][0])
                solo[i] = var[i].copy()
            obj = self.sim(var)
            solo[self.dim] = obj
            pop[p] = solo.copy()
        # Sort
        pop = pop[pop[:, self.dim].argsort()]
        self.best_function = pop[0, self.dim].copy()
        self.best_variable = pop[0, : self.dim].copy()
        t = 1
        counter = 0
        while t <= self.iterate:
            if self.population_file:
                # Save Population in CSV
                np.savetxt(self.population_file, pop, delimiter=",")
            if self.progress_bar:
                self.progress(t, self.iterate, status="GA is running...")
            # Sort
            pop = pop[pop[:, self.dim].argsort()]
            if pop[0, self.dim] < self.best_function:
                self.best_function = pop[0, self.dim].copy()
                self.best_variable = pop[0, : self.dim].copy()
            # ``counter`` tracks consecutive generations at/below the goal.
            if pop[0, self.dim] > self.goal:
                counter = 0
            else:
                counter += 1
            # Report
            self.report.append(pop[0, self.dim])
            # Normalizing objective function
            # normobj = np.zeros(self.population_size)
            minobj = pop[0, self.dim]
            if minobj < 0:
                normobj = pop[:, self.dim] + abs(minobj)
            else:
                normobj = pop[:, self.dim].copy()
            maxnorm = np.amax(normobj)
            # Invert so that lower objective => larger selection weight.
            normobj = maxnorm - normobj + 1
            # Calculate probability
            sum_normobj = np.sum(normobj)
            # prob = np.zeros(self.population_size)
            prob = normobj / sum_normobj
            cumprob = np.cumsum(prob)
            # Select parents
            par = np.array([np.zeros(self.dim + 1)] * self.par_s)
            # Elite
            for k in range(0, self.num_elit):
                par[k] = pop[k].copy()
            # Random population. Not repeated parents
            for k in range(self.num_elit, self.par_s):
                repeated_parent = True
                count = 0
                while repeated_parent:
                    count += 1
                    # Roulette-wheel selection; give up avoiding duplicates
                    # after 10 attempts.
                    index = np.searchsorted(cumprob, np.random.random())
                    is_in_list = np.any(np.all(pop[index] == par, axis=1))
                    if count >= 10 or not is_in_list:
                        repeated_parent = False
                        par[k] = pop[index].copy()
            # Mark which parents take part in crossover this generation.
            ef_par_list = np.array([False] * self.par_s)
            par_count = 0
            while par_count == 0:
                for k in range(0, self.par_s):
                    if np.random.random() <= self.prob_cross:
                        ef_par_list[k] = True
                        par_count += 1
            ef_par = par[ef_par_list].copy()
            # New generation
            pop = np.array([np.zeros(self.dim + 1)] * self.population_size)
            # Parents
            for k in range(0, self.par_s):
                pop[k] = par[k].copy()
            # Children. If children is repeated, try up to 10 times
            for k in range(self.par_s, self.population_size, 2):
                repeated_children = True
                count = 0
                while repeated_children:
                    r1 = np.random.randint(0, par_count)
                    r2 = np.random.randint(0, par_count)
                    pvar1 = ef_par[r1, : self.dim].copy()
                    pvar2 = ef_par[r2, : self.dim].copy()
                    ch = self.cross(pvar1, pvar2, self.crossover_type)
                    ch1 = ch[0].copy()
                    ch2 = ch[1].copy()
                    ch1 = self.mut(ch1)
                    ch2 = self.mutmiddle(ch2, pvar1, pvar2)
                    count += 1
                    for population in pop:
                        is_in_list_ch1 = np.all(ch1 == population[:-1])
                        is_in_list_ch2 = np.all(ch2 == population[:-1])
                        if count >= 1000 or (not is_in_list_ch1 and not is_in_list_ch2):
                            repeated_children = False
                        elif is_in_list_ch1 or is_in_list_ch2:
                            repeated_children = True
                            break
                solo[: self.dim] = ch1.copy()
                obj = self.sim(ch1)
                solo[self.dim] = obj
                pop[k] = solo.copy()
                solo[: self.dim] = ch2.copy()
                obj = self.sim(ch2)
                solo[self.dim] = obj
                pop[k + 1] = solo.copy()
            t += 1
            if counter > self.stop_iterations or self.best_function == 0:
                pop = pop[pop[:, self.dim].argsort()]
                text = str(t - 1)
                print("\nInfo: GA is terminated after " + text + " iterations")
                break
        # Last generation Info
        # Sort
        if t - 1 == self.iterate:
            text = str(t - 1)
            print("\nInfo: GA is terminated after " + text + " iterations")
        pop = pop[pop[:, self.dim].argsort()]
        self.pop = pop
        self.best_function = pop[0, self.dim].copy()
        self.best_variable = pop[0, : self.dim].copy()
        # Report
        self.report.append(pop[0, self.dim])
        self.output_dict = {"variable": self.best_variable, "function": self.best_function}
        if self.progress_bar:
            show = " " * 100
            sys.stdout.write("\r%s" % (show))
            sys.stdout.flush()
            sys.stdout.write("\r Best solution:\n %s" % (self.best_variable))
            sys.stdout.write("\n\n Objective:\n %s\n" % (self.best_function))
        return True
    def cross(self, x, y, c_type):
        """Recombine parents *x* and *y*; returns two offspring chromosomes."""
        ofs1 = x.copy()
        ofs2 = y.copy()
        if c_type == "one_point":
            ran = np.random.randint(0, self.dim)
            for i in range(0, ran):
                ofs1[i] = y[i].copy()
                ofs2[i] = x[i].copy()
        if c_type == "two_point":
            ran1 = np.random.randint(0, self.dim)
            ran2 = np.random.randint(ran1, self.dim)
            for i in range(ran1, ran2):
                ofs1[i] = y[i].copy()
                ofs2[i] = x[i].copy()
        if c_type == "uniform":
            for i in range(0, self.dim):
                ran = np.random.random()
                if ran < 0.5:
                    ofs1[i] = y[i].copy()
                    ofs2[i] = x[i].copy()
        return np.array([ofs1, ofs2])
    def mut(self, x):
        """Mutate each gene of *x* with probability prob_mut (uniform in bounds)."""
        for i in self.integers[0]:
            ran = np.random.random()
            if ran < self.prob_mut:
                x[i] = np.random.randint(self.var_bound[i][0], self.var_bound[i][1] + 1)
        for i in self.reals[0]:
            ran = np.random.random()
            if ran < self.prob_mut:
                x[i] = self.var_bound[i][0] + np.random.random() * (self.var_bound[i][1] - self.var_bound[i][0])
        return x
    def mutmiddle(self, x, p1, p2):
        """Mutate *x*, sampling each mutated gene between its parents' values."""
        for i in self.integers[0]:
            ran = np.random.random()
            if ran < self.prob_mut:
                if p1[i] < p2[i]:
                    x[i] = np.random.randint(p1[i], p2[i])
                elif p1[i] > p2[i]:
                    x[i] = np.random.randint(p2[i], p1[i])
                else:
                    # Parents agree: fall back to full-range mutation.
                    x[i] = np.random.randint(self.var_bound[i][0], self.var_bound[i][1] + 1)
        for i in self.reals[0]:
            ran = np.random.random()
            if ran < self.prob_mut:
                if p1[i] < p2[i]:
                    x[i] = p1[i] + np.random.random() * (p2[i] - p1[i])
                elif p1[i] > p2[i]:
                    x[i] = p2[i] + np.random.random() * (p1[i] - p2[i])
                else:
                    x[i] = self.var_bound[i][0] + np.random.random() * (self.var_bound[i][1] - self.var_bound[i][0])
        return x
    def evaluate(self):
        """Run the objective on the current candidate, storing it in self.goal."""
        self.goal = 1e10
        if not self.reference_file:
            self.goal = self.function(self.temp)
            return True
        else:
            self.goal = self.function(self.temp, self.reference_file)
            return True
    def sim(self, X):
        """Evaluate chromosome *X*, enforcing self.timeout via ThreadTrace."""
        self.temp = X.copy()
        if self.timeout > 0:
            thread = ThreadTrace(target=self.evaluate, daemon=None)
            thread.start()
            thread.join(timeout=self.timeout)
            if thread.is_alive():
                print("After " + str(self.timeout) + " seconds delay the given function does not provide any output")
                thread.kill()
                # after the kill, you must call join to really kill it.
                thread.join()
        else:
            self.evaluate()
        return self.goal
    def progress(self, count, total, status=""):
        """Render a simple in-place text progress bar on stdout."""
        bar_len = 50
        filled_len = int(round(bar_len * count / float(total)))
        percents = round(100.0 * count / float(total), 1)
        bar = "|" * filled_len + "_" * (bar_len - filled_len)
        sys.stdout.write("\r%s %s%s %s" % (bar, percents, "%", status))
        sys.stdout.flush()
| StarcoderdataPython |
181810 | import os
import os.path as op
import sys
import re
import logging
import shutil as sh
import tqdm
import time
from tqdm import trange
from time import sleep
def main():
    """Render four stacked tqdm progress bars advancing in lockstep."""
    # (description, colour, extra kwargs) for each stacked bar; the original
    # repeated the tqdm.tqdm(...) call four times with these variations.
    bar_specs = [
        ("First", "green", {"unit": " files"}),
        ("Second", "red", {}),
        ("Third", "blue", {}),
        ("Fourth", "cyan", {}),
    ]
    bars = [
        tqdm.tqdm(
            total=100,
            position=pos,
            colour=colour,
            desc=desc,
            ncols=80,
            mininterval=0.02,
            **extra,
        )
        for pos, (desc, colour, extra) in enumerate(bar_specs)
    ]
    try:
        for _ in range(100):
            for bar in bars:
                bar.update(1)
            time.sleep(0.02)
    finally:
        # FIX: bars were never closed; close() restores the terminal state.
        for bar in bars:
            bar.close()


if __name__ == "__main__":
    main()
| StarcoderdataPython |
43665 | <reponame>surily/Udacity-Data-Science
from setuptools import setup
# Minimal distribution metadata for the Gaussian/Binomial distributions package.
# NOTE(review): the package name spells "bionary" (sic) — renaming would break
# existing installs, so it is left as-is.
setup(name='gaussian_bionary_dist_prob',
      version='0.1',
      description='Gaussian and Binomial distributions',
      packages=['gaussian_bionary_dist_prob'],
      author = '<NAME>',
      author_email = '<EMAIL>',
      zip_safe=False)
| StarcoderdataPython |
3381555 | <gh_stars>10-100
# -*- coding: utf-8 -*-
'''
Copy this as a secrets.py

Template for API credentials: fill in real values locally and never commit
the populated file to version control.
'''
token = '-----------------------------------------------------------------------'
client_secret = '--------------------'
app_id = 1111111  # numeric application identifier placeholder
| StarcoderdataPython |
class Solution:
    def fourSumCount(self, A: List[int], B: List[int], C: List[int], D: List[int]) -> int:
        """Count tuples (i, j, k, l) with A[i] + B[j] + C[k] + D[l] == 0.

        Precomputes a frequency table of all C+D sums, then for every A+B sum
        looks up how many C+D pairs cancel it. O(n^2) time, O(n^2) space.
        """
        # FIX/generalization: the original indexed all four lists with
        # len(A), silently mis-counting (or crashing) when lengths differ;
        # iterating the lists directly handles any lengths.
        sums_cd = {}
        for c in C:
            for d in D:
                sums_cd[c + d] = sums_cd.get(c + d, 0) + 1
        # A pair (a, b) is cancelled by every (c, d) with c + d == -(a + b).
        return sum(sums_cd.get(-(a + b), 0) for a in A for b in B)
| StarcoderdataPython |
# webpack-dev-server style configuration for the Flexio dev UI: serves on the
# Docker bridge address and proxies single-segment paths back to the public
# dev host with the leading segment stripped.
stack_server_config: dict = {
    "host": "172.17.0.1",  # Docker default bridge gateway
    "disableHostCheck": True,
    "publicPath": "/",
    "public": "https://dev.flexio.io/devui",
    "sockPath": "/socketjs",
    "proxy": [
        {
            # Match URLs of the form //<word>/... and rewrite them to /.
            "context": [
                "//[a-z]+/*"
            ],
            "logLevel": "debug",
            "target": "https://dev.flexio.io/devui",
            "secure": False,  # accept the dev host's TLS certificate as-is
            "pathRewrite": {
                "^//[a-z]+/*": "/"
            }
        }
    ]
}
| StarcoderdataPython |
1753938 | <gh_stars>10-100
def method1_iterative(n: int, l: list) -> int:
    """Binary-search the sorted list *l* for *n*.

    Returns the index of *n* in *l*, or -1 when *n* is absent.
    """
    lo, hi = 0, len(l) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        value = l[mid]
        if value == n:
            return mid
        if value < n:
            lo = mid + 1
        else:
            hi = mid - 1
    return -1
if __name__ == "__main__":
    # Benchmark snippet kept for reference as an inert string.
    # NOTE(review): the call shown uses a four-argument signature
    # (l, low, high, n) that does not match the current two-argument
    # method1_iterative(n, l) — presumably from an earlier revision.
    """
    n = 9999
    l = [i for i in range(10000)] # [0, 1, 3, ...., 9999]
    from timeit import timeit
    print(timeit(lambda: method1_iterative(l, 0, len(l)-1, n), number=10000)) # 0.03099031199963065
    """
| StarcoderdataPython |
3301829 | <reponame>jacaboyjr/pythonbirds
"""
Reads the user's day, month and year of birth and echoes them back.
"""
dia = int(input('Qual foi o dia do seu nascimento? '))   # day of birth
mes = str(input('Qual foi o mes do seu nascimento? '))   # month of birth
ano = int(input('Qual foi o ano do seu nascimento? '))   # year of birth
print(f'Você nasceu no DIA {dia} de {mes} de {ano}, correto? ')
3248861 | import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import auc, roc_curve
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import LinearSVC
def mean_on_fold(train, test, y):
    """Baseline: predict the training-fold mean of *y* for every test example."""
    train_mean = y[train].mean()
    n_test = int(test.sum())
    return np.full((n_test, 1), train_mean), None
def random_on_fold(test):
    """Baseline: return standard-normal noise predictions for the test fold."""
    n_test = int(test.sum())
    return np.random.randn(n_test, 1), None
def xu_et_al_on_fold(test, X, learn_options):
    """Score the test fold with the fixed Xu et al. weight matrix.

    Loads the published per-position A/T/C/G weights from
    ``learn_options["xu_matrix_file"]`` and applies a logistic transform to the
    linear score. Returns (y_pred, coef).
    """
    coef = pd.read_csv(learn_options["xu_matrix_file"], skiprows=1, delimiter="\t")
    coef = coef[["A", "T", "C", "G"]]  # swap columns so that they are in correct order
    coef = coef.values.flatten()[:, None]
    X = X.copy()
    # Pad with three dummy positions, then drop the first three — aligns the
    # one-hot feature columns with the matrix rows (assumes 4 features per
    # position; TODO confirm against the feature builder).
    X = np.append(X, np.zeros((X.shape[0], 3 * 4)), axis=1)
    X = X[:, 3 * 4 :]
    # Sigmoid of the linear score.
    y_pred = 1.0 / (1 + np.exp(-np.dot(X[test], coef)))
    return y_pred, coef
def doench_on_fold(train, test, y, y_all, X, learn_options):
    """L1-SVM feature selection followed by L2 logistic regression.

    The best L1 penalty is chosen by 10-fold stratified CV (stratified by
    target gene) maximizing ROC AUC on the binarized target; the selected
    features are then refit with an L2 logistic regression whose class-1
    probabilities on the test fold are returned.

    Returns (y_pred, clf) where y_pred has shape (n_test, 1).
    """
    auto_class_weight = None  # 'auto'/None
    verbose = False
    # Geometric grid of 45 candidate L1 penalties.
    penalty = [0.005 * pow(1.15, x) for x in range(0, 45)]
    y_bin = y_all[learn_options["binary target name"]].values[:, None]
    label_encoder = LabelEncoder()
    label_encoder.fit(y_all["Target gene"].values[train])
    gene_classes = label_encoder.transform(y_all["Target gene"].values[train])
    skf = StratifiedKFold(n_splits=10, shuffle=True)
    # FIX: np.bool was removed in NumPy 1.24; use the builtin bool dtype.
    cv = skf.split(np.zeros(len(gene_classes), dtype=bool), gene_classes)
    cv_results = np.zeros((10, len(penalty)))
    for j, split in enumerate(cv):
        train_inner, test_inner = split
        for i, c in enumerate(penalty):
            # fit an L1-penalized SVM classifier
            clf = LinearSVC(
                penalty="l1", C=c, dual=False, class_weight=auto_class_weight
            )
            clf.fit(X[train][train_inner], y_bin[train][train_inner].flatten())
            # pass features with non-zero coeff to Logistic with l2 penalty
            non_zero_coeff = clf.coef_ != 0.0
            # FIX: the original tested ``np.all(non_zero_coeff is False)``,
            # an identity test on an array that is always False — so the
            # guard never fired and an all-zero selection crashed downstream.
            if not non_zero_coeff.any():
                # if all are zero, turn one on so as to be able to run the code.
                non_zero_coeff[0] = True
            clf = LogisticRegression(penalty="l2", class_weight=auto_class_weight)
            clf.fit(
                X[train][train_inner][:, non_zero_coeff.flatten()],
                y[train][train_inner].flatten(),
            )
            y_test = clf.predict_proba(
                X[train][test_inner][:, non_zero_coeff.flatten()]
            )[:, 1]
            fpr, tpr, _ = roc_curve(y_bin[train][test_inner], y_test)
            # FIX: ``np.nan in arr`` relies on equality and NaN != NaN, so it
            # can never be True; check with np.isnan instead.
            if np.isnan(fpr).any():
                raise AssertionError("found nan fpr")
            if np.isnan(tpr).any():
                raise AssertionError("found nan tpr")
            roc_auc = auc(fpr, tpr)
            if verbose:
                print(j, i, roc_auc)
            cv_results[j][i] = roc_auc
    # Pick the penalty with the best mean AUC across folds.
    best_penalty = penalty[np.argmax(np.mean(cv_results, axis=0))]
    print(f"best AUC for penalty: {np.median(cv_results, axis=0)}")
    # Refit feature selection on the full training fold with the best penalty,
    # then the final logistic model on the selected features.
    clf = LinearSVC(
        penalty="l1", C=best_penalty, dual=False, class_weight=auto_class_weight
    )
    clf.fit(X[train], y_bin[train].flatten())
    non_zero_coeff = clf.coef_ != 0.0
    clf = LogisticRegression(penalty="l2", class_weight=auto_class_weight)
    clf.fit(X[train][:, non_zero_coeff.flatten()], y[train].flatten())
    y_pred = clf.predict_proba(X[test][:, non_zero_coeff.flatten()])[:, 1:2]
    return y_pred, clf
def sgrna_from_doench_on_fold(feature_sets, test, X):
    """Use the precomputed 'sgRNA Score' feature column directly as prediction."""
    if len(feature_sets) != 1:
        raise AssertionError("should only use sgRNA Score here")
    (only_feature,) = feature_sets.keys()
    if only_feature != "sgRNA Score":
        raise AssertionError()
    return X[test][:, 0], None
def SVC_on_fold(train, test, y_all, X, learn_options):
    """Fit a linear SVM on the binarized target; return test decision values.

    Returns (y_pred, clf) where y_pred is the (n_test, 1) decision function.
    """
    y_bin = y_all[learn_options["binary target name"]].values[:, None]
    clf = LinearSVC(penalty="l2", dual=False)
    clf.fit(X[train], y_bin[train].flatten())
    y_pred = clf.decision_function(X[test])[:, None]
    return y_pred, clf
| StarcoderdataPython |
100743 | """"""
from collections import OrderedDict
import logging
import mmap
from struct import unpack
from pydcmjpeg._markers import MARKERS
from pydcmjpeg.jpeg import get_jpeg
LOGGER = logging.getLogger('pdcmjpeg')
def jpgmap(fpath):
    """Return a memory-mapped representation of the JPEG file at `fpath`."""
    LOGGER.debug('Mapping file: {}'.format(fpath))
    with open(fpath, 'r+b') as fp:
        # Size 0 means whole file
        mm = mmap.mmap(fp.fileno(), 0)
    # NOTE(review): map_jpg() is an unimplemented stub returning None, so this
    # currently constructs JPEGDict(None) — confirm intended behaviour.
    return JPEGDict(map_jpg(mm))
def jpgread(fpath):
    """Parse the JPEG file at `fpath` and return its object representation."""
    LOGGER.debug("Reading file: {}".format(fpath))
    with open(fpath, 'rb') as fp:
        # Build the marker map first, then hand it to the JPEG factory.
        info = parse_jpg(fp)
        LOGGER.debug("File parsed successfully")
        return get_jpeg(fp, info)
def jpgwrite(fpath, jpg):
    """Write the JPEG object `jpg` to `fpath`.

    Not yet supported; always raises NotImplementedError.
    """
    raise NotImplementedError('Writing JPEG files is not supported')
def _marker_key(name, offset):
"""Return a string combining `name` and `offset`"""
return '{0}@{1}'.format(name, offset - 2)
def map_jpg(mm):
    # TODO: unimplemented stub — jpgmap() expects this to build the marker map
    # from the memory-mapped file; it currently returns None.
    pass
def parse_jpg(fp):
    """Walk the marker stream of a JPEG/JPEG-LS/JPEG2000 file.

    Returns an OrderedDict mapping "NAME@offset" keys to
    (marker, fill_bytes, parameters) entries without decoding any image
    data.  Raises ValueError if the stream does not start with SOI/SOC
    and NotImplementedError for unknown markers.

    FIX: the original compared marker names with ``is`` against string
    literals (e.g. ``name is 'SOT'``), which relies on CPython string
    interning and emits a SyntaxWarning on Python 3.8+; these are now
    ``==`` comparisons with identical behaviour.
    """
    # Passing 10918-2 Process 1 compliance tests
    if fp.read(1) != b'\xff':
        fp.seek(0)
        raise ValueError('File is not JPEG')

    JPEG_TYPE = 'JPEG'

    _fill_bytes = 0
    while fp.read(1) == b'\xff':
        _fill_bytes += 1

    #if _fill_bytes == 1:
    #    _fill_bytes = 0

    fp.seek(-2, 1)

    # Confirm SOI or SOC marker
    marker = fp.read(2)
    if marker not in [b'\xFF\xD8', b'\xFF\x4F']:
        raise ValueError('No SOI or SOC marker found')

    if marker == b'\xFF\xD8':
        info = OrderedDict()
        info[_marker_key('SOI', fp.tell())] = (unpack('>H', b'\xFF\xD8')[0], _fill_bytes, {})
    elif marker == b'\xFF\x4F':
        info = OrderedDict()
        info[_marker_key('SOC', fp.tell())] = (unpack('>H', b'\xFF\x4F')[0], _fill_bytes, {})

    # JPEG2000: offset of the first byte of the current SOT marker.
    START_OFFSET = None

    while True:
        _fill_bytes = 0

        # Skip fill
        next_byte = fp.read(1)
        while next_byte == b'\xFF':
            _fill_bytes += 1
            next_byte = fp.read(1)

        # Remove the byte thats actually part of the marker
        if _fill_bytes:
            _fill_bytes -= 1

        fp.seek(-2, 1)
        _marker = unpack('>H', fp.read(2))[0]
        # Debug trace; for unknown markers 'UNKNOWN'[0] yields just 'U'.
        mm = MARKERS.get(_marker, 'UNKNOWN')[0]
        print('{}@{} : {}'.format(hex(_marker), fp.tell() - 2, mm))

        if _marker in MARKERS:
            name, description, handler = MARKERS[_marker]
            #print(hex(_marker), name, fp.tell() - 2)
            key = _marker_key(name, fp.tell())

            if name not in ['SOS', 'EOI', 'LSE', 'QCC', 'SOD', 'SOT', 'COC']:
                if handler is None:
                    #length = unpack('>H', fp.read(2))[0] - 2
                    #fp.seek(length, 1)
                    #fp.seek(2, 1)
                    info[key] = (_marker, _fill_bytes, {})
                    continue

                info[key] = (_marker, _fill_bytes, handler(fp))
                print(key, _marker, info[key])
            elif name == 'SOT':
                START_OFFSET = fp.tell() - 2
                print('SOT offset', START_OFFSET)
                info[key] = (_marker, _fill_bytes, handler(fp))
                print(key, _marker, info[key])
            elif name == 'SOD':
                info[key] = (_marker, _fill_bytes, handler(fp))
                print(key, _marker, info[key])

                # Tile part length
                # get last SOT marker
                sot_keys = [kk for kk in info.keys() if 'SOT' in kk]
                sot = info[sot_keys[-1]]
                tile_length = sot[2]['Psot']
                print(tile_length)
                if tile_length == 0:
                    # Tile goes to EOC
                    pass
                else:
                    # tile_length is from first byte of SOT to end of tile-part
                    fp.seek(START_OFFSET)
                    fp.seek(START_OFFSET + tile_length)
                    print(START_OFFSET + tile_length, fp.tell())
            elif name in ['QCC', 'COC']:
                # JPEG2000: these handlers need Csiz from the SIZ segment.
                csiz = None
                for kk in info:
                    if 'SIZ' in kk:
                        csiz = info[kk][2]['Csiz']

                if not csiz:
                    raise ValueError('Bad order')

                info[key] = (_marker, _fill_bytes, handler(fp, csiz))
                print(key, _marker, info[key])
            elif name == 'SOS':
                # SOS's info dict contains an extra 'encoded_data' keys
                # which use RSTN@offset and ENC@offset
                for kk in info:
                    if 'SOF55' in kk:
                        JPEG_TYPE = 'JPEG-LS'
                        break

                info[key] = [_marker, _fill_bytes, handler(fp, jpg=JPEG_TYPE)]
                print(key, _marker, info[key])

                sos_info = {}
                encoded_data = bytearray()
                _enc_start = fp.tell()
                while True:
                    _enc_key = _marker_key('ENC', _enc_start)
                    prev_byte = fp.read(1)
                    if prev_byte != b'\xFF':
                        encoded_data.extend(prev_byte)
                        continue

                    # To get here next_byte must be 0xFF
                    # If the next byte is 0x00 then keep reading
                    # JPEGLS: if the next bit is 0, discard the inserted bit
                    #         if the next bit is 1, then is a marker
                    # If the next byte is 0xFF then keep reading until
                    #   a non-0xFF byte is found
                    # If the marker is a RST marker then keep reading
                    # Otherwise rewind to the start of the fill bytes and break
                    next_byte = fp.read(1)
                    if JPEG_TYPE == 'JPEG':
                        if next_byte == b'\x00':
                            # Skip padding bytes
                            # The previous byte wasn't added so do it now
                            encoded_data.extend(prev_byte)
                            #encoded_data.extend(next_byte)
                            continue
                    elif JPEG_TYPE == 'JPEG-LS':
                        encoded_data.extend(prev_byte)
                        encoded_data.extend(next_byte)
                        # NOTE(review): next_byte becomes an int here, so the
                        # bytes comparisons in the marker handling below can
                        # never match on this path -- confirm JPEG-LS RST
                        # handling is exercised/correct.
                        next_byte = unpack('B', next_byte)[0]
                        #print('nb', next_byte, bin(next_byte))
                        #print('{:>08b}'.format(next_byte))
                        #print('{:>08b}'.format(next_byte)[0])
                        #print('{:>08b}'.format(next_byte)[0] == '0')
                        if '{:>08b}'.format(next_byte)[0] == '0':
                            #print('MSB is 0')
                            continue

                    # To get here next_byte must be non-padding (non 0x00)
                    # so we must be at the end of the encoded data
                    info[key][2].update({_enc_key : encoded_data})
                    encoded_data = bytearray()

                    # The number of 0xFF bytes before the marker
                    # i.e. 0xFF 0xFF 0xFF 0xD9 is 2 fill bytes
                    _sos_fill_bytes = 0

                    # While we still have 0xFF bytes
                    while next_byte == b'\xFF':
                        _sos_fill_bytes += 1
                        next_byte = fp.read(1)

                    # Check to see if marker is RST_m
                    if next_byte in [b'\xD0', b'\xD1', b'\xD2', b'\xD3',
                                     b'\xD4', b'\xD5', b'\xD6', b'\xD7']:
                        _sos_marker = unpack('>H', b'\xFF' + next_byte)[0]
                        _sos_marker, _, _ = MARKERS[_sos_marker]
                        _sos_key = _marker_key(_sos_marker, fp.tell())
                        info[key][2].update({_sos_key : None})
                        _enc_start = fp.tell()
                        continue

                    # End of the current scan, rewind and break
                    # Back up to the start of the 0xFF
                    # Currently position at first byte after marker
                    fp.seek(-2 - _sos_fill_bytes, 1)
                    break
            elif name == 'EOI':
                info[key] = (_marker, _fill_bytes, {})
                print(key, _marker, info[key])
                break
            elif name == 'LSE':
                # JPEG-LS
                info[key] = (_marker, _fill_bytes, handler(fp, info))
                print(key, _marker, info[key])
        else:
            print('Unknown marker {0} at offset {1}'
                  .format(hex(_marker), fp.tell() - 2))
            raise NotImplementedError

    return info
| StarcoderdataPython |
35756 | <reponame>raistlin7447/AoC2021<gh_stars>0
# Advent of Code 2021, day 6 (part 1): lanternfish population after 80 days.
with open("day6_input.txt") as f:
    initial_fish = list(map(int, f.readline().strip().split(",")))

# Bucket the fish by internal-timer value (0-8) instead of tracking each
# fish individually, so one simulated day is O(9) work.
fish = [0] * 9
for initial_f in initial_fish:
    fish[initial_f] += 1

for day in range(80):
    new_fish = [0] * 9
    for state in range(9):
        if state == 0:
            # A timer-0 fish resets to 6 and spawns a new fish at timer 8.
            new_fish[6] += fish[0]
            new_fish[8] += fish[0]
        else:
            new_fish[state-1] += fish[state]
    fish = new_fish

print(sum(fish))
| StarcoderdataPython |
1704107 | """
This example code illustrates how to access and reproject a TerraFusion
Advanced Fusion file in Python.
Usage: save this script and run
$python modis2ug.rn.py
The HDF file must be in your current working directory.
Tested under: Python 3.6.6 :: Anaconda custom (64-bit)
Last updated: 2019-04-05
"""
import h5py
import pytaf
import numpy as np
# Open AF file.
file_name = 'misr_on_modis_SrcLowAnAfBlueGreen_Trg1KM8_9_69365.h5'

# Generate 1-d lat/lon for the 0.05-degree target grid (7200 x 3600).
cellSize = 0.05
x0, xinc, y0, yinc = (-180, cellSize, 90, -cellSize)
nx, ny = (360*20, 180*20)
# NOTE(review): np.linspace includes both endpoints, so the actual grid
# spacing is xinc*nx/(nx-1), slightly larger than cellSize -- confirm the
# intended grid definition (np.arange-style half-open may be what's meant).
x = np.linspace(x0, x0 + xinc*nx, nx)
y = np.linspace(y0, y0 + yinc*ny, ny)

with h5py.File(file_name, 'r') as f:
    # Read MODIS Radiance dataset (first band only), as float64 for pytaf.
    modis_dset = f['/Target/Data_Fields/MODIS_Radiance']
    modis_data = modis_dset[0,:,:].astype(np.float64)
    print(modis_data[0,0:10])
    # Read source lat/lon dataset.
    modis_ds_lat = f['/Geolocation/Latitude']
    modis_lat = modis_ds_lat[:,:].astype(np.float64)
    modis_ds_lon = f['/Geolocation/Longitude']
    modis_lon = modis_ds_lon[:,:].astype(np.float64)
    f.close()

# Set max radius for the nearest-neighbour search: one grid cell's arc
# length on a spherical Earth.
M_PI=3.14159265358979323846
earthRadius = 6367444
max_r = earthRadius * cellSize * M_PI / 180

# NOTE(review): `index` and `distance` are built but never used below.
index = np.arange(nx*ny, dtype=np.int32)
distance = np.arange(nx*ny, dtype=np.float64).reshape((ny,nx))

# Kent: try nnInterploate first.
# In the summaryInterpolate, tarSD and nSouPixels are also output parameters.
n_src = modis_lat.size;
print(n_src)
n_trg = nx * ny;
print(n_trg)

# Find indexes of nearest neighbor point.
trg_data = pytaf.resample_n_g(modis_lat, modis_lon,
                              x, y, modis_data, max_r)
print(trg_data)
print('Finished retrieving data with index.')

# Open file for writing the regridded result plus its 1-d grid axes.
f2 = h5py.File('modis2ug.rn.h5', 'w')
dset = f2.create_dataset('/UG_Radiance', data=trg_data)
dset_lat = f2.create_dataset('/Latitude', data=y)
dset_lon = f2.create_dataset('/Longitude', data=x)
# TODO: Add CF attributes on dataset.
f2.close()
| StarcoderdataPython |
1632031 | <gh_stars>1-10
""" Test example of LAR of a 2-complex with non-contractible and non-manifold cells"""
from larlib import *
# 2D vertex coordinates of the complex.
V = [[0.0989,0.492],[0.5,0.492],[0.708,0.6068],[0.2966,0.6068],[1.0,0.0],
    [0.0,0.0],[1.0,1.0],[0.0,1.0],[0.5,0.2614],[0.0989,0.2614],[0.8034,
    0.1273],[0.8034,0.0386],[0.0989,0.9068],[0.892,0.9068],[0.708,0.7886],
    [0.9375,0.1273],[0.0989,0.1273],[0.8034,0.492],[0.7693,0.4009],[0.892,
    0.492],[0.183,0.3097],[0.3193,0.4182],[0.6761,0.1773],[0.2966,0.7886]]
# 2-cells (faces) given as vertex-index lists.
FV = [[0,4,5,6,7,9,10,11,12,13,15,16,17,19],[10,11,15],[0,20,21],[8,18,
    22], [0,1,8,9,20,21],[1,8,9,10,16,17,18,22],[0,1,2,3,12,13,14,17,19,23],
    [2,3,14,23]]
# 1-cells (edges) given as vertex-index pairs.
EV = [(18,22),(10,11),(10,17),(0,20),(1,17),(8,9),(0,21),(12,13),(14,23),
    (17,19),(4,5),(3,23),(0,12),(9,16),(2,3),(13,19),(6,7),(2,14),(0,1),
    (10,15),(4,6),(8,22),(5,7),(20,21),(1,8),(8,18),(10,16),(0,9),(11,15)]
# 0-cells: one singleton list per vertex, used for numbering in the viewer.
VV = AA(LIST)(range(len(V)))

# Build the wireframe from the edges and display it with cell numbering.
submodel = STRUCT(MKPOLS((V,EV)))
VIEW(larModelNumbering(1,1,1)(V,[VV,EV,FV],submodel,0.3))
| StarcoderdataPython |
1739556 | <gh_stars>0
import collections
import enum
import time
import numpy as np
from collections import namedtuple, deque
# one single experience step
from common.environments import Status
from common.fast_rl.common.statistics import StatisticsForValueBasedRL, StatisticsForPolicyBasedRL
from common.fast_rl.rl_agent import BaseAgent
ExperienceWithNoise = namedtuple('ExperienceWithNoise', ['state', 'action', 'noise', 'reward', 'done', 'agent_type'])
ExperienceFirstLastWithNoise = collections.namedtuple(
'ExperienceFirstLastWithNoise', ['state', 'action', 'noise', 'reward', 'last_state', 'last_step', 'done', 'agent_type']
)
class AgentType(enum.Enum):
    """Identifies which of the two DDPG agents produced an experience."""

    # FIX: the original had a trailing comma after ``0``, making the member
    # value the tuple ``(0,)`` instead of the int ``0`` -- inconsistent with
    # BALANCING_AGENT.  Members are only referenced symbolically in this
    # module (stored in experience tuples), so normalising the value is safe.
    SWING_UP_AGENT = 0
    BALANCING_AGENT = 1
class ExperienceSourceSingleEnvDdpgTwo:
    """
    Simple n-step experience source using only SINGLE environment
    Every experience contains n list of Experience entries
    """
    def __init__(self, params, env, agent_swing_up, agent_balancing, steps_count=2, step_length=-1, render=False):
        """Wrap `env` and the two agents; yields n-step experience tuples.

        steps_count: number of consecutive experiences per yielded tuple.
        step_length: -1 means MLP (single state); a value >= 1 means RNN
                     (a stacked window of that many recent states).
        """
        assert isinstance(agent_swing_up, BaseAgent)
        assert isinstance(agent_balancing, BaseAgent)
        assert isinstance(steps_count, int)
        assert steps_count >= 1
        self.params = params
        self.env = env
        self.agent_swing_up = agent_swing_up
        self.agent_balancing = agent_balancing
        self.steps_count = steps_count
        self.step_length = step_length  # -1 means MLP; >= 1 means RNN window length
        self.render = render
        self.episode_reward_and_info_lst = []
        # Rolling buffer of the most recent raw states (for RNN windows).
        self.state_deque = deque(maxlen=30)
        self.current_agent = self.agent_swing_up
        self.current_agent_type = AgentType.SWING_UP_AGENT

    def get_processed_state(self, new_state):
        """Append `new_state` and return the model input.

        For MLP (step_length == -1) this is just the newest state; for RNN
        it is an array of the last `step_length` states, left-padded with
        zeros while the buffer is still short.
        """
        self.state_deque.append(new_state)
        if self.step_length == -1:
            next_state = np.array(self.state_deque[-1])
        elif self.step_length >= 1:
            if len(self.state_deque) < self.step_length:
                next_state = list(self.state_deque)
                # Pad at the front with zero-states up to the window length.
                for _ in range(self.step_length - len(self.state_deque)):
                    next_state.insert(0, np.zeros(shape=self.env.observation_space.shape))
                next_state = np.array(next_state)
            else:
                next_state = np.array(
                    [
                        self.state_deque[-self.step_length + offset] for offset in range(self.step_length)
                    ]
                )
        else:
            raise ValueError()
        return next_state

    def set_current_agent(self, state):
        """Select the swing-up or balancing agent from the status flag.

        The status is carried in the last element of the raw state vector.
        """
        status_value = state[-1]
        if status_value in [Status.SWING_UP.value, Status.SWING_UP_TO_BALANCING.value]:
            self.current_agent = self.agent_swing_up
            self.current_agent_type = AgentType.SWING_UP_AGENT
        else:
            self.current_agent = self.agent_balancing
            self.current_agent_type = AgentType.BALANCING_AGENT

    def __iter__(self):
        """Endlessly step the environment, yielding tuples of experiences."""
        state = self.env.reset()
        history = deque(maxlen=self.steps_count)
        cur_episode_reward = 0.0
        cur_step = 0
        agent_state = self.current_agent.initial_agent_state()
        iter_idx = 0
        while True:
            if self.render:
                self.env.render()
            self.set_current_agent(state)
            # Agents take batched inputs; wrap the single state/agent-state.
            states_input = []
            processed_state = self.get_processed_state(state)
            states_input.append(processed_state)
            agent_states_input = []
            agent_states_input.append(agent_state)
            actions, noises, new_agent_states = self.current_agent(states_input, agent_states_input)
            noise = noises[0]
            agent_state = new_agent_states[0]
            action = actions[0]
            next_state, r, is_done, info = self.env.step(action)
            cur_episode_reward += r
            cur_step += 1
            if state is not None:
                history.append(
                    ExperienceWithNoise(
                        state=processed_state,
                        action=action,
                        noise=noise,
                        reward=r,
                        done=is_done,
                        agent_type=self.current_agent_type
                    )
                )
            if len(history) == self.steps_count:
                yield tuple(history)
            state = next_state
            if is_done:
                # in case of very short episode (shorter than our steps count), send gathered history
                if 0 < len(history) < self.steps_count:
                    yield tuple(history)
                # generate tail of history
                while len(history) > 1:
                    # removes the element (the old one) from the left side of the deque and returns the value
                    history.popleft()
                    yield tuple(history)
                state = self.env.reset()
                agent_state = self.current_agent.initial_agent_state()
                self.episode_reward_and_info_lst.append((cur_episode_reward, info))
                cur_episode_reward = 0.0
                cur_step = 0
                history.clear()
            iter_idx += 1

    def pop_episode_reward_and_info_lst(self):
        """Return and clear the (episode_reward, info) pairs gathered so far."""
        episode_reward_and_info = self.episode_reward_and_info_lst
        if episode_reward_and_info:
            self.episode_reward_and_info_lst = []
        return episode_reward_and_info
class ExperienceSourceSingleEnvFirstLastDdpgTwo(ExperienceSourceSingleEnvDdpgTwo):
    """Collapses each n-step experience chain into a first/last record with
    the discounted reward accumulated over the intermediate steps."""

    def __init__(self, params, env, agent_swing_up, agent_balancing, gamma, steps_count=1, step_length=-1, render=False):
        assert isinstance(gamma, float)
        # Ask the base class for one extra step so the trailing element can
        # serve as `last_state`.
        super(ExperienceSourceSingleEnvFirstLastDdpgTwo, self).__init__(
            params, env, agent_swing_up, agent_balancing, steps_count + 1, step_length, render
        )
        self.gamma = gamma
        self.steps_count = steps_count

    def __iter__(self):
        for exp in super(ExperienceSourceSingleEnvFirstLastDdpgTwo, self).__iter__():
            # Terminal chains that are not longer than steps_count have no
            # successor state; otherwise the last element is the successor.
            if exp[-1].done and len(exp) <= self.steps_count:
                last_state = None
                elems = exp
            else:
                last_state = exp[-1].state
                elems = exp[:-1]
            # Fold rewards back-to-front: r_0 + gamma*r_1 + gamma^2*r_2 + ...
            total_reward = 0.0
            for e in reversed(elems):
                total_reward *= self.gamma
                total_reward += e.reward
            exp = ExperienceFirstLastWithNoise(
                state=exp[0].state, action=exp[0].action, noise=exp[0].noise, reward=total_reward,
                last_state=last_state, last_step=len(elems), done=exp[-1].done, agent_type=exp[0].agent_type
            )
            yield exp
class RewardTrackerMatlabPendulum:
    """Tracks per-episode rewards, prints/logs throttled progress reports,
    and signals when the moving-average reward crosses the stop threshold."""

    def __init__(self, params, stop_mean_episode_reward, average_size_for_stats, frame=True, draw_viz=True, stat=None, logger=None):
        self.params = params
        self.min_ts_diff = 1  # minimum seconds between progress printouts
        self.stop_mean_episode_reward = stop_mean_episode_reward
        self.stat = stat
        self.average_size_for_stats = average_size_for_stats
        self.draw_viz = draw_viz
        # True if progress is counted in frames, False for steps (labels only).
        self.frame = frame
        self.episode_reward_list = None
        self.done_episodes = 0
        self.mean_episode_reward = 0.0
        self.logger = logger

    def __enter__(self):
        """Reset timers and the reward list; returns self for `with` use."""
        self.start_ts = time.time()
        self.ts = time.time()
        self.ts_frame = 0
        self.episode_reward_list = []
        return self

    def start_reward_track(self):
        """Non-context-manager entry point; just delegates to __enter__()."""
        self.__enter__()

    def __exit__(self, *args):
        pass

    def set_episode_reward(self, episode_reward_and_info, episode_done_step, epsilon):
        """Record one finished episode; returns (solved, mean_episode_reward).

        episode_reward_and_info: (episode_reward, info_dict) pair as produced
        by the experience source.  Prints a progress line at most once per
        `min_ts_diff` seconds, and always once when solved.
        """
        self.done_episodes += 1
        self.episode_reward_list.append(episode_reward_and_info[0])
        episode_info = episode_reward_and_info[1]
        self.mean_episode_reward = np.mean(self.episode_reward_list[-self.average_size_for_stats:])
        current_ts = time.time()
        elapsed_time = current_ts - self.start_ts
        ts_diff = current_ts - self.ts
        is_print_performance = False
        if ts_diff > self.min_ts_diff:
            is_print_performance = True
            self.print_performance(
                episode_done_step, current_ts, ts_diff, self.mean_episode_reward,
                epsilon, elapsed_time, episode_info
            )
        if self.mean_episode_reward > self.stop_mean_episode_reward:
            # Make sure a final progress line is emitted before announcing.
            if not is_print_performance:
                self.print_performance(
                    episode_done_step, current_ts, ts_diff, self.mean_episode_reward,
                    epsilon, elapsed_time, episode_info
                )
            if self.frame:
                msg = "Solved in {0} frames and {1} episodes!".format(episode_done_step, self.done_episodes)
                print(msg)
                self.logger.info(msg)
            else:
                msg = "Solved in {0} steps and {1} episodes!".format(episode_done_step, self.done_episodes)
                print(msg)
                self.logger.info(msg)
            return True, self.mean_episode_reward
        return False, self.mean_episode_reward

    def print_performance(
            self, episode_done_step, current_ts, ts_diff, mean_episode_reward, epsilon,
            elapsed_time, episode_info
    ):
        """Print/log one progress line and optionally update the stats viz."""
        # Speed = steps (or frames) processed since the last printout.
        speed = (episode_done_step - self.ts_frame) / ts_diff
        self.ts_frame = episode_done_step
        self.ts = current_ts
        # Epsilon may be a single value or a pair (one per agent).
        if isinstance(epsilon, tuple) or isinstance(epsilon, list):
            epsilon_str = "{0:5.3f}, {1:5.3f}".format(
                epsilon[0] if epsilon[0] else 0.0,
                epsilon[1] if epsilon[1] else 0.0
            )
        else:
            epsilon_str = "{0:5.3f}".format(
                epsilon if epsilon else 0.0,
            )

        episode_reward_str = "{0:7.3f} [{1:7.3f}, {2:6.2f}, {3:6.2f}]".format(
            self.episode_reward_list[-1],
            episode_info["episode_position_reward_list"],
            episode_info["episode_pendulum_velocity_reward"],
            episode_info["episode_action_reward"]
        )

        msg = "[{0:6}/{1}] done {2:4} games, episode_reward: {3}, mean_{4}_episode_reward: {5:7.3f}, " \
              "status: [{6:3d}|{7:3d}], epsilon: {8}, speed: {9:5.2f}{10}, elapsed time: {11}".format(
            episode_done_step,
            self.params.MAX_GLOBAL_STEP,
            len(self.episode_reward_list),
            episode_reward_str,
            self.average_size_for_stats,
            mean_episode_reward,
            episode_info["count_swing_up_states"],
            episode_info["count_balancing_states"],
            epsilon_str,
            speed,
            "fps" if self.frame else "steps/sec.",
            time.strftime("%Hh %Mm %Ss", time.gmtime(elapsed_time)),
        )
        print(msg, flush=True)
        self.logger.info(msg)

        # Forward the numbers to the appropriate statistics visualiser.
        if self.draw_viz and self.stat:
            if isinstance(self.stat, StatisticsForValueBasedRL):
                self.stat.draw_performance(episode_done_step, mean_episode_reward, speed, epsilon)
            elif isinstance(self.stat, StatisticsForPolicyBasedRL):
                self.stat.draw_performance(episode_done_step, mean_episode_reward, speed)
            else:
                raise ValueError()
128468 | # Generated by Django 2.2.5 on 2019-10-16 18:33
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import tinymce.models
class Migration(migrations.Migration):
    """Auto-generated migration: adds author/editors/last_editor relations
    to WikiPage and adjusts the content and url fields."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('wiki', '0004_auto_20190929_1232'),
    ]

    operations = [
        migrations.AddField(
            model_name='wikipage',
            name='author',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='author', to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='wikipage',
            name='editors',
            field=models.ManyToManyField(to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='wikipage',
            name='last_editor',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='last_editor', to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='wikipage',
            name='content',
            field=tinymce.models.HTMLField(max_length=5000, verbose_name='Content'),
        ),
        migrations.AlterField(
            model_name='wikipage',
            name='url',
            # NOTE(review): this default looks like an auto-generated
            # placeholder ("djangodbmodelsfieldsCharField") -- confirm it
            # is intentional before reusing this migration elsewhere.
            field=models.CharField(default='djangodbmodelsfieldsCharField', max_length=64),
        ),
    ]
| StarcoderdataPython |
1715496 | from django.contrib import admin
from django.urls import include, path
from . import views
# For production, do not store static files in Django.
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf.urls.static import static
from django.conf import settings
from articles import views as article_views
# URL routes: admin site, article app (article list served at the root),
# a static about page, and the accounts app.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', article_views.article_list, name="home"),
    path('about/', views.about),
    path('articles/', include('articles.urls')),
    path('accounts/', include('accounts.urls')),
]

# Serve static and media files through Django -- per the header comment
# this is a development convenience, not for production.
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
3362988 | <filename>REIP/image_processing/restore_blur.py
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import glob
from IPython.display import clear_output
from numpy import expand_dims
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.preprocessing.image import ImageDataGenerator
from matplotlib import pyplot
def show_img(img, bigger=False):
    """Display a BGR image with matplotlib; `bigger` uses a 10x10 figure."""
    if bigger:
        plt.figure(figsize=(10, 10))
    # Matplotlib expects RGB channel order; OpenCV images are BGR.
    rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    plt.imshow(rgb)
    plt.show()
def sharpen(img, sigma=200):
    """Unsharp-mask `img` by subtracting a Gaussian-blurred copy.

    Typical sigma values noted in the original: 5, 15, 25.
    """
    blurred = cv2.GaussianBlur(img, (0, 0), sigma)
    return cv2.addWeighted(img, 1.5, blurred, -0.5, 0)
# sharpen
def img_processing(img):
    """Run the image-enhancement pipeline (currently just sharpening)."""
    return sharpen(img)
# like sharpen
def enhance_details(img):
    """Boost fine detail using OpenCV's detailEnhance filter (sharpen-like)."""
    return cv2.detailEnhance(img, sigma_s=12, sigma_r=0.15)
# restoring models
def _dnn_superres_x4(origin_img, path, model_name):
    """Upsample `origin_img` 4x with an OpenCV DnnSuperRes model.

    `path` is the .pb weight file and `model_name` the algorithm key
    expected by setModel().  Shared helper for the four public wrappers
    below, whose bodies were previously duplicated verbatim.
    """
    sr = cv2.dnn_superres.DnnSuperResImpl_create()
    sr.readModel(path)
    sr.setModel(model_name, 4)
    return sr.upsample(origin_img)

def edsr(origin_img):
    """4x super-resolution with the EDSR model."""
    return _dnn_superres_x4(origin_img, "EDSR_x4.pb", "edsr")

def espcn(origin_img):
    """4x super-resolution with the ESPCN model."""
    return _dnn_superres_x4(origin_img, "ESPCN_x4.pb", "espcn")

def fsrcnn(origin_img):
    """4x super-resolution with the FSRCNN model."""
    return _dnn_superres_x4(origin_img, "FSRCNN_x4.pb", "fsrcnn")

def lapsrn(origin_img):
    """4x super-resolution with the LapSRN model."""
    return _dnn_superres_x4(origin_img, "LapSRN_x4.pb", "lapsrn")
def uint_to_float(img, method='NTSC'):
    """Convert a uint8 BGR image to a float32 grayscale image in [0, 1].

    method: 'average' for an equal-weight channel mean, or 'NTSC' (default)
    for the ITU-R 601 luma weights.  Raises ValueError for any other method
    name (the original fell through to a NameError on `gray` in that case).
    """
    img = img.astype(np.float32) / 255
    # Channel order is OpenCV's BGR; plain numpy indexing replaces
    # cv2.split with identical results for an HxWx3 array.
    b, g, r = img[..., 0], img[..., 1], img[..., 2]
    if method == 'average':
        gray = (r + g + b) / 3
    elif method == 'NTSC':
        gray = 0.2989*r + 0.5870*g + 0.1140*b
    else:
        raise ValueError("unknown method: {!r}".format(method))
    #gray = (gray*255).astype('uint8')
    return gray
3399858 | <reponame>Tontolda/genui
from django.contrib import admin
import genui.generators.extensions.genuidrugex.models
from . import models
@admin.register(models.Generator)
class GeneratorAdmin(admin.ModelAdmin):
    # Default ModelAdmin; no customisation needed yet.
    pass
@admin.register(genui.generators.extensions.genuidrugex.models.DrugExNet)
class DrugExNetAdmin(admin.ModelAdmin):
    # Default ModelAdmin; no customisation needed yet.
    pass
@admin.register(genui.generators.extensions.genuidrugex.models.DrugExAgent)
class DrugExAgentAdmin(admin.ModelAdmin):
    # Default ModelAdmin; no customisation needed yet.
    pass
| StarcoderdataPython |
133426 | <reponame>erathorus/rainbow
import argparse
from rainbow import Rainbow
def main():
    """Parse CLI arguments and push Contentful posts into the Hugo tree."""
    parser = argparse.ArgumentParser(description='Bridge Contentful and Hugo')
    # Positional arguments keep their hyphenated names, so they are read
    # back through the vars() dict rather than attribute access.
    for arg_name in ('store-id', 'access-token', 'content-directory'):
        parser.add_argument(arg_name)
    parsed = vars(parser.parse_args())

    bridge = Rainbow(parsed['store-id'], parsed['access-token'], parsed['content-directory'])
    bridge.save_posts()

if __name__ == '__main__':
    main()
| StarcoderdataPython |
3322840 | import sys
# Parse a CoNLL coreference scorer output file (argv[1]) and print the
# CoNLL F1 score: the average of three of the metric F1 values.
with open(sys.argv[1]) as f:
    lines = f.readlines()

f1s = []
passed_ceafm = False
for line in lines:
    if "Coreference" in line:
        # Skip the third "Coreference" line once -- per the flag name this
        # appears to be the CEAF-m score, which is excluded from the average.
        if len(f1s) == 2 and not passed_ceafm:
            passed_ceafm = True
            continue
        # F1 is the last whitespace-separated token, e.g. "61.23%".
        f1s.append(float(line.split()[-1].split('%')[0]))
        if len(f1s) == 3:
            break

print(sys.argv[2] + ' CoNLL F1: ' + str(float(sum(f1s))/len(f1s)))
| StarcoderdataPython |
3287200 | <reponame>forbug/mcs-cycic-analysis<gh_stars>0
from dataclasses import dataclass
from mcs_cycic_analysis.models.cycic3_label import Cycic3Label
from mcs_cycic_analysis.models.cycic3_question import Cycic3Question
@dataclass
class Cycic3EntangledQuestionPair:
    """A pair of entangled CycIC3 questions (parts A and B) with gold labels."""

    # Part A question and its gold label.
    part_a_question: Cycic3Question
    part_a_label: Cycic3Label
    # Part B question and its gold label.
    part_b_question: Cycic3Question
    part_b_label: Cycic3Label
| StarcoderdataPython |
46416 | <reponame>drewsynan/ariadne_django<filename>ariadne_django/__init__.py
import django
# Django 3.2+ discovers the AppConfig automatically; older versions still
# need the explicit default_app_config hook.
if django.VERSION < (3, 2):
    default_app_config = "ariadne_django.apps.AriadneDjangoConfig"
| StarcoderdataPython |
4837818 | <gh_stars>0
#T.BRADFORD
#July 2021
import numpy as np
import sqlalchemy
import datetime as dt
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
#################################################
# Database Setup
#################################################
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save reference to the table
Measurement = Base.classes.measurement
Station = Base.classes.station
#################################################
# Flask Setup
#################################################
app = Flask(__name__)
#################################################
# Flask Routes
#################################################
@app.route("/")
def home():
    """List all available api routes."""
    # Adjacent literals concatenate; the last route carries no trailing <br/>.
    routes = (
        "/api/v1.0/precipitation<br/>"
        "/api/v1.0/stations<br/>"
        "/api/v1.0/tobs<br/>"
        "/api/v1.0/<start><br/>"
        "/api/v1.0/<start>/<end>"
    )
    return routes
@app.route("/api/v1.0/precipitation")
def precipitation():
    """Return date/precipitation pairs for the final year of data as JSON.

    FIX: the original queried the most recent measurement date but then
    ignored it and hard-coded 2017-08-23; the cutoff is now derived from
    the query (identical output for this dataset, correct for any other).
    The session is also closed in a finally block so it is released even
    if a query raises.
    """
    # Create our session (link) from Python to the DB
    session = Session(engine)
    try:
        # Most recent measurement date, e.g. ('2017-08-23',).
        last_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()
        latest = dt.datetime.strptime(last_date[0], "%Y-%m-%d").date()
        year_ago = latest - dt.timedelta(days=365)
        prcp_scores = session.query(Measurement.date, Measurement.prcp).\
            filter(Measurement.date >= year_ago).\
            order_by(Measurement.date).all()
    finally:
        session.close()

    # Convert list of tuples into normal list
    prcp_scores = list(np.ravel(prcp_scores))
    return jsonify(prcp_scores)
@app.route("/api/v1.0/stations")
def stations():
    """Return stations with their observation counts, busiest first, as JSON."""
    session = Session(engine)
    # Count measurements per station and order by descending activity.
    counts = (session.query(Measurement.station, func.count(Measurement.station))
              .group_by(Measurement.station)
              .order_by(func.count(Measurement.station).desc())
              .all())
    session.close()

    # Flatten the (station, count) tuples into one list for the response.
    flattened = list(np.ravel(counts))
    return jsonify(flattened)
@app.route("/api/v1.0/tobs")
def tobs():
    """Return min/max/avg temperature for station USC00519281 as JSON.

    FIX: the original issued three separate aggregate queries; a single
    query computes all three in one pass while producing the same response
    shape (three one-element lists).  The session is closed in a finally
    block so it is released even if the query raises.
    """
    session = Session(engine)
    try:
        result = session.query(
            func.min(Measurement.tobs),
            func.max(Measurement.tobs),
            func.avg(Measurement.tobs),
        ).filter(Measurement.station == "USC00519281").one()
    finally:
        session.close()

    # Preserve the original response structure: three single-element lists.
    lowest_temp = [result[0]]
    highest_temp = [result[1]]
    average_temp = [result[2]]
    return jsonify(lowest_temp, highest_temp, average_temp)
@app.route("/api/v1.0/<start>")
def start_end(start):
    """Return min/avg/max temperature from `start` onward as JSON."""
    session = Session(engine)
    aggregates = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]
    rows = session.query(*aggregates).filter(Measurement.date >= start).all()
    session.close()

    # One dict per row (the aggregate query yields a single row); the
    # unpacking names avoid shadowing the min/avg/max builtins.
    payload = []
    for lo, mean, hi in rows:
        payload.append({
            f"The Minimum Temperature on {start} was": lo,
            f"The Average Temperature on {start} was": mean,
            f"The Maximum Temperature on {start} was": hi,
        })
    return jsonify(payload)
@app.route("/api/v1.0/<start>/<end>")
def Start_end_date(start, end):
    """Return min/avg/max temperature between `start` and `end` as JSON."""
    session = Session(engine)
    aggregates = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]
    rows = (session.query(*aggregates)
            .filter(Measurement.date >= start)
            .filter(Measurement.date <= end)
            .all())
    session.close()

    # One dict per row (the aggregate query yields a single row).
    payload = []
    for lo, mean, hi in rows:
        payload.append({
            f"The Minimum Temperature from {start} through {end} was": lo,
            f"The Average Temperature from {start} through {end} was": mean,
            f"The Maximum Temperature from {start} through {end} was": hi,
        })
    return jsonify(payload)

if __name__ == '__main__':
    app.run(debug=True)
| StarcoderdataPython |
1737156 | <filename>lib/python2.7/site-packages/praw/decorator_helpers.py
"""Internal helper functions used by praw.decorators."""
import inspect
from requests.compat import urljoin
import six
import sys
def _get_captcha(reddit_session, captcha_id):
    """Prompt user for captcha solution and return a prepared result."""
    url = urljoin(reddit_session.config['captcha'], captcha_id + '.png')
    sys.stdout.write('Captcha URL: {0}\nCaptcha: '.format(url))
    sys.stdout.flush()
    line = sys.stdin.readline()
    if line:
        return {'iden': captcha_id, 'captcha': line.strip()}
    # stdin hit end of file: close it so the next attempt raises instead of
    # prompting again (the request is cached, so no extra round trip).
    sys.stdin.close()
    return None
def _is_mod_of_all(user, subreddit):
    """Return True if `user` moderates every subreddit in the multireddit."""
    moderated = user.get_cached_moderated_reddits()
    requested = six.text_type(subreddit).lower().split('+')
    for name in requested:
        if name not in moderated:
            return False
    return True
def _make_func_args(function):
    # Return the positional-or-keyword parameter names of `function`.
    if six.PY3 and not hasattr(sys, 'pypy_version_info'):
        # CPython3 uses inspect.signature(), not inspect.getargspec()
        # see #551 and #541 for more info
        func_items = inspect.signature(function).parameters.items()
        func_args = [name for name, param in func_items
                     if param.kind == param.POSITIONAL_OR_KEYWORD]
    else:
        # Python 2 / PyPy fallback.
        # NOTE(review): inspect.getargspec() was removed in Python 3.11,
        # so this branch breaks on modern PyPy3 -- confirm the supported
        # interpreter versions before relying on it.
        func_args = inspect.getargspec(function).args
    return func_args
| StarcoderdataPython |
70303 | #!/usr/bin/env python
"""
Example of training DCGAN on MNIST using PBT with Tune's function API.
"""
import ray
from ray import tune
from ray.tune.schedulers import PopulationBasedTraining
import argparse
import os
from filelock import FileLock
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
import numpy as np
from common import beta1, MODEL_PATH
from common import demo_gan, get_data_loader, plot_images, train, weights_init
from common import Discriminator, Generator, Net
# __Train_begin__
def dcgan_train(config, checkpoint_dir=None):
    """Tune trainable: run the DCGAN training loop, checkpointing each step.

    `config` keys used here: "use_gpu", "lr" (initial learning rate for
    both optimizers), "netD_lr"/"netG_lr" (PBT-mutated learning rates,
    applied when restoring from a checkpoint), and "mnist_model_ref"
    (Ray object reference to the MNIST scorer used for inception score).
    """
    step = 0
    use_cuda = config.get("use_gpu") and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    netD = Discriminator().to(device)
    netD.apply(weights_init)
    netG = Generator().to(device)
    netG.apply(weights_init)
    criterion = nn.BCELoss()
    optimizerD = optim.Adam(
        netD.parameters(), lr=config.get("lr", 0.01), betas=(beta1, 0.999))
    optimizerG = optim.Adam(
        netG.parameters(), lr=config.get("lr", 0.01), betas=(beta1, 0.999))
    # File lock so concurrent trials don't race on the dataset download.
    with FileLock(os.path.expanduser("~/.data.lock")):
        dataloader = get_data_loader()
    if checkpoint_dir is not None:
        # Restore models/optimizers, then apply any PBT-mutated learning
        # rates on top of the restored optimizer state.
        path = os.path.join(checkpoint_dir, "checkpoint")
        checkpoint = torch.load(path)
        netD.load_state_dict(checkpoint["netDmodel"])
        netG.load_state_dict(checkpoint["netGmodel"])
        optimizerD.load_state_dict(checkpoint["optimD"])
        optimizerG.load_state_dict(checkpoint["optimG"])
        step = checkpoint["step"]

        if "netD_lr" in config:
            for param_group in optimizerD.param_groups:
                param_group["lr"] = config["netD_lr"]
        if "netG_lr" in config:
            for param_group in optimizerG.param_groups:
                param_group["lr"] = config["netG_lr"]

    while True:
        lossG, lossD, is_score = train(netD, netG, optimizerG, optimizerD,
                                       criterion, dataloader, step, device,
                                       config["mnist_model_ref"])
        step += 1
        # Checkpoint every step so PBT can exploit/explore at any point.
        with tune.checkpoint_dir(step=step) as checkpoint_dir:
            path = os.path.join(checkpoint_dir, "checkpoint")
            torch.save({
                "netDmodel": netD.state_dict(),
                "netGmodel": netG.state_dict(),
                "optimD": optimizerD.state_dict(),
                "optimG": optimizerG.state_dict(),
                "step": step,
            }, path)
        tune.report(lossg=lossG, lossd=lossD, is_score=is_score)
# __Train_end__
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--smoke-test", action="store_true", help="Finish quickly for testing")
    args, _ = parser.parse_known_args()
    ray.init()

    import urllib.request
    # Download a pre-trained MNIST model for inception score calculation.
    # This is a tiny model (<100kb).
    if not os.path.exists(MODEL_PATH):
        print("downloading model")
        os.makedirs(os.path.dirname(MODEL_PATH), exist_ok=True)
        urllib.request.urlretrieve(
            "https://github.com/ray-project/ray/raw/master/python/ray/tune/"
            "examples/pbt_dcgan_mnist/mnist_cnn.pt", MODEL_PATH)

    dataloader = get_data_loader()
    if not args.smoke_test:
        plot_images(dataloader)

    # __tune_begin__
    # load the pretrained mnist classification model for inception_score
    mnist_cnn = Net()
    mnist_cnn.load_state_dict(torch.load(MODEL_PATH))
    mnist_cnn.eval()
    # Put the model in Ray object store.
    mnist_model_ref = ray.put(mnist_cnn)

    scheduler = PopulationBasedTraining(
        perturbation_interval=5,
        hyperparam_mutations={
            # distribution for resampling
            # NOTE(review): arguments are (1e-2, 1e-5), i.e. low > high --
            # confirm the intended bounds ordering for np.random.uniform.
            "netG_lr": lambda: np.random.uniform(1e-2, 1e-5),
            "netD_lr": lambda: np.random.uniform(1e-2, 1e-5),
        })

    # Short run for smoke tests, full 300 iterations otherwise.
    tune_iter = 5 if args.smoke_test else 300
    analysis = tune.run(
        dcgan_train,
        name="pbt_dcgan_mnist",
        scheduler=scheduler,
        verbose=1,
        stop={
            "training_iteration": tune_iter,
        },
        metric="is_score",
        mode="max",
        num_samples=8,
        config={
            "netG_lr": tune.choice([0.0001, 0.0002, 0.0005]),
            "netD_lr": tune.choice([0.0001, 0.0002, 0.0005]),
            "mnist_model_ref": mnist_model_ref
        })
    # __tune_end__

    # demo of the trained Generators
    if not args.smoke_test:
        all_trials = analysis.trials
        checkpoint_paths = [
            os.path.join(analysis.get_best_checkpoint(t), "checkpoint")
            for t in all_trials
        ]
        demo_gan(analysis, checkpoint_paths)
| StarcoderdataPython |
from dotenv import load_dotenv
import fetch_calendar
import fetch_formatted_text
from inky.auto import auto
import os
import time

# Load environment variables from a .gitignore'd .env file.
load_dotenv()
# RC calendar token from .env.
token = os.getenv('ICS_TOKEN')
inky_display = auto()

# (name, location, start, end) of the event most recently drawn to the display.
last_drawn = ('', '', '', '')

while True:
    event = fetch_calendar.getNextEvent(token)
    if not event:
        print("No more events today")
    else:
        current = (event['name'], event['location'], event['start'], event['end'])
        if current == last_drawn:
            print("We've already drawn this one to the display!")
        else:
            last_drawn = current
            # Truncate the title to fit the panel and keep only the last
            # path component of the location.
            event_name = event['name'][:31]
            event_range = event['start'] + ' - ' + event['end']
            event_location = event['location'].split('/')[-1]
            ####### WORKSHOP CODE GOES HERE #######
            print(event_name)
            print(event_location)
            print(event_range)
            img = fetch_formatted_text.get_text_image(inky_display, (event_name, event_range, event_location))
            # send to inky (comment out only on local machine)
            # fetch_formatted_text.rgb_to_inky(inky_display, img)
    # Poll the RC calendar again in one minute.
    time.sleep(60)
| StarcoderdataPython |
#!/usr/bin/env python3
from sys import argv
from math import cos, sin
from math import radians as toR


def rotate(vec, degrees):
    """Rotate 2-D vector *vec* counter-clockwise by *degrees* about the origin."""
    rad = toR(degrees)
    return [vec[0] * cos(rad) - vec[1] * sin(rad),
            vec[0] * sin(rad) + vec[1] * cos(rad)]


# AoC 2020 day 12, part 2: steer the ship via a waypoint.
instructions = [line.strip() for line in open(argv[1]).readlines()]
x = 0
y = 0
waypoint = [10, 1]  # x, y / EW, NS
for line in instructions:
    action = line[0]
    value = int(line[1:])
    if action == "N":
        waypoint[1] += value
    elif action == "E":
        waypoint[0] += value
    elif action == "S":
        waypoint[1] -= value
    elif action == "W":
        waypoint[0] -= value
    elif action == "R":
        # Clockwise turn == negative counter-clockwise rotation.
        waypoint = rotate(waypoint, -value)
    elif action == "L":
        waypoint = rotate(waypoint, value)
    elif action == "F":
        x += waypoint[0] * value
        y += waypoint[1] * value
manhattan = abs(x) + abs(y)
print(f"{manhattan:.0f}")
| StarcoderdataPython |
3381793 | <gh_stars>1-10
# pylint: disable=C0321, C0114, W0702, C0103, C0301, R1710, W0603, W0621
"""
Adds support for Telegram Bot messaging.
To enable, provide a TELEGRAM_TOKEN environment variable.
"""
import logging
from os import getenv
from asyncio import sleep
from aiogram import Bot, Dispatcher, types
from aiogram.utils import exceptions, executor
# Bot credentials and recipients come from the environment (see module docstring).
TELEGRAM_TOKEN = getenv('TELEGRAM_TOKEN')
# BUG FIX: getenv() returns None when TELEGRAM_USERS is unset, and the original
# crashed on None.split(',') at import time. Fall back to an empty recipient
# list, and drop empty entries (e.g. produced by a trailing comma).
TELEGRAM_USERS = [user for user in (getenv('TELEGRAM_USERS') or '').split(',') if user]
logging.basicConfig(level=logging.INFO)
log = logging.getLogger('telegam')  # NOTE(review): misspelled tag kept for log continuity
log.info("Module loaded.")
# Single module-level bot/dispatcher pair; messages are sent as HTML.
bot = Bot(token=TELEGRAM_TOKEN, parse_mode=types.ParseMode.HTML)
dp = Dispatcher(bot)
async def send_message(user_id: int, text: str, disable_notification: bool = False) -> bool:
    """
    Safe messages sender.

    Logs (rather than raises) the common aiogram delivery failures, and
    retries once the flood-control window has elapsed.

    :param user_id: Telegram chat/user id to deliver to.
    :param text: Message body (HTML parse mode, per the module-level Bot).
    :param disable_notification: Deliver silently when True.
    :return: True when the message was delivered, False otherwise.
    """
    try:
        await bot.send_message(user_id, text, disable_notification=disable_notification)
    except exceptions.BotBlocked:
        log.error("Target [%s]: blocked by user", user_id)
    except exceptions.ChatNotFound:
        log.error("Target [%s]: invalid user ID", user_id)
    except exceptions.RetryAfter as e:
        log.error("Target [%s]: flood limited, sleeping %d.",
                  user_id, e.timeout)
        await sleep(e.timeout)
        # BUG FIX: propagate disable_notification on the retry; the original
        # retried with the default (notifications re-enabled).
        return await send_message(user_id, text, disable_notification)
    except exceptions.UserDeactivated:
        log.error("Target [ID:%s]: user is deactivated", user_id)
    except exceptions.TelegramAPIError:
        log.exception("Target [ID:%s]: failed", user_id)
    else:
        log.info("Target [ID:%s]: success", user_id)
        return True
    return False
def send(message="RING"):
    """
    Sends a message to the environment-programmed recipients.

    Keyword arguments:
    message -- Any string
    """
    # NOTE(review): executor.start() runs the dispatcher event loop once per
    # recipient (start/stop per user) — confirm this is intended rather than
    # one loop fanning out to all TELEGRAM_USERS.
    for t in TELEGRAM_USERS:
        executor.start(dp, send_message(t, message))
| StarcoderdataPython |
1766347 | import warnings
from torch.optim.lr_scheduler import ReduceLROnPlateau
from .base import BaseHook
from .registry import HOOKS
@HOOKS.register_module
class LRSchedulerHook(BaseHook):
    """Advance the runner's LR scheduler, either per epoch or per iteration.

    ``ReduceLROnPlateau`` schedulers are stepped with the monitored metric
    taken from ``runner.epoch_outputs``; all other schedulers are stepped
    unconditionally.
    """

    def __init__(self, monitor_metric="loss", by_epoch=True):
        self.monitor_metric = monitor_metric
        self.by_epoch = by_epoch

    def after_val_epoch(self, runner):
        # Epoch-granularity scheduling happens after validation.
        if not self.by_epoch:
            return
        self.step(runner)

    def after_train_iter(self, runner):
        # Iteration-granularity scheduling happens after every train step.
        if self.by_epoch:
            return
        self.step(runner)

    def step(self, runner):
        sched = runner.scheduler
        with warnings.catch_warnings():
            # https://discuss.pytorch.org/t/cyclic-learning-rate-how-to-use/53796/2
            warnings.filterwarnings("ignore", category=UserWarning)
            if isinstance(sched, ReduceLROnPlateau):
                sched.step(runner.epoch_outputs[self.monitor_metric])
            else:
                sched.step()
| StarcoderdataPython |
3274916 | import docker
class DockerLite:
    """Thin convenience wrapper around the docker-py client."""

    def __init__(self):
        # Client configured from the standard DOCKER_* environment variables.
        self.client = docker.from_env()

    def build_image(self, path_to_dir, resulting_image_name):
        """A method to build a Docker image from a Dockerfile.

        Args:
            path_to_dir: string: path to the directory holding the Dockerfile
            resulting_image_name: string: unique name (tag) for the image

        Returns:
            response: the build result.
                NOTE(review): docker-py's ``images.build`` returns an
                ``(Image, build-log iterator)`` tuple — confirm callers
                expect the tuple rather than a bare image object.
        """
        response = self.client.images.build(
            path=path_to_dir,
            tag=resulting_image_name)
        return response

    def list_containers(self, all=None):
        """A method for listing Docker containers.

        Returns only running Docker containers by default.

        Args:
            all: bool: when truthy, include stopped containers: optional.
                (The name shadows the ``all`` builtin but is kept for
                call-site compatibility.)

        Returns:
            response: List: a list of container objects.
        """
        if all:
            response = self.client.containers.list(all=True)
        else:
            response = self.client.containers.list()
        return response

    def get_container_by_name(self, existing_container_name):
        """A method for getting a Python object that represents
        a given Docker container.

        Args:
            existing_container_name: string: the name of the Docker container

        Returns:
            response: Python object: a given Docker container
        """
        response = self.client.containers.get(existing_container_name)
        return response

    def run_container(self, image_name, resulting_container_name, command=None, volumes=None):
        """A method for running a Docker container.

        Requires a name for the container.

        Args:
            image_name: string: the name of the Docker image to run;
                can be local or in Docker Hub.
            resulting_container_name: string: the name to set to the container
            command: string: the command to run at startup: optional
            volumes: volume specification forwarded to docker-py: optional

        Returns:
            response: Python object: the container being run.
        """
        response = self.client.containers.run(
            image=image_name,
            name=resulting_container_name,
            command=command,
            remove=True,
            detach=True,
            # BUG FIX: ``volumes`` was previously hard-coded to None, which
            # silently ignored any caller-supplied volume mapping.
            volumes=volumes)
        return response

    def exec_into_running_container(self, existing_container_name, command):
        """Run ``command`` inside an already-running container.

        Args:
            existing_container_name: string: the container to exec into
            command: string: the command to execute

        Returns:
            response: the result of ``Container.exec_run`` for the command.
        """
        container = self.get_container_by_name(existing_container_name)
        response = container.exec_run(command)
        return response

    def kill_container(self, existing_container_name):
        """A method for stopping a Docker container.

        Containers started via ``run_container`` use ``remove=True``, so
        stopping them also removes them.

        Args:
            existing_container_name: string: the container to tear down

        Returns:
            0
        """
        container = self.get_container_by_name(existing_container_name)
        container.stop()
        return 0

    def list_images(self):
        """A method for listing all images on the system.

        Args:
            None

        Returns:
            image_list: List: a list of Python objects
                representing all images on the system.
        """
        image_list = self.client.images.list()
        return image_list

    def remove_unused_images(self):
        """A method for removing unused (dangling) images.

        Args:
            None

        Returns:
            0
        """
        self.client.images.prune()
        return 0

    def remove_all_images(self):
        """A method for removing ALL images.

        Args:
            None

        Returns:
            0
        """
        image_list = self.list_images()
        for image in image_list:
            self.client.images.remove(image.id, force=True)
        return 0
| StarcoderdataPython |
36628 | <reponame>Joyoe/Magisk-nosbin_magisk-nohide
# Copyright (C) 2007-2012 Red Hat
# see file 'COPYING' for use and warranty information
#
# policygentool is a tool for the initial generation of SELinux policy
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
# 02111-1307 USA
#
#
########################### tmp Template File #############################
# Type declaration for the TEMPLATETYPE read/write content type.
te_types="""
type TEMPLATETYPE_rw_t;
files_type(TEMPLATETYPE_rw_t)
"""
# Allow the TEMPLATETYPE domain full management of its rw content.
te_rules="""
manage_dirs_pattern(TEMPLATETYPE_t, TEMPLATETYPE_rw_t, TEMPLATETYPE_rw_t)
manage_files_pattern(TEMPLATETYPE_t, TEMPLATETYPE_rw_t, TEMPLATETYPE_rw_t)
manage_lnk_files_pattern(TEMPLATETYPE_t, TEMPLATETYPE_rw_t, TEMPLATETYPE_rw_t)
"""
########################### Interface File #############################
# Interfaces other domains can call to access TEMPLATETYPE rw content.
if_rules="""
########################################
## <summary>
##	Search TEMPLATETYPE rw directories.
## </summary>
## <param name="domain">
##	<summary>
##	Domain allowed access.
##	</summary>
## </param>
#
interface(`TEMPLATETYPE_search_rw_dir',`
	gen_require(`
		type TEMPLATETYPE_rw_t;
	')

	allow $1 TEMPLATETYPE_rw_t:dir search_dir_perms;
	files_search_rw($1)
')

########################################
## <summary>
##	Read TEMPLATETYPE rw files.
## </summary>
## <param name="domain">
##	<summary>
##	Domain allowed access.
##	</summary>
## </param>
#
interface(`TEMPLATETYPE_read_rw_files',`
	gen_require(`
		type TEMPLATETYPE_rw_t;
	')

	read_files_pattern($1, TEMPLATETYPE_rw_t, TEMPLATETYPE_rw_t)
	allow $1 TEMPLATETYPE_rw_t:dir list_dir_perms;
	files_search_rw($1)
')

########################################
## <summary>
##	Manage TEMPLATETYPE rw files.
## </summary>
## <param name="domain">
##	<summary>
##	Domain allowed access.
##	</summary>
## </param>
#
interface(`TEMPLATETYPE_manage_rw_files',`
	gen_require(`
		type TEMPLATETYPE_rw_t;
	')

	manage_files_pattern($1, TEMPLATETYPE_rw_t, TEMPLATETYPE_rw_t)
')

########################################
## <summary>
##	Create, read, write, and delete
##	TEMPLATETYPE rw dirs.
## </summary>
## <param name="domain">
##	<summary>
##	Domain allowed access.
##	</summary>
## </param>
#
interface(`TEMPLATETYPE_manage_rw_dirs',`
	gen_require(`
		type TEMPLATETYPE_rw_t;
	')

	manage_dirs_pattern($1, TEMPLATETYPE_rw_t, TEMPLATETYPE_rw_t)
')
"""
# Extra te rule emitted when the application uses unix stream sockets.
te_stream_rules="""
manage_sock_files_pattern(TEMPLATETYPE_t, TEMPLATETYPE_rw_t, TEMPLATETYPE_rw_t)
"""
if_stream_rules="""\
########################################
## <summary>
##	Connect to TEMPLATETYPE over a unix stream socket.
## </summary>
## <param name="domain">
##	<summary>
##	Domain allowed access.
##	</summary>
## </param>
#
interface(`TEMPLATETYPE_stream_connect',`
	gen_require(`
		type TEMPLATETYPE_t, TEMPLATETYPE_rw_t;
	')

	stream_connect_pattern($1, TEMPLATETYPE_rw_t, TEMPLATETYPE_rw_t, TEMPLATETYPE_t)
')
"""
# Fragments spliced into the generated admin interface.
if_admin_types="""
	type TEMPLATETYPE_rw_t;"""
if_admin_rules="""
	files_search_etc($1)
	admin_pattern($1, TEMPLATETYPE_rw_t)
"""
########################### File Context ##################################
fc_file="""
FILENAME	--	gen_context(system_u:object_r:TEMPLATETYPE_rw_t,s0)
"""
# BUG FIX: the sock-file context previously labeled with
# TEMPLATETYPE_etc_rw_t, a type this template never declares (te_types only
# defines TEMPLATETYPE_rw_t), which would yield an invalid file context.
fc_sock_file="""\
FILENAME	-s	gen_context(system_u:object_r:TEMPLATETYPE_rw_t,s0)
"""
fc_dir="""
FILENAME(/.*)?	gen_context(system_u:object_r:TEMPLATETYPE_rw_t,s0)
"""
| StarcoderdataPython |
3229010 | import datetime
from Models.Rental import Rental
from pyforms import BaseWidget
from pyforms.Controls import ControlButton, ControlLabel
from pyforms.Controls import ControlText
# TO DO: write specifications
class RentalWindow(Rental, BaseWidget):
    """pyforms window for creating a rental or returning a rented movie.

    In "creating" mode all fields are editable and the button queues a new
    rental; otherwise the fields are read-only and the button returns the
    movie identified by the (disabled) id field.
    """
    def __init__(self, isCreating):
        # Initialise the domain object with empty values; the widget fields
        # below are the source of truth until _updateAction copies them back.
        Rental.__init__(self, '', '', '', '', '', '')
        BaseWidget.__init__(self, 'Rental')
        self.__isCreating = isCreating
        self._idField = ControlText("Id")
        self._clientIdField = ControlText("Client Id")
        self._movieIdField = ControlText("Movie Id")
        self._dueDateField = ControlText("Due Date")
        self._rentedDateField = ControlText("Rented Date")
        self._returnedDateField = ControlText("Returned Date")
        self._buttonField = ControlButton('Rent a new Movie')
        self._buttonField.value = self._updateAction
        # Dates are system-managed; users never type them directly.
        self._rentedDateField.enabled = False
        self._returnedDateField.enabled = False
        # Label used to surface validation/service errors to the user.
        self._label = ControlLabel("")
        if not isCreating:
            # Return-a-movie mode: everything read-only, button repurposed.
            self._idField.enabled = False
            self._clientIdField.enabled = False
            self._movieIdField.enabled = False
            self._dueDateField.enabled = False
            self._returnedDateField.enabled = False
            self._rentedDateField.enabled = False
            self._buttonField.value = self.returnMovie
            self._buttonField.name = "Return movie"
    def returnMovie(self):
        """Ask the parent window to mark this rental's movie as returned."""
        # NOTE(review): unlike _updateAction, this does not guard against
        # self.parent being None — confirm the parent is always set here.
        self.parent.returnAMovie(self._idField.value)
    def getData(self, id, movieId, clientId, rentedDate, dueDate, returnedDate):
        """Populate the form fields from an existing rental record."""
        self._idField.value = id
        self._movieIdField.value = movieId
        self._clientIdField.value = clientId
        self._rentedDateField.value = rentedDate
        self._dueDateField.value = dueDate
        self._returnedDateField.value = returnedDate
        # A returnedDate of '-1' means "not yet returned"; anything else
        # means the rental is closed, so disable the action button.
        # NOTE(review): this relies on str(ControlText) returning the field
        # value — confirm, or compare self._returnedDateField.value instead.
        if str(self._returnedDateField) != '-1':
            self._buttonField.enabled = False
    def _updateAction(self):
        """Copy field values onto the domain object and submit to the parent."""
        self.id = self._idField.value
        self.movieId = self._movieIdField.value
        self.clientId = self._clientIdField.value
        self.returnedDate = self._returnedDateField.value
        self.rentedDate = self._rentedDateField.value
        self.dueDate = self._dueDateField.value
        self._label.hide()
        try:
            if self.parent is not None:
                if self.__isCreating:
                    # New rentals start today and are flagged unreturned (-1).
                    self.parent.addRental(str(self.id), self.movieId, self.clientId,
                                          datetime.datetime.today().strftime('%Y-%m-%d'), self.dueDate,
                                          '-1')
                else:
                    self.parent.returnAMovie(self.id)
        except Exception as e:
            # Surface service-layer errors in the window instead of crashing.
            self._label.show()
            self._label.value = str(e)
| StarcoderdataPython |
1601095 | from pathlib import Path
import numpy as np
import torch.nn as nn
class FeatureExtractor(object):
    """Forward-hook based feature dumper.

    Registers a forward hook on every ``Conv2d``/``Linear`` module of
    ``trainer.model`` and accumulates their outputs (conv outputs are
    spatially averaged to ``(batch, channels)``), periodically flushing one
    ``.npy`` file per layer under ``<log_path>/features/<layer_name>/``.
    """

    def __init__(self):
        super().__init__()

    def initialize(self, trainer):
        """Create the output directory tree and register the hooks."""
        self.feature_path = trainer.logger.log_path / 'features'
        if not self.feature_path.exists():
            self.feature_path.mkdir()
        self.k = 0  # running index used to name each flushed .npy batch
        trainer.logger.print('Extract features after convolution and linear layers')
        trainer.logger.print(f'The features are saved at:{self.feature_path}')
        self.register_module_hook(trainer)

    def keep_feature(self, module, input, output):
        """Forward hook: stack this batch's output onto ``module.extracted``."""
        mat = output.cpu().numpy()
        # Reduce 4-D conv outputs to (batch, channels) by averaging the
        # spatial dimensions.
        if mat.ndim == 4:
            mat = np.mean(mat, axis=(2, 3))
        if module.extracted is None:
            module.extracted = mat
        else:
            module.extracted = np.vstack((module.extracted, mat))

    def register_module_hook(self, trainer):
        """Attach ``keep_feature`` to every Conv2d/Linear layer of the model."""
        self.first_module = None
        for name, module in trainer.model.named_modules():
            if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):
                # Remember the first hooked layer; its row count tracks how
                # many samples have accumulated since the last flush.
                if self.first_module is None:  # idiom fix: identity, not ==
                    self.first_module = module
                module.extracted = None
                module.register_forward_hook(self.keep_feature)
                temp_path = self.feature_path / name
                if not temp_path.exists():
                    temp_path.mkdir()

    def save_feature(self, trainer):
        """Write each layer's accumulated features to disk and reset them."""
        for name, module in trainer.model.named_modules():
            if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):
                # NOTE(review): zfill(1) never pads — a wider zero-pad was
                # probably intended; confirm before changing file names.
                np.save(self.feature_path / name / (str(self.k).zfill(1) + '.npy'), module.extracted)
                module.extracted = None
        self.k += 1

    def check_feature(self, trainer):
        """Flush to disk once the accumulated batch exceeds 10000 rows."""
        # Robustness: nothing to flush before the first forward pass, or if
        # the model contains no Conv2d/Linear layers at all.
        if self.first_module is None or self.first_module.extracted is None:
            return
        if self.first_module.extracted.shape[0] > 10000:
            self.save_feature(trainer)
3344325 | <gh_stars>0
# -*- coding: utf-8 -*-
# Copyright European Organization for Nuclear Research (CERN) since 2012
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Interface for the requests abstraction layer
"""
from rucio.api import permission
from rucio.common import exception
from rucio.common.types import InternalAccount, InternalScope
from rucio.common.utils import api_update_return_dict
from rucio.core import request
from rucio.core.rse import get_rse_id
def queue_requests(requests, issuer, vo='def'):
    """
    Submit transfer or deletion requests on destination RSEs for data identifiers.

    :param requests: List of dictionaries containing 'scope', 'name', 'dest_rse_id', 'request_type', 'attributes'
    :param issuer: Issuing account as a string.
    :param vo: The VO to act on.
    :returns: List of Request-IDs as 32 character hex strings
    """
    kwargs = {'requests': requests, 'issuer': issuer}
    if not permission.has_permission(issuer=issuer, vo=vo, action='queue_requests', kwargs=kwargs):
        raise exception.AccessDenied('%(issuer)s can not queue request' % locals())

    # Convert plain scope/account strings into their VO-scoped internal
    # forms before handing off to the core layer.
    for entry in requests:
        entry['scope'] = InternalScope(entry['scope'], vo=vo)
        if 'account' in entry:
            entry['account'] = InternalAccount(entry['account'], vo=vo)

    return [api_update_return_dict(queued) for queued in request.queue_requests(requests)]
def cancel_request(request_id, issuer, account, vo='def'):
    """
    Cancel a request.

    The permission check runs first; the actual cancellation is not
    implemented yet, so authorised callers currently get
    ``NotImplementedError``.

    :param request_id: Request Identifier as a 32 character hex string.
    :param issuer: Issuing account as a string.
    :param account: Account identifier as a string.
    :param vo: The VO to act on.
    :raises AccessDenied: If the issuer may not cancel this request.
    :raises NotImplementedError: Always, once permission is granted.
    """
    kwargs = {'account': account, 'issuer': issuer, 'request_id': request_id}
    # NOTE(review): the action name carries a trailing underscore
    # ('cancel_request_') — confirm it matches the permission table.
    if not permission.has_permission(issuer=issuer, vo=vo, action='cancel_request_', kwargs=kwargs):
        raise exception.AccessDenied('%s cannot cancel request %s' % (account, request_id))
    raise NotImplementedError
def cancel_request_did(scope, name, dest_rse, request_type, issuer, account, vo='def'):
    """
    Cancel a request based on a DID and request type.

    :param scope: Data identifier scope as a string.
    :param name: Data identifier name as a string.
    :param dest_rse: RSE name as a string.
    :param request_type: Type of the request as a string.
    :param issuer: Issuing account as a string.
    :param account: Account identifier as a string.
    :param vo: The VO to act on.
    """
    dest_rse_id = get_rse_id(rse=dest_rse, vo=vo)
    kwargs = {'account': account, 'issuer': issuer}
    permitted = permission.has_permission(issuer=issuer, vo=vo, action='cancel_request_did', kwargs=kwargs)
    if not permitted:
        raise exception.AccessDenied('%(account)s cannot cancel %(request_type)s request for %(scope)s:%(name)s' % locals())
    # Core layer expects the VO-scoped internal scope.
    internal_scope = InternalScope(scope, vo=vo)
    return request.cancel_request_did(internal_scope, name, dest_rse_id, request_type)
def get_next(request_type, state, issuer, account, vo='def'):
    """
    Retrieve the next request matching the request type and state.

    :param request_type: Type of the request as a string.
    :param state: State of the request as a string.
    :param issuer: Issuing account as a string.
    :param account: Account identifier as a string.
    :param vo: The VO to act on.
    :returns: Request as a dictionary.
    """
    kwargs = {'account': account, 'issuer': issuer, 'request_type': request_type, 'state': state}
    if not permission.has_permission(issuer=issuer, vo=vo, action='get_next', kwargs=kwargs):
        raise exception.AccessDenied('%(account)s cannot get the next request of type %(request_type)s in state %(state)s' % locals())
    return [api_update_return_dict(req) for req in request.get_next(request_type, state)]
def get_request_by_did(scope, name, rse, issuer, vo='def'):
    """
    Retrieve a request by its DID for a destination RSE.

    :param scope: The scope of the data identifier as a string.
    :param name: The name of the data identifier as a string.
    :param rse: The destination RSE of the request as a string.
    :param issuer: Issuing account as a string.
    :param vo: The VO to act on.
    :returns: Request as a dictionary.
    """
    rse_id = get_rse_id(rse=rse, vo=vo)
    kwargs = {'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, 'issuer': issuer}
    if not permission.has_permission(issuer=issuer, vo=vo, action='get_request_by_did', kwargs=kwargs):
        raise exception.AccessDenied('%(issuer)s cannot retrieve the request DID %(scope)s:%(name)s to RSE %(rse)s' % locals())
    internal_scope = InternalScope(scope, vo=vo)
    return api_update_return_dict(request.get_request_by_did(internal_scope, name, rse_id))
def get_request_history_by_did(scope, name, rse, issuer, vo='def'):
    """
    Retrieve a historical request by its DID for a destination RSE.

    :param scope: The scope of the data identifier as a string.
    :param name: The name of the data identifier as a string.
    :param rse: The destination RSE of the request as a string.
    :param issuer: Issuing account as a string.
    :param vo: The VO to act on.
    :returns: Request as a dictionary.
    """
    rse_id = get_rse_id(rse=rse, vo=vo)
    kwargs = {'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, 'issuer': issuer}
    if not permission.has_permission(issuer=issuer, vo=vo, action='get_request_history_by_did', kwargs=kwargs):
        raise exception.AccessDenied('%(issuer)s cannot retrieve the request DID %(scope)s:%(name)s to RSE %(rse)s' % locals())
    internal_scope = InternalScope(scope, vo=vo)
    return api_update_return_dict(request.get_request_history_by_did(internal_scope, name, rse_id))
def list_requests(src_rses, dst_rses, states, issuer, vo='def'):
    """
    List all requests in a specific state from a source RSE to a destination RSE.

    :param src_rses: source RSEs.
    :param dst_rses: destination RSEs.
    :param states: list of request states.
    :param issuer: Issuing account as a string.
    :param vo: The VO to act on.
    :returns: Generator of request dictionaries.
    """
    src_rse_ids = [get_rse_id(rse=rse, vo=vo) for rse in src_rses]
    dst_rse_ids = [get_rse_id(rse=rse, vo=vo) for rse in dst_rses]
    kwargs = {'src_rse_id': src_rse_ids, 'dst_rse_id': dst_rse_ids, 'issuer': issuer}
    if not permission.has_permission(issuer=issuer, vo=vo, action='list_requests', kwargs=kwargs):
        # BUG FIX: the original interpolated %(src_rse)s / %(dst_rse)s, which
        # do not exist in locals() and raised KeyError instead of AccessDenied.
        raise exception.AccessDenied('%s cannot list requests from RSE %s to RSE %s' % (issuer, src_rses, dst_rses))
    for req in request.list_requests(src_rse_ids, dst_rse_ids, states):
        yield api_update_return_dict(req.to_dict())
def list_requests_history(src_rses, dst_rses, states, issuer, vo='def', offset=None, limit=None):
    """
    List all historical requests in a specific state from a source RSE to a destination RSE.

    :param src_rses: source RSEs.
    :param dst_rses: destination RSEs.
    :param states: list of request states.
    :param issuer: Issuing account as a string.
    :param vo: The VO to act on.
    :param offset: offset (for paging).
    :param limit: limit number of results.
    :returns: Generator of request dictionaries.
    """
    src_rse_ids = [get_rse_id(rse=rse, vo=vo) for rse in src_rses]
    dst_rse_ids = [get_rse_id(rse=rse, vo=vo) for rse in dst_rses]
    kwargs = {'src_rse_id': src_rse_ids, 'dst_rse_id': dst_rse_ids, 'issuer': issuer}
    if not permission.has_permission(issuer=issuer, vo=vo, action='list_requests_history', kwargs=kwargs):
        # BUG FIX: the original interpolated %(src_rse)s / %(dst_rse)s, which
        # do not exist in locals() and raised KeyError instead of AccessDenied.
        raise exception.AccessDenied('%s cannot list requests from RSE %s to RSE %s' % (issuer, src_rses, dst_rses))
    for req in request.list_requests_history(src_rse_ids, dst_rse_ids, states, offset, limit):
        yield api_update_return_dict(req.to_dict())
| StarcoderdataPython |
180299 | import sys
if __name__ == "__main__":
from common import powerset
from log_star_decider import _is_log_star_solvable
else:
from .common import powerset
from .log_star_decider import _is_log_star_solvable
from .constant_synthesizer import find_algorithm
# When True, synthesize and print the algorithm for solvable instances;
# toggled by the -v/--verbose CLI flag in the __main__ block below.
VERBOSE = False
def is_constant_solvable(constraints):
    """Return True iff *constraints* admit a constant-round algorithm.

    Tries every subset of the label alphabet: keeps only the constraints
    written entirely over that subset, and for each label whose doubled form
    starts or ends a kept constraint, asks the log* decider whether the
    reduced instance is solvable.
    """
    alphabet = set("".join(constraints))
    for subset in powerset(alphabet):
        kept = [c for c in constraints
                if c[0] in subset and c[1] in subset and c[2] in subset]
        for label in subset:
            doubled = label + label
            for c in kept:
                if c.startswith(doubled) or c.endswith(doubled):
                    if _is_log_star_solvable(kept, list(subset), label):
                        if VERBOSE:
                            find_algorithm(kept)
                        return True
    return False
if __name__ == "__main__":
    # Enable verbose algorithm synthesis on -v / --verbose.
    if len(sys.argv) == 2 and sys.argv[1] in ("-v", "--verbose"):
        VERBOSE = True
    constraints = input().split()
    print("O(1)" if is_constant_solvable(constraints) else "ω(1)")
| StarcoderdataPython |
4839961 | <gh_stars>1-10
"""A module for the Genomic Uncertain Deletion Classifier."""
from typing import List
from .set_based_classifier import SetBasedClassifier
from variation.schemas.classification_response_schema import ClassificationType
class GenomicUncertainDeletionClassifier(SetBasedClassifier):
    """The Genomic Uncertain Deletion Classifier class."""
    def classification_type(self) -> ClassificationType:
        """Return the Genomic Uncertain Deletion classification type."""
        return ClassificationType.GENOMIC_UNCERTAIN_DELETION
    def exact_match_candidates(self) -> List[List[str]]:
        """Return the exact match token type candidates.

        Each inner list is an ordered token-type sequence that counts as an
        exact match for this classification.
        """
        return [
            ['Chromosome', 'GenomicUncertainDeletion'],
            ['GeneSymbol', 'GenomicUncertainDeletion'],
            ['GenomicUncertainDeletion', 'GeneSymbol'],
            ['HGVS', 'GenomicUncertainDeletion'],
            ['ReferenceSequence', 'GenomicUncertainDeletion'],
            ['LocusReferenceGenomic', 'GenomicUncertainDeletion']
        ]
| StarcoderdataPython |
170656 | <reponame>ingjavierpinilla/youBot-Gazebo-Publisher
#!/usr/bin/env python
import rospy
from std_msgs.msg import Float64
import trajectory_msgs.msg as tm
from numpy import inf, zeros, ones
from geometry_msgs.msg import Twist
def moveArm():
    """Demo: move the youBot arm to a raised pose, then a lowered pose."""
    armPublisher = rospy.Publisher(
        "/arm_1/arm_controller/command", tm.JointTrajectory, queue_size=1
    )
    # NOTE(review): publishing immediately after creating the publisher can
    # drop the message before subscribers connect — confirm, or add a short
    # wait for connections.
    jointvalues = [2.95, 1.05, -2.44, 1.73, 2.95]
    msg = createArmPositionCommand(jointvalues)
    armPublisher.publish(msg)
    print("Para arriba")
    rospy.sleep(3)
    jointvalues = [0.11, 0.11, -0.11, 0.11, 0.11]
    msg = createArmPositionCommand(jointvalues)
    armPublisher.publish(msg)
    print("Para abajo")
    rospy.sleep(3)
def createArmPositionCommand(newPositions):
    """Build a JointTrajectory moving arm_joint_1..5 to *newPositions*."""
    n = len(newPositions)
    point = tm.JointTrajectoryPoint()
    point.positions = newPositions
    speed = 30.0
    point.velocities = ones(n) * speed
    point.accelerations = ones(n) * speed
    point.time_from_start = rospy.Duration(0.5)
    msg = tm.JointTrajectory()
    msg.points = [point]
    msg.joint_names = ["arm_joint_" + str(i + 1) for i in range(5)]
    msg.header.frame_id = "arm_link_0"
    msg.header.stamp = rospy.Time.now()
    return msg
def createGripperPositionCommand(newPosition):
    """Build a JointTrajectory moving both gripper fingers to *newPosition*."""
    point = tm.JointTrajectoryPoint()
    point.positions = [newPosition] * 2
    point.velocities = ones(2)
    point.accelerations = zeros(2)
    point.time_from_start = rospy.Duration(0.5)
    msg = tm.JointTrajectory()
    msg.points = [point]
    msg.joint_names = ["gripper_finger_joint_l", "gripper_finger_joint_r"]
    msg.header.frame_id = "gripper_finger_joint_l"
    msg.header.stamp = rospy.Time.now()
    return msg
def moveGripper():
    """Demo: open the gripper fully, then close it."""
    gripperPublisher = rospy.Publisher(
        "/arm_1/gripper_controller/command", tm.JointTrajectory, queue_size=1
    )
    # 0.11 is the fully-open finger position; 0.0 is fully closed.
    msg = createGripperPositionCommand(0.11)
    gripperPublisher.publish(msg)
    print("Open")
    rospy.sleep(3)
    msg = createGripperPositionCommand(0.0)
    gripperPublisher.publish(msg)
    print("Close")
    rospy.sleep(3)
def gripper():
    """Node entry point: drive the base briefly, then run the gripper demo."""
    rospy.init_node('youbot_teleop')
    pub = rospy.Publisher("/cmd_vel", Twist, queue_size=1)
    twist = Twist()
    vel = 1.5
    # Same magnitude on all linear axes and yaw; the youBot's omni base
    # accepts x/y translation plus rotation about z.
    twist.linear.x = vel
    twist.linear.y = vel
    twist.linear.z = vel
    twist.angular.x = 0
    twist.angular.y = 0
    twist.angular.z = vel
    # Publish commands to the robot
    # NOTE(review): a single publish right after advertising may be dropped
    # before subscribers connect — confirm.
    pub.publish(twist)
    moveGripper()
    rospy.spin()
if __name__ == '__main__':
    gripper()
| StarcoderdataPython |
# Definition for a binary tree node.
# class TreeNode:
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None
class Solution:
    def generateCounts(self, root):
        """Annotate each node with ``counts = [size(left), size(right)]``.

        Returns the size of the subtree rooted at *root* (0 for None).
        """
        if not root:
            return 0
        root.counts = [self.generateCounts(root.left), self.generateCounts(root.right)]
        return 1 + sum(root.counts)
    def _recursiveKthSmallest(self, root, k):
        """Return the k-th smallest value in an already-annotated subtree."""
        leftCount, rightCount = root.counts
        if k == leftCount + 1:
            return root.val
        elif k <= leftCount:
            # BUG FIX: recurse via the helper. The original called
            # kthSmallest(), re-annotating the whole subtree at every level
            # (O(n*h) instead of O(n) total).
            return self._recursiveKthSmallest(root.left, k)
        else:
            return self._recursiveKthSmallest(root.right, k - (leftCount + 1))
    def kthSmallest(self, root, k):
        """
        Return the k-th smallest value (1-indexed) in the BST *root*.

        :type root: TreeNode
        :type k: int
        :rtype: int
        """
        self.generateCounts(root)
        return self._recursiveKthSmallest(root, k)
| StarcoderdataPython |
3263542 | <gh_stars>1-10
from qittle.types.responses import hook
from .base import Base
class HookCategory(Base):
    """Payment-notifier webhook endpoints: register, get, delete, trigger test."""
    async def register(
        self,
        param: str,
        # NOTE(review): txn_type was annotated ``str`` with an ``int`` default
        # (2); the annotation is corrected to ``int`` to match the default.
        hook_type: int = 1,
        txn_type: int = 2,
        **kwargs
    ) -> hook.DescriptionModel:
        """Register a webhook delivering notifications to *param* (a URL)."""
        params = self.get_set_params(locals())
        return hook.DescriptionModel(
            **await self.api.request(
                "PUT", "payment-notifier/v1/hooks",
                params
            )
        )
    async def get(
        self,
        **kwargs,
    ) -> hook.DescriptionModel:
        """Fetch the currently active webhook's description."""
        params = self.get_set_params(locals())
        return hook.DescriptionModel(
            **await self.api.request(
                "GET", "payment-notifier/v1/hooks/active",
                params
            )
        )
    async def delete(
        self,
        hook_id: str,
        **kwargs,
    ) -> hook.ResponseModel:
        """Delete the webhook identified by *hook_id*."""
        params = self.get_set_params(locals())
        return hook.ResponseModel(
            **await self.api.request(
                "DELETE", f"payment-notifier/v1/hooks/{hook_id}",
                params
            )
        )
    async def trigger(
        self,
        **kwargs,
    ) -> hook.ResponseModel:
        """Ask the API to send a test notification to the active webhook."""
        params = self.get_set_params(locals())
        return hook.ResponseModel(
            **await self.api.request(
                "GET", "payment-notifier/v1/hooks/test",
                params
            )
        )
| StarcoderdataPython |
67319 | from backtrader import indicators
from src.analyzer.backtrader_wrapper.base_screener import BaseScreener
from src.analyzer.backtrader_wrapper.base_strategy import BaseStrategy
from src.analyzer.backtrader_wrapper.interface_algo import AlgoInterface
class SimpleMovingAverageAlgo(AlgoInterface):
    """SMA crossover rules: buy while price is above its simple moving
    average, sell when it falls below.
    """
    # maperiod: lookback window (in bars) for the moving average.
    # NOTE(review): accessed below as ``self.params.maperiod`` — relies on
    # backtrader's params metaclass turning this dict into attribute access.
    params = dict(maperiod=20)
    def qualifier(self, data, *args, **kwargs) -> dict:
        """Return the indicator(s) this algo needs, keyed by name."""
        return {
            "sma": indicators.MovingAverageSimple(data, period=self.params.maperiod)
        }
    def shouldBuy(self, data, qualifiers: dict) -> bool:
        # Price above the SMA line (backtrader line-object comparison,
        # evaluated on the current bar).
        return data > qualifiers["sma"].lines.sma
    def shouldSell(self, data, qualifiers: dict) -> bool:
        # Price below the SMA line.
        return data < qualifiers["sma"].lines.sma
class SimpleMovingAverageStrategy(BaseStrategy, SimpleMovingAverageAlgo):
    # Concrete strategy: BaseStrategy drives order execution using the SMA
    # buy/sell rules mixed in from SimpleMovingAverageAlgo.
    pass
class SimpleMovingAverageScreener(BaseScreener, SimpleMovingAverageAlgo):
    # Concrete screener: BaseScreener evaluates symbols against the same
    # SMA rules without placing orders.
    pass
| StarcoderdataPython |
3382510 | <filename>ermaket/api/queries/__init__.py
from .read import *
from .change import *
from .errors_parser import *
from .sql import *
| StarcoderdataPython |
58090 | import os
import re
import time
import shutil
from tempfile import mkdtemp
import operator
from collections.abc import Mapping
from pathlib import Path
import datetime
from .log import Handle
# Module-level logger; ``Handle`` is this package's thin wrapper around
# :mod:`logging` (see ``.log``).
logger = Handle(__name__)
# Unique sentinel marking the root invocation inside ``flatten_dict``'s
# recursive ``visit`` helper.
_FLAG_FIRST = object()
class Timewith:
    def __init__(self, name=""):
        """Timewith context manager.

        Records elapsed wall-clock time from construction, logging named
        checkpoints along the way.

        Parameters
        ----------
        name : str
            Label used in log messages.
        """
        self.name = name
        self.start = time.time()
        self.checkpoints = []  # list of (name, elapsed-seconds) tuples

    @property
    def elapsed(self):
        """Seconds elapsed since the timer was created."""
        return time.time() - self.start

    def checkpoint(self, name=""):
        """Log and record a named checkpoint at the current elapsed time."""
        elapsed = self.elapsed
        msg = "{time} {timer}: {checkpoint} in {elapsed:.3f} s.".format(
            timer=self.name,
            time=datetime.datetime.now().strftime("%H:%M:%S"),
            checkpoint=name,
            elapsed=elapsed,
        ).strip()
        logger.info(msg)
        self.checkpoints.append((name, elapsed))

    def __enter__(self):
        """Object returned on entry."""
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        """Log the final checkpoint on exit.

        BUG FIX: the original appended a second "Finished" entry after
        ``checkpoint("Finished")`` had already recorded one; the checkpoint
        is now recorded exactly once.
        """
        self.checkpoint("Finished")
def temp_path(suffix=""):
    """Return the path of a freshly created temporary directory."""
    return Path(mkdtemp(suffix=suffix))
def flatten_dict(d, climb=False, safemode=False):
    """
    Flattens a nested dictionary containing only string keys.

    This will work for dictionaries which don't have two equivalent
    keys at the same level. If you're worried about this, use safemode=True.

    Partially taken from https://stackoverflow.com/a/6043835.

    Parameters
    ----------
    climb: :class:`bool`, :code:`False`
        Whether to keep trunk or leaf-values, for items with the same key.
    safemode: :class:`bool`, :code:`False`
        Whether to keep all keys as a tuple index, to avoid issues with
        conflicts.

    Returns
    -------
    :class:`dict`
        Flattened dictionary.
    """
    # lift wraps a key into a 1-tuple; join concatenates key-tuples.
    lift = lambda x: (x,)
    join = operator.add
    results = []
    # Depth-first walk accumulating (key-path-tuple, value) pairs.
    def visit(subdict, results, partialKey):
        for k, v in subdict.items():
            # NOTE(review): sentinel comparison uses == where `is` would be
            # canonical — confirm no key ever equals _FLAG_FIRST.
            if partialKey == _FLAG_FIRST:
                newKey = lift(k)
            else:
                newKey = join(partialKey, lift(k))
            if isinstance(v, Mapping):
                visit(v, results, newKey)
            else:
                results.append((newKey, v))
    visit(d, results, _FLAG_FIRST)
    # safemode keeps the full key-path tuple; otherwise only the leaf key.
    if safemode:
        pick_key = lambda keys: keys
    else:
        pick_key = lambda keys: keys[-1]
    # Sort by path depth so later dict() insertion resolves duplicate keys
    # in a controlled order.
    sort = map(
        lambda x: x[:2],
        sorted([(pick_key(k), v, len(k)) for k, v in results], key=lambda x: x[-1]),
    )  # sorted by depth
    if not climb:
        # We go down the tree, and prioritise the trunk values
        items = sort
    else:
        # We prioritise the leaf values
        items = [i for i in sort][::-1]
    return dict(items)
def swap_item(startlist: list, pull: object, push: object):
    """
    Return a copy of a list with every occurrence of one item replaced by another.

    Parameters
    ----------
    startlist : :class:`list`
        List to replace item within.
    pull
        Item to replace in the list.
    push
        Item to add into the list.

    Returns
    -------
    list
    """
    return [push if item == pull else item for item in startlist]
def copy_file(src, dst, ext=None, permissions=None):
    """
    Copy a file from one place to another.
    Uses the full filepath including name.

    Parameters
    ----------
    src : :class:`str` | :class:`pathlib.Path`
        Source filepath.
    dst : :class:`str` | :class:`pathlib.Path`
        Destination filepath or directory.
    ext : :class:`str`, :code:`None`
        Optional file extension specification (applied to both ends).
    permissions : :class:`int`, :code:`None`
        Optional mode bits applied to the destination after copying.
    """
    source, target = Path(src), Path(dst)
    # A directory destination means "copy under the same file name".
    if target.is_dir():
        target = target / source.name
    if ext is not None:
        source = source.with_suffix(ext)
        target = target.with_suffix(ext)
    logger.debug("Copying from {} to {}".format(source, target))
    # Binary stream copy of the file contents only (metadata not copied).
    with open(str(source), "rb") as fin, open(str(target), "wb") as fout:
        shutil.copyfileobj(fin, fout)
    if permissions is not None:
        os.chmod(str(target), permissions)
def remove_tempdir(directory):
    """
    Remove a specific directory, contained files and sub-directories.

    Parameters
    ----------
    directory: str, Path
        Path to directory.
    """
    target = Path(directory)
    try:
        shutil.rmtree(str(target))
        # Sanity check that the tree is really gone (skipped under -O).
        assert not target.exists()
    except PermissionError:
        # Best-effort cleanup: a locked file (e.g. on Windows) is tolerated.
        pass
| StarcoderdataPython |
38031 | <gh_stars>0
"""
Date: 2022.04.13 10:28
Description: Omit
LastEditors: <NAME>
LastEditTime: 2022.04.13 10:28
"""
import tempfile
from pathlib import Path
from create_config_file.notes import notes_append_header
def test_notes_append_header():
    """Smoke-test notes_append_header on content with and without a '---' marker."""
    tmp_file = Path(tempfile.gettempdir()) / "file.txt"
    # Case 1: plain content, no existing front-matter marker.
    tmp_file.write_text("old_content")
    notes_append_header(tmp_file)
    print(tmp_file.read_text())
    tmp_file.unlink()
    # Case 2: content already starting with the '---' marker.
    tmp_file.write_text("---old_content")
    notes_append_header(tmp_file)
    print(tmp_file.read_text())
    tmp_file.unlink()
| StarcoderdataPython |
1704720 | <reponame>zerontech-company/pytorch-code-server
from dagster import get_dagster_logger, job, op, In
from my_mnist import *
@op
def setHyper():
    """Dagster op returning the fixed hyper-parameter dict used for training."""
    epoch = 20
    hyper = { "batch_size": 50, "num_classes": 10, "learning_rate": 0.001, "num_epochs": epoch }
    return hyper
@op(ins={'msg': In(int)})
def print_test(msg):
    """Debug op: log the given integer via the dagster logger (currently unused)."""
    logger = get_dagster_logger()
    logger.info(f"sunny dbg: {msg}")
@job
def startTrainMnist():
    """Dagster job wiring setHyper's output into doTrainMNIST.

    NOTE(review): the loop recomputes `hyper` locally (epoch = 10, 12) and then
    immediately overwrites it with the op result of setHyper(), so the
    per-iteration values are discarded -- confirm that this is intentional.
    """
    for i in range(2):
        epoch = i*2 + 10
        hyper = { "batch_size": 50, "num_classes": 10, "learning_rate": 0.001, "num_epochs": epoch }
        #hyper = [ 50, 10, 0.001, epoch ]
        hyper = setHyper()
        #print_test(100)
        #setHyperParam(hyper)
        doTrainMNIST(hyper)
| StarcoderdataPython |
3266906 | <reponame>giumas/testwheel
from __future__ import (absolute_import, division, print_function)
import pytest
import testwheel
class TestBase(object):
    """Basic package-metadata checks for testwheel."""

    def test_zero(self):
        """The package author metadata matches the expected name."""
        assert testwheel.__author__ == "giumas"
| StarcoderdataPython |
1671180 | <filename>ana/debug/polarization.py<gh_stars>10-100
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
"""
import os, logging, numpy as np
log = logging.getLogger(__name__)
from opticks.ana.base import opticks_environment
from opticks.ana.evt import Evt
deg = np.pi/180.  # degree -> radian conversion factor
if __name__ == '__main__':
    pass
    logging.basicConfig(level=logging.INFO)
    opticks_environment()
    # Tags "5"/"6" select event samples; "-<tag>" loads the Geant4 run and
    # "<tag>" the Opticks run of the same setup (see the labels below).
    # NOTE(review): tag semantics inferred from the labels -- confirm.
    spol, ppol = "5", "6"
    g = Evt(tag="-"+spol, det="rainbow", label="S G4")
    o = Evt(tag=spol, det="rainbow", label="S Op")
    # check magnitude of polarization
    # Polarisation vectors should be unit length; allow 1% tolerance.
    for e in [g,o]:
        mag = np.linalg.norm(e.rpol_(0),2,1)
        assert mag.max() < 1.01 and mag.min() > 0.99
118008 | # 面向过程的程序设计,把计算机程序视为一系列的命令集合,即一组函数的顺序执行。为了简化程序设计,面向过程把函数继续切分为子函数,
# 即把大块函数通过切割成小块函数来降低系统的复杂度。
# 面向对象编程,OOP,Object Oriented Programming,一种程序设计思想。OOP把对象作为程序的基本单元,一个对象包含了数据和操作数据的函数。
# 面向对象的程序设计把计算机程序视为一组对象的集合,而每个对象都可以接收其他对象发过来的消息,并处理这些消息,
# 计算机程序的执行就是一系列消息在各个对象之间传递。
# 所以,面向对象的设计思想是抽象出Class,根据Class创建Instance,面向对象的抽象程度又比函数要高,因为一个Class既包含数据,又包含操作数据的方法。
# 数据封装、继承和多态是面向对象的三大特点。
# 在Python中,所有数据类型都可以视为对象,当然也可以自定义对象。自定义的对象数据类型就是面向对象中的类(Class)的概念。
# 首选思考的不是程序的执行流程,而是Student这种数据类型应该被视为一个对象,这个对象拥有name和score这两个属性(Property)
# 如果要打印一个学生的成绩,首先必须创建出这个学生对应的对象,然后,给对象发一个print_score消息,让对象自己把自己的数据打印出来。
class Student(object):
    """A student object bundling a name, a score, and the method that prints them."""

    def __init__(self, name, score):
        # The first parameter of every instance method is the instance
        # itself (self); bind the two data attributes on it.
        self.name = name
        self.score = score

    def print_score(self):
        """Print this student's name and score."""
        print('%s: %s' % (self.name, self.score))
# Sending a message to the object is really just calling its bound method;
# data encapsulation means each instance prints its own data.
lisa = Student('<NAME>', 88)
lisa.print_score()
# 访问限制
class Teacher(object):
    """Demonstrates private attributes with getter/setter access and validation."""

    def __init__(self, name, age):
        # A double-underscore prefix makes the attribute private: Python
        # name-mangles it so outside code cannot reach it directly.
        self.__name = name
        self.__age = age

    def get_name(self):
        """Return the teacher's (private) name."""
        return self.__name

    def get_age(self):
        """Return the teacher's (private) age."""
        return self.__age

    def set_age(self, age):
        """Update the age; only values in [0, 100] are accepted."""
        if 0 <= age <= 100:
            self.__age = age
        else:
            raise ValueError('bad age')
# 特殊变量 ,__xxx__ , 双下划线且以双下划线结尾,可以直接访问的
# 私有变量 ,__xxx , 双下划线,可以直接访问的, 内部属性不被外部访问. 因Python解释器对外把__name变量改成了_Student__name,
# 故仍然可以通过_Student__name来访问__name变量。 Python本身没有任何机制阻止你干坏事,一切全靠自觉
# 继承可以把父类的所有功能都直接拿过来,子类只需要新增自己特有的方法,也可以把父类不适合的方法覆盖重写。
# 多态,一种接口,多种实现,实现代码重用,接口重用;
# 动态语言的“鸭子类型”,它并不要求严格的继承体系,一个对象只要“看起来像鸭子,走起路来像鸭子”,那它就可以被看做是鸭子。
# Getting information about objects
import types  # fix: the original used `types` below without importing it (NameError)
# type(): determine an object's type
type(123) # <class 'int'>
type(abs) # <class 'builtin_function_or_method'>
# types module: test whether an object is a (built-in) function
type(abs)==types.BuiltinFunctionType # True
# isinstance(): the preferred type check -- it also matches subclasses and
# accepts a tuple of candidate types
isinstance('a', str) # True
isinstance((1, 2, 3), (list, tuple)) # True
# dir(): list all attributes and methods of an object, as a list of strings
dir('ABC')
# ['__add__', '__class__', '__contains__', ... , 'translate', 'upper', 'zfill']
# getattr()/setattr()/hasattr(): manipulate an object's state by name
hasattr(Student, 'y') # does it have an attribute 'y'? # False
# 类属性和实例属性
# 实例属性属于各个实例所有,互不干扰;类属性属于类所有,所有实例共享一个属性;不要对实例属性和类属性使用相同的名字,否则将产生难以发现的错误。
class School(object):
    """Illustrates the difference between class attributes and instance attributes."""

    # Class attribute: owned by the class and shared by all instances
    # (visible on an instance unless shadowed by an instance attribute).
    name = 'Peking University'

    def __init__(self, location):
        # Instance attribute: bound on the instance itself, so each
        # School object carries its own value.
        self.location = location
| StarcoderdataPython |
30366 | <filename>spraycharles/utils/notify.py<gh_stars>0
import pymsteams
from discord_webhook import DiscordWebhook
from notifiers import get_notifier
def slack(webhook, host):
    """Post a credentials-guessed alert to Slack via the given webhook URL."""
    notifier = get_notifier("slack")
    notifier.notify(
        message=f"Credentials guessed for host: {host}", webhook_url=webhook
    )
def teams(webhook, host):
    """Post a credentials-guessed alert to a Microsoft Teams channel webhook."""
    card = pymsteams.connectorcard(webhook)
    card.text(f"Credentials guessed for host: {host}")
    card.send()
def discord(webhook, host):
    """Post a credentials-guessed alert to Discord via the given webhook URL.

    Bug fix: the original called ``webhook.execute()`` on the URL string
    parameter (an AttributeError at runtime); the DiscordWebhook object
    itself must be executed.
    """
    notify = DiscordWebhook(
        url=webhook, content=f"Credentials guessed for host: {host}"
    )
    response = notify.execute()
    return response
| StarcoderdataPython |
3307244 | <reponame>IMULMUL/etl-parser
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-Kernel-IO
GUID : abf1f586-2e50-4ba8-928d-49044e6f0db7
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("abf1f586-2e50-4ba8-928d-49044e6f0db7"), event_id=1, version=0)
class Microsoft_Windows_Kernel_IO_1_0(Etw):
    """Construct field layout for event id=1 v0 of Microsoft-Windows-Kernel-IO."""
    pattern = Struct(
        "VolumeGuid" / Guid,
        "VolumeNameLength" / Int16ul,
        "VolumeName" / Bytes(lambda this: this.VolumeNameLength)
    )
@declare(guid=guid("abf1f586-2e50-4ba8-928d-49044e6f0db7"), event_id=2, version=0)
class Microsoft_Windows_Kernel_IO_2_0(Etw):
    """Construct field layout for event id=2 v0 (same shape as event 1)."""
    pattern = Struct(
        "VolumeGuid" / Guid,
        "VolumeNameLength" / Int16ul,
        "VolumeName" / Bytes(lambda this: this.VolumeNameLength)
    )
@declare(guid=guid("abf1f586-2e50-4ba8-928d-49044e6f0db7"), event_id=3, version=0)
class Microsoft_Windows_Kernel_IO_3_0(Etw):
    """Construct field layout for event id=3 v0: volume fields plus an Error code."""
    pattern = Struct(
        "VolumeGuid" / Guid,
        "VolumeNameLength" / Int16ul,
        "VolumeName" / Bytes(lambda this: this.VolumeNameLength),
        "Error" / Int32ul
    )
@declare(guid=guid("abf1f586-2e50-4ba8-928d-49044e6f0db7"), event_id=1205, version=0)
class Microsoft_Windows_Kernel_IO_1205_0(Etw):
    """Construct field layout for event id=1205 v0: length-prefixed filter name."""
    pattern = Struct(
        "FilterNameLength" / Int16ul,
        "FilterName" / Bytes(lambda this: this.FilterNameLength)
    )
@declare(guid=guid("abf1f586-2e50-4ba8-928d-49044e6f0db7"), event_id=1206, version=0)
class Microsoft_Windows_Kernel_IO_1206_0(Etw):
    """Construct field layout for event id=1206 v0: filter name plus volume name."""
    pattern = Struct(
        "FilterNameLength" / Int16ul,
        "FilterName" / Bytes(lambda this: this.FilterNameLength),
        "VolumeNameLength" / Int16ul,
        "VolumeName" / Bytes(lambda this: this.VolumeNameLength)
    )
@declare(guid=guid("abf1f586-2e50-4ba8-928d-49044e6f0db7"), event_id=1207, version=0)
class Microsoft_Windows_Kernel_IO_1207_0(Etw):
    """Construct field layout for event id=1207 v0: a single failure-reason code."""
    pattern = Struct(
        "DumpEncryptionFailureReason" / Int32ul
    )
| StarcoderdataPython |
3346155 | import os
import uuid
from flask import Flask,render_template,request, make_response
from utils.run_model import getBaseMap
app=Flask(__name__)
@app.route("/")
def index():
    """Serve the landing page containing the image-upload form."""
    return render_template("upload.html")
@app.route("/validate")
def validate():
    """Serve the validation page."""
    return render_template("validate.html")
@app.route('/loading', methods=['POST'])
def loading():
    """Store the uploaded image as ./static/input.jpg and show the loading page."""
    target='./static'
    if not os.path.isdir(target):
        os.mkdir(target)
    else :
        # remove older images
        # (clears .jpg/.png leftovers from previous runs so stale results
        # are never served)
        for fname in os.listdir(target):
            if fname.endswith('.jpg') or fname.endswith('.png'):
                os.remove(os.path.join(target, fname))
    for f in request.files.getlist("file"):
        # NOTE(review): every uploaded file is saved under the same fixed
        # name, so only the last file of a multi-file upload survives --
        # confirm single-file uploads are intended.
        filename="input.jpg"
        dest=os.path.join(target, filename)
        f.save(dest)
    return render_template('loading.html')
@app.route("/upload", methods=['GET', 'POST'])
def upload():
    """GET: show the completion page for the last generated base map.
    POST: run the model (getBaseMap) and record the output file name.
    """
    if request.method=='GET':
        filename = None
        # file used for storing base map name between requests
        # NOTE(review): this read crashes if no POST ever wrote the file --
        # confirm the POST always precedes the GET in the UI flow.
        with open("./static/filename.txt", 'r') as f:
            filename = f.read()
        return render_template("complete.html" , image_name=filename)
    if request.method=='POST':
        # save basemap to static as map_token.png
        # token is a 6 digit unique id for each map output (browser cache solution)
        token = uuid.uuid4().hex[:6]
        resp = getBaseMap(token)
        if type(resp) == str:
            # A string result signals failure (inferred from this branch --
            # getBaseMap is defined in utils.run_model; confirm).
            return resp
        filename = "map_{}.png".format(token)
        # file used for storing base map name between requests
        with open("./static/filename.txt", 'w') as f:
            f.write(filename)
        return make_response('POST request successful', 200)
if __name__=="__main__":
    # debug=True enables the reloader and debugger -- development use only.
    app.run(debug=True)
| StarcoderdataPython |
1780036 | import pygame
import time
from pygame.constants import MOUSEBUTTONDOWN
# board = [ [ 3, 1, 6, 5, 7, 8, 4, 9, 2 ],
# [ 5, 2, 9, 1, 3, 4, 7, 6, 8 ],
# [ 4, 8, 7, 6, 2, 9, 5, 3, 1 ],
# [ 2, 6, 3, 0, 1, 5, 9, 8, 7 ],
# [ 9, 7, 4, 8, 6, 0, 1, 2, 5 ],
# [ 8, 5, 1, 7, 9, 2, 6, 4, 3 ],
# [ 1, 3, 8, 0, 4, 7, 2, 0, 6 ],
# [ 6, 9, 2, 3, 5, 1, 8, 7, 4 ],
# [ 7, 4, 5, 0, 8, 6, 3, 1, 0 ] ]
# board = [ [ 3, 1, 6, 5, 7, 8, 4, 9, 2 ],
# [ 5, 2, 9, 1, 3, 4, 7, 6, 8 ],
# [ 4, 8, 7, 6, 2, 9, 5, 3, 1 ],
# [ 2, 6, 3, 0, 1, 5, 9, 8, 7 ],
# [ 9, 7, 4, 8, 6, 0, 1, 2, 5 ],
# [ 8, 5, 1, 7, 9, 2, 6, 4, 3 ],
# [ 1, 3, 8, 0, 4, 7, 2, 0, 6 ],
# [ 6, 9, 2, 3, 5, 1, 8, 7, 4 ],
# [ 7, 4, 5, 0, 8, 6, 3, 1, 0 ] ]
# All colors used
white = (255, 255, 255)
yellow = (255, 255, 102)
black = (0, 0, 0)
red = (213, 50, 80)
green = (0, 255, 0)
green2 = (92,182,107)
dark_green = (4,75,20)
blue = (50, 153, 213)
pygame.init()
dis_width = 450
dis_height = 500
strike_img = pygame.image.load(r'./strike.png') # Snake's head image
dis = pygame.display.set_mode((dis_width,dis_height))
pygame.display.set_caption('Sudoku by Nassos')
myFont = pygame.font.SysFont("Arial", 25, True)
clock = pygame.time.Clock()
solved_board = [ [ 3, 1, 6, 5, 7, 8, 4, 9, 2],
[ 5, 2, 9, 1, 3, 4, 7, 6, 8 ],
[ 4, 8, 7, 6, 2, 9, 5, 3, 1 ],
[ 2, 6, 3, 4, 1, 5, 9, 8, 7 ],
[ 9, 7, 4, 8, 6, 3, 1, 2, 5 ],
[ 8, 5, 1, 7, 9, 2, 6, 4, 3 ],
[ 1, 3, 8, 9, 4, 7, 2, 5, 6 ],
[ 6, 9, 2, 3, 5, 1, 8, 7, 4 ],
[ 7, 4, 5, 2, 8, 6, 3, 1, 9 ]]
# board = [ [ 3, 1, 6, 5, 7, 8, 4, 9, 2 ],
# [ 5, 2, 9, 1, 3, 4, 7, 6, 8 ],
# [ 4, 8, 7, 6, 2, 9, 5, 3, 1 ],
# [ 2, 6, 3, 0, 1, 5, 9, 8, 7 ],
# [ 9, 7, 4, 8, 6, 0, 1, 2, 5 ],
# [ 8, 5, 1, 7, 9, 2, 6, 4, 3 ],
# [ 1, 3, 8, 0, 4, 7, 2, 0, 6 ],
# [ 6, 9, 2, 3, 5, 1, 8, 7, 4 ],
# [ 7, 4, 5, 0, 8, 6, 3, 1, 0 ] ]
# board2 = [ [ 3, 1, 6, 5, 7, 8, 4, 9, 2 ],
# [ 5, 2, 9, 1, 3, 4, 7, 6, 8 ],
# [ 4, 8, 7, 6, 2, 9, 5, 3, 1 ],
# [ 2, 6, 3, 0, 1, 5, 9, 8, 7 ],
# [ 9, 7, 4, 8, 6, 0, 1, 2, 5 ],
# [ 8, 5, 1, 7, 9, 2, 6, 4, 3 ],
# [ 1, 3, 8, 0, 4, 7, 2, 0, 6 ],
# [ 6, 9, 2, 3, 5, 1, 8, 7, 4 ],
# [ 7, 4, 5, 0, 8, 6, 3, 1, 0 ] ]
def backtrack():
    """Brute-force attempt to fill the global `board` in place.

    NOTE(review): this is not a correct backtracking solver -- failed
    candidate assignments are never undone, `isSolved()` is the only exit
    condition, and the function recurses unconditionally at the end, so it
    can loop/recurse indefinitely on unsolved boards. All call sites are
    commented out; kept unchanged as legacy debug code (the game uses the
    precomputed `solved_board` instead).
    """
    for i in range(9):
        for j in range(9):
            if board[i][j] == 0: # It must be solved
                flag = False
                for z in range(9):
                    if isSafe(i,j,z+1) == True:
                        board[i][j] = z + 1;
                        flag = True
                        if isSolved() == True:
                            print("EEEEEEEEE")
                            return True
                        # backtrack()
                if flag == False: # number is 0 and it didnt changed
                    print("--------------")
                    for k in range(9):
                        print(board[k])
    if isSolved() == True:
        print("EEEEEEEEE")
        return True
    backtrack()
# def backtrack2():
# for i in range(9):
# for j in range(9):
# if ((board[i][j] == 0) or (board[i][j] == 0 and (isSafe(i,j, board[i][j]) == False))): # It must be solved
# print("gia row " , i , " col " ,j , " num: " , board[i][j])
# for z in range(9):
# if isSafe(i,j,z+1) == True:
# if i == 8 and j == 8:
# if isSolved == True:
# print("EEEEEEEEE")
# return True
# else:
# flag = True
# # backtrack()
# backtrack()
def isSolved(boole = False):
    """Return True when every cell of the global board is non-zero and legal.

    The `boole` parameter is unused; it is kept only for call-site
    compatibility. Prints the first offending cell as debug output.
    """
    for i in range(9):
        for j in range(9):
            if isSafe(i, j, board[i][j]) == False or board[i][j] == 0:
                print("row " , i , " col " ,j , " num: " , board[i][j])
                return False
    return True
def isSafe(row, col, num, grid=None):
    """Check whether it is legal for `num` to occupy cell (`row`, `col`).

    A placement is legal when `num` does not appear *elsewhere* in the same
    row, the same column, or the same 3x3 sub-grid; the cell itself is
    excluded from the checks, so an already-placed number validates as safe.

    Parameters
    ----------
    row, col : int
        Cell coordinates in 0-8.
    num : int
        Candidate digit.
    grid : list[list[int]], optional
        Board to check against. Defaults to the module-level `board`,
        keeping the original three-argument call form working unchanged.

    Returns
    -------
    bool
    """
    if grid is None:
        grid = board
    # Same number elsewhere in the row?
    for x in range(9):
        if grid[row][x] == num and x != col:
            return False
    # Same number elsewhere in the column?
    for x in range(9):
        if grid[x][col] == num and x != row:
            return False
    # Same number elsewhere in the enclosing 3x3 sub-grid?
    startRow = row - row % 3
    startCol = col - col % 3
    for i in range(3):
        for j in range(3):
            if grid[i + startRow][j + startCol] == num and i + startRow != row and j + startCol != col:
                return False
    return True
def drawGrid():
    """Draw the sudoku grid: thick 3x3 box separators plus thin cell borders."""
    cell = 50  # pixel size of one sudoku cell

    # Thick horizontal separators between the three box rows.
    for y in (150, 300, 450):
        pygame.draw.line(dis, black, (0, y), (dis_width, y), 4)

    # Thick vertical separators between the three box columns
    # (stopping above the bottom strike/timer strip).
    for x in (150, 300):
        pygame.draw.line(dis, black, (x, 0), (x, dis_height - 50), 4)

    # Thin one-pixel border around every individual cell.
    for x in range(0, dis_width, cell):
        for y in range(0, dis_height - 50, cell):
            pygame.draw.rect(dis, black, pygame.Rect(x, y, cell, cell), 1)
def displayBoard(tempBoard, strikes):
    """Redraw the full frame: background, grid, numbers and strike icons."""
    dis.fill(white)
    drawGrid()
    displayNumbers(tempBoard)
    strike_width = 5
    # Print the strikes
    # Each 30px strike icon is drawn in the bottom strip, 35px apart.
    for x in range(strikes):
        dis.blit(pygame.transform.scale(strike_img, (30, 30)), (strike_width, dis_height - 35))
        strike_width += 35
    pygame.display.update()
def gameLoop():
    """Main game loop: cell selection, digit entry, strikes, and the timer.

    NOTE(review): `posx`/`posy` are first assigned inside the mouse-click
    branch, so pressing a digit key before ever clicking a cell raises
    NameError. Also, pressing C on game-over restarts by recursing into
    gameLoop(). Both kept unchanged.
    """
    strikes = 0
    game_over = False
    game_close = False
    startTime = time.time()
    # Blank out the bottom strip (timer/strike area) before the first frame.
    white_rect = pygame.Rect(0, 455, dis_width, 455)
    pygame.draw.rect(dis, white, white_rect)
    displayBoard(board, strikes)
    while not game_over:
        # Repaint the timer corner every frame with the elapsed seconds.
        small_white_rect = pygame.Rect(dis_width - 100, 455, dis_width, 455)
        pygame.draw.rect(dis, white, small_white_rect)
        randNumLabel = myFont.render("Time: " + str(time.time() - startTime), 1, black)
        dis.blit(randNumLabel, (340, 465))
        pygame.display.update()
        while game_close == True:
            # Game-over sub-loop: wait for Q (quit) or C (play again).
            # message("You Lost! Press C-Play Again or Q-Quit", red)
            # Your_score(Length_of_snake - 1)
            # pygame.display.update()
            for event in pygame.event.get():
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_q:
                        game_over = True
                        game_close = False
                    if event.key == pygame.K_c:
                        gameLoop()
        for event in pygame.event.get():
            pos = pygame.mouse.get_pos()
            if event.type == pygame.QUIT:
                game_over = True
            if event.type == pygame.MOUSEBUTTONDOWN:
                # Snap the click position to the 50px cell grid.
                posx = pos[0]
                posy = pos[1]
                x_block = (pos[0] // 50) * 50
                y_block = (pos[1] // 50) * 50
                if y_block > 400:
                    # Click landed below the board (timer/strike strip): ignore.
                    continue
                displayBoard(board, strikes)
                pygame.draw.rect(dis, red, (x_block, y_block, 50, 50), 4) # Draw the small red rectangle
                # pygame.display.update()
                print("to x einai: ",x_block , " kai to y einai: " , y_block)
            if event.type == pygame.KEYDOWN: # Get the number pressed
                num = -10
                if event.key == pygame.K_1:
                    num = 1
                if event.key == pygame.K_2:
                    num = 2
                if event.key == pygame.K_3:
                    num = 3
                if event.key == pygame.K_4:
                    num = 4
                if event.key == pygame.K_5:
                    num = 5
                if event.key == pygame.K_6:
                    num = 6
                if event.key == pygame.K_7:
                    num = 7
                if event.key == pygame.K_8:
                    num = 8
                if event.key == pygame.K_9:
                    num = 9
                if event.key == pygame.K_SPACE: # Give the answer
                    print("PATHSES SPACE \n")
                    displayBoard(solved_board, strikes)
                    # game_over = True
                    continue
                if num == -10:
                    print("\nPlease enter a number [1-9] \n")
                    continue
                # Convert the last-clicked pixel position to board indices.
                solved_board_i = posx // 50;
                solved_board_j = posy // 50;
                # If the number is correct
                if num == solved_board[solved_board_j][solved_board_i]:
                    board[solved_board_j][solved_board_i] = num # Enter the number
                    displayBoard(board, strikes)
                    if isSolved():
                        print("\nCongrats you solved the board!\n")
                else: # Get a strike
                    strikes += 1
                    displayBoard(board, strikes)
                    if strikes == 3:
                        game_over = True
def sameBoards(current=None, solved=None):
    """Return True when the two boards hold identical values.

    Parameters
    ----------
    current, solved : list[list[int]], optional
        Boards to compare; they default to the module-level `board` and
        `solved_board`, so the original zero-argument call keeps working.

    Returns
    -------
    bool
    """
    if current is None:
        current = board
    if solved is None:
        solved = solved_board
    # Nested-list equality compares element-wise, matching the original
    # cell-by-cell loop while also handling boards of any shape.
    return current == solved
# ---------------------------------------------------------------- MAIN ---------------------------------------------------------------- #
def displayNumbers(tempBoard):
    """Render every non-zero cell of tempBoard at its position in the grid."""
    for row in range(9):
        for col in range(9):
            value = tempBoard[row][col]
            if value != 0:
                label = myFont.render(str(value), 1, black)
                # Each 50px cell gets its digit at a fixed offset from the
                # cell's top-left corner: 19px right, 12px down.
                dis.blit(label, (19 + 50 * col, 12 + 50 * row))
# backtrack()
print( "----------------------------------------------------------------" )
# Open the file and read the input.txt
with open('input.txt') as f:
    lines = f.readlines()
# Initialize the board
# ([[]] * 9 creates nine references to one list, but each slot is
# immediately replaced below, so the aliasing is harmless here.)
board = [[]] * 9
for i in range(9):
    board[i] = lines[i].split()
# Convert everything in the list into integers
for i in range(9):
    for j in range(9):
        board[i][j] = int(board[i][j])
gameLoop()
134918 | <gh_stars>0
# For each test case, derive the "floor + unit" room label of visitor N in a
# building with H floors (column-major numbering).
for _ in range(int(input())):
    H, W, N = map(int, input().split())

    # Floor: N modulo H, except an exact multiple of H means the top floor.
    if N % H == 0:
        floor = H
    else:
        floor = N % H

    # Unit number: ceiling of N / H (integer ceiling division).
    unit = (N + H - 1) // H

    # Units below 10 are zero-padded to two digits (floor 3, unit 2 -> "302").
    if unit < 10:
        print(floor, "0", unit, sep="")
    else:
        print(floor, unit, sep="")
127762 | #!/usr/bin/env python
# encoding=utf-8
import ConfigParser
import sys, os
sys.path.append("..")
PROC_DIR = os.path.abspath('..')
class LoadConfig:
    """Load conf/default.ini relative to the project root and expose accessors.

    NOTE(review): this module is Python 2 code (print statement,
    `except IOError, e` syntax); the code is kept byte-for-byte.
    """
    cf = ''  # placeholder; replaced by a ConfigParser instance in __init__
    filepath = PROC_DIR + "/conf/default.ini"
    def __init__(self):
        # Probe the config file up front so a missing file produces a clear
        # message and exit instead of a ConfigParser traceback later.
        try:
            f = open(self.filepath, 'r')
        except IOError, e:
            print "\"%s\" Config file not found." % (self.filepath)
            sys.exit(1)
        f.close()
        self.cf = ConfigParser.ConfigParser()
        self.cf.read(self.filepath)
    def getSectionValue(self, section, key):
        # Value of section/key with quotes and all spaces stripped.
        return self.getFormat(self.cf.get(section, key))
    def getSectionOptions(self, section):
        # All option names defined under `section`.
        return self.cf.options(section)
    def getSectionItems(self, section):
        # (name, value) pairs for every option under `section`.
        return self.cf.items(section)
    def getFormat(self, string):
        # Strip surrounding single/double quotes and remove every space.
        return string.strip("'").strip('"').replace(" ","")
4838012 | import os
import pytest
from db_transfer.transfer import Transfer, sent_env
def yaml_transfer():
    """Build a Transfer subclass bound to ./test_yaml.yaml and return an instance.

    The environment variable set here is how `sent_env` resolves the path of
    the backing YAML file for the 'yaml' adapter.
    """
    os.environ['yaml_file_path'] = './test_yaml.yaml'
    @sent_env('yaml', 'FILE_LOCAL', 'yaml_file_path')
    class TestHandlerYaml(Transfer):
        pass
    yaml_transfer = TestHandlerYaml(namespace='namespace_1', adapter_name='yaml')
    return yaml_transfer
@pytest.fixture()
def yaml_transfer_write():
    # Fresh handler used as the writer side of each round-trip test.
    return yaml_transfer()
@pytest.fixture()
def yaml_transfer_read():
    # Independent handler used as the reader side, to prove the data
    # actually went through the YAML file rather than in-memory state.
    return yaml_transfer()
def test_yaml_string(yaml_transfer_write, yaml_transfer_read):
    """Flat and ':'-nested string values round-trip through the YAML file."""
    with yaml_transfer_write:
        yaml_transfer_write['key_1'] = 'value'
        yaml_transfer_write['key_2:key_3'] = 'value'
        yaml_transfer_write['key_4'] = 'value'
        yaml_transfer_write['key_2:key_5'] = 'value'
    assert str(yaml_transfer_read['key_1']) == 'value'
    assert str(yaml_transfer_read['key_2:key_3']) == 'value'
    assert str(yaml_transfer_read['key_4']) == 'value'
    assert str(yaml_transfer_read['key_2:key_5']) == 'value'
def test_yaml_list(yaml_transfer_write, yaml_transfer_read):
    """Lists, nested lists, and lists of dicts round-trip through the YAML file."""
    with yaml_transfer_write:
        yaml_transfer_write['key_1:key_2'] = ['list_element_1', 'list_element_2']
        yaml_transfer_write['key_3:key_4'] = [['list_element_1', 'list_element_2']]
        yaml_transfer_write['key_5'] = [{'key': 'value', 'foo': 'bar'}, {'key': 'value'}]
    assert list(yaml_transfer_read['key_1:key_2']) == ['list_element_1', 'list_element_2']
    assert list(yaml_transfer_read['key_3:key_4']) == [['list_element_1', 'list_element_2']]
    assert list(yaml_transfer_read['key_5']) == [{'key': 'value', 'foo': 'bar'}, {'key': 'value'}]
def test_yaml_set(yaml_transfer_write, yaml_transfer_read):
    """A set value written through the handler reads back with the same members."""
    with yaml_transfer_write:
        yaml_transfer_write['key_1:key_2'] = set(['list_element_1', 'list_element_2'])
    assert set(yaml_transfer_read['key_1:key_2']) == {'list_element_1', 'list_element_2'}
def test_yaml_dict(yaml_transfer_write, yaml_transfer_read):
    """A nested dict round-trips whole, by sub-key, and via .items()."""
    test_dict = {'foo': 'bar', 'doo': {'goo': 'gar'}, 'zoo': [1, 2, 3, {'foo': 'bar'}]}
    with yaml_transfer_write:
        yaml_transfer_write['hash_key'] = test_dict
    assert yaml_transfer_read['hash_key'] == test_dict
    assert yaml_transfer_read['hash_key']['foo'] == test_dict['foo']
    assert yaml_transfer_read['hash_key']['doo'] == test_dict['doo']
    assert yaml_transfer_read['hash_key']['zoo'] == test_dict['zoo']
    for key, value in yaml_transfer_read['hash_key'].items():
        assert test_dict[key] == value
def test_yaml_iterator(yaml_transfer_write, yaml_transfer_read):
    """Stored dict values support iteration via iter() over .items()."""
    test_dict = {'foo': 'bar', 'doo': {'goo': 'gar'}, 'zoo': [1, 2, 3, {'foo': 'bar'}]}
    with yaml_transfer_write:
        yaml_transfer_write['hash_key'] = test_dict
    for key, value in iter(yaml_transfer_read['hash_key'].items()):
        assert test_dict[key] == value
def test_yaml_delete(yaml_transfer_write, yaml_transfer_read):
    """Deleting a key through the writer makes it read back as None."""
    with yaml_transfer_write:
        yaml_transfer_write['some_key_1'] = 'some_value'
    assert str(yaml_transfer_read['some_key_1']) == 'some_value'
    with yaml_transfer_write:
        del yaml_transfer_write['some_key_1']
    assert yaml_transfer_read['some_key_1'] is None
| StarcoderdataPython |
4837573 | <gh_stars>0
import argparse
import logging
import os
import sys
from collections import OrderedDict
from os.path import join, splitext
from autoanalysis.processmodules.imagecrop.TIFFImageCropper import TIFFImageCropper
from autoanalysis.processmodules.imagecrop.BioformatsImageReader import BioformatsImageReader
class SlideCropperAPI(object):
    """
    Main Class for using SlideCropper functionality. All methods are class method based.

    Validates the input .ims file and the output directory on construction,
    holds the crop configuration in ``self.cfg``, and performs the crop in
    :meth:`run`.
    """
    def __init__(self, datafile, outputdir):
        """Validate *datafile* (readable .ims) and *outputdir* (writable).

        Raises TypeError for unsupported file types and IOError when either
        path is not accessible.
        """
        # Set config values
        self.cfg = self.getConfigurables()
        self.status = 0  # 1 while run() is executing, 0 otherwise
        self.imgfile = None
        # Load data
        if os.access(datafile, os.R_OK):
            ext_check = self.get_extension(datafile)
            if ext_check.lower() != ".ims":
                raise TypeError("{} is currently not a supported file type".format(ext_check))
            self.imgfile = datafile
            msg = "Image file loaded from %s" % self.imgfile
            print(msg)
            logging.info(msg)
        else:
            raise IOError('Unable to access datafile: {0}'.format(datafile))
        # Output
        if os.access(outputdir, os.W_OK):
            self.outputdir = outputdir
        else:
            raise IOError('Unable to write to output directory: {0}'.format(outputdir))

    def get_extension(self, file_path):
        """
        :return: The extension of the inputted file (including the dot).
        """
        path_split = splitext(file_path)
        return path_split[1]

    def getConfigurables(self):
        '''
        List of configurable parameters in order with defaults
        :return: OrderedDict of parameter name -> default value
        '''
        cfg = OrderedDict()
        cfg['BORDER_FACTOR'] = 2  # %of pixels for border
        # cfg['IMAGE_TYPE'] = '.ims'  # File type of original
        cfg['CROPPED_IMAGE_FILES'] = 'cropped'  # output directory
        cfg['MAX_MEMORY'] = 80  # % of memory to quit
        cfg['LIGHT_BG_THRESHOLD'] = 'auto'
        cfg['DARK_BG_THRESHOLD'] = 'auto'
        cfg['OFFSET'] = 0  # range from 0-2 smaller is less shift
        cfg['RESOLUTION'] = 'High'  # 'High', 'Low', or 'Both'
        return cfg

    def setConfigurables(self, cfg):
        '''
        Merge any variables set externally over the defaults.
        :param cfg: mapping of parameter name -> value
        :return:
        '''
        if self.cfg is None:
            self.cfg = self.getConfigurables()
        for cf in cfg.keys():
            self.cfg[cf] = cfg[cf]
        logging.debug("SlideCropperAPI:Config loaded")

    def isRunning(self):
        """Return True while run() is in progress."""
        return self.status == 1

    def run(self):
        """Crop the loaded image into separate section images in outputdir.

        Raises ValueError if no image was loaded. ``self.status`` is set to 1
        for the duration of the run and always reset to 0 on exit.
        """
        self.status = 1
        # Cleanup: the original wrapped this in `except Exception as e:
        # raise e`, which was redundant (exceptions propagate anyway) and
        # obscured the raise site; try/finally alone gives the same behavior.
        try:
            if self.imgfile is not None:
                # Load to Image Cropper
                border_factor = int(self.cfg['BORDER_FACTOR'])
                memmax = int(self.cfg['MAX_MEMORY'])
                lightbg = self.cfg['LIGHT_BG_THRESHOLD']
                darkbg = self.cfg['DARK_BG_THRESHOLD']
                offset = float(self.cfg['OFFSET'])
                resolution = self.cfg['RESOLUTION']
                mim = BioformatsImageReader(self.imgfile, self.outputdir)
                mim.make_metadata()
                # get resolution in px/cm for all resolution levels
                xyres = mim.get_xyres()
                tic = TIFFImageCropper(self.imgfile, border_factor, self.outputdir, memmax, lightbg, darkbg, offset, resolution, xyres)
                pid_list = tic.crop_input_images()
                tic.image.close_file()
                msg = 'Run: cropping done - new images in %s [%d pages]' % (self.outputdir, pid_list)
                logging.info(msg)
                print(msg)
            else:
                raise ValueError('Run failed: Image not loaded')
        finally:
            self.status = 0
            logging.info("Run finished")
"""
Testing Methods for API.
"""
def create_parser():
    """
    Create commandline parser
    :return: configured argparse.ArgumentParser
    """
    parser = argparse.ArgumentParser(prog=sys.argv[0],
                                     description='''\
            Crops serial section images in large image files into separate images
            ''')
    # (flag, help text, default) triples, registered in the original order
    # so the generated --help output is unchanged.
    options = [
        ('--datafile', 'Data file', "Matisse 1~B.ims"),
        ('--outputdir', 'Output directory', "C:\\Users\\uqathom9\\Documents\\Microscopy\\BatchCrop"),
        ('--inputdir', 'Input directory', "C:\\Users\\uqathom9\\Desktop\Batchcrop"),
        ('--imagetype', 'Type of images to processed', '.ims'),
    ]
    for flag, help_text, default in options:
        parser.add_argument(flag, action='store', help=help_text, default=default)
    return parser
####################################################################################################################
if __name__ == "__main__":
    # Manual smoke test: crop the sample slide, then browse the cropped
    # TIFFs in the wx-based ImageViewer.
    from autoanalysis.gui.ImageViewer import ImageViewer
    from glob import iglob
    import wx
    from os.path import basename, splitext
    parser = create_parser()
    args = parser.parse_args()
    slidecropper = SlideCropperAPI(join(args.inputdir, args.datafile), args.outputdir)
    slidecropper.run()
    # check output with ImageViewer
    # (cropped images land in <outputdir>/<datafile stem>/*.tiff)
    imgapp = wx.App()
    imglist = [x for x in iglob(join(args.outputdir, splitext(basename(args.datafile))[0], "*.tiff"))]
    frame = ImageViewer(imglist)
    imgapp.MainLoop()
| StarcoderdataPython |
1704435 | s = None
def Oracle_Connect():
    """Open a TCP connection to the oracle server, storing it in module global `s`.

    Returns 0 on success, -1 on connection failure.
    NOTE(review): Python 2 code (print statements); kept byte-for-byte.
    """
    import socket
    global s
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.connect(('172.16.31.10', 80))
    except socket.error as e:
        print e
        return -1
    print "Connected to server successfully."
    return 0
def Oracle_Disconnect():
    """Close the global oracle connection; return 0 on success, -1 if never opened."""
    if not s:
        print "[WARNING]: You haven't connected to the server yet."
        return -1
    s.close()
    print "Connection closed successfully."
    return 0
# Packet Structure: < num_blocks(1) || ciphertext(16*num_blocks) || null-terminator(1) >
def Oracle_Send(ctext, num_blocks):
    """Frame and send ciphertext bytes to the oracle; return its integer verdict.

    `ctext` is a list of byte values; returns -1 when not connected.
    """
    if not s:
        print "[WARNING]: You haven't connected to the server yet."
        return -1
    # Copy first so the caller's list is not mutated by the framing below.
    msg = ctext[:]
    msg.insert(0, num_blocks)
    msg.append(0)
    s.send(bytearray(msg))
    recvbit = s.recv(2)
    try:
        return int(recvbit)
    except ValueError as e:
        # Fallback: parse only the first received byte when the full reply
        # is not a clean integer. NOTE(review): protocol detail inferred
        # from this handler -- confirm against the server.
        return int(recvbit[0])
1683542 | <reponame>psbsgic/rabbitai
import logging
from typing import Optional
from flask import flash, request, Response
from flask_appbuilder import expose
from flask_appbuilder.security.decorators import has_access_api
from werkzeug.utils import redirect
from rabbitai import db, event_logger
from rabbitai.models import core as models
from rabbitai.typing import FlaskResponse
from rabbitai.views.base import BaseRabbitaiView
logger = logging.getLogger(__name__)
class R(BaseRabbitaiView):
    """used for short urls"""
    @staticmethod
    def _validate_url(url: Optional[str] = None) -> bool:
        # Only scheme-relative in-app paths are redirect targets; anything
        # else is rejected (prevents open-redirect abuse of /r/<id>).
        if url and (
            url.startswith("//rabbitai/dashboard/")
            or url.startswith("//rabbitai/explore/")
        ):
            return True
        return False
    @event_logger.log_this
    @expose("/<int:url_id>")
    def index(self, url_id: int) -> FlaskResponse:
        # Resolve a short-url id back to its stored target and redirect.
        url = db.session.query(models.Url).get(url_id)
        if url and url.url:
            explore_url = "//rabbitai/explore/?"
            if url.url.startswith(explore_url):
                # Explore targets are rewritten to reference the short-url id
                # (r=<id>) rather than replaying the stored query string.
                explore_url += f"r={url_id}"
                return redirect(explore_url[1:])
            if self._validate_url(url.url):
                # Drop one leading slash: "//rabbitai/..." -> "/rabbitai/...".
                return redirect(url.url[1:])
            return redirect("/")
        flash("URL to nowhere...", "danger")
        return redirect("/")
    @event_logger.log_this
    @has_access_api
    @expose("/shortner/", methods=["POST"])
    def shortner(self) -> FlaskResponse:
        # Create a short url for a validated in-app target and return the
        # full /r/<id> link as plain text.
        url = request.form.get("data")
        if not self._validate_url(url):
            logger.warning("Invalid URL: %s", url)
            return Response(f"Invalid URL: {url}", 400)
        obj = models.Url(url=url)
        db.session.add(obj)
        db.session.commit()
        return Response(
            "{scheme}://{request.headers[Host]}/r/{obj.id}".format(
                scheme=request.scheme, request=request, obj=obj
            ),
            mimetype="text/plain",
        )
| StarcoderdataPython |
3236001 | from django_tgbot.decorators import processor
from django_tgbot.state_manager import message_types, update_types, state_types
from django_tgbot.types.update import Update
from ..bot import state_manager, TelegramBot
from ..models import TelegramState
from ..BotSetting import BotName, ChannelName
from .BotDialog import go_home
state_manager.set_default_update_types(update_types.Message)
@processor(
    state_manager,
    from_states=state_types.Reset,
    success='/Home',
    update_types=update_types.Message,
    message_types=message_types.Text,
)
def start(bot: TelegramBot, update: Update, state: TelegramState):
    """Handle the initial text message in the Reset state: greet the user
    (welcome text in Persian) and move them to the home menu."""
    chat_id = update.get_chat().get_id()
    user_name = update.get_chat().get_username()
    # Welcome message (Persian): greets the user, points at the rules and
    # the required channel membership/profile setup.
    message = f"سلام " \
              f"{user_name}" \
              f" به ربات " \
              f"{BotName}" \
              f" خوش اومدی" \
              f"\n" \
              f"یادت نره که قوانین رو برای معاملات بخونی، چون اگه متضرر بشی ما هیچ مسئولیتی نداریم" \
              f"\n" \
              f"در آخر برای استفاده از ربات باید در کانال عضو بشی " \
              f"{ChannelName}" \
              f" و یک پروفایل هم بسازی"
    bot.sendMessage(chat_id, message)
    go_home(chat_id, bot)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.