id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
"""
Support for Xiaomi Mi WiFi Repeater 2.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/device_tracker.xiaomi_miio/
"""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.device_tracker import (DOMAIN, PLATFORM_SCHEMA,
DeviceScanner)
from homeassistant.const import (CONF_HOST, CONF_TOKEN)
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TOKEN): vol.All(cv.string, vol.Length(min=32, max=32)),
})
REQUIREMENTS = ['python-miio==0.4.0', 'construct==2.9.41']
def get_scanner(hass, config):
    """Set up and return a Xiaomi MiIO device scanner, or None on failure."""
    from miio import WifiRepeater, DeviceException

    conf = config[DOMAIN]
    host = conf.get(CONF_HOST)
    token = conf.get(CONF_TOKEN)
    # Only the token prefix is logged, to avoid leaking the credential.
    _LOGGER.info(
        "Initializing with host %s (token %s...)", host, token[:5])
    try:
        device = WifiRepeater(host, token)
        device_info = device.info()
        _LOGGER.info("%s %s %s detected",
                     device_info.model,
                     device_info.firmware_version,
                     device_info.hardware_version)
        return XiaomiMiioDeviceScanner(device)
    except DeviceException as ex:
        _LOGGER.error("Device unavailable or token incorrect: %s", ex)
        return None
class XiaomiMiioDeviceScanner(DeviceScanner):
    """This class queries a Xiaomi Mi WiFi Repeater."""
    def __init__(self, device):
        """Initialize the scanner.

        ``device`` is a connected ``miio.WifiRepeater`` instance.
        """
        self.device = device
    async def async_scan_devices(self):
        """Scan for devices and return a list containing found device ids."""
        from miio import DeviceException
        devices = []
        try:
            # NOTE(review): relies on ``self.hass`` being injected by the
            # device_tracker framework after construction -- confirm.
            station_info = await self.hass.async_add_job(self.device.status)
            _LOGGER.debug("Got new station info: %s", station_info)
            # 'mat' holds the associated-station list; each entry carries
            # at least a 'mac' field (see the append below).
            for device in station_info['mat']:
                devices.append(device['mac'])
        except DeviceException as ex:
            # Best effort: on device errors return whatever was collected.
            _LOGGER.error("Got exception while fetching the state: %s", ex)
        return devices
    async def async_get_device_name(self, device):
        """The repeater doesn't provide the name of the associated device."""
        return None
| StarcoderdataPython |
#!/usr/bin/env python3
import subprocess
import tempfile
import os
import os.path
import collections
import json
import sys
ProcessedTrack = collections.namedtuple("ProcessedTrack", ["dfpwm_file", "artist", "track"])
def convert_wav_dfpwm(infile, outfile):
    """Convert a WAV file to DFPWM via the LionRay converter.

    ``check=True`` raises CalledProcessError if the converter exits
    non-zero; the original silently continued and later failed while
    reading a missing/partial output file.
    """
    subprocess.run(["java", "-jar", "LionRay.jar", infile, outfile], check=True)
def convert_any_wav(infile, outfile):
    """Transcode any ffmpeg-readable audio file to a mono WAV.

    ``-y`` overwrites an existing output instead of blocking on ffmpeg's
    interactive prompt; ``check=True`` surfaces transcode failures
    immediately (the captured stderr is attached to CalledProcessError).
    """
    subprocess.run(
        ["ffmpeg", "-hide_banner", "-y", "-i", infile, "-ac", "1", outfile],
        stderr=subprocess.PIPE, check=True)
def process_file(filename):
    """Convert one audio file to DFPWM and return its metadata.

    The base name is expected to look like "Artist - Track.ext".
    Returns a ProcessedTrack whose ``dfpwm_file`` is a temporary file
    the caller is responsible for deleting.
    """
    stem = os.path.splitext(os.path.basename(filename))[0]
    parts = [part.strip() for part in stem.split("-")]
    artist = parts[0]
    # Fall back to the whole stem when the name has no "Artist - Track"
    # dash; the original raised IndexError here.
    track = parts[1] if len(parts) > 1 else parts[0]
    # NOTE: tempfile.mktemp is racy/deprecated, but both converters need a
    # path that does not exist yet; acceptable for this offline CLI tool.
    wav_dest = tempfile.mktemp(".wav")
    convert_any_wav(filename, wav_dest)
    dfpwm_dest = tempfile.mktemp(".dfpwm")
    convert_wav_dfpwm(wav_dest, dfpwm_dest)
    os.remove(wav_dest)
    return ProcessedTrack(dfpwm_dest, artist, track)
def read_binary(filename):
    """Return the entire contents of *filename* as bytes."""
    with open(filename, "rb") as handle:
        data = handle.read()
    return data
def process_dir(dirname):
    """Convert every file in *dirname* and write a single "tape.bin".

    Tape layout: an 8192-byte JSON metadata header (NUL-padded) followed
    by the concatenated DFPWM data of all tracks.
    """
    tracks = []
    for file in os.listdir(dirname):
        tracks.append(process_file(os.path.join(dirname, file)))
    tape_image = b""
    tracks_meta = []
    for track in tracks:
        track_meta = {}
        # Offsets skip the 8192-byte header; the extra +1 suggests the
        # consumer uses 1-based positions -- TODO confirm against the
        # tape reader.
        track_meta["start"] = len(tape_image) + 8193
        data = read_binary(track.dfpwm_file)
        # The temp file from process_file is no longer needed.
        os.remove(track.dfpwm_file)
        track_meta["end"] = track_meta["start"] + len(data)
        track_meta["artist"] = track.artist
        track_meta["title"] = track.track
        tape_image += data
        tracks_meta.append(track_meta)
        print(track.track, track.artist)
    # Header is exactly 8192 bytes: JSON metadata right-padded with NULs.
    meta = json.dumps({ "tracks": tracks_meta }).encode("utf-8").ljust(8192, b"\0")
    tape_image = meta + tape_image
    with open("tape.bin", "wb") as f:
        f.write(tape_image)
process_dir(sys.argv[1])
#! /usr/bin/env python
import sys
# Select the Python-2 or Python-3 implementation of the bundled avro
# modules at import time and re-export them under this package's
# namespace, so callers import e.g. ``schema`` without caring about the
# interpreter version.
if sys.version_info[0] == 2:
    from .avro_py2 import schema
    from .avro_py2 import io
    from .avro_py2 import protocol
    from .avro_py2 import ipc
    from .avro_py2 import datafile
    from .avro_py2 import tool
    #from .avro_py2 import txipc
else:
    from .avro_py3 import schema
    from .avro_py3 import io
    from .avro_py3 import protocol
    from .avro_py3 import ipc
    from .avro_py3 import datafile
    from .avro_py3 import tool
    #from .avro_py3 import txipc
| StarcoderdataPython |
# Problem: https://www.hackerrank.com/challenges/tree-huffman-decoding/problem
import queue
def decodeHuff(root, s):
    """Decode the Huffman-encoded bit string *s* using the tree at *root*.

    A '1' bit follows the right child, anything else the left child.
    A node with neither child is a leaf; its ``data`` character is
    emitted and traversal restarts from the root.  Prints the decoded
    string (the judge checks stdout) and also returns it.
    """
    current = root
    result = []
    for char in s:
        # Compare with ``==``, not ``is``: the original's ``char is '1'``
        # only worked because CPython happens to intern 1-char strings.
        if char == '1':
            current = current.right
        else:
            current = current.left
        # Leaf reached: emit its character and reset the traversal.
        if current.left is None and current.right is None:
            result.append(current.data)
            current = root
    decoded = ''.join(result)
    print(decoded)
    return decoded
| StarcoderdataPython |
import distutils.util
def _strtobool(value):
    """Return True/False for a truth string, mirroring distutils' strtobool.

    Accepts "y", "yes", "t", "true", "on", "1" as True and "n", "no",
    "f", "false", "off", "0" as False (case-insensitive); anything else
    raises ValueError.  Local replacement because distutils was
    deprecated by PEP 632 and removed in Python 3.12, so the top-level
    ``import distutils.util`` can eventually be dropped as well.
    """
    normalized = str(value).lower()
    if normalized in ("y", "yes", "t", "true", "on", "1"):
        return True
    if normalized in ("n", "no", "f", "false", "off", "0"):
        return False
    raise ValueError("invalid truth value %r" % (value,))


class Config:
    """Configuration holder that coerces raw (string) settings to typed attributes."""

    def __init__(self, auto_off, max_seats, station_id, frequency_wait, history_path, jwt_token, api_endpoint,
                 max_charge, debug):
        self.auto_off = _strtobool(auto_off)            # bool flag
        self.max_seats = int(max_seats)
        self.station_id = int(station_id)
        self.frequency_wait = float(frequency_wait)     # polling delay -- unit not stated; presumably seconds
        self.history_path = str(history_path)
        self.jwt_token = str(jwt_token)
        self.api_endpoint = str(api_endpoint)
        self.max_charge = float(max_charge)
        self.debug = _strtobool(debug)                  # bool flag
| StarcoderdataPython |
# Repository: AaronFriel/pulumi-google-native
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetServicePerimeterResult',
'AwaitableGetServicePerimeterResult',
'get_service_perimeter',
'get_service_perimeter_output',
]
@pulumi.output_type
class GetServicePerimeterResult:
    """Result payload of the getServicePerimeter invoke (generated code)."""
    def __init__(__self__, description=None, name=None, perimeter_type=None, status=None, title=None):
        # Generated constructor: each field is type-checked when a value
        # was supplied, then registered via pulumi.set so the SDK can
        # expose it through the @pulumi.getter properties below.
        if description and not isinstance(description, str):
            raise TypeError("Expected argument 'description' to be a str")
        pulumi.set(__self__, "description", description)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if perimeter_type and not isinstance(perimeter_type, str):
            raise TypeError("Expected argument 'perimeter_type' to be a str")
        pulumi.set(__self__, "perimeter_type", perimeter_type)
        if status and not isinstance(status, dict):
            raise TypeError("Expected argument 'status' to be a dict")
        pulumi.set(__self__, "status", status)
        if title and not isinstance(title, str):
            raise TypeError("Expected argument 'title' to be a str")
        pulumi.set(__self__, "title", title)
    @property
    @pulumi.getter
    def description(self) -> str:
        """
        Description of the `ServicePerimeter` and its use. Does not affect behavior.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name for the ServicePerimeter. The `short_name` component must begin with a letter and only include alphanumeric and '_'. Format: `accessPolicies/{policy_id}/servicePerimeters/{short_name}`
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="perimeterType")
    def perimeter_type(self) -> str:
        """
        Perimeter type indicator. A single project is allowed to be a member of single regular perimeter, but multiple service perimeter bridges. A project cannot be a included in a perimeter bridge without being included in regular perimeter. For perimeter bridges, restricted/unrestricted service lists as well as access lists must be empty.
        """
        return pulumi.get(self, "perimeter_type")
    @property
    @pulumi.getter
    def status(self) -> 'outputs.ServicePerimeterConfigResponse':
        """
        Current ServicePerimeter configuration. Specifies sets of resources, restricted/unrestricted services and access levels that determine perimeter content and boundaries.
        """
        return pulumi.get(self, "status")
    @property
    @pulumi.getter
    def title(self) -> str:
        """
        Human readable title. Must be unique within the Policy.
        """
        return pulumi.get(self, "title")
class AwaitableGetServicePerimeterResult(GetServicePerimeterResult):
    """Awaitable wrapper so the invoke result can be used with ``await``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable ``yield`` makes this method a generator (hence
        # awaitable) while the result itself is returned immediately.
        if False:
            yield self
        return GetServicePerimeterResult(
            description=self.description,
            name=self.name,
            perimeter_type=self.perimeter_type,
            status=self.status,
            title=self.title)
def get_service_perimeter(access_policy_id: Optional[str] = None,
                          service_perimeter_id: Optional[str] = None,
                          opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetServicePerimeterResult:
    """
    Get a Service Perimeter by resource name.

    :param access_policy_id: value forwarded as the invoke's
        ``accessPolicyId`` argument.
    :param service_perimeter_id: value forwarded as the invoke's
        ``servicePerimeterId`` argument.
    :param opts: options for the invoke (provider, version, ...).
    """
    __args__ = dict()
    __args__['accessPolicyId'] = access_policy_id
    __args__['servicePerimeterId'] = service_perimeter_id
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to this SDK's version when the caller did not pin one.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('google-native:accesscontextmanager/v1beta:getServicePerimeter', __args__, opts=opts, typ=GetServicePerimeterResult).value
    return AwaitableGetServicePerimeterResult(
        description=__ret__.description,
        name=__ret__.name,
        perimeter_type=__ret__.perimeter_type,
        status=__ret__.status,
        title=__ret__.title)
@_utilities.lift_output_func(get_service_perimeter)
def get_service_perimeter_output(access_policy_id: Optional[pulumi.Input[str]] = None,
                                 service_perimeter_id: Optional[pulumi.Input[str]] = None,
                                 opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetServicePerimeterResult]:
    """
    Get a Service Perimeter by resource name.
    """
    # Body is intentionally ``...``: lift_output_func wraps the plain
    # get_service_perimeter above to accept pulumi Inputs/Outputs.
    ...
| StarcoderdataPython |
# import dependencies
import pandas as pd
from bs4 import BeautifulSoup as bs
import requests
import json
from config import api_key
# Data-source URL: InciWeb wildfire incident RSS feed.
url = "https://inciweb.nwcg.gov/feeds/rss/incidents/"
# Google reverse-geocoding endpoint; a "lat,lng" string is appended to it,
# followed by key_string below.
state_fetch_url = "https://maps.googleapis.com/maps/api/geocode/json?latlng="
key_string = "&key=" + api_key
def state_extract(obj):
    """Return the state/province name from Google geocode address components.

    *obj* is the ``address_components`` list of a reverse-geocoding
    result.  Returns the ``long_name`` of the component whose ``types``
    include ``administrative_area_level_1``; if no component matches,
    falls back to the last ``long_name`` seen (matching the original's
    behavior), or "" for an empty list.

    Fixes: the original depended on ``long_name`` being iterated before
    ``types`` within each dict, and its ``elif`` skipped the list check
    whenever the key happened to be ``long_name``.
    """
    fallback = ""
    for component in obj:
        name = component.get("long_name", fallback)
        if "administrative_area_level_1" in component.get("types", []):
            return name
        fallback = name
    return fallback
# Instantiate the landing dataframe that accumulates scraped rows.
starting_data = pd.DataFrame()
# Request the raw RSS/XML feed content.
xml_data = requests.get(url).content
# Parse the XML response.
soup = bs(xml_data, "xml")
# Find all text in the data.
# NOTE(review): ``texts`` is never used afterwards -- candidate for removal.
texts = str(soup.findAll(text=True)).replace('\\n','')
# Position at the first <item> element; siblings are walked in the loop below.
child = soup.find("item")
# Instantiate one list per output column.
title = []
published = []
lat = []
lon = []
link = []
description = []
state=[]
# Loop through each <item> in the XML response and store the target data.
# Each field is wrapped in try/except so a missing tag yields " " instead
# of aborting the whole scrape; the loop ends when find_next_sibling
# returns None and the subsequent .find() raises.
while True:
    try:
        title.append(" ".join(child.find('title')))
    except:
        title.append(" ")
    try:
        published.append(" ".join(child.find('published')))
    except:
        published.append(" ")
    try:
        lat.append(" ".join(child.find('geo:lat')))
    except:
        lat.append(" ")
    try:
        lon.append(" ".join(child.find('geo:long')))
    except:
        lon.append(" ")
    try:
        link.append(" ".join(child.find('link')))
    except:
        link.append(" ")
    try:
        description.append(" ".join(child.find('description')))
    except:
        description.append(" ")
    try:
        # Reverse-geocode the incident coordinates to a state name.
        latlng = " ".join(child.find('geo:lat')) + ","+ " ".join(child.find('geo:long'))
        resp_data = requests.get(state_fetch_url + latlng + key_string).json()
        state.append(state_extract(resp_data.get('results')[0].get('address_components')))
    except:
        state.append(" ")
    try:
        # Next sibling of child, here: 'item'
        child = child.find_next_sibling('item')
    except:
        break
# Assemble the scraped columns into a dataframe.
data = pd.DataFrame({"title":title,
                    "published":published,
                    "lat":lat,
                    "lon":lon,
                    "link_url":link,
                    "description": description,
                    "state": state})
# DataFrame.append was deprecated and removed in pandas 2.0; pd.concat is
# the supported equivalent with identical semantics here.
starting_data = pd.concat([starting_data, data], ignore_index=True)
# Drop duplicate rows and reindex 0..n-1.  ignore_index must be a real
# bool: the original passed the string "True", which only worked because
# any non-empty string is truthy.
unique_data = starting_data.drop_duplicates(keep="first", ignore_index=True)
# Visit each row's detail page and extract additional data (cause, size).
# instantiate landing lists
causes = []
sizes = []
# loop through each row of the de-duplicated data
for x in range(len(unique_data)):
    # BUG FIX: index unique_data (reindexed 0..n-1 by drop_duplicates),
    # not starting_data -- once duplicates are dropped the two frames no
    # longer line up row-for-row, so the original could scrape the wrong
    # page for a given row.
    url = unique_data.loc[x, "link_url"]
    # go to the page and grab all the HTML tables
    tables = pd.read_html(url)
    # The number of tables on the page determines where cause/size live.
    if len(tables) > 1:
        try:
            # find the "cause" in the first table on the page (if it exists)
            cause = tables[0].iloc[2, 1]
        except Exception:
            cause = "unknown"
        try:
            # find the "size" in the second table on the page (if it exists)
            size = tables[1].loc[(tables[1][0]) == "Size", 1].item()
        except Exception:
            size = "unknown"
    # if there is only one table on the page...
    else:
        try:
            # find the "cause" in the first table on the page (if it exists)
            cause = tables[0].iloc[1, 1]
            # no size data is available on single-table pages
            size = "n/a"
        except Exception:
            cause = "unknown"
            # BUG FIX: also reset size here; the original carried over the
            # previous iteration's value (or raised NameError on row 0).
            size = "unknown"
    # add cause and size to their lists
    causes.append(cause)
    sizes.append(size)
    # print progress
    print(f"{x+1} of {len(unique_data)}")
# If cause mentions "Investigation", normalize it to "Unknown".
for y in range(len(causes)):
    if "Investigation" in causes[y]:
        causes[y] = "Unknown"
# Strip the word "Acres" and thousands separators from the size data.
sizes = [s.replace(" Acres","") for s in sizes]
sizes = [s.replace(",","") for s in sizes]
# Add causes and sizes to the dataframe.
unique_data["cause"] = causes
unique_data["acres"] = sizes
# Group by cause/state for reference.
# NOTE(review): these GroupBy objects are never aggregated or used below.
grouped_df = unique_data.groupby(["cause"])
state_grouped_df = unique_data.groupby(["state"])
# save the dataframe as "clean_data"
clean_data = unique_data
clean_data.to_csv("data.csv")
# Repository: praekeltfoundation/mote
import logging
from django.conf import settings
from django.template.loaders.cached import Loader as CachedLoader
from mote import _thread_locals
logger = logging.getLogger(__name__)
class Loader(CachedLoader):
    """Cached template loader whose cache is scoped to the current request.

    Intended for DEBUG=True only: templates are cached in thread-local
    storage (reset per request elsewhere) instead of process-wide.
    """
    def get_template(self, *args, **kwargs):
        if settings.DEBUG:
            # The cache is only valid per request and can thus not be
            # instantiated in __init__. This method provides a convenient
            # instantiation point.
            cache = getattr(_thread_locals, "_cache", None)
            if cache is None:
                setattr(
                    _thread_locals,
                    "_cache",
                    {
                        "template_cache": {},
                        "find_template_cache": {},
                        "get_template_cache": {}
                    }
                )
                cache = _thread_locals._cache
            # Point the superclass's cache dicts at the thread-local
            # storage so the inherited lookup logic uses it transparently.
            self.template_cache = cache["template_cache"]
            self.find_template_cache = cache["find_template_cache"]
            self.get_template_cache = cache["get_template_cache"]
        else:
            logger.info(
                "mote.loaders.cached is only intended for use when DEBUG=True"
            )
        return super(Loader, self).get_template(*args, **kwargs)
| StarcoderdataPython |
"""
G2P testing on the test data
"""
import unittest
import ga4gh.server.datamodel as datamodel
import ga4gh.server.frontend as frontend
import tests.paths as paths
import ga4gh.schemas.protocol as protocol
class TestG2P(unittest.TestCase):
    """End-to-end tests of the genotype-to-phenotype (G2P) endpoints
    against the bundled test data repository, driven through the Flask
    test client."""

    exampleUrl = 'www.example.com'
    phenotypeAssociationSetId = ""

    @classmethod
    def setUpClass(cls):
        """Configure the frontend against the test data repo once per class."""
        config = {
            "DATA_SOURCE": paths.testDataRepo,
            "DEBUG": False
        }
        frontend.reset()
        frontend.configure(
            baseConfig="DevelopmentConfig", extraConfig=config)
        cls.app = frontend.app.test_client()

    def sendSearchRequest(self, path, request, responseClass):
        """
        Sends the specified protocol request instance as JSON, and
        parses the result into an instance of the specified response.
        """
        response = self.sendJsonPostRequest(path, protocol.toJson(request))
        self.assertEqual(200, response.status_code)
        responseData = protocol.fromJson(response.data, responseClass)
        self.assertTrue(
            protocol.validate(protocol.toJson(responseData), responseClass))
        return responseData

    def sendGetRequest(self, path):
        """
        Sends a get request to the specified URL and returns the response.
        """
        return self.app.get(path)

    def getPhenotypeAssociationSetId(self):
        """
        Gets the dataset phenotype association set ID
        """
        request = protocol.SearchDatasetsRequest()
        response = self.sendSearchRequest(
            "datasets/search",
            request,
            protocol.SearchDatasetsResponse)
        datasetId = response.datasets[0].id
        request = protocol.SearchPhenotypeAssociationSetsRequest()
        request.dataset_id = datasetId
        response = self.sendPostRequest(
            "phenotypeassociationsets/search", request)
        response = protocol.fromJson(
            response.data, protocol.SearchPhenotypeAssociationSetsResponse)
        return response.phenotype_association_sets[0].id

    def sendPostRequest(self, path, request):
        """
        Sends the specified GA request object and returns the response.
        """
        headers = {
            'Content-type': 'application/json',
            'Origin': self.exampleUrl,
        }
        return self.app.post(
            path, headers=headers, data=protocol.toJson(request))

    def sendJsonPostRequest(self, path, data):
        """
        Sends a JSON request to the specified path with the specified data
        and returns the response.
        """
        return self.app.post(
            path, headers={'Content-type': 'application/json'},
            data=data)

    def testPhenotypeAssociationSetSearch(self):
        """Searching for association sets returns a non-empty array."""
        request = protocol.SearchDatasetsRequest()
        response = self.sendSearchRequest(
            "datasets/search",
            request,
            protocol.SearchDatasetsResponse)
        datasetId = response.datasets[0].id
        request = protocol.SearchPhenotypeAssociationSetsRequest()
        request.dataset_id = datasetId
        response = self.sendSearchRequest(
            "phenotypeassociationsets/search",
            request,
            protocol.SearchPhenotypeAssociationSetsResponse)
        # there should be an array
        self.assertIsNotNone(response.phenotype_association_sets)
        # there should be at least one entry
        self.assertGreater(len(response.phenotype_association_sets), 0)

    def getAllDatasets(self):
        """
        Gets all datasets available
        """
        path = 'datasets/search'
        request = protocol.SearchDatasetsRequest()
        responseData = self.sendSearchRequest(
            path, request, protocol.SearchDatasetsResponse)
        return responseData.datasets

    def getAllFeatureSets(self):
        """
        Gets all feature sets available
        """
        datasetId = self.getAllDatasets()[0].id
        datasetName = self.getAllDatasets()[0].name
        path = 'featuresets/search'
        request = protocol.SearchFeatureSetsRequest()
        request.dataset_id = datasetId
        responseData = self.sendSearchRequest(
            path, request, protocol.SearchFeatureSetsResponse)
        return datasetName, responseData.feature_sets

    def getCGDDataSetFeatureSet(self):
        """
        Gets CGD data feature set
        """
        datasetName, featureSets = self.getAllFeatureSets()
        for featureSet in featureSets:
            if featureSet.name == 'cgd':
                return datasetName, featureSet

    def getObfuscatedFeatureCompoundId(
            self, dataSetName, featureSetName, featureId):
        """
        Gets the obfuscated feature compound Id
        """
        splits = [
            dataSetName,
            featureSetName,
            featureId]
        joined = datamodel.FeatureSetCompoundId.join(splits)
        obfuscated = datamodel.FeatureCompoundId.obfuscate(joined)
        return obfuscated

    def testEnsureCGDFeatureSet(self):
        """The 'cgd' feature set must exist in the test data."""
        datasetName, featureSet = self.getCGDDataSetFeatureSet()
        self.assertIsNotNone(datasetName)
        self.assertIsNotNone(featureSet)
        self.assertIsNotNone(featureSet.name)

    def testEnsureCGDFeatureId(self):
        """Obfuscation round-trips back to the original feature id."""
        datasetName, featureSet = self.getCGDDataSetFeatureSet()
        featureId = \
            "http://cancer.sanger.ac.uk/cosmic/mutation/overview?id=736"
        obfuscated = self.getObfuscatedFeatureCompoundId(
            datasetName, featureSet.name, featureId)
        compoundId = datamodel.FeatureCompoundId.parse(obfuscated)
        self.assertEqual(featureId, compoundId.featureId)

    def testCompoundFeatureSearch(self):
        """Fetch a feature by compound id, then use it in a G2P search."""
        datasetName, featureSet = self.getCGDDataSetFeatureSet()
        featureId = \
            "http://cancer.sanger.ac.uk/cosmic/mutation/overview?id=736"
        obfuscated = self.getObfuscatedFeatureCompoundId(
            datasetName, featureSet.name, featureId)
        # BUG FIX: instantiate the request; the original assigned the
        # protocol class itself and then set a class-level attribute.
        request = protocol.GetFeatureRequest()
        request.feature_id = obfuscated
        response = self.sendGetRequest('/features/{}'.format(obfuscated))
        feature = protocol.fromJson(response.data, protocol.Feature)
        self.assertIsNotNone(feature)
        featureId = feature.id
        request = protocol.SearchGenotypePhenotypeRequest()
        request.phenotype_association_set_id = \
            self.getPhenotypeAssociationSetId()
        request.feature_ids.append(featureId)
        response = self.sendSearchRequest(
            '/featurephenotypeassociations/search',
            request,
            protocol.SearchGenotypePhenotypeResponse)
        self.assertEqual(1, len(response.associations))
        self.assertEqual(1, len(response.associations[0].feature_ids))

    def testFeaturesSearchById(self):
        """GET /features/<id> returns the expected feature fields."""
        datasetName, featureSet = self.getCGDDataSetFeatureSet()
        featureId = \
            "http://cancer.sanger.ac.uk/cosmic/mutation/overview?id=965"
        obfuscated = self.getObfuscatedFeatureCompoundId(
            datasetName, featureSet.name, featureId)
        # BUG FIX: instantiate the request (the original mutated the class).
        request = protocol.GetFeatureRequest()
        request.feature_id = obfuscated
        response = self.sendGetRequest(
            '/features/{}'.format(obfuscated))
        feature = protocol.fromJson(response.data, protocol.Feature)
        self.assertIsNotNone(feature)
        self.assertEqual(request.feature_id, feature.id)
        self.assertIsNotNone(feature.feature_type)
        self.assertIsNotNone(feature.feature_type.term_id)
        self.assertEqual(feature.reference_name, "chr10")
        self.assertEqual(feature.start, 43617416)
        self.assertEqual(feature.end, 43617416)

    def testGenotypesSearchByName(self):
        """Searching features by exact name finds the single RET mutation."""
        # setup phenotype query
        request = protocol.SearchFeaturesRequest()
        datasetName, featureSet = self.getCGDDataSetFeatureSet()
        request.feature_set_id = featureSet.id
        request.name = "RET M918T missense mutation"
        postUrl = "features/search"
        response = self.sendSearchRequest(
            postUrl,
            request,
            protocol.SearchFeaturesResponse)
        self.assertEqual(1, len(response.features))
        self.assertEqual(
            "http://cancer.sanger.ac.uk/cosmic/mutation/overview?id=965",
            datamodel.FeatureCompoundId
                     .parse(response.features[0].id)
                     .featureId
        )
        self.assertEqual(
            request.name,
            response.features[0].name
        )

    def testGenotypesSearchByNameKIT(self):
        """A wildcard name search matches all three KIT wildtype features."""
        request = protocol.SearchFeaturesRequest()
        datasetName, featureSet = self.getCGDDataSetFeatureSet()
        request.feature_set_id = featureSet.id
        request.name = \
            "KIT *wild"
        postUrl = "features/search"
        response = self.sendSearchRequest(
            postUrl,
            request,
            protocol.SearchFeaturesResponse)
        self.assertEqual(3, len(response.features))

    def testPhenotypesSearchById(self):
        """Searching phenotypes by id returns that phenotype."""
        request = protocol.SearchPhenotypesRequest()
        request.phenotype_association_set_id = \
            self.getPhenotypeAssociationSetId()
        # setup phenotype query
        request.id = "http://ohsu.edu/cgd/30ebfd1a"
        postUrl = '/phenotypes/search'
        response = self.sendSearchRequest(
            postUrl,
            request,
            protocol.SearchPhenotypesResponse)
        self.assertEqual(request.id, response.phenotypes[0].id)

    def testPhenotypesSearchOntologyTerm(self):
        """Searching phenotypes by ontology term id returns results."""
        request = protocol.SearchPhenotypesRequest()
        request.phenotype_association_set_id = \
            self.getPhenotypeAssociationSetId()
        request.type.term_id = "http://ohsu.edu/cgd/5c895709"
        postUrl = '/phenotypes/search'
        response = self.sendSearchRequest(
            postUrl,
            request,
            protocol.SearchPhenotypesResponse)
        self.assertGreater(len(response.phenotypes), 0)

    def testPhenotypeSearchQualifiersSensitivity(self):
        """A single 'sensitivity' qualifier returns matching phenotypes."""
        request = protocol.SearchPhenotypesRequest()
        request.phenotype_association_set_id = \
            self.getPhenotypeAssociationSetId()
        ontologyterm = protocol.OntologyTerm()
        ontologyterm.term_id = "http://ohsu.edu/cgd/sensitivity"
        request.qualifiers.extend([ontologyterm])
        postUrl = '/phenotypes/search'
        response = self.sendSearchRequest(
            postUrl,
            request,
            protocol.SearchPhenotypesResponse)
        self.assertGreater(len(response.phenotypes), 0)

    def testPhenotypeSearchQualifiersSensitivityPATO_0000396(self):
        """A PATO-term qualifier returns matching phenotypes."""
        request = protocol.SearchPhenotypesRequest()
        request.phenotype_association_set_id = \
            self.getPhenotypeAssociationSetId()
        ontologyterm = protocol.OntologyTerm()
        ontologyterm.term_id = "http://purl.obolibrary.org/obo/PATO_0000396"
        request.qualifiers.extend([ontologyterm])
        postUrl = '/phenotypes/search'
        response = self.sendSearchRequest(
            postUrl,
            request,
            protocol.SearchPhenotypesResponse)
        self.assertGreater(len(response.phenotypes), 0)

    def testPhenotypeSearchMultipleQualifiers(self):
        """Multiple qualifiers may be combined in one phenotype search."""
        request = protocol.SearchPhenotypesRequest()
        request.phenotype_association_set_id = \
            self.getPhenotypeAssociationSetId()
        ontologyterm = protocol.OntologyTerm()
        ontologyterm.term_id = "http://purl.obolibrary.org/obo/PATO_0000396"
        ontologyterm2 = protocol.OntologyTerm()
        ontologyterm2.term_id = "http://purl.obolibrary.org/obo/PATO_0000460"
        request.qualifiers.extend([ontologyterm, ontologyterm2])
        postUrl = '/phenotypes/search'
        response = self.sendSearchRequest(
            postUrl,
            request,
            protocol.SearchPhenotypesResponse)
        self.assertGreater(len(response.phenotypes), 0)

    @unittest.skip
    def testPhenotypesSearchDescription(self):
        """(Skipped) exact-description phenotype search."""
        request = protocol.SearchPhenotypesRequest()
        request.phenotype_association_set_id = \
            self.getPhenotypeAssociationSetId()
        request.description = \
            "Papillary thyroid carcinoma with sensitivity to therapy"  # noqa
        postUrl = '/phenotypes/search'
        response = self.sendSearchRequest(
            postUrl,
            request,
            protocol.SearchPhenotypesResponse)
        self.assertGreater(len(response.phenotypes), 0)

    def testPhenotypesSearchDescriptionWildcard(self):
        """A regex description matches exactly seven phenotypes."""
        request = protocol.SearchPhenotypesRequest()
        request.phenotype_association_set_id = \
            self.getPhenotypeAssociationSetId()
        request.description = ".*sensitivity.*"
        postUrl = '/phenotypes/search'
        response = self.sendSearchRequest(
            postUrl,
            request,
            protocol.SearchPhenotypesResponse)
        # assertEqual, not the deprecated assertEquals alias.
        self.assertEqual(7, len(response.phenotypes))

    def testPhenotypesSearchMultipleTerms(self):
        """Description and age-of-onset terms may be combined."""
        request = protocol.SearchPhenotypesRequest()
        request.phenotype_association_set_id = \
            self.getPhenotypeAssociationSetId()
        request.description = "Melanoma, NOS with response to therapy"
        request.age_of_onset.term_id = \
            "http://purl.obolibrary.org/obo/HP_0003581"
        postUrl = '/phenotypes/search'
        response = self.sendSearchRequest(
            postUrl,
            request,
            protocol.SearchPhenotypesResponse)
        self.assertGreater(len(response.phenotypes), 0)

    def testGenotypePhenotypeSearchFeature(self):
        """
        Search for associations given a feature
        """
        # simulate user interacting with sequenceAnnotations
        request = protocol.SearchGenotypePhenotypeRequest()
        request.phenotype_association_set_id = \
            self.getPhenotypeAssociationSetId()
        datasetName, featureSet = self.getCGDDataSetFeatureSet()
        featureId = \
            "http://ohsu.edu/cgd/27d2169c"
        obfuscated = self.getObfuscatedFeatureCompoundId(
            datasetName, featureSet.name, featureId)
        # use the feature to look up associations
        request.feature_ids.extend([obfuscated])
        response = self.sendSearchRequest(
            '/featurephenotypeassociations/search',
            request,
            protocol.SearchGenotypePhenotypeResponse)
        self.assertEqual(1, len(response.associations[0].feature_ids))

    def testGenotypePhenotypeSearchEvidence(self):
        """
        Search for associations given an evidence
        """
        request = protocol.SearchGenotypePhenotypeRequest()
        request.phenotype_association_set_id = \
            self.getPhenotypeAssociationSetId()
        eq = protocol.EvidenceQuery()
        eq.description = "imatinib"
        request.evidence.extend([eq])
        response = self.sendSearchRequest(
            '/featurephenotypeassociations/search',
            request,
            protocol.SearchGenotypePhenotypeResponse)
        self.assertEqual(1, len(response.associations[0].feature_ids))

    def testGenotypePhenotypeSearchPhenotype(self):
        """
        Search for associations given a phenotype
        """
        request = protocol.SearchGenotypePhenotypeRequest()
        request.phenotype_association_set_id = \
            self.getPhenotypeAssociationSetId()
        request.phenotype_ids.extend(["http://ohsu.edu/cgd/25abbb09"])
        response = self.sendSearchRequest(
            '/featurephenotypeassociations/search',
            request,
            protocol.SearchGenotypePhenotypeResponse)
        self.assertEqual(1, len(response.associations[0].feature_ids))

    def testNoFind(self):
        """An unknown feature id yields an empty association list."""
        request = protocol.SearchGenotypePhenotypeRequest()
        request.phenotype_association_set_id = \
            self.getPhenotypeAssociationSetId()
        request.feature_ids.extend(["FOOBAR"])
        response = self.sendSearchRequest(
            '/featurephenotypeassociations/search',
            request,
            protocol.SearchGenotypePhenotypeResponse)
        self.assertEqual(0, len(response.associations))

    def testGenotypePhenotypeSearchEnsureEvidence(self):
        """
        Ensure evidence level is serialized in responses
        """
        request = protocol.SearchGenotypePhenotypeRequest()
        # The original assigned phenotype_association_set_id twice,
        # issuing a redundant round of HTTP lookups; once is enough.
        request.phenotype_association_set_id = \
            self.getPhenotypeAssociationSetId()
        datasetName, featureSet = self.getCGDDataSetFeatureSet()
        featureId = \
            "http://ohsu.edu/cgd/27d2169c"
        obfuscated = self.getObfuscatedFeatureCompoundId(datasetName,
                                                         featureSet.name,
                                                         featureId)
        request.feature_ids.extend([obfuscated])
        response = self.sendSearchRequest(
            '/featurephenotypeassociations/search',
            request,
            protocol.SearchGenotypePhenotypeResponse)
        self.assertEqual(1, len(response.associations[0].evidence))
        evidence = response.associations[0].evidence[0]
        self.assertEqual('decreased_sensitivity', evidence.description)

    def testGenotypePhenotypeSearchEnsureEnvironment(self):
        """Environmental contexts are serialized in association responses."""
        request = protocol.SearchGenotypePhenotypeRequest()
        request.phenotype_association_set_id = \
            self.getPhenotypeAssociationSetId()
        datasetName, featureSet = self.getCGDDataSetFeatureSet()
        featureId = \
            "http://ohsu.edu/cgd/27d2169c"
        obfuscated = self.getObfuscatedFeatureCompoundId(
            datasetName, featureSet.name, featureId)
        request.feature_ids.extend([obfuscated])
        eq = protocol.EvidenceQuery()
        eq.description = "imatinib"
        request.evidence.extend([eq])
        response = self.sendSearchRequest(
            '/featurephenotypeassociations/search',
            request,
            protocol.SearchGenotypePhenotypeResponse)
        self.assertEqual(
            1, len(response.associations[0].environmental_contexts))
        environmentalContext = response.associations[0] \
            .environmental_contexts[0]
        self.assertEqual('imatinib', environmentalContext.description)

    def _createPagingRequest(self):
        """Build the shared KIT-wildtype feature search used by paging tests."""
        request = protocol.SearchFeaturesRequest()
        datasetName, featureSet = self.getCGDDataSetFeatureSet()
        request.feature_set_id = featureSet.id
        request.name = "KIT *wild"
        return request

    def testGenotypeSearchFeaturePagingOne(self):
        """
        If page size is set to 1 only one association should be returned
        """
        request = self._createPagingRequest()
        request.page_size = 1
        postUrl = "features/search"
        response = self.sendSearchRequest(
            postUrl,
            request,
            protocol.SearchFeaturesResponse)
        self.assertEqual(1, len(response.features))
        self.assertIsNotNone(response.next_page_token)

    def testGenotypeSearchFeaturePagingMore(self):
        """
        If page size is not set to more than one association should be returned
        """
        request = self._createPagingRequest()
        postUrl = "features/search"
        response = self.sendSearchRequest(
            postUrl,
            request,
            protocol.SearchFeaturesResponse)
        self.assertGreater(len(response.features), 1)
        self.assertEqual(response.next_page_token, '')

    def testGenotypeSearchFeaturePagingAll(self):
        """
        Loop through all pages
        """
        request = self._createPagingRequest()
        request.page_size = 1
        feature_set_id = request.feature_set_id
        postUrl = "features/search"
        response = self.sendSearchRequest(
            postUrl,
            request,
            protocol.SearchFeaturesResponse)
        self.assertEqual(1, len(response.features))
        self.assertIsNotNone(response.next_page_token)
        pageCount = 1
        while response.next_page_token:
            previous_id = response.features[0].id
            request = protocol.SearchFeaturesRequest()
            request.feature_set_id = feature_set_id
            request.page_size = 1
            request.page_token = response.next_page_token
            request.name = "KIT *wild"
            response = self.sendSearchRequest(
                postUrl,
                request,
                protocol.SearchFeaturesResponse)
            self.assertEqual(1, len(response.features))
            self.assertNotEqual(previous_id, response.features[0].id)
            pageCount += 1
        self.assertEqual(3, pageCount)

    def testGenotypesSearchByNameError(self):
        """
        Search for feature by name with a malformed regular expression.
        """
        # setup phenotype query
        request = protocol.SearchFeaturesRequest()
        datasetName, featureSet = self.getCGDDataSetFeatureSet()
        request.feature_set_id = featureSet.id
        request.name = "*"  # invalid regular expression
        postUrl = "features/search"
        response = self.sendJsonPostRequest(postUrl, protocol.toJson(request))
        self.assertEqual(400, response.status_code)
| StarcoderdataPython |
# Tests for AirflowNetworkDistributionComponentHeatExchanger.
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.natural_ventilation_and_duct_leakage import AirflowNetworkDistributionComponentHeatExchanger
log = logging.getLogger(__name__)
class TestAirflowNetworkDistributionComponentHeatExchanger(unittest.TestCase):
    """Round-trip test: populate the IDF object, save it, reload it and
    compare every field."""

    def setUp(self):
        # Fresh temporary file for each test; tearDown cleans it up.
        self.fd, self.path = tempfile.mkstemp()

    def tearDown(self):
        # Close the descriptor returned by mkstemp before removing the
        # file; the original leaked one file descriptor per test.
        os.close(self.fd)
        os.remove(self.path)

    def test_create_airflownetworkdistributioncomponentheatexchanger(self):
        """Set every field, write to disk, read back and compare."""
        pyidf.validation_level = ValidationLevel.error

        obj = AirflowNetworkDistributionComponentHeatExchanger()
        # object-list
        var_heatexchanger_name = "object-list|HeatExchanger Name"
        obj.heatexchanger_name = var_heatexchanger_name
        # alpha
        var_heatexchanger_object_type = "HeatExchanger:AirToAir:FlatPlate"
        obj.heatexchanger_object_type = var_heatexchanger_object_type
        # real
        var_air_path_length = 0.0001
        obj.air_path_length = var_air_path_length
        # real
        var_air_path_hydraulic_diameter = 0.0001
        obj.air_path_hydraulic_diameter = var_air_path_hydraulic_diameter

        idf = IDF()
        idf.add(obj)
        idf.save(self.path, check=False)

        with open(self.path, mode='r') as f:
            for line in f:
                log.debug(line.strip())

        # Reload and verify the round-trip preserved every field.
        # (The original file's last line was corrupted by a fused dataset
        # marker, which made it a syntax error; restored here.)
        idf2 = IDF(self.path)
        self.assertEqual(idf2.airflownetworkdistributioncomponentheatexchangers[0].heatexchanger_name, var_heatexchanger_name)
        self.assertEqual(idf2.airflownetworkdistributioncomponentheatexchangers[0].heatexchanger_object_type, var_heatexchanger_object_type)
        self.assertAlmostEqual(idf2.airflownetworkdistributioncomponentheatexchangers[0].air_path_length, var_air_path_length)
        self.assertAlmostEqual(idf2.airflownetworkdistributioncomponentheatexchangers[0].air_path_hydraulic_diameter, var_air_path_hydraulic_diameter)
1608952 | <filename>deepxde/utils/pytorch.py
"""Utilities of pytorch."""
| StarcoderdataPython |
1657155 | <filename>Instagram/data.py
data = [
{
"name":"<NAME>",
"caption":"You can't like a full life on an empty stomach",
"profile_pic":"assets/img/badoo1.jfif",
"likes":"1.5M",
"comments":"18.9K",
"post_pics":["assets/img/badoo2.jfif", "assets/img/badoo3.jfif"],
"comment":[
{
"name":"<NAME>",
"profile_pic":"assets/img/wiz1.jfif",
"comment": "Tell them...."
},
{
"name":"rema_",
"profile_pic":"assets/img/rema1.png",
"comment": "BOOOSS!!!"
},
{
"name":"theweeknd",
"profile_pic":"assets/img/weeknd1.jfif",
"comment": "Nice Photography"
},
{
"name":"reekado",
"profile_pic":"assets/img/random1.jpg",
"comment": "Nice Photography"
},
{
"name":"<NAME>",
"profile_pic":"assets/img/random2.jpg",
"comment": "Nice Photography"
}
],
"reactors":["assets/img/weeknd1.jfif", "assets/img/wiz1.jfif", "assets/img/rema1.png"],
"verified": True
},
{
"name":"<NAME>",
"caption":"Friday, my second favorite F word",
"profile_pic":"assets/img/tate1.jfif",
"likes":"5M",
"comments":"8.9K",
"post_pics":["assets/img/tate2.jfif", "assets/img/tate3.jfif", "assets/img/tate4.png"],
"comment":[
{
"name":"Jamiee",
"profile_pic":"assets/img/random3.jpg",
"comment": "Tell them...."
},
{
"name":"jamal",
"profile_pic":"assets/img/random4.jpg",
"comment": "Way to gooo girl"
},
{
"name":"<NAME>",
"profile_pic":"assets/img/tate1.jfif",
"comment": "Lol"
},
{
"name":"reekado",
"profile_pic":"assets/img/random1.jpg",
"comment": "How do you do it"
},
],
"reactors":["assets/img/random3.jpg", "assets/img/random4.jpg", "assets/img/random1.jpg"],
"verified": False
},
{
"name":"<NAME>",
"caption":"When I wanna Ice cream, ICE cream it",
"profile_pic":"assets/img/saweetie1.png",
"likes":"5M",
"comments":"8.9K",
"post_pics":["assets/img/saweetie1.png", "assets/img/saweetie2.jfif", "assets/img/saweetie3.png"],
"comment":[
{
"name":"Jamiee",
"profile_pic":"assets/img/random3.jpg",
"comment": "Queen"
},
{
"name":"jamal",
"profile_pic":"assets/img/random4.jpg",
"comment": "Saweeetea"
},
{
"name":"<NAME>",
"profile_pic":"assets/img/tate1.jfif",
"comment": "Nice Pun"
},
{
"name":"reekado",
"profile_pic":"assets/img/random1.jpg",
"comment": "Hahaha"
},
],
"reactors":["assets/img/random3.jpg", "assets/img/random4.jpg", "assets/img/random1.jpg"],
"verified": False
},
{
"name":"zayn_malik",
"caption":"No one will ever be as entertained by us as us",
"profile_pic":"assets/img/zayn1.png",
"likes":"5M",
"comments":"8.9K",
"post_pics":["assets/img/zayn2.jfif", "assets/img/zayn3.jfif"],
"comment":[
{
"name":"Jamiee",
"profile_pic":"assets/img/random3.jpg",
"comment": "Queen"
},
{
"name":"jamal",
"profile_pic":"assets/img/random4.jpg",
"comment": "Saweeetea"
},
{
"name":"<NAME>",
"profile_pic":"assets/img/tate1.jfif",
"comment": "Nice Pun"
},
{
"name":"reekado",
"profile_pic":"assets/img/random1.jpg",
"comment": "Hahaha"
},
],
"reactors":["assets/img/random3.jpg", "assets/img/random4.jpg", "assets/img/random1.jpg"],
"verified": True
},
{
"name":"heisrema",
"caption":"Another fine day ruined by responsibilties",
"profile_pic":"assets/img/rema3.jfif",
"likes":"15K",
"comments":"300",
"post_pics":["assets/img/rema1.png", "assets/img/rema2.png", "assets/img/rema4.jfif"],
"comment":[
{
"name":"Jamiee",
"profile_pic":"assets/img/random3.jpg",
"comment": "Queen"
},
{
"name":"jamal",
"profile_pic":"assets/img/random4.jpg",
"comment": "Saweeetea"
},
{
"name":"<NAME>",
"profile_pic":"assets/img/tate1.jfif",
"comment": "Nice Pun"
},
{
"name":"reekado",
"profile_pic":"assets/img/random1.jpg",
"comment": "Hahaha"
},
],
"reactors":["assets/img/random3.jpg", "assets/img/random4.jpg", "assets/img/random1.jpg"],
"verified": False
},
]
story = [
{
"name":"<NAME>",
"profile_pic":"assets/img/ali1.jfif",
"stories":["assets/img/ali2.jfif", "assets/img/ali1.jfif"],
"verified": False
},
{
"name":"Eminem",
"profile_pic":"assets/img/eminem1.jfif",
"stories":["assets/img/eminem2.jfif", "assets/img/eminem3.jfif", "assets/img/eminem1.jfif"],
"verified": True
},
{
"name":"theweeknd",
"profile_pic":"assets/img/weeknd1.jfif",
"stories":["assets/img/weeknd2.jfif", "assets/img/weeknd3.jfif"],
"verified": False
},
{
"name":"<NAME>",
"profile_pic":"assets/img/will1.jfif",
"stories":["assets/img/will2.jfif", "assets/img/will1.jfif", "assets/img/will3.png"],
"verified": True
},
]
| StarcoderdataPython |
3281412 | import os, sys
root_path = os.path.realpath(__file__).split('/evaluate/multipose_coco_eval.py')[0]
os.chdir(root_path)
sys.path.append(root_path)
from network.posenet import poseNet
from evaluate.tester import Tester
backbone = 'resnet101'
# Set Training parameters
params = Tester.TestParams()
params.subnet_name = 'both'
params.inp_size = 480  # input picture size = (inp_size, inp_size)
params.coeff = 2
params.in_thres = 0.21
params.coco_root = '/mnt/hdd10tb/Datasets/COCO2017/'
params.testresult_write_json = True  # Whether to write json result
params.coco_result_filename = './demo/multipose_coco2017_results.json'
# NOTE(review): the checkpoint path mentions "res50" while backbone above is
# 'resnet101' — confirm the checkpoint matches the backbone architecture.
params.ckpt = '/home/vietnguyen/MultiPoseNet/extra/models/res50_detection_subnet/ckpt_39_0.59604.h5.best'
# model
model = poseNet(backbone)
# Freeze all parameters: this script only evaluates, never trains.
for name, module in model.named_children():
    for para in module.parameters():
        para.requires_grad = False
tester = Tester(model, params)
tester.coco_eval()  # pic_test
| StarcoderdataPython |
import numpy as np
import pybullet as p
from gym import spaces

from diy_gym.addons.addon import Addon
class StuckJointCost(Addon):
    """Addon that emits a negative reward whenever a joint sits at (or within
    ``tolerance`` of) one of its position limits, i.e. the joint is "stuck".

    Config keys:
        multiplier: magnitude of the penalty (default 0.1).
        tolerance: how close a joint may get to a limit before it is
            considered stuck (default 0.01, in the joint's native units).
    """

    def __init__(self, parent, config):
        super(StuckJointCost, self).__init__(parent, config)

        self.uid = parent.uid
        # Keep only joints that carry state: getJointInfo field 3 is the
        # q-index, which is -1 for fixed joints.
        self.joint_ids = [i for i in range(p.getNumJoints(self.uid)) if p.getJointInfo(self.uid, i)[3] > -1]
        self.multiplier = config.get('multiplier', 0.1)
        self.tolerance = config.get('tolerance', 0.01)

        # Cache each joint's position limits (getJointInfo fields 8 and 9).
        # These must live on self: the original code bound them as locals
        # that reward() could never see.
        joint_info = [p.getJointInfo(self.uid, i) for i in self.joint_ids]
        self.lower_limit = np.array([info[8] for info in joint_info])
        self.upper_limit = np.array([info[9] for info in joint_info])

    def reward(self):
        """Return -multiplier if any joint is within ``tolerance`` of a limit, else 0.

        Fixes relative to the original: joint positions are actually queried
        from the simulation (``position`` was undefined), the cached limits
        are read from self, elementwise ``np.minimum`` replaces ``np.min``
        (whose second positional argument is an axis, not an array), and the
        comparison is ``<`` so the penalty fires when a joint is *near* a
        limit — the original ``>`` would have penalised free joints.
        """
        positions = np.array([state[0] for state in p.getJointStates(self.uid, self.joint_ids)])
        distance_to_limit = np.minimum(np.abs(self.lower_limit - positions),
                                       np.abs(self.upper_limit - positions))
        return -self.multiplier if np.any(distance_to_limit < self.tolerance) else 0.0
3305444 | <reponame>cpieloth/PackBacker
__author__ = '<NAME>'
class Parameter(object):
    """Shared parameter names, used consistently across commands, tasks and job files."""

    # Filesystem-related parameter keys.
    CONFIG_FILE = 'cfg_file'
    DEST_DIR = 'dest_dir'

    # Version selection key.
    VERSION = 'version'
4800414 | # -*- coding: utf-8 -*-
"""Standardize methods for logfile handling.
Using Loguru establish basic console and filesystem outputs.
"""
import os
import tqdm
import datetime as dt
from loguru import logger
@logger.catch
def defineLoggers(filename, CONSOLE='INFO', FILELOG='DEBUG'):
    """Setup standardized basic logging using the Loguru module.

    Installs two sinks: a console sink (tqdm-friendly, level CONSOLE) and a
    rotating file sink under ./LOGS/ (level FILELOG).

    Args:
        filename (str): Descriptive name to use for filesystem logs.
        CONSOLE (str): Logging level for display to console output.
        FILELOG (str): Logging level for the logfile.

    Returns:
        None
    """
    class Rotator:
        # Custom rotation handler that combines filesize limits with time controlled rotation.
        def __init__(self, *, size, at):
            now = dt.datetime.now()
            self._size_limit = size
            self._time_limit = now.replace(hour=at.hour, minute=at.minute, second=at.second)
            if now >= self._time_limit:
                # The current time is already past the target time so it would rotate already.
                # Add one day to prevent an immediate rotation.
                self._time_limit += dt.timedelta(days=1)
        def should_rotate(self, message, file):
            # Seek to end to measure current file size before appending.
            file.seek(0, 2)
            if file.tell() + len(message) > self._size_limit:
                return True
            if message.record["time"].timestamp() > self._time_limit.timestamp():
                # Roll the daily deadline forward once it has triggered.
                self._time_limit += dt.timedelta(days=1)
                return True
            return False
    # Rotate the file if over 500 MB (5e+8 bytes) or at midnight every day.
    rotator = Rotator(size=5e+8, at=dt.time(0, 0, 0))
    # example usage: logger.add("file.log", rotation=rotator.should_rotate)
    # Begin logging definition
    logger.remove() # removes the default console logger provided by Loguru.
    # I find it to be too noisy with details more appropriate for file logging.
    # INFO and messages of higher priority only shown on the console.
    # NOTE(review): the tqdm *module* does not expose a top-level write();
    # this likely needs tqdm.tqdm.write (or `from tqdm import tqdm`) — confirm
    # against the tqdm version in use.
    logger.add(lambda msg: tqdm.write(msg, end=""), format="{message}", level=CONSOLE)
    # NOTE(review): per loguru docs, configure(handlers=...) *replaces* all
    # previously added sinks, which would discard the tqdm sink added just
    # above — confirm this ordering is intended.
    logger.configure(handlers=[{"sink": os.sys.stderr, "level": CONSOLE}])
    # this method automatically suppresses the default handler to modify the message level
    logger.add(
        "".join(["./LOGS/", filename, "_{time}.log"]),
        rotation=rotator.should_rotate,
        level=FILELOG,
        encoding="utf8"
    )
    # create a new log file for each run of the program
    return
| StarcoderdataPython |
1665937 | <reponame>Phid13/IP2<filename>main.py<gh_stars>0
import matplotlib.pyplot as plt
from model import *
from data import *
from labels import *
from skimage import measure
#os.environ["CUDA_VISIBLE_DEVICES"] = "0"
path = "data/RednBlue/Axial/Label"
gen_labels(path)
# change
data_gen_args = dict(rotation_range=0.2,
width_shift_range=0.05,
height_shift_range=0.05,
shear_range=0.05,
zoom_range=0.05,
horizontal_flip=True,
fill_mode='nearest')
myGene = trainGenerator(2,'data/membrane/train','image','label',data_gen_args,save_to_dir = None)
model = unet()
model.fit(myGene,steps_per_epoch=300,epochs=1)
# model_checkpoint = ModelCheckpoint('unet_membrane.hdf5', monitor='loss',verbose=1, save_best_only=True)
# model.fit(myGene,steps_per_epoch=300,epochs=1,callbacks=[model_checkpoint])
testGene = testGenerator("data/membrane/test")
results = model.predict(testGene,30,verbose=1)
saveResult("data/membrane/results",results)
# fig, ax = plt.subplots()
# flag_multi_class = False
# for i,item in enumerate(results):
# image = labelVisualize(num_class,COLOR_DICT,item) if flag_multi_class else item[:,:,0]
# r = image
# ax.imshow(r, cmap=plt.cm.gray)
# # Find contours at a constant value of 0.8
# contours = measure.find_contours(r, 0.8)
# for contour in contours:
# ax.plot(contour[:, 1], contour[:, 0], linewidth=2)
# ax.axis('image')
# ax.set_xticks([])
# ax.set_yticks([])
# plt.savefig("data/membrane/test/image_%d"%i)
| StarcoderdataPython |
132100 | from numpy import sum
from gwlfe.Memoization import memoize
def TotLAEU(NumAnimals, AvgAnimalWt):
    """Total livestock animal equivalent units (AEUs).

    Sums NumAnimals[i] * AvgAnimalWt[i] / 1000 over the livestock animal
    indices 5, 4, 6, 0 and 1 — the same index set as the vectorized
    TotLAEU_f below.
    """
    livestock_indices = (5, 4, 6, 0, 1)
    total = 0
    for i in livestock_indices:
        total += (NumAnimals[i] * AvgAnimalWt[i]) / 1000
    return total
@memoize
def TotLAEU_f(NumAnimals, AvgAnimalWt):
    # Vectorized equivalent of TotLAEU; results are cached by @memoize.
    # Note: `sum` here is numpy.sum (imported at module level), and the
    # fancy indexing requires ndarray inputs.
    # Indices 0, 1, 4, 5, 6 are the livestock animal types counted as AEUs.
    return sum(NumAnimals[[0, 1, 4, 5, 6]] * AvgAnimalWt[[0, 1, 4, 5, 6]] / 1000)
| StarcoderdataPython |
1695046 | <reponame>mexxexx/ionsrcopt
import pandas as pd
import numpy as np
import seaborn as sns
import sys, os
import matplotlib.pyplot as plt
import argparse
sys.path.insert(1, os.path.abspath("../ionsrcopt"))
import load_data as ld
from source_features import SourceFeatures
from processing_features import ProcessingFeatures
def main():
    """Show a seaborn pairplot of selected source features for a random
    sample of clustered data, filtered by source stability and (optionally)
    a single cluster; points are colored by cluster id."""
    ######################
    ###### SETTINGS ######
    ######################

    clustered_data_folder = "../Data_Clustered/"  # Base folder of clustered data
    filename = "JanNov2016.csv"  # The file to load

    features = [
        SourceFeatures.BIASDISCAQNV,
        SourceFeatures.GASAQN,
        SourceFeatures.OVEN1AQNP,
        SourceFeatures.THOMSON_FORWARDPOWER,
        SourceFeatures.SOLINJ_CURRENT,
        SourceFeatures.SOLCEN_CURRENT,
        SourceFeatures.SOLEXT_CURRENT,
        SourceFeatures.SOURCEHTAQNI,
        SourceFeatures.BCT25_CURRENT,
    ]  # Features to be displayed

    args = parse_args()
    source_stability = args["source_stability"]
    cluster = args["cluster"]
    sample_size = args["sample_size"]

    ######################
    ######## CODE ########
    ######################

    # Load, fill and type-convert the clustered CSV.
    path = clustered_data_folder + filename
    df = ld.read_data_from_csv(path, None, None)
    df = ld.fill_columns(df, None, fill_nan_with_zeros=True)
    df = ld.convert_column_types(df)

    # Keep only rows matching the requested stability (and cluster, if given).
    df = df.loc[(df[ProcessingFeatures.SOURCE_STABILITY] == source_stability)].copy()
    if not cluster is None:
        df = df.loc[(df[ProcessingFeatures.CLUSTER] == cluster)].copy()

    # Draw a random sample of at most sample_size rows to keep the plot fast.
    index_length = len(df.index)
    indices = np.random.permutation(range(index_length))[
        : min(sample_size, index_length)
    ]
    data = df.loc[df.index[indices]].copy()

    sns.pairplot(data, vars=features, hue=ProcessingFeatures.CLUSTER)
    plt.show()
def parse_args():
    """Parse the command line options of the cluster viewer.

    Returns:
        dict with keys 'source_stability' (int, default 1),
        'cluster' (int or None, default None) and
        'sample_size' (int, default 1000).
    """
    parser = argparse.ArgumentParser(description="View time development of clusters")
    parser.add_argument(
        "-s",
        "--source_stability",
        default=1,
        type=int,
        help="1 if you want to look at the stable source, 0 else",
    )
    parser.add_argument(
        "-c",
        "--cluster",
        default=None,
        type=int,
        help="The cluster you want to look at, or None for all data",
    )
    parser.add_argument(
        "-n",
        "--sample_size",
        default=1000,
        type=int,
        help="Number of datapoints to display",
    )
    parsed = parser.parse_args()
    return {
        "source_stability": parsed.source_stability,
        "cluster": parsed.cluster,
        "sample_size": parsed.sample_size,
    }
if __name__ == "__main__":
main()
| StarcoderdataPython |
3276248 | <filename>dev/python/2018-10-03 protocol benchmark.py
"""
See how fast just the protocol can be read from ABF files
"""
import os
import sys
PATH_HERE = os.path.abspath(os.path.dirname(__file__))
PATH_DATA = os.path.abspath(PATH_HERE+"../../../data/abfs/")
PATH_SRC = os.path.abspath(PATH_HERE+"../../../src/")
sys.path.insert(0, PATH_SRC)
import pyabf
import time
# Hard-coded path on a network share; only valid on the author's machine.
# (The trailing semicolon is harmless but un-Pythonic.)
demoAbf2File = R"X:\Data\C57\Tat project\abfs-intrinsics\2018_07_24_DIC1_0000.abf";
timeStart = time.time()
benchmarkRepeats = 1000
# Time how long header-only reads take: loadData=False skips the sweep data,
# so each iteration parses just the header and pulls the protocol string.
for i in range(benchmarkRepeats):
    abf = pyabf.ABF(demoAbf2File, loadData=False)
    protocol = abf.protocol
timeElapsed = time.time() - timeStart
print(f"Reading full header from {benchmarkRepeats} ABF files with pyABF took {timeElapsed} sec")
4831263 | from __future__ import print_function
import os
import sys
import subprocess
r_dependencies = ["RSQLite", "plyr", "gplots", "devtools", "ggplot2"]
r_github_dependencies = ["ucd-cws/wq-heatplot"]
def set_up_r_dependencies():
    """Install the R package dependencies (CRAN and GitHub) via launchR."""
    import launchR  # imported here because it will be installed before this is called, but won't be installed at load time in all cases
    R = launchR.Interpreter()
    R.install_packages(["devtools"])
    # Pin r-lib/remotes to a known-good commit so the subsequent installs
    # resolve correctly (wq-heatplot failed to install without this).
    R.run("-e", ["\"devtools::install_github('r-lib/remotes', ref = 'e56a41e1d0cad55cbe7d60b274b99ab7b7a76b5c')\"",]) # this is a hopefully temporary fix, which should make it so that the other packages install correctly. I believe wq-heatplot wasn't installing correctly without this.
    R.install_packages(r_dependencies, missing_only=False)
    R.install_github(r_github_dependencies)
def find_wheels(path):
    """
    Return the names of Python wheel (.whl) files that sit directly in "path".

    Only regular files are considered; directories whose names happen to end
    in .whl are ignored. Bare filenames are returned, not full paths — the
    caller joins them back onto the directory when installing.

    :param path: directory to scan for wheel files
    :return: list of wheel filenames found in the directory
    """
    wheels = []
    for entry in os.listdir(path):
        if entry.endswith(".whl") and os.path.isfile(os.path.join(path, entry)):
            wheels.append(entry)
    return wheels
if __name__ == "__main__":
print("Removing old versions of the code, if they exist.")
subprocess.call([sys.executable, "-m", "pip", "uninstall", "arcproject_wq", "-q"])
try:
subprocess.check_output([sys.executable, "-m", "pip", "install", "--upgrade", "pip"], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print("arcproject package install failed {}.\nInstaller output the following while processing:\n{}".format(
e.returncode, e.output))
sys.exit(1)
install_folder = os.path.split(os.path.abspath(__file__))[0]
for wheel in find_wheels(install_folder):
print("Install wheel file {}".format(wheel))
try:
subprocess.check_output([sys.executable, "-m", "pip", "install", os.path.join(install_folder, wheel)],
stderr=subprocess.STDOUT) # should install requirements too
except subprocess.CalledProcessError as e:
print("arcproject package install failed {}.\nInstaller output the following while processing:\n{}".format(
e.returncode, e.output))
sys.exit(1)
set_up_r_dependencies()
print("Installation complete")
| StarcoderdataPython |
171525 | import warnings
from pymysql.tests import base
import pymysql.cursors
class CursorTest(base.PyMySQLTestCase):
def setUp(self):
super(CursorTest, self).setUp()
conn = self.connections[0]
self.safe_create_table(
conn,
"test", "create table test (data varchar(10))",
)
cursor = conn.cursor()
cursor.execute(
"insert into test (data) values "
"('row1'), ('row2'), ('row3'), ('row4'), ('row5')")
cursor.close()
self.test_connection = pymysql.connect(**self.databases[0])
self.addCleanup(self.test_connection.close)
def test_cleanup_rows_unbuffered(self):
conn = self.test_connection
cursor = conn.cursor(pymysql.cursors.SSCursor)
cursor.execute("select * from test as t1, test as t2")
for counter, row in enumerate(cursor):
if counter > 10:
break
del cursor
self.safe_gc_collect()
c2 = conn.cursor()
with warnings.catch_warnings(record=True) as log:
warnings.filterwarnings("always")
c2.execute("select 1")
self.assertGreater(len(log), 0)
self.assertEqual(
"Previous unbuffered result was left incomplete",
str(log[-1].message))
self.assertEqual(
c2.fetchone(), (1,)
)
self.assertIsNone(c2.fetchone())
def test_cleanup_rows_buffered(self):
conn = self.test_connection
cursor = conn.cursor(pymysql.cursors.Cursor)
cursor.execute("select * from test as t1, test as t2")
for counter, row in enumerate(cursor):
if counter > 10:
break
del cursor
self.safe_gc_collect()
c2 = conn.cursor()
c2.execute("select 1")
self.assertEqual(
c2.fetchone(), (1,)
)
self.assertIsNone(c2.fetchone())
| StarcoderdataPython |
75596 | from mongoengine import *
from flask_login import UserMixin
class User(Document, UserMixin, object):
    """Application user stored in MongoDB, usable as a Flask-Login user."""

    # Unique login handle; required for every account.
    username = StringField(required=True, unique=True)
    # Optional contact address.
    email = StringField()

    def get_id(self):
        """Return the document's id as a string (Flask-Login hook)."""
        return str(self.id)

    def __str__(self):
        return self.username

    # The debug representation mirrors the plain string form.
    __repr__ = __str__
| StarcoderdataPython |
1770930 | <reponame>nparkstar/nauta
#
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from enum import Enum
from typing import Pattern, List
def filter_by_name_regex(resource_object_dict: dict, name_regex: Pattern = None, spec_location: bool = True):
    """Match a resource's name against *name_regex*.

    The name is read from ['spec']['name'] when spec_location is True,
    otherwise from ['metadata']['name']. With no regex every resource
    passes (returns True); otherwise the regex search result is returned
    (a Match object or None), which callers use for its truthiness.
    """
    section = 'spec' if spec_location else 'metadata'
    name = resource_object_dict[section]['name']
    return name_regex.search(name) if name_regex else True
def filter_by_state(resource_object_dict: dict, state: Enum = None):
    """Keep resources whose ['spec']['state'] equals *state*'s value; all pass when state is None."""
    if not state:
        return True
    return resource_object_dict['spec']['state'] == state.value
def filter_by_excl_state(resource_object_dict: dict, state: Enum = None):
    """Keep resources whose ['spec']['state'] differs from *state*'s value; all pass when state is None."""
    if not state:
        return True
    return resource_object_dict['spec']['state'] != state.value
def filter_by_experiment_name(resource_object_dict: dict, exp_name: List[str] = None):
    """Keep resources whose ['spec']['experiment-name'] is in *exp_name*; all pass when exp_name is falsy."""
    if not exp_name:
        return True
    return resource_object_dict['spec']['experiment-name'] in exp_name
| StarcoderdataPython |
3368536 | import sublime
import sublime_plugin
def clamp(value, low, high):
    """Return *value* limited to the range [low, high], checking low first."""
    if value < low:
        return low
    return high if value > high else value
# This is to keep the original anchor as long as we keep making vscode style
# column selections - otherwise clear it.
last_selection = None
# Use an EventListener rather than a ViewEventListener because Sublime Text
# builds below 3155 don't fire the ViewEventListener.on_text_command method.
class ColumnSelectionListener(sublime_plugin.EventListener):
    """Clears the remembered column-selection anchor when it becomes stale."""

    def on_deactivated(self, view):
        # Switching away from the view invalidates the stored anchor.
        global last_selection
        last_selection = None

    def on_text_command(self, view, name, args):
        # Any command other than another column_selection ends the streak,
        # so the next column selection starts from a fresh anchor.
        global last_selection
        if name != "column_selection":
            last_selection = None
class ColumnSelectionCommand(sublime_plugin.TextCommand):
# Return true here in order to get mouse event passed to run.
def want_event(self):
return True
# Our actual command.
def run(self, _edit, event):
global last_selection
# Treat the first selection as the primary selection:
# Sublime Text has no concept of "primary selections", if you make
# multiple selections and then press `esc` to go back to one selection,
# then Sublime will always select the first (topmost) selection.
#
# This means the functionality may be slightly unexpected when compared
# to other editors, where for example, the primary selection is:
# Atom: the first selection made
# VSCode: the first selection made
# CodeMirror: the last selection made
# ACE: the last selection made
views_selections = self.view.sel()
# We save the last selection to better emulate the behaviour of vscode.
if last_selection is not None:
primary_selection = last_selection
else:
last_selection = primary_selection = views_selections[0]
# Use layout coordinates to find the start and end of each selection on
# each line. It's easier just to use `row`s and `col`s, but that won't
# be accurate for emoji and other non-monospaced characters.
# Think of `(x1, y1)` and `(x2, y2)` defining the edges of a rectangle
# which is our selection.
(x1, y1) = self.view.text_to_layout(primary_selection.a)
(x2, y2) = self.view.window_to_layout((event.get('x'), event.get('y')))
start_row = self.view.rowcol(primary_selection.a)[0]
click_point = self.view.layout_to_text((x2, y2))
click_row = self.view.rowcol(click_point)[0]
# Check if we should put a selection of every line.
all_empty = True
skipped_lines = []
# limit for ignoring selections - used to allow for the slightly off
# measurements that are present when there are non-monospaced characters.
limit = self.view.em_width() / 2
# Iterate through lines (from top to bottom) and create selection regions.
selections = []
first, last = min(start_row, click_row), max(start_row, click_row)
for row in range(first, last + 1):
# This line's region.
line = self.view.line(self.view.text_point(row, 0))
# Just remember the line's region if it's empty and continue.
if line.empty():
skipped_lines.append(line)
continue
# Find the first & last char at the start & click points on this line.
line_y = self.view.text_to_layout(line.a)[1]
a = clamp(self.view.layout_to_text((x1, line_y)), line.a, line.b)
b = clamp(self.view.layout_to_text((x2, line_y)), line.a, line.b)
region = sublime.Region(a, b)
# Skip lines that don't reach inside the column selection.
point = self.view.text_to_layout(a)[0]
if x1 < x2 and point < x1 - limit or x1 > x2 and point < x2 - limit:
skipped_lines.append(region)
continue
if all_empty and not region.empty():
all_empty = False
# Add region to selections.
selections.append(region)
# Place a selection on every line (even if it's not within the rect) if:
# the starting line is an empty line, or
# all of the selected regions are empty.
if self.view.line(primary_selection).empty() or all_empty:
selections.extend(skipped_lines)
# Replace the view's selections, if we have any to add.
if len(selections):
views_selections.clear()
views_selections.add_all(selections)
| StarcoderdataPython |
3397341 | import connexion
#app = connexion.FlaskApp(__name__)  # Flask-based alternative, kept for reference
app = connexion.AioHttpApp(__name__)
# Routes and handlers are generated from the OpenAPI specification.
app.add_api("swagger/openapi.yaml")
# Expose the underlying aiohttp application object for the server runner.
application = app.app
176035 | import numpy as np
import pandas as pd
from vimms.old_unused_experimental.PythonMzmine import get_base_scoring_df
from vimms.Roi import make_roi
QCB_MZML2CHEMS_DICT = {'min_ms1_intensity': 1.75E5,
'mz_tol': 2,
'mz_units': 'ppm',
'min_length': 1,
'min_intensity': 0,
'start_rt': 0,
'stop_rt': 1560}
def get_rois(mzml, min_roi_length, mzml2chems_dict=QCB_MZML2CHEMS_DICT):
    # Build regions of interest from an mzML file. min_roi_length overrides
    # the dict's own 'min_length' entry; every other tolerance/bound comes
    # from mzml2chems_dict. Returns (good_roi, junk_roi) as from make_roi.
    good_roi, junk_roi = make_roi(mzml, mz_tol=mzml2chems_dict['mz_tol'], mz_units=mzml2chems_dict['mz_units'],
                                  min_length=min_roi_length, min_intensity=mzml2chems_dict['min_intensity'],
                                  start_rt=mzml2chems_dict['start_rt'], stop_rt=mzml2chems_dict['stop_rt'])
    return good_roi, junk_roi
def mzml2classificationdata(mzmls, mzml_picked_peaks_files, min_roi_length=5, mzml2chems_dict=QCB_MZML2CHEMS_DICT,
mz_slack=0.01, drift_window_lengths=[5], rt_peak_tol=2, include_status=True):
rois = []
for i in range(len(mzmls)):
good_roi, junk_roi = get_rois(mzmls[i], min_roi_length, mzml2chems_dict)
rois.extend(good_roi)
picked_peaks = get_base_scoring_df(mzml_picked_peaks_files[i])
df_new = rois2classificationdata2(good_roi, picked_peaks, mz_slack=mz_slack,
drift_window_lengths=drift_window_lengths, rt_peak_tol=rt_peak_tol,
include_status=include_status)
if i == 0:
df = df_new
else:
df = pd.concat([df, df_new])
return df, rois
class get_prob_classifier(object):
    """Empirical classifier: for each observed window-change count, store the
    frequency of the four RT-status classes seen in the training mzML files."""

    def __init__(self, mzmls, mzml_picked_peaks_files, min_roi_length=5, mzml2chems_dict=QCB_MZML2CHEMS_DICT,
                 mz_slack=0.01, roi_change_n=5, rt_peak_tol=2):
        self.roi_change_n = roi_change_n
        df, rois = mzml2classificationdata(mzmls, mzml_picked_peaks_files, min_roi_length, mzml2chems_dict,
                                           mz_slack, [roi_change_n], rt_peak_tol, include_status=True)
        # Drop rows with fewer than 2 non-NaN values (the window warm-up rows).
        df = df.dropna(thresh=2)
        base_classes = ['Decrease', 'Increase', 'Noise', 'Top']
        # probabilities[k] holds P(class | window-change count == k) in
        # base_classes order, estimated by relative frequency.
        self.probabilities = []
        for i in range(int(max(df.iloc[:, 0]) + 1)):
            i_classes = df['rt_status'].iloc[np.where(df.iloc[:, 0] == i)[0]]
            probs = np.array([sum(i_classes == base) for base in base_classes]) / len(i_classes)
            self.probabilities.append(probs)

    def predict(self, value):
        # Look up the stored class-probability vector for this change count.
        return self.probabilities[value]
def calculate_window_change(intensities, drift_window_len):
    """Count how many of the last (drift_window_len - 1) consecutive steps increase.

    Each of the trailing drift_window_len intensities is compared with its
    predecessor; the number of strictly positive differences is returned.
    """
    values = np.array(intensities)
    steps = values[-(drift_window_len - 1):] - values[-drift_window_len:-1]
    return sum(steps > 0)
def find_possible_peaks(roi, picked_peaks, mz_slack):
    # Return positional indices into picked_peaks (a DataFrame with
    # 'rt min'/'rt max'/'m/z min'/'m/z max' columns) that could correspond
    # to this ROI: a coarse RT-overlap and m/z screen first, then a refined
    # check of the ROI's in-window mean m/z against each candidate.
    #
    # RT overlap: the peak starts inside the ROI, ends inside it, or spans it.
    rt_check1 = (picked_peaks['rt min'] >= roi.rt_list[0]) & (roi.rt_list[-1] >= picked_peaks['rt min'])
    rt_check2 = (picked_peaks['rt max'] >= roi.rt_list[0]) & (roi.rt_list[-1] >= picked_peaks['rt max'])
    rt_check3 = (picked_peaks['rt min'] <= roi.rt_list[0]) & (picked_peaks['rt max'] >= roi.rt_list[-1])
    rt_check = rt_check1 | rt_check2 | rt_check3
    # logger.debug('rt len ' + len(rt_check))
    # logger.debug('rt check ' + rt_check)
    # plus and minus one is just slack for the initial check
    initial_mz_check = (picked_peaks['m/z max'] + 1 >= roi.get_mean_mz()) & (
            roi.get_mean_mz() >= picked_peaks['m/z min'] - 1)
    possible_peaks = np.where(np.logical_and(rt_check, initial_mz_check))[0]
    updated_possible_peaks = []
    for j in possible_peaks:
        peak = picked_peaks.iloc[j]
        # Restrict the ROI to scans inside this peak's RT window and take
        # the mean m/z there for the refined containment test.
        check_peak = np.nonzero((peak['rt min'] < roi.rt_list) & (roi.rt_list < peak['rt max']))[0]
        mean_mz = np.mean(np.array(roi.mz_list)[check_peak])
        if peak['m/z min'] - mz_slack < mean_mz < peak['m/z max'] + mz_slack:
            updated_possible_peaks.append(j)
    return updated_possible_peaks
def rois2classificationdata2(rois, picked_peaks, mz_slack=0.01, drift_window_lengths=[5], rt_peak_tol=2,
include_status=True):
roi_change_list = [[] for i in range(len(drift_window_lengths))]
rt_status_list = []
for roi in rois:
# get drift data
for window in range(len(drift_window_lengths)):
roi_change_list[window].extend([None for i in range(drift_window_lengths[window] - 1)])
roi_change = [calculate_window_change(roi.intensity_list[:i], drift_window_lengths[window])
for i in range(drift_window_lengths[window], roi.n + 1)]
roi_change_list[window].extend(roi_change)
# get possible peaks
if include_status:
possible_peaks = find_possible_peaks(roi, picked_peaks, mz_slack)
possible_peaks_list = picked_peaks.iloc[possible_peaks]
# get data
if not possible_peaks:
rt_status_list.extend([0 for rt in roi.rt_list])
else:
for rt in roi.rt_list:
rt_status = 0
for j in range(len(possible_peaks_list.index)):
if possible_peaks_list['rt centre'].iloc[j] - rt_peak_tol <= rt <= \
possible_peaks_list['rt centre'].iloc[j] + rt_peak_tol:
rt_status = max(3, rt_status)
elif possible_peaks_list['rt min'].iloc[j] <= rt <= possible_peaks_list['rt centre'].iloc[j]:
rt_status = max(2, rt_status)
elif possible_peaks_list['rt centre'].iloc[j] <= rt <= possible_peaks_list['rt max'].iloc[j]:
rt_status = max(1, rt_status)
else:
rt_status = max(0, rt_status)
rt_status_list.append(rt_status)
# convert rt status to classes
if include_status:
rt_status_list = np.array(rt_status_list)
rt_status_list_str = np.array(['Unknown' for i in range(len(rt_status_list))], dtype="<U10")
rt_status_list_str[np.where(rt_status_list == 0)[0]] = 'Noise'
rt_status_list_str[np.where(rt_status_list == 1)[0]] = 'Decrease'
rt_status_list_str[np.where(rt_status_list == 2)[0]] = 'Increase'
rt_status_list_str[np.where(rt_status_list == 3)[0]] = 'Top'
# save as data frame
df = pd.DataFrame()
for window in range(len(drift_window_lengths)):
df['roi_change_' + str(drift_window_lengths[window])] = roi_change_list[window]
if include_status:
df['rt_status'] = rt_status_list_str
return df
# def get_intensity_difference(roi_intensities, n, positive=True):
# # add exception for short roi
# difference = []
# for i in range(len(roi_intensities) - n):
# difference.append(np.log(roi_intensities[i + n]) - np.log(roi_intensities[i]))
# if positive:
# return max(difference)
# else:
# return min(difference)
#
#
# def get_max_increasing(roi_intensities, n_skip=0, increasing_TF=True):
# # add exception for short roi
# max_increasing = 0
# for i in range(len(roi_intensities)):
# current_increasing = 0
# current_skip = 0
# if len(roi_intensities[i:]) <= max_increasing:
# break
# for j in range(1, len(roi_intensities[i:])):
# if (roi_intensities[i:][j] > roi_intensities[i:][j - 1 - current_skip]) == increasing_TF:
# current_increasing += 1 + current_skip
# current_skip = 0
# else:
# current_skip += 1
# if current_skip > n_skip:
# max_increasing = max(max_increasing, current_increasing)
# break
# return max_increasing
#
#
# def get_intensity_list(roi, max_length):
# if max_length is None:
# return roi.intensity_list
# else:
# return roi.intensity_list[0:max_length]
# def rois2classificationdata(rois, picked_peaks, mz_slack=0.01):
# base_roi = []
# base_status = []
# split_roi = []
# split_status = []
# for roi in rois:
# rt_check1 = (picked_peaks['rt min'] >= roi.rt_list[0]) & (roi.rt_list[-1] >= picked_peaks['rt min'])
# rt_check2 = (picked_peaks['rt max'] >= roi.rt_list[0]) & (roi.rt_list[-1] >= picked_peaks['rt max'])
# rt_check3 = (picked_peaks['rt min'] <= roi.rt_list[0]) & (picked_peaks['rt max'] >= roi.rt_list[-1])
# rt_check = rt_check1 | rt_check2 | rt_check3
# # plus and minus one is just slack for the initial check
# initial_mz_check = (picked_peaks['m/z max'] + 1 >= roi.get_mean_mz()) & (
# roi.get_mean_mz() >= picked_peaks['m/z min'] - 1)
# possible_peaks = np.nonzero(rt_check & initial_mz_check)[0]
# if len(possible_peaks) == 0:
# base_roi.append(roi)
# split_roi.append(roi)
# base_status.append(0)
# split_status.append(0)
# else:
# updated_possible_peaks = []
# for j in possible_peaks:
# peak = picked_peaks.iloc[j]
# check_peak = np.nonzero((peak['rt min'] < roi.rt_list) & (roi.rt_list < peak['rt max']))[0]
# mean_mz = np.mean(np.array(roi.mz_list)[check_peak])
# if peak['m/z min'] - mz_slack < mean_mz < peak['m/z max'] + mz_slack:
# updated_possible_peaks.append(j)
# if len(updated_possible_peaks) == 0:
# base_roi.append(roi)
# split_roi.append(roi)
# base_status.append(0)
# split_status.append(0)
# else:
# if len(updated_possible_peaks) == 1:
# base_roi.append(roi)
# split_roi.append(roi)
# base_status.append(1)
# split_status.append(1)
# if len(updated_possible_peaks) > 1:
# base_roi.append(roi)
# base_status.append(1)
# df = picked_peaks.iloc[updated_possible_peaks]
# df = df.sort_values(by=['rt min'])
# splits = (np.array(df['rt min'][1:]) + np.array(df['rt max'][0:-1])) / 2
# splits = np.insert(np.insert(splits, 0, 0), len(splits) + 1, 2000)
# for j in range(len(splits) - 1):
# check_range1 = roi.rt_list > splits[j]
# check_range2 = roi.rt_list < splits[j + 1]
# mz = np.array(roi.mz_list)[np.nonzero(check_range1 & check_range2)[0]].tolist()
# rt = np.array(roi.rt_list)[np.nonzero(check_range1 & check_range2)[0]].tolist()
# intensity = np.array(roi.intensity_list)[np.nonzero(check_range1 & check_range2)].tolist()
# split_roi.append(Roi(mz, rt, intensity))
# split_status.append(1)
# return base_roi, base_status, split_roi, split_status
#
#
# def get_roi_classification_params(rois, roi_param_dict):
# df = pd.DataFrame()
# if roi_param_dict['include_log_max_intensity']:
# df['log_max_intensity'] = np.log([roi.get_max_intensity() for roi in rois])
# if roi_param_dict['include_log_intensity_difference']:
# df['log_intensity_difference'] = np.log(df['log_max_intensity']) - np.log([roi.get_min_intensity() for roi in rois])
# if roi_param_dict['consecutively_change_max'] > 0:
# for i in range(roi_param_dict['consecutively_change_max']):
# df['n_increase_' + str(i)] = [get_max_increasing(roi.intensity_list, i, True) for roi in rois]
# df['n_decrease_' + str(i)] = [get_max_increasing(roi.intensity_list, i, False) for roi in rois]
# df['n_interaction_' + str(i)] = df['n_increase_' + str(i)] * df['n_decrease_' + str(i)]
# if roi_param_dict['intensity_change_max'] > 0:
# for i in range(roi_param_dict['intensity_change_max']):
# df['intensity_increase_' + str(i)] = [get_intensity_difference(roi.intensity_list, i+1, True) for roi in rois]
# df['intensity_decrease_' + str(i)] = [get_intensity_difference(roi.intensity_list, i+1, False) for roi in rois]
# df['intensity_interaction_' + str(i)] = df['intensity_increase_' + str(i)] * df['intensity_decrease_' + str(i)]
# if roi_param_dict['lag_max'] > 0:
# for i in range(roi_param_dict['lag_max']):
# df['autocorrelation_' + str(i+1)] = [roi.get_autocorrelation(i+1) for roi in rois]
# return df
| StarcoderdataPython |
4801359 | <filename>nano/nano/report/sales_partner_commission_summary_report/sales_partner_commission_summary_report.py<gh_stars>0
# Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import msgprint, _
from frappe.utils import flt
def execute(filters=None):
	"""Report entry point.

	Returns the ``(columns, data)`` pair Frappe's query-report runner
	expects, built from the user-supplied filter dict.
	"""
	filters = filters or {}
	return get_columns(filters), get_entries(filters)
def get_columns(filters):
	"""Return the report column definitions.

	Columns are generated from a compact spec table:
	(label, fieldname, fieldtype, link_options, width).  Link columns
	carry an ``options`` doctype; Date/Currency columns do not.
	"""
	specs = [
		("Sales Invoice", "name", "Link", "Sales Invoice", 120),
		("Customer", "customer", "Link", "Customer", 140),
		("Territory", "territory", "Link", "Territory", 100),
		("Posting Date", "posting_date", "Date", None, 110),
		("Grand Total", "grand_total", "Currency", None, 120),
		("Amount", "amount", "Currency", None, 120),
		("Outstanding", "outstanding_amount", "Currency", None, 120),
		("Sales Partner", "sales_partner", "Link", "Sales Partner", 140),
		("S.P Commission", "sales_partner_commission", "Currency", None, 160),
		("Sales Manager", "sales_manager", "Link", "Sales Partner", 140),
		("S.M Commission", "sales_manager_commission", "Currency", None, 160),
	]
	columns = []
	for label, fieldname, fieldtype, options, width in specs:
		column = {
			"label": _(label),
			"fieldname": fieldname,
			"fieldtype": fieldtype,
			"width": width,
		}
		if options:
			column["options"] = options
		columns.append(column)
	return columns
def get_entries(filters):
	"""Fetch submitted Sales Invoices that carry a sales-partner commission.

	Returns a list of dicts (``as_dict=1``) ordered by invoice name
	(descending) and sales partner.
	"""
	date_field = "posting_date"
	conditions = get_conditions(filters, date_field)
	# `conditions` contains only literal column names plus %(name)s
	# placeholders, so formatting it into the statement is safe; all
	# user-supplied values are bound through the `filters` dict.
	# (The old code also formatted in `date_field` and
	# filters.get('sales_invoice') as {0}/{1}, which the template never
	# referenced.)
	entries = frappe.db.sql("""
		SELECT
			name, customer, territory, posting_date, grand_total,
			base_net_total as amount, sales_partner,
			sales_partner_commission, sales_manager,
			sales_manager_commission, outstanding_amount
		FROM
			`tabSales Invoice`
		WHERE
			{conditions} and docstatus = 1 and sales_partner is not null
			and sales_partner != '' and sales_partner_commission > 0
		ORDER BY name desc, sales_partner
		""".format(conditions=conditions), filters, as_dict=1)
	return entries
def get_conditions(filters, date_field):
	"""Build the WHERE-clause fragment for the report query.

	Equality filters become ``col = %(col)s`` placeholders (values are
	bound later via the `filters` dict); the date range is applied to
	`date_field`; the unpaid flags add fixed predicates.
	"""
	clauses = ["1=1"]
	for field in ("company", "customer", "territory", "sales_partner"):
		if filters.get(field):
			clauses.append("{0} = %({0})s".format(field))
	if filters.get("from_date"):
		clauses.append("{0} >= %(from_date)s".format(date_field))
	if filters.get("to_date"):
		clauses.append("{0} <= %(to_date)s".format(date_field))
	if filters.get("unpaid1"):
		clauses.append("paid = 0")
	if filters.get("unpaid2"):
		clauses.append("paid2 = 0")
	return " and ".join(clauses)
1700954 | from .dtypes import *
# Python frontend
from .frontend.python.decorators import *
from .frontend.python.wrappers import *
from .frontend.python.ndloop import ndrange
from .frontend.python.simulator import simulate
from .config import Config
from .frontend.operations import *
from .sdfg import compile, SDFG, SDFGState
from .memlet import Memlet, EmptyMemlet
from .graph.edges import InterstateEdge
from .symbolic import symbol, eval
# Run Jupyter notebook code
from .jupyter import *
| StarcoderdataPython |
3361446 | <reponame>mecheng/mechcite
from mechcite import Bibliography
from functools import wraps
class cite(object):
    """Decorator that records a bibliography citation the first time the
    decorated callable is invoked.

    Non-callable targets are simply tagged with a ``_cite`` attribute
    holding the citation key instead of being wrapped.
    """

    def __init__(self, key):
        # Citation key looked up in the shared Bibliography.
        self.key = key
        # Ensures the citation is registered at most once.
        self.used = False
        self.bib = Bibliography()

    def __call__(self, f):
        if not hasattr(f, '__call__'):
            # Plain object: attach the key for later inspection.
            f._cite = self.key
            return f

        @wraps(f)
        def wrapper(*args, **kwargs):
            if not self.used:
                self.bib.cite(self.key)
                self.used = True
            return f(*args, **kwargs)

        return wrapper
| StarcoderdataPython |
3271140 | from sensei import *
from vtk import VTK_MULTIBLOCK_DATA_SET, vtkDataObject, VTK_IMAGE_DATA, VTK_DOUBLE, VTK_FLOAT
# test setting and getting the members
# Smoke test: populate every MeshMetadata member with plausible values
# for a 2-block multiblock image-data mesh, then print the result.
md = MeshMetadata.New()
md.GlobalView = True
md.MeshName = "foo"
md.MeshType = VTK_MULTIBLOCK_DATA_SET
md.BlockType = VTK_IMAGE_DATA
md.NumBlocks = 2
md.NumBlocksLocal = [2]
# Global mesh description (extent/bounds cover both blocks).
md.Extent = [0,2,0,2,0,2]
md.Bounds = [0.,1.,0.,1.,0.,1.]
md.CoordinateType = VTK_DOUBLE
md.NumPoints = 3**3
md.NumCells = 2**3
md.CellArraySize = -1
md.NumArrays = 2
md.NumGhostCells = 1
md.NumGhostNodes = 0
md.NumLevels = 1
md.StaticMesh = 1
# Two arrays: a1 point-centered scalar, a2 cell-centered 3-vector.
md.ArrayName = ['a1', 'a2']
md.ArrayCentering = [vtkDataObject.POINT, vtkDataObject.CELL]
md.ArrayComponents = [1, 3]
md.ArrayType = [VTK_DOUBLE, VTK_FLOAT]
md.ArrayRange = [[0.,1.],[0.,1.]]
# Per-block decomposition: both blocks owned by rank 0, splitting the
# global extent in half along the first axis.
md.BlockOwner = [0, 0]
md.BlockIds = [0,1]
md.BlockNumPoints = [2*4**2, 2*4**2]
md.BlockNumCells = [1*3**2, 1*3**2]
md.BlockCellArraySize = [-1,-1]
md.BlockExtents = [[0,1,0,2,0,2], [1,2,0,2,0,2]]
md.BlockBounds = [[0.,.5,0.,1.,0,1.], [0.5,1.,0.,1.,0.,1.]]
md.BlockArrayRange = [[[-1.,1.],[0.,1.]], [[1.,2.],[-1.,0.]]]
# Flags declare which optional per-block fields above are populated.
md.Flags = MeshMetadataFlags()
md.Flags.SetBlockDecomp()
md.Flags.SetBlockExtents()
md.Flags.SetBlockSize()
md.Flags.SetBlockBounds()
md.Flags.SetBlockArrayRange()
print(str(md))
| StarcoderdataPython |
4489 | <filename>ts_eval/utils/nans.py<gh_stars>1-10
import warnings
import numpy as np
def nans_in_same_positions(*arrays):
    """Return True when every provided array has NaNs at exactly the
    same positions as the first one (vacuously True for zero or one
    array)."""
    if not arrays:
        return True
    reference_mask = np.isnan(arrays[0])
    return all(
        (np.isnan(other) == reference_mask).all() for other in arrays[1:]
    )
def nanmeanw(arr, axis=None):
    """np.nanmean with the all-NaN RuntimeWarning silenced.

    Behaves exactly like ``np.nanmean`` otherwise; an all-NaN slice
    still yields NaN, just without the warning.
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=RuntimeWarning)
        result = np.nanmean(arr, axis=axis)
    return result
| StarcoderdataPython |
192927 | """The machinery of importlib: finders, loaders, hooks, etc."""
from ._bootstrap import ModuleSpec
from ._bootstrap import BuiltinImporter
from ._bootstrap import FrozenImporter
from ._bootstrap_external import (SOURCE_SUFFIXES, DEBUG_BYTECODE_SUFFIXES,
OPTIMIZED_BYTECODE_SUFFIXES, BYTECODE_SUFFIXES,
EXTENSION_SUFFIXES)
from ._bootstrap_external import WindowsRegistryFinder
from ._bootstrap_external import PathFinder
from ._bootstrap_external import FileFinder
from ._bootstrap_external import SourceFileLoader
from ._bootstrap_external import SourcelessFileLoader
from ._bootstrap_external import ExtensionFileLoader
from ._bootstrap_external import NamespaceLoader
def all_suffixes():
    """Returns a list of all recognized module suffixes for this process"""
    # Source, bytecode, then extension-module suffixes, in that order.
    return [*SOURCE_SUFFIXES, *BYTECODE_SUFFIXES, *EXTENSION_SUFFIXES]
| StarcoderdataPython |
4823603 | <reponame>ussaema/CT_Image_Reconstruction<filename>main.py
import tensorflow as tf
import numpy as np
from math import cos, sin, pi
import argparse
from tensorflow.python.framework import ops
import lme_custom_ops
import pyconrad as pyc
pyc.setup_pyconrad()
import os
import time
from PIL import Image
class CheckerDrawer:
    """Renders a square checkerboard as a numpy array of 0s and 1s."""

    def draw(self, resolution, tile_size):
        """Return a (resolution x resolution) checkerboard array.

        Each tile is ``tile_size`` pixels... NOTE(review): the returned
        array is actually ``2*half x 2*half`` with one entry per tile,
        where ``half = resolution // (2 * tile_size)`` — confirm whether
        callers expect per-pixel resolution.
        """
        half = int(resolution / (tile_size * 2))
        even_row = half * [0, 1]
        odd_row = half * [1, 0]
        return np.vstack(half * (even_row, odd_row))
# ===================== Volume Parameters =====================
class Volume_Params:
    """Geometry for a 2D parallel-beam CT setup: reconstruction volume,
    detector, and acquisition trajectory, with the tensor protos the
    projection/backprojection ops expect."""

    def trajectory(self, number_of_projections, circle):
        """Return (number_of_projections, 2) unit ray directions evenly
        spaced over `circle` radians."""
        rays = np.zeros([number_of_projections, 2])
        angular_increment = circle / number_of_projections
        for i in np.arange(0, number_of_projections):
            rays[i] = [cos(i * angular_increment), sin(i * angular_increment)]
        return rays
    def __init__(self):
        # Volume Parameter:
        self.volume_dim = 256
        self.volume_shape = [self.volume_dim, self.volume_dim]
        self.volume_spacing = 0.5
        # Detector Parameter
        self.detector_width = 512
        self.detector_spacing = 0.5
        # Trajectory Parameter
        self.number_of_projections = 512
        self.circle = 2 * pi
        # Tensor Proto Stuff
        # NOTE(review): the origin below is scaled by detector_spacing,
        # not volume_spacing — confirm this is intentional (they happen
        # to share the value 0.5 here, masking any mistake).
        self.volume_origin = tf.contrib.util.make_tensor_proto([-((self.volume_dim - 1) / 2 * self.detector_spacing),
                                                                -((self.volume_dim - 1) / 2 * self.detector_spacing)],
                                                               tf.float32)
        # NOTE: volume_spacing / detector_spacing are REBOUND here from
        # python floats to tensor protos; the scalar values are consumed
        # above before being overwritten.
        self.volume_spacing = tf.contrib.util.make_tensor_proto([self.volume_spacing,
                                                                 self.volume_spacing],
                                                                tf.float32)
        self.sinogram_shape = [self.number_of_projections, self.detector_width]
        self.detector_origin = tf.contrib.util.make_tensor_proto([-((self.detector_width - 1) / 2 * self.detector_spacing)],
                                                                 tf.float32)
        self.detector_spacing = tf.contrib.util.make_tensor_proto([self.detector_spacing], tf.float32)
        self.ray_vectors = tf.contrib.util.make_tensor_proto(self.trajectory(self.number_of_projections, self.circle), tf.float32)
self.ray_vectors = tf.contrib.util.make_tensor_proto(self.trajectory(self.number_of_projections, self.circle), tf.float32)
# Registers the gradient of the 2D parallel backprojection op: the
# adjoint of backprojection is forward projection, so the incoming
# gradient (an image) is forward-projected back into sinogram space.
@ops.RegisterGradient( "ParallelBackprojection2D" )
def _backproject_grad( op, grad ):
    # Geometry is re-created here rather than read from `op`, so the
    # gradient is only correct for ops built with these exact defaults.
    volume_params = Volume_Params()
    proj = lme_custom_ops.parallel_projection2d(
        volume = grad,
        volume_shape = volume_params.volume_shape,
        projection_shape = volume_params.sinogram_shape,
        volume_origin = volume_params.volume_origin,
        detector_origin = volume_params.detector_origin,
        volume_spacing = volume_params.volume_spacing,
        detector_spacing = volume_params.detector_spacing,
        ray_vectors = volume_params.ray_vectors,
    )
    return [ proj ]
def generateSinogram(phantom, sino_sess):
    """Forward-project a 2D phantom into a parallel-beam sinogram.

    Builds a fresh projection subgraph in `sino_sess`'s graph each call
    (NOTE(review): repeated calls keep growing the graph — consider
    building the op once and reusing it), then runs it.

    phantom   : 2D array matching Volume_Params.volume_shape.
    sino_sess : an open tf.Session to run the projection in.
    Returns the sinogram array of shape Volume_Params.sinogram_shape.
    """
    # Create VolumeParameters
    volume_params = Volume_Params()
    # TF Phantom Var
    phantom_tf = tf.placeholder(tf.float32, shape=volume_params.volume_shape, name="input_phantom")
    # TF Layer Object
    forwardprojection_layer = lme_custom_ops.parallel_projection2d(phantom_tf,
                                                                   volume_params.volume_shape,
                                                                   volume_params.sinogram_shape,
                                                                   volume_params.volume_origin,
                                                                   volume_params.detector_origin,
                                                                   volume_params.volume_spacing,
                                                                   volume_params.detector_spacing,
                                                                   volume_params.ray_vectors)
    # ===================== TF Session =====================
    # TF STUFF
    init_op = tf.global_variables_initializer()
    sino_sess.run(init_op)
    # just do forward projection
    sinogram = sino_sess.run(forwardprojection_layer, feed_dict={phantom_tf: phantom})
    return sinogram
def main():
    """Train a learned FBP filter: sinogram -> FFT -> learnable filter
    -> IFFT -> backprojection, optimised against ground-truth phantoms.

    Reads volumes.npy, writes reconstructions under ./reconst/, filter
    snapshots under ./filters/, and loss CSVs in the working directory.
    """
    # Create VolumeParameters
    volume_params = Volume_Params()
    #data2 = np.load("data.npy")
    raw_data = np.load("volumes.npy")
    # Reorder (H, W, slices) -> (slices, H, W) so axis 0 indexes volumes.
    data = np.zeros((raw_data.shape[2], raw_data.shape[0], raw_data.shape[1]))
    for i in range(raw_data.shape[2]):
        data[i,:,:] = raw_data[:,:,i]
    print("data2", data.shape)
    # NOTE(review): slice 50 is assigned to neither split — confirm the
    # train/test boundary (0:50 vs 51:100) is intentional.
    train_phantoms = data[0:50]
    test_phantoms = data[51:100]
    np.save("./reconst/ground_truth.npy", test_phantoms[0])
    # append some noise to train data
    #train_phantoms = np.append(train_phantoms, np.random.uniform(size=(10,volume_params.volume_dim,volume_params.volume_dim)), axis=0)
    #train_phantoms = np.random.uniform(size=(10,volume_params.volume_dim,volume_params.volume_dim))
    # generate sinograms of training data
    train_sinograms = np.zeros((train_phantoms.shape[0],)+tuple(volume_params.sinogram_shape))
    test_sinograms = np.zeros((test_phantoms.shape[0],)+tuple(volume_params.sinogram_shape))
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)) as sino_sess:
        for i in range(train_phantoms.shape[0]):
            train_sinograms[i] = generateSinogram(train_phantoms[i], sino_sess)
        for i in range(test_phantoms.shape[0]):
            test_sinograms[i] = generateSinogram(test_phantoms[i], sino_sess)
    # checkerboard phantoms
    checker_drawer = CheckerDrawer()
    check = checker_drawer.draw(256, 1)
    # Get Phantom
    conrad_phantom_class = pyc.ClassGetter('edu.stanford.rsl.tutorial.phantoms')
    phantom = conrad_phantom_class.SheppLogan(volume_params.volume_dim, False).as_numpy()
    # random phantom
    rand_phant = np.random.uniform(size=(volume_params.volume_dim, volume_params.volume_dim))
    rand_phant2 = np.random.normal(size=(volume_params.volume_dim, volume_params.volume_dim))
    #_______________ Build Network _______________
    # TF Reco Var
    sinogram_tf = tf.placeholder(tf.float32, shape=volume_params.sinogram_shape, name="input_sinogram")
    #fft
    fft_layer = tf.cast(tf.spectral.fft(tf.cast(sinogram_tf,dtype=tf.complex64)),tf.complex64)
    print("fft done")
    #tensorflow multiplication layer
    # NOTE(review): `fourier_filter` (the ramp |f|) is computed but the
    # trainable weights are initialised to ones instead — confirm the
    # all-ones init is the intended starting point.
    frequencies = np.fft.fftfreq(n=volume_params.detector_width,d=1)
    fourier_filter = np.abs(frequencies)
    #filter_weights = tf.Variable(tf.convert_to_tensor(fourier_filter,dtype=tf.float32))
    filter_weights = tf.Variable(tf.ones((volume_params.detector_width),dtype=tf.float32))
    filter_layer = tf.multiply(fft_layer, tf.cast(filter_weights,dtype=tf.complex64))
    print("filter done")
    #ifft
    # NOTE(review): casting the complex IFFT straight to float32 keeps
    # only the real part implicitly — tf.math.real would be explicit.
    ifft_layer = tf.cast(tf.spectral.ifft(tf.cast(filter_layer,dtype=tf.complex64)),dtype=tf.float32)
    print("ifft done")
    # reconstruct phantom again
    backprojection_layer = lme_custom_ops.parallel_backprojection2d( sinogram=ifft_layer,
        sinogram_shape=volume_params.sinogram_shape,
        volume_shape=volume_params.volume_shape,
        volume_origin=volume_params.volume_origin,
        detector_origin=volume_params.detector_origin,
        volume_spacing=volume_params.volume_spacing,
        detector_spacing=volume_params.detector_spacing,
        ray_vectors=volume_params.ray_vectors )
    print("backprojection_layer done")
    ground_truth_tf = tf.placeholder(tf.float32, shape=volume_params.volume_shape, name="ground_truth")
    loss_fkt = tf.losses.mean_squared_error(ground_truth_tf, backprojection_layer)
    learning_rate = 1e-5
    epochs = 10000
    g_opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
    train_op = g_opt.minimize(loss_fkt)
    # ===================== TF Session =====================
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)) as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        saver.restore(sess, "./model.ckpt")
        reco = sess.run(backprojection_layer, feed_dict={sinogram_tf: test_sinograms[0]})
        np.save("./reconst/imgfinal.npy", reco)
        # NOTE(review): debug leftover — this assert aborts the run, so
        # the entire training loop below is currently DEAD CODE. Remove
        # it to re-enable training.
        assert False, 'blablabla'
        reco = None
        train_losses = []
        test_losses = []
        print("start training loop")
        for epoch in range(epochs):
            phantom = None
            sinogram = None
            #idx = np.random.randint(0, train_phantoms.shape[0])
            for it in range(train_sinograms.shape[0]):
                sinogram = train_sinograms[it]
                phantom = train_phantoms[it]
                training = sess.run(train_op, feed_dict={sinogram_tf: sinogram, ground_truth_tf: phantom})
            # run tf session
            # Every 50 epochs: snapshot filter weights, losses, and a
            # test reconstruction.
            if epoch%50 == 0 or epoch == 0:
                filter_values = sess.run(filter_weights, feed_dict={sinogram_tf: sinogram, ground_truth_tf: phantom})
                np.save("./filters/f"+str(epoch+1),filter_values)
                train_loss_value = sess.run(loss_fkt, feed_dict={sinogram_tf: sinogram, ground_truth_tf: phantom})
                print("epoch: ", epoch,"train loss value: ", train_loss_value)
                idx = np.random.randint(0, test_phantoms.shape[0])
                test_loss_value = sess.run(loss_fkt, feed_dict={sinogram_tf: test_sinograms[idx], ground_truth_tf: test_phantoms[idx]})
                print("epoch: ", epoch,"test loss value: ", test_loss_value)
                reco = sess.run(backprojection_layer, feed_dict={sinogram_tf: test_sinograms[0]})
                np.save("./reconst/img"+str(epoch+1)+".npy", reco)
                train_losses.append(train_loss_value)
                test_losses.append(test_loss_value)
                np.savetxt("train_losses.csv", train_losses, delimiter=",")
                np.savetxt("test_losses.csv", test_losses, delimiter=",")
        # save model
        save_path = saver.save(sess, "./model.ckpt")
    # run tf session, to get reco
    pyc.imshow(reco, 'label reco')
if __name__ == '__main__':
    main()
1640780 | <gh_stars>10-100
class Solution:
def isScramble(self, s1: str, s2: str) -> bool:
@lru_cache(None)
def solve(l1, r1, l2, r2):
if r1 - l1 == 1: return s1[l1] == s2[l2]
if sorted(s1[l1:r1]) != sorted(s2[l2:r2]): return False
for k in range(1, r1 - l1):
if solve(l1, l1 + k, l2, l2 + k) and solve(l1 + k, r1, l2 + k, r2): return True
if solve(l1, l1 + k, r2 - k, r2) and solve(l1 + k, r1, l2, r2 - k): return True
return solve(0, len(s1), 0, len(s2))
| StarcoderdataPython |
190360 | # -*- coding: utf-8 -*-
# @author: yangyang
# @date 4/2/21 16:53
| StarcoderdataPython |
3385349 | import torch
import torch.nn as nn
import torch.nn.functional as F
class SpectralGraphConv(nn.Module):
    """(Kipf & Welling, 2017) spectral graph convolution
    (https://arxiv.org/abs/1609.02907): Z = A (X W) + b, where A is the
    (pre-normalised) adjacency/propagation matrix.

    Args:
        in_features (int): features per node in the input matrix.
        out_features (int): features per node in the output matrix.
        bias (boolean): include a learnable additive bias. Default: True.
    """

    def __init__(self, in_features, out_features, bias=True):
        super(SpectralGraphConv, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.bias = bias
        # Xavier-initialised weight of shape (in_features, out_features).
        self.W = torch.nn.Parameter(torch.randn(in_features, out_features), requires_grad=True)
        nn.init.xavier_normal_(self.W.data)
        if bias:
            self.b = torch.nn.Parameter(torch.zeros(out_features,), requires_grad=True)

    def forward(self, A, x):
        # First transform features, then propagate along edges.
        support = torch.matmul(x, self.W)
        out = torch.matmul(A, support)
        return out + self.b if self.bias else out
class GAT(nn.Module):
    """(Velickovic et al., 2018) single-head Graph Attention layer
    (https://arxiv.org/abs/1710.10903).

    Args:
        in_features (int): features per node in the input matrix.
        out_features (int): features per node in the output matrix.
        bias (boolean): include bias in the attention MLP. Default: True.
    """

    def __init__(self, in_features, out_features, bias=True):
        super(GAT, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.bias = bias
        self.W = torch.nn.Parameter(torch.randn(self.in_features, self.out_features), requires_grad=True)
        nn.init.xavier_normal_(self.W.data)
        # Single-layer feedforward attention over concatenated pairs.
        self.attention_mechanism = nn.Linear(2*self.out_features, 1, bias=self.bias)

    def forward(self, A, x):
        # A: (B, N, N) adjacency (self-loops expected on the diagonal);
        # x: (B, N, in_features).
        Wh = torch.matmul(x, self.W)  # B x N x F_prime
        B, N = x.shape[0], x.shape[1]
        # All ordered pairs (i, j): [Wh_i || Wh_j] -> B x N x N x 2F_prime
        Wh_concat = torch.cat([Wh.view(B, N, 1, -1).repeat(1, 1, N, 1), Wh.view(B, 1, N, -1).repeat(1, N, 1, 1)], dim=-1)
        # BUGFIX: squeeze only the trailing singleton produced by the
        # attention linear layer; a bare .squeeze() also removed the
        # batch axis when B == 1, corrupting the softmax/masking below.
        a = F.leaky_relu(self.attention_mechanism(Wh_concat), negative_slope=0.2).squeeze(-1)
        a = self.masked_softmax(a, A, dim=2)
        return torch.matmul(a, Wh)

    def masked_softmax(self, x, A, dim=2, epsilon=1e-6):
        """Softmax over `dim` restricted to entries where A is nonzero;
        fully-masked rows come out as all zeros."""
        x = x - torch.max(x, dim=-1, keepdim=True)[0]  # for numerical stability
        exps = torch.exp(x)
        masked_exps = exps * A.float()
        masked_sums = masked_exps.sum(dim=dim, keepdim=True)
        # For nodes with a zero diagonal (no self-loop / isolated), pad
        # the denominator so the division never hits 0.
        diag = torch.diagonal(A, dim1=1, dim2=2).unsqueeze(dim=-1)
        masked_sums = masked_sums + (1 - diag) + epsilon
        return (masked_exps / masked_sums) * A.float()
class MultiHeadGAT(nn.Module):
    """(Velickovic et al., 2018) multi-head Graph Attention layer
    (https://arxiv.org/abs/1710.10903).

    Args:
        in_features (int): features per node in the input matrix.
        head_out_features (int): output features per node per head.
        n_heads (int): number of attention heads. Default: 3.
        multihead_agg (string): 'concat' or 'average'. Default: 'concat'.
    """

    def __init__(self, in_features, head_out_features, n_heads=3, multihead_agg='concat'):
        super(MultiHeadGAT, self).__init__()
        # Fail fast on a bad aggregation mode instead of at forward time.
        if multihead_agg not in ('concat', 'average'):
            raise ValueError('Multihead aggregation function must be either \'concat\' or \'average\'.')
        self.in_features = in_features
        self.head_out_features = head_out_features
        self.n_heads = n_heads
        self.multihead_agg = multihead_agg
        # Heads are registered as attributes so nn.Module tracks them.
        for i in range(self.n_heads):
            setattr(self, 'GAT_head_{}'.format(i), GAT(self.in_features, self.head_out_features))

    def forward(self, A, x):
        head_outputs = [getattr(self, 'GAT_head_{}'.format(i))(A, x) for i in range(self.n_heads)]
        if self.multihead_agg == 'concat':
            return torch.cat(head_outputs, dim=-1)
        # 'average': stack-and-mean keeps the device/dtype of the head
        # outputs. BUGFIX: the old torch.zeros accumulator was always
        # allocated on the CPU with default dtype, breaking GPU runs.
        return torch.stack(head_outputs, dim=0).mean(dim=0)
class GIN(nn.Module):
    """(Xu & Hu et al., 2019) Graph Isomorphism Network layer
    (https://arxiv.org/abs/1810.00826): MLP((1 + eps) * x + A x).

    Args:
        in_features (int): features per node in the input matrix.
        out_features (int): features per node in the output matrix.
    """

    def __init__(self, in_features, out_features):
        super(GIN, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # Learnable weighting of the centre node, initialised to zero.
        self.epsilon = torch.nn.Parameter(torch.zeros(1), requires_grad=True)
        self.mlp = nn.Linear(in_features, out_features)

    def forward(self, A, x):
        centre = (1 + self.epsilon) * x
        neighbours = torch.matmul(A, x)
        return self.mlp(centre + neighbours)
class ARMAConvGCSLayer(nn.Module):
    """Graph Convolutional Skip (GCS) layer: one stack element of an
    ARMA convolution (Bianchi et al., 2019,
    https://arxiv.org/abs/1901.01343).

    Recursion: X_{t} = act(L X_{t-1} W_t + X V), run for `timesteps`
    iterations starting from the input X.

    Args:
        in_features (int): features per node in the input matrix.
        out_features (int): features per node in the output matrix.
        timesteps (int): number of recursive updates. Default: 1.
        activation: activation applied at each update. Default: nn.ReLU().
    """

    def __init__(self, in_features, out_features, timesteps=1, activation=nn.ReLU()):
        super(ARMAConvGCSLayer, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.timesteps = timesteps
        self.activation = activation
        # Skip-connection weight applied to the ORIGINAL input at every step.
        self.V_t = torch.nn.Parameter(torch.randn(self.in_features, self.out_features), requires_grad=True)
        nn.init.xavier_normal_(self.V_t.data)
        # W_1 maps in_features -> out_features; W_t (t >= 2) map
        # out_features -> out_features.
        self.W_1 = torch.nn.Parameter(torch.randn(self.in_features, self.out_features), requires_grad=True)
        nn.init.xavier_normal_(self.W_1.data)
        for i in range(2, self.timesteps + 1):
            W_i = torch.nn.Parameter(torch.randn(self.out_features, self.out_features), requires_grad=True)
            # BUGFIX: later-timestep weights previously kept their raw
            # randn values while W_1 and V_t were Xavier-initialised;
            # initialise them consistently.
            nn.init.xavier_normal_(W_i.data)
            setattr(self, 'W_{}'.format(i), W_i)

    def forward(self, L, x):
        """L: modified Laplacian / propagation matrix; x: node features."""
        x_t = x
        for i in range(1, self.timesteps + 1):
            W_i = getattr(self, 'W_{}'.format(i))
            x_t = self.activation(torch.matmul(L, torch.matmul(x_t, W_i)) + torch.matmul(x, self.V_t))
        return x_t
class ARMAConv(nn.Module):
    """(Bianchi et al., 2019) convolutional ARMA filter
    (https://arxiv.org/abs/1901.01343): the average of `k` parallel GCS
    stacks, with dropout on each stack's output.

    Args:
        in_features (int): features per node in the input matrix.
        out_features (int): features per node in the output matrix.
        timesteps (int): recursive updates per GCS stack. Default: 1.
        k (int): number of parallel stacks. Default: 3.
        dropout_p (float): dropout probability. Default: 0.2.
    """

    def __init__(self, in_features, out_features, timesteps=1, k=3, dropout_p=0.2):
        super(ARMAConv, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.timesteps = timesteps
        self.k = k
        self.dropout_p = dropout_p
        for i in range(1, self.k + 1):
            setattr(self, 'GCS_{}'.format(i), ARMAConvGCSLayer(self.in_features, self.out_features, self.timesteps))

    def forward(self, L, x):
        stack_outputs = []
        for i in range(1, self.k + 1):
            gcs_layer = getattr(self, 'GCS_{}'.format(i))
            # BUGFIX: honour the configured dropout probability (the old
            # code hard-coded p=0.2, ignoring self.dropout_p) and tie
            # dropout to the module's train/eval state (F.dropout
            # defaults to training=True regardless of self.training).
            stack_outputs.append(F.dropout(gcs_layer(L, x), p=self.dropout_p, training=self.training))
        # Summing the outputs keeps their device/dtype; the old
        # torch.zeros accumulator was always allocated on the CPU.
        return sum(stack_outputs) / self.k
class GatedGraphConv(nn.Module):
    """(Bresson & Laurent, 2018) gated graph convolution
    (https://arxiv.org/abs/1711.07553):
    h_i' = U h_i + sum_j eta_ij * (V h_j), with edge gates
    eta_ij = sigmoid(A h_i + B h_j) masked by the adjacency matrix.

    Args:
        in_features (int): features per node in the input matrix.
        out_features (int): features per node in the output matrix.
    """

    def __init__(self, in_features, out_features):
        super(GatedGraphConv, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # U, V: graph-conv weights; A, B: edge-gate weights. All are
        # Xavier-initialised (in_features, out_features) matrices.
        for name in ('U', 'V', 'A', 'B'):
            weight = torch.nn.Parameter(torch.randn(in_features, out_features), requires_grad=True)
            nn.init.xavier_normal_(weight.data)
            setattr(self, name, weight)

    def forward(self, A, x):
        batch, num_nodes = x.shape[0], x.shape[1]
        # Broadcast features to all ordered pairs: h_i varies along dim 1,
        # h_j along dim 2; both are (B, N, N, in_features).
        h_i = x.view(batch, num_nodes, 1, -1).repeat(1, 1, num_nodes, 1)
        h_j = x.view(batch, 1, num_nodes, -1).repeat(1, num_nodes, 1, 1)
        # Edge gates, zeroed wherever there is no edge. (Note: self.A is
        # a weight matrix; the forward argument A is the adjacency.)
        gates = torch.sigmoid(torch.matmul(h_i, self.A) + torch.matmul(h_j, self.B)) * A.unsqueeze(-1)
        neighbour_term = torch.sum(gates * torch.matmul(h_j, self.V), dim=2)
        return torch.matmul(x, self.U) + neighbour_term
class GraphSAGE(nn.Module):
    """(Hamilton & Ying et al., 2018) GraphSAGE layer with mean
    aggregation (https://arxiv.org/abs/1706.02216): each node's own
    features are concatenated with the mean of its neighbours' features
    and passed through a dense layer.

    Args:
        in_features (int): features per node in the input matrix.
        out_features (int): features per node in the output matrix.
    """

    def __init__(self, in_features, out_features):
        super(GraphSAGE, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # 2 * in_features: [own features || mean of neighbour features].
        self.linear = nn.Linear(2 * in_features, out_features)

    def forward(self, A, x):
        # Degree-normalised neighbour mean; epsilon guards isolated nodes.
        degree = torch.sum(A, dim=-1, keepdim=True) + 1e-6
        neighbour_mean = torch.matmul(A, x) / degree
        combined = torch.cat([x, neighbour_mean], dim=-1)
        return self.linear(combined)
93873 | from src.commit import * | StarcoderdataPython |
150374 | <filename>tests/functional/intfunc/avg/test_06.py<gh_stars>0
#coding:utf-8
#
# id: functional.intfunc.avg.06
# title: AVG - Integer OverFlow
# decription:
# Refactored 14.10.2019: adjusted expected_stdout/stderr
# 25.06.2020, 4.0.0.2076: changed types in SQLDA from numeric to int128 // after discuss with Alex about CORE-6342.
# 09.07.2020, 4.0.0.2091:
# NO more overflow since INT128 was introduced. AVG() is evaluated successfully.
# Removed error message from expected_stderr, added result into expected_stdout.
#
# tracker_id:
# min_versions: []
# versions: 3.0, 4.0
# qmid: functional.intfunc.avg.avg_06
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
substitutions_1 = [('[ \t]+', ' ')]
init_script_1 = """
recreate table test( id integer not null);
insert into test values(2100000000);
insert into test values(2100000000);
insert into test values(2100000000);
insert into test values(2100000000);
commit;
create or alter view v_test as select avg(2100000000*id)as avg_result from test;
commit;
"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
set list on;
set sqlda_display on;
select * from v_test;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
INPUT message field count: 0
OUTPUT message field count: 1
01: sqltype: 580 INT64 Nullable scale: 0 subtype: 0 len: 8
: name: AVG_RESULT alias: AVG_RESULT
: table: V_TEST owner: SYSDBA
"""
expected_stderr_1 = """
Statement failed, SQLSTATE = 22003
Integer overflow. The result of an integer operation caused the most significant bit of the result to carry.
"""
@pytest.mark.version('>=3.0,<4.0')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.expected_stderr = expected_stderr_1
act_1.execute()
assert act_1.clean_expected_stderr == act_1.clean_stderr
assert act_1.clean_expected_stdout == act_1.clean_stdout
# version: 4.0
# resources: None
substitutions_2 = [('^((?!sqltype|AVG_RESULT).)*$', ''), ('[ \t]+', ' ')]
init_script_2 = """
recreate table test( id integer not null);
insert into test values(2100000000);
insert into test values(2100000000);
insert into test values(2100000000);
insert into test values(2100000000);
commit;
create or alter view v_test as select avg(2100000000*id)as avg_result from test;
commit;
"""
db_2 = db_factory(sql_dialect=3, init=init_script_2)
test_script_2 = """
set list on;
set sqlda_display on;
select * from v_test;
"""
act_2 = isql_act('db_2', test_script_2, substitutions=substitutions_2)
expected_stdout_2 = """
01: sqltype: 32752 INT128 Nullable scale: 0 subtype: 0 len: 16
: name: AVG_RESULT alias: AVG_RESULT
: table: V_TEST owner: SYSDBA
AVG_RESULT 4410000000000000000
"""
@pytest.mark.version('>=4.0')
def test_2(act_2: Action):
    """FB 4.x: with INT128 the same AVG no longer overflows; the query
    succeeds and returns 4410000000000000000.
    """
    act_2.expected_stdout = expected_stdout_2
    act_2.execute()
    assert act_2.clean_expected_stdout == act_2.clean_stdout
| StarcoderdataPython |
4830282 | <reponame>mshulman/response<filename>response/templatetags/unslackify.py<gh_stars>1-10
import logging
import emoji_data_python
from django import template
from response.slack.cache import get_user_profile
from response.slack.client import slack_to_human_readable
register = template.Library()
logger = logging.getLogger(__name__)
@register.filter
def unslackify(value):
    """Template filter: turn Slack-formatted text into plain text.

    Expands Slack entities (user/channel references) first, then
    replaces colon-style :emoji: shortcodes with unicode characters.
    """
    readable = slack_to_human_readable(value)
    return emoji_data_python.replace_colons(readable)
@register.filter
def slack_id_to_fullname(value):
    """Template filter: resolve a Slack user ID to the user's full name.

    NOTE(review): implicitly returns None when the profile lookup
    fails — confirm the templates using this filter tolerate None
    (Django renders it as the string "None" unless handled).
    """
    profile = get_user_profile(value)
    if profile:
        return profile["fullname"]
| StarcoderdataPython |
1797652 | <gh_stars>1-10
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
from nova.api.openstack import common
class ViewBuilder(object):
    """Builds serializable flavor entities from flavor DB dicts."""

    def build(self, flavor_obj, is_detail=False):
        """Generic method used to generate a flavor entity."""
        builder = self._build_detail if is_detail else self._build_simple
        flavor = builder(flavor_obj)
        self._build_extra(flavor)
        return flavor

    def _build_simple(self, flavor_obj):
        """Build a minimal representation of a flavor."""
        return {"id": flavor_obj["flavorid"], "name": flavor_obj["name"]}

    def _build_detail(self, flavor_obj):
        """Build a more complete representation of a flavor."""
        detail = self._build_simple(flavor_obj)
        detail["ram"] = flavor_obj["memory_mb"]
        detail["disk"] = flavor_obj["local_gb"]
        # Optional attributes default to an empty string when absent.
        for key in ("vcpus", "swap", "rxtx_quota", "rxtx_cap"):
            detail[key] = flavor_obj.get(key, "")
        return detail

    def _build_extra(self, flavor):
        """Hook for version-specific changes to newly created flavor object."""
        pass
class ViewBuilderV11(ViewBuilder):
    """Openstack API v1.1 flavors view builder."""

    def __init__(self, base_url, project_id=""):
        """
        :param base_url: url of the root wsgi application
        """
        self.base_url = base_url
        self.project_id = project_id

    def _build_extra(self, flavor_obj):
        # v1.1 entities carry self/bookmark links.
        flavor_obj["links"] = self._build_links(flavor_obj)

    def _build_links(self, flavor_obj):
        """Generate a container of links that refer to the provided flavor."""
        flavor_id = flavor_obj["id"]
        return [
            {"rel": "self", "href": self.generate_href(flavor_id)},
            {"rel": "bookmark", "href": self.generate_bookmark(flavor_id)},
        ]

    def generate_href(self, flavor_id):
        """Create an url that refers to a specific flavor id."""
        return os.path.join(self.base_url, self.project_id,
                            "flavors", str(flavor_id))

    def generate_bookmark(self, flavor_id):
        """Create an url that refers to a specific flavor id."""
        # Bookmarks are version-independent, so the version segment is
        # stripped from the base url first.
        root = common.remove_version_from_href(self.base_url)
        return os.path.join(root, self.project_id, "flavors", str(flavor_id))
import pygame
from tools.colours import WHITE
from tools.globals import SCREEN_HEIGHT, SCREEN_WIDTH
class Enemy(pygame.sprite.Sprite):
    """Hostile sprite that can step in the four cardinal and four diagonal directions."""

    def __init__(self, health, image, spawn_pos_x, spawn_pos_y):
        super().__init__()
        # temp
        self.image = image
        self.health = health
        self.rect = self.image.get_rect()
        self.rect.x = spawn_pos_x
        self.rect.y = spawn_pos_y

    def move_right(self, pixels):
        # Allowed while not yet past the right-hand limit.
        if self.rect.x <= 30 + SCREEN_WIDTH:
            self.rect.x += pixels

    def move_left(self, pixels):
        if self.rect.x >= 0:
            self.rect.x -= pixels

    def move_down(self, pixels):
        if self.rect.y + 30 <= SCREEN_HEIGHT:
            self.rect.y += pixels

    def move_up(self, pixels):
        if self.rect.y >= 0:
            self.rect.y -= pixels

    def move_upright(self, pixels):
        self.move_right(pixels)
        self.move_up(pixels)

    def move_upleft(self, pixels):
        self.move_left(pixels)
        self.move_up(pixels)

    def move_downright(self, pixels):
        self.move_right(pixels)
        self.move_down(pixels)

    def move_downleft(self, pixels):
        self.move_left(pixels)
        self.move_down(pixels)
class EnemyBullet(pygame.sprite.Sprite):
    """A rectangular projectile fired by an enemy.

    The backing surface is filled with WHITE and WHITE is also set as the
    color key, so only the rectangle drawn in ``color`` is visible.
    """
    def __init__(self, width, height, color, spawn_pos_x, spawn_pos_y):
        super().__init__()
        # Fresh surface of the requested size backs the sprite.
        self.image = pygame.Surface([width, height])
        self.image.fill(WHITE)
        self.image.set_colorkey(WHITE)  # WHITE pixels render as transparent
        pygame.draw.rect(self.image, color, [0, 0, width, height])
        self.rect = self.image.get_rect()
        self.rect.x = spawn_pos_x
        self.rect.y = spawn_pos_y
# Repository: etienne-monier/inpystem
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""This small module only contain sec2str, which is a function to display
time in human-readable format.
"""
def sec2str(t):
    """Returns a human-readable time str from a duration in s.

    Arguments
    ---------
    t: int or float
        Duration in seconds.

    Returns
    -------
    str
        Human-readable time str.

    Example
    -------
    >>> from inpystem.tools.sec2str import sec2str
    >>> sec2str(5.2056)
    '5.21s'
    >>> sec2str(3905)
    '1h 5m 5s'
    """
    # Decompose into hour, minute and seconds.
    h = int(t // 3600)
    m = int((t - 3600 * h) // 60)
    s = t - 3600 * h - 60 * m
    # Integral second counts print without decimals; float inputs keep
    # two fractional digits (s inherits the type of t).
    if isinstance(s, int):
        s_str = '{:d}'.format(s)
    else:
        s_str = '{:.2f}'.format(float(s))
    # Only show the larger units when they are non-zero.
    if h == 0 and m == 0:
        return "{}s".format(s_str)
    elif h == 0 and m != 0:
        return "{:d}m {}s".format(m, s_str)
    else:
        return "{:d}h {:d}m {}s".format(h, m, s_str)
#!/usr/bin/env python
# coding=utf-8
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE."""
# You can also adapt this script on your own text classification task. Pointers for this are left as comments.
import json
import logging
import os
import sys
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Optional
import datasets
import numpy as np
import transformers
from datasets import load_dataset, load_metric
from transformers import EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TrainingArguments
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
from onnxruntime.quantization import QuantFormat, QuantizationMode, QuantType
from optimum.onnxruntime import ORTQuantizer
from optimum.onnxruntime.configuration import AutoCalibrationConfig, ORTConfig, QuantizationConfig
from optimum.onnxruntime.model import ORTModel
from optimum.onnxruntime.preprocessors import QuantizationPreprocessor
from optimum.onnxruntime.preprocessors.passes import (
ExcludeGeLUNodes,
ExcludeLayerNormNodes,
ExcludeNodeAfter,
ExcludeNodeFollowedBy,
)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.15.0")
require_version(
    "datasets>=1.8.0", "To fix: pip install -r examples/onnxruntime/quantization/text-classification/requirements.txt"
)
# Map each GLUE task to its input column names; the second element is None
# for single-sentence tasks.
task_to_keys = {
    "cola": ("sentence", None),
    "mnli": ("premise", "hypothesis"),
    "mrpc": ("sentence1", "sentence2"),
    "qnli": ("question", "sentence"),
    "qqp": ("question1", "question2"),
    "rte": ("sentence1", "sentence2"),
    "sst2": ("sentence", None),
    "stsb": ("sentence1", "sentence2"),
    "wnli": ("sentence1", "sentence2"),
}
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    Using `HfArgumentParser` we can turn this class
    into argparse arguments to be able to specify them on
    the command line.
    """
    task_name: Optional[str] = field(
        default=None,
        metadata={"help": "The name of the task to train on: " + ", ".join(task_to_keys.keys())},
    )
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
            "value if set."
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of prediction examples to this "
            "value if set."
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        # Validate the mutually-exclusive data sources. Raise ValueError
        # instead of using `assert` so the validation still runs under
        # `python -O` (assert statements are stripped by optimization).
        if self.task_name is not None:
            self.task_name = self.task_name.lower()
            if self.task_name not in task_to_keys.keys():
                raise ValueError("Unknown task, you should pick one in " + ",".join(task_to_keys.keys()))
        elif self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            if train_extension not in ["csv", "json"]:
                raise ValueError("`train_file` should be a csv or a json file.")
            validation_extension = self.validation_file.split(".")[-1]
            if validation_extension != train_extension:
                raise ValueError(
                    "`validation_file` should have the same extension (csv or json) as `train_file`."
                )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """
    # Required: hub model identifier or local checkpoint path.
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    # ONNX Runtime provider name; default runs inference on CPU.
    execution_provider: str = field(
        default="CPUExecutionProvider",
        metadata={"help": "ONNX Runtime execution provider to use for inference."},
    )
@dataclass
class OptimizationArguments:
    """
    Arguments pertaining to what type of optimization we are going to apply on the model.
    """
    opset: Optional[int] = field(
        default=None,
        metadata={"help": "ONNX opset version to export the model with."},
    )
    quantization_approach: str = field(
        default="dynamic",
        metadata={"help": "The quantization approach. Supported approach are static and dynamic."},
    )
    per_channel: bool = field(
        default=False,
        metadata={"help": "Whether to quantize the weights per channel."},
    )
    reduce_range: bool = field(
        default=False,
        metadata={
            "help": "Whether to quantize the weights with 7-bits. It may improve the accuracy for some models running "
            "on non-VNNI machine, especially for per-channel mode."
        },
    )
    # The calibration_* options below are only consulted when
    # `quantization_approach` is "static" (see `main`).
    calibration_method: str = field(
        default="minmax",
        metadata={
            "help": "The method chosen to calculate the activation quantization parameters using the calibration "
            "dataset. Current supported calibration methods are minmax, entropy and percentile."
        },
    )
    num_calibration_samples: int = field(
        default=100,
        metadata={"help": "Number of examples to use for the calibration step resulting from static quantization."},
    )
    num_calibration_shards: int = field(
        default=1,
        metadata={
            "help": "How many shards to split the calibration dataset into. Useful for the entropy and percentile "
            "calibration method."
        },
    )
    calibration_batch_size: int = field(
        default=8,
        metadata={"help": "The batch size for the calibration step."},
    )
    calibration_histogram_percentile: float = field(
        default=99.999,
        metadata={"help": "The percentile used for the percentile calibration method."},
    )
    calibration_moving_average: bool = field(
        default=False,
        metadata={
            "help": "Whether to compute the moving average of the minimum and maximum values for the minmax "
            "calibration method."
        },
    )
    calibration_moving_average_constant: float = field(
        default=0.01,
        metadata={
            "help": "Constant smoothing factor to use when computing the moving average of the minimum and maximum "
            "values. Effective only when the selected calibration method is minmax and `calibration_moving_average` is "
            "set to True."
        },
    )
def main():
    """Quantize a text-classification model with ONNX Runtime and evaluate it.

    Parses model/data/training/optimization arguments, exports the model to
    ONNX, applies static or dynamic quantization (running a calibration step
    for the static case), then optionally evaluates and/or predicts with the
    quantized model, writing metrics and predictions to the output directory.
    """
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments, OptimizationArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args, optim_args = parser.parse_json_file(
            json_file=os.path.abspath(sys.argv[1])
        )
    else:
        model_args, data_args, training_args, optim_args = parser.parse_args_into_dataclasses()
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    logger.info(f"Optimization with the following parameters {optim_args}")
    if os.path.isdir(training_args.output_dir) and not training_args.overwrite_output_dir:
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. "
            "Use --overwrite_output_dir to overcome."
        )
    os.makedirs(training_args.output_dir, exist_ok=True)
    model_path = os.path.join(training_args.output_dir, "model.onnx")
    quantized_model_path = os.path.join(training_args.output_dir, "model-quantized.onnx")
    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the
    # sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named
    # label if at least two columns are provided.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below)
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.task_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset("glue", data_args.task_name, cache_dir=model_args.cache_dir)
    elif data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}
        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")
        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")
        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
    # Labels
    if data_args.task_name is not None:
        is_regression = data_args.task_name == "stsb"
        if not is_regression:
            label_list = raw_datasets["train"].features["label"].names
    else:
        # Trying to have good defaults here, don't hesitate to tweak to your needs.
        is_regression = raw_datasets["train"].features["label"].dtype in ["float32", "float64"]
        if not is_regression:
            # A useful fast method:
            # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique
            label_list = raw_datasets["train"].unique("label")
            label_list.sort()  # Let's sort it for determinism
    # Preprocessing the raw_datasets
    if data_args.task_name is not None:
        sentence1_key, sentence2_key = task_to_keys[data_args.task_name]
    else:
        # Again, we try to have some nice defaults but don't hesitate to tweak to your use case.
        non_label_column_names = [name for name in raw_datasets["train"].column_names if name != "label"]
        if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names:
            sentence1_key, sentence2_key = "sentence1", "sentence2"
        else:
            if len(non_label_column_names) >= 2:
                sentence1_key, sentence2_key = non_label_column_names[:2]
            else:
                sentence1_key, sentence2_key = non_label_column_names[0], None

    def preprocess_function(examples, tokenizer: PreTrainedTokenizer, max_length: Optional[int] = None):
        # Tokenize the texts
        args = (
            (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])
        )
        result = tokenizer(
            *args, padding="max_length", max_length=min(max_length, tokenizer.model_max_length), truncation=True
        )
        return result

    # Get the metric function
    if data_args.task_name is not None:
        metric = load_metric("glue", data_args.task_name)
    else:
        metric = load_metric("accuracy")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1)
        if data_args.task_name is not None:
            result = metric.compute(predictions=preds, references=p.label_ids)
            if len(result) > 1:
                result["combined_score"] = np.mean(list(result.values())).item()
            return result
        elif is_regression:
            return {"mse": ((preds - p.label_ids) ** 2).mean().item()}
        else:
            return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Create the quantizer
    quantizer = ORTQuantizer.from_pretrained(
        model_args.model_name_or_path, feature="sequence-classification", opset=optim_args.opset
    )
    # Run the tokenizer on the dataset
    preprocessed_datasets = raw_datasets.map(
        partial(preprocess_function, tokenizer=quantizer.tokenizer, max_length=data_args.max_seq_length),
        batched=True,
        load_from_cache_file=not data_args.overwrite_cache,
        desc="Running tokenizer on dataset",
    )
    apply_static_quantization = optim_args.quantization_approach == "static"
    # Create the quantization configuration containing all the quantization parameters
    qconfig = QuantizationConfig(
        is_static=apply_static_quantization,
        format=QuantFormat.QDQ if apply_static_quantization else QuantFormat.QOperator,
        mode=QuantizationMode.QLinearOps if apply_static_quantization else QuantizationMode.IntegerOps,
        activations_dtype=QuantType.QInt8 if apply_static_quantization else QuantType.QUInt8,
        weights_dtype=QuantType.QInt8,
        per_channel=optim_args.per_channel,
        reduce_range=optim_args.reduce_range,
        operators_to_quantize=["MatMul", "Add"],
    )
    ranges = None
    # Create a quantization preprocessor to determine the nodes to exclude
    quantization_preprocessor = QuantizationPreprocessor()
    if apply_static_quantization:
        # Create the calibration dataset used for the calibration step
        calibration_dataset = preprocessed_datasets["train"]
        if optim_args.num_calibration_samples is not None:
            calibration_dataset = calibration_dataset.select(range(optim_args.num_calibration_samples))
        # Remove the unnecessary columns of the calibration dataset before the calibration step
        calibration_dataset = quantizer.clean_calibration_dataset(calibration_dataset)
        # Create the calibration configuration given the selected calibration method
        if optim_args.calibration_method == "entropy":
            calibration_config = AutoCalibrationConfig.entropy(calibration_dataset)
        elif optim_args.calibration_method == "percentile":
            calibration_config = AutoCalibrationConfig.percentiles(
                calibration_dataset,
                percentile=optim_args.calibration_histogram_percentile,
            )
        else:
            calibration_config = AutoCalibrationConfig.minmax(
                calibration_dataset,
                optim_args.calibration_moving_average,
                optim_args.calibration_moving_average_constant,
            )
        if not 1 <= optim_args.num_calibration_shards <= len(calibration_dataset):
            raise ValueError(
                f"Invalid value of number of shards {optim_args.num_calibration_shards} chosen to split the calibration"
                f" dataset, should be higher than 0 and lower or equal to the number of samples "
                f"{len(calibration_dataset)}."
            )
        for i in range(optim_args.num_calibration_shards):
            shard = calibration_dataset.shard(optim_args.num_calibration_shards, i)
            quantizer.partial_fit(
                dataset=shard,
                calibration_config=calibration_config,
                onnx_model_path=model_path,
                operators_to_quantize=qconfig.operators_to_quantize,
                batch_size=optim_args.calibration_batch_size,
                use_external_data_format=False,
            )
        ranges = quantizer.compute_ranges()
        # Exclude the nodes constituting LayerNorm
        quantization_preprocessor.register_pass(ExcludeLayerNormNodes())
        # Exclude the nodes constituting GELU
        quantization_preprocessor.register_pass(ExcludeGeLUNodes())
        # Exclude the residual connection Add nodes
        quantization_preprocessor.register_pass(ExcludeNodeAfter("Add", "Add"))
        # Exclude the Add nodes following the Gather operator
        quantization_preprocessor.register_pass(ExcludeNodeAfter("Gather", "Add"))
        # Exclude the Add nodes followed by the Softmax operator
        quantization_preprocessor.register_pass(ExcludeNodeFollowedBy("Add", "Softmax"))
    # Export the quantized model
    quantizer.export(
        onnx_model_path=model_path,
        onnx_quantized_model_output_path=quantized_model_path,
        calibration_tensors_range=ranges,
        quantization_config=qconfig,
        preprocessor=quantization_preprocessor,
    )
    # Create the ONNX Runtime configuration summarizing all the parameters related to ONNX IR export and quantization
    ort_config = ORTConfig(opset=quantizer.opset, quantization=qconfig)
    # Save the configuration
    ort_config.save_pretrained(training_args.output_dir)
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        if "validation" not in preprocessed_datasets and "validation_matched" not in preprocessed_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = preprocessed_datasets["validation_matched" if data_args.task_name == "mnli" else "validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
        ort_model = ORTModel(
            quantized_model_path,
            quantizer._onnx_config,
            execution_provider=model_args.execution_provider,
            compute_metrics=compute_metrics,
            label_names=["label"],
        )
        outputs = ort_model.evaluation_loop(eval_dataset)
        # Save metrics (plain string: no placeholders, so no f-prefix needed)
        with open(os.path.join(training_args.output_dir, "eval_results.json"), "w") as f:
            json.dump(outputs.metrics, f, indent=4, sort_keys=True)
    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        if "test" not in preprocessed_datasets and "test_matched" not in preprocessed_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = preprocessed_datasets["test_matched" if data_args.task_name == "mnli" else "test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
        ort_model = ORTModel(
            quantized_model_path, quantizer._onnx_config, execution_provider=model_args.execution_provider
        )
        outputs = ort_model.evaluation_loop(predict_dataset)
        predictions = np.squeeze(outputs.predictions) if is_regression else np.argmax(outputs.predictions, axis=1)
        # Save predictions
        output_predictions_file = os.path.join(training_args.output_dir, "prediction.txt")
        with open(output_predictions_file, "w") as writer:
            logger.info(f"***** Predict results {data_args.task_name} *****")
            writer.write("index\tprediction\n")
            for index, item in enumerate(predictions):
                if is_regression:
                    writer.write(f"{index}\t{item:3.3f}\n")
                else:
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
def _mp_fn(index):
    """Spawn entry point for TPU training (the index argument is unused here)."""
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 16 14:50:14 2018
@author: Kaushik
"""
'''
# Known symmatric distances
locations = ["New York", "Los Angeles", "Chicago", "Minneapolis", "Denver", "Dallas", "Seattle",
"Boston", "San Francisco", "St. Louis", "Houston", "Phoenix", "Salt Lake City"]
dist_matrix = [
[ 0, 2451, 713, 1018, 1631, 1374, 2408, 213, 2571, 875, 1420, 2145, 1972], # New York
[2451, 0, 1745, 1524, 831, 1240, 959, 2596, 403, 1589, 1374, 357, 579], # Los Angeles
[ 713, 1745, 0, 355, 920, 803, 1737, 851, 1858, 262, 940, 1453, 1260], # Chicago
[1018, 1524, 355, 0, 700, 862, 1395, 1123, 1584, 466, 1056, 1280, 987], # Minneapolis
[1631, 831, 920, 700, 0, 663, 1021, 1769, 949, 796, 879, 586, 371], # Denver
[1374, 1240, 803, 862, 663, 0, 1681, 1551, 1765, 547, 225, 887, 999], # Dallas
[2408, 959, 1737, 1395, 1021, 1681, 0, 2493, 678, 1724, 1891, 1114, 701], # Seattle
[ 213, 2596, 851, 1123, 1769, 1551, 2493, 0, 2699, 1038, 1605, 2300, 2099], # Boston
[2571, 403, 1858, 1584, 949, 1765, 678, 2699, 0, 1744, 1645, 653, 600], # San Francisco
[ 875, 1589, 262, 466, 796, 547, 1724, 1038, 1744, 0, 679, 1272, 1162], # St. Louis
[1420, 1374, 940, 1056, 879, 225, 1891, 1605, 1645, 679, 0, 1017, 1200], # Houston
[2145, 357, 1453, 1280, 586, 887, 1114, 2300, 653, 1272, 1017, 0, 504], # Phoenix
[1972, 579, 1260, 987, 371, 999, 701, 2099, 600, 1162, 1200, 504, 0]] # Salt Lake City
'''
# Locations
def create_data_array():
    """Return the list of [x, y] city coordinates for this TSP instance."""
    locations = [[288, 149], [288, 129], [270, 133], [256, 141], [256, 157], [246, 157], [236, 169],
        [228, 169], [228, 161], [220, 169], [212, 169], [204, 169], [196, 169], [188, 169], [196, 161],
        [188, 145], [172, 145], [164, 145], [156, 145], [148, 145], [140, 145], [148, 169], [164, 169],
        [172, 169], [156, 169], [140, 169], [132, 169], [124, 169], [116, 161], [104, 153], [104, 161],
        [104, 169], [90, 165], [80, 157], [64, 157], [64, 165], [56, 169], [56, 161], [56, 153], [56, 145],
        [56, 137], [56, 129], [56, 121], [40, 121], [40, 129], [40, 137], [40, 145], [40, 153], [40, 161],
        [40, 169], [32, 169], [32, 161], [32, 153], [32, 145], [32, 137], [32, 129], [32, 121], [32, 113],
        [40, 113], [56, 113], [56, 105], [48, 99], [40, 99], [32, 97], [32, 89], [24, 89], [16, 97],
        [16, 109], [8, 109], [8, 97], [8, 89], [8, 81], [8, 73], [8, 65], [8, 57], [16, 57], [8, 49],
        [8, 41], [24, 45], [32, 41], [32, 49], [32, 57], [32, 65], [32, 73], [32, 81], [40, 83], [40, 73],
        [40, 63], [40, 51], [44, 43], [44, 35], [44, 27], [32, 25], [24, 25], [16, 25], [16, 17], [24, 17],
        [32, 17], [44, 11], [56, 9], [56, 17], [56, 25], [56, 33], [56, 41], [64, 41], [72, 41], [72, 49],
        [56, 49], [48, 51], [56, 57], [56, 65], [48, 63], [48, 73], [56, 73], [56, 81], [48, 83], [56, 89],
        [56, 97], [104, 97], [104, 105], [104, 113], [104, 121], [104, 129], [104, 137], [104, 145],
        [116, 145], [124, 145], [132, 145], [132, 137], [140, 137], [148, 137], [156, 137], [164, 137],
        [172, 125], [172, 117], [172, 109], [172, 101], [172, 93], [172, 85], [180, 85], [180, 77],
        [180, 69], [180, 61], [180, 53], [172, 53], [172, 61], [172, 69], [172, 77], [164, 81], [148, 85],
        [124, 85], [124, 93], [124, 109], [124, 125], [124, 117], [124, 101], [104, 89], [104, 81],
        [104, 73], [104, 65], [104, 49], [104, 41], [104, 33], [104, 25], [104, 17], [92, 9], [80, 9],
        [72, 9], [64, 21], [72, 25], [80, 25], [80, 25], [80, 41], [88, 49], [104, 57], [124, 69],
        [124, 77], [132, 81], [140, 65], [132, 61], [124, 61], [124, 53], [124, 45], [124, 37], [124, 29],
        [132, 21], [124, 21], [120, 9], [128, 9], [136, 9], [148, 9], [162, 9], [156, 25], [172, 21],
        [180, 21], [180, 29], [172, 29], [172, 37], [172, 45], [180, 45], [180, 37], [188, 41], [196, 49],
        [204, 57], [212, 65], [220, 73], [228, 69], [228, 77], [236, 77], [236, 69], [236, 61], [228, 61],
        [228, 53], [236, 53], [236, 45], [228, 45], [228, 37], [236, 37], [236, 29], [228, 29], [228, 21],
        [236, 21], [252, 21], [260, 29], [260, 37], [260, 45], [260, 53], [260, 61], [260, 69], [260, 77],
        [276, 77], [276, 69], [276, 61], [276, 53], [284, 53], [284, 61], [284, 69], [284, 77], [284, 85],
        [284, 93], [284, 101], [288, 109], [280, 109], [276, 101], [276, 93], [276, 85], [268, 97],
        [260, 109], [252, 101], [260, 93], [260, 85], [236, 85], [228, 85], [228, 93], [236, 93],
        [236, 101], [228, 101], [228, 109], [228, 117], [228, 125], [220, 125], [212, 117], [204, 109],
        [196, 101], [188, 93], [180, 93], [180, 101], [180, 109], [180, 117], [180, 125], [196, 145],
        [204, 145], [212, 145], [220, 145], [228, 145], [236, 145], [246, 141], [252, 125], [260, 129],
        [280, 133]]
    return locations
# Imports
import numpy as np
# from ortools.linear_solver import pywraplp
from ortools.constraint_solver import pywrapcp, routing_enums_pb2
# Euclidean distance between points.
def euclid_distance(x1, y1, x2, y2):
    """Straight-line (Euclidean) distance between (x1, y1) and (x2, y2)."""
    dx = x1 - x2
    dy = y1 - y2
    return np.sqrt(dx**2 + dy**2)
# Manhattan distance between points.
def manhattan_distance(x1, y1, x2, y2):
    """Taxicab (Manhattan) distance between (x1, y1) and (x2, y2)."""
    return abs(x1 - x2) + abs(y1 - y2)
# Create the distance matrix (symmetric).
def create_distance_matrix(locations, distance_func):
    """Build the full node-to-node distance table as a dict of dicts."""
    n = len(locations)
    return {
        i: {
            j: distance_func(locations[i][0], locations[i][1],
                             locations[j][0], locations[j][1])
            for j in range(n)
        }
        for i in range(n)
    }
# Create a callback to calculate distances between cities.
def create_distance_callback(dist_matrix):
# For each pair of nodes
def distance_callback(from_node, to_node):
return int(dist_matrix[from_node][to_node])
return distance_callback
# Set search parameters
def set_search_parameters(guided_local_search=False, timeout=30000):
search_parameters = pywrapcp.RoutingModel.DefaultSearchParameters() # Default
if guided_local_search:
search_parameters.local_search_metaheuristic = (routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH) # Guided local search to avoid a local minimum
search_parameters.time_limit_ms = timeout # Need to set a time limit for guided local search
return search_parameters
# Display the solution
def display_solution(assignment, routing):
    """Print the solution cost and the route of every vehicle.

    NOTE(review): this reads the module-level global ``locations`` (assigned
    in the ``__main__`` block) to translate node indices back into
    coordinates — confirm before reusing outside this script.
    """
    # Solution cost.
    print("\nMinimum total distance found: " + str(assignment.ObjectiveValue()) + " miles\n")
    # Display the routes
    for vehicle_number in range(routing.vehicles()):
        print("\nBest route found for vehicle ", vehicle_number, ":\n")
        node = routing.Start(vehicle_number)  # Index of the variable for the starting node.
        route = ''
        while not routing.IsEnd(node):
            # Convert variable indices to node indices in the displayed route.
            route += str(locations[routing.IndexToNode(node)]) + ' -> '
            node = assignment.Value(routing.NextVar(node))
        route += str(locations[routing.IndexToNode(node)])
        print(route)
# Wrapper over Google OR Tools
def google_or_wrapper(tsp_size, num_vehicles, depot, locations, dist_matrix):
    """Solve a TSP instance with Google OR-Tools and display the result."""
    # Guard clause: reject empty instances up front.
    if tsp_size <= 0:
        print("Specify an instance greater than 0.")
        return
    # Create the routing model
    routing = pywrapcp.RoutingModel(tsp_size, num_vehicles, depot)
    # Set the search parameters
    search_parameters = set_search_parameters(True)
    # Create the distance callback.
    dist_callback = create_distance_callback(dist_matrix)
    routing.SetArcCostEvaluatorOfAllVehicles(dist_callback)
    # Solve
    assignment = routing.SolveWithParameters(search_parameters)
    if assignment:
        # Display the solution
        display_solution(assignment, routing)
    else:
        print('No solution found.')
# Main
if __name__ == "__main__":
    # Create the data.
    locations = create_data_array()
    dist_matrix = create_distance_matrix(locations, euclid_distance)
    # Initialize parameters
    tsp_size = len(locations)  # Number of cities.
    num_vehicles = 1  # Number of routes (i.e. number of vehicles), which is 1 for a TSP
    depot = 0  # Start and end node of the route
    # Solve using Google OR Tools (display_solution also reads the
    # module-level `locations` defined above).
    google_or_wrapper(tsp_size, num_vehicles, depot, locations, dist_matrix)
# gh_stars: 1-10
from sympy.abc import x
from sympy.core.numbers import (I, Rational)
from sympy.core.singleton import S
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.polys import Poly, cyclotomic_poly
from sympy.polys.domains import FF, QQ
from sympy.polys.matrices import DomainMatrix, DM
from sympy.polys.matrices.exceptions import DMRankError
from sympy.polys.numberfields.utilities import (
AlgIntPowers, coeff_search, extract_fundamental_discriminant,
isolate, supplement_a_subspace,
)
from sympy.printing.lambdarepr import IntervalPrinter
from sympy.testing.pytest import raises
def test_AlgIntPowers_01():
    # Powers of a primitive 5th root of unity, reduced mod the 5th
    # cyclotomic polynomial.
    T = Poly(cyclotomic_poly(5))
    zeta_pow = AlgIntPowers(T)
    raises(ValueError, lambda: zeta_pow[-1])
    for e in range(10):
        r = e % 5
        if r == 4:
            # zeta**4 is -(1 + zeta + zeta**2 + zeta**3).
            assert zeta_pow[e] == [-1] * 4
        else:
            coeffs = zeta_pow[e]
            assert coeffs[r] == 1 and all(coeffs[i] == 0 for i in range(4) if i != r)
def test_AlgIntPowers_02():
    T = Poly(x**3 + 2*x**2 + 3*x + 4)
    m = 7
    theta_pow = AlgIntPowers(T, m)
    for e in range(10):
        # Reference computation: coefficients of x**e mod T, reduced mod m.
        coeffs = (Poly(x)**e % T + Poly(x**3)).rep.rep[1:]
        expected = [c % m for c in reversed(coeffs)]
        assert theta_pow[e] == expected
def test_coeff_search():
    # Collect the first 13 coefficient vectors from the search generator.
    search = coeff_search(2, 1)
    collected = []
    for idx, coeffs in enumerate(search):
        collected.append(coeffs)
        if idx >= 12:
            break
    assert collected == [[1, 1], [1, 0], [1, -1], [0, 1], [2, 2], [2, 1], [2, 0], [2, -1], [2, -2], [1, 2], [1, -2], [0, 2], [3, 3]]
def test_extract_fundamental_discriminant():
    # Only integers congruent to 0 or 1 mod 4 can be decomposed.
    raises(ValueError, lambda: extract_fundamental_discriminant(2))
    raises(ValueError, lambda: extract_fundamental_discriminant(3))
    # (input, expected D factorization, expected F factorization)
    cases = (
        (0, {}, {0: 1}),
        (1, {}, {}),
        (8, {2: 3}, {}),
        (-8, {2: 3, -1: 1}, {}),
        (12, {2: 2, 3: 1}, {}),
        (36, {}, {2: 1, 3: 1}),
        (45, {5: 1}, {3: 1}),
        (48, {2: 2, 3: 1}, {2: 1}),
        (1125, {5: 1}, {3: 1, 5: 1}),
    )
    for a, D_expected, F_expected in cases:
        assert extract_fundamental_discriminant(a) == (D_expected, F_expected)
def test_supplement_a_subspace_1():
    M = DM([[1, 7, 0], [2, 3, 4]], QQ).transpose()
    # Over QQ the first standard basis vector supplements the column space.
    B = supplement_a_subspace(M)
    assert B[:, :2] == M
    assert B[:, 2] == DomainMatrix.eye(3, QQ).to_dense()[:, 0]
    # Over GF(7) the first column of M reduces to [1, 0, 0], so that vector
    # can no longer serve as the supplement (as it did over QQ); the second
    # standard basis vector is chosen instead.
    M_ff = M.convert_to(FF(7))
    B_ff = supplement_a_subspace(M_ff)
    assert B_ff[:, :2] == M_ff
    assert B_ff[:, 2] == DomainMatrix.eye(3, FF(7)).to_dense()[:, 1]
def test_supplement_a_subspace_2():
    # A rank-deficient matrix (both columns parallel) must be rejected.
    M = DM([[1, 0, 0], [2, 0, 0]], QQ).transpose()
    raises(DMRankError, lambda: supplement_a_subspace(M))
def test_IntervalPrinter():
    """Rational powers print as mpmath interval (mpi) exponents."""
    printer = IntervalPrinter()
    for expr, expected in [
        (x**Rational(1, 3), "x**(mpi('1/3'))"),
        (sqrt(x), "x**(mpi('1/2'))"),
    ]:
        assert printer.doprint(expr) == expected
def test_isolate():
    """Rational endpoints bracketing algebraic numbers via ``isolate``."""
    # Rational inputs isolate to themselves (degenerate interval).
    assert isolate(1) == (1, 1)
    assert isolate(S.Half) == (S.Half, S.Half)
    # Default precision brackets sqrt(2) between consecutive integers.
    assert isolate(sqrt(2)) == (1, 2)
    assert isolate(-sqrt(2)) == (-2, -1)
    # A tighter eps yields tighter rational bounds.
    assert isolate(sqrt(2), eps=Rational(1, 100)) == (Rational(24, 17), Rational(17, 12))
    assert isolate(-sqrt(2), eps=Rational(1, 100)) == (Rational(-17, 12), Rational(-24, 17))
    # Non-real input is unsupported.
    raises(NotImplementedError, lambda: isolate(I))
| StarcoderdataPython |
37446 | from mongoengine import connect
from config import Config
from db.models.subscriptions import Subscriptions
class Db:
    """Thin wrapper that opens the mongoengine connection from app config
    and exposes the document models used by the application."""

    Subscriptions = None

    def __init__(self, createClient=True):
        """Load config and connect. *createClient* is forwarded to
        mongoengine's lazy-connect flag."""
        cfg = Config()
        self.db = {}
        self.Subscriptions = Subscriptions
        self.createClient = createClient
        self.initConnection(cfg)

    def initConnection(self, config):
        """Open the (module-global) mongoengine connection."""
        db_conf = config.data['database']
        connect(
            db=db_conf['dbName'],
            host=db_conf['host'],
            port=db_conf['port'],
            username=db_conf['username'],
            password=db_conf['password'],
            authentication_source=db_conf['dbName'],
            connect=self.createClient)
| StarcoderdataPython |
3206049 | from typing import Optional, Tuple, List
import networkx as nx
from pydantic import Field
from .graph import CapGraph
from .molecule import Unit
class Cap(Unit):
    """A molecular cap (terminal group) built on top of :class:`Unit`.

    NOTE(review): relies on ``__post_init__`` being invoked by the ``Unit``
    base-class machinery — plain pydantic models do not call it; confirm.
    """

    # Graph describing the cap's atoms/bonds; a fresh CapGraph per instance.
    graph: CapGraph = Field(default_factory=CapGraph)
    # R-group numbers this cap may attach to — presumably empty means
    # unrestricted; TODO confirm against Unit's semantics.
    compatible_rs: Optional[Tuple[int, ...]] = tuple()
    # SMILES patterns of units this cap is compatible with.
    compatible_smiles: Optional[Tuple[str, ...]] = tuple()
    # Atom-map number of this cap's own attachment point, if any.
    r: Optional[int] = None

    def __post_init__(self):
        # Default the display name before delegating to Unit's post-init.
        if not self.name:
            self.name = "Cap"
        super().__post_init__()
        # Derive r from the anchor node's atom_map_number when unset.
        # NOTE(review): reads ``graph_`` (trailing underscore) — looks like a
        # private/parsed alias of ``graph``; confirm it is set by Unit.
        if self.r is None:
            r = self.graph_.nodes[self.graph._node].get("atom_map_number")
            if r:
                self.r = r
# Standard caps: hydrogen, acetyl (Ace) and N-methyl (Nme).
# [R] marks the attachment point in each SMILES pattern.
HYDROGEN_CAP = Cap.from_smiles("[R][H]", name="H")
ACE_CAP = Cap.from_smiles("CC(=O)-[R]", name="Ace",
                          compatible_smiles=["[R]N([H])C([*])C(=O)-[*]"])
NME_CAP = Cap.from_smiles("[R]NC", name="Nme",
                          compatible_smiles=["[*]N([H])C([*])C(=O)-[R]"])
| StarcoderdataPython |
3244874 | import requests
import json
import time
#from configs import *
def get_organ_id(stockId):
    """Look up the cninfo.com.cn internal organisation id for a stock code.

    Args:
        stockId: Stock code, e.g. '601668'.

    Returns:
        The ``orgId`` of the first search hit.

    Raises:
        RuntimeError: if the HTTP request does not return status 200.
    """
    # cninfo (巨潮资讯) top-search endpoint; the keyword travels in the URL.
    _req = requests.post(
        'http://www.cninfo.com.cn/new/information/topSearch/query?keyWord=' + str(stockId) + '&maxNum=10')
    if not _req.status_code == 200:
        # BUG FIX: the original `raise _req` raised the Response object
        # itself, which is a TypeError (exceptions must derive from
        # BaseException). Raise a real exception instead.
        raise RuntimeError(
            'cninfo topSearch request failed with status {0}'.format(_req.status_code))
    # response.json() parses the JSON body into Python objects.
    data = _req.json()
    # Return the orgId of the best match.
    return data[0]['orgId']
def search(keyword, page=1, limit=20):
    """Query cninfo announcement history for *keyword* (a stock code).

    Args:
        keyword: Stock code to search for.
        page: 1-based result page number.
        limit: Page size.

    Returns:
        A list of dicts with 'time', 'title' and a zero-argument
        'pdf_path' callable returning the announcement's PDF URL.

    Raises:
        Exception: on a non-200 response or a non-JSON content type.
    """
    org_id = get_organ_id(keyword)
    target_url = 'http://www.cninfo.com.cn/new/hisAnnouncement/query'
    params = {
        'stock': str(keyword) + ',' + str(org_id),
        'pageSize': limit,
        'pageNum': page
    }
    _req = requests.post(target_url, data=params)
    if _req.status_code != 200:
        raise Exception('Post Error')
    if _req.headers['Content-Type'] != 'application/json;charset=UTF-8':
        raise Exception('API Failed')
    data = _req.json()
    prefix = 'http://static.cninfo.com.cn/%s'
    announcements = data['announcements']
    # BUG FIX: the original `lambda: prefix % n['adjunctUrl']` captured the
    # comprehension variable `n` late, so every pdf_path callable returned
    # the URL of the *last* announcement. Binding the URL as a default
    # argument freezes the per-item value at creation time.
    return [{
        'time': time.strftime("%Y-%m-%d", time.localtime(n['announcementTime'] / 1000)),
        'title': n['announcementTitle'],
        'pdf_path': (lambda url=prefix % n['adjunctUrl']: url)
    } for n in announcements]
if __name__ == '__main__':
    # Smoke test: fetch the latest announcements for stock 601668.
    print(search('601668'))
| StarcoderdataPython |
3396248 | import numpy as np
import networkx as nx
import random
import sys
from tqdm import *
import os
import pickle
from recommenders.models.deeprec.deeprec_utils import cal_metric
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.tree import _tree
from sklearn import preprocessing
class FeatureBased(object):
    """Feature-based link-prediction baseline.

    Loads CSV instances (label followed by float features), optionally
    standardizes them, fits an sklearn classifier, and writes per-row
    positive-class scores for inference files.
    """
    def __init__(self, seed = 10):
        # Seed both Python's and NumPy's RNGs for reproducibility.
        self.seed = seed
        random.seed(self.seed)
        np.random.seed(self.seed)
        self.coe = None  # linear-model coefficients (unused for the MLP)
        self.model = None  # fitted sklearn estimator
        self.normalized_feature = True  # standardize features before fit/eval
    def fit_file(self, trainfile):
        """Load *trainfile*, fit the scaler (if enabled) and then the model."""
        train_features, train_labels = self.load_instances(trainfile)
        print('#. train features is {0}'.format(len(train_features)))
        if self.normalized_feature:
            # Scaler is fitted on the training set only; reused by eval/infer.
            self.feat_norm = preprocessing.StandardScaler().fit(train_features)
        self.fit(train_features=self.feat_norm.transform(train_features) if self.normalized_feature else train_features, train_labels=train_labels)
    def eval_file(self, testfile):
        """Load *testfile* and return its metrics dict (also printed)."""
        features, labels = self.load_instances(testfile)
        res = self.eval(self.feat_norm.transform(features) if self.normalized_feature else features, labels)
        print(res)
        return res
    def infer_file(self, testfile, outfile ):
        """Score *testfile* and write one positive-class score per line."""
        test_features, _ = self.load_instances(testfile)
        print('infer scores of {0}...'.format(os.path.basename(testfile)))
        with open(outfile, 'w') as wt:
            scores = self.model.predict_proba(self.feat_norm.transform(test_features) if self.normalized_feature else test_features)
            for v in scores:
                # v is [p(neg), p(pos)]; keep the positive-class probability.
                wt.write('{0}\n'.format(v[1]))
    def get_file_line_cnt(self, file):
        """Count the lines of *file*, printing progress every 10k lines."""
        cnt = 0
        with open(file, 'r') as rd:
            while True:
                line = rd.readline()
                if not line:
                    break
                cnt += 1
                if cnt % 10000 == 0:
                    print('\rloading file {0} line {1}'.format(os.path.basename(file), cnt), end=' ')
        return cnt
    def load_instances(self, file):
        """Parse a CSV of `label,feat1,feat2,...` rows.

        Returns:
            (features, labels): list of float32 arrays and list of ints.
        """
        file_line_cnt = self.get_file_line_cnt(file)
        print()
        features, labels = [], []
        with open(file, 'r') as rd:
            # for _ in tqdm(range(file_line_cnt), desc='loading {0}'.format(os.path.basename(file)), position=0, leave=True):
            for i in range(file_line_cnt):
                if i % 10000 == 0:
                    print('\r processing line {0} / {1}'.format(i, file_line_cnt), end=' ')
                line = rd.readline()
                # NOTE(review): line[:-1] assumes a trailing newline on every
                # row; the last line of a file without one loses a character.
                words = line[:-1].split(',')
                labels.append(int(words[0]))
                cur_instance = []
                for word in words[1:]:
                    cur_instance.append(float(word))
                features.append(np.asarray(cur_instance, dtype=np.float32))
        return features, labels
    def fit(self, train_features = [], train_labels = []):
        """Fit the classifier. NOTE: mutable default args are never mutated
        here, but passing real data is expected."""
        print('Training Features Based algorithm...')
        print('number of instances={0}'.format(len(train_features)))
        # model = LogisticRegression(solver='sag', random_state=10, C=10, max_iter=50)
        # model = DecisionTreeClassifier(max_depth=10) #5 0.568664
        ### A_feature_v4_max200_min1000_seq10_neg9_newfeature 0.5784
        # model = MLPClassifier(solver='adam', learning_rate_init=0.001, alpha=1e-7, hidden_layer_sizes=(16,), random_state=1, max_iter=50) #alpha=1e-5 : 0.580038 1e-6:0.5822 1e-7: 0.5833
        ### A_feature_v4_max200_min1000_seq10_neg9_newfeature 0.580197
        ### 0.585648 A_feature_v4_max200_min1000_seq10_neg9
        model = MLPClassifier(solver='adam', learning_rate_init=0.001, alpha=1e-7, hidden_layer_sizes=(32,16), random_state=1, max_iter=100)
        model.fit(train_features, train_labels)
        # self.coe = list(model.coef_[0])
        # print('coe: {0}'.format(self.coe))
        self.model = model
    def eval(self, test_features = [], test_labels = []):
        """Score *test_features* and return AUC computed by cal_metric."""
        print('evalation starts...')
        print('number of instances: {0}'.format(len(test_features)))
        scores = self.model.predict_proba(test_features)
        # print('eval : {0}'.format(scores[:10]))
        # Tiny random jitter breaks ties in downstream ranking metrics.
        preds_rs = [v[1] + random.random() * 1e-6 for v in scores]
        # preds_rs = np.array(preds_rs, dtype=np.float64)
        print('calculating metrics...')
        res = cal_metric(labels = np.array(test_labels, dtype=np.int32), preds = preds_rs, metrics = ["auc"])
        print('Feature Based algorithm done')
        return res
    def save_model(self, outfile):
        """Pickle the fitted estimator to *outfile*."""
        with open(outfile, 'wb') as wt:
            pickle.dump(self.model, wt)
    def load_model(self, infile):
        """Load a previously pickled estimator from *infile*."""
        with open(infile, 'rb') as rd:
            self.model = pickle.load(rd)
if __name__ == '__main__':
    # Train on my_train.csv, evaluate on the two validation splits, then
    # write inference scores for the intermediate and final test sets.
    inpath = '/home/jialia/wsdm/seq_datasets/A_feature_v4_max200_min1000_seq10_neg9' ##A_feature_v4_max200_min1000_seq10_neg9 A_edge_seqmerge_neg9_test
    train_file = os.path.join(inpath, 'my_train.csv')
    valid_file = os.path.join(inpath, 'my_valid.csv')
    test_file = os.path.join(inpath, 'valid.csv')
    inter_test_file = os.path.join(inpath, 'inter_test.csv')
    final_test_file = os.path.join(inpath, 'final_test.csv')
    outpath = os.path.join(inpath, 'output')
    if not os.path.exists(outpath):
        os.mkdir(outpath)
    model = FeatureBased()
    model.fit_file(train_file)
    res = model.eval_file(valid_file)
    print('evaluation on my valid file is {0}'.format(res))
    res = model.eval_file(test_file)
    print('evaluation on valid file is {0}'.format(res))
    # Only tree models expose tree_; dump feature importances when present.
    if hasattr(model.model, 'tree_'):
        feat_importance = model.model.tree_.compute_feature_importances(normalize=False)
        print("feat importance = ")
        for i, v in enumerate(feat_importance):
            print('{0}\t{1}'.format(i, v))
    model.infer_file(
        inter_test_file,
        os.path.join(outpath, 'inter_test_output.txt')
    )
    model.infer_file(
        final_test_file,
        os.path.join(outpath, 'final_test_output.txt')
    )
    # model02 = FeatureBased()
    # model02.load_model(os.path.join(args['output_path'], 'feature-based-ranker{0}.pkl'.format(flag)))
    # print('model02')
    # model02.eval_file(os.path.join(args['data_path'], 'test.feature'), 100)
    # export_code(model02.model, os.path.join(args['output_path'], 'feature-based-ranker{0}-exported.py'.format(flag)), head_indent_num=2)
    print('Job: A_feature_v4_max200_min1000_seq10_neg9')
    ## A_edge_seqmerge_neg9_test: 0.579693
    ## A_feature_v4_max200_min1000_seq10_neg9_newfeature 0.580197
## A_feature_edge_seqmerge_neg9_vip5k_newfeature 0.582098 | StarcoderdataPython |
62214 | from __future__ import print_function
import os
import time
import torch
import torchvision.transforms as transforms
from Dataset import DeblurDataset
from torch.utils.data import DataLoader
from utils import *
from network import *
from Dataset import DeblurDataset, RealImage
def test(args):
    """Evaluate the deblurring generator on the DeblurMicroscope test set.

    Loads the latest generator/discriminator checkpoints, runs every test
    image through the network, saves intermediate/final outputs, and prints
    per-image and average PSNR/SSIM plus processing time.

    Raises:
        Exception: if no checkpoint can be found for either network.
    """
    device = torch.device("cuda" if torch.cuda.is_available() and args.gpu >= 0 else "cpu")
    model_G = Generator(args, device)
    model_D = Classifier(args, device)
    if torch.cuda.device_count() > 1 and args.gpu >= 0:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
    else:
        print("Let's use CPUs!")
    # Checkpoints were saved from DataParallel-wrapped models, so wrap
    # before loading the state dicts (keys carry the "module." prefix).
    model_G = nn.DataParallel(model_G)
    model_D = nn.DataParallel(model_D)
    print('===> Loading models')
    net_g_path = "checkpoint/netG"
    net_d_path = "checkpoint/netD"
    if not find_latest_model(net_g_path) or not find_latest_model(net_d_path):
        print(" [!] Load failed...")
        raise Exception('No model to load for testing!')
    else:
        print(" [*] Load SUCCESS")
        model_path_G = find_latest_model(net_g_path)
        checkpointG = torch.load(model_path_G, map_location=device)
        model_G.load_state_dict(checkpointG['model_state_dict'])
        model_path_D = find_latest_model(net_d_path)
        checkpointD = torch.load(model_path_D, map_location=device)
        model_D.load_state_dict(checkpointD['model_state_dict'])
    netG = model_G.to(device)
    netD = model_D.to(device)
    netG.eval()
    netD.eval()
    print("====> Loading data")
    ############################
    # For DeblurMicroscope dataset
    ###########################
    f_test = open("./dataset/test_instance_names.txt", "r")
    test_data = f_test.readlines()
    test_data = [line.rstrip() for line in test_data]
    f_test.close()
    test_data_loader = DataLoader(DeblurDataset(test_data, args, False), batch_size=1, shuffle=False)
    all_psnr = []
    all_ssim = []
    start_time = time.time()
    # Differentiable blur model used to re-blur the sharp ground truth.
    netG_S2B = BlurModel(args, device)
    with torch.no_grad():
        for batch in test_data_loader:
            # real_B: blurred input, real_S: sharp ground truth.
            real_B, real_S, img_name = batch[0], batch[1], batch[2]
            real_B, real_S = real_B.to(device), real_S.to(device)
            # B = (B, 1, 64, 64), S = (B, 1, 256, 256)
            # Generator returns a multi-scale pyramid; [-1] is full resolution.
            pred_S = netG(real_B)
            pred_S0 = pred_S[0]
            pred_S1 = pred_S[1]
            pred_S = pred_S[-1]
            real_B1 = F.interpolate(pred_S0, (args.fine_size * 2, args.fine_size * 2), mode="bilinear")
            real_B2 = F.interpolate(pred_S1, (args.fine_size * 4, args.fine_size * 4), mode="bilinear")
            recov_B = netG_S2B(real_S)
            recov_B = recov_B[0]
            pred_label = netD(pred_S)
            cur_psnr, cur_ssim = compute_metrics(real_S, pred_S)
            all_psnr.append(cur_psnr)
            all_ssim.append(cur_ssim)
            # Save extra diagnostics only for instances whose name ends '01'.
            if img_name[0][-2:] == '01':
                img_roi = pred_label.detach().squeeze(0).cpu()
                img_roi = (img_roi * 2 - 1.)
                save_img(img_roi, '{}/roi_'.format(args.valid_dir) + img_name[0])
                img_rec = recov_B.detach().squeeze(0).cpu()
                save_img(img_rec, '{}/rec_'.format(args.valid_dir) + img_name[0])
                img_S = real_B.detach().squeeze(0).cpu()
                save_img(img_S, '{}/input0_'.format(args.test_dir) + img_name[0])
                img_S = real_B1.detach().squeeze(0).cpu()
                save_img(img_S, '{}/input1_'.format(args.valid_dir) + img_name[0])
                img_S = real_B2.detach().squeeze(0).cpu()
                save_img(img_S, '{}/input2_'.format(args.valid_dir) + img_name[0])
                img_S = pred_S0.detach().squeeze(0).cpu()
                save_img(img_S, '{}/output0_'.format(args.valid_dir) + img_name[0])
                img_S = pred_S1.detach().squeeze(0).cpu()
                save_img(img_S, '{}/output1_'.format(args.valid_dir) + img_name[0])
            img_S = pred_S.detach().squeeze(0).cpu()
            save_img(img_S, '{}/test_'.format(args.test_dir) + img_name[0])
            print('test_{}: PSNR = {} dB, SSIM = {}'
                  .format(img_name[0], cur_psnr, cur_ssim))
    total_time = time.time() - start_time
    ave_psnr = sum(all_psnr) / len(test_data_loader)
    ave_ssim = sum(all_ssim) / len(test_data_loader)
    ave_time = total_time / len(test_data_loader)
    print("Average PSNR = {}, SSIM = {}, Processing time = {}".format(ave_psnr, ave_ssim, ave_time))
def test_real(args):
    """Run the deblurring generator on arbitrary real images.

    Reads every .png/.jpg in ``args.input_dir``, deblurs it with the latest
    generator checkpoint, and writes ``result_<name>`` into
    ``args.output_dir``.

    Raises:
        Exception: if the input folder or a generator checkpoint is missing.
    """
    if torch.cuda.device_count() > 1 and args.gpu >= 0:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
    else:
        print("Let's use CPUs!")
    device = torch.device("cuda" if torch.cuda.is_available() and args.gpu >= 0 else "cpu")
    model_G = Generator(args, device)
    # Checkpoint was saved from a DataParallel-wrapped model.
    model_G = nn.DataParallel(model_G)
    print('===> Loading models')
    net_g_path = "checkpoint/netG"
    netG = model_G.to(device)
    if not find_latest_model(net_g_path):
        print(" [!] Load failed...")
        raise Exception('No model to load!')
    else:
        print(" [*] Load SUCCESS")
        model_path_G = find_latest_model(net_g_path)
        checkpointG = torch.load(model_path_G, map_location=device)
        netG.load_state_dict(checkpointG['model_state_dict'])
    netG.eval()
    print("====> Loading data")
    ############################
    # For Real Images
    ###########################
    if not os.path.exists(args.input_dir):
        raise Exception("Input folder not exist!")
    else:
        image_dir = args.input_dir
        # Collect extensionless basenames of all png/jpg files.
        image_filenames = [image_dir + x[0:-4] for x in os.listdir(image_dir) if x[-4:] in set([".png", ".jpg"])]
    test_data_loader = DataLoader(RealImage(image_filenames, args, False), batch_size=1, shuffle=False)
    start_time = time.time()
    with torch.no_grad():
        for batch in test_data_loader:
            real_B, img_name = batch[0], batch[1]
            real_B = real_B.to(device)
            # Generator returns a multi-scale pyramid; keep full resolution.
            pred_S = netG(real_B)
            pred_S = pred_S[-1]
            img_S = pred_S.detach().squeeze(0).cpu()
            save_img(img_S, '{}/result_'.format(args.output_dir) + img_name[0])
    total_time = time.time() - start_time
    ave_time = total_time / len(test_data_loader)
    print("Processing time = {}".format(ave_time))
| StarcoderdataPython |
1692903 | # Generated by Django 2.2 on 2022-01-08 22:17
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated migration: replaces ``Image.images_path`` with a new
    ``image_path`` field and tightens ``Video.video_path``'s definition."""

    dependencies = [
        ('profiles_api', '0004_auto_20220108_2213'),
    ]

    operations = [
        # Drop the old (plural) path field...
        migrations.RemoveField(
            model_name='image',
            name='images_path',
        ),
        # ...and add the singular replacement. The one-off default only
        # back-fills existing rows (preserve_default=False drops it after).
        # NOTE(review): a callable default (timezone.now) on a CharField
        # stores the stringified timestamp — confirm this was intended.
        migrations.AddField(
            model_name='image',
            name='image_path',
            field=models.CharField(default=django.utils.timezone.now, max_length=255),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='video',
            name='video_path',
            field=models.CharField(max_length=255),
        ),
    ]
| StarcoderdataPython |
1630628 | import glob
import os
import subprocess
from typing import Any
TEST_CONFIG_DIR = os.path.dirname(__file__)
TEST_CONFIG_PATH = TEST_CONFIG_DIR + "/fixtures/config/test-simulation-config.json"
def test_help_command() -> None:
    """`python -m tokesim --help` mentions both subcommands."""
    cmd = ["python", "-m", "tokesim", "--help"]
    output = subprocess.check_output(cmd).decode("utf-8")
    assert output.find("launches a simulation") > -1
    assert output.find("creates an initial simu") > -1
def init_command(tmpdir: Any) -> str:
    """Run `tokesim init` into *tmpdir* with 10 agents; return its stdout."""
    cmd = ["python", "-m", "tokesim",
           "init", "--dir", str(tmpdir), "--agents", "10"]
    return subprocess.check_output(cmd).decode("utf-8")
def run_command(tmpdir: Any) -> str:
    """Run `tokesim run` against the config generated in *tmpdir*."""
    cmd = ["python", "-m", "tokesim",
           "run", "--config", f"{str(tmpdir)}/simulation.json", "--port", "2000"]
    return subprocess.check_output(cmd).decode("utf-8")
def test_init_command(tmpdir: Any) -> None:
    """`tokesim init` reports progress and writes the expected scaffold."""
    output = init_command(tmpdir)
    assert output.find("creating simulation") > -1
    created = [name for name in glob.iglob(f"{str(tmpdir)}/**/*", recursive=True)]
    wanted = set(
        f"{str(tmpdir)}/{rel}"
        for rel in (
            "config_schema.json",
            "simulation.json",
            "contracts/SimpleToken_abi.json",
            "contracts/SimpleToken.bin",
            "simple_token_model.py",
            "simple_token_agent.py",
        )
    )
    # Every expected file must appear among the created paths.
    assert len(wanted.intersection(created)) == len(wanted)
def skip_test_run_command(tmpdir: Any) -> None:
    """Disabled end-to-end test: init a project, then run the simulation."""
    # NOTE: this command list is built but never executed (kept from the
    # original, disabled test body).
    command = ["python", "-m", "tokesim",
               "run", "--config", str(tmpdir), "--port", "100"]
    init_command(tmpdir)
    run_command(tmpdir)
| StarcoderdataPython |
3297234 | <filename>tests/r/test_barley.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.barley import barley
def test_barley():
    """Test module barley.py by downloading
    barley.csv and testing that the shape of the
    extracted data has 90 rows and 3 columns.
    """
    test_path = tempfile.mkdtemp()
    x_train, metadata = barley(test_path)
    try:
        assert x_train.shape == (90, 3)
    except Exception:
        # Clean up the temp dir on failure, then re-raise the original error.
        # BUG FIX: the original used `raise()`, which raises the *tuple* `()`
        # (a TypeError: exceptions must derive from BaseException) and loses
        # the real failure; a bare `raise` re-raises the active exception.
        shutil.rmtree(test_path)
        raise
| StarcoderdataPython |
18912 | # 分析黑魔法防御课界面
import cv2
import sys
sys.path.append(r"C:\\Users\\SAT") # 添加自定义包的路径
from UniversalAutomaticAnswer.conf.confImp import get_yaml_file
from UniversalAutomaticAnswer.screen.screenImp import ScreenImp # 加入自定义包
from UniversalAutomaticAnswer.ocr.ocrImp import OCRImp
from UniversalAutomaticAnswer.util.filter import filterQuestion, filterLine, filterPersonState
from paddleocr import PaddleOCR
# Load the configuration file.
conf_path = 'conf/conf.yml'
conf_data = get_yaml_file(conf_path)
# Initialize the OCR model.
ocr = OCRImp(conf_data)
# Initialize the screen-capture helper.
screen = ScreenImp(conf_data)
# left click
import win32api
import win32con
def left_click(x, y, times=4):
    """Move the cursor to screen coordinates (x, y) and left-click *times* times.

    Args:
        x, y: Absolute screen coordinates.
        times: Number of down/up click events to send (default 4; the game
            UI sometimes drops single clicks).
    """
    win32api.SetCursorPos((x, y))
    # BUG FIX: the original decremented a counter in a `while times:` loop,
    # which spins forever for negative `times`, and it also had an unused
    # local `import time`. A bounded for-loop fixes both.
    for _ in range(times):
        win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, x, y, 0, 0)
        win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, x, y, 0, 0)
walk_coordinate = [[330,640],[1260,630],[740,550]] # left, right, center
card_coordinate = [[522,820],[695,798],[838,821],[987,818],[1185,830]] # ~ 1 2 3 4
# charms_coordinate = [[200,770,300,855],[630,700,676,777],[765,690,818,778],[910,700,960,775],[1060,700,1108,786],[556, 878,637, 922]] # states: steps 1 2 3 4 HP
# copy_coordinate = [[540,400,650,500],[980,345,1090,445],[1160,320,1260,420]]
# Grab the game window rect and an initial screenshot.
win_rect, img= screen.get_screenshot()
# img_path = './img/harry_charmsclass.png'
# img = cv2.imread(img_path)
# img_steps = img[770:855,200:300]
# img1 = img[700:800,600:700]
# img2 = img[690:778,765:818] # 点击 850 716
# img3 = img[700:775,910:960]
# img4 = img[700:786,1060:1108]
# img5 = img[878:932,556:637] # 蓝条
# walk_coordinate = [[850,716],[846,712],[854,720]]
# card_coordinate = [[522,820],[695,798],[838,821],[987,818],[1122,830]] # ~ 1 2 3 4
import matplotlib.pyplot as plt
# result = ocr.ocr(img, det=True, cls=True)
# print(result)
# plt.imshow(img)
# plt.show()
# """
def is_start(img, str_start):
    """Return True (after clicking the button) if the start/match button
    labelled *str_start* is visible in *img*.

    Reads the module-level ``screen``/``ocr`` helpers and ``win_rect``
    (game-window origin) defined above.
    """
    # BUG FIX: `time` was never imported at module top level; the original
    # only worked because a later `import time` inside the main loop bound
    # the global before this function ran. Import it locally instead.
    import time
    img_start = screen.get_startMatchBtn(img)
    result_start = ocr.ocr(img_start)
    content_start = ocr.ocr_content(result_start)
    content_start = filterLine(content_start)
    if content_start and content_start[0] == str_start:
        # Wait out the UI animation, then click the button (coordinates are
        # relative to the game-window origin).
        time.sleep(5)
        x, y = 1300, 840
        left_click(win_rect[0]+x, win_rect[1]+y, 2)
        return True
    return False
# Main automation loop: screenshot the game, OCR the UI, and play cards
# until `epoch_num` "start class" matches have been consumed.
count_steps = 0
epoch_num = 3
while True:
    if epoch_num == 0:
        break
    import time
    time.sleep(2)
    win_rect, img= screen.get_screenshot()
    # img_path = './img/harry_darkclass3.png' #
    # img = cv2.imread(img_path)
    # print(img.shape)
    # img = img[875:920,1185:1300] # [1185, 875, 1300, 920] "tap to continue" region
    # img = img[830:880, 1234:1414] # [1234,830,1414,880] "match class" region
    # Detect the "match class" buttons (regular class / house activity).
    flag1 = is_start(img, '匹配上课')
    flag2 = is_start(img, '学院活动匹配')
    if flag1 or flag2: # if detected, consume one epoch and re-screenshot
        epoch_num -= 1
        continue
    # Detect the "tap to continue" button.
    img_continue = img[875:920,1185:1300]
    result_continue = ocr.ocr(img_continue)
    content_continue = ocr.ocr_content(result_continue)
    content_continue = filterLine(content_continue)
    if len(content_continue)>0 and content_continue[0] == '点击继续':
        x, y = 1200, 890
        left_click(win_rect[0]+x,win_rect[1]+y,2)
        time.sleep(1)
        continue
    # Defaults below are immediately overwritten by the crops; kept as-is.
    img_steps, img_1, img_2, img_3, img_4, img_5 = '-1', '15', '15', '15', '15', '11'
    img_steps = img[800:850, 200:265]
    img_1 = img[710:777, 615:665] # 1
    img_2 = img[710:777, 770:820] # 2
    img_3 = img[710:777, 920:970] # 3
    img_4 = img[720:787, 1060:1110] # 4
    img_nextcard = img[768:816, 1205:1246,::-1] # next-card button
    img_5 = img[878:932,556:637] # blue (mana) bar
    result_steps = ocr.ocr(img_steps)
    result_1 = ocr.ocr(img_1)
    result_2 = ocr.ocr(img_2)
    result_3 = ocr.ocr(img_3)
    result_4 = ocr.ocr(img_4)
    result_nextcard = ocr.ocr(img_nextcard)
    result_5 = ocr.ocr(img_5)
    result_steps = ocr.ocr_content(result_steps)
    result_steps = filterLine(result_steps)
    result_1 = ocr.ocr_content(result_1)
    result_1 = filterLine(result_1)
    result_2 = ocr.ocr_content(result_2)
    result_2 = filterLine(result_2)
    result_3 = ocr.ocr_content(result_3)
    result_3 = filterLine(result_3)
    result_4 = ocr.ocr_content(result_4)
    result_4 = filterLine(result_4)
    result_5 = ocr.ocr_content(result_5)
    result_5 = filterLine(result_5)
    # Parse each OCR result into an int, falling back to a sentinel.
    # NOTE(review): int(result_*[0][0][0]) takes only the FIRST character of
    # the first OCR string — assumes single-digit values; confirm.
    if (result_steps!=None) and len(result_steps) > 0 and result_steps[0].isdigit():
        result_steps = int(result_steps[0][0][0])
    else:
        result_steps = 0
    if (result_1!=None) and len(result_1) > 0 and result_1[0].isdigit():
        result_1 = int(result_1[0][0][0])
    else:
        result_1 = 15
    if (result_2!=None) and len(result_2) > 0 and result_2[0].isdigit():
        result_2 = int(result_2[0][0][0])
    else:
        result_2 = 15
    if (result_3!=None) and len(result_3) > 0 and result_3[0].isdigit():
        result_3 = int(result_3[0][0][0])
    else:
        result_3 = 15
    if (result_4!=None) and len(result_4) > 0 and result_4[0].isdigit():
        result_4 = int(result_4[0][0][0])
    else:
        result_4 = 15
    if (result_5!=None) and len(result_5) > 0 and result_5[0].isdigit():
        result_5 = int(result_5[0][0][0])
    else:
        result_5 = -1
    # Pick the cheapest card by OCR'd mana cost.
    fee = [result_1,result_2,result_3,result_4]
    idx = fee.index(min(fee))
    import random
    # idx = random.randint(0, 3)
    # if fee[idx]>7:
    #     continue
    walk_idx = random.randint(0, 2)
    x_walk, y_walk = walk_coordinate[walk_idx][0], walk_coordinate[walk_idx][1]
    x_0, y_0 = card_coordinate[0][0], card_coordinate[0][1] # companion card
    x, y = card_coordinate[idx+1][0], card_coordinate[idx+1][1]
    # Only act when mana is unreadable (-1) or above 5.
    if result_5 == -1 or result_5 > 5:
        if count_steps % 3 == 0:
            left_click(win_rect[0]+x_walk,win_rect[1]+y_walk,4) # walk one step
            left_click(win_rect[0]+x_0,win_rect[1]+y_0,4) # click the companion card
        count_steps += 1
        left_click(win_rect[0]+x,win_rect[1]+y,4) # click the chosen card
    print('所剩步数:',result_steps)
    print('卡1费用:',result_1)
    print('卡2费用:',result_2)
    print('卡3费用:',result_3)
    print('卡4费用:',result_4)
    print('剩余费用:',result_5)
    print('点击位置:', x, y)
# """
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# import matplotlib.pyplot as plt
# plt.imshow(img)
# plt.show()
# cv2.imwrite('./img/harry_charmsclass.png',img) | StarcoderdataPython |
108250 | <reponame>Jihunn-Kim/khu_capstone_1
import tensorrt as trt
import pycuda.driver as cuda
import numpy as np
import torch
import pycuda.autoinit
import dataset
import model
import time
# print(dir(trt))
# Deserialize a prebuilt TensorRT engine from disk and create an
# execution context for inference.
tensorrt_file_name = 'bert.plan'
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
trt_runtime = trt.Runtime(TRT_LOGGER)
with open(tensorrt_file_name, 'rb') as f:
    engine_data = f.read()
engine = trt_runtime.deserialize_cuda_engine(engine_data)
context = engine.create_execution_context()
# class HostDeviceMem(object):
# def __init__(self, host_mem, device_mem):
# self.host = host_mem
# self.device = device_mem
# def __str__(self):
# return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)
# def __repr__(self):
# return self.__str__()
# inputs, outputs, bindings, stream = [], [], [], []
# for binding in engine:
# size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
# dtype = trt.nptype(engine.get_binding_dtype(binding))
# host_mem = cuda.pagelocked_empty(size, dtype)
# device_mem = cuda.mem_alloc(host_mem.nbytes)
# bindings.append(int(device_mem))
# if engine.binding_is_input(binding):
# inputs.append( HostDeviceMem(host_mem, device_mem) )
# else:
# outputs.append(HostDeviceMem(host_mem, device_mem))
# input_ids = np.ones([1, 1, 29, 29])
# numpy_array_input = [input_ids]
# hosts = [input.host for input in inputs]
# trt_types = [trt.int32]
# for numpy_array, host, trt_types in zip(numpy_array_input, hosts, trt_types):
# numpy_array = np.asarray(numpy_array).ravel()
# np.copyto(host, numpy_array)
# def do_inference(context, bindings, inputs, outputs, stream):
# [cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
# context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
# [cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
# stream.synchronize()
# return [out.host for out in outputs]
# trt_outputs = do_inference(
# context=context,
# bindings=bindings,
# inputs=inputs,
# outputs=outputs,
# stream=stream)
def infer(context, input_img, output_size, batch_size):
    """Run one TensorRT inference and return the host-side output array."""
    # TensorRT expects float32 input.
    host_in = input_img.astype(np.float32)
    # Host buffer that will receive the network output.
    host_out = np.empty(output_size, dtype = np.float32)
    # Device buffers sized for the whole batch.
    dev_in = cuda.mem_alloc(batch_size * host_in.size * host_in.dtype.itemsize)
    dev_out = cuda.mem_alloc(batch_size * host_out.size * host_out.dtype.itemsize)
    bindings = [int(dev_in), int(dev_out)]
    stream = cuda.Stream()
    # Copy in, execute, copy out — all enqueued on one stream.
    cuda.memcpy_htod_async(dev_in, host_in, stream)
    context.execute_async(batch_size, bindings, stream.handle, None)
    cuda.memcpy_dtoh_async(host_out, dev_out, stream)
    # Wait for all enqueued work to finish before returning.
    stream.synchronize()
    return host_out
# --- Benchmark 1: FP32 TensorRT engine on a dummy 256x1x29x29 batch. ---
# kwargs = {"./dataset/DoS_dataset.csv" : './DoS_dataset.txt'}
# train_data_set, data_idx_map, net_class_count, net_data_count, test_data_set = dataset.GetCanDatasetUsingTxtKwarg(100, 0, **kwargs)
# testloader = torch.utils.data.DataLoader(test_data_set, batch_size=256,
#                                          shuffle=False, num_workers=2)
check_time = time.time()
cnt = 0
temp = np.ones([256, 1, 29, 29])
for idx in range(100):
# for i, (inputs, labels) in enumerate(testloader):
    trt_outputs = infer(context, temp, (256, 2), 256)
    print(trt_outputs.shape)
    # print(trt_outputs)
    # print(np.argmax(trt_outputs, axis=0))
    # cnt += 1
    # if cnt == 100:
    #     break
print(time.time() - check_time)
# --- Benchmark 2: same loop against the quantized (int) engine. ---
tensorrt_file_name = 'bert_int.plan'
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
trt_runtime = trt.Runtime(TRT_LOGGER)
with open(tensorrt_file_name, 'rb') as f:
    engine_data = f.read()
engine = trt_runtime.deserialize_cuda_engine(engine_data)
context = engine.create_execution_context()
check_time = time.time()
cnt = 0
temp = np.ones([256, 1, 29, 29])
for idx in range(100):
# for i, (inputs, labels) in enumerate(testloader):
    trt_outputs = infer(context, temp, (256, 2), 256)
    print(trt_outputs.shape)
    # print(trt_outputs)
    # print(np.argmax(trt_outputs, axis=0))
    # cnt += 1
    # if cnt == 100:
    #     break
print(time.time() - check_time)
# --- Benchmark 3: the plain PyTorch model on GPU for comparison. ---
test_model = model.Net().cuda()
check_time = time.time()
cnt = 0
temp = torch.randn(256, 1, 29, 29).cuda()
for idx in range(100):
# for i, (inputs, labels) in enumerate(testloader):
    # inputs = inputs.float().cuda()
    normal_outputs = test_model(temp)
    # print(normal_outputs)
    print(normal_outputs.shape)
    cnt += 1
    if cnt == 100:
        break
print(time.time() - check_time)
import tensorrt as trt
import numpy as np
import pycuda.autoinit
import pycuda.driver as cuda
import time
# Second, standalone TensorRT timing script: constants and logger.
model_path = "bert.onnx"
input_size = 32
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
# def build_engine(model_path):
# with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
# builder.max_workspace_size = 1<<20
# builder.max_batch_size = 1
# with open(model_path, "rb") as f:
# parser.parse(f.read())
# engine = builder.build_cuda_engine(network)
# return engine
def alloc_buf(engine):
    """Allocate pinned host buffers, device buffers and a CUDA stream for
    the engine's input (binding 0) and output (binding 1)."""
    in_size = trt.volume(engine.get_binding_shape(0))
    out_size = trt.volume(engine.get_binding_shape(1))
    in_dtype = trt.nptype(engine.get_binding_dtype(0))
    out_dtype = trt.nptype(engine.get_binding_dtype(1))
    # Page-locked host memory enables fast/async device copies.
    in_cpu = cuda.pagelocked_empty(in_size, in_dtype)
    out_cpu = cuda.pagelocked_empty(out_size, out_dtype)
    # Matching device allocations.
    in_gpu = cuda.mem_alloc(in_cpu.nbytes)
    out_gpu = cuda.mem_alloc(out_cpu.nbytes)
    return in_cpu, out_cpu, in_gpu, out_gpu, cuda.Stream()
def inference(engine, context, inputs, out_cpu, in_gpu, out_gpu, stream):
    """Copy *inputs* to the GPU, execute the context synchronously and copy
    the result back into *out_cpu* (which is also returned).

    The *engine* and *stream* arguments are unused by this synchronous
    path; the async variant that would need them is kept below, commented.
    """
    # async version
    # with engine.create_execution_context() as context:  # cost time to initialize
    #     cuda.memcpy_htod_async(in_gpu, inputs, stream)
    #     context.execute_async(1, [int(in_gpu), int(out_gpu)], stream.handle, None)
    #     cuda.memcpy_dtoh_async(out_cpu, out_gpu, stream)
    #     stream.synchronize()
    # sync version
    cuda.memcpy_htod(in_gpu, inputs)
    context.execute(1, [int(in_gpu), int(out_gpu)])
    cuda.memcpy_dtoh(out_cpu, out_gpu)
    return out_cpu
if __name__ == "__main__":
inputs = np.random.random((1, 1, 29, 29)).astype(np.float32)
tensorrt_file_name = '/content/drive/My Drive/capstone1/CAN/bert.plan'
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
trt_runtime = trt.Runtime(TRT_LOGGER)
with open(tensorrt_file_name, 'rb') as f:
engine_data = f.read()
engine = trt_runtime.deserialize_cuda_engine(engine_data)
# engine = build_engine(model_path)
context = engine.create_execution_context()
for _ in range(10):
t1 = time.time()
in_cpu, out_cpu, in_gpu, out_gpu, stream = alloc_buf(engine)
res = inference(engine, context, inputs.reshape(-1), out_cpu, in_gpu, out_gpu, stream)
print(res)
print("cost time: ", time.time()-t1) | StarcoderdataPython |
3368407 | <filename>day3/main.py
import math
def get_increased_modulo_row(start, inc, limit):
    """Yield *start*, then keep adding *inc*, wrapping modulo *limit*."""
    current = start
    while True:
        yield current
        current = (current + inc) % limit
def get_hit_trees_for_slope(field, row_inc, col_inc, width=31):
    """Count the trees ('#') hit while sledding down *field* on a slope.

    Args:
        field: Grid of rows; each row is indexable by column ('#' = tree).
        row_inc: Rows moved per step.
        col_inc: Columns moved per step (wraps around the repeating pattern).
        width: Number of usable columns per row. Defaults to 31 to match the
            original puzzle input, whose 31-character rows also carry a
            trailing newline that must not be indexed.

    Returns:
        The number of trees hit (also printed).
    """
    trees_hit_count = 0
    col = 0
    for row in range(0, len(field), row_inc):
        if field[row][col] == "#":
            trees_hit_count += 1
        # The pattern repeats horizontally, so wrap the column index.
        col = (col + col_inc) % width
    print(f"[Slope: {row_inc} down, {col_inc} right] Trees hit: {trees_hit_count}")
    return trees_hit_count
def part1(field):
    """Solve part 1: count trees hit on the (1 down, 3 right) slope."""
    hits = get_hit_trees_for_slope(field, 1, 3)
    print(f"[PART 1] Trees hit: {hits}")
def part2(field):
    """Solve part 2: multiply the tree counts of the five given slopes."""
    slopes = [(1, 1), (1, 3), (1, 5), (1, 7), (2, 1)]
    slope_hit_count = [
        get_hit_trees_for_slope(field, row_inc, col_inc)
        for row_inc, col_inc in slopes
    ]
    print(f"[PART 2] Trees hit: {math.prod(slope_hit_count)}")
if __name__ == "__main__":
with open("input.txt", "r") as input_file:
field = [[square for square in row] for row in input_file.readlines()]
part1(field)
part2(field)
| StarcoderdataPython |
1613259 | <reponame>benchsci/pazel<gh_stars>0
"""Entrypoint for generating Bazel BUILD files for a Python project."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import json
from pazel.generate_rule import parse_script_and_generate_rule
from pazel.helpers import get_build_file_path
from pazel.helpers import is_ignored
from pazel.helpers import is_python_file
from pazel.helpers import extract_dependencies
from pazel.output_build import output_build_file
from pazel.parse_build import get_ignored_rules
from pazel.pazel_extensions import parse_pazel_extensions
def app(input_path, project_root, contains_pre_installed_packages, pazelrc_path, requirements_file):
    """Generate BUILD file(s) for a Python script or a directory of Python scripts.

    Args:
        input_path (str): Path to a Python file or to a directory containing Python file(s) for
            which BUILD files are generated.
        project_root (str): Imports in the Python files are relative to this path.
        contains_pre_installed_packages (bool): Whether the environment is allowed to contain
            pre-installed packages or whether only the Python standard library is available.
        pazelrc_path (str): Path to .pazelrc config file for customizing pazel.
        requirements_file (str): Path to the requirements.txt file whose pinned
            packages are made available to rule generation.

    Raises:
        RuntimeError: input_path is neither a directory nor a Python file.
    """
    # Parse user-defined extensions to pazel.
    output_extension, custom_bazel_rules, custom_import_inference_rules, import_name_to_pip_name, \
        local_import_name_to_dep, requirement_load = parse_pazel_extensions(pazelrc_path)
    # Pip packages declared in the requirements file.
    pipenv_packages = extract_dependencies(requirements_file)
    # Handle directories.
    if os.path.isdir(input_path):
        # Traverse the directory recursively.
        for dirpath, _, filenames in os.walk(input_path):
            build_source = ''
            # Parse ignored rules in an existing BUILD file, if any.
            build_file_path = get_build_file_path(dirpath)
            # NOTE(review): directories that already have a BUILD file are
            # skipped entirely, so get_ignored_rules below always runs against
            # a non-existent path -- confirm this is intended (it diverges from
            # the comment above and from the single-file branch).
            if os.path.exists(build_file_path):
                continue
            ignored_rules = get_ignored_rules(build_file_path)
            for filename in sorted(filenames):
                path = os.path.join(dirpath, filename)
                # If a Python file is met and it is not in the list of ignored rules,
                # generate a Bazel rule for it.
                if is_python_file(path) and not is_ignored(path, ignored_rules):
                    new_rule = parse_script_and_generate_rule(path, project_root,
                                                              contains_pre_installed_packages,
                                                              custom_bazel_rules,
                                                              custom_import_inference_rules,
                                                              import_name_to_pip_name,
                                                              local_import_name_to_dep, pipenv_packages)
                    print(f"Generating build file for {path}")
                    # Add the new rule and a newline between it and any previous rules.
                    if new_rule:
                        if build_source:
                            build_source += 2*'\n'
                        # new_rule is iterated here, i.e. treated as a sequence
                        # of rule strings.
                        for rule in new_rule:
                            build_source += rule + 2*'\n'
            # If Python files were found, output the BUILD file.
            if build_source != '' or ignored_rules:
                output_build_file(build_source, ignored_rules, output_extension, custom_bazel_rules,
                                  build_file_path, requirement_load)
    # Handle single Python file.
    elif is_python_file(input_path):
        build_source = ''
        # Parse ignored rules in an existing BUILD file, if any.
        build_file_path = get_build_file_path(input_path)
        ignored_rules = get_ignored_rules(build_file_path)
        # Check that the script is not in the list of ignored rules.
        if not is_ignored(input_path, ignored_rules):
            # NOTE(review): the directory branch iterates the return value of
            # parse_script_and_generate_rule as a sequence, while here it is
            # used directly as the BUILD source -- verify both usages agree
            # with the function's actual return type.
            build_source = parse_script_and_generate_rule(input_path, project_root,
                                                          contains_pre_installed_packages,
                                                          custom_bazel_rules,
                                                          custom_import_inference_rules,
                                                          import_name_to_pip_name,
                                                          local_import_name_to_dep, pipenv_packages)
        # If Python files were found, output the BUILD file.
        if build_source != '' or ignored_rules:
            output_build_file(build_source, ignored_rules, output_extension, custom_bazel_rules,
                              build_file_path, requirement_load)
    else:
        raise RuntimeError("Invalid input path %s." % input_path)
def main():
    """Parse command-line flags and generate the BUILD files accordingly."""
    cwd = os.getcwd()
    pazelrc_default = os.path.join(cwd, '.pazelrc')
    requirements_default = os.path.join(cwd, 'requirements.txt')

    parser = argparse.ArgumentParser(description='Generate Bazel BUILD files for a Python project.')
    parser.add_argument('input_path', nargs='?', type=str, default=cwd,
                        help='Target Python file or directory of Python files.'
                             ' Defaults to the current working directory.')
    parser.add_argument('-r', '--project-root', type=str, default=cwd,
                        help='Project root directory. Imports are relative to this path.'
                             ' Defaults to the current working directory.')
    parser.add_argument('-p', '--pre-installed-packages', action='store_true',
                        help='Target will be run in an environment with packages pre-installed.'
                             ' Affects which packages are listed as pip-installable.')
    parser.add_argument('-c', '--pazelrc', type=str, default=pazelrc_default,
                        help='Path to .pazelrc file.')
    parser.add_argument('-f', '--requirements', type=str, default=requirements_default,
                        help='Path to requirements.txt file.')
    args = parser.parse_args()

    # A requirements file is mandatory: rule generation needs the pinned deps.
    if not os.path.exists(args.requirements):
        raise Exception("create requirements.txt using pip-compile tool")

    # A user-supplied (non-default) .pazelrc must actually exist.
    if args.pazelrc != pazelrc_default:
        assert os.path.isfile(args.pazelrc), ".pazelrc file %s not found." % args.pazelrc

    app(args.input_path, args.project_root, args.pre_installed_packages, args.pazelrc, args.requirements)
    print('Generated BUILD files for %s.' % args.input_path)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
10696 | import logging
import time
from qupy.framing.slip import Slip
from qupy.interface.serial import SerialPort
from qupy.interface.errors import InterfaceTimeoutError, InterfaceIOError, InterfaceError
from qupy.comm.client import CommClient
logging.basicConfig(level=logging.DEBUG)
if __name__ == '__main__':
    # Interactive demo: SLIP framing over a serial port, driven by a
    # CommClient.  Reads a line from stdin, sends it, prints the reply, and
    # exits when the reply starts with the byte 'p'.
    s = SerialPort()
    f = Slip()
    c = CommClient(s, f)
    connect = True
    while True:
        if connect:
            # (Re)open the serial port, retrying once a second until it works.
            try:
                s.open()
            except InterfaceIOError as e:
                time.sleep(1.0)
                continue
            c.start()
            connect = False
        try:
            print('ask...')
            data = input()
            d = c.ask(data.encode('utf-8'))
            print('data:', d)
            # A reply beginning with 'p' terminates the loop.
            if len(d) > 0 and d[0] == ord('p'):
                break
        except InterfaceIOError as e:
            # I/O failure: tear everything down and trigger a reconnect.
            print('ask io error', str(e))
            c.stop()
            s.close()
            connect = True
        except InterfaceTimeoutError as e:
            # NOTE(review): after a timeout the port is closed but `connect`
            # stays False, so the loop never reopens it and the next ask will
            # fail -- looks like `connect = True` is missing here; confirm.
            print('timeout')
            c.stop()
            s.close()
| StarcoderdataPython |
1656363 | <filename>skfore/preprocessing/Transformer.py
"""
Transformer: time series' transformation module
===============================================================================
Overview
-------------------------------------------------------------------------------
This module contains time series' transformation methods (log, log10, sqrt,
cbrt, boxcox), trending (linear, cuadratic, cubic, diff1, diff2) and seasonal
(poly2, diff) removal methods.
Examples
-------------------------------------------------------------------------------
Transform
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
>>> ts = pandas.Series.from_csv('datasets/champagne_short.csv', index_col = 0, header = 0)
>>> mytransform = Transformer(trans = 'log')
>>> mytransform
Transformer(trans = log, trend = None, seasonal = None)
>>> transformed_ts = mytransform.fit(ts = ts)
>>> transformed_ts
Month
1964-01-01 7.942718
1964-02-01 7.890583
1964-03-01 7.921173
1964-04-01 7.908755
1964-05-01 7.988204
1964-06-01 8.018296
1964-07-01 7.732808
1964-08-01 7.701652
1964-09-01 7.980024
1964-10-01 8.366603
1964-11-01 8.659387
1964-12-01 8.897272
Name: Perrin, dtype: float64
>>> ts = pandas.Series.from_csv('datasets/champagne_short.csv', index_col = 0, header = 0)
>>> transformed_ts = Transformer(trans = 'log10').fit(ts = ts)
>>> transformed_ts
Month
1964-01-01 3.449478
1964-02-01 3.426836
1964-03-01 3.440122
1964-04-01 3.434729
1964-05-01 3.469233
1964-06-01 3.482302
1964-07-01 3.358316
1964-08-01 3.344785
1964-09-01 3.465680
1964-10-01 3.633569
1964-11-01 3.760724
1964-12-01 3.864036
Name: Perrin, dtype: float64
>>> ts = pandas.Series.from_csv('datasets/champagne_short.csv', index_col = 0, header = 0)
>>> transformed_ts = Transformer(trans = 'sqrt').fit(ts = ts)
>>> transformed_ts
Month
1964-01-01 53.056574
1964-02-01 51.691392
1964-03-01 52.488094
1964-04-01 52.163205
1964-05-01 54.277067
1964-06-01 55.099909
1964-07-01 47.770284
1964-08-01 47.031904
1964-09-01 54.055527
1964-10-01 65.582010
1964-11-01 75.921012
1964-12-01 85.510233
Name: Perrin, dtype: float64
>>> ts = pandas.Series.from_csv('datasets/champagne_short.csv', index_col = 0, header = 0)
>>> transformed_ts = Transformer(trans = 'cbrt').fit(ts = ts)
>>> transformed_ts
Month
1964-01-01 14.119722
1964-02-01 13.876464
1964-03-01 14.018683
1964-04-01 13.960775
1964-05-01 14.335436
1964-06-01 14.479956
1964-07-01 13.165536
1964-08-01 13.029519
1964-09-01 14.296402
1964-10-01 16.262594
1964-11-01 17.929767
1964-12-01 19.409398
Name: Perrin, dtype: float64
>>> ts = pandas.Series.from_csv('datasets/champagne_short.csv', index_col = 0, header = 0)
>>> transformed_ts = Transformer(trans = 'boxcox').fit(ts = ts)
>>> transformed_ts
Month
1964-01-01 0.504795
1964-02-01 0.504795
1964-03-01 0.504795
1964-04-01 0.504795
1964-05-01 0.504795
1964-06-01 0.504795
1964-07-01 0.504795
1964-08-01 0.504795
1964-09-01 0.504795
1964-10-01 0.504795
1964-11-01 0.504795
1964-12-01 0.504795
Name: Perrin, dtype: float64
Removing trend
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
>>> ts = pandas.Series.from_csv('datasets/champagne_short.csv', index_col = 0, header = 0)
>>> transformed_ts = Transformer(trend = 'linear').fit(ts = ts)
>>> transformed_ts
Month
1964-01-01 993.871795
1964-02-01 549.592075
1964-03-01 331.312354
1964-04-01 -3.967366
1964-05-01 -80.247086
1964-06-01 -291.526807
1964-07-01 -1346.806527
1964-08-01 -1718.086247
1964-09-01 -1309.365967
1964-10-01 -231.645688
1964-11-01 930.074592
1964-12-01 2176.794872
dtype: float64
>>> ts = pandas.Series.from_csv('datasets/champagne_short.csv', index_col = 0, header = 0)
>>> transformed_ts = Transformer(trend = 'cuadratic').fit(ts = ts)
>>> transformed_ts
Month
1964-01-01 -578.005495
1964-02-01 -164.897602
1964-03-01 302.732767
1964-04-01 481.885614
1964-05-01 748.560939
1964-06-01 708.758741
1964-07-01 -346.520979
1964-08-01 -889.278222
1964-09-01 -823.512987
1964-10-01 -260.225275
1964-11-01 215.584915
1964-12-01 604.917582
dtype: float64
>>> ts = pandas.Series.from_csv('datasets/champagne_short.csv', index_col = 0, header = 0)
>>> transformed_ts = Transformer(trend = 'cubic').fit(ts = ts)
>>> transformed_ts
Month
1964-01-01 196.725275
1964-02-01 -235.327672
1964-03-01 -190.277722
1964-04-01 -105.031635
1964-05-01 302.503830
1964-06-01 544.421911
1964-07-01 -182.184149
1964-08-01 -443.221112
1964-09-01 -236.595738
1964-10-01 232.785215
1964-11-01 286.014985
1964-12-01 -169.813187
dtype: float64
>>> ts = pandas.Series.from_csv('datasets/champagne_short.csv', index_col = 0, header = 0)
>>> transformed_ts = Transformer(trend = 'diff1').fit(ts = ts)
>>> transformed_ts
Month
1964-01-01 0
1964-02-01 -143
1964-03-01 83
1964-04-01 -34
1964-05-01 225
1964-06-01 90
1964-07-01 -754
1964-08-01 -70
1964-09-01 710
1964-10-01 1379
1964-11-01 1463
1964-12-01 1548
dtype: int64
>>> ts = pandas.Series.from_csv('datasets/champagne_short.csv', index_col = 0, header = 0)
>>> transformed_ts = Transformer(trend = 'diff2').fit(ts = ts)
>>> transformed_ts
Month
1964-01-01 0
1964-02-01 0
1964-03-01 -60
1964-04-01 49
1964-05-01 191
1964-06-01 315
1964-07-01 -664
1964-08-01 -824
1964-09-01 640
1964-10-01 2089
1964-11-01 2842
1964-12-01 3011
dtype: int64
Removing seasonality
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
>>> ts = pandas.Series.from_csv('datasets/champagne_short.csv', index_col = 0, header = 0)
>>> transformed_ts = Transformer(seasonal = 'poly2').fit(ts = ts)
>>> transformed_ts
Month
1964-01-01 -578.005495
1964-02-01 -164.897602
1964-03-01 302.732767
1964-04-01 481.885614
1964-05-01 748.560939
1964-06-01 708.758741
1964-07-01 -346.520979
1964-08-01 -889.278222
1964-09-01 -823.512987
1964-10-01 -260.225275
1964-11-01 215.584915
1964-12-01 604.917582
dtype: float64
>>> ts = pandas.Series.from_csv('datasets/champagne_short.csv', index_col = 0, header = 0)
>>> transformed_ts = Transformer(seasonal = 'diff').fit(ts = ts)
>>> transformed_ts
Month
1964-01-01 0
1964-02-01 0
1964-03-01 0
1964-04-01 0
1964-05-01 0
1964-06-01 0
1964-07-01 0
1964-08-01 0
1964-09-01 0
1964-10-01 0
1964-11-01 0
1964-12-01 0
dtype: int64
Restore seasonality
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
>>> ts = pandas.Series.from_csv('datasets/champagne_short.csv', index_col = 0, header = 0)
>>> mytransform = Transformer(seasonal = 'poly2')
>>> transformed = mytransform.fit(ts)
>>> original = mytransform.inverse_transform(transformed)
>>> original
Month
1964-01-01 2815.0
1964-02-01 2672.0
1964-03-01 2755.0
1964-04-01 2721.0
1964-05-01 2946.0
1964-06-01 3036.0
1964-07-01 2282.0
1964-08-01 2212.0
1964-09-01 2922.0
1964-10-01 4301.0
1964-11-01 5764.0
1964-12-01 7312.0
dtype: float64
>>> ts = pandas.Series.from_csv('datasets/champagne_short.csv', index_col = 0, header = 0)
>>> mytransform = Transformer(seasonal = 'diff')
>>> transformed = mytransform.fit(ts)
>>> original = mytransform.inverse_transform(transformed)
>>> original
Month
1964-01-01 2815
1964-02-01 2672
1964-03-01 2755
1964-04-01 2721
1964-05-01 2946
1964-06-01 3036
1964-07-01 2282
1964-08-01 2212
1964-09-01 2922
1964-10-01 4301
1964-11-01 5764
1964-12-01 7312
dtype: int64
inverse_transform trending
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
>>> ts = pandas.Series.from_csv('datasets/champagne_short.csv', index_col = 0, header = 0)
>>> mytransform = Transformer(trend = 'linear')
>>> transformed = mytransform.fit(ts)
>>> original = mytransform.inverse_transform(transformed)
>>> original
Month
1964-01-01 2815.0
1964-02-01 2672.0
1964-03-01 2755.0
1964-04-01 2721.0
1964-05-01 2946.0
1964-06-01 3036.0
1964-07-01 2282.0
1964-08-01 2212.0
1964-09-01 2922.0
1964-10-01 4301.0
1964-11-01 5764.0
1964-12-01 7312.0
dtype: float64
>>> ts = pandas.Series.from_csv('datasets/champagne_short.csv', index_col = 0, header = 0)
>>> mytransform = Transformer(trend = 'cuadratic')
>>> transformed = mytransform.fit(ts)
>>> original = mytransform.inverse_transform(transformed)
>>> original
Month
1964-01-01 2815.0
1964-02-01 2672.0
1964-03-01 2755.0
1964-04-01 2721.0
1964-05-01 2946.0
1964-06-01 3036.0
1964-07-01 2282.0
1964-08-01 2212.0
1964-09-01 2922.0
1964-10-01 4301.0
1964-11-01 5764.0
1964-12-01 7312.0
dtype: float64
>>> ts = pandas.Series.from_csv('datasets/champagne_short.csv', index_col = 0, header = 0)
>>> mytransform = Transformer(trend = 'cuadratic')
>>> transformed = mytransform.fit(ts)
>>> original = mytransform.inverse_transform(transformed)
>>> original
Month
1964-01-01 2815.0
1964-02-01 2672.0
1964-03-01 2755.0
1964-04-01 2721.0
1964-05-01 2946.0
1964-06-01 3036.0
1964-07-01 2282.0
1964-08-01 2212.0
1964-09-01 2922.0
1964-10-01 4301.0
1964-11-01 5764.0
1964-12-01 7312.0
dtype: float64
>>> ts = pandas.Series.from_csv('datasets/champagne_short.csv', index_col = 0, header = 0)
>>> mytransform = Transformer(trend = 'cubic')
>>> transformed = mytransform.fit(ts)
>>> original = mytransform.inverse_transform(transformed)
>>> original
Month
1964-01-01 2815.0
1964-02-01 2672.0
1964-03-01 2755.0
1964-04-01 2721.0
1964-05-01 2946.0
1964-06-01 3036.0
1964-07-01 2282.0
1964-08-01 2212.0
1964-09-01 2922.0
1964-10-01 4301.0
1964-11-01 5764.0
1964-12-01 7312.0
dtype: float64
>>> ts = pandas.Series.from_csv('datasets/champagne_short.csv', index_col = 0, header = 0)
>>> mytransform = Transformer(trend = 'diff1')
>>> transformed = mytransform.fit(ts)
>>> original = mytransform.inverse_transform(transformed)
>>> original
Month
1964-01-01 2815
1964-02-01 2672
1964-03-01 2755
1964-04-01 2721
1964-05-01 2946
1964-06-01 3036
1964-07-01 2282
1964-08-01 2212
1964-09-01 2922
1964-10-01 4301
1964-11-01 5764
1964-12-01 7312
dtype: int64
>>> ts = pandas.Series.from_csv('datasets/champagne_short.csv', index_col = 0, header = 0)
>>> mytransform = Transformer(trend = 'diff2')
>>> transformed = mytransform.fit(ts)
>>> original = mytransform.inverse_transform(transformed)
>>> original
Month
1964-01-01 2815
1964-02-01 2672
1964-03-01 2755
1964-04-01 2721
1964-05-01 2946
1964-06-01 3036
1964-07-01 2282
1964-08-01 2212
1964-09-01 2922
1964-10-01 4301
1964-11-01 5764
1964-12-01 7312
dtype: int64
inverse_transform transformation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
>>> ts = pandas.Series.from_csv('datasets/champagne_short.csv', index_col = 0, header = 0)
>>> mytransform = Transformer(trans = 'log')
>>> transformed = mytransform.fit(ts)
>>> original = mytransform.inverse_transform(transformed)
>>> original
Month
1964-01-01 2815.0
1964-02-01 2672.0
1964-03-01 2755.0
1964-04-01 2721.0
1964-05-01 2946.0
1964-06-01 3036.0
1964-07-01 2282.0
1964-08-01 2212.0
1964-09-01 2922.0
1964-10-01 4301.0
1964-11-01 5764.0
1964-12-01 7312.0
Name: Perrin, dtype: float64
>>> ts = pandas.Series.from_csv('datasets/champagne_short.csv', index_col = 0, header = 0)
>>> mytransform = Transformer(trans = 'log10')
>>> transformed = mytransform.fit(ts)
>>> original = mytransform.inverse_transform(transformed)
>>> original
Month
1964-01-01 2815.0
1964-02-01 2672.0
1964-03-01 2755.0
1964-04-01 2721.0
1964-05-01 2946.0
1964-06-01 3036.0
1964-07-01 2282.0
1964-08-01 2212.0
1964-09-01 2922.0
1964-10-01 4301.0
1964-11-01 5764.0
1964-12-01 7312.0
Name: Perrin, dtype: float64
>>> ts = pandas.Series.from_csv('datasets/champagne_short.csv', index_col = 0, header = 0)
>>> mytransform = Transformer(trans = 'sqrt')
>>> transformed = mytransform.fit(ts)
>>> original = mytransform.inverse_transform(transformed)
>>> original
Month
1964-01-01 2815.0
1964-02-01 2672.0
1964-03-01 2755.0
1964-04-01 2721.0
1964-05-01 2946.0
1964-06-01 3036.0
1964-07-01 2282.0
1964-08-01 2212.0
1964-09-01 2922.0
1964-10-01 4301.0
1964-11-01 5764.0
1964-12-01 7312.0
dtype: float64
>>> ts = pandas.Series.from_csv('datasets/champagne_short.csv', index_col = 0, header = 0)
>>> mytransform = Transformer(trans = 'cbrt')
>>> transformed = mytransform.fit(ts)
>>> original = mytransform.inverse_transform(transformed)
>>> original
Month
1964-01-01 2815.0
1964-02-01 2672.0
1964-03-01 2755.0
1964-04-01 2721.0
1964-05-01 2946.0
1964-06-01 3036.0
1964-07-01 2282.0
1964-08-01 2212.0
1964-09-01 2922.0
1964-10-01 4301.0
1964-11-01 5764.0
1964-12-01 7312.0
dtype: float64
>>> ts = pandas.Series.from_csv('datasets/champagne_short.csv', index_col = 0, header = 0)
>>> mytransform = Transformer(trans = 'boxcox')
>>> transformed = mytransform.fit(ts)
>>> original = mytransform.inverse_transform(transformed)
>>> original
Month
1964-01-01 2815.000000
1964-02-01 2671.999999
1964-03-01 2754.999999
1964-04-01 2721.000001
1964-05-01 2946.000000
1964-06-01 3036.000001
1964-07-01 2282.000000
1964-08-01 2212.000001
1964-09-01 2921.999999
1964-10-01 4300.999998
1964-11-01 5763.999989
1964-12-01 7311.999999
Name: Perrin, dtype: float64
"""
from skfore.extras import *
import pandas
import numpy
import scipy
import sklearn
from sklearn import linear_model
class Transformer():
    """ Class to transform the series

    Applies up to three steps in order during ``fit``: a value transformation
    (``trans``), trend removal (``trend``) and seasonality removal
    (``seasonal``).  ``inverse_transform`` undoes the steps in reverse order
    using the state captured during ``fit``.

    Args:
        trans (log, log10, sqrt, cbrt, boxcox): Transformation to apply
        trend (linear, cuadratic, cubic, diff1, diff2): Trend to apply
        seasonal (poly2, diff): Seasonality to apply
    """

    def __init__(self, trans=None, trend=None, seasonal=None):
        self.trans = trans
        self.trend = trend
        self.seasonal = seasonal
        self.ts = None
        # Frequency integer of the transformed time series
        self.intfrq = None
        # Time series after transformation and inverse_transform
        self.residuals = None
        self.original = None
        # Transformation values to inverse_transform series
        self.fitting = None
        self.diff = None
        self.model = None
        # Box Cox transformation lambda (if necessary)
        self.lmbda = None

    def __repr__(self):
        return 'Transformer(trans = ' + str(self.trans) + ', trend = ' + str(self.trend) + ', seasonal = ' + str(self.seasonal) + ')'

    def fit(self, ts):
        """ Return the transformed series

        Args:
            ts: Time series to apply transformation

        Raises:
            ValueError: if ``trans``, ``trend`` or ``seasonal`` holds an
                unsupported value.
        """
        self.ts = ts
        # Get frequency integer (seasonal period) of the series.
        self.intfrq = get_frequency(ts)

        # --- Step 1: value transformation ---
        if self.trans == 'log':
            ts_trans = numpy.log(ts)
        elif self.trans == 'log10':
            ts_trans = numpy.log10(ts)
        elif self.trans == 'sqrt':
            ts_trans = numpy.sqrt(ts)
        elif self.trans == 'cbrt':
            ts_trans = numpy.cbrt(ts)
        elif self.trans == 'boxcox':
            bc, lmb = scipy.stats.boxcox(ts)
            # Lambda is needed later by inv_boxcox in inverse_transform.
            self.lmbda = lmb
            ts_trans = pandas.Series((v for v in bc), index=ts.index, name=ts.name)
        elif self.trans is None:
            ts_trans = ts
        else:
            message_trans = 'Invalid transformation value: ' + self.trans
            raise ValueError(message_trans)

        # --- Step 2: trend removal ---
        if self.trend == 'linear':
            X = ts_trans.index.factorize()[0].reshape(-1, 1)
            y = ts_trans
            model = sklearn.linear_model.LinearRegression()
            fitting = model.fit(X, y)
            self.fitting = fitting
            trend = pandas.Series(fitting.predict(X), index=y.index)
            ts_trend = y.subtract(trend)
        elif self.trend == 'cuadratic':
            X = ts_trans.index.factorize()[0].reshape(-1, 1)
            y = ts_trans
            model = sklearn.preprocessing.PolynomialFeatures(degree=2)
            self.model = model
            # NOTE(review): PolynomialFeatures.fit returns the transformer
            # itself, not the expanded design matrix -- fit_transform(X) looks
            # intended here and in the branches below; confirm against the
            # module doctests.
            X_ = model.fit(X)
            model = linear_model.LinearRegression()
            fitting = model.fit(X_, y)
            self.fitting = fitting
            trend = fitting.predict(X_)
            ts_trend = y.subtract(trend)
        elif self.trend == 'cubic':
            X = ts_trans.index.factorize()[0].reshape(-1, 1)
            y = ts_trans
            model = sklearn.preprocessing.PolynomialFeatures(degree=3)
            self.model = model
            X_ = model.fit(X)
            model = linear_model.LinearRegression()
            fitting = model.fit(X_, y)
            self.fitting = fitting
            trend = fitting.predict(X_)
            ts_trend = y.subtract(trend)
        elif self.trend == 'diff1':
            y = ts_trans
            diff = list()
            diff.append(0)
            # First original value is kept so the differencing can be undone.
            self.diff = list()
            self.diff.append(y[0])
            for i in range(1, len(y)):
                value = y[i] - y[i-1]
                diff.append(value)
            trend = diff
            detrended = pandas.Series((v for v in trend), index=ts_trans.index)
            ts_trend = detrended
        elif self.trend == 'diff2':
            y = ts_trans
            diff = list()
            diff.append(0)
            diff.append(0)
            # First two original values seed the lag-2 inverse differencing.
            self.diff = list()
            self.diff.append(y[0])
            self.diff.append(y[1])
            for i in range(2, len(y)):
                value = y[i] - y[i - 2]
                diff.append(value)
            trend = diff
            detrended = pandas.Series((v for v in trend), index=ts_trans.index)
            ts_trend = detrended
        elif self.trend is None:
            ts_trend = ts_trans
            trend = [0 for i in range(0, len(ts_trans))]
        else:
            message_trend = 'Invalid trending value: ' + self.trend
            raise ValueError(message_trend)

        # --- Step 3: seasonality removal ---
        if self.seasonal == 'poly2':
            X = ts_trend.index.factorize()[0].reshape(-1, 1)
            # Position within the seasonal cycle.
            X = X % self.intfrq
            y = ts_trend
            model = sklearn.preprocessing.PolynomialFeatures(degree=2)
            # NOTE(review): this overwrites self.model (and, below,
            # self.fitting) set by a polynomial trend; combining
            # trend='cuadratic'/'cubic' with seasonal='poly2' makes
            # inverse_transform reuse the wrong model -- confirm.
            self.model = model
            X_ = model.fit(X)
            model = linear_model.LinearRegression()
            fitting = model.fit(X_, y)
            seasonality = fitting.predict(X_)
            deseasonal = pandas.Series((v for v in seasonality), index=ts_trend.index)
            ts_seasonal = y.subtract(deseasonal)
        elif self.seasonal == 'diff':
            y = ts_trend
            diff = list()
            # First full period is kept so the seasonal diff can be undone.
            self.diff = list()
            for j in range(self.intfrq):
                diff.append(0)
                self.diff.append(y[j])
            for i in range(self.intfrq, len(y)):
                value = y[i] - y[i - self.intfrq]
                diff.append(value)
            seasonality = diff
            deseasonal = pandas.Series((v for v in seasonality), index=ts_trend.index)
            ts_seasonal = deseasonal
        elif self.seasonal is None:
            ts_seasonal = ts_trend
            seasonality = [0 for i in range(0, len(ts_trend))]
        else:
            message_seasonal = 'Invalid seasonal value: ' + self.seasonal
            raise ValueError(message_seasonal)

        # Keep the regression fitted by the last polynomial step so that
        # inverse_transform can re-apply it.
        # Fixed: the original tested ``self.trans == 'linear'``, but 'linear'
        # is a *trend* value (a trans of 'linear' raises ValueError above), so
        # that clause could never match; the intended check is on self.trend.
        if (self.seasonal == 'poly2' or self.trend == 'linear'
                or self.trend == 'cuadratic' or self.trend == 'cubic'):
            self.fitting = fitting
        self.residuals = ts_seasonal
        return self.residuals

    def inverse_transform(self, ts):
        """ inverse_transform series to its original values

        Args:
            ts: Time series to inverse_transform
        """
        # --- Undo seasonality ---
        if self.seasonal == 'poly2':
            X = ts.index.factorize()[0].reshape(-1, 1)
            X = X % self.intfrq
            X_ = self.model.fit(X)
            seasonality = self.fitting.predict(X_)
            ts_deseasonal = [ts[i] + seasonality[i] for i in range(len(ts))]
            ts_deseasonal = pandas.Series((v for v in ts_deseasonal), index=ts.index)
        elif self.seasonal == 'diff':
            len_forecast = len(ts)
            ts_deseasonal = list()
            # Seed with the stored first period, then accumulate forward.
            for j in range(0, self.intfrq):
                ts_deseasonal.append(self.diff[j])
            for i in range(self.intfrq, len(ts)):
                value = ts[i] + ts_deseasonal[i - self.intfrq]
                ts_deseasonal.append(value)
            ts_deseasonal = pandas.Series((v for v in ts_deseasonal), index=ts.index)
            ts_deseasonal = ts_deseasonal[-len_forecast:]
        else:
            ts_deseasonal = ts

        # --- Undo trend ---
        if self.trend == 'linear':
            X = ts.index.factorize()[0].reshape(-1, 1)
            trending = self.fitting.predict(X)
            ts_detrend = [ts_deseasonal[i] + trending[i] for i in range(len(ts_deseasonal))]
            ts_detrend = pandas.Series((v for v in ts_detrend), index=ts_deseasonal.index)
        elif self.trend == 'cuadratic' or self.trend == 'cubic':
            X = ts.index.factorize()[0].reshape(-1, 1)
            X_ = self.model.fit(X)
            trending = self.fitting.predict(X_)
            ts_detrend = [ts_deseasonal[i] + trending[i] for i in range(len(ts_deseasonal))]
            ts_detrend = pandas.Series((v for v in ts_detrend), index=ts_deseasonal.index)
        elif self.trend == 'diff1':
            ts_detrend = list()
            ts_detrend.append(self.diff[0])
            for i in range(1, len(ts_deseasonal)):
                value = ts_deseasonal[i] + ts_detrend[i-1]
                ts_detrend.append(value)
            ts_detrend = pandas.Series((v for v in ts_detrend), index=ts_deseasonal.index)
        elif self.trend == 'diff2':
            ts_detrend = list()
            ts_detrend.append(self.diff[0])
            ts_detrend.append(self.diff[1])
            for i in range(2, len(ts_deseasonal)):
                value = ts_deseasonal[i] + ts_detrend[i-2]
                ts_detrend.append(value)
            ts_detrend = pandas.Series((v for v in ts_detrend), index=ts_deseasonal.index)
        else:
            ts_detrend = ts_deseasonal

        # --- Undo value transformation ---
        if self.trans == 'log':
            ts_detrans = numpy.exp(ts_detrend)
        elif self.trans == 'log10':
            ts_detrans = scipy.special.exp10(ts_detrend)
        elif self.trans == 'sqrt':
            ts_detrans = [ts_detrend[i]**2 for i in range(len(ts_detrend))]
            ts_detrans = pandas.Series((v for v in ts_detrans), index=ts_detrend.index)
        elif self.trans == 'cbrt':
            ts_detrans = [ts_detrend[i]**3 for i in range(len(ts_detrend))]
            ts_detrans = pandas.Series((v for v in ts_detrans), index=ts_detrend.index)
        elif self.trans == 'boxcox':
            ts_detrans = scipy.special.inv_boxcox(ts_detrend, self.lmbda)
        else:
            ts_detrans = ts_detrend
        self.original = ts_detrans
        return self.original
if __name__ == "__main__":
    # Run the usage examples embedded in the module docstring as doctests.
    import doctest
    doctest.testmod()
| StarcoderdataPython |
3239155 | <reponame>IronHeart7334/Maelstrom
"""
This module replaces the old distinction between player and AI teams
"""
from maelstrom.dataClasses.characterManager import manageCharacter
from maelstrom.inputOutput.teamDisplay import getDetailedTeamData
from maelstrom.inputOutput.screens import Screen
from maelstrom.util.serialize import AbstractJsonSerialable
class User(AbstractJsonSerialable):
    """
    A User simply contains a name, team, and inventory.
    Future versions will also store campaign info and other choices in this
    """

    def __init__(self, **kwargs):
        """
        required kwargs:
        - name: str
        - team: Team
        - inventory: list of Items (defaults to [])
        """
        super().__init__(**dict(kwargs, type="User"))
        self.name = kwargs["name"]
        self.team = kwargs["team"]
        self.inventory = kwargs.get("inventory", [])
        self.addSerializedAttributes("name", "team", "inventory")

    def acquire(self, item):
        """Add *item* to this user's inventory."""
        self.inventory.append(item)

    def getAvailableItems(self):
        """Return the inventory items that are not currently equipped."""
        return [item for item in self.inventory if not item.equipped]

    def manage(self):
        """
        Displays the team management menu
        """
        screen = Screen()
        screen.setTitle(f'Manage {self.name}')
        options = ["Exit"]
        for member in self.team.members:
            screen.addBodyRow(member.getDisplayData())
            options.insert(0, member)
        for option in options:
            screen.addOption(option)
        managing = screen.displayAndChoose("Who do you wish to manage?")
        # Fixed: the original used `is not "Exit"`, an identity comparison with
        # a string literal (SyntaxWarning on CPython >= 3.8 and not guaranteed
        # to work); equality is the intended check.
        if managing != "Exit":
            manageCharacter(managing)

    def getDisplayData(self)->"List<str>":
        return [
            f'User {self.name}',
            f'Team:',
            "\n".join(getDetailedTeamData(self.team)),
            f'Items: {", ".join([str(item) for item in self.inventory])}'
        ]
| StarcoderdataPython |
3382110 | <gh_stars>0
import configparser
def get_configuration():
    """Load the [RHARDSTYLESPOTIFY_ACCESS] section of RHardstyleSpotifyAccess.ini.

    Returns the section as a ConfigParser section mapping.  Note that
    ConfigParser.read silently ignores a missing file, so a KeyError is raised
    here when the file (or the section) does not exist.
    """
    parser = configparser.ConfigParser()
    parser.read("RHardstyleSpotifyAccess.ini")
    return parser["RHARDSTYLESPOTIFY_ACCESS"]
1637722 | <reponame>yc19890920/DBlog
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import json
from django import forms
from app.blog.models import Tag, Category, Article, Suggest, BlogComment, CKeditorPictureFile
from django.utils.translation import ugettext_lazy as _
from libs.tools import clearHtmlTags
from auditlog.models import AuditlogContentype, AUDITLOG_EXTEND_TYPE
P = re.compile('^(\w|[-+=.])+@\w+([-.]\w+)*\.(\w+)$')
PicP = re.compile(ur'src="(\/media\/ckupload\/.*?)"')
class AdminLogForm(forms.Form):
    """Filter form for the admin audit log: time window plus content type."""

    start_time = forms.DateTimeField(label=_(u'开始时间'), required=False)
    end_time = forms.DateTimeField(label=_(u'结束时间'), required=False)
    content_type = forms.ChoiceField(label=_(u'类型'), required=False, initial="")

    def __init__(self, *args, **kwargs):
        super(AdminLogForm, self).__init__(*args, **kwargs)
        # Uniform width for every widget in the form.
        for field_name in self.fields:
            self.fields[field_name].widget.attrs.update({'style': 'width: 170px;'})
        # Choices are built at request time from the registered content types,
        # extended with the static extra types, with "all" prepended.
        choices = [(entry.content_type_id, entry.model_class)
                   for entry in AuditlogContentype.objects.all()]
        choices.extend(list(AUDITLOG_EXTEND_TYPE))
        choices.insert(0, ('', _(u'所有')))
        self.fields['content_type'].choices = choices
class ShowForm(forms.Form):
    # Single required name field (max 20 chars) used for display/confirmation.
    name_bak = forms.CharField(label=_(u'名称'), max_length=20, required=True)
    # name_bak = forms.CharField(_(u'名称'), required=True, max_length=20)
class TagForm(forms.ModelForm):
    """ModelForm for Tag: trims the name and rejects blanks and duplicates."""

    class Meta:
        model = Tag
        fields = ["name"]

    def clean_name(self):
        cleaned = self.cleaned_data['name'].strip()
        if not cleaned:
            raise forms.ValidationError(_(u"输入为空,操作失败"))
        # Exclude the instance being edited so renaming to itself is allowed.
        duplicates = Tag.objects.exclude(id=self.instance.pk).filter(name=cleaned)
        if duplicates.exists():
            raise forms.ValidationError(_(u"重复添加,添加失败"))
        return cleaned
class CategoryForm(forms.ModelForm):
    """ModelForm for Category: trims the name and rejects blanks and duplicates."""

    class Meta:
        model = Category
        fields = ["name"]

    def clean_name(self):
        cleaned = self.cleaned_data['name'].strip()
        if not cleaned:
            raise forms.ValidationError(_(u"输入为空,操作失败"))
        # Exclude the instance being edited so renaming to itself is allowed.
        duplicates = Category.objects.exclude(id=self.instance.pk).filter(name=cleaned)
        if duplicates.exists():
            raise forms.ValidationError(_(u"重复添加,添加失败"))
        return cleaned
class ArticleForm(forms.ModelForm):
    """Admin form for creating/editing articles.

    Besides field validation, referPicture() keeps CKEditor upload records
    (CKeditorPictureFile) linked to the article that references them.
    """

    tags = forms.ModelMultipleChoiceField(
        label=_(u'标签'),
        # queryset is assigned lazily in __init__ to avoid a DB query at
        # import time.
        queryset=None,
        required=True,
        widget=forms.SelectMultiple(attrs={
            "data-placeholder": _(u"请选择或输入查询"),
            "autocomplete": "off",
            "class": "select2 ",
        }), help_text=_(u"可选多个标签"))

    def __init__(self, *args, **kwargs):
        super(ArticleForm, self).__init__(*args, **kwargs)
        self.fields['tags'].queryset = Tag.objects.all()

    def clean_title(self):
        data = self.cleaned_data['title'].strip()
        if not data:
            raise forms.ValidationError(_(u"请填写标题"))
        return data

    def clean_content(self):
        data = self.cleaned_data['content'].strip()
        if not data:
            raise forms.ValidationError(_(u"请填写正文"))
        return data

    def clean_auth(self):
        data = self.cleaned_data['auth'].strip()
        if not data:
            raise forms.ValidationError(_(u"请填写作者"))
        return data

    def clean_abstract(self):
        data = self.cleaned_data['abstract'].strip()
        if not data:
            raise forms.ValidationError(_(u"请填写摘要"))
        return data

    def referPicture(self, obj):
        """Re-link CKEditor picture records referenced by obj's body/abstract."""
        article_id = obj.id
        abstract = self.cleaned_data['abstract']
        content = self.cleaned_data['content']
        # Every /media/ckupload/ path referenced by either field (deduplicated).
        paths = set(PicP.findall(content)) | set(PicP.findall(abstract))
        # Detach all pictures currently linked to this article, then re-attach
        # only those still referenced.
        CKeditorPictureFile.objects.filter(article_id=article_id).update(article_id=0)
        for filepath in paths:
            objpic = CKeditorPictureFile.objects.filter(filepath=filepath).first()
            if objpic is None:
                # Fixed: .first() returns None for an unknown/stale path; the
                # original dereferenced it unconditionally and crashed with
                # AttributeError.
                continue
            objpic.article_id = article_id
            objpic.save()

    class Meta:
        model = Article
        fields = ["title", "content", "abstract", 'auth', 'source', "status", "topped", 'category', 'tags']
class SuggestForm(forms.ModelForm):
    """Visitor feedback form: validates name, email format and message body."""

    def clean_username(self):
        username = self.cleaned_data['username'].strip()
        if not username:
            raise forms.ValidationError(_(u"请填写您的姓名"))
        return username

    def clean_content(self):
        message = self.cleaned_data['content'].strip()
        if not message:
            raise forms.ValidationError(_(u"请填写您的留言"))
        return message

    def clean_email(self):
        email = self.cleaned_data['email'].strip()
        # P is the module-level email regex.
        if not P.match(email):
            raise forms.ValidationError(_(u"请填写正确的邮箱"))
        return email

    class Meta:
        model = Suggest
        fields = ["username", "email", "content"]
class BlogCommentForm(forms.ModelForm):
    """Comment form bound to a fixed article supplied by the view."""

    article = forms.CharField(label=_(u'文章'), required=False, widget=forms.HiddenInput())

    class Meta:
        model = BlogComment
        fields = ["article", "username", "email", "content"]

    def __init__(self, article, *args, **kwargs):
        super(BlogCommentForm, self).__init__(*args, **kwargs)
        # The article is fixed server-side; whatever the hidden field
        # carried is ignored by clean_article below.
        self.article = article

    def clean_article(self):
        """Return the server-supplied article, ignoring posted data."""
        return self.article

    def clean_username(self):
        """Reject a blank name."""
        value = self.cleaned_data['username'].strip()
        if value:
            return value
        raise forms.ValidationError(_(u"请填写您的姓名"))

    def clean_content(self):
        """Reject an empty comment body."""
        value = self.cleaned_data['content'].strip()
        if value:
            return value
        raise forms.ValidationError(_(u"请填写您的留言"))

    def clean_email(self):
        """Validate the address against the module-level pattern ``P``."""
        value = self.cleaned_data['email'].strip()
        if P.match(value):
            return value
        raise forms.ValidationError(_(u"请填写正确的邮箱"))
| StarcoderdataPython |
4836736 | """
All the routes and view functions of the app are defined here.
"""
import os
import ast
import datetime
import bleach
from functools import wraps
from persiantools.jdatetime import JalaliDateTime
from flask import render_template, flash, redirect, url_for, request, abort
from flask_login import current_user, login_required
from werkzeug.utils import secure_filename
from app import app, db
from app.models import Products, Cart, Gallery, Category,\
User, Orders, Comments
from app.forms import AddProductForm, AddCategoryForm,\
CheckoutForm, CommentSectionForm
def login_required_role(role="ANY"):
    """Decorator factory: require an authenticated user with the given role.

    ``role="ANY"`` accepts any authenticated user; otherwise the user's
    ``role`` attribute must match exactly.  Failures answer 404 (not
    401/403) so protected URLs are not discoverable.
    """
    def wrapper(fn):
        @wraps(fn)
        def decorated_view(*args, **kwargs):
            allowed = (
                current_user.is_authenticated
                and (role == "ANY" or current_user.role == role)
            )
            if not allowed:
                abort(404)
            return fn(*args, **kwargs)
        return decorated_view
    return wrapper
# executes before any tasks
@app.before_first_request
def create_admin():
    """
    Create Admin User
    -----------------
    Seed the admin account once, before the app serves its first request.
    Idempotent: does nothing when the account already exists.
    """
    # NOTE(review): the "<EMAIL>"/"<PASSWORD>" placeholders look like
    # redacted credentials -- confirm the real values come from config,
    # not hard-coded secrets.
    if not User.query.filter_by(email="<EMAIL>").first():
        user = User(name="admin", email="<EMAIL>", role="Admin")
        user.set_password("<PASSWORD>")
        db.session.add(user)
        db.session.commit()
@app.route("/")
def main_page():
    """
    Main Page | products
    --------------------
    this page contains all the products cards.
    if there is no products then we flash a message.
    """
    # ?page=N selects the page; 12 product cards per page.
    page = request.args.get("page", 1, type=int)
    all_products = Products.query.paginate(page=page, per_page=12)
    # .items is the list for the current page; empty means no products
    # (or an out-of-range page number).
    if not all_products.items:
        flash("محصولی موجود نیست", category="info")
    return render_template(
        "main_page.html", all_products=all_products, title="محصولات")
@app.route("/add/product", methods=["POST", "GET"])
@login_required_role(role="Admin")
def add_product():
    """
    Add New Product | *Available For Admin*
    ---------------
    Validates the form, stores the main photo and gallery images on disk,
    sanitizes the rich-text description with bleach, and persists the
    product plus its gallery rows.
    """
    form = AddProductForm()
    if request.method == "POST":
        if form.validate_on_submit():
            c = Category.query.filter_by(name=str(form.category.data)).first()
            uploaded_file = form.photo.data
            filename = secure_filename(uploaded_file.filename)
            uploaded_file.save(
                os.path.join(app.config["UPLOAD_PATH"], filename)
            )
            url = os.path.join("/images", filename)
            # Clean the HTML editor payload with bleach.
            # BUG FIX: Flask's app.config is a dict -- attribute access
            # (app.config.ALLOWED_TAGS) raises AttributeError; use item
            # access like the UPLOAD_PATH lookups above.
            desc_cleaned_data = bleach.clean(
                request.form.get('editordata'),
                tags=app.config["ALLOWED_TAGS"],
                attributes=app.config["ALLOWED_ATTRIBUTES"],
                styles=app.config["ALLOWED_STYLES"],
            )
            p = Products(
                title=form.title.data, price=form.price.data,
                discounted=form.discounted.data, sold=0, rate=0,
                short_desc=form.short_desc.data, photo=url,
                inventory=form.inventory.data,
                category_id=c.id, desc=desc_cleaned_data,
            )
            db.session.add(p)
            db.session.commit()
            # Save every gallery image and link it to the new product.
            images = form.photos.data
            for img in images:
                filename = secure_filename(img.filename)
                img.save(os.path.join(app.config["UPLOAD_GALLERY"], filename))
                url = os.path.join("/gallery", filename)
                files = Gallery(pics=url, p_id=p.id)
                db.session.add(files)
            # One commit for all gallery rows instead of one per image.
            db.session.commit()
            return redirect(url_for("main_page"))
    return render_template(
        "add_product.html", form=form, title="ایجاد محصول جدید")
@app.route("/add/category", methods=["POST", "GET"])
@login_required_role(role="Admin")
def add_category():
    """
    Add New Category | *Available For Admin*
    ----------------
    Creates a new category on a valid POST, otherwise renders the form
    together with the list of existing categories.
    """
    form = AddCategoryForm()
    if form.validate_on_submit():
        db.session.add(Category(name=form.name.data))
        db.session.commit()
        flash("دسته بندی جدید ایجاد شد", category="info")
        return redirect(url_for("add_product"))
    existing = Category.query.all()
    return render_template("add_category.html", form=form, category=existing,
                           title="ایجاد دسته بندی جدید")
@app.route("/manage", methods=["POST", "GET"])
@login_required_role(role="Admin")
def manage_products():
    """
    Manage Products Page | *Available For Admin*
    --------------------
    Lists every product for administration (edit / delete links live in
    the template).
    """
    all_products = Products.query.all()
    if not all_products:
        flash("محصولی موجود نیست", category="info")
    return render_template(
        "manage_products.html", products=all_products, title="مدیریت محصولات"
    )
@app.route("/delete/product/<int:product_id>", methods=["GET", "POST"])
@login_required_role(role="Admin")
def delete(product_id):
    """
    Delete Products | *Available For Admin*
    ---------------
    Removes the product, its gallery images (DB rows and files on disk),
    its main photo file, and any cart rows that still reference it.
    """
    # Delete gallery rows for this product only (the original fetched the
    # whole Gallery table and filtered in Python).
    for item in Gallery.query.filter_by(p_id=product_id).all():
        # pics is "/gallery/<filename>"; index 2 is the file name.
        name = item.pics.split("/")[2]
        db.session.delete(item)
        os.remove(app.config["UPLOAD_GALLERY"] + "/" + name)
    product = Products.query.get(product_id)
    db.session.delete(product)
    db.session.commit()
    name = product.photo.split("/")[2]
    os.remove(app.config["UPLOAD_PATH"] + "/" + name)
    # BUG FIX: Cart.query.get(product_id) looked the row up by the Cart
    # primary key, not by product id, so unrelated cart rows could be
    # deleted (or stale ones kept).  Remove every cart row referencing
    # the deleted product instead.
    stale = Cart.query.filter_by(product_id=product_id).all()
    for entry in stale:
        db.session.delete(entry)
    if stale:
        db.session.commit()
    return redirect(url_for("manage_products"))
@app.route("/product/<int:product_id>", methods=["GET", "POST"])
def product_detail(product_id):
    """
    Products Details
    ---------------
    Renders one product page with its category, gallery and comment form.
    404s when the product or its category does not exist.
    """
    form = CommentSectionForm()
    product = Products.query.filter_by(id=product_id).first_or_404()
    category = Category.query.filter_by(id=product.category_id).first_or_404()
    # NOTE(review): loads the entire Gallery table; presumably the template
    # filters by product -- a query filtered on p_id would be cheaper.
    gallery = Gallery.query.all()
    # JalaliDateTime is passed so the template can render dates in the
    # Jalali (Persian) calendar.
    return render_template(
        "product_detail.html", category=category.name,
        product=product, gallery=gallery, title=product.title,
        form=form, JalaliDateTime=JalaliDateTime
    )
@app.route("/add/comment/<int:id>", methods=["GET", "POST"])
@login_required
def comments(id):
    """Store a new comment for product *id*, then return to its detail page."""
    form = CommentSectionForm()
    if form.validate_on_submit():
        comment = Comments(
            name=form.name.data,
            email=form.email.data,
            comment=form.comment.data,
            create_date=datetime.datetime.now(),
            product_id=id,
        )
        db.session.add(comment)
        flash(" دیدگاه شما ثبت شد", category="success")
        db.session.commit()
    # On validation failure we silently redirect back without flashing.
    return redirect(url_for("product_detail", product_id=id))
@app.route("/add/cart/product/<int:id>", methods=["POST", "GET"])
@login_required
def cart(id):
    """
    Add product to user cart
    ------------------------
    Adds product *id* to the current user's cart unless it is out of
    stock or already present.
    """
    # if a product inventory is 0, we don't add it to cart
    if Products.query.filter(Products.id == id).first().inventory == 0:
        flash("این محصول موجود نیست", category="danger")
        return redirect(url_for("main_page"))
    # check if the product is already in cart
    ca = Cart.query.filter(
        Cart.product_id == id, Cart.cart_id == current_user.id
    ).first()
    if ca:
        flash("این محصول قبلا اضافه شده است", category="danger")
        return redirect(url_for("show_cart"))
    else:
        # number input in product detail page
        # NOTE(review): n is the raw form string (or None from the main
        # page); presumably the Cart.number column coerces it -- verify.
        n = request.form.get("number")
        c = Cart(product_id=id, number=n, amount=0, total=0, cart_id=current_user.id)
        db.session.add(c)
        flash("به سبد خرید اضافه شد", category="success")
        # if product add from main page-there is no input number so number is 1
        if not n:
            c.number = 1
        db.session.commit()
    return redirect(url_for("main_page"))
@app.route("/cart", methods=["POST", "GET"])
@login_required
def show_cart():
    """
    User Cart
    ---------
    Shows the current user's cart lines next to the matching products.
    """
    user_cart = current_user.cart
    if not user_cart:
        flash("سبد خرید شما خالی است", category="info")
    # One product lookup per cart entry, kept in cart order.
    cart_products = []
    for entry in user_cart:
        cart_products.append(
            Products.query.filter(Products.id == entry.product_id).first()
        )
    return render_template(
        "cart.html",
        cart_products=cart_products,
        user_cart=user_cart,
        zip=zip,
        title="سبد خرید",
    )
@app.route("/delete/cart/product/<int:id>", methods=["GET", "POST"])
@login_required
def delete_cart(id):
    """
    Remove Items From Cart
    ----------------------
    Deletes product *id* from the current user's cart (404 if absent).
    """
    # BUG FIX: this view dereferences current_user, so it needs
    # @login_required like the other cart views; anonymous access
    # previously crashed instead of redirecting to the login page.
    c = Cart.query.filter(
        Cart.cart_id == current_user.id, Cart.product_id == id
    ).first_or_404()
    db.session.delete(c)
    db.session.commit()
    return redirect(url_for("show_cart", id=current_user.id))
@app.route("/add/cart/number/product/<int:product_id>", methods=["POST", "GET"])
@login_required
def add_num(product_id):
    """
    Add Product Number In Cart
    --------------------------
    Increments the cart quantity by one, capped at the product inventory.
    """
    # BUG FIX: added @login_required -- the view reads current_user and
    # crashed for anonymous visitors.
    c = Cart.query.filter(
        Cart.product_id == product_id, Cart.cart_id == current_user.id
    ).first()
    p = Products.query.filter(Products.id == product_id).first()
    # The original used a while/else that returned after one iteration,
    # i.e. a plain if/else in disguise.
    if c.number < p.inventory:
        c.number += 1
        db.session.commit()
    else:
        flash("بیشتر از این تعداد موجود نیست", category="danger")
    return redirect(url_for("show_cart"))
@app.route("/reduce/cart/number/product/<int:product_id>", methods=["POST", "GET"])
@login_required
def reduce_num(product_id):
    """
    Reduce Product Number In Cart
    -----------------------------
    Decrements the cart quantity; removes the item once it would drop to 0.
    """
    # BUG FIX: added @login_required -- the view reads current_user and
    # crashed for anonymous visitors.
    c = Cart.query.filter(
        Cart.product_id == product_id, Cart.cart_id == current_user.id
    ).first()
    if c.number > 1:
        c.number -= 1
        db.session.commit()
    else:
        # quantity is 1: a further decrement would empty it, so drop the row
        return redirect(url_for("delete_cart", id=product_id))
    return redirect(url_for("show_cart"))
@app.route("/checkout", methods=["POST", "GET"])
@login_required
def checkout():
    """
    Checkout Page
    -------------
    Shows the cart lines next to their products plus the checkout form.
    """
    form = CheckoutForm()
    c = current_user.cart
    # One product lookup per cart line, in the same order as the cart.
    p = [Products.query.filter(Products.id == i.product_id).first() for i in c]
    return render_template(
        "checkout.html", c=c, p=p, form=form, zip=zip, title="ثبت سفارش"
    )
@app.route("/payment", methods=["POST", "GET"])
@login_required
def payment():
    """
    Payment Page
    ------------
    Creates an Orders row from the checkout form + cart, then either
    finalizes a cash order immediately or stamps the online-payment start
    time before redirecting.
    """
    user_cart = current_user.cart
    # extracting user info entered in checkout form
    # NOTE(review): the form is read without validate_on_submit(), so
    # invalid submissions store empty order fields -- confirm intended.
    form = CheckoutForm()
    name = form.name.data
    city = form.city.data
    address = form.address.data
    phone = form.phone.data
    email = form.email.data
    # insert user order's data to Orders table
    # NOTE(review): user_cart[-1].total raises IndexError on an empty cart.
    orders = Orders(
        status="در انتظار پرداخت",
        user_id=current_user.id,
        payment_method="آنلاین",
        name=name,
        city=city,
        address=address,
        phone=phone,
        email=email,
        total=user_cart[-1].total,
        create_order_date=datetime.datetime.now(),
        number=str([user_cart[i].number for i in range(len(user_cart))]),
        product_id=str([user_cart[i].product_id for i in range(len(user_cart))]),
    )
    db.session.add(orders)
    db.session.commit()
    # find user order
    # NOTE(review): .first() returns the user's *oldest* order, not the one
    # just created; the `orders` object added above is already the new row,
    # so this re-query looks like a bug for repeat customers -- verify.
    orders = Orders.query.filter_by(user_id=current_user.id).first()
    # if user select cash *نقدی* option in form
    if form.payment.data == "نقدی":
        for i in range(len(user_cart)):
            # reduce product inventory
            Products.query.filter(
                Products.id == user_cart[i].product_id
            ).first().inventory -= user_cart[i].number
            # add product sold number
            Products.query.filter(
                Products.id == user_cart[i].product_id
            ).first().sold += user_cart[i].number
        # change payment status to 'نقدی', which was 'آنلاین'
        orders.payment_method = "نقدی"
        flash("سفارش ثبت شد", category="success")
        # delete user cart's items
        for i in user_cart:
            db.session.delete(i)
        db.session.commit()
        return redirect(url_for("main_page"))
    # track date and time when user goes to payment gateway
    orders.start_payment_date = datetime.datetime.now()
    db.session.commit()
    return redirect(url_for("main_page"))
@app.route("/orders", methods=["POST", "GET"])
@login_required_role(role="Admin")
def orders_list():
    """
    show all orders | *Available For Admin*
    ---------------
    """
    # take all the users that have orders.
    # NOTE(review): filter(Orders.user_id) filters on the truthiness of the
    # column (user_id != 0 / NOT NULL), it does not join Users -- confirm
    # whether plain Orders.query.all() was intended.
    orders = Orders.query.filter(Orders.user_id).all()
    if not orders:
        flash("سفارشی ثبت نشده است", category="info")
    return render_template("orders.html", orders=orders,
                           JalaliDateTime=JalaliDateTime, title="لیست سفارشات",
                           )
@app.route("/order/<int:order_id>", methods=["POST", "GET"])
@login_required_role(role="Admin")
def order_line(order_id):
    """
    show each order details | *Available For Admin*
    -----------------------------------------------
    we receive the list of product ID and product's number
    (the number selected by user in the cart ) as a list.
    First we return them from string to list.
    find products using our products id: img title and price will use.
    also shown number in template.
    """
    order = Orders.query.filter_by(id=order_id).first()
    # The id/number lists were persisted as str(list); literal_eval parses
    # them back safely (no arbitrary code execution, unlike eval).
    products_id = ast.literal_eval(order.product_id)
    products_number = ast.literal_eval(order.number)
    p = [Products.query.filter(Products.id == i).first() for i in products_id]
    return render_template(
        "order_line.html", number=products_number,
        product=p, order=order, zip=zip,
        JalaliDateTime=JalaliDateTime,title="جزئیات سفارش",
    )
@app.route("/fa", methods=["POST", "GET"])
@login_required
def final_amount():
    """
    Calculate Final Amount
    ----------------------
    Recomputes the line amount for every cart entry (discounted price wins
    over the regular price) and stores the grand total on every entry, then
    returns to the checkout page.
    """
    # BUG FIX: added @login_required (the view reads current_user).
    user_cart = current_user.cart
    products = [
        Products.query.filter(Products.id == entry.product_id).first()
        for entry in user_cart
    ]
    # BUG FIX: the original assigned `amount`/`total` only to the *last*
    # cart entry (both assignments sat outside the loop) and raised
    # NameError on an empty cart.
    total_amount = 0
    for entry, product in zip(user_cart, products):
        unit_price = product.discounted if product.discounted else product.price
        entry.amount = entry.number * unit_price
        total_amount += entry.amount
    # payment() reads user_cart[-1].total, so stamping every entry stays
    # backward-compatible while making the value uniform.
    for entry in user_cart:
        entry.total = total_amount
    db.session.commit()
    return redirect(url_for("checkout"))
@app.route("/product/rate/<int:product_id>/<string:name>")
def product_rate(product_id, name):
    """
    Add Products Rate
    ---------------
    *name* selects the direction: "add" increments, "reduce" decrements.
    Any other value is ignored.
    """
    p = Products.query.filter_by(id=product_id).first()
    if name == "add":
        p.rate += 1
    elif name == "reduce":
        p.rate -= 1
    else:
        # BUG FIX: the original committed and flashed "rating recorded"
        # even for an unknown *name*; unknown values now change nothing.
        return redirect(url_for("product_detail", product_id=product_id))
    db.session.commit()
    flash("امتیاز ثبت شد", category="success")
    return redirect(url_for("product_detail", product_id=product_id))
| StarcoderdataPython |
import tensorflow as tf
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
    # Grow GPU memory on demand instead of reserving it all at start-up.
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
import core.utils as utils
from core.config import cfg
from core.yolov4 import filter_boxes
from tensorflow.python.saved_model import tag_constants
from PIL import Image
import cv2
import numpy as np
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
import time
from flask import Flask, request, Response, jsonify, send_from_directory, abort
import os
import json
import requests
# Inference configuration, fixed at import time.
framework = 'tf'  # 'tf' (SavedModel) or 'tflite'
weights_path = './checkpoints/yolov4-416'
size = 416  # model input resolution (square)
tiny = False
model = 'yolov4'
output_path = './detections/'  # annotated result images are written here
iou = 0.45  # NMS IoU threshold
score = 0.25  # NMS confidence threshold
# Minimal stand-in for the absl FLAGS object that utils.load_config expects.
class Flag:
    tiny = tiny
    model = model
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
FLAGS = Flag
STRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config(FLAGS)
input_size = size
# load model
if framework == 'tflite':
    interpreter = tf.lite.Interpreter(model_path=weights_path)
else:
    saved_model_loaded = tf.saved_model.load(weights_path, tags=[tag_constants.SERVING])
# Initialize Flask application
app = Flask(__name__)
print("loaded")
# API that returns JSON with classes found in images
@app.route('/detections/by-image-files', methods=['POST'])
def get_detections_by_image_files():
    """Run YOLO on each uploaded file and return detections as JSON.

    Expects multipart uploads under the "images" field.  Each file is
    saved to ./temp, detected, annotated to ./detections, and the temp
    copy removed.  Response shape:
    {"response": [{"image": <name>, "detections": [{"class", "confidence",
    "box"}]}]}.
    """
    images = request.files.getlist("images")
    image_path_list = []
    for image in images:
        image_name = image.filename
        image_path_list.append("./temp/" + image_name)
        image.save(os.path.join(os.getcwd(), "temp/", image_name))
    # create list for final response
    response = []
    # loop through images in list and run Yolov4 model on each
    for count, image_path in enumerate(image_path_list):
        # create list of responses for current image
        responses = []
        try:
            original_image = cv2.imread(image_path)
            original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
            # The model expects a square float image scaled to [0, 1].
            image_data = cv2.resize(original_image, (input_size, input_size))
            image_data = image_data / 255.
        except cv2.error:
            # remove temporary images
            for name in image_path_list:
                os.remove(name)
            abort(404, "it is not an image file or image file is an unsupported format. try jpg or png")
        except Exception as e:
            # remove temporary images
            for name in image_path_list:
                os.remove(name)
            print(e.__class__)
            print(e)
            abort(500)
        # Batch of size 1.
        images_data = []
        for i in range(1):
            images_data.append(image_data)
        images_data = np.asarray(images_data).astype(np.float32)
        if framework == 'tflite':
            interpreter.allocate_tensors()
            input_details = interpreter.get_input_details()
            output_details = interpreter.get_output_details()
            print(input_details)
            print(output_details)
            interpreter.set_tensor(input_details[0]['index'], images_data)
            interpreter.invoke()
            pred = [interpreter.get_tensor(output_details[i]['index']) for i in range(len(output_details))]
            if model == 'yolov3' and tiny == True:
                boxes, pred_conf = filter_boxes(pred[1], pred[0], score_threshold=0.25,
                                                input_shape=tf.constant([input_size, input_size]))
            else:
                boxes, pred_conf = filter_boxes(pred[0], pred[1], score_threshold=0.25,
                                                input_shape=tf.constant([input_size, input_size]))
        else:
            t1 = time.time()
            infer = saved_model_loaded.signatures['serving_default']
            batch_data = tf.constant(images_data)
            pred_bbox = infer(batch_data)
            for key, value in pred_bbox.items():
                boxes = value[:, :, 0:4]
                pred_conf = value[:, :, 4:]
            t2 = time.time()
            print('time: {}'.format(t2 - t1))
        t1 = time.time()
        # Per-class non-max suppression over the raw predictions.
        boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
            boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
            scores=tf.reshape(
                pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
            max_output_size_per_class=50,
            max_total_size=50,
            iou_threshold=iou,
            score_threshold=score
        )
        t2 = time.time()
        class_names = utils.read_class_names(cfg.YOLO.CLASSES)
        print('time: {}'.format(t2 - t1))
        for i in range(valid_detections[0]):
            print('\t{}, {}, {}'.format(class_names[int(classes[0][i])],
                                        np.array(scores[0][i]),
                                        np.array(boxes[0][i])))
            responses.append({
                "class": class_names[int(classes[0][i])],
                "confidence": float("{0:.2f}".format(np.array(scores[0][i]) * 100)),
                "box": np.array(boxes[0][i]).tolist()
            })
        # [7:] strips the leading "./temp/" from the stored path.
        response.append({
            "image": image_path_list[count][7:],
            "detections": responses
        })
        pred_bbox = [boxes.numpy(), scores.numpy(), classes.numpy(), valid_detections.numpy()]
        # read in all class names from config
        class_names = utils.read_class_names(cfg.YOLO.CLASSES)
        # by default allow all classes in .names file
        allowed_classes = list(class_names.values())
        # custom allowed classes (uncomment line below to allow detections for only people)
        # allowed_classes = ['person']
        image = utils.draw_bbox(original_image, pred_bbox, allowed_classes=allowed_classes)
        image = Image.fromarray(image.astype(np.uint8))
        image = cv2.cvtColor(np.array(image), cv2.COLOR_BGR2RGB)
        cv2.imwrite(output_path + 'detection' + str(count) + '.png', image)
    # remove temporary images
    for name in image_path_list:
        os.remove(name)
    try:
        return Response(response=json.dumps({"response": response}), mimetype="application/json")
    except FileNotFoundError:
        abort(404)
# API that returns image with detections on it
@app.route('/image/by-image-file', methods=['POST'])
def get_image_by_image_file():
    """Run YOLO on one uploaded file and return the annotated PNG.

    Expects a multipart upload under the "images" field.  The temp copy
    is removed after inference; the annotated image is also written to
    ``output_path``.
    """
    image = request.files["images"]
    image_path = "./temp/" + image.filename
    image.save(os.path.join(os.getcwd(), image_path[2:]))
    try:
        original_image = cv2.imread(image_path)
        original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
        # The model expects a square float image scaled to [0, 1].
        image_data = cv2.resize(original_image, (input_size, input_size))
        image_data = image_data / 255.
    except cv2.error:
        # remove temporary image
        os.remove(image_path)
        abort(404, "it is not an image file or image file is an unsupported format. try jpg or png")
    except Exception as e:
        # remove temporary image
        os.remove(image_path)
        print(e.__class__)
        print(e)
        abort(500)
    # Batch of size 1.
    images_data = []
    for i in range(1):
        images_data.append(image_data)
    images_data = np.asarray(images_data).astype(np.float32)
    if framework == 'tflite':
        interpreter.allocate_tensors()
        input_details = interpreter.get_input_details()
        output_details = interpreter.get_output_details()
        print(input_details)
        print(output_details)
        interpreter.set_tensor(input_details[0]['index'], images_data)
        interpreter.invoke()
        pred = [interpreter.get_tensor(output_details[i]['index']) for i in range(len(output_details))]
        if model == 'yolov3' and tiny == True:
            boxes, pred_conf = filter_boxes(pred[1], pred[0], score_threshold=0.25,
                                            input_shape=tf.constant([input_size, input_size]))
        else:
            boxes, pred_conf = filter_boxes(pred[0], pred[1], score_threshold=0.25,
                                            input_shape=tf.constant([input_size, input_size]))
    else:
        t1 = time.time()
        infer = saved_model_loaded.signatures['serving_default']
        batch_data = tf.constant(images_data)
        pred_bbox = infer(batch_data)
        for key, value in pred_bbox.items():
            boxes = value[:, :, 0:4]
            pred_conf = value[:, :, 4:]
        t2 = time.time()
        print('time: {}'.format(t2 - t1))
    t1 = time.time()
    # Per-class non-max suppression over the raw predictions.
    boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
        boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
        scores=tf.reshape(
            pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
        max_output_size_per_class=50,
        max_total_size=50,
        iou_threshold=iou,
        score_threshold=score
    )
    t2 = time.time()
    class_names = utils.read_class_names(cfg.YOLO.CLASSES)
    print('time: {}'.format(t2 - t1))
    for i in range(valid_detections[0]):
        print('\t{}, {}, {}'.format(class_names[int(classes[0][i])],
                                    np.array(scores[0][i]),
                                    np.array(boxes[0][i])))
    pred_bbox = [boxes.numpy(), scores.numpy(), classes.numpy(), valid_detections.numpy()]
    # read in all class names from config
    class_names = utils.read_class_names(cfg.YOLO.CLASSES)
    # by default allow all classes in .names file
    allowed_classes = list(class_names.values())
    # custom allowed classes (uncomment line below to allow detections for only people)
    # allowed_classes = ['person']
    image = utils.draw_bbox(original_image, pred_bbox, allowed_classes=allowed_classes)
    image = Image.fromarray(image.astype(np.uint8))
    image = cv2.cvtColor(np.array(image), cv2.COLOR_BGR2RGB)
    cv2.imwrite(output_path + 'detection' + '.png', image)
    # prepare image for response
    _, img_encoded = cv2.imencode('.png', image)
    # BUG FIX: ndarray.tostring() was deprecated in NumPy 1.19 and removed
    # in NumPy 2.0; tobytes() returns the identical byte payload.
    response = img_encoded.tobytes()
    # remove temporary image
    os.remove(image_path)
    try:
        return Response(response=response, status=200, mimetype='image/png')
    except FileNotFoundError:
        abort(404)
# API that returns JSON with classes found in images from url list
@app.route('/detections/by-url-list', methods=['POST'])
def get_detections_by_url_list():
    """Download each URL in the JSON body's "images" list, run YOLO, and
    return detections as JSON (same response shape as the upload route).
    """
    image_urls = request.get_json()["images"]
    raw_image_list = []
    if not isinstance(image_urls, list):
        abort(400, "can't find image list")
    image_names = []
    # Spoof a browser user-agent so image hosts that block bots still serve us.
    custom_headers = {
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36"
    }
    for i, image_url in enumerate(image_urls):
        image_name = "Image" + str(i + 1)
        image_names.append(image_name)
        try:
            resp = requests.get(image_url, headers=custom_headers)
            img_raw = np.asarray(bytearray(resp.content), dtype="uint8")
            img_raw = cv2.imdecode(img_raw, cv2.IMREAD_COLOR)
        except cv2.error:
            abort(404, "it is not image url or that image is an unsupported format. try jpg or png")
        except requests.exceptions.MissingSchema:
            abort(400, "it is not url form")
        except Exception as e:
            print(e.__class__)
            print(e)
            abort(500)
        raw_image_list.append(img_raw)
    # create list for final response
    response = []
    # loop through images in list and run Yolov4 model on each
    for count, raw_image in enumerate(raw_image_list):
        # create list of responses for current image
        responses = []
        original_image = cv2.cvtColor(raw_image, cv2.COLOR_BGR2RGB)
        # The model expects a square float image scaled to [0, 1].
        image_data = cv2.resize(original_image, (input_size, input_size))
        image_data = image_data / 255.
        images_data = []
        for i in range(1):
            images_data.append(image_data)
        images_data = np.asarray(images_data).astype(np.float32)
        if framework == 'tflite':
            interpreter.allocate_tensors()
            input_details = interpreter.get_input_details()
            output_details = interpreter.get_output_details()
            print(input_details)
            print(output_details)
            interpreter.set_tensor(input_details[0]['index'], images_data)
            interpreter.invoke()
            pred = [interpreter.get_tensor(output_details[i]['index']) for i in range(len(output_details))]
            if model == 'yolov3' and tiny == True:
                boxes, pred_conf = filter_boxes(pred[1], pred[0], score_threshold=0.25,
                                                input_shape=tf.constant([input_size, input_size]))
            else:
                boxes, pred_conf = filter_boxes(pred[0], pred[1], score_threshold=0.25,
                                                input_shape=tf.constant([input_size, input_size]))
        else:
            t1 = time.time()
            infer = saved_model_loaded.signatures['serving_default']
            batch_data = tf.constant(images_data)
            pred_bbox = infer(batch_data)
            for key, value in pred_bbox.items():
                boxes = value[:, :, 0:4]
                pred_conf = value[:, :, 4:]
            t2 = time.time()
            print('time: {}'.format(t2 - t1))
        t1 = time.time()
        # Per-class non-max suppression over the raw predictions.
        boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
            boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
            scores=tf.reshape(
                pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
            max_output_size_per_class=50,
            max_total_size=50,
            iou_threshold=iou,
            score_threshold=score
        )
        t2 = time.time()
        class_names = utils.read_class_names(cfg.YOLO.CLASSES)
        print('time: {}'.format(t2 - t1))
        for i in range(valid_detections[0]):
            print('\t{}, {}, {}'.format(class_names[int(classes[0][i])],
                                        np.array(scores[0][i]),
                                        np.array(boxes[0][i])))
            responses.append({
                "class": class_names[int(classes[0][i])],
                "confidence": float("{0:.2f}".format(np.array(scores[0][i]) * 100)),
                "box": np.array(boxes[0][i]).tolist()
            })
        response.append({
            "image": image_names[count],
            "detections": responses
        })
        pred_bbox = [boxes.numpy(), scores.numpy(), classes.numpy(), valid_detections.numpy()]
        # read in all class names from config
        class_names = utils.read_class_names(cfg.YOLO.CLASSES)
        # by default allow all classes in .names file
        allowed_classes = list(class_names.values())
        # custom allowed classes (uncomment line below to allow detections for only people)
        # allowed_classes = ['person']
        image = utils.draw_bbox(original_image, pred_bbox, allowed_classes=allowed_classes)
        image = Image.fromarray(image.astype(np.uint8))
        image = cv2.cvtColor(np.array(image), cv2.COLOR_BGR2RGB)
        cv2.imwrite(output_path + 'detection' + str(count) + '.png', image)
    try:
        return Response(response=json.dumps({"response": response}), mimetype="application/json")
    except FileNotFoundError:
        abort(404)
# API that returns image with detections on it from url
@app.route('/image/by-url', methods=['POST'])
def get_image_by_url():
    """Download the first URL in the JSON body's "images" list, run YOLO,
    and return the annotated PNG.
    """
    image_urls = request.get_json()["images"]
    if not isinstance(image_urls, list):
        abort(400, "can't find image list")
    image_names = []
    # Spoof a browser user-agent so image hosts that block bots still serve us.
    custom_headers = {
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36"
    }
    image_name = "Image" + str(1)
    image_names.append(image_name)
    try:
        resp = requests.get(image_urls[0], headers=custom_headers)
        img_raw = np.asarray(bytearray(resp.content), dtype="uint8")
        img_raw = cv2.imdecode(img_raw, cv2.IMREAD_COLOR)
    except cv2.error:
        abort(404, "it is not image url or that image is an unsupported format. try jpg or png")
    except requests.exceptions.MissingSchema:
        abort(400, "it is not url form")
    except Exception as e:
        print(e.__class__)
        print(e)
        abort(500)
    # loop through images in list and run Yolov4 model on each
    original_image = cv2.cvtColor(img_raw, cv2.COLOR_BGR2RGB)
    # The model expects a square float image scaled to [0, 1].
    image_data = cv2.resize(original_image, (input_size, input_size))
    image_data = image_data / 255.
    images_data = []
    for i in range(1):
        images_data.append(image_data)
    images_data = np.asarray(images_data).astype(np.float32)
    if framework == 'tflite':
        interpreter.allocate_tensors()
        input_details = interpreter.get_input_details()
        output_details = interpreter.get_output_details()
        print(input_details)
        print(output_details)
        interpreter.set_tensor(input_details[0]['index'], images_data)
        interpreter.invoke()
        pred = [interpreter.get_tensor(output_details[i]['index']) for i in range(len(output_details))]
        if model == 'yolov3' and tiny == True:
            boxes, pred_conf = filter_boxes(pred[1], pred[0], score_threshold=0.25,
                                            input_shape=tf.constant([input_size, input_size]))
        else:
            boxes, pred_conf = filter_boxes(pred[0], pred[1], score_threshold=0.25,
                                            input_shape=tf.constant([input_size, input_size]))
    else:
        t1 = time.time()
        infer = saved_model_loaded.signatures['serving_default']
        batch_data = tf.constant(images_data)
        pred_bbox = infer(batch_data)
        for key, value in pred_bbox.items():
            boxes = value[:, :, 0:4]
            pred_conf = value[:, :, 4:]
        t2 = time.time()
        print('time: {}'.format(t2 - t1))
    t1 = time.time()
    # Per-class non-max suppression over the raw predictions.
    boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
        boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
        scores=tf.reshape(
            pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
        max_output_size_per_class=50,
        max_total_size=50,
        iou_threshold=iou,
        score_threshold=score
    )
    t2 = time.time()
    class_names = utils.read_class_names(cfg.YOLO.CLASSES)
    print('time: {}'.format(t2 - t1))
    for i in range(valid_detections[0]):
        print('\t{}, {}, {}'.format(class_names[int(classes[0][i])],
                                    np.array(scores[0][i]),
                                    np.array(boxes[0][i])))
    pred_bbox = [boxes.numpy(), scores.numpy(), classes.numpy(), valid_detections.numpy()]
    # read in all class names from config
    class_names = utils.read_class_names(cfg.YOLO.CLASSES)
    # by default allow all classes in .names file
    allowed_classes = list(class_names.values())
    # custom allowed classes (uncomment line below to allow detections for only people)
    # allowed_classes = ['person']
    image = utils.draw_bbox(original_image, pred_bbox, allowed_classes=allowed_classes)
    image = Image.fromarray(image.astype(np.uint8))
    image = cv2.cvtColor(np.array(image), cv2.COLOR_BGR2RGB)
    cv2.imwrite(output_path + 'detection' + str(0) + '.png', image)
    # prepare image for response
    _, img_encoded = cv2.imencode('.png', image)
    # NOTE(review): ndarray.tostring() was removed in NumPy 2.0 -- switch
    # to img_encoded.tobytes() (identical payload) before upgrading NumPy.
    response = img_encoded.tostring()
    try:
        return Response(response=response, status=200, mimetype='image/png')
    except FileNotFoundError:
        abort(404)
if __name__ == '__main__':
    # NOTE(review): debug=True combined with host 0.0.0.0 exposes the
    # Werkzeug debugger (arbitrary code execution) to the whole network --
    # disable debug for any non-local deployment.
    app.run(debug=True, host='0.0.0.0', port=5050)
| StarcoderdataPython |
1629996 | import re
import requests
from django.conf import settings
from django.core.exceptions import ValidationError
from .backends import BaseBackend
class SMSBackend(BaseBackend):
    """SMS backend that delivers messages through the Postman HTTP service."""

    def __init__(self, host=None, port=None, sms_route=None, auth_key=None, headers=None,
                 fail_silently=False, **kwargs):
        # BUG FIX: the constructor accepted these arguments but silently
        # ignored every one of them; explicit values now take precedence
        # over the Django settings (previous callers passing nothing see
        # identical behaviour).
        self.host = host or settings.POSTMAN_HOST
        self.route = sms_route or settings.POSTMAN_SMS_ROUTE
        self.sms_route = '{host}{route}'.format(host=self.host, route=self.route)
        self.auth_key = auth_key or settings.POSTMAN_AUTHKEY
        self.headers = headers or {'application-key': self.auth_key}

    @property
    def connection(self):
        """True when a HEAD request to the Postman route answers 200."""
        request = requests.head(self.sms_route, headers=self.headers)
        return request.status_code == 200

    def _send_msg_to_postman(self, postman_body):
        """POST *postman_body* to Postman; return (created, response_json)."""
        request = requests.post(self.sms_route, json=postman_body, headers=self.headers)
        return request.status_code == 201, request.json()

    def validate_phone_number(self, value):
        """Raise ValidationError unless *value* is exactly 10 digits.

        BUG FIX: ``re.match`` anchors only the start of the string, so any
        value that merely *began* with 10 digits (e.g. an 11-digit number)
        used to pass; ``fullmatch`` requires the whole string to match.
        The pattern is now a raw string (``'\\d'`` in a plain literal is a
        deprecated invalid escape).
        """
        if not re.fullmatch(r'\d{10}', value):
            raise ValidationError(u'%s is not valid phone number' % value)

    def send_messages(self, reciever, message, sender_id=None, route=None):
        """Validate *reciever* (10-digit phone) and forward *message*.

        Returns the (created, response_json) pair from Postman.
        """
        reciever = str(reciever)
        self.validate_phone_number(reciever)
        postman_body = {
            "receiver": {
                "contact": reciever
            },
            "data": {
                "body": message
            },
            "extras": {
                # Defaults mirror the original hard-coded fallbacks.
                "sender_id": sender_id if sender_id is not None else 'ENGDUN',
                "route": route if route is not None else 'clickhere'
            }
        }
        # send message to postman
        return self._send_msg_to_postman(postman_body)
| StarcoderdataPython |
1730642 | # -*- coding: utf-8 -*-
from configparser import ConfigParser
from flask import Flask, jsonify, request
import logging.handlers
import logging
from bin.ram import RAM
from bin.cpu import CPU
from bin.network import Network
from bin.load_avg import LoadAvg
from bin.boot_time import BootTime
from bin.disk import Disk
# convert human sizes to bytes
def convert_bytes(byts):
    """Convert a human-readable size string ('#kb'/'#mb'/'#gb', e.g. '10gb') to bytes.

    Generalized to accept uppercase suffixes ('10GB') as well.

    Raises:
        IOError: if the suffix is unknown, the numeric part is not an integer,
            or the input is not a string.
    """
    units = {'kb': 1024, 'mb': 1024 ** 2, 'gb': 1024 ** 3}
    try:
        suffix = byts[-2:].lower()
        if suffix not in units:
            raise IOError('Invalid input. Correct format: #kb/#mb/#gb like 10gb or 5mb')
        return int(byts[0:-2]) * units[suffix]
    except (ValueError, AttributeError, TypeError):
        # non-integer prefix or non-string input
        raise IOError('Invalid input. Correct format: #kb/#mb/#gb like 10gb or 5mb')
# load config from config.ini; every value has a fallback default
config = ConfigParser()
config.read('config.ini')
err_type = ''
log_file = ''
log_size_limit = ''
log_file_number_limit = 0
flsk_host = ''
flsk_port = 0
try:
    # log values
    err_type = 'Log > Name'
    log_file = config.get('Log', 'Name', fallback='agent.log')
    err_type = 'Log > Size_limit'
    log_size_limit = config.get('Log', 'Size_limit', fallback='5mb')
    log_size_limit = convert_bytes(log_size_limit)
    err_type = 'Log > File_Limit'
    log_file_number_limit = config.getint('Log', 'File_Limit', fallback=10)
    # flask values
    err_type = 'Flask > Host'
    flsk_host = config.get('Flask', 'Host', fallback='0.0.0.0')
    err_type = 'Flask > Port'
    flsk_port = config.getint('Flask', 'Port', fallback=5000)
except IOError as e:
    # BUG FIX: the IOError raised by convert_bytes() carries a single message
    # argument, so `e.args[1]` raised IndexError and masked the real error;
    # format the exception itself instead.
    print('CONFIG ERROR: Unable to load values from \"{}\"! STACKTRACE: {}'.format(err_type, e))
    print('CONFIG ERROR: Force closing program...')
    exit()
# prepare logging: a named logger with a rotating file handler plus a console
# handler.  NOTE: the console format string requires a 'topic' key, so every
# log call on this logger must pass extra={'topic': ...}.
try:
    logger = logging.getLogger('AtomicMonitor Agent')
    logger.setLevel(logging.DEBUG)
    logger.addHandler(logging.handlers.RotatingFileHandler(log_file, maxBytes=log_size_limit,
                                                           backupCount=log_file_number_limit))
    ch = logging.StreamHandler()
    ch.setFormatter(logging.Formatter('%(asctime)s | %(levelname)-8s | %(topic)-5s | %(message)s'))
    logger.addHandler(ch)
except IOError as e:
    # BUG FIX: `e.args[1]` raised IndexError for single-argument errors; also
    # fixed the 'STACETRACE' typo in the message.
    print('FILE ERROR: Unable to prepare log file! STACKTRACE: {}'.format(e))
    print('FILE ERROR: Force closing program...')
    exit()
# setup variables
# Module-level collector instances shared by all request handlers.
sram = RAM()      # RAM / swap statistics
scpu = CPU()      # CPU usage percentage
net = Network()   # per-NIC sent/received counters
load = LoadAvg()  # load averages (reported as Linux-only by get_load())
boot = BootTime()  # boot timestamp
sdisk = Disk()    # per-disk usage and disk I/O counters
app = Flask(__name__)
# display current specs
@app.route('/now')
def web_now():
    """Flask view: JSON snapshot of current RAM, CPU, boot time and disk I/O."""
    # retrieve current system specs
    ram_percent, ram_used, ram_total = sram.get_memory_usage()
    cpu_percent = scpu.get_usage()
    boot_time = boot.get_boot_time()
    disk_io = sdisk.get_disk_io()
    # create json object
    json_data = {
        'ram': {
            'percent_used': ram_percent,
            'used': ram_used,
            'total': ram_total
        },
        'cpu': {
            'percent_used': cpu_percent
        },
        'boot': {
            'start_timestamp': boot_time
        },
        'disk_io': disk_io
    }
    # BUG FIX: `logging.info(...)` logs via the root logger, which has no
    # handlers here and drops INFO at its default WARNING level; use the
    # configured module logger instead.
    logger.info('Retrieved now status for IP: {}'.format(request.remote_addr), extra={'topic': 'AGENT'})
    # print json data
    return jsonify(json_data)
# display full system specs
@app.route('/')
def web_all():
    """Flask view: full JSON snapshot — memory, swap, CPU, NICs, load, boot, disks."""
    # retrieve current system specs
    ram_percent, ram_used, ram_total = sram.get_memory_usage()
    swap_percent, swap_used, swap_total = sram.get_swap_usage()
    cpu_usage = scpu.get_usage()
    nics_bytes = net.get_nic_status()
    nic_names, nic_sent, nic_recvs = [], [], []
    for nic in nics_bytes:
        nic_names.append(nic.get_name())
        nic_sent.append(nic.get_sent())
        nic_recvs.append(nic.get_recv())
    islinux, load_1m, load_5m, load_15m = load.get_load()
    if not islinux:
        # load averages are reported as unavailable off Linux
        load_1m = 'NULL'
        load_5m = 'NULL'
        load_15m = 'NULL'
    boot_time = boot.get_boot_time()
    disks = sdisk.get_disks()
    disk_names, disk_percents, disk_uses, disk_totals = [], [], [], []
    for disk in disks:
        disk_names.append(disk.get_name())
        disk_percents.append(disk.get_percent())
        disk_uses.append(disk.get_used())
        disk_totals.append(disk.get_total())
    disk_io = sdisk.get_disk_io()
    # create json object
    # NOTE(review): 'mb_recieved' is misspelled but is part of the public JSON
    # schema consumed by clients, so it is intentionally left unchanged.
    json_data = {
        'memory': {
            'ram': {
                'percent_used': ram_percent,
                'used': ram_used,
                'total': ram_total
            },
            'swap': {
                'percent_used': swap_percent,
                'used': swap_used,
                'total': swap_total
            }
        },
        'cpu': {
            'percent_used': cpu_usage
        },
        'network': [
            {
                'name': name,
                'mb_sent': sent,
                'mb_recieved': recv
            }
            for name, sent, recv in zip(nic_names, nic_sent, nic_recvs)
        ],
        'load': {
            '1min': load_1m,
            '5min': load_5m,
            '15min': load_15m
        },
        'boot': {
            'time': {
                'timestamp': boot_time
            }
        },
        'disks': {
            'io': disk_io,
            'list': [
                {
                    'name': name,
                    'percent_used': percent,
                    'used': used,
                    'total': total
                }
                for name, percent, used, total in zip(disk_names, disk_percents, disk_uses, disk_totals)
            ]
        }
    }
    # BUG FIX: use the configured module logger; the root-logger call
    # `logging.info(...)` was silently dropped (no handlers, WARNING level).
    logger.info('Retrieved all status for IP: {}'.format(request.remote_addr), extra={'topic': 'AGENT'})
    # print json data
    return jsonify(json_data)
# start flask process
if __name__ == '__main__':
    # BUG FIX: log through the configured module logger; the root-logger call
    # `logging.info(...)` was dropped at the default WARNING level.
    logger.info('Starting program...', extra={'topic': 'AGENT'})
    # start Flask service
    app.run(host=flsk_host, port=flsk_port)
| StarcoderdataPython |
170508 | <reponame>ioyy900205/vit-pytorch<gh_stars>1-10
import math
from functools import reduce
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange, repeat
# helpers
def prob_mask_like(t, prob):
    """Return a (batch, seq) boolean tensor whose entries are True with probability *prob*."""
    num_batches, num_patches, _ = t.shape
    noise = torch.zeros((num_batches, num_patches)).float().uniform_(0, 1)
    return noise < prob
def get_mask_subset_with_prob(patched_input, prob):
    """Pick ceil(prob * seq_len) random patch positions per batch row.

    Returns a (batch, seq_len) boolean mask with exactly that many True
    entries in every row.
    """
    num_rows, seq_len, _ = patched_input.shape
    device = patched_input.device
    num_masked = math.ceil(prob * seq_len)
    scores = torch.rand((num_rows, seq_len), device=device)
    _, chosen_positions = scores.topk(num_masked, dim=-1)
    subset_mask = torch.zeros((num_rows, seq_len), device=device)
    subset_mask.scatter_(1, chosen_positions, 1)
    return subset_mask.bool()
# mpp loss
class MPPLoss(nn.Module):
    """Masked-patch-prediction loss.

    Each target patch's per-channel mean value is discretized into
    ``output_channel_bits`` bins; the joint bin indices across channels are
    packed into a single integer class label, and cross-entropy is computed
    between that label and the predicted patch logits at masked positions.
    """
    def __init__(self, patch_size, channels, output_channel_bits,
                 max_pixel_val):
        """Store the patching/discretization hyper-parameters."""
        super(MPPLoss, self).__init__()
        self.patch_size = patch_size
        self.channels = channels
        self.output_channel_bits = output_channel_bits
        self.max_pixel_val = max_pixel_val
    def forward(self, predicted_patches, target, mask):
        """Cross-entropy between predicted patch logits and discretized targets.

        Args (assumed from usage — confirm against caller):
            predicted_patches: (batch, num_patches, 2**(channels*bits)) logits.
            target: original image, (batch, channels, H, W).
            mask: (batch, num_patches) boolean mask of positions to score.
        """
        # reshape target to patches
        p = self.patch_size
        target = rearrange(target,
                           "b c (h p1) (w p2) -> b (h w) c (p1 p2) ",
                           p1=p,
                           p2=p)
        # per-channel mean over the pixels of each patch
        avg_target = target.mean(dim=3)
        # bin boundaries spanning (0, max_pixel_val); bucketize maps each mean
        # to its bin index
        bin_size = self.max_pixel_val / self.output_channel_bits
        channel_bins = torch.arange(bin_size, self.max_pixel_val, bin_size).to(avg_target.device)
        discretized_target = torch.bucketize(avg_target, channel_bins)
        discretized_target = F.one_hot(discretized_target,
                                       self.output_channel_bits)
        c, bi = self.channels, self.output_channel_bits
        # flatten (channel, bit) one-hot planes into one vector per patch
        discretized_target = rearrange(discretized_target,
                                       "b n c bi -> b n (c bi)",
                                       c=c,
                                       bi=bi)
        # powers of two used to pack the one-hot bits into a single int label
        bin_mask = 2**torch.arange(c * bi - 1, -1,
                                   -1).to(discretized_target.device,
                                          discretized_target.dtype)
        target_label = torch.sum(bin_mask * discretized_target, -1)
        # score only the masked patch positions
        predicted_patches = predicted_patches[mask]
        target_label = target_label[mask]
        loss = F.cross_entropy(predicted_patches, target_label)
        return loss
# main class
class MPP(nn.Module):
    """Masked Patch Prediction pre-training wrapper.

    Wraps a ViT-style transformer (it is expected to expose
    ``to_patch_embedding``, ``cls_token``, ``pos_embedding``, ``dropout`` and
    ``transformer`` attributes — confirm against the vit_pytorch ViT class):
    patches the input image, randomly corrupts a subset of patches
    (mask-token replacement and random-patch swaps), runs the transformer and
    returns the masked-patch-prediction loss.
    """
    def __init__(self,
                 transformer,
                 patch_size,
                 dim,
                 output_channel_bits=3,
                 channels=3,
                 max_pixel_val=1.0,
                 mask_prob=0.15,
                 replace_prob=0.5,
                 random_patch_prob=0.5):
        """Set up the loss, output head, mask token and corruption probabilities."""
        super().__init__()
        self.transformer = transformer
        self.loss = MPPLoss(patch_size, channels, output_channel_bits,
                            max_pixel_val)
        # output transformation: logits over all 2**(bits*channels) bin labels
        self.to_bits = nn.Linear(dim, 2**(output_channel_bits * channels))
        # vit related dimensions
        self.patch_size = patch_size
        # mpp related probabilities
        self.mask_prob = mask_prob
        self.replace_prob = replace_prob
        self.random_patch_prob = random_patch_prob
        # learned [MASK] token in raw-patch space (before patch embedding)
        self.mask_token = nn.Parameter(torch.randn(1, 1, dim * channels))
    def forward(self, input, **kwargs):
        """Corrupt patches, encode with the transformer, return the MPP loss."""
        transformer = self.transformer
        # clone original image for loss
        img = input.clone().detach()
        # reshape raw image to patches
        p = self.patch_size
        input = rearrange(input,
                          'b c (h p1) (w p2) -> b (h w) (p1 p2 c)',
                          p1=p,
                          p2=p)
        # choose which patch positions participate in the loss
        mask = get_mask_subset_with_prob(input, self.mask_prob)
        # mask input with mask patches with probability of `replace_prob` (keep patches the same with probability 1 - replace_prob)
        masked_input = input.clone().detach()
        # if random token probability > 0 for mpp
        if self.random_patch_prob > 0:
            # rescale so the random-swap probability is conditioned on the
            # patch NOT being replaced by the mask token
            random_patch_sampling_prob = self.random_patch_prob / (
                1 - self.replace_prob)
            random_patch_prob = prob_mask_like(input,
                                               random_patch_sampling_prob).to(mask.device)
            bool_random_patch_prob = mask * (random_patch_prob == True)
            # for each position draw a random source patch index to copy from
            random_patches = torch.randint(0,
                                           input.shape[1],
                                           (input.shape[0], input.shape[1]),
                                           device=input.device)
            randomized_input = masked_input[
                torch.arange(masked_input.shape[0]).unsqueeze(-1),
                random_patches]
            masked_input[bool_random_patch_prob] = randomized_input[
                bool_random_patch_prob]
        # [mask] input: replace selected positions with the learned mask token
        replace_prob = prob_mask_like(input, self.replace_prob).to(mask.device)
        bool_mask_replace = (mask * replace_prob) == True
        masked_input[bool_mask_replace] = self.mask_token
        # linear embedding of patches (last stage of the ViT patch-embed stack)
        masked_input = transformer.to_patch_embedding[-1](masked_input)
        # add cls token to input sequence
        b, n, _ = masked_input.shape
        cls_tokens = repeat(transformer.cls_token, '() n d -> b n d', b=b)
        masked_input = torch.cat((cls_tokens, masked_input), dim=1)
        # add positional embeddings to input
        masked_input += transformer.pos_embedding[:, :(n + 1)]
        masked_input = transformer.dropout(masked_input)
        # get generator output and get mpp loss
        masked_input = transformer.transformer(masked_input, **kwargs)
        cls_logits = self.to_bits(masked_input)
        # drop the cls position before computing the per-patch loss
        logits = cls_logits[:, 1:, :]
        mpp_loss = self.loss(logits, img, mask)
        return mpp_loss
| StarcoderdataPython |
3350453 | # Generated by Django 2.2 on 2021-05-25 08:36
import agenda.models
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make Agenda.notify_me_at optional (null/blank)
    with its default supplied by agenda.models.default_notify_time."""
    dependencies = [
        ('agenda', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='agenda',
            name='notify_me_at',
            field=models.TimeField(blank=True, default=agenda.models.default_notify_time, null=True),
        ),
    ]
| StarcoderdataPython |
103217 | <reponame>ckxz105/rltk
import sys
#: Accept all default values without asking user
#  NOTE(review): the name is misspelled ('SLIENTLY' vs 'SILENTLY') but it is
#  part of the module's public API, so it is left unchanged.
SLIENTLY_ACCEPT_ALL_DEFAULT_VALUES = False
def prompt(text: str, *args, new_line: bool = True, **kwargs):
    """
    Write a prompt message to stdout and flush immediately.
    Args:
        text (str): Text.
        *args: Additional text fragments, separated by spaces.
        new_line (bool, optional): Terminate with a newline. Defaults to True.
        **kwargs: Extra keyword arguments forwarded to :py:meth:`print`.
    """
    terminator = '\n' if new_line else ''
    print(text, *args, end=terminator, file=sys.stdout, **kwargs)
    sys.stdout.flush()
def select(text: str, cases: list, default: int = None, case_sensitive: bool = False):
    """
    Ask the user to pick one of *cases*.
    Args:
        text (str): Prompt text.
        cases (list[tuple]): Each tuple is `('display text', 'expected input')`,
            e.g. `[('(Y)es', 'y'), ('(N)o', 'n')]`.
        default (int, optional): Index of the default case; empty input selects
            it. None disables the default. Defaults to None.
        case_sensitive (bool, optional): Compare input case-sensitively.
            Defaults to False.
    Returns:
        str: The matched case value.
    """
    prompt(text)
    labels = []
    for idx, (label, _) in enumerate(cases):
        is_default = default is not None and idx == default
        labels.append('[{}]'.format(label) if is_default else '{}'.format(label))
    prompt(' / '.join(labels))
    accepted = [value for _, value in cases]
    if default is not None:
        accepted.append('')
    if not case_sensitive:
        accepted = [v.lower() for v in accepted]
    while True:
        answer = ''
        if not SLIENTLY_ACCEPT_ALL_DEFAULT_VALUES or default is None:
            answer = input().strip()
        if not case_sensitive:
            answer = answer.lower()
        if answer not in accepted:
            prompt('Invalid input, please retry')
            continue
        if answer == '' and default is not None:
            return cases[default][1]
        return answer
def confirm(text: str, default: bool = None):
    """
    Ask the user a Yes/No question.
    Args:
        text (str): Prompt text.
        default (bool, optional): True makes Yes the default, False makes No.
            None means no default. Defaults to None.
    Returns:
        bool: True for Yes, False for No.
    """
    default_index = None
    if default is not None:
        default_index = 0 if default else 1
    answer = select(text, cases=[('(Y)es', 'y',), ('(N)o', 'n')],
                    default=default_index, case_sensitive=False)
    return answer == 'y'
class Progress(object):
    """
    Terminal progress indicator that rewrites a single line in place.
    Args:
        format_ (str, optional): Format string applied to `update()` args.
        start (str, optional): Text printed on entry.
        end (str, optional): Text printed on exit.
    Note:
        Use as a context manager::
            with rltk.cli.progress(format_='{}%') as p:
                for i in range(11):
                    time.sleep(0.5)
                    p.update(i * 10)
    """
    def __init__(self, format_: str = '{}', start: str = 'Starting...', end: str = 'Done!'):
        self._format = format_
        self._prev_len = 0
        self._start = start
        self._end = end
    def update(self, *args):
        """
        Render *args* through the format string and redraw the line.
        Args:
            *args: Values substituted into `format_`.
        """
        rendered = self._format.format(*args)
        # erase whatever was written last time, then overwrite in place
        prompt('\r' + ' ' * self._prev_len, new_line=False)
        prompt('\r' + rendered, new_line=False)
        self._prev_len = len(rendered)
    def __enter__(self):
        """
        Print the start text (if any) and return self.
        """
        if self._start:
            prompt(self._start, new_line=False)
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        Erase the progress line, print the end text, then a newline.
        """
        prompt('\r' + ' ' * self._prev_len, new_line=False)
        if self._end:
            prompt('\r' + self._end, new_line=False)
        prompt('')
progress = Progress  # lowercase alias so usage reads `with rltk.cli.progress(...)`
def input_(text: str, default: str = None, type_: type = str):
    """
    Prompt for input and convert it to *type_*, retrying on invalid input.
    Args:
        text (str): Prompt text.
        default (str, optional): Default value, used when
            SLIENTLY_ACCEPT_ALL_DEFAULT_VALUES is set. Defaults to None
            (no default value).
        type_ (type, optional): Type the input is converted to, defaults to `str`.
    Returns:
        object: User input converted to `type_`.
    Note:
        Make sure the default value can be converted by `type_`, otherwise an
        exception will be raised.
    """
    prompt(text)
    while True:
        if not SLIENTLY_ACCEPT_ALL_DEFAULT_VALUES or default is None:
            user_input = input().strip()
            try:
                return type_(user_input)
            # BUG FIX: the bare `except:` also swallowed KeyboardInterrupt and
            # SystemExit, turning Ctrl-C into an endless retry loop; only catch
            # conversion failures.
            except (TypeError, ValueError):
                prompt('Invalid input, please retry')
        else:
            return type_(default)
| StarcoderdataPython |
3273718 | <gh_stars>0
####!/usr/bin/env python
"""
Author: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
Purpose: Implementation of the interaction between the Gambler's problem environment
and the Monte Carlon agent using RL_glue.
For use in the Reinforcement Learning course, Fall 2017, University of Alberta
"""
from utils import *
from rl_glue import * # Required for RL-Glue
import sys
import time
import math
import numpy as np
import pickle
from rndmwalk_policy_evaluation import compute_value_function
import matplotlib.pyplot as plt
# Run the Gambler's-problem experiment once per agent module.
agents = ["agent1","agent2","agent3"]
for agent in agents:
    RLGlue("env", agent)
    # NOTE(review): the __main__ guard sits INSIDE the for loop, so under
    # direct execution all three agents run sequentially, while on import only
    # the RLGlue registrations happen — confirm this nesting is intentional.
    if __name__ == "__main__":
        total_episodes =2000
        print("Doing " + agent)
        # precomputed ground-truth state values used for the RMSVE metric
        V = np.load('TrueValueFunction.npy')
        runs = 10
        # one accumulated error sample per 10 episodes
        errors = np.zeros(total_episodes//10)
        np.random.seed()
        for run in range(0, runs):
            print("Run : " +str(run+1))
            RL_init()
            for num_episodes in range (total_episodes) :
                RL_episode(20000)
                # sample the value function once every 10th episode
                if (num_episodes//10 - (num_episodes-1)//10) != 0:
                    stateValues = RL_agent_message("ValueFunction")
                    # root-mean-squared value error over the 1000 non-terminal states
                    errors[num_episodes//10] += math.sqrt(sum(np.power(V[1:] - stateValues, 2))/1000)
            RL_cleanup()
        # average the accumulated errors over all runs before plotting
        errors = errors/runs
        plt.figure(agents.index(agent)+1)
        plt.plot(errors)
        plt.xlabel('Episodes')
        plt.ylabel('RMSVE')
        plt.show()
        if agent == "agent3":
            print("Done ! ")
| StarcoderdataPython |
90740 | from django import forms
class RecipeModel(forms.ModelForm):
    """ModelForm for recipe objects.

    BUG FIX: Django raises ImproperlyConfigured when a ModelForm Meta declares
    both `fields` and `exclude`; the explicit `fields` whitelist already omits
    'userTableForeignKey', so the redundant `exclude` was removed.

    NOTE(review): Meta also lacks the required `model = ...` attribute — the
    target model is not visible from this file; confirm and add it.
    """
    class Meta:
        # explicit whitelist; 'userTableForeignKey' is intentionally omitted
        fields = ["picture", "name", "description", "date", "creator", "edit"]
3368694 | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class OperationsInsightsWarehouseUser(object):
    """
    OPSI warehouse User.
    """

    #: A constant which can be used with the lifecycle_state property of a OperationsInsightsWarehouseUser.
    #: This constant has a value of "CREATING"
    LIFECYCLE_STATE_CREATING = "CREATING"

    #: A constant which can be used with the lifecycle_state property of a OperationsInsightsWarehouseUser.
    #: This constant has a value of "UPDATING"
    LIFECYCLE_STATE_UPDATING = "UPDATING"

    #: A constant which can be used with the lifecycle_state property of a OperationsInsightsWarehouseUser.
    #: This constant has a value of "ACTIVE"
    LIFECYCLE_STATE_ACTIVE = "ACTIVE"

    #: A constant which can be used with the lifecycle_state property of a OperationsInsightsWarehouseUser.
    #: This constant has a value of "DELETING"
    LIFECYCLE_STATE_DELETING = "DELETING"

    #: A constant which can be used with the lifecycle_state property of a OperationsInsightsWarehouseUser.
    #: This constant has a value of "DELETED"
    LIFECYCLE_STATE_DELETED = "DELETED"

    #: A constant which can be used with the lifecycle_state property of a OperationsInsightsWarehouseUser.
    #: This constant has a value of "FAILED"
    LIFECYCLE_STATE_FAILED = "FAILED"

    def __init__(self, **kwargs):
        """
        Initializes a new OperationsInsightsWarehouseUser object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):

        :param operations_insights_warehouse_id:
            The value to assign to the operations_insights_warehouse_id property of this OperationsInsightsWarehouseUser.
        :type operations_insights_warehouse_id: str

        :param id:
            The value to assign to the id property of this OperationsInsightsWarehouseUser.
        :type id: str

        :param compartment_id:
            The value to assign to the compartment_id property of this OperationsInsightsWarehouseUser.
        :type compartment_id: str

        :param name:
            The value to assign to the name property of this OperationsInsightsWarehouseUser.
        :type name: str

        :param connection_password:
            The value to assign to the connection_password property of this OperationsInsightsWarehouseUser.
        :type connection_password: str

        :param is_awr_data_access:
            The value to assign to the is_awr_data_access property of this OperationsInsightsWarehouseUser.
        :type is_awr_data_access: bool

        :param is_em_data_access:
            The value to assign to the is_em_data_access property of this OperationsInsightsWarehouseUser.
        :type is_em_data_access: bool

        :param is_opsi_data_access:
            The value to assign to the is_opsi_data_access property of this OperationsInsightsWarehouseUser.
        :type is_opsi_data_access: bool

        :param freeform_tags:
            The value to assign to the freeform_tags property of this OperationsInsightsWarehouseUser.
        :type freeform_tags: dict(str, str)

        :param defined_tags:
            The value to assign to the defined_tags property of this OperationsInsightsWarehouseUser.
        :type defined_tags: dict(str, dict(str, object))

        :param system_tags:
            The value to assign to the system_tags property of this OperationsInsightsWarehouseUser.
        :type system_tags: dict(str, dict(str, object))

        :param time_created:
            The value to assign to the time_created property of this OperationsInsightsWarehouseUser.
        :type time_created: datetime

        :param time_updated:
            The value to assign to the time_updated property of this OperationsInsightsWarehouseUser.
        :type time_updated: datetime

        :param lifecycle_state:
            The value to assign to the lifecycle_state property of this OperationsInsightsWarehouseUser.
            Allowed values for this property are: "CREATING", "UPDATING", "ACTIVE", "DELETING", "DELETED", "FAILED", 'UNKNOWN_ENUM_VALUE'.
            Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
        :type lifecycle_state: str

        :param lifecycle_details:
            The value to assign to the lifecycle_details property of this OperationsInsightsWarehouseUser.
        :type lifecycle_details: str

        """
        self.swagger_types = {
            'operations_insights_warehouse_id': 'str',
            'id': 'str',
            'compartment_id': 'str',
            'name': 'str',
            # BUG FIX: this entry had been replaced by a '<PASSWORD>' scrubbing
            # placeholder; restored to the declared type ('str', see the
            # docstring above).
            'connection_password': 'str',
            'is_awr_data_access': 'bool',
            'is_em_data_access': 'bool',
            'is_opsi_data_access': 'bool',
            'freeform_tags': 'dict(str, str)',
            'defined_tags': 'dict(str, dict(str, object))',
            'system_tags': 'dict(str, dict(str, object))',
            'time_created': 'datetime',
            'time_updated': 'datetime',
            'lifecycle_state': 'str',
            'lifecycle_details': 'str'
        }

        self.attribute_map = {
            'operations_insights_warehouse_id': 'operationsInsightsWarehouseId',
            'id': 'id',
            'compartment_id': 'compartmentId',
            'name': 'name',
            # BUG FIX: this entry had been replaced by a '<PASSWORD>' scrubbing
            # placeholder; restored to the camelCase wire name used by every
            # sibling entry.
            'connection_password': 'connectionPassword',
            'is_awr_data_access': 'isAwrDataAccess',
            'is_em_data_access': 'isEmDataAccess',
            'is_opsi_data_access': 'isOpsiDataAccess',
            'freeform_tags': 'freeformTags',
            'defined_tags': 'definedTags',
            'system_tags': 'systemTags',
            'time_created': 'timeCreated',
            'time_updated': 'timeUpdated',
            'lifecycle_state': 'lifecycleState',
            'lifecycle_details': 'lifecycleDetails'
        }

        self._operations_insights_warehouse_id = None
        self._id = None
        self._compartment_id = None
        self._name = None
        self._connection_password = None
        self._is_awr_data_access = None
        self._is_em_data_access = None
        self._is_opsi_data_access = None
        self._freeform_tags = None
        self._defined_tags = None
        self._system_tags = None
        self._time_created = None
        self._time_updated = None
        self._lifecycle_state = None
        self._lifecycle_details = None

    @property
    def operations_insights_warehouse_id(self):
        """
        **[Required]** Gets the operations_insights_warehouse_id of this OperationsInsightsWarehouseUser.
        OPSI Warehouse OCID

        :return: The operations_insights_warehouse_id of this OperationsInsightsWarehouseUser.
        :rtype: str
        """
        return self._operations_insights_warehouse_id

    @operations_insights_warehouse_id.setter
    def operations_insights_warehouse_id(self, operations_insights_warehouse_id):
        """
        Sets the operations_insights_warehouse_id of this OperationsInsightsWarehouseUser.
        OPSI Warehouse OCID

        :param operations_insights_warehouse_id: The operations_insights_warehouse_id of this OperationsInsightsWarehouseUser.
        :type: str
        """
        self._operations_insights_warehouse_id = operations_insights_warehouse_id

    @property
    def id(self):
        """
        **[Required]** Gets the id of this OperationsInsightsWarehouseUser.
        Hub User OCID

        :return: The id of this OperationsInsightsWarehouseUser.
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """
        Sets the id of this OperationsInsightsWarehouseUser.
        Hub User OCID

        :param id: The id of this OperationsInsightsWarehouseUser.
        :type: str
        """
        self._id = id

    @property
    def compartment_id(self):
        """
        **[Required]** Gets the compartment_id of this OperationsInsightsWarehouseUser.
        The `OCID`__ of the compartment.

        __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm

        :return: The compartment_id of this OperationsInsightsWarehouseUser.
        :rtype: str
        """
        return self._compartment_id

    @compartment_id.setter
    def compartment_id(self, compartment_id):
        """
        Sets the compartment_id of this OperationsInsightsWarehouseUser.
        The `OCID`__ of the compartment.

        __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm

        :param compartment_id: The compartment_id of this OperationsInsightsWarehouseUser.
        :type: str
        """
        self._compartment_id = compartment_id

    @property
    def name(self):
        """
        **[Required]** Gets the name of this OperationsInsightsWarehouseUser.
        Username for schema which would have access to AWR Data, Enterprise Manager Data and Operations Insights OPSI Hub.

        :return: The name of this OperationsInsightsWarehouseUser.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """
        Sets the name of this OperationsInsightsWarehouseUser.
        Username for schema which would have access to AWR Data, Enterprise Manager Data and Operations Insights OPSI Hub.

        :param name: The name of this OperationsInsightsWarehouseUser.
        :type: str
        """
        self._name = name

    @property
    def connection_password(self):
        """
        Gets the connection_password of this OperationsInsightsWarehouseUser.
        User provided connection password for the AWR Data, Enterprise Manager Data and Operations Insights OPSI Hub.

        :return: The connection_password of this OperationsInsightsWarehouseUser.
        :rtype: str
        """
        return self._connection_password

    @connection_password.setter
    def connection_password(self, connection_password):
        """
        Sets the connection_password of this OperationsInsightsWarehouseUser.
        User provided connection password for the AWR Data, Enterprise Manager Data and Operations Insights OPSI Hub.

        :param connection_password: The connection_password of this OperationsInsightsWarehouseUser.
        :type: str
        """
        self._connection_password = connection_password

    @property
    def is_awr_data_access(self):
        """
        **[Required]** Gets the is_awr_data_access of this OperationsInsightsWarehouseUser.
        Indicate whether user has access to AWR data.

        :return: The is_awr_data_access of this OperationsInsightsWarehouseUser.
        :rtype: bool
        """
        return self._is_awr_data_access

    @is_awr_data_access.setter
    def is_awr_data_access(self, is_awr_data_access):
        """
        Sets the is_awr_data_access of this OperationsInsightsWarehouseUser.
        Indicate whether user has access to AWR data.

        :param is_awr_data_access: The is_awr_data_access of this OperationsInsightsWarehouseUser.
        :type: bool
        """
        self._is_awr_data_access = is_awr_data_access

    @property
    def is_em_data_access(self):
        """
        Gets the is_em_data_access of this OperationsInsightsWarehouseUser.
        Indicate whether user has access to EM data.

        :return: The is_em_data_access of this OperationsInsightsWarehouseUser.
        :rtype: bool
        """
        return self._is_em_data_access

    @is_em_data_access.setter
    def is_em_data_access(self, is_em_data_access):
        """
        Sets the is_em_data_access of this OperationsInsightsWarehouseUser.
        Indicate whether user has access to EM data.

        :param is_em_data_access: The is_em_data_access of this OperationsInsightsWarehouseUser.
        :type: bool
        """
        self._is_em_data_access = is_em_data_access

    @property
    def is_opsi_data_access(self):
        """
        Gets the is_opsi_data_access of this OperationsInsightsWarehouseUser.
        Indicate whether user has access to OPSI data.

        :return: The is_opsi_data_access of this OperationsInsightsWarehouseUser.
        :rtype: bool
        """
        return self._is_opsi_data_access

    @is_opsi_data_access.setter
    def is_opsi_data_access(self, is_opsi_data_access):
        """
        Sets the is_opsi_data_access of this OperationsInsightsWarehouseUser.
        Indicate whether user has access to OPSI data.

        :param is_opsi_data_access: The is_opsi_data_access of this OperationsInsightsWarehouseUser.
        :type: bool
        """
        self._is_opsi_data_access = is_opsi_data_access

    @property
    def freeform_tags(self):
        """
        Gets the freeform_tags of this OperationsInsightsWarehouseUser.
        Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
        Example: `{\"bar-key\": \"value\"}`

        :return: The freeform_tags of this OperationsInsightsWarehouseUser.
        :rtype: dict(str, str)
        """
        return self._freeform_tags

    @freeform_tags.setter
    def freeform_tags(self, freeform_tags):
        """
        Sets the freeform_tags of this OperationsInsightsWarehouseUser.
        Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
        Example: `{\"bar-key\": \"value\"}`

        :param freeform_tags: The freeform_tags of this OperationsInsightsWarehouseUser.
        :type: dict(str, str)
        """
        self._freeform_tags = freeform_tags

    @property
    def defined_tags(self):
        """
        Gets the defined_tags of this OperationsInsightsWarehouseUser.
        Defined tags for this resource. Each key is predefined and scoped to a namespace.
        Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`

        :return: The defined_tags of this OperationsInsightsWarehouseUser.
        :rtype: dict(str, dict(str, object))
        """
        return self._defined_tags

    @defined_tags.setter
    def defined_tags(self, defined_tags):
        """
        Sets the defined_tags of this OperationsInsightsWarehouseUser.
        Defined tags for this resource. Each key is predefined and scoped to a namespace.
        Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`

        :param defined_tags: The defined_tags of this OperationsInsightsWarehouseUser.
        :type: dict(str, dict(str, object))
        """
        self._defined_tags = defined_tags

    @property
    def system_tags(self):
        """
        Gets the system_tags of this OperationsInsightsWarehouseUser.
        System tags for this resource. Each key is predefined and scoped to a namespace.
        Example: `{\"orcl-cloud\": {\"free-tier-retained\": \"true\"}}`

        :return: The system_tags of this OperationsInsightsWarehouseUser.
        :rtype: dict(str, dict(str, object))
        """
        return self._system_tags

    @system_tags.setter
    def system_tags(self, system_tags):
        """
        Sets the system_tags of this OperationsInsightsWarehouseUser.
        System tags for this resource. Each key is predefined and scoped to a namespace.
        Example: `{\"orcl-cloud\": {\"free-tier-retained\": \"true\"}}`

        :param system_tags: The system_tags of this OperationsInsightsWarehouseUser.
        :type: dict(str, dict(str, object))
        """
        self._system_tags = system_tags

    @property
    def time_created(self):
        """
        **[Required]** Gets the time_created of this OperationsInsightsWarehouseUser.
        The time at which the resource was first created. An RFC3339 formatted datetime string

        :return: The time_created of this OperationsInsightsWarehouseUser.
        :rtype: datetime
        """
        return self._time_created

    @time_created.setter
    def time_created(self, time_created):
        """
        Sets the time_created of this OperationsInsightsWarehouseUser.
        The time at which the resource was first created. An RFC3339 formatted datetime string

        :param time_created: The time_created of this OperationsInsightsWarehouseUser.
        :type: datetime
        """
        self._time_created = time_created

    @property
    def time_updated(self):
        """
        Gets the time_updated of this OperationsInsightsWarehouseUser.
        The time at which the resource was last updated. An RFC3339 formatted datetime string

        :return: The time_updated of this OperationsInsightsWarehouseUser.
        :rtype: datetime
        """
        return self._time_updated

    @time_updated.setter
    def time_updated(self, time_updated):
        """
        Sets the time_updated of this OperationsInsightsWarehouseUser.
        The time at which the resource was last updated. An RFC3339 formatted datetime string

        :param time_updated: The time_updated of this OperationsInsightsWarehouseUser.
        :type: datetime
        """
        self._time_updated = time_updated

    @property
    def lifecycle_state(self):
        """
        **[Required]** Gets the lifecycle_state of this OperationsInsightsWarehouseUser.
        Possible lifecycle states

        Allowed values for this property are: "CREATING", "UPDATING", "ACTIVE", "DELETING", "DELETED", "FAILED", 'UNKNOWN_ENUM_VALUE'.
        Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.

        :return: The lifecycle_state of this OperationsInsightsWarehouseUser.
        :rtype: str
        """
        return self._lifecycle_state

    @lifecycle_state.setter
    def lifecycle_state(self, lifecycle_state):
        """
        Sets the lifecycle_state of this OperationsInsightsWarehouseUser.
        Possible lifecycle states

        :param lifecycle_state: The lifecycle_state of this OperationsInsightsWarehouseUser.
        :type: str
        """
        allowed_values = ["CREATING", "UPDATING", "ACTIVE", "DELETING", "DELETED", "FAILED"]
        if not value_allowed_none_or_none_sentinel(lifecycle_state, allowed_values):
            lifecycle_state = 'UNKNOWN_ENUM_VALUE'
        self._lifecycle_state = lifecycle_state

    @property
    def lifecycle_details(self):
        """
        Gets the lifecycle_details of this OperationsInsightsWarehouseUser.
        A message describing the current state in more detail. For example, can be used to provide actionable information for a resource in Failed state.

        :return: The lifecycle_details of this OperationsInsightsWarehouseUser.
        :rtype: str
        """
        return self._lifecycle_details

    @lifecycle_details.setter
    def lifecycle_details(self, lifecycle_details):
        """
        Sets the lifecycle_details of this OperationsInsightsWarehouseUser.
        A message describing the current state in more detail. For example, can be used to provide actionable information for a resource in Failed state.

        :param lifecycle_details: The lifecycle_details of this OperationsInsightsWarehouseUser.
        :type: str
        """
        self._lifecycle_details = lifecycle_details

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        if other is None:
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
| StarcoderdataPython |
3332493 | import psycopg2
# Open a connection to the local GymAutomationDB PostgreSQL database.
# NOTE(review): the password literal was scrubbed to a '<PASSWORD>' placeholder
# by the dataset export; a real credential (ideally read from an environment
# variable) must be restored or this connect call will fail.
con = psycopg2.connect(
    host = "localhost",
    database = "GymAutomationDB",
    user = "postgres",
    password = "<PASSWORD>"
)
1675215 | <filename>demos/demo_ondisk_ivf.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
import numpy as np
import faiss
from faiss_contrib.ondisk import merge_ondisk
#################################################################
# Small I/O functions
#################################################################
def ivecs_read(fname):
    """Read a .ivecs file: each vector is stored as an int32 dimension d
    followed by d int32 components. Returns an (n, d) int32 array."""
    raw = np.fromfile(fname, dtype='int32')
    dim = raw[0]
    vectors = raw.reshape(-1, dim + 1)
    # Drop the leading dimension column; copy so the result owns its memory.
    return vectors[:, 1:].copy()
def fvecs_read(fname):
    # .fvecs has the same layout as .ivecs; reinterpret the payload bytes
    # as float32 instead of int32.
    return ivecs_read(fname).view('float32')
#################################################################
# Main program
#################################################################
# Pipeline driver: the stage number (argv[1]) selects one step of building
# and querying an on-disk IVF index over the SIFT1M dataset:
#   0    -> train the quantizer, 1-4 -> add one quarter of the database,
#   5    -> merge the four blocks on disk, 6 -> search from disk.
stage = int(sys.argv[1])
tmpdir = '/tmp/'
if stage == 0:
    # train the index
    xt = fvecs_read("sift1M/sift_learn.fvecs")
    index = faiss.index_factory(xt.shape[1], "IVF4096,Flat")
    print("training index")
    index.train(xt)
    print("write " + tmpdir + "trained.index")
    faiss.write_index(index, tmpdir + "trained.index")
if 1 <= stage <= 4:
    # add 1/4 of the database to 4 independent indexes
    bno = stage - 1
    xb = fvecs_read("sift1M/sift_base.fvecs")
    i0, i1 = int(bno * xb.shape[0] / 4), int((bno + 1) * xb.shape[0] / 4)
    index = faiss.read_index(tmpdir + "trained.index")
    print("adding vectors %d:%d" % (i0, i1))
    index.add_with_ids(xb[i0:i1], np.arange(i0, i1))
    print("write " + tmpdir + "block_%d.index" % bno)
    faiss.write_index(index, tmpdir + "block_%d.index" % bno)
if stage == 5:
    print('loading trained index')
    # construct the output index
    index = faiss.read_index(tmpdir + "trained.index")
    block_fnames = [
        tmpdir + "block_%d.index" % bno
        for bno in range(4)
    ]
    # Merge the inverted lists of the four block indexes into one mmapped file.
    merge_ondisk(index, block_fnames, tmpdir + "merged_index.ivfdata")
    print("write " + tmpdir + "populated.index")
    faiss.write_index(index, tmpdir + "populated.index")
if stage == 6:
    # perform a search from disk
    print("read " + tmpdir + "populated.index")
    index = faiss.read_index(tmpdir + "populated.index")
    index.nprobe = 16
    # load query vectors and ground-truth
    xq = fvecs_read("sift1M/sift_query.fvecs")
    gt = ivecs_read("sift1M/sift_groundtruth.ivecs")
    D, I = index.search(xq, 5)
    # Fraction of queries whose top result matches the ground-truth neighbor.
    recall_at_1 = (I[:, :1] == gt[:, :1]).sum() / float(xq.shape[0])
    print("recall@1: %.3f" % recall_at_1)
| StarcoderdataPython |
3329952 | import sys
import xml.etree.ElementTree as ET
import json
import os
import re
def get_parent_folder():
    """Print and return the absolute path of the directory one level above
    the directory containing this script."""
    dir_path = os.path.dirname(os.path.realpath(__file__))
    # os.path.dirname replaces the hand-rolled '/'-split/pop/join, which
    # returned '' for a top-level directory and broke on non-'/' separators.
    parent_path = os.path.dirname(dir_path)
    # Parenthesized single-argument print is valid in both Python 2 and 3
    # (the original bare print statement was Python-2 only).
    print(parent_path)
    return parent_path
# Make the parent folder importable so the xml_utilities module is found.
parent_path = get_parent_folder()
sys.path.insert(0, parent_path)
import xml_utilities as xml
def main():
    """Open the XML file named on the command line, clean it, and print the
    type/number attributes of every book element (text/body/div1)."""
    try:
        if len(sys.argv) > 1:
            filePath = sys.argv[1]
            f = open(filePath, 'r')
            fileName = os.path.basename(f.name)
            g = xml.clean_file(f)
            source = xml.get_XML_root(open(g, 'r'))
            for book in source.findall('text/body/div1'):
                # Parenthesized single-argument print works identically under
                # Python 2 and 3 (the bare print statement was py2-only).
                print(book.get('type'))
                print(book.get('n'))
            # Close the input file here: the original close call only lived
            # inside the disabled block below, so the handle leaked.
            f.close()
            '''
            newTextElement = newRoot.find("body").find("text")
            updateNewXMLHeader(source, newRoot)
            # Start conversion process
            iterateBooks(source, newTextElement)
            # Write the resulting XML to file
            newFileName = "output/gk_" + fileName
            newFile.write(newFileName, encoding="UTF-8", xml_declaration=True)
            f.close()
            # Reformat the XML result
            reformatXMLFile(newFileName, ["<div1", "<p"])
            print ("Output file: " + newFileName)
            '''
        print ("Conversion complete")
    except (IOError, OSError):
        # Opening a missing/unreadable file raises IOError/OSError;
        # the original caught RuntimeError, which never occurs here.
        print ("There was an error opening the source file")
# Script entry point.
if __name__ == "__main__":
    main() | StarcoderdataPython |
158149 | # Copyright 2017 Telstra Open Source
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from kilda.probe.entity.message import create_dump_state
def test_validate_message_request_format():
    """Serialized dump-state command must match the golden CtrlRequest.json."""
    with open('./kilda/probe/test/res/CtrlRequest.json') as f:
        etalon_request = json.load(f)
    dump_state_command = create_dump_state(etalon_request['correlation_id'],
                                           etalon_request['route'])
    # Pin the timestamp to the golden value so the comparison is deterministic.
    dump_state_command._timestamp = etalon_request['timestamp']
    dump_state_command_dict = json.loads(
        dump_state_command.serialize().decode("utf-8"))
    assert etalon_request == dump_state_command_dict, \
        'CtrlRequest.json format invalid'
| StarcoderdataPython |
157398 | <filename>SDWLE/cards_copy/spells/rogue.py
import copy
from SDWLE.cards.base import SpellCard
from SDWLE.tags.action import AddCard
from SDWLE.tags.base import Effect, BuffUntil, Buff, AuraUntil, ActionTag
from SDWLE.tags.condition import IsSpell
from SDWLE.tags.event import TurnStarted, TurnEnded, SpellCast
from SDWLE.tags.selector import PlayerSelector, CardSelector
from SDWLE.tags.status import Stealth, ChangeAttack, ManaChange
import SDWLE.targeting
from SDWLE.constants import CHARACTER_CLASS, CARD_RARITY
class Assassinate(SpellCard):
    """Destroy an enemy minion."""
    def __init__(self):
        super().__init__("Assassinate", 5, CHARACTER_CLASS.ROGUE, CARD_RARITY.FREE,
                         target_func=SDWLE.targeting.find_enemy_minion_spell_target)
    def use(self, player, game):
        super().use(player, game)
        # Kill the targeted minion outright, crediting this card as the source.
        self.target.die(self)
class Backstab(SpellCard):
    """Deal 2 damage to an undamaged minion (0 mana)."""
    def __init__(self):
        super().__init__("Backstab", 0, CHARACTER_CLASS.ROGUE, CARD_RARITY.FREE,
                         target_func=SDWLE.targeting.find_minion_spell_target,
                         filter_func=lambda target: target.health == target.calculate_max_health() and
                         target.spell_targetable())
    def use(self, player, game):
        super().use(player, game)
        # Spell-damage bonuses apply on top of the base 2 damage.
        self.target.damage(player.effective_spell_damage(2), self)
class Betrayal(SpellCard):
    """Force an enemy minion to deal its attack damage to the minions on
    either side of it; the target itself takes no damage."""
    def __init__(self):
        super().__init__("Betrayal", 2, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON,
                         target_func=SDWLE.targeting.find_enemy_minion_spell_target)
    def use(self, player, game):
        super().use(player, game)
        left_minion = None
        right_minion = None
        index = self.target.index
        if index > 0:
            left_minion = game.other_player.minions[index - 1]
        # The min(..., 6) guard keeps the lookup inside the 7-minion board.
        if index < min(len(game.other_player.minions) - 1, 6):
            right_minion = game.other_player.minions[index + 1]
        # Temporarily make the target immune so triggered effects from the
        # neighbours' damage cannot harm it, then restore its previous state.
        original_immune = self.target.immune
        self.target.immune = True
        if left_minion is not None:
            left_minion.damage(self.target.calculate_attack(), self.target)
        if right_minion is not None:
            right_minion.damage(self.target.calculate_attack(), self.target)
        self.target.immune = original_immune
class BladeFlurry(SpellCard):
    """Destroy your weapon and deal its damage to all enemy characters."""
    def __init__(self):
        super().__init__("Blade Flurry", 2, CHARACTER_CLASS.ROGUE, CARD_RARITY.RARE)
    def use(self, player, game):
        super().use(player, game)
        if player.weapon is not None:
            # Yes, this card is affected by spell damage cards.
            # Source: http://www.hearthhead.com/card=1064/blade-flurry#comments:id=1927317
            attack_power = player.effective_spell_damage(player.hero.calculate_attack())
            player.weapon.destroy()
            # Iterate a copy: minions may die and be removed mid-loop.
            for minion in copy.copy(game.other_player.minions):
                minion.damage(attack_power, self)
            game.other_player.hero.damage(attack_power, self)
class ColdBlood(SpellCard):
    """Give a minion +2 Attack; as a combo (a card already played this
    turn), give +4 Attack instead."""
    def __init__(self):
        super().__init__("Cold Blood", 1, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON,
                         target_func=SDWLE.targeting.find_minion_spell_target)
    def use(self, player, game):
        super().use(player, game)
        # Combo check: any earlier card this turn upgrades the buff.
        bonus = 4 if player.cards_played > 0 else 2
        self.target.change_attack(bonus)
class Conceal(SpellCard):
    """Give your minions Stealth until the start of your next turn."""
    def __init__(self):
        super().__init__("Conceal", 1, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON)
    def use(self, player, game):
        super().use(player, game)
        # Only minions not already stealthed receive the temporary buff.
        for minion in player.minions:
            if not minion.stealth:
                minion.add_buff(BuffUntil(Stealth(), TurnStarted()))
class DeadlyPoison(SpellCard):
    """Give your weapon +2 Attack; playable only with a weapon equipped."""
    def __init__(self):
        super().__init__("Deadly Poison", 1, CHARACTER_CLASS.ROGUE, CARD_RARITY.FREE)
    def use(self, player, game):
        super().use(player, game)
        # Raise the weapon's base attack and mirror it on the hero this turn.
        player.weapon.base_attack += 2
        player.hero.change_temp_attack(2)
    def can_use(self, player, game):
        return super().can_use(player, game) and player.weapon is not None
class Eviscerate(SpellCard):
    """Deal 2 damage; as a combo, deal 4 damage instead."""
    def __init__(self):
        super().__init__("Eviscerate", 2, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON,
                         target_func=SDWLE.targeting.find_spell_target)
    def use(self, player, game):
        super().use(player, game)
        # Combo check, then apply any spell-damage bonus on top.
        base_damage = 4 if player.cards_played > 0 else 2
        self.target.damage(player.effective_spell_damage(base_damage), self)
class FanOfKnives(SpellCard):
    """Deal 1 damage to all enemy minions and draw a card."""
    def __init__(self):
        super().__init__("Fan of Knives", 3, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON)
    def use(self, player, game):
        super().use(player, game)
        # Iterate a copy: minions may die and be removed mid-loop.
        for minion in copy.copy(game.other_player.minions):
            minion.damage(player.effective_spell_damage(1), self)
        player.draw()
class Headcrack(SpellCard):
    """Deal 2 damage to the enemy hero; combo: return this card to your
    hand at the end of the turn."""
    def __init__(self):
        super().__init__("Headcrack", 3, CHARACTER_CLASS.ROGUE, CARD_RARITY.RARE)
    def use(self, player, game):
        super().use(player, game)
        game.other_player.hero.damage(player.effective_spell_damage(2), self)
        # Combo: schedule the card to be re-added when the turn ends.
        if player.cards_played > 0:
            player.add_effect(Effect(TurnEnded(), ActionTag(AddCard(self), PlayerSelector())))
class Preparation(SpellCard):
    """The next spell you cast this turn costs (3) less."""
    def __init__(self):
        super().__init__("Preparation", 0, CHARACTER_CLASS.ROGUE, CARD_RARITY.EPIC)
    def use(self, player, game):
        super().use(player, game)
        # The discount aura applies to spell cards and expires on the next cast.
        player.add_aura(AuraUntil(ManaChange(-3), CardSelector(condition=IsSpell()), SpellCast()))
class Sap(SpellCard):
    """Return an enemy minion to its owner's hand."""
    def __init__(self):
        super().__init__("Sap", 2, CHARACTER_CLASS.ROGUE, CARD_RARITY.FREE,
                         target_func=SDWLE.targeting.find_enemy_minion_spell_target)
    def use(self, player, game):
        super().use(player, game)
        self.target.bounce()
class Shadowstep(SpellCard):
    """Return a friendly minion to your hand; it costs (3) less."""
    def __init__(self):
        super().__init__("Shadowstep", 0, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON,
                         target_func=SDWLE.targeting.find_friendly_minion_spell_target)
    def use(self, player, game):
        super().use(player, game)
        self.target.bounce()
        # Discount buff is attached to the card now back in hand.
        self.target.card.add_buff(Buff(ManaChange(-3)))
class Shiv(SpellCard):
    """Deal 1 damage and draw a card."""
    def __init__(self):
        super().__init__("Shiv", 2, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON,
                         target_func=SDWLE.targeting.find_spell_target)
    def use(self, player, game):
        super().use(player, game)
        self.target.damage(player.effective_spell_damage(1), self)
        player.draw()
class SinisterStrike(SpellCard):
    """Deal 3 damage to the enemy hero."""
    def __init__(self):
        super().__init__("Sinister Strike", 1, CHARACTER_CLASS.ROGUE, CARD_RARITY.FREE)
    def use(self, player, game):
        super().use(player, game)
        # Spell-damage bonuses apply on top of the base 3 damage.
        damage = player.effective_spell_damage(3)
        game.other_player.hero.damage(damage, self)
class Sprint(SpellCard):
    """Draw 4 cards."""
    def __init__(self):
        super().__init__("Sprint", 7, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON)
    def use(self, player, game):
        super().use(player, game)
        for _ in range(4):
            player.draw()
class Vanish(SpellCard):
    """Return all minions on both sides to their owners' hands."""
    def __init__(self):
        super().__init__("Vanish", 6, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON)
    def use(self, player, game):
        super().use(player, game)
        targets = copy.copy(game.other_player.minions)
        targets.extend(player.minions)
        # Minions are returned to a player's hand in the order in which they were played.
        # Source: http://www.hearthhead.com/card=196/vanish#comments:id=1908549
        for minion in sorted(targets, key=lambda m: m.born):
            minion.bounce()
class TinkersSharpswordOil(SpellCard):
    """Give your weapon +3 Attack; combo: also give a random friendly
    minion +3 Attack. Playable only with a weapon equipped."""
    def __init__(self):
        super().__init__("Tinker's Sharpsword Oil", 4, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON)
    def use(self, player, game):
        super().use(player, game)
        player.weapon.base_attack += 3
        player.hero.change_temp_attack(3)
        # Combo: pick a random friendly minion for the secondary buff.
        if player.cards_played > 0:
            targets = SDWLE.targeting.find_friendly_minion_battlecry_target(player.game, lambda x: x)
            if targets is not None:
                target = player.game.random_choice(targets)
                target.add_buff(Buff(ChangeAttack(3)))
    def can_use(self, player, game):
        return super().can_use(player, game) and player.weapon is not None
class Sabotage(SpellCard):
    """Destroy a random enemy minion; combo: also destroy the enemy weapon."""
    def __init__(self):
        super().__init__("Sabotage", 4, CHARACTER_CLASS.ROGUE, CARD_RARITY.EPIC)
    def use(self, player, game):
        super().use(player, game)
        targets = SDWLE.targeting.find_enemy_minion_battlecry_target(player.game, lambda x: True)
        target = game.random_choice(targets)
        target.die(None)
        # Resolve pending deaths/deathrattles before the combo check.
        game.check_delayed()
        if player.cards_played > 0 and game.other_player.weapon is not None:
            game.other_player.weapon.destroy()
    def can_use(self, player, game):
        return super().can_use(player, game) and len(game.other_player.minions) >= 1
class GangUp(SpellCard):
    """Choose a minion and shuffle three copies of it into your deck."""
    def __init__(self):
        super().__init__("Gang Up", 2, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON,
                         target_func=SDWLE.targeting.find_minion_spell_target)
    def use(self, player, game):
        super().use(player, game)
        # Instantiate three fresh copies of the targeted minion's card class.
        card_type = type(self.target.card)
        for _ in range(3):
            player.put_back(card_type())
| StarcoderdataPython |
3329908 | <filename>sign.py
#!/usr/bin/env python3
import ecc_ed25519
import sys, getopt
# CLI tool: sign a message with a validator's Ed25519 secret key.
# Default key location can be overridden with -k/--secretkey.
msg = ""
secret_key_path = "/etc/casper/validator_keys/secret_key.pem"
try:
    opts, args = getopt.getopt(sys.argv[1:],"hm:k:",["message=","secretkey="])
except getopt.GetoptError:
    print('sign.py -m YOURMESSAGE -k PATH-TO-YOUR-SECRET-KEY')
    sys.exit(2)
for opt, arg in opts:
    if opt == '-h':
        print('sign.py -m YOURMESSAGE -k PATH-TO-YOUR-SECRET-KEY')
        sys.exit()
    elif opt in ("-m", "--message"):
        msg = arg
    elif opt in ("-k", "--secretkey"):
        secret_key_path = arg
if msg == "":
    print("Message can't be empty!")
    sys.exit()
# Sign the UTF-8 encoded message with the PEM-encoded secret key.
msg_as_bytes = str.encode(msg)
try:
    signature = ecc_ed25519.get_signature_from_pem_file(msg_as_bytes, secret_key_path)
except FileNotFoundError:
    print("ERROR: Couldn't access your private key at this location: ", secret_key_path)
    print("Please make sure your secret_key.pem file is at the given location and is accessible by the current user.")
    print("If you have your key at a different location, you can define its path by using the -k parameter.")
    print("Usage: sign.py -m YOURMESSAGE -k PATH-TO-YOUR-SECRET-KEY")
    sys.exit()
encoded_signature = signature.hex()
# Get public key hex from the secret PEM file for informational purposes
public_key_hex = ecc_ed25519.get_public_key_hex_from_pem_file(secret_key_path)
# Add prefix
public_key_hex = "01" + public_key_hex
print("Public Key:\n", public_key_hex)
print("Message:\n", msg)
print("Signature:\n", encoded_signature) | StarcoderdataPython |
3212543 | <filename>sde_module/mbar.py<gh_stars>0
# -----------------------------------------------------------------------------
# mbar.py --- widget class for SDE Tool
# -----------------------------------------------------------------------------
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from . import utils
# =============================================================================
# MenuBar
# menubar class (template)
# =============================================================================
class MenuBar(Gtk.Frame):
    """Base menubar widget: an etched Gtk.Frame wrapping a horizontal Gtk.Box."""
    def __init__(self):
        Gtk.Frame.__init__(self)
        self.set_shadow_type(Gtk.ShadowType.ETCHED_IN)
        self.box = Gtk.Box()
        self.add(self.box)
    # -------------------------------------------------------------------------
    #  get_box
    #    get container instance for layouting widgets on it
    #
    #  argument:
    #    (none)
    #
    #  return
    #    Gtk.Box() layout instance
    # -------------------------------------------------------------------------
    def get_box(self):
        return self.box
# -----------------------------------------------------------------------------
# menubar_button
# button class for menubar class
# -----------------------------------------------------------------------------
class menubar_button(Gtk.Button):
    """Image button for menubar classes; the icon is resolved via utils.image."""
    def __init__(self, name, image, tooltip=''):
        Gtk.Button.__init__(self, name=name)
        #self.add(utils.img().get_image(image))
        self.set_image(utils.image(image))
        self.set_tooltip_text(tooltip)
# =============================================================================
# implementation
# =============================================================================
# -----------------------------------------------------------------------------
# main
# menubar class for main panel of SDE Tool
# -----------------------------------------------------------------------------
class main(MenuBar):
    """Menubar for the main panel of SDE Tool."""
    def __init__(self):
        MenuBar.__init__(self)
        box = self.get_box()
        # Left-aligned buttons: application config and add-supplier.
        self.but_config = menubar_button(name='Button', image='config', tooltip='App Config')
        box.pack_start(self.but_config, expand=False, fill=True, padding=0)
        self.but_supplier = menubar_button(name='Button', image='add', tooltip='Add Supplier')
        box.pack_start(self.but_supplier, expand=False, fill=True, padding=0)
        # Right-aligned buttons: exit and about.
        self.but_exit = menubar_button(name='Button', image='exit', tooltip='Exit this app')
        box.pack_end(self.but_exit, expand=False, fill=True, padding=0)
        self.but_info = menubar_button(name='Button', image='info', tooltip='About this app')
        box.pack_end(self.but_info, expand=False, fill=True, padding=0)
    def get_obj(self, name_image):
        """Return the button widget registered for *name_image* (None if unknown)."""
        buttons = {
            'config': self.but_config,
            'add': self.but_supplier,
            'exit': self.but_exit,
            'info': self.but_info,
        }
        return buttons.get(name_image)
# -----------------------------------------------------------------------------
# sub_add
# menubar class for sub panel, add button only
# -----------------------------------------------------------------------------
class sub_add(MenuBar):
    """Menubar for a sub panel containing only an 'add' button."""
    def __init__(self):
        MenuBar.__init__(self)
        box = self.get_box()
        # add supplier button
        self.but_add = menubar_button(name='Button', image='add')
        box.pack_start(self.but_add, expand=False, fill=True, padding=0)
    # -------------------------------------------------------------------------
    #  get_obj
    #    get object instance of button
    #
    #  argument:
    #    image : image name of button
    # -------------------------------------------------------------------------
    def get_obj(self, name_image):
        if name_image == 'add':
            return self.but_add
class spc(MenuBar):
    """Menubar for the SPC panel: file-open on the left, exit on the right."""
    def __init__(self):
        MenuBar.__init__(self)
        box = self.get_box()
        # config button
        self.but_folder = menubar_button(name='Button', image='folder', tooltip='open SPC file')
        box.pack_start(self.but_folder, expand=False, fill=True, padding=0)
        # exit button
        self.but_exit = menubar_button(name='Button', image='exit', tooltip='Exit this app')
        box.pack_end(self.but_exit, expand=False, fill=True, padding=0)
    # -------------------------------------------------------------------------
    #  get_obj
    #    get object instance of button
    #
    #  argument:
    #    image : image name of button
    # -------------------------------------------------------------------------
    def get_obj(self, name_image):
        if name_image == 'folder':
            return self.but_folder
        if name_image == 'exit':
            return self.but_exit
# ---
# END OF PROGRAM
| StarcoderdataPython |
137275 | <gh_stars>0
# This code is all CDS code
# Author: <NAME> (<EMAIL>)
import collections
import pandas as pd
import pickle
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import KFold
from scipy.stats import pearsonr
from tqdm import *
# Number of folds used in every cross-validation run.
K_FOLD_NUMBER = 5
# Function to evaluate the performance via mse, rmse, mae, and r2 of a given set of predicted output variables vs the actual ground truth values
def evaluate(x, y):
    """Cross-validate a random-forest regressor on (x, y) and return the
    Pearson correlation between out-of-fold predictions and ground truth.
    *x* and *y* are pandas objects (indexed with .iloc)."""
    predictions_list = list()
    ground_truth_list = list()
    # Intialize k-fold cv (fixed random_state keeps folds reproducible)
    kfold = KFold(K_FOLD_NUMBER, shuffle=True, random_state=0)
    for train, test in kfold.split(x):
        # Split dataset into train-test splits
        train_dataset = x.iloc[train]
        train_labels = y.iloc[train]
        test_dataset = x.iloc[test]
        test_labels = y.iloc[test]
        # Train test model
        model = RandomForestRegressor(n_estimators=100, max_depth=8, min_samples_leaf=5, n_jobs=-1, random_state=0)
        model.fit(train_dataset, train_labels)
        predictions = model.predict(test_dataset)
        # Accumulate out-of-fold predictions across all folds.
        predictions_list += list(predictions)
        ground_truth_list += test_labels.tolist()
    # Return overall performance
    return pearsonr(predictions_list, ground_truth_list)[0]
def main():
    """Load Achilles features/labels and, for each gene that has selected
    features, record the 5-fold CV Pearson correlation of a random forest.
    Results are pickled to results_dict.pkl."""
    # Read input data
    feature_matrix = pd.read_csv("achilles_data/feature_matrix.csv", header=0, index_col=0)
    labels_matrix = pd.read_csv("achilles_data/labels_matrix.csv", header=0, index_col=0)
    # Read metadata
    with open('achilles_data/gene_to_features.pkl', 'rb') as f:
        gene_to_features = pickle.load(f)
    all_genes_df = pd.read_csv('achilles_data/to_run_genes_feature_selection.csv', header=0)
    all_genes = all_genes_df['Gene'].tolist()
    # Run for each gene, 5 fold cv and get correlation across all 5 folds
    gene_to_performance = dict()
    for gene in tqdm(all_genes):
        final_features_to_use = gene_to_features[gene]
        # Genes with no selected features are skipped entirely.
        if len(final_features_to_use) == 0:
            continue
        final_features_to_use = list(final_features_to_use)
        final_feature_matrix = feature_matrix[final_features_to_use]
        correlation = evaluate(final_feature_matrix, labels_matrix[gene])
        gene_to_performance[gene] = correlation
    with open('results_dict.pkl', 'wb') as handle:
        pickle.dump(gene_to_performance, handle)
# Runs immediately on import (no __main__ guard in the original).
main()
1767670 | <gh_stars>0
import smtplib
# Send a plain test message via Gmail's SSL SMTP endpoint (port 465).
# NOTE(review): "sender"/"pass"/"reseiver" are scrubbed placeholders --
# real credentials and addresses must be supplied (not hard-coded).
server = smtplib.SMTP_SSL("smtp.gmail.com", 465)
server.login("sender", "pass")
server.sendmail("reseiver",
                "sender",
                "Test Envyard")
server.quit() | StarcoderdataPython |
1617208 | <reponame>bioidiap/bob.bio.base<gh_stars>10-100
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# <NAME> <<EMAIL>>
import numpy
import scipy.spatial
from .Algorithm import Algorithm
from .. import utils
import logging
logger = logging.getLogger("bob.bio.base")
class Distance (Algorithm):
  """This class defines a simple distance measure between two features.
  Independent of the actual shape, each feature vector is treated as a one-dimensional vector, and the specified distance function is used to compute the distance between the two features.
  If the given ``distance_function`` actually computes a distance, we simply return its negative value (as all :py:class:`Algorithm`'s are supposed to return similarity values).
  If the ``distance_function`` computes similarities, the similarity value is returned unaltered.
  **Parameters:**
  ``distance_function`` : callable
    A function taking two 1D arrays and returning a ``float``
  ``is_distance_function`` : bool
    Set this flag to ``False`` if the given ``distance_function`` computes a similarity value (i.e., higher values are better)
  ``kwargs`` : ``key=value`` pairs
    A list of keyword arguments directly passed to the :py:class:`Algorithm` base class constructor.
  """
  def __init__(
      self,
      distance_function = scipy.spatial.distance.euclidean,
      is_distance_function = True,
      **kwargs  # parameters directly sent to the base class
  ):
    # call base class constructor and register that the algorithm performs a projection
    super(Distance, self).__init__(
        distance_function = str(distance_function),
        is_distance_function = is_distance_function,
        **kwargs
    )
    self.distance_function = distance_function
    # factor turns a distance into a similarity (higher is better).
    self.factor = -1. if is_distance_function else 1.
  def _check_feature(self, feature):
    """Checks that the features are appropriate"""
    if not isinstance(feature, numpy.ndarray):
      raise ValueError("The given feature should be of type numpy.ndarray")
  def enroll(self, enroll_features):
    """enroll(enroll_features) -> model
    Enrolls the model by storing all given input vectors.
    **Parameters:**
    ``enroll_features`` : [:py:class:`numpy.ndarray`]
      The list of projected features to enroll the model from.
    **Returns:**
    ``model`` : 2D :py:class:`numpy.ndarray`
      The enrolled model.
    """
    assert len(enroll_features)
    [self._check_feature(feature) for feature in enroll_features]
    # just store all the features; numpy.vstack requires a real sequence --
    # passing a generator is deprecated (and a TypeError in recent NumPy).
    return numpy.vstack([f.flatten() for f in enroll_features])
  def score(self, model, probe):
    """score(model, probe) -> float
    Computes the distance of the model to the probe using the distance function specified in the constructor.
    **Parameters:**
    ``model`` : 2D :py:class:`numpy.ndarray`
      The model storing all enrollment features
    ``probe`` : :py:class:`numpy.ndarray`
      The probe feature vector
    **Returns:**
    ``score`` : float
      A similarity value between ``model`` and ``probe``
    """
    self._check_feature(probe)
    probe = probe.flatten()
    # return the negative distance (as a similarity measure)
    if model.ndim == 2:
      # we have multiple models, so we use the multiple model scoring
      return self.score_for_multiple_models(model, probe)
    else:
      # single model, single probe (multiple probes have already been handled)
      return self.factor * self.distance_function(model, probe)
  # re-define unused functions, just so that they do not get documented
  def train_projector(*args,**kwargs): raise NotImplementedError()
  def load_projector(*args,**kwargs): pass
  def project(*args,**kwargs): raise NotImplementedError()
  def write_feature(*args,**kwargs): raise NotImplementedError()
  def read_feature(*args,**kwargs): raise NotImplementedError()
  def train_enroller(*args,**kwargs): raise NotImplementedError()
  def load_enroller(*args,**kwargs): pass
| StarcoderdataPython |
1600900 | import atexit
import os
import pickle
import shutil
import tempfile
from contextlib import contextmanager
from datetime import timedelta, datetime
from distutils.errors import DistutilsFileError
from pathlib import Path
from types import FunctionType
from typing import Callable, Any
from lztools import lzglobal
from lztools.enums import FileExtension
# Directory holding expiring cache files; created eagerly at import time.
_expire_dir = lzglobal.storage_location().joinpath("expires")
_expire_dir.mkdir(exist_ok=True, parents=True)
def get_current_path() -> Path:
    # Absolute Path of the process's current working directory.
    return Path(".").absolute()
def get_current_path_str() -> str:
    # String form of get_current_path().
    return str(get_current_path())
def is_escaped(text, index) -> bool:
    """Return True if the character at *index* is escaped, i.e. preceded by
    an odd number of consecutive backslashes."""
    escaped = False
    pos = index
    # Walk left, toggling for each backslash immediately before the character.
    while text[pos - 1] == "\\":
        escaped = not escaped
        pos -= 1
    return escaped
def name_and_ext(path) -> str:
    """Return everything after the final '/' of *path* (file name plus
    extension); if there is no '/', *path* is returned unchanged."""
    parts = str(path).rsplit("/", 1)
    if len(parts) > 1:
        return parts[1]
    return path
def remove_extension(path) -> str:
    """Return *path* as a string with its last '.'-suffix removed
    (unchanged if it contains no dot)."""
    head, *_ = str(path).rsplit(".", 1)
    return head
def name(path) -> str:
    # Base name of *path* with its final extension removed.
    return remove_extension(name_and_ext(path))
from distutils.dir_util import copy_tree
def move_to(path, relative=True):
    """Change the process working directory to *path* and return a
    zero-argument callable that restores the previous working directory.
    If *path* points to a file, its containing directory is used instead.
    """
    if relative:
        path = os.path.realpath(path)
    if os.path.isfile(path):
        path = os.path.dirname(path)
    original = os.getcwd()
    os.chdir(path)
    def move_back():
        os.chdir(original)
    return move_back
@contextmanager
def TempPath(path):
    """Context manager: execute the with-block with *path* as the working
    directory, restoring the previous directory afterwards."""
    move_back = move_to(path)
    try:
        yield
    finally:
        # The original skipped this cleanup when the body raised, leaving
        # the process stranded in *path*.
        move_back()
def on_all(action:Callable[[Path], Any], path:Path, subdirs:bool=True):
    # Yield action(entry) for every entry under *path*, recursing into
    # subdirectories first when *subdirs* is True (directories are yielded too).
    for p in path.absolute().iterdir():
        if p.is_dir() and subdirs:
            yield from on_all(action, p)
        yield action(p)
def on_files(on_files:Callable[[Path], Any], path:Path, subdirs:bool=True):
    # Like on_all, but applies the callback to regular files only and
    # drops None results (directories map to None via the wrapper).
    def do(p:Path):
        if p.is_file():
            return on_files(p)
    for result in on_all(do, path, subdirs):
        if result is not None:
            yield result
def on_dirs(on_dirs:Callable[[Path], Any], path:Path, subdirs:bool=True):
    # Like on_all, but applies the callback to directories only and
    # drops None results (files map to None via the wrapper).
    def do(p:Path):
        if p.is_dir():
            return on_dirs(p)
    for result in on_all(do, path, subdirs):
        if result is not None:
            yield result
def is_empty(path: Path):
    """Return True if *path* is a zero-byte file or a directory with no
    entries.

    :raises Exception: if *path* is neither a file nor a directory.
    """
    if path.is_file():
        return path.stat().st_size == 0
    if path.is_dir():
        # any() stops at the first entry, so large directories stay cheap;
        # this replaces the original manual loop-and-return.
        return not any(True for _ in path.iterdir())
    raise Exception(f"""Unhandled case! path:{type(path)} -> {path}""")
def get_temporary_file() -> Path:
    """Create an empty temporary file that is deleted automatically when the
    interpreter exits, and return its Path."""
    fd, raw_path = tempfile.mkstemp()
    # mkstemp returns an already-open descriptor; the original discarded it,
    # leaking one fd per call.
    os.close(fd)
    tmp_file = Path(raw_path)
    tmp_file.touch()
    def _cleanup():
        try:
            tmp_file.unlink()
        except FileNotFoundError:
            pass  # already removed by the caller; exit hook must not raise
    atexit.register(_cleanup)
    return tmp_file
def copy_directory(src, dst, symlinks=False, ignore=None):
    # Copy each entry of *src* into the existing directory *dst*:
    # subdirectories via copytree, files via copy2 (preserves metadata).
    for item in os.listdir(src):
        s = os.path.join(src, item)
        d = os.path.join(dst, item)
        if os.path.isdir(s):
            shutil.copytree(s, d, symlinks, ignore)
        else:
            shutil.copy2(s, d)
def copy_anything(src, dst):
    # Try a recursive directory copy first; if src is not a directory
    # copy_tree raises DistutilsFileError and we fall back to a file copy.
    try:
        #copy_directory(src, dst)
        copy_tree(str(src), str(dst))
    except DistutilsFileError as exc:
        shutil.copy(src, dst)
def scatter_files(base_path:Path, recursive:bool=True, scatter_name:str="_scatter_", sudo:bool=False):
    """Walk *base_path* and execute every scatter directive file named
    *scatter_name* (see _scatter_now_routine for the line format).

    :param base_path: directory to scan (falsy -> current working directory).
    :param recursive: also descend into subdirectories.
    :param scatter_name: file name marking a scatter directive file.
    :param sudo: copy via ``sudo cp`` instead of an in-process copy.
    """
    if not base_path:
        base_path = get_current_path()
    for item in base_path.iterdir():
        if item.name == scatter_name:
            _scatter_now_routine(item, sudo)
        if recursive and item.is_dir():
            # Bug fix: the recursive call previously dropped scatter_name and
            # sudo, silently reverting them to defaults in subdirectories.
            scatter_files(item, recursive, scatter_name, sudo)
def _scatter_now_routine(scatter_file:Path, sudo:bool=False):
    """Execute one scatter file: each line "SRC -> DST" copies SRC (resolved
    relative to the scatter file's directory) to DST (user-expanded).
    '#'-prefixed lines are comments; a leading "¤ " marker is stripped."""
    text = scatter_file.read_text()
    for line in text.strip().splitlines():
        if "->" not in line:
            continue
        if line.startswith("#"):
            continue
        if line.startswith("¤ "):
            line = line[2:]
        split = line.split("->")
        if not len(split) == 2:
            continue
        path_a:Path = scatter_file.parent.joinpath(split[0].strip())
        path_b = Path(split[1].strip()).expanduser()
        # Imported lazily to avoid a circular import at module load time.
        from lztools import lzglobal
        if lzglobal.settings.verbose:
            print(f"Copying: {path_a.absolute()} -> {path_b.absolute()}")
        if not sudo:
            copy_anything(path_a, path_b)
        else:
            os.system(f"sudo cp {path_a.absolute()} {path_b.absolute()}")
def collect_files(base_path:Path, recursive:bool=True, scatter_name:str="_scatter_", sudo:bool=False):
    """Walk *base_path* and run the collect (reverse-copy) routine for every
    scatter directive file named *scatter_name*.

    :param base_path: directory to scan (falsy -> current working directory).
    :param recursive: also descend into subdirectories.
    :param scatter_name: file name marking a scatter directive file.
    :param sudo: reserved for copying via ``sudo cp``.
    """
    if not base_path:
        base_path = get_current_path()
    for item in base_path.iterdir():
        if item.name == scatter_name:
            _scatter_collect_routine(item, sudo)
        if recursive and item.is_dir():
            # Bug fix: the recursive call previously dropped scatter_name and
            # sudo, silently reverting them to defaults in subdirectories.
            collect_files(item, recursive, scatter_name, sudo)
def _scatter_collect_routine(scatter_file:Path, sudo:bool=False):
    """Reverse of _scatter_now_routine: for every "¤ "-marked line
    "SRC -> DST", report copying DST back to SRC.
    NOTE(review): the actual copy calls are commented out below, so this
    routine currently only prints what it would do -- confirm intent.
    """
    text = scatter_file.read_text()
    for line in text.strip().splitlines():
        if "->" not in line:
            continue
        # Only lines explicitly marked with "¤ " participate in collection.
        if not line.startswith("¤ "):
            continue
        line = line[2:]
        split = line.split("->")
        if not len(split) == 2:
            continue
        path_a:Path = scatter_file.parent.joinpath(split[0].strip())
        path_b = Path(split[1].strip()).expanduser()
        # Imported lazily to avoid a circular import at module load time.
        from lztools import lzglobal
        #if lzglobal.settings.verbose:
        print(f"Copying: {path_b.absolute()} -> {path_a.absolute()}")
        # if not sudo:
        #     copy_anything(path_a, path_b)
        # else:
        #     os.system(f"sudo cp {path_a.absolute()} {path_b.absolute()}")
def _gen_exp_fn(name):
    # Encode the creation timestamp into the file name, '¤'-separated,
    # with the expiring-file extension.
    return f"{name}¤{datetime.now()}.{FileExtension.expiring_file}"
def get_expiring_file(name:str, expires_after:timedelta) -> Path:
    """Return the cache file for *name* in _expire_dir. If an existing file
    is older than *expires_after* it is deleted and replaced by a fresh,
    empty, re-timestamped file; otherwise the existing file is returned."""
    for file in _expire_dir.glob(f"{name}¤*.{FileExtension.expiring_file}"):
        # File names look like "<name>¤<iso timestamp>.<ext>".
        f_name, f_date = file.name.split("¤")
        f_date = datetime.fromisoformat(f_date.split(f".{FileExtension.expiring_file}")[0])
        delta = datetime.now() - f_date
        if delta > expires_after:
            file.unlink()
            file = file.with_name(_gen_exp_fn(name))
            file.touch(exist_ok=True)
        # Only the first matching file is considered.
        return file
    # No cached file yet: create a fresh one.
    file = _expire_dir.joinpath(_gen_exp_fn(name))
    file.touch()
    return file
def get_self_renewing_file(name:str, renew_after:timedelta, renew:FunctionType, pickle_data=True) -> Path:
    """Return cached data for *name*, calling *renew()* to regenerate it when
    the backing file is empty (i.e. freshly expired).
    NOTE(review): the annotated return type is Path, but the function
    actually returns the data; and when pickle_data is False the read path
    still uses pickle.load on a text file -- confirm both with callers.
    """
    file = get_expiring_file(name, expires_after=renew_after)
    if is_empty(file):
        data = renew()
        if pickle_data:
            with file.open("wb") as f:
                pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
        else:
            file.write_text(data)
        return data
    with file.open("rb") as f:
        return pickle.load(f)
| StarcoderdataPython |
1730167 | import numpy as np
from py_diff_stokes_flow.env.env_base import EnvBase
from py_diff_stokes_flow.common.common import ndarray
class FlowAveragerEnv3d(EnvBase):
    """3D Stokes-flow environment with two inlets and two outlets whose
    channel walls are four 11-parameter Bezier curves controlled by an
    8-dimensional design vector. The loss drives both outlet velocities
    toward (0.5, 0, 0), averaging the two inlet flows."""
    def __init__(self, seed, folder):
        np.random.seed(seed)
        cell_nums = (64, 64, 4)
        E = 100
        nu = 0.499
        vol_tol = 1e-2
        edge_sample_num = 2
        EnvBase.__init__(self, cell_nums, E, nu, vol_tol, edge_sample_num, folder)
        # Initialize the parametric shapes.
        self._parametric_shape_info = [ ('bezier', 11), ('bezier', 11), ('bezier', 11), ('bezier', 11) ]
        # Initialize the node conditions.
        self._node_boundary_info = []
        # Vertical extents (fractions of the domain height) of the two
        # inlets (at i = 0) and the two outlets (at i = nx - 1).
        inlet_range = ndarray([
            [0.1, 0.4],
            [0.6, 0.9],
        ])
        outlet_range = ndarray([
            [0.2, 0.4],
            [0.6, 0.8]
        ])
        cx, cy, _ = self.cell_nums()
        nx, ny, nz = self.node_nums()
        inlet_bd = inlet_range * cy
        outlet_bd = outlet_range * cy
        for j in range(ny):
            for k in range(nz):
                # Set the inlet at i = 0.
                # Lower inlet: unit inflow along x; upper inlet: no-flow.
                if inlet_bd[0, 0] < j < inlet_bd[0, 1]:
                    self._node_boundary_info.append(((0, j, k, 0), 1))
                    self._node_boundary_info.append(((0, j, k, 1), 0))
                    self._node_boundary_info.append(((0, j, k, 2), 0))
                if inlet_bd[1, 0] < j < inlet_bd[1, 1]:
                    self._node_boundary_info.append(((0, j, k, 0), 0))
                    self._node_boundary_info.append(((0, j, k, 1), 0))
                    self._node_boundary_info.append(((0, j, k, 2), 0))
        # Set the top and bottom plane.
        for i in range(nx):
            for j in range(ny):
                for k in [0, nz - 1]:
                    self._node_boundary_info.append(((i, j, k, 2), 0))
        # Initialize the interface.
        self._interface_boundary_type = 'free-slip'
        # Other data members.
        self._inlet_range = inlet_range
        self._outlet_range = outlet_range
        self._inlet_bd = inlet_bd
        self._outlet_bd = outlet_bd
    def _variables_to_shape_params(self, x):
        """Map the 8 design variables to the concatenated control points of
        the four Bezier walls; also returns the (analytic) Jacobian."""
        x = ndarray(x).copy().ravel()
        assert x.size == 8
        cx, cy, _ = self._cell_nums
        # Control points of the four walls in normalized [0,1]^2 coordinates;
        # end points are pinned to the inlet/outlet extents.
        lower = ndarray([
            [1, self._outlet_range[0, 0]],
            x[2:4],
            x[:2],
            [0, self._inlet_range[0, 0]],
        ])
        right = ndarray([
            [1, self._outlet_range[1, 0]],
            [x[4], 1 - x[5]],
            x[4:6],
            [1, self._outlet_range[0, 1]],
        ])
        upper = ndarray([
            [0, self._inlet_range[1, 1]],
            [x[0], 1 - x[1]],
            [x[2], 1 - x[3]],
            [1, self._outlet_range[1, 1]],
        ])
        left = ndarray([
            [0, self._inlet_range[0, 1]],
            x[6:8],
            [x[6], 1 - x[7]],
            [0, self._inlet_range[1, 0]],
        ])
        # Scale from normalized coordinates to cell units.
        cxy = ndarray([cx, cy])
        lower *= cxy
        right *= cxy
        upper *= cxy
        left *= cxy
        # The 3-element groups are per-curve trailing parameters
        # (presumably normal offsets/flags consumed by the bezier shape).
        params = np.concatenate([lower.ravel(),
                                 [0, -0.01, 1],
                                 right.ravel(),
                                 [0.01, 0, 1],
                                 upper.ravel(),
                                 [0, 0.01, 1],
                                 left.ravel(),
                                 [-0.01, 0, 1]
                                 ])
        # Jacobian.
        # Hand-written sparse d(params)/d(x); rows index entries of params.
        J = np.zeros((params.size, x.size))
        J[2, 2] = J[3, 3] = 1
        J[4, 0] = J[5, 1] = 1
        J[13, 4] = 1
        J[14, 5] = -1
        J[15, 4] = J[16, 5] = 1
        J[24, 0] = 1
        J[25, 1] = -1
        J[26, 2] = 1
        J[27, 3] = -1
        J[35, 6] = J[36, 7] = 1
        J[37, 6] = 1
        J[38, 7] = -1
        # Account for the cell-unit scaling (x entries scale by cx, y by cy).
        J[:, ::2] *= cx
        J[:, 1::2] *= cy
        return ndarray(params).copy(), ndarray(J).copy()
    def _loss_and_grad_on_velocity_field(self, u):
        """Mean squared deviation of the outlet-face velocities from the
        target (0.5, 0, 0), plus its gradient w.r.t. the velocity field."""
        u_field = self.reshape_velocity_field(u)
        grad = np.zeros(u_field.shape)
        nx, ny, nz = self.node_nums()
        loss = 0
        cnt = 0
        for j in range(ny):
            for k in range(nz):
                # Only nodes on either outlet strip of the i = nx - 1 face count.
                if self._outlet_bd[0, 0] < j < self._outlet_bd[0, 1] or \
                        self._outlet_bd[1, 0] < j < self._outlet_bd[1, 1]:
                    cnt += 1
                    u_diff = u_field[nx - 1, j, k] - ndarray([0.5, 0, 0])
                    loss += u_diff.dot(u_diff)
                    grad[nx - 1, j, k] += 2 * u_diff
        loss /= cnt
        grad /= cnt
        return loss, ndarray(grad).ravel()
    def _color_velocity(self, u):
        # Scalar color value for rendering: velocity magnitude scaled into [0, ~1].
        return float(np.linalg.norm(u) / 3)
    def sample(self):
        # Uniform random design vector within the box bounds.
        return np.random.uniform(low=self.lower_bound(), high=self.upper_bound())
    def lower_bound(self):
        return ndarray([.01, .01, .49, .01, .49, .01, .01, .01])
    def upper_bound(self):
        return ndarray([.49, .49, .99, .49, .99, .49, .49, .49]) | StarcoderdataPython |
1742014 | from twitter import *
import pyttsx
import APIKEYS
''' MYCREDS.txt has the following format:
oauthtokenvalue
oauthsecretvalue
'''
def getTwitterByConfig():
    """Build an authenticated Twitter client from MYCREDS.txt.

    MYCREDS.txt holds the OAuth token on the first line and the OAuth
    secret on the second; consumer key/secret come from APIKEYS.
    """
    token, secret = read_token_file("MYCREDS.txt")
    auth = OAuth(token, secret,
                 APIKEYS.SPOKENTIMELINE_CONSUMERKEY,
                 APIKEYS.SPOKENTIMELINE_CONSUMERSECRET)
    return Twitter(auth=auth)
def printTimeline(timeline):
    """Print each tweet as 'user at time tweeted text'.

    Tweets whose text cannot be encoded for the console are skipped.
    """
    for entry in timeline:
        try:
            message = (entry['user']['screen_name'] + " at " +
                       entry['created_at'] + " tweeted " + entry['text'])
            print(message)
        except UnicodeEncodeError:
            pass
def formatTimeline(timeline):
    """Return each tweet rendered as 'user at time tweeted text'."""
    return [
        entry['user']['screen_name'] + " at " +
        entry['created_at'] + " tweeted " + entry['text']
        for entry in timeline
    ]
def getTimeline(twitter):
    """Fetch the authenticated user's home timeline."""
    statuses = twitter.statuses
    return statuses.home_timeline()
def getSpeechEngine():
    """Initialize and return a pyttsx text-to-speech engine."""
    engine = pyttsx.init()
    return engine
def speakTimeline(engine, timeline):
    """Speak every tweet in *timeline* through the given pyttsx engine.

    engine.say() only queues an utterance; pyttsx requires
    engine.runAndWait() to process the queue, so without it nothing was
    ever actually spoken.
    """
    for tweet in formatTimeline(timeline):
        engine.say(tweet)
    # Block until the queued utterances have been spoken.
    engine.runAndWait()
if __name__ == "__main__":
    # Fetch the home timeline once, print it, then read it aloud.
    t = getTwitterByConfig()
    timeline = getTimeline(t)
    printTimeline(timeline)
    engine = getSpeechEngine()
    speakTimeline(engine, timeline)
198390 | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import pickle
import unittest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
import torch.optim as optim
from torch.nn.parallel import DistributedDataParallel
from vissl.config import AttrDict
from vissl.models import build_model
from vissl.models.heads import DINOHead
from vissl.models.heads.dino_head import DINOHeadFSDP
from vissl.models.trunks.vision_transformer import Block, PatchEmbed
from vissl.utils.fsdp_utils import fsdp_wrapper, is_valid_fsdp_model
from vissl.utils.hydra_config import compose_hydra_configuration, convert_to_attrdict
from vissl.utils.misc import torch_version
from vissl.utils.test_utils import gpu_test, init_distributed_on_file, with_temp_files
class TestVitFSDP(unittest.TestCase):
    """
    ---------------------------------------------------------------------------
    Testing ViT individual blocks
    ---------------------------------------------------------------------------
    """

    @gpu_test(gpu_count=2)
    def test_blocks_fsdp_vs_ddp_convergence(self):
        """Train a small stack of ViT blocks under FSDP and DDP and assert
        identical per-step losses (pickled by each run's rank-0 worker)."""
        with_amp = True
        with with_temp_files(count=2) as file_names:
            self._run_block_training_loop(
                with_fsdp=True, with_amp=with_amp, output_file_name=file_names[0]
            )
            self._run_block_training_loop(
                with_fsdp=False, with_amp=with_amp, output_file_name=file_names[1]
            )
            results = []
            for file_name in file_names:
                with open(file_name, "rb") as f:
                    result = pickle.load(f)
                    results.append(result)
            self.assertEqual(results[0], results[1], "DDP vs FSDP")

    @classmethod
    def _run_block_training_loop(
        cls, with_fsdp: bool, with_amp: bool, output_file_name: str
    ):
        """Spawn 2 workers that run the block training loop and dump losses."""
        with with_temp_files(count=1) as sync_file:
            mp.spawn(
                cls._block_worker,
                (with_fsdp, with_amp, sync_file, output_file_name),
                nprocs=2,
            )

    @staticmethod
    def _block_worker(
        gpu_id: int, with_fsdp: bool, with_amp: bool, sync_file: str, result_file: str
    ):
        """Per-process worker: builds PatchEmbed + Transformer blocks + norm,
        wraps with FSDP or DDP, trains 5 steps, and pickles losses on rank 0."""
        init_distributed_on_file(world_size=2, gpu_id=gpu_id, sync_file=sync_file)
        # Fixed seed so FSDP and DDP runs start from identical weights.
        torch.manual_seed(0)
        torch.backends.cudnn.deterministic = True
        torch.cuda.reset_peak_memory_stats()

        # Create the inputs
        batch_size = 8
        embed_dim = 384
        batch = torch.randn(size=(batch_size, 3, 224, 224)).cuda()

        # Create the model
        num_blocks = 5
        patch_embed = PatchEmbed(embed_dim=embed_dim).cuda()
        blocks = [Block(dim=embed_dim, num_heads=6).cuda() for _ in range(num_blocks)]
        norm = nn.LayerNorm(embed_dim).cuda()

        # Wrap the model with FSDP or DDP
        if with_fsdp:
            fsdp_config = {
                "flatten_parameters": True,
                "mixed_precision": with_amp,
                "fp32_reduce_scatter": False,  # Only makes sense to be True when mixed_precision is True.
                "compute_dtype": torch.float32,
                "bucket_cap_mb": 0,
                "clear_autocast_cache": True,
                "verbose": True,
                "reshard_after_forward": True,
            }
            # Wrap each block individually, then the whole stack (nested FSDP).
            blocks = [fsdp_wrapper(block, **fsdp_config) for block in blocks]
            model = nn.Sequential(patch_embed, *blocks, norm)
            model = fsdp_wrapper(model, **fsdp_config)
        else:
            model = nn.Sequential(patch_embed, *blocks, norm)
            model = DistributedDataParallel(model, device_ids=[gpu_id])

        # Print the model
        if gpu_id == 0:
            print(model)

        # Create the optimizer
        param_groups = [
            {
                "params": model.parameters(),
                "lr": 1e-4,
                "weight_decay": 1e-3,
            }
        ]
        optimizer = optim.AdamW(param_groups)

        # Go through several training loops
        losses = []
        for step in range(5):
            # Setup the AMP context if necessary
            context = contextlib.suppress()
            if with_amp:
                context = torch.cuda.amp.autocast()

            # Forward pass
            with context:
                out = model(batch)
                out = out.mean()

            # Backward pass
            if torch_version() >= (1, 7, 0):
                model.zero_grad(set_to_none=True)
            else:
                model.zero_grad()
            out.backward()
            optimizer.step()

            # Report results and run schedulers
            torch.distributed.all_reduce(out)
            losses.append(out.item())
            # Manually emulate a LR/WD schedule by updating the param group.
            optimizer.param_groups[0].update(
                {
                    "params": model.parameters(),
                    "lr": 1e-4 + step * 1e-4,
                    "weight_decay": 1e-3 + step * 1e-3,
                }
            )

        # Report memory usage
        if gpu_id == 0:
            print(torch.cuda.max_memory_allocated() // 1e6)

        # Dump the list of losses
        if gpu_id == 0:
            print(losses)
            with open(result_file, "wb") as f:
                pickle.dump(losses, f)

    """
    ---------------------------------------------------------------------------
    Testing DINO Head FSDP
    ---------------------------------------------------------------------------
    """

    @gpu_test(gpu_count=2)
    def test_dino_head_fsdp(self):
        """Train DINOHeadFSDP vs DINOHead (DDP) and assert identical losses."""
        with_amp = False
        with with_temp_files(count=2) as file_names:
            self._run_dino_head_loop(
                with_fsdp=True, with_amp=with_amp, output_file_name=file_names[0]
            )
            self._run_dino_head_loop(
                with_fsdp=False, with_amp=with_amp, output_file_name=file_names[1]
            )
            results = []
            for file_name in file_names:
                with open(file_name, "rb") as f:
                    result = pickle.load(f)
                    results.append(result)
            self.assertEqual(results[0], results[1], "DDP vs FSDP")

    @classmethod
    def _run_dino_head_loop(
        cls, with_fsdp: bool, with_amp: bool, output_file_name: str
    ):
        """Spawn 2 workers that run the DINO head training loop."""
        with with_temp_files(count=1) as sync_file:
            mp.spawn(
                cls._dino_head_worker,
                (with_fsdp, with_amp, sync_file, output_file_name),
                nprocs=2,
            )

    @staticmethod
    def _dino_head_worker(
        gpu_id: int, with_fsdp: bool, with_amp, sync_file: str, result_file: str
    ):
        """Per-process worker: trains a tiny DINO head for 5 steps and pickles
        the per-step losses on rank 0."""
        init_distributed_on_file(world_size=2, gpu_id=gpu_id, sync_file=sync_file)
        torch.manual_seed(0)
        torch.backends.cudnn.deterministic = True
        torch.cuda.reset_peak_memory_stats()

        # Create the inputs
        batch_size = 8
        embed_dim = 4
        bottleneck_dim = 5
        num_clusters = 16
        batch = torch.randn(size=(batch_size, embed_dim)).cuda()
        model_config = AttrDict(
            {
                "FSDP_CONFIG": {
                    "flatten_parameters": True,
                    "mixed_precision": with_amp,
                    "fp32_reduce_scatter": False,  # Only makes sense to be True when mixed_precision is True.
                    "compute_dtype": torch.float32,
                    "bucket_cap_mb": 0,
                    "clear_autocast_cache": True,
                    "verbose": True,
                    "reshard_after_forward": True,
                }
            }
        )

        # Create the model
        normalize_last_layer = True
        if with_fsdp:
            model = DINOHeadFSDP(
                model_config=model_config,
                in_dim=embed_dim,
                num_clusters=[num_clusters],
                bottleneck_dim=bottleneck_dim,
                normalize_last_layer=normalize_last_layer,
            ).cuda()
            model = fsdp_wrapper(model, **model_config.FSDP_CONFIG)
        else:
            model = DINOHead(
                model_config=model_config,
                in_dim=embed_dim,
                num_clusters=[num_clusters],
                bottleneck_dim=bottleneck_dim,
                normalize_last_layer=normalize_last_layer,
            ).cuda()
            model = DistributedDataParallel(model, device_ids=[gpu_id])

        # Print the model
        if gpu_id == 0:
            print(model)

        # Create the optimizer
        param_groups = [
            {
                "params": model.parameters(),
                "lr": 1e-4,
                "weight_decay": 1e-3,
            }
        ]
        optimizer = optim.AdamW(param_groups)

        # Go through several training loops
        losses = []
        for step in range(5):
            # Setup the AMP context if necessary
            context = contextlib.suppress()
            if with_amp:
                context = torch.cuda.amp.autocast()

            # Forward pass
            with context:
                out = model(batch)
                loss = out[0].mean()

            # Backward pass
            if torch_version() >= (1, 7, 0):
                model.zero_grad(set_to_none=True)
            else:
                model.zero_grad()
            loss.backward()
            optimizer.step()

            # Report results and run schedulers
            torch.distributed.all_reduce(loss)
            losses.append(loss.item())
            optimizer.param_groups[0].update(
                {
                    "params": model.parameters(),
                    "lr": 1e-4 + step * 1e-4,
                    "weight_decay": 1e-3 + step * 1e-3,
                }
            )

        # Report memory usage
        if gpu_id == 0:
            print(torch.cuda.max_memory_allocated() // 1e6)

        # Dump the list of losses
        if gpu_id == 0:
            print(losses)
            with open(result_file, "wb") as f:
                pickle.dump(losses, f)

    """
    ---------------------------------------------------------------------------
    Testing ViT VISSL end-to-end implementation
    ---------------------------------------------------------------------------
    """

    @staticmethod
    def _create_dino_pretraining_config(
        with_fsdp: bool,
        with_mixed_precision: bool = False,
        with_normalized_prototypes: bool = True,
    ):
        """Build a 1-layer quick_dino config, switching trunk/head/task to
        their FSDP variants when requested."""
        cfg = compose_hydra_configuration(
            [
                "config=test/integration_test/quick_dino",
                "config.SEED_VALUE=0",
            ]
        )
        args, config = convert_to_attrdict(cfg)
        config["MODEL"]["TRUNK"]["VISION_TRANSFORMERS"]["NUM_LAYERS"] = 1
        if with_fsdp:
            config["MODEL"]["TRUNK"]["NAME"] = "vision_transformer_fsdp"
            config["MODEL"]["HEAD"]["PARAMS"][0][0] = "dino_head_fsdp"
            config.TRAINER.TASK_NAME = "self_supervision_fsdp_task"
            config.MODEL.FSDP_CONFIG.mixed_precision = with_mixed_precision
            config.MODEL.FSDP_CONFIG.fp32_reduce_scatter = with_mixed_precision
            config.MODEL.FSDP_CONFIG.compute_dtype = torch.float32
        config.MODEL.HEAD.PARAMS[0][1][
            "normalize_last_layer"
        ] = with_normalized_prototypes
        return config

    @gpu_test(gpu_count=2)
    def test_vit_fsdp_vs_ddp_convergence(self):
        """Train the full VISSL ViT model under FSDP and DDP and assert
        identical per-step losses."""
        with_amp = False
        with with_temp_files(count=2) as file_names:
            self._run_vit_training_loop(
                with_fsdp=True, with_amp=with_amp, output_file_name=file_names[0]
            )
            self._run_vit_training_loop(
                with_fsdp=False, with_amp=with_amp, output_file_name=file_names[1]
            )
            results = []
            for file_name in file_names:
                with open(file_name, "rb") as f:
                    result = pickle.load(f)
                    results.append(result)
            for r0, r1 in zip(results[0], results[1]):
                print(r0, "VS", r1)
            self.assertEqual(results[0], results[1], "DDP vs FSDP")

    @classmethod
    def _run_vit_training_loop(
        cls, with_fsdp: bool, with_amp: bool, output_file_name: str
    ):
        """Spawn 2 workers that run the end-to-end ViT training loop."""
        with with_temp_files(count=1) as sync_file:
            mp.spawn(
                cls._vit_worker,
                (with_fsdp, with_amp, sync_file, output_file_name),
                nprocs=2,
            )

    @classmethod
    def _vit_worker(
        cls,
        gpu_id: int,
        with_fsdp: bool,
        with_amp: bool,
        sync_file: str,
        result_file: str,
    ):
        """Per-process worker: builds the VISSL model from config, trains 2
        steps, and pickles the per-step losses on rank 0."""
        init_distributed_on_file(world_size=2, gpu_id=gpu_id, sync_file=sync_file)
        # NOTE: seeded with gpu_id (per-rank), unlike the other workers.
        torch.manual_seed(gpu_id)
        torch.backends.cudnn.deterministic = True
        torch.cuda.reset_peak_memory_stats()

        # Create the inputs
        batch_size = 8
        batch = torch.randn(size=(batch_size, 3, 224, 224)).cuda()

        # Create the model
        config = cls._create_dino_pretraining_config(with_fsdp=with_fsdp)
        model = build_model(config["MODEL"], config["OPTIMIZER"]).cuda()

        # Build the model with FSDP or DDP
        if with_fsdp:
            model = fsdp_wrapper(model, **config["MODEL"]["FSDP_CONFIG"])
            assert is_valid_fsdp_model(model)
        else:
            model = DistributedDataParallel(model, device_ids=[gpu_id])

        # Print the model
        if gpu_id == 0:
            print(model)

        # Create the optimizer
        param_groups = [
            {
                "params": model.parameters(),
                "lr": 1e-4,
                "weight_decay": 0.0,
            }
        ]
        optimizer = optim.AdamW(param_groups)

        # Go through several training loops
        losses = []
        num_steps = 2
        for step in range(num_steps):
            # Setup the AMP context if necessary
            context = contextlib.suppress()
            if with_amp:
                context = torch.cuda.amp.autocast()

            # Forward pass
            with context:
                out = model(batch)
                out = out[0][0].mean()

            # Backward pass
            if torch_version() >= (1, 7, 0):
                model.zero_grad(set_to_none=True)
            else:
                model.zero_grad()
            out.backward()
            optimizer.step()

            # Report results and run schedulers
            torch.distributed.all_reduce(out)
            losses.append(out.item())
            optimizer.param_groups[0].update(
                {
                    "params": model.parameters(),
                    "lr": 1e-4 + step * 1e-4,
                    "weight_decay": step * 1e-3,
                }
            )

        # Report memory usage
        if gpu_id == 0:
            print(torch.cuda.max_memory_allocated() // 1e6)

        # Dump the list of losses
        if gpu_id == 0:
            print(losses)
            with open(result_file, "wb") as f:
                pickle.dump(losses, f)
| StarcoderdataPython |
1651281 | <filename>Packages/Dead/demo/Script/tutorials/compare_datasets.py<gh_stars>10-100
# Tutorial script (Python 2 / CDAT): compare ERA40 and ERA15 surface air
# temperature samples -- regrid, compute anomalies, and plot/store
# difference, correlation, RMS, trend and variance diagnostics.
# Import modules
import cdms2, cdutil, vcs, cdtime
import string, time, MV2, sys, os
from regrid2 import Regridder
from genutil import statistics
# Open the two sample datasets and inspect their time axes.
file1 = os.path.join(vcs.sample_data, 'era40_tas_sample.nc')
f1 = cdms2.open( file1 )
f1.showvariable()
f1.listdimension()
print f1.getAxis('time').asComponentTime()[0]
# 1990-1-1 0:0:0.0
print f1.getAxis('time').asComponentTime()[-1]
# 1993-12-1 0:0:0.0
file2 = os.path.join(vcs.sample_data, 'era15_tas_sample.nc')
f2 = cdms2.open( file2 )
f2.showvariable()
f2.listdimension()
print f2.getAxis('time').asComponentTime()[0]
# 1989-1-1 0:0:0.0
print f2.getAxis('time').asComponentTime()[-1]
# 1994-2-1 0:0:0.0
# get data with overlapping in overlapping time range
data1 = f1('tas', time = ('1991-1-1','1993-12-1'))
data2 = f2('tas', time = ('1991-1-1','1993-12-1'))
print data1.shape
# (48, 160, 320)
print data2.shape
# (48, 73, 144)
# Regrid the higher-resolution ERA40 data onto the ERA15 grid.
grid1=data1.getGrid()
print grid1
print 'original ERA40 data shape: ',data1.shape
# original ERA40 data shape: (48, 160, 320)
grid2 = data2.getGrid()
print grid2
regridfunc=Regridder(grid1,grid2)
data1=regridfunc(data1)
print 'new ERA40 data shape: ' ,data1.shape
# Compute annual-cycle climatologies over the common period and subtract
# them to obtain anomaly (departure) time series.
cdutil.setTimeBoundsMonthly(data1)
cdutil.setTimeBoundsMonthly(data2)
start_time = cdtime.comptime(1991,1,1)
end_time = cdtime.comptime(1993,12,1)
ac1=cdutil.ANNUALCYCLE.climatology(data1(time=(start_time, end_time, 'cob')))
ac2=cdutil.ANNUALCYCLE.climatology(data2(time=(start_time, end_time, 'cob')))
print ac1
data1=cdutil.ANNUALCYCLE.departures(data1,ref=ac1)
data2=cdutil.ANNUALCYCLE.departures(data2,ref=ac2)
print data1.shape,data2.shape
# Put data1 on data2's axes so the datasets can be differenced directly.
tim = data2.getTime()
lat=data2.getLatitude()
lon=data2.getLongitude()
data1=cdms2.createVariable(data1,axes=[tim,lat,lon],typecode='f',id='tas')
diff=MV2.subtract(data1,data2)
# zonal differences
z_diff=MV2.average(diff,2)
print 'Zonal data shape (before): ',z_diff.shape
z_diff=MV2.transpose(z_diff,(1,0))
# add id to data
z_diff.id='zonal_diff'
print 'Zonal data shape (after): ',z_diff.shape
# global differences
gl_diff=cdutil.averager(diff,axis='xy')
x=vcs.init()
x.setcolormap('default')
fill=x.getisofill('default')
x.plot(z_diff,fill)
x.clear()
x.plot(gl_diff)
cor_t=statistics.correlation(data1,data2,axis='xy')
# temporal correlation map betwen these to time-series
cor_m=statistics.correlation(data1,data2,axis='t')
# temporal rms difference between the two time series
rms=statistics.rms(data1,data2,axis='t')
x.clear()
x.plot(cor_m,fill)
x.clear()
x.plot(cor_t)
x.clear()
x.plot(rms,fill)
# Linear trends per time step; scaled by 120 months to decadal trends.
slope1, intercept1 = statistics.linearregression(data1, axis='t')
slope2, intercept2 = statistics.linearregression(data2, axis='t')
# set the 'id'
slope1.id='linear_slope'
slope2.id='linear_slope'
dec_trnd_diff=(slope1*120.)-(slope2*120.)
dec_trnd_diff.id='decadal_trend_diff'
x.clear()
x.plot(dec_trnd_diff)
# Temporal variances and their ratio.
variance1=statistics.variance(data1)
variance2=statistics.variance(data2)
variance1.id='variance_data1'
variance2.id='variance_data2'
x.clear()
x.plot(variance1,fill)
x.clear()
x.plot(variance2,fill)
f=variance1/variance2
f.id='variance_data1_dividedby_variance_data2'
# Store all diagnostics in a single NetCDF file.
o=cdms2.open('tas_comparison.nc','w')
o.write(f)
o.write(variance1)
o.write(variance2)
o.write(dec_trnd_diff)
o.write(rms)
o.write(z_diff)
o.close()
f1.close()
f2.close()
| StarcoderdataPython |
3318524 | <filename>fdk_client/platform/models/AnnouncementSchema.py
"""Platform Models."""
from marshmallow import fields, Schema
from marshmallow.validate import OneOf
from ..enums import *
from ..models.BaseSchema import BaseSchema
from .ScheduleStartSchema import ScheduleStartSchema
class AnnouncementSchema(BaseSchema):
    """Marshmallow schema for a platform announcement."""

    # Content swagger.json
    # Free-form announcement text.
    announcement = fields.Str(required=False)
    # Optional schedule describing when the announcement starts.
    schedule = fields.Nested(ScheduleStartSchema, required=False)
| StarcoderdataPython |
1668710 | <filename>sanity/os_sdk.py
# -*- coding: utf-8 -*-
# Copyright 2015-2016 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import openstack.profile
import openstack.connection
import openstack.session
from positional import positional
from keystoneauth1 import exceptions
from keystoneauth1.identity.generic.password import Password as BasePassword
from oslo_config import cfg
CONF = cfg.CONF
class Password(BasePassword):
    """Keystone password auth plugin with version discovery disabled.

    get_discovery always raises, so callers treat the discovery document
    as unavailable and use the configured endpoint/version instead.
    """

    @positional()
    def get_discovery(self, session, url, authenticated=None):
        # Unconditionally fail discovery; see class docstring.
        raise exceptions.DiscoveryFailure()
class Session(openstack.session.Session):
    """OpenStack SDK session that retries connection failures by default."""

    def request(self, *args, **kwargs):
        # Default to 5 connection retries unless the caller overrides it.
        if 'connect_retries' not in kwargs:
            kwargs['connect_retries'] = 5
        return super(Session, self).request(*args, **kwargs)
def create_connection(auth_url, project_name, username, password,
                      endpoint_type='publicURL',
                      user_domain_id='default',
                      project_domain_id='default',
                      verify=True,
                      cert=None,
                      identity_version=None):
    """Create an OpenStack SDK connection with discovery disabled.

    :param auth_url: Keystone auth endpoint.
    :param project_name: project (tenant) to scope to.
    :param username: login user name.
    :param password: login password.
    :param endpoint_type: service catalog interface to use.
    :param user_domain_id: user domain (v3 only).
    :param project_domain_id: project domain (v3 only).
    :param verify: TLS verification flag passed to the session.
    :param cert: client certificate passed to the session.
    :param identity_version: identity API version; falls back to
        CONF.keystone.version when not given.
    :returns: an ``openstack.connection.Connection``.
    """
    profile = openstack.profile.Profile()
    profile.set_interface(profile.ALL, endpoint_type)
    # This compute service override exists to support the new behaviour of the
    # endpoint discovery where it uses the discovery endpoint instead of the
    # service catalogue.
    profile._services['compute']['requires_project_id'] = True

    if not identity_version:
        identity_version = CONF.keystone.version
    # Normalize '3' to 'v3'. The original used `identity_version in '3'`,
    # a substring test that also matched the empty string by accident.
    if identity_version == '3' or 'v3' in auth_url:
        identity_version = 'v3'
    profile.set_version('identity', identity_version)

    if identity_version == 'v3':
        # v3 requires domain scoping for both the user and the project.
        authenticator = Password(
            auth_url=auth_url,
            user_domain_id=user_domain_id,
            project_name=project_name,
            project_domain_id=project_domain_id,
            username=username,
            password=password)
    else:
        authenticator = Password(
            auth_url=auth_url,
            project_name=project_name,
            username=username,
            password=password)

    session = Session(
        profile,
        user_agent='Sanity',
        auth=authenticator,
        verify=verify,
        cert=cert)
    return openstack.connection.Connection(
        session=session,
        authenticator=authenticator,
        profile=profile)
| StarcoderdataPython |
1635394 | <reponame>fylux/pyre-check<filename>client/tests/source_database_buck_builder_test.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import shutil
import unittest
from pathlib import Path
from unittest.mock import MagicMock, call, patch
from .. import source_database_buck_builder
class SourceDatabaseBuckBuilderTest(unittest.TestCase):
    """Unit tests for source_database_buck_builder's query/build/link-tree
    pipeline, with all buck and filesystem interactions mocked."""

    def setUp(self) -> None:
        # Expected `buck query` argument list shared by the query tests.
        self._query_arguments = [
            "query",
            "--json",
            'kind("python_binary|python_library|python_test", "%s") '
            "- attrfilter(labels, generated, '%s') "
            "+ attrfilter(labels, unittest-library, '%s') "
            "- attrfilter(labels, no_pyre, '%s')",
            "//foo/bar/...",
            "//bar:baz",
        ]

    def test_get_buck_query_arguments(self) -> None:
        """Query arguments without a mode match the expected list."""
        arguments = source_database_buck_builder._get_buck_query_arguments(
            specifications=["//foo/bar/...", "//bar:baz"], mode=None
        )
        self.assertEqual(arguments, self._query_arguments)

    def test_get_buck_query_arguments__with_mode(self) -> None:
        """A mode is inserted as an @mode/<mode> flag after --json."""
        arguments = source_database_buck_builder._get_buck_query_arguments(
            specifications=["//foo/bar/...", "//bar:baz"], mode="foo"
        )
        self.assertEqual(
            arguments,
            [
                "query",
                "--json",
                "@mode/foo",
                'kind("python_binary|python_library|python_test", "%s") '
                "- attrfilter(labels, generated, '%s') "
                "+ attrfilter(labels, unittest-library, '%s') "
                "- attrfilter(labels, no_pyre, '%s')",
                "//foo/bar/...",
                "//bar:baz",
            ],
        )

    # pyre-fixme[56]: Pyre was not able to infer the type of argument
    #  `tools.pyre.client.source_database_buck_builder` to decorator factory
    #  `unittest.mock.patch.object`.
    @patch.object(source_database_buck_builder, "_buck")
    def test_query_targets(self, buck: MagicMock) -> None:
        """Targets are flattened, deduplicated, and helper targets
        (mypy_ini / testmodules-lib suffixes) are filtered out."""
        query_output = {
            "//foo/bar/...": ["//foo/bar:baz", "//foo/bar:tests-library"],
            "//bar:baz": [
                "//bar:baz",
                "//bar:tests-mypy_ini",
                "//bar:tests-library-testmodules-lib",
            ],
        }
        buck.return_value = json.dumps(query_output)
        self.assertEqual(
            source_database_buck_builder._query_targets(
                ["//foo/bar/...", "//bar:baz"], mode=None
            ),
            ["//foo/bar:baz", "//foo/bar:tests-library", "//bar:baz"],
        )

    def test_buck_build_arguments(self) -> None:
        """Each target is built as its #source-db flavor."""
        self.assertEqual(
            source_database_buck_builder._get_buck_build_arguments(
                ["//foo/bar:baz", "//foo/bar:tests-library"]
            ),
            [
                "build",
                "--show-full-json-output",
                "//foo/bar:baz#source-db",
                "//foo/bar:tests-library#source-db",
            ],
        )

    # pyre-fixme[56]: Argument `json` to decorator factory
    #  `unittest.mock.patch.object` could not be resolved in a global scope.
    @patch.object(json, "loads")
    @patch.object(Path, "read_text")
    def test_load_source_databases(
        self, read_text: MagicMock, loads: MagicMock
    ) -> None:
        """Each target's db.json is read and parsed into a database dict."""
        expected_database = {
            "sources": {"bar.py": "some/other/bar.py"},
            "dependencies": {"foo.py": "some/foo.py"},
        }
        loads.return_value = expected_database
        source_databases = source_database_buck_builder._load_source_databases(
            {"//foo:bar#source-db": "/some/bar#source-db/db.json"}
        )
        self.assertEqual(source_databases, {"//foo:bar#source-db": expected_database})

    def test_merge_source_databases(self) -> None:
        """Sources win over dependencies of other targets, later databases
        win on duplicate keys, and buck-generated __manifest__ /
        __test_modules__ / __test_main__ helpers are dropped."""
        actual = source_database_buck_builder._merge_source_databases(
            {
                "hello": {
                    "sources": {
                        "foo.py": "foo.py",
                        "duplicate.py": "duplicate_in_hello.py",
                    },
                    "dependencies": {
                        "bar.pyi": "buck-out/bar.pyi",
                        "bar.cpp": "bar.cpp",
                    },
                },
                "foo": {
                    "sources": {},
                    "dependencies": {
                        "foo2.pyi": "buck-out/foo2.pyi",
                        "bar2.cpp": "bar2.cpp",
                        "duplicate.py": "duplicate_in_foo.py",
                        "__manifest__.py": "__manifest__.py",
                        "__test_modules__.py": "__test_modules__.py",
                        "__test_main__.py": "__test_main__.py",
                    },
                },
            }
        )
        self.assertEqual(
            actual,
            {
                "foo.py": "foo.py",
                "duplicate.py": "duplicate_in_foo.py",
                "bar.pyi": "buck-out/bar.pyi",
                "foo2.pyi": "buck-out/foo2.pyi",
            },
        )

    # pyre-fixme[56]: Argument `shutil` to decorator factory
    #  `unittest.mock.patch.object` could not be resolved in a global scope.
    @patch.object(shutil, "rmtree")
    @patch.object(Path, "exists")
    @patch.object(Path, "mkdir")
    @patch.object(Path, "symlink_to")
    def test_build_link_tree(
        self,
        symlink_to: MagicMock,
        make_directory: MagicMock,
        exists: MagicMock,
        remove_tree: MagicMock,
    ) -> None:
        """The link tree mirrors the merged mapping: one mkdir per parent
        directory and one symlink per source file."""
        source_database_buck_builder._build_link_tree(
            {"foo.py": "foo.py", "bar/baz.pyi": "buck-out/bar.pyi"},
            Path("foo_directory"),
            Path("/root"),
        )
        self.assertEqual(
            make_directory.call_args_list,
            [
                call(parents=True),
                call(parents=True, exist_ok=True),
                call(parents=True, exist_ok=True),
            ],
        )
        self.assertEqual(
            symlink_to.call_args_list,
            [call(Path("/root/foo.py")), call(Path("/root/buck-out/bar.pyi"))],
        )

    @patch.object(source_database_buck_builder, "_build_link_tree")
    @patch.object(source_database_buck_builder, "_load_source_databases")
    @patch.object(source_database_buck_builder, "_build_targets")
    # pyre-fixme[56]: Argument
    #  `tools.pyre.tools.buck_project_builder.source_database_buck_builder` to
    #  decorator factory `unittest.mock.patch.object` could not be resolved in a global
    #  scope.
    @patch.object(source_database_buck_builder, "_query_targets")
    def test_build(
        self,
        query_targets: MagicMock,
        build_targets: MagicMock,
        load_source_databases: MagicMock,
        build_link_tree: MagicMock,
    ) -> None:
        """End-to-end: query -> build -> merge databases -> link tree."""
        load_source_databases.return_value = {
            "hello": {"sources": {"foo.py": "foo.py"}, "dependencies": {}},
            "foo": {"sources": {}, "dependencies": {"bar.pyi": "buck-out/bar.pyi"}},
        }
        source_database_buck_builder.build(
            ["//foo/bar/..."],
            output_directory=Path("output_directory"),
            buck_root=Path("buck_root"),
            mode=None,
        )
        query_targets.assert_called_once()
        build_targets.assert_called_once()
        build_link_tree.assert_called_once_with(
            {"foo.py": "foo.py", "bar.pyi": "buck-out/bar.pyi"},
            Path("output_directory"),
            Path("buck_root"),
        )
| StarcoderdataPython |
1608612 | <gh_stars>1-10
# general libraries
import warnings
import numpy as np
# image processing libraries
from scipy import ndimage, interpolate, fft, signal
from scipy.optimize import fsolve
from skimage.feature import match_template
from skimage.transform import radon
from skimage.measure import ransac
from sklearn.cluster import KMeans
from eratosthenes.generic.filtering_statistical import make_2D_Gaussian, \
mad_filtering
from eratosthenes.generic.handler_im import get_grad_filters
from eratosthenes.preprocessing.shadow_transforms import pca
# spatial sub-pixel allignment functions
def simple_optical_flow(I1, I2, window_size, sampleI, sampleJ, tau=1e-2):  # processing
    """Displacement estimation through optical flow, following
    Lucas & Kanade 1981.

    Parameters
    ----------
    I1 : np.array, size=(n,m)
        image with intensities
    I2 : np.array, size=(n,m)
        image with intensities
    window_size : integer
        kernel size of the neighborhood (should be odd)
    sampleI : np.array, size=(k,l)
        grid with image coordinates (row indices)
    sampleJ : np.array, size=(k,l)
        grid with image coordinates (column indices)
    tau : float
        smoothness parameter; estimation is skipped when the smallest
        eigenvalue of A'A falls below it

    Returns
    -------
    Ugrd, Vgrd : np.array, size=(k,l)
        displacement estimates, same shape as the sample grids
    """
    kernel_x = np.array(
        [[-1., 1.],
         [-1., 1.]]
    )
    kernel_t = np.array(
        [[1., 1.],
         [1., 1.]]
    ) * .25

    fx = ndimage.convolve(I1, kernel_x)
    fy = ndimage.convolve(I1, np.flip(np.transpose(kernel_x), axis=0))
    ft = ndimage.convolve(I2, kernel_t) + ndimage.convolve(I1, -kernel_t)

    # Allocate outputs with the sample grid's shape. The original used
    # (len(sampleI), len(sampleJ)) == (rows, rows), which raised an
    # IndexError (or truncated results) for non-square sample grids.
    Ugrd = np.zeros(sampleI.shape)
    Vgrd = np.zeros(sampleI.shape)

    radius = np.floor(window_size / 2).astype(
        'int')  # window_size should be odd
    for iIdx in range(sampleI.size):
        iIm = sampleI.flat[iIdx]
        jIm = sampleJ.flat[iIdx]
        (iGrd, jGrd) = np.unravel_index(iIdx, sampleI.shape)

        # get templates
        Ix = fx[iIm - radius:iIm + radius + 1,
                jIm - radius:jIm + radius + 1].flatten()
        Iy = fy[iIm - radius:iIm + radius + 1,
                jIm - radius:jIm + radius + 1].flatten()
        It = ft[iIm - radius:iIm + radius + 1,
                jIm - radius:jIm + radius + 1].flatten()

        # look if variation is present
        if np.std(It) != 0:
            b = np.reshape(It, (It.shape[0], 1))  # get b here
            A = np.vstack((Ix, Iy)).T  # get A here
            # threshold tau should be larger
            # than the smallest eigenvalue of A'A
            if np.min(abs(np.linalg.eigvals(np.matmul(A.T, A)))) >= tau:
                nu = np.matmul(np.linalg.pinv(A), b)  # get velocity here
                Ugrd[iGrd, jGrd] = nu[0]
                Vgrd[iGrd, jGrd] = nu[1]
    return (Ugrd, Vgrd)
def affine_optical_flow(I1, I2, model='Affine', iteration=15):
    """
    Displacement estimation through optical flow following
    Lucas & Kanade 1981, extended with an affine motion model
    (iterative Gauss-Newton style refinement).

    :param I1: NP.ARRAY (_,_)
        image with intensities (template)
    :param I2: NP.ARRAY (_,_)
        image with intensities (search image)
    :param model: STRING
        :options : Affine - affine and translation
                   Rotation - TODO
        NOTE(review): this parameter is currently unused by the body.
    :param iteration: INTEGER
        maximum number of refinement iterations
    :return u: FLOAT
        displacement estimate (horizontal)
    :return v: FLOAT
        displacement estimate (vertical)
    :return A: NP.ARRAY (2,2)
        estimated affine deformation part
    :return snr: FLOAT
        residual of the best iteration (0 on divergence)
    """
    (kernel_j,_) = get_grad_filters('kroon')

    kernel_t = np.array(
        [[1., 1., 1.],
         [1., 2., 1.],
         [1., 1., 1.]]
    ) / 10

    # smooth to not have very sharp derivatives
    I1 = ndimage.convolve(I1, make_2D_Gaussian((3,3),fwhm=3))
    I2 = ndimage.convolve(I2, make_2D_Gaussian((3,3),fwhm=3))

    # calculate spatial and temporal derivatives
    I_dj = ndimage.convolve(I1, kernel_j)
    I_di = ndimage.convolve(I1, np.flip(np.transpose(kernel_j), axis=0))

    # create local coordinate grid, centered on the template
    (mI,nI) = I1.shape
    mnI = I1.size
    (grd_i,grd_j) = np.meshgrid(np.linspace(-(mI-1)/2, +(mI-1)/2, mI), \
                                np.linspace(-(nI-1)/2, +(nI-1)/2, nI), \
                                indexing='ij')
    grd_j = np.flipud(grd_j)
    stk_ij = np.vstack( (grd_i.flatten(), grd_j.flatten()) ).T

    # p holds the 6 affine parameters (updates accumulate over iterations)
    p = np.zeros((1,6), dtype=float)
    p_stack = np.zeros((iteration,6), dtype=float)
    res = np.zeros((iteration,1), dtype=float) # look at iteration evolution

    for i in np.arange(iteration):
        # affine transform of the local grid with the current parameters
        Aff = np.array([[1, 0, 0], [0, 1, 0]]) + p.reshape(3,2).T
        grd_new = np.matmul(Aff,
                            np.vstack((stk_ij.T,
                                       np.ones(mnI))))
        new_i = np.reshape(grd_new[0,:], (mI, nI))
        new_j = np.reshape(grd_new[1,:], (mI, nI))

        # construct new templates by resampling onto the warped grid
        try:
            I2_new = interpolate.griddata(stk_ij, I2.flatten().T,
                                          (new_i,new_j), method='cubic')
        except:
            print('different number of values and points')
            # NOTE(review): when griddata fails, I2_new stays undefined and
            # the I_dt_new line below raises NameError -- confirm intent.
        I_di_new = interpolate.griddata(stk_ij, I_di.flatten().T,
                                        (new_i,new_j), method='cubic')
        I_dj_new = interpolate.griddata(stk_ij, I_dj.flatten().T,
                                        (new_i,new_j), method='cubic')
        # I_dt = ndimage.convolve(I2_new, kernel_t) +
        #     ndimage.convolve(I1, -kernel_t)
        I_dt_new = I2_new - I1

        # compose Jacobian and Hessian
        dWdp = np.array([ \
                          I_di_new.flatten()*grd_i.flatten(),
                          I_dj_new.flatten()*grd_i.flatten(),
                          I_di_new.flatten()*grd_j.flatten(),
                          I_dj_new.flatten()*grd_j.flatten(),
                          I_di_new.flatten(),
                          I_dj_new.flatten()])
        # dWdp = np.array([ \
        #         I_di.flatten()*grd_i.flatten(),
        #         I_dj.flatten()*grd_i.flatten(),
        #         I_di.flatten()*grd_j.flatten(),
        #         I_dj.flatten()*grd_j.flatten(),
        #         I_di.flatten(),
        #         I_dj.flatten()])

        # remove data outside the template
        A, y = dWdp.T, I_dt_new.flatten()
        IN = ~(np.any(np.isnan(A), axis=1) | np.isnan(y))
        # NOTE(review): A is filtered with the combined mask IN but y only
        # with its own NaN mask; if A has NaN rows where y does not, the
        # two operands of lstsq differ in length -- confirm.
        A = A[IN,:]
        y = y[~np.isnan(y)]
        #(dp,res[i]) = least_squares(A, y, mode='andrews', iterations=3)
        if y.size>=6: # structure should not become ill-posed
            try:
                (dp,res[i],_,_) = np.linalg.lstsq(A, y, rcond=None)#[0]
            except ValueError:
                pass #print('something wrong?')
        else:
            break
        p += dp
        p_stack[i,:] = p

    # only convergence is allowed: truncate at the first residual increase
    (up_idx,_) = np.where(np.sign(res-np.vstack(([1e3],res[:-1])))==1)
    if up_idx.size != 0:
        res = res[:up_idx[0]]

    if res.size == 0: # sometimes divergence occurs
        A = np.array([[1, 0], [0, 1]])
        u, v, snr = 0, 0, 0
    else:
        # pick the iteration with the smallest residual
        Aff = np.array([[1, 0, 0], [0, 1, 0]]) + \
            p_stack[np.argmin(res),:].reshape(3,2).T
        u, v = Aff[0,-1], Aff[1,-1]
        A = np.linalg.inv(Aff[:,0:2]).T
        snr = np.min(res)
    return (u, v, A, snr)
# spatial pattern matching functions
def normalized_cross_corr(I1, I2):
    """Normalized cross-correlation of template I1 over search image I2.

    Thin wrapper around skimage.feature.match_template.

    :param I1: NP.ARRAY (_,_)
        template with intensities
    :param I2: NP.ARRAY (_,_)
        search image with intensities
    :return: NP.ARRAY (_,_)
        similarity surface
    """
    similarity = match_template(I2, I1)
    return similarity
def cumulative_cross_corr(I1, I2):
    """ normalized cross correlation on distance-transformed imagery
    Parameters
    ----------
    I1 : np.array, size=(_,_)
        binary array, or float array that is binarized at its median
    I2 : np.array, size=(_,_)
        binary array, or float array that is binarized at its median
    Returns
    -------
    result : np.array, size=(_,_)
        similarity surface
    """
    # fix: 'isinstance(I1, np.floating)' is always False for an ndarray;
    # the dtype has to be inspected instead
    if np.issubdtype(I1.dtype, np.floating):
        # get cut-off value
        cu = np.quantile(I1, 0.5)
        I1 = I1 < cu
    # fix: this branch tested (and thresholded) I1 a second time
    if np.issubdtype(I2.dtype, np.floating):
        # get cut-off value
        cu = np.quantile(I2, 0.5)
        I2 = I2 < cu
    # match on Euclidean distance fields instead of the raw binary masks
    I1new = ndimage.distance_transform_edt(I1)
    I2new = ndimage.distance_transform_edt(I2)
    result = match_template(I2new, I1new)
    return result
def sum_sq_diff(I1, I2):
    """ sum of squared differences of a template against every position
    (fix: docstring wrongly claimed normalized cross correlation)
    Parameters
    ----------
    I1 : np.array, size=(k,l)
        image with intensities, used as template
    I2 : np.array, size=(m,n)
        image with intensities, used as search space
    Returns
    -------
    ssd : np.array, size=(m-k+1,n-l+1)
        dissimilarity surface, i.e. sum((window - I1)**2) per position
    """
    t_size = I1.shape
    # sliding-window view of I2 (no copy), one window per valid position
    y = np.lib.stride_tricks.as_strided(I2,
                    shape=(I2.shape[0] - t_size[0] + 1,
                           I2.shape[1] - t_size[1] + 1,) +
                          t_size,
                    strides=I2.strides * 2)
    # expand (w - t)^2 = w^2 - 2 w t + t^2 and sum each term via einsum
    ssd = np.einsum('ijkl,kl->ij', y, I1)
    ssd *= - 2
    ssd += np.einsum('ijkl, ijkl->ij', y, y)
    ssd += np.einsum('ij, ij', I1, I1)
    return ssd
def get_integer_peak_location(C):
    """ find the location of the highest score on the similarity surface
    Parameters
    ----------
    C : np.array, size=(m,n)
        similarity surface
    Returns
    -------
    di : integer
        offset of the peak on the vertical axis, w.r.t. the surface center
    dj : integer
        offset of the peak on the horizontal axis, w.r.t. the surface center
    snr : float
        peak score divided by the mean score
    max_corr : float
        value of the highest score
    """
    max_corr = np.amax(C)
    snr = max_corr/np.mean(C)
    # fix: argmax flattens in C-order, so unravel in the same order; the old
    # order='F' + reversal combination was only equivalent for square C
    i, j = np.unravel_index(np.argmax(C), C.shape)
    di = i - C.shape[0] // 2
    dj = j - C.shape[1] // 2
    return di, dj, snr, max_corr
# sub-pixel localization of the correlation peak
def get_top_moment(C, ds=1, top=np.array([])):
    """ find location of highest score through an intensity-weighted centroid
    Parameters
    ----------
    C : np.array, size=(_,_)
        similarity surface
    ds : integer, default=1
        size of the radius to use neighboring information
    top : np.array, size=(1,2)
        location (row, column) of the maximum score; when empty, the integer
        peak is looked up internally
    Returns
    -------
    ddi : float
        estimated subpixel location on the vertical axis of the peak
    ddj : float
        estimated subpixel location on the horizontal axis of the peak
    Notes
    -----
    [1] "A subpixel registration algorithm for low PSNR images"
    IEEE international conference on advanced computational intelligence,
    pp. 626-630, 2012.
    [2] Messerli & Grinstad, "Image georectification and feature tracking
    toolbox: ImGRAFT" Geoscientific instrumentation, methods and data systems,
    vol. 4(1) pp. 23-34, 2015.
    """
    (subJ, subI) = np.meshgrid(np.linspace(-ds, +ds, 2*ds+1),
                               np.linspace(-ds, +ds, 2*ds+1))
    subI = subI.ravel()
    subJ = subJ.ravel()
    if top.size == 0:
        # find highest score; the returned offsets are w.r.t. the surface
        # center, so convert them back to array indices (fix)
        i, j, _, _ = get_integer_peak_location(C)
        i += C.shape[0] // 2
        j += C.shape[1] // 2
    else:
        i, j = top[0], top[1]
    # peak too close to the border for a ds-wide neighborhood
    # (fix: the original test '(i==0)|(i!=C.shape[0])|...' was always true)
    if (i < ds) or (i > C.shape[0]-1-ds) or (j < ds) or (j > C.shape[1]-1-ds):
        ddi, ddj = 0, 0
    else:  # estimate sub-pixel top
        idx_mid = int(np.floor((2.*ds+1)**2/2))  # fix: np.int was removed from numpy
        Csub = C[i-ds:i+ds+1, j-ds:j+ds+1].ravel()
        # subtract the mean of the neighbors so only dominant scores weigh in
        Csub = Csub - np.mean(np.hstack((Csub[0:idx_mid], Csub[idx_mid+1:])))
        IN = Csub > 0
        m = np.array([np.divide(np.sum(subI[IN]*Csub[IN]), np.sum(Csub[IN])),
                      np.divide(np.sum(subJ[IN]*Csub[IN]), np.sum(Csub[IN]))])
        ddi, ddj = m[0], m[1]
    return (ddi, ddj)
def get_top_blue(C, ds=1): # wip
    """ sub-pixel peak localization via derivative stencils (work in progress)
    NOTE(review): this function looks unfinished; see inline notes for
    likely bugs before relying on its output.
    """
    (subJ,subI) = np.meshgrid(np.linspace(-ds,+ds, 2*ds+1), np.linspace(-ds,+ds, 2*ds+1))
    subI = subI.ravel()
    subJ = subJ.ravel()
    # find highest score
    # NOTE(review): get_integer_peak_location returns (di, dj, snr, max_corr);
    # the last two names are swapped here, and di,dj are offsets w.r.t. the
    # surface center, not array indices — verify before using them as slices
    y,x,max_corr,snr = get_integer_peak_location(C)
    # estimate Jacobian
    H_x = np.array([[-17., 0., 17.],
                    [-61., 0., 61.],
                    [-17., 0., 17.]]) / 95
    # estimate Hessian
    H_xx = 8 / np.array([[105, -46, 105],
                         [ 50, -23, 50],
                         [ 105, -46, 105]] )
    # np.inf entries yield 0 after the division, masking those positions
    H_xy = 11 / np.array([[-114, np.inf, +114],
                          [np.inf, np.inf, np.inf],
                          [+114, np.inf, -114]] )
    # estimate sub-pixel top
    Csub = C[y-ds:y+ds+1,x-ds:x+ds+1]
    Jac = np.array([[Csub*H_x], [Csub*H_x.T]])
    # NOTE(review): 'Hes = Jac = ...' rebinds Jac, discarding the Jacobian
    # computed just above — almost certainly a bug
    Hes = Jac = np.array([[Csub*H_xx , Csub*H_xy],
                          [Csub*H_xy.T, Csub*H_xx.T]])
    # NOTE(review): '*' is element-wise here; a matrix product (.dot) is
    # presumably intended for inv(Hes) applied to Jac
    m0 = np.array([[x], [y]]) - np.linalg.inv(Hes) * Jac
    return (m0[0], m0[1])
def get_top_gaussian(C, top=np.array([])):
    """ find location of highest score through 1D gaussian fit
    Parameters
    ----------
    C : np.array, size=(_,_)
        similarity surface, should be strictly positive around the peak
        (logarithms are taken)
    top : np.array, size=(1,2)
        location (row, column) of the maximum score
    Returns
    -------
    ddi : float
        estimated subpixel location on the vertical axis of the peak
    ddj : float
        estimated subpixel location on the horizontal axis of the peak
    Notes
    -----
    [1] Argyriou & Vlachos, "A Study of sub-pixel motion estimation using
    phase correlation" Proceeding of the British machine vision conference,
    pp. 387-396), 2006.
    """
    if top.size == 0:
        # find highest score; convert center offsets to array indices (fix)
        i, j, _, _ = get_integer_peak_location(C)
        i += C.shape[0] // 2
        j += C.shape[1] // 2
    else:
        i, j = top[0], top[1]
    # peak at the border: no 3-point fit possible
    # (fix: the original test '(i==0)|(i!=C.shape[0])|...' was always true)
    if (i < 1) or (i > C.shape[0]-2) or (j < 1) or (j > C.shape[1]-2):
        ddi, ddj = 0, 0
    else:  # estimate sub-pixel along each axis
        # fix: parentheses added — 'a / 2*(b)' evaluated as (a/2)*b
        ddi = (np.log(C[i+1,j]) - np.log(C[i-1,j])) / \
            (2*( (2*np.log(C[i,j])) -np.log(C[i-1,j]) -np.log(C[i+1,j])))
        ddj = (np.log(C[i,j+1]) - np.log(C[i,j-1])) / \
            (2*( (2*np.log(C[i,j])) -np.log(C[i,j-1]) -np.log(C[i,j+1])))
    return (ddi, ddj)
def get_top_parabolic(C, top=np.array([])):
    """ find location of highest score through 1D parabolic fit
    Parameters
    ----------
    C : np.array, size=(_,_)
        similarity surface
    top : np.array, size=(1,2)
        location (row, column) of the maximum score
    Returns
    -------
    ddi : float
        estimated subpixel location on the vertical axis of the peak
    ddj : float
        estimated subpixel location on the horizontal axis of the peak
    Notes
    -----
    [1] Argyriou & Vlachos, "A Study of sub-pixel motion estimation using
    phase correlation" Proceeding of the British machine vision conference,
    pp. 387-396), 2006.
    """
    if top.size == 0:
        # find highest score; convert center offsets to array indices (fix)
        i, j, _, _ = get_integer_peak_location(C)
        i += C.shape[0] // 2
        j += C.shape[1] // 2
    else:
        i, j = top[0], top[1]
    # peak at the border: no 3-point fit possible
    # (fix: the original test '(i==0)|(i!=C.shape[0])|...' was always true)
    if (i < 1) or (i > C.shape[0]-2) or (j < 1) or (j > C.shape[1]-2):
        ddi, ddj = 0, 0
    else:  # estimate sub-pixel along each axis
        # fix: parentheses added — 'a / 2*(b)' evaluated as (a/2)*b
        ddi = (C[i+1,j] - C[i-1,j]) / (2*( (2*C[i,j]) -C[i-1,j] -C[i+1,j]))
        ddj = (C[i,j+1] - C[i,j-1]) / (2*( (2*C[i,j]) -C[i,j-1] -C[i,j+1]))
    return (ddi, ddj)
def get_top_birchfield(C, top=np.array([])):
    """ find location of highest score along each axis
    Parameters
    ----------
    C : np.array, size=(_,_)
        similarity surface
    top : np.array, size=(1,2)
        location (row, column) of the maximum score
    Returns
    -------
    ddi : float
        estimated subpixel location on the vertical axis of the peak
    ddj : float
        estimated subpixel location on the horizontal axis of the peak
    Notes
    -----
    [1] Birchfield & Tomasi. "Depth discontinuities by pixel-to-pixel stereo"
    International journal of computer vision, vol. 35(3)3 pp. 269-293, 1999.
    """
    if top.size == 0:
        # find highest score; convert center offsets to array indices (fix)
        i, j, _, _ = get_integer_peak_location(C)
        i += C.shape[0] // 2
        j += C.shape[1] // 2
    else:
        i, j = top[0], top[1]
    # peak at the border: no neighbor comparison possible
    # (fix: the original test '(i==0)|(i!=C.shape[0])|...' was always true)
    if (i < 1) or (i > C.shape[0]-2) or (j < 1) or (j > C.shape[1]-2):
        ddi, ddj = 0, 0
    else:
        # estimate sub-pixel along each axis via interpolated half samples
        I_m,I_p = .5*(C[i-1,j] + C[i,j]), .5*(C[i+1,j] + C[i,j])
        I_min,I_max = np.amin([I_m, I_p, C[i,j]]), np.amax([I_m, I_p, C[i,j]])
        # swapped, since Birchfield uses dissimilarity
        ddi = np.amax([0, I_max-C[i,j], C[i,j]-I_min])
        I_m,I_p = .5*(C[i,j-1] + C[i,j]), .5*(C[i,j+1] + C[i,j])
        I_min,I_max = np.amin([I_m, I_p, C[i,j]]), np.amax([I_m, I_p, C[i,j]])
        ddj = np.amax([0, I_max-C[i,j], C[i,j]-I_min])
    return (ddi, ddj)
def get_top_ren(C, top=None):
    """ find location of highest score, following Ren et al.
    Parameters
    ----------
    C : np.array, size=(_,_)
        similarity surface
    top : np.array, size=(1,2), optional
        location (row, column) of the maximum score
    Returns
    -------
    ddi : float
        estimated subpixel location on the vertical axis of the peak
    ddj : float
        estimated subpixel location on the horizontal axis of the peak
    Notes
    -----
    [1] Ren et al. "High-accuracy sub-pixel motion estimation from noisy
    images in Fourier domain." IEEE transactions on image processing,
    vol. 19(5) pp. 1379-1384, 2010.
    """
    if top is None:  # fix: 'top.size is None' raised on the default None
        # find highest score; convert center offsets to array indices (fix)
        i, j, _, _ = get_integer_peak_location(C)
        i += C.shape[0] // 2
        j += C.shape[1] // 2
    else:
        i, j = top[0], top[1]
    # peak at the border: no central difference possible
    # (fix: the original test '(i==0)|(i!=C.shape[0])|...' was always true)
    if (i < 1) or (i > C.shape[0]-2) or (j < 1) or (j > C.shape[1]-2):
        ddi, ddj = 0, 0
    else:
        # estimate sub-pixel along each axis
        D_i = C[i+1,j] - C[i-1,j]
        ddi = np.sign(D_i)/(1 + ( C[i,j] / np.abs(D_i) ))
        D_j = C[i,j+1] - C[i,j-1]
        ddj = np.sign(D_j)/(1 + ( C[i,j] / np.abs(D_j) ))
    return (ddi, ddj)
def get_top_triangular(C, top=None):
    """ find location of highest score through triangular fit
    Parameters
    ----------
    C : np.array, size=(_,_)
        similarity surface
    top : np.array, size=(1,2), optional
        location (row, column) of the maximum score
    Returns
    -------
    ddi : float
        estimated subpixel location on the vertical axis of the peak
    ddj : float
        estimated subpixel location on the horizontal axis of the peak
    Notes
    -----
    [1] "Real-time vergence control for binocular robots"
    International journal of computer vision, vol. 7(1), pp. 67-89, 1991.
    """
    if top is None:  # fix: 'top.size is None' raised on the default None
        # find highest score; convert center offsets to array indices (fix)
        i, j, _, _ = get_integer_peak_location(C)
        i += C.shape[0] // 2
        j += C.shape[1] // 2
    else:
        i, j = top[0], top[1]
    # peak at the border: no neighbor comparison possible
    # (fix: the original test '(i==0)|(i!=C.shape[0])|...' was always true)
    if (i < 1) or (i > C.shape[0]-2) or (j < 1) or (j > C.shape[1]-2):
        ddi, ddj = 0, 0
    else:
        # estimate sub-pixel along each axis
        I_m,I_p = C[i-1,j], C[i+1,j]
        I_min,I_max = np.amin([I_m, I_p]), np.amax([I_m, I_p])
        I_sign = 2*(I_p>I_m)-1
        ddi = I_sign * (1- (I_max-I_min)/(C[i,j]-I_min) )
        I_m,I_p = C[i,j-1], C[i,j+1]
        # fix: the horizontal axis included C[i,j] in the min/max, which made
        # I_max always equal to the peak and forced ddj to 0; use the same
        # neighbor-only extrema as the vertical axis
        I_min,I_max = np.amin([I_m, I_p]), np.amax([I_m, I_p])
        I_sign = 2*(I_p>I_m)-1
        ddj = I_sign * (1- (I_max-I_min)/(C[i,j]-I_min) )
    return (ddi, ddj)
def get_top_esinc(C, ds=1, top=None):
    '''
    find location of highest score using an exponential sinc (esinc) fit,
    following Argyriou & Vlachos 2006
    "A study of sub-pixel motion estimation using phase correlation"
    :param C: NP.ARRAY (_,_)
        similarity surface
    :param ds: INTEGER
        radius of the neighborhood used around the peak
    :param top: NP.ARRAY (1,2)
        location of the maximum score
    :return iC: FLOAT
        estimated subpixel location on the vertical axis of the peak
    :return jC: FLOAT
        estimated subpixel location on the horizontal axis of the peak
    '''
    if top is None: # find highest score
        # returned offsets are w.r.t. the surface center: convert to indices
        i, j, _, _ = get_integer_peak_location(C)
        i += C.shape[0] // 2
        j += C.shape[1] // 2
    else:
        i, j = top[0], top[1]
    # peak too close to the border for a ds-wide neighborhood
    # (fix: the original test '(i==0)|(i!=C.shape[0])|...' was always true)
    if (i < ds) or (i > C.shape[0]-1-ds) or (j < ds) or (j > C.shape[1]-1-ds):
        iC, jC = 0, 0
    else:
        # fit a*exp(-(b*x)**2) * sin(pi*x)/(pi*x) through three samples per
        # axis; the offset c is the sub-pixel estimate
        # (fix: sinc denominator needed parentheses — was sin(.)/pi*(.))
        Cj = C[i,j-ds:j+ds+1].ravel()
        def funcJ(x):
            a, b, c = x
            return [(Cj[0] - a*np.exp(-(b*(-1-c))**2) *
                     (np.sin(np.pi*(-1-c)) / (np.pi*(-1-c))))**2,
                    (Cj[1] - a*np.exp(-(b*(+0-c))**2) *
                     (np.sin(np.pi*(+0-c)) / (np.pi*(+0-c))))**2,
                    (Cj[2] - a*np.exp(-(b*(+1-c))**2) *
                     (np.sin(np.pi*(+1-c)) / (np.pi*(+1-c))))**2]
        jA, jB, jC = fsolve(funcJ, (1.0, 1.0, 0.1))
        Ci = C[i-ds:i+ds+1,j].ravel()
        def funcI(x):
            a, b, c = x
            return [(Ci[0] - a*np.exp(-(b*(-1-c))**2) *
                     (np.sin(np.pi*(-1-c)) / (np.pi*(-1-c))))**2,
                    (Ci[1] - a*np.exp(-(b*(+0-c))**2) *
                     (np.sin(np.pi*(+0-c)) / (np.pi*(+0-c))))**2,
                    (Ci[2] - a*np.exp(-(b*(+1-c))**2) *
                     (np.sin(np.pi*(+1-c)) / (np.pi*(+1-c))))**2]
        iA, iB, iC = fsolve(funcI, (1.0, 1.0, 0.1))
    return (iC, jC)
#todo: Nobach_05,
# paraboloid
# phase plane functions
def phase_tpss(Q, W, m, p=1e-4, l=4, j=5, n=3):
    """get phase plane of cross-spectrum through two point step size iteration
    Minimizes the weighted misfit between the cross-spectrum and a synthetic
    phase plane, updating the displacement with a two-point step size and
    progressively tightening the weighting mask.
    Parameters
    ----------
    Q : np.array, size=(_,_), dtype=complex
        cross spectrum
    W : np.array, size=(_,_)
        weighting matrix
    m : np.array, size=(2,), dtype=float
        initial displacement estimate (updated in place)
    p : float, default=1e-4
        closing error threshold
    l : integer, default=4
        number of refinements in iteration
    j : integer, default=5
        number of sub routines during an estimation
    n : integer, default=3
        mask convergence factor
    Returns
    -------
    m : np.array, size=(2,)
        sub-pixel displacement
    snr : float
        signal-to-noise ratio
    See Also
    --------
    phase_svd, phase_radon, phase_difference
    Notes
    -----
    [1] Leprince, et.al. "Automatic and precise orthorectification,
    coregistration, and subpixel correlation of satellite images, application
    to ground deformation measurements", IEEE Transactions on Geoscience and
    Remote Sensing vol. 45.6 pp. 1529-1558, 2007.
    """
    (m_Q, n_Q) = Q.shape
    # angular frequency coordinates, fft-shifted to match Q's layout
    fy = 2*np.pi*(np.arange(0,m_Q)-(m_Q/2)) /m_Q
    fx = 2*np.pi*(np.arange(0,n_Q)-(n_Q/2)) /n_Q
    Fx = np.repeat(fx[np.newaxis,:],m_Q,axis=0)
    Fy = np.repeat(fy[:,np.newaxis],n_Q,axis=1)
    Fx = np.fft.fftshift(Fx)
    Fy = np.fft.fftshift(Fy)
    # initialize a perturbed estimate, so the first step size can be formed
    m_min = m + np.array([+.1, +.1])
    C_min = 1j*-np.sin(Fx*m_min[1] + Fy*m_min[0])
    C_min += np.cos(Fx*m_min[1] + Fy*m_min[0])
    QC_min = Q-C_min # np.abs(Q-C_min) #Q-C_min np.abs(Q-C_min)
    dXY_min = np.multiply(2*W, (QC_min * np.conjugate(QC_min)) )
    g_min = np.real(np.array([np.nansum(Fy*dXY_min), \
                              np.nansum(Fx*dXY_min)]))
    # fix: leftover debug print statements removed
    for i in range(l):
        k = 1
        while True:
            # synthetic phase plane for the current estimate
            C = 1j*-np.sin(Fx*m[1] + Fy*m[0])
            C += np.cos(Fx*m[1] + Fy*m[0])
            QC = Q-C # np.abs(Q-C)#np.abs(Q-C)
            dXY = 2*W*(QC*np.conjugate(QC))
            g = np.real(np.array([np.nansum(np.multiply(Fy,dXY)), \
                                  np.nansum(np.multiply(Fx,dXY))]))
            # two-point step size from the estimate/gradient differences
            dm = m - m_min
            dg = g - g_min
            alpha = np.dot(dm,dg)/np.dot(dg,dg)
            #alpha = np.dot(dm,dm)/np.dot(dm,dg)
            if (np.all(np.abs(m - m_min)<=p)) or (k>=j):
                break
            # update
            m_min, g_min, dXY_min = np.copy(m), np.copy(g), np.copy(dXY)
            m -= alpha*dg
            k += 1
        # optimize weighting matrix
        phi = np.abs(QC*np.conjugate(QC))/2
        W = W*(1-(dXY/8))**n
    snr = 1 - (np.sum(phi)/(4*np.sum(W)))
    return (m, snr)
def phase_svd(Q, W, rad=0.1):
    """get phase plane of cross-spectrum through singular value decomposition
    find slope of the phase plane through
    singular value decomposition
    Parameters
    ----------
    Q : np.array, size=(m,n), dtype=complex
        cross spectrum
    W : np.array, size=(m,n), dtype=float
        weigthing matrix
    rad : float, default=0.1
        relative half-width of the central band used for the slope fit
    Returns
    -------
    di,dj : float
        sub-pixel displacement
    See Also
    --------
    phase_tpss, phase_radon, phase_difference
    Notes
    -----
    [1] "A subspace identification extension to the phase
    correlation method", IEEE transactions on medical imaging, vol. 22.2
    pp. 277-280, 2003.
    """
    # filtering through magnitude
    # W: M = thresh_masking(S1, m=th, s=ker) th=0.001, ker=10
    (m,n) = Q.shape
    Q,W = np.fft.fftshift(Q), np.fft.fftshift(W)
    # decompose axis
    n_elements = 1
    u,s,v = np.linalg.svd(W*Q) # singular-value decomposition
    sig = np.zeros((m,n))
    sig[:m,:m] = np.diag(s)
    sig = sig[:,:n_elements] # select first element only
    # reconstruct the dominant rank-1 factors per axis
    t_m = np.transpose(v).dot(sig)
    t_n = u.dot(sig)# transform
    # horizontal slope from the central band of t_n
    idx_sub = np.arange(np.ceil((0.5-rad)*len(t_n)), \
                        np.ceil((0.5+rad)*len(t_n))+1).astype(int)
    y_ang = np.unwrap(np.angle(t_n[idx_sub]),axis=0)
    A = np.vstack([np.transpose(idx_sub-1), np.ones((len(idx_sub)))]).T
    (dx,_,_,_) = np.linalg.lstsq(A, y_ang, rcond=None)
    # vertical slope from the central band of t_m
    idx_sub = np.arange(np.ceil((0.5-rad)*len(t_m)), \
                        np.ceil((0.5+rad)*len(t_m))+1).astype(int)
    y_ang = np.unwrap(np.angle(t_m[idx_sub]), axis=0)
    # fix: rebuild the design matrix for this axis — the original reused the
    # matrix from the other axis, which breaks for non-square spectra
    A = np.vstack([np.transpose(idx_sub-1), np.ones((len(idx_sub)))]).T
    (dy,_,_,_) = np.linalg.lstsq(A, y_ang, rcond=None)
    dj = dx[0]*n / (np.pi)
    di = dy[0]*m / (np.pi)
    return di, dj
def phase_difference_1d(Q, axis=0):
    """get displacement from phase plane along one axis through differencing
    find slope of the phase plane through
    local difference of the phase angles
    Parameters
    ----------
    Q : np.array, size=(m,n), dtype=complex
        normalized cross spectrum
    axis : integer, default=0
        axis along which the displacement is estimated
    Returns
    -------
    dj : float
        sub-pixel displacement
    See Also
    --------
    phase_tpss, phase_svd, phase_difference
    Notes
    -----
    [1] "A fast and accurate frequency estimator", IEEE transactions on
    acoustics, speech and signal processing, vol.37(12) pp. 1987-1990, 1989.
    """
    if axis==0:
        Q = np.transpose(Q)
    m,n = Q.shape
    # find coherent data
    C = local_coherence(Q, ds=1)
    # fix: np.roll with a tuple shift and no axis rolls the *flattened*
    # array; roll along the last axis was intended
    C = np.minimum(C, np.roll(C, 1, axis=1))
    #estimate period
    Q_dj = np.roll(Q, 1, axis=1)
    Q_diff = np.multiply(np.conj(Q),Q_dj)
    Delta_dj = np.angle(Q_diff)/np.pi
    IN = C>.9
    # NOTE(review): scaling by m//2 (not n//2) after the transpose — confirm
    # this is the intended axis length
    dj = np.median(Delta_dj[IN])*(m//2)
    return dj
def phase_difference(Q):
    """get displacement from the phase plane via neighbouring vector difference
    Estimates the slope of the phase plane separately per axis, through the
    local difference of the phase angles.
    Parameters
    ----------
    Q : np.array, size=(m,n), dtype=complex
        normalized cross spectrum
    Returns
    -------
    di,dj : float
        sub-pixel displacement
    See Also
    --------
    phase_tpss, phase_svd, phase_difference_1d
    Notes
    -----
    [1] "A fast and accurate frequency estimator", IEEE transactions on
    acoustics, speech and signal processing, vol.37(12) pp. 1987-1990, 1989.
    """
    # one independent 1D estimate per axis
    return phase_difference_1d(Q, axis=0), phase_difference_1d(Q, axis=1)
def phase_lsq(I, J, Q):
    """get phase plane of cross-spectrum through least squares plane fitting
    Fits a plane (with offset) to the phase angles via the normal equations.
    Parameters
    ----------
    I : np.array, size=(mn,1), dtype=float
        vertical coordinate list
    J : np.array, size=(mn,1), dtype=float
        horizontal coordinate list
    Q : np.array, size=(mn,1), dtype=complex
        list with cross-spectrum complex values
    Returns
    -------
    di,dj : float
        sub-pixel displacement
    See Also
    --------
    phase_tpss, phase_radon, phase_svd, phase_difference, phase_pca
    """
    # design matrix: [I, J, 1]
    design = np.array([I, J, np.ones_like(I)]).T
    # normal matrix and right-hand side (phase in cycles)
    normal_mat = design.transpose().dot(design)
    rhs = design.transpose().dot(np.angle(Q) / (2*np.pi))
    # pseudo-inverse of the normal matrix
    pinv = np.linalg.inv(normal_mat.transpose().dot(normal_mat))\
        .dot(normal_mat.transpose())
    # least-squares plane parameters: (slope_i, slope_j, offset)
    fitted = pinv.dot(rhs)
    return fitted[0], fitted[1]
class BaseModel(object):
    """Base class for estimators; fitted parameters live in ``params``."""
    def __init__(self):
        # populated by the subclass' estimate()
        self.params = None
class PlaneModel(BaseModel):
    """Least squares estimator for a phase plane through the origin.
    The functional model is a plane without offset::
        z = x * dx + y * dy
    and the estimator minimizes the squared vertical residuals
    ``sum((z_i - x_i*dx - y_i*dy)**2)``.
    A minimum number of 2 points is required to solve for the parameters.
    (fix: the previous docstring described a polar line parameterization with
    ``dist``/``theta`` parameters, which does not match this model.)
    Attributes
    ----------
    params : tuple
        Plane model parameters in the following order `dx`, `dy`.
    """
    def estimate(self, data):
        """Estimate plane from data using least squares.
        Parameters
        ----------
        data : (N, 3) array
            N points with ``(x, y)`` coordinates and ``z`` values.
        Returns
        -------
        success : bool
            True, if model estimation succeeds.
        Raises
        ------
        ValueError
            If fewer than two samples are provided.
        """
        if data.shape[0] >= 2: # well determined
            x_hat = np.linalg.lstsq(data[:,0:2], data[:,-1], rcond=None)[0]
        else: # under-determined
            raise ValueError('At least two vectors needed.')
        self.params = (x_hat[0], x_hat[1])
        return True
    def residuals(self, data):
        """Determine residuals of data to model
        For each point the absolute vertical distance to the plane is returned.
        Parameters
        ----------
        data : (N, 3) array
            N points with x, y coordinates and z values, respectively.
        Returns
        -------
        residuals : (N, ) array
            Residual for each data point.
        """
        x_hat = self.params
        Q_hat = data[:,0]*x_hat[0] + data[:,1]*x_hat[1]
        residuals = np.abs(data[:,-1] - Q_hat)
        return residuals
    def predict_xy(self, xy, params=None):
        """Predict plane height at the given coordinates.
        Parameters
        ----------
        xy : array
            x,y-coordinates; either a single pair or an (N, 2) array.
        params : (2, ) array, optional
            Optional custom parameter set.
        Returns
        -------
        Q_hat : array
            Predicted plane height at x,y-coordinates.
        """
        if params is None:
            params = self.params
        x_hat = params
        if xy.ndim<2:
            Q_hat = xy[0]*x_hat[0] + xy[1]*x_hat[1]
        else:
            Q_hat = xy[:,0]*x_hat[0] + xy[:,1]*x_hat[1]
        return Q_hat
def phase_ransac(Q, precision_threshold=.05):
    """robustly fit plane using RANSAC algorithm
    find slope of the phase plane through
    random sampling and consensus
    Parameters
    ----------
    Q : np.array, size=(m,n), dtype=complex
        normalized cross spectrum
    precision_threshold : float, default=.05
        inlier threshold on the phase-angle residual
    Returns
    -------
    di,dj : float
        sub-pixel displacement
    See Also
    --------
    phase_tpss, phase_svd, phase_difference
    Notes
    -----
    [1] Fischler & Bolles. "Random sample consensus: a paradigm for model
    fitting with applications to image analysis and automated cartography"
    Communications of the ACM vol.24(6) pp.381-395, 1981.
    [2] Tong et al. "A novel subpixel phase correlation method using singular
    value decomposition and unified random sample consensus" IEEE transactions
    on geoscience and remote sensing vol.53(8) pp.4143-4156, 2015.
    """
    (m,n) = Q.shape
    # frequency coordinates per axis, flipped
    fy = np.flip((np.arange(0,m)-(m/2)) /m)
    fx = np.flip((np.arange(0,n)-(n/2)) /n)
    Fx = np.repeat(fx[np.newaxis,:],m,axis=0)
    Fy = np.repeat(fy[:,np.newaxis],n,axis=1)
    # samples for the plane fit: (x, y, phase angle) per frequency
    data = np.vstack((Fx.flatten(),
                      Fy.flatten(),
                      np.angle(Q).flatten())).T
    # 'ransac' is imported elsewhere in this file; PlaneModel is defined above
    ransac_model, inliers = ransac(data, PlaneModel,
                                   min_samples=int(2),
                                   residual_threshold=precision_threshold,
                                   max_trials=int(1e3))
    IN = np.reshape(inliers, (m,n)) # what data is within error bounds
    # NOTE(review): params[0] is the coefficient of the FIRST data column
    # (Fx, i.e. horizontal) — verify di/dj are not swapped here
    di = ransac_model.params[0]/(2.*np.pi)
    dj = ransac_model.params[1]/(2.*np.pi)
    return di, dj
def phase_pca(I, J, Q): # wip
    """get phase plane of cross-spectrum through principle component analysis
    find slope of the phase plane through
    principle component analysis
    Parameters
    ----------
    I : np.array, size=(mn,1), dtype=float
        vertical coordinate list
    J : np.array, size=(mn,1), dtype=float
        horizontal coordinate list
    Q : np.array, size=(mn,1), dtype=complex
        list with cross-spectrum complex values
    Returns
    -------
    di,dj : float
        sub-pixel displacement
    See Also
    --------
    phase_tpss, phase_radon, phase_svd, phase_difference
    """
    # 'pca' is defined elsewhere in this file
    eigen_vecs, eigen_vals = pca(np.array([I, J, np.angle(Q)/np.pi]).T)
    # NOTE(review): the next statement immediately overwrites e3, so this
    # first assignment is dead code — one of the two is likely a leftover
    e3 = eigen_vecs[:,np.argmin(eigen_vals)] # normal vector
    e3 = eigen_vecs[:,np.argmin(np.abs(eigen_vecs[-1,:]))]
    # slopes from the normal-vector components, oriented by its last entry
    di = np.sign(e3[-1])*e3[1]
    dj = np.sign(e3[-1])*e3[0]
    return di, dj
def phase_radon(Q): # wip
    """get displacement direction from the phase plane through Radon transform
    Parameters
    ----------
    Q : np.array, size=(m,n), dtype=complex
        cross spectrum
    Returns
    -------
    theta : float
        direction of displacement, in degrees
        NOTE(review): work in progress — only the direction is returned; the
        magnitude (rho) estimation sketched below is not implemented yet
    See Also
    --------
    phase_tpss, phase_svd, phase_difference
    Notes
    -----
    [1] Balci & Foroosh. "Subpixel registration directly from the phase
    difference" EURASIP journal on advances in signal processing, pp.1-11, 2006.
    """
    (m, n) = Q.shape
    half = m // 2
    Q = np.fft.fftshift(Q)
    # estimate direction, through the radon transform
    # restrict to a circular support so the sinogram is well defined
    W = np.fft.fftshift(raised_cosine(Q, beta=1e-5)).astype(bool)
    Q[~W] = 0 # make circular domain
    theta = np.linspace(0., 180., max(m,n), endpoint=False)
    R = radon(np.angle(Q), theta) # sinogram
    #plt.imshow(R[:half,:]), plt.show()
    #plt.imshow(np.flipud(R[half:,:])), plt.show()
    # fold the two halves onto each other; the phase plane gives the
    # strongest combined response along its slope direction
    R_fold = np.abs(np.multiply(R[:half,:], R[half:,:]))
    radon_score = np.sum(R_fold, axis=0)
    score_idx = np.argmax(radon_score)
    theta = theta[score_idx]
    del R_fold, radon_score, score_idx
    # rotating coordinate frame, angle difference
    # estimate magnitude
    # peaks can also be seen
    # plt.plot(R[:,score_idx]), plt.show()
    return theta
#todo: Gonzalez_10:RANSAC->LDA, Konstantinidis_ PAC, Fienup_82: Gradient-descend, Yousef_05:
# frequency preparation
def perdecomp(img):
    """calculate the periodic and smooth components of an image
    Parameters
    ----------
    img : np.array, size=(m,n)
        array with intensities
    Returns
    -------
    per : np.array, size=(m,n)
        periodic component
    cor : np.array, size=(m,n)
        smooth component
    Notes
    -----
    [1] Moisan. "Periodic plus smooth image decomposition", Journal of
    mathematical imaging and vision vol. 39.2 pp. 161-179, 2011.
    """
    img = img.astype(float)
    if img.ndim==2:
        (m, n) = img.shape
        # boundary image: intensity jumps across the periodic border
        per = np.zeros((m, n), dtype=float)
        per[+0,:] = +img[0,:] -img[-1,:]
        per[-1,:] = -per[0,:]
        per[:,+0] = per[:,+0] +img[:,+0] -img[:,-1]
        per[:,-1] = per[:,-1] -img[:,+0] +img[:,-1]
    elif img.ndim==3:
        # same boundary construction, applied band-wise
        (m, n, b) = img.shape
        per = np.zeros((m, n, b), dtype=float)
        per[+0,:,:] = +img[0,:,:] -img[-1,:,:]
        per[-1,:,:] = -per[0,:,:]
        per[:,+0,:] = per[:,+0,:] +img[:,+0,:] -img[:,-1,:]
        per[:,-1,:] = per[:,-1,:] -img[:,+0,:] +img[:,-1,:]
    # cosine grids for the Fourier-domain solve of the smooth component
    fy = np.cos( 2*np.pi*( np.arange(0,m) )/m )
    fx = np.cos( 2*np.pi*( np.arange(0,n) )/n )
    Fx = np.repeat(fx[np.newaxis,:],m,axis=0)
    Fy = np.repeat(fy[:,np.newaxis],n,axis=1)
    Fx[0,0] = 0  # avoids division by zero at the DC term below (2-1-1=0)
    if img.ndim==3:
        Fx = np.repeat(Fx[:,:,np.newaxis], b, axis=2)
        Fy = np.repeat(Fy[:,:,np.newaxis], b, axis=2)
        cor = np.real( np.fft.ifftn( np.fft.fft2(per) *.5/ (2-Fx-Fy)))
    else:
        cor = np.real( np.fft.ifft2( np.fft.fft2(per) *.5/ (2-Fx-Fy)))
    # the periodic component is what remains after removing the smooth part
    per = img-cor
    return (per, cor)
def normalize_power_spectrum(Q):
    """scale every nonzero entry of the cross-spectrum to unit magnitude
    Parameters
    ----------
    Q : np.array, dtype=complex
        cross spectrum
    Returns
    -------
    Qn : np.array, dtype=complex
        normalized cross spectrum; entries that are exactly zero stay zero
    """
    unit = np.zeros_like(Q)
    np.divide(Q, abs(Q), out=unit, where=Q != 0)
    return unit
# frequency matching filters
def raised_cosine(I, beta=0.35):
    """ raised cosine filter
    Parameters
    ----------
    I : np.array, size=(m,n)
        array with intensities
    beta : float, default=0.35
        roll-off factor
    Returns
    -------
    W : np.array, size=(m,n), dtype=float
        weighting mask
    See Also
    --------
    tpss
    Notes
    -----
    [1] Stone et al. "A fast direct Fourier-based algorithm for subpixel
    registration of images." IEEE Transactions on geoscience and remote sensing
    vol. 39(10) pp. 2235-2243, 2001.
    [2] Leprince, et.al. "Automatic and precise orthorectification,
    coregistration, and subpixel correlation of satellite images, application
    to ground deformation measurements", IEEE Transactions on Geoscience and
    Remote Sensing vol. 45.6 pp. 1529-1558, 2007.
    """
    (m, n) = I.shape
    # fft-shifted frequency coordinates per axis
    freq_i = np.mod(.5 + np.arange(0, m)/m, 1) - .5
    freq_j = np.mod(.5 + np.arange(0, n)/n, 1) - .5
    Freq_j, Freq_i = np.meshgrid(freq_j, freq_i)
    radius = np.sqrt(Freq_j**2 + Freq_i**2)
    # cosine taper inside the roll-off band
    taper = np.cos((np.pi/(2*beta)) * (radius - (.5 - beta)))**2
    band = np.logical_and((.5 - beta) <= radius, radius <= .5)
    # compose filter: pass-band, roll-off band, stop-band (zero)
    W = np.zeros((m, n))
    W[radius < (.5 - beta)] = 1
    W[band] = taper[band]
    return W
# def hanning_window
def low_pass_rectancle(I, r=0.50):
    """ create hard low-pass filter with a rectangular pass band
    Parameters
    ----------
    I : np.array, size=(m,n)
        array with intensities
    r : float, default=0.5
        radius of the rectangle, r=.5 is same as its width
    Returns
    -------
    W : np.array, size=(m,n), dtype=bool
        weighting mask
    See Also
    --------
    low_pass_circle, low_pass_pyramid, low_pass_bell
    Notes
    -----
    [1] Takita et al. "High-accuracy subpixel image registration based on
    phase-only correlation" IEICE transactions on fundamentals of electronics,
    communications and computer sciences, vol.86(8) pp.1925-1934, 2003.
    """
    (m, n) = I.shape
    # fft-shifted frequency coordinates, scaled onto [-1,1)
    freq_i = 2*np.mod(.5 + np.arange(0, m)/m, 1) - 1
    freq_j = 2*np.mod(.5 + np.arange(0, n)/n, 1) - 1
    Freq_j, Freq_i = np.meshgrid(freq_j, freq_i)
    # keep frequencies inside the axis-aligned square of half-width r
    return np.logical_and(np.abs(Freq_j) <= r, np.abs(Freq_i) <= r)
def low_pass_pyramid(I, r=0.50):
    """ create low-pass filter with pyramid shape
    Parameters
    ----------
    I : np.array, size=(m,n)
        array with intensities
    r : float, default=0.5
        radius of the mother rectangle, r=.5 is same as its width
    Returns
    -------
    W : np.array, size=(m,n), dtype=float
        weighting mask
    See Also
    --------
    low_pass_rectancle, low_pass_circle, low_pass_bell
    Notes
    -----
    [1] Takita et al. "High-accuracy subpixel image registration based on
    phase-only correlation" IEICE transactions on fundamentals of electronics,
    communications and computer sciences, vol.86(8) pp.1925-1934, 2003.
    """
    box = low_pass_rectancle(I, r).astype(float)
    # convolving the box with itself yields a pyramid profile
    pyramid = signal.convolve2d(box, box, mode='same', boundary='wrap')
    return np.fft.fftshift(pyramid / np.max(pyramid))
def low_pass_bell(I, r=0.50):
    """ create low-pass filter with a bell shape
    Parameters
    ----------
    I : np.array, size=(m,n)
        array with intensities
    r : float, default=0.5
        radius of the mother rectangle, r=.5 is same as its width
    Returns
    -------
    W : np.array, size=(m,n), dtype=float
        weighting mask
    See Also
    --------
    low_pass_rectancle, low_pass_circle, low_pass_pyramid
    Notes
    -----
    [1] Takita et al. "High-accuracy subpixel image registration based on
    phase-only correlation" IEICE transactions on fundamentals of electronics,
    communications and computer sciences, vol.86(8) pp.1925-1934, 2003.
    """
    box = low_pass_rectancle(I, r).astype(float)
    pyramid = low_pass_pyramid(I, r).astype(float)
    # convolving the box with the pyramid smooths it into a bell profile
    bell = signal.convolve2d(box, pyramid, mode='same', boundary='wrap')
    return np.fft.fftshift(bell / np.max(bell))
def low_pass_circle(I, r=0.50):
    """ create hard low-pass filter with a circular pass band
    Parameters
    ----------
    I : np.array, size=(m,n)
        array with intensities
    r : float, default=0.5
        radius of the circle, r=.5 is same as its width
    Returns
    -------
    W : np.array, size=(m,n), dtype=bool
        weighting mask
    See Also
    --------
    raised_cosine, cosine_bell, high_pass_circle
    """
    (m, n) = I.shape
    # fft-shifted frequency coordinates per axis
    freq_i = np.mod(.5 + np.arange(0, m)/m, 1) - .5
    freq_j = np.mod(.5 + np.arange(0, n)/n, 1) - .5
    Freq_j, Freq_i = np.meshgrid(freq_j, freq_i)
    radius = np.sqrt(Freq_j**2 + Freq_i**2)
    # keep frequencies inside the disc of radius r
    return radius <= r
def high_pass_circle(I, r=0.50):
    """ create hard high-pass filter with a circular stop band
    Parameters
    ----------
    I : np.array, size=(m,n)
        array with intensities
    r : float, default=0.5
        radius of the circle, r=.5 is same as its width
    Returns
    -------
    W : np.array, size=(m,n), dtype=bool
        weighting mask
    See Also
    --------
    raised_cosine, cosine_bell, low_pass_circle
    """
    (m, n) = I.shape
    # fft-shifted frequency coordinates per axis
    freq_i = np.mod(.5 + np.arange(0, m)/m, 1) - .5
    freq_j = np.mod(.5 + np.arange(0, n)/n, 1) - .5
    Freq_j, Freq_i = np.meshgrid(freq_j, freq_i)
    radius = np.sqrt(Freq_j**2 + Freq_i**2)
    # keep frequencies outside the disc of radius r
    return radius >= r
def cosine_bell(I):
    """ cosine bell filter
    Parameters
    ----------
    I : np.array, size=(m,n)
        array with intensities
    Returns
    -------
    W : np.array, size=(m,n), dtype=float
        weighting mask
    See Also
    --------
    raised_cosine
    """
    (m, n) = I.shape
    # fft-shifted frequency coordinates per axis
    freq_i = np.mod(.5 + np.arange(0, m)/m, 1) - .5
    freq_j = np.mod(.5 + np.arange(0, n)/n, 1) - .5
    Freq_j, Freq_i = np.meshgrid(freq_j, freq_i)
    radius = np.sqrt(Freq_j**2 + Freq_i**2)
    # cosine taper from 1 at DC down to 0 at radius .5, zero beyond
    W = .5*np.cos(2*radius*np.pi) + .5
    W[radius > .5] = 0
    return W
def cross_shading_filter(Q): #, az_1, az_2): # wip
    """ construct a direction-selective frequency mask (work in progress)
    Splits the orientations of the cross-spectrum into a high- and a
    low-coherence group via 2-means on the coherence sinogram, and masks
    out the low-coherence directions.
    NOTE(review): presumably intended to suppress shading differences
    between differently illuminated acquisitions — confirm against callers.
    Parameters
    ----------
    Q : np.array, size=(m,n), dtype=complex
        cross spectrum
    Returns
    -------
    W : np.array, size=(m,n), dtype=bool
        directional frequency mask
    """
    (m,n) = Q.shape
    Coh = local_coherence(np.fft.fftshift(Q))
    R = np.fft.fftshift(low_pass_circle(Q, r=0.50))
    Coh[R==0] = 0  # restrict coherence to the circular domain
    theta = np.linspace(0., 180., max(m,n), endpoint=False)
    S = radon(Coh, theta)/m # sinogram
    # classify
    s = S[m//2,:]  # coherence profile through the sinogram center
    min_idx,max_idx = np.argmin(s), np.argmax(s)
    # create circle: embed the (periodic) orientations on a circle so the
    # clustering respects the wrap-around at 180 degrees
    x,y = np.sin(np.radians(2*theta)), np.cos(np.radians(2*theta))
    coh_circle = np.vstack((x,y,(s+.1)**2)).T
    # 2-means split, seeded at the least and most coherent direction
    kmeans = KMeans(n_clusters=2, \
                    init=np.array([coh_circle[min_idx,:],
                                   coh_circle[max_idx,:]]),
                    n_init=1
                    ).fit(coh_circle)
    grouping = kmeans.labels_ #.astype(np.float)
    OUT = grouping==grouping[min_idx]  # directions in the low-coherence group
    # construct filter
    fy = (np.arange(0,m)-(m/2)) /m
    fx = (np.arange(0,n)-(n/2)) /n
    Fx = np.flip(np.repeat(fx[np.newaxis,:],m,axis=0), axis=1)
    Fy = np.repeat(fy[:,np.newaxis],n,axis=1)
    # orientation of every frequency sample, quantized to the sinogram bins
    Theta = np.round(np.degrees(np.arctan2(Fx,Fy) % np.pi)/360 *m) *360 /m
    W = np.isin(Theta, theta[~OUT])
    return W
# cross-spectral and frequency signal metrics for filtering
def thresh_masking(S, m=1e-4, s=10):
    """ mask significant intensities in spectrum
    Parameters
    ----------
    S : np.array, size=(m,n), dtype=complex
        array with spectrum, i.e.: S = np.fft.fft2(I)
    m : float, default=1e-4
        cut-off intensity in respect to maximum
        (fix: docstring previously claimed a default of 1e-3)
    s : integer, default=10
        kernel size of the median filter
    Returns
    -------
    M : np.array, size=(m,n), dtype=bool
        frequency mask
    See Also
    --------
    tpss
    Notes
    -----
    [1] Stone et al. "A fast direct Fourier-based algorithm for subpixel
    registration of images." IEEE Transactions on geoscience and remote sensing
    vol. 39(10) pp. 2235-2243, 2001.
    [2] Leprince, et.al. "Automatic and precise orthorectification,
    coregistration, and subpixel correlation of satellite images, application
    to ground deformation measurements", IEEE Transactions on Geoscience and
    Remote Sensing vol. 45.6 pp. 1529-1558, 2007.
    """
    Sbar = np.abs(S)
    th = np.max(Sbar)*m
    # compose filter: keep magnitudes above the relative threshold, then
    # clean isolated pixels with a median filter
    M = Sbar>th
    M = ndimage.median_filter(M, size=(s,s))
    return M
def local_coherence(Q, ds=1):
    """ estimate the local coherence of a spectrum
    Parameters
    ----------
    Q : np.array, size=(m,n), dtype=complex
        array with cross-spectrum
    ds : integer, default=1
        kernel radius to describe the neighborhood
    Returns
    -------
    M : np.array, size=(m,n), dtype=float
        vector coherence from no to ideal, i.e.: 0...1
    See Also
    --------
    thresh_masking
    """
    diam = 2*ds+1
    C = np.zeros_like(Q)
    (isteps,jsteps) = np.meshgrid(np.linspace(-ds,+ds,2*ds+1, dtype=int), \
                                  np.linspace(-ds,+ds,2*ds+1, dtype=int))
    IN = np.ones(diam**2, dtype=bool)
    IN[diam**2//2] = False  # exclude the center, i.e. the pixel itself
    isteps,jsteps = isteps.flatten()[IN], jsteps.flatten()[IN]
    for idx, istep in enumerate(isteps):
        jstep = jsteps[idx]
        # fix: a tuple shift without axis rolled the *flattened* array by
        # istep+jstep; roll over both image axes instead
        Q_step = np.roll(Q, (istep,jstep), axis=(0,1))
        # if the spectrum is normalized, then no division is needed
        C += Q*np.conj(Q_step)
    C = np.abs(C)/np.sum(IN)
    return C
# frequency/spectrum matching functions
def create_complex_DCT(I, Cc, Cs):
    """ create a complex representation of the discrete cosine transform

    Parameters
    ----------
    I : np.array, size=(m,m)
        array with intensities
    Cc : np.array, size=(m,m)
        cosine basis matrix, see get_cosine_matrix
    Cs : np.array, size=(m,m)
        sine basis matrix, see get_sine_matrix

    Returns
    -------
    C_dct : np.array, size=(m,m), dtype=complex
        complex decomposition of the image

    Notes
    -----
    [1] Lie, et.al. "DCT-based phase correlation motion estimation",
    IEEE international conference on image processing, vol. 1, 2004.
    """
    # the 2D transform is a matrix product (basis @ image @ basis.T); the
    # original used "*", which is an elementwise product for np.ndarray
    Ccc, Css = Cc @ I @ Cc.T, Cs @ I @ Cs.T
    Csc, Ccs = Cs @ I @ Cc.T, Cc @ I @ Cs.T
    C_dct = Ccc-Css + 1j*(-(Ccs+Csc))
    return C_dct
def get_cosine_matrix(I,N=None):
    """ create a matrix with a cosine basis (DCT-II)

    Parameters
    ----------
    I : np.array, size=(m,m)
        array with intensities, only its size is used
    N : integer, optional
        amount of basis functions to use, defaults to the array size

    Returns
    -------
    C : np.array, size=(m,m)
        cosine basis matrix, with at most N non-zero columns

    See Also
    --------
    get_sine_matrix, create_complex_DCT
    """
    (L,_) = I.shape
    if N is None:  # comparison against None should use "is", not "=="
        # make a square matrix
        N = L
    C = np.zeros((L,L))
    for k in range(L):
        for n in range(N):
            if k == 0:
                # the DC component has a different normalization
                C[k,n] = np.sqrt(1/L)
            else:
                C[k,n] = np.sqrt(2/L)*np.cos((np.pi*k*(1/2+n))/L)
    return(C)
def get_sine_matrix(I,N=None):
    """ create a matrix with a sine basis (DST)

    Parameters
    ----------
    I : np.array, size=(m,m)
        array with intensities, only its size is used
    N : integer, optional
        amount of basis functions to use, defaults to the array size

    Returns
    -------
    C : np.array, size=(m,m)
        sine basis matrix, with at most N non-zero columns

    See Also
    --------
    get_cosine_matrix, create_complex_DCT
    """
    (L,_) = I.shape
    if N is None:  # comparison against None should use "is", not "=="
        # make a square matrix
        N = L
    C = np.zeros((L,L))
    for k in range(L):
        for n in range(N):
            if k == 0:
                # NOTE(review): the k==0 row mirrors the cosine basis
                # (a k=0 sine would be zero) -- confirm this is intended
                C[k,n] = np.sqrt(1/L)
            else:
                C[k,n] = np.sqrt(2/L)*np.sin((np.pi*k*(1/2+n))/L)
    return(C)
def cosi_corr(I1, I2, beta1=.35, beta2=.50, m=1e-4):
    """ match two imagery via a raised-cosine windowed cross-spectrum, with
    an iterative integer repositioning of the search template

    Parameters
    ----------
    I1 : np.array, size=(m,n) or (m,n,b)
        template with intensities
    I2 : np.array, size=(m,n) or (m,n,b)
        search space with intensities
    beta1 : float, default=.35
        shape parameter of the raised-cosine window applied to the template
    beta2 : float, default=.50
        shape parameter of the raised-cosine window applied to the search space
    m : float, default=1e-4
        cut-off intensity handed to thresh_masking

    Returns
    -------
    Qn : np.array, dtype=complex
        normalized cross-spectrum
    WS : np.array, dtype=bool
        frequency mask, see thresh_masking
    m0 : np.array, size=(2,), dtype=int
        accumulated integer displacement applied while repositioning
    """
    mt,nt = I1.shape[0], I1.shape[1] # dimensions of the template

    W1 = raised_cosine(np.zeros((mt,nt)), beta1)
    W2 = raised_cosine(np.zeros((mt,nt)), beta2)

    if I1.size==I2.size: # if templates are same size, no refinement is done
        tries = [0]
    else:
        tries = [0, 1]
    di,dj, m0 = 0,0,np.array([0, 0])
    for trying in tries: # implement refinement step to have more overlap
        if I1.ndim==3: # multi-spectral frequency stacking
            bands = I1.shape[2]
            I1sub,I2sub = reposition_templates_from_center(I1,I2,di,dj)
            for i in range(bands): # loop through all bands
                I1bnd, I2bnd = I1sub[:,:,i], I2sub[:,:,i]
                S1, S2 = np.fft.fft2(I1bnd), np.fft.fft2(I2bnd)
                if i == 0:
                    Q = (W1*S1)*np.conj((W2*S2))
                else:
                    # running average of the cross-spectrum over the bands
                    Q_b = (W1*S1)*np.conj((W2*S2))
                    Q = (1/(i+1))*Q_b + (i/(i+1))*Q
        else:
            I1sub,I2sub = reposition_templates_from_center(I1,I2,di,dj)
            S1, S2 = np.fft.fft2(I1sub), np.fft.fft2(I2sub)
            Q = (W1*S1)*np.conj((W2*S2))

        # transform back to spatial domain
        C = np.real(np.fft.fftshift(np.fft.ifft2(Q)))
        ddi, ddj,_,_ = get_integer_peak_location(C)

        m_int = np.round(np.array([ddi, ddj])).astype(int)
        if np.amax(abs(np.array([ddi, ddj])))<.5:
            # displacement is already sub-pixel, no repositioning needed
            break
        else:
            di,dj = m_int[0], m_int[1]
        m0[0] += di
        m0[1] += dj

    # NOTE(review): for multi-spectral input, WS is built from S1 of the
    # last processed band only -- confirm this is intended
    WS = thresh_masking(S1, m)
    Qn = normalize_power_spectrum(Q)
    return Qn, WS, m0
def cosine_corr(I1, I2): # wip
    """ match two imagery through discrete cosine transformation

    Parameters
    ----------
    I1 : np.array, size=(m,n)
        array with intensities
    I2 : np.array, size=(m,n)
        array with intensities

    Returns
    -------
    Q : np.array, size=(m,n)
        cross-spectrum

    See Also
    --------
    create_complex_DCT, sign_only_corr

    Notes
    -----
    [1] Lie, et.al. "DCT-based phase correlation motion estimation",
    IEEE international conference on image processing, vol. 1, 2004.
    """
    # construct cosine and sine basis matrices
    Cc, Cs = get_cosine_matrix(I1), get_sine_matrix(I1)

    I1sub, I2sub = make_templates_same_size(I1, I2)
    if I1.ndim == 3:  # multi-spectral frequency stacking
        for band in range(I1.shape[2]):
            spec_1 = create_complex_DCT(I1sub[:, :, band], Cc, Cs)
            spec_2 = create_complex_DCT(I2sub[:, :, band], Cc, Cs)
            Q_band = spec_1*np.conj(spec_2)
            if band == 0:
                Q = Q_band
            else:  # running average of the cross-spectrum over the bands
                Q = (1/(band+1))*Q_band + (band/(band+1))*Q
    else:
        spec_1 = create_complex_DCT(I1sub, Cc, Cs)
        spec_2 = create_complex_DCT(I2sub, Cc, Cs)
        Q = spec_1*np.conj(spec_2)
    return Q
def masked_cosine_corr(I1, I2, M1, M2): # wip
    '''
    work in progress: intended as a masked variant of cosine_corr; the
    least-squares estimation of the DCT coefficients below is not yet
    connected to the cross-spectrum that is returned

    Parameters
    ----------
    I1, I2 : np.array, size=(m,n)
        arrays with intensities
    M1, M2 : np.array, size=(m,n)
        masks flagging the valid pixels

    Returns
    -------
    Q : np.array
        cross-spectrum, built from the full (unmasked) templates
    '''
    M1, M2 = M1.astype(dtype=bool), M2.astype(dtype=bool)

    # construct cosine and sine basis matrices
    Cc, Cs = get_cosine_matrix(I1), get_sine_matrix(I1)

    # look at how many frequencies can be estimated with this data
    (m,n) = M1.shape
    X1 = np.ones((m,n), dtype=bool)
    min_span = int(np.floor(np.sqrt(min(np.sum(M1), np.sum(M2)))))
    X1[min_span:,:] = False
    X1[:,min_span:] = False

    # assumes 8-bit intensities, rescaled to -.5 ... +.5 -- TODO confirm
    y = (I1[M1].astype(dtype=float)/255)-.5

    # build matrix
    Ccc = np.kron(Cc,Cc)
    # shrink size
    Ccc = Ccc[M1.flatten(),:] # remove rows, as these are missing
    Ccc = Ccc[:,X1.flatten()] # remove collumns, since these can't be estimated

    Icc = np.linalg.lstsq(Ccc, y, rcond=None)[0]
    Icc = np.reshape(Icc, (min_span, min_span))

    # NOTE(review): the two statements below have no effect on the result;
    # iCC is never used and the reshape result is discarded. Icc is also
    # unused by the code that follows.
    iCC = Ccc.T*y
    np.reshape(Ccc.T*y, (min_span, min_span))

    if I1.ndim==3: # multi-spectral frequency stacking
        (mt,nt,bt) = I1.shape
        (ms,ns,bs) = I2.shape
        # crop the search space to the template extent
        md, nd = np.round((ms-mt)/2).astype(int), np.round((ns-nt)/2).astype(int)
        for i in range(bt): # loop through all bands
            I1sub = I1[:,:,i]
            I2sub = I2[md:-md, nd:-nd,i]
            C1 = create_complex_DCT(I1sub, Cc, Cs)
            C2 = create_complex_DCT(I2sub, Cc, Cs)
            if i == 0:
                Q = C1*np.conj(C2)
            else:
                # running average of the cross-spectrum over the bands
                Q_b = (C1)*np.conj(C2)
                Q = (1/(i+1))*Q_b + (i/(i+1))*Q
    else:
        I1sub,I2sub = make_templates_same_size(I1,I2)
        C1 = create_complex_DCT(I1sub, Cc, Cs)
        C2 = create_complex_DCT(I2sub, Cc, Cs)
        Q = (C1)*np.conj(C2)
    return Q
def phase_only_corr(I1, I2):
    """ match two imagery through phase only correlation

    Parameters
    ----------
    I1 : np.array, size=(m,n)
        array with intensities
    I2 : np.array, size=(m,n)
        array with intensities

    Returns
    -------
    Q : np.array, size=(m,n)
        cross-spectrum

    See Also
    --------
    phase_corr, symmetric_phase_corr, amplitude_comp_corr

    Notes
    -----
    [1] Horner & Gianino, "Phase-only matched filtering", Applied optics,
    vol. 23(6) pp.812--816, 1984.
    [2] Kumar & Juday, "Design of phase-only, binary phase-only, and complex
    ternary matched filters with increased signal-to-noise ratios for
    colored noise", Optics letters, vol. 16(13) pp. 1025--1027, 1991.
    """
    if I1.ndim==3: # multi-spectral frequency stacking
        bands = I1.shape[2]
        I1sub,I2sub = make_templates_same_size(I1,I2)
        for i in range(bands): # loop through all bands
            I1bnd, I2bnd = I1sub[:,:,i], I2sub[:,:,i]
            S1, S2 = np.fft.fft2(I1bnd), np.fft.fft2(I2bnd)
            # phase-only filter [1]: divide out the *spectral* magnitude;
            # the original divided by the image magnitude abs(I2bnd)
            S2abs = np.abs(S2)
            W2 = np.divide(1, S2abs,
                           out=np.zeros_like(S2abs), where=S2abs!=0)
            if i == 0:
                Q = (S1)*np.conj((W2*S2))
            else:
                # running average of the cross-spectrum over the bands
                Q_b = (S1)*np.conj((W2*S2))
                Q = (1/(i+1))*Q_b + (i/(i+1))*Q
    else:
        I1sub,I2sub = make_templates_same_size(I1,I2)
        S1, S2 = np.fft.fft2(I1sub), np.fft.fft2(I2sub)
        S2abs = np.abs(S2)
        W2 = np.divide(1, S2abs,
                       out=np.zeros_like(S2abs), where=S2abs!=0)
        Q = (S1)*np.conj((W2*S2))
    return Q
def sign_only_corr(I1, I2): # to do
    """ match two imagery through sign-only (DCT) correlation

    Parameters
    ----------
    I1 : np.array, size=(m,n)
        array with intensities
    I2 : np.array, size=(m,n)
        array with intensities

    Returns
    -------
    C : np.array, size=(m,n), real
        displacement surface

    See Also
    --------
    cosine_corr

    Notes
    -----
    [1] Ito & Kiya, "DCT sign-only correlation with application to image
    matching and the relationship with phase-only correlation",
    IEEE international conference on acoustics, speech and signal
    processing, vol. 1, 2007.
    """
    I1sub, I2sub = make_templates_same_size(I1, I2)
    if I1.ndim == 3:  # multi-spectral frequency stacking
        for band in range(I1.shape[2]):
            # only the sign of the DCT coefficients is kept, see [1]
            D1 = np.sign(fft.dctn(I1sub[:, :, band], 2))
            D2 = np.sign(fft.dctn(I2sub[:, :, band], 2))
            Q_band = D1*np.conj(D2)
            if band == 0:
                Q = Q_band
            else:  # running average over the bands
                Q = (1/(band+1))*Q_band + (band/(band+1))*Q
    else:
        D1 = np.sign(fft.dctn(I1sub, 2))
        D2 = np.sign(fft.dctn(I2sub, 2))
        Q = D1*np.conj(D2)
    # back-transform the combined sign spectrum to the spatial domain
    C = fft.idctn(Q, 2)
    return C
def symmetric_phase_corr(I1, I2):
    """ match two imagery through symmetric phase correlation

    Parameters
    ----------
    I1 : np.array, size=(m,n)
        array with intensities
    I2 : np.array, size=(m,n)
        array with intensities

    Returns
    -------
    Q : np.array, size=(m,n)
        cross-spectrum

    See Also
    --------
    phase_only_corr

    Notes
    -----
    NOTE(review): the weighting is built from the *image* magnitudes
    (matching the single-band branch); the literature typically weights by
    the spectral magnitudes -- confirm intended
    """
    if I1.ndim==3: # multi-spectral frequency stacking
        bands = I1.shape[2]
        I1sub,I2sub = make_templates_same_size(I1,I2)
        for i in range(bands): # loop through all bands
            I1bnd, I2bnd = I1sub[:,:,i], I2sub[:,:,i]
            S1, S2 = np.fft.fft2(I1bnd), np.fft.fft2(I2bnd)
            # use the per-band templates; the original called the
            # non-existent "np.divided", used the full multi-band arrays,
            # and a stray full-array fft2 overwrote the band spectra
            W2 = np.divide(1, np.sqrt(abs(I1bnd))*np.sqrt(abs(I2bnd)) )
            if i == 0:
                Q = (S1)*np.conj((W2*S2))
            else:
                # running average of the cross-spectrum over the bands
                Q_b = (S1)*np.conj((W2*S2))
                Q = (1/(i+1))*Q_b + (i/(i+1))*Q
    else:
        I1sub,I2sub = make_templates_same_size(I1,I2)
        S1, S2 = np.fft.fft2(I1sub), np.fft.fft2(I2sub)
        W2 = np.divide(1, np.sqrt(abs(I1sub))*np.sqrt(abs(I2sub)) )
        Q = (S1)*np.conj((W2*S2))
    return Q
def amplitude_comp_corr(I1, I2, F_0=0.04):
    """ match two imagery through amplitude compensated phase correlation

    Parameters
    ----------
    I1 : np.array, size=(m,n)
        array with intensities
    I2 : np.array, size=(m,n)
        array with intensities
    F_0 : float, default=4e-2
        cut-off intensity in respect to maximum

    Returns
    -------
    Q : np.array, size=(m,n)
        cross-spectrum

    Notes
    -----
    [1] "Amplitude-compensated matched filtering", Applied optics,
    vol. 27(16) pp. 3461-3463, 1988.
    """
    if I1.ndim==3: # multi-spectral frequency stacking
        bands = I1.shape[2]
        I1sub,I2sub = make_templates_same_size(I1,I2)
        for i in range(bands): # loop through all bands
            I1bnd, I2bnd = I1sub[:,:,i], I2sub[:,:,i]
            S1, S2 = np.fft.fft2(I1bnd), np.fft.fft2(I2bnd)
            s_0 = F_0 * np.amax(abs(S2))
            # per-band weights: the original indexed the full multi-band
            # array (shape mismatch with S2) and assigned the whole of A
            # to a boolean subset of W (ValueError); both are fixed here
            W = np.divide(1, abs(I2bnd) )
            A = np.divide(s_0, abs(I2bnd)**2)
            W[abs(S2)>s_0] = A[abs(S2)>s_0]
            if i == 0:
                Q = (S1)*np.conj((W*S2))
            else:
                # running average of the cross-spectrum over the bands
                Q_b = (S1)*np.conj((W*S2))
                Q = (1/(i+1))*Q_b + (i/(i+1))*Q
    else:
        I1sub,I2sub = make_templates_same_size(I1,I2)
        S1, S2 = np.fft.fft2(I1sub), np.fft.fft2(I2sub)
        s_0 = F_0 * np.amax(abs(S2))
        # NOTE(review): weighting uses the image magnitude abs(I2sub);
        # [1] compensates the *spectral* amplitude -- confirm intended
        W = np.divide(1, abs(I2sub) )
        A = np.divide(s_0, abs(I2sub)**2)
        W[abs(S2)>s_0] = A[abs(S2)>s_0]
        Q = (S1)*np.conj((W*S2))
    return Q
def robust_corr(I1, I2):
    """ match two imagery through fast robust correlation

    Parameters
    ----------
    I1 : np.array, size=(m,n)
        array with intensities
    I2 : np.array, size=(m,n)
        array with intensities

    Returns
    -------
    Q : np.array, size=(m,n)
        cross-spectrum

    Notes
    -----
    [1] Fitch et al. "Fast robust correlation", IEEE transactions on image
    processing vol. 14(8) pp. 1063-1073, 2005.
    [2] Essannouni et al. "Adjustable SAD matching algorithm using frequency
    domain" Journal of real-time image processing, vol.1 pp.257-265
    """
    I1sub, I2sub = make_templates_same_size(I1, I2)

    Q = None
    # kernel expansion of the robust penalty, see [1]
    for p in 10**np.arange(0, 1, .5):
        T1 = 1/p**(1/3) * np.exp(1j*(2*p -1)*I1sub)
        T2 = 1/p**(1/3) * np.exp(1j*(2*p -1)*I2sub)
        F1, F2 = np.fft.fft2(T1), np.fft.fft2(T2)
        if Q is None:
            Q = F1*np.conj(F2)
        else:  # accumulate the cross-spectra of all expansion terms
            Q += F1*np.conj(F2)
    return Q
def orientation_corr(I1, I2):
    """ match two imagery through orientation correlation

    Parameters
    ----------
    I1 : np.array, size=(m,n)
        array with intensities
    I2 : np.array, size=(m,n)
        array with intensities

    Returns
    -------
    Q : np.array, size=(m,n)
        cross-spectrum

    See Also
    --------
    phase_corr, windrose_corr

    Notes
    -----
    [1] Fitch et al. "Orientation correlation", Proceeding of the Britisch
    machine vison conference, pp. 1--10, 2002.
    [2] Heid & Kääb. "Evaluation of existing image matching methods for
    deriving glacier surface displacements globally from optical satellite
    imagery", Remote sensing of environment, vol. 118 pp. 339-355, 2012.
    """
    I1sub, I2sub = make_templates_same_size(I1, I2)

    if I1.ndim == 3:  # multi-spectral frequency stacking
        for band in range(I1.shape[2]):
            # normalize the spectra before combining them
            F1 = normalize_power_spectrum(np.fft.fft2(I1sub[:, :, band]))
            F2 = normalize_power_spectrum(np.fft.fft2(I2sub[:, :, band]))
            Q_band = F1*np.conj(F2)
            if band == 0:
                Q = Q_band
            else:  # running average over the bands
                Q = (1/(band+1))*Q_band + (band/(band+1))*Q
    else:
        F1 = normalize_power_spectrum(np.fft.fft2(I1sub))
        F2 = normalize_power_spectrum(np.fft.fft2(I2sub))
        Q = F1*np.conj(F2)
    return Q
def windrose_corr(I1, I2):
    """ match two imagery through windrose phase correlation

    Parameters
    ----------
    I1 : np.array, size=(m,n)
        array with intensities
    I2 : np.array, size=(m,n)
        array with intensities

    Returns
    -------
    Q : np.array, size=(m,n)
        cross-spectrum

    See Also
    --------
    orientation_corr, phase_only_corr

    Notes
    -----
    [1] Kumar & Juday, "Design of phase-only, binary phase-only, and complex
    ternary matched filters with increased signal-to-noise ratios for
    colored noise", Optics letters, vol. 16(13) pp. 1025--1027, 1991.
    """
    if I1.ndim==3: # multi-spectral frequency stacking
        bands = I1.shape[2]
        I1sub,I2sub = make_templates_same_size(I1,I2)
        for i in range(bands): # loop through all bands
            I1bnd, I2bnd = I1sub[:,:,i], I2sub[:,:,i]
            # binarize the spectra, as in the single-band branch below;
            # the original multi-band loop omitted np.sign
            S1 = np.sign(np.fft.fft2(I1bnd))
            S2 = np.sign(np.fft.fft2(I2bnd))
            if i == 0:
                Q = (S1)*np.conj(S2)
            else:
                # running average of the cross-spectrum over the bands
                Q_b = (S1)*np.conj(S2)
                Q = (1/(i+1))*Q_b + (i/(i+1))*Q
    else:
        I1sub,I2sub = make_templates_same_size(I1,I2)
        S1, S2 = np.sign(np.fft.fft2(I1sub)), np.sign(np.fft.fft2(I2sub))
        Q = (S1)*np.conj(S2)
    return Q
def phase_corr(I1, I2):
    """ match two imagery through phase correlation

    Parameters
    ----------
    I1 : np.array, size=(m,n)
        array with intensities
    I2 : np.array, size=(m,n)
        array with intensities

    Returns
    -------
    Q : np.array, size=(m,n)
        cross-spectrum

    See Also
    --------
    orientation_corr, cross_corr

    Notes
    -----
    [1] Kuglin & Hines. "The phase correlation image alignment method",
    proceedings of the IEEE international conference on cybernetics and
    society, pp. 163-165, 1975.
    """
    if I1.ndim==3: # multi-spectral frequency stacking
        bands = I1.shape[2]
        I1sub,I2sub = make_templates_same_size(I1,I2)
        for i in range(bands): # loop through all bands
            I1bnd, I2bnd = I1sub[:,:,i], I2sub[:,:,i]
            S1, S2 = np.fft.fft2(I1bnd), np.fft.fft2(I2bnd)
            if i == 0:
                Q = (S1)*np.conj(S2)
                Q = np.divide(Q, abs(Q))
            else:
                Q_b = (S1)*np.conj(S2)
                # normalize by the magnitude of this band's own
                # cross-spectrum; the original divided by abs(Q), i.e.
                # the magnitude of the running average
                Q_b = np.divide(Q_b, abs(Q_b))
                Q = (1/(i+1))*Q_b + (i/(i+1))*Q
    else:
        I1sub,I2sub = make_templates_same_size(I1,I2)
        S1, S2 = np.fft.fft2(I1sub), np.fft.fft2(I2sub)
        Q = (S1)*np.conj(S2)
        Q = np.divide(Q, abs(Q))
    return Q
def cross_corr(I1, I2):
    """ match two imagery through cross correlation in FFT

    Parameters
    ----------
    I1 : np.array, size=(m,n)
        array with intensities
    I2 : np.array, size=(m,n)
        array with intensities

    Returns
    -------
    Q : np.array, size=(m,n)
        cross-spectrum

    See Also
    --------
    phase_corr

    Notes
    -----
    [1] Heid & Kääb. "Evaluation of existing image matching methods for
    deriving glacier surface displacements globally from optical satellite
    imagery", Remote sensing of environment, vol. 118 pp. 339-355, 2012.
    """
    I1sub, I2sub = make_templates_same_size(I1, I2)

    if I1.ndim == 3:  # multi-spectral frequency stacking
        for band in range(I1.shape[2]):
            F1 = np.fft.fft2(I1sub[:, :, band])
            F2 = np.fft.fft2(I2sub[:, :, band])
            Q_band = F1*np.conj(F2)
            if band == 0:
                Q = Q_band
            else:  # running average over the bands
                Q = (1/(band+1))*Q_band + (band/(band+1))*Q
    else:
        F1, F2 = np.fft.fft2(I1sub), np.fft.fft2(I2sub)
        Q = F1*np.conj(F2)
    return Q
def binary_orientation_corr(I1, I2):
    """ match two imagery through binary phase only correlation

    Parameters
    ----------
    I1 : np.array, size=(m,n)
        array with intensities
    I2 : np.array, size=(m,n)
        array with intensities

    Returns
    -------
    Q : np.array, size=(m,n)
        cross-spectrum

    See Also
    --------
    orientation_corr, phase_only_corr

    Notes
    -----
    [1] Kumar & Juday, "Design of phase-only, binary phase-only, and complex
    ternary matched filters with increased signal-to-noise ratios for
    colored noise", Optics letters, vol. 16(13) pp. 1025--1027, 1991.
    """
    I1sub, I2sub = make_templates_same_size(I1, I2)

    if I1.ndim == 3:  # multi-spectral frequency stacking
        for band in range(I1.shape[2]):
            F1 = np.fft.fft2(I1sub[:, :, band])
            F2 = np.fft.fft2(I2sub[:, :, band])
            # binary filter: only the sign of the real part is kept [1]
            W = np.sign(np.real(F2))
            Q_band = F1*np.conj(W*F2)
            if band == 0:
                Q = Q_band
            else:  # running average over the bands
                Q = (1/(band+1))*Q_band + (band/(band+1))*Q
    else:
        F1, F2 = np.fft.fft2(I1sub), np.fft.fft2(I2sub)
        W = np.sign(np.real(F2))
        Q = F1*np.conj(W*F2)
    return Q
def masked_corr(I1, I2, M1, M2):
    """ match two imagery through masked normalized cross-correlation in FFT

    Parameters
    ----------
    I1 : np.array, size=(m,n)
        array with intensities
    I2 : np.array, size=(m,n)
        array with intensities
    M1 : np.array, size=(m,n)
        array with mask
    M2 : np.array, size=(m,n)
        array with mask

    Returns
    -------
    NCC : np.array, size=(m,n)
        correlation surface

    Notes
    -----
    [1] Padfield. "Masked object registration in the Fourier domain",
    IEEE transactions on image processing, vol. 21(5) pp. 2706-2718, 2011.

    NOTE(review): no guard against division by zero where the masks do not
    overlap (fM1M2 == 0) -- confirm callers ensure overlapping masks
    """
    I1sub,I2sub = make_templates_same_size(I1,I2)
    M1sub,M2sub = make_templates_same_size(M1,M2)

    # spectra of the images and of their masks
    I1f, I2f = np.fft.fft2(I1sub), np.fft.fft2(I2sub)
    M1f, M2f = np.fft.fft2(M1sub), np.fft.fft2(M2sub)

    # masked correlation terms of [1], computed via the Fourier domain
    fF1F2 = np.fft.ifft2( I1f*np.conj(I2f) )
    fM1M2 = np.fft.ifft2( M1f*np.conj(M2f) )
    fM1F2 = np.fft.ifft2( M1f*np.conj(I2f) )
    fF1M2 = np.fft.ifft2( I1f*np.conj(M2f) )

    # energy terms for the normalization denominator
    # NOTE(review): only I2sub**2 is flipped, and only vertically (flipud);
    # [1] uses a fully rotated template -- confirm intended
    ff1M2 = np.fft.ifft2( np.fft.fft2(I1sub**2)*np.conj(M2f) )
    fM1f2 = np.fft.ifft2( M1f*np.fft.fft2( np.flipud(I2sub**2) ) )

    NCC_num = fF1F2 - \
        (np.divide(
            np.multiply( fF1M2, fM1F2 ), fM1M2 ))
    NCC_den = np.multiply( \
        np.sqrt(ff1M2 - np.divide( fF1M2**2, fM1M2) ),
        np.sqrt(fM1f2 - np.divide( fM1F2**2, fM1M2) ))
    NCC = np.divide(NCC_num, NCC_den)
    return NCC
# binary transform functions
def affine_binairy_registration(B1, B2):
    """ estimate an affine mapping between two binary images from their
    geometric moments (up to third order)

    Parameters
    ----------
    B1 : np.array, size=(m,n)
        binary template image
    B2 : np.array, size=(k,l)
        binary observation image

    Returns
    -------
    Q : np.array, size=(2,3)
        affine transformation coefficients
    """
    # preparation
    pT = np.sum(B1) # Lebesgue integral
    pO = np.sum(B2)
    Jac = pO/pT # Jacobian

    # pixel coordinate grid of the template
    x = np.linspace(0,B1.shape[1]-1,B1.shape[1])
    y = np.linspace(0,B1.shape[0]-1,B1.shape[0])
    X1, Y1 = np.meshgrid(x,y)
    del x, y

    # calculating moments of the template
    x11 = Jac* np.sum(X1    * B1)
    x12 = Jac* np.sum(X1**2 * B1)
    x13 = Jac* np.sum(X1**3 * B1)
    x21 = Jac* np.sum(Y1    * B1)
    x22 = Jac* np.sum(Y1**2 * B1)
    x23 = Jac* np.sum(Y1**3 * B1)
    del X1, Y1

    # pixel coordinate grid of the observation
    x = np.linspace(0,B2.shape[1]-1,B2.shape[1])
    y = np.linspace(0,B2.shape[0]-1,B2.shape[0])
    X2, Y2 = np.meshgrid(x,y)
    del x, y

    # calculating moments of the observation
    y1   = np.sum(X2       * B2)
    y12  = np.sum(X2**2    * B2)
    y13  = np.sum(X2**3    * B2)
    y12y2= np.sum(X2**2*Y2 * B2)
    y2   = np.sum(Y2       * B2)
    y22  = np.sum(Y2**2    * B2)
    y23  = np.sum(Y2**3    * B2)
    y1y22= np.sum(X2*Y2**2 * B2)
    y1y2 = np.sum(X2*Y2    * B2)
    del X2, Y2

    # estimation: solve the cubic moment system for each image axis
    mu = pO
    def func1(x):
        # moment-matching equations for the first (row) transform
        q11, q12, q13 = x
        return [mu*q11 + y1*q12 + y2*q13 - x11,
                mu*q11**2 + y12*q12**2 + y22*q13**2 + 2*y1*q11*q12 + \
                2*y2*q11*q13 + 2*y1y2*q12*q13 - x12,
                mu*q11**3 + y13*q12**3 + y23*q13**3 + 3*y1*q11**2*q12 + \
                3*y2*q11**2*q13 + 3*y12*q12**2*q11 + 3*y12y2*q12**2*q13 + \
                3*y22*q11*q13**2 + 3*y1y22*q12*q13**2 + \
                6*y1y2*q11*q12*q13 - x13]

    Q11, Q12, Q13 = fsolve(func1, (1.0, 1.0, 1.0))
    # test for complex solutions, which should be excluded

    def func2(x):
        # moment-matching equations for the second (column) transform
        q21, q22, q23 = x
        return [mu*q21 + y1*q22 + y2*q23 - x21,
                mu*q21**2 + y12*q22**2 + y22*q23**2 + 2*y1*q21*q22 + \
                2*y2*q21*q23 + 2*y1y2*q22*q23 - x22,
                mu*q21**3 + y13*q22**3 + y23*q23**3 + 3*y1*q21**2*q22 + \
                3*y2*q21**2*q23 + 3*y12*q22**2*q21 + 3*y12y2*q22**2*q23 + \
                3*y22*q21*q23**2 + 3*y1y22*q22*q23**2 + \
                6*y1y2*q21*q22*q23 - x23]

    Q21, Q22, Q23 = fsolve(func2, (1.0, 1.0, 1.0))
    # test for complex solutions, which should be excluded

    # assemble the 2x3 affine matrix; translation terms are in last column
    Q = np.array([[Q12, Q13, Q11], [Q22, Q23, Q21]])
    return Q
# boundary describtors
def get_relative_group_distances(x, K=5):
    """ coordinate differences towards the K neighbors on either side, for
    every element of a closed contour

    Parameters
    ----------
    x : np.array, size=(m,)
        coordinate array
    K : integer, default=5
        amount of neighbors on either side

    Returns
    -------
    dx_minus : np.array, size=(m,K)
        differences towards the preceding neighbors (wrapping around)
    dx_plus : np.array, size=(m,K)
        differences towards the following neighbors (wrapping around)
    """
    # stack circularly shifted copies of the contour, one column per offset
    x_minus = np.stack([np.roll(x, +k) for k in range(1, K+1)], axis=1)
    x_plus = np.stack([np.roll(x, -k) for k in range(1, K+1)], axis=1)

    x_rep = np.repeat(np.expand_dims(x, axis=1), K, axis=1)
    return x_minus - x_rep, x_plus - x_rep
def get_relative_distances(x, x_id, K=5):
    """ coordinate differences towards the K neighbors on either side of a
    single contour point

    Parameters
    ----------
    x : np.array, size=(m,)
        coordinate array
    x_id : integer
        index of the coordinate of interest
    K : integer, default=5
        amount of neighbors on either side

    Returns
    -------
    dx_minus : np.array, size=(K,)
        differences towards the K preceding points (wrapping around)
    dx_plus : np.array, size=(K,)
        differences towards the K points starting at x_id itself
    """
    m = len(x)
    # indices of the preceding and following contour points, wrapping around
    ids_min = np.arange(x_id - K, x_id) % m
    ids_plu = np.arange(x_id, x_id + K) % m

    dx_minus = x[ids_min] - np.repeat(x[x_id], K)
    dx_plus = x[ids_plu] - np.repeat(x[x_id], K)
    return dx_minus, dx_plus
def beam_angle_statistics(x, y, K=5, xy_id=None):
    """ implement beam angular statistics (BAS)

    Parameters
    ----------
    x, y : np.array, size=(m,)
        coordinates of the contour
    K : integer, default=5
        amount of neighbors on either side
    xy_id : integer, optional
        index of a single coordinate; when None a descriptor is built for
        every coordinate of the contour

    Returns
    -------
    BAS : np.array
        mean and standard deviation of the beam angles

    Notes
    -----
    [1] Arica & Vural, "BAS: a perceptual shape descriptoy based on the beam
    angle statistics", Pattern Recognition Letters 24: 1627-1639, 2003.
    """
    if xy_id is None:  # make descriptors for all coordinates
        dx_min, dx_plu = get_relative_group_distances(x, K)
        dy_min, dy_plu = get_relative_group_distances(y, K)
        ax = 1
    else:  # make descriptor for a single coordinate
        dx_min, dx_plu = get_relative_distances(x, xy_id, K)
        dy_min, dy_plu = get_relative_distances(y, xy_id, K)
        ax = 0

    # dot product instead of argument
    # beam angle: difference between the directions towards the preceding
    # and the following neighbors
    beam = np.arctan2(dy_min, dx_min) - np.arctan2(dy_plu, dx_plu)

    mu, sigma = np.mean(beam, axis=ax), np.std(beam, axis=ax)  # moments
    BAS = np.concatenate((np.expand_dims(mu, axis=ax),
                          np.expand_dims(sigma, axis=ax)), axis=ax)
    return BAS
def cast_angle_neighbours(x, y, sun, K=5, xy_id=None):
    """ angles towards the neighboring contour points, expressed in a frame
    rotated towards the illumination direction

    Parameters
    ----------
    x, y : np.array, size=(m,)
        coordinates of the contour
    sun : np.array, size=(2,)
        unit vector with the illumination direction
    K : integer, default=5
        amount of neighbors on either side
    xy_id : integer, optional
        index of a single coordinate; when None descriptors are built for
        every coordinate

    Returns
    -------
    CAN : np.array
        angles towards the neighbors, in the sun-aligned frame
    """
    if xy_id is None:  # make descriptors for all coordinates
        dx_min, dx_plu = get_relative_group_distances(x, K)
        dy_min, dy_plu = get_relative_group_distances(y, K)
        ax = 1
    else:  # make descriptor for a single coordinate
        dx_min, dx_plu = get_relative_distances(x, xy_id, K)
        dy_min, dy_plu = get_relative_distances(y, xy_id, K)
        ax = 0

    # rotate towards the sun
    ang_min = np.arctan2(sun[0]*dx_min + sun[1]*dy_min,
                         -sun[1]*dx_min + sun[0]*dy_min)
    ang_plu = np.arctan2(sun[0]*dx_plu + sun[1]*dy_plu,
                         -sun[1]*dx_plu + sun[0]*dy_plu)
    return np.concatenate((ang_min, ang_plu), axis=ax)
def neighbouring_cast_distances(x, y, sun, K=5, xy_id=None):
    """ distances towards the neighboring contour points, projected onto the
    illumination direction

    Parameters
    ----------
    x, y : np.array, size=(m,)
        coordinates of the contour
    sun : np.array, size=(2,)
        unit vector with the illumination direction
    K : integer, default=5
        amount of neighbors on either side
    xy_id : integer, optional
        index of a single coordinate; when None descriptors are built for
        every coordinate

    Returns
    -------
    CD : np.array
        projected distances towards the neighbors
    """
    if xy_id is None:  # make descriptors for all coordinates
        dx_min, dx_plu = get_relative_group_distances(x, K)
        dy_min, dy_plu = get_relative_group_distances(y, K)
        ax = 1
    else:  # make descriptor for a single coordinate
        dx_min, dx_plu = get_relative_distances(x, xy_id, K)
        dy_min, dy_plu = get_relative_distances(y, xy_id, K)
        ax = 0

    # rotate towards the sun and take only one axes
    proj_min = sun[0]*dx_min + sun[1]*dy_min
    proj_plu = sun[0]*dx_plu + sun[1]*dy_plu
    return np.concatenate((proj_min, proj_plu), axis=ax)
# supporting functions
def make_templates_same_size(I1,I2):
    """ crop the central part of the search space, so it gets the same
    extent as the template

    Parameters
    ----------
    I1 : np.array, size=(m,n) or (m,n,b)
        template array
    I2 : np.array, size=(k,l) or (k,l,b)
        search space array, of equal size or bigger than the template

    Returns
    -------
    I1 : np.array
        template array, unaltered
    I2sub : np.array
        central subsection of I2 with the extent of I1
    """
    mt,nt = I1.shape[0],I1.shape[1] # dimenstion of the template
    ms,ns = I2.shape[0],I2.shape[1] # dimension of the search space

    assert ms>=mt # search domain should be of equal size or bigger
    assert ns>=nt
    assert I1.ndim==I2.ndim # should be the same dimension

    md, nd = (ms-mt)//2, (ns-nt)//2
    # explicit upper bounds are used since "I2[+0:-0]" yields an empty
    # array; the original test "md==0 | nd==0" was also mis-parsed by
    # operator precedence, so a zero margin on only one axis went wrong
    if I1.ndim==3:
        I2sub = I2[md:ms-md, nd:ns-nd, :]
    else:
        I2sub = I2[md:ms-md, nd:ns-nd]
    return I1, I2sub
def test_bounds_reposition(d, temp_size, search_size):
"""
See Also
--------
reposition_templates_from_center
"""
space_bound = (search_size-temp_size) // 2
if abs(d) > space_bound:
warnings.warn("part of the template will be out of the image" +
"with this displacement estimate")
reposition_dir = np.sign(d)
d = reposition_dir * np.minimum(abs(d), space_bound)
return d
def reposition_templates_from_center(I1,I2,di,dj):
    """ crop a template-sized subsection out of the search space, displaced
    by (di,dj) from its center

    Parameters
    ----------
    I1 : np.array, size=(m,n) or (m,n,b)
        template array
    I2 : np.array, size=(k,l) or (k,l,b)
        search space array, of equal size or bigger than the template
    di, dj : integer
        displacement of the subsection, in rows and columns

    Returns
    -------
    I1 : np.array
        template array, unaltered
    I2sub : np.array
        displaced subsection of I2 with the extent of I1
    """
    mt,nt = I1.shape[0],I1.shape[1] # dimenstion of the template
    ms,ns = I2.shape[0],I2.shape[1] # dimension of the search space

    di,dj = int(di),int(dj)
    # keep the subsection inside the search window
    di = test_bounds_reposition(di, mt, ms)
    dj = test_bounds_reposition(dj, nt, ns)

    assert ms>=mt # search domain should be of equal size or bigger
    assert ns>=nt
    assert I1.ndim==I2.ndim # should be the same dimension

    mc,nc = ms//2, ns//2 # center location of the search space
    rows = slice(mc-(mt//2)-di, mc+(mt//2)-di)
    cols = slice(nc-(nt//2)-dj, nc+(nt//2)-dj)
    I2sub = I2[rows, cols, :] if I1.ndim==3 else I2[rows, cols]
    return I1, I2sub
def get_coordinates_of_template_centers(grid, temp_size):
    """ give the locations of the template centers, when tiling an array
    into small templates

    Parameters
    ----------
    grid : np.array, size=(n,m)
        array with data values
    temp_size : integer
        size of the kernel in pixels

    Returns
    -------
    Iidx : np.array, size=(k,l)
        array with row coordinates
    Jidx : np.array, size=(k,l)
        array with collumn coordinates
    """
    radius = np.floor(temp_size / 2).astype('int')
    i_samp = np.arange(radius, grid.shape[0] - radius, temp_size)
    j_samp = np.arange(radius, grid.shape[1] - radius, temp_size)

    # matrix ("ij") indexing: the row coordinate varies along the first axis
    Iidx, Jidx = np.meshgrid(i_samp, j_samp, indexing='ij')
    return Iidx, Jidx
def get_grid_at_template_centers(grid, temp_size):
    """ give the value of the pixel at the center of each template, when
    tiling an array into small templates

    Parameters
    ----------
    grid : np.array, size=(n,m)
        array with data values
    temp_size : integer
        size of the kernel in pixels

    Returns
    -------
    gridnew : np.array, size=(k,l)
        data value of the pixel in the kernels center
    """
    rows, cols = get_coordinates_of_template_centers(grid, temp_size)
    return grid[rows, cols]
3303157 | <gh_stars>1-10
"""Class with response details."""
from dataclasses import dataclass
from typing import Optional
@dataclass
class RequestResponse:
    """Class with response details. Independent of web library implementation."""

    status_code: Optional[int]  # HTTP status code, None when no response arrived
    text: Optional[str]         # raw response body, None when no response arrived

    def is_token_expired(self) -> bool:
        """Return True when the response carries the token-expired status (429)."""
        return self.status_code == 429

    def is_success(self) -> bool:
        """Return True when the response has a successful (< 300) status."""
        return self.status_code is not None and self.status_code < 300
| StarcoderdataPython |
26994 | <gh_stars>1-10
from mock import patch
# class-level patch: every test method in the class receives the mock that
# replaces topatch.afunction as an extra positional argument
@patch('topatch.afunction')
class TestToPatch():
    def test_afunction(self, mock_afunction):
        # call the mock and verify that exact call was recorded
        mock_afunction('foo', 'bar')
        mock_afunction.assert_any_call('foo', 'bar')
| StarcoderdataPython |
57407 | <reponame>yhswjtuILMARE/Machine-Learning-Study-Notes
'''
Created on 2017年5月4日
@author: <NAME>
'''
from selenium import webdriver
import time
from bs4 import BeautifulSoup
from urllib.request import urlretrieve
import re
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.common.by import By
import os
def getAllFriendList(broswer, userID, pageNum, friendSet, currentLevel):
    """ collect followed users from one page of a Zhihu following list

    Parameters
    ----------
    broswer : selenium webdriver used to load the page
    userID : str
        path part of the profile, e.g. "/people/xxx/following"
    pageNum : int
        page number of the following list to load
    friendSet : set
        collection the found user link elements are added to (mutated)
    currentLevel : int
        recursion depth, only used for log indentation
    """
    try:
        broswer.get('https://www.zhihu.com%s?page=%s' % (userID, pageNum))
        WebDriverWait(broswer, 10).until(
            expected_conditions.presence_of_all_elements_located((By.CSS_SELECTOR, '.UserLink-link')))
    except Exception:
        # a bare "except:" would also swallow KeyboardInterrupt/SystemExit
        print('getAllFriendList异常')
    else:
        bsObj = BeautifulSoup(broswer.page_source, 'html.parser')
        elts = bsObj.findAll('a', {'class':'UserLink-link'})
        for elt in elts:
            img = elt.find('img')
            if img:  # only links containing an avatar are real user entries
                friendSet.add(elt)
                print('......*' * currentLevel, 'https://www.zhihu.com%s' % (elt.attrs.get('href', 'no data')))
def getFriendList(broswer, userID, currentLevel=1):
    """ recursively crawl a Zhihu user's following list, downloading avatars

    Parameters
    ----------
    broswer : selenium webdriver used to load the pages
    userID : str
        path part of the profile, e.g. "/people/xxx/following"
    currentLevel : int, default=1
        current recursion depth; recursion stops beyond totalLevel

    Notes
    -----
    mutates the module-level sets alreadyParse/alreadyDownload and writes
    avatar files via writeToFile
    """
    try:
        if currentLevel > totalLevel:
            return
        if userID == 'no data':
            raise Exception()
        nameTemp = userID.split('/')[2]
        if not nameTemp in alreadyParse:  # skip users already visited
            alreadyParse.add(nameTemp)
        else:
            return
        print('......*' * currentLevel ,'正在解析用户:', nameTemp, '知乎首页:https://www.zhihu.com%s' % (userID), sep=' ')
        friendSet = set()
        broswer.get('https://www.zhihu.com%s' % (userID))
        WebDriverWait(broswer, 10).until(
            expected_conditions.presence_of_all_elements_located((By.CSS_SELECTOR, '.UserLink-link')))
        elt = WebDriverWait(broswer, 10).until(
            expected_conditions.presence_of_element_located((By.CSS_SELECTOR, '.Avatar.Avatar--large.UserAvatar-inner')))
        # the srcset attribute ends with a pixel-density suffix like "2x"
        res = re.match('^(https://.*)[0-9]x$', elt.get_attribute('srcset'))
        if res:
            if not nameTemp in alreadyDownload:
                alreadyDownload.add(nameTemp)
                url = res.group(1)
                writeToFile(url, '%s.%s' % (nameTemp ,url.split('.')[-1]))
                print('......*' * currentLevel, '已经下载', nameTemp, '的用户头像', '知乎首页:https://www.zhihu.com%s' % (userID), sep=' ')
    except Exception:
        # a bare "except:" would also swallow KeyboardInterrupt/SystemExit
        print('......*' * currentLevel, 'getFriendList异常')
    else:
        print('......*' * currentLevel, '正在获取用户', nameTemp, '的关注列表...', sep=' ')
        bsObj = BeautifulSoup(broswer.page_source, 'html.parser')
        elts = bsObj.findAll('a', {'class':'UserLink-link'})
        for elt in elts:
            img = elt.find('img')
            if img:  # only links containing an avatar are real user entries
                friendSet.add(elt)
                print('......*' * currentLevel, 'https://www.zhihu.com%s' % (elt.attrs.get('href', 'no data')))
        # walk the remaining pages of the following list, if any
        elts = bsObj.findAll('button', {'class':'Button PaginationButton Button--plain'})
        if len(elts) != 0:
            count = elts[len(elts) - 1].get_text()
            for i in range(2, int(count) + 1):
                getAllFriendList(broswer, userID, i, friendSet, currentLevel)
        print('......*' * currentLevel, '用户', nameTemp, '的关注列表获取完毕', sep=' ')
        for elt in friendSet:
            href = elt.attrs.get('href', 'no data')
            if currentLevel == totalLevel:
                # at the deepest level only download avatars, do not recurse
                img = elt.find('img')
                if img:
                    res = re.match('^(https://.*)[0-9]x$', img.attrs.get('srcset', 'no data'))
                    if res:
                        if not href.split('/')[2] in alreadyDownload:
                            alreadyDownload.add(href.split('/')[2])
                            url = res.group(1).replace('_xl', '_xll')
                            writeToFile(url, '%s.%s' % (href.split('/')[2] ,url.split('.')[-1]))
                            print('......*' * (currentLevel + 1), '已经下载用户',nameTemp, '的关注用户', href.split('/')[2], '的头像', sep=' ')
            getFriendList(broswer, '%s/%s' % (href, userID.split('/')[3]), currentLevel + 1)
totalLevel = 5#recursion depth limit
defaultPath = 'h:\\zhihu\\'#default output directory
currentPath = '%s%s' % (defaultPath, 'pic\\')#current output directory
alreadyDownload = set()#users whose avatar has already been downloaded
alreadyParse = set()#users already parsed
totalUse = 0#number of file writes so far
def writeToFile(url, fileName):
    """ download the image at `url` into the current output directory

    Every 500 writes a fresh "pic-<n>" directory is created, so a single
    directory never grows too large.

    Parameters
    ----------
    url : str
        location of the image to download
    fileName : str
        file name the image is stored under
    """
    try:
        global currentPath, totalUse, defaultPath
        totalUse = totalUse + 1
        if totalUse % 500 == 0:  # roll over to a fresh output directory
            tempPath = '{0}pic-{1}\\'.format(defaultPath, totalUse)
            if not os.path.exists(tempPath):
                os.mkdir(tempPath)
            currentPath = '%s' % (tempPath)
        if not os.path.exists(currentPath):
            os.mkdir(currentPath)
        urlretrieve(url, '%s%s' % (currentPath, fileName))
    except Exception:
        # a bare "except:" would also swallow KeyboardInterrupt/SystemExit
        print('writeToFile异常')
if __name__ == "__main__":
try:
start = time.clock()
time.sleep(5)
broswer = webdriver.PhantomJS(executable_path=
r"C:\phantomjs-2.1.1-windows\phantomjs-2.1.1-windows\bin\phantomjs.exe")
getFriendList(broswer, r'/people/tu-si-ji-63/following')
except:
print('顶层调用异常')
finally:
broswer.quit()
print('******', '共运行 {0:.3f}秒'.format(time.clock() - start), '一共扫描%d位用户的好友列表' % (len(alreadyParse)), '一共下载%d张用户头像' % (len(alreadyDownload)), sep=' ') | StarcoderdataPython |
3341732 | <reponame>sanyaade-teachings/cep
# Python code generated by CAIT's Visual Programming Interface
import cait.essentials
# module-level state shared between setup() and main()
intention = None  # result of the last text analysis
entities = None  # entities extracted from the last analysis
first_entity = None  # first entity of the last analysis
def setup():
    """Initialize CAIT's NLP component with the 'english_default' model."""
    cait.essentials.initialize_component('nlp', 'english_default')
def main():
    """Analyse a sample sentence and print its topic and first entity."""
    global intention, entities, first_entity
    intention = cait.essentials.analyse_text('I am Michael')
    print('Intention topic is: ' + str(intention['topic']))
    entities = intention['entities']
    # raises IndexError when the analysis finds no entities
    first_entity = entities[0]
    print('Entity name is: ' + str(first_entity['entity_name']))
    print('Entity value is: ' + str(first_entity['entity_value']))
if __name__ == "__main__":
setup()
main() | StarcoderdataPython |
4808135 | from unittest import TestCase
from unittest.mock import create_autospec
from uuid import uuid4
import time
import calendar
from cadence.errors import WorkflowExecutionAlreadyStartedError, DomainAlreadyExistsError, EntityNotExistsError
from cadence.tchannel import TChannelException
from cadence.cadence_types import StartWorkflowExecutionRequest, TaskList, WorkflowType, StartWorkflowExecutionResponse, \
RegisterDomainRequest, PollForActivityTaskRequest, DescribeTaskListRequest, TaskListType, \
DescribeWorkflowExecutionRequest, WorkflowExecution, DescribeTaskListResponse, DescribeWorkflowExecutionResponse, \
QueryWorkflowRequest, WorkflowQuery, ResetStickyTaskListRequest, RespondQueryTaskCompletedRequest, \
QueryTaskCompletedType, ListClosedWorkflowExecutionsRequest, ListClosedWorkflowExecutionsResponse, StartTimeFilter, \
ListOpenWorkflowExecutionsRequest, TerminateWorkflowExecutionRequest, SignalWithStartWorkflowExecutionRequest, \
SignalWorkflowExecutionRequest, RequestCancelWorkflowExecutionRequest, RespondActivityTaskCanceledByIDRequest, \
RespondActivityTaskCanceledRequest, RespondActivityTaskFailedByIDRequest, RespondActivityTaskFailedRequest, \
RespondActivityTaskCompletedByIDRequest, RecordActivityTaskHeartbeatByIDRequest, RecordActivityTaskHeartbeatRequest, \
RespondDecisionTaskFailedRequest, DecisionTaskFailedCause, RespondDecisionTaskCompletedRequest, \
PollForDecisionTaskRequest, GetWorkflowExecutionHistoryRequest, DeprecateDomainRequest, UpdateDomainRequest, \
DomainConfiguration, ListDomainsRequest, DescribeDomainRequest
from cadence.workflowservice import WorkflowService
class TestStartWorkflow(TestCase):
    """Integration tests for ``WorkflowService``.

    NOTE(review): these tests talk to a live Cadence frontend expected at
    localhost:7933 and assume a pre-registered "test-domain" — confirm the
    environment before running. They are integration tests, not unit tests.
    """

    def setUp(self) -> None:
        """Open a service connection and build a unique start-workflow request."""
        self.service = WorkflowService.create("localhost", 7933)
        # Local alias so the request can be populated tersely below.
        self.request = request = StartWorkflowExecutionRequest()
        request.domain = "test-domain"
        request.request_id = str(uuid4())
        request.task_list = TaskList()
        request.task_list.name = "test-task-list"
        request.input = "abc-firdaus"
        # Random workflow_id so tests do not collide on the shared server.
        request.workflow_id = str(uuid4())
        request.workflow_type = WorkflowType()
        request.workflow_type.name = "firdaus-workflow-type"
        request.execution_start_to_close_timeout_seconds = 86400
        request.task_start_to_close_timeout_seconds = 120
        # Brief pause between tests so the server is not hammered.
        time.sleep(0.5)

    def test_start_workflow(self):
        """A freshly built request starts a workflow and returns a response."""
        (response, err) = self.service.start_workflow(self.request)
        self.assertIsNotNone(response)
        self.assertIsInstance(response, StartWorkflowExecutionResponse)

    def test_duplicate_workflow_ids(self):
        """Reusing a workflow id (new request id) fails with AlreadyStarted."""
        (response, err) = self.service.start_workflow(self.request)
        self.request.request_id = str(uuid4())
        (response, err) = self.service.start_workflow(self.request)
        self.assertIsNotNone(err)
        self.assertIsNone(response)
        self.assertIsInstance(err, WorkflowExecutionAlreadyStartedError)

    def test_register_domain(self):
        """Registering a new domain succeeds and returns no payload."""
        request = RegisterDomainRequest()
        request.name = str(uuid4())
        request.description = ""
        # NOTE(review): camelCase attribute — verify it maps onto the
        # request type; compare with snake_case fields used elsewhere.
        request.workflowExecutionRetentionPeriodInDays = 1
        response, err = self.service.register_domain(request)
        self.assertIsNone(response)  # RegisterDomain returns void
        self.assertIsNone(err)

    def test_duplicate_domains(self):
        """Registering the same domain twice yields DomainAlreadyExistsError."""
        request = RegisterDomainRequest()
        request.name = str(uuid4())
        request.description = ""
        request.workflowExecutionRetentionPeriodInDays = 1
        response, err = self.service.register_domain(request)
        response, err = self.service.register_domain(request)
        self.assertIsNotNone(err)
        self.assertIsInstance(err, DomainAlreadyExistsError)

    def test_describe_domain(self):
        """A just-registered domain can be described."""
        register_request = RegisterDomainRequest()
        register_request.name = str(uuid4())
        self.service.register_domain(register_request)
        request = DescribeDomainRequest()
        request.name = register_request.name
        response, err = self.service.describe_domain(request)
        self.assertIsNone(err)
        self.assertIsNotNone(response)

    def test_list_domains(self):
        """Listing domains returns a list payload."""
        request = ListDomainsRequest()
        request.page_size = 20
        response, err = self.service.list_domains(request)
        self.assertIsNone(err)
        self.assertIsNotNone(response)
        self.assertIsInstance(response.domains, list)

    def test_update_domain(self):
        """Updating the retention period is reflected in the response."""
        register_request = RegisterDomainRequest()
        register_request.name = str(uuid4())
        self.service.register_domain(register_request)
        request = UpdateDomainRequest()
        request.name = register_request.name
        request.configuration = DomainConfiguration()
        request.configuration.workflow_execution_retention_period_in_days = 10
        response, err = self.service.update_domain(request)
        self.assertIsNone(err)
        self.assertIsNotNone(response)
        self.assertEqual(10, response.configuration.workflow_execution_retention_period_in_days)

    def test_deprecate_domain(self):
        """Deprecating an existing domain succeeds with a void response."""
        register_request = RegisterDomainRequest()
        register_request.name = str(uuid4())
        self.service.register_domain(register_request)
        request = DeprecateDomainRequest()
        request.name = register_request.name
        response, err = self.service.deprecate_domain(request)
        self.assertIsNone(err)
        self.assertIsNone(response)

    def test_get_workflow_execution_history(self):
        """History of a started workflow contains at least one event."""
        response, err = self.service.start_workflow(self.request)
        request = GetWorkflowExecutionHistoryRequest()
        request.domain = "test-domain"
        request.execution = WorkflowExecution()
        request.execution.workflow_id = self.request.workflow_id
        request.execution.run_id = response.run_id
        response, err = self.service.get_workflow_execution_history(request)
        self.assertIsNone(err)
        self.assertIsNotNone(response)
        self.assertIsNotNone(response.history)
        self.assertIsNotNone(response.history.events)

    def test_poll_for_decision_task(self):
        """Polling an empty (random) task list times out with no task token."""
        request = PollForDecisionTaskRequest()
        request.identity = "123@localhost"
        request.domain = "test-domain"
        request.task_list = TaskList()
        # Random suffix guarantees nothing is queued on this task list.
        request.task_list.name = "test-task-list" + str(uuid4())
        response, err = self.service.poll_for_decision_task(request)
        self.assertIsNone(err)
        self.assertIsNotNone(response)
        self.assertIsNone(response.task_token)

    def test_respond_decision_task_completed(self):
        """An empty task token is rejected by the server."""
        request = RespondDecisionTaskCompletedRequest()
        request.task_token = "{}"
        request.identity = "123@localhost"
        response, err = self.service.respond_decision_task_completed(request)
        self.assertIsNotNone(err)
        self.assertIsNone(response)
        self.assertRegex(str(err), "Domain not set")

    def test_respond_decision_task_failed(self):
        """Failing a decision task with an empty token is rejected."""
        request = RespondDecisionTaskFailedRequest()
        request.task_token = "{}"
        request.identity = "123@localhost"
        request.cause = DecisionTaskFailedCause.BAD_REQUEST_CANCEL_ACTIVITY_ATTRIBUTES
        response, err = self.service.respond_decision_task_failed(request)
        self.assertIsNotNone(err)
        self.assertIsNone(response)
        self.assertRegex(str(err), "Domain not set")

    def test_poll_for_activity_task_timeout(self):
        """Polling with no scheduled activities returns an empty response."""
        request = PollForActivityTaskRequest()
        request.domain = "test-domain"
        request.identity = WorkflowService.get_identity()
        request.task_list = TaskList()
        request.task_list.name = "test-task-list"
        response, err = self.service.poll_for_activity_task(request)
        self.assertIsNone(err)
        self.assertIsNotNone(response)
        self.assertIsNone(response.task_token)

    def test_record_activity_task_heartbeat(self):
        """Heartbeating with an empty token is rejected."""
        request = RecordActivityTaskHeartbeatRequest()
        request.task_token = "{}"
        request.identity = "123@localhost"
        response, err = self.service.record_activity_task_heartbeat(request)
        self.assertIsNotNone(err)
        self.assertRegex(str(err), "Domain not set")

    def test_record_activity_task_heartbeat_by_id(self):
        """Heartbeating an unknown activity id is rejected."""
        start_response, _ = self.service.start_workflow(self.request)
        request = RecordActivityTaskHeartbeatByIDRequest()
        request.identity = "123@localhost"
        request.domain = "test-domain"
        request.workflow_id = self.request.workflow_id
        request.run_id = start_response.run_id
        request.activity_id = "dummy-activity-id"
        response, err = self.service.record_activity_task_heartbeat_by_id(request)
        self.assertIsNotNone(err)
        self.assertIsNone(response)
        self.assertRegex(str(err), "Cannot locate Activity ScheduleID")

    def test_respond_query_task_completed_invalid(self):
        """Completing a query with an empty task token is rejected."""
        request = RespondQueryTaskCompletedRequest()
        request.task_token = "{}"
        request.completed_type = QueryTaskCompletedType.COMPLETED
        request.query_result = ""
        response, err = self.service.respond_query_task_completed(request)
        self.assertIsNotNone(err)
        self.assertRegex(str(err), "Invalid TaskToken")

    def test_respond_activity_task_completed_by_id(self):
        """Completing an unknown activity id is rejected."""
        start_response, _ = self.service.start_workflow(self.request)
        request = RespondActivityTaskCompletedByIDRequest()
        request.identity = "123@localhost"
        request.domain = "test-domain"
        request.workflow_id = self.request.workflow_id
        request.run_id = start_response.run_id
        request.activity_id = "dummy-activity-id"
        response, err = self.service.respond_activity_task_completed_by_id(request)
        self.assertIsNotNone(err)
        self.assertIsNone(response)
        self.assertRegex(str(err), "Cannot locate Activity ScheduleID")

    def test_respond_activity_task_failed(self):
        """Failing a task whose token names a bogus domain is rejected."""
        request = RespondActivityTaskFailedRequest()
        request.task_token = '{"domainId": "%s", "workflowId": "%s"}' % (str(uuid4()), str(uuid4()))
        request.identity = "123@localhost"
        response, err = self.service.respond_activity_task_failed(request)
        self.assertIsNotNone(err)
        self.assertRegex(str(err), "Domain .* does not exist")
        self.assertIsNone(response)

    def test_respond_activity_task_failed_by_id_invalid(self):
        """Failing an unknown activity id is rejected."""
        start_response, _ = self.service.start_workflow(self.request)
        request = RespondActivityTaskFailedByIDRequest()
        request.identity = "123@localhost"
        request.domain = "test-domain"
        request.workflow_id = self.request.workflow_id
        request.run_id = start_response.run_id
        request.activity_id = "dummy-activity-id"
        response, err = self.service.respond_activity_task_failed_by_id(request)
        self.assertIsNotNone(err)
        self.assertRegex(str(err), "Cannot locate Activity ScheduleID")
        self.assertIsNone(response)

    def test_respond_activity_task_canceled_invalid(self):
        """Canceling with a token that names a bogus domain is rejected."""
        request = RespondActivityTaskCanceledRequest()
        request.task_token = '{"domainId": "%s", "workflowId": "%s"}' % (str(uuid4()), str(uuid4()))
        request.identity = "123@localhost"
        response, err = self.service.respond_activity_task_canceled(request)
        self.assertIsNotNone(err)
        self.assertRegex(str(err), "Domain .* does not exist")
        self.assertIsNone(response)

    def test_respond_activity_task_canceled_by_id_invalid(self):
        """Canceling an unknown activity id is rejected."""
        start_response, _ = self.service.start_workflow(self.request)
        request = RespondActivityTaskCanceledByIDRequest()
        request.domain = "test-domain"
        request.workflow_id = self.request.workflow_id
        request.run_id = start_response.run_id
        request.activity_id = "dummy-activity-id"
        response, err = self.service.respond_activity_task_canceled_by_id(request)
        self.assertIsNone(response)
        self.assertIsNotNone(err)
        self.assertRegex(str(err), "Cannot locate Activity ScheduleID")

    def test_request_cancel_workflow_execution(self):
        """Requesting cancellation of a running workflow succeeds (void)."""
        start_response, _ = self.service.start_workflow(self.request)
        request = RequestCancelWorkflowExecutionRequest()
        request.domain = "test-domain"
        request.workflow_execution = WorkflowExecution()
        request.workflow_execution.workflow_id = self.request.workflow_id
        request.workflow_execution.run_id = start_response.run_id
        response, err = self.service.request_cancel_workflow_execution(request)
        self.assertIsNone(err)
        self.assertIsNone(response)

    def test_signal_workflow_execution(self):
        """Signaling a running workflow succeeds (void response)."""
        start_response, _ = self.service.start_workflow(self.request)
        request = SignalWorkflowExecutionRequest()
        request.domain = "test-domain"
        request.signal_name = "dummy-signal"
        request.workflow_execution = WorkflowExecution()
        request.workflow_execution.workflow_id = self.request.workflow_id
        request.workflow_execution.run_id = start_response.run_id
        response, err = self.service.signal_workflow_execution(request)
        self.assertIsNone(err)
        self.assertIsNone(response)

    def test_signal_with_start_workflow_execution(self):
        """Signal-with-start creates a workflow and returns its run info."""
        request = SignalWithStartWorkflowExecutionRequest()
        request.signal_name = "dummy-signal"
        request.domain = "test-domain"
        request.request_id = str(uuid4())
        request.task_list = TaskList()
        request.task_list.name = "test-task-list"
        request.input = "abc-firdaus"
        request.workflow_id = str(uuid4())
        request.workflow_type = WorkflowType()
        request.workflow_type.name = "firdaus-workflow-type"
        request.execution_start_to_close_timeout_seconds = 86400
        request.task_start_to_close_timeout_seconds = 120
        response, err = self.service.signal_with_start_workflow_execution(request)
        self.assertIsNone(err)
        self.assertIsNotNone(response)
        self.assertIsInstance(response, StartWorkflowExecutionResponse)

    def test_terminate_workflow_execution(self):
        """Terminating a running workflow succeeds (void response)."""
        start_response, _ = self.service.start_workflow(self.request)
        request = TerminateWorkflowExecutionRequest()
        request.domain = "test-domain"
        request.workflow_execution = WorkflowExecution()
        request.workflow_execution.workflow_id = self.request.workflow_id
        request.workflow_execution.run_id = start_response.run_id
        response, err = self.service.terminate_workflow_execution(request)
        self.assertIsNone(err)
        self.assertIsNone(response)

    def test_list_open_workflow_executions(self):
        """Listing open executions over an unbounded time window succeeds."""
        request = ListOpenWorkflowExecutionsRequest()
        request.domain = "test-domain"
        request.start_time_filter = StartTimeFilter()
        request.maximum_page_size = 20
        request.start_time_filter.earliest_time = 1
        # Max int64 — effectively "until forever".
        request.start_time_filter.latest_time = 2 ** 63 - 1
        response, err = self.service.list_open_workflow_executions(request)
        self.assertIsNone(err)
        self.assertIsNotNone(response)
        self.assertIsNotNone(response.executions)

    def test_list_closed_workflow_executions(self):
        """Listing closed executions for the current instant succeeds."""
        request = ListClosedWorkflowExecutionsRequest()
        request.domain = "test-domain"
        request.start_time_filter = StartTimeFilter()
        # Nano seconds? — presumably Cadence expects epoch nanoseconds
        # here; confirm against the service contract.
        request.start_time_filter.earliest_time = calendar.timegm(time.gmtime()) * 1e+9
        request.start_time_filter.latest_time = calendar.timegm(time.gmtime()) * 1e+9
        response, err = self.service.list_closed_workflow_executions(request)
        self.assertIsNone(err)
        self.assertIsNotNone(response)
        self.assertIsInstance(response, ListClosedWorkflowExecutionsResponse)

    def test_reset_sticky_task_list(self):
        """Resetting the sticky task list of a running workflow succeeds."""
        start_response, _ = self.service.start_workflow(self.request)
        request = ResetStickyTaskListRequest()
        request.domain = "test-domain"
        request.execution = WorkflowExecution()
        request.execution.workflow_id = self.request.workflow_id
        request.execution.run_id = start_response.run_id
        response, err = self.service.reset_sticky_task_list(request)
        self.assertIsNone(err)
        self.assertIsNotNone(response)

    def test_query_workflow_timeout(self):
        """Querying a workflow with no pollers raises a transport timeout."""
        start_response, _ = self.service.start_workflow(self.request)
        request = QueryWorkflowRequest()
        request.domain = "test-domain"
        request.execution = WorkflowExecution()
        request.execution.workflow_id = self.request.workflow_id
        request.execution.run_id = start_response.run_id
        request.query = WorkflowQuery()
        request.query.query_type = "getDummy"
        request.query.query_args = None
        with self.assertRaisesRegex(TChannelException, "timeout") as context:
            self.service.query_workflow(request)

    def test_describe_workflow_execution(self):
        """A started workflow can be described."""
        start_response, _ = self.service.start_workflow(self.request)
        request = DescribeWorkflowExecutionRequest()
        request.domain = "test-domain"
        request.execution = WorkflowExecution()
        request.execution.workflow_id = self.request.workflow_id
        request.execution.run_id = start_response.run_id
        response, err = self.service.describe_workflow_execution(request)
        self.assertIsNone(err)
        self.assertIsNotNone(response)
        self.assertIsInstance(response, DescribeWorkflowExecutionResponse)

    def test_describe_workflow_execution_invalid_workflow(self):
        """Describing a random workflow id fails with EntityNotExists."""
        request = DescribeWorkflowExecutionRequest()
        request.domain = "test-domain"
        request.execution = WorkflowExecution()
        request.execution.workflow_id = str(uuid4())
        request.execution.run_id = str(uuid4())
        response, err = self.service.describe_workflow_execution(request)
        self.assertIsNone(response)
        self.assertIsInstance(err, EntityNotExistsError)

    def test_describe_task_list(self):
        """Describing an idle decision task list reports zero pollers."""
        request = DescribeTaskListRequest()
        request.task_list = TaskList()
        request.task_list.name = "test-task-list"
        request.task_list_type = TaskListType.Decision
        request.domain = "test-domain"
        response, err = self.service.describe_task_list(request)
        self.assertIsNone(err)
        self.assertIsNotNone(response)
        self.assertIsInstance(response, DescribeTaskListResponse)
        self.assertEqual(0, len(response.pollers))

    def tearDown(self) -> None:
        """Close the TChannel connection opened in setUp."""
        self.service.connection.close()
import json
from logging import error, exception, warning, info
from requests import post as request_post
from uuid import UUID
from django.db.models import (
CharField,
DecimalField,
ForeignKey,
JSONField,
PositiveIntegerField,
PROTECT,
)
from django.db.utils import IntegrityError
from eth_utils import add_0x_prefix
from web3 import Web3, HTTPProvider
from web3.datastructures import AttributeDict
from web3.types import HexBytes
from base.models import AbstractBaseModel
from base.support_functions.near_blockchain import parce_near_receipt
from backend.consts import (
ETH_LIKE_HASH_LENGTH,
MAX_WEI_DIGITS,
NETWORK_ERROR,
NETWORK_NAMES,
RPC_PROVIDER_ERROR,
RPC_PROVIDER_INFO,
TRANSACTION_ERROR,
TRANSACTION_INFO,
TRANSACTION_WARNING,
)
from networks.types import ADDRESS_LIKE, HASH_LIKE
from .exceptions import (
CustomRpcProviderExceedListRange,
NearRpcProviderError,
NetworkNotFound,
ProviderNotConnected,
TransactionError,
)
from .services.functions import (
convert_to_checksum_address_format,
reset_connection,
)
DEFAULT_POLL_LATENCY = 1
DEFAULT_TXN_TIMEOUT = 120
class Network(AbstractBaseModel):
    """
    Network model which represents a blockchain network.

    Used for interaction with the blockchain.

    - title - blockchain name
    - rpc_url_list - list of RPC node URLs, tried in order
    """

    title = CharField(
        max_length=255,
        verbose_name='Title',
    )
    rpc_url_list = JSONField(
        verbose_name='RPC URL List',
        default=list,
        blank=True,
    )

    class Meta:
        db_table = 'networks'
        ordering = '-_created_at',

    def __str__(self) -> str:
        return f'{self.title} (id: {self.id})'

    @property
    def rpc_provider(self):
        """
        Return the first working Web3 RPC provider from ``rpc_url_list``.

        Raises ProviderNotConnected when no node answers or when no
        nodes are configured at all.
        """
        # BUG FIX: previously an empty ``rpc_url_list`` raised
        # ProviderNotConnected('') and logged an empty error message;
        # start from a meaningful default instead.
        message = (
            f'No RPC providers configured for the \"{self.title}\" network.'
        )
        for rpc_url in self.rpc_url_list:
            info(
                RPC_PROVIDER_INFO.format(
                    f'Trying to connect to \"{self.title}\" '
                    f'node with url: \"{rpc_url}\"'
                )
            )
            provider = Web3(HTTPProvider(rpc_url))
            if provider.isConnected():
                info(
                    RPC_PROVIDER_INFO.format(
                        f'Connection to \"{rpc_url}\" was successful'
                    )
                )
                return provider
            # Remember the last failure so the raised error names it.
            message = (
                f'RPC provider with the URL \"{rpc_url}\" not loaded.'
            )
        exception(RPC_PROVIDER_ERROR.format(message))
        raise ProviderNotConnected(message)

    def get_rpc_provider(self, url_number):
        """
        Return a Web3 RPC provider for ``rpc_url_list[url_number]``.

        Raises CustomRpcProviderExceedListRange when ``url_number`` is past
        the end of the list, and ProviderNotConnected when that node does
        not answer.
        """
        if url_number >= len(self.rpc_url_list):
            raise CustomRpcProviderExceedListRange(
                f"Can't connect to \"{self.title}\" network"
            )
        rpc_url = self.rpc_url_list[url_number]
        info(
            RPC_PROVIDER_INFO.format(
                f'Trying to connect to \"{self.title}\" '
                f'node with url: {rpc_url}'
            )
        )
        provider = Web3(HTTPProvider(rpc_url))
        if provider.isConnected():
            info(
                RPC_PROVIDER_INFO.format(
                    f'Connection to \"{rpc_url}\" was successful'
                )
            )
            return provider
        message = (
            f'RPC provider with the URL \"{rpc_url}\" not loaded'
        )
        exception(RPC_PROVIDER_ERROR.format(message))
        raise ProviderNotConnected(message)

    @classmethod
    def get_network(cls, network_id: UUID):
        """Fetch a Network row by id or raise NetworkNotFound."""
        network = cls.objects.filter(id=network_id).first()
        if not network:
            message = (
                f'Network with the \"{network_id}\" id not found in database.'
            )
            exception(NETWORK_ERROR.format(message))
            raise NetworkNotFound(message)
        return network

    def get_gas_price(self, provider: Web3 = None):
        """Return the current gas price, connecting lazily if no provider given."""
        if not provider:
            return self.rpc_provider.eth.gasPrice
        return provider.eth.gasPrice
class CustomRpcProvider:
    """
    Wraps the methods of a Web3 RPC provider and switches to a backup
    node when connection errors happen.

    Every RPC method is decorated with ``reset_connection``, which
    presumably retries the call against the next ``url_number`` on
    failure — confirm in ``services.functions``.
    """

    def __init__(self, network: Network, url_number: int = 0):
        # url_number indexes network.rpc_url_list; 0 is the primary node.
        self.network = network
        self.url_number = url_number

    @property
    def rpc_provider(self):
        # A fresh provider is built on every access so a changed
        # url_number takes effect immediately.
        rpc_provider = self.network.get_rpc_provider(self.url_number)
        return rpc_provider

    @reset_connection
    def get_current_block_number(self):
        """Return the latest block number."""
        return self.rpc_provider.eth.get_block_number()

    @reset_connection
    def get_contract(self, address: str, abi: str):
        """Return a web3 contract object for the checksummed address."""
        return self.rpc_provider.eth.contract(
            address=convert_to_checksum_address_format(address),
            abi=abi,
        )

    @reset_connection
    def get_transaction(
        self,
        txn_hash: HASH_LIKE,
    ):
        """Fetch a transaction by its hash."""
        return self.rpc_provider.eth.getTransaction(txn_hash)

    @reset_connection
    def get_transaction_receipt(
        self,
        txn_hash: HASH_LIKE,
    ):
        """Fetch the receipt of a mined transaction by its hash."""
        return self.rpc_provider.eth.getTransactionReceipt(txn_hash)

    @reset_connection
    def wait_for_transaction_receipt(
        self,
        txn_hash: HASH_LIKE,
        timeout: int = DEFAULT_TXN_TIMEOUT,
        poll_latency: int = DEFAULT_POLL_LATENCY
    ):
        """Block until the transaction is mined or ``timeout`` expires."""
        return self.rpc_provider.eth.waitForTransactionReceipt(
            txn_hash,
            timeout,
            poll_latency,
        )

    @reset_connection
    def get_balance(self, address: ADDRESS_LIKE):
        """Return the native-coin balance of ``address``."""
        return self.rpc_provider.eth.getBalance(
            convert_to_checksum_address_format(address)
        )

    @reset_connection
    def get_transaction_count(self, address: ADDRESS_LIKE):
        """Return the transaction count including pending transactions."""
        return self.rpc_provider.eth.getTransactionCount(
            convert_to_checksum_address_format(address),
            'pending'
        )

    @reset_connection
    def send_raw_transaction(self, signed_transaction):
        """Broadcast an already-signed transaction."""
        return self.rpc_provider.eth.sendRawTransaction(signed_transaction)

    @reset_connection
    def get_logs(self, contract, event_name, from_block, to_block):
        """Return logs emitted by ``event_name`` of ``contract`` in a block range."""
        web3_contract_instance = contract.load_contract(
            provider=self,
        )
        event = getattr(
            web3_contract_instance.events,
            event_name,
        )
        return event().getLogs(
            fromBlock=from_block,
            toBlock=to_block,
        )

    @reset_connection
    def contract_function_call(
        self,
        contract,
        contract_function_name: str,
        params: tuple,
        contract_address: str = None,
    ):
        """Call a read-only contract function by name and return its result."""
        if not contract_address:
            contract_address = contract.address
        contract_function = contract.load_contract(
            address=contract_address,
            provider=self,
        ).get_function_by_name(contract_function_name)(
            *params
        )
        return contract_function.call()
class NearRpcProvider:
    """Thin JSON-RPC 2.0 client for a NEAR blockchain node."""

    def __init__(self, rpc_addr):
        self._rpc_addr = rpc_addr

    def rpc_addr(self):
        """Return the node URL this provider posts requests to."""
        return self._rpc_addr

    def json_rpc(self, method, params, timeout=2):
        """POST a JSON-RPC call and return its ``result`` payload.

        Raises NearRpcProviderError when the node answers with an error
        object; transport-level failures surface via raise_for_status().
        """
        payload = {
            'method': method,
            'params': params,
            'id': 'dontcare',
            'jsonrpc': '2.0',
        }
        response = request_post(self.rpc_addr(), json=payload, timeout=timeout)
        response.raise_for_status()
        content = json.loads(response.content)
        if "error" in content:
            raise NearRpcProviderError(content["error"])
        return content["result"]

    def get_tx(self, tx_hash, tx_recipient_id):
        """Look up a transaction by hash and recipient account id."""
        return self.json_rpc('tx', [tx_hash, tx_recipient_id])

    def get_receipt(self, receipt_id):
        """Fetch a single receipt by id (EXPERIMENTAL endpoint)."""
        return self.json_rpc('EXPERIMENTAL_receipt', [receipt_id])

    def get_account(self, account_id, finality='optimistic'):
        """View the basic state of an account."""
        query = {
            "request_type": "view_account",
            "account_id": account_id,
            "finality": finality,
        }
        return self.json_rpc('query', query)

    def get_access_key_list(self, account_id, finality='optimistic'):
        """List every access key attached to an account."""
        query = {
            "request_type": "view_access_key_list",
            "account_id": account_id,
            "finality": finality,
        }
        return self.json_rpc('query', query)

    def get_access_key(self, account_id, public_key, finality='optimistic'):
        """View a single access key of an account."""
        query = {
            "request_type": "view_access_key",
            "account_id": account_id,
            "public_key": public_key,
            "finality": finality,
        }
        return self.json_rpc('query', query)
class Transaction(AbstractBaseModel):
    """
    Transaction model which represents transactions in a blockchain.

    Rows are created either from data already fetched from the chain or
    lazily via ``add_transaction`` when a hash is first looked up.
    """

    network = ForeignKey(
        to=Network,
        on_delete=PROTECT,
        related_name='network_transactions',
        verbose_name='Network',
    )
    hash = CharField(
        unique=True,
        max_length=255,
        verbose_name='Hash',
    )
    block_hash = CharField(
        max_length=ETH_LIKE_HASH_LENGTH,
        verbose_name='Block hash',
        blank=True,
        default='',
    )
    block_number = PositiveIntegerField(
        verbose_name='Block number',
        blank=True,
        default=0,
    )
    sender = CharField(
        max_length=255,
        verbose_name='Sender (from)',
        default='',
    )
    receiver = CharField(
        max_length=255,
        verbose_name='Receiver (to)',
        default='',
    )
    gas = DecimalField(
        max_digits=MAX_WEI_DIGITS,
        decimal_places=0,
        verbose_name='Gas limit',
        default=0,
    )
    gas_price = DecimalField(
        max_digits=MAX_WEI_DIGITS,
        decimal_places=0,
        verbose_name='Gas price',
        default=0,
    )
    nonce = PositiveIntegerField(
        verbose_name='Nonce',
        default=0,
    )
    sign_r = CharField(
        max_length=ETH_LIKE_HASH_LENGTH,
        verbose_name='R',
        default='',
    )
    sign_s = CharField(
        max_length=ETH_LIKE_HASH_LENGTH,
        verbose_name='S',
        default='',
    )
    sign_v = CharField(
        max_length=ETH_LIKE_HASH_LENGTH,
        verbose_name='V',
        default='',
    )
    index = PositiveIntegerField(
        verbose_name='Index',
        blank=True,
        default=0,
    )
    type = CharField(
        max_length=255,
        verbose_name='Type',
        default='',
        blank=True,
    )
    value = DecimalField(
        max_digits=MAX_WEI_DIGITS,
        decimal_places=0,
        verbose_name='Value',
        default=0,
    )
    data = JSONField(
        verbose_name='Data',
        default=dict,
        blank=True,
    )
    event_data = JSONField(
        verbose_name='Event data',
        default=dict,
        blank=True,
    )
    # BUG FIX: a trailing comma after the closing parenthesis previously
    # made ``logs`` a one-element tuple instead of a model field, so no
    # column was ever created for it.
    logs = JSONField(
        verbose_name='Logs',
        default=dict,
        blank=True,
    )

    class Meta:
        db_table = 'transactions'
        ordering = '-_created_at',

    def __str__(self) -> str:
        return f'{self.hash} in {self.network.title} (id: {self.id})'

    def save(self, *args, **kwargs) -> None:
        """Normalize None/HexBytes fields and case before persisting."""
        if self.block_number is None:
            self.block_number = 0
        if self.index is None:
            self.index = 0
        if self.block_hash is None:
            self.block_hash = ''
        elif isinstance(self.block_hash, HexBytes):
            self.block_hash = self.block_hash.hex()
        if not self.receiver:
            self.receiver = ''
        # Store signatures and type in lower case for consistent lookups.
        self.sign_r = self.sign_r.lower()
        self.sign_s = self.sign_s.lower()
        self.type = self.type.lower()
        return super().save(*args, **kwargs)

    def get_block_number(self) -> int:
        """Return the block number this transaction was mined in."""
        return self.block_number

    @classmethod
    def get_last_block_number(cls, network_id: UUID) -> int:
        """Return the block number of the newest stored transaction.

        Returns None (and logs a warning) when the network has no stored
        transactions yet.
        """
        transaction = cls.objects \
            .filter(
                network_id=network_id,
            ) \
            .last()
        if not transaction:
            warning(
                TRANSACTION_WARNING.format(
                    f'No transactions in the network with \"{network_id}\" id.'
                )
            )
            return None
        return transaction.block_number

    @classmethod
    def get_transaction(cls, network_id: UUID, txn_hash: HASH_LIKE):
        """Return the stored transaction, fetching and creating it if absent."""
        if isinstance(txn_hash, HexBytes):
            txn_hash = txn_hash.hex()
        transaction = cls.objects.filter(
            network_id=network_id,
            hash__iexact=txn_hash,
        ) \
            .first()
        if not transaction:
            error(
                TRANSACTION_ERROR.format(
                    f'Transaction with the \"{txn_hash}\" hash in'
                    f' the \"{network_id}\" network not found in database.'
                )
            )
            transaction = Transaction.add_transaction(
                network_id=network_id,
                txn_hash=txn_hash
            )
        return transaction

    @classmethod
    def add_transaction(cls, network_id: UUID, txn_hash: HASH_LIKE):
        """Fetch transaction data from the chain and persist it.

        Solana transactions are stored hash-only; NEAR receipts are
        parsed best-effort; everything else is treated as EVM-like and
        decoded via the matching contract, when one is registered.

        Raises TransactionError when the row cannot be created.
        """
        network = Network.get_network(network_id=network_id)
        try:
            if network.title == NETWORK_NAMES.get('solana'):
                transaction = cls.objects.create(
                    hash=txn_hash,
                    network=network,
                )
            elif network.title == NETWORK_NAMES.get('near'):
                try:
                    receipt = NearRpcProvider(network.rpc_url_list[0]).get_receipt(
                        receipt_id=txn_hash,
                    )
                    data, event_data = parce_near_receipt(
                        receipt=receipt,
                    )
                    transaction = cls.objects.create(
                        hash=txn_hash,
                        network=network,
                        sender=receipt
                        .get('receipt', {})
                        .get('Action', {})
                        .get('signer_id', ''),
                        receiver=receipt.get('receiver_id', ''),
                        data=data,
                        event_data=event_data,
                    )
                except Exception:
                    # Best-effort: if the receipt cannot be fetched or
                    # parsed, store the hash alone.
                    transaction = cls.objects.create(
                        hash=txn_hash,
                        network=network,
                    )
            else:
                transaction = CustomRpcProvider(network=network) \
                    .get_transaction(txn_hash=txn_hash)
                contract_address = transaction.to
                contract = network.network_contracts.filter(
                    address__iexact=contract_address
                ) \
                    .first()
                if not contract:
                    txn_data_decoded_input = dict()
                else:
                    txn_data_decoded_input = contract \
                        .get_decode_function_txn_input(
                            contract_blockchain_id=contract.blockchain_id,
                            txn_data_input=transaction.input,
                        )
                    # event_data = contract.get_event([])
                transaction = cls.objects.create(
                    hash=txn_hash,
                    block_hash=transaction.blockHash,
                    block_number=transaction.blockNumber,
                    network=network,
                    sender=transaction.get('from'),
                    receiver=transaction.to,
                    gas=transaction.gas,
                    gas_price=transaction.gasPrice,
                    nonce=transaction.nonce,
                    sign_r=transaction.r.hex(),
                    sign_s=transaction.s.hex(),
                    sign_v=transaction.v,
                    index=transaction.transactionIndex,
                    value=transaction.value,
                    data=txn_data_decoded_input,
                    # event_data=event_data,
                )
            info(
                TRANSACTION_INFO.format(
                    f'Transaction with the \"{transaction.hash}\"'
                    f' hash in the \"{network_id}\" created.'
                )
            )
            return transaction
        except IntegrityError as exception_error:
            # BUG FIX: the previous message referenced the possibly-unbound
            # local ``transaction`` (raising NameError and masking the real
            # error) and wrongly claimed the transaction was "created".
            exception(
                TRANSACTION_ERROR.format(
                    f'Transaction with the \"{txn_hash}\" hash'
                    f' in the \"{network_id}\" network was not created.'
                    f' Error descriptions: \"{exception_error}.\"'
                )
            )
            raise TransactionError from exception_error

    @staticmethod
    def get_transaction_by_hash(
        rpc_provider: CustomRpcProvider,
        txn_hash: HASH_LIKE,
    ):
        """Convenience wrapper around ``get_transaction`` for a provider."""
        # TODO: handle the TransactionNotFound exception.
        transaction = Transaction.get_transaction(
            rpc_provider.network.id,
            txn_hash=txn_hash,
        )
        info(
            TRANSACTION_INFO.format(
                f'Searching transaction by hash result: {transaction}.'
            )
        )
        return transaction

    @staticmethod
    def waiting_transaction_receipt(
        rpc_provider: CustomRpcProvider,
        txn_hash: HASH_LIKE,
        timeout: int = DEFAULT_TXN_TIMEOUT,
        poll_latency: int = 1
    ):
        """Wait for the transaction to be mined and return its receipt."""
        return rpc_provider.wait_for_transaction_receipt(
            txn_hash,
            timeout,
            poll_latency,
        )

    @staticmethod
    def get_transaction_receipt(
        rpc_provider: CustomRpcProvider,
        hash: HASH_LIKE,
    ):
        """Return the receipt of an already-mined transaction."""
        return rpc_provider.get_transaction_receipt(hash)

    @staticmethod
    def get_transaction_status(
        rpc_provider: CustomRpcProvider,
        txn_hash: HASH_LIKE,
    ):
        """Return the ``status`` field of the transaction's receipt."""
        transaction = Transaction.waiting_transaction_receipt(
            rpc_provider,
            txn_hash,
        )
        return transaction.status
# Filename: sppas/sppas/src/annotations/Intsint/intsint.py
# -*- coding: UTF-8 -*-
"""
..
---------------------------------------------------------------------
___ __ __ __ ___
/ | \ | \ | \ / the automatic
\__ |__/ |__/ |___| \__ annotation and
\ | | | | \ analysis
___/ | | | | ___/ of speech
http://www.sppas.org/
Use of this software is governed by the GNU Public License, version 3.
SPPAS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SPPAS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SPPAS. If not, see <http://www.gnu.org/licenses/>.
This banner notice must not be removed.
---------------------------------------------------------------------
src.annotations.Intsint.intsint.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import math
from ..annotationsexc import SmallSizeInputError
# ----------------------------------------------------------------------------
BIG_NUMBER = 32764
# ----------------------------------------------------------------------------
def octave(value):
    """Convert a positive value (Hz) to the octave (base-2 log) scale.

    Uses math.log2, which is clearer and more accurate than the
    equivalent ``math.log(value) / math.log(2)``.
    """
    return math.log2(value)
# ----------------------------------------------------------------------------
def linear(value):
    """Inverse of octave(): convert an octave-scale value back to Hz."""
    return 2 ** value
# -------------------------------------------------------------------
class Intsint(object):
"""Provide optimal INTSINT coding for anchor points.
:author: <NAME>
:organization: Laboratoire Parole et Langage, Aix-en-Provence, France
:contact: <EMAIL>
:license: GPL, v3
:copyright: Copyright (C) 2011-2018 <NAME>
"""
# parameters for data checking.
MIN_F0 = 60 # (Hz)
MAX_F0 = 600 # (Hz)
# parameters for optimization.
MIN_PAUSE = 0.5 # seconds
MIN_RANGE = 0.5 # octaves
MAX_RANGE = 2.5 # octaves
STEP_RANGE = 0.1 # octaves
MEAN_SHIFT = 50 # (Hz)
STEP_SHIFT = 1 # (Hz)
# parameters for target estimation.
HIGHER = 0.5
LOWER = 0.5
UP = 0.25
DOWN = 0.25
# List of "absolute" tones
TONES_ABSOLUTE = ['T', 'M', 'B']
# List of "relative" tones
TONES_RELATIVE = ['H', 'L', 'U', 'D', 'S']
# All tones
TONES = TONES_ABSOLUTE + TONES_RELATIVE
# -------------------------------------------------------------------
def __init__(self):
"""Create a new Intsint instance."""
self.best_intsint = None
self.best_estimate = None
self.intsint = []
self.estimates = []
self.targets = []
self.time = []
self.mid = 0
self.top = 0
self.bottom = 0
self.last_estimate = 0
self.best_mid = 0
self.best_range = 0
self.min_mean = 0
self.max_mean = 0
self.min_ss_error = 0
# -------------------------------------------------------------------
def reset(self):
"""Fix all member to their initial value."""
self.best_intsint = None
self.best_estimate = None
self.intsint = []
self.estimates = []
self.targets = []
self.time = []
self.mid = 0
self.top = 0
self.bottom = 0
self.last_estimate = 0
self.best_mid = 0
self.best_range = 0
self.min_mean = 0
self.max_mean = 0
self.min_ss_error = 0
# -------------------------------------------------------------------
def adjust_f0(self, f0):
"""Return F0 value within self range of values.
:param f0: (float) Input pitch value.
:returns: (float) Normalized pitch value.
"""
if f0 < Intsint.MIN_F0:
return Intsint.MIN_F0
if f0 > Intsint.MAX_F0:
return Intsint.MAX_F0
return f0
# -------------------------------------------------------------------
def init(self, momel_anchors):
"""Initialize INTSINT attributes from a list of targets.
:param momel_anchors: (list of tuple) List of time
(in seconds) and anchors (Hz).
"""
self.reset()
for (time, target) in momel_anchors:
# Convert f0 to octave scale
self.targets.append(octave(self.adjust_f0(target)))
self.time.append(time)
self.intsint = [""]*len(self.targets)
self.estimates = [0]*len(self.targets)
sum_octave = sum(self.targets)
mean_f0 = float(sum_octave) / float(len(self.targets))
linear_mean_f0 = round(linear(mean_f0))
self.min_mean = linear_mean_f0 - Intsint.MEAN_SHIFT
self.max_mean = linear_mean_f0 + Intsint.MEAN_SHIFT
self.min_ss_error = BIG_NUMBER
# -------------------------------------------------------------------
    def optimise(self, mid, _range):
        """Code the targets with the given key/range and keep the best fit.

        Assigns a tone to every target, estimates the resulting f0 values
        and accumulates the sum of squared errors; if this parametrization
        beats the best one seen so far, it is recorded in the best_*
        members.

        :param mid: (float) Candidate key, octave scale.
        :param _range: (float) Candidate register span, in octaves.
        """
        # The register is centered on 'mid'.
        self.top = mid + _range / 2
        self.bottom = mid - _range / 2
        # The first target can only be coded with an absolute tone:
        # whichever of Top / Bottom / Mid is closest.
        f0 = self.targets[0]
        if self.top - f0 < math.fabs(f0 - mid):
            self.intsint[0] = "T"
        elif f0 - self.bottom < math.fabs(f0 - mid):
            self.intsint[0] = "B"
        else:
            self.intsint[0] = "M"
        estimated = self.estimate(self.intsint[0], self.last_estimate)
        self.estimates[0] = estimated
        error = math.fabs(estimated - self.targets[0])
        ss_error = error * error
        # Relative tones are estimated from the previous estimate,
        # not from the previous raw target.
        self.last_estimate = estimated
        for i in range(1, len(self.targets)):
            target = self.targets[i]
            # after pause choose from (MTB)
            if self.time[i] - self.time[i - 1] > Intsint.MIN_PAUSE:
                if self.top - target < math.fabs(target - mid):
                    self.intsint[i] = "T"
                elif target - self.bottom < math.fabs(target - mid):
                    self.intsint[i] = "B"
                else:
                    self.intsint[i] = "M"
            # elsewhere any tone except M
            else:
                # Pick the tone whose estimate is closest to the target.
                min_difference = BIG_NUMBER
                best_tone = ""
                for tone in Intsint.TONES:
                    if tone != "M":
                        estimate = self.estimate(tone, self.last_estimate)
                        difference = math.fabs(target - estimate)
                        if difference < min_difference:
                            min_difference = difference
                            best_tone = tone
                self.intsint[i] = best_tone
            estimate = self.estimate(self.intsint[i], self.last_estimate)
            self.estimates[i] = estimate
            error = math.fabs(estimate - self.targets[i])
            ss_error += error * error
            self.last_estimate = estimate
        # Keep copies (slices) of the coding if it is the best one so far.
        if ss_error < self.min_ss_error:
            self.min_ss_error = ss_error
            self.best_range = _range
            self.best_mid = mid
            self.best_intsint = self.intsint[:]
            self.best_estimate = self.estimates[:]
# -------------------------------------------------------------------
def estimate(self, tone, last_anchor):
"""Estimate f0 from current tone and last target.
:param tone:
:param last_anchor:
"""
estimated = ""
if tone == "M":
estimated = self.mid
elif tone == "S":
estimated = last_anchor
elif tone == "T":
estimated = self.top
elif tone == "H":
estimated = last_anchor + \
(self.top - last_anchor) * Intsint.HIGHER
elif tone == "U":
estimated = last_anchor + \
(self.top - last_anchor) * Intsint.UP
elif tone == "B":
estimated = self.bottom
elif tone == "L":
estimated = last_anchor - \
(last_anchor - self.bottom) * Intsint.LOWER
elif tone == "D":
estimated = last_anchor - \
(last_anchor - self.bottom) * Intsint.DOWN
return estimated
# -------------------------------------------------------------------
def recode(self):
"""Recode within the parameters space.
mean +/- 50 Hz for key and [0.5..2.5 octaves] for range.
"""
_range = Intsint.MIN_RANGE
while _range < Intsint.MAX_RANGE:
lm = self.min_mean
while lm < self.max_mean:
self.mid = octave(lm)
self.optimise(self.mid, _range)
lm += Intsint.STEP_SHIFT
_range += Intsint.STEP_RANGE
# -------------------------------------------------------------------
def annotate(self, momel_anchors):
"""Provide optimal INTSINT coding for sequence of target points.
:param momel_anchors: (list of tuple) List of time (in seconds)
and anchors (Hz).
"""
if len(momel_anchors) < 2:
raise SmallSizeInputError(2)
self.init(momel_anchors)
self.recode()
return self.best_intsint
# Copyright 2020 QuantStack
# Distributed under the terms of the Modified BSD License.
import uuid
from typing import Optional
from fastapi import HTTPException, status
from sqlalchemy.orm import Session
from .db_models import ApiKey, ChannelMember, PackageMember
OWNER = 'owner'
MAINTAINER = 'maintainer'
MEMBER = 'member'
ROLES = [OWNER, MAINTAINER, MEMBER]
class Rules:
    """Authorization rules for channel and package operations.

    Resolves the current user either from an API key or from the web
    session, and provides role checks that raise HTTP 401/403 on failure.
    """

    def __init__(self, API_key: str, session: dict, db: "Session"):
        """Store the credential sources and the database session.

        :param API_key: API key sent by the client (may be empty/None).
        :param session: web session mapping; may contain a 'user_id' hex string.
        :param db: SQLAlchemy session used for all queries.
        """
        self.API_key = API_key
        self.session = session
        self.db = db

    def get_user(self) -> Optional[bytes]:
        """Return the current user id as raw bytes, or None if anonymous.

        The API key, when present, takes precedence over the session.
        """
        user_id = None
        if self.API_key:
            api_key = (
                self.db.query(ApiKey).filter(ApiKey.key == self.API_key).one_or_none()
            )
            if api_key:
                user_id = api_key.user_id
        else:
            user_id = self.session.get('user_id')
            if user_id:
                # The session stores the uuid as a hex string.
                user_id = uuid.UUID(user_id).bytes
        return user_id

    def assert_user(self) -> bytes:
        """Return the current user id or raise 401 if not logged in."""
        user_id = self.get_user()
        if not user_id:
            raise HTTPException(
                status_code=status.HTTP_401_UNAUTHORIZED,
                detail='Not logged in',
            )
        return user_id

    def _raise_no_permission(self):
        """Raise the standard 403 response shared by all role checks."""
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN, detail='No permission'
        )

    def has_channel_role(self, user_id, channel_name: str, roles: list):
        """Return the membership row if *user_id* has one of *roles* on the channel."""
        return (
            self.db.query(ChannelMember)
            .filter(ChannelMember.user_id == user_id)
            .filter(ChannelMember.channel_name == channel_name)
            .filter(ChannelMember.role.in_(roles))
            .one_or_none()
        )

    def require_channel_role(self, channel_name: str, roles: list):
        """Raise 401/403 unless the current user has a channel role in *roles*.

        Kept as an alias of assert_channel_roles (the two were duplicates).
        """
        self.assert_channel_roles(channel_name, roles)

    def has_package_role(
        self, user_id, channel_name: str, package_name: str, roles: list
    ):
        """Return the membership row if *user_id* has one of *roles* on the package."""
        return (
            self.db.query(PackageMember)
            .filter(PackageMember.user_id == user_id)
            .filter(PackageMember.channel_name == channel_name)
            .filter(PackageMember.package_name == package_name)
            .filter(PackageMember.role.in_(roles))
            .one_or_none()
        )

    def has_channel_or_package_roles(
        self,
        user_id,
        channel_name: str,
        channel_roles: list,
        package_name: str,
        package_roles: list,
    ):
        """Return truthy if the user has a matching channel OR package role."""
        return self.has_channel_role(
            user_id, channel_name, channel_roles
        ) or self.has_package_role(user_id, channel_name, package_name, package_roles)

    def assert_channel_roles(self, channel_name: str, channel_roles: list):
        """Raise 401/403 unless the current user has one of *channel_roles*."""
        user_id = self.assert_user()
        if not self.has_channel_role(user_id, channel_name, channel_roles):
            self._raise_no_permission()

    def assert_channel_or_package_roles(
        self,
        channel_name: str,
        channel_roles: list,
        package_name: str,
        package_roles: list,
    ):
        """Raise 401/403 unless the user holds a channel OR package role."""
        user_id = self.assert_user()
        if not self.has_channel_or_package_roles(
            user_id, channel_name, channel_roles, package_name, package_roles
        ):
            self._raise_no_permission()

    def assert_add_channel_member(self, channel_name: str, role: str):
        """Only channel owners may add members (the requested *role* is not checked)."""
        self.require_channel_role(channel_name, [OWNER])

    def assert_remove_channel_member(self, channel_name: str, role: str):
        """Only channel owners may remove members (the member's *role* is not checked)."""
        self.require_channel_role(channel_name, [OWNER])

    def assert_add_package_member(self, channel_name, package_name, role):
        """Channel owners/maintainers or package owners may add package members."""
        self.assert_channel_or_package_roles(
            channel_name, [OWNER, MAINTAINER], package_name, [OWNER]
        )

    def assert_create_api_key_roles(self, roles):
        """Check the user may delegate each requested role to an API key.

        Granting an OWNER role requires being OWNER of the target scope;
        other roles may also be granted by a MAINTAINER.
        """
        for role in roles:
            if role.package:
                required_package_role = (
                    [OWNER] if role.role == OWNER else [OWNER, MAINTAINER]
                )
                self.assert_channel_or_package_roles(
                    role.channel,
                    [OWNER, MAINTAINER],
                    role.package,
                    required_package_role,
                )
            else:
                required_channel_roles = (
                    [OWNER] if role.role == OWNER else [OWNER, MAINTAINER]
                )
                self.assert_channel_roles(role.channel, required_channel_roles)

    def assert_upload_file(self, channel_name, package_name):
        """Channel or package owners/maintainers may upload files."""
        self.assert_channel_or_package_roles(
            channel_name, [OWNER, MAINTAINER], package_name, [OWNER, MAINTAINER]
        )

    def assert_overwrite_package_version(self, channel_name, package_name):
        """Only owners (channel or package) may overwrite an existing version."""
        self.assert_channel_or_package_roles(
            channel_name, [OWNER], package_name, [OWNER]
        )